JohnJim0816
2021-05-06 02:07:56 +08:00
parent 747f3238c0
commit b17c8f4e41
107 changed files with 1439 additions and 987 deletions


@@ -5,13 +5,14 @@ Author: John
 Email: johnjim0816@gmail.com
 Date: 2021-03-12 16:14:34
 LastEditor: John
-LastEditTime: 2021-03-17 12:35:06
+LastEditTime: 2021-05-05 16:58:39
 Discription:
 Environment:
 '''
 import numpy as np
 from collections import defaultdict
 import torch
+import dill
 class FisrtVisitMC:
     ''' On-Policy First-Visit MC Control
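The `import dill` hoisted to module level here replaces the per-method imports removed further down in this commit. dill matters because the Q table is a `defaultdict` whose default factory is a lambda, which the standard `pickle` module (the default used by `torch.save`) cannot serialize. A minimal standalone sketch, not part of the commit (the `Q_table` and `"s0"` names are illustrative), showing the difference:

# Sketch (not from this commit): why dill is needed instead of plain pickle.
import pickle
import dill
import numpy as np
from collections import defaultdict

Q_table = defaultdict(lambda: np.zeros(4))   # same structure as the agent's Q table
Q_table["s0"][2] = 1.0

try:
    pickle.dumps(Q_table)                    # the lambda default_factory cannot be pickled
except Exception as err:
    print("pickle failed:", err)

restored = dill.loads(dill.dumps(Q_table))   # dill serializes the lambda without trouble
print(restored["s0"])                        # [0. 0. 1. 0.]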
@@ -20,14 +21,14 @@ class FisrtVisitMC:
         self.action_dim = action_dim
         self.epsilon = cfg.epsilon
         self.gamma = cfg.gamma
-        self.Q = defaultdict(lambda: np.zeros(action_dim))
+        self.Q_table = defaultdict(lambda: np.zeros(action_dim))
         self.returns_sum = defaultdict(float) # sum of returns
         self.returns_count = defaultdict(float)
     def choose_action(self,state):
         ''' e-greed policy '''
-        if state in self.Q.keys():
-            best_action = np.argmax(self.Q[state])
+        if state in self.Q_table.keys():
+            best_action = np.argmax(self.Q_table[state])
             action_probs = np.ones(self.action_dim, dtype=float) * self.epsilon / self.action_dim
             action_probs[best_action] += (1.0 - self.epsilon)
             action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
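For reference, the epsilon-greedy rule used in `choose_action` puts probability epsilon/action_dim on every action and adds the remaining 1 - epsilon to the greedy action taken from the renamed `Q_table`. A self-contained sketch of the same selection step (the names `epsilon_greedy` and `q_values` are illustrative, not from the diff):

import numpy as np

def epsilon_greedy(q_values: np.ndarray, epsilon: float) -> int:
    """Sample an action: uniform mass epsilon/|A| on every action,
    plus the remaining (1 - epsilon) on the current greedy action."""
    action_dim = len(q_values)
    probs = np.ones(action_dim) * epsilon / action_dim
    probs[np.argmax(q_values)] += 1.0 - epsilon
    return int(np.random.choice(action_dim, p=probs))

# Example: with epsilon=0.1 and Q=[0.2, 0.5, 0.1], action 1 is chosen
# with probability 0.1/3 + 0.9 ≈ 0.93.
print(epsilon_greedy(np.array([0.2, 0.5, 0.1]), epsilon=0.1))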
@@ -48,19 +49,17 @@ class FisrtVisitMC:
             # Calculate average return for this state over all sampled episodes
             self.returns_sum[sa_pair] += G
             self.returns_count[sa_pair] += 1.0
-            self.Q[state][action] = self.returns_sum[sa_pair] / self.returns_count[sa_pair]
+            self.Q_table[state][action] = self.returns_sum[sa_pair] / self.returns_count[sa_pair]
     def save(self,path):
         '''Save the Q table to a file
         '''
-        import dill
         torch.save(
-            obj=self.Q,
-            f=path,
+            obj=self.Q_table,
+            f=path+"Q_table",
             pickle_module=dill
         )
     def load(self, path):
         '''Load the Q table from a file
         '''
-        import dill
-        self.Q =torch.load(f=path,pickle_module=dill)
+        self.Q_table =torch.load(f=path+"Q_table",pickle_module=dill)
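The renamed `Q_table` is still updated by averaging first-visit returns, and `save`/`load` now round-trip through `path+"Q_table"` with dill as the pickle module. A small sketch of that round trip under the same assumptions (torch, dill, and a lambda-backed defaultdict; the temporary directory is illustrative, not the repo's path handling):

import torch
import dill
import numpy as np
from collections import defaultdict
from tempfile import mkdtemp

Q_table = defaultdict(lambda: np.zeros(4))
Q_table[(0, 1)][3] = 2.5                     # pretend one MC update has happened

path = mkdtemp() + "/"                       # the agent treats `path` as a directory-like prefix
torch.save(obj=Q_table, f=path + "Q_table", pickle_module=dill)

# Recent PyTorch releases default torch.load to weights_only=True;
# weights_only=False may be needed there when a custom pickle_module is used.
loaded = torch.load(f=path + "Q_table", pickle_module=dill)
print(loaded[(0, 1)])                        # [0.  0.  0.  2.5]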