JohnJim0816
2021-03-23 16:10:11 +08:00
parent d4690c2058
commit bf0f2990cf
198 changed files with 1668 additions and 1545 deletions

View File

@@ -2,10 +2,10 @@
 ## Environment notes
-See "The Racetrack" in the [environment notes](https://github.com/datawhalechina/leedeeprl-notes/blob/master/codes/env_info.md)
+See "The Racetrack" in the [environment notes](https://github.com/JohnJim0816/reinforcement-learning-tutorials/blob/master/env_info.md)
 ## First-Visit MC introduction
-Pseudocode
+### Pseudocode
 ![mc_control_algo](assets/mc_control_algo.png)
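
The linked image is the pseudocode for on-policy first-visit MC control. As a textual companion, here is a minimal sketch of the update step it describes; the helper name `first_visit_mc_update` and the `returns_sum`/`returns_count` bookkeeping are illustrative assumptions, not necessarily this repo's exact implementation:

```python
import numpy as np
from collections import defaultdict

def first_visit_mc_update(Q, returns_sum, returns_count, episode, gamma=0.9):
    """One first-visit MC update from a finished episode.

    episode is a list of (state, action, reward) tuples; Q maps each
    state to an np.array of action values.
    """
    # Discounted return G_t following each step, computed backwards.
    G, returns = 0.0, [0.0] * len(episode)
    for t in reversed(range(len(episode))):
        G = gamma * G + episode[t][2]
        returns[t] = G
    # Average the return from the *first* visit to each (state, action) pair.
    seen = set()
    for t, (state, action, _) in enumerate(episode):
        if (state, action) in seen:
            continue
        seen.add((state, action))
        returns_sum[(state, action)] += returns[t]
        returns_count[(state, action)] += 1.0
        Q[state][action] = returns_sum[(state, action)] / returns_count[(state, action)]

# Usage sketch with toy sizes/data (states must be hashable, e.g. tuples):
n_actions = 9
Q = defaultdict(lambda: np.zeros(n_actions))
returns_sum, returns_count = defaultdict(float), defaultdict(float)
episode = [((0, 0), 1, -1.0), ((0, 1), 2, 10.0)]
first_visit_mc_update(Q, returns_sum, returns_count, episode)
```

Combined with an epsilon-greedy `choose_action` (as in the agent below), this averaging step is the policy-evaluation half of the control loop in the pseudocode.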

View File

@@ -5,7 +5,7 @@ Author: John
 Email: johnjim0816@gmail.com
 Date: 2021-03-12 16:14:34
 LastEditor: John
-LastEditTime: 2021-03-12 16:15:12
+LastEditTime: 2021-03-17 12:35:06
 Description:
 Environment:
 '''
@@ -26,11 +26,13 @@ class FisrtVisitMC:
     def choose_action(self,state):
        ''' e-greedy policy '''
-        best_action = np.argmax(self.Q[state])
-        # action = best_action
-        action_probs = np.ones(self.n_actions, dtype=float) * self.epsilon / self.n_actions
-        action_probs[best_action] += (1.0 - self.epsilon)
-        action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
+        if state in self.Q.keys():
+            best_action = np.argmax(self.Q[state])
+            action_probs = np.ones(self.n_actions, dtype=float) * self.epsilon / self.n_actions
+            action_probs[best_action] += (1.0 - self.epsilon)
+            action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
+        else:
+            action = np.random.randint(0,self.n_actions)
         return action
     def update(self,one_ep_transition):
         # Find all (state, action) pairs we've visited in this one_ep_transition
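
A note on the `choose_action` change above: `self.Q` only holds entries for states the agent has already visited, so indexing an unseen state either raises a `KeyError` (plain dict) or, with a `defaultdict`, silently returns all zeros whose argmax always picks action 0 while also inserting a spurious entry. The new guard sidesteps both by falling back to a uniformly random action for unknown states, which additionally guarantees exploration early in training.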

Binary file not shown (before: 104 KiB).

Binary file not shown (before: 29 KiB).

View File

@@ -5,7 +5,7 @@ Author: John
 Email: johnjim0816@gmail.com
 Date: 2021-03-11 14:26:44
 LastEditor: John
-LastEditTime: 2021-03-12 16:15:46
+LastEditTime: 2021-03-17 12:35:36
 Description:
 Environment:
 '''
@@ -35,7 +35,7 @@ class MCConfig:
     def __init__(self):
         self.epsilon = 0.15 # epsilon: probability of selecting a random action
         self.gamma = 0.9 # gamma: discount factor
-        self.n_episodes = 300
+        self.n_episodes = 150
         self.n_steps = 2000
 def get_mc_args():
@@ -58,8 +58,8 @@ def mc_train(cfg,env,agent):
         one_ep_transition = []
         state = env.reset()
         ep_reward = 0
-        # while True:
-        for t in range(cfg.n_steps):
+        while True:
+        # for t in range(cfg.n_steps):
             action = agent.choose_action(state)
             next_state, reward, done = env.step(action)
             ep_reward+=reward
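
The loop change above (together with halving `n_episodes` from 300 to 150 in `MCConfig`) switches training from fixed-length episodes to running each episode until the environment reports `done`. A hedged compromise, not the repo's code, is to keep `cfg.n_steps` as a hard cap so a policy that never reaches the finish line cannot spin forever; this sketch replaces the inner loop body shown above and assumes `one_ep_transition` collects `(state, action, reward)` tuples for `agent.update()`:

```python
for t in range(cfg.n_steps):          # hard cap as a safety net
    action = agent.choose_action(state)
    next_state, reward, done = env.step(action)
    ep_reward += reward
    # (state, action, reward) ordering is an assumption about what
    # agent.update() expects from one_ep_transition.
    one_ep_transition.append((state, action, reward))
    state = next_state
    if done:                          # normal exit: episode finished
        break
```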

Binary file not shown (before: 40 KiB).

Binary file not shown (after: 45 KiB).