This commit is contained in:
JohnJim0816
2021-04-16 14:59:23 +08:00
parent 312b57fdff
commit e4690ac89f
71 changed files with 805 additions and 153 deletions

View File

@@ -14,16 +14,16 @@ import gym
from A2C.multiprocessing_env import SubprocVecEnv
# num_envs = 16
# env_name = "Pendulum-v0"
# env = "Pendulum-v0"
def make_envs(num_envs=16,env_name="Pendulum-v0"):
def make_envs(num_envs=16,env="Pendulum-v0"):
''' Create multiple sub-environments
'''
num_envs = 16
env_name = "CartPole-v0"
env = "CartPole-v0"
def make_env():
def _thunk():
env = gym.make(env_name)
env = gym.make(env)
return env
return _thunk
@@ -34,10 +34,10 @@ def make_envs(num_envs=16,env_name="Pendulum-v0"):
# if __name__ == "__main__":
# num_envs = 16
# env_name = "CartPole-v0"
# env = "CartPole-v0"
# def make_env():
# def _thunk():
# env = gym.make(env_name)
# env = gym.make(env)
# return env
# return _thunk
@@ -45,4 +45,4 @@ def make_envs(num_envs=16,env_name="Pendulum-v0"):
# envs = [make_env() for i in range(num_envs)]
# envs = SubprocVecEnv(envs)
if __name__ == "__main__":
envs = make_envs(num_envs=16,env_name="CartPole-v0")
envs = make_envs(num_envs=16,env="CartPole-v0")

View File

@@ -5,16 +5,20 @@
@Email: johnjim0816@gmail.com
@Date: 2020-06-11 20:58:21
@LastEditor: John
LastEditTime: 2021-03-20 16:58:04
LastEditTime: 2021-04-05 11:14:39
@Discription:
@Environment: python 3.7.9
'''
import sys,os
sys.path.append(os.getcwd()) # add current terminal path
curr_path = os.path.dirname(__file__)
parent_path=os.path.dirname(curr_path)
sys.path.append(parent_path) # add current terminal path to sys.path
import torch
import gym
import datetime
from A2C.agent import A2C
from common.utils import save_results,make_dir,del_empty_dir

View File

@@ -5,7 +5,7 @@ Author: John
Email: johnjim0816@gmail.com
Date: 2021-03-20 17:43:17
LastEditor: John
LastEditTime: 2021-03-20 19:36:24
LastEditTime: 2021-04-05 11:19:20
Discription:
Environment:
'''
@@ -40,7 +40,7 @@ class A2CConfig:
self.eval_eps = 200
self.eval_steps = 200
self.target_update = 4
self.hidden_dim=256
self.hidden_dim = 256
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -76,7 +76,7 @@ class A2C:
def train(cfg,env,agent):
n_states = env.observation_space.shape[0]
n_actions = env.action_space.n
actor_critic = ActorCritic(n_states, n_actions, hidden_dim)
actor_critic = ActorCritic(n_states, n_actions, cfg.hidden_dim)
ac_optimizer = optim.Adam(actor_critic.parameters(), lr=learning_rate)
all_lengths = []
@@ -112,7 +112,7 @@ def train(cfg,env,agent):
all_lengths.append(steps)
average_lengths.append(np.mean(all_lengths[-10:]))
if episode % 10 == 0:
sys.stdout.write("episode: {}, reward: {}, total length: {}, average length: {} \n".format(episode, np.sum(rewards), steps, average_lengths[-1]))
sys.stdout.write("episode: {}, reward: {}, total length: {}, average length: {} \n".format(episode, np.sum(rewards), steps+1, average_lengths[-1]))
break
# compute Q values
@@ -154,7 +154,7 @@ def train(cfg,env,agent):
plt.show()
if __name__ == "__main__":
cfg = A2CConfig
cfg = A2CConfig()
env = gym.make("CartPole-v0")
n_states = env.observation_space.shape[0]
n_actions = env.action_space.n

View File

@@ -5,7 +5,7 @@
@Email: johnjim0816@gmail.com
@Date: 2020-06-11 20:58:21
@LastEditor: John
LastEditTime: 2021-03-31 01:04:48
LastEditTime: 2021-04-08 21:50:13
@Discription:
@Environment: python 3.7.7
'''
@@ -35,6 +35,7 @@ if not os.path.exists(RESULT_PATH): os.mkdir(RESULT_PATH)
class DDPGConfig:
def __init__(self):
self.env = 'Pendulum-v0'
self.algo = 'DDPG'
self.gamma = 0.99
self.critic_lr = 1e-3
@@ -81,6 +82,7 @@ def train(cfg,env,agent):
if __name__ == "__main__":
cfg = DDPGConfig()
env =
env = NormalizedActions(gym.make("Pendulum-v0"))
env.seed(1) # set random seed for env
state_dim = env.observation_space.shape[0]

View File

@@ -5,7 +5,7 @@
@Email: johnjim0816@gmail.com
@Date: 2020-06-12 00:48:57
@LastEditor: John
LastEditTime: 2021-04-04 00:26:47
LastEditTime: 2021-04-13 19:03:39
@Discription:
@Environment: python 3.7.7
'''
@@ -21,15 +21,13 @@ from DQN.agent import DQN
from common.plot import plot_rewards
from common.utils import save_results,make_dir,del_empty_dir
SEQUENCE = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # obtain current time
SAVED_MODEL_PATH = curr_path+"/saved_model/"+SEQUENCE+'/' # path to save model
RESULT_PATH = curr_path+"/results/"+SEQUENCE+'/' # path to save rewards
make_dir(curr_path+"/saved_model/",curr_path+"/results/")
del_empty_dir(curr_path+"/saved_model/",curr_path+"/results/")
curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # obtain current time
class DQNConfig:
def __init__(self):
self.algo = "DQN" # name of algo
self.env = 'CartPole-v0'
self.result_path = curr_path+"/results/" +self.env+'/'+curr_time+'/' # path to save results
self.gamma = 0.95
self.epsilon_start = 1 # initial epsilon for the e-greedy policy
self.epsilon_end = 0.01
@@ -37,7 +35,7 @@ class DQNConfig:
self.lr = 0.0001 # learning rate
self.memory_capacity = 10000 # replay memory capacity
self.batch_size = 32
self.train_eps = 300 # number of training episodes
self.train_eps = 10 # number of training episodes
self.target_update = 2 # update frequency of the target net
self.eval_eps = 20 # number of evaluation episodes
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # check gpu
@@ -72,14 +70,13 @@ def train(cfg,env,agent):
if __name__ == "__main__":
cfg = DQNConfig()
env = gym.make('CartPole-v0')
env = gym.make(cfg.env)
env.seed(1)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
agent = DQN(state_dim,action_dim,cfg)
rewards,ma_rewards = train(cfg,env,agent)
make_dir(SAVED_MODEL_PATH,RESULT_PATH)
agent.save(path=SAVED_MODEL_PATH)
save_results(rewards,ma_rewards,tag='train',path=RESULT_PATH)
plot_rewards(rewards,ma_rewards,tag="train",algo = cfg.algo,path=RESULT_PATH)
del_empty_dir(SAVED_MODEL_PATH,RESULT_PATH)
make_dir(cfg.result_path)
agent.save(path=cfg.result_path)
save_results(rewards,ma_rewards,tag='train',path=cfg.result_path)
plot_rewards(rewards,ma_rewards,tag="train",algo = cfg.algo,path=cfg.result_path)

Binary file not shown. (Before: 50 KiB)

Binary file not shown. (Before: 51 KiB)

Binary file not shown. (After: 48 KiB)

codes/DQN/task1.py Normal file
View File

@@ -0,0 +1,88 @@
#!/usr/bin/env python
# coding=utf-8
'''
@Author: John
@Email: johnjim0816@gmail.com
@Date: 2020-06-12 00:48:57
@LastEditor: John
LastEditTime: 2021-04-13 18:49:44
@Discription:
@Environment: python 3.7.7
'''
import sys,os
curr_path = os.path.dirname(__file__)
parent_path=os.path.dirname(curr_path)
sys.path.append(parent_path) # add current terminal path to sys.path
import gym
import torch
import datetime
from DQN.agent import DQN
from common.plot import plot_rewards
from common.utils import save_results,make_dir,del_empty_dir
SEQUENCE = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # obtain current time
SAVED_MODEL_PATH = curr_path+"/saved_model/"+SEQUENCE+'/' # path to save model
RESULT_PATH = curr_path+"/results/"+SEQUENCE+'/' # path to save rewards
make_dir(curr_path+"/saved_model/",curr_path+"/results/")
del_empty_dir(curr_path+"/saved_model/",curr_path+"/results/")
class DQNConfig:
def __init__(self):
self.env = 'LunarLander-v2'
self.algo = "DQN" # name of algo
self.gamma = 0.95
self.epsilon_start = 1 # initial epsilon for the e-greedy policy
self.epsilon_end = 0.01
self.epsilon_decay = 500
self.lr = 0.0001 # learning rate
self.memory_capacity = 1000000 # replay memory capacity
self.batch_size = 64
self.train_eps = 300 # number of training episodes
self.train_steps = 1000
self.target_update = 2 # update frequency of the target net
self.eval_eps = 20 # number of evaluation episodes
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # check gpu
self.hidden_dim = 256 # dimension of the network's hidden layers
def train(cfg,env,agent):
print('Start to train !')
rewards = []
ma_rewards = [] # moving average reward
for i_episode in range(cfg.train_eps):
state = env.reset()
ep_reward = 0
for i_step in range(cfg.train_steps):
action = agent.choose_action(state)
next_state, reward, done, _ = env.step(action)
ep_reward += reward
agent.memory.push(state, action, reward, next_state, done)
state = next_state
agent.update()
if done:
break
if i_episode % cfg.target_update == 0:
agent.target_net.load_state_dict(agent.policy_net.state_dict())
print('Episode:{}/{}, Reward:{}'.format(i_episode+1,cfg.train_eps,ep_reward))
rewards.append(ep_reward)
# compute moving average reward
if ma_rewards:
ma_rewards.append(0.9*ma_rewards[-1]+0.1*ep_reward)
else:
ma_rewards.append(ep_reward)
print('Complete training')
return rewards,ma_rewards
if __name__ == "__main__":
cfg = DQNConfig()
env = gym.make(cfg.env)
env.seed(1)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
agent = DQN(state_dim,action_dim,cfg)
rewards,ma_rewards = train(cfg,env,agent)
make_dir(SAVED_MODEL_PATH,RESULT_PATH)
agent.save(path=SAVED_MODEL_PATH)
save_results(rewards,ma_rewards,tag='train',path=RESULT_PATH)
plot_rewards(rewards,ma_rewards,tag="train",algo = cfg.algo,path=RESULT_PATH)
del_empty_dir(SAVED_MODEL_PATH,RESULT_PATH)

View File

@@ -5,13 +5,11 @@
@Email: johnjim0816@gmail.com
@Date: 2020-06-11 10:01:09
@LastEditor: John
LastEditTime: 2021-03-29 20:23:48
LastEditTime: 2021-04-05 11:06:23
@Discription:
@Environment: python 3.7.7
'''
import sys,os
from pathlib import Path
import sys,os
curr_path = os.path.dirname(__file__)
parent_path=os.path.dirname(curr_path)
sys.path.append(parent_path) # add current terminal path to sys.path

codes/PPO/README.md Normal file
View File

@@ -0,0 +1,141 @@
## Overview
PPO is an on-policy algorithm with fairly good performance. Its predecessor is the TRPO algorithm, and it likewise belongs to the policy-gradient family; it is currently OpenAI's default reinforcement learning algorithm. For the underlying theory, see [this PPO explanation](https://datawhalechina.github.io/easy-rl/#/chapter5/chapter5). PPO has two main variants: one adds a KL penalty, the other uses a clipping method; this article implements the latter, i.e. ```PPO-clip```.
## Pseudocode
To implement the algorithm you first need to understand its pseudocode, which is as follows:
![在这里插入图片描述](assets/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0pvaG5KaW0w,size_16,color_FFFFFF,t_70.png)
This is a reasonably suitable figure found via Google (left unmodified here). The ```k``` above denotes the ```k```-th episode. Step 6 optimizes the loss function here (the part after ```argmax```) with stochastic gradient descent, which may be a little hard to follow; see the [PPO paper](https://arxiv.org/abs/1707.06347), as shown below:
![在这里插入图片描述](assets/20210323154236878.png)
Step 7 is simply a squared loss, i.e. the squared difference between the actual return and the predicted return.
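For reference, the objective maximized in step 6 is the clipped surrogate from the PPO paper, and step 7 corresponds to the value loss; in the paper's notation (with $\hat{A}_t$ the advantage estimate and $\epsilon$ the clip range, e.g. 0.2):
```latex
L^{CLIP}(\theta) = \hat{\mathbb{E}}_t\Big[\min\big(r_t(\theta)\,\hat{A}_t,\ \mathrm{clip}(r_t(\theta),\,1-\epsilon,\,1+\epsilon)\,\hat{A}_t\big)\Big],
\qquad r_t(\theta) = \frac{\pi_\theta(a_t\mid s_t)}{\pi_{\theta_{\mathrm{old}}}(a_t\mid s_t)},
\qquad L^{VF}(\phi) = \big(\hat{R}_t - V_\phi(s_t)\big)^2
```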
## Implementation
[Click here for the full code](https://github.com/JohnJim0816/rl-tutorials/tree/master/PPO)
### PPOMemory
First, step 3 requires collecting a trajectory, so we can define a ```PPOMemory``` to store the relevant quantities:
```python
import numpy as np

class PPOMemory:
def __init__(self, batch_size):
self.states = []
self.probs = []
self.vals = []
self.actions = []
self.rewards = []
self.dones = []
self.batch_size = batch_size
def sample(self):
batch_step = np.arange(0, len(self.states), self.batch_size)
indices = np.arange(len(self.states), dtype=np.int64)
np.random.shuffle(indices)
batches = [indices[i:i+self.batch_size] for i in batch_step]
return np.array(self.states),\
np.array(self.actions),\
np.array(self.probs),\
np.array(self.vals),\
np.array(self.rewards),\
np.array(self.dones),\
batches
def push(self, state, action, probs, vals, reward, done):
self.states.append(state)
self.actions.append(action)
self.probs.append(probs)
self.vals.append(vals)
self.rewards.append(reward)
self.dones.append(done)
def clear(self):
self.states = []
self.probs = []
self.actions = []
self.rewards = []
self.dones = []
self.vals = []
```
Here the ```push``` function stores the collected quantities in memory, and ```sample``` draws them back as shuffled mini-batches, which is convenient for the stochastic gradient descent in step 6.
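A minimal usage sketch of this buffer (the 4-dimensional state and the transition values below are made up purely for illustration):
```python
import numpy as np

# assumes the PPOMemory class defined above
memory = PPOMemory(batch_size=5)
for _ in range(20):  # pretend 20 transitions were collected
    state = np.random.randn(4)  # e.g. a CartPole-v0 observation
    memory.push(state, action=0, probs=-0.69, vals=0.0, reward=1.0, done=False)
states, actions, probs, vals, rewards, dones, batches = memory.sample()
print(len(batches), [len(b) for b in batches])  # 4 mini-batches of 5 shuffled indices
memory.clear()  # empty the buffer before collecting the next trajectory
```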
### PPO model
The model is simply the two networks, an actor and a critic:
```python
import torch.nn as nn
from torch.distributions.categorical import Categorical
class Actor(nn.Module):
def __init__(self,state_dim, action_dim,
hidden_dim=256):
super(Actor, self).__init__()
self.actor = nn.Sequential(
nn.Linear(state_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, action_dim),
nn.Softmax(dim=-1)
)
def forward(self, state):
dist = self.actor(state)
dist = Categorical(dist)
return dist
class Critic(nn.Module):
def __init__(self, state_dim,hidden_dim=256):
super(Critic, self).__init__()
self.critic = nn.Sequential(
nn.Linear(state_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, 1)
)
def forward(self, state):
value = self.critic(state)
return value
```
Here the Actor produces a probability distribution over actions (a Categorical; other distributions are possible, see ```torch.distributions```), while the critic maps the current state to a single value. The critic's input dimension could also be ```state_dim+action_dim```, i.e. feeding the action into the critic as well, which tends to work a bit better; interested readers can try it.
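To make the interface concrete, here is a small sketch of how the two networks are queried during a rollout (the dimensions 4 and 2 are those of CartPole-v0 and are only illustrative):
```python
import torch

state_dim, action_dim, hidden_dim = 4, 2, 256
actor = Actor(state_dim, action_dim, hidden_dim)
critic = Critic(state_dim, hidden_dim)
state = torch.randn(1, state_dim)  # a batch with a single observation
dist = actor(state)                # Categorical distribution over actions
action = dist.sample()             # sampled action index
log_prob = dist.log_prob(action)   # log-probability, stored for the ratio later
value = critic(state)              # state-value estimate V(s)
print(action.item(), log_prob.item(), value.item())
```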
### PPO update
We define an ```update``` function that mainly implements steps 6 and 7 of the pseudocode:
```python
def update(self):
for _ in range(self.n_epochs):
state_arr, action_arr, old_prob_arr, vals_arr,\
reward_arr, dones_arr, batches = \
self.memory.sample()
values = vals_arr
### compute advantage ###
advantage = np.zeros(len(reward_arr), dtype=np.float32)
for t in range(len(reward_arr)-1):
discount = 1
a_t = 0
for k in range(t, len(reward_arr)-1):
a_t += discount*(reward_arr[k] + self.gamma*values[k+1]*\
(1-int(dones_arr[k])) - values[k])
discount *= self.gamma*self.gae_lambda
advantage[t] = a_t
advantage = torch.tensor(advantage).to(self.device)
### SGD ###
values = torch.tensor(values).to(self.device)
for batch in batches:
states = torch.tensor(state_arr[batch], dtype=torch.float).to(self.device)
old_probs = torch.tensor(old_prob_arr[batch]).to(self.device)
actions = torch.tensor(action_arr[batch]).to(self.device)
dist = self.actor(states)
critic_value = self.critic(states)
critic_value = torch.squeeze(critic_value)
new_probs = dist.log_prob(actions)
prob_ratio = new_probs.exp() / old_probs.exp()
weighted_probs = advantage[batch] * prob_ratio
weighted_clipped_probs = torch.clamp(prob_ratio, 1-self.policy_clip,
1+self.policy_clip)*advantage[batch]
actor_loss = -torch.min(weighted_probs, weighted_clipped_probs).mean()
returns = advantage[batch] + values[batch]
critic_loss = (returns-critic_value)**2
critic_loss = critic_loss.mean()
total_loss = actor_loss + 0.5*critic_loss
self.actor_optimizer.zero_grad()
self.critic_optimizer.zero_grad()
total_loss.backward()
self.actor_optimizer.step()
self.critic_optimizer.step()
self.memory.clear()
```
This part first retrieves the collected trajectory from memory, then computes the GAE advantage, then updates the networks with stochastic gradient descent, and finally clears the memory so that the next trajectory can be collected.
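The double loop above computes generalized advantage estimation (GAE); written out, with $d_k$ the done flag and $\gamma$, $\lambda$ corresponding to the code's ```gamma``` and ```gae_lambda```:
```latex
\delta_k = r_k + \gamma\,(1 - d_k)\,V(s_{k+1}) - V(s_k), \qquad
\hat{A}_t = \sum_{k=t}^{T-2} (\gamma\lambda)^{\,k-t}\,\delta_k
```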
The final result looks like this:
![在这里插入图片描述](assets/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0pvaG5KaW0w,size_16,color_FFFFFF,t_70-20210405110725113.png)

View File

@@ -5,7 +5,7 @@ Author: John
Email: johnjim0816@gmail.com
Date: 2021-03-23 15:17:42
LastEditor: John
LastEditTime: 2021-03-23 15:52:34
LastEditTime: 2021-04-11 01:24:24
Discription:
Environment:
'''
@@ -17,16 +17,18 @@ from PPO.model import Actor,Critic
from PPO.memory import PPOMemory
class PPO:
def __init__(self, state_dim, action_dim,cfg):
self.env = cfg.env
self.gamma = cfg.gamma
self.policy_clip = cfg.policy_clip
self.n_epochs = cfg.n_epochs
self.gae_lambda = cfg.gae_lambda
self.device = cfg.device
self.actor = Actor(state_dim, action_dim).to(self.device)
self.critic = Critic(state_dim).to(self.device)
self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=cfg.lr)
self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=cfg.lr)
self.actor = Actor(state_dim, action_dim,cfg.hidden_dim).to(self.device)
self.critic = Critic(state_dim,cfg.hidden_dim).to(self.device)
self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=cfg.actor_lr)
self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=cfg.critic_lr)
self.memory = PPOMemory(cfg.batch_size)
self.loss = 0
def choose_action(self, observation):
state = torch.tensor([observation], dtype=torch.float).to(self.device)
@@ -74,6 +76,7 @@ class PPO:
critic_loss = (returns-critic_value)**2
critic_loss = critic_loss.mean()
total_loss = actor_loss + 0.5*critic_loss
self.loss = total_loss
self.actor_optimizer.zero_grad()
self.critic_optimizer.zero_grad()
total_loss.backward()
@@ -81,13 +84,13 @@ class PPO:
self.critic_optimizer.step()
self.memory.clear()
def save(self,path):
actor_checkpoint = os.path.join(path, 'actor_torch_ppo.pt')
critic_checkpoint= os.path.join(path, 'critic_torch_ppo.pt')
actor_checkpoint = os.path.join(path, self.env+'_actor.pt')
critic_checkpoint= os.path.join(path, self.env+'_critic.pt')
torch.save(self.actor.state_dict(), actor_checkpoint)
torch.save(self.critic.state_dict(), critic_checkpoint)
def load(self,path):
actor_checkpoint = os.path.join(path, 'actor_torch_ppo.pt')
critic_checkpoint= os.path.join(path, 'critic_torch_ppo.pt')
actor_checkpoint = os.path.join(path, self.env+'_actor.pt')
critic_checkpoint= os.path.join(path, self.env+'_critic.pt')
self.actor.load_state_dict(torch.load(actor_checkpoint))
self.critic.load_state_dict(torch.load(critic_checkpoint))

Binary file not shown. (After: 13 KiB)

View File

@@ -5,12 +5,14 @@ Author: John
Email: johnjim0816@gmail.com
Date: 2021-03-22 16:18:10
LastEditor: John
LastEditTime: 2021-03-23 15:52:52
LastEditTime: 2021-04-11 01:24:41
Discription:
Environment:
'''
import sys,os
sys.path.append(os.getcwd()) # add current terminal path to sys.path
curr_path = os.path.dirname(__file__)
parent_path=os.path.dirname(curr_path)
sys.path.append(parent_path) # add current terminal path to sys.path
import gym
import numpy as np
import torch
@@ -33,15 +35,18 @@ if not os.path.exists(RESULT_PATH): # check if the folder exists
class PPOConfig:
def __init__(self) -> None:
self.env = 'CartPole-v0'
self.algo = 'PPO'
self.batch_size = 5
self.gamma=0.99
self.n_epochs = 4
self.lr = 0.0003
self.actor_lr = 0.0003
self.critic_lr = 0.0003
self.gae_lambda=0.95
self.policy_clip=0.2
self.hidden_dim = 256
self.update_fre = 20 # frequency of agent update
self.train_eps = 250 # max training episodes
self.train_eps = 300 # max training episodes
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # check gpu
def train(cfg,env,agent):
@@ -70,7 +75,8 @@ def train(cfg,env,agent):
else:
ma_rewards.append(ep_reward)
avg_reward = np.mean(rewards[-100:])
if avg_reward > best_reward:
best_reward = avg_reward
agent.save(path=SAVED_MODEL_PATH)
print('Episode:{}/{}, Reward:{:.1f}, avg reward:{:.1f}, Done:{}'.format(i_episode+1,cfg.train_eps,ep_reward,avg_reward,done))
@@ -78,7 +84,7 @@ def train(cfg,env,agent):
if __name__ == '__main__':
cfg = PPOConfig()
env = gym.make('CartPole-v0')
env = gym.make(cfg.env)
env.seed(1)
state_dim=env.observation_space.shape[0]
action_dim=env.action_space.n

View File

@@ -5,7 +5,7 @@ Author: John
Email: johnjim0816@gmail.com
Date: 2021-03-23 15:29:24
LastEditor: John
LastEditTime: 2021-03-23 15:29:52
LastEditTime: 2021-04-08 22:36:43
Discription:
Environment:
'''
@@ -13,7 +13,7 @@ import torch.nn as nn
from torch.distributions.categorical import Categorical
class Actor(nn.Module):
def __init__(self,state_dim, action_dim,
hidden_dim=256):
hidden_dim):
super(Actor, self).__init__()
self.actor = nn.Sequential(
@@ -30,7 +30,7 @@ class Actor(nn.Module):
return dist
class Critic(nn.Module):
def __init__(self, state_dim,hidden_dim=256):
def __init__(self, state_dim,hidden_dim):
super(Critic, self).__init__()
self.critic = nn.Sequential(
nn.Linear(state_dim, hidden_dim),

Binary file not shown. (Before: 63 KiB)

Binary file not shown.

Binary file not shown. (After: 58 KiB)

Binary file not shown.

codes/PPO/task1.py Normal file
View File

@@ -0,0 +1,97 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: John
Email: johnjim0816@gmail.com
Date: 2021-03-22 16:18:10
LastEditor: John
LastEditTime: 2021-04-11 01:25:43
Discription:
Environment:
'''
import sys,os
curr_path = os.path.dirname(__file__)
parent_path=os.path.dirname(curr_path)
sys.path.append(parent_path) # add current terminal path to sys.path
import gym
import numpy as np
import torch
import datetime
from PPO.agent import PPO
from common.plot import plot_rewards
from common.utils import save_results
SEQUENCE = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # obtain current time
SAVED_MODEL_PATH = os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"+SEQUENCE+'/' # path to save model
if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"): # check if folder exists
os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/")
if not os.path.exists(SAVED_MODEL_PATH): # check if folder exists
os.mkdir(SAVED_MODEL_PATH)
RESULT_PATH = os.path.split(os.path.abspath(__file__))[0]+"/results/"+SEQUENCE+'/' # path to save rewards
if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/results/"): # check if folder exists
os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/results/")
if not os.path.exists(RESULT_PATH): # check if folder exists
os.mkdir(RESULT_PATH)
class PPOConfig:
def __init__(self) -> None:
self.env = 'LunarLander-v2'
self.algo = 'PPO'
self.batch_size = 128
self.gamma=0.95
self.n_epochs = 4
self.actor_lr = 0.002
self.critic_lr = 0.005
self.gae_lambda=0.95
self.policy_clip=0.2
self.hidden_dim = 256
self.update_fre = 20 # frequency of agent update
self.train_eps = 300 # max training episodes
self.train_steps = 1000
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # check gpu
def train(cfg,env,agent):
best_reward = env.reward_range[0]
rewards= []
ma_rewards = [] # moving average rewards
avg_reward = 0
running_steps = 0
for i_episode in range(cfg.train_eps):
state = env.reset()
done = False
ep_reward = 0
# for i_step in range(cfg.train_steps):
while not done:
action, prob, val = agent.choose_action(state)
state_, reward, done, _ = env.step(action)
running_steps += 1
ep_reward += reward
agent.memory.push(state, action, prob, val, reward, done)
if running_steps % cfg.update_fre == 0:
agent.update()
state = state_
# if done:
# break
rewards.append(ep_reward)
if ma_rewards:
ma_rewards.append(
0.9*ma_rewards[-1]+0.1*ep_reward)
else:
ma_rewards.append(ep_reward)
avg_reward = np.mean(rewards[-100:])
if avg_reward > best_reward:
best_reward = avg_reward
agent.save(path=SAVED_MODEL_PATH)
print('Episode:{}/{}, Reward:{:.1f}, avg reward:{:.1f}, Loss:{}'.format(i_episode+1,cfg.train_eps,ep_reward,avg_reward,agent.loss))
return rewards,ma_rewards
if __name__ == '__main__':
cfg = PPOConfig()
env = gym.make(cfg.env)
env.seed(1)
state_dim=env.observation_space.shape[0]
action_dim=env.action_space.n
agent = PPO(state_dim,action_dim,cfg)
rewards,ma_rewards = train(cfg,env,agent)
save_results(rewards,ma_rewards,tag='train',path=RESULT_PATH)
plot_rewards(rewards,ma_rewards,tag="train",algo = cfg.algo,path=RESULT_PATH)

View File

@@ -22,26 +22,26 @@ python 3.7、pytorch 1.6.0-1.7.1、gym 0.17.0-0.18.0
## Usage
Run ```main.py``` or ```main.ipynb```
Run ```main.py``` or ```main.ipynb```, or a file whose name contains ```task``` (e.g. ```task1.py```)
## Algorithm Progress
| Algorithm | Related Papers/Materials | Environment | Notes |
| :--------------------------------------: | :---------------------------------------------------------: | ------------------------------------- | :--------------------------------: |
| :--------------------------------------: | :----------------------------------------------------------: | ------------------------------------- | :--------------------------------: |
| [On-Policy First-Visit MC](./MonteCarlo) | | [Racetrack](./envs/racetrack_env.md) | |
| [Q-Learning](./QLearning) | | [CliffWalking-v0](./envs/gym_info.md) | |
| [Sarsa](./Sarsa) | | [Racetrack](./envs/racetrack_env.md) | |
| [DQN](./DQN) | [DQN Paper](https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf) | [CartPole-v0](./envs/gym_info.md) | |
| [DQN](./DQN) | [DQN Paper](https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf),[Nature DQN Paper](https://www.nature.com/articles/nature14236) | [CartPole-v0](./envs/gym_info.md) | |
| [DQN-cnn](./DQN_cnn) | [DQN Paper](https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf) | [CartPole-v0](./envs/gym_info.md) | Uses a CNN instead of fully-connected layers, unlike DQN |
| [DoubleDQN](./DoubleDQN) | | [CartPole-v0](./envs/gym_info.md) | |
| [Hierarchical DQN](HierarchicalDQN) | [H-DQN Paper](https://arxiv.org/abs/1604.06057) | [CartPole-v0](./envs/gym_info.md) | |
| [PolicyGradient](./PolicyGradient) | | [CartPole-v0](./envs/gym_info.md) | |
| A2C | | [CartPole-v0](./envs/gym_info.md) | |
| A3C | | | |
| SAC | | | |
| A2C | [A3C Paper](https://arxiv.org/abs/1602.01783) | [CartPole-v0](./envs/gym_info.md) | |
| A3C | [A3C Paper](https://arxiv.org/abs/1602.01783) | | |
| SAC | [SAC Paper](https://arxiv.org/abs/1801.01290) | | |
| [PPO](./PPO) | [PPO paper](https://arxiv.org/abs/1707.06347) | [CartPole-v0](./envs/gym_info.md) | |
| [DDPG](./DDPG) | [DDPG Paper](https://arxiv.org/abs/1509.02971) | [Pendulum-v0](./envs/gym_info.md) | |
| TD3 | [TD3 Paper](https://arxiv.org/abs/1802.09477) | | |
| GAIL | | | |
| [TD3](./TD3) | [TD3 Paper](https://arxiv.org/abs/1802.09477) | HalfCheetah-v2 | |
| GAIL | [GAIL Paper](https://arxiv.org/abs/1606.03476) | | |
@@ -53,4 +53,3 @@ python 3.7、pytorch 1.6.0-1.7.1、gym 0.17.0-0.18.0
[RL-Adventure](https://github.com/higgsfield/RL-Adventure)
https://www.cnblogs.com/lucifer1997/p/13458563.html

View File

@@ -22,26 +22,26 @@ Note that ```model.py```,```memory.py```,```plot.py``` shall be utilized in diff
python 3.7.9、pytorch 1.6.0、gym 0.18.0
## Usage
run ```main.py``` or ```main.ipynb```
run ```main.py``` or ```main.ipynb```, or run the files whose names contain ```task``` (e.g. ```task1.py```)
## Schedule
| Name | Related materials | Used Envs | Notes |
| :--------------------------------------: | :---------------------------------------------------------: | ------------------------------------- | :------: |
| :--------------------------------------: | :---------------------------------------------------------: | ------------------------------------- | :---: |
| [On-Policy First-Visit MC](./MonteCarlo) | | [Racetrack](./envs/racetrack_env.md) | |
| [Q-Learning](./QLearning) | | [CliffWalking-v0](./envs/gym_info.md) | |
| [Sarsa](./Sarsa) | | [Racetrack](./envs/racetrack_env.md) | |
| [DQN](./DQN) | [DQN-paper](https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf) | [CartPole-v0](./envs/gym_info.md) | |
| [DQN-cnn](./DQN_cnn) | [DQN-paper](https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf) | [CartPole-v0](./envs/gym_info.md) | |
| [DoubleDQN](./DoubleDQN) | | [CartPole-v0](./envs/gym_info.md) | not well |
| [DoubleDQN](./DoubleDQN) | | [CartPole-v0](./envs/gym_info.md) | |
| [Hierarchical DQN](HierarchicalDQN) | [Hierarchical DQN](https://arxiv.org/abs/1604.06057) | [CartPole-v0](./envs/gym_info.md) | |
| [PolicyGradient](./PolicyGradient) | | [CartPole-v0](./envs/gym_info.md) | |
| A2C | | [CartPole-v0](./envs/gym_info.md) | |
| A3C | | | |
| SAC | | | |
| A2C | [A3C Paper](https://arxiv.org/abs/1602.01783) | [CartPole-v0](./envs/gym_info.md) | |
| A3C | [A3C Paper](https://arxiv.org/abs/1602.01783) | | |
| SAC | [SAC Paper](https://arxiv.org/abs/1801.01290) | | |
| [PPO](./PPO) | [PPO paper](https://arxiv.org/abs/1707.06347) | [CartPole-v0](./envs/gym_info.md) | |
| [DDPG](./DDPG) | [DDPG Paper](https://arxiv.org/abs/1509.02971) | [Pendulum-v0](./envs/gym_info.md) | |
| TD3 | [Twin Dueling DDPG Paper](https://arxiv.org/abs/1802.09477) | | |
| [TD3](./TD3) | [TD3 Paper](https://arxiv.org/abs/1802.09477) | HalfCheetah-v2 | |
| GAIL | | | |
@@ -52,4 +52,4 @@ run ```main.py``` or ```main.ipynb```
[RL-Adventure](https://github.com/higgsfield/RL-Adventure)
https://www.cnblogs.com/lucifer1997/p/13458563.html

codes/TD3/agent.py Normal file
View File

@@ -0,0 +1,170 @@
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from TD3.memory import ReplayBuffer
# Implementation of Twin Delayed Deep Deterministic Policy Gradients (TD3)
# Paper: https://arxiv.org/abs/1802.09477
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super(Actor, self).__init__()
self.l1 = nn.Linear(state_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, action_dim)
self.max_action = max_action
def forward(self, state):
a = F.relu(self.l1(state))
a = F.relu(self.l2(a))
return self.max_action * torch.tanh(self.l3(a))
class Critic(nn.Module):
def __init__(self, state_dim, action_dim):
super(Critic, self).__init__()
# Q1 architecture
self.l1 = nn.Linear(state_dim + action_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, 1)
# Q2 architecture
self.l4 = nn.Linear(state_dim + action_dim, 256)
self.l5 = nn.Linear(256, 256)
self.l6 = nn.Linear(256, 1)
def forward(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
q2 = F.relu(self.l4(sa))
q2 = F.relu(self.l5(q2))
q2 = self.l6(q2)
return q1, q2
def Q1(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
return q1
class TD3(object):
def __init__(
self,
state_dim,
action_dim,
max_action,
cfg,
):
self.max_action = max_action
self.gamma = cfg.gamma
self.lr = cfg.lr
self.policy_noise = cfg.policy_noise
self.noise_clip = cfg.noise_clip
self.policy_freq = cfg.policy_freq
self.batch_size = cfg.batch_size
self.device = cfg.device
self.total_it = 0
self.actor = Actor(state_dim, action_dim, max_action).to(self.device)
self.actor_target = copy.deepcopy(self.actor)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
self.critic = Critic(state_dim, action_dim).to(self.device)
self.critic_target = copy.deepcopy(self.critic)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
self.memory = ReplayBuffer(state_dim, action_dim)
def choose_action(self, state):
state = torch.FloatTensor(state.reshape(1, -1)).to(self.device)
return self.actor(state).cpu().data.numpy().flatten()
def update(self):
self.total_it += 1
# Sample replay buffer
state, action, next_state, reward, not_done = self.memory.sample(self.batch_size)
with torch.no_grad():
# Select action according to policy and add clipped noise
noise = (
torch.randn_like(action) * self.policy_noise
).clamp(-self.noise_clip, self.noise_clip)
next_action = (
self.actor_target(next_state) + noise
).clamp(-self.max_action, self.max_action)
# Compute the target Q value
target_Q1, target_Q2 = self.critic_target(next_state, next_action)
target_Q = torch.min(target_Q1, target_Q2)
target_Q = reward + not_done * self.gamma * target_Q
# Get current Q estimates
current_Q1, current_Q2 = self.critic(state, action)
# Compute critic loss
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
# Optimize the critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# Delayed policy updates
if self.total_it % self.policy_freq == 0:
# Compute actor loss
actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
# Optimize the actor
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# Update the frozen target models
for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
target_param.data.copy_(self.lr * param.data + (1 - self.lr) * target_param.data)
for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
target_param.data.copy_(self.lr * param.data + (1 - self.lr) * target_param.data)
def save(self, path):
torch.save(self.critic.state_dict(), path + "td3_critic")
torch.save(self.critic_optimizer.state_dict(), path + "td3_critic_optimizer")
torch.save(self.actor.state_dict(), path + "td3_actor")
torch.save(self.actor_optimizer.state_dict(), path + "td3_actor_optimizer")
def load(self, path):
self.critic.load_state_dict(torch.load(path + "td3_critic"))
self.critic_optimizer.load_state_dict(torch.load(path + "td3_critic_optimizer"))
self.critic_target = copy.deepcopy(self.critic)
self.actor.load_state_dict(torch.load(path + "td3_actor"))
self.actor_optimizer.load_state_dict(torch.load(path + "td3_actor_optimizer"))
self.actor_target = copy.deepcopy(self.actor)

View File

@@ -1,14 +1,169 @@
#!/usr/bin/env python
# coding=utf-8
'''
@Author: John
@Email: johnjim0816@gmail.com
@Date: 2020-06-11 23:38:13
@LastEditor: John
@LastEditTime: 2020-06-11 23:38:31
@Discription:
@Environment: python 3.7.7
'''
import sys,os
curr_path = os.path.dirname(__file__)
parent_path=os.path.dirname(curr_path)
sys.path.append(parent_path) # add current terminal path to sys.path
import torch
import gym
import numpy as np
import datetime
from TD3.agent import TD3
from common.plot import plot_rewards
from common.utils import save_results,make_dir
curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # obtain current time
class TD3Config:
def __init__(self) -> None:
self.algo = 'TD3'
self.env = 'HalfCheetah-v2'
self.seed = 0
self.result_path = curr_path+"/results/" +self.env+'/'+curr_time+'/' # path to save results
self.start_timestep = 25e3 # Time steps initial random policy is used
self.eval_freq = 5e3 # How often (time steps) we evaluate
# self.train_eps = 800
self.max_timestep = 1600000 # Max time steps to run environment
self.expl_noise = 0.1 # Std of Gaussian exploration noise
self.batch_size = 256 # Batch size for both actor and critic
self.gamma = 0.99 # gamma factor
self.lr = 0.0005 # Target network update rate
self.policy_noise = 0.2 # Noise added to target policy during critic update
self.noise_clip = 0.5 # Range to clip target policy noise
self.policy_freq = 2 # Frequency of delayed policy updates
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Runs policy for X episodes and returns average reward
# A fixed seed is used for the eval environment
def eval(env,agent, seed, eval_episodes=10):
eval_env = gym.make(env)
eval_env.seed(seed + 100)
avg_reward = 0.
for _ in range(eval_episodes):
state, done = eval_env.reset(), False
while not done:
# eval_env.render()
action = agent.choose_action(np.array(state))
state, reward, done, _ = eval_env.step(action)
avg_reward += reward
avg_reward /= eval_episodes
print("---------------------------------------")
print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
print("---------------------------------------")
return avg_reward
def train(cfg,env,agent):
# Evaluate untrained policy
evaluations = [eval(cfg.env,agent, cfg.seed)]
state, done = env.reset(), False
ep_reward = 0
ep_timesteps = 0
episode_num = 0
rewards = []
ma_rewards = [] # moving average reward
for t in range(int(cfg.max_timestep)):
ep_timesteps += 1
# Select action randomly or according to policy
if t < cfg.start_timestep:
action = env.action_space.sample()
else:
action = (
agent.choose_action(np.array(state))
+ np.random.normal(0, max_action * cfg.expl_noise, size=action_dim)
).clip(-max_action, max_action)
# Perform action
next_state, reward, done, _ = env.step(action)
done_bool = float(done) if ep_timesteps < env._max_episode_steps else 0
# Store data in replay buffer
agent.memory.push(state, action, next_state, reward, done_bool)
state = next_state
ep_reward += reward
# Train agent after collecting sufficient data
if t >= cfg.start_timestep:
agent.update()
if done:
# +1 to account for 0 indexing. +0 on ep_timesteps since it will increment +1 even if done=True
print(f"Episode:{episode_num+1}, Episode T:{ep_timesteps}, Reward:{ep_reward:.3f}")
# Reset environment
state, done = env.reset(), False
rewards.append(ep_reward)
# compute moving average reward
if ma_rewards:
ma_rewards.append(0.9*ma_rewards[-1]+0.1*ep_reward)
else:
ma_rewards.append(ep_reward)
ep_reward = 0
ep_timesteps = 0
episode_num += 1
# Evaluate episode
if (t + 1) % cfg.eval_freq == 0:
evaluations.append(eval(cfg.env,agent, cfg.seed))
return rewards, ma_rewards
# def train(cfg,env,agent):
# evaluations = [eval(cfg.env,agent,cfg.seed)]
# ep_reward = 0
# tot_timestep = 0
# rewards = []
# ma_rewards = [] # moving average reward
# for i_ep in range(int(cfg.train_eps)):
# state, done = env.reset(), False
# ep_reward = 0
# ep_timestep = 0
# while not done:
# ep_timestep += 1
# tot_timestep +=1
# # Select action randomly or according to policy
# if tot_timestep < cfg.start_timestep:
# action = env.action_space.sample()
# else:
# action = (
# agent.choose_action(np.array(state))
# + np.random.normal(0, max_action * cfg.expl_noise, size=action_dim)
# ).clip(-max_action, max_action)
# # action = (
# # agent.choose_action(np.array(state))
# # + np.random.normal(0, max_action * cfg.expl_noise, size=action_dim)
# # ).clip(-max_action, max_action)
# # Perform action
# next_state, reward, done, _ = env.step(action)
# done_bool = float(done) if ep_timestep < env._max_episode_steps else 0
# # Store data in replay buffer
# agent.memory.push(state, action, next_state, reward, done_bool)
# state = next_state
# ep_reward += reward
# # Train agent after collecting sufficient data
# if tot_timestep >= cfg.start_timestep:
# agent.update()
# print(f"Episode:{i_ep}/{cfg.train_eps}, Episode Timestep:{ep_timestep}, Reward:{ep_reward:.3f}")
# rewards.append(ep_reward)
# # compute moving average reward
# if ma_rewards:
# ma_rewards.append(0.9*ma_rewards[-1]+0.1*ep_reward)
# else:
# ma_rewards.append(ep_reward)
# # Evaluate episode
# if (i_ep+1) % cfg.eval_freq == 0:
# evaluations.append(eval(cfg.env,agent, cfg.seed))
# return rewards,ma_rewards
if __name__ == "__main__":
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
cfg = TD3Config()
env = gym.make(cfg.env)
env.seed(cfg.seed) # Set seeds
torch.manual_seed(cfg.seed)
np.random.seed(cfg.seed)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
max_action = float(env.action_space.high[0])
agent = TD3(state_dim,action_dim,max_action,cfg)
rewards,ma_rewards = train(cfg,env,agent)
make_dir(cfg.result_path)
agent.save(path=cfg.result_path)
save_results(rewards,ma_rewards,tag='train',path=cfg.result_path)
plot_rewards(rewards,ma_rewards,tag="train",env=cfg.env,algo = cfg.algo,path=cfg.result_path)

View File

@@ -1,34 +1,44 @@
#!/usr/bin/env python
# coding=utf-8
'''
@Author: John
@Email: johnjim0816@gmail.com
@Date: 2020-06-10 15:27:16
@LastEditor: John
@LastEditTime: 2020-06-11 21:04:50
@Discription:
@Environment: python 3.7.7
Author: John
Email: johnjim0816@gmail.com
Date: 2021-04-13 11:00:13
LastEditor: John
LastEditTime: 2021-04-15 01:25:14
Discription:
Environment:
'''
import random
import numpy as np
import torch
class ReplayBuffer:
def __init__(self, capacity):
self.capacity = capacity
self.buffer = []
self.position = 0
def push(self, state, action, reward, next_state, done):
if len(self.buffer) < self.capacity:
self.buffer.append(None)
self.buffer[self.position] = (state, action, reward, next_state, done)
self.position = (self.position + 1) % self.capacity
class ReplayBuffer(object):
def __init__(self, state_dim, action_dim, max_size=int(1e6)):
self.max_size = max_size
self.ptr = 0
self.size = 0
self.state = np.zeros((max_size, state_dim))
self.action = np.zeros((max_size, action_dim))
self.next_state = np.zeros((max_size, state_dim))
self.reward = np.zeros((max_size, 1))
self.not_done = np.zeros((max_size, 1))
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def push(self, state, action, next_state, reward, done):
self.state[self.ptr] = state
self.action[self.ptr] = action
self.next_state[self.ptr] = next_state
self.reward[self.ptr] = reward
self.not_done[self.ptr] = 1. - done
self.ptr = (self.ptr + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
def sample(self, batch_size):
batch = random.sample(self.buffer, batch_size)
state, action, reward, next_state, done = map(np.stack, zip(*batch))
return state, action, reward, next_state, done
def __len__(self):
return len(self.buffer)
ind = np.random.randint(0, self.size, size=batch_size)
return (
torch.FloatTensor(self.state[ind]).to(self.device),
torch.FloatTensor(self.action[ind]).to(self.device),
torch.FloatTensor(self.next_state[ind]).to(self.device),
torch.FloatTensor(self.reward[ind]).to(self.device),
torch.FloatTensor(self.not_done[ind]).to(self.device)
)

Binary file not shown. (After: 42 KiB)

Binary file not shown. (After: 55 KiB)

Binary file not shown.

View File

@@ -5,15 +5,15 @@ Author: John
Email: johnjim0816@gmail.com
Date: 2020-10-07 20:57:11
LastEditor: John
LastEditTime: 2021-03-31 18:47:28
LastEditTime: 2021-04-08 21:45:09
Discription:
Environment:
'''
import matplotlib.pyplot as plt
import seaborn as sns
def plot_rewards(rewards,ma_rewards,tag="train",algo = "DQN",save=True,path='./'):
def plot_rewards(rewards,ma_rewards,tag="train",env='CartPole-v0',algo = "DQN",save=True,path='./'):
sns.set()
plt.title("average learning curve of {}".format(algo))
plt.title("average learning curve of {} for {}".format(algo,env))
plt.xlabel('episodes')
plt.plot(rewards,label='rewards')
plt.plot(ma_rewards,label='moving average rewards')

View File

@@ -5,12 +5,14 @@ Author: John
Email: johnjim0816@gmail.com
Date: 2021-03-12 16:02:24
LastEditor: John
LastEditTime: 2021-04-03 21:42:13
LastEditTime: 2021-04-13 18:34:20
Discription:
Environment:
'''
import os
import numpy as np
from pathlib import Path
def save_results(rewards,ma_rewards,tag='train',path='./results'):
@@ -22,8 +24,7 @@ def save_results(rewards,ma_rewards,tag='train',path='./results'):
def make_dir(*paths):
for path in paths:
if not os.path.exists(path): # check if exists
os.mkdir(path)
Path(path).mkdir(parents=True, exist_ok=True)
def del_empty_dir(*paths):
'''del_empty_dir delete empty folders under "paths"
'''

Binary file not shown.

Binary file not shown.

View File

@@ -10,7 +10,7 @@ import time
def get_args():
parser = argparse.ArgumentParser(description='CS440 MP4 Snake')
parser.add_argument('--human', default = True, action="store_true",
parser.add_argument('--human', default = False, action="store_true",
help='making the game human playable - default False')
parser.add_argument('--model_name', dest="model_name", type=str, default="checkpoint3.npy",

View File

@@ -1,19 +0,0 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: John
Email: johnjim0816@gmail.com
Date: 2021-03-25 23:25:15
LastEditor: John
LastEditTime: 2021-03-26 16:46:52
Discription:
Environment:
'''
from collections import defaultdict
import numpy as np
action_dim = 2
Q_table = defaultdict(lambda: np.zeros(action_dim))
Q_table[str(0)] = 1
print(Q_table[str(0)])
Q_table[str(21)] = 3
print(Q_table[str(21)])