diff --git a/codes/PPO/agent.py b/codes/PPO/agent.py
new file mode 100644
index 0000000..34fd1f5
--- /dev/null
+++ b/codes/PPO/agent.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+# coding=utf-8
+'''
+Author: John
+Email: johnjim0816@gmail.com
+Date: 2021-03-23 15:17:42
+LastEditor: John
+LastEditTime: 2021-03-23 15:52:34
+Description:
+Environment:
+'''
+import os
+import numpy as np
+import torch
+import torch.optim as optim
+from PPO.model import Actor, Critic
+from PPO.memory import PPOMemory
+class PPO:
+    def __init__(self, state_dim, action_dim, cfg):
+        self.gamma = cfg.gamma
+        self.policy_clip = cfg.policy_clip
+        self.n_epochs = cfg.n_epochs
+        self.gae_lambda = cfg.gae_lambda
+        self.device = cfg.device
+        self.actor = Actor(state_dim, action_dim).to(self.device)
+        self.critic = Critic(state_dim).to(self.device)
+        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=cfg.lr)
+        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=cfg.lr)
+        self.memory = PPOMemory(cfg.batch_size)
+
+    def choose_action(self, observation):
+        state = torch.tensor([observation], dtype=torch.float).to(self.device)
+        dist = self.actor(state)
+        value = self.critic(state)
+        action = dist.sample()
+        probs = torch.squeeze(dist.log_prob(action)).item()
+        action = torch.squeeze(action).item()
+        value = torch.squeeze(value).item()
+        return action, probs, value
+
+    def update(self):
+        for _ in range(self.n_epochs):
+            state_arr, action_arr, old_prob_arr, vals_arr, \
+                reward_arr, dones_arr, batches = \
+                    self.memory.sample()
+            values = vals_arr
+            ### compute advantage with GAE ###
+            advantage = np.zeros(len(reward_arr), dtype=np.float32)
+            for t in range(len(reward_arr)-1):
+                discount = 1
+                a_t = 0
+                for k in range(t, len(reward_arr)-1):
+                    a_t += discount*(reward_arr[k] + self.gamma*values[k+1]*\
+                        (1-int(dones_arr[k])) - values[k])
+                    discount *= self.gamma*self.gae_lambda
+                advantage[t] = a_t
+            advantage = torch.tensor(advantage).to(self.device)
+            ### SGD over shuffled mini-batches ###
+            values = torch.tensor(values).to(self.device)
+            for batch in batches:
+                states = torch.tensor(state_arr[batch], dtype=torch.float).to(self.device)
+                old_probs = torch.tensor(old_prob_arr[batch]).to(self.device)
+                actions = torch.tensor(action_arr[batch]).to(self.device)
+                dist = self.actor(states)
+                critic_value = self.critic(states)
+                critic_value = torch.squeeze(critic_value)
+                new_probs = dist.log_prob(actions)
+                prob_ratio = new_probs.exp() / old_probs.exp()  # importance ratio between new and old policies
+                weighted_probs = advantage[batch] * prob_ratio
+                weighted_clipped_probs = torch.clamp(prob_ratio, 1-self.policy_clip,
+                        1+self.policy_clip)*advantage[batch]
+                actor_loss = -torch.min(weighted_probs, weighted_clipped_probs).mean()  # clipped surrogate objective
+                returns = advantage[batch] + values[batch]  # advantage + value estimate = return target
+                critic_loss = (returns-critic_value)**2
+                critic_loss = critic_loss.mean()
+                total_loss = actor_loss + 0.5*critic_loss
+                self.actor_optimizer.zero_grad()
+                self.critic_optimizer.zero_grad()
+                total_loss.backward()
+                self.actor_optimizer.step()
+                self.critic_optimizer.step()
+        self.memory.clear()
+    def save(self, path):
+        actor_checkpoint = os.path.join(path, 'actor_torch_ppo.pt')
+        critic_checkpoint = os.path.join(path, 'critic_torch_ppo.pt')
+        torch.save(self.actor.state_dict(), actor_checkpoint)
+        torch.save(self.critic.state_dict(), critic_checkpoint)
+    def load(self, path):
+        actor_checkpoint = os.path.join(path, 'actor_torch_ppo.pt')
+        critic_checkpoint = os.path.join(path, 'critic_torch_ppo.pt')
+        self.actor.load_state_dict(torch.load(actor_checkpoint))
+        self.critic.load_state_dict(torch.load(critic_checkpoint))
+
+
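Note on the advantage computation above: the nested loop in `update()` evaluates the GAE sum term by term, which is O(T²) in the trajectory length. A minimal sketch of an equivalent backward recursion (the helper name `compute_gae` is illustrative and not part of this diff; it reproduces the loop above exactly, including the fact that the loop does not mask the accumulator at episode boundaries):

```python
import numpy as np

def compute_gae(rewards, values, dones, gamma=0.99, gae_lambda=0.95):
    """Backward-recursion GAE; same result as the O(T^2) nested loop in agent.py, in O(T)."""
    rewards, values, dones = map(np.asarray, (rewards, values, dones))
    # one-step TD errors: delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_t) - V(s_t)
    deltas = rewards[:-1] + gamma * values[1:] * (1 - dones[:-1]) - values[:-1]
    advantage = np.zeros(len(rewards), dtype=np.float32)
    a_t = 0.0
    for t in reversed(range(len(rewards) - 1)):
        # note: no (1 - done) mask in the recursion, matching the loop in the diff
        a_t = deltas[t] + gamma * gae_lambda * a_t
        advantage[t] = a_t
    return advantage
```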
diff --git a/codes/PPO/main.py b/codes/PPO/main.py
new file mode 100644
index 0000000..c0d9830
--- /dev/null
+++ b/codes/PPO/main.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+# coding=utf-8
+'''
+Author: John
+Email: johnjim0816@gmail.com
+Date: 2021-03-22 16:18:10
+LastEditor: John
+LastEditTime: 2021-03-23 15:52:52
+Description:
+Environment:
+'''
+import sys, os
+sys.path.append(os.getcwd())  # add current terminal path to sys.path
+import gym
+import numpy as np
+import torch
+import datetime
+from PPO.agent import PPO
+from common.plot import plot_rewards
+from common.utils import save_results
+
+SEQUENCE = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")  # current timestamp
+SAVED_MODEL_PATH = os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"+SEQUENCE+'/'  # path for saving models
+if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"):  # create folder if it does not exist
+    os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/")
+if not os.path.exists(SAVED_MODEL_PATH):  # create folder if it does not exist
+    os.mkdir(SAVED_MODEL_PATH)
+RESULT_PATH = os.path.split(os.path.abspath(__file__))[0]+"/results/"+SEQUENCE+'/'  # path for saving rewards
+if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/results/"):  # create folder if it does not exist
+    os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/results/")
+if not os.path.exists(RESULT_PATH):  # create folder if it does not exist
+    os.mkdir(RESULT_PATH)
+
+class PPOConfig:
+    def __init__(self) -> None:
+        self.algo = 'PPO'
+        self.batch_size = 5
+        self.gamma = 0.99
+        self.n_epochs = 4
+        self.lr = 0.0003
+        self.gae_lambda = 0.95
+        self.policy_clip = 0.2
+        self.update_fre = 20  # frequency of agent update
+        self.train_eps = 250  # max training episodes
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # check gpu
+
+def train(cfg, env, agent):
+    best_reward = env.reward_range[0]
+    rewards = []
+    ma_rewards = []  # moving average rewards
+    avg_reward = 0
+    running_steps = 0
+    for i_episode in range(cfg.train_eps):
+        state = env.reset()
+        done = False
+        ep_reward = 0
+        while not done:
+            action, prob, val = agent.choose_action(state)
+            state_, reward, done, _ = env.step(action)
+            running_steps += 1
+            ep_reward += reward
+            agent.memory.push(state, action, prob, val, reward, done)
+            if running_steps % cfg.update_fre == 0:
+                agent.update()
+            state = state_
+        rewards.append(ep_reward)
+        if ma_rewards:
+            ma_rewards.append(
+                0.9*ma_rewards[-1]+0.1*ep_reward)
+        else:
+            ma_rewards.append(ep_reward)
+        avg_reward = np.mean(rewards[-100:])
+        if avg_reward > best_reward:
+            best_reward = avg_reward
+            agent.save(path=SAVED_MODEL_PATH)
+        print('Episode:{}/{}, Reward:{:.1f}, avg reward:{:.1f}, Done:{}'.format(i_episode+1, cfg.train_eps, ep_reward, avg_reward, done))
+    return rewards, ma_rewards
+
+if __name__ == '__main__':
+    cfg = PPOConfig()
+    env = gym.make('CartPole-v0')
+    env.seed(1)
+    state_dim = env.observation_space.shape[0]
+    action_dim = env.action_space.n
+    agent = PPO(state_dim, action_dim, cfg)
+    rewards, ma_rewards = train(cfg, env, agent)
+    save_results(rewards, ma_rewards, tag='train', path=RESULT_PATH)
+    plot_rewards(rewards, ma_rewards, tag="train", algo=cfg.algo, path=RESULT_PATH)
\ No newline at end of file
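`train()` saves the best checkpoints under `saved_model/<timestamp>/`. A minimal evaluation sketch for such a checkpoint is shown below; it is hypothetical and not part of this diff, and `EvalConfig` only mirrors the fields that `PPO.__init__` actually reads:

```python
# eval_sketch.py -- hypothetical helper, not included in this diff.
# Loads the actor/critic checkpoints saved by train() and rolls out the policy.
import gym
import torch
from PPO.agent import PPO

class EvalConfig:
    # only the attributes read by PPO.__init__
    gamma, gae_lambda, policy_clip = 0.99, 0.95, 0.2
    n_epochs, lr, batch_size = 4, 0.0003, 5
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def evaluate(model_path, n_episodes=10):
    env = gym.make('CartPole-v0')
    agent = PPO(env.observation_space.shape[0], env.action_space.n, EvalConfig())
    agent.load(model_path)  # expects actor_torch_ppo.pt / critic_torch_ppo.pt under model_path
    for i in range(n_episodes):
        state, done, ep_reward = env.reset(), False, 0
        while not done:
            action, _, _ = agent.choose_action(state)  # actions are still sampled from the policy
            state, reward, done, _ = env.step(action)
            ep_reward += reward
        print('Eval episode {}: reward {:.1f}'.format(i + 1, ep_reward))
```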
diff --git a/codes/PPO/memory.py b/codes/PPO/memory.py
new file mode 100644
index 0000000..605fe03
--- /dev/null
+++ b/codes/PPO/memory.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+# coding=utf-8
+'''
+Author: John
+Email: johnjim0816@gmail.com
+Date: 2021-03-23 15:30:46
+LastEditor: John
+LastEditTime: 2021-03-23 15:30:55
+Description:
+Environment:
+'''
+import numpy as np
+class PPOMemory:
+    def __init__(self, batch_size):
+        self.states = []
+        self.probs = []
+        self.vals = []
+        self.actions = []
+        self.rewards = []
+        self.dones = []
+        self.batch_size = batch_size
+    def sample(self):
+        batch_step = np.arange(0, len(self.states), self.batch_size)
+        indices = np.arange(len(self.states), dtype=np.int64)
+        np.random.shuffle(indices)
+        batches = [indices[i:i+self.batch_size] for i in batch_step]  # shuffled index chunks (last one may be smaller)
+        return np.array(self.states),\
+            np.array(self.actions),\
+            np.array(self.probs),\
+            np.array(self.vals),\
+            np.array(self.rewards),\
+            np.array(self.dones),\
+            batches
+
+    def push(self, state, action, probs, vals, reward, done):
+        self.states.append(state)
+        self.actions.append(action)
+        self.probs.append(probs)
+        self.vals.append(vals)
+        self.rewards.append(reward)
+        self.dones.append(done)
+
+    def clear(self):
+        self.states = []
+        self.probs = []
+        self.actions = []
+        self.rewards = []
+        self.dones = []
+        self.vals = []
\ No newline at end of file
diff --git a/codes/PPO/model.py b/codes/PPO/model.py
new file mode 100644
index 0000000..967eba6
--- /dev/null
+++ b/codes/PPO/model.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# coding=utf-8
+'''
+Author: John
+Email: johnjim0816@gmail.com
+Date: 2021-03-23 15:29:24
+LastEditor: John
+LastEditTime: 2021-03-23 15:29:52
+Description:
+Environment:
+'''
+import torch.nn as nn
+from torch.distributions.categorical import Categorical
+class Actor(nn.Module):
+    def __init__(self, state_dim, action_dim,
+                 hidden_dim=256):
+        super(Actor, self).__init__()
+
+        self.actor = nn.Sequential(
+            nn.Linear(state_dim, hidden_dim),
+            nn.ReLU(),
+            nn.Linear(hidden_dim, hidden_dim),
+            nn.ReLU(),
+            nn.Linear(hidden_dim, action_dim),
+            nn.Softmax(dim=-1)  # action probabilities
+        )
+    def forward(self, state):
+        dist = self.actor(state)
+        dist = Categorical(dist)  # categorical distribution over actions
+        return dist
+
+class Critic(nn.Module):
+    def __init__(self, state_dim, hidden_dim=256):
+        super(Critic, self).__init__()
+        self.critic = nn.Sequential(
+            nn.Linear(state_dim, hidden_dim),
+            nn.ReLU(),
+            nn.Linear(hidden_dim, hidden_dim),
+            nn.ReLU(),
+            nn.Linear(hidden_dim, 1)
+        )
+    def forward(self, state):
+        value = self.critic(state)
+        return value
\ No newline at end of file
diff --git a/codes/PPO/results/20210323-152513/ma_rewards_train.npy b/codes/PPO/results/20210323-152513/ma_rewards_train.npy
new file mode 100644
index 0000000..68b9f79
Binary files /dev/null and b/codes/PPO/results/20210323-152513/ma_rewards_train.npy differ
diff --git a/codes/PPO/results/20210323-152513/rewards_curve_train.png b/codes/PPO/results/20210323-152513/rewards_curve_train.png
new file mode 100644
index 0000000..4620e87
Binary files /dev/null and b/codes/PPO/results/20210323-152513/rewards_curve_train.png differ
diff --git a/codes/PPO/results/20210323-152513/rewards_train.npy b/codes/PPO/results/20210323-152513/rewards_train.npy
new file mode 100644
index 0000000..5fdefc7
Binary files /dev/null and b/codes/PPO/results/20210323-152513/rewards_train.npy differ
diff --git a/codes/PPO/saved_model/20210323-152513/actor_torch_ppo.pt b/codes/PPO/saved_model/20210323-152513/actor_torch_ppo.pt
new file mode 100644
index 0000000..a35a743
Binary files /dev/null and b/codes/PPO/saved_model/20210323-152513/actor_torch_ppo.pt differ
diff --git a/codes/PPO/saved_model/20210323-152513/critic_torch_ppo.pt b/codes/PPO/saved_model/20210323-152513/critic_torch_ppo.pt
new file mode 100644
index 0000000..ba6bfda
Binary files /dev/null and b/codes/PPO/saved_model/20210323-152513/critic_torch_ppo.pt differ
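The `Actor` in `model.py` above returns a `Categorical` distribution built from the softmax output, and the `Critic` returns a scalar state-value estimate. A minimal interface sanity check (a sketch; the 4-dimensional state and 2 actions are assumed from CartPole and are not part of the diff):

```python
import torch
from PPO.model import Actor, Critic

actor, critic = Actor(state_dim=4, action_dim=2), Critic(state_dim=4)
state = torch.randn(1, 4)                    # one CartPole-like observation
dist = actor(state)                          # torch.distributions.Categorical over 2 actions
action = dist.sample()
print(dist.log_prob(action), critic(state))  # log-probability of the sampled action and V(s)
```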
diff --git a/codes/README.md b/codes/README.md
index 40c7abc..90df45a 100644
--- a/codes/README.md
+++ b/codes/README.md
@@ -1,53 +1,45 @@
+[Eng](https://github.com/JohnJim0816/reinforcement-learning-tutorials/blob/master/README.md)|[中文](https://github.com/JohnJim0816/reinforcement-learning-tutorials/blob/master/README_cn.md)
 ## Preface
-This project is for learning basic RL algorithms, aiming for:
+This project is for learning basic RL algorithms, aiming for **detailed comments** and a **clear structure**.
-* detailed comments
-* clear structure
-
- The code structure is clear and mainly consists of the following scripts:
+The code is mainly organized into the following scripts:
- * ```env.py``` builds the RL environment; the environment can also be re-normalized, e.g. by adding noise to actions
- * ```model.py``` basic models for RL algorithms, such as neural networks, actor, critic, etc.
- * ```memory.py``` stores the Replay Buffer for off-policy algorithms
- * ```agent.py``` core RL algorithms such as DQN, mainly containing the update and select_action methods
- * ```main.py``` the main entry script
- * ```params.py``` stores the various parameters
- * ```plot.py``` plots rewards (including moving-average rewards) with matplotlib or seaborn; results are saved in the result folder
+* ```model.py``` basic models for RL algorithms, such as neural networks, actor, critic, etc.
+* ```memory.py``` stores the Replay Buffer for off-policy algorithms
+* ```plot.py``` plots rewards (including moving-average rewards) with matplotlib or seaborn; results are saved in the result folder
+* ```env.py``` builds the RL environment; the environment can also be customized, e.g. by adding noise to actions
+* ```agent.py``` core RL algorithms such as DQN, mainly containing the update and choose_action methods
+* ```main.py``` the main entry script
+
+Since ```model.py```, ```memory.py``` and ```plot.py``` are shared by different algorithms, they are placed in the ```common``` folder.
 ## Requirements
-python 3.7.9
-
-pytorch 1.6.0
-
-tensorboard 2.3.0
-
-torchvision 0.7.0
-
-gym 0.17.3
+python 3.7.9, pytorch 1.6.0, gym 0.18.0
 ## Usage
-For the environments used in this repository, see the [environment notes](https://github.com/JohnJim0816/reinforcement-learning-tutorials/blob/master/env_info.md); each algorithm directory also has its own notes (e.g. how to run the code)
+See the [environment notes](https://github.com/JohnJim0816/reinforcement-learning-tutorials/blob/master/env_info.md) for the environments used in this repo; each algorithm directory also has a README
 ## Algorithm Progress
-| Algorithm | Reference Paper | Notes | Progress |
-| :----------------------: | :---------------------------------------------------------: | :--------------------------------: | :--: |
-| On-Policy First-Visit MC | | | OK |
-| Q-Learning | | | OK |
-| SARSA | | | OK |
-| DQN | [DQN-paper](https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf) | | OK |
-| DQN-cnn | [DQN-paper](https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf) | uses a CNN instead of a fully connected network | OK |
-| DoubleDQN | | | OK |
-| Hierarchical DQN | [Hierarchical DQN](https://arxiv.org/abs/1604.06057) | | |
-| PolicyGradient | | | OK |
-| A2C | | | OK |
-| DDPG | [DDPG Paper](https://arxiv.org/abs/1509.02971) | | OK |
-| TD3 | [Twin Dueling DDPG Paper](https://arxiv.org/abs/1802.09477) | | |
-| | | | |
+| Algorithm | Reference Paper | Notes | Progress |
+| :----------------------------------------------------------: | :---------------------------------------------------------: | :----------------------------------------------------------: | :--: |
+| On-Policy First-Visit MC | | | OK |
+| Q-Learning | | | OK |
+| SARSA | | | OK |
+| DQN | [DQN-paper](https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf) | | OK |
+| DQN-cnn | [DQN-paper](https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf) | uses a CNN instead of a fully connected network | OK |
+| DoubleDQN | | performance is poor, to be improved | OK |
+| Hierarchical DQN | [Hierarchical DQN](https://arxiv.org/abs/1604.06057) | | |
+| PolicyGradient | | | OK |
+| A2C | | | OK |
+| [PPO](https://github.com/JohnJim0816/rl-tutorials/tree/master/PPO) | [PPO paper](https://arxiv.org/abs/1707.06347) | [PPO in practice (Chinese)](https://blog.csdn.net/JohnJim0/article/details/115126363) | OK |
+| DDPG | [DDPG Paper](https://arxiv.org/abs/1509.02971) | | OK |
+| TD3 | [Twin Dueling DDPG Paper](https://arxiv.org/abs/1802.09477) | | |
+
@@ -57,3 +49,5 @@ gym 0.17.3
 [RL-Adventure-2](https://github.com/higgsfield/RL-Adventure-2)
 
 [RL-Adventure](https://github.com/higgsfield/RL-Adventure)
+
+https://www.cnblogs.com/lucifer1997/p/13458563.html