diff --git a/codes/ddpg/README.md b/codes/ddpg/README.md
deleted file mode 100644
index 351615b..0000000
--- a/codes/ddpg/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# DDPG
-
-## Pseudocode
-
-![image-20210320151900695](assets/image-20210320151900695.png)
\ No newline at end of file
diff --git a/codes/ddpg/agent.py b/codes/ddpg/agent.py
deleted file mode 100644
index 29f34d6..0000000
--- a/codes/ddpg/agent.py
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/usr/bin/env python
-# coding=utf-8
-'''
-@Author: John
-@Email: johnjim0816@gmail.com
-@Date: 2020-06-09 20:25:52
-@LastEditor: John
-LastEditTime: 2021-03-17 20:43:25
-@Description:
-@Environment: python 3.7.7
-'''
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.optim as optim
-
-from common.model import Actor, Critic
-from common.memory import ReplayBuffer
-
-
-class DDPG:
-    def __init__(self, n_states, n_actions, cfg):
-        self.device = cfg.device
-        self.critic = Critic(n_states, n_actions, cfg.hidden_dim).to(cfg.device)
-        self.actor = Actor(n_states, n_actions, cfg.hidden_dim).to(cfg.device)
-        self.target_critic = Critic(n_states, n_actions, cfg.hidden_dim).to(cfg.device)
-        self.target_actor = Actor(n_states, n_actions, cfg.hidden_dim).to(cfg.device)
-
-        # initialize the target networks as copies of the online networks
-        for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):
-            target_param.data.copy_(param.data)
-        for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):
-            target_param.data.copy_(param.data)
-
-        self.critic_optimizer = optim.Adam(
-            self.critic.parameters(), lr=cfg.critic_lr)
-        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=cfg.actor_lr)
-        self.memory = ReplayBuffer(cfg.memory_capacity)
-        self.batch_size = cfg.batch_size
-        self.soft_tau = cfg.soft_tau
-        self.gamma = cfg.gamma
-
-    def choose_action(self, state):
-        state = torch.FloatTensor(state).unsqueeze(0).to(self.device)
-        action = self.actor(state)
-        # detach() cuts the action off from the computation graph before converting to numpy
-        return action.detach().cpu().numpy()[0, 0]
-
-    def update(self):
-        if len(self.memory) < self.batch_size:
-            return
-        state, action, reward, next_state, done = self.memory.sample(
-            self.batch_size)
-        # convert all variables to tensors
-        state = torch.FloatTensor(state).to(self.device)
-        next_state = torch.FloatTensor(next_state).to(self.device)
-        action = torch.FloatTensor(action).to(self.device)
-        reward = torch.FloatTensor(reward).unsqueeze(1).to(self.device)
-        done = torch.FloatTensor(np.float32(done)).unsqueeze(1).to(self.device)
-        # note that the critic takes (s_t, a) as input
-        policy_loss = self.critic(state, self.actor(state))
-        policy_loss = -policy_loss.mean()
-
-        next_action = self.target_actor(next_state)
-        target_value = self.target_critic(next_state, next_action.detach())
-        expected_value = reward + (1.0 - done) * self.gamma * target_value
-        # no-op here; placeholder for optional target value clipping
-        expected_value = torch.clamp(expected_value, -np.inf, np.inf)
-
-        value = self.critic(state, action)
-        value_loss = nn.MSELoss()(value, expected_value.detach())
-
-        self.actor_optimizer.zero_grad()
-        policy_loss.backward()
-        self.actor_optimizer.step()
-
-        self.critic_optimizer.zero_grad()
-        value_loss.backward()
-        self.critic_optimizer.step()
-        # soft (Polyak) update of the target networks
-        for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):
-            target_param.data.copy_(
-                target_param.data * (1.0 - self.soft_tau) +
-                param.data * self.soft_tau
-            )
-        for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):
-            target_param.data.copy_(
-                target_param.data * (1.0 - self.soft_tau) +
-                param.data * self.soft_tau
-            )
-
-    def save(self, path):
-        torch.save(self.actor.state_dict(), path+'DDPG_checkpoint.pth')
-
-    def load(self, path):
-        self.actor.load_state_dict(torch.load(path+'DDPG_checkpoint.pth'))
\ No newline at end of file
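For reference, the `update()` step in the removed `agent.py` implements the standard DDPG losses and Polyak-averaged target networks. With online critic/actor parameters $\phi, \theta$, target parameters $\phi', \theta'$, done flag $d$, and $\tau$ = `soft_tau`:

$$
y = r + \gamma\,(1-d)\,Q_{\phi'}\big(s',\,\mu_{\theta'}(s')\big),\qquad
L_{\text{critic}} = \big(Q_\phi(s,a)-y\big)^2,\qquad
L_{\text{actor}} = -\,Q_\phi\big(s,\,\mu_\theta(s)\big)
$$

$$
\phi' \leftarrow \tau\,\phi + (1-\tau)\,\phi',\qquad
\theta' \leftarrow \tau\,\theta + (1-\tau)\,\theta'
$$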
diff --git a/codes/ddpg/assets/image-20210320151900695.png b/codes/ddpg/assets/image-20210320151900695.png
deleted file mode 100644
index fd41201..0000000
Binary files a/codes/ddpg/assets/image-20210320151900695.png and /dev/null differ
diff --git a/codes/ddpg/env.py b/codes/ddpg/env.py
deleted file mode 100644
index ad7bd0e..0000000
--- a/codes/ddpg/env.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env python
-# coding=utf-8
-'''
-@Author: John
-@Email: johnjim0816@gmail.com
-@Date: 2020-06-10 15:28:30
-@LastEditor: John
-LastEditTime: 2021-03-19 19:56:46
-@Description:
-@Environment: python 3.7.7
-'''
-import gym
-import numpy as np
-
-class NormalizedActions(gym.ActionWrapper):
-    ''' Rescale actions from [-1, 1] to the environment's action range [low, high]
-    '''
-    def action(self, action):
-        low_bound = self.action_space.low
-        upper_bound = self.action_space.high
-        action = low_bound + (action + 1.0) * 0.5 * (upper_bound - low_bound)
-        action = np.clip(action, low_bound, upper_bound)
-        return action
-
-    def reverse_action(self, action):
-        low_bound = self.action_space.low
-        upper_bound = self.action_space.high
-        action = 2 * (action - low_bound) / (upper_bound - low_bound) - 1
-        action = np.clip(action, low_bound, upper_bound)
-        return action
-
-class OUNoise(object):
-    '''Ornstein-Uhlenbeck noise process
-    '''
-    def __init__(self, action_space, mu=0.0, theta=0.15, max_sigma=0.3, min_sigma=0.3, decay_period=100000):
-        self.mu = mu
-        self.theta = theta
-        self.sigma = max_sigma
-        self.max_sigma = max_sigma
-        self.min_sigma = min_sigma
-        self.decay_period = decay_period
-        self.n_actions = action_space.shape[0]
-        self.low = action_space.low
-        self.high = action_space.high
-        self.reset()
-
-    def reset(self):
-        self.obs = np.ones(self.n_actions) * self.mu
-
-    def evolve_obs(self):
-        x = self.obs
-        dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.n_actions)
-        self.obs = x + dx
-        return self.obs
-
-    def get_action(self, action, t=0):
-        ou_obs = self.evolve_obs()
-        # anneal sigma from max_sigma to min_sigma over decay_period steps
-        self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(1.0, t / self.decay_period)
-        return np.clip(action + ou_obs, self.low, self.high)
\ No newline at end of file
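The removed `OUNoise` class evolves a temporally correlated noise state and then anneals its scale. With $\varepsilon \sim \mathcal{N}(0, I)$, each call to `get_action` first updates the state with the current $\sigma$ and then decays $\sigma$:

$$
x_{t+1} = x_t + \theta\,(\mu - x_t) + \sigma\,\varepsilon,\qquad
\sigma \leftarrow \sigma_{\max} - (\sigma_{\max}-\sigma_{\min})\,\min\!\Big(1,\ \tfrac{t}{T_{\text{decay}}}\Big)
$$

and returns $\mathrm{clip}(a + x_{t+1},\ \text{low},\ \text{high})$. Note that with the defaults `max_sigma = min_sigma = 0.3` the annealing is a no-op.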
diff --git a/codes/ddpg/main.py b/codes/ddpg/main.py
deleted file mode 100644
index 5308ec6..0000000
--- a/codes/ddpg/main.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/env python
-# coding=utf-8
-'''
-@Author: John
-@Email: johnjim0816@gmail.com
-@Date: 2020-06-11 20:58:21
-@LastEditor: John
-LastEditTime: 2021-03-19 19:57:00
-@Description:
-@Environment: python 3.7.7
-'''
-import sys,os
-sys.path.append(os.getcwd())  # add the current working directory to the import path
-import torch
-import gym
-import numpy as np
-import datetime
-from DDPG.agent import DDPG
-from DDPG.env import NormalizedActions,OUNoise
-from common.plot import plot_rewards
-from common.utils import save_results
-
-SEQUENCE = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")  # current timestamp, used to tag this run
-SAVED_MODEL_PATH = os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"+SEQUENCE+'/'  # path for saving models
-if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"):  # create the folder if it does not exist
-    os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/")
-if not os.path.exists(SAVED_MODEL_PATH):  # create the folder if it does not exist
-    os.mkdir(SAVED_MODEL_PATH)
-RESULT_PATH = os.path.split(os.path.abspath(__file__))[0]+"/results/"+SEQUENCE+'/'  # path for saving rewards
-if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/results/"):  # create the folder if it does not exist
-    os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/results/")
-if not os.path.exists(RESULT_PATH):  # create the folder if it does not exist
-    os.mkdir(RESULT_PATH)
-
-class DDPGConfig:
-    def __init__(self):
-        self.algo = 'DDPG'  # algorithm name, used when plotting results
-        self.gamma = 0.99
-        self.critic_lr = 1e-3
-        self.actor_lr = 1e-4
-        self.memory_capacity = 10000
-        self.batch_size = 128
-        self.train_eps = 300
-        self.train_steps = 200
-        self.eval_eps = 200
-        self.eval_steps = 200
-        self.target_update = 4
-        self.hidden_dim = 30
-        self.soft_tau = 1e-2
-        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-def train(cfg,env,agent):
-    print('Start to train!')
-    ou_noise = OUNoise(env.action_space)  # action noise
-    rewards = []
-    ma_rewards = []  # moving average rewards
-    ep_steps = []
-    for i_episode in range(cfg.train_eps):
-        state = env.reset()
-        ou_noise.reset()
-        ep_reward = 0
-        for i_step in range(cfg.train_steps):
-            action = agent.choose_action(state)
-            action = ou_noise.get_action(action, i_step)  # the random process from the original paper
-            next_state, reward, done, _ = env.step(action)
-            ep_reward += reward
-            agent.memory.push(state, action, reward, next_state, done)
-            agent.update()
-            state = next_state
-            if done:
-                break
-        print('Episode:{}/{}, Reward:{}, Steps:{}, Done:{}'.format(i_episode+1,cfg.train_eps,ep_reward,i_step+1,done))
-        ep_steps.append(i_step)
-        rewards.append(ep_reward)
-        if ma_rewards:
-            ma_rewards.append(0.9*ma_rewards[-1]+0.1*ep_reward)
-        else:
-            ma_rewards.append(ep_reward)
-    print('Complete training!')
-    return rewards,ma_rewards
-
-if __name__ == "__main__":
-    cfg = DDPGConfig()
-    env = NormalizedActions(gym.make("Pendulum-v0"))
-    env.seed(1)  # set the env random seed
-    n_states = env.observation_space.shape[0]
-    n_actions = env.action_space.shape[0]
-    agent = DDPG(n_states,n_actions,cfg)
-    rewards,ma_rewards = train(cfg,env,agent)
-    agent.save(path=SAVED_MODEL_PATH)
-    save_results(rewards,ma_rewards,tag='train',path=RESULT_PATH)
-    plot_rewards(rewards,ma_rewards,tag="train",algo=cfg.algo,path=RESULT_PATH)
\ No newline at end of file
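The removed `DDPGConfig` defines `eval_eps` and `eval_steps`, but `main.py` only implemented `train()`. A minimal evaluation loop over the same interfaces might look like the sketch below; the function name `evaluate` and its structure are assumptions, not part of the removed code.

```python
def evaluate(cfg, env, agent):
    '''Greedy rollouts without exploration noise (hypothetical sketch, not part of the removed code).'''
    rewards = []
    for i_episode in range(cfg.eval_eps):
        state = env.reset()
        ep_reward = 0
        for i_step in range(cfg.eval_steps):
            action = agent.choose_action(state)  # no OU noise at evaluation time
            next_state, reward, done, _ = env.step(action)
            ep_reward += reward
            state = next_state
            if done:
                break
        rewards.append(ep_reward)
        print('Eval episode:{}/{}, Reward:{:.1f}'.format(i_episode+1, cfg.eval_eps, ep_reward))
    return rewards

# usage after training, e.g.:
# eval_rewards = evaluate(cfg, env, agent)
```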