add DDPG

This commit is contained in:

5	codes/DDPG/README.md	Normal file
@@ -0,0 +1,5 @@
# DDPG

## Pseudocode

![DDPG pseudocode](assets/image-20210320151900695.png)
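As a quick recap alongside the pseudocode figure (a summary of standard DDPG, matching what `agent.py` below computes rather than a transcription of the figure): the critic is regressed onto a bootstrapped target built from the target networks, and the actor is trained by ascending the critic.

$$
y = r + \gamma\,(1-d)\,Q_{\phi'}\!\big(s',\,\mu_{\theta'}(s')\big),\qquad
L(\phi) = \big(Q_\phi(s,a) - y\big)^2
$$

$$
J(\theta) = Q_\phi\big(s,\,\mu_\theta(s)\big)\ \ \text{(the code minimizes } -J\text{)},\qquad
\phi' \leftarrow \tau\,\phi + (1-\tau)\,\phi',\quad
\theta' \leftarrow \tau\,\theta + (1-\tau)\,\theta'
$$

Here $\tau$ is `soft_tau` in the code and $d$ is the done flag.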
93	codes/DDPG/agent.py	Normal file
@@ -0,0 +1,93 @@
#!/usr/bin/env python
# coding=utf-8
'''
@Author: John
@Email: johnjim0816@gmail.com
@Date: 2020-06-09 20:25:52
@LastEditor: John
LastEditTime: 2021-03-17 20:43:25
@Discription:
@Environment: python 3.7.7
'''
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

from common.model import Actor, Critic
from common.memory import ReplayBuffer


class DDPG:
    def __init__(self, n_states, n_actions, cfg):
        self.device = cfg.device
        self.critic = Critic(n_states, n_actions, cfg.hidden_dim).to(cfg.device)
        self.actor = Actor(n_states, n_actions, cfg.hidden_dim).to(cfg.device)
        self.target_critic = Critic(n_states, n_actions, cfg.hidden_dim).to(cfg.device)
        self.target_actor = Actor(n_states, n_actions, cfg.hidden_dim).to(cfg.device)

        # initialize the target networks with the same weights as the online networks
        for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):
            target_param.data.copy_(param.data)
        for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):
            target_param.data.copy_(param.data)

        self.critic_optimizer = optim.Adam(
            self.critic.parameters(), lr=cfg.critic_lr)
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=cfg.actor_lr)
        self.memory = ReplayBuffer(cfg.memory_capacity)
        self.batch_size = cfg.batch_size
        self.soft_tau = cfg.soft_tau
        self.gamma = cfg.gamma

    def choose_action(self, state):
        state = torch.FloatTensor(state).unsqueeze(0).to(self.device)
        action = self.actor(state)
        # detach() cuts the action off from the computation graph, so no gradients flow through action selection
        return action.detach().cpu().numpy()[0, 0]

    def update(self):
        if len(self.memory) < self.batch_size:
            return
        state, action, reward, next_state, done = self.memory.sample(
            self.batch_size)
        # convert all sampled variables to tensors
        state = torch.FloatTensor(state).to(self.device)
        next_state = torch.FloatTensor(next_state).to(self.device)
        action = torch.FloatTensor(action).to(self.device)
        reward = torch.FloatTensor(reward).unsqueeze(1).to(self.device)
        done = torch.FloatTensor(np.float32(done)).unsqueeze(1).to(self.device)
        # note: the critic takes (s_t, a) as input, so the policy loss is -Q(s, mu(s))
        policy_loss = self.critic(state, self.actor(state))

        policy_loss = -policy_loss.mean()

        next_action = self.target_actor(next_state)
        target_value = self.target_critic(next_state, next_action.detach())
        expected_value = reward + (1.0 - done) * self.gamma * target_value
        expected_value = torch.clamp(expected_value, -np.inf, np.inf)  # no-op with infinite bounds

        value = self.critic(state, action)
        value_loss = nn.MSELoss()(value, expected_value.detach())

        self.actor_optimizer.zero_grad()
        policy_loss.backward()
        self.actor_optimizer.step()

        self.critic_optimizer.zero_grad()
        value_loss.backward()
        self.critic_optimizer.step()

        # soft-update the target networks towards the online networks
        for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):
            target_param.data.copy_(
                target_param.data * (1.0 - self.soft_tau) +
                param.data * self.soft_tau
            )
        for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):
            target_param.data.copy_(
                target_param.data * (1.0 - self.soft_tau) +
                param.data * self.soft_tau
            )

    def save(self, path):
        # save the actor weights (matches load() below)
        torch.save(self.actor.state_dict(), path+'DDPG_checkpoint.pth')

    def load(self, path):
        self.actor.load_state_dict(torch.load(path+'DDPG_checkpoint.pth'))
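A hypothetical smoke test (a sketch, not part of the commit) showing the config attributes the constructor above expects. It assumes the repo's `common.model.Actor`/`Critic` and `common.memory.ReplayBuffer` are importable, e.g. when run from the `codes/` directory, and the same pre-0.26 gym API used in `main.py`.

```python
from types import SimpleNamespace

import gym
import numpy as np
import torch

from DDPG.agent import DDPG

# minimal config with the fields DDPG.__init__ reads
cfg = SimpleNamespace(device=torch.device("cpu"), hidden_dim=30,
                      critic_lr=1e-3, actor_lr=1e-4, memory_capacity=10000,
                      batch_size=128, soft_tau=1e-2, gamma=0.99)

env = gym.make("Pendulum-v0")
agent = DDPG(env.observation_space.shape[0], env.action_space.shape[0], cfg)

state = env.reset()
action = agent.choose_action(state)            # scalar float from the actor
next_state, reward, done, _ = env.step(np.array([action]))
agent.memory.push(state, action, reward, next_state, done)
agent.update()                                 # returns early until batch_size transitions are stored
```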
BIN	codes/DDPG/assets/image-20210320151900695.png	Normal file
Binary file not shown. Size: 259 KiB
61	codes/DDPG/env.py	Normal file
@@ -0,0 +1,61 @@
#!/usr/bin/env python
# coding=utf-8
'''
@Author: John
@Email: johnjim0816@gmail.com
@Date: 2020-06-10 15:28:30
@LastEditor: John
LastEditTime: 2021-03-19 19:56:46
@Discription:
@Environment: python 3.7.7
'''
import gym
import numpy as np


class NormalizedActions(gym.ActionWrapper):
    ''' Rescale actions from [-1, 1] to the environment's [low, high] range
    '''
    def action(self, action):

        low_bound = self.action_space.low
        upper_bound = self.action_space.high
        action = low_bound + (action + 1.0) * 0.5 * (upper_bound - low_bound)
        action = np.clip(action, low_bound, upper_bound)

        return action

    def reverse_action(self, action):
        # map an environment action back into [-1, 1]
        low_bound = self.action_space.low
        upper_bound = self.action_space.high
        action = 2 * (action - low_bound) / (upper_bound - low_bound) - 1
        action = np.clip(action, low_bound, upper_bound)
        return action


class OUNoise(object):
    '''Ornstein–Uhlenbeck process for temporally correlated exploration noise
    '''
    def __init__(self, action_space, mu=0.0, theta=0.15, max_sigma=0.3, min_sigma=0.3, decay_period=100000):
        self.mu = mu
        self.theta = theta
        self.sigma = max_sigma
        self.max_sigma = max_sigma
        self.min_sigma = min_sigma
        self.decay_period = decay_period
        self.n_actions = action_space.shape[0]
        self.low = action_space.low
        self.high = action_space.high
        self.reset()

    def reset(self):
        self.obs = np.ones(self.n_actions) * self.mu

    def evolve_obs(self):
        x = self.obs
        dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.n_actions)
        self.obs = x + dx
        return self.obs

    def get_action(self, action, t=0):
        ou_obs = self.evolve_obs()
        # linearly anneal sigma from max_sigma to min_sigma over decay_period steps
        self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(1.0, t / self.decay_period)
        return np.clip(action + ou_obs, self.low, self.high)
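For orientation, a minimal sketch of how these two helpers are meant to be combined, mirroring what `main.py` below does; it assumes gym with the classic `Pendulum-v0` environment and the old `reset`/`step` API.

```python
import gym
import numpy as np

from DDPG.env import NormalizedActions, OUNoise

env = NormalizedActions(gym.make("Pendulum-v0"))   # wrapper rescales [-1, 1] actions to the env's range
noise = OUNoise(env.action_space)
noise.reset()

state = env.reset()
raw_action = np.array([0.5])                       # stand-in for the actor's output in [-1, 1]
noisy_action = noise.get_action(raw_action, t=0)   # add OU noise, clipped to the action-space bounds
next_state, reward, done, _ = env.step(noisy_action)
```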
92	codes/DDPG/main.py	Normal file
@@ -0,0 +1,92 @@
#!/usr/bin/env python
# coding=utf-8
'''
@Author: John
@Email: johnjim0816@gmail.com
@Date: 2020-06-11 20:58:21
@LastEditor: John
LastEditTime: 2021-03-19 19:57:00
@Discription:
@Environment: python 3.7.7
'''
import sys, os
sys.path.append(os.getcwd())  # add the current working directory to the module search path
import torch
import gym
import numpy as np
import datetime
from DDPG.agent import DDPG
from DDPG.env import NormalizedActions, OUNoise
from common.plot import plot_rewards
from common.utils import save_results

SEQUENCE = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")  # timestamp used to tag this run
SAVED_MODEL_PATH = os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"+SEQUENCE+'/'  # path for saving the model
if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"):  # create the folder if it does not exist
    os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/")
if not os.path.exists(SAVED_MODEL_PATH):
    os.mkdir(SAVED_MODEL_PATH)
RESULT_PATH = os.path.split(os.path.abspath(__file__))[0]+"/results/"+SEQUENCE+'/'  # path for saving the rewards
if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/results/"):  # create the folder if it does not exist
    os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/results/")
if not os.path.exists(RESULT_PATH):
    os.mkdir(RESULT_PATH)

class DDPGConfig:
    def __init__(self):
        self.algo = 'DDPG'  # algorithm name, used by plot_rewards
        self.gamma = 0.99
        self.critic_lr = 1e-3
        self.actor_lr = 1e-4
        self.memory_capacity = 10000
        self.batch_size = 128
        self.train_eps = 300
        self.train_steps = 200
        self.eval_eps = 200
        self.eval_steps = 200
        self.target_update = 4
        self.hidden_dim = 30
        self.soft_tau = 1e-2
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def train(cfg, env, agent):
    print('Start to train!')
    ou_noise = OUNoise(env.action_space)  # action noise
    rewards = []
    ma_rewards = []  # moving average rewards
    ep_steps = []
    for i_episode in range(cfg.train_eps):
        state = env.reset()
        ou_noise.reset()
        ep_reward = 0
        for i_step in range(cfg.train_steps):
            action = agent.choose_action(state)
            action = ou_noise.get_action(
                action, i_step)  # the "random process" for exploration in the paper
            next_state, reward, done, _ = env.step(action)
            ep_reward += reward
            agent.memory.push(state, action, reward, next_state, done)
            agent.update()
            state = next_state
            if done:
                break
        print('Episode:{}/{}, Reward:{}, Steps:{}, Done:{}'.format(i_episode+1, cfg.train_eps, ep_reward, i_step+1, done))
        ep_steps.append(i_step)
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(0.9*ma_rewards[-1]+0.1*ep_reward)
        else:
            ma_rewards.append(ep_reward)
    print('Complete training!')
    return rewards, ma_rewards

if __name__ == "__main__":
    cfg = DDPGConfig()
    env = NormalizedActions(gym.make("Pendulum-v0"))
    env.seed(1)  # set the env random seed
    n_states = env.observation_space.shape[0]
    n_actions = env.action_space.shape[0]
    agent = DDPG(n_states, n_actions, cfg)
    rewards, ma_rewards = train(cfg, env, agent)
    agent.save(path=SAVED_MODEL_PATH)
    save_results(rewards, ma_rewards, tag='train', path=RESULT_PATH)
    plot_rewards(rewards, ma_rewards, tag="train", algo=cfg.algo, path=RESULT_PATH)
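Judging by `sys.path.append(os.getcwd())` and the `DDPG.*` / `common.*` imports, the script is presumably launched from the `codes/` directory, e.g. `cd codes && python DDPG/main.py`; the reward curves and the model checkpoint then land in timestamped folders under `codes/DDPG/results/` and `codes/DDPG/saved_model/`.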