update
@@ -5,7 +5,7 @@
@Email: johnjim0816@gmail.com
@Date: 2020-06-09 20:25:52
@LastEditor: John
LastEditTime: 2021-03-31 00:56:32
LastEditTime: 2021-05-04 14:50:17
@Description:
@Environment: python 3.7.7
'''
@@ -26,6 +26,7 @@ class DDPG:
        self.target_critic = Critic(state_dim, action_dim, cfg.hidden_dim).to(cfg.device)
        self.target_actor = Actor(state_dim, action_dim, cfg.hidden_dim).to(cfg.device)

        # copy parameters to the target networks
        for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):
            target_param.data.copy_(param.data)
        for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):
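Note: the hard copy above only initializes the target networks. During training, DDPG keeps them slowly tracking the online networks with a soft (Polyak) update weighted by cfg.soft_tau (1e-2 in this config). A minimal sketch of that step, assuming matching parameter order between the two networks:

def soft_update(target_net, net, soft_tau):
    # blend a small fraction of the online weights into the target weights
    for target_param, param in zip(target_net.parameters(), net.parameters()):
        target_param.data.copy_(
            target_param.data * (1.0 - soft_tau) + param.data * soft_tau)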
@@ -42,7 +43,6 @@ class DDPG:
    def choose_action(self, state):
        state = torch.FloatTensor(state).unsqueeze(0).to(self.device)
        action = self.actor(state)
        # detach() cuts the action tensor out of the autograd graph, so no gradients flow back
        return action.detach().cpu().numpy()[0, 0]

    def update(self):
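The update() method below starts by sampling a batch from self.memory. The replay buffer itself is not shown in this diff; a minimal sketch of the push/sample interface it would need (an assumption based on usage, not the repo's actual implementation):

import random
from collections import deque

class ReplayBuffer:
    def __init__(self, capacity):
        self.buffer = deque(maxlen=capacity)  # oldest transitions are evicted first

    def push(self, state, action, reward, next_state, done):
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        batch = random.sample(self.buffer, batch_size)
        # regroup the batch column-wise: states together, actions together, ...
        state, action, reward, next_state, done = zip(*batch)
        return state, action, reward, next_state, done

    def __len__(self):
        return len(self.buffer)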
@@ -50,13 +50,13 @@ class DDPG:
            return
        state, action, reward, next_state, done = self.memory.sample(
            self.batch_size)
        # convert variables to Tensor
        state = torch.FloatTensor(state).to(self.device)
        next_state = torch.FloatTensor(next_state).to(self.device)
        action = torch.FloatTensor(action).to(self.device)
        reward = torch.FloatTensor(reward).unsqueeze(1).to(self.device)
        done = torch.FloatTensor(np.float32(done)).unsqueeze(1).to(self.device)
        # note that the critic takes (s_t, a) as its input
        policy_loss = self.critic(state, self.actor(state))
        policy_loss = -policy_loss.mean()
        next_action = self.target_actor(next_state)

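The hunk cuts off before the critic update. For reference, a typical DDPG continuation of this method (a sketch, not the repo's verbatim code; self.critic_optimizer, self.actor_optimizer, and self.gamma are assumed attribute names):

        # Bellman target: r + gamma * Q'(s', mu'(s')), masked on terminal states
        target_value = self.target_critic(next_state, next_action.detach())
        expected_value = reward + (1.0 - done) * self.gamma * target_value
        value = self.critic(state, action)
        value_loss = torch.nn.functional.mse_loss(value, expected_value.detach())

        self.actor_optimizer.zero_grad()
        policy_loss.backward()
        self.actor_optimizer.step()

        self.critic_optimizer.zero_grad()
        value_loss.backward()
        self.critic_optimizer.step()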
@@ -1,94 +0,0 @@
#!/usr/bin/env python
# coding=utf-8
'''
@Author: John
@Email: johnjim0816@gmail.com
@Date: 2020-06-11 20:58:21
@LastEditor: John
LastEditTime: 2021-04-29 01:58:50
@Description:
@Environment: python 3.7.7
'''
import sys,os
from pathlib import Path
curr_path = os.path.dirname(__file__)
parent_path = os.path.dirname(curr_path)
sys.path.append(parent_path)  # add current terminal path to sys.path

import torch
import gym
import numpy as np
import datetime
from DDPG.agent import DDPG
from DDPG.env import NormalizedActions,OUNoise
from common.plot import plot_rewards
from common.utils import save_results

SEQUENCE = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")  # obtain current time
SAVED_MODEL_PATH = curr_path+"/saved_model/"+SEQUENCE+'/'  # path to save model
if not os.path.exists(curr_path+"/saved_model/"): os.mkdir(curr_path+"/saved_model/")
if not os.path.exists(SAVED_MODEL_PATH): os.mkdir(SAVED_MODEL_PATH)
RESULT_PATH = curr_path+"/results/"+SEQUENCE+'/'  # path to save rewards
if not os.path.exists(curr_path+"/results/"): os.mkdir(curr_path+"/results/")
if not os.path.exists(RESULT_PATH): os.mkdir(RESULT_PATH)

class DDPGConfig:
    def __init__(self):
        self.env = 'Pendulum-v0'
        self.algo = 'DDPG'
        self.gamma = 0.99
        self.critic_lr = 1e-3
        self.actor_lr = 1e-4
        self.memory_capacity = 10000
        self.batch_size = 128
        self.train_eps = 300
        self.eval_eps = 200
        self.eval_steps = 200
        self.target_update = 4
        self.hidden_dim = 30
        self.soft_tau = 1e-2
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def train(cfg,env,agent):
    print('Start training!')
    ou_noise = OUNoise(env.action_space)  # action noise
    rewards = []
    ma_rewards = []  # moving average rewards
    ep_steps = []
    for i_episode in range(cfg.train_eps):
        state = env.reset()
        ou_noise.reset()
        done = False
        ep_reward = 0
        i_step = 0
        while not done:
            i_step += 1
            action = agent.choose_action(state)
            action = ou_noise.get_action(action, i_step)  # i.e. the random process in the paper
            next_state, reward, done, _ = env.step(action)
            ep_reward += reward
            agent.memory.push(state, action, reward, next_state, done)
            agent.update()
            state = next_state
        print('Episode:{}/{}, Reward:{}'.format(i_episode+1,cfg.train_eps,ep_reward))
        ep_steps.append(i_step)
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(0.9*ma_rewards[-1]+0.1*ep_reward)
        else:
            ma_rewards.append(ep_reward)
    print('Complete training!')
    return rewards,ma_rewards

if __name__ == "__main__":
    cfg = DDPGConfig()
    env = NormalizedActions(gym.make("Pendulum-v0"))
    env.seed(1)  # set the random seed for the env
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    agent = DDPG(state_dim,action_dim,cfg)
    rewards,ma_rewards = train(cfg,env,agent)
    agent.save(path=SAVED_MODEL_PATH)
    save_results(rewards,ma_rewards,tag='train',path=RESULT_PATH)
    plot_rewards(rewards,ma_rewards,tag="train",algo = cfg.algo,path=RESULT_PATH)

(Binary image files changed in this commit; not shown.)
codes/DDPG/task0_train.py (new file, 135 lines)
@@ -0,0 +1,135 @@
#!/usr/bin/env python
# coding=utf-8
'''
@Author: John
@Email: johnjim0816@gmail.com
@Date: 2020-06-11 20:58:21
@LastEditor: John
LastEditTime: 2021-05-04 14:49:45
@Description:
@Environment: python 3.7.7
'''
import sys,os
curr_path = os.path.dirname(__file__)
parent_path = os.path.dirname(curr_path)
sys.path.append(parent_path)  # add current terminal path to sys.path

import datetime
import gym
import torch

from DDPG.env import NormalizedActions, OUNoise
from DDPG.agent import DDPG
from common.utils import save_results, make_dir
from common.plot import plot_rewards

curr_time = datetime.datetime.now().strftime(
    "%Y%m%d-%H%M%S")  # obtain current time


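common.utils.make_dir is not shown in this diff; judging by how it is called in the main block below, it is presumably a small helper along these lines (an assumption, not the repo's code):

import os

def make_dir(*paths):
    # create each directory (and its parents) if it does not already exist
    for path in paths:
        os.makedirs(path, exist_ok=True)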
class DDPGConfig:
    def __init__(self):
        self.algo = 'DDPG'
        self.env = 'Pendulum-v0'  # env name
        self.result_path = curr_path+"/outputs/" + self.env + \
            '/'+curr_time+'/results/'  # path to save results
        self.model_path = curr_path+"/outputs/" + self.env + \
            '/'+curr_time+'/models/'  # path to save models
        self.gamma = 0.99
        self.critic_lr = 1e-3
        self.actor_lr = 1e-4
        self.memory_capacity = 10000
        self.batch_size = 128
        self.train_eps = 300
        self.eval_eps = 50
        self.eval_steps = 200
        self.target_update = 4
        self.hidden_dim = 30
        self.soft_tau = 1e-2
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

def env_agent_config(cfg, seed=1):
    env = NormalizedActions(gym.make(cfg.env))
    env.seed(seed)
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    agent = DDPG(state_dim, action_dim, cfg)
    return env, agent

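DDPG.env.NormalizedActions is also not part of this diff. Wrappers with this name conventionally rescale agent actions from [-1, 1] to the env's action bounds (matching a tanh-bounded actor output); a sketch under that assumption:

import numpy as np

class NormalizedActions(gym.ActionWrapper):
    def action(self, action):
        # map action from [-1, 1] to the wrapped env's [low, high] range
        low, high = self.action_space.low, self.action_space.high
        action = low + (action + 1.0) * 0.5 * (high - low)
        return np.clip(action, low, high)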
def train(cfg, env, agent):
    print('Start training!')
    print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
    ou_noise = OUNoise(env.action_space)  # action noise
    rewards = []
    ma_rewards = []  # moving average rewards
    for i_episode in range(cfg.train_eps):
        state = env.reset()
        ou_noise.reset()
        done = False
        ep_reward = 0
        i_step = 0
        while not done:
            i_step += 1
            action = agent.choose_action(state)
            action = ou_noise.get_action(
                action, i_step)  # i.e. the random process in the paper
            next_state, reward, done, _ = env.step(action)
            ep_reward += reward
            agent.memory.push(state, action, reward, next_state, done)
            agent.update()
            state = next_state
        print('Episode:{}/{}, Reward:{}'.format(i_episode+1, cfg.train_eps, ep_reward))
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(0.9*ma_rewards[-1]+0.1*ep_reward)
        else:
            ma_rewards.append(ep_reward)
    print('Complete training!')
    return rewards, ma_rewards

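The OUNoise class used above comes from DDPG.env and is not shown in this diff. A minimal sketch of an Ornstein-Uhlenbeck noise process with the same reset/get_action interface (parameter values here are assumptions, not the repo's):

import numpy as np

class OUNoise:
    def __init__(self, action_space, mu=0.0, theta=0.15, sigma=0.3):
        self.mu, self.theta, self.sigma = mu, theta, sigma
        self.low, self.high = action_space.low, action_space.high
        self.n_actions = action_space.shape[0]
        self.reset()

    def reset(self):
        self.state = np.ones(self.n_actions) * self.mu

    def get_action(self, action, t=0):
        # one Euler step of the mean-reverting OU process, then perturb the action
        dx = self.theta * (self.mu - self.state) + self.sigma * np.random.randn(self.n_actions)
        self.state = self.state + dx
        return np.clip(action + self.state, self.low, self.high)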
def eval(cfg, env, agent):
    print('Start evaluating!')
    print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
    rewards = []
    ma_rewards = []  # moving average rewards
    for i_episode in range(cfg.eval_eps):
        state = env.reset()
        done = False
        ep_reward = 0
        i_step = 0
        while not done:
            i_step += 1
            action = agent.choose_action(state)
            next_state, reward, done, _ = env.step(action)
            ep_reward += reward
            state = next_state
        print('Episode:{}/{}, Reward:{}'.format(i_episode+1, cfg.eval_eps, ep_reward))
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(0.9*ma_rewards[-1]+0.1*ep_reward)
        else:
            ma_rewards.append(ep_reward)
    print('Complete evaluation!')
    return rewards, ma_rewards


if __name__ == "__main__":
    cfg = DDPGConfig()

    # train
    env, agent = env_agent_config(cfg, seed=1)
    rewards, ma_rewards = train(cfg, env, agent)
    make_dir(cfg.result_path, cfg.model_path)
    agent.save(path=cfg.model_path)
    save_results(rewards, ma_rewards, tag='train', path=cfg.result_path)
    plot_rewards(rewards, ma_rewards, tag="train",
                 algo=cfg.algo, path=cfg.result_path)

    # eval
    env, agent = env_agent_config(cfg, seed=10)
    agent.load(path=cfg.model_path)
    rewards, ma_rewards = eval(cfg, env, agent)
    save_results(rewards, ma_rewards, tag='eval', path=cfg.result_path)
    plot_rewards(rewards, ma_rewards, tag="eval", env=cfg.env, algo=cfg.algo, path=cfg.result_path)