Update algorithm template

johnjim0816
2022-11-06 12:15:36 +08:00
parent 466a17707f
commit dc78698262
256 changed files with 17282 additions and 10229 deletions


@@ -5,7 +5,7 @@
@Email: johnjim0816@gmail.com
@Date: 2020-06-09 20:25:52
@LastEditor: John
LastEditTime: 2022-06-09 19:04:44
LastEditTime: 2022-09-27 15:43:21
@Discription:
@Environment: python 3.7.7
'''
@@ -14,96 +14,45 @@ import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import copy  # used to create independent copies of the target networks
class ReplayBuffer:
def __init__(self, capacity):
self.capacity = capacity # capacity of the replay buffer
self.buffer = [] # buffer
self.position = 0
def push(self, state, action, reward, next_state, done):
''' The buffer acts as a queue: once capacity is reached, the oldest stored transitions are overwritten
'''
if len(self.buffer) < self.capacity:
self.buffer.append(None)
self.buffer[self.position] = (state, action, reward, next_state, done)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
batch = random.sample(self.buffer, batch_size) # randomly sample a mini-batch of transitions
state, action, reward, next_state, done = zip(*batch) # unzip into states, actions, etc.
return state, action, reward, next_state, done
def __len__(self):
''' Return the current number of stored transitions
'''
return len(self.buffer)
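# Hypothetical usage sketch of the buffer above (illustration only, not part of the diffed file):
#   buffer = ReplayBuffer(capacity=8000)
#   buffer.push(state, action, reward, next_state, done)
#   if len(buffer) >= batch_size:
#       states, actions, rewards, next_states, dones = buffer.sample(batch_size)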
class Actor(nn.Module):
def __init__(self, n_states, n_actions, hidden_dim, init_w=3e-3):
super(Actor, self).__init__()
self.linear1 = nn.Linear(n_states, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.linear3 = nn.Linear(hidden_dim, n_actions)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, x):
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = torch.tanh(self.linear3(x))
return x
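# Note: torch.tanh bounds the actor's output to [-1, 1]; main.py wraps the environment in
# NormalizedActions, which is expected to rescale these values to the env's actual action range.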
class Critic(nn.Module):
def __init__(self, n_states, n_actions, hidden_dim, init_w=3e-3):
super(Critic, self).__init__()
self.linear1 = nn.Linear(n_states + n_actions, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.linear3 = nn.Linear(hidden_dim, 1)
# randomly initialize the output layer to small values
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, state, action):
# concatenate along dimension 1
x = torch.cat([state, action], 1)
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = self.linear3(x)
return x
class DDPG:
def __init__(self, n_states, n_actions, cfg):
self.device = torch.device(cfg.device)
self.critic = Critic(n_states, n_actions, cfg.hidden_dim).to(self.device)
self.actor = Actor(n_states, n_actions, cfg.hidden_dim).to(self.device)
self.target_critic = Critic(n_states, n_actions, cfg.hidden_dim).to(self.device)
self.target_actor = Actor(n_states, n_actions, cfg.hidden_dim).to(self.device)
# copy parameters to the target networks
class DDPG:
def __init__(self, models,memories,cfg):
self.device = torch.device(cfg['device'])
self.critic = models['critic'].to(self.device)
self.target_critic = copy.deepcopy(models['critic']).to(self.device) # deepcopy so the target critic is an independent network, not an alias of the online critic
self.actor = models['actor'].to(self.device)
self.target_actor = copy.deepcopy(models['actor']).to(self.device) # deepcopy so the target actor is an independent network
# copy weights from critic to target_critic
for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):
target_param.data.copy_(param.data)
# copy weights from actor to target_actor
for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):
target_param.data.copy_(param.data)
self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=cfg['critic_lr'])
self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=cfg['actor_lr'])
self.memory = memories['memory']
self.batch_size = cfg['batch_size']
self.gamma = cfg['gamma']
self.tau = cfg['tau']
self.critic_optimizer = optim.Adam(
self.critic.parameters(), lr=cfg.critic_lr)
self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=cfg.actor_lr)
self.memory = ReplayBuffer(cfg.memory_capacity)
self.batch_size = cfg.batch_size
self.soft_tau = cfg.soft_tau # soft update coefficient
self.gamma = cfg.gamma
def choose_action(self, state):
def sample_action(self, state):
state = torch.FloatTensor(state).unsqueeze(0).to(self.device)
action = self.actor(state)
return action.detach().cpu().numpy()[0, 0]
@torch.no_grad()
def predict_action(self, state):
''' predict action
'''
state = torch.FloatTensor(state).unsqueeze(0).to(self.device)
action = self.actor(state)
return action.cpu().numpy()[0, 0]
def update(self):
if len(self.memory) < self.batch_size: # do not update the policy until the memory holds at least one batch
if len(self.memory) < self.batch_size: # when memory size is less than batch size, return
return
# randomly sample a batch of transitions from the replay memory
# sample a random minibatch of N transitions from R
state, action, reward, next_state, done = self.memory.sample(self.batch_size)
# convert to tensors
# convert to tensor
state = torch.FloatTensor(np.array(state)).to(self.device)
next_state = torch.FloatTensor(np.array(next_state)).to(self.device)
action = torch.FloatTensor(np.array(action)).to(self.device)
@@ -126,19 +75,22 @@ class DDPG:
self.critic_optimizer.zero_grad()
value_loss.backward()
self.critic_optimizer.step()
# soft update of the target networks
# soft update
for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):
target_param.data.copy_(
target_param.data * (1.0 - self.soft_tau) +
param.data * self.soft_tau
target_param.data * (1.0 - self.tau) +
param.data * self.tau
)
for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):
target_param.data.copy_(
target_param.data * (1.0 - self.soft_tau) +
param.data * self.soft_tau
target_param.data * (1.0 - self.tau) +
param.data * self.tau
)
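# Soft (Polyak) update used above: target_param <- tau * param + (1 - tau) * target_param.
# With a small tau (0.01 here) the target networks trail the online networks slowly,
# which keeps the TD targets stable during training.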
def save(self,path):
torch.save(self.actor.state_dict(), path+'checkpoint.pt')
def save_model(self,path):
from pathlib import Path
# create path
Path(path).mkdir(parents=True, exist_ok=True)
torch.save(self.actor.state_dict(), f"{path}/actor_checkpoint.pt")
def load(self,path):
self.actor.load_state_dict(torch.load(path+'checkpoint.pt'))
def load_model(self,path):
self.actor.load_state_dict(torch.load(f"{path}/actor_checkpoint.pt"))
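The hunk above elides the middle of update(), where the critic and actor losses are built. As a reference only, here is a sketch of the standard DDPG formulation using the attribute names visible in this class; it is not necessarily the exact code hidden by the diff, and it assumes reward and done have already been converted to column tensors like the other batch elements:

# sketch of the standard DDPG losses (reference only, not the diffed code)
policy_loss = -self.critic(state, self.actor(state)).mean()          # actor: maximize Q(s, pi(s))
next_action = self.target_actor(next_state)
target_value = self.target_critic(next_state, next_action.detach())
expected_value = reward + (1.0 - done) * self.gamma * target_value   # TD target from the target nets
value_loss = nn.MSELoss()(self.critic(state, action), expected_value.detach())
self.actor_optimizer.zero_grad()
policy_loss.backward()
self.actor_optimizer.step()
# ...followed by the critic_optimizer steps shown in the hunk above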

projects/codes/DDPG/main.py Normal file

@@ -0,0 +1,152 @@
#!/usr/bin/env python
# coding=utf-8
'''
@Author: John
@Email: johnjim0816@gmail.com
@Date: 2020-06-11 20:58:21
@LastEditor: John
LastEditTime: 2022-09-27 15:50:12
@Discription:
@Environment: python 3.7.7
'''
import sys,os
curr_path = os.path.dirname(os.path.abspath(__file__)) # current path
parent_path = os.path.dirname(curr_path) # parent path
sys.path.append(parent_path) # add to system path
import datetime
import gym
import torch
import argparse
import torch.nn as nn
import torch.nn.functional as F
from env import NormalizedActions,OUNoise
from ddpg import DDPG
from common.utils import all_seed
from common.memories import ReplayBufferQue
from common.launcher import Launcher
from envs.register import register_env
class Actor(nn.Module):
def __init__(self, n_states, n_actions, hidden_dim, init_w=3e-3):
super(Actor, self).__init__()
self.linear1 = nn.Linear(n_states, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.linear3 = nn.Linear(hidden_dim, n_actions)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, x):
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = torch.tanh(self.linear3(x))
return x
class Critic(nn.Module):
def __init__(self, n_states, n_actions, hidden_dim, init_w=3e-3):
super(Critic, self).__init__()
self.linear1 = nn.Linear(n_states + n_actions, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.linear3 = nn.Linear(hidden_dim, 1)
# randomly initialize the output layer to small values
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, state, action):
# concatenate along dimension 1
x = torch.cat([state, action], 1)
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = self.linear3(x)
return x
class Main(Launcher):
def get_args(self):
""" hyperparameters
"""
curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # obtain current time
parser = argparse.ArgumentParser(description="hyperparameters")
parser.add_argument('--algo_name',default='DDPG',type=str,help="name of algorithm")
parser.add_argument('--env_name',default='Pendulum-v1',type=str,help="name of environment")
parser.add_argument('--train_eps',default=300,type=int,help="episodes of training")
parser.add_argument('--test_eps',default=20,type=int,help="episodes of testing")
parser.add_argument('--max_steps',default=100000,type=int,help="max steps per episode; a large value approximates an infinite-horizon episode")
parser.add_argument('--gamma',default=0.99,type=float,help="discount factor")
parser.add_argument('--critic_lr',default=1e-3,type=float,help="learning rate of critic")
parser.add_argument('--actor_lr',default=1e-4,type=float,help="learning rate of actor")
parser.add_argument('--memory_capacity',default=8000,type=int,help="memory capacity")
parser.add_argument('--batch_size',default=128,type=int)
parser.add_argument('--target_update',default=2,type=int)
parser.add_argument('--tau',default=1e-2,type=float)
parser.add_argument('--critic_hidden_dim',default=256,type=int)
parser.add_argument('--actor_hidden_dim',default=256,type=int)
parser.add_argument('--device',default='cpu',type=str,help="cpu or cuda")
parser.add_argument('--seed',default=1,type=int,help="random seed")
parser.add_argument('--show_fig',default=False,type=bool,help="if show figure or not")
parser.add_argument('--save_fig',default=True,type=bool,help="if save figure or not")
args = parser.parse_args()
default_args = {'result_path':f"{curr_path}/outputs/{args.env_name}/{curr_time}/results/",
'model_path':f"{curr_path}/outputs/{args.env_name}/{curr_time}/models/",
}
args = {**vars(args),**default_args} # type(dict)
return args
def env_agent_config(self,cfg):
register_env(cfg['env_name'])
env = gym.make(cfg['env_name'])
env = NormalizedActions(env) # wrap the env so the agent's [-1, 1] actions are rescaled to the env's action range
if cfg['seed'] !=0: # set random seed
all_seed(env,seed=cfg["seed"])
n_states = env.observation_space.shape[0]
n_actions = env.action_space.shape[0]
print(f"n_states: {n_states}, n_actions: {n_actions}")
cfg.update({"n_states":n_states,"n_actions":n_actions}) # update to cfg paramters
models = {"actor":Actor(n_states,n_actions,hidden_dim=cfg['actor_hidden_dim']),"critic":Critic(n_states,n_actions,hidden_dim=cfg['critic_hidden_dim'])}
memories = {"memory":ReplayBufferQue(cfg['memory_capacity'])}
agent = DDPG(models,memories,cfg)
return env,agent
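# Note: this is the heart of the updated template -- the networks and the replay buffer are
# built here and injected into DDPG via the `models` and `memories` dicts, so the agent class
# stays generic and can be reused with other architectures or buffer implementations.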
def train(self,cfg, env, agent):
print('Start training!')
ou_noise = OUNoise(env.action_space) # noise of action
rewards = [] # record rewards for all episodes
for i_ep in range(cfg['train_eps']):
state = env.reset()
ou_noise.reset()
ep_reward = 0
for i_step in range(cfg['max_steps']):
action = agent.sample_action(state)
action = ou_noise.get_action(action, i_step+1)
next_state, reward, done, _ = env.step(action)
ep_reward += reward
agent.memory.push((state, action, reward, next_state, done))
agent.update()
state = next_state
if done:
break
if (i_ep+1)%10 == 0:
print(f"Env:{i_ep+1}/{cfg['train_eps']}, Reward:{ep_reward:.2f}")
rewards.append(ep_reward)
print('Finish training!')
return {'rewards':rewards}
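# Note: training explores with sample_action plus Ornstein-Uhlenbeck noise, while testing below
# acts deterministically via the no-grad predict_action without added noise.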
def test(self,cfg, env, agent):
print('Start testing!')
rewards = [] # record rewards for all episodes
for i_ep in range(cfg['test_eps']):
state = env.reset()
ep_reward = 0
for i_step in range(cfg['max_steps']):
action = agent.predict_action(state)
next_state, reward, done, _ = env.step(action)
ep_reward += reward
state = next_state
if done:
break
rewards.append(ep_reward)
print(f"Episode:{i_ep+1}/{cfg['test_eps']}, Reward:{ep_reward:.1f}")
print('Finish testing!')
return {'rewards':rewards}
if __name__ == "__main__":
main = Main()
main.run()


@@ -1,18 +0,0 @@
{
"algo_name": "DDPG",
"env_name": "Pendulum-v1",
"train_eps": 300,
"test_eps": 20,
"gamma": 0.99,
"critic_lr": 0.001,
"actor_lr": 0.0001,
"memory_capacity": 8000,
"batch_size": 128,
"target_update": 2,
"soft_tau": 0.01,
"hidden_dim": 256,
"deivce": "cpu",
"result_path": "C:\\Users\\24438\\Desktop\\rl-tutorials/outputs/DDPG/outputs/Pendulum-v1/20220713-225402/results//",
"model_path": "C:\\Users\\24438\\Desktop\\rl-tutorials/outputs/DDPG/outputs/Pendulum-v1/20220713-225402/models/",
"save_fig": true
}

Two binary image files removed (42 KiB and 66 KiB); contents not shown.


@@ -0,0 +1,25 @@
{
"algo_name": "DDPG",
"env_name": "Pendulum-v1",
"train_eps": 300,
"test_eps": 20,
"max_steps": 100000,
"gamma": 0.99,
"critic_lr": 0.001,
"actor_lr": 0.0001,
"memory_capacity": 8000,
"batch_size": 128,
"target_update": 2,
"tau": 0.01,
"critic_hidden_dim": 256,
"actor_hidden_dim": 256,
"device": "cpu",
"seed": 1,
"show_fig": false,
"save_fig": true,
"result_path": "/Users/jj/Desktop/rl-tutorials/codes/DDPG/outputs/Pendulum-v1/20220927-155053/results/",
"model_path": "/Users/jj/Desktop/rl-tutorials/codes/DDPG/outputs/Pendulum-v1/20220927-155053/models/",
"n_states": 3,
"n_actions": 1,
"training_time": 358.8142900466919
}

Binary image file added (48 KiB); contents not shown.


@@ -0,0 +1,21 @@
rewards
-116.045416124376
-126.18022935469217
-231.46338228458293
-246.40481094689758
-304.69493818839186
-124.39609191913091
-1.060003582878406
-114.19659653048288
-348.9745708742037
-116.10811133324769
-117.20146333694844
-118.66206784602966
-235.17836229762355
-356.14054913290624
-118.38579118156366
-351.9415915140771
-114.50877866098972
-124.775484599685
-226.47062962476875
-121.48872909193936

Binary image file added (79 KiB); contents not shown.


@@ -0,0 +1,301 @@
rewards
-1557.8518596631177
-1354.7599369723537
-1375.5732016629706
-1493.8609739040871
-1426.7116204537845
-1235.7920755027762
-1339.1647620443073
-1544.2379906560486
-1539.6232758780877
-1549.5690058648204
-1446.9193195793853
-1520.2666688767558
-1525.0116707122581
-1379.136573640111
-1532.702831768523
-1484.7552963941637
-1359.6699201737677
-1349.6805649166854
-1510.869999766432
-1515.8398785434708
-1447.4648656578254
-1537.3822077872178
-1249.6517039877456
-1350.0302666965736
-1529.4363372505607
-1320.28204807604
-1502.9248141320654
-1545.4861772197075
-1579.928789692619
-1413.296070504152
-1242.4673258663781
-1403.8672028946078
-1452.7199002523635
-871.6071114009982
-1324.1789316121412
-1313.3348146041249
-1059.8722927418046
-1054.232673559123
-973.8956270782459
-972.9936641224186
-972.9477399905655
-947.0613443333731
-737.3866328989184
-958.6068164634295
-739.6973395350705
-886.8383108399455
-775.1430379821574
-937.3115016337417
-700.875502951337
-829.9396339144109
-271.1629773396998
-493.5460684734584
-485.9321719313203
-858.3735607086766
-1145.3440084994113
-1121.1338201339777
-1191.5640831332366
-1350.0425368846784
-249.25438665107953
-727.9051714734406
-368.5579316240395
-392.0611344939354
-955.3231703741553
-488.27956192035265
-362.2734695759137
-949.5440839122496
-496.8460016912189
-726.6871514929877
-424.48641462866266
-954.7075428204689
-608.9650086409792
-848.6059768900151
-866.7052398755033
-856.9846415044439
-751.0342976129083
-749.5118249469103
-509.882299129811
-506.56154097018043
-906.0964475820368
-1318.3941416286855
-1422.2017011876615
-1523.1661091894277
-1209.2850593747999
-1415.0972750475833
-1533.2263827605834
-1405.8345530072663
-1244.3384723384913
-1237.4704845061992
-949.3394417935086
-981.1855396112669
-1241.224568444032
-1033.118364799829
-1017.2403725619487
-981.9727804516916
-853.1877724775591
-869.0652369861646
-1069.8265343327998
-371.73173813891884
-735.5887912713665
-1262.050240428957
-1242.985056062197
-1191.6867713427482
-1328.5323118458034
-1015.5308653784714
-895.3066515461381
-994.1114862316568
-761.4710321387583
-717.6979056272868
-782.302146467708
-640.4913147345328
-725.6469893076355
-497.5346232085584
-1027.1192149202325
-950.0117149822681
-956.1343737377374
-708.9489626669097
-964.5003064113283
-611.9111516886613
-612.3182791021098
-1100.0047939174613
-984.9262458612923
-858.7106075590494
-842.305917848386
-745.9043991922597
-741.2168858394704
-1143.0750387284456
-755.5257242325362
-745.8440029056219
-387.8717950334138
-764.6628701051523
-486.7967495537958
-485.13357559164814
-313.5415216767419
-611.3450529954782
-611.1570544377465
-507.6456747676814
-615.2032627013064
-242.37988821149764
-603.85498620892
-352.2672241055367
-155.99874664988383
-615.4003063516313
-384.9811293551548
-498.80727354456315
-407.6898591217813
-1213.6383844696395
-1122.2425748913884
-592.4819308883913
-478.2046833075051
-891.0254788311132
-482.40204115385
-339.34676196677407
-582.9985110154428
-213.38243627478826
-928.8434951613825
-1545.5433749195483
-1179.5016285049896
-1211.9549773601925
-1396.8082561792166
-1318.073128824395
-597.3837225413702
-564.7793352410449
-723.744223659601
-653.0145534050461
-847.6138123247009
-385.62784320332867
-245.25250602651928
-117.55094416757835
-864.0064774069044
-124.30221387458867
-244.4014050243669
-1148.861754008653
-914.4047868424254
-765.9394408203351
-124.05114610943177
-605.7641303826842
-616.3595829453579
-375.5024692962698
-253.51874076866997
-240.08405245866714
-503.96565579077225
-606.7646526173963
-502.6512112729435
-746.404013238678
-718.8658110051653
-125.65808359856703
-247.62256797883364
-363.69852213666803
-249.21801061415547
-491.7724416523124
-235.37050442527357
-609.6026403583944
-236.05731608228092
-381.19853850450454
-298.7683201867404
-127.64145601534942
-233.4300138495176
-129.11243486763516
-390.0092951263507
-1000.7729892969854
-249.60445310459787
-253.02347910759622
-129.04269174391223
-360.6321251486308
-377.26297602576534
-124.98466986009481
-245.47913567739212
-127.0885254550411
-118.11013006825459
-128.8682755001942
-497.3015586531096
-340.77352433313484
-514.4945799737978
-503.24077308842783
-627.9068157464455
-511.39396524392146
-763.8866112068075
-741.7885082408757
-617.4945380476306
-950.3176437519387
-643.4791402436576
-511.9377874351982
-573.6219349516633
-564.1297823875693
-242.06399233336583
-496.4020380325518
-360.56387982880364
-495.4590728336022
-503.7263345016764
-122.47964616802327
-254.16543926263168
-614.5335268729743
-234.3718017676852
-301.27514663062874
-387.64758894986204
-368.74492411716415
-364.43559131093593
-160.6845848115533
-504.1948947975429
-246.51676032967683
-251.5732500220603
-600.1463819723879
-247.17476928471288
-381.924164337607
-377.4773226068174
-378.511830774651
-126.69199895843033
-365.0506645811703
-130.45052114802874
-374.37400288581813
-502.37678159638887
-374.43552658473055
-241.157211525502
-388.9597456642503
-249.4412385534861
-114.71395078439846
-864.6882327286056
-626.8144095971478
-732.9226896140248
-368.24767905020394
-369.7425524469132
-398.07832598184626
-906.7113918582257
-252.2343258180765
-370.4258473086036
-736.0203154396909
-609.4605173515027
-661.1255920773486
-489.9605291008584
-364.1671188501402
-644.4029089587781
-477.9510457677364
-128.78294672880136
-373.74382001694886
-380.69931133982936
-372.60275628381805
-743.0410655515724
-597.558847789258
-387.94245652694394
-725.3939448944484
-409.1301313430852
-491.8442467896486
-123.0638156839621
-377.9292326597324
-489.27209762667974
-255.63227821371257
-379.5885382060625
-370.2312967024669
-250.94061817008688
-131.2125308195906
-600.3312016651868
-130.84444772735733
-312.6287688438562
-382.4144610039701
-259.03558003697265
-224.92206667096863
-376.81390821359685
-382.39993489751646
-380.25599578593636
-610.1016672243638


@@ -1,133 +0,0 @@
#!/usr/bin/env python
# coding=utf-8
'''
@Author: John
@Email: johnjim0816@gmail.com
@Date: 2020-06-11 20:58:21
@LastEditor: John
LastEditTime: 2022-07-21 21:51:34
@Discription:
@Environment: python 3.7.7
'''
import sys,os
curr_path = os.path.dirname(os.path.abspath(__file__)) # current path
parent_path = os.path.dirname(curr_path) # parent path
sys.path.append(parent_path) # add to system path
import datetime
import gym
import torch
import argparse
from env import NormalizedActions,OUNoise
from ddpg import DDPG
from common.utils import save_results,make_dir
from common.utils import plot_rewards,save_args
def get_args():
""" Hyperparameters
"""
curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # Obtain current time
parser = argparse.ArgumentParser(description="hyperparameters")
parser.add_argument('--algo_name',default='DDPG',type=str,help="name of algorithm")
parser.add_argument('--env_name',default='Pendulum-v1',type=str,help="name of environment")
parser.add_argument('--train_eps',default=300,type=int,help="episodes of training")
parser.add_argument('--test_eps',default=20,type=int,help="episodes of testing")
parser.add_argument('--gamma',default=0.99,type=float,help="discounted factor")
parser.add_argument('--critic_lr',default=1e-3,type=float,help="learning rate of critic")
parser.add_argument('--actor_lr',default=1e-4,type=float,help="learning rate of actor")
parser.add_argument('--memory_capacity',default=8000,type=int,help="memory capacity")
parser.add_argument('--batch_size',default=128,type=int)
parser.add_argument('--target_update',default=2,type=int)
parser.add_argument('--soft_tau',default=1e-2,type=float)
parser.add_argument('--hidden_dim',default=256,type=int)
parser.add_argument('--device',default='cpu',type=str,help="cpu or cuda")
parser.add_argument('--result_path',default=curr_path + "/outputs/" + parser.parse_args().env_name + \
'/' + curr_time + '/results/' )
parser.add_argument('--model_path',default=curr_path + "/outputs/" + parser.parse_args().env_name + \
'/' + curr_time + '/models/' ) # path to save models
parser.add_argument('--save_fig',default=True,type=bool,help="if save figure or not")
args = parser.parse_args()
return args
def env_agent_config(cfg,seed=1):
env = NormalizedActions(gym.make(cfg.env_name)) # wrap env to rescale actions
env.seed(seed) # random seed
n_states = env.observation_space.shape[0]
n_actions = env.action_space.shape[0]
agent = DDPG(n_states,n_actions,cfg)
return env,agent
def train(cfg, env, agent):
print('Start training!')
print(f'Env:{cfg.env_name}, Algorithm:{cfg.algo_name}, Device:{cfg.device}')
ou_noise = OUNoise(env.action_space) # noise of action
rewards = [] # record rewards for all episodes
ma_rewards = [] # record moving-average rewards for all episodes
for i_ep in range(cfg.train_eps):
state = env.reset()
ou_noise.reset()
done = False
ep_reward = 0
i_step = 0
while not done:
i_step += 1
action = agent.choose_action(state)
action = ou_noise.get_action(action, i_step)
next_state, reward, done, _ = env.step(action)
ep_reward += reward
agent.memory.push(state, action, reward, next_state, done)
agent.update()
state = next_state
if (i_ep+1)%10 == 0:
print(f'Env:{i_ep+1}/{cfg.train_eps}, Reward:{ep_reward:.2f}')
rewards.append(ep_reward)
if ma_rewards:
ma_rewards.append(0.9*ma_rewards[-1]+0.1*ep_reward)
else:
ma_rewards.append(ep_reward)
print('Finish training!')
return {'rewards':rewards,'ma_rewards':ma_rewards}
def test(cfg, env, agent):
print('Start testing')
print(f'Env:{cfg.env_name}, Algorithm:{cfg.algo_name}, Device:{cfg.device}')
rewards = [] # record rewards for all episodes
ma_rewards = [] # record moving-average rewards for all episodes
for i_ep in range(cfg.test_eps):
state = env.reset()
done = False
ep_reward = 0
i_step = 0
while not done:
i_step += 1
action = agent.choose_action(state)
next_state, reward, done, _ = env.step(action)
ep_reward += reward
state = next_state
rewards.append(ep_reward)
if ma_rewards:
ma_rewards.append(0.9*ma_rewards[-1]+0.1*ep_reward)
else:
ma_rewards.append(ep_reward)
print(f"Epside:{i_ep+1}/{cfg.test_eps}, Reward:{ep_reward:.1f}")
print('Finish testing!')
return {'rewards':rewards,'ma_rewards':ma_rewards}
if __name__ == "__main__":
cfg = get_args()
# training
env,agent = env_agent_config(cfg,seed=1)
res_dic = train(cfg, env, agent)
make_dir(cfg.result_path, cfg.model_path)
save_args(cfg)
agent.save(path=cfg.model_path)
save_results(res_dic, tag='train',
path=cfg.result_path)
plot_rewards(res_dic['rewards'], res_dic['ma_rewards'], cfg, tag="train")
# testing
env,agent = env_agent_config(cfg,seed=10)
agent.load(path=cfg.model_path)
res_dic = test(cfg,env,agent)
save_results(res_dic, tag='test',
path=cfg.result_path)
plot_rewards(res_dic['rewards'], res_dic['ma_rewards'], cfg, tag="test")