JohnJim0816
2021-05-06 02:07:56 +08:00
parent 747f3238c0
commit b17c8f4e41
107 changed files with 1439 additions and 987 deletions



codes/DQN/task0_train.ipynb: new file (270 lines); file diff suppressed because one or more lines are too long.


@@ -5,7 +5,7 @@
@Email: johnjim0816@gmail.com
@Date: 2020-06-12 00:48:57
@LastEditor: John
@LastEditTime: 2021-05-04 15:01:34
@LastEditTime: 2021-05-05 16:49:15
@Discription:
@Environment: python 3.7.7
'''
@@ -14,9 +14,9 @@ curr_path = os.path.dirname(__file__)
parent_path = os.path.dirname(curr_path)
sys.path.append(parent_path) # add current terminal path to sys.path
import datetime
import torch
import gym
import torch
import datetime
from common.utils import save_results, make_dir
from common.plot import plot_rewards
@@ -32,21 +32,21 @@ class DQNConfig:
self.result_path = curr_path+"/outputs/" + self.env + \
'/'+curr_time+'/results/' # path to save results
self.model_path = curr_path+"/outputs/" + self.env + \
'/'+curr_time+'/models/' # path to save results
self.train_eps = 300 # number of training episodes
'/'+curr_time+'/models/' # path to save models
self.train_eps = 300 # max training episodes
self.eval_eps = 50 # number of episodes for evaluating
self.gamma = 0.95
self.epsilon_start = 0.90 # initial epsilon of e-greedy policy
self.epsilon_start = 0.90 # start epsilon of e-greedy policy
self.epsilon_end = 0.01
self.epsilon_decay = 500
self.lr = 0.0001 # learning rate
self.memory_capacity = 100000 # replay memory capacity
self.memory_capacity = 100000 # capacity of Replay Memory
self.batch_size = 64
self.target_update = 2 # update frequency of target net
self.target_update = 4 # update frequency of target net
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu") # 检测gpu
self.hidden_dim = 256 # 神经网络隐藏层维度
"cuda" if torch.cuda.is_available() else "cpu") # check gpu
self.hidden_dim = 256 # hidden size of net
def env_agent_config(cfg,seed=1):
env = gym.make(cfg.env)
env.seed(seed)
@@ -60,7 +60,7 @@ def train(cfg, env, agent):
print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
rewards = []
ma_rewards = [] # moving average reward
for i_episode in range(cfg.train_eps):
for i_ep in range(cfg.train_eps):
state = env.reset()
done = False
ep_reward = 0
@@ -73,9 +73,10 @@ def train(cfg, env, agent):
agent.update()
if done:
break
if i_episode % cfg.target_update == 0:
if (i_ep+1) % cfg.target_update == 0:
agent.target_net.load_state_dict(agent.policy_net.state_dict())
print('Episode:{}/{}, Reward:{}'.format(i_episode+1, cfg.train_eps, ep_reward))
if (i_ep+1)%10 == 0:
print('Episode:{}/{}, Reward:{}'.format(i_ep+1, cfg.train_eps, ep_reward))
rewards.append(ep_reward)
# save ma rewards
if ma_rewards:
@@ -86,15 +87,17 @@ def train(cfg, env, agent):
return rewards, ma_rewards
def eval(cfg,env,agent):
rewards = [] # record the rewards of all episodes
ma_rewards = [] # moving average reward
print('Start to eval !')
print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
rewards = []
ma_rewards = [] # moving average rewards
for i_ep in range(cfg.eval_eps):
ep_reward = 0 # record the reward of each episode
state = env.reset() # reset the environment to start a new episode
ep_reward = 0 # reward per episode
state = env.reset()
while True:
action = agent.predict(state) # select an action according to the algorithm
next_state, reward, done, _ = env.step(action) # take one step in the environment
state = next_state # store the latest observation
action = agent.predict(state)
next_state, reward, done, _ = env.step(action)
state = next_state
ep_reward += reward
if done:
break
@@ -103,11 +106,15 @@ def eval(cfg,env,agent):
ma_rewards.append(ma_rewards[-1]*0.9+ep_reward*0.1)
else:
ma_rewards.append(ep_reward)
print(f"Episode:{i_ep+1}/{cfg.eval_eps}, reward:{ep_reward:.1f}")
if (i_ep+1)%10 == 0:
print(f"Episode:{i_ep+1}/{cfg.eval_eps}, reward:{ep_reward:.1f}")
print('Complete evaluation')
return rewards,ma_rewards
if __name__ == "__main__":
cfg = DQNConfig()
# train
env,agent = env_agent_config(cfg,seed=1)
rewards, ma_rewards = train(cfg, env, agent)
make_dir(cfg.result_path, cfg.model_path)
@@ -115,7 +122,7 @@ if __name__ == "__main__":
save_results(rewards, ma_rewards, tag='train', path=cfg.result_path)
plot_rewards(rewards, ma_rewards, tag="train",
algo=cfg.algo, path=cfg.result_path)
# eval
env,agent = env_agent_config(cfg,seed=10)
agent.load(path=cfg.model_path)
rewards,ma_rewards = eval(cfg,env,agent)
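Aside: the ma_rewards bookkeeping used throughout these scripts is an exponential moving average with smoothing factor 0.1. A minimal, self-contained sketch of that smoothing (the smooth helper and the sample rewards are illustrative, not part of the repo):

def smooth(rewards, alpha=0.1):
    """Exponentially smooth a reward curve: ma[t] = (1-alpha)*ma[t-1] + alpha*rewards[t]."""
    ma = []
    for r in rewards:
        ma.append(r if not ma else (1 - alpha) * ma[-1] + alpha * r)
    return ma

print(smooth([0.0, 10.0, 10.0, 10.0]))  # [0.0, 1.0, 1.9, 2.71]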


@@ -5,7 +5,7 @@
@Email: johnjim0816@gmail.com
@Date: 2020-06-12 00:50:49
@LastEditor: John
@LastEditTime: 2021-05-04 15:04:45
@LastEditTime: 2021-05-04 22:28:06
@Discription:
@Environment: python 3.7.7
'''
@@ -35,9 +35,10 @@ class DoubleDQN:
self.batch_size = cfg.batch_size
self.policy_net = MLP(state_dim, action_dim,hidden_dim=cfg.hidden_dim).to(self.device)
self.target_net = MLP(state_dim, action_dim,hidden_dim=cfg.hidden_dim).to(self.device)
# target_net starts as an exact copy of policy_net's parameters
self.target_net.load_state_dict(self.policy_net.state_dict())
self.target_net.eval() # disable BatchNormalization and Dropout
# target_net copy from policy_net
for target_param, param in zip(self.target_net.parameters(), self.policy_net.parameters()):
target_param.data.copy_(param.data)
# self.target_net.eval() # disable BatchNormalization and Dropout
# note the difference between parameters() and state_dict(): the former has requires_grad=True
self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.lr)
self.loss = 0
@@ -58,9 +59,9 @@ class DoubleDQN:
def choose_action(self, state):
'''select an action
'''
self.actions_count += 1
self.epsilon = self.epsilon_end + (self.epsilon_start - self.epsilon_end) * \
math.exp(-1. * self.actions_count / self.epsilon_decay)
self.actions_count += 1
if random.random() > self.epsilon:
action = self.predict(state)
else:
@@ -73,7 +74,7 @@ class DoubleDQN:
# randomly sample a batch of transitions from memory
state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(
self.batch_size)
### convert to tensors ###
# convert to tensor
state_batch = torch.tensor(
state_batch, device=self.device, dtype=torch.float)
action_batch = torch.tensor(action_batch, device=self.device).unsqueeze(
@@ -84,8 +85,7 @@ class DoubleDQN:
next_state_batch, device=self.device, dtype=torch.float)
done_batch = torch.tensor(np.float32(
done_batch), device=self.device).unsqueeze(1) # convert bool to float, then to tensor
done_batch), device=self.device) # convert bool to float, then to tensor
# compute Q(s_t, a) for each current (s_t, a) pair
q_values = self.policy_net(state_batch)
next_q_values = self.policy_net(next_state_batch)
@@ -104,7 +104,7 @@ class DoubleDQN:
next_state_batch)
# take a* = argmax_a Q_policy(s_{t+1}, a) and evaluate it with the target net: next_target_q_value = Q_target(s_{t+1}, a*)
next_target_q_value = next_target_values.gather(1, torch.max(next_q_values, 1)[1].unsqueeze(1)).squeeze(1)
q_target = reward_batch + self.gamma * next_target_q_value * (1-done_batch[0])
q_target = reward_batch + self.gamma * next_target_q_value * (1-done_batch)
self.loss = nn.MSELoss()(q_value, q_target.unsqueeze(1)) # compute MSE loss
# optimize the model
self.optimizer.zero_grad() # clear all old gradients from the last step
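The hunk above is the core of Double DQN: the policy (online) net selects the argmax action for each next state, while the target net evaluates it; the change from (1-done_batch[0]) to (1-done_batch) also fixes a bug where only the first terminal flag masked the whole batch. A minimal sketch of that decoupled target, assuming policy_net and target_net are any two Q-networks mapping a state batch to per-action values:

import torch

def double_dqn_target(policy_net, target_net, reward_batch, next_state_batch, done_batch, gamma=0.95):
    # online net selects the greedy next action ...
    next_actions = policy_net(next_state_batch).max(1)[1].unsqueeze(1)
    # ... target net evaluates it, decoupling action selection from evaluation
    next_q = target_net(next_state_batch).gather(1, next_actions).squeeze(1)
    # mask bootstrapping per transition, so each terminal flag applies to its own sample
    return reward_batch + gamma * next_q.detach() * (1 - done_batch)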


@@ -0,0 +1,194 @@
{
"metadata": {
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.10"
},
"orig_nbformat": 2,
"kernelspec": {
"name": "python3710jvsc74a57bd0366e1054dee9d4501b0eb8f87335afd3c67fc62db6ee611bbc7f8f5a1fefe232",
"display_name": "Python 3.7.10 64-bit ('py37': conda)"
},
"metadata": {
"interpreter": {
"hash": "366e1054dee9d4501b0eb8f87335afd3c67fc62db6ee611bbc7f8f5a1fefe232"
}
}
},
"nbformat": 4,
"nbformat_minor": 2,
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"from pathlib import Path\n",
"curr_path = str(Path().absolute())\n",
"parent_path = str(Path().absolute().parent)\n",
"sys.path.append(parent_path) # add current terminal path to sys.path"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import gym\n",
"import torch\n",
"import datetime\n",
"from DoubleDQN.agent import DoubleDQN\n",
"from common.plot import plot_rewards\n",
"from common.utils import save_results, make_dir\n",
"\n",
"curr_time = datetime.datetime.now().strftime(\n",
" \"%Y%m%d-%H%M%S\") # obtain current time"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class DoubleDQNConfig:\n",
" def __init__(self):\n",
" self.algo = \"DoubleDQN\" # name of algo\n",
" self.env = 'CartPole-v0' # env name\n",
" self.result_path = curr_path+\"/outputs/\" + self.env + \\\n",
" '/'+curr_time+'/results/' # path to save results\n",
" self.model_path = curr_path+\"/outputs/\" + self.env + \\\n",
" '/'+curr_time+'/models/' # path to save models\n",
" self.train_eps = 200 # max tranng episodes\n",
" self.eval_eps = 50 # max evaling episodes\n",
" self.gamma = 0.95\n",
" self.epsilon_start = 1 # start epsilon of e-greedy policy\n",
" self.epsilon_end = 0.01 \n",
" self.epsilon_decay = 500\n",
" self.lr = 0.001 # learning rate\n",
" self.memory_capacity = 100000 # capacity of Replay Memory\n",
" self.batch_size = 64\n",
" self.target_update = 2 # update frequency of target net\n",
" self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") # check gpu\n",
" self.hidden_dim = 256 # hidden size of net"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def env_agent_config(cfg,seed=1):\n",
" env = gym.make(cfg.env) \n",
" env.seed(seed)\n",
" state_dim = env.observation_space.shape[0]\n",
" action_dim = env.action_space.n\n",
" agent = DoubleDQN(state_dim,action_dim,cfg)\n",
" return env,agent"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def train(cfg,env,agent):\n",
" print('Start to train !')\n",
" rewards,ma_rewards = [],[]\n",
" for i_ep in range(cfg.train_eps):\n",
" state = env.reset() \n",
" ep_reward = 0\n",
" while True:\n",
" action = agent.choose_action(state) \n",
" next_state, reward, done, _ = env.step(action)\n",
" ep_reward += reward\n",
" agent.memory.push(state, action, reward, next_state, done) \n",
" state = next_state \n",
" agent.update() \n",
" if done:\n",
" break\n",
" if i_ep % cfg.target_update == 0:\n",
" agent.target_net.load_state_dict(agent.policy_net.state_dict())\n",
" if (i_ep+1)%10 == 0:\n",
" print(f'Episode:{i_ep+1}/{cfg.train_eps}, Reward:{ep_reward}')\n",
" rewards.append(ep_reward)\n",
" if ma_rewards:\n",
" ma_rewards.append(\n",
" 0.9*ma_rewards[-1]+0.1*ep_reward)\n",
" else:\n",
" ma_rewards.append(ep_reward) \n",
" print('Complete training')\n",
" return rewards,ma_rewards"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def eval(cfg,env,agent):\n",
" print('Start to eval !')\n",
" print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')\n",
" rewards = [] \n",
" ma_rewards = []\n",
" for i_ep in range(cfg.eval_eps):\n",
" state = env.reset() \n",
" ep_reward = 0 \n",
" while True:\n",
" action = agent.predict(state) \n",
" next_state, reward, done, _ = env.step(action) \n",
" state = next_state \n",
" ep_reward += reward\n",
" if done:\n",
" break\n",
" rewards.append(ep_reward)\n",
" if ma_rewards:\n",
" ma_rewards.append(ma_rewards[-1]*0.9+ep_reward*0.1)\n",
" else:\n",
" ma_rewards.append(ep_reward)\n",
" print(f\"Episode:{i_ep+1}/{cfg.eval_eps}, reward:{ep_reward:.1f}\")\n",
" print('Complete evaling')\n",
" return rewards,ma_rewards "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"if __name__ == \"__main__\":\n",
" cfg = DoubleDQNConfig()\n",
" # train\n",
" env,agent = env_agent_config(cfg,seed=1)\n",
" rewards, ma_rewards = train(cfg, env, agent)\n",
" make_dir(cfg.result_path, cfg.model_path)\n",
" agent.save(path=cfg.model_path)\n",
" save_results(rewards, ma_rewards, tag='train', path=cfg.result_path)\n",
" plot_rewards(rewards, ma_rewards, tag=\"train\",\n",
" algo=cfg.algo, path=cfg.result_path)\n",
"\n",
" # eval\n",
" env,agent = env_agent_config(cfg,seed=10)\n",
" agent.load(path=cfg.model_path)\n",
" rewards,ma_rewards = eval(cfg,env,agent)\n",
" save_results(rewards,ma_rewards,tag='eval',path=cfg.result_path)\n",
" plot_rewards(rewards,ma_rewards,tag=\"eval\",env=cfg.env,algo = cfg.algo,path=cfg.result_path)"
]
}
]
}


@@ -5,7 +5,7 @@
@Email: johnjim0816@gmail.com
@Date: 2020-06-12 00:48:57
@LastEditor: John
@LastEditTime: 2021-05-04 15:05:37
@LastEditTime: 2021-05-04 22:26:59
@Discription:
@Environment: python 3.7.7
'''
@@ -31,21 +31,19 @@ class DoubleDQNConfig:
self.result_path = curr_path+"/outputs/" + self.env + \
'/'+curr_time+'/results/' # path to save results
self.model_path = curr_path+"/outputs/" + self.env + \
'/'+curr_time+'/models/' # path to save results
self.gamma = 0.99
self.epsilon_start = 0.9 # start epsilon of e-greedy policy
self.epsilon_end = 0.01
self.epsilon_decay = 200
self.lr = 0.01 # learning rate
self.memory_capacity = 10000 # capacity of Replay Memory
self.batch_size = 128
self.train_eps = 300 # max training episodes
self.train_steps = 200 # max training steps per episode
self.target_update = 2 # update frequency of target net
'/'+curr_time+'/models/' # path to save models
self.train_eps = 200 # max training episodes
self.eval_eps = 50 # max evaluating episodes
self.eval_steps = 200 # max evaluating steps per episode
self.gamma = 0.95
self.epsilon_start = 1 # start epsilon of e-greedy policy
self.epsilon_end = 0.01
self.epsilon_decay = 500
self.lr = 0.001 # learning rate
self.memory_capacity = 100000 # capacity of Replay Memory
self.batch_size = 64
self.target_update = 2 # update frequency of target net
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # check gpu
self.hidden_dim = 128 # hidden size of net
self.hidden_dim = 256 # hidden size of net
def env_agent_config(cfg,seed=1):
env = gym.make(cfg.env)
@@ -59,20 +57,20 @@ def train(cfg,env,agent):
print('Start to train !')
rewards,ma_rewards = [],[]
for i_ep in range(cfg.train_eps):
state = env.reset() # reset the environment state
state = env.reset()
ep_reward = 0
while True:
action = agent.choose_action(state) # select an action given the current state
next_state, reward, done, _ = env.step(action) # step the environment
action = agent.choose_action(state)
next_state, reward, done, _ = env.step(action)
ep_reward += reward
agent.memory.push(state, action, reward, next_state, done) # store the transition in memory
state = next_state # move to the next state
agent.update() # update the network at every step
agent.memory.push(state, action, reward, next_state, done)
state = next_state
agent.update()
if done:
break
if i_ep % cfg.target_update == 0:
agent.target_net.load_state_dict(agent.policy_net.state_dict())
print(f'Episode:{i_ep+1}/{cfg.train_eps}, Reward:{ep_reward}')
print(f'Episode:{i_ep+1}/{cfg.train_eps}, Reward:{ep_reward},Epsilon:{agent.epsilon:.2f}')
rewards.append(ep_reward)
if ma_rewards:
ma_rewards.append(
@@ -83,6 +81,8 @@ def train(cfg,env,agent):
return rewards,ma_rewards
def eval(cfg,env,agent):
print('Start to eval !')
print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
rewards = []
ma_rewards = []
for i_ep in range(cfg.eval_eps):
@@ -101,9 +101,12 @@ def eval(cfg,env,agent):
else:
ma_rewards.append(ep_reward)
print(f"Episode:{i_ep+1}/{cfg.eval_eps}, reward:{ep_reward:.1f}")
print('Complete evaluation')
return rewards,ma_rewards
if __name__ == "__main__":
cfg = DoubleDQNConfig()
# train
env,agent = env_agent_config(cfg,seed=1)
rewards, ma_rewards = train(cfg, env, agent)
make_dir(cfg.result_path, cfg.model_path)
@@ -112,6 +115,7 @@ if __name__ == "__main__":
plot_rewards(rewards, ma_rewards, tag="train",
algo=cfg.algo, path=cfg.result_path)
# eval
env,agent = env_agent_config(cfg,seed=10)
agent.load(path=cfg.model_path)
rewards,ma_rewards = eval(cfg,env,agent)
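Both this script and the DQN one anneal exploration with the exponential schedule computed inside choose_action from epsilon_start, epsilon_end, and epsilon_decay in the configs above. A standalone sketch of that schedule; the function name is illustrative:

import math

def epsilon_by_step(step, eps_start=1.0, eps_end=0.01, eps_decay=500):
    """Anneal epsilon from eps_start toward eps_end as the action count grows."""
    return eps_end + (eps_start - eps_end) * math.exp(-step / eps_decay)

print(epsilon_by_step(0))               # 1.0: fully random at the start
print(round(epsilon_by_step(500), 3))   # ~0.374 after one decay constant
print(round(epsilon_by_step(5000), 3))  # ~0.01: almost fully greedy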


@@ -5,7 +5,7 @@ Author: John
Email: johnjim0816@gmail.com
Date: 2021-03-24 22:18:18
LastEditor: John
LastEditTime: 2021-03-31 14:51:09
LastEditTime: 2021-05-04 22:39:34
Discription:
Environment:
'''
@@ -65,11 +65,11 @@ class HierarchicalDQN:
if self.batch_size > len(self.memory):
return
state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(self.batch_size)
state_batch = torch.tensor(state_batch,dtype=torch.float)
action_batch = torch.tensor(action_batch,dtype=torch.int64).unsqueeze(1)
reward_batch = torch.tensor(reward_batch,dtype=torch.float)
next_state_batch = torch.tensor(next_state_batch, dtype=torch.float)
done_batch = torch.tensor(np.float32(done_batch))
state_batch = torch.tensor(state_batch,device=self.device,dtype=torch.float)
action_batch = torch.tensor(action_batch,device=self.device,dtype=torch.int64).unsqueeze(1)
reward_batch = torch.tensor(reward_batch,device=self.device,dtype=torch.float)
next_state_batch = torch.tensor(next_state_batch,device=self.device, dtype=torch.float)
done_batch = torch.tensor(np.float32(done_batch),device=self.device)
q_values = self.policy_net(state_batch).gather(dim=1, index=action_batch).squeeze(1)
next_state_values = self.policy_net(next_state_batch).max(1)[0].detach()
expected_q_values = reward_batch + 0.99 * next_state_values * (1-done_batch)
@@ -79,17 +79,17 @@ class HierarchicalDQN:
for param in self.policy_net.parameters(): # clip to prevent exploding gradients
param.grad.data.clamp_(-1, 1)
self.optimizer.step()
self.loss_numpy = loss.detach().numpy()
self.loss_numpy = loss.detach().cpu().numpy()
self.losses.append(self.loss_numpy)
def update_meta(self):
if self.batch_size > len(self.meta_memory):
return
state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.meta_memory.sample(self.batch_size)
state_batch = torch.tensor(state_batch,dtype=torch.float)
action_batch = torch.tensor(action_batch,dtype=torch.int64).unsqueeze(1)
reward_batch = torch.tensor(reward_batch,dtype=torch.float)
next_state_batch = torch.tensor(next_state_batch, dtype=torch.float)
done_batch = torch.tensor(np.float32(done_batch))
state_batch = torch.tensor(state_batch,device=self.device,dtype=torch.float)
action_batch = torch.tensor(action_batch,device=self.device,dtype=torch.int64).unsqueeze(1)
reward_batch = torch.tensor(reward_batch,device=self.device,dtype=torch.float)
next_state_batch = torch.tensor(next_state_batch,device=self.device, dtype=torch.float)
done_batch = torch.tensor(np.float32(done_batch),device=self.device)
q_values = self.meta_policy_net(state_batch).gather(dim=1, index=action_batch).squeeze(1)
next_state_values = self.meta_policy_net(next_state_batch).max(1)[0].detach()
expected_q_values = reward_batch + 0.99 * next_state_values * (1-done_batch)
@@ -99,7 +99,7 @@ class HierarchicalDQN:
for param in self.meta_policy_net.parameters(): # clip to prevent exploding gradients
param.grad.data.clamp_(-1, 1)
self.meta_optimizer.step()
self.meta_loss_numpy = meta_loss.detach().numpy()
self.meta_loss_numpy = meta_loss.detach().cpu().numpy()
self.meta_losses.append(self.meta_loss_numpy)
def save(self, path):
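The changes above follow one pattern: every batch tensor is created directly on self.device, and losses come back through .detach().cpu().numpy(), since calling .numpy() on a CUDA tensor raises a TypeError. A minimal sketch of that pattern, with illustrative names:

import numpy as np
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def to_device_batch(states, actions, rewards, dones):
    """Build all training tensors on the target device in one place."""
    return (
        torch.tensor(states, device=device, dtype=torch.float),
        torch.tensor(actions, device=device, dtype=torch.int64).unsqueeze(1),
        torch.tensor(rewards, device=device, dtype=torch.float),
        torch.tensor(np.float32(dones), device=device),
    )

def loss_to_numpy(loss):
    """Detach from the graph, copy to host memory, then convert."""
    return loss.detach().cpu().numpy()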


@@ -5,7 +5,7 @@ Author: John
Email: johnjim0816@gmail.com
Date: 2021-03-29 10:37:32
LastEditor: John
LastEditTime: 2021-03-31 14:58:49
LastEditTime: 2021-05-04 22:35:56
Discription:
Environment:
'''
@@ -21,27 +21,23 @@ import numpy as np
import torch
import gym
from common.utils import save_results
from common.plot import plot_rewards,plot_losses
from common.utils import save_results,make_dir
from common.plot import plot_rewards
from HierarchicalDQN.agent import HierarchicalDQN
SEQUENCE = datetime.datetime.now().strftime(
curr_time = datetime.datetime.now().strftime(
"%Y%m%d-%H%M%S") # obtain current time
SAVED_MODEL_PATH = curr_path+"/saved_model/"+SEQUENCE+'/' # path to save model
if not os.path.exists(curr_path+"/saved_model/"):
os.mkdir(curr_path+"/saved_model/")
if not os.path.exists(SAVED_MODEL_PATH):
os.mkdir(SAVED_MODEL_PATH)
RESULT_PATH = curr_path+"/results/"+SEQUENCE+'/' # path to save rewards
if not os.path.exists(curr_path+"/results/"):
os.mkdir(curr_path+"/results/")
if not os.path.exists(RESULT_PATH):
os.mkdir(RESULT_PATH)
class HierarchicalDQNConfig:
def __init__(self):
self.algo = "H-DQN" # name of algo
self.env = 'CartPole-v0'
self.result_path = curr_path+"/outputs/" + self.env + \
'/'+curr_time+'/results/' # path to save results
self.model_path = curr_path+"/outputs/" + self.env + \
'/'+curr_time+'/models/' # path to save models
self.train_eps = 300 # number of training episodes
self.eval_eps = 50 # number of evaluation episodes
self.gamma = 0.99
self.epsilon_start = 1 # start epsilon of e-greedy policy
self.epsilon_end = 0.01
@@ -49,19 +45,25 @@ class HierarchicalDQNConfig:
self.lr = 0.0001 # learning rate
self.memory_capacity = 10000 # Replay Memory capacity
self.batch_size = 32
self.train_eps = 300 # number of training episodes
self.target_update = 2 # update frequency of target net
self.eval_eps = 20 # number of evaluation episodes
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu") # 检测gpu
self.hidden_dim = 256 # dimension of hidden layer
def env_agent_config(cfg,seed=1):
env = gym.make(cfg.env)
env.seed(seed)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
agent = HierarchicalDQN(state_dim,action_dim,cfg)
return env,agent
def train(cfg, env, agent):
print('Start to train !')
print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
rewards = []
ma_rewards = [] # moving average reward
for i_episode in range(cfg.train_eps):
for i_ep in range(cfg.train_eps):
state = env.reset()
done = False
ep_reward = 0
@@ -83,7 +85,7 @@ def train(cfg, env, agent):
state = next_state
agent.update()
agent.meta_memory.push(meta_state, goal, extrinsic_reward, state, done)
print('Episode:{}/{}, Reward:{}, Loss:{:.2f}, Meta_Loss:{:.2f}'.format(i_episode+1, cfg.train_eps, ep_reward,agent.loss_numpy ,agent.meta_loss_numpy ))
print('Episode:{}/{}, Reward:{}, Loss:{:.2f}, Meta_Loss:{:.2f}'.format(i_ep+1, cfg.train_eps, ep_reward,agent.loss_numpy ,agent.meta_loss_numpy ))
rewards.append(ep_reward)
if ma_rewards:
ma_rewards.append(
@@ -93,18 +95,52 @@ def train(cfg, env, agent):
print('Complete training')
return rewards, ma_rewards
def eval(cfg, env, agent):
print('Start to eval !')
print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
rewards = []
ma_rewards = [] # moving average reward
for i_ep in range(cfg.eval_eps):
state = env.reset()
done = False
ep_reward = 0
while not done:
goal = agent.set_goal(state)
onehot_goal = agent.to_onehot(goal)
extrinsic_reward = 0
while not done and goal != np.argmax(state):
goal_state = np.concatenate([state, onehot_goal])
action = agent.choose_action(goal_state)
next_state, reward, done, _ = env.step(action)
ep_reward += reward
extrinsic_reward += reward
state = next_state
agent.update()
print(f'Episode:{i_ep+1}/{cfg.eval_eps}, Reward:{ep_reward}, Loss:{agent.loss_numpy:.2f}, Meta_Loss:{agent.meta_loss_numpy:.2f}')
rewards.append(ep_reward)
if ma_rewards:
ma_rewards.append(
0.9*ma_rewards[-1]+0.1*ep_reward)
else:
ma_rewards.append(ep_reward)
print('Complete evaluation')
return rewards, ma_rewards
if __name__ == "__main__":
env = gym.make('CartPole-v0')
env.seed(1)
cfg = HierarchicalDQNConfig()
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
agent = HierarchicalDQN(state_dim, action_dim, cfg)
rewards, ma_rewards = train(cfg, env, agent)
agent.save(path=SAVED_MODEL_PATH)
save_results(rewards, ma_rewards, tag='train', path=RESULT_PATH)
plot_rewards(rewards, ma_rewards, tag="train",
algo=cfg.algo, path=RESULT_PATH)
plot_losses(agent.losses,algo=cfg.algo, path=RESULT_PATH)
# train
env,agent = env_agent_config(cfg,seed=1)
rewards, ma_rewards = train(cfg, env, agent)
make_dir(cfg.result_path, cfg.model_path)
agent.save(path=cfg.model_path)
save_results(rewards, ma_rewards, tag='train', path=cfg.result_path)
plot_rewards(rewards, ma_rewards, tag="train",
algo=cfg.algo, path=cfg.result_path)
# eval
env,agent = env_agent_config(cfg,seed=10)
agent.load(path=cfg.model_path)
rewards,ma_rewards = eval(cfg,env,agent)
save_results(rewards,ma_rewards,tag='eval',path=cfg.result_path)
plot_rewards(rewards,ma_rewards,tag="eval",env=cfg.env,algo = cfg.algo,path=cfg.result_path)


@@ -5,13 +5,14 @@ Author: John
Email: johnjim0816@gmail.com
Date: 2021-03-12 16:14:34
LastEditor: John
LastEditTime: 2021-03-17 12:35:06
LastEditTime: 2021-05-05 16:58:39
Discription:
Environment:
'''
import numpy as np
from collections import defaultdict
import torch
import dill
class FisrtVisitMC:
''' On-Policy First-Visit MC Control
@@ -20,14 +21,14 @@ class FisrtVisitMC:
self.action_dim = action_dim
self.epsilon = cfg.epsilon
self.gamma = cfg.gamma
self.Q = defaultdict(lambda: np.zeros(action_dim))
self.Q_table = defaultdict(lambda: np.zeros(action_dim))
self.returns_sum = defaultdict(float) # sum of returns
self.returns_count = defaultdict(float)
def choose_action(self,state):
''' e-greedy policy '''
if state in self.Q.keys():
best_action = np.argmax(self.Q[state])
if state in self.Q_table.keys():
best_action = np.argmax(self.Q_table[state])
action_probs = np.ones(self.action_dim, dtype=float) * self.epsilon / self.action_dim
action_probs[best_action] += (1.0 - self.epsilon)
action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
@@ -48,19 +49,17 @@ class FisrtVisitMC:
# Calculate average return for this state over all sampled episodes
self.returns_sum[sa_pair] += G
self.returns_count[sa_pair] += 1.0
self.Q[state][action] = self.returns_sum[sa_pair] / self.returns_count[sa_pair]
self.Q_table[state][action] = self.returns_sum[sa_pair] / self.returns_count[sa_pair]
def save(self,path):
'''save the Q table to a file
'''
import dill
torch.save(
obj=self.Q,
f=path,
obj=self.Q_table,
f=path+"Q_table",
pickle_module=dill
)
def load(self, path):
'''load the Q table from a file
'''
import dill
self.Q =torch.load(f=path,pickle_module=dill)
self.Q_table =torch.load(f=path+"Q_table",pickle_module=dill)
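For context, the agent above implements first-visit Monte Carlo control: Q_table[s][a] is the empirical mean of the returns observed after the first visit of (s, a) in each sampled episode. A compact sketch of that update for one episode, assuming hashable states and a per-state action-value array as in the agent:

from collections import defaultdict
import numpy as np

def first_visit_mc_update(Q_table, returns_sum, returns_count, episode, gamma=0.9):
    """episode: list of (state, action, reward) tuples; updates Q_table in place."""
    first_visit = {}
    for t, (s, a, _) in enumerate(episode):
        first_visit.setdefault((s, a), t)  # earliest step of each (s, a) pair
    for (s, a), t in first_visit.items():
        # discounted return following the first visit of (s, a)
        G = sum(gamma ** k * r for k, (_, _, r) in enumerate(episode[t:]))
        returns_sum[(s, a)] += G
        returns_count[(s, a)] += 1.0
        Q_table[s][a] = returns_sum[(s, a)] / returns_count[(s, a)]

Q_table = defaultdict(lambda: np.zeros(9))  # 9 actions, as in the Racetrack env
returns_sum, returns_count = defaultdict(float), defaultdict(float)
first_visit_mc_update(Q_table, returns_sum, returns_count, [("s0", 1, -1.0), ("s1", 0, 10.0)])
print(Q_table["s0"][1])  # -1.0 + 0.9 * 10.0 = 8.0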


@@ -1,88 +0,0 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: John
Email: johnjim0816@gmail.com
Date: 2021-03-11 14:26:44
LastEditor: John
LastEditTime: 2021-03-17 12:35:36
Discription:
Environment:
'''
import sys,os
sys.path.append(os.getcwd())
import argparse
import datetime
from envs.racetrack_env import RacetrackEnv
from MonteCarlo.agent import FisrtVisitMC
from common.plot import plot_rewards
from common.utils import save_results
SEQUENCE = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # obtain current time
SAVED_MODEL_PATH = os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"+SEQUENCE+'/' # path to save the model
if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"): # create the folder if it does not exist
os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/")
if not os.path.exists(SAVED_MODEL_PATH): # create the folder if it does not exist
os.mkdir(SAVED_MODEL_PATH)
RESULT_PATH = os.path.split(os.path.abspath(__file__))[0]+"/results/"+SEQUENCE+'/' # path to save rewards
if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/results/"): # create the folder if it does not exist
os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/results/")
if not os.path.exists(RESULT_PATH): # create the folder if it does not exist
os.mkdir(RESULT_PATH)
class MCConfig:
def __init__(self):
self.epsilon = 0.15 # epsilon: probability of selecting a random action
self.gamma = 0.9 # gamma: discount factor
self.n_episodes = 150
self.n_steps = 2000
def get_mc_args():
'''set parameters
'''
parser = argparse.ArgumentParser()
parser.add_argument("--epsilon", default=0.15, type=float) # epsilon: The probability to select a random action . float between 0 and 1.
parser.add_argument("--gamma", default=0.9, type=float) # gamma: Gamma discount factor.
parser.add_argument("--n_episodes", default=150, type=int)
parser.add_argument("--n_steps", default=2000, type=int)
mc_cfg = parser.parse_args()
return mc_cfg
def mc_train(cfg,env,agent):
rewards = []
ma_rewards = [] # moving average rewards
for i_episode in range(cfg.n_episodes):
one_ep_transition = []
state = env.reset()
ep_reward = 0
while True:
# for t in range(cfg.n_steps):
action = agent.choose_action(state)
next_state, reward, done = env.step(action)
ep_reward+=reward
one_ep_transition.append((state, action, reward))
state = next_state
if done:
break
rewards.append(ep_reward)
if ma_rewards:
ma_rewards.append(ma_rewards[-1]*0.9+ep_reward*0.1)
else:
ma_rewards.append(ep_reward)
agent.update(one_ep_transition)
if (i_episode+1)%10==0:
print("Episode:{}/{}: Reward:{}".format(i_episode+1, mc_cfg.n_episodes,ep_reward))
return rewards,ma_rewards
if __name__ == "__main__":
mc_cfg = MCConfig()
env = RacetrackEnv()
action_dim=9
agent = FisrtVisitMC(action_dim,mc_cfg)
rewards,ma_rewards= mc_train(mc_cfg,env,agent)
save_results(rewards,ma_rewards,tag='train',path=RESULT_PATH)
plot_rewards(rewards,ma_rewards,tag="train",algo = "On-Policy First-Visit MC Control",path=RESULT_PATH)



@@ -0,0 +1,118 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: John
Email: johnjim0816@gmail.com
Date: 2021-03-11 14:26:44
LastEditor: John
LastEditTime: 2021-05-05 17:27:50
Discription:
Environment:
'''
import sys,os
curr_path = os.path.dirname(__file__)
parent_path = os.path.dirname(curr_path)
sys.path.append(parent_path) # add current terminal path to sys.path
import torch
import datetime
from common.utils import save_results,make_dir
from common.plot import plot_rewards
from MonteCarlo.agent import FisrtVisitMC
from envs.racetrack_env import RacetrackEnv
curr_time = datetime.datetime.now().strftime(
"%Y%m%d-%H%M%S") # obtain current time
class MCConfig:
def __init__(self):
self.algo = "MC" # name of algo
self.env = 'Racetrack'
self.result_path = curr_path+"/outputs/" + self.env + \
'/'+curr_time+'/results/' # path to save results
self.model_path = curr_path+"/outputs/" + self.env + \
'/'+curr_time+'/models/' # path to save models
# epsilon: probability of selecting a random action
self.epsilon = 0.15
self.gamma = 0.9 # gamma: discount factor
self.train_eps = 200
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu") # check gpu
def env_agent_config(cfg,seed=1):
env = RacetrackEnv()
action_dim = 9
agent = FisrtVisitMC(action_dim, cfg)
return env,agent
def train(cfg, env, agent):
print('Start to train !')
print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
rewards = []
ma_rewards = [] # moving average rewards
for i_ep in range(cfg.train_eps):
state = env.reset()
ep_reward = 0
one_ep_transition = []
while True:
action = agent.choose_action(state)
next_state, reward, done = env.step(action)
ep_reward += reward
one_ep_transition.append((state, action, reward))
state = next_state
if done:
break
rewards.append(ep_reward)
if ma_rewards:
ma_rewards.append(ma_rewards[-1]*0.9+ep_reward*0.1)
else:
ma_rewards.append(ep_reward)
agent.update(one_ep_transition)
if (i_ep+1) % 10 == 0:
print(f"Episode:{i_ep+1}/{cfg.train_eps}: Reward:{ep_reward}")
print('Complete training')
return rewards, ma_rewards
def eval(cfg, env, agent):
print('Start to eval !')
print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
rewards = []
ma_rewards = [] # moving average rewards
for i_ep in range(cfg.train_eps):
state = env.reset()
ep_reward = 0
while True:
action = agent.choose_action(state)
next_state, reward, done = env.step(action)
ep_reward += reward
state = next_state
if done:
break
rewards.append(ep_reward)
if ma_rewards:
ma_rewards.append(ma_rewards[-1]*0.9+ep_reward*0.1)
else:
ma_rewards.append(ep_reward)
if (i_ep+1) % 10 == 0:
print(f"Episode:{i_ep+1}/{cfg.train_eps}: Reward:{ep_reward}")
return rewards, ma_rewards
if __name__ == "__main__":
cfg = MCConfig()
# train
env,agent = env_agent_config(cfg,seed=1)
rewards, ma_rewards = train(cfg, env, agent)
make_dir(cfg.result_path, cfg.model_path)
agent.save(path=cfg.model_path)
save_results(rewards, ma_rewards, tag='train', path=cfg.result_path)
plot_rewards(rewards, ma_rewards, tag="train",
algo=cfg.algo, path=cfg.result_path)
# eval
env,agent = env_agent_config(cfg,seed=10)
agent.load(path=cfg.model_path)
rewards,ma_rewards = eval(cfg,env,agent)
save_results(rewards,ma_rewards,tag='eval',path=cfg.result_path)
plot_rewards(rewards,ma_rewards,tag="eval",env=cfg.env,algo = cfg.algo,path=cfg.result_path)


codes/PPO/task0_train.ipynb: new file (257 lines); file diff suppressed because one or more lines are too long.


@@ -5,7 +5,7 @@ Author: John
Email: johnjim0816@gmail.com
Date: 2021-03-22 16:18:10
LastEditor: John
LastEditTime: 2021-04-28 10:13:00
LastEditTime: 2021-05-06 00:43:36
Discription:
Environment:
'''
@@ -13,8 +13,8 @@ import sys,os
curr_path = os.path.dirname(__file__)
parent_path=os.path.dirname(curr_path)
sys.path.append(parent_path) # add current terminal path to sys.path
import gym
import numpy as np
import torch
import datetime
from PPO.agent import PPO
@@ -29,6 +29,8 @@ class PPOConfig:
self.algo = 'PPO'
self.result_path = curr_path+"/results/" +self.env+'/'+curr_time+'/results/' # path to save results
self.model_path = curr_path+"/results/" +self.env+'/'+curr_time+'/models/' # path to save models
self.train_eps = 200 # max training episodes
self.eval_eps = 50
self.batch_size = 5
self.gamma=0.99
self.n_epochs = 4
@@ -38,10 +40,19 @@ class PPOConfig:
self.policy_clip=0.2
self.hidden_dim = 256
self.update_fre = 20 # frequency of agent update
self.train_eps = 300 # max training episodes
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # check gpu
def env_agent_config(cfg,seed=1):
env = gym.make(cfg.env)
env.seed(seed)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
agent = PPO(state_dim,action_dim,cfg)
return env,agent
def train(cfg,env,agent):
print('Start to train !')
print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
rewards= []
ma_rewards = [] # moving average rewards
running_steps = 0
@@ -65,17 +76,46 @@ def train(cfg,env,agent):
else:
ma_rewards.append(ep_reward)
print(f"Episode:{i_ep+1}/{cfg.train_eps}, Reward:{ep_reward:.3f}")
print('Complete training')
return rewards,ma_rewards
def eval(cfg,env,agent):
print('Start to eval !')
print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
rewards= []
ma_rewards = [] # moving average rewards
for i_ep in range(cfg.eval_eps):
state = env.reset()
done = False
ep_reward = 0
while not done:
action, prob, val = agent.choose_action(state)
state_, reward, done, _ = env.step(action)
ep_reward += reward
state = state_
rewards.append(ep_reward)
if ma_rewards:
ma_rewards.append(
0.9*ma_rewards[-1]+0.1*ep_reward)
else:
ma_rewards.append(ep_reward)
print(f"Episode:{i_ep+1}/{cfg.train_eps}, Reward:{ep_reward:.3f}")
print('Complete evaluation')
return rewards,ma_rewards
if __name__ == '__main__':
cfg = PPOConfig()
env = gym.make(cfg.env)
env.seed(1) # Set seeds
state_dim=env.observation_space.shape[0]
action_dim=env.action_space.n
agent = PPO(state_dim,action_dim,cfg)
rewards,ma_rewards = train(cfg,env,agent)
make_dir(cfg.result_path,cfg.model_path)
# train
env,agent = env_agent_config(cfg,seed=1)
rewards, ma_rewards = train(cfg, env, agent)
make_dir(cfg.result_path, cfg.model_path)
agent.save(path=cfg.model_path)
save_results(rewards,ma_rewards,tag='train',path=cfg.result_path)
plot_rewards(rewards,ma_rewards,tag="train",env=cfg.env,algo = cfg.algo,path=cfg.result_path)
save_results(rewards, ma_rewards, tag='train', path=cfg.result_path)
plot_rewards(rewards, ma_rewards, tag="train",
algo=cfg.algo, path=cfg.result_path)
# eval
env,agent = env_agent_config(cfg,seed=10)
agent.load(path=cfg.model_path)
rewards,ma_rewards = eval(cfg,env,agent)
save_results(rewards,ma_rewards,tag='eval',path=cfg.result_path)
plot_rewards(rewards,ma_rewards,tag="eval",env=cfg.env,algo = cfg.algo,path=cfg.result_path)


@@ -1,97 +0,0 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: John
Email: johnjim0816@gmail.com
Date: 2021-03-22 16:18:10
LastEditor: John
LastEditTime: 2021-04-11 01:25:43
Discription:
Environment:
'''
import sys,os
curr_path = os.path.dirname(__file__)
parent_path=os.path.dirname(curr_path)
sys.path.append(parent_path) # add current terminal path to sys.path
import gym
import numpy as np
import torch
import datetime
from PPO.agent import PPO
from common.plot import plot_rewards
from common.utils import save_results
SEQUENCE = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # obtain current time
SAVED_MODEL_PATH = os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"+SEQUENCE+'/' # path to save the model
if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"): # create the folder if it does not exist
os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/")
if not os.path.exists(SAVED_MODEL_PATH): # create the folder if it does not exist
os.mkdir(SAVED_MODEL_PATH)
RESULT_PATH = os.path.split(os.path.abspath(__file__))[0]+"/results/"+SEQUENCE+'/' # path to save rewards
if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/results/"): # create the folder if it does not exist
os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/results/")
if not os.path.exists(RESULT_PATH): # create the folder if it does not exist
os.mkdir(RESULT_PATH)
class PPOConfig:
def __init__(self) -> None:
self.env = 'LunarLander-v2'
self.algo = 'PPO'
self.batch_size = 128
self.gamma=0.95
self.n_epochs = 4
self.actor_lr = 0.002
self.critic_lr = 0.005
self.gae_lambda=0.95
self.policy_clip=0.2
self.hidden_dim = 256
self.update_fre = 20 # frequency of agent update
self.train_eps = 300 # max training episodes
self.train_steps = 1000
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # check gpu
def train(cfg,env,agent):
best_reward = env.reward_range[0]
rewards= []
ma_rewards = [] # moving average rewards
avg_reward = 0
running_steps = 0
for i_episode in range(cfg.train_eps):
state = env.reset()
done = False
ep_reward = 0
# for i_step in range(cfg.train_steps):
while not done:
action, prob, val = agent.choose_action(state)
state_, reward, done, _ = env.step(action)
running_steps += 1
ep_reward += reward
agent.memory.push(state, action, prob, val, reward, done)
if running_steps % cfg.update_fre == 0:
agent.update()
state = state_
# if done:
# break
rewards.append(ep_reward)
if ma_rewards:
ma_rewards.append(
0.9*ma_rewards[-1]+0.1*ep_reward)
else:
ma_rewards.append(ep_reward)
avg_reward = np.mean(rewards[-100:])
if avg_reward > best_reward:
best_reward = avg_reward
agent.save(path=SAVED_MODEL_PATH)
print('Episode:{}/{}, Reward:{:.1f}, avg reward:{:.1f}, Loss:{}'.format(i_episode+1,cfg.train_eps,ep_reward,avg_reward,agent.loss))
return rewards,ma_rewards
if __name__ == '__main__':
cfg = PPOConfig()
env = gym.make(cfg.env)
env.seed(1)
state_dim=env.observation_space.shape[0]
action_dim=env.action_space.n
agent = PPO(state_dim,action_dim,cfg)
rewards,ma_rewards = train(cfg,env,agent)
save_results(rewards,ma_rewards,tag='train',path=RESULT_PATH)
plot_rewards(rewards,ma_rewards,tag="train",algo = cfg.algo,path=RESULT_PATH)


@@ -5,7 +5,7 @@ Author: John
Email: johnjim0816@gmail.com
Date: 2020-11-22 23:27:44
LastEditor: John
LastEditTime: 2021-03-23 16:37:14
LastEditTime: 2021-05-05 17:33:10
Discription:
Environment:
'''
@@ -64,7 +64,7 @@ class PolicyGradient:
# print(loss)
loss.backward()
self.optimizer.step()
def save_model(self,path):
def save(self,path):
torch.save(self.policy_net.state_dict(), path+'pg_checkpoint.pt')
def load_model(self,path):
def load(self,path):
self.policy_net.load_state_dict(torch.load(path+'pg_checkpoint.pt'))


@@ -1,89 +0,0 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: John
Email: johnjim0816@gmail.com
Date: 2020-11-22 23:21:53
LastEditor: John
LastEditTime: 2021-03-23 16:38:54
Discription:
Environment:
'''
import sys,os
sys.path.append(os.getcwd()) # add current terminal path to sys.path
from itertools import count
import datetime
import gym
from PolicyGradient.agent import PolicyGradient
from common.plot import plot_rewards
from common.utils import save_results
SEQUENCE = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # obtain current time
SAVED_MODEL_PATH = os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"+SEQUENCE+'/' # path to save model
if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"):
os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/")
if not os.path.exists(SAVED_MODEL_PATH):
os.mkdir(SAVED_MODEL_PATH)
RESULT_PATH = os.path.split(os.path.abspath(__file__))[0]+"/results/"+SEQUENCE+'/' # path to save rewards
if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/results/"):
os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/results/")
if not os.path.exists(RESULT_PATH):
os.mkdir(RESULT_PATH)
class PGConfig:
def __init__(self):
self.train_eps = 300 # number of training episodes
self.batch_size = 8
self.lr = 0.01 # learning rate
self.gamma = 0.99
self.hidden_dim = 36 # dimension of hidden layer
def train(cfg,env,agent):
'''the *_pool lists below store transition sequences used for the gradient update'''
state_pool = [] # states collected over each batch_size episodes
action_pool = []
reward_pool = []
''' per-episode rewards, stored for plotting'''
rewards = []
ma_rewards = []
for i_episode in range(cfg.train_eps):
state = env.reset()
ep_reward = 0
for _ in count():
action = agent.choose_action(state) # select an action given the current state
next_state, reward, done, _ = env.step(action)
ep_reward += reward
if done:
reward = 0
state_pool.append(state)
action_pool.append(float(action))
reward_pool.append(reward)
state = next_state
if done:
print('Episode:', i_episode, ' Reward:', ep_reward)
break
if i_episode > 0 and i_episode % cfg.batch_size == 0:
agent.update(reward_pool,state_pool,action_pool)
state_pool = [] # reset the pools for the next batch
action_pool = []
reward_pool = []
rewards.append(ep_reward)
if ma_rewards:
ma_rewards.append(
0.9*ma_rewards[-1]+0.1*ep_reward)
else:
ma_rewards.append(ep_reward)
print('complete training')
return rewards, ma_rewards
if __name__ == "__main__":
cfg = PGConfig()
env = gym.make('CartPole-v0') # you can google why unwrapped gym is usually not needed here
env.seed(1) # set env random seed
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
agent = PolicyGradient(state_dim,cfg)
rewards, ma_rewards = train(cfg,env,agent)
agent.save_model(SAVED_MODEL_PATH)
save_results(rewards,ma_rewards,tag='train',path=RESULT_PATH)
plot_rewards(rewards,ma_rewards,tag="train",algo = "Policy Gradient",path=RESULT_PATH)

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 61 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 57 KiB


@@ -0,0 +1,136 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: John
Email: johnjim0816@gmail.com
Date: 2020-11-22 23:21:53
LastEditor: John
LastEditTime: 2021-05-05 17:35:20
Discription:
Environment:
'''
import sys,os
curr_path = os.path.dirname(__file__)
parent_path = os.path.dirname(curr_path)
sys.path.append(parent_path) # add current terminal path to sys.path
import gym
import torch
import datetime
from itertools import count
from PolicyGradient.agent import PolicyGradient
from common.plot import plot_rewards
from common.utils import save_results,make_dir
curr_time = datetime.datetime.now().strftime(
"%Y%m%d-%H%M%S") # obtain current time
class PGConfig:
def __init__(self):
self.algo = "PolicyGradient" # name of algo
self.env = 'CartPole-v0'
self.result_path = curr_path+"/outputs/" + self.env + \
'/'+curr_time+'/results/' # path to save results
self.model_path = curr_path+"/outputs/" + self.env + \
'/'+curr_time+'/models/' # path to save models
self.train_eps = 300 # number of training episodes
self.eval_eps = 50
self.batch_size = 8
self.lr = 0.01 # learning rate
self.gamma = 0.99
self.hidden_dim = 36 # dimension of hidden layer
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu") # check gpu
def env_agent_config(cfg,seed=1):
env = gym.make(cfg.env)
env.seed(seed)
state_dim = env.observation_space.shape[0]
agent = PolicyGradient(state_dim,cfg)
return env,agent
def train(cfg,env,agent):
print('Start to train !')
print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
state_pool = [] # states collected over each batch_size episodes
action_pool = []
reward_pool = []
rewards = []
ma_rewards = []
for i_episode in range(cfg.train_eps):
state = env.reset()
ep_reward = 0
for _ in count():
action = agent.choose_action(state) # select an action given the current state
next_state, reward, done, _ = env.step(action)
ep_reward += reward
if done:
reward = 0
state_pool.append(state)
action_pool.append(float(action))
reward_pool.append(reward)
state = next_state
if done:
print('Episode:', i_episode, ' Reward:', ep_reward)
break
if i_episode > 0 and i_episode % cfg.batch_size == 0:
agent.update(reward_pool,state_pool,action_pool)
state_pool = [] # reset the pools for the next batch
action_pool = []
reward_pool = []
rewards.append(ep_reward)
if ma_rewards:
ma_rewards.append(
0.9*ma_rewards[-1]+0.1*ep_reward)
else:
ma_rewards.append(ep_reward)
print('Complete training')
return rewards, ma_rewards
def eval(cfg,env,agent):
print('Start to eval !')
print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
rewards = []
ma_rewards = []
for i_episode in range(cfg.eval_eps):
state = env.reset()
ep_reward = 0
for _ in count():
action = agent.choose_action(state) # select an action given the current state
next_state, reward, done, _ = env.step(action)
ep_reward += reward
if done:
reward = 0
state = next_state
if done:
print('Episode:', i_episode, ' Reward:', ep_reward)
break
rewards.append(ep_reward)
if ma_rewards:
ma_rewards.append(
0.9*ma_rewards[-1]+0.1*ep_reward)
else:
ma_rewards.append(ep_reward)
print('Complete evaluation')
return rewards, ma_rewards
if __name__ == "__main__":
cfg = PGConfig()
# train
env,agent = env_agent_config(cfg,seed=1)
rewards, ma_rewards = train(cfg, env, agent)
make_dir(cfg.result_path, cfg.model_path)
agent.save(path=cfg.model_path)
save_results(rewards, ma_rewards, tag='train', path=cfg.result_path)
plot_rewards(rewards, ma_rewards, tag="train",
algo=cfg.algo, path=cfg.result_path)
# eval
env,agent = env_agent_config(cfg,seed=10)
agent.load(path=cfg.model_path)
rewards,ma_rewards = eval(cfg,env,agent)
save_results(rewards,ma_rewards,tag='eval',path=cfg.result_path)
plot_rewards(rewards,ma_rewards,tag="eval",env=cfg.env,algo = cfg.algo,path=cfg.result_path)


@@ -1,108 +0,0 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: JiangJi
Email: johnjim0816@gmail.com
Date: 2021-04-21 11:07:57
LastEditor: JiangJi
LastEditTime: 2021-04-21 11:15:00
Discription:
Environment:
'''
import sys,os
curr_path = os.path.dirname(__file__)
parent_path=os.path.dirname(curr_path)
sys.path.append(parent_path) # add current terminal path to sys.path
import torch
import gym
import numpy as np
import datetime
from common.plot import plot_rewards
from common.utils import save_results,make_dir
curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # obtain current time
class TD3Config:
def __init__(self) -> None:
self.algo = 'TD3'
self.env = 'HalfCheetah-v2'
self.seed = 0
self.result_path = curr_path+"/results/" +self.env+'/'+curr_time+'/results/' # path to save results
self.model_path = curr_path+"/results/" +self.env+'/'+curr_time+'/models/' # path to save models
self.eval_freq = 5e3 # How often (time steps) we evaluate
# self.train_eps = 800
self.max_timestep = 4000000 # Max time steps to run environment
# Runs policy for X episodes and returns average reward
# A fixed seed is used for the eval environment
def eval(env_name,seed, eval_episodes=10):
eval_env = gym.make(env_name)
eval_env.seed(seed + 100)
avg_reward = 0.
for _ in range(eval_episodes):
state, done = eval_env.reset(), False
while not done:
# eval_env.render()
action = eval_env.action_space.sample()
state, reward, done, _ = eval_env.step(action)
avg_reward += reward
avg_reward /= eval_episodes
print("---------------------------------------")
print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
print("---------------------------------------")
return avg_reward
def train(cfg,env):
# Evaluate untrained policy
evaluations = [eval(cfg.env, cfg.seed)]
state, done = env.reset(), False
ep_reward = 0
ep_timesteps = 0
episode_num = 0
rewards = []
ma_rewards = [] # moving average reward
for t in range(int(cfg.max_timestep)):
ep_timesteps += 1
# Select action randomly
action = env.action_space.sample()
# Perform action
next_state, reward, done, _ = env.step(action)
state = next_state
ep_reward += reward
if done:
# +1 to account for 0 indexing. +0 on ep_timesteps since it will increment +1 even if done=True
print(f"Episode:{episode_num+1}, Episode T:{ep_timesteps}, Reward:{ep_reward:.3f}")
# Reset environment
state, done = env.reset(), False
rewards.append(ep_reward)
# compute the moving average reward
if ma_rewards:
ma_rewards.append(0.9*ma_rewards[-1]+0.1*ep_reward)
else:
ma_rewards.append(ep_reward)
ep_reward = 0
ep_timesteps = 0
episode_num += 1
# Evaluate episode
if (t + 1) % cfg.eval_freq == 0:
evaluations.append(eval(cfg.env, cfg.seed))
return rewards, ma_rewards
if __name__ == "__main__":
cfg = TD3Config()
env = gym.make(cfg.env)
env.seed(cfg.seed) # Set seeds
torch.manual_seed(cfg.seed)
np.random.seed(cfg.seed)
rewards,ma_rewards = train(cfg,env)
make_dir(cfg.result_path)
save_results(rewards,ma_rewards,tag='train',path=cfg.result_path)
plot_rewards(rewards,ma_rewards,tag="train",env=cfg.env,algo = cfg.algo,path=cfg.result_path)
# cfg.result_path = './TD3/results/HalfCheetah-v2/20210416-130341/'
# agent.load(cfg.result_path)
# eval(cfg.env,agent, cfg.seed)


Some files were not shown because too many files have changed in this diff.