update
@@ -5,7 +5,7 @@
@Email: johnjim0816@gmail.com
@Date: 2020-06-12 00:50:49
@LastEditor: John
LastEditTime: 2021-03-28 11:07:35
LastEditTime: 2021-05-04 22:28:06
@Discription:
@Environment: python 3.7.7
'''
@@ -35,22 +35,16 @@ class DoubleDQN:
        self.batch_size = cfg.batch_size
        self.policy_net = MLP(state_dim, action_dim,hidden_dim=cfg.hidden_dim).to(self.device)
        self.target_net = MLP(state_dim, action_dim,hidden_dim=cfg.hidden_dim).to(self.device)
        # target_net starts as an exact copy of policy_net's parameters
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()  # disable BatchNormalization and Dropout
        # target_net copy from policy_net
        for target_param, param in zip(self.target_net.parameters(), self.policy_net.parameters()):
            target_param.data.copy_(param.data)
        # self.target_net.eval()  # disable BatchNormalization and Dropout
        # note the difference between parameters() and state_dict(): the former only yields tensors with requires_grad=True
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.lr)
        self.loss = 0
        self.memory = ReplayBuffer(cfg.memory_capacity)

    def choose_action(self, state):
        '''Choose an action
        '''
        self.epsilon = self.epsilon_end + (self.epsilon_start - self.epsilon_end) * \
            math.exp(-1. * self.actions_count / self.epsilon_decay)
        self.actions_count += 1
        if random.random() > self.epsilon:
            with torch.no_grad():
    def predict(self,state):
        with torch.no_grad():
            # convert to a tensor first so it can be fed to the network; the state elements are originally float64
            # note that state=torch.tensor(state).unsqueeze(0) is equivalent to state=torch.tensor([state])
            state = torch.tensor(
@@ -61,6 +55,15 @@ class DoubleDQN:
            # e.g. torch.return_types.max(values=tensor([10.3587]), indices=tensor([0]))
            # so tensor.max(1)[1] returns the index of the max value, i.e. the action
            action = q_value.max(1)[1].item()
        return action
    def choose_action(self, state):
        '''Choose an action
        '''
        self.actions_count += 1
        self.epsilon = self.epsilon_end + (self.epsilon_start - self.epsilon_end) * \
            math.exp(-1. * self.actions_count / self.epsilon_decay)
        if random.random() > self.epsilon:
            action = self.predict(state)
        else:
            action = random.randrange(self.action_dim)
        return action
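Side note: the refactor above splits the old choose_action into a greedy predict() plus an epsilon-greedy choose_action() whose epsilon decays exponentially with the action counter. A minimal standalone sketch of that schedule (not part of the commit), assuming the config values used by the new training script further down (epsilon_start=1, epsilon_end=0.01, epsilon_decay=500):

import math

epsilon_start, epsilon_end, epsilon_decay = 1.0, 0.01, 500  # values from DoubleDQNConfig below
for actions_count in [0, 100, 500, 1000, 2000, 5000]:
    epsilon = epsilon_end + (epsilon_start - epsilon_end) * \
        math.exp(-1. * actions_count / epsilon_decay)
    print(f"after {actions_count:>4} actions: epsilon = {epsilon:.3f}")
# after    0 actions: epsilon = 1.000
# after  100 actions: epsilon = 0.821
# after  500 actions: epsilon = 0.374
# after 1000 actions: epsilon = 0.144
# after 2000 actions: epsilon = 0.028
# after 5000 actions: epsilon = 0.010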
@@ -71,7 +74,7 @@ class DoubleDQN:
        # randomly sample a batch of transitions from memory
        state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(
            self.batch_size)
        ### convert to tensors ###
        # convert to tensor
        state_batch = torch.tensor(
            state_batch, device=self.device, dtype=torch.float)
        action_batch = torch.tensor(action_batch, device=self.device).unsqueeze(
@@ -82,8 +85,7 @@ class DoubleDQN:
            next_state_batch, device=self.device, dtype=torch.float)

        done_batch = torch.tensor(np.float32(
            done_batch), device=self.device).unsqueeze(1)  # convert bool to float, then to a tensor

            done_batch), device=self.device)  # convert bool to float, then to a tensor
        # compute Q(s_t, a) for the current (s_t, a)
        q_values = self.policy_net(state_batch)
        next_q_values = self.policy_net(next_state_batch)
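Dropping .unsqueeze(1) on done_batch matters for broadcasting: next_target_q_value has shape (batch,), so a (batch, 1) done mask would broadcast the product into a (batch, batch) matrix instead of an element-wise result. A minimal shape check, illustrative only and not part of the commit:

import torch

batch = 4
next_target_q_value = torch.rand(batch)         # shape (batch,)
done_col = torch.zeros(batch).unsqueeze(1)      # shape (batch, 1) -- old layout
done_flat = torch.zeros(batch)                  # shape (batch,)   -- new layout
print((next_target_q_value * (1 - done_col)).shape)   # torch.Size([4, 4]): unintended broadcast
print((next_target_q_value * (1 - done_flat)).shape)  # torch.Size([4]): element-wise, as intended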
@@ -102,7 +104,7 @@ class DoubleDQN:
            next_state_batch)
        # pick the action that maximizes Q(s_t', a) and plug it into next_target_values to get the target net's next_q_value, i.e. Q'(s_t'|a=argmax Q(s_t', a))
        next_target_q_value = next_target_values.gather(1, torch.max(next_q_values, 1)[1].unsqueeze(1)).squeeze(1)
        q_target = reward_batch + self.gamma * next_target_q_value * (1-done_batch[0])
        q_target = reward_batch + self.gamma * next_target_q_value * (1-done_batch)
        self.loss = nn.MSELoss()(q_value, q_target.unsqueeze(1))  # compute the mean squared error loss
        # optimize the model
        self.optimizer.zero_grad()  # zero_grad clears all the old gradients from the last step
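This hunk is the heart of Double DQN: the policy net picks the greedy action for each next state and the target net evaluates that action, which reduces the over-estimation of a vanilla DQN target. A self-contained sketch of the two targets with dummy tensors (illustrative names, not the repo's code):

import torch

batch_size, action_dim = 64, 2
next_q_values = torch.rand(batch_size, action_dim)       # stands in for policy_net(next_state_batch)
next_target_values = torch.rand(batch_size, action_dim)  # stands in for target_net(next_state_batch)

# Vanilla DQN: the target net both selects and evaluates the next action.
dqn_next_q = next_target_values.max(1)[0]

# Double DQN: the policy net selects, the target net evaluates.
greedy_actions = next_q_values.max(1)[1].unsqueeze(1)                  # argmax_a Q(s', a)
ddqn_next_q = next_target_values.gather(1, greedy_actions).squeeze(1)  # Q'(s', argmax_a Q(s', a))
print(dqn_next_q.shape, ddqn_next_q.shape)  # both torch.Size([64])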
@@ -113,7 +115,9 @@ class DoubleDQN:
        self.optimizer.step()  # update the model

    def save(self,path):
        torch.save(self.target_net.state_dict(), path+'DoubleDQN_checkpoint.pth')
        torch.save(self.target_net.state_dict(), path+'checkpoint.pth')

    def load(self,path):
        self.target_net.load_state_dict(torch.load(path+'DoubleDQN_checkpoint.pth'))
        self.target_net.load_state_dict(torch.load(path+'checkpoint.pth'))
        for target_param, param in zip(self.target_net.parameters(), self.policy_net.parameters()):
            param.data.copy_(target_param.data)
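Both __init__ and load() now synchronize the two networks with an explicit parameter-by-parameter copy instead of load_state_dict(). For a plain MLP the two styles are equivalent; the general difference, noted in the __init__ comment, is that state_dict() also carries non-trainable buffers while parameters() only yields tensors with requires_grad=True. A small standalone sketch (not part of the commit):

import torch
import torch.nn as nn

src, dst = nn.Linear(4, 2), nn.Linear(4, 2)

# Style 1: copy weights (and any buffers) through the state dict.
dst.load_state_dict(src.state_dict())

# Style 2: copy tensor data parameter by parameter, as the agent now does.
for dst_param, src_param in zip(dst.parameters(), src.parameters()):
    dst_param.data.copy_(src_param.data)

print(all(torch.equal(a, b) for a, b in zip(dst.parameters(), src.parameters())))  # True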
@@ -1,93 +0,0 @@
#!/usr/bin/env python
# coding=utf-8
'''
@Author: John
@Email: johnjim0816@gmail.com
@Date: 2020-06-12 00:48:57
@LastEditor: John
LastEditTime: 2021-03-28 11:05:14
@Discription:
@Environment: python 3.7.7
'''
import sys,os
sys.path.append(os.getcwd())  # add current terminal path
import gym
import torch
import datetime
from DoubleDQN.agent import DoubleDQN
from common.plot import plot_rewards
from common.utils import save_results

SEQUENCE = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")  # obtain current time
SAVED_MODEL_PATH = os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"+SEQUENCE+'/'  # path for saving the model
if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"):
    os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/")
if not os.path.exists(SAVED_MODEL_PATH):
    os.mkdir(SAVED_MODEL_PATH)
RESULT_PATH = os.path.split(os.path.abspath(__file__))[0]+"/results/"+SEQUENCE+'/'  # path for saving rewards
if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/results/"):
    os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/results/")
if not os.path.exists(RESULT_PATH):
    os.mkdir(RESULT_PATH)

class DoubleDQNConfig:
    def __init__(self):
        self.algo = "Double DQN"  # name of algo
        self.gamma = 0.99
        self.epsilon_start = 0.9  # initial epsilon of the e-greedy policy
        self.epsilon_end = 0.01
        self.epsilon_decay = 200
        self.lr = 0.01  # learning rate
        self.memory_capacity = 10000  # capacity of the replay memory
        self.batch_size = 128
        self.train_eps = 300  # number of training episodes
        self.train_steps = 200  # max steps per training episode
        self.target_update = 2  # update frequency of the target net
        self.eval_eps = 20  # number of evaluation episodes
        self.eval_steps = 200  # max steps per evaluation episode
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # check gpu
        self.hidden_dim = 128  # hidden layer size of the network


def train(cfg,env,agent):
    print('Start to train !')
    rewards,ma_rewards = [],[]
    ep_steps = []
    for i_episode in range(cfg.train_eps):
        state = env.reset()  # reset the environment
        ep_reward = 0
        for i_step in range(cfg.train_steps):
            action = agent.choose_action(state)  # choose an action from the current state
            next_state, reward, done, _ = env.step(action)  # step the environment
            ep_reward += reward
            agent.memory.push(state, action, reward, next_state, done)  # store the transition in memory
            state = next_state  # move to the next state
            agent.update()  # update the network at every step
            if done:
                break
        # update the target network by copying all weights and biases from the DQN
        if i_episode % cfg.target_update == 0:
            agent.target_net.load_state_dict(agent.policy_net.state_dict())
        print('Episode:{}/{}, Reward:{}, Steps:{}, Done:{}'.format(i_episode+1,cfg.train_eps,ep_reward,i_step,done))
        ep_steps.append(i_step)
        rewards.append(ep_reward)
        # compute the moving-average reward
        if ma_rewards:
            ma_rewards.append(
                0.9*ma_rewards[-1]+0.1*ep_reward)
        else:
            ma_rewards.append(ep_reward)
    print('Complete training!')
    return rewards,ma_rewards

if __name__ == "__main__":
    cfg = DoubleDQNConfig()
    env = gym.make('CartPole-v0').unwrapped  # you can look up why gym envs get unwrapped; it is usually not needed here
    env.seed(1)  # set the random seed of the env
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.n
    agent = DoubleDQN(state_dim,action_dim,cfg)
    rewards,ma_rewards = train(cfg,env,agent)
    agent.save(path=SAVED_MODEL_PATH)
    save_results(rewards,ma_rewards,tag='train',path=RESULT_PATH)
    plot_rewards(rewards,ma_rewards,tag="train",algo = cfg.algo,path=RESULT_PATH)
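Both the deleted script above and the new scripts below track ma_rewards, an exponential moving average of the episode rewards with smoothing factor 0.9. A tiny standalone sketch of that bookkeeping (not part of the commit):

rewards = [10.0, 12.0, 9.0, 20.0]  # dummy episode rewards
ma_rewards = []
for ep_reward in rewards:
    if ma_rewards:
        ma_rewards.append(0.9 * ma_rewards[-1] + 0.1 * ep_reward)
    else:
        ma_rewards.append(ep_reward)
print(ma_rewards)  # [10.0, 10.2, 10.08, 11.072] (up to float rounding)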
codes/DoubleDQN/task0_train.ipynb (new file, 194 lines)
@@ -0,0 +1,194 @@
{
 "metadata": {
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.10"
  },
  "orig_nbformat": 2,
  "kernelspec": {
   "name": "python3710jvsc74a57bd0366e1054dee9d4501b0eb8f87335afd3c67fc62db6ee611bbc7f8f5a1fefe232",
   "display_name": "Python 3.7.10 64-bit ('py37': conda)"
  },
  "metadata": {
   "interpreter": {
    "hash": "366e1054dee9d4501b0eb8f87335afd3c67fc62db6ee611bbc7f8f5a1fefe232"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2,
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "from pathlib import Path\n",
    "curr_path = str(Path().absolute())\n",
    "parent_path = str(Path().absolute().parent)\n",
    "sys.path.append(parent_path) # add current terminal path to sys.path"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import gym\n",
    "import torch\n",
    "import datetime\n",
    "from DoubleDQN.agent import DoubleDQN\n",
    "from common.plot import plot_rewards\n",
    "from common.utils import save_results, make_dir\n",
    "\n",
    "curr_time = datetime.datetime.now().strftime(\n",
    "    \"%Y%m%d-%H%M%S\") # obtain current time"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class DoubleDQNConfig:\n",
    "    def __init__(self):\n",
    "        self.algo = \"DoubleDQN\" # name of algo\n",
    "        self.env = 'CartPole-v0' # env name\n",
    "        self.result_path = curr_path+\"/outputs/\" + self.env + \\\n",
    "            '/'+curr_time+'/results/' # path to save results\n",
    "        self.model_path = curr_path+\"/outputs/\" + self.env + \\\n",
    "            '/'+curr_time+'/models/' # path to save models\n",
    "        self.train_eps = 200 # max training episodes\n",
    "        self.eval_eps = 50 # max evaluation episodes\n",
    "        self.gamma = 0.95\n",
    "        self.epsilon_start = 1 # start epsilon of e-greedy policy\n",
    "        self.epsilon_end = 0.01\n",
    "        self.epsilon_decay = 500\n",
    "        self.lr = 0.001 # learning rate\n",
    "        self.memory_capacity = 100000 # capacity of Replay Memory\n",
    "        self.batch_size = 64\n",
    "        self.target_update = 2 # update frequency of target net\n",
    "        self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") # check gpu\n",
    "        self.hidden_dim = 256 # hidden size of net"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def env_agent_config(cfg,seed=1):\n",
    "    env = gym.make(cfg.env)\n",
    "    env.seed(seed)\n",
    "    state_dim = env.observation_space.shape[0]\n",
    "    action_dim = env.action_space.n\n",
    "    agent = DoubleDQN(state_dim,action_dim,cfg)\n",
    "    return env,agent"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(cfg,env,agent):\n",
    "    print('Start to train !')\n",
    "    rewards,ma_rewards = [],[]\n",
    "    for i_ep in range(cfg.train_eps):\n",
    "        state = env.reset()\n",
    "        ep_reward = 0\n",
    "        while True:\n",
    "            action = agent.choose_action(state)\n",
    "            next_state, reward, done, _ = env.step(action)\n",
    "            ep_reward += reward\n",
    "            agent.memory.push(state, action, reward, next_state, done)\n",
    "            state = next_state\n",
    "            agent.update()\n",
    "            if done:\n",
    "                break\n",
    "        if i_ep % cfg.target_update == 0:\n",
    "            agent.target_net.load_state_dict(agent.policy_net.state_dict())\n",
    "        if (i_ep+1)%10 == 0:\n",
    "            print(f'Episode:{i_ep+1}/{cfg.train_eps}, Reward:{ep_reward}')\n",
    "        rewards.append(ep_reward)\n",
    "        if ma_rewards:\n",
    "            ma_rewards.append(\n",
    "                0.9*ma_rewards[-1]+0.1*ep_reward)\n",
    "        else:\n",
    "            ma_rewards.append(ep_reward)\n",
    "    print('Complete training!')\n",
    "    return rewards,ma_rewards"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def eval(cfg,env,agent):\n",
    "    print('Start to eval !')\n",
    "    print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')\n",
    "    rewards = []\n",
    "    ma_rewards = []\n",
    "    for i_ep in range(cfg.eval_eps):\n",
    "        state = env.reset()\n",
    "        ep_reward = 0\n",
    "        while True:\n",
    "            action = agent.predict(state)\n",
    "            next_state, reward, done, _ = env.step(action)\n",
    "            state = next_state\n",
    "            ep_reward += reward\n",
    "            if done:\n",
    "                break\n",
    "        rewards.append(ep_reward)\n",
    "        if ma_rewards:\n",
    "            ma_rewards.append(ma_rewards[-1]*0.9+ep_reward*0.1)\n",
    "        else:\n",
    "            ma_rewards.append(ep_reward)\n",
    "        print(f\"Episode:{i_ep+1}/{cfg.eval_eps}, reward:{ep_reward:.1f}\")\n",
    "    print('Complete evaluating!')\n",
    "    return rewards,ma_rewards"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if __name__ == \"__main__\":\n",
    "    cfg = DoubleDQNConfig()\n",
    "    # train\n",
    "    env,agent = env_agent_config(cfg,seed=1)\n",
    "    rewards, ma_rewards = train(cfg, env, agent)\n",
    "    make_dir(cfg.result_path, cfg.model_path)\n",
    "    agent.save(path=cfg.model_path)\n",
    "    save_results(rewards, ma_rewards, tag='train', path=cfg.result_path)\n",
    "    plot_rewards(rewards, ma_rewards, tag=\"train\",\n",
    "                 algo=cfg.algo, path=cfg.result_path)\n",
    "\n",
    "    # eval\n",
    "    env,agent = env_agent_config(cfg,seed=10)\n",
    "    agent.load(path=cfg.model_path)\n",
    "    rewards,ma_rewards = eval(cfg,env,agent)\n",
    "    save_results(rewards,ma_rewards,tag='eval',path=cfg.result_path)\n",
    "    plot_rewards(rewards,ma_rewards,tag=\"eval\",env=cfg.env,algo = cfg.algo,path=cfg.result_path)"
   ]
  }
 ]
}
codes/DoubleDQN/task0_train.py (new file, 123 lines)
@@ -0,0 +1,123 @@
#!/usr/bin/env python
# coding=utf-8
'''
@Author: John
@Email: johnjim0816@gmail.com
@Date: 2020-06-12 00:48:57
@LastEditor: John
LastEditTime: 2021-05-04 22:26:59
@Discription:
@Environment: python 3.7.7
'''
import sys,os
curr_path = os.path.dirname(__file__)
parent_path = os.path.dirname(curr_path)
sys.path.append(parent_path)  # add current terminal path to sys.path

import gym
import torch
import datetime
from DoubleDQN.agent import DoubleDQN
from common.plot import plot_rewards
from common.utils import save_results, make_dir

curr_time = datetime.datetime.now().strftime(
    "%Y%m%d-%H%M%S")  # obtain current time

class DoubleDQNConfig:
    def __init__(self):
        self.algo = "DoubleDQN"  # name of algo
        self.env = 'CartPole-v0'  # env name
        self.result_path = curr_path+"/outputs/" + self.env + \
            '/'+curr_time+'/results/'  # path to save results
        self.model_path = curr_path+"/outputs/" + self.env + \
            '/'+curr_time+'/models/'  # path to save models
        self.train_eps = 200  # max training episodes
        self.eval_eps = 50  # max evaluation episodes
        self.gamma = 0.95
        self.epsilon_start = 1  # start epsilon of e-greedy policy
        self.epsilon_end = 0.01
        self.epsilon_decay = 500
        self.lr = 0.001  # learning rate
        self.memory_capacity = 100000  # capacity of Replay Memory
        self.batch_size = 64
        self.target_update = 2  # update frequency of target net
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # check gpu
        self.hidden_dim = 256  # hidden size of net

def env_agent_config(cfg,seed=1):
    env = gym.make(cfg.env)
    env.seed(seed)
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.n
    agent = DoubleDQN(state_dim,action_dim,cfg)
    return env,agent

def train(cfg,env,agent):
    print('Start to train !')
    rewards,ma_rewards = [],[]
    for i_ep in range(cfg.train_eps):
        state = env.reset()
        ep_reward = 0
        while True:
            action = agent.choose_action(state)
            next_state, reward, done, _ = env.step(action)
            ep_reward += reward
            agent.memory.push(state, action, reward, next_state, done)
            state = next_state
            agent.update()
            if done:
                break
        if i_ep % cfg.target_update == 0:
            agent.target_net.load_state_dict(agent.policy_net.state_dict())
        print(f'Episode:{i_ep+1}/{cfg.train_eps}, Reward:{ep_reward},Epsilon:{agent.epsilon:.2f}')
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(
                0.9*ma_rewards[-1]+0.1*ep_reward)
        else:
            ma_rewards.append(ep_reward)
    print('Complete training!')
    return rewards,ma_rewards

def eval(cfg,env,agent):
    print('Start to eval !')
    print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
    rewards = []
    ma_rewards = []
    for i_ep in range(cfg.eval_eps):
        state = env.reset()
        ep_reward = 0
        while True:
            action = agent.predict(state)
            next_state, reward, done, _ = env.step(action)
            state = next_state
            ep_reward += reward
            if done:
                break
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(ma_rewards[-1]*0.9+ep_reward*0.1)
        else:
            ma_rewards.append(ep_reward)
        print(f"Episode:{i_ep+1}/{cfg.eval_eps}, reward:{ep_reward:.1f}")
    print('Complete evaluating!')
    return rewards,ma_rewards

if __name__ == "__main__":
    cfg = DoubleDQNConfig()
    # train
    env,agent = env_agent_config(cfg,seed=1)
    rewards, ma_rewards = train(cfg, env, agent)
    make_dir(cfg.result_path, cfg.model_path)
    agent.save(path=cfg.model_path)
    save_results(rewards, ma_rewards, tag='train', path=cfg.result_path)
    plot_rewards(rewards, ma_rewards, tag="train",
                 algo=cfg.algo, path=cfg.result_path)

    # eval
    env,agent = env_agent_config(cfg,seed=10)
    agent.load(path=cfg.model_path)
    rewards,ma_rewards = eval(cfg,env,agent)
    save_results(rewards,ma_rewards,tag='eval',path=cfg.result_path)
    plot_rewards(rewards,ma_rewards,tag="eval",env=cfg.env,algo = cfg.algo,path=cfg.result_path)
@@ -1,21 +0,0 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: John
Email: johnjim0816@gmail.com
Date: 2020-10-15 21:28:00
LastEditor: John
LastEditTime: 2020-10-15 21:50:30
Discription:
Environment:
'''
import os
import numpy as np


def save_results(rewards,moving_average_rewards,ep_steps,tag='train',result_path='./results'):
    if not os.path.exists(result_path):  # check whether the results folder exists
        os.mkdir(result_path)
    np.save(result_path+'rewards_'+tag+'.npy', rewards)
    np.save(result_path+'moving_average_rewards_'+tag+'.npy', moving_average_rewards)
    np.save(result_path+'steps_'+tag+'.npy',ep_steps )
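The old common/utils.py helper above is deleted, and the new scripts import save_results and make_dir from common.utils with a different signature. The commit does not show the new implementation, so what follows is only a hedged sketch inferred from the call sites make_dir(cfg.result_path, cfg.model_path) and save_results(rewards, ma_rewards, tag='train', path=cfg.result_path); the file names and details are assumptions, the actual code lives in common/utils.py:

import os
import numpy as np

def make_dir(*paths):
    # assumption: create each directory (and any missing parents) if it does not exist yet
    for path in paths:
        os.makedirs(path, exist_ok=True)

def save_results(rewards, ma_rewards, tag='train', path='./results/'):
    # assumption: persist episode rewards and their moving average as .npy files;
    # the real file names may differ from these
    np.save(os.path.join(path, f'{tag}_rewards.npy'), rewards)
    np.save(os.path.join(path, f'{tag}_ma_rewards.npy'), ma_rewards)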