update

@@ -1,33 +0,0 @@
-## Approach
-
-See the [blog post](https://blog.csdn.net/JohnJim0/article/details/111552545)
-
-## Environment
-
-python 3.7.9
-
-pytorch 1.6.0
-
-tensorboard 2.3.0
-
-torchvision 0.7.0
-
-## Usage
-
-train:
-
-```bash
-python main.py
-```
-
-eval:
-
-```bash
-python main.py --train 0
-```
-
-Visualization:
-
-```bash
-tensorboard --logdir logs
-```
@@ -5,7 +5,7 @@
 @Email: johnjim0816@gmail.com
 @Date: 2020-06-12 00:50:49
 @LastEditor: John
-LastEditTime: 2020-12-22 16:20:35
+LastEditTime: 2021-03-13 15:01:27
 @Discription:
 @Environment: python 3.7.7
 '''
@@ -20,65 +20,51 @@ import torch.nn.functional as F
 import random
 import math
 import numpy as np
-from memory import ReplayBuffer
-from model import FCN
-class DQN:
-    def __init__(self, n_states, n_actions, gamma=0.99, epsilon_start=0.9, epsilon_end=0.05, epsilon_decay=200, memory_capacity=10000, policy_lr=0.01, batch_size=128, device="cpu"):
-        self.actions_count = 0
+from common.memory import ReplayBuffer
+from common.model import MLP2
+class DoubleDQN:
+    def __init__(self, n_states, n_actions, cfg):
         self.n_actions = n_actions  # total number of actions
-        self.device = device  # device, cpu or gpu
-        self.gamma = gamma
+        self.device = cfg.device  # device, cpu or gpu
+        self.gamma = cfg.gamma
         # parameters of the epsilon-greedy policy
         self.epsilon = 0
-        self.epsilon_start = epsilon_start
-        self.epsilon_end = epsilon_end
-        self.epsilon_decay = epsilon_decay
-        self.batch_size = batch_size
-        self.policy_net = FCN(n_states, n_actions).to(self.device)
-        self.target_net = FCN(n_states, n_actions).to(self.device)
+        self.actions_count = 0
+        self.epsilon_start = cfg.epsilon_start
+        self.epsilon_end = cfg.epsilon_end
+        self.epsilon_decay = cfg.epsilon_decay
+        self.batch_size = cfg.batch_size
+        self.policy_net = MLP2(n_states, n_actions, hidden_dim=cfg.hidden_dim).to(self.device)
+        self.target_net = MLP2(n_states, n_actions, hidden_dim=cfg.hidden_dim).to(self.device)
         # target_net starts as an exact copy of policy_net's parameters
         self.target_net.load_state_dict(self.policy_net.state_dict())
         self.target_net.eval()  # disables BatchNormalization and Dropout
         # see the difference between parameters() and state_dict(); the former has requires_grad=True
-        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=policy_lr)
+        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.lr)
         self.loss = 0
-        self.memory = ReplayBuffer(memory_capacity)
+        self.memory = ReplayBuffer(cfg.memory_capacity)
-    def choose_action(self, state, train=True):
+    def choose_action(self, state):
         '''Choose an action.'''
-        if train:
-            self.epsilon = self.epsilon_end + (self.epsilon_start - self.epsilon_end) * \
-                math.exp(-1. * self.actions_count / self.epsilon_decay)
-            self.actions_count += 1
-            if random.random() > self.epsilon:
-                with torch.no_grad():
-                    # convert to a tensor before feeding the network; the state elements are originally float64
-                    # note that state=torch.tensor(state).unsqueeze(0) is equivalent to state=torch.tensor([state])
-                    state = torch.tensor(
-                        [state], device=self.device, dtype=torch.float32)
-                    # e.g. tensor([[-0.0798, -0.0079]], grad_fn=<AddmmBackward>)
-                    q_value = self.policy_net(state)
-                    # tensor.max(1) returns the max value of each row and its index,
-                    # e.g. torch.return_types.max(values=tensor([10.3587]), indices=tensor([0])),
-                    # so tensor.max(1)[1] gives the index of the max value, i.e. the action
-                    action = q_value.max(1)[1].item()
-            else:
-                action = random.randrange(self.n_actions)
-            return action
-        else:
-            self.epsilon = self.epsilon_end + (self.epsilon_start - self.epsilon_end) * \
-                math.exp(-1. * self.actions_count / self.epsilon_decay)
-            self.actions_count += 1
-            if random.random() > self.epsilon:
-                with torch.no_grad():
-                    state = torch.tensor(
-                        [state], device='cpu', dtype=torch.float32)
-                    q_value = self.target_net(state)
-                    action = q_value.max(1)[1].item()
-            return action
+        self.epsilon = self.epsilon_end + (self.epsilon_start - self.epsilon_end) * \
+            math.exp(-1. * self.actions_count / self.epsilon_decay)
+        self.actions_count += 1
+        if random.random() > self.epsilon:
+            with torch.no_grad():
+                # convert to a tensor before feeding the network; the state elements are originally float64
+                # note that state=torch.tensor(state).unsqueeze(0) is equivalent to state=torch.tensor([state])
+                state = torch.tensor(
+                    [state], device=self.device, dtype=torch.float32)
+                # e.g. tensor([[-0.0798, -0.0079]], grad_fn=<AddmmBackward>)
+                q_value = self.policy_net(state)
+                # tensor.max(1) returns the max value of each row and its index,
+                # e.g. torch.return_types.max(values=tensor([10.3587]), indices=tensor([0])),
+                # so tensor.max(1)[1] gives the index of the max value, i.e. the action
+                action = q_value.max(1)[1].item()
+        else:
+            action = random.randrange(self.n_actions)
+        return action
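As context for the hunk above: `choose_action` anneals epsilon exponentially in the number of actions taken so far. A standalone sketch of the schedule, plugging in the `epsilon_start=0.9`, `epsilon_end=0.01`, `epsilon_decay=200` values from the config introduced later in this diff:

```python
import math

epsilon_start, epsilon_end, epsilon_decay = 0.9, 0.01, 200

def epsilon_at(step):
    # epsilon decays exponentially from epsilon_start toward epsilon_end
    return epsilon_end + (epsilon_start - epsilon_end) * math.exp(-step / epsilon_decay)

for step in [0, 200, 600, 2000]:
    print(f"step {step:4d}: epsilon = {epsilon_at(step):.3f}")
# step    0: epsilon = 0.900
# step  200: epsilon = 0.337
# step  600: epsilon = 0.054
# step 2000: epsilon = 0.010
```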
     def update(self):
         if len(self.memory) < self.batch_size:
@@ -86,8 +72,7 @@ class DQN:
         # randomly sample a batch of transitions from memory
         state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(
             self.batch_size)
-        # convert to tensors
-        # e.g. tensor([[-4.5543e-02, -2.3910e-01, 1.8344e-02, 2.3158e-01],...,[-1.8615e-02, -2.3921e-01, -1.1791e-02, 2.3400e-01]])
+        ### convert to tensors ###
         state_batch = torch.tensor(
             state_batch, device=self.device, dtype=torch.float)
         action_batch = torch.tensor(action_batch, device=self.device).unsqueeze(
@@ -96,6 +81,7 @@ class DQN:
             reward_batch, device=self.device, dtype=torch.float)  # tensor([1., 1.,...,1])
         next_state_batch = torch.tensor(
             next_state_batch, device=self.device, dtype=torch.float)
+
         done_batch = torch.tensor(np.float32(
             done_batch), device=self.device).unsqueeze(1)  # convert the bools to floats, then to a tensor
@@ -112,7 +98,7 @@ class DQN:
         # for a terminal state, done_batch[0]=1 and the corresponding expected_q_value equals the reward
        q_target = reward_batch + self.gamma * next_q_state_value * (1-done_batch[0])
        '''
-        '''Below is the Double DQNq_target computation, slightly different from NatureDQN'''
+        '''Below is the Double DQN q_target computation, slightly different from Nature DQN'''
         next_target_values = self.target_net(
             next_state_batch)
         # pick the action that maximizes Q(s_t', a) and plug it into next_target_values to get the target net's next_q_value, i.e. Q'(s_t'|a=argmax Q(s_t', a))
@@ -127,8 +113,8 @@ class DQN:
             param.grad.data.clamp_(-1, 1)
         self.optimizer.step()  # update the model
-    def save_model(self, path):
-        torch.save(self.target_net.state_dict(), path)
+    def save(self, path):
+        torch.save(self.target_net.state_dict(), path+'DoubleDQN_checkpoint.pth')
-    def load_model(self, path):
-        self.target_net.load_state_dict(torch.load(path))
+    def load(self, path):
+        self.target_net.load_state_dict(torch.load(path+'DoubleDQN_checkpoint.pth'))
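The algorithmic core of this commit is the q_target step hinted at in the hunk above: the policy net selects the greedy next action, and the target net evaluates it, which is what distinguishes Double DQN from Nature DQN. A standalone sketch of that standard Double DQN target (the tensor shapes are assumptions, since the repo's exact lines are partially elided by the hunk; here `reward_batch` and `done_batch` are 1-D float tensors of length `batch`):

```python
import torch

def double_dqn_target(policy_net, target_net, reward_batch, next_state_batch, done_batch, gamma=0.99):
    """Standard Double DQN target: select with the online net, evaluate with the target net."""
    with torch.no_grad():
        # a* = argmax_a Q_policy(s', a), shape [batch, 1]
        next_actions = policy_net(next_state_batch).max(1)[1].unsqueeze(1)
        # Q_target(s', a*), shape [batch]
        next_q = target_net(next_state_batch).gather(1, next_actions).squeeze(1)
        # terminal transitions (done=1) contribute only their reward
        return reward_batch + gamma * next_q * (1 - done_batch)
```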
@@ -5,37 +5,58 @@
 @Email: johnjim0816@gmail.com
 @Date: 2020-06-12 00:48:57
 @LastEditor: John
-LastEditTime: 2020-12-22 15:39:46
+LastEditTime: 2021-03-17 20:11:19
 @Discription:
 @Environment: python 3.7.7
 '''
 import sys,os
 sys.path.append(os.getcwd())  # add current terminal path
 import gym
 import torch
-from torch.utils.tensorboard import SummaryWriter
-import os
-from agent import DQN
-from params import SEQUENCE,SAVED_MODEL_PATH,RESULT_PATH
-from params import get_args
-from utils import save_results
+import datetime
+from DoubleDQN.agent import DoubleDQN
+from common.plot import plot_rewards
+from common.utils import save_results
-def train(cfg):
+SEQUENCE = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")  # current timestamp
+SAVED_MODEL_PATH = os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"+SEQUENCE+'/'  # path for saved models
+if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"):
+    os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/")
+if not os.path.exists(SAVED_MODEL_PATH):
+    os.mkdir(SAVED_MODEL_PATH)
+RESULT_PATH = os.path.split(os.path.abspath(__file__))[0]+"/results/"+SEQUENCE+'/'  # path for storing rewards
+if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/results/"):
+    os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/results/")
+if not os.path.exists(RESULT_PATH):
+    os.mkdir(RESULT_PATH)
+
+class DoubleDQNConfig:
+    def __init__(self):
+        self.algo = "Double DQN"  # algorithm name
+        self.gamma = 0.99
+        self.epsilon_start = 0.9  # initial epsilon of the e-greedy policy
+        self.epsilon_end = 0.01
+        self.epsilon_decay = 200
+        self.lr = 0.01  # learning rate
+        self.memory_capacity = 10000  # replay memory capacity
+        self.batch_size = 128
+        self.train_eps = 250  # number of training episodes
+        self.train_steps = 200  # max steps per training episode
+        self.target_update = 2  # update frequency of the target net
+        self.eval_eps = 20  # number of evaluation episodes
+        self.eval_steps = 200  # max steps per evaluation episode
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # detect gpu
+        self.hidden_dim = 128  # dimension of the network's hidden layers
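An aside on the added path setup above: the four `exists()`/`mkdir()` pairs can be collapsed. A minimal equivalent sketch (not part of the diff), using `os.makedirs`:

```python
import os
import datetime

SEQUENCE = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
base = os.path.split(os.path.abspath(__file__))[0]
# makedirs creates intermediate directories; exist_ok=True skips the exists() checks
os.makedirs(base + "/saved_model/" + SEQUENCE + '/', exist_ok=True)
os.makedirs(base + "/results/" + SEQUENCE + '/', exist_ok=True)
```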
+def train(cfg, env, agent):
     print('Start to train !')
-    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # detect gpu
-    env = gym.make('CartPole-v0').unwrapped  # see Google for why the gym env is unwrapped; usually not needed here
-    env.seed(1)  # set the env random seed
-    n_states = env.observation_space.shape[0]
-    n_actions = env.action_space.n
-    agent = DQN(n_states=n_states, n_actions=n_actions, device=device, gamma=cfg.gamma, epsilon_start=cfg.epsilon_start,
-                epsilon_end=cfg.epsilon_end, epsilon_decay=cfg.epsilon_decay, policy_lr=cfg.policy_lr, memory_capacity=cfg.memory_capacity, batch_size=cfg.batch_size)
-    rewards = []
-    moving_average_rewards = []
+    rewards, ma_rewards = [], []
-    ep_steps = []
-    log_dir = os.path.split(os.path.abspath(__file__))[0]+"/logs/train/" + SEQUENCE
-    writer = SummaryWriter(log_dir)
-    for i_episode in range(1, cfg.train_eps+1):
+    for i_episode in range(cfg.train_eps):
         state = env.reset()  # reset the environment state
         ep_reward = 0
-        for i_step in range(1, cfg.train_steps+1):
+        for i_step in range(cfg.train_steps):
             action = agent.choose_action(state)  # choose an action given the current state
             next_state, reward, done, _ = env.step(action)  # step the environment
             ep_reward += reward
@@ -47,80 +68,26 @@ def train(cfg):
         # update the target network, copying all of the DQN's weights and biases
         if i_episode % cfg.target_update == 0:
             agent.target_net.load_state_dict(agent.policy_net.state_dict())
-        print('Episode:', i_episode, ' Reward: %i' %
-              int(ep_reward), 'n_steps:', i_step, 'done: ', done, ' Explore: %.2f' % agent.epsilon)
+        print('Episode:{}/{}, Reward:{}, Steps:{}, Done:{}'.format(i_episode+1, cfg.train_eps, ep_reward, i_step, done))
-        ep_steps.append(i_step)
         rewards.append(ep_reward)
         # compute the moving average of the reward
-        if i_episode == 1:
-            moving_average_rewards.append(ep_reward)
+        if ma_rewards:
+            ma_rewards.append(
+                0.9*ma_rewards[-1]+0.1*ep_reward)
         else:
-            moving_average_rewards.append(
-                0.9*moving_average_rewards[-1]+0.1*ep_reward)
-        writer.add_scalars('rewards', {'raw': rewards[-1], 'moving_average': moving_average_rewards[-1]}, i_episode)
-        writer.add_scalar('steps_of_each_episode',
-                          ep_steps[-1], i_episode)
-    writer.close()
+            ma_rewards.append(ep_reward)
     print('Complete training!')
-    ''' save the model '''
-    if not os.path.exists(SAVED_MODEL_PATH):  # check whether the folder exists
-        os.mkdir(SAVED_MODEL_PATH)
-    agent.save_model(SAVED_MODEL_PATH+'checkpoint.pth')
-    print('model saved!')
-    '''save the rewards and related results'''
-    save_results(rewards, moving_average_rewards, ep_steps, tag='train', result_path=RESULT_PATH)
+    return rewards, ma_rewards
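For clarity, the `ma_rewards` bookkeeping introduced above is an exponential moving average with smoothing factor 0.9. A small standalone worked example of the recurrence (not part of the diff):

```python
def smooth(rewards, alpha=0.9):
    """Exponential moving average, as used for ma_rewards above."""
    ma = []
    for r in rewards:
        # first episode seeds the average; afterwards blend 90% old, 10% new
        ma.append(alpha * ma[-1] + (1 - alpha) * r if ma else r)
    return ma

print(smooth([10, 20, 30, 40]))
# [10, 11.0, 12.9, 15.61]
```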
-def eval(cfg, saved_model_path=SAVED_MODEL_PATH):
-    print('start to eval !')
-    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # detect gpu
+if __name__ == "__main__":
+    cfg = DoubleDQNConfig()
     env = gym.make('CartPole-v0').unwrapped  # see Google for why the gym env is unwrapped; usually not needed here
     env.seed(1)  # set the env random seed
     n_states = env.observation_space.shape[0]
     n_actions = env.action_space.n
-    agent = DQN(n_states=n_states, n_actions=n_actions, device=device, gamma=cfg.gamma, epsilon_start=cfg.epsilon_start,
-                epsilon_end=cfg.epsilon_end, epsilon_decay=cfg.epsilon_decay, policy_lr=cfg.policy_lr, memory_capacity=cfg.memory_capacity, batch_size=cfg.batch_size)
-    agent.load_model(saved_model_path+'checkpoint.pth')
-    rewards = []
-    moving_average_rewards = []
-    ep_steps = []
-    log_dir = os.path.split(os.path.abspath(__file__))[0]+"/logs/eval/" + SEQUENCE
-    writer = SummaryWriter(log_dir)
-    for i_episode in range(1, cfg.eval_eps+1):
-        state = env.reset()  # reset the environment state
-        ep_reward = 0
-        for i_step in range(1, cfg.eval_steps+1):
-            action = agent.choose_action(state, train=False)  # choose an action given the current state
-            next_state, reward, done, _ = env.step(action)  # step the environment
-            ep_reward += reward
-            state = next_state  # move to the next state
-            if done:
-                break
-        print('Episode:', i_episode, ' Reward: %i' %
-              int(ep_reward), 'n_steps:', i_step, 'done: ', done)
-        ep_steps.append(i_step)
-        rewards.append(ep_reward)
-        # compute the moving average of the reward
-        if i_episode == 1:
-            moving_average_rewards.append(ep_reward)
-        else:
-            moving_average_rewards.append(
-                0.9*moving_average_rewards[-1]+0.1*ep_reward)
-        writer.add_scalars('rewards', {'raw': rewards[-1], 'moving_average': moving_average_rewards[-1]}, i_episode)
-        writer.add_scalar('steps_of_each_episode',
-                          ep_steps[-1], i_episode)
-    writer.close()
-    '''save the rewards and related results'''
-    save_results(rewards, moving_average_rewards, ep_steps, tag='eval', result_path=RESULT_PATH)
-    print('Complete evaling!')
-
-if __name__ == "__main__":
-    cfg = get_args()
-    if cfg.train:
-        train(cfg)
-        eval(cfg)
-    else:
-        model_path = os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"
-        eval(cfg, saved_model_path=model_path)
+    agent = DoubleDQN(n_states, n_actions, cfg)
+    rewards, ma_rewards = train(cfg, env, agent)
+    agent.save(path=SAVED_MODEL_PATH)
+    save_results(rewards, ma_rewards, tag='train', path=RESULT_PATH)
+    plot_rewards(rewards, ma_rewards, tag="train", algo=cfg.algo, path=RESULT_PATH)
@@ -5,12 +5,11 @@
 @Email: johnjim0816@gmail.com
 @Date: 2020-06-10 15:27:16
 @LastEditor: John
-LastEditTime: 2020-12-22 12:56:27
+LastEditTime: 2021-01-20 18:58:37
 @Discription:
 @Environment: python 3.7.7
 '''
 import random
 import numpy as np

 class ReplayBuffer:
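The hunk above only touches the header of the replay-buffer module. For reference, a minimal `ReplayBuffer` consistent with how the agent uses it (one `push` per transition, `sample` returning five batches, `__len__`) could look like this sketch; the repo's actual implementation may differ:

```python
import random

class ReplayBuffer:
    def __init__(self, capacity):
        self.capacity = capacity  # maximum number of stored transitions
        self.buffer = []
        self.position = 0

    def push(self, state, action, reward, next_state, done):
        # circular buffer: overwrite the oldest entry once capacity is reached
        if len(self.buffer) < self.capacity:
            self.buffer.append(None)
        self.buffer[self.position] = (state, action, reward, next_state, done)
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        batch = random.sample(self.buffer, batch_size)
        # transpose the list of transitions into five batch tuples
        state, action, reward, next_state, done = zip(*batch)
        return state, action, reward, next_state, done

    def __len__(self):
        return len(self.buffer)
```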
@@ -12,13 +12,13 @@ LastEditTime: 2020-08-19 16:55:54
 import torch.nn as nn
 import torch.nn.functional as F

-class FCN(nn.Module):
+class MLP(nn.Module):
     def __init__(self, n_states=4, n_actions=18):
         """ Initialize the Q network as a fully connected network
             n_states: number of input features, i.e. the dimension of the environment state
             n_actions: total number of output actions
         """
-        super(FCN, self).__init__()
+        super(MLP, self).__init__()
         self.fc1 = nn.Linear(n_states, 128)  # input layer
         self.fc2 = nn.Linear(128, 128)  # hidden layer
         self.fc3 = nn.Linear(128, n_actions)  # output layer
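The agent now constructs `MLP2(n_states, n_actions, hidden_dim=cfg.hidden_dim)`, but the hunk above only shows the renamed `MLP`. A plausible `MLP2` inferred from the call sites (an assumption; the real definition lives in common/model.py):

```python
import torch.nn as nn
import torch.nn.functional as F

class MLP2(nn.Module):
    def __init__(self, n_states, n_actions, hidden_dim=128):
        """Q network: two hidden layers of width hidden_dim."""
        super(MLP2, self).__init__()
        self.fc1 = nn.Linear(n_states, hidden_dim)    # input layer
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)  # hidden layer
        self.fc3 = nn.Linear(hidden_dim, n_actions)   # output layer

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)  # one Q value per action
```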
@@ -5,7 +5,7 @@ Author: John
 Email: johnjim0816@gmail.com
 Date: 2020-12-22 15:22:17
 LastEditor: John
-LastEditTime: 2020-12-22 15:26:09
+LastEditTime: 2021-01-21 14:30:38
 Discription:
 Environment:
 '''
@@ -16,7 +16,10 @@ import argparse
 ALGO_NAME = 'Double DQN'
 SEQUENCE = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
 SAVED_MODEL_PATH = os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"+SEQUENCE+'/'
-RESULT_PATH = os.path.split(os.path.abspath(__file__))[0]+"/result/"+SEQUENCE+'/'
+RESULT_PATH = os.path.split(os.path.abspath(__file__))[0]+"/results/"+SEQUENCE+'/'
+
+TRAIN_LOG_DIR = os.path.split(os.path.abspath(__file__))[0]+"/logs/train/" + SEQUENCE
+EVAL_LOG_DIR = os.path.split(os.path.abspath(__file__))[0]+"/logs/eval/" + SEQUENCE

 def get_args():
     '''model parameters'''
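The hunk cuts off right at `get_args`, and the README invokes `python main.py --train 0`. A sketch of the flag that implies (the flag name comes from the README; everything else here is an assumption):

```python
import argparse

def get_args():
    '''model parameters'''
    parser = argparse.ArgumentParser()
    # --train 1 trains then evaluates; --train 0 only evaluates a saved model
    parser.add_argument("--train", default=1, type=int)
    return parser.parse_args()
```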
@@ -24,14 +24,14 @@ def plot(item,ylabel='rewards_train', save_fig = True):
     plt.ylabel(ylabel)
     plt.xlabel('episodes')
     if save_fig:
-        plt.savefig(os.path.dirname(__file__)+"/result/"+ylabel+".png")
-    plt.show()
+        plt.savefig(os.path.dirname(__file__)+"/results/"+ylabel+".png")
+    # plt.show()

 if __name__ == "__main__":
-    output_path = os.path.split(os.path.abspath(__file__))[0]+"/result/"
+    output_path = os.path.split(os.path.abspath(__file__))[0]+"/results/"
     tag = 'train'
     rewards = np.load(output_path+"rewards_"+tag+".npy")
     moving_average_rewards = np.load(output_path+"moving_average_rewards_"+tag+".npy")
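main.py now calls `plot_rewards(rewards, ma_rewards, tag=..., algo=..., path=...)` from common.plot, which this diff does not show. A minimal sketch matching that call signature and the `rewards_curve_train.png` artifact listed below (an assumption, not the repo's exact code):

```python
import matplotlib.pyplot as plt

def plot_rewards(rewards, ma_rewards, tag="train", algo="Double DQN", path='./results/'):
    plt.figure()
    plt.title("{} rewards curve of {}".format(tag, algo))
    plt.xlabel('episodes')
    plt.plot(rewards, label='rewards')
    plt.plot(ma_rewards, label='ma rewards')  # smoothed curve
    plt.legend()
    plt.savefig(path + "rewards_curve_{}.png".format(tag))
    plt.show()
```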
Binary files changed under codes/DoubleDQN/results/:
  6 old result images removed (28 KiB, 39 KiB, 23 KiB, 57 KiB, 22 KiB, 56 KiB)
  codes/DoubleDQN/results/20210317-010120/ma_rewards_train.npy    (new file)
  codes/DoubleDQN/results/20210317-010120/rewards_curve_train.png (new file, 74 KiB)
  codes/DoubleDQN/results/20210317-010120/rewards_train.npy       (new file)
@@ -13,7 +13,7 @@ import os
 import numpy as np

-def save_results(rewards, moving_average_rewards, ep_steps, tag='train', result_path='./result'):
+def save_results(rewards, moving_average_rewards, ep_steps, tag='train', result_path='./results'):
     if not os.path.exists(result_path):  # check whether the folder exists
         os.mkdir(result_path)
     np.save(result_path+'rewards_'+tag+'.npy', rewards)