update DQN

codes/dqn/.vscode/settings.json (vendored)
@@ -1,3 +0,0 @@
-{
-    "python.pythonPath": "/Users/jj/anaconda3/envs/py37/bin/python"
-}

@@ -1,3 +1,8 @@
+## Idea
+
+See [my blog](https://blog.csdn.net/JohnJim0/article/details/109557173)
+
+## Environment
 python 3.7.9
 
 pytorch 1.6.0
@@ -6,6 +11,7 @@ tensorboard 2.3.0
 
 torchvision 0.7.0
 
+## Usage
 train:
 
@@ -18,7 +24,12 @@ eval:
 ```python
 python main.py --train 0
 ```
+Visualization:
 ```python
 tensorboard --logdir logs
 ```
+
+## Torch notes
+
+[with torch.no_grad()](https://www.jianshu.com/p/1cea017f5d11)
 

@@ -5,7 +5,7 @@
 @Email: johnjim0816@gmail.com
 @Date: 2020-06-12 00:50:49
 @LastEditor: John
-LastEditTime: 2020-10-15 21:56:21
+LastEditTime: 2020-11-22 11:12:30
 @Discription:
 @Environment: python 3.7.7
 '''
@@ -24,11 +24,12 @@ from memory import ReplayBuffer
 from model import FCN
 class DQN:
     def __init__(self, n_states, n_actions, gamma=0.99, epsilon_start=0.9, epsilon_end=0.05, epsilon_decay=200, memory_capacity=10000, policy_lr=0.01, batch_size=128, device="cpu"):
-        self.actions_count = 0
         self.n_actions = n_actions  # total number of actions
        self.device = device  # device: cpu, gpu, etc.
-        self.gamma = gamma
+        self.gamma = gamma  # discount factor for rewards
         # parameters of the e-greedy policy
+        self.actions_count = 0  # action counter, used for epsilon decay
         self.epsilon = 0
         self.epsilon_start = epsilon_start
         self.epsilon_end = epsilon_end
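The constructor above only stores the e-greedy parameters; the decay itself happens in `choose_action`, outside this hunk. As a rough sketch of what these defaults imply, assuming the common exponential schedule (the repository's exact formula may differ):

```python
import math

# Hypothetical sketch of the epsilon schedule implied by the defaults above
# (epsilon_start=0.9, epsilon_end=0.05, epsilon_decay=200).
def epsilon_by_count(actions_count, start=0.9, end=0.05, decay=200):
    # exponential decay from `start` toward `end` as more actions are taken
    return end + (start - end) * math.exp(-actions_count / decay)

print(epsilon_by_count(0))     # ~0.90: almost always explore at the start
print(epsilon_by_count(200))   # ~0.36: noticeably decayed
print(epsilon_by_count(1000))  # ~0.06: near the floor, mostly greedy
```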
@@ -67,12 +68,11 @@ class DQN:
             action = random.randrange(self.n_actions)
             return action
         else:
-            with torch.no_grad():
+            with torch.no_grad():  # disable gradient tracking here
                 # convert to a tensor before feeding it to the network; the state elements are originally float64
                 # note that state=torch.tensor(state).unsqueeze(0) is equivalent to state=torch.tensor([state])
                 state = torch.tensor(
-                    [state], device='cpu', dtype=torch.float32)
-                # e.g. tensor([[-0.0798, -0.0079]], grad_fn=<AddmmBackward>)
+                    [state], device='cpu', dtype=torch.float32)  # e.g. tensor([[-0.0798, -0.0079]], grad_fn=<AddmmBackward>)
                 q_value = self.target_net(state)
                 # tensor.max(1) returns the max value of each row together with its index,
                 # e.g. torch.return_types.max(values=tensor([10.3587]), indices=tensor([0]))
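The `tensor.max(1)` behavior described in those comments, as a self-contained illustration (Q-values invented for the example):

```python
import torch

q_value = torch.tensor([[10.3587, 2.1000]])  # invented Q-values for 2 actions
best = q_value.max(1)         # max over dim=1, i.e. over actions per row
print(best.values)            # tensor([10.3587]) -> highest Q-value
print(best.indices)           # tensor([0])       -> index of the greedy action
action = best.indices.item()  # plain Python int, ready for env.step(action)
```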
@@ -86,8 +86,8 @@ class DQN:
         # randomly sample a batch of transitions from memory
         state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(
             self.batch_size)
-        # convert to tensors
-        # e.g. tensor([[-4.5543e-02, -2.3910e-01, 1.8344e-02, 2.3158e-01],...,[-1.8615e-02, -2.3921e-01, -1.1791e-02, 2.3400e-01]])
+        '''convert to tensors
+        e.g. tensor([[-4.5543e-02, -2.3910e-01, 1.8344e-02, 2.3158e-01],...,[-1.8615e-02, -2.3921e-01, -1.1791e-02, 2.3400e-01]])'''
         state_batch = torch.tensor(
             state_batch, device=self.device, dtype=torch.float)
         action_batch = torch.tensor(action_batch, device=self.device).unsqueeze(
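The `unsqueeze(1)` calls in this hunk and the next are load-bearing: `action_batch` must be a `(batch, 1)` column to serve as a `gather` index, and `done_batch` must line up with the `(batch, 1)` Q-value columns. A small shape check with invented values:

```python
import numpy as np
import torch

actions = [1, 0, 1]           # invented sampled actions
dones = [False, False, True]  # invented episode-termination flags

action_batch = torch.tensor(actions).unsqueeze(1)          # (3,) -> (3, 1), a valid gather index
done_batch = torch.tensor(np.float32(dones)).unsqueeze(1)  # bools -> 0.0/1.0, shape (3, 1)
print(action_batch.shape, done_batch.shape)  # torch.Size([3, 1]) torch.Size([3, 1])
```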
@@ -99,9 +99,8 @@ class DQN:
         done_batch = torch.tensor(np.float32(
             done_batch), device=self.device).unsqueeze(1)  # cast bool to float, then to a tensor
 
-        # compute Q(s_t, a) for the current (s_t, a) pairs
-        # about torch.gather: for a=torch.Tensor([[1,2],[3,4]])
-        # a.gather(1,torch.Tensor([[0],[1]]))=torch.Tensor([[1],[3]])
+        '''compute Q(s_t, a) for the current (s_t, a) pairs'''
+        '''torch.gather: for a=torch.Tensor([[1,2],[3,4]]), a.gather(1,torch.Tensor([[0],[1]]))=torch.Tensor([[1],[3]])'''
         q_values = self.policy_net(state_batch).gather(
             dim=1, index=action_batch)  # equivalent to self.forward
         # compute V(s_{t+1}) for all next states, i.e. the max value predicted by target_net for those states
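Expanding the `torch.gather` one-liner above: `gather` picks one entry per row by index, which is exactly how the Q-value of the action actually taken is selected. Note that the index must be an integer tensor, so the `torch.Tensor([[0],[1]])` in the comment is shorthand rather than literally runnable:

```python
import torch

a = torch.tensor([[1., 2.], [3., 4.]])
idx = torch.tensor([[0], [1]])  # one column index per row; must be an integer tensor
print(a.gather(1, idx))         # tensor([[1.], [3.]])

# The same pattern on an invented Q-value batch: pick, for each transition,
# the Q-value of the action that was actually taken.
q_all = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.4, 0.6]])  # (batch, n_actions)
actions = torch.tensor([[1], [0], [1]])                     # (batch, 1), like action_batch
print(q_all.gather(1, actions))  # tensor([[0.9000], [0.8000], [0.6000]])
```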
@@ -119,6 +118,7 @@ class DQN:
         self.loss.backward()
         for param in self.policy_net.parameters():  # clip to prevent exploding gradients
             param.grad.data.clamp_(-1, 1)
 
         self.optimizer.step()  # update the model
 
     def save_model(self,path):
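For reference, the clip loop clamps each gradient element into [-1, 1] in place, after `backward()` and before `optimizer.step()`. A standalone sketch with a toy network (PyTorch also ships `torch.nn.utils.clip_grad_value_` for the same effect):

```python
import torch

net = torch.nn.Linear(4, 2)                  # toy stand-in for policy_net
loss = net(torch.randn(8, 4)).pow(2).mean()  # invented loss, just to get gradients
loss.backward()
for param in net.parameters():
    param.grad.data.clamp_(-1, 1)            # element-wise in-place clip, as in the diff
# built-in equivalent:
# torch.nn.utils.clip_grad_value_(net.parameters(), clip_value=1)
```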

@@ -5,7 +5,7 @@
 @Email: johnjim0816@gmail.com
 @Date: 2020-06-12 00:48:57
 @LastEditor: John
-LastEditTime: 2020-10-15 22:00:28
+LastEditTime: 2020-11-23 11:58:17
 @Discription:
 @Environment: python 3.7.7
 '''
@@ -16,7 +16,7 @@ import argparse
 from torch.utils.tensorboard import SummaryWriter
 import datetime
 import os
-from utils import save_results
+from utils import save_results,save_model
 
 SEQUENCE = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
 SAVED_MODEL_PATH = os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"+SEQUENCE+'/'
@@ -53,7 +53,7 @@ def get_args():
 def train(cfg):
     print('Start to train ! \n')
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # detect gpu
-    env = gym.make('CartPole-v0').unwrapped  # you can google why gym envs get unwrapped; usually not needed here
+    env = gym.make('CartPole-v0')
     env.seed(1)  # set the random seed for the env
     n_states = env.observation_space.shape[0]
     n_actions = env.action_space.n
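Dropping `.unwrapped` is a behavioral change, not just cleanup: `CartPole-v0` normally comes wrapped in a `TimeLimit` that truncates episodes at 200 steps, and `.unwrapped` strips that cap. A quick way to see the difference under the classic gym API used here:

```python
import gym

env = gym.make('CartPole-v0')  # wrapped: episodes are truncated
print(type(env))               # <class 'gym.wrappers.time_limit.TimeLimit'>
print(env._max_episode_steps)  # 200 for CartPole-v0 (private attribute of the wrapper)
print(type(env.unwrapped))     # the raw CartPoleEnv, with no step cap
```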
@@ -95,10 +95,7 @@ def train(cfg):
     writer.close()
     print('Complete training!')
     '''save the model'''
-    if not os.path.exists(SAVED_MODEL_PATH):  # check whether the folder exists
-        os.mkdir(SAVED_MODEL_PATH)
-    agent.save_model(SAVED_MODEL_PATH+'checkpoint.pth')
-    print('model saved!')
+    save_model(agent,model_path=SAVED_MODEL_PATH)
     '''save rewards and related results'''
     save_results(rewards,moving_average_rewards,ep_steps,tag='train',result_path=RESULT_PATH)
 
@@ -110,7 +107,7 @@ def eval(cfg, saved_model_path = SAVED_MODEL_PATH):
     env.seed(1)  # set the random seed for the env
     n_states = env.observation_space.shape[0]
     n_actions = env.action_space.n
-    agent = DQN(n_states=n_states, n_actions=n_actions, device=device, gamma=cfg.gamma, epsilon_start=cfg.epsilon_start,
+    agent = DQN(n_states=n_states, n_actions=n_actions, device="cpu", gamma=cfg.gamma, epsilon_start=cfg.epsilon_start,
                 epsilon_end=cfg.epsilon_end, epsilon_decay=cfg.epsilon_decay, policy_lr=cfg.policy_lr, memory_capacity=cfg.memory_capacity, batch_size=cfg.batch_size)
     agent.load_model(saved_model_path+'checkpoint.pth')
     rewards = []
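Pinning eval to `device="cpu"` only affects where the network lives; if the checkpoint was written from a GPU run, the load side typically also needs a `map_location`. A hypothetical sketch of what `load_model` would have to do (its real implementation is outside this diff):

```python
import torch

# Hypothetical load_model body; the actual method in this repo is not shown here.
# map_location='cpu' lets a checkpoint saved on a GPU machine load on a CPU-only box.
def load_model(self, path):
    state_dict = torch.load(path, map_location='cpu')
    self.policy_net.load_state_dict(state_dict)
    self.target_net.load_state_dict(state_dict)
```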

@@ -5,7 +5,7 @@
 @Email: johnjim0816@gmail.com
 @Date: 2020-06-11 16:30:09
 @LastEditor: John
-LastEditTime: 2020-10-15 22:01:50
+LastEditTime: 2020-11-23 13:48:31
 @Discription:
 @Environment: python 3.7.7
 '''
@@ -27,18 +27,6 @@ def plot(item,ylabel='rewards_train', save_fig = True):
     plt.savefig(os.path.dirname(__file__)+"/result/"+ylabel+".png")
     plt.show()
 
-# def plot(item,ylabel='rewards'):
-#
-#     df = pd.DataFrame(dict(time=np.arange(len(item)),value=item))
-#     g = sns.relplot(x="time", y="value", kind="line", data=df)
-#     # g.fig.autofmt_xdate()
-#     # sns.lineplot(time=time, data=item, color="r", condition="behavior_cloning")
-#     # # sns.tsplot(time=time, data=x2, color="b", condition="dagger")
-#     # # plt.ylabel("Reward")
-#     # # plt.xlabel("Iteration Number")
-#     # # plt.title("Imitation Learning")
-
-#     # plt.show()
 if __name__ == "__main__":
 
     output_path = os.path.split(os.path.abspath(__file__))[0]+"/result/"

@@ -5,7 +5,7 @@ Author: John
 Email: johnjim0816@gmail.com
 Date: 2020-10-15 21:28:00
 LastEditor: John
-LastEditTime: 2020-10-15 21:50:30
+LastEditTime: 2020-10-30 16:56:55
 Discription:
 Environment:
 '''
@@ -14,8 +14,17 @@ import numpy as np
 
 
 def save_results(rewards,moving_average_rewards,ep_steps,tag='train',result_path='./result'):
+    '''save rewards and related results
+    '''
     if not os.path.exists(result_path):  # check whether the folder exists
         os.mkdir(result_path)
     np.save(result_path+'rewards_'+tag+'.npy', rewards)
     np.save(result_path+'moving_average_rewards_'+tag+'.npy', moving_average_rewards)
     np.save(result_path+'steps_'+tag+'.npy',ep_steps )
+    print('results saved!')
+
+def save_model(agent,model_path='./saved_model'):
+    if not os.path.exists(model_path):  # check whether the folder exists
+        os.mkdir(model_path)
+    agent.save_model(model_path+'checkpoint.pth')
+    print('model saved!')
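One caveat in the new helper: `model_path` is concatenated directly with `'checkpoint.pth'`, which only works when callers pass a trailing slash (as `SAVED_MODEL_PATH` in main.py does), and `os.mkdir` cannot create the nested `saved_model/<timestamp>/` path if the parent is missing. A more defensive variant of the same helper, as a sketch:

```python
import os

def save_model(agent, model_path='./saved_model'):
    # makedirs creates nested directories; os.mkdir would fail on saved_model/<timestamp>/
    os.makedirs(model_path, exist_ok=True)
    # os.path.join is safe with or without a trailing slash on model_path
    agent.save_model(os.path.join(model_path, 'checkpoint.pth'))
    print('model saved!')
```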