update
@@ -5,7 +5,7 @@
 @Email: johnjim0816@gmail.com
 @Date: 2020-06-12 00:50:49
 @LastEditor: John
-LastEditTime: 2021-09-15 02:18:56
+LastEditTime: 2021-09-15 13:35:36
 @Discription:
 @Environment: python 3.7.7
 '''
@@ -50,7 +50,7 @@ class DQN:
             with torch.no_grad():
                 state = torch.tensor([state], device=self.device, dtype=torch.float32)
                 q_values = self.policy_net(state)
-                action = q_values.max(1)[1].item()
+                action = q_values.max(1)[1].item()  # choose the action with the largest Q value
         else:
             action = random.randrange(self.action_dim)
         return action
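A note on the idiom above: q_values.max(1)[1].item() reduces over dim 1 (the action dimension), takes the indices of the maxima, and unwraps the single greedy action as a Python int. A minimal sketch with a made-up 4-action Q-value row:

import torch

q_values = torch.tensor([[0.1, -0.3, 0.7, 0.2]])  # hypothetical Q values for one state
values, indices = q_values.max(1)  # max over the action dimension returns (values, indices)
action = indices.item()            # equivalent to q_values.max(1)[1].item()
print(action)                      # 2, the index of the largest Q value (0.7)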
@@ -61,45 +61,33 @@ class DQN:
             action = q_values.max(1)[1].item()
         return action

     def update(self):
-        if len(self.memory) < self.batch_size:
+        if len(self.memory) < self.batch_size:  # do not update the policy until the memory can fill one batch
             return
-        # randomly sample transitions from the memory
+        # randomly sample a batch of transitions from the replay memory
         state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(
             self.batch_size)
-        '''convert to tensors,
-        e.g. tensor([[-4.5543e-02, -2.3910e-01, 1.8344e-02, 2.3158e-01],...,[-1.8615e-02, -2.3921e-01, -1.1791e-02, 2.3400e-01]])'''
+        # convert to tensors
         state_batch = torch.tensor(
             state_batch, device=self.device, dtype=torch.float)
         action_batch = torch.tensor(action_batch, device=self.device).unsqueeze(
-            1)  # e.g. tensor([[1],...,[0]])
+            1)
         reward_batch = torch.tensor(
-            reward_batch, device=self.device, dtype=torch.float)  # e.g. tensor([1., 1.,...,1])
+            reward_batch, device=self.device, dtype=torch.float)
         next_state_batch = torch.tensor(
             next_state_batch, device=self.device, dtype=torch.float)
         done_batch = torch.tensor(np.float32(
             done_batch), device=self.device)

-        '''compute Q(s_t, a) for the current (s_t, a) pairs'''
-        '''torch.gather: for a=torch.Tensor([[1,2],[3,4]]), a.gather(1,torch.Tensor([[0],[1]]))=torch.Tensor([[1],[3]])'''
-        q_values = self.policy_net(state_batch).gather(
-            dim=1, index=action_batch)  # equivalent to self.forward
-        # compute V(s_{t+1}) for all next states, i.e. the largest Q value per state from target_net
-        next_q_values = self.target_net(next_state_batch).max(
-            1)[0].detach()  # e.g. tensor([ 0.0060, -0.0171,...,])
-        # compute expected_q_value
-        # for a terminal state done_batch[0]=1, so its expected_q_value equals the reward
-        expected_q_values = reward_batch + \
-            self.gamma * next_q_values * (1-done_batch)
-        # self.loss = F.smooth_l1_loss(q_values, expected_q_values.unsqueeze(1))  # Huber loss
-        loss = nn.MSELoss()(q_values, expected_q_values.unsqueeze(1))  # mean squared error loss
-        # optimize the model
-        self.optimizer.zero_grad()  # zero_grad clears all the old gradients from the last step
-        # loss.backward() uses backpropagation to compute the gradient of the loss w.r.t. all parameters (that require gradients)
+        q_values = self.policy_net(state_batch).gather(dim=1, index=action_batch)  # compute Q(s_t, a) for the current states
+        next_q_values = self.target_net(next_state_batch).max(1)[0].detach()  # compute the Q values of the next states
+        # compute the expected Q values; for a terminal state done_batch[0]=1, so its expected_q_value equals the reward
+        expected_q_values = reward_batch + self.gamma * next_q_values * (1-done_batch)
+        loss = nn.MSELoss()(q_values, expected_q_values.unsqueeze(1))  # mean squared error loss
+        # optimize and update the model
+        self.optimizer.zero_grad()
         loss.backward()
-        # for param in self.policy_net.parameters():  # clip to prevent exploding gradients
-        #     param.grad.data.clamp_(-1, 1)
-        self.optimizer.step()  # update the model
+        for param in self.policy_net.parameters():  # clip to prevent exploding gradients
+            param.grad.data.clamp_(-1, 1)
+        self.optimizer.step()

     def save(self, path):
         torch.save(self.target_net.state_dict(), path+'dqn_checkpoint.pth')
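The rewritten update() boils down to three tensor steps: gather() picks Q(s_t, a) for the actions actually taken, the target network's row-wise max estimates the value of the next state, and the (1-done_batch) mask zeroes the bootstrap term at terminal states so the target reduces to the reward. A runnable sketch on a toy batch (all numbers made up for illustration):

import torch
import torch.nn as nn

gamma = 0.95
q_all = torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])  # stand-in for policy_net(state_batch)
actions = torch.tensor([[0], [1], [0]])                     # shape (batch, 1), like action_batch
rewards = torch.tensor([1.0, 1.0, 1.0])
next_q_max = torch.tensor([2.5, 0.5, 7.0])                  # stand-in for target_net(...).max(1)[0]
dones = torch.tensor([0.0, 0.0, 1.0])                       # 1 marks a terminal transition

q_values = q_all.gather(dim=1, index=actions)               # tensor([[1.], [4.], [5.]])
expected = rewards + gamma * next_q_max * (1 - dones)       # tensor([3.3750, 1.4750, 1.0000])
loss = nn.MSELoss()(q_values, expected.unsqueeze(1))

The commented-out F.smooth_l1_loss in the old version is the Huber-loss alternative to the MSE kept here; it is less sensitive to outlier targets.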
[binary files changed (not shown); two images added: 34 KiB and 45 KiB]
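Before moving to the training script: update() above assumes self.memory exposes sample(batch_size) returning five parallel sequences. The buffer itself is not part of this diff; a minimal ring-buffer sketch compatible with that call (the repo's actual implementation may differ) is:

import random

class ReplayBuffer:
    def __init__(self, capacity):
        self.capacity = capacity  # maximum number of stored transitions
        self.buffer = []
        self.position = 0

    def push(self, state, action, reward, next_state, done):
        # overwrite the oldest transition once the buffer is full
        if len(self.buffer) < self.capacity:
            self.buffer.append(None)
        self.buffer[self.position] = (state, action, reward, next_state, done)
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        batch = random.sample(self.buffer, batch_size)
        # transpose the list of tuples into five parallel tuples
        return zip(*batch)

    def __len__(self):
        return len(self.buffer)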
@@ -5,7 +5,7 @@
 @Email: johnjim0816@gmail.com
 @Date: 2020-06-12 00:48:57
 @LastEditor: John
-LastEditTime: 2021-09-15 02:19:54
+LastEditTime: 2021-09-15 15:34:13
 @Discription:
 @Environment: python 3.7.7
 '''
@@ -19,7 +19,7 @@ import torch
 import datetime

 from common.utils import save_results, make_dir
-from common.plot import plot_rewards
+from common.plot import plot_rewards,plot_rewards_cn
 from DQN.agent import DQN

 curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")  # get the current time
@@ -29,21 +29,21 @@ class DQNConfig:
         self.algo = "DQN"  # algorithm name
         self.env = 'CartPole-v0'  # environment name
         self.result_path = curr_path+"/outputs/" + self.env + \
             '/'+curr_time+'/results/'  # path to save results
         self.model_path = curr_path+"/outputs/" + self.env + \
             '/'+curr_time+'/models/'  # path to save models
         self.train_eps = 200  # number of training episodes
         self.eval_eps = 30  # number of evaluation episodes
-        self.gamma = 0.95
+        self.gamma = 0.95  # discount factor in RL
         self.epsilon_start = 0.90  # initial epsilon for the e-greedy policy
         self.epsilon_end = 0.01  # final epsilon for the e-greedy policy
         self.epsilon_decay = 500  # decay rate of epsilon in the e-greedy policy
         self.lr = 0.0001  # learning rate
-        self.memory_capacity = 100000  # capacity of Replay Memory
-        self.batch_size = 64
+        self.memory_capacity = 100000  # capacity of the experience replay
+        self.batch_size = 64  # batch size for mini-batch SGD
         self.target_update = 4  # update frequency of the target network
         self.device = torch.device(
-            "cuda" if torch.cuda.is_available() else "cpu")  # jian che
+            "cuda" if torch.cuda.is_available() else "cpu")  # detect GPU
         self.hidden_dim = 256  # hidden size of the network

 def env_agent_config(cfg,seed=1):
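epsilon_start, epsilon_end, and epsilon_decay above parameterize the e-greedy exploration schedule. The agent code that consumes them is not shown in this diff, but a common choice matching these field names is an exponential decay from epsilon_start toward epsilon_end with time constant epsilon_decay:

import math

def epsilon_by_frame(frame_idx, start=0.90, end=0.01, decay=500):
    # decays from `start` at frame 0 toward `end` as frame_idx grows
    return end + (start - end) * math.exp(-frame_idx / decay)

print(epsilon_by_frame(0))     # 0.90: explore almost always at the start
print(epsilon_by_frame(500))   # ~0.34
print(epsilon_by_frame(5000))  # ~0.01: act greedily almost always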
@@ -55,10 +55,10 @@ def env_agent_config(cfg,seed=1):
     return env,agent

 def train(cfg, env, agent):
-    print('Start to train !')
-    print(f'Env: {cfg.env}, Algorithm: {cfg.algo}, Device: {cfg.device}')
-    rewards = []
-    ma_rewards = []  # moving average reward
+    print('Start training!')
+    print(f'Env: {cfg.env}, Algorithm: {cfg.algo}, Device: {cfg.device}')
+    rewards = []  # record the rewards
+    ma_rewards = []  # record the moving-average rewards
     for i_ep in range(cfg.train_eps):
         state = env.reset()
         done = False
@@ -75,19 +75,19 @@ def train(cfg, env, agent):
         if (i_ep+1) % cfg.target_update == 0:
             agent.target_net.load_state_dict(agent.policy_net.state_dict())
         if (i_ep+1)%10 == 0:
             print('Episode:{}/{}, Reward:{}'.format(i_ep+1, cfg.train_eps, ep_reward))
         rewards.append(ep_reward)
         # save ma_rewards
         if ma_rewards:
             ma_rewards.append(0.9*ma_rewards[-1]+0.1*ep_reward)
         else:
             ma_rewards.append(ep_reward)
-    print('Complete training!')
+    print('Training finished!')
     return rewards, ma_rewards

 def eval(cfg,env,agent):
-    print('Start to eval !')
-    print(f'Env: {cfg.env}, Algorithm: {cfg.algo}, Device: {cfg.device}')
+    print('Start evaluating!')
+    print(f'Env: {cfg.env}, Algorithm: {cfg.algo}, Device: {cfg.device}')
     rewards = []
     ma_rewards = []  # moving average rewards
     for i_ep in range(cfg.eval_eps):
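The ma_rewards list above is an exponential moving average with smoothing factor 0.9: each entry keeps 90% of the previous average and mixes in 10% of the new episode reward, which damps the episode-to-episode noise in the reward curve. On a toy reward sequence:

rewards = [10.0, 30.0, 20.0, 40.0]
ma_rewards = []
for ep_reward in rewards:
    if ma_rewards:
        ma_rewards.append(0.9 * ma_rewards[-1] + 0.1 * ep_reward)
    else:
        ma_rewards.append(ep_reward)  # seed the average with the first reward
print(ma_rewards)  # approximately [10.0, 12.0, 12.8, 15.52]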
@@ -105,24 +105,23 @@ def eval(cfg,env,agent):
             ma_rewards.append(ma_rewards[-1]*0.9+ep_reward*0.1)
         else:
             ma_rewards.append(ep_reward)
-        print(f"Episode:{i_ep+1}/{cfg.eval_eps}, reward:{ep_reward:.1f}")
-    print('Complete evaling!')
+        print(f"Episode:{i_ep+1}/{cfg.eval_eps}, Reward:{ep_reward:.1f}")
+    print('Evaluation finished!')
     return rewards,ma_rewards

 if __name__ == "__main__":
     cfg = DQNConfig()

     # train
     env,agent = env_agent_config(cfg,seed=1)
     rewards, ma_rewards = train(cfg, env, agent)
     make_dir(cfg.result_path, cfg.model_path)
     agent.save(path=cfg.model_path)
     save_results(rewards, ma_rewards, tag='train', path=cfg.result_path)
-    plot_rewards(rewards, ma_rewards, tag="train",
+    plot_rewards_cn(rewards, ma_rewards, tag="train",
                  algo=cfg.algo, path=cfg.result_path)
     # eval
     env,agent = env_agent_config(cfg,seed=10)
     agent.load(path=cfg.model_path)
     rewards,ma_rewards = eval(cfg,env,agent)
     save_results(rewards,ma_rewards,tag='eval',path=cfg.result_path)
-    plot_rewards(rewards,ma_rewards,tag="eval",env=cfg.env,algo = cfg.algo,path=cfg.result_path)
+    plot_rewards_cn(rewards,ma_rewards,tag="eval",env=cfg.env,algo = cfg.algo,path=cfg.result_path)