update codes
codes/HierarchicalDQN/agent.py
@@ -11,23 +11,62 @@ Environment:
'''
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import random,math
import torch.optim as optim
from common.model import MLP
from common.memory import ReplayBuffer

class ReplayBuffer:
    def __init__(self, capacity):
        self.capacity = capacity # capacity of the replay buffer
        self.buffer = [] # buffer storage
        self.position = 0

    def push(self, state, action, reward, next_state, done):
        ''' The buffer works as a queue: once the capacity is exceeded, the oldest stored transition is overwritten.
        '''
        if len(self.buffer) < self.capacity:
            self.buffer.append(None)
        self.buffer[self.position] = (state, action, reward, next_state, done)
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        batch = random.sample(self.buffer, batch_size) # randomly sample a mini-batch of transitions
        state, action, reward, next_state, done = zip(*batch) # unzip into states, actions, etc.
        return state, action, reward, next_state, done

    def __len__(self):
        ''' Return the number of transitions currently stored.
        '''
        return len(self.buffer)
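
# Usage sketch for the buffer above (editor's illustration with made-up values, not part of agent.py or
# of this commit): push() appends until `capacity` is reached, then overwrites the oldest slot in a
# circular fashion; sample() draws a random mini-batch of stored transitions.
_example_buffer = ReplayBuffer(capacity=4)
for _i in range(6):
    _example_buffer.push(state=[_i], action=0, reward=1.0, next_state=[_i + 1], done=False)
assert len(_example_buffer) == 4  # the two oldest transitions were overwritten
_states, _actions, _rewards, _next_states, _dones = _example_buffer.sample(batch_size=2)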
class MLP(nn.Module):
    def __init__(self, input_dim,output_dim,hidden_dim=128):
        """ Initialize the Q-network as a fully connected network
            input_dim: number of input features, i.e. the dimension of the environment state
            output_dim: dimension of the action output
        """
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim) # input layer
        self.fc2 = nn.Linear(hidden_dim,hidden_dim) # hidden layer
        self.fc3 = nn.Linear(hidden_dim, output_dim) # output layer

    def forward(self, x):
        # activation function for each layer
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)
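
# Architecture note (editor's sketch, not part of agent.py or of this commit): in hierarchical DQN the
# low-level controller is goal-conditioned, so its MLP input is the state concatenated with a one-hot
# goal vector (hence 2 * n_states in the constructor below), while the meta-controller maps a state to a
# goal index. The dimensions here are assumed CartPole values.
_n_states, _n_actions = 4, 2
_controller = MLP(input_dim=2 * _n_states, output_dim=_n_actions, hidden_dim=128)
_meta_controller = MLP(input_dim=_n_states, output_dim=_n_states, hidden_dim=128)
_q = _controller(torch.randn(1, 2 * _n_states))  # shape (1, _n_actions): Q-values of the goal-conditioned state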

class HierarchicalDQN:
    def __init__(self,state_dim,action_dim,cfg):
        self.state_dim = state_dim
        self.action_dim = action_dim
    def __init__(self,n_states,n_actions,cfg):
        self.n_states = n_states
        self.n_actions = n_actions
        self.gamma = cfg.gamma
        self.device = cfg.device
        self.batch_size = cfg.batch_size
        self.frame_idx = 0
        self.frame_idx = 0 # counter used for epsilon decay
        self.epsilon = lambda frame_idx: cfg.epsilon_end + (cfg.epsilon_start - cfg.epsilon_end ) * math.exp(-1. * frame_idx / cfg.epsilon_decay) # epsilon decays exponentially from epsilon_start to epsilon_end
        self.policy_net = MLP(2*state_dim, action_dim,cfg.hidden_dim).to(self.device)
        self.meta_policy_net = MLP(state_dim, state_dim,cfg.hidden_dim).to(self.device)
        self.policy_net = MLP(2*n_states, n_actions,cfg.hidden_dim).to(self.device) # controller Q-network: input is the state concatenated with a one-hot goal
        self.meta_policy_net = MLP(n_states, n_states,cfg.hidden_dim).to(self.device) # meta-controller Q-network: maps a state to a goal index
        self.optimizer = optim.Adam(self.policy_net.parameters(),lr=cfg.lr)
        self.meta_optimizer = optim.Adam(self.meta_policy_net.parameters(),lr=cfg.lr)
        self.memory = ReplayBuffer(cfg.memory_capacity) # controller replay buffer
@@ -37,7 +76,7 @@ class HierarchicalDQN:
        self.losses = []
        self.meta_losses = []
    def to_onehot(self,x):
        oh = np.zeros(self.state_dim)
        oh = np.zeros(self.n_states)
        oh[x - 1] = 1.
        return oh
    def set_goal(self,state):
@@ -46,7 +85,7 @@ class HierarchicalDQN:
            state = torch.tensor(state, device=self.device, dtype=torch.float32).unsqueeze(0)
            goal = self.meta_policy_net(state).max(1)[1].item() # greedy goal from the meta-controller
        else:
            goal = random.randrange(self.state_dim)
            goal = random.randrange(self.n_states)
        return goal
    def choose_action(self,state):
        self.frame_idx += 1
@@ -56,7 +95,7 @@ class HierarchicalDQN:
            q_value = self.policy_net(state)
            action = q_value.max(1)[1].item() # greedy action from the controller
        else:
            action = random.randrange(self.action_dim)
            action = random.randrange(self.n_actions)
        return action
    def update(self):
        self.update_policy()

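The hunk above ends at update(), so the body of update_policy() (and the matching meta-controller update) is not visible in this diff. For orientation only, a minimal sketch of a standard DQN update built solely from the attributes initialised above (self.memory, self.batch_size, self.policy_net, self.optimizer, self.gamma, self.device, self.losses, and the self.loss_numpy field that train.py prints) could look roughly like the following; the actual implementation in agent.py may differ, e.g. by using a target network or a different loss.

    def update_policy(self):
        if len(self.memory) < self.batch_size:
            return  # wait until one mini-batch of transitions is available
        state, action, reward, next_state, done = self.memory.sample(self.batch_size)
        state = torch.tensor(np.array(state), device=self.device, dtype=torch.float32)
        action = torch.tensor(action, device=self.device, dtype=torch.int64).unsqueeze(1)
        reward = torch.tensor(reward, device=self.device, dtype=torch.float32)
        next_state = torch.tensor(np.array(next_state), device=self.device, dtype=torch.float32)
        done = torch.tensor(done, device=self.device, dtype=torch.float32)
        q_values = self.policy_net(state).gather(1, action).squeeze(1)   # Q(s, a) of the taken actions
        next_q_values = self.policy_net(next_state).max(1)[0].detach()   # bootstrap target max_a' Q(s', a')
        expected_q = reward + self.gamma * next_q_values * (1 - done)    # TD target, zeroed at episode end
        loss = nn.MSELoss()(q_values, expected_q)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        self.loss_numpy = loss.detach().cpu().numpy()
        self.losses.append(self.loss_numpy)

A symmetric update for self.meta_policy_net, driven by self.meta_memory and self.meta_optimizer, would mirror this sketch.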
codes/HierarchicalDQN/task0.py (new file, 88 lines)
@@ -0,0 +1,88 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: John
Email: johnjim0816@gmail.com
Date: 2021-03-29 10:37:32
LastEditor: John
LastEditTime: 2021-05-04 22:35:56
Description:
Environment:
'''
import sys
import os
curr_path = os.path.dirname(os.path.abspath(__file__)) # absolute path of the current file
parent_path = os.path.dirname(curr_path) # parent path
sys.path.append(parent_path) # add the parent path to sys.path

import datetime
import numpy as np
import torch
import gym

from common.utils import save_results,make_dir
from common.utils import plot_rewards
from HierarchicalDQN.agent import HierarchicalDQN
from HierarchicalDQN.train import train,test

curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # get current time
algo_name = "Hierarchical DQN" # algorithm name
env_name = 'CartPole-v0' # environment name
class HierarchicalDQNConfig:
    def __init__(self):
        self.algo_name = algo_name # algorithm name
        self.env_name = env_name # environment name
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu") # detect GPU
        self.train_eps = 300 # number of training episodes
        self.test_eps = 50 # number of test episodes
        self.gamma = 0.99
        self.epsilon_start = 1 # start epsilon of e-greedy policy
        self.epsilon_end = 0.01
        self.epsilon_decay = 200
        self.lr = 0.0001 # learning rate
        self.memory_capacity = 10000 # Replay Memory capacity
        self.batch_size = 32
        self.target_update = 2 # update frequency of the target network
        self.hidden_dim = 256 # dimension of the hidden layers
class PlotConfig:
    ''' Settings related to plotting
    '''

    def __init__(self) -> None:
        self.algo_name = algo_name # algorithm name
        self.env_name = env_name # environment name
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu") # detect GPU
        self.result_path = curr_path + "/outputs/" + self.env_name + \
            '/' + curr_time + '/results/' # path to save results
        self.model_path = curr_path + "/outputs/" + self.env_name + \
            '/' + curr_time + '/models/' # path to save models
        self.save = True # whether to save figures

def env_agent_config(cfg,seed=1):
    env = gym.make(cfg.env_name)
    env.seed(seed)
    n_states = env.observation_space.shape[0]
    n_actions = env.action_space.n
    agent = HierarchicalDQN(n_states,n_actions,cfg)
    return env,agent

if __name__ == "__main__":
    cfg = HierarchicalDQNConfig()
    plot_cfg = PlotConfig()
    # training
    env, agent = env_agent_config(cfg, seed=1)
    rewards, ma_rewards = train(cfg, env, agent)
    make_dir(plot_cfg.result_path, plot_cfg.model_path) # create folders for saving results and models
    agent.save(path=plot_cfg.model_path) # save the model
    save_results(rewards, ma_rewards, tag='train',
                 path=plot_cfg.result_path) # save results
    plot_rewards(rewards, ma_rewards, plot_cfg, tag="train") # plot results
    # testing
    env, agent = env_agent_config(cfg, seed=10)
    agent.load(path=plot_cfg.model_path) # load the model
    rewards, ma_rewards = test(cfg, env, agent)
    save_results(rewards, ma_rewards, tag='test', path=plot_cfg.result_path) # save results
    plot_rewards(rewards, ma_rewards, plot_cfg, tag="test") # plot results

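Note that task0.py and train.py rely on the classic gym API: env.seed(seed), env.reset() returning only the observation, and env.step() returning a 4-tuple. If the scripts were run under gymnasium or gym >= 0.26 (an assumption, not something this commit targets), the equivalent calls would look roughly like the sketch below; seeding moves into reset(), and the old done flag is split into terminated and truncated.

import gymnasium as gym
env = gym.make("CartPole-v1")
state, info = env.reset(seed=1)                                      # reset() now returns (obs, info)
next_state, reward, terminated, truncated, info = env.step(env.action_space.sample())
done = terminated or truncated                                       # recombine into the old done flag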
File diff suppressed because one or more lines are too long
@@ -1,146 +0,0 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: John
Email: johnjim0816@gmail.com
Date: 2021-03-29 10:37:32
LastEditor: John
LastEditTime: 2021-05-04 22:35:56
Description:
Environment:
'''


import sys,os
curr_path = os.path.dirname(__file__)
parent_path = os.path.dirname(curr_path)
sys.path.append(parent_path) # add current terminal path to sys.path

import datetime
import numpy as np
import torch
import gym

from common.utils import save_results,make_dir
from common.plot import plot_rewards
from HierarchicalDQN.agent import HierarchicalDQN

curr_time = datetime.datetime.now().strftime(
    "%Y%m%d-%H%M%S") # obtain current time

class HierarchicalDQNConfig:
    def __init__(self):
        self.algo = "H-DQN" # name of algo
        self.env = 'CartPole-v0'
        self.result_path = curr_path+"/outputs/" + self.env + \
            '/'+curr_time+'/results/' # path to save results
        self.model_path = curr_path+"/outputs/" + self.env + \
            '/'+curr_time+'/models/' # path to save models
        self.train_eps = 300 # number of training episodes
        self.eval_eps = 50 # number of evaluation episodes
        self.gamma = 0.99
        self.epsilon_start = 1 # start epsilon of e-greedy policy
        self.epsilon_end = 0.01
        self.epsilon_decay = 200
        self.lr = 0.0001 # learning rate
        self.memory_capacity = 10000 # Replay Memory capacity
        self.batch_size = 32
        self.target_update = 2 # update frequency of the target net
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu") # detect GPU
        self.hidden_dim = 256 # dimension of hidden layer

def env_agent_config(cfg,seed=1):
    env = gym.make(cfg.env)
    env.seed(seed)
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.n
    agent = HierarchicalDQN(state_dim,action_dim,cfg)
    return env,agent

def train(cfg, env, agent):
    print('Start to train !')
    print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
    rewards = []
    ma_rewards = [] # moving average reward
    for i_ep in range(cfg.train_eps):
        state = env.reset()
        done = False
        ep_reward = 0
        while not done:
            goal = agent.set_goal(state)
            onehot_goal = agent.to_onehot(goal)
            meta_state = state
            extrinsic_reward = 0
            while not done and goal != np.argmax(state):
                goal_state = np.concatenate([state, onehot_goal])
                action = agent.choose_action(goal_state)
                next_state, reward, done, _ = env.step(action)
                ep_reward += reward
                extrinsic_reward += reward
                intrinsic_reward = 1.0 if goal == np.argmax(
                    next_state) else 0.0
                agent.memory.push(goal_state, action, intrinsic_reward, np.concatenate(
                    [next_state, onehot_goal]), done)
                state = next_state
                agent.update()
            agent.meta_memory.push(meta_state, goal, extrinsic_reward, state, done)
        print('Episode:{}/{}, Reward:{}, Loss:{:.2f}, Meta_Loss:{:.2f}'.format(i_ep+1, cfg.train_eps, ep_reward,agent.loss_numpy ,agent.meta_loss_numpy ))
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(
                0.9*ma_rewards[-1]+0.1*ep_reward)
        else:
            ma_rewards.append(ep_reward)
    print('Complete training!')
    return rewards, ma_rewards

def eval(cfg, env, agent):
    print('Start to eval !')
    print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
    rewards = []
    ma_rewards = [] # moving average reward
    for i_ep in range(cfg.train_eps):
        state = env.reset()
        done = False
        ep_reward = 0
        while not done:
            goal = agent.set_goal(state)
            onehot_goal = agent.to_onehot(goal)
            extrinsic_reward = 0
            while not done and goal != np.argmax(state):
                goal_state = np.concatenate([state, onehot_goal])
                action = agent.choose_action(goal_state)
                next_state, reward, done, _ = env.step(action)
                ep_reward += reward
                extrinsic_reward += reward
                state = next_state
                agent.update()
        print(f'Episode:{i_ep+1}/{cfg.train_eps}, Reward:{ep_reward}, Loss:{agent.loss_numpy:.2f}, Meta_Loss:{agent.meta_loss_numpy:.2f}')
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(
                0.9*ma_rewards[-1]+0.1*ep_reward)
        else:
            ma_rewards.append(ep_reward)
    print('Complete training!')
    return rewards, ma_rewards

if __name__ == "__main__":
    cfg = HierarchicalDQNConfig()

    # train
    env,agent = env_agent_config(cfg,seed=1)
    rewards, ma_rewards = train(cfg, env, agent)
    make_dir(cfg.result_path, cfg.model_path)
    agent.save(path=cfg.model_path)
    save_results(rewards, ma_rewards, tag='train', path=cfg.result_path)
    plot_rewards(rewards, ma_rewards, tag="train",
                 algo=cfg.algo, path=cfg.result_path)
    # eval
    env,agent = env_agent_config(cfg,seed=10)
    agent.load(path=cfg.model_path)
    rewards,ma_rewards = eval(cfg,env,agent)
    save_results(rewards,ma_rewards,tag='eval',path=cfg.result_path)
    plot_rewards(rewards,ma_rewards,tag="eval",env=cfg.env,algo = cfg.algo,path=cfg.result_path)

codes/HierarchicalDQN/train.py (new file, 77 lines)
@@ -0,0 +1,77 @@
import sys
import os
curr_path = os.path.dirname(os.path.abspath(__file__)) # absolute path of the current file
parent_path = os.path.dirname(curr_path) # parent path
sys.path.append(parent_path) # add the parent path to sys.path

import numpy as np

def train(cfg, env, agent):
    print('Start training!')
    print(f'Env: {cfg.env_name}, Algorithm: {cfg.algo_name}, Device: {cfg.device}')
    rewards = [] # record rewards of all episodes
    ma_rewards = [] # record moving-average rewards of all episodes
    for i_ep in range(cfg.train_eps):
        state = env.reset()
        done = False
        ep_reward = 0
        while not done:
            goal = agent.set_goal(state)
            onehot_goal = agent.to_onehot(goal)
            meta_state = state
            extrinsic_reward = 0
            while not done and goal != np.argmax(state):
                goal_state = np.concatenate([state, onehot_goal])
                action = agent.choose_action(goal_state)
                next_state, reward, done, _ = env.step(action)
                ep_reward += reward
                extrinsic_reward += reward
                intrinsic_reward = 1.0 if goal == np.argmax(
                    next_state) else 0.0
                agent.memory.push(goal_state, action, intrinsic_reward, np.concatenate(
                    [next_state, onehot_goal]), done)
                state = next_state
                agent.update()
            if (i_ep+1)%10 == 0:
                print(f'Episode: {i_ep+1}/{cfg.train_eps}, Reward: {ep_reward}, Loss: {agent.loss_numpy:.2f}, Meta_Loss: {agent.meta_loss_numpy:.2f}')
            agent.meta_memory.push(meta_state, goal, extrinsic_reward, state, done)
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(
                0.9*ma_rewards[-1]+0.1*ep_reward)
        else:
            ma_rewards.append(ep_reward)
    print('Finish training!')
    return rewards, ma_rewards

def test(cfg, env, agent):
    print('Start testing!')
    print(f'Env: {cfg.env_name}, Algorithm: {cfg.algo_name}, Device: {cfg.device}')
    rewards = [] # record rewards of all episodes
    ma_rewards = [] # record moving-average rewards of all episodes
    for i_ep in range(cfg.train_eps):
        state = env.reset()
        done = False
        ep_reward = 0
        while not done:
            goal = agent.set_goal(state)
            onehot_goal = agent.to_onehot(goal)
            extrinsic_reward = 0
            while not done and goal != np.argmax(state):
                goal_state = np.concatenate([state, onehot_goal])
                action = agent.choose_action(goal_state)
                next_state, reward, done, _ = env.step(action)
                ep_reward += reward
                extrinsic_reward += reward
                state = next_state
                agent.update()
            if (i_ep+1)%10 == 0:
                print(f'Episode: {i_ep+1}/{cfg.train_eps}, Reward: {ep_reward}, Loss: {agent.loss_numpy:.2f}, Meta_Loss: {agent.meta_loss_numpy:.2f}')
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(
                0.9*ma_rewards[-1]+0.1*ep_reward)
        else:
            ma_rewards.append(ep_reward)
    print('Finish testing!')
    return rewards, ma_rewards
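The inner while loop above is the heart of the hierarchical scheme: the meta-controller's goal is turned into a one-hot vector and appended to the raw state to form the controller's input, the controller receives an intrinsic reward of 1.0 only when the goal index matches the argmax of the next state, and the accumulated extrinsic environment reward is what gets pushed to the meta-controller's memory. A small standalone illustration with made-up numbers (assumed 4-dimensional CartPole observations; the one-hot is built directly here rather than through agent.to_onehot):

import numpy as np

state = np.array([0.01, -0.20, 0.30, 0.04])        # hypothetical observation
goal = 2                                            # goal index chosen by the meta-controller
onehot_goal = np.zeros(4)
onehot_goal[goal] = 1.0
goal_state = np.concatenate([state, onehot_goal])   # controller input, shape (8,) = 2 * n_states
next_state = np.array([0.02, -0.10, 0.50, 0.05])
intrinsic_reward = 1.0 if goal == np.argmax(next_state) else 0.0  # 1.0 here: index 2 holds the largest entry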