update codes
This commit is contained in:
@@ -1,13 +0,0 @@
|
||||
# Hierarchical DQN
|
||||
|
||||
## 原理简介
|
||||
|
||||
Hierarchical DQN是一种分层强化学习方法,与DQN相比增加了一个meta controller,
|
||||
|
||||

|
||||
|
||||
即学习时,meta controller每次会生成一个goal,然后controller或者说下面的actor就会达到这个goal,直到done为止。这就相当于给agent增加了一个队长,队长擅长制定局部目标,指导agent前行,这样应对一些每回合步数较长或者稀疏奖励的问题会有所帮助。
|
||||
|
||||
## 伪代码
|
||||
|
||||

|
||||
@@ -1,115 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# coding=utf-8
|
||||
'''
|
||||
Author: John
|
||||
Email: johnjim0816@gmail.com
|
||||
Date: 2021-03-24 22:18:18
|
||||
LastEditor: John
|
||||
LastEditTime: 2021-05-04 22:39:34
|
||||
Description:
|
||||
Environment:
|
||||
'''
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import numpy as np
|
||||
import random,math
|
||||
import torch.optim as optim
|
||||
from common.model import MLP
|
||||
from common.memory import ReplayBuffer
|
||||
|
||||
class HierarchicalDQN:
    """Hierarchical DQN (h-DQN) agent.

    A meta controller epsilon-greedily proposes a goal (an index into the
    state space, fed to the low-level net as a one-hot vector); the low-level
    controller acts on the concatenation [state, goal_one_hot]. Both networks
    are trained with one-step TD targets from separate replay buffers.
    """

    def __init__(self, state_dim, action_dim, cfg):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.gamma = cfg.gamma        # discount factor (used in TD targets below)
        self.device = cfg.device
        self.batch_size = cfg.batch_size
        self.frame_idx = 0            # global step counter driving epsilon decay
        # exponentially decayed epsilon for e-greedy exploration
        self.epsilon = lambda frame_idx: cfg.epsilon_end + \
            (cfg.epsilon_start - cfg.epsilon_end) * math.exp(-1. * frame_idx / cfg.epsilon_decay)
        # controller: [state, goal one-hot] (2*state_dim) -> action values
        self.policy_net = MLP(2 * state_dim, action_dim, cfg.hidden_dim).to(self.device)
        # meta controller: state -> goal values (goal space == state space here)
        self.meta_policy_net = MLP(state_dim, state_dim, cfg.hidden_dim).to(self.device)
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.lr)
        self.meta_optimizer = optim.Adam(self.meta_policy_net.parameters(), lr=cfg.lr)
        self.memory = ReplayBuffer(cfg.memory_capacity)        # controller transitions
        self.meta_memory = ReplayBuffer(cfg.memory_capacity)   # meta-controller transitions
        self.loss_numpy = 0       # last controller loss (kept for logging)
        self.meta_loss_numpy = 0  # last meta-controller loss (kept for logging)
        self.losses = []
        self.meta_losses = []

    def to_onehot(self, x):
        """Return a one-hot vector of length state_dim with index ``x`` set.

        Bug fix: the original wrote ``oh[x - 1] = 1.`` so goal 0 wrapped to
        the LAST slot via negative indexing; the encoding now matches the
        goal index directly.
        """
        oh = np.zeros(self.state_dim)
        oh[x] = 1.
        return oh

    def set_goal(self, state):
        """Epsilon-greedily pick a goal index from the meta controller."""
        if random.random() > self.epsilon(self.frame_idx):
            with torch.no_grad():
                state = torch.tensor(state, device=self.device, dtype=torch.float32).unsqueeze(0)
                goal = self.meta_policy_net(state).max(1)[1].item()
        else:
            goal = random.randrange(self.state_dim)
        return goal

    def choose_action(self, state):
        """Epsilon-greedily pick a primitive action from the controller.

        ``state`` is expected to be the concatenation [env_state, goal_one_hot].
        """
        self.frame_idx += 1
        if random.random() > self.epsilon(self.frame_idx):
            with torch.no_grad():
                state = torch.tensor(state, device=self.device, dtype=torch.float32).unsqueeze(0)
                q_value = self.policy_net(state)
                action = q_value.max(1)[1].item()
        else:
            action = random.randrange(self.action_dim)
        return action

    def update(self):
        """Update both the controller and the meta controller."""
        self.update_policy()
        self.update_meta()

    def _td_update(self, memory, net, optimizer):
        """One-step TD update of ``net`` from a batch sampled out of ``memory``.

        Shared by the controller and the meta controller (the original class
        duplicated this logic). Returns the scalar loss as a numpy value, or
        None when the buffer does not yet hold a full batch.
        """
        if self.batch_size > len(memory):
            return None
        state_batch, action_batch, reward_batch, next_state_batch, done_batch = memory.sample(self.batch_size)
        state_batch = torch.tensor(state_batch, device=self.device, dtype=torch.float)
        action_batch = torch.tensor(action_batch, device=self.device, dtype=torch.int64).unsqueeze(1)
        reward_batch = torch.tensor(reward_batch, device=self.device, dtype=torch.float)
        next_state_batch = torch.tensor(next_state_batch, device=self.device, dtype=torch.float)
        done_batch = torch.tensor(np.float32(done_batch), device=self.device)
        q_values = net(state_batch).gather(dim=1, index=action_batch).squeeze(1)
        # vanilla DQN bootstrap (this implementation uses no separate target net)
        next_state_values = net(next_state_batch).max(1)[0].detach()
        # bug fix: use the configured discount factor (was hard-coded 0.99)
        expected_q_values = reward_batch + self.gamma * next_state_values * (1 - done_batch)
        loss = nn.MSELoss()(q_values, expected_q_values)
        optimizer.zero_grad()
        loss.backward()
        for param in net.parameters():  # clip gradients to avoid explosion
            param.grad.data.clamp_(-1, 1)
        optimizer.step()
        return loss.detach().cpu().numpy()

    def update_policy(self):
        """TD update of the low-level controller from its replay buffer."""
        loss = self._td_update(self.memory, self.policy_net, self.optimizer)
        if loss is not None:
            self.loss_numpy = loss
            self.losses.append(self.loss_numpy)

    def update_meta(self):
        """TD update of the meta controller from its replay buffer."""
        loss = self._td_update(self.meta_memory, self.meta_policy_net, self.meta_optimizer)
        if loss is not None:
            self.meta_loss_numpy = loss
            self.meta_losses.append(self.meta_loss_numpy)

    def save(self, path):
        """Save both networks' weights under the directory ``path``."""
        torch.save(self.policy_net.state_dict(), path + 'policy_checkpoint.pth')
        torch.save(self.meta_policy_net.state_dict(), path + 'meta_checkpoint.pth')

    def load(self, path):
        """Load both networks' weights from the directory ``path``."""
        self.policy_net.load_state_dict(torch.load(path + 'policy_checkpoint.pth'))
        self.meta_policy_net.load_state_dict(torch.load(path + 'meta_checkpoint.pth'))
|
||||
|
||||
|
||||
|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 112 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 311 KiB |
Binary file not shown.
Binary file not shown.
|
Before Width: | Height: | Size: 73 KiB |
Binary file not shown.
Binary file not shown.
|
Before Width: | Height: | Size: 21 KiB |
Binary file not shown.
Binary file not shown.
|
Before Width: | Height: | Size: 62 KiB |
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
File diff suppressed because one or more lines are too long
@@ -1,146 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# coding=utf-8
|
||||
'''
|
||||
Author: John
|
||||
Email: johnjim0816@gmail.com
|
||||
Date: 2021-03-29 10:37:32
|
||||
LastEditor: John
|
||||
LastEditTime: 2021-05-04 22:35:56
|
||||
Description:
|
||||
Environment:
|
||||
'''
|
||||
|
||||
|
||||
import sys,os
|
||||
curr_path = os.path.dirname(__file__)
|
||||
parent_path = os.path.dirname(curr_path)
|
||||
sys.path.append(parent_path) # add current terminal path to sys.path
|
||||
|
||||
import datetime
|
||||
import numpy as np
|
||||
import torch
|
||||
import gym
|
||||
|
||||
from common.utils import save_results,make_dir
|
||||
from common.plot import plot_rewards
|
||||
from HierarchicalDQN.agent import HierarchicalDQN
|
||||
|
||||
curr_time = datetime.datetime.now().strftime(
|
||||
"%Y%m%d-%H%M%S") # obtain current time
|
||||
|
||||
class HierarchicalDQNConfig:
    """Hyper-parameters and output paths for H-DQN on CartPole."""

    def __init__(self):
        self.algo = "H-DQN"  # name of algo
        self.env = 'CartPole-v0'
        # time-stamped output locations for this run
        self.result_path = curr_path + "/outputs/" + self.env + '/' + curr_time + '/results/'  # path to save results
        self.model_path = curr_path + "/outputs/" + self.env + '/' + curr_time + '/models/'    # path to save models
        # episode schedule
        self.train_eps = 300  # number of training episodes
        self.eval_eps = 50    # number of evaluation episodes
        # RL hyper-parameters
        self.gamma = 0.99
        self.epsilon_start = 1     # start epsilon of e-greedy policy
        self.epsilon_end = 0.01
        self.epsilon_decay = 200
        self.lr = 0.0001           # learning rate
        self.memory_capacity = 10000  # Replay Memory capacity
        self.batch_size = 32
        self.target_update = 2     # update frequency of the target net
        # pick GPU when available, else CPU
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.hidden_dim = 256      # dimension of hidden layer
|
||||
|
||||
def env_agent_config(cfg, seed=1):
    """Build the gym environment named by ``cfg.env`` and an H-DQN agent for it."""
    env = gym.make(cfg.env)
    env.seed(seed)  # seed for reproducibility
    obs_dim = env.observation_space.shape[0]
    n_actions = env.action_space.n
    return env, HierarchicalDQN(obs_dim, n_actions, cfg)
|
||||
|
||||
def train(cfg, env, agent):
    """Train the h-DQN agent for ``cfg.train_eps`` episodes.

    Returns (rewards, ma_rewards): per-episode rewards and their
    exponential moving average.
    """
    print('Start to train !')
    print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
    rewards, ma_rewards = [], []
    for i_ep in range(cfg.train_eps):
        state = env.reset()
        done, ep_reward = False, 0
        while not done:
            # meta controller proposes a goal; the controller then pursues it
            goal = agent.set_goal(state)
            onehot_goal = agent.to_onehot(goal)
            meta_state = state
            extrinsic_reward = 0
            # act until the episode ends or the goal state is reached
            while not done and goal != np.argmax(state):
                goal_state = np.concatenate([state, onehot_goal])
                action = agent.choose_action(goal_state)
                next_state, reward, done, _ = env.step(action)
                ep_reward += reward
                extrinsic_reward += reward
                # intrinsic reward: 1 exactly when the goal is reached
                intrinsic_reward = 1.0 if goal == np.argmax(next_state) else 0.0
                agent.memory.push(goal_state, action, intrinsic_reward,
                                  np.concatenate([next_state, onehot_goal]), done)
                state = next_state
                agent.update()
            # meta controller learns from the extrinsic reward gathered under this goal
            agent.meta_memory.push(meta_state, goal, extrinsic_reward, state, done)
        print('Episode:{}/{}, Reward:{}, Loss:{:.2f}, Meta_Loss:{:.2f}'.format(i_ep+1, cfg.train_eps, ep_reward,agent.loss_numpy ,agent.meta_loss_numpy ))
        rewards.append(ep_reward)
        # exponential moving average of the episode reward
        ma_rewards.append(0.9 * ma_rewards[-1] + 0.1 * ep_reward if ma_rewards else ep_reward)
    print('Complete training!')
    return rewards, ma_rewards
|
||||
|
||||
def eval(cfg, env, agent):
    """Evaluate a trained h-DQN agent for ``cfg.eval_eps`` episodes (no learning).

    NOTE(review): the name shadows the ``eval`` builtin; kept for caller
    compatibility. Returns (rewards, ma_rewards).
    """
    print('Start to eval !')
    print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
    rewards = []
    ma_rewards = []  # moving average reward
    for i_ep in range(cfg.eval_eps):  # bug fix: was cfg.train_eps (eval_eps was unused)
        state = env.reset()
        done = False
        ep_reward = 0
        while not done:
            goal = agent.set_goal(state)
            onehot_goal = agent.to_onehot(goal)
            # act until the episode ends or the goal state is reached
            while not done and goal != np.argmax(state):
                goal_state = np.concatenate([state, onehot_goal])
                action = agent.choose_action(goal_state)
                next_state, reward, done, _ = env.step(action)
                ep_reward += reward
                state = next_state
                # bug fix: removed agent.update() — no learning during evaluation
        print(f'Episode:{i_ep+1}/{cfg.eval_eps}, Reward:{ep_reward}, Loss:{agent.loss_numpy:.2f}, Meta_Loss:{agent.meta_loss_numpy:.2f}')
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(0.9 * ma_rewards[-1] + 0.1 * ep_reward)
        else:
            ma_rewards.append(ep_reward)
    print('Complete evaluation!')  # bug fix: said 'Complete training!'
    return rewards, ma_rewards
|
||||
|
||||
if __name__ == "__main__":
    cfg = HierarchicalDQNConfig()

    # train
    env, agent = env_agent_config(cfg, seed=1)
    rewards, ma_rewards = train(cfg, env, agent)
    make_dir(cfg.result_path, cfg.model_path)
    agent.save(path=cfg.model_path)
    save_results(rewards, ma_rewards, tag='train', path=cfg.result_path)
    # consistency fix: pass env=cfg.env as the eval-phase call below already does
    plot_rewards(rewards, ma_rewards, tag="train", env=cfg.env,
                 algo=cfg.algo, path=cfg.result_path)
    # eval
    env, agent = env_agent_config(cfg, seed=10)
    agent.load(path=cfg.model_path)
    rewards, ma_rewards = eval(cfg, env, agent)
    save_results(rewards, ma_rewards, tag='eval', path=cfg.result_path)
    plot_rewards(rewards, ma_rewards, tag="eval", env=cfg.env, algo=cfg.algo, path=cfg.result_path)
|
||||
|
||||
Reference in New Issue
Block a user