update projects

projects/codes/SoftActorCritic/env_wrapper.py (new file, 30 lines)
@@ -0,0 +1,30 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: JiangJi
Email: johnjim0816@gmail.com
Date: 2021-04-29 12:52:11
LastEditor: JiangJi
LastEditTime: 2021-12-22 15:36:36
Description:
Environment:
'''
import gym
import numpy as np


class NormalizedActions(gym.ActionWrapper):
    ''' Rescale actions from the policy's [-1, 1] range to the env's [low, high] range. '''
    def action(self, action):
        low = self.action_space.low
        high = self.action_space.high

        action = low + (action + 1.0) * 0.5 * (high - low)  # map [-1, 1] -> [low, high]
        action = np.clip(action, low, high)

        return action

    def reverse_action(self, action):
        low = self.action_space.low
        high = self.action_space.high
        action = 2 * (action - low) / (high - low) - 1  # map [low, high] -> [-1, 1]
        action = np.clip(action, -1.0, 1.0)  # clip in the normalized range, not in [low, high]
        return action
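A quick sanity check of the NormalizedActions wrapper above (a minimal sketch, not part of the commit; it assumes Pendulum-v1, whose torque bound is +/-2.0):

    import gym
    import numpy as np

    env = NormalizedActions(gym.make('Pendulum-v1'))
    print(env.action(np.array([1.0])))          # a policy output of +1 maps to the upper bound: [2.]
    print(env.action(np.array([-1.0])))         # a policy output of -1 maps to the lower bound: [-2.]
    print(env.reverse_action(np.array([0.0])))  # the env-scale midpoint maps back to [0.] in [-1, 1]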
projects/codes/SoftActorCritic/model.py (new file, 108 lines)
@@ -0,0 +1,108 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: JiangJi
Email: johnjim0816@gmail.com
Date: 2021-04-29 12:53:58
LastEditor: JiangJi
LastEditTime: 2021-11-19 18:04:19
Description:
Environment:
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class ValueNet(nn.Module):
    ''' State-value network V(s). '''
    def __init__(self, n_states, hidden_dim, init_w=3e-3):
        super(ValueNet, self).__init__()
        self.linear1 = nn.Linear(n_states, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3 = nn.Linear(hidden_dim, 1)
        # small uniform initialization for the output layer
        self.linear3.weight.data.uniform_(-init_w, init_w)
        self.linear3.bias.data.uniform_(-init_w, init_w)

    def forward(self, state):
        x = F.relu(self.linear1(state))
        x = F.relu(self.linear2(x))
        x = self.linear3(x)
        return x


class SoftQNet(nn.Module):
    ''' Soft Q-network Q(s, a). '''
    def __init__(self, n_states, n_actions, hidden_dim, init_w=3e-3):
        super(SoftQNet, self).__init__()
        self.linear1 = nn.Linear(n_states + n_actions, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3 = nn.Linear(hidden_dim, 1)
        self.linear3.weight.data.uniform_(-init_w, init_w)
        self.linear3.bias.data.uniform_(-init_w, init_w)

    def forward(self, state, action):
        x = torch.cat([state, action], 1)
        x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))
        x = self.linear3(x)
        return x


class PolicyNet(nn.Module):
    ''' Gaussian policy: outputs the mean and log-std of a Normal, squashed through tanh. '''
    def __init__(self, n_states, n_actions, hidden_dim, init_w=3e-3, log_std_min=-20, log_std_max=2):
        super(PolicyNet, self).__init__()
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max

        self.linear1 = nn.Linear(n_states, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)

        self.mean_linear = nn.Linear(hidden_dim, n_actions)
        self.mean_linear.weight.data.uniform_(-init_w, init_w)
        self.mean_linear.bias.data.uniform_(-init_w, init_w)

        self.log_std_linear = nn.Linear(hidden_dim, n_actions)
        self.log_std_linear.weight.data.uniform_(-init_w, init_w)
        self.log_std_linear.bias.data.uniform_(-init_w, init_w)

    def forward(self, state):
        x = F.relu(self.linear1(state))
        x = F.relu(self.linear2(x))

        mean = self.mean_linear(x)
        log_std = self.log_std_linear(x)
        log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)

        return mean, log_std

    def evaluate(self, state, epsilon=1e-6):
        ''' Sample actions and their log-probabilities (with the tanh change-of-variables correction). '''
        mean, log_std = self.forward(state)
        std = log_std.exp()

        normal = Normal(mean, std)
        z = normal.sample()
        action = torch.tanh(z)

        log_prob = normal.log_prob(z) - torch.log(1 - action.pow(2) + epsilon)
        log_prob = log_prob.sum(-1, keepdim=True)

        return action, log_prob, z, mean, log_std

    def get_action(self, state):
        ''' Sample a single action for interaction with the environment. '''
        state = torch.FloatTensor(state).unsqueeze(0).to(device)
        mean, log_std = self.forward(state)
        std = log_std.exp()

        normal = Normal(mean, std)
        z = normal.sample()
        action = torch.tanh(z)

        action = action.detach().cpu().numpy()
        return action[0]
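The term - torch.log(1 - action.pow(2) + epsilon) in PolicyNet.evaluate is the change-of-variables correction for squashing a Gaussian sample z through tanh: if a = tanh(z), then log p(a) = log p(z) - log(1 - tanh(z)^2). A minimal shape check (a sketch only; the state/action sizes below are arbitrary assumptions):

    import torch

    policy = PolicyNet(n_states=3, n_actions=1, hidden_dim=256)
    state = torch.randn(5, 3)                # batch of 5 fake 3-dimensional states
    action, log_prob, z, mean, log_std = policy.evaluate(state)
    print(action.shape, log_prob.shape)      # torch.Size([5, 1]) torch.Size([5, 1])
    print(bool((action.abs() < 1.0).all()))  # tanh keeps sampled actions inside (-1, 1): True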
(Several binary files in this commit are not shown; they include two result images, 44 KiB and 60 KiB.)
projects/codes/SoftActorCritic/sac.py (new file, 222 lines)
@@ -0,0 +1,222 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: JiangJi
Email: johnjim0816@gmail.com
Date: 2021-04-29 12:53:54
LastEditor: JiangJi
LastEditTime: 2021-12-22 15:41:19
Description:
Environment:
'''
import copy
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal
import numpy as np
import random

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class ReplayBuffer:
    def __init__(self, capacity):
        self.capacity = capacity  # capacity of the replay buffer
        self.buffer = []  # buffer storage
        self.position = 0

    def push(self, state, action, reward, next_state, done):
        ''' The buffer works as a ring queue: once capacity is reached,
            the oldest transitions are overwritten.
        '''
        if len(self.buffer) < self.capacity:
            self.buffer.append(None)
        self.buffer[self.position] = (state, action, reward, next_state, done)
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        batch = random.sample(self.buffer, batch_size)  # randomly sample a mini-batch of transitions
        state, action, reward, next_state, done = zip(*batch)  # unzip into states, actions, etc.
        return state, action, reward, next_state, done

    def __len__(self):
        ''' Return the number of stored transitions. '''
        return len(self.buffer)


# The following network definitions mirror those in model.py.
class ValueNet(nn.Module):
    def __init__(self, n_states, hidden_dim, init_w=3e-3):
        super(ValueNet, self).__init__()
        self.linear1 = nn.Linear(n_states, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3 = nn.Linear(hidden_dim, 1)
        self.linear3.weight.data.uniform_(-init_w, init_w)
        self.linear3.bias.data.uniform_(-init_w, init_w)

    def forward(self, state):
        x = F.relu(self.linear1(state))
        x = F.relu(self.linear2(x))
        x = self.linear3(x)
        return x


class SoftQNet(nn.Module):
    def __init__(self, n_states, n_actions, hidden_dim, init_w=3e-3):
        super(SoftQNet, self).__init__()
        self.linear1 = nn.Linear(n_states + n_actions, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3 = nn.Linear(hidden_dim, 1)
        self.linear3.weight.data.uniform_(-init_w, init_w)
        self.linear3.bias.data.uniform_(-init_w, init_w)

    def forward(self, state, action):
        x = torch.cat([state, action], 1)
        x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))
        x = self.linear3(x)
        return x


class PolicyNet(nn.Module):
    def __init__(self, n_states, n_actions, hidden_dim, init_w=3e-3, log_std_min=-20, log_std_max=2):
        super(PolicyNet, self).__init__()
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max

        self.linear1 = nn.Linear(n_states, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)

        self.mean_linear = nn.Linear(hidden_dim, n_actions)
        self.mean_linear.weight.data.uniform_(-init_w, init_w)
        self.mean_linear.bias.data.uniform_(-init_w, init_w)

        self.log_std_linear = nn.Linear(hidden_dim, n_actions)
        self.log_std_linear.weight.data.uniform_(-init_w, init_w)
        self.log_std_linear.bias.data.uniform_(-init_w, init_w)

    def forward(self, state):
        x = F.relu(self.linear1(state))
        x = F.relu(self.linear2(x))

        mean = self.mean_linear(x)
        log_std = self.log_std_linear(x)
        log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)

        return mean, log_std

    def evaluate(self, state, epsilon=1e-6):
        mean, log_std = self.forward(state)
        std = log_std.exp()

        normal = Normal(mean, std)
        z = normal.sample()
        action = torch.tanh(z)

        log_prob = normal.log_prob(z) - torch.log(1 - action.pow(2) + epsilon)
        log_prob = log_prob.sum(-1, keepdim=True)

        return action, log_prob, z, mean, log_std

    def get_action(self, state):
        state = torch.FloatTensor(state).unsqueeze(0).to(device)
        mean, log_std = self.forward(state)
        std = log_std.exp()

        normal = Normal(mean, std)
        z = normal.sample()
        action = torch.tanh(z)

        action = action.detach().cpu().numpy()
        return action[0]

class SAC:
    def __init__(self, n_states, n_actions, cfg) -> None:
        self.batch_size = cfg.batch_size
        self.memory = ReplayBuffer(cfg.capacity)
        self.device = cfg.device
        self.value_net = ValueNet(n_states, cfg.hidden_dim).to(self.device)
        self.target_value_net = ValueNet(n_states, cfg.hidden_dim).to(self.device)
        self.soft_q_net = SoftQNet(n_states, n_actions, cfg.hidden_dim).to(self.device)
        self.policy_net = PolicyNet(n_states, n_actions, cfg.hidden_dim).to(self.device)
        self.value_optimizer = optim.Adam(self.value_net.parameters(), lr=cfg.value_lr)
        self.soft_q_optimizer = optim.Adam(self.soft_q_net.parameters(), lr=cfg.soft_q_lr)
        self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.policy_lr)
        # the target value network starts as a copy of the value network
        for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()):
            target_param.data.copy_(param.data)
        self.value_criterion = nn.MSELoss()
        self.soft_q_criterion = nn.MSELoss()

    def update(self, gamma=0.99, mean_lambda=1e-3, std_lambda=1e-3, z_lambda=0.0, soft_tau=1e-2):
        if len(self.memory) < self.batch_size:
            return
        state, action, reward, next_state, done = self.memory.sample(self.batch_size)
        state = torch.FloatTensor(state).to(self.device)
        next_state = torch.FloatTensor(next_state).to(self.device)
        action = torch.FloatTensor(action).to(self.device)
        reward = torch.FloatTensor(reward).unsqueeze(1).to(self.device)
        done = torch.FloatTensor(np.float32(done)).unsqueeze(1).to(self.device)
        expected_q_value = self.soft_q_net(state, action)
        expected_value = self.value_net(state)
        new_action, log_prob, z, mean, log_std = self.policy_net.evaluate(state)

        # soft Q loss: regress Q(s, a) towards r + gamma * (1 - done) * V_target(s')
        target_value = self.target_value_net(next_state)
        next_q_value = reward + (1 - done) * gamma * target_value
        q_value_loss = self.soft_q_criterion(expected_q_value, next_q_value.detach())

        # value loss: regress V(s) towards Q(s, a_new) - log pi(a_new|s)
        expected_new_q_value = self.soft_q_net(state, new_action)
        next_value = expected_new_q_value - log_prob
        value_loss = self.value_criterion(expected_value, next_value.detach())

        # policy loss (surrogate with the value network as baseline)
        log_prob_target = expected_new_q_value - expected_value
        policy_loss = (log_prob * (log_prob - log_prob_target).detach()).mean()

        # regularization terms on the pre-squash policy outputs
        mean_loss = mean_lambda * mean.pow(2).mean()
        std_loss = std_lambda * log_std.pow(2).mean()
        z_loss = z_lambda * z.pow(2).sum(1).mean()

        policy_loss += mean_loss + std_loss + z_loss

        self.soft_q_optimizer.zero_grad()
        q_value_loss.backward()
        self.soft_q_optimizer.step()

        self.value_optimizer.zero_grad()
        value_loss.backward()
        self.value_optimizer.step()

        self.policy_optimizer.zero_grad()
        policy_loss.backward()
        self.policy_optimizer.step()

        # soft (Polyak) update of the target value network
        for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()):
            target_param.data.copy_(
                target_param.data * (1.0 - soft_tau) + param.data * soft_tau
            )

    def save(self, path):
        torch.save(self.value_net.state_dict(), path + "sac_value")
        torch.save(self.value_optimizer.state_dict(), path + "sac_value_optimizer")
        torch.save(self.soft_q_net.state_dict(), path + "sac_soft_q")
        torch.save(self.soft_q_optimizer.state_dict(), path + "sac_soft_q_optimizer")

        torch.save(self.policy_net.state_dict(), path + "sac_policy")
        torch.save(self.policy_optimizer.state_dict(), path + "sac_policy_optimizer")

    def load(self, path):
        self.value_net.load_state_dict(torch.load(path + "sac_value"))
        self.value_optimizer.load_state_dict(torch.load(path + "sac_value_optimizer"))
        self.target_value_net = copy.deepcopy(self.value_net)

        self.soft_q_net.load_state_dict(torch.load(path + "sac_soft_q"))
        self.soft_q_optimizer.load_state_dict(torch.load(path + "sac_soft_q_optimizer"))

        self.policy_net.load_state_dict(torch.load(path + "sac_policy"))
        self.policy_optimizer.load_state_dict(torch.load(path + "sac_policy_optimizer"))
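SAC.update follows the early SAC formulation that learns a separate state-value network with a Polyak-averaged target (later variants replace it with twin Q-networks and a learned entropy temperature). A minimal smoke test for the agent above (a sketch only; the tiny config values and the random Pendulum-like transitions are assumptions, not part of the project):

    import numpy as np
    import torch

    class DummyCfg:
        batch_size = 8
        capacity = 1000
        hidden_dim = 64
        value_lr = soft_q_lr = policy_lr = 3e-4
        device = torch.device('cpu')

    agent = SAC(n_states=3, n_actions=1, cfg=DummyCfg())
    for _ in range(16):  # fill the buffer with fake 3-dim-state / 1-dim-action transitions
        s = np.random.randn(3).astype(np.float32)
        s2 = np.random.randn(3).astype(np.float32)
        a = np.random.uniform(-1, 1, size=1).astype(np.float32)
        agent.memory.push(s, a, float(np.random.randn()), s2, False)
    agent.update()  # one gradient step on the soft-Q, value and policy networks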
projects/codes/SoftActorCritic/task0.py (new file, 142 lines)
@@ -0,0 +1,142 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: JiangJi
Email: johnjim0816@gmail.com
Date: 2021-04-29 12:59:22
LastEditor: JiangJi
LastEditTime: 2021-12-22 16:27:13
Description:
Environment:
'''
import sys, os
curr_path = os.path.dirname(os.path.abspath(__file__))  # absolute path of the current file
parent_path = os.path.dirname(curr_path)  # parent path
sys.path.append(parent_path)  # add the parent path to sys.path

import gym
import torch
import datetime

from SoftActorCritic.env_wrapper import NormalizedActions
from SoftActorCritic.sac import SAC
from common.utils import save_results, make_dir
from common.utils import plot_rewards

curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")  # current time
algo_name = 'SAC'  # algorithm name
env_name = 'Pendulum-v1'  # environment name
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # detect GPU


class SACConfig:
    def __init__(self) -> None:
        self.algo_name = algo_name  # algorithm name
        self.env_name = env_name  # environment name
        self.device = device
        self.train_eps = 300  # number of training episodes
        self.test_eps = 20  # number of test episodes
        self.max_steps = 500  # maximum steps per episode
        self.gamma = 0.99
        self.mean_lambda = 1e-3
        self.std_lambda = 1e-3
        self.z_lambda = 0.0
        self.soft_tau = 1e-2
        self.value_lr = 3e-4
        self.soft_q_lr = 3e-4
        self.policy_lr = 3e-4
        self.capacity = 1000000
        self.hidden_dim = 256
        self.batch_size = 128


class PlotConfig:
    def __init__(self) -> None:
        self.algo_name = algo_name  # algorithm name
        self.env_name = env_name  # environment name
        self.device = device
        self.result_path = curr_path + "/outputs/" + self.env_name + \
            '/' + curr_time + '/results/'  # path for saving results
        self.model_path = curr_path + "/outputs/" + self.env_name + \
            '/' + curr_time + '/models/'  # path for saving models
        self.save = True  # whether to save figures

def env_agent_config(cfg, seed=1):
    env = NormalizedActions(gym.make(cfg.env_name))
    env.seed(seed)
    n_actions = env.action_space.shape[0]
    n_states = env.observation_space.shape[0]
    agent = SAC(n_states, n_actions, cfg)
    return env, agent


def train(cfg, env, agent):
    print('Start training!')
    print(f'Environment: {cfg.env_name}, Algorithm: {cfg.algo_name}, Device: {cfg.device}')
    rewards = []  # rewards of all episodes
    ma_rewards = []  # moving-average rewards of all episodes
    for i_ep in range(cfg.train_eps):
        ep_reward = 0  # reward accumulated within one episode
        state = env.reset()  # reset the environment and get the initial state
        for i_step in range(cfg.max_steps):
            action = agent.policy_net.get_action(state)
            next_state, reward, done, _ = env.step(action)
            agent.memory.push(state, action, reward, next_state, done)
            agent.update()
            state = next_state
            ep_reward += reward
            if done:
                break
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(0.9 * ma_rewards[-1] + 0.1 * ep_reward)
        else:
            ma_rewards.append(ep_reward)
        if (i_ep + 1) % 10 == 0:
            print(f'Episode: {i_ep+1}/{cfg.train_eps}, Reward: {ep_reward:.3f}')
    print('Training finished!')
    return rewards, ma_rewards


def test(cfg, env, agent):
    print('Start testing!')
    print(f'Environment: {cfg.env_name}, Algorithm: {cfg.algo_name}, Device: {cfg.device}')
    rewards = []  # rewards of all episodes
    ma_rewards = []  # moving-average rewards of all episodes
    for i_ep in range(cfg.test_eps):
        state = env.reset()
        ep_reward = 0
        for i_step in range(cfg.max_steps):
            action = agent.policy_net.get_action(state)
            next_state, reward, done, _ = env.step(action)
            state = next_state
            ep_reward += reward
            if done:
                break
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(0.9 * ma_rewards[-1] + 0.1 * ep_reward)
        else:
            ma_rewards.append(ep_reward)
        print(f"Episode: {i_ep+1}/{cfg.test_eps}, Reward: {ep_reward:.1f}")
    print('Testing finished!')
    return rewards, ma_rewards


if __name__ == "__main__":
    cfg = SACConfig()
    plot_cfg = PlotConfig()
    # training
    env, agent = env_agent_config(cfg, seed=1)
    rewards, ma_rewards = train(cfg, env, agent)
    make_dir(plot_cfg.result_path, plot_cfg.model_path)  # create folders for results and models
    agent.save(path=plot_cfg.model_path)  # save the model
    save_results(rewards, ma_rewards, tag='train',
                 path=plot_cfg.result_path)  # save results
    plot_rewards(rewards, ma_rewards, plot_cfg, tag="train")  # plot results
    # testing
    env, agent = env_agent_config(cfg, seed=10)
    agent.load(path=plot_cfg.model_path)  # load the model
    rewards, ma_rewards = test(cfg, env, agent)
    save_results(rewards, ma_rewards, tag='test', path=plot_cfg.result_path)  # save results
    plot_rewards(rewards, ma_rewards, plot_cfg, tag="test")  # plot results
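Note that env.seed(seed) and the four-value return of env.step follow the older gym API (roughly gym<=0.25, which this script appears to target). On gym>=0.26 the equivalent calls would look like this hedged sketch:

    state, _ = env.reset(seed=seed)                                  # reset now returns (obs, info) and takes the seed
    next_state, reward, terminated, truncated, _ = env.step(action)  # step returns five values
    done = terminated or truncated                                   # recover the old-style done flag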
projects/codes/SoftActorCritic/task0_train.ipynb (new file, 221 lines)
@@ -0,0 +1,221 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "from pathlib import Path\n",
    "curr_path = str(Path().absolute())\n",
    "parent_path = str(Path().absolute().parent)\n",
    "sys.path.append(parent_path) # add current terminal path to sys.path"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import gym\n",
    "import torch\n",
    "import datetime\n",
    "\n",
    "from SAC.env import NormalizedActions\n",
    "from SAC.agent import SAC\n",
    "from common.utils import save_results, make_dir\n",
    "from common.plot import plot_rewards\n",
    "\n",
    "curr_time = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\") # obtain current time"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "class SACConfig:\n",
    "    def __init__(self) -> None:\n",
    "        self.algo = 'SAC'\n",
    "        self.env = 'Pendulum-v1' # Pendulum-v0 is deprecated in recent gym versions\n",
    "        self.result_path = curr_path+\"/outputs/\" +self.env+'/'+curr_time+'/results/' # path to save results\n",
    "        self.model_path = curr_path+\"/outputs/\" +self.env+'/'+curr_time+'/models/' # path to save models\n",
    "        self.train_eps = 300\n",
    "        self.train_steps = 500\n",
    "        self.test_eps = 50\n",
    "        self.eval_steps = 500\n",
    "        self.gamma = 0.99\n",
    "        self.mean_lambda=1e-3\n",
    "        self.std_lambda=1e-3\n",
    "        self.z_lambda=0.0\n",
    "        self.soft_tau=1e-2\n",
    "        self.value_lr = 3e-4\n",
    "        self.soft_q_lr = 3e-4\n",
    "        self.policy_lr = 3e-4\n",
    "        self.capacity = 1000000\n",
    "        self.hidden_dim = 256\n",
    "        self.batch_size = 128\n",
    "        self.device=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def env_agent_config(cfg,seed=1):\n",
    "    env = NormalizedActions(gym.make(cfg.env)) # was hard-coded to the deprecated \"Pendulum-v0\"\n",
    "    env.seed(seed)\n",
    "    n_actions = env.action_space.shape[0]\n",
    "    n_states = env.observation_space.shape[0]\n",
    "    agent = SAC(n_states,n_actions,cfg)\n",
    "    return env,agent"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(cfg,env,agent):\n",
    "    print('Start to train !')\n",
    "    print(f'Env: {cfg.env}, Algorithm: {cfg.algo}, Device: {cfg.device}')\n",
    "    rewards = []\n",
    "    ma_rewards = [] # moving average reward\n",
    "    for i_ep in range(cfg.train_eps):\n",
    "        state = env.reset()\n",
    "        ep_reward = 0\n",
    "        for i_step in range(cfg.train_steps):\n",
    "            action = agent.policy_net.get_action(state)\n",
    "            next_state, reward, done, _ = env.step(action)\n",
    "            agent.memory.push(state, action, reward, next_state, done)\n",
    "            agent.update()\n",
    "            state = next_state\n",
    "            ep_reward += reward\n",
    "            if done:\n",
    "                break\n",
    "        if (i_ep+1)%10==0:\n",
    "            print(f\"Episode:{i_ep+1}/{cfg.train_eps}, Reward:{ep_reward:.3f}\")\n",
    "        rewards.append(ep_reward)\n",
    "        if ma_rewards:\n",
    "            ma_rewards.append(0.9*ma_rewards[-1]+0.1*ep_reward)\n",
    "        else:\n",
    "            ma_rewards.append(ep_reward)\n",
    "    print('Complete training!')\n",
    "    return rewards, ma_rewards"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def eval(cfg,env,agent):\n",
    "    print('Start to eval !')\n",
    "    print(f'Env: {cfg.env}, Algorithm: {cfg.algo}, Device: {cfg.device}')\n",
    "    rewards = []\n",
    "    ma_rewards = [] # moving average reward\n",
    "    for i_ep in range(cfg.test_eps):\n",
    "        state = env.reset()\n",
    "        ep_reward = 0\n",
    "        for i_step in range(cfg.eval_steps):\n",
    "            action = agent.policy_net.get_action(state)\n",
    "            next_state, reward, done, _ = env.step(action)\n",
    "            state = next_state\n",
    "            ep_reward += reward\n",
    "            if done:\n",
    "                break\n",
    "        if (i_ep+1)%10==0:\n",
    "            print(f\"Episode:{i_ep+1}/{cfg.test_eps}, Reward:{ep_reward:.3f}\")\n",
    "        rewards.append(ep_reward)\n",
    "        if ma_rewards:\n",
    "            ma_rewards.append(0.9*ma_rewards[-1]+0.1*ep_reward)\n",
    "        else:\n",
    "            ma_rewards.append(ep_reward)\n",
    "    print('Complete evaluation!')\n",
    "    return rewards, ma_rewards\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "if __name__ == \"__main__\":\n",
    "    cfg=SACConfig()\n",
    "\n",
    "    # train\n",
    "    env,agent = env_agent_config(cfg,seed=1)\n",
    "    rewards, ma_rewards = train(cfg, env, agent)\n",
    "    make_dir(cfg.result_path, cfg.model_path)\n",
    "    agent.save(path=cfg.model_path)\n",
    "    save_results(rewards, ma_rewards, tag='train', path=cfg.result_path)\n",
    "    plot_rewards(rewards, ma_rewards, tag=\"train\",\n",
    "                 algo=cfg.algo, path=cfg.result_path)\n",
    "    # eval\n",
    "    env,agent = env_agent_config(cfg,seed=10)\n",
    "    agent.load(path=cfg.model_path)\n",
    "    rewards,ma_rewards = eval(cfg,env,agent)\n",
    "    save_results(rewards,ma_rewards,tag='eval',path=cfg.result_path)\n",
    "    plot_rewards(rewards,ma_rewards,tag=\"eval\",env=cfg.env,algo = cfg.algo,path=cfg.result_path)\n"
   ]
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "fe38df673a99c62a9fea33a7aceda74c9b65b12ee9d076c5851d98b692a4989a"
  },
  "kernelspec": {
   "display_name": "Python 3.7.10 64-bit ('mujoco': conda)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.10"
  },
  "metadata": {
   "interpreter": {
    "hash": "fd81e6a9e450d5c245c1a0b5da0b03c89c450f614a13afa2acb1654375922756"
   }
  },
  "orig_nbformat": 2
 },
 "nbformat": 4,
 "nbformat_minor": 2
}