johnjim0816
2021-04-29 14:44:25 +08:00
parent ed7b60fd5b
commit 895094a893
19 changed files with 538 additions and 33 deletions

codes/SAC/agent.py Normal file

@@ -0,0 +1,110 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: JiangJi
Email: johnjim0816@gmail.com
Date: 2021-04-29 12:53:54
LastEditor: JiangJi
LastEditTime: 2021-04-29 13:56:39
Description:
Environment:
'''
import copy
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np

from common.memory import ReplayBuffer
from SAC.model import ValueNet, PolicyNet, SoftQNet


class SAC:
    def __init__(self, state_dim, action_dim, cfg) -> None:
        self.batch_size = cfg.batch_size
        self.memory = ReplayBuffer(cfg.capacity)
        self.device = cfg.device
        # value network and its target, soft Q network, and squashed-Gaussian policy
        self.value_net = ValueNet(state_dim, cfg.hidden_dim).to(self.device)
        self.target_value_net = ValueNet(state_dim, cfg.hidden_dim).to(self.device)
        self.soft_q_net = SoftQNet(state_dim, action_dim, cfg.hidden_dim).to(self.device)
        self.policy_net = PolicyNet(state_dim, action_dim, cfg.hidden_dim).to(self.device)
        self.value_optimizer = optim.Adam(self.value_net.parameters(), lr=cfg.value_lr)
        self.soft_q_optimizer = optim.Adam(self.soft_q_net.parameters(), lr=cfg.soft_q_lr)
        self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.policy_lr)
        # initialize the target value network with the value network's weights
        for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()):
            target_param.data.copy_(param.data)
        self.value_criterion = nn.MSELoss()
        self.soft_q_criterion = nn.MSELoss()

    def update(self, gamma=0.99, mean_lambda=1e-3, std_lambda=1e-3, z_lambda=0.0, soft_tau=1e-2):
        if len(self.memory) < self.batch_size:
            return
        state, action, reward, next_state, done = self.memory.sample(self.batch_size)
        state = torch.FloatTensor(state).to(self.device)
        next_state = torch.FloatTensor(next_state).to(self.device)
        action = torch.FloatTensor(action).to(self.device)
        reward = torch.FloatTensor(reward).unsqueeze(1).to(self.device)
        done = torch.FloatTensor(np.float32(done)).unsqueeze(1).to(self.device)

        expected_q_value = self.soft_q_net(state, action)
        expected_value = self.value_net(state)
        new_action, log_prob, z, mean, log_std = self.policy_net.evaluate(state)

        # soft Q loss: regress Q(s, a) onto r + gamma * (1 - done) * V_target(s')
        target_value = self.target_value_net(next_state)
        next_q_value = reward + (1 - done) * gamma * target_value
        q_value_loss = self.soft_q_criterion(expected_q_value, next_q_value.detach())

        # value loss: regress V(s) onto Q(s, a_new) - log pi(a_new|s)
        expected_new_q_value = self.soft_q_net(state, new_action)
        next_value = expected_new_q_value - log_prob
        value_loss = self.value_criterion(expected_value, next_value.detach())

        # policy loss plus regularization on the Gaussian mean, log-std and pre-tanh sample
        log_prob_target = expected_new_q_value - expected_value
        policy_loss = (log_prob * (log_prob - log_prob_target).detach()).mean()
        mean_loss = mean_lambda * mean.pow(2).mean()
        std_loss = std_lambda * log_std.pow(2).mean()
        z_loss = z_lambda * z.pow(2).sum(1).mean()
        policy_loss += mean_loss + std_loss + z_loss

        self.soft_q_optimizer.zero_grad()
        q_value_loss.backward()
        self.soft_q_optimizer.step()
        self.value_optimizer.zero_grad()
        value_loss.backward()
        self.value_optimizer.step()
        self.policy_optimizer.zero_grad()
        policy_loss.backward()
        self.policy_optimizer.step()

        # Polyak (soft) update of the target value network
        for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()):
            target_param.data.copy_(
                target_param.data * (1.0 - soft_tau) + param.data * soft_tau
            )

    def save(self, path):
        torch.save(self.value_net.state_dict(), path + "sac_value")
        torch.save(self.value_optimizer.state_dict(), path + "sac_value_optimizer")
        torch.save(self.soft_q_net.state_dict(), path + "sac_soft_q")
        torch.save(self.soft_q_optimizer.state_dict(), path + "sac_soft_q_optimizer")
        torch.save(self.policy_net.state_dict(), path + "sac_policy")
        torch.save(self.policy_optimizer.state_dict(), path + "sac_policy_optimizer")

    def load(self, path):
        self.value_net.load_state_dict(torch.load(path + "sac_value"))
        self.value_optimizer.load_state_dict(torch.load(path + "sac_value_optimizer"))
        self.target_value_net = copy.deepcopy(self.value_net)
        self.soft_q_net.load_state_dict(torch.load(path + "sac_soft_q"))
        self.soft_q_optimizer.load_state_dict(torch.load(path + "sac_soft_q_optimizer"))
        self.policy_net.load_state_dict(torch.load(path + "sac_policy"))
        self.policy_optimizer.load_state_dict(torch.load(path + "sac_policy_optimizer"))
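
common.memory.ReplayBuffer is imported above but is not part of this commit; the agent only relies on push, sample and len. A minimal sketch of that interface, assuming a plain uniform-sampling buffer (the class name and method signatures follow the calls in agent.py, the deque-based body is an assumption):

import random
from collections import deque

class ReplayBuffer:
    def __init__(self, capacity):
        self.buffer = deque(maxlen=capacity)  # oldest transitions are dropped once capacity is reached

    def push(self, state, action, reward, next_state, done):
        # store one transition, matching agent.memory.push(...) in task0_train.py
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        # uniform random minibatch, transposed so it unpacks as in SAC.update()
        batch = random.sample(self.buffer, batch_size)
        state, action, reward, next_state, done = zip(*batch)
        return state, action, reward, next_state, done

    def __len__(self):
        return len(self.buffer)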

codes/SAC/env.py Normal file

@@ -0,0 +1,29 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: JiangJi
Email: johnjim0816@gmail.com
Date: 2021-04-29 12:52:11
LastEditor: JiangJi
LastEditTime: 2021-04-29 12:52:31
Description:
Environment:
'''
import gym
import numpy as np


class NormalizedActions(gym.ActionWrapper):
    def action(self, action):
        # map an action in [-1, 1] onto the environment's [low, high] range
        low = self.action_space.low
        high = self.action_space.high
        action = low + (action + 1.0) * 0.5 * (high - low)
        action = np.clip(action, low, high)
        return action

    def reverse_action(self, action):
        # map an action in [low, high] back onto [-1, 1]
        low = self.action_space.low
        high = self.action_space.high
        action = 2 * (action - low) / (high - low) - 1
        action = np.clip(action, low, high)
        return action
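
NormalizedActions.action maps the policy's tanh output in [-1, 1] onto the environment's action bounds. A quick sanity check, assuming Pendulum-v0 with its torque range of [-2, 2] (the sample action values are only for illustration):

import gym
import numpy as np
from SAC.env import NormalizedActions

env = NormalizedActions(gym.make("Pendulum-v0"))
print(env.action_space.low, env.action_space.high)  # [-2.] [2.]
print(env.action(np.array([0.5])))    # -> [1.], three quarters of the way up the range
print(env.action(np.array([-1.0])))   # -> [-2.], the lower bound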

codes/SAC/model.py Normal file

@@ -0,0 +1,108 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: JiangJi
Email: johnjim0816@gmail.com
Date: 2021-04-29 12:53:58
LastEditor: JiangJi
LastEditTime: 2021-04-29 12:57:29
Description:
Environment:
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class ValueNet(nn.Module):
    '''State value network V(s)'''
    def __init__(self, state_dim, hidden_dim, init_w=3e-3):
        super(ValueNet, self).__init__()
        self.linear1 = nn.Linear(state_dim, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3 = nn.Linear(hidden_dim, 1)
        self.linear3.weight.data.uniform_(-init_w, init_w)
        self.linear3.bias.data.uniform_(-init_w, init_w)

    def forward(self, state):
        x = F.relu(self.linear1(state))
        x = F.relu(self.linear2(x))
        x = self.linear3(x)
        return x


class SoftQNet(nn.Module):
    '''Soft Q network Q(s, a)'''
    def __init__(self, num_inputs, num_actions, hidden_size, init_w=3e-3):
        super(SoftQNet, self).__init__()
        self.linear1 = nn.Linear(num_inputs + num_actions, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, 1)
        self.linear3.weight.data.uniform_(-init_w, init_w)
        self.linear3.bias.data.uniform_(-init_w, init_w)

    def forward(self, state, action):
        x = torch.cat([state, action], 1)
        x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))
        x = self.linear3(x)
        return x


class PolicyNet(nn.Module):
    '''Gaussian policy squashed by tanh'''
    def __init__(self, num_inputs, num_actions, hidden_size, init_w=3e-3, log_std_min=-20, log_std_max=2):
        super(PolicyNet, self).__init__()
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max
        self.linear1 = nn.Linear(num_inputs, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.mean_linear = nn.Linear(hidden_size, num_actions)
        self.mean_linear.weight.data.uniform_(-init_w, init_w)
        self.mean_linear.bias.data.uniform_(-init_w, init_w)
        self.log_std_linear = nn.Linear(hidden_size, num_actions)
        self.log_std_linear.weight.data.uniform_(-init_w, init_w)
        self.log_std_linear.bias.data.uniform_(-init_w, init_w)

    def forward(self, state):
        x = F.relu(self.linear1(state))
        x = F.relu(self.linear2(x))
        mean = self.mean_linear(x)
        log_std = self.log_std_linear(x)
        log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
        return mean, log_std

    def evaluate(self, state, epsilon=1e-6):
        # sample an action and its log-probability, with the tanh change-of-variables correction
        mean, log_std = self.forward(state)
        std = log_std.exp()
        normal = Normal(mean, std)
        z = normal.sample()
        action = torch.tanh(z)
        log_prob = normal.log_prob(z) - torch.log(1 - action.pow(2) + epsilon)
        log_prob = log_prob.sum(-1, keepdim=True)
        return action, log_prob, z, mean, log_std

    def get_action(self, state):
        # single-state action selection used when interacting with the environment
        state = torch.FloatTensor(state).unsqueeze(0).to(device)
        mean, log_std = self.forward(state)
        std = log_std.exp()
        normal = Normal(mean, std)
        z = normal.sample()
        action = torch.tanh(z)
        action = action.detach().cpu().numpy()
        return action[0]
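
The subtraction of torch.log(1 - action.pow(2) + epsilon) in evaluate is the change-of-variables correction for squashing the Gaussian sample through tanh, since d tanh(z)/dz = 1 - tanh(z)^2. A standalone autograd check of that identity (not part of the training code):

import torch

z = torch.tensor([0.3, -1.2], requires_grad=True)
a = torch.tanh(z)
a.sum().backward()                                   # z.grad now holds d tanh(z)/dz
print(torch.allclose(z.grad, 1 - a.detach() ** 2))   # True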

Binary image file (56 KiB) not shown.

codes/SAC/task0_train.py Normal file

@@ -0,0 +1,89 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: JiangJi
Email: johnjim0816@gmail.com
Date: 2021-04-29 12:59:22
LastEditor: JiangJi
LastEditTime: 2021-04-29 13:56:56
Description:
Environment:
'''
import sys, os
curr_path = os.path.dirname(__file__)
parent_path = os.path.dirname(curr_path)
sys.path.append(parent_path)  # add the parent path to sys.path so that common/ and SAC/ can be imported

import gym
import torch
import datetime

from SAC.env import NormalizedActions
from SAC.agent import SAC
from common.utils import save_results, make_dir
from common.plot import plot_rewards

curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")  # obtain current time


class SACConfig:
    def __init__(self) -> None:
        self.algo = 'SAC'
        self.env = 'Pendulum-v0'
        self.result_path = curr_path + "/outputs/" + self.env + '/' + curr_time + '/results/'  # path to save results
        self.model_path = curr_path + "/outputs/" + self.env + '/' + curr_time + '/models/'  # path to save models
        self.train_eps = 300  # number of training episodes
        self.train_steps = 500  # maximum steps per episode
        self.gamma = 0.99
        self.mean_lambda = 1e-3
        self.std_lambda = 1e-3
        self.z_lambda = 0.0
        self.soft_tau = 1e-2
        self.value_lr = 3e-4
        self.soft_q_lr = 3e-4
        self.policy_lr = 3e-4
        self.capacity = 1000000
        self.hidden_dim = 256
        self.batch_size = 128
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def train(cfg, env, agent):
    rewards = []
    ma_rewards = []  # moving average rewards
    for i_ep in range(cfg.train_eps):
        state = env.reset()
        ep_reward = 0
        for i_step in range(cfg.train_steps):
            action = agent.policy_net.get_action(state)
            next_state, reward, done, _ = env.step(action)
            agent.memory.push(state, action, reward, next_state, done)
            agent.update()  # uses update()'s default hyperparameters, which match the values in SACConfig
            state = next_state
            ep_reward += reward
            if done:
                break
        print(f"Episode:{i_ep+1}/{cfg.train_eps}, Reward:{ep_reward:.3f}")
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(0.9 * ma_rewards[-1] + 0.1 * ep_reward)
        else:
            ma_rewards.append(ep_reward)
    return rewards, ma_rewards


if __name__ == "__main__":
    cfg = SACConfig()
    env = NormalizedActions(gym.make("Pendulum-v0"))
    action_dim = env.action_space.shape[0]
    state_dim = env.observation_space.shape[0]
    agent = SAC(state_dim, action_dim, cfg)
    rewards, ma_rewards = train(cfg, env, agent)
    make_dir(cfg.result_path, cfg.model_path)
    agent.save(path=cfg.model_path)
    save_results(rewards, ma_rewards, tag='train', path=cfg.result_path)
    plot_rewards(rewards, ma_rewards, tag="train", env=cfg.env, algo=cfg.algo, path=cfg.result_path)
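
make_dir, save_results and plot_rewards come from the repo's shared common package and are not included in this commit. A minimal sketch of what the script relies on, where the function names and arguments follow the calls above but the bodies are assumptions:

import os
import numpy as np
import matplotlib.pyplot as plt

def make_dir(*paths):
    # create each output directory if it does not exist yet
    for path in paths:
        os.makedirs(path, exist_ok=True)

def save_results(rewards, ma_rewards, tag='train', path='./results/'):
    # store per-episode and moving-average rewards as .npy files
    np.save(path + f'{tag}_rewards.npy', rewards)
    np.save(path + f'{tag}_ma_rewards.npy', ma_rewards)

def plot_rewards(rewards, ma_rewards, tag='train', env='Pendulum-v0', algo='SAC', path='./results/'):
    # plot raw and smoothed reward curves and save the figure next to the results
    plt.figure()
    plt.title(f'{tag} rewards of {algo} on {env}')
    plt.plot(rewards, label='rewards')
    plt.plot(ma_rewards, label='moving average rewards')
    plt.xlabel('episode')
    plt.legend()
    plt.savefig(path + f'{tag}_rewards_curve.png')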