Update algorithm template
@@ -5,7 +5,7 @@
 @Email: johnjim0816@gmail.com
 @Date: 2020-06-09 20:25:52
 @LastEditor: John
-LastEditTime: 2022-06-09 19:04:44
+LastEditTime: 2022-09-27 15:43:21
 @Discription:
 @Environment: python 3.7.7
 '''
@@ -14,96 +14,45 @@ import numpy as np
 import torch
 import torch.nn as nn
 import torch.optim as optim
 import torch.nn.functional as F
-class ReplayBuffer:
-    def __init__(self, capacity):
-        self.capacity = capacity # capacity of the replay buffer
-        self.buffer = [] # the buffer itself
-        self.position = 0
-
-    def push(self, state, action, reward, next_state, done):
-        ''' The buffer acts as a queue: once capacity is exceeded,
-            the oldest stored transition is overwritten
-        '''
-        if len(self.buffer) < self.capacity:
-            self.buffer.append(None)
-        self.buffer[self.position] = (state, action, reward, next_state, done)
-        self.position = (self.position + 1) % self.capacity
-
-    def sample(self, batch_size):
-        batch = random.sample(self.buffer, batch_size) # randomly sample a minibatch of transitions
-        state, action, reward, next_state, done = zip(*batch) # unzip into states, actions, etc.
-        return state, action, reward, next_state, done
-
-    def __len__(self):
-        ''' return the number of transitions currently stored
-        '''
-        return len(self.buffer)
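For context on the removed buffer: it is a fixed-capacity circular queue. A minimal usage sketch, assuming a toy 3-dimensional state and 1-dimensional action (all sizes and values here are illustrative, not from the repo; `random` is assumed imported at module level as the class requires):

    import numpy as np

    buffer = ReplayBuffer(capacity=10000)
    for _ in range(64):
        state, next_state = np.random.randn(3), np.random.randn(3)  # toy states
        action = np.random.uniform(-1, 1, size=1)                   # toy 1-d action
        buffer.push(state, action, 0.0, next_state, False)          # reward=0.0, done=False
    # returns 5 tuples, each of length 32
    states, actions, rewards, next_states, dones = buffer.sample(batch_size=32)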
-class Actor(nn.Module):
-    def __init__(self, n_states, n_actions, hidden_dim, init_w=3e-3):
-        super(Actor, self).__init__()
-        self.linear1 = nn.Linear(n_states, hidden_dim)
-        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
-        self.linear3 = nn.Linear(hidden_dim, n_actions)
-
-        self.linear3.weight.data.uniform_(-init_w, init_w)
-        self.linear3.bias.data.uniform_(-init_w, init_w)
-
-    def forward(self, x):
-        x = F.relu(self.linear1(x))
-        x = F.relu(self.linear2(x))
-        x = torch.tanh(self.linear3(x))
-        return x
-class Critic(nn.Module):
-    def __init__(self, n_states, n_actions, hidden_dim, init_w=3e-3):
-        super(Critic, self).__init__()
-
-        self.linear1 = nn.Linear(n_states + n_actions, hidden_dim)
-        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
-        self.linear3 = nn.Linear(hidden_dim, 1)
-        # randomly initialize the output layer with small values
-        self.linear3.weight.data.uniform_(-init_w, init_w)
-        self.linear3.bias.data.uniform_(-init_w, init_w)
-
-    def forward(self, state, action):
-        # concatenate along dimension 1
-        x = torch.cat([state, action], 1)
-        x = F.relu(self.linear1(x))
-        x = F.relu(self.linear2(x))
-        x = self.linear3(x)
-        return x
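The Actor squashes its output with tanh, so actions land in [-1, 1]; the Critic scores a state-action pair concatenated along dimension 1. A quick shape check, with illustrative sizes (n_states=3, n_actions=1, hidden_dim=256 are assumptions):

    actor = Actor(n_states=3, n_actions=1, hidden_dim=256)
    critic = Critic(n_states=3, n_actions=1, hidden_dim=256)
    s = torch.randn(32, 3)   # batch of 32 states
    a = actor(s)             # shape (32, 1), entries in [-1, 1]
    q = critic(s, a)         # shape (32, 1), Q-value estimates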
-class DDPG:
-    def __init__(self, n_states, n_actions, cfg):
-        self.device = torch.device(cfg.device)
-        self.critic = Critic(n_states, n_actions, cfg.hidden_dim).to(self.device)
-        self.actor = Actor(n_states, n_actions, cfg.hidden_dim).to(self.device)
-        self.target_critic = Critic(n_states, n_actions, cfg.hidden_dim).to(self.device)
-        self.target_actor = Actor(n_states, n_actions, cfg.hidden_dim).to(self.device)
-        # copy parameters to the target networks
-        for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):
-            target_param.data.copy_(param.data)
-        for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):
-            target_param.data.copy_(param.data)
-        self.critic_optimizer = optim.Adam(
-            self.critic.parameters(), lr=cfg.critic_lr)
-        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=cfg.actor_lr)
-        self.memory = ReplayBuffer(cfg.memory_capacity)
-        self.batch_size = cfg.batch_size
-        self.soft_tau = cfg.soft_tau # soft update coefficient
-        self.gamma = cfg.gamma
+class DDPG:
+    def __init__(self, models, memories, cfg):
+        self.device = torch.device(cfg['device'])
+        self.critic = models['critic'].to(self.device)
+        self.target_critic = models['critic'].to(self.device)
+        self.actor = models['actor'].to(self.device)
+        self.target_actor = models['actor'].to(self.device)
+        # copy weights from critic to target_critic
+        for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):
+            target_param.data.copy_(param.data)
+        # copy weights from actor to target_actor
+        for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):
+            target_param.data.copy_(param.data)
+        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=cfg['critic_lr'])
+        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=cfg['actor_lr'])
+        self.memory = memories['memory']
+        self.batch_size = cfg['batch_size']
+        self.gamma = cfg['gamma']
+        self.tau = cfg['tau']
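The refactor moves construction of the networks and the buffer out of the agent and into the caller. A sketch of what the caller presumably assembles (the dictionary keys match the new constructor; the concrete classes and values are assumptions). One caveat worth flagging: as written, self.critic and self.target_critic are assigned the same module object, so a caller that wants an independent target would need a separate instance, e.g. via copy.deepcopy:

    cfg = {'device': 'cpu', 'critic_lr': 1e-3, 'actor_lr': 1e-4,   # illustrative values
           'batch_size': 128, 'gamma': 0.99, 'tau': 1e-2}
    models = {'actor': Actor(n_states=3, n_actions=1, hidden_dim=256),
              'critic': Critic(n_states=3, n_actions=1, hidden_dim=256)}
    memories = {'memory': ReplayBuffer(capacity=100000)}
    agent = DDPG(models, memories, cfg)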
-    def choose_action(self, state):
+    def sample_action(self, state):
         state = torch.FloatTensor(state).unsqueeze(0).to(self.device)
         action = self.actor(state)
         return action.detach().cpu().numpy()[0, 0]
+
+    @torch.no_grad()
+    def predict_action(self, state):
+        ''' predict action
+        '''
+        state = torch.FloatTensor(state).unsqueeze(0).to(self.device)
+        action = self.actor(state)
+        return action.cpu().numpy()[0, 0]
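Both methods return the actor's deterministic output, so DDPG's exploration noise is presumably added by the caller during training. A hedged sketch of a Gaussian-noise wrapper (the function name and noise scale are illustrative, not from the repo):

    import numpy as np

    def explore(agent, state, noise_std=0.1):
        action = agent.sample_action(state)  # deterministic policy output in [-1, 1]
        # additive Gaussian noise, clipped back into the valid action range
        return np.clip(action + np.random.normal(0.0, noise_std), -1.0, 1.0)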
     def update(self):
-        if len(self.memory) < self.batch_size: # do not update the policy until the memory holds at least one batch
+        if len(self.memory) < self.batch_size: # when memory size is less than batch size, return
             return
-        # randomly sample a minibatch of transitions from the replay memory
+        # sample a random minibatch of N transitions from R
         state, action, reward, next_state, done = self.memory.sample(self.batch_size)
-        # convert to tensors
+        # convert to tensor
         state = torch.FloatTensor(np.array(state)).to(self.device)
         next_state = torch.FloatTensor(np.array(next_state)).to(self.device)
         action = torch.FloatTensor(np.array(action)).to(self.device)
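The hunk below skips the loss computation that sits between the tensor conversion and the optimizer step. For reference, the textbook DDPG losses look roughly like the following inside update() (this is the standard algorithm, not necessarily this file verbatim; it assumes reward and done have also been converted to FloatTensors of shape (batch, 1)):

    # critic target: y = r + gamma * (1 - done) * Q'(s', mu'(s'))
    next_action = self.target_actor(next_state)
    target_q = self.target_critic(next_state, next_action.detach())
    expected_q = reward + self.gamma * (1.0 - done) * target_q
    value_loss = nn.MSELoss()(self.critic(state, action), expected_q.detach())
    # actor objective: maximize Q(s, mu(s)), i.e. minimize its negative
    policy_loss = -self.critic(state, self.actor(state)).mean()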
@@ -126,19 +75,22 @@ class DDPG:
         self.critic_optimizer.zero_grad()
         value_loss.backward()
         self.critic_optimizer.step()
         # soft update
         for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):
             target_param.data.copy_(
-                target_param.data * (1.0 - self.soft_tau) +
-                param.data * self.soft_tau
+                target_param.data * (1.0 - self.tau) +
+                param.data * self.tau
             )
         for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):
             target_param.data.copy_(
-                target_param.data * (1.0 - self.soft_tau) +
-                param.data * self.soft_tau
+                target_param.data * (1.0 - self.tau) +
+                param.data * self.tau
             )
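The rename from soft_tau to tau leaves the update rule itself unchanged: each target parameter is Polyak-averaged toward its online counterpart,

    theta_target <- tau * theta + (1 - tau) * theta_target

so a small tau (e.g. 1e-2) makes the target networks trail the online ones slowly, which stabilizes the critic's bootstrapped targets.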
-    def save(self, path):
-        torch.save(self.actor.state_dict(), path + 'checkpoint.pt')
+    def save_model(self, path):
+        from pathlib import Path
+        # create path
+        Path(path).mkdir(parents=True, exist_ok=True)
+        torch.save(self.actor.state_dict(), f"{path}/actor_checkpoint.pt")

-    def load(self, path):
-        self.actor.load_state_dict(torch.load(path + 'checkpoint.pt'))
+    def load_model(self, path):
+        self.actor.load_state_dict(torch.load(f"{path}/actor_checkpoint.pt"))
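A round-trip sketch with the renamed methods (the directory name is illustrative):

    agent.save_model('outputs/ddpg')   # creates the directory, writes outputs/ddpg/actor_checkpoint.pt
    agent.load_model('outputs/ddpg')   # restores the actor weights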