hot update A2C
@@ -1,56 +1,60 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: JiangJi
Email: johnjim0816@gmail.com
Date: 2021-05-03 22:16:08
LastEditor: JiangJi
LastEditTime: 2022-07-20 23:54:40
Description:
Environment:
'''
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
import numpy as np


class ActorCritic(nn.Module):
    ''' A2C network model, containing an Actor and a Critic
    '''
    def __init__(self, input_dim, output_dim, hidden_dim):
        super(ActorCritic, self).__init__()
        self.critic = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1)
        )

        self.actor = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, output_dim),
            nn.Softmax(dim=1),
        )

    def forward(self, x):
        value = self.critic(x)
        probs = self.actor(x)
        dist = Categorical(probs)
        return dist, value
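
# A minimal usage sketch of ActorCritic (the dimensions and helper name are
# illustrative, e.g. a CartPole-like task with 4 state features and 2 actions):
# forward() returns a Categorical distribution over actions together with the
# critic's state-value estimate.
def _demo_actor_critic():
    model = ActorCritic(input_dim=4, output_dim=2, hidden_dim=256)
    state = torch.randn(1, 4)          # a batch containing a single state
    dist, value = model(state)
    action = dist.sample()             # sampled action, shape (1,)
    log_prob = dist.log_prob(action)   # log pi(a|s), used in the policy-gradient loss
    return action.item(), log_prob, value
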
class A2C:
    ''' A2C algorithm
    '''
    def __init__(self, models, memories, cfg):
        self.n_actions = cfg['n_actions']
        self.gamma = cfg['gamma']
        self.device = torch.device(cfg['device'])
        self.memory = memories['ACMemory']
        self.actor = models['Actor'].to(self.device)
        self.critic = models['Critic'].to(self.device)
        self.actor_optim = torch.optim.Adam(self.actor.parameters(), lr=cfg['actor_lr'])
        self.critic_optim = torch.optim.Adam(self.critic.parameters(), lr=cfg['critic_lr'])
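
    # The Actor / Critic modules and the ACMemory buffer are constructed outside
    # this file; their exact form is an assumption based on how they are used here:
    # Actor must end in a Softmax so sample_action can feed its output to
    # np.random.choice, Critic must output a single value per state, and ACMemory
    # must provide sample() returning (value_pool, log_prob_pool, reward_pool) and
    # clear(). A hypothetical sketch of all three is given at the end of this file.
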
    def sample_action(self, state):
        state = torch.tensor(state, device=self.device, dtype=torch.float32).unsqueeze(dim=0)
        dist = self.actor(state)  # action probabilities; note that 'dist' needs requires_grad=True
        value = self.critic(state)
        value = value.detach().cpu().numpy().squeeze(0)[0]
        action = np.random.choice(self.n_actions, p=dist.detach().cpu().numpy().squeeze(0))  # p has shape (n_actions,)
        return action, value, dist

    def predict_action(self, state):
        state = torch.tensor(state, device=self.device, dtype=torch.float32).unsqueeze(dim=0)
        dist = self.actor(state)  # action probabilities; note that 'dist' needs requires_grad=True
        value = self.critic(state)
        value = value.detach().cpu().numpy().squeeze(0)[0]
        action = np.random.choice(self.n_actions, p=dist.detach().cpu().numpy().squeeze(0))  # p has shape (n_actions,)
        return action, value, dist
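
    # Deriving the quantities that update() and the memory expect from the returned
    # 'dist' (a probability tensor of shape (1, n_actions)) is an illustrative
    # recipe, since the training loop itself is not part of this file:
    #     log_prob = torch.log(dist.squeeze(0)[action])    # keeps requires_grad=True
    #     entropy += -(dist * torch.log(dist)).sum()       # accumulated across the rollout
    # log_prob, the returned value, and the reward are what get stored in the memory.
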
    def update(self, next_state, entropy):
        value_pool, log_prob_pool, reward_pool = self.memory.sample()
        next_state = torch.tensor(next_state, device=self.device, dtype=torch.float32).unsqueeze(dim=0)
        next_value = self.critic(next_state).detach().cpu().numpy().squeeze(0)[0]  # bootstrap value V(s_T)
        returns = np.zeros_like(reward_pool)
        for t in reversed(range(len(reward_pool))):
            next_value = reward_pool[t] + self.gamma * next_value  # G_t = r_t + gamma * G_{t+1}
            returns[t] = next_value
        returns = torch.tensor(returns, device=self.device, dtype=torch.float32)
        value_pool = torch.tensor(value_pool, device=self.device)
        advantages = returns - value_pool
        log_prob_pool = torch.stack(log_prob_pool)
        actor_loss = (-log_prob_pool * advantages).mean()
        critic_loss = 0.5 * advantages.pow(2).mean()
        tot_loss = actor_loss + critic_loss + 0.001 * entropy
        self.actor_optim.zero_grad()
        self.critic_optim.zero_grad()
        tot_loss.backward()
        self.actor_optim.step()
        self.critic_optim.step()
        self.memory.clear()
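
    # Worked example of the return recursion above (illustrative numbers only):
    # with gamma = 0.9, reward_pool = [1, 1, 1] and a bootstrap value V(s_T) = 0.5,
    # the backward pass gives G_2 = 1 + 0.9*0.5 = 1.45, G_1 = 1 + 0.9*1.45 = 2.305,
    # G_0 = 1 + 0.9*2.305 = 3.0745, i.e. returns = [3.0745, 2.305, 1.45].
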
    def save_model(self, path):
        from pathlib import Path
        # create path
        Path(path).mkdir(parents=True, exist_ok=True)
        torch.save(self.actor.state_dict(), f"{path}/actor_checkpoint.pt")
        torch.save(self.critic.state_dict(), f"{path}/critic_checkpoint.pt")

    def compute_returns(self, next_value, rewards, masks):
        R = next_value
        returns = []
        for step in reversed(range(len(rewards))):
            R = rewards[step] + self.gamma * R * masks[step]
            returns.insert(0, R)
        return returns
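
    # Worked example of compute_returns (illustrative numbers only): with
    # gamma = 0.9, rewards = [1, 1, 1], masks = [1, 1, 0] (the episode ends at the
    # last step) and next_value = 0.5, the terminal mask cuts the bootstrap, so
    # R_2 = 1, R_1 = 1 + 0.9*1 = 1.9, R_0 = 1 + 0.9*1.9 = 2.71,
    # i.e. returns = [2.71, 1.9, 1.0].
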
    def load_model(self, path):
        self.actor.load_state_dict(torch.load(f"{path}/actor_checkpoint.pt", map_location=self.device))
        self.critic.load_state_dict(torch.load(f"{path}/critic_checkpoint.pt", map_location=self.device))
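

# Hypothetical companion pieces, sketched here only to make the expected
# interfaces concrete; the real Actor, Critic, ACMemory and training loop live
# outside this file and may differ. The Actor ends in a Softmax (so its output
# can be fed to np.random.choice), the Critic outputs one scalar per state, and
# ACMemory simply stores (value, log_prob, reward) triples between updates.
class Actor(nn.Module):
    def __init__(self, n_states, n_actions, hidden_dim=256):
        super(Actor, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(n_states, hidden_dim), nn.ReLU(),
            nn.Linear(hidden_dim, n_actions), nn.Softmax(dim=1),
        )
    def forward(self, x):
        return self.fc(x)

class Critic(nn.Module):
    def __init__(self, n_states, hidden_dim=256):
        super(Critic, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(n_states, hidden_dim), nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )
    def forward(self, x):
        return self.fc(x)

class ACMemory:
    def __init__(self):
        self.values, self.log_probs, self.rewards = [], [], []
    def push(self, value, log_prob, reward):
        self.values.append(value)
        self.log_probs.append(log_prob)
        self.rewards.append(reward)
    def sample(self):
        return self.values, self.log_probs, self.rewards
    def clear(self):
        self.values, self.log_probs, self.rewards = [], [], []

def _demo_rollout_step(agent, state):
    ''' One illustrative interaction step for an A2C agent: sample an action,
        derive the log-prob and the entropy term from the returned probabilities,
        and hand back the pieces that the caller would push into the memory
        together with the environment reward before calling update().
    '''
    action, value, dist = agent.sample_action(state)
    log_prob = torch.log(dist.squeeze(0)[action])
    entropy = -(dist * torch.log(dist)).sum()
    return action, value, log_prob, entropy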