Update code: rename state_dim/action_dim to n_states/n_actions in the PPO agent
@@ -16,15 +16,15 @@ import torch.optim as optim
 from PPO.model import Actor,Critic
 from PPO.memory import PPOMemory
 class PPO:
-    def __init__(self, state_dim, action_dim,cfg):
+    def __init__(self, n_states, n_actions,cfg):
         self.gamma = cfg.gamma
         self.continuous = cfg.continuous
         self.policy_clip = cfg.policy_clip
         self.n_epochs = cfg.n_epochs
         self.gae_lambda = cfg.gae_lambda
         self.device = cfg.device
-        self.actor = Actor(state_dim, action_dim,cfg.hidden_dim).to(self.device)
-        self.critic = Critic(state_dim,cfg.hidden_dim).to(self.device)
+        self.actor = Actor(n_states, n_actions,cfg.hidden_dim).to(self.device)
+        self.critic = Critic(n_states,cfg.hidden_dim).to(self.device)
         self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=cfg.actor_lr)
         self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=cfg.critic_lr)
         self.memory = PPOMemory(cfg.batch_size)
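For reference, a minimal usage sketch (not part of the commit) showing how the renamed constructor would be called. The config fields mirror exactly the attributes this __init__ reads; the hyperparameter values are illustrative defaults, and the import path PPO.agent is an assumption since the diff does not show the file name of the class.

# Usage sketch, assuming the class from this diff lives in PPO/agent.py.
from types import SimpleNamespace

import torch

from PPO.agent import PPO  # hypothetical module path for the PPO class above

cfg = SimpleNamespace(
    gamma=0.99,            # discount factor
    continuous=False,      # discrete action space
    policy_clip=0.2,       # PPO clipping epsilon
    n_epochs=4,            # optimisation epochs per update
    gae_lambda=0.95,       # GAE lambda
    device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
    hidden_dim=256,        # hidden width passed to Actor/Critic
    actor_lr=3e-4,         # Adam learning rate for the actor
    critic_lr=1e-3,        # Adam learning rate for the critic
    batch_size=64,         # mini-batch size used by PPOMemory
)

# After this commit the positional arguments are n_states and n_actions
# (previously state_dim and action_dim); e.g. CartPole-v1 dimensions:
agent = PPO(n_states=4, n_actions=2, cfg=cfg)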