hot update A2C
This commit is contained in:
@@ -5,7 +5,7 @@ Author: John
|
||||
Email: johnjim0816@gmail.com
|
||||
Date: 2021-03-12 21:14:12
|
||||
LastEditor: John
|
||||
LastEditTime: 2021-09-15 13:21:03
|
||||
LastEditTime: 2022-08-29 14:24:44
|
||||
Description:
|
||||
Environment:
|
||||
'''
|
||||
@@ -31,40 +31,45 @@ class MLP(nn.Module):
|
||||
x = F.relu(self.fc2(x))
|
||||
return self.fc3(x)
|
||||
|
||||
class ActorSoftmax(nn.Module):
    """Policy head: maps a batch of states to a softmax distribution over actions.

    Args:
        input_dim: size of the state vector.
        output_dim: number of discrete actions.
        hidden_dim: width of the single hidden layer (default 256).
    """

    def __init__(self, input_dim, output_dim, hidden_dim=256):
        super(ActorSoftmax, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, output_dim)

    def forward(self, state):
        # hidden features -> action logits -> per-row probability simplex
        hidden = F.relu(self.fc1(state))
        probs = F.softmax(self.fc2(hidden), dim=1)
        return probs
|
||||
class Critic(nn.Module):
    """Q-value network: scores a (state, action) pair with a single scalar.

    Args:
        n_obs: size of the observation/state vector.
        n_actions: size of the action vector.
        hidden_size: width of the two hidden layers.
        init_w: half-range of the uniform init for the output layer (default 3e-3).
    """

    def __init__(self, n_obs, n_actions, hidden_size, init_w=3e-3):
        super(Critic, self).__init__()
        self.linear1 = nn.Linear(n_obs + n_actions, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, 1)
        # keep the initial Q estimates small: uniform init of the output layer
        self.linear3.weight.data.uniform_(-init_w, init_w)
        self.linear3.bias.data.uniform_(-init_w, init_w)

    def forward(self, state, action):
        # join state and action along the feature dimension (dim=1)
        q = torch.cat([state, action], 1)
        q = F.relu(self.linear1(q))
        q = F.relu(self.linear2(q))
        return self.linear3(q)
|
||||
def __init__(self, input_dim, output_dim, hidden_dim=256):
    """Two-layer state-value head.

    Args:
        input_dim: size of the state vector.
        output_dim: must be 1 — the critic emits a single state value.
        hidden_dim: width of the hidden layer (default 256).

    Raises:
        ValueError: if output_dim is not 1.
    """
    super(Critic, self).__init__()
    # A critic estimates V(s), so it must output exactly one value.
    # Use an explicit raise instead of assert: asserts are stripped under -O.
    if output_dim != 1:
        raise ValueError("critic must output a single value")
    self.fc1 = nn.Linear(input_dim, hidden_dim)
    self.fc2 = nn.Linear(hidden_dim, output_dim)
|
||||
def forward(self, state):
    """Return the estimated state value V(state) for a batch of states."""
    hidden = F.relu(self.fc1(state))
    return self.fc2(hidden)
|
||||
|
||||
class Actor(nn.Module):
    """Deterministic policy network: maps a state to a tanh-bounded action.

    Args:
        n_obs: size of the observation/state vector.
        n_actions: size of the action vector.
        hidden_size: width of the two hidden layers.
        init_w: half-range of the uniform init for the output layer (default 3e-3).
    """

    def __init__(self, n_obs, n_actions, hidden_size, init_w=3e-3):
        super(Actor, self).__init__()
        self.linear1 = nn.Linear(n_obs, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, n_actions)
        # small uniform init on the output layer keeps initial actions near zero
        self.linear3.weight.data.uniform_(-init_w, init_w)
        self.linear3.bias.data.uniform_(-init_w, init_w)

    def forward(self, x):
        x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))
        # tanh bounds each action component to [-1, 1]
        x = torch.tanh(self.linear3(x))
        return x


class ActorCriticSoftmax(nn.Module):
    """Shared-input actor-critic: separate heads for V(s) and a softmax policy.

    Args:
        input_dim: size of the state vector.
        output_dim: number of discrete actions.
        actor_hidden_dim: hidden width of the actor head (default 256).
        critic_hidden_dim: hidden width of the critic head (default 256).
    """

    def __init__(self, input_dim, output_dim, actor_hidden_dim=256, critic_hidden_dim=256):
        super(ActorCriticSoftmax, self).__init__()

        self.critic_fc1 = nn.Linear(input_dim, critic_hidden_dim)
        self.critic_fc2 = nn.Linear(critic_hidden_dim, 1)

        self.actor_fc1 = nn.Linear(input_dim, actor_hidden_dim)
        self.actor_fc2 = nn.Linear(actor_hidden_dim, output_dim)

    def forward(self, state):
        """Return (value, policy_dist): V(s) of shape (batch, 1) and action
        probabilities of shape (batch, output_dim)."""
        value = F.relu(self.critic_fc1(state))
        value = self.critic_fc2(value)

        policy_dist = F.relu(self.actor_fc1(state))
        policy_dist = F.softmax(self.actor_fc2(policy_dist), dim=1)

        return value, policy_dist
|
||||
|
||||
class ActorCritic(nn.Module):
|
||||
def __init__(self, n_states, n_actions, hidden_dim=256):
|
||||
|
||||
Reference in New Issue
Block a user