hot update A2C
@@ -24,7 +24,7 @@ class Launcher:
         save_results(res_dic, tag = 'train', path = cfg['result_path']) # save results
         plot_rewards(res_dic['rewards'], cfg, path = cfg['result_path'],tag = "train") # plot results
         # testing
-        env, agent = self.env_agent_config(cfg) # create new env for testing, sometimes can ignore this step
+        # env, agent = self.env_agent_config(cfg) # create new env for testing, sometimes can ignore this step
         agent.load_model(path = cfg['model_path']) # load model
         res_dic = self.test(cfg, env, agent)
         save_results(res_dic, tag='test',

@@ -5,7 +5,7 @@
 @Email: johnjim0816@gmail.com
 @Date: 2020-06-10 15:27:16
 @LastEditor: John
-LastEditTime: 2022-08-22 17:23:21
+LastEditTime: 2022-08-28 23:44:06
 @Discription:
 @Environment: python 3.7.7
 '''

@@ -39,12 +39,12 @@ class ReplayBufferQue:
     def __init__(self, capacity: int) -> None:
         self.capacity = capacity
         self.buffer = deque(maxlen=self.capacity)
-    def push(self,trainsitions):
+    def push(self,transitions):
         '''_summary_
         Args:
             trainsitions (tuple): _description_
         '''
-        self.buffer.append(trainsitions)
+        self.buffer.append(transitions)
     def sample(self, batch_size: int, sequential: bool = False):
         if batch_size > len(self.buffer):
             batch_size = len(self.buffer)

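For readers following the A2C update, here is a self-contained sketch of how a deque-based buffer like ReplayBufferQue above is typically used. The body of sample() beyond the lines shown in the diff (the sequential branch and the zip-style unpacking) and the transition tuple layout are assumptions for illustration, not necessarily the repository's exact code.

import random
from collections import deque

class ReplayBufferQue:
    '''Minimal deque-backed replay/rollout buffer (illustrative sketch).'''
    def __init__(self, capacity: int) -> None:
        self.buffer = deque(maxlen=capacity)
    def push(self, transitions: tuple):
        self.buffer.append(transitions)  # store one (state, action, reward, next_state, done) tuple
    def sample(self, batch_size: int, sequential: bool = False):
        batch_size = min(batch_size, len(self.buffer))
        if sequential:  # contiguous slice, useful for on-policy methods such as A2C
            start = random.randint(0, len(self.buffer) - batch_size)
            batch = [self.buffer[i] for i in range(start, start + batch_size)]
        else:           # uniform random sample, as in off-policy replay
            batch = random.sample(self.buffer, batch_size)
        return zip(*batch)  # regroup into per-field tuples

buf = ReplayBufferQue(capacity=100)
buf.push((0.0, 1, 1.0, 0.1, False))  # (state, action, reward, next_state, done)
states, actions, rewards, next_states, dones = buf.sample(batch_size=1)
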
@@ -5,7 +5,7 @@ Author: John
 Email: johnjim0816@gmail.com
 Date: 2021-03-12 21:14:12
 LastEditor: John
-LastEditTime: 2021-09-15 13:21:03
+LastEditTime: 2022-08-29 14:24:44
 Discription:
 Environment:
 '''

@@ -31,40 +31,45 @@ class MLP(nn.Module):
         x = F.relu(self.fc2(x))
         return self.fc3(x)
 
+class ActorSoftmax(nn.Module):
+    def __init__(self, input_dim, output_dim, hidden_dim=256):
+        super(ActorSoftmax, self).__init__()
+        self.fc1 = nn.Linear(input_dim, hidden_dim)
+        self.fc2 = nn.Linear(hidden_dim, output_dim)
+    def forward(self,state):
+        dist = F.relu(self.fc1(state))
+        dist = F.softmax(self.fc2(dist),dim=1)
+        return dist
 class Critic(nn.Module):
-    def __init__(self, n_obs, n_actions, hidden_size, init_w=3e-3):
-        super(Critic, self).__init__()
-
-        self.linear1 = nn.Linear(n_obs + n_actions, hidden_size)
-        self.linear2 = nn.Linear(hidden_size, hidden_size)
-        self.linear3 = nn.Linear(hidden_size, 1)
-        # randomly initialize to small values
-        self.linear3.weight.data.uniform_(-init_w, init_w)
-        self.linear3.bias.data.uniform_(-init_w, init_w)
-
-    def forward(self, state, action):
-        # concatenate along dimension 1
-        x = torch.cat([state, action], 1)
-        x = F.relu(self.linear1(x))
-        x = F.relu(self.linear2(x))
-        x = self.linear3(x)
-        return x
+    def __init__(self,input_dim,output_dim,hidden_dim=256):
+        super(Critic,self).__init__()
+        assert output_dim == 1 # critic must output a single value
+        self.fc1 = nn.Linear(input_dim, hidden_dim)
+        self.fc2 = nn.Linear(hidden_dim, output_dim)
+    def forward(self,state):
+        value = F.relu(self.fc1(state))
+        value = self.fc2(value)
+        return value
 
-class Actor(nn.Module):
-    def __init__(self, n_obs, n_actions, hidden_size, init_w=3e-3):
-        super(Actor, self).__init__()
-        self.linear1 = nn.Linear(n_obs, hidden_size)
-        self.linear2 = nn.Linear(hidden_size, hidden_size)
-        self.linear3 = nn.Linear(hidden_size, n_actions)
+class ActorCriticSoftmax(nn.Module):
+    def __init__(self, input_dim, output_dim, actor_hidden_dim=256,critic_hidden_dim=256):
+        super(ActorCriticSoftmax, self).__init__()
+
+        self.critic_fc1 = nn.Linear(input_dim, critic_hidden_dim)
+        self.critic_fc2 = nn.Linear(critic_hidden_dim, 1)
+
+        self.actor_fc1 = nn.Linear(input_dim, actor_hidden_dim)
+        self.actor_fc2 = nn.Linear(actor_hidden_dim, output_dim)
+
+    def forward(self, state):
+        # state = Variable(torch.from_numpy(state).float().unsqueeze(0))
+        value = F.relu(self.critic_fc1(state))
+        value = self.critic_fc2(value)
 
-        self.linear3.weight.data.uniform_(-init_w, init_w)
-        self.linear3.bias.data.uniform_(-init_w, init_w)
-
-    def forward(self, x):
-        x = F.relu(self.linear1(x))
-        x = F.relu(self.linear2(x))
-        x = torch.tanh(self.linear3(x))
-        return x
+        policy_dist = F.relu(self.actor_fc1(state))
+        policy_dist = F.softmax(self.actor_fc2(policy_dist), dim=1)
+
+        return value, policy_dist
+
+class ActorCritic(nn.Module):
+    def __init__(self, n_states, n_actions, hidden_dim=256):
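To make the role of the new shared network concrete, below is a short, self-contained sketch of action selection and a one-step A2C-style loss built on the ActorCriticSoftmax definition above. The environment sizes, the return target, and the 0.5 critic coefficient are illustrative assumptions, not the repository's agent code. Keeping both heads in one module means a single forward pass yields the state value and the action probabilities together, which is convenient for an A2C update.

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical

class ActorCriticSoftmax(nn.Module):
    def __init__(self, input_dim, output_dim, actor_hidden_dim=256, critic_hidden_dim=256):
        super(ActorCriticSoftmax, self).__init__()
        self.critic_fc1 = nn.Linear(input_dim, critic_hidden_dim)
        self.critic_fc2 = nn.Linear(critic_hidden_dim, 1)
        self.actor_fc1 = nn.Linear(input_dim, actor_hidden_dim)
        self.actor_fc2 = nn.Linear(actor_hidden_dim, output_dim)
    def forward(self, state):
        value = self.critic_fc2(F.relu(self.critic_fc1(state)))                         # state value V(s)
        policy_dist = F.softmax(self.actor_fc2(F.relu(self.actor_fc1(state))), dim=1)   # action probabilities
        return value, policy_dist

model = ActorCriticSoftmax(input_dim=4, output_dim=2)   # e.g. CartPole-v1 sizes (assumed)
state = torch.randn(1, 4)                               # batch of one state
value, probs = model(state)
dist = Categorical(probs)
action = dist.sample()                                  # sample an action from the policy

# illustrative one-step A2C loss with a made-up return target
target_return = torch.tensor([[1.0]])
advantage = target_return - value
actor_loss = -(dist.log_prob(action) * advantage.detach()).mean()
critic_loss = F.mse_loss(value, target_return)
loss = actor_loss + 0.5 * critic_loss                   # loss.backward() / optimizer.step() would follow in training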