add ddqn
@@ -5,7 +5,7 @@
@Email: johnjim0816@gmail.com
@Date: 2020-06-09 20:25:52
@LastEditor: John
@LastEditTime: 2020-06-14 11:43:17
LastEditTime: 2020-09-02 01:19:13
@Discription:
@Environment: python 3.7.7
'''
@@ -35,39 +35,41 @@ class DDPG:
        self.critic_optimizer = optim.Adam(
            self.critic.parameters(), lr=critic_lr)
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=actor_lr)
        self.critic_criterion = nn.MSELoss()
        self.memory = ReplayBuffer(memory_capacity)
        self.batch_size = batch_size
        self.soft_tau = soft_tau
        self.gamma = gamma

    def select_action(self, state):
        return self.actor.select_action(state)
        state = torch.FloatTensor(state).unsqueeze(0).to(self.device)
        action = self.actor(state)
        # torch.detach() is used to cut off backpropagation
        return action.detach().cpu().numpy()[0, 0]

    def update(self):
        if len(self.memory) < self.batch_size:
            return
        state, action, reward, next_state, done = self.memory.sample(
            self.batch_size)
        # convert all sampled variables to tensors
        state = torch.FloatTensor(state).to(self.device)
        next_state = torch.FloatTensor(next_state).to(self.device)
        action = torch.FloatTensor(action).to(self.device)
        reward = torch.FloatTensor(reward).unsqueeze(1).to(self.device)
        done = torch.FloatTensor(np.float32(done)).unsqueeze(1).to(self.device)

        # note that the critic takes (s_t, a) as its input
        policy_loss = self.critic(state, self.actor(state))
        policy_loss = -policy_loss.mean()

        next_action = self.target_actor(next_state)
        target_value = self.target_critic(next_state, next_action.detach())
        expected_value = reward + (1.0 - done) * self.gamma * target_value
        expected_value = torch.clamp(expected_value, -np.inf, np.inf)

        value = self.critic(state, action)

        value_loss = self.critic_criterion(value, expected_value.detach())
        value_loss = nn.MSELoss()(value, expected_value.detach())

        self.actor_optimizer.zero_grad()
        policy_loss.backward()
        self.actor_optimizer.step()
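Written out as equations (a restatement of the code above, with \gamma = self.gamma and d = done), the two losses computed in update() are the standard DDPG objectives, averaged over the sampled batch:

    \mathcal{L}_{critic} = \big( Q(s, a) - \big[\, r + \gamma (1 - d)\, Q'(s', \mu'(s')) \,\big] \big)^2, \qquad \mathcal{L}_{actor} = -\, Q(s, \mu(s)),

where Q and \mu are self.critic and self.actor, Q' and \mu' are self.target_critic and self.target_actor, and expected_value in the code is the bracketed target, detached so that no gradient flows into the target networks.
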
@@ -85,3 +87,8 @@ class DDPG:
                target_param.data * (1.0 - self.soft_tau) +
                param.data * self.soft_tau
            )
    def save_model(self, path):
        torch.save(self.target_actor.state_dict(), path)

    def load_model(self, path):
        self.actor.load_state_dict(torch.load(path))
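
For context, here is a minimal sketch of the loop that would exercise the methods touched by this commit (select_action, memory, update, save_model). It is not part of the commit: the push() signature, the episode count, and the checkpoint path are assumptions; only the agent-side calls appear in the diff above.

    def train(agent, env, n_episodes=100):
        # interaction loop for a gym-style environment (pre-0.26 step() API)
        for _ in range(n_episodes):
            state = env.reset()
            done = False
            while not done:
                action = agent.select_action(state)          # actor picks a continuous action
                next_state, reward, done, _ = env.step(action)
                # push() signature assumed from the sample() call in update()
                agent.memory.push(state, action, reward, next_state, done)
                agent.update()                                # one actor/critic update per env step
                state = next_state
        agent.save_model('ddpg_checkpoint.pth')               # illustrative path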