add ddqn
@@ -5,7 +5,7 @@
 @Email: johnjim0816@gmail.com
 @Date: 2020-06-09 20:25:52
 @LastEditor: John
-@LastEditTime: 2020-06-14 11:43:17
+LastEditTime: 2020-09-02 01:19:13
 @Discription:
 @Environment: python 3.7.7
 '''
@@ -35,39 +35,41 @@ class DDPG:
         self.critic_optimizer = optim.Adam(
             self.critic.parameters(), lr=critic_lr)
         self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=actor_lr)
-        self.critic_criterion = nn.MSELoss()
         self.memory = ReplayBuffer(memory_capacity)
         self.batch_size = batch_size
         self.soft_tau = soft_tau
         self.gamma = gamma

     def select_action(self, state):
-        return self.actor.select_action(state)
+        state = torch.FloatTensor(state).unsqueeze(0).to(self.device)
+        action = self.actor(state)
+        # detach() cuts the tensor out of the backpropagation graph
+        return action.detach().cpu().numpy()[0, 0]

     def update(self):
         if len(self.memory) < self.batch_size:
             return
         state, action, reward, next_state, done = self.memory.sample(
             self.batch_size)
+        # convert all variables to tensors
         state = torch.FloatTensor(state).to(self.device)
         next_state = torch.FloatTensor(next_state).to(self.device)
         action = torch.FloatTensor(action).to(self.device)
         reward = torch.FloatTensor(reward).unsqueeze(1).to(self.device)
         done = torch.FloatTensor(np.float32(done)).unsqueeze(1).to(self.device)
+        # note that the critic takes (s_t, a) as input
         policy_loss = self.critic(state, self.actor(state))
         policy_loss = -policy_loss.mean()

         next_action = self.target_actor(next_state)
         target_value = self.target_critic(next_state, next_action.detach())
         expected_value = reward + (1.0 - done) * self.gamma * target_value
         expected_value = torch.clamp(expected_value, -np.inf, np.inf)

         value = self.critic(state, action)
-        value_loss = self.critic_criterion(value, expected_value.detach())
+        value_loss = nn.MSELoss()(value, expected_value.detach())

         self.actor_optimizer.zero_grad()
         policy_loss.backward()
         self.actor_optimizer.step()
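Note: update() depends on a ReplayBuffer class that this diff does not touch. Below is a minimal sketch matching the interface used above (len() and a sample() that returns a 5-tuple of batches); the deque-based implementation is an assumption, not the repository's actual code:

    import random
    from collections import deque

    class ReplayBuffer:
        '''FIFO experience buffer (sketch)'''
        def __init__(self, capacity):
            self.buffer = deque(maxlen=capacity)

        def push(self, state, action, reward, next_state, done):
            # store one transition
            self.buffer.append((state, action, reward, next_state, done))

        def sample(self, batch_size):
            batch = random.sample(self.buffer, batch_size)
            # transpose: list of transitions -> tuple of per-field batches
            state, action, reward, next_state, done = zip(*batch)
            return state, action, reward, next_state, done

        def __len__(self):
            return len(self.buffer)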
@@ -85,3 +87,8 @@ class DDPG:
             target_param.data * (1.0 - self.soft_tau) +
             param.data * self.soft_tau
         )
+
+    def save_model(self, path):
+        torch.save(self.target_actor.state_dict(), path)
+
+    def load_model(self, path):
+        self.actor.load_state_dict(torch.load(path))
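The hunk above shows only the tail of the soft target update. A minimal sketch of the full Polyak-averaging step it belongs to follows; the method name and loop structure are assumptions consistent with the expression shown:

    def soft_update(self):
        # slowly track the online networks: target <- (1 - tau) * target + tau * online
        for target_param, param in zip(self.target_critic.parameters(),
                                       self.critic.parameters()):
            target_param.data.copy_(
                target_param.data * (1.0 - self.soft_tau) +
                param.data * self.soft_tau
            )
        for target_param, param in zip(self.target_actor.parameters(),
                                       self.actor.parameters()):
            target_param.data.copy_(
                target_param.data * (1.0 - self.soft_tau) +
                param.data * self.soft_tau
            )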
@@ -5,7 +5,7 @@
 @Email: johnjim0816@gmail.com
 @Date: 2020-06-10 15:28:30
 @LastEditor: John
-@LastEditTime: 2020-06-12 22:49:18
+LastEditTime: 2020-09-01 10:57:36
 @Discription:
 @Environment: python 3.7.7
 '''
@@ -13,7 +13,8 @@ import gym
 import numpy as np

 class NormalizedActions(gym.ActionWrapper):
+    ''' rescale actions to the environment's [low, high] bounds
+    '''
     def action(self, action):
         low_bound = self.action_space.low
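The hunk cuts off after low_bound. A minimal sketch of the complete wrapper, under the usual assumption that the actor outputs actions in [-1, 1]; upper_bound and the final clip are assumptions, not shown in the diff:

    import gym
    import numpy as np

    class NormalizedActions(gym.ActionWrapper):
        '''rescale actions from [-1, 1] to the environment's [low, high]'''
        def action(self, action):
            low_bound = self.action_space.low
            upper_bound = self.action_space.high
            # affine map from [-1, 1] to [low_bound, upper_bound]
            action = low_bound + (action + 1.0) * 0.5 * (upper_bound - low_bound)
            return np.clip(action, low_bound, upper_bound)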
@@ -5,7 +5,7 @@
 @Email: johnjim0816@gmail.com
 @Date: 2020-06-11 20:58:21
 @LastEditor: John
-@LastEditTime: 2020-07-20 23:01:02
+LastEditTime: 2020-09-02 01:24:50
 @Discription:
 @Environment: python 3.7.7
 '''
@@ -31,15 +31,15 @@ def get_args():
     parser.add_argument("--memory_capacity", default=10000, type=int, help="capacity of Replay Memory")
     parser.add_argument("--batch_size", default=128, type=int, help="batch size of memory sampling")
-    parser.add_argument("--max_episodes", default=200, type=int)
-    parser.add_argument("--max_steps", default=200, type=int)
+    parser.add_argument("--train_eps", default=200, type=int)
+    parser.add_argument("--train_steps", default=200, type=int)
+    parser.add_argument("--eval_eps", default=200, type=int)  # max number of episodes for evaluation
+    parser.add_argument("--eval_steps", default=200, type=int)  # max steps per evaluation episode
     parser.add_argument("--target_update", default=4, type=int, help="how often (in episodes) to update the target net")
     config = parser.parse_args()
     return config

-if __name__ == "__main__":
+def train():
     cfg = get_args()
     env = NormalizedActions(gym.make("Pendulum-v0"))
@@ -54,11 +54,12 @@ if __name__ == "__main__":
     rewards = []
     moving_average_rewards = []
-    for i_episode in range(1, cfg.max_episodes+1):
+    ep_steps = []
+    for i_episode in range(1, cfg.train_eps+1):
         state = env.reset()
         ou_noise.reset()
         ep_reward = 0
-        for i_step in range(1, cfg.max_steps+1):
+        for i_step in range(1, cfg.train_steps+1):
             action = agent.select_action(state)
             action = ou_noise.get_action(action, i_step)  # i.e. the random process in the DDPG paper
             next_state, reward, done, _ = env.step(action)
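The training loop relies on ou_noise.get_action(action, i_step) for exploration, but the OUNoise class is not part of this diff. A minimal sketch of an Ornstein-Uhlenbeck noise process with that interface; all hyperparameter names and defaults here are assumptions:

    import numpy as np

    class OUNoise:
        '''temporally correlated exploration noise for DDPG (sketch)'''
        def __init__(self, action_space, mu=0.0, theta=0.15,
                     max_sigma=0.3, min_sigma=0.3, decay_period=100000):
            self.mu = mu
            self.theta = theta
            self.sigma = max_sigma
            self.max_sigma = max_sigma
            self.min_sigma = min_sigma
            self.decay_period = decay_period
            self.n_actions = action_space.shape[0]
            self.low = action_space.low
            self.high = action_space.high
            self.reset()

        def reset(self):
            self.obs = np.ones(self.n_actions) * self.mu

        def get_action(self, action, t=0):
            # evolve the OU state: dx = theta * (mu - x) dt + sigma * dW
            dx = self.theta * (self.mu - self.obs) + self.sigma * np.random.randn(self.n_actions)
            self.obs = self.obs + dx
            # anneal sigma over time, then clip to the action bounds
            self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(1.0, t / self.decay_period)
            return np.clip(action + self.obs, self.low, self.high)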
@@ -68,22 +69,79 @@ if __name__ == "__main__":
             state = next_state
             if done:
                 break
-        print('Episode:', i_episode, ' Reward: %i' % int(ep_reward),)
+        print('Episode:', i_episode, ' Reward: %i' % int(ep_reward), 'n_steps:', i_step)
+        ep_steps.append(i_step)
         rewards.append(ep_reward)
         # compute the moving-average (smoothed) reward
         if i_episode == 1:
             moving_average_rewards.append(ep_reward)
         else:
             moving_average_rewards.append(
                 0.9*moving_average_rewards[-1]+0.1*ep_reward)
     print('Complete!')
+    # save the model
+    import os
+    import numpy as np
+    save_path = os.path.dirname(__file__)+"/saved_model/"
+    if not os.path.exists(save_path):
+        os.mkdir(save_path)
+    agent.save_model(save_path+'checkpoint.pth')
     # save rewards and related results
     output_path = os.path.dirname(__file__)+"/result/"
     # create the folder if it does not exist
     if not os.path.exists(output_path):
         os.mkdir(output_path)
     np.save(output_path+"rewards.npy", rewards)
     np.save(output_path+"moving_average_rewards.npy", moving_average_rewards)
+    np.save(output_path+"steps.npy", ep_steps)
     plot(rewards)
     plot(moving_average_rewards, ylabel="moving_average_rewards")
+    plot(ep_steps, ylabel="steps_of_each_episode")
+
+def eval():
+    cfg = get_args()
+    env = NormalizedActions(gym.make("Pendulum-v0"))
+    # add action noise
+    ou_noise = OUNoise(env.action_space)
+    n_states = env.observation_space.shape[0]
+    n_actions = env.action_space.shape[0]
+    agent = DDPG(n_states, n_actions, critic_lr=1e-3,
+                 actor_lr=1e-4, gamma=0.99, soft_tau=1e-2, memory_capacity=100000, batch_size=128)
+    import os
+    save_path = os.path.dirname(__file__)+"/saved_model/"
+    if not os.path.exists(save_path):
+        os.mkdir(save_path)
+    agent.load_model(save_path+'checkpoint.pth')
+    rewards = []
+    moving_average_rewards = []
+    ep_steps = []
+    for i_episode in range(1, cfg.eval_eps+1):
+        state = env.reset()  # reset the environment
+        ep_reward = 0
+        for i_step in range(1, cfg.eval_steps+1):
+            action = agent.select_action(state)  # select an action from the current state
+            next_state, reward, done, _ = env.step(action)  # step the environment
+            ep_reward += reward
+            state = next_state  # move on to the next state
+            if done:
+                break
+        print('Episode:', i_episode, ' Reward: %i' %
+              int(ep_reward), 'n_steps:', i_step, 'done: ', done)
+        ep_steps.append(i_step)
+        rewards.append(ep_reward)
+        # compute the moving-average reward
+        if i_episode == 1:
+            moving_average_rewards.append(ep_reward)
+        else:
+            moving_average_rewards.append(
+                0.9*moving_average_rewards[-1]+0.1*ep_reward)
+    plot(rewards, save_fig=False)
+    plot(moving_average_rewards, ylabel="moving_average_rewards", save_fig=False)
+    plot(ep_steps, ylabel="steps_of_each_episode", save_fig=False)
+
+if __name__ == "__main__":
+    # train()
+    eval()
@@ -5,7 +5,7 @@
 @Email: johnjim0816@gmail.com
 @Date: 2020-06-10 15:03:59
 @LastEditor: John
-@LastEditTime: 2020-06-14 11:42:45
+LastEditTime: 2020-08-22 19:09:54
 @Discription:
 @Environment: python 3.7.7
 '''
@@ -20,11 +20,12 @@ class Critic(nn.Module):
         self.linear1 = nn.Linear(n_obs + n_actions, hidden_size)
         self.linear2 = nn.Linear(hidden_size, hidden_size)
         self.linear3 = nn.Linear(hidden_size, 1)
         # randomly initialize the output layer with small values
         self.linear3.weight.data.uniform_(-init_w, init_w)
         self.linear3.bias.data.uniform_(-init_w, init_w)

     def forward(self, state, action):
+        # concatenate along dimension 1
         x = torch.cat([state, action], 1)
         x = F.relu(self.linear1(x))
         x = F.relu(self.linear2(x))
@@ -46,11 +47,4 @@ class Actor(nn.Module):
         x = F.relu(self.linear2(x))
         x = F.tanh(self.linear3(x))
         return x
-
-    def select_action(self, state):
-        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-        state = torch.FloatTensor(state).unsqueeze(0).to(device)
-        # print(state)
-        action = self.forward(state)
-        return action.detach().cpu().numpy()[0, 0]
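The Actor's constructor is not shown in this diff. A minimal sketch consistent with the forward pass above and with the Critic's small-weight output initialization; the layer sizes and the init_w default are assumptions:

    class Actor(nn.Module):
        def __init__(self, n_obs, n_actions, hidden_size, init_w=3e-3):
            super(Actor, self).__init__()
            self.linear1 = nn.Linear(n_obs, hidden_size)
            self.linear2 = nn.Linear(hidden_size, hidden_size)
            self.linear3 = nn.Linear(hidden_size, n_actions)
            # small random initialization of the output layer, mirroring the Critic
            self.linear3.weight.data.uniform_(-init_w, init_w)
            self.linear3.bias.data.uniform_(-init_w, init_w)

        def forward(self, x):
            x = F.relu(self.linear1(x))
            x = F.relu(self.linear2(x))
            x = torch.tanh(self.linear3(x))  # actions in [-1, 1]
            return x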
@@ -5,39 +5,40 @@
 @Email: johnjim0816@gmail.com
 @Date: 2020-06-11 16:30:09
 @LastEditor: John
-@LastEditTime: 2020-06-12 11:34:52
+LastEditTime: 2020-09-02 01:20:03
 @Discription:
 @Environment: python 3.7.7
 '''
 import matplotlib.pyplot as plt
 import pandas as pd
-import seaborn as sns; sns.set()
+import seaborn as sns
 import numpy as np
 import os

-# def plot(item,ylabel='rewards'):
-#     plt.figure()
-#     plt.plot(np.arange(len(item)), item)
-#     plt.title(ylabel+' of DDPG')
-#     plt.ylabel(ylabel)
-#     plt.xlabel('episodes')
-#     plt.savefig(os.path.dirname(__file__)+"/result/"+ylabel+".png")
-#     plt.show()
-
-def plot(item, ylabel='rewards'):
-    df = pd.DataFrame(dict(time=np.arange(500),
-                           value=np.random.randn(500).cumsum()))
-    g = sns.relplot(x="time", y="value", kind="line", data=df)
-    g.fig.autofmt_xdate()
-    # time = range(len(item))
-    # sns.set(style="darkgrid", font_scale=1.5)
-    # sns.lineplot(time=time, data=item, color="r", condition="behavior_cloning")
-    # # sns.tsplot(time=time, data=x2, color="b", condition="dagger")
-    # plt.ylabel("Reward")
-    # plt.xlabel("Iteration Number")
-    # plt.title("Imitation Learning")
-
+def plot(item, ylabel='rewards', save_fig=True):
+    '''plot curves using seaborn
+    '''
+    sns.set()
+    plt.figure()
+    plt.plot(np.arange(len(item)), item)
+    plt.title(ylabel+' of DDPG')
+    plt.ylabel(ylabel)
+    plt.xlabel('episodes')
+    if save_fig:  # only write the figure to disk when requested
+        plt.savefig(os.path.dirname(__file__)+"/result/"+ylabel+".png")
+    plt.show()
+
+# def plot(item,ylabel='rewards'):
+#     df = pd.DataFrame(dict(time=np.arange(len(item)), value=item))
+#     g = sns.relplot(x="time", y="value", kind="line", data=df)
+#     # g.fig.autofmt_xdate()
+#     # sns.lineplot(time=time, data=item, color="r", condition="behavior_cloning")
+#     # # sns.tsplot(time=time, data=x2, color="b", condition="dagger")
+#     # plt.ylabel("Reward")
+#     # plt.xlabel("Iteration Number")
+#     # plt.title("Imitation Learning")
+#     plt.show()

 if __name__ == "__main__":
     output_path = os.path.dirname(__file__)+"/result/"
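The __main__ block is truncated after output_path. A plausible completion that reloads the arrays train() saves and re-plots them; the np.load calls mirror the np.save file names above, and save_fig=False is an assumption:

    if __name__ == "__main__":
        output_path = os.path.dirname(__file__)+"/result/"
        rewards = np.load(output_path+"rewards.npy")
        moving_average_rewards = np.load(output_path+"moving_average_rewards.npy")
        steps = np.load(output_path+"steps.npy")
        plot(rewards, save_fig=False)
        plot(moving_average_rewards, ylabel="moving_average_rewards", save_fig=False)
        plot(steps, ylabel="steps_of_each_episode", save_fig=False)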
Binary files changed (not shown):
- image: 32 KiB -> 56 KiB
- image: 47 KiB -> 67 KiB
- codes/ddpg/result/steps.npy (new file)
- codes/ddpg/result/steps_of_each_episode.png (new file, 27 KiB)
- codes/ddpg/saved_model/checkpoint.pth (new file)