JohnJim0816
2021-03-31 15:37:09 +08:00
parent 6a92f97138
commit b6f63a91bf
65 changed files with 1244 additions and 459 deletions


@@ -5,7 +5,7 @@ Author: John
 Email: johnjim0816@gmail.com
 Date: 2021-03-12 21:14:12
 LastEditor: John
-LastEditTime: 2021-03-24 22:15:00
+LastEditTime: 2021-03-31 13:49:06
 Description:
 Environment:
 '''
@@ -15,15 +15,15 @@ import torch.nn.functional as F
 from torch.distributions import Categorical
 class MLP(nn.Module):
-    def __init__(self, state_dim,action_dim,hidden_dim=128):
+    def __init__(self, input_dim,output_dim,hidden_dim=128):
         """ Initialize the Q-network as a fully connected network
-        state_dim: number of input features, i.e. the dimension of the environment state
-        action_dim: total number of available actions
+        input_dim: number of input features, i.e. the dimension of the environment state
+        output_dim: total number of available actions
         """
         super(MLP, self).__init__()
-        self.fc1 = nn.Linear(state_dim, hidden_dim) # input layer
+        self.fc1 = nn.Linear(input_dim, hidden_dim) # input layer
         self.fc2 = nn.Linear(hidden_dim,hidden_dim) # hidden layer
-        self.fc3 = nn.Linear(hidden_dim, action_dim) # output layer
+        self.fc3 = nn.Linear(hidden_dim, output_dim) # output layer
     def forward(self, x):
         # activation functions for each layer
@@ -32,10 +32,10 @@ class MLP(nn.Module):
         return self.fc3(x)
 class Critic(nn.Module):
-    def __init__(self, n_obs, action_dim, hidden_size, init_w=3e-3):
+    def __init__(self, n_obs, output_dim, hidden_size, init_w=3e-3):
         super(Critic, self).__init__()
-        self.linear1 = nn.Linear(n_obs + action_dim, hidden_size)
+        self.linear1 = nn.Linear(n_obs + output_dim, hidden_size)
         self.linear2 = nn.Linear(hidden_size, hidden_size)
         self.linear3 = nn.Linear(hidden_size, 1)
         # randomly initialize weights to small values
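
The rename from state_dim/action_dim to input_dim/output_dim makes MLP a generic head that any agent in the repo can reuse. A minimal usage sketch, assuming a CartPole-style task (4 state features, 2 discrete actions); the variable names are illustrative, not from this commit:

import torch

net = MLP(input_dim=4, output_dim=2, hidden_dim=128)  # hypothetical CartPole dimensions
state = torch.randn(1, 4)         # dummy batch containing a single state
q_values = net(state)             # shape (1, 2): one Q-value per action
action = q_values.argmax(dim=1)   # greedy action selection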
@@ -51,11 +51,11 @@ class Critic(nn.Module):
         return x
 class Actor(nn.Module):
-    def __init__(self, n_obs, action_dim, hidden_size, init_w=3e-3):
+    def __init__(self, n_obs, output_dim, hidden_size, init_w=3e-3):
         super(Actor, self).__init__()
         self.linear1 = nn.Linear(n_obs, hidden_size)
         self.linear2 = nn.Linear(hidden_size, hidden_size)
-        self.linear3 = nn.Linear(hidden_size, action_dim)
+        self.linear3 = nn.Linear(hidden_size, output_dim)
         self.linear3.weight.data.uniform_(-init_w, init_w)
         self.linear3.bias.data.uniform_(-init_w, init_w)
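
Note that the Critic's first layer takes n_obs + output_dim inputs, so its forward pass (outside this hunk) presumably concatenates state and action before linear1, the standard pattern for a DDPG-style Q-critic; the uniform_(-init_w, init_w) initialization keeps the final layer's initial outputs near zero, which stabilizes early training. A sketch of that pattern, with the ReLU activations assumed rather than taken from the diff:

import torch
import torch.nn.functional as F

def critic_forward(critic, state, action):
    x = torch.cat([state, action], dim=1)  # concatenate along the feature axis
    x = F.relu(critic.linear1(x))
    x = F.relu(critic.linear2(x))
    return critic.linear3(x)               # scalar Q(s, a) per batch element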
@@ -67,18 +67,18 @@ class Actor(nn.Module):
         return x
 class ActorCritic(nn.Module):
-    def __init__(self, state_dim, action_dim, hidden_dim=256):
+    def __init__(self, input_dim, output_dim, hidden_dim=256):
         super(ActorCritic, self).__init__()
         self.critic = nn.Sequential(
-            nn.Linear(state_dim, hidden_dim),
+            nn.Linear(input_dim, hidden_dim),
             nn.ReLU(),
             nn.Linear(hidden_dim, 1)
         )
         self.actor = nn.Sequential(
-            nn.Linear(state_dim, hidden_dim),
+            nn.Linear(input_dim, hidden_dim),
             nn.ReLU(),
-            nn.Linear(hidden_dim, action_dim),
+            nn.Linear(hidden_dim, output_dim),
             nn.Softmax(dim=1),
         )
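
ActorCritic pairs a state-value critic with a softmax policy head; since nn.Softmax(dim=1) normalizes over the second axis, the actor expects batched 2-D input. A hedged usage sketch (no forward or act method appears in this diff, so the calls below go through the submodules directly), reusing the Categorical import from the top of the file:

import torch
from torch.distributions import Categorical

model = ActorCritic(input_dim=4, output_dim=2)  # illustrative dimensions
state = torch.randn(1, 4)                       # batch of one state
probs = model.actor(state)                      # action probabilities, shape (1, 2)
dist = Categorical(probs)
action = dist.sample()                          # sampled discrete action
value = model.critic(state)                     # state value V(s), shape (1, 1)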


@@ -5,13 +5,13 @@ Author: John
 Email: johnjim0816@gmail.com
 Date: 2020-10-07 20:57:11
 LastEditor: John
-LastEditTime: 2021-03-13 11:31:49
+LastEditTime: 2021-03-31 14:05:52
 Description:
 Environment:
 '''
 import matplotlib.pyplot as plt
 import seaborn as sns
-def plot_rewards(rewards,ma_rewards,tag="train",algo = "On-Policy First-Visit MC Control",path='./'):
+def plot_rewards(rewards,ma_rewards,tag="train",algo = "DQN",path='./'):
     sns.set()
     plt.title("average learning curve of {}".format(algo))
     plt.xlabel('episodes')
@@ -20,4 +20,13 @@ def plot_rewards(rewards,ma_rewards,tag="train",algo = "On-Policy First-Visit MC
     plt.legend()
     plt.savefig(path+"rewards_curve_{}".format(tag))
     plt.show()
+def plot_losses(losses,algo = "DQN",path='./'):
+    sns.set()
+    plt.title("loss curve of {}".format(algo))
+    plt.xlabel('episodes')
+    plt.plot(losses,label='losses')
+    plt.legend()
+    plt.savefig(path+"losses_curve")
+    plt.show()
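
A quick usage sketch for both helpers, with dummy data for illustration; plot_losses saves losses_curve alongside the per-tag rewards plot:

rewards = [10, 12, 15, 22, 30]             # dummy per-episode rewards
ma_rewards = [10, 10.2, 10.7, 11.8, 13.6]  # dummy moving-average rewards
plot_rewards(rewards, ma_rewards, tag="train", algo="DQN", path='./')
plot_losses([1.2, 0.8, 0.5, 0.3], algo="DQN", path='./')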