update codes
This commit is contained in:
@@ -15,15 +15,15 @@ import torch.nn.functional as F
|
||||
from torch.distributions import Categorical
|
||||
|
||||
class MLP(nn.Module):
    """Fully connected Q-network: maps a state vector to one Q-value per action."""

    def __init__(self, n_states, n_actions, hidden_dim=128):
        """Initialize the Q-network as a plain MLP.

        Args:
            n_states: number of input features, i.e. the size of the
                environment's state vector.
            n_actions: number of outputs, i.e. the size of the action space.
            hidden_dim: width of the hidden layers.
        """
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(n_states, hidden_dim)    # input layer
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)  # hidden layer
        self.fc3 = nn.Linear(hidden_dim, n_actions)   # output layer

    def forward(self, x):
        # Activations for each layer: ReLU on the hidden layers, and a
        # linear output layer so Q-values remain unbounded.
        # NOTE(review): the middle of forward() was elided by the diff hunk
        # (@@ -32,10); reconstructed as the conventional
        # relu(fc1) -> relu(fc2) -> fc3 stack — confirm against the repo.
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)
||||
class Critic(nn.Module):
    """State-action value network: Q(s, a) -> scalar."""

    def __init__(self, n_obs, n_actions, hidden_size, init_w=3e-3):
        """Build the critic.

        Args:
            n_obs: size of the observation vector.
            n_actions: size of the action vector (concatenated with the
                observation at the input).
            hidden_size: width of the hidden layers.
            init_w: half-range of the uniform init for the output layer.
        """
        super(Critic, self).__init__()
        self.linear1 = nn.Linear(n_obs + n_actions, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, 1)
        # Randomly initialize the output layer to small values so initial
        # Q estimates stay near zero (mirrors the Actor's visible init).
        # NOTE(review): these two lines were elided by the diff hunk
        # (@@ -51,11); reconstructed from the Actor's identical pattern.
        self.linear3.weight.data.uniform_(-init_w, init_w)
        self.linear3.bias.data.uniform_(-init_w, init_w)

    def forward(self, state, action):
        # NOTE(review): the body was elided by the diff hunk (@@ -51,11);
        # reconstructed as the conventional concat -> relu -> relu ->
        # linear head — confirm against the repo.
        x = torch.cat([state, action], 1)
        x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))
        x = self.linear3(x)
        return x
||||
class Actor(nn.Module):
    """Deterministic policy network: maps an observation to an action vector."""

    def __init__(self, n_obs, n_actions, hidden_size, init_w=3e-3):
        """Build the actor.

        Args:
            n_obs: size of the observation vector.
            n_actions: size of the output action vector.
            hidden_size: width of the hidden layers.
            init_w: half-range of the uniform init for the output layer.
        """
        super(Actor, self).__init__()
        self.linear1 = nn.Linear(n_obs, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, n_actions)

        # Small uniform init on the output layer (visible in the diff).
        self.linear3.weight.data.uniform_(-init_w, init_w)
        self.linear3.bias.data.uniform_(-init_w, init_w)

    def forward(self, x):
        # NOTE(review): the middle of forward() was elided by the diff hunk
        # (@@ -67,18); reconstructed with a tanh output squash to [-1, 1]
        # as is conventional for DDPG-style actors — confirm against repo.
        x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))
        x = torch.tanh(self.linear3(x))
        return x
||||
class ActorCritic(nn.Module):
    """A2C-style container holding separate actor and critic heads."""

    def __init__(self, n_states, n_actions, hidden_dim=256):
        """Build both heads.

        Args:
            n_states: size of the state vector fed to both heads.
            n_actions: number of discrete actions the actor scores.
            hidden_dim: width of each head's hidden layer.
        """
        super(ActorCritic, self).__init__()
        # Critic head: state -> scalar value estimate.
        self.critic = nn.Sequential(
            nn.Linear(n_states, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1)
        )

        # Actor head: state -> action probabilities (softmax over the
        # action dimension; dim=1 assumes a (batch, n_actions) input).
        self.actor = nn.Sequential(
            nn.Linear(n_states, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, n_actions),
            nn.Softmax(dim=1),
        )

    # NOTE(review): any forward() the original defines falls outside the
    # visible diff hunk; only __init__ is reconstructed here.
||||
@@ -11,36 +11,52 @@ Environment:
|
||||
'''
|
||||
import matplotlib.pyplot as plt
|
||||
import seaborn as sns
|
||||
from matplotlib.font_manager import FontProperties
|
||||
def chinese_font():
    """Return a FontProperties for a Chinese-capable font (macOS system path)."""
    font_path = '/System/Library/Fonts/STHeiti Light.ttc'
    return FontProperties(fname=font_path, size=15)
|
||||
def plot_rewards(rewards, ma_rewards, plot_cfg, tag='train'):
    """Plot per-episode rewards and their moving average (English labels).

    Args:
        rewards: sequence of per-episode rewards.
        ma_rewards: sequence of moving-average rewards.
        plot_cfg: config object read for its device, algo, env, save and
            result_path attributes.
        tag: filename prefix for the saved figure (e.g. 'train'/'eval').
    """
    # NOTE(review): the merged diff defined plot_rewards twice (an older
    # env/algo/save/path signature and this plot_cfg one) and left a dead
    # `if save:` branch referencing undefined names; consolidated to the
    # post-commit version.
    sns.set()
    plt.figure()  # new figure instance so several plots can coexist
    plt.title("learning curve on {} of {} for {}".format(plot_cfg.device, plot_cfg.algo, plot_cfg.env))
    plt.xlabel('epsiodes')
    plt.plot(rewards, label='rewards')
    plt.plot(ma_rewards, label='ma rewards')
    plt.legend()
    if plot_cfg.save:
        plt.savefig(plot_cfg.result_path + "{}_rewards_curve".format(tag))
    plt.show()
||||
def plot_rewards_cn(rewards, ma_rewards, tag="train", env='CartPole-v0', algo = "DQN", save=True, path='./'):
    ''' Plot the learning curve with Chinese labels.
    '''
    sns.set()
    plt.figure()
    zh_font = chinese_font()  # resolve the CJK font once for all labels
    plt.title(u"{}环境下{}算法的学习曲线".format(env, algo), fontproperties=zh_font)
    plt.xlabel(u'回合数', fontproperties=zh_font)
    plt.plot(rewards)
    plt.plot(ma_rewards)
    plt.legend((u'奖励', u'滑动平均奖励',), loc="best", prop=zh_font)
    if save:
        plt.savefig(path + f"{tag}_rewards_curve_cn")
    # plt.show()
||||
# def plot_rewards(rewards,ma_rewards,tag="train",env='CartPole-v0',algo = "DQN",save=True,path='./'):
|
||||
# sns.set()
|
||||
# plt.figure() # 创建一个图形实例,方便同时多画几个图
|
||||
# plt.title("average learning curve of {} for {}".format(algo,env))
|
||||
# plt.xlabel('epsiodes')
|
||||
# plt.plot(rewards,label='rewards')
|
||||
# plt.plot(ma_rewards,label='ma rewards')
|
||||
# plt.legend()
|
||||
# if save:
|
||||
# plt.savefig(path+"{}_rewards_curve".format(tag))
|
||||
# plt.show()
|
||||
|
||||
def plot_losses(losses,algo = "DQN",save=True,path='./'):
|
||||
sns.set()
|
||||
plt.figure()
|
||||
plt.title("loss curve of {}".format(algo))
|
||||
plt.xlabel('epsiodes')
|
||||
plt.plot(losses,label='rewards')
|
||||
|
||||
Reference in New Issue
Block a user