Update code

This commit is contained in:
johnjim0816
2021-12-21 20:14:13 +08:00
parent 64c319cab4
commit 3b712e8815
71 changed files with 1097 additions and 1340 deletions

View File

@@ -15,15 +15,15 @@ import torch.nn.functional as F
from torch.distributions import Categorical
class MLP(nn.Module):
def __init__(self, n_states,n_actions,hidden_dim=128):
def __init__(self, input_dim,output_dim,hidden_dim=128):
""" 初始化q网络为全连接网络
n_states: 输入的特征数即环境的状态数
n_actions: 输出的动作维度
input_dim: 输入的特征数即环境的状态数
output_dim: 输出的动作维度
"""
super(MLP, self).__init__()
self.fc1 = nn.Linear(n_states, hidden_dim) # input layer
self.fc1 = nn.Linear(input_dim, hidden_dim) # input layer
self.fc2 = nn.Linear(hidden_dim,hidden_dim) # hidden layer
self.fc3 = nn.Linear(hidden_dim, n_actions) # output layer
self.fc3 = nn.Linear(hidden_dim, output_dim) # output layer
def forward(self, x):
# activation functions for each layer
@@ -32,10 +32,10 @@ class MLP(nn.Module):
return self.fc3(x)
class Critic(nn.Module):
def __init__(self, n_obs, n_actions, hidden_size, init_w=3e-3):
def __init__(self, n_obs, action_dim, hidden_size, init_w=3e-3):
super(Critic, self).__init__()
self.linear1 = nn.Linear(n_obs + n_actions, hidden_size)
self.linear1 = nn.Linear(n_obs + action_dim, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, 1)
# randomly initialize weights to small values
@@ -51,11 +51,11 @@ class Critic(nn.Module):
return x
class Actor(nn.Module):
def __init__(self, n_obs, n_actions, hidden_size, init_w=3e-3):
def __init__(self, n_obs, action_dim, hidden_size, init_w=3e-3):
super(Actor, self).__init__()
self.linear1 = nn.Linear(n_obs, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, n_actions)
self.linear3 = nn.Linear(hidden_size, action_dim)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
@@ -67,18 +67,18 @@ class Actor(nn.Module):
return x
class ActorCritic(nn.Module):
def __init__(self, n_states, n_actions, hidden_dim=256):
def __init__(self, state_dim, action_dim, hidden_dim=256):
super(ActorCritic, self).__init__()
self.critic = nn.Sequential(
nn.Linear(n_states, hidden_dim),
nn.Linear(state_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, 1)
)
self.actor = nn.Sequential(
nn.Linear(n_states, hidden_dim),
nn.Linear(state_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, n_actions),
nn.Linear(hidden_dim, action_dim),
nn.Softmax(dim=1),
)
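For reference, a minimal usage sketch of the renamed interface (the dimensions below are illustrative, assuming a CartPole-like task with 4-dimensional states and 2 discrete actions; they are not taken from the commit):

import torch

input_dim, output_dim = 4, 2                       # illustrative dimensions
qnet = MLP(input_dim, output_dim, hidden_dim=128)
state = torch.randn(1, input_dim)                  # dummy batch of one state
q_values = qnet(state)                             # shape: (1, output_dim)
ac = ActorCritic(input_dim, output_dim)
probs = ac.actor(state)                            # action probabilities, sum to 1 along dim=1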

View File

@@ -1,5 +1,5 @@
#This code is from openai baseline
#https://github.com/openai/baselines/tree/master/baselines/common/vec_env
# This code is from openai baselines; it is used to run environments in parallel with multiprocessing
# https://github.com/openai/baselines/tree/master/baselines/common/vec_env
import numpy as np
from multiprocessing import Process, Pipe
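For context, this module builds a vectorized environment on top of these imports: each environment runs in its own process and exchanges commands over a pipe. A minimal sketch of that worker pattern (the function and command names here are illustrative, not the exact baselines API):

def worker(remote, env_fn):
    # each subprocess owns one env and serves commands sent over a pipe
    env = env_fn()
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            remote.send(env.step(data))
        elif cmd == 'reset':
            remote.send(env.reset())
        elif cmd == 'close':
            remote.close()
            break

parent_conn, child_conn = Pipe()
p = Process(target=worker, args=(child_conn, make_env))  # make_env is a hypothetical env factory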

View File

@@ -1,38 +0,0 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: John
Email: johnjim0816@gmail.com
Date: 2020-10-07 20:57:11
LastEditor: John
LastEditTime: 2021-09-23 12:23:01
Description:
Environment:
'''
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.font_manager import FontProperties # import font module
def plot_rewards(rewards,ma_rewards,plot_cfg,tag='train'):
sns.set()
plt.figure() # create a figure instance so several curves can be drawn at once
plt.title("learning curve of {} for {} on {}".format(plot_cfg.algo, plot_cfg.env_name, plot_cfg.device))
plt.xlabel('episodes')
plt.plot(rewards,label='rewards')
plt.plot(ma_rewards,label='ma rewards')
plt.legend()
if plot_cfg.save:
plt.savefig(plot_cfg.result_path+"{}_rewards_curve".format(tag))
plt.show()
def plot_losses(losses,algo = "DQN",save=True,path='./'):
sns.set()
plt.figure()
plt.title("loss curve of {}".format(algo))
plt.xlabel('episodes')
plt.plot(losses,label='losses')
plt.legend()
if save:
plt.savefig(path+"losses_curve")
plt.show()

View File

@@ -5,29 +5,90 @@ Author: John
Email: johnjim0816@gmail.com
Date: 2021-03-12 16:02:24
LastEditor: John
LastEditTime: 2021-09-11 21:48:49
LastEditTime: 2021-11-30 18:39:19
Description:
Environment:
'''
import os
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
import seaborn as sns
def save_results(rewards,ma_rewards,tag='train',path='./results'):
'''save rewards and ma_rewards
from matplotlib.font_manager import FontProperties # import font module
def chinese_font():
''' Set a Chinese font. Note: change the font path for your own machine, otherwise the default font is used
'''
try:
font = FontProperties(
fname='/System/Library/Fonts/STHeiti Light.ttc', size=15) # fname is the system font path; this one is for macOS
except:
font = None
return font
def plot_rewards_cn(rewards, ma_rewards, plot_cfg, tag='train'):
''' Plot the learning curve with Chinese labels
'''
sns.set()
plt.figure()
plt.title(u"{}环境下{}算法的学习曲线".format(plot_cfg.env_name,
plot_cfg.algo_name), fontproperties=chinese_font())
plt.xlabel(u'回合数', fontproperties=chinese_font())
plt.plot(rewards)
plt.plot(ma_rewards)
plt.legend((u'奖励', u'滑动平均奖励',), loc="best", prop=chinese_font())
if plot_cfg.save:
plt.savefig(plot_cfg.result_path+f"{tag}_rewards_curve_cn")
# plt.show()
def plot_rewards(rewards, ma_rewards, plot_cfg, tag='train'):
sns.set()
plt.figure() # create a figure instance so several curves can be drawn at once
plt.title("learning curve of {} for {} on {}".format(
plot_cfg.algo_name, plot_cfg.env_name, plot_cfg.device))
plt.xlabel('episodes')
plt.plot(rewards, label='rewards')
plt.plot(ma_rewards, label='ma rewards')
plt.legend()
if plot_cfg.save:
plt.savefig(plot_cfg.result_path+"{}_rewards_curve".format(tag))
plt.show()
def plot_losses(losses, algo="DQN", save=True, path='./'):
sns.set()
plt.figure()
plt.title("loss curve of {}".format(algo))
plt.xlabel('episodes')
plt.plot(losses, label='losses')
plt.legend()
if save:
plt.savefig(path+"losses_curve")
plt.show()
def save_results(rewards, ma_rewards, tag='train', path='./results'):
''' Save rewards
'''
np.save(path+'{}_rewards.npy'.format(tag), rewards)
np.save(path+'{}_ma_rewards.npy'.format(tag), ma_rewards)
print('Results saved!')
def make_dir(*paths):
''' Create directories
'''
for path in paths:
Path(path).mkdir(parents=True, exist_ok=True)
def del_empty_dir(*paths):
'''del_empty_dir delete empty folders under "paths"
''' Delete all empty folders under the given paths
'''
for path in paths:
dirs = os.listdir(path)
for dir in dirs:
if not os.listdir(os.path.join(path, dir)):
os.removedirs(os.path.join(path, dir))
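A minimal usage sketch of these helpers (the config class and reward lists below are dummy stand-ins for the project's real objects):

class PlotConfig:
    # dummy stand-in exposing the attributes plot_rewards expects
    env_name, algo_name, device = 'CartPole-v0', 'DQN', 'cpu'
    result_path, save = './results/', True

rewards = [10.0, 20.0, 30.0]     # dummy per-episode rewards
ma_rewards = [10.0, 15.0, 22.5]  # dummy moving-average rewards
make_dir(PlotConfig.result_path)
save_results(rewards, ma_rewards, tag='train', path=PlotConfig.result_path)
plot_rewards(rewards, ma_rewards, PlotConfig, tag='train')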