update
@@ -5,7 +5,7 @@ Author: John
 Email: johnjim0816@gmail.com
 Date: 2020-11-22 23:27:44
 LastEditor: John
-LastEditTime: 2021-03-13 11:50:16
+LastEditTime: 2021-03-23 16:37:14
 Discription: 
 Environment: 
 '''
@@ -13,14 +13,13 @@ import torch
 from torch.distributions import Bernoulli
 from torch.autograd import Variable
 import numpy as np
 
-from common.model import MLP1
+from PolicyGradient.model import MLP
 
 class PolicyGradient:
 
     def __init__(self, n_states,cfg):
         self.gamma = cfg.gamma
-        self.policy_net = MLP1(n_states,hidden_dim=cfg.hidden_dim)
+        self.policy_net = MLP(n_states,hidden_dim=cfg.hidden_dim)
         self.optimizer = torch.optim.RMSprop(self.policy_net.parameters(), lr=cfg.lr)
         self.batch_size = cfg.batch_size
 
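Note: the Bernoulli import above is what this agent samples actions from; the policy network outputs a single probability and the action is drawn from a Bernoulli distribution over it. A minimal sketch of such an action-selection method, assuming a `choose_action` name and NumPy state arrays (neither is confirmed by this diff):

import torch
from torch.distributions import Bernoulli

def choose_action(self, state):
    state = torch.from_numpy(state).float()  # env observation -> tensor
    prob = self.policy_net(state)            # MLP output: P(action == 1)
    m = Bernoulli(prob)                      # two-action policy (e.g. CartPole)
    action = m.sample()
    return int(action.item())                # 0 or 1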
@@ -66,6 +65,6 @@ class PolicyGradient:
         loss.backward()
         self.optimizer.step()
     def save_model(self,path):
-        torch.save(self.policy_net.state_dict(), path+'pg_checkpoint.pth')
+        torch.save(self.policy_net.state_dict(), path+'pg_checkpoint.pt')
     def load_model(self,path):
-        self.policy_net.load_state_dict(torch.load(path+'pg_checkpoint.pth'))
+        self.policy_net.load_state_dict(torch.load(path+'pg_checkpoint.pt'))
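The loss.backward() / self.optimizer.step() context lines above are the tail of the REINFORCE-style update. A hedged sketch of what the full update plausibly looks like for this agent (the `update` signature and the pool arguments are assumptions, not shown in this diff):

import torch
from torch.distributions import Bernoulli

def update(self, reward_pool, state_pool, action_pool):
    # discounted returns, computed backwards over the collected transitions
    running_add = 0
    for i in reversed(range(len(reward_pool))):
        running_add = running_add * self.gamma + reward_pool[i]
        reward_pool[i] = running_add
    # normalize returns to reduce gradient variance
    returns = torch.tensor(reward_pool)
    returns = (returns - returns.mean()) / (returns.std() + 1e-8)
    self.optimizer.zero_grad()
    loss = 0
    for state, action, G in zip(state_pool, action_pool, returns):
        prob = self.policy_net(torch.from_numpy(state).float())
        m = Bernoulli(prob)
        loss = loss - m.log_prob(torch.tensor(float(action))) * G  # -log pi(a|s) * G
    loss.backward()
    self.optimizer.step()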
@@ -5,12 +5,12 @@ Author: John
 Email: johnjim0816@gmail.com
 Date: 2020-11-22 23:21:53
 LastEditor: John
-LastEditTime: 2021-03-13 11:50:32
+LastEditTime: 2021-03-23 16:38:54
 Discription: 
 Environment: 
 '''
 import sys,os
-sys.path.append(os.getcwd()) # add current terminal path
+sys.path.append(os.getcwd()) # add current terminal path to sys.path
 from itertools import count
 import datetime
 import gym
@@ -18,25 +18,25 @@ from PolicyGradient.agent import PolicyGradient
 from common.plot import plot_rewards
 from common.utils import save_results
 
-SEQUENCE = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # get current time
-SAVED_MODEL_PATH = os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"+SEQUENCE+'/' # generated path to save the model
-if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"): # check whether the folder exists
+SEQUENCE = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # obtain current time
+SAVED_MODEL_PATH = os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"+SEQUENCE+'/' # path to save model
+if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"):
     os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/")
-if not os.path.exists(SAVED_MODEL_PATH): # check whether the folder exists
+if not os.path.exists(SAVED_MODEL_PATH):
     os.mkdir(SAVED_MODEL_PATH)
-RESULT_PATH = os.path.split(os.path.abspath(__file__))[0]+"/results/"+SEQUENCE+'/' # path for storing rewards
-if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/results/"): # check whether the folder exists
+RESULT_PATH = os.path.split(os.path.abspath(__file__))[0]+"/results/"+SEQUENCE+'/' # path to save rewards
+if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/results/"):
     os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/results/")
-if not os.path.exists(RESULT_PATH): # check whether the folder exists
+if not os.path.exists(RESULT_PATH):
     os.mkdir(RESULT_PATH)
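Aside: the repeated exists()/mkdir() pairs above can be collapsed with os.makedirs, which creates intermediate directories and, with exist_ok=True, tolerates ones that already exist:

import os
os.makedirs(SAVED_MODEL_PATH, exist_ok=True)  # replaces both saved_model checks
os.makedirs(RESULT_PATH, exist_ok=True)       # replaces both results checks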
 
 class PGConfig:
     def __init__(self):
         self.train_eps = 300 # number of training episodes
         self.batch_size = 8
-        self.lr = 0.01 # learning rate
+        self.lr = 0.01 # learning rate
         self.gamma = 0.99
-        self.hidden_dim = 36 # hidden layer dimension
+        self.hidden_dim = 36 # dimension of hidden layer
 
 def train(cfg,env,agent):
     '''the *pool variables below store sequences of transitions used for the gradient'''
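The hunk cuts off at the top of train(); per its docstring, the pool lists collect transitions that the agent consumes once per batch of episodes. A minimal sketch of such a loop, assuming the hypothetical choose_action/update methods sketched earlier and the classic 4-tuple gym step API:

from itertools import count

def train(cfg, env, agent):
    state_pool, action_pool, reward_pool = [], [], []
    for i_episode in range(cfg.train_eps):
        state = env.reset()
        for _ in count():
            action = agent.choose_action(state)
            next_state, reward, done, _ = env.step(action)
            state_pool.append(state)
            action_pool.append(action)
            reward_pool.append(reward)
            state = next_state
            if done:
                break
        if i_episode > 0 and i_episode % cfg.batch_size == 0:  # update once per batch of episodes
            agent.update(reward_pool, state_pool, action_pool)
            state_pool, action_pool, reward_pool = [], [], []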
codes/PolicyGradient/model.py (new file, 30 lines)
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+# coding=utf-8
+'''
+Author: John
+Email: johnjim0816@gmail.com
+Date: 2021-03-23 16:35:58
+LastEditor: John
+LastEditTime: 2021-03-23 16:36:20
+Discription: 
+Environment: 
+'''
+import torch.nn as nn
+import torch.nn.functional as F
+class MLP(nn.Module):
+    ''' multi-layer perceptron
+        input: state dimension
+        output: probability
+    '''
+    def __init__(self,n_states,hidden_dim = 36):
+        super(MLP, self).__init__()
+        # 24 and 36 are hidden layer sizes, adjustable according to state_dim and n_actions
+        self.fc1 = nn.Linear(n_states, hidden_dim)
+        self.fc2 = nn.Linear(hidden_dim,hidden_dim)
+        self.fc3 = nn.Linear(hidden_dim, 1) # Prob of Left
+
+    def forward(self, x):
+        x = F.relu(self.fc1(x))
+        x = F.relu(self.fc2(x))
+        x = F.sigmoid(self.fc3(x))
+        return x
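A quick sanity check of the new MLP (hypothetical usage; the 4-dimensional state matches CartPole but is an assumption):

import torch
from torch.distributions import Bernoulli
from PolicyGradient.model import MLP

net = MLP(n_states=4, hidden_dim=36)
state = torch.randn(4)              # fake observation
prob = net(state)                   # sigmoid output in (0, 1)
action = Bernoulli(prob).sample()
print(prob.item(), int(action.item()))

Note that F.sigmoid is deprecated in recent PyTorch releases in favor of torch.sigmoid, though it behaves the same here.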