update projects
projects/codes/QLearning/task0.py · 141 lines · new file
@@ -0,0 +1,141 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: John
Email: johnjim0816@gmail.com
Date: 2020-09-11 23:03:00
LastEditor: John
LastEditTime: 2022-06-21 19:36:05
Description:
Environment:
'''
import sys
import os
curr_path = os.path.dirname(os.path.abspath(__file__))  # absolute path of the current file
parent_path = os.path.dirname(curr_path)  # parent directory
sys.path.append(parent_path)  # add the parent directory to sys.path

import gym
import torch
import datetime

from env.gridworld_env import CliffWalkingWapper
from qlearning import QLearning
from common.utils import plot_rewards
from common.utils import save_results, make_dir
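# note: env.gridworld_env, qlearning and common.utils are local modules found via the
# sys.path tweak above. This script assumes QLearning exposes choose_action(), predict(),
# update(), save() and load(), exactly as used below; their implementations are not in this diff.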

curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")  # current timestamp
class Config:
    '''Hyperparameters
    '''
    def __init__(self):
        ################################## environment hyperparameters ###################################
        self.algo_name = 'Q-learning'  # algorithm name
        self.env_name = 'CliffWalking-v0'  # environment name
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")  # detect GPU
        self.seed = 10  # random seed; 0 means no seed is set
        self.train_eps = 400  # number of training episodes
        self.test_eps = 30  # number of test episodes
        ################################################################################

        ################################## algorithm hyperparameters ###################################
        self.gamma = 0.90  # discount factor
        self.epsilon_start = 0.95  # initial epsilon for the e-greedy policy
        self.epsilon_end = 0.01  # final epsilon for the e-greedy policy
        self.epsilon_decay = 300  # decay rate of epsilon for the e-greedy policy
        self.lr = 0.1  # learning rate
        ################################################################################

        ################################# result-saving parameters ################################
        self.result_path = curr_path + "/outputs/" + self.env_name + \
            '/' + curr_time + '/results/'  # path for saving results
        self.model_path = curr_path + "/outputs/" + self.env_name + \
            '/' + curr_time + '/models/'  # path for saving models
        self.save = True  # whether to save figures
        ################################################################################
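# epsilon_start/epsilon_end/epsilon_decay suggest the usual exponential schedule, e.g.
# epsilon = epsilon_end + (epsilon_start - epsilon_end) * exp(-sample_count / epsilon_decay).
# This is an assumption: the decay itself happens inside the QLearning agent, which is not shown here.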

def train(cfg, env, agent):
    print('Start training!')
    print(f'Env: {cfg.env_name}, Algorithm: {cfg.algo_name}, Device: {cfg.device}')
    rewards = []  # record rewards
    ma_rewards = []  # record moving-average rewards
    for i_ep in range(cfg.train_eps):
        ep_reward = 0  # total reward of this episode
        state = env.reset()  # reset the environment, i.e. start a new episode
        while True:
            action = agent.choose_action(state)  # choose an action according to the algorithm
            next_state, reward, done, _ = env.step(action)  # one interaction with the environment
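            # the update presumably applies the standard tabular Q-learning rule
            # Q[s][a] += lr * (reward + gamma * max(Q[s']) - Q[s][a]), with a zero bootstrap
            # target when done; the actual implementation lives in qlearning.py, not in this diff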
            agent.update(state, action, reward, next_state, done)  # Q-learning update
            state = next_state  # move to the next state
            ep_reward += reward
            if done:
                break
        rewards.append(ep_reward)
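        # smooth the reward curve with an exponential moving average (smoothing factor 0.9)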
        if ma_rewards:
            ma_rewards.append(ma_rewards[-1]*0.9 + ep_reward*0.1)
        else:
            ma_rewards.append(ep_reward)
        print("Episode: {}/{}, Reward: {:.1f}".format(i_ep+1, cfg.train_eps, ep_reward))
    print('Training finished!')
    return rewards, ma_rewards

def test(cfg, env, agent):
    print('Start testing!')
    print(f'Env: {cfg.env_name}, Algorithm: {cfg.algo_name}, Device: {cfg.device}')
    rewards = []  # record rewards of all episodes
    ma_rewards = []  # moving-average rewards
    for i_ep in range(cfg.test_eps):
        ep_reward = 0  # total reward of this episode
        state = env.reset()  # reset the environment, i.e. start a new episode
        while True:
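            # predict() presumably acts greedily (argmax over Q) without epsilon-exploration,
            # unlike choose_action() used in training; an assumption, the agent code is not shown here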
            action = agent.predict(state)  # choose an action according to the algorithm
            next_state, reward, done, _ = env.step(action)  # one interaction with the environment
            state = next_state  # move to the next state
            ep_reward += reward
            if done:
                break
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(ma_rewards[-1]*0.9 + ep_reward*0.1)
        else:
            ma_rewards.append(ep_reward)
        print(f"Episode: {i_ep+1}/{cfg.test_eps}, Reward: {ep_reward:.1f}")
    print('Testing finished!')
    return rewards, ma_rewards

def env_agent_config(cfg, seed=1):
    '''Create the environment and the agent
    Args:
        cfg (Config): hyperparameter configuration
        seed (int, optional): random seed. Defaults to 1.
    Returns:
        env: the environment
        agent: the agent
    '''
    env = gym.make(cfg.env_name)
    env = CliffWalkingWapper(env)
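    # CliffWalking-v0 is the classic 4x12 grid: 48 discrete states, 4 actions,
    # a reward of -1 per step and -100 for stepping into the cliff.
    # CliffWalkingWapper (spelling as in the repo) is a local wrapper not shown in this diff.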
    env.seed(seed)  # set the random seed
    n_states = env.observation_space.n  # dimension of the state space
    n_actions = env.action_space.n  # dimension of the action space
    agent = QLearning(n_states, n_actions, cfg)
    return env, agent

if __name__ == "__main__":
    cfg = Config()
    # training
    env, agent = env_agent_config(cfg, seed=1)
    rewards, ma_rewards = train(cfg, env, agent)
    make_dir(cfg.result_path, cfg.model_path)  # create folders for results and models
    agent.save(path=cfg.model_path)  # save the model
    save_results(rewards, ma_rewards, tag='train',
                 path=cfg.result_path)  # save the results
    plot_rewards(rewards, ma_rewards, cfg, tag="train")  # plot the results
    # testing
    env, agent = env_agent_config(cfg, seed=10)
    agent.load(path=cfg.model_path)  # load the model
    rewards, ma_rewards = test(cfg, env, agent)
    save_results(rewards, ma_rewards, tag='test', path=cfg.result_path)  # save the results
    plot_rewards(rewards, ma_rewards, cfg, tag="test")  # plot the results