diff --git a/codes/QLearning/agent.py b/codes/QLearning/agent.py
index 3e9fb2d..f4a793a 100644
--- a/codes/QLearning/agent.py
+++ b/codes/QLearning/agent.py
@@ -5,11 +5,10 @@ Author: John
 Email: johnjim0816@gmail.com
 Date: 2020-09-11 23:03:00
 LastEditor: John
-LastEditTime: 2021-03-11 19:16:27
+LastEditTime: 2021-03-12 16:48:25
 Discription: 
 Environment: 
 '''
-from functools import update_wrapper
 import numpy as np
 import math
 import torch
@@ -53,11 +52,11 @@ class QLearning(object):
         import dill
         torch.save(
             obj=self.Q_table,
-            f=path,
+            f=path+"Qleaning_model.pkl",
             pickle_module=dill
         )
-
     def load(self, path):
         '''从文件中读取数据到 Q表格
         '''
-        self.Q_table =torch.load(f='prod_dls.pkl',pickle_module=dill)
\ No newline at end of file
+        import dill
+        self.Q_table =torch.load(f=path+'Qleaning_model.pkl',pickle_module=dill)
\ No newline at end of file
diff --git a/codes/QLearning/main.py b/codes/QLearning/main.py
index aa1e3b2..6fefb8a 100644
--- a/codes/QLearning/main.py
+++ b/codes/QLearning/main.py
@@ -5,7 +5,7 @@ Author: John
 Email: johnjim0816@gmail.com
 Date: 2020-09-11 23:03:00
 LastEditor: John
-LastEditTime: 2021-03-11 19:22:50
+LastEditTime: 2021-03-12 16:52:26
 Discription: 
 Environment: 
 '''
@@ -15,101 +15,101 @@ sys.path.append(os.getcwd()) # 添加当前终端路径
 import argparse
 import gym
 import datetime
-from QLearning.plot import plot
-from QLearning.utils import save_results
+
 from envs.gridworld_env import CliffWalkingWapper, FrozenLakeWapper
 from QLearning.agent import QLearning
+from common.plot import plot_rewards
+from common.utils import save_results
 
-SEQUENCE = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
-SAVED_MODEL_PATH = os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"+SEQUENCE+'/'
-RESULT_PATH = os.path.split(os.path.abspath(__file__))[0]+"/result/"+SEQUENCE+'/'
+SEQUENCE = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # 获取当前时间
+SAVED_MODEL_PATH = os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"+SEQUENCE+'/' # 生成保存的模型路径
+if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/"): # 检测是否存在文件夹
+    os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/saved_model/")
+if not os.path.exists(SAVED_MODEL_PATH): # 检测是否存在文件夹
+    os.mkdir(SAVED_MODEL_PATH)
+RESULT_PATH = os.path.split(os.path.abspath(__file__))[0]+"/results/"+SEQUENCE+'/' # 存储reward的路径
+if not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+"/results/"): # 检测是否存在文件夹
+    os.mkdir(os.path.split(os.path.abspath(__file__))[0]+"/results/")
+if not os.path.exists(RESULT_PATH): # 检测是否存在文件夹
+    os.mkdir(RESULT_PATH)
 
-def get_args():
-    '''训练的模型参数
-    '''
-    parser = argparse.ArgumentParser()
+class QlearningConfig:
     '''训练相关参数'''
-    parser.add_argument("--n_episodes", default=500,
-                        type=int, help="训练的最大episode数目")
-    '''算法相关参数'''
-    parser.add_argument("--gamma", default=0.9,
-                        type=float, help="reward的衰减率")
-    parser.add_argument("--epsilon_start", default=0.99,
-                        type=float, help="e-greedy策略中初始epsilon")
-    parser.add_argument("--epsilon_end", default=0.01,
-                        type=float, help="e-greedy策略中的结束epsilon")
-    parser.add_argument("--epsilon_decay", default=200,
-                        type=float, help="e-greedy策略中epsilon的衰减率")
-    parser.add_argument("--lr", default=0.1, type=float, help="学习率")
-    config = parser.parse_args()
-    return config
+    def __init__(self):
+        self.n_episodes = 200 # 训练的episode数目
+        self.gamma = 0.9 # reward的衰减率
+        self.epsilon_start = 0.99 # e-greedy策略中初始epsilon
+        self.epsilon_end = 0.01 # e-greedy策略中的终止epsilon
+        self.epsilon_decay = 200 # e-greedy策略中epsilon的衰减率
+        self.lr = 0.1 # 学习率
+
 def train(cfg,env,agent):
     # env = gym.make("FrozenLake-v0", is_slippery=False) # 0 left, 1 down, 2 right, 3 up
     # env = FrozenLakeWapper(env)
-    rewards = [] # 记录所有episode的reward,
+    rewards = [] # 记录所有episode的reward
+    ma_rewards = [] # 滑动平均的reward
     steps = [] # 记录所有episode的steps
     for i_episode in range(cfg.n_episodes):
         ep_reward = 0 # 记录每个episode的reward
         ep_steps = 0 # 记录每个episode走了多少step
-        obs = env.reset() # 重置环境, 重新开一局(即开始新的一个episode)
+        state = env.reset() # 重置环境, 重新开一局(即开始新的一个episode)
         while True:
-            action = agent.choose_action(obs) # 根据算法选择一个动作
-            next_obs, reward, done, _ = env.step(action) # 与环境进行一个交互
-            # 训练 Q-learning算法
-            agent.update(obs, action, reward, next_obs, done) # 不需要下一步的action
-            obs = next_obs # 存储上一个观察值
+            action = agent.choose_action(state) # 根据算法选择一个动作
+            next_state, reward, done, _ = env.step(action) # 与环境进行一次动作交互
+            agent.update(state, action, reward, next_state, done) # Q-learning算法更新
+            state = next_state # 存储上一个观察值
             ep_reward += reward
             ep_steps += 1 # 计算step数
             if done:
                 break
         steps.append(ep_steps)
+        rewards.append(ep_reward)
         # 计算滑动平均的reward
-        if rewards:
-            rewards.append(rewards[-1]*0.9+ep_reward*0.1)
+        if ma_rewards:
+            ma_rewards.append(ma_rewards[-1]*0.9+ep_reward*0.1)
         else:
-            rewards.append(ep_reward)
+            ma_rewards.append(ep_reward)
         print("Episode:{}/{}: reward:{:.1f}".format(i_episode+1,
                                                     cfg.n_episodes,ep_reward))
-    plot(rewards)
-    if not os.path.exists(SAVED_MODEL_PATH):
-        os.mkdir(SAVED_MODEL_PATH)
-    agent.save(SAVED_MODEL_PATH+'Q_table.pkl') # 训练结束,保存模型
-    '''存储reward等相关结果'''
-    save_results(rewards,tag='train',result_path=RESULT_PATH)
+    return rewards,ma_rewards
 
 def eval(cfg,env,agent):
     # env = gym.make("FrozenLake-v0", is_slippery=False) # 0 left, 1 down, 2 right, 3 up
     # env = FrozenLakeWapper(env)
-    rewards = [] # 记录所有episode的reward,
+    rewards = [] # 记录所有episode的reward
+    ma_rewards = [] # 滑动平均的reward
     steps = [] # 记录所有episode的steps
-    for i_episode in range(20):
+    for i_episode in range(cfg.n_episodes):
         ep_reward = 0 # 记录每个episode的reward
         ep_steps = 0 # 记录每个episode走了多少step
-        obs = env.reset() # 重置环境, 重新开一局(即开始新的一个episode)
+        state = env.reset() # 重置环境, 重新开一局(即开始新的一个episode)
         while True:
-            action = agent.choose_action(obs) # 根据算法选择一个动作
-            next_obs, reward, done, _ = env.step(action) # 与环境进行一个交互
-            obs = next_obs # 存储上一个观察值
+            action = agent.choose_action(state) # 根据算法选择一个动作
+            next_state, reward, done, _ = env.step(action) # 与环境进行一个交互
+            state = next_state # 存储上一个观察值
             ep_reward += reward
             ep_steps += 1 # 计算step数
             if done:
                 break
         steps.append(ep_steps)
+        rewards.append(ep_reward)
         # 计算滑动平均的reward
-        if rewards:
-            rewards.append(rewards[-1]*0.9+ep_reward*0.1)
+        if ma_rewards:
+            ma_rewards.append(ma_rewards[-1]*0.9+ep_reward*0.1)
         else:
-            rewards.append(ep_reward)
+            ma_rewards.append(ep_reward)
         print("Episode:{}/{}: reward:{:.1f}".format(i_episode+1,
                                                     cfg.n_episodes,ep_reward))
-    plot(rewards)
-    '''存储reward等相关结果'''
-    save_results(rewards,tag='eval',result_path=RESULT_PATH)
+    return rewards,ma_rewards
 
 if __name__ == "__main__":
-    cfg = get_args()
+    cfg = QlearningConfig()
     env = gym.make("CliffWalking-v0") # 0 up, 1 right, 2 down, 3 left
     env = CliffWalkingWapper(env)
     n_actions = env.action_space.n
     agent = QLearning(n_actions,cfg)
-    train(cfg,env,agent)
-    eval(cfg,env,agent)
+    rewards,ma_rewards = train(cfg,env,agent)
+    agent.save(path=SAVED_MODEL_PATH)
+    # eval(cfg,env,agent)
+    save_results(rewards,ma_rewards,tag='train',path=RESULT_PATH)
+    plot_rewards(rewards,ma_rewards,tag="train",algo = "Q-learning",path=RESULT_PATH)
+
diff --git a/codes/QLearning/plot.py b/codes/QLearning/plot.py
deleted file mode 100644
index e64ceba..0000000
--- a/codes/QLearning/plot.py
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env python
-# coding=utf-8
-'''
-Author: John
-Email: johnjim0816@gmail.com
-Date: 2020-10-07 20:57:11
-LastEditor: John
-LastEditTime: 2020-10-07 21:00:29
-Discription: 
-Environment: 
-'''
-import matplotlib.pyplot as plt
-import seaborn as sns
-import numpy as np
-import os
-
-def plot(item,ylabel='rewards'):
-    sns.set()
-    plt.figure()
-    plt.plot(np.arange(len(item)), item)
-    plt.title(ylabel+' of Q-learning')
-    plt.ylabel(ylabel)
-    plt.xlabel('episodes')
-    plt.savefig(os.path.dirname(__file__)+"/result/"+ylabel+".png")
-    plt.show()
-
-if __name__ == "__main__":
-
-    output_path = os.path.dirname(__file__)+"/result/"
-    rewards=np.load(output_path+"rewards_train.npy", )
-    MA_rewards=np.load(output_path+"MA_rewards_train.npy")
-    steps = np.load(output_path+"steps_train.npy")
-    plot(rewards)
-    plot(MA_rewards,ylabel='moving_average_rewards')
-    plot(steps,ylabel='steps')
\ No newline at end of file
diff --git a/codes/QLearning/result/20210311-192256/rewards_eval.npy b/codes/QLearning/result/20210311-192256/rewards_eval.npy
deleted file mode 100644
index 9bee5e4..0000000
Binary files a/codes/QLearning/result/20210311-192256/rewards_eval.npy and /dev/null differ
diff --git a/codes/QLearning/result/20210311-192256/rewards_train.npy b/codes/QLearning/result/20210311-192256/rewards_train.npy
deleted file mode 100644
index 9395542..0000000
Binary files a/codes/QLearning/result/20210311-192256/rewards_train.npy and /dev/null differ
diff --git a/codes/QLearning/result/rewards.png b/codes/QLearning/result/rewards.png
deleted file mode 100644
index 4acca82..0000000
Binary files a/codes/QLearning/result/rewards.png and /dev/null differ
diff --git a/codes/QLearning/results/20210312-165244/ma_rewards_train.npy b/codes/QLearning/results/20210312-165244/ma_rewards_train.npy
new file mode 100644
index 0000000..489cfb4
Binary files /dev/null and b/codes/QLearning/results/20210312-165244/ma_rewards_train.npy differ
diff --git a/codes/QLearning/results/20210312-165244/rewards_curve_train.png b/codes/QLearning/results/20210312-165244/rewards_curve_train.png
new file mode 100644
index 0000000..3353dee
Binary files /dev/null and b/codes/QLearning/results/20210312-165244/rewards_curve_train.png differ
diff --git a/codes/QLearning/results/20210312-165244/rewards_train.npy b/codes/QLearning/results/20210312-165244/rewards_train.npy
new file mode 100644
index 0000000..91008fd
Binary files /dev/null and b/codes/QLearning/results/20210312-165244/rewards_train.npy differ
diff --git a/codes/QLearning/saved_model/20210311-192256/Q_table.pkl b/codes/QLearning/saved_model/20210311-192256/Q_table.pkl
deleted file mode 100644
index 159318d..0000000
Binary files a/codes/QLearning/saved_model/20210311-192256/Q_table.pkl and /dev/null differ
diff --git a/codes/QLearning/saved_model/20210312-165244/Qleaning_model.pkl b/codes/QLearning/saved_model/20210312-165244/Qleaning_model.pkl
new file mode 100644
index 0000000..c70d88f
Binary files /dev/null and b/codes/QLearning/saved_model/20210312-165244/Qleaning_model.pkl differ
diff --git a/codes/QLearning/utils.py b/codes/QLearning/utils.py
deleted file mode 100644
index d777986..0000000
--- a/codes/QLearning/utils.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env python
-# coding=utf-8
-'''
-Author: John
-Email: johnjim0816@gmail.com
-Date: 2020-11-23 13:44:52
-LastEditor: John
-LastEditTime: 2021-03-11 19:18:34
-Discription: 
-Environment: 
-'''
-import os
-import numpy as np
-
-
-def save_results(rewards,tag='train',result_path='./result'):
-    '''保存reward等结果
-    '''
-    if not os.path.exists(result_path): # 检测是否存在文件夹
-        os.mkdir(result_path)
-    np.save(result_path+'rewards_'+tag+'.npy', rewards)
-    print('results saved!')
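
Note: this patch deletes the per-algorithm codes/QLearning/plot.py and codes/QLearning/utils.py and switches main.py over to common.plot.plot_rewards and common.utils.save_results, but the new common module itself is not shown in the diff. The code below is only a hypothetical sketch of what those two helpers might look like, inferred from the deleted files, the new call sites (save_results(rewards,ma_rewards,tag='train',path=RESULT_PATH) and plot_rewards(rewards,ma_rewards,tag="train",...,path=RESULT_PATH)) and the output filenames that appear under results/ (rewards_train.npy, ma_rewards_train.npy, rewards_curve_train.png); the actual implementation under codes/common/ may differ.

#!/usr/bin/env python
# coding=utf-8
# Hypothetical reconstruction of codes/common/utils.py and codes/common/plot.py
# (not included in this diff); signatures inferred from the call sites in main.py.
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

def save_results(rewards, ma_rewards, tag='train', path='./results/'):
    '''Save per-episode rewards and their moving average as .npy files.'''
    if not os.path.exists(path):  # create the results folder if it does not exist yet
        os.makedirs(path)
    np.save(path + 'rewards_' + tag + '.npy', rewards)
    np.save(path + 'ma_rewards_' + tag + '.npy', ma_rewards)
    print('results saved!')

def plot_rewards(rewards, ma_rewards, tag='train', algo='Q-learning', path='./results/'):
    '''Plot raw and moving-average rewards and save the curve as a png.'''
    sns.set()
    plt.figure()
    plt.title('rewards curve of {} ({})'.format(algo, tag))
    plt.xlabel('episodes')
    plt.plot(rewards, label='rewards')
    plt.plot(ma_rewards, label='ma_rewards')
    plt.legend()
    plt.savefig(path + 'rewards_curve_{}.png'.format(tag))
    plt.show()

With the saving and plotting factored out like this, train() and eval() only need to return (rewards, ma_rewards), and every algorithm under codes/ can share the same result-handling code.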