update projects

johnjim0816
2022-08-15 22:31:37 +08:00
parent cd27cb67b7
commit 73948f1dc8
109 changed files with 3483 additions and 1011 deletions

View File

@@ -5,7 +5,7 @@ Author: John
Email: johnjim0816@gmail.com
Date: 2021-03-12 16:14:34
LastEditor: John
LastEditTime: 2021-05-05 16:58:39
LastEditTime: 2022-08-15 18:10:13
Description:
Environment:
'''
@@ -22,11 +22,10 @@ class FisrtVisitMC:
self.epsilon = cfg.epsilon
self.gamma = cfg.gamma
self.Q_table = defaultdict(lambda: np.zeros(n_actions))
self.returns_sum = defaultdict(float) # sum of returns
self.returns_sum = defaultdict(float) # stores the sum of returns
self.returns_count = defaultdict(float)
def choose_action(self,state):
''' epsilon-greedy policy '''
def sample(self,state):
if state in self.Q_table.keys():
best_action = np.argmax(self.Q_table[state])
action_probs = np.ones(self.n_actions, dtype=float) * self.epsilon / self.n_actions
@@ -35,6 +34,15 @@ class FisrtVisitMC:
else:
action = np.random.randint(0,self.n_actions)
return action
def predict(self,state):
if state in self.Q_table.keys():
action = np.argmax(self.Q_table[state])
else:
action = np.random.randint(0,self.n_actions)
return action
def update(self,one_ep_transition):
# Find all (state, action) pairs we've visited in this one_ep_transition
# We convert each state to a tuple so that we can use it as a dict key
@@ -50,16 +58,18 @@ class FisrtVisitMC:
self.returns_sum[sa_pair] += G
self.returns_count[sa_pair] += 1.0
self.Q_table[state][action] = self.returns_sum[sa_pair] / self.returns_count[sa_pair]
def save(self,path):
def save(self,path=None):
'''Save the Q-table data to a file
'''
from pathlib import Path
Path(path).mkdir(parents=True, exist_ok=True)
torch.save(
obj=self.Q_table,
f=path+"Q_table",
pickle_module=dill
)
def load(self, path):
def load(self, path=None):
'''Load the Q-table data from a file
'''
self.Q_table = torch.load(f=path+"Q_table",pickle_module=dill)
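For readers reconstructing the full agent from these hunks: the diff elides the tail of sample() after the probability setup, as well as the return computation inside update(). Below is a minimal sketch of how a first-visit MC agent typically completes those bodies, assuming the standard algorithm; it is illustrative, not necessarily the repository's exact code.

# Methods of FisrtVisitMC (illustrative sketch; the hunks above elide these bodies)
import numpy as np

def sample(self, state):
    """Epsilon-greedy action selection used during training."""
    if state in self.Q_table.keys():
        best_action = np.argmax(self.Q_table[state])
        # spread epsilon uniformly over all actions, put the remaining mass on the greedy one
        action_probs = np.ones(self.n_actions, dtype=float) * self.epsilon / self.n_actions
        action_probs[best_action] += (1.0 - self.epsilon)
        action = np.random.choice(np.arange(self.n_actions), p=action_probs)
    else:
        action = np.random.randint(0, self.n_actions)
    return action

def update(self, one_ep_transition):
    """First-visit MC: average the return G that follows the first occurrence
    of each (state, action) pair in the episode."""
    sa_in_episode = set([(tuple(x[0]), x[1]) for x in one_ep_transition])
    for state, action in sa_in_episode:
        sa_pair = (state, action)
        # index of the first visit of (state, action) in this episode
        first_visit_idx = next(i for i, x in enumerate(one_ep_transition)
                               if tuple(x[0]) == state and x[1] == action)
        # discounted return from the first visit to the end of the episode
        G = sum(x[2] * (self.gamma ** i)
                for i, x in enumerate(one_ep_transition[first_visit_idx:]))
        self.returns_sum[sa_pair] += G
        self.returns_count[sa_pair] += 1.0
        self.Q_table[state][action] = self.returns_sum[sa_pair] / self.returns_count[sa_pair]

Note, separately, that save() and load() now default path to None but still concatenate path+"Q_table", so calling either without an explicit path would raise a TypeError.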

Binary file not shown (before: 79 KiB).

Binary file not shown (before: 38 KiB).

View File

@@ -0,0 +1 @@
{"algo_name": "First-Visit MC", "env_name": "Racetrack", "train_eps": 200, "test_eps": 20, "gamma": 0.9, "epsilon": 0.15, "device": "cpu", "result_path": "/Users/jj/Desktop/rl-tutorials/codes/MonteCarlo/outputs/Racetrack/20220815-180742/results/", "model_path": "/Users/jj/Desktop/rl-tutorials/codes/MonteCarlo/outputs/Racetrack/20220815-180742/models/", "save_fig": true}

Binary file not shown (after: 48 KiB).

Binary file not shown (after: 40 KiB).

View File

@@ -0,0 +1,110 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: John
Email: johnjim0816@gmail.com
Date: 2021-03-11 14:26:44
LastEditor: John
LastEditTime: 2022-08-15 18:12:13
Description:
Environment:
'''
import sys,os
curr_path = os.path.dirname(os.path.abspath(__file__)) # absolute path of the current file
parent_path = os.path.dirname(curr_path) # parent directory
sys.path.append(parent_path) # add the parent path to sys.path
import datetime
import argparse
from common.utils import save_results,save_args,plot_rewards
from MonteCarlo.agent import FisrtVisitMC
from envs.racetrack_env import RacetrackEnv
curr_time = datetime.datetime.now().strftime(
"%Y%m%d-%H%M%S") # obtain current time
def get_args():
""" 超参数
"""
curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # 获取当前时间
parser = argparse.ArgumentParser(description="hyperparameters")
parser.add_argument('--algo_name',default='First-Visit MC',type=str,help="name of algorithm")
parser.add_argument('--env_name',default='Racetrack',type=str,help="name of environment")
parser.add_argument('--train_eps',default=200,type=int,help="episodes of training")
parser.add_argument('--test_eps',default=20,type=int,help="episodes of testing")
parser.add_argument('--gamma',default=0.9,type=float,help="discount factor")
parser.add_argument('--epsilon',default=0.15,type=float,help="the probability to select a random action")
parser.add_argument('--device',default='cpu',type=str,help="cpu or cuda")
parser.add_argument('--result_path',default=curr_path + "/outputs/" + parser.parse_args().env_name + \
'/' + curr_time + '/results/' )
parser.add_argument('--model_path',default=curr_path + "/outputs/" + parser.parse_args().env_name + \
'/' + curr_time + '/models/' )
parser.add_argument('--show_fig',default=False,type=bool,help="whether to show the figure")
parser.add_argument('--save_fig',default=True,type=bool,help="whether to save the figure")
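# Note: argparse's type=bool is a known pitfall: bool() of any non-empty string is True,
# so passing '--save_fig False' on the command line still yields True;
# action='store_true' / 'store_false' is the usual alternative.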
args = parser.parse_args()
return args
def env_agent_config(cfg,seed=1):
env = RacetrackEnv()
n_actions = env.action_space.n
agent = FisrtVisitMC(n_actions, cfg)
return env,agent
def train(cfg, env, agent):
print("开始训练!")
print(f"环境:{cfg.env_name},算法:{cfg.algo_name},设备:{cfg.device}")
rewards = []
for i_ep in range(cfg.train_eps):
state = env.reset()
ep_reward = 0
one_ep_transition = []
while True:
action = agent.sample(state)
next_state, reward, done = env.step(action)
ep_reward += reward
one_ep_transition.append((state, action, reward))
state = next_state
if done:
break
rewards.append(ep_reward)
agent.update(one_ep_transition)
if (i_ep+1) % 10 == 0:
print(f"Episode:{i_ep+1}/{cfg.train_eps}: Reward:{ep_reward}")
print("完成训练")
return {'rewards':rewards}
def test(cfg, env, agent):
print("开始测试!")
print(f"环境:{cfg.env_name}, 算法:{cfg.algo_name}, 设备:{cfg.device}")
rewards = []
for i_ep in range(cfg.test_eps):
state = env.reset()
ep_reward = 0
while True:
action = agent.predict(state)
next_state, reward, done = env.step(action)
ep_reward += reward
state = next_state
if done:
break
rewards.append(ep_reward)
print(f'Episode: {i_ep+1}/{cfg.test_eps}, Reward: {ep_reward:.2f}')
return {'rewards':rewards}
if __name__ == "__main__":
cfg = get_args()
# train
env, agent = env_agent_config(cfg)
res_dic = train(cfg, env, agent)
save_args(cfg,path = cfg.result_path) # save the parameters to the result path
agent.save(path = cfg.model_path) # save the model
save_results(res_dic, tag = 'train', path = cfg.result_path)
plot_rewards(res_dic['rewards'], cfg, path = cfg.result_path,tag = "train")
# test
env, agent = env_agent_config(cfg) # optional: a fresh environment for testing avoids any issues the training environment may have accumulated
agent.load(path = cfg.model_path) # load the model
res_dic = test(cfg, env, agent)
save_results(res_dic, tag='test',
path = cfg.result_path) # save the results
plot_rewards(res_dic['rewards'], cfg, path = cfg.result_path,tag = "test") # plot the results
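save_results, save_args, and plot_rewards are imported from common.utils, which is not part of this diff. For orientation, here is a minimal sketch of a plot_rewards compatible with the call above, assuming a simple smoothed-curve plot; the smoothing weight, title format, and file name are assumptions, not the repository's implementation.

import matplotlib.pyplot as plt

def smooth(data, weight=0.9):
    """Exponential moving average, like the ma_rewards in the deleted script below."""
    smoothed, last = [], data[0]
    for point in data:
        last = last * weight + (1 - weight) * point
        smoothed.append(last)
    return smoothed

def plot_rewards(rewards, cfg, path=None, tag='train'):
    plt.figure()
    plt.title(f"{tag}ing curve of {cfg.algo_name} on {cfg.env_name}")
    plt.xlabel('episodes')
    plt.plot(rewards, label='rewards')
    plt.plot(smooth(rewards), label='smoothed')
    plt.legend()
    if cfg.save_fig:
        plt.savefig(path + f"{tag}ing_curve.png")
    if cfg.show_fig:
        plt.show()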

View File

@@ -1,118 +0,0 @@
#!/usr/bin/env python
# coding=utf-8
'''
Author: John
Email: johnjim0816@gmail.com
Date: 2021-03-11 14:26:44
LastEditor: John
LastEditTime: 2021-05-05 17:27:50
Description:
Environment:
'''
import sys,os
curr_path = os.path.dirname(__file__)
parent_path = os.path.dirname(curr_path)
sys.path.append(parent_path) # add current terminal path to sys.path
import torch
import datetime
from common.utils import save_results,make_dir
from common.plot import plot_rewards
from MonteCarlo.agent import FisrtVisitMC
from envs.racetrack_env import RacetrackEnv
curr_time = datetime.datetime.now().strftime(
"%Y%m%d-%H%M%S") # obtain current time
class MCConfig:
def __init__(self):
self.algo = "MC" # name of algo
self.env = 'Racetrack'
self.result_path = curr_path+"/outputs/" + self.env + \
'/'+curr_time+'/results/' # path to save results
self.model_path = curr_path+"/outputs/" + self.env + \
'/'+curr_time+'/models/' # path to save models
# epsilon: the probability to select a random action
self.epsilon = 0.15
self.gamma = 0.9 # discount factor
self.train_eps = 200
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu") # check gpu
def env_agent_config(cfg,seed=1):
env = RacetrackEnv()
n_actions = 9
agent = FisrtVisitMC(n_actions, cfg)
return env,agent
def train(cfg, env, agent):
print('Start to train!')
print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
rewards = []
ma_rewards = [] # moving average rewards
for i_ep in range(cfg.train_eps):
state = env.reset()
ep_reward = 0
one_ep_transition = []
while True:
action = agent.choose_action(state)
next_state, reward, done = env.step(action)
ep_reward += reward
one_ep_transition.append((state, action, reward))
state = next_state
if done:
break
rewards.append(ep_reward)
if ma_rewards:
ma_rewards.append(ma_rewards[-1]*0.9+ep_reward*0.1)
else:
ma_rewards.append(ep_reward)
agent.update(one_ep_transition)
if (i_ep+1) % 10 == 0:
print(f"Episode:{i_ep+1}/{cfg.train_eps}: Reward:{ep_reward}")
print('Complete training')
return rewards, ma_rewards
def eval(cfg, env, agent):
print('Start to eval!')
print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
rewards = []
ma_rewards = [] # moving average rewards
for i_ep in range(cfg.train_eps):
state = env.reset()
ep_reward = 0
while True:
action = agent.choose_action(state)
next_state, reward, done = env.step(action)
ep_reward += reward
state = next_state
if done:
break
rewards.append(ep_reward)
if ma_rewards:
ma_rewards.append(ma_rewards[-1]*0.9+ep_reward*0.1)
else:
ma_rewards.append(ep_reward)
if (i_ep+1) % 10 == 0:
print(f"Episode:{i_ep+1}/{cfg.train_eps}: Reward:{ep_reward}")
return rewards, ma_rewards
if __name__ == "__main__":
cfg = MCConfig()
# train
env,agent = env_agent_config(cfg,seed=1)
rewards, ma_rewards = train(cfg, env, agent)
make_dir(cfg.result_path, cfg.model_path)
agent.save(path=cfg.model_path)
save_results(rewards, ma_rewards, tag='train', path=cfg.result_path)
plot_rewards(rewards, ma_rewards, tag="train",
algo=cfg.algo, path=cfg.result_path)
# eval
env,agent = env_agent_config(cfg,seed=10)
agent.load(path=cfg.model_path)
rewards,ma_rewards = eval(cfg,env,agent)
save_results(rewards,ma_rewards,tag='eval',path=cfg.result_path)
plot_rewards(rewards,ma_rewards,tag="eval",env=cfg.env,algo = cfg.algo,path=cfg.result_path)
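A note on what this deletion drops: the old script tracked ma_rewards, an exponential moving average of episode rewards, ma_t = 0.9 * ma_{t-1} + 0.1 * r_t (with ma_1 = r_1), which weights the reward of episode k by roughly 0.1 * 0.9^(t-k). The new main.py no longer keeps this series, presumably leaving any smoothing to plot_rewards.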