Add DQN (CNN) code for CartPole
107  codes/dqn_cnn/dqn.py  Normal file
@@ -0,0 +1,107 @@
import random
import math
import torch
import torch.optim as optim
import torch.nn.functional as F
from memory import ReplayBuffer
from model import CNN


class DQN:
    def __init__(self, screen_height=0, screen_width=0, n_actions=0, gamma=0.999, epsilon_start=0.9, epsilon_end=0.05, epsilon_decay=200, memory_capacity=10000, batch_size=128, device="cpu"):
        self.actions_count = 0
        self.n_actions = n_actions
        self.device = device
        self.gamma = gamma
        self.epsilon = 0
        self.epsilon_start = epsilon_start
        self.epsilon_end = epsilon_end
        self.epsilon_decay = epsilon_decay
        self.batch_size = batch_size
        self.policy_net = CNN(screen_height, screen_width,
                              n_actions).to(self.device)
        self.target_net = CNN(screen_height, screen_width,
                              n_actions).to(self.device)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()  # eval mode: BatchNorm and Dropout layers behave deterministically
        self.optimizer = optim.RMSprop(self.policy_net.parameters())
        self.loss = 0
        self.memory = ReplayBuffer(memory_capacity)

    def select_action(self, state):
        '''Select an action with an epsilon-greedy policy.

        Args:
            state [torch tensor]: current screen-difference state, shape (1, C, H, W)
        Returns:
            action [torch tensor]: index of the chosen action, shape (1, 1)
        '''
        sample = random.random()
        self.epsilon = self.epsilon_end + (self.epsilon_start - self.epsilon_end) * \
            math.exp(-1. * self.actions_count / self.epsilon_decay)
        self.actions_count += 1
        if sample > self.epsilon:
            with torch.no_grad():
                # t.max(1) returns the largest column value of each row;
                # the second element of the max result is the index of the max element,
                # so we pick the action with the larger expected reward.
                q_value = self.policy_net(state)  # e.g. tensor([[-0.2522, 0.3887]])
                action = q_value.max(1)[1].view(1, 1)  # index of the largest Q value; note that the action is a tensor, e.g. tensor([[1]])
                return action
        else:
            return torch.tensor([[random.randrange(self.n_actions)]], device=self.device, dtype=torch.long)

    def update(self):
        if len(self.memory) < self.batch_size:
            return
        transitions = self.memory.sample(self.batch_size)
        # Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for a
        # detailed explanation). This converts a batch-array of Transitions
        # to a Transition of batch-arrays.
        batch = self.memory.Transition(*zip(*transitions))

        # Compute a mask of non-final states and concatenate the batch elements
        # (a final state would've been the one after which simulation ended)
        non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
                                                batch.next_state)), device=self.device, dtype=torch.bool)
        non_final_next_states = torch.cat([s for s in batch.next_state
                                           if s is not None])
        state_batch = torch.cat(batch.state)
        action_batch = torch.cat(batch.action)
        reward_batch = torch.cat(batch.reward)  # e.g. tensor([1., 1., ...])

        # Compute Q(s_t, a) - the model computes Q(s_t), then we select the
        # columns of actions taken. These are the actions which would've been taken
        # for each batch state according to policy_net
        state_action_values = self.policy_net(
            state_batch).gather(1, action_batch)  # e.g. tensor([[1.1217], ..., [0.8314]])

        # Compute V(s_{t+1}) for all next states.
        # Expected values of actions for non_final_next_states are computed based
        # on the "older" target_net; selecting their best reward with max(1)[0].
        # This is merged based on the mask, such that we'll have either the expected
        # state value or 0 in case the state was final.
        next_state_values = torch.zeros(self.batch_size, device=self.device)
        next_state_values[non_final_mask] = self.target_net(
            non_final_next_states).max(1)[0].detach()

        # Compute the expected Q values
        expected_state_action_values = (next_state_values * self.gamma) + reward_batch  # e.g. tensor([0.9685, 0.9683, ...])

        # Compute Huber loss
        self.loss = F.smooth_l1_loss(
            state_action_values, expected_state_action_values.unsqueeze(1))  # .unsqueeze adds a dimension
        # Optimize the model
        self.optimizer.zero_grad()  # zero_grad clears old gradients from the last step (otherwise gradients from every loss.backward() call would accumulate)
        self.loss.backward()  # computes the derivative of the loss w.r.t. the parameters via backpropagation
        for param in self.policy_net.parameters():  # clip gradients to prevent them from exploding
            param.grad.data.clamp_(-1, 1)
        self.optimizer.step()  # takes an optimizer step based on the gradients of the parameters


if __name__ == "__main__":
    dqn = DQN()
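Note: select_action above uses an exponentially decaying exploration rate, epsilon = epsilon_end + (epsilon_start - epsilon_end) * exp(-actions_count / epsilon_decay). A minimal sketch of that schedule with the defaults used here (0.9 down to 0.05, decay 200); the step counts in the loop are only illustrative:

import math

def epsilon_at(step, eps_start=0.9, eps_end=0.05, decay=200):
    # same schedule as DQN.select_action: decays from eps_start towards eps_end
    return eps_end + (eps_start - eps_end) * math.exp(-step / decay)

for step in (0, 200, 600, 2000):  # illustrative step counts
    print(step, round(epsilon_at(step), 3))
# roughly 0.9, 0.363, 0.092, 0.05: exploration fades towards eps_end

With decay 200, epsilon is within about 0.01 of epsilon_end after roughly 1000 action selections.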
115  codes/dqn_cnn/main.py  Normal file
@@ -0,0 +1,115 @@
#!/usr/bin/env python
# coding=utf-8
'''
@Author: John
@Email: johnjim0816@gmail.com
@Date: 2020-06-11 10:01:09
@LastEditor: John
@LastEditTime: 2020-06-13 00:24:31
@Discription:
@Environment: python 3.7.7
'''
'''
Training probably does not converge here, but the official PyTorch tutorial gets similar results.
'''
import gym
import torch

from screen_state import get_screen
from dqn import DQN
from plot import plot

import argparse


def get_args():
    '''Once the model is built, hyperparameters only need to be tuned here.
    '''
    parser = argparse.ArgumentParser()

    parser.add_argument("--gamma", default=0.999, type=float)  # discount factor gamma in Q-learning
    parser.add_argument("--epsilon_start", default=0.9, type=float)  # epsilon for the epsilon-greedy action selection
    parser.add_argument("--epsilon_end", default=0.05, type=float)
    parser.add_argument("--epsilon_decay", default=200, type=float)

    parser.add_argument("--memory_capacity", default=10000, type=int, help="capacity of Replay Memory")

    parser.add_argument("--batch_size", default=128, type=int, help="batch size of memory sampling")
    parser.add_argument("--max_episodes", default=100, type=int)
    parser.add_argument("--max_steps", default=200, type=int)
    parser.add_argument("--target_update", default=4, type=int, help="update the target net every N episodes (default: 4)")
    config = parser.parse_args()

    return config


if __name__ == "__main__":

    cfg = get_args()
    # if gpu is to be used
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Get screen size so that we can initialize layers correctly based on shape
    # returned from AI gym. Typical dimensions at this point are close to 3x40x90
    # which is the result of a clamped and down-scaled render buffer in get_screen(env,device)
    env = gym.make('CartPole-v0').unwrapped
    env.reset()
    init_screen = get_screen(env, device)
    _, _, screen_height, screen_width = init_screen.shape
    # Get number of actions from gym action space
    n_actions = env.action_space.n
    agent = DQN(screen_height=screen_height, screen_width=screen_width,
                n_actions=n_actions, device=device, gamma=cfg.gamma, epsilon_start=cfg.epsilon_start, epsilon_end=cfg.epsilon_end, epsilon_decay=cfg.epsilon_decay, memory_capacity=cfg.memory_capacity, batch_size=cfg.batch_size)

    rewards = []
    moving_average_rewards = []
    for i_episode in range(1, cfg.max_episodes+1):
        # Initialize the environment and state
        env.reset()
        last_screen = get_screen(env, device)
        current_screen = get_screen(env, device)
        state = current_screen - last_screen
        ep_reward = 0
        for t in range(1, cfg.max_steps+1):
            # Select and perform an action
            action = agent.select_action(state)
            _, reward, done, _ = env.step(action.item())
            ep_reward += reward
            reward = torch.tensor([reward], device=device)
            # Observe new state
            last_screen = current_screen
            current_screen = get_screen(env, device)
            # Use None as the next state of a terminal transition, so that the
            # non-final mask in DQN.update() can zero out its bootstrapped value.
            next_state = None if done else current_screen - last_screen

            # Store the transition in memory
            agent.memory.push(state, action, next_state, reward)

            # Move to the next state
            state = next_state

            # Perform one step of the optimization (on the policy network)
            agent.update()
            if done:
                break

        # Update the target network, copying all weights and biases in DQN
        if i_episode % cfg.target_update == 0:
            agent.target_net.load_state_dict(agent.policy_net.state_dict())
        print('Episode:', i_episode, 'Reward: %i' % int(ep_reward), 'Explore: %.2f' % agent.epsilon)
        rewards.append(ep_reward)
        if i_episode == 1:
            moving_average_rewards.append(ep_reward)
        else:
            moving_average_rewards.append(
                0.9*moving_average_rewards[-1]+0.1*ep_reward)

    import os
    import numpy as np
    output_path = os.path.dirname(__file__)+"/result/"
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    np.save(output_path+"rewards.npy", rewards)
    np.save(output_path+"moving_average_rewards.npy", moving_average_rewards)
    print('Complete!')
    plot(rewards)
    plot(moving_average_rewards, ylabel="moving_average_rewards")
37  codes/dqn_cnn/memory.py  Normal file
@@ -0,0 +1,37 @@
#!/usr/bin/env python
# coding=utf-8
'''
@Author: John
@Email: johnjim0816@gmail.com
@Date: 2020-06-11 09:42:44
@LastEditor: John
@LastEditTime: 2020-06-11 15:50:33
@Discription:
@Environment: python 3.7.7
'''
from collections import namedtuple
import random


class ReplayBuffer(object):

    def __init__(self, capacity):
        self.capacity = capacity
        self.buffer = []
        self.position = 0
        self.Transition = namedtuple('Transition',
                                     ('state', 'action', 'next_state', 'reward'))

    def push(self, *args):
        """Saves a transition."""
        if len(self.buffer) < self.capacity:
            self.buffer.append(None)
        self.buffer[self.position] = self.Transition(*args)
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        return random.sample(self.buffer, batch_size)

    def __len__(self):
        return len(self.buffer)
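Note: a minimal usage sketch of ReplayBuffer (the tensors below are dummy placeholders chosen only to illustrate shapes); it also shows the Transition(*zip(*transitions)) transpose that dqn.py relies on:

import torch
from memory import ReplayBuffer

buffer = ReplayBuffer(capacity=100)
for i in range(5):
    # dummy single-step transitions, just for shape illustration
    state = torch.zeros(1, 3, 40, 90)
    action = torch.tensor([[i % 2]])
    next_state = torch.zeros(1, 3, 40, 90)
    reward = torch.tensor([1.0])
    buffer.push(state, action, next_state, reward)

transitions = buffer.sample(3)                      # list of 3 Transition namedtuples
batch = buffer.Transition(*zip(*transitions))       # one Transition whose fields are tuples
print(len(buffer), torch.cat(batch.action).shape)   # 5 torch.Size([3, 1])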
41  codes/dqn_cnn/model.py  Normal file
@@ -0,0 +1,41 @@
#!/usr/bin/env python
# coding=utf-8
'''
@Author: John
@Email: johnjim0816@gmail.com
@Date: 2020-06-11 12:18:12
@LastEditor: John
@LastEditTime: 2020-06-11 17:23:45
@Discription:
@Environment: python 3.7.7
'''
import torch.nn as nn
import torch.nn.functional as F


class CNN(nn.Module):

    def __init__(self, h, w, n_outputs):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
        self.bn1 = nn.BatchNorm2d(16)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
        self.bn2 = nn.BatchNorm2d(32)
        self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
        self.bn3 = nn.BatchNorm2d(32)

        # Number of Linear input connections depends on output of conv2d layers
        # and therefore the input image size, so compute it.
        def conv2d_size_out(size, kernel_size=5, stride=2):
            return (size - (kernel_size - 1) - 1) // stride + 1
        convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))
        convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))
        linear_input_size = convw * convh * 32
        self.head = nn.Linear(linear_input_size, n_outputs)

    # Called with either one element to determine next action, or a batch
    # during optimization. Returns tensor([[left0exp,right0exp]...]).
    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        return self.head(x.view(x.size(0), -1))
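Note: a quick shape check for this network; the 40x90 input size is only an assumption matching the "close to 3x40x90" screens mentioned in main.py:

import torch
from model import CNN

net = CNN(h=40, w=90, n_outputs=2)
net.eval()  # eval mode so BatchNorm is happy with a single sample
with torch.no_grad():
    dummy = torch.zeros(1, 3, 40, 90)  # (batch, channels, height, width)
    print(net(dummy).shape)            # torch.Size([1, 2]): one Q value per action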
24  codes/dqn_cnn/plot.py  Normal file
@@ -0,0 +1,24 @@
#!/usr/bin/env python
# coding=utf-8
'''
@Author: John
@Email: johnjim0816@gmail.com
@Date: 2020-06-11 16:30:09
@LastEditor: John
@LastEditTime: 2020-06-11 22:27:24
@Discription:
@Environment: python 3.7.7
'''
import matplotlib.pyplot as plt
import numpy as np
import os


def plot(item, ylabel='rewards'):
    plt.figure()
    plt.plot(np.arange(len(item)), item)
    plt.title(ylabel+' of CnnDQN')
    plt.ylabel(ylabel)  # use the passed-in label instead of hard-coding 'rewards'
    plt.xlabel('episodes')

    plt.savefig(os.path.dirname(__file__)+"/result/"+ylabel+".png")
    plt.show()
BIN  codes/dqn_cnn/result/moving_average_rewards.npy  Normal file
BIN  codes/dqn_cnn/result/moving_average_rewards.png  Normal file  (34 KiB)
BIN  codes/dqn_cnn/result/rewards.npy  Normal file
BIN  codes/dqn_cnn/result/rewards.png  Normal file  (44 KiB)
66  codes/dqn_cnn/screen_state.py  Normal file
@@ -0,0 +1,66 @@
#!/usr/bin/env python
# coding=utf-8
'''
@Author: John
@Email: johnjim0816@gmail.com
@Date: 2020-06-11 10:02:35
@LastEditor: John
@LastEditTime: 2020-06-11 16:57:34
@Discription:
@Environment: python 3.7.7
'''

import numpy as np
import torch
import torchvision.transforms as T
from PIL import Image

resize = T.Compose([T.ToPILImage(),
                    T.Resize(40, interpolation=Image.CUBIC),
                    T.ToTensor()])


def get_cart_location(env, screen_width):
    world_width = env.x_threshold * 2
    scale = screen_width / world_width
    return int(env.state[0] * scale + screen_width / 2.0)  # MIDDLE OF CART


def get_screen(env, device):
    # Returned screen requested by gym is 400x600x3, but is sometimes larger
    # such as 800x1200x3. Transpose it into torch order (CHW).
    screen = env.render(mode='rgb_array').transpose((2, 0, 1))
    # Cart is in the lower half, so strip off the top and bottom of the screen
    _, screen_height, screen_width = screen.shape
    screen = screen[:, int(screen_height*0.4):int(screen_height * 0.8)]
    view_width = int(screen_width * 0.6)
    cart_location = get_cart_location(env, screen_width)
    if cart_location < view_width // 2:
        slice_range = slice(view_width)
    elif cart_location > (screen_width - view_width // 2):
        slice_range = slice(-view_width, None)
    else:
        slice_range = slice(cart_location - view_width // 2,
                            cart_location + view_width // 2)
    # Strip off the edges, so that we have a square image centered on a cart
    screen = screen[:, :, slice_range]
    # Convert to float, rescale, convert to torch tensor
    # (this doesn't require a copy)
    screen = np.ascontiguousarray(screen, dtype=np.float32) / 255
    screen = torch.from_numpy(screen)
    # Resize, and add a batch dimension (BCHW)
    return resize(screen).unsqueeze(0).to(device)


if __name__ == "__main__":

    import gym
    env = gym.make('CartPole-v0').unwrapped
    # if gpu is to be used
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    env.reset()
    import matplotlib.pyplot as plt

    plt.figure()
    plt.imshow(get_screen(env, device).cpu().squeeze(0).permute(1, 2, 0).numpy(),
               interpolation='none')
    plt.title('Example extracted screen')
    plt.show()