1. PPO Optimizations
For an introduction to PPO and a basic implementation, see my earlier article 强化学习_06_pytorch-PPO实践(Pendulum-v1).
Compared with that PPO implementation, the main optimizations are:
- batch_normalize: normalize the advantage inside the mini_batch collate function, which speeds up learning from the advantage signal
- policyNet uses a Beta distribution (range 0~1), plus a MaxMinScale step that maps the Beta samples into the action space (a minimal sketch of these two helpers is given right after this list)
- collect several episodes of data, compute the advantage for each episode separately, then merge everything into a single dataloader to iterate over: this speeds up convergence
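Neither mini_batch nor policyNet is reproduced in this post (they live in the repo's utility modules). The sketch below is only a minimal illustration of the first two points, assuming the signatures used by the PPO2 class in 1.1 and that memDataset yields one transition per index; the internals are my own assumptions, not the repo's exact code.

# Minimal sketch (assumptions, not the repo's implementation) of the two helpers referenced above:
# an advantage-normalizing mini_batch collate and a Beta-distribution policy head.
import random
import typing as typ

import torch
from torch import nn
import torch.nn.functional as F


def mini_batch(batch, mini_batch_size: int = 128):
    # DataLoader collate_fn: optionally sub-sample the SGD batch (one possible reading of
    # minibatch_size), then normalize the advantage inside the batch so the policy update
    # sees a zero-mean, unit-variance advantage signal.
    if len(batch) > mini_batch_size:
        batch = random.sample(batch, mini_batch_size)
    state, action, old_log_prob, adv, td_target = zip(*batch)
    adv = torch.stack(adv).view(-1)  # (B,): matches the summed log-prob ratio in PPO2.update
    adv = (adv - adv.mean()) / (adv.std() + 1e-8)
    return (torch.stack(state), torch.stack(action),
            torch.stack(old_log_prob), adv, torch.stack(td_target).view(-1, 1))


class policyNet(nn.Module):
    # Beta policy head: both concentration parameters are kept strictly positive, so samples
    # lie in (0, 1); PPO2._action_fix then MaxMin-scales them into [action_low, action_high].
    def __init__(self, state_dim: int, hidden_layers_dim: typ.List, action_dim: int, dist_type: str = 'beta'):
        super().__init__()
        self.dist_type = dist_type
        layers, in_dim = [], state_dim
        for h in hidden_layers_dim:
            layers += [nn.Linear(in_dim, h), nn.Tanh()]
            in_dim = h
        self.backbone = nn.Sequential(*layers)
        self.alpha_head = nn.Linear(in_dim, action_dim)
        self.beta_head = nn.Linear(in_dim, action_dim)

    def get_dist(self, state: torch.Tensor, action_bound: float = 1.0):
        x = self.backbone(state)
        alpha = F.softplus(self.alpha_head(x)) + 1.0
        beta = F.softplus(self.beta_head(x)) + 1.0
        return torch.distributions.Beta(alpha, beta)  # action_bound would only matter for a Normal head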
1.1 PPO2 Code
Full source: Github: PPO2.py
import os
import typing as typ
from typing import List
from collections import deque
from functools import partial

import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader

# policyNet, valueNet, memDataset, mini_batch and compute_advantage are project-internal
# helpers; see Github: PPO2.py for the exact imports used in the repo.


class PPO2:
    """
    PPO2 algorithm, using the clipped surrogate objective
    """
    def __init__(self,
                 state_dim: int,
                 actor_hidden_layers_dim: typ.List,
                 critic_hidden_layers_dim: typ.List,
                 action_dim: int,
                 actor_lr: float,
                 critic_lr: float,
                 gamma: float,
                 PPO_kwargs: typ.Dict,
                 device: torch.device,
                 reward_func: typ.Optional[typ.Callable]=None
                 ):
        dist_type = PPO_kwargs.get('dist_type', 'beta')
        self.dist_type = dist_type
        self.actor = policyNet(state_dim, actor_hidden_layers_dim, action_dim, dist_type=dist_type).to(device)
        self.critic = valueNet(state_dim, critic_hidden_layers_dim).to(device)
        self.actor_lr = actor_lr
        self.critic_lr = critic_lr
        self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=actor_lr)
        self.critic_opt = torch.optim.Adam(self.critic.parameters(), lr=critic_lr)
        self.gamma = gamma
        self.lmbda = PPO_kwargs['lmbda']
        self.k_epochs = PPO_kwargs['k_epochs']  # number of training epochs over each collected batch of sequences
        self.eps = PPO_kwargs['eps']  # clipping range of the PPO surrogate objective
        self.sgd_batch_size = PPO_kwargs.get('sgd_batch_size', 512)
        self.minibatch_size = PPO_kwargs.get('minibatch_size', 128)
        self.action_bound = PPO_kwargs.get('action_bound', 1.0)
        self.action_low = -1 * self.action_bound
        self.action_high = self.action_bound
        if 'action_space' in PPO_kwargs:
            # take the bounds from the env's action_space when it is provided
            self.action_low = PPO_kwargs['action_space'].low
            self.action_high = PPO_kwargs['action_space'].high
        self.count = 0
        self.device = device
        self.reward_func = reward_func
        self.min_batch_collate_func = partial(mini_batch, mini_batch_size=self.minibatch_size)
    def _action_fix(self, act):
        if self.dist_type == 'beta':
            # beta samples in [0, 1] -> [action_low, action_high]
            return act * (self.action_high - self.action_low) + self.action_low
        return act

    def _action_return(self, act):
        if self.dist_type == 'beta':
            # [action_low, action_high] -> [0, 1]
            return (act - self.action_low) / (self.action_high - self.action_low)
        return act
    def policy(self, state):
        state = torch.FloatTensor(np.array([state])).to(self.device)
        action_dist = self.actor.get_dist(state, self.action_bound)
        action = action_dist.sample()
        action = self._action_fix(action)
        return action.cpu().detach().numpy()[0]
    def _one_deque_pp(self, samples: deque):
        state, action, reward, next_state, done = zip(*samples)
        state = torch.FloatTensor(np.stack(state)).to(self.device)
        action = torch.FloatTensor(np.stack(action)).to(self.device)
        reward = torch.tensor(np.stack(reward)).view(-1, 1).to(self.device)
        if self.reward_func is not None:
            reward = self.reward_func(reward)
        next_state = torch.FloatTensor(np.stack(next_state)).to(self.device)
        done = torch.FloatTensor(np.stack(done)).view(-1, 1).to(self.device)
        old_v = self.critic(state)
        td_target = reward + self.gamma * self.critic(next_state) * (1 - done)
        td_delta = td_target - old_v
        advantage = compute_advantage(self.gamma, self.lmbda, td_delta, done).to(self.device)
        # recompute the TD target from the GAE advantage
        td_target = advantage + old_v
        action_dists = self.actor.get_dist(state, self.action_bound)
        old_log_probs = action_dists.log_prob(self._action_return(action))
        return state, action, old_log_probs, advantage, td_target
    def data_prepare(self, samples_list: List[deque]):
        state_pt_list = []
        action_pt_list = []
        old_log_probs_pt_list = []
        advantage_pt_list = []
        td_target_pt_list = []
        for sample in samples_list:
            state_i, action_i, old_log_probs_i, advantage_i, td_target_i = self._one_deque_pp(sample)
            state_pt_list.append(state_i)
            action_pt_list.append(action_i)
            old_log_probs_pt_list.append(old_log_probs_i)
            advantage_pt_list.append(advantage_i)
            td_target_pt_list.append(td_target_i)

        state = torch.concat(state_pt_list)
        action = torch.concat(action_pt_list)
        old_log_probs = torch.concat(old_log_probs_pt_list)
        advantage = torch.concat(advantage_pt_list)
        td_target = torch.concat(td_target_pt_list)
        return state, action, old_log_probs, advantage, td_target
    def update(self, samples_list: List[deque]):
        state, action, old_log_probs, advantage, td_target = self.data_prepare(samples_list)
        if len(old_log_probs.shape) == 2:
            old_log_probs = old_log_probs.sum(dim=1)

        d_set = memDataset(state, action, old_log_probs, advantage, td_target)
        train_loader = DataLoader(
            d_set,
            batch_size=self.sgd_batch_size,
            shuffle=True,
            drop_last=True,
            collate_fn=self.min_batch_collate_func
        )
        for _ in range(self.k_epochs):
            for state_, action_, old_log_prob, adv, td_v in train_loader:
                action_dists = self.actor.get_dist(state_, self.action_bound)
                log_prob = action_dists.log_prob(self._action_return(action_))
                if len(log_prob.shape) == 2:
                    log_prob = log_prob.sum(dim=1)

                # ratio = exp(log_prob - old_log_prob) = pi_theta(a|s) / pi_theta_old(a|s)
                ratio = torch.exp(log_prob - old_log_prob.detach())
                surr1 = ratio * adv
                surr2 = torch.clamp(ratio, 1 - self.eps, 1 + self.eps) * adv
                actor_loss = torch.mean(-torch.min(surr1, surr2)).float()
                critic_loss = torch.mean(
                    F.mse_loss(self.critic(state_).float(), td_v.detach().float())
                ).float()
                self.actor_opt.zero_grad()
                self.critic_opt.zero_grad()
                actor_loss.backward()
                critic_loss.backward()
                torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 0.5)
                torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 0.5)
                self.actor_opt.step()
                self.critic_opt.step()
        return True
    def save_model(self, file_path):
        if not os.path.exists(file_path):
            os.makedirs(file_path)
        act_f = os.path.join(file_path, 'PPO_actor.ckpt')
        critic_f = os.path.join(file_path, 'PPO_critic.ckpt')
        torch.save(self.actor.state_dict(), act_f)
        torch.save(self.critic.state_dict(), critic_f)

    def load_model(self, file_path):
        act_f = os.path.join(file_path, 'PPO_actor.ckpt')
        critic_f = os.path.join(file_path, 'PPO_critic.ckpt')
        self.actor.load_state_dict(torch.load(act_f, map_location='cpu'))
        self.critic.load_state_dict(torch.load(critic_f, map_location='cpu'))
        self.actor.to(self.device)
        self.critic.to(self.device)
        self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=self.actor_lr)
        self.critic_opt = torch.optim.Adam(self.critic.parameters(), lr=self.critic_lr)
    def train(self):
        self.training = True
        self.actor.train()
        self.critic.train()

    def eval(self):
        self.training = False
        self.actor.eval()
        self.critic.eval()
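compute_advantage, memDataset and valueNet are also repo-internal helpers that are not shown above. For reference, a GAE routine consistent with how compute_advantage is called in _one_deque_pp (the signature is taken from that call site; the body is my own sketch) could look like this:

import numpy as np
import torch

def compute_advantage(gamma: float, lmbda: float, td_delta: torch.Tensor, done: torch.Tensor) -> torch.Tensor:
    # Generalized Advantage Estimation:
    #   A_t = delta_t + gamma * lmbda * (1 - done_t) * A_{t+1},
    # accumulated backwards and reset at episode boundaries so advantages never leak across episodes.
    td_delta = td_delta.detach().cpu().view(-1).numpy()
    done = done.detach().cpu().view(-1).numpy()
    advantages = np.zeros_like(td_delta)
    running = 0.0
    for t in reversed(range(len(td_delta))):
        running = td_delta[t] + gamma * lmbda * running * (1.0 - done[t])
        advantages[t] = running
    return torch.from_numpy(advantages).float().view(-1, 1)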
2. PyTorch in Practice
2.1 Building and Training the Agent
Relative to the earlier setup, PPO2 mainly collects several episodes of rollouts per update; on top of that we train for more episodes, slightly lower the learning rates, and slightly deepen the Actor and Critic networks.
Full script: Github: test_ppo.Hopper_v4_ppo2_test
import os
from os.path import dirname
import sys
import gymnasium as gym
import torch
# the author's RL library on GitHub
from RLAlgo.PPO import PPO
from RLAlgo.PPO2 import PPO2
from RLUtils import train_on_policy, random_play, play, Config, gym_env_desc
env_name = 'Hopper-v4'
gym_env_desc(env_name)
print("gym.__version__ = ", gym.__version__)
path_ = os.path.dirname(__file__)
env = gym.make(
    env_name,
    exclude_current_positions_from_observation=True,
    # healthy_reward=0
)
cfg = Config(
    env,
    # environment
    save_path=os.path.join(path_, "test_models", 'PPO_Hopper-v4_test2'),
    seed=42,
    # networks
    actor_hidden_layers_dim=[256, 256, 256],
    critic_hidden_layers_dim=[256, 256, 256],
    # agent
    actor_lr=1.5e-4,
    critic_lr=5.5e-4,
    gamma=0.99,
    # training
    num_episode=12500,
    off_buffer_size=512,
    off_minimal_size=510,
    max_episode_steps=500,
    PPO_kwargs={
        'lmbda': 0.9,
        'eps': 0.25,
        'k_epochs': 4,
        'sgd_batch_size': 128,
        'minibatch_size': 12,
        'action_bound': 1,  # PPO2 reads 'action_bound' from PPO_kwargs
        'dist_type': 'beta'
    }
)
agent = PPO2(
    state_dim=cfg.state_dim,
    actor_hidden_layers_dim=cfg.actor_hidden_layers_dim,
    critic_hidden_layers_dim=cfg.critic_hidden_layers_dim,
    action_dim=cfg.action_dim,
    actor_lr=cfg.actor_lr,
    critic_lr=cfg.critic_lr,
    gamma=cfg.gamma,
    PPO_kwargs=cfg.PPO_kwargs,
    device=cfg.device,
    reward_func=None
)
agent.train()
train_on_policy(env, agent, cfg, wandb_flag=False, train_without_seed=True, test_ep_freq=1000,
                online_collect_nums=cfg.off_buffer_size,
                test_episode_count=5)
2.2 Watching the Trained Agent
Finally, load the best network saved during training and watch the agent play.
agent.load_model(cfg.save_path)
agent.eval()
env_ = gym.make(
    env_name,
    exclude_current_positions_from_observation=True,
    render_mode='human'
)
play(env_, agent, cfg, episode_count=3, play_without_seed=True, render=True)