ppo_class_z.py
# docs and experiment results can be found at https://docs.cleanrl.dev/rl-algorithms/ppo/#ppopy
import argparse
import os
import random
import time
from distutils.util import strtobool
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions.categorical import Categorical
from torch.utils.tensorboard import SummaryWriter
from doorsenvs import DoorZ
from agents import PPOAgentwithZ

def parse_args():
    # fmt: off
    parser = argparse.ArgumentParser()
    parser.add_argument("--exp-name", type=str, default=os.path.basename(__file__).rstrip(".py"),
        help="the name of this experiment")
    parser.add_argument("--seed", type=int, default=1,
        help="seed of the experiment")
    parser.add_argument("--torch-deterministic", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
        help="if toggled, `torch.backends.cudnn.deterministic=False`")
    parser.add_argument("--cuda", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
        help="if toggled, cuda will be enabled by default")
    parser.add_argument("--capture-video", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True,
        help="whether to capture videos of the agent performances (check out `videos` folder)")

    # Algorithm specific arguments
    parser.add_argument("--env-id", type=str, default="DoorEnv_AIRL",
        help="the id of the environment")
    parser.add_argument("--total-timesteps", type=int, default=1000000,
        help="total timesteps of the experiments")
    parser.add_argument("--learning-rate", type=float, default=2.5e-4,
        help="the learning rate of the optimizer")
    parser.add_argument("--num-envs", type=int, default=4,
        help="the number of parallel game environments")
    parser.add_argument("--num-steps", type=int, default=32,
        help="the number of steps to run in each environment per policy rollout")
    parser.add_argument("--anneal-lr", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
        help="Toggle learning rate annealing for policy and value networks")
    parser.add_argument("--gamma", type=float, default=0.99,
        help="the discount factor gamma")
    parser.add_argument("--gae-lambda", type=float, default=0.95,
        help="the lambda for the general advantage estimation")
    parser.add_argument("--num-minibatches", type=int, default=4,
        help="the number of mini-batches")
    parser.add_argument("--update-epochs", type=int, default=4,
        help="the K epochs to update the policy")
    parser.add_argument("--norm-adv", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
        help="Toggles advantages normalization")
    parser.add_argument("--clip-coef", type=float, default=0.2,
        help="the surrogate clipping coefficient")
    parser.add_argument("--clip-vloss", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
        help="Toggles whether or not to use a clipped loss for the value function, as per the paper.")
    parser.add_argument("--ent-coef", type=float, default=0.05,
        help="coefficient of the entropy")
    parser.add_argument("--vf-coef", type=float, default=0.5,
        help="coefficient of the value function")
    parser.add_argument("--max-grad-norm", type=float, default=0.5,
        help="the maximum norm for the gradient clipping")
    parser.add_argument("--target-kl", type=float, default=None,
        help="the target KL divergence threshold")
    args = parser.parse_args()
    args.batch_size = int(args.num_envs * args.num_steps)
    args.minibatch_size = int(args.batch_size // args.num_minibatches)
    # fmt: on
    return args

def step_all(envs, actions):
    """Step a list of single environments, one action per environment."""
    states, rewards, dones, infos = [], [], [], []
    for i, env in enumerate(envs):
        state, reward, done, info = env.step(actions[i])
        states.append(state)
        rewards.append(reward)
        dones.append(done)
        infos.append(info)
    return states, rewards, dones, infos

class ppo_trainer_z():
    def __init__(self, agent=None, lr=2.5e-4, total_timesteps=1000000, buffer_size=3000, env_id=""):
        self.args = parse_args()
        run_name = f"{env_id}__{self.args.exp_name}__{self.args.seed}__{int(time.time())}"
        self.writer = SummaryWriter(f"runs/{run_name}")
        self.writer.add_text(
            "hyperparameters",
            "|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(self.args).items()])),
        )

        # TRY NOT TO MODIFY: seeding
        random.seed(self.args.seed)
        np.random.seed(self.args.seed)
        torch.manual_seed(self.args.seed)
        torch.backends.cudnn.deterministic = self.args.torch_deterministic

        self.device = torch.device("cuda" if torch.cuda.is_available() and self.args.cuda else "cpu")
        # Create the environments before the agent so a default agent can be built from them.
        self.envs = [DoorZ(max_steps=self.args.num_steps, seed=self.args.seed + i) for i in range(self.args.num_envs)]
        if agent:
            self.agent = agent.to(self.device)
        else:
            self.agent = PPOAgentwithZ(self.envs).to(self.device)
        self.optimizer = optim.Adam(self.agent.parameters(), lr=lr, eps=1e-5)
        self.buffersize = buffer_size
        self.num_updates = total_timesteps // self.args.batch_size
        self.reset()

    def reset(self):
        # env setup
        #envs = [make_env(args.seed + i) for i in range(args.num_envs)]
        # ALGO Logic: Storage setup
        self.obs = torch.zeros((self.args.num_steps, self.args.num_envs) + self.envs[0].single_observation_space).to(self.device)
        self.actions = torch.zeros((self.args.num_steps, self.args.num_envs) + self.envs[0].single_action_space).to(self.device)
        self.Z = torch.zeros((self.args.num_steps, self.args.num_envs, self.envs[0].classes)).to(self.device)
        self.logprobs = torch.zeros((self.args.num_steps, self.args.num_envs)).to(self.device)
        self.rewards = torch.zeros((self.args.num_steps, self.args.num_envs)).to(self.device)
        self.dones = torch.zeros((self.args.num_steps, self.args.num_envs)).to(self.device)
        self.values = torch.zeros((self.args.num_steps, self.args.num_envs)).to(self.device)

        # TRY NOT TO MODIFY: start the game
        self.global_step = 0
        self.start_time = time.time()
        # Running archive of rollout data across updates; train() trims it to the last
        # `buffer_size` rows. The single zero row keeps torch.vstack well-defined on the first update.
        self.all_obs = self.obs.reshape((-1,) + self.envs[0].single_observation_space)[:1, ...]
        self.all_acts = self.actions.reshape((-1,) + self.envs[0].single_action_space)[:1, ...]
        self.all_Zs = self.Z.reshape((-1, self.envs[0].classes))[:1, ...]
        self.all_logprobs = self.logprobs.reshape((-1, 1))[:1, ...]

    def train(self):
        next_obs = torch.Tensor([env.reset() for env in self.envs]).to(self.device)
        next_z = torch.vstack([env.gt_z for env in self.envs]).to(self.device)
        next_done = torch.zeros(self.args.num_envs).to(self.device)
        for update in range(1, self.num_updates + 1):
            # Annealing the rate if instructed to do so.
            if self.args.anneal_lr:
                frac = 1.0 - (update - 1.0) / self.num_updates
                lrnow = frac * self.args.learning_rate
                self.optimizer.param_groups[0]["lr"] = lrnow

            for step in range(0, self.args.num_steps):
                self.global_step += 1 * self.args.num_envs
                self.obs[step] = next_obs
                self.Z[step] = next_z
                self.dones[step] = next_done

                # ALGO LOGIC: action logic
                with torch.no_grad():
                    action, logprob, _, value = self.agent.get_action_and_value(next_obs, z=next_z)
                    self.values[step] = value.flatten()
                self.actions[step] = nn.functional.one_hot(action, num_classes=self.envs[0].single_action_space[0])
                self.logprobs[step] = logprob

                # TRY NOT TO MODIFY: execute the game and log data.
                #next_obs, reward, done, info = envs.step(action.cpu().numpy())
                next_obs, reward, done, info = step_all(self.envs, action.cpu().numpy())
                next_z = torch.vstack([env.gt_z for env in self.envs]).to(self.device)
                self.rewards[step] = torch.tensor(reward).to(self.device).view(-1)
                next_obs, next_done = torch.Tensor(next_obs).to(self.device), torch.Tensor(done).to(self.device)

                for item in info:
                    if "episode" in item.keys():
                        print(f"global_step={self.global_step}, episodic_return={item['episode']['r']}")
                        self.writer.add_scalar("charts/episodic_return", item["episode"]["r"], self.global_step)
                        self.writer.add_scalar("charts/episodic_length", item["episode"]["l"], self.global_step)
                        break
            # bootstrap value if not done
            with torch.no_grad():
                next_value = self.agent.get_value(next_obs).reshape(1, -1)
                advantages = torch.zeros_like(self.rewards).to(self.device)
                lastgaelam = 0
                for t in reversed(range(self.args.num_steps)):
                    if t == self.args.num_steps - 1:
                        nextnonterminal = 1.0 - next_done
                        nextvalues = next_value
                    else:
                        nextnonterminal = 1.0 - self.dones[t + 1]
                        nextvalues = self.values[t + 1]
                    delta = self.rewards[t] + self.args.gamma * nextvalues * nextnonterminal - self.values[t]
                    advantages[t] = lastgaelam = delta + self.args.gamma * self.args.gae_lambda * nextnonterminal * lastgaelam
                returns = advantages + self.values
            # flatten the batch
            b_obs = self.obs.reshape((-1,) + self.envs[0].single_observation_space)
            b_logprobs = self.logprobs.reshape(-1)
            b_actions = self.actions.reshape((-1,) + self.envs[0].single_action_space)
            b_Z = self.Z.reshape((-1, self.envs[0].classes))
            b_advantages = advantages.reshape(-1)
            b_returns = returns.reshape(-1)
            b_values = self.values.reshape(-1)
            self.all_obs = torch.vstack((self.all_obs, b_obs))
            self.all_acts = torch.vstack((self.all_acts, b_actions))
            self.all_Zs = torch.vstack((self.all_Zs, b_Z))
            self.all_logprobs = torch.vstack((self.all_logprobs, b_logprobs[:, None]))
            # Optimizing the policy and value network
            b_inds = np.arange(self.args.batch_size)
            clipfracs = []
            for epoch in range(self.args.update_epochs):
                np.random.shuffle(b_inds)
                for start in range(0, self.args.batch_size, self.args.minibatch_size):
                    end = start + self.args.minibatch_size
                    mb_inds = b_inds[start:end]

                    _, newlogprob, entropy, newvalue = self.agent.get_action_and_value(b_obs[mb_inds], b_actions.long()[mb_inds].argmax(dim=1), z=b_Z[mb_inds])
                    logratio = newlogprob - b_logprobs[mb_inds]
                    ratio = logratio.exp()

                    with torch.no_grad():
                        # calculate approx_kl http://joschu.net/blog/kl-approx.html
                        old_approx_kl = (-logratio).mean()
                        approx_kl = ((ratio - 1) - logratio).mean()
                        clipfracs += [((ratio - 1.0).abs() > self.args.clip_coef).float().mean().item()]

                    mb_advantages = b_advantages[mb_inds]
                    if self.args.norm_adv:
                        mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)

                    # Policy loss
                    pg_loss1 = -mb_advantages * ratio
                    pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - self.args.clip_coef, 1 + self.args.clip_coef)
                    pg_loss = torch.max(pg_loss1, pg_loss2).mean()

                    # Value loss
                    newvalue = newvalue.view(-1)
                    if self.args.clip_vloss:
                        v_loss_unclipped = (newvalue - b_returns[mb_inds]) ** 2
                        v_clipped = b_values[mb_inds] + torch.clamp(
                            newvalue - b_values[mb_inds],
                            -self.args.clip_coef,
                            self.args.clip_coef,
                        )
                        v_loss_clipped = (v_clipped - b_returns[mb_inds]) ** 2
                        v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)
                        v_loss = 0.5 * v_loss_max.mean()
                    else:
                        v_loss = 0.5 * ((newvalue - b_returns[mb_inds]) ** 2).mean()

                    entropy_loss = entropy.mean()
                    loss = pg_loss - self.args.ent_coef * entropy_loss + v_loss * self.args.vf_coef

                    self.optimizer.zero_grad()
                    loss.backward()
                    nn.utils.clip_grad_norm_(self.agent.parameters(), self.args.max_grad_norm)
                    self.optimizer.step()
                if self.args.target_kl is not None:
                    if approx_kl > self.args.target_kl:
                        break

            y_pred, y_true = b_values.cpu().numpy(), b_returns.cpu().numpy()
            var_y = np.var(y_true)
            explained_var = np.nan if var_y == 0 else 1 - np.var(y_true - y_pred) / var_y

            # TRY NOT TO MODIFY: record rewards for plotting purposes
            self.writer.add_scalar("charts/learning_rate", self.optimizer.param_groups[0]["lr"], self.global_step)
            self.writer.add_scalar("losses/value_loss", v_loss.item(), self.global_step)
            self.writer.add_scalar("losses/policy_loss", pg_loss.item(), self.global_step)
            self.writer.add_scalar("losses/entropy", entropy_loss.item(), self.global_step)
            self.writer.add_scalar("losses/old_approx_kl", old_approx_kl.item(), self.global_step)
            self.writer.add_scalar("losses/approx_kl", approx_kl.item(), self.global_step)
            self.writer.add_scalar("losses/clipfrac", np.mean(clipfracs), self.global_step)
            self.writer.add_scalar("losses/explained_variance", explained_var, self.global_step)
            print("SPS:", int(self.global_step / (time.time() - self.start_time)))
            self.writer.add_scalar("charts/SPS", int(self.global_step / (time.time() - self.start_time)), self.global_step)
            # Keep only the most recent `buffer_size` rows of the rollout archive.
            self.all_acts = self.all_acts[-self.buffersize:]
            self.all_Zs = self.all_Zs[-self.buffersize:]
            self.all_obs = self.all_obs[-self.buffersize:]
            self.all_logprobs = self.all_logprobs[-self.buffersize:]

        return self.all_obs, self.all_acts, self.all_logprobs, self.all_Zs

    def close(self):
        self.writer.close()
        #torch.save(agent,f'ppo_agent_iter_{self.args.total_timesteps}_mlp.pth')
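

# Example usage: a minimal sketch of how the trainer might be driven, assuming DoorZ and
# PPOAgentwithZ are importable as above and match the interfaces used in this file.
# The keyword values shown are just the defaults from parse_args()/__init__.
if __name__ == "__main__":
    trainer = ppo_trainer_z(lr=2.5e-4, total_timesteps=1000000, buffer_size=3000, env_id="DoorEnv_AIRL")
    # train() returns the archived rollouts: observations, one-hot actions,
    # log-probabilities, and the ground-truth z labels collected during training.
    all_obs, all_acts, all_logprobs, all_Zs = trainer.train()
    trainer.close()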