import time

import psutil
import torch

# assuming the rlpyt package layout for these project utilities
from rlpyt.runners.base import BaseRunner
from rlpyt.utils.logging import logger
from rlpyt.utils.prog_bar import ProgBarCounter
from rlpyt.utils.quick_args import save__init__args
from rlpyt.utils.seed import make_seed, set_seed


class UnsupervisedLearning(BaseRunner):

    def __init__(
            self,
            algo,
            n_updates,
            seed=None,
            affinity=None,
            log_interval_updates=1e3,
            snapshot_gap_intervals=None,  # units: log_intervals
            ):
        n_updates = int(n_updates)
        log_interval_updates = int(log_interval_updates)  # may arrive as float, e.g. 1e3
        affinity = dict() if affinity is None else affinity
        save__init__args(locals())

    def startup(self):
        p = psutil.Process()
        try:
            if (self.affinity.get("master_cpus", None) is not None and
                    self.affinity.get("set_affinity", True)):
                p.cpu_affinity(self.affinity["master_cpus"])
            cpu_affin = p.cpu_affinity()
        except AttributeError:
            cpu_affin = "UNAVAILABLE MacOS"
        logger.log(f"Runner {getattr(self, 'rank', '')} master CPU affinity: "
            f"{cpu_affin}.")
        if self.affinity.get("master_torch_threads", None) is not None:
            torch.set_num_threads(self.affinity["master_torch_threads"])
        logger.log(f"Runner {getattr(self, 'rank', '')} master Torch threads: "
            f"{torch.get_num_threads()}.")
        if self.seed is None:
            self.seed = make_seed()
        set_seed(self.seed)
        # self.rank = rank = getattr(self, "rank", 0)
        # self.world_size = world_size = getattr(self, "world_size", 1)
        self.algo.initialize(
            n_updates=self.n_updates,
            cuda_idx=self.affinity.get("cuda_idx", None),
        )
        self.initialize_logging()

    def initialize_logging(self):
        self._opt_infos = {k: list() for k in self.algo.opt_info_fields}
        self._start_time = self._last_time = time.time()
        self._cum_time = 0.0
        if self.snapshot_gap_intervals is not None:
            logger.set_snapshot_gap(
                self.snapshot_gap_intervals * self.log_interval_updates)
        self.pbar = ProgBarCounter(self.log_interval_updates)

    def shutdown(self):
        logger.log("Pretraining complete.")
        self.pbar.stop()

    def get_itr_snapshot(self, itr):
        return dict(
            itr=itr,
            algo_state_dict=self.algo.state_dict(),
        )

    def save_itr_snapshot(self, itr):
        """
        Calls the logger to save training checkpoint/snapshot (logger itself
        may or may not save, depending on mode selected).
        """
        logger.log("saving snapshot...")
        params = self.get_itr_snapshot(itr)
        logger.save_itr_params(itr, params)
        logger.log("saved")

    def store_diagnostics(self, itr, opt_info):
        for k, v in self._opt_infos.items():
            new_v = getattr(opt_info, k, [])
            v.extend(new_v if isinstance(new_v, list) else [new_v])
        self.pbar.update((itr + 1) % self.log_interval_updates)

    def log_diagnostics(self, itr, val_info, *args, **kwargs):
        self.pbar.stop()  # finish the progress bar before dumping the table
        self.save_itr_snapshot(itr)
        new_time = time.time()
        self._cum_time = new_time - self._start_time
        epochs = (itr * self.algo.batch_size /
            (self.algo.replay_buffer.size * (1 - self.algo.validation_split)))
        logger.record_tabular("Iteration", itr)
        logger.record_tabular("Epochs", epochs)
        logger.record_tabular("CumTime (s)", self._cum_time)
        logger.record_tabular("UpdatesPerSecond", itr / self._cum_time)
        if self._opt_infos:
            for k, v in self._opt_infos.items():
                logger.record_tabular_misc_stat(k, v)
        for k, v in zip(val_info._fields, val_info):
            logger.record_tabular_misc_stat("val_" + k, v)
        self._opt_infos = {k: list() for k in self._opt_infos}  # (reset)
        logger.dump_tabular(with_prefix=False)
        if itr < self.n_updates - 1:
            logger.log(
                f"Optimizing over {self.log_interval_updates} iterations.")
            self.pbar = ProgBarCounter(self.log_interval_updates)

    def train(self):
        self.startup()
        self.algo.train()
        for itr in range(self.n_updates):
            logger.set_iteration(itr)
            with logger.prefix(f"itr #{itr} "):
                opt_info = self.algo.optimize(itr)  # perform one update
                self.store_diagnostics(itr, opt_info)
                if (itr + 1) % self.log_interval_updates == 0:
                    self.algo.eval()
                    val_info = self.algo.validation(itr)
                    self.log_diagnostics(itr, val_info)
                    self.algo.train()
        self.shutdown()
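
# Example: the minimal interface `UnsupervisedLearning` expects from `algo`.
# `DummyUlAlgo` below is a hypothetical stand-in, not part of this codebase:
# the runner calls initialize/train/eval/optimize/validation/state_dict and
# reads opt_info_fields, batch_size, replay_buffer.size, validation_split.
import types
from collections import namedtuple

OptInfo = namedtuple("OptInfo", ["loss"])
ValInfo = namedtuple("ValInfo", ["loss"])


class DummyUlAlgo:

    opt_info_fields = OptInfo._fields
    batch_size = 32
    validation_split = 0.1

    def __init__(self):
        # the runner reads replay_buffer.size when computing epochs
        self.replay_buffer = types.SimpleNamespace(size=10_000)

    def initialize(self, n_updates, cuda_idx=None):
        self.n_updates = n_updates

    def train(self):
        pass  # e.g. switch the underlying model to train mode

    def eval(self):
        pass  # e.g. switch the underlying model to eval mode

    def optimize(self, itr):
        return OptInfo(loss=1.0 / (itr + 1))  # fake decreasing loss

    def validation(self, itr):
        return ValInfo(loss=0.5)

    def state_dict(self):
        return {}


# runner = UnsupervisedLearning(algo=DummyUlAlgo(), n_updates=5e3)
# runner.train()  # typically run inside a logger context so snapshots persist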
import collections

import numpy as np
import torch
import torch.nn.functional as F

# ProgBarCounter, repeat_dataset, and tree_map are project-local helpers
# (sketches of the latter two follow the function below)


def do_training_mt(loader, model, opt, dev, aug_model, min_bc_module,
                   n_batches):

    # @torch.jit.script
    def do_loss_forward_back(obs_batch_obs, obs_batch_task, obs_batch_var,
                             obs_batch_source, acts_batch):
        # we don't use the value output
        logits_flat, _ = model(obs_batch_obs, task_ids=obs_batch_task)
        losses = F.cross_entropy(logits_flat, acts_batch.long(),
                                 reduction='none')
        if min_bc_module is not None:
            # weight using a model-dependent strategy
            mbc_weights = min_bc_module(obs_batch_task, obs_batch_var,
                                        obs_batch_source)
            assert mbc_weights.shape == losses.shape, \
                (mbc_weights.shape, losses.shape)
            loss = (losses * mbc_weights).sum()
        else:
            # no weighting
            loss = losses.mean()
        loss.backward()
        return losses.detach().cpu().numpy()

    # make sure we're in train mode
    model.train()

    # for logging
    loss_ewma = None
    losses = []
    per_task_losses = collections.defaultdict(list)
    progress = ProgBarCounter(n_batches)

    inf_batch_iter = repeat_dataset(loader)
    # range(1, n_batches) would drop the final batch; n_batches + 1 makes the
    # loop process exactly n_batches batches
    ctr_batch_iter = zip(range(1, n_batches + 1), inf_batch_iter)
    for batches_done, loader_batch in ctr_batch_iter:
        # copy to GPU
        obs_batch = loader_batch['obs']
        acts_batch = loader_batch['acts']
        # reminder: attributes are .observation, .task_id, .variant_id
        obs_batch = tree_map(lambda t: t.to(dev), obs_batch)
        acts_batch = acts_batch.to(dev)

        if aug_model is not None:
            # apply augmentations
            obs_batch = obs_batch._replace(
                observation=aug_model(obs_batch.observation))

        # compute loss & take opt step
        opt.zero_grad()
        batch_losses = do_loss_forward_back(obs_batch.observation,
                                            obs_batch.task_id,
                                            obs_batch.variant_id,
                                            obs_batch.source_id, acts_batch)
        opt.step()

        # for logging
        progress.update(batches_done)
        f_loss = np.mean(batch_losses)
        loss_ewma = f_loss if loss_ewma is None \
            else 0.9 * loss_ewma + 0.1 * f_loss
        losses.append(f_loss)

        # also track separately for each (task, variant) pair
        tv_ids = torch.stack((obs_batch.task_id, obs_batch.variant_id), dim=1)
        np_tv_ids = tv_ids.cpu().numpy()
        assert len(np_tv_ids.shape) == 2 and np_tv_ids.shape[1] == 2, \
            np_tv_ids.shape
        for tv_id in np.unique(np_tv_ids, axis=0):
            tv_mask = np.all(np_tv_ids == tv_id[None], axis=-1)
            rel_losses = batch_losses[tv_mask]
            if len(rel_losses) > 0:
                task_id, variant_id = tv_id
                per_task_losses[(task_id, variant_id)] \
                    .append(np.mean(rel_losses))

    progress.stop()

    return loss_ewma, losses, per_task_losses
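
# `do_training_mt` depends on two helpers defined elsewhere in the repo. The
# sketches below are assumptions consistent with how they are used above, not
# the repo's actual definitions.

def repeat_dataset(loader):
    """Yield batches from `loader` indefinitely, restarting every epoch."""
    while True:
        yield from loader


def tree_map(fn, tree):
    """Apply `fn` to every leaf of a (possibly nested) namedtuple/sequence."""
    if hasattr(tree, "_fields"):  # namedtuple, e.g. the observation struct
        return type(tree)(*(tree_map(fn, v) for v in tree))
    if isinstance(tree, (list, tuple)):
        return type(tree)(tree_map(fn, v) for v in tree)
    return fn(tree)  # leaf (e.g. a torch.Tensor)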
import time
from collections import deque

from rlpyt.utils.logging import logger
from rlpyt.utils.prog_bar import ProgBarCounter

# MinibatchRlBase is defined earlier in this module


class MinibatchRl(MinibatchRlBase):
    """
    Runs RL on minibatches; tracks performance online using learning
    trajectories.
    """

    def __init__(self, log_traj_window=100, **kwargs):
        super().__init__(**kwargs)
        self.log_traj_window = int(log_traj_window)

    def train(self):
        n_itr = self.startup()
        for itr in range(n_itr):
            logger.set_iteration(itr)
            with logger.prefix(f"itr #{itr} "):
                self.agent.sample_mode(itr)  # Might not be this agent sampling.
                samples, traj_infos = self.sampler.obtain_samples(itr)
                self.agent.train_mode(itr)
                opt_info = self.algo.optimize_agent(itr, samples)
                self.store_diagnostics(itr, traj_infos, opt_info)
                if (itr + 1) % self.log_interval_itrs == 0:
                    self.log_diagnostics(itr)
        self.shutdown()

    def initialize_logging(self):
        self._traj_infos = deque(maxlen=self.log_traj_window)
        self._cum_completed_trajs = 0
        self._new_completed_trajs = 0
        logger.log(f"Optimizing over {self.log_interval_itrs} iterations.")
        super().initialize_logging()
        self.pbar = ProgBarCounter(self.log_interval_itrs)

    def store_diagnostics(self, itr, traj_infos, opt_info):
        self._cum_completed_trajs += len(traj_infos)
        self._new_completed_trajs += len(traj_infos)
        self._traj_infos.extend(traj_infos)
        for k, v in self._opt_infos.items():
            new_v = getattr(opt_info, k, [])
            v.extend(new_v if isinstance(new_v, list) else [new_v])
        self.pbar.update((itr + 1) % self.log_interval_itrs)

    def log_diagnostics(self, itr):
        self.pbar.stop()
        self.save_itr_snapshot(itr)
        new_time = time.time()
        samples_per_second = (self.log_interval_itrs *
            self.itr_batch_size) / (new_time - self._last_time)
        logger.record_tabular('Iteration', itr)
        logger.record_tabular('CumSteps', (itr + 1) * self.itr_batch_size)
        logger.record_tabular('CumTime (s)', new_time - self._start_time)
        logger.record_tabular('SamplesPerSecond', samples_per_second)
        logger.record_tabular('CumCompletedTrajs', self._cum_completed_trajs)
        logger.record_tabular('NewCompletedTrajs', self._new_completed_trajs)
        logger.record_tabular('StepsInTrajWindow',
            sum(info["Length"] for info in self._traj_infos))
        self._log_infos()
        self._last_time = new_time
        logger.dump_tabular(with_prefix=False)

        self._new_completed_trajs = 0
        if itr < self.n_itr - 1:
            logger.log(f"Optimizing over {self.log_interval_itrs} iterations.")
            self.pbar = ProgBarCounter(self.log_interval_itrs)
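
# Example wiring, assuming an rlpyt-style stack. SerialSampler, A2C,
# CategoricalPgAgent, MyModel, make_env, and logger_context are hypothetical
# stand-ins for whatever sampler/algo/agent/env this repo provides; any
# components matching the interfaces used above (obtain_samples,
# optimize_agent, sample_mode/train_mode) will do.
#
# sampler = SerialSampler(EnvCls=make_env, env_kwargs={}, batch_T=5, batch_B=16)
# runner = MinibatchRl(
#     algo=A2C(),
#     agent=CategoricalPgAgent(ModelCls=MyModel),
#     sampler=sampler,
#     n_steps=1e6,
#     log_interval_steps=1e4,
#     affinity=dict(cuda_idx=0),
#     log_traj_window=100,  # rolling window for online return statistics
# )
# with logger_context("./logs", run_ID=0, name="example"):
#     runner.train()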