def build_and_train(affinity_code, log_dir, run_ID, **kwargs):
    # I prefer to put all tunable default configs into the launch file.
    # Acquire the affinity assigned by the launcher.
    # NOTE: If the affinity is a list, it means multiple resources (GPUs)
    # are assigned to the current experiment.
    affinity = affinity_from_code(affinity_code)
    # Now you have `config` as a dictionary with the same structure
    # as your default configuration.
    config = load_variant(log_dir)
    name = "demo_experiment"
    # This tells you which GPU is recommended for this experiment.
    gpu_idx = affinity["cuda_idx"]
    # Run your experiment under a logger context.
    with logger_context(log_dir, run_ID, name, config):
        logger.log("Start running experiment")
        for epoch_i in range(10):
            # Log a scalar with this function, for example.
            logger.record_tabular("metric1", epoch_i, epoch_i)
            # Dump all logs to csv (this is the exact function that
            # writes one line into the progress.csv file).
            logger.dump_tabular()

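# The snippet above is the run-script half of rlpyt-style launching; the
# launcher half, sketched below, is what produces the affinity code and the
# variant file that load_variant() reads. This is a hedged sketch assuming
# rlpyt's stock launching utilities (quick_affinity_code, make_variants,
# run_experiments); the script path and the "lr" key are illustrative only.
from rlpyt.utils.launching.affinity import quick_affinity_code
from rlpyt.utils.launching.exp_launcher import run_experiments
from rlpyt.utils.launching.variant import make_variants, VariantLevel

affinity_code = quick_affinity_code(n_parallel=2)
variant_levels = [VariantLevel(
    keys=[("lr",)],                      # path into the config dict
    values=[[1e-3], [1e-4]],             # one experiment per value
    dir_names=["lr_1e-3", "lr_1e-4"],    # subdirectory per variant
)]
variants, log_dirs = make_variants(*variant_levels)
run_experiments(
    script="path/to/script_with_build_and_train.py",  # hypothetical path
    affinity_code=affinity_code,
    experiment_title="demo_experiment",
    runs_per_setting=1,
    variants=variants,
    log_dirs=log_dirs,
)
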
def log_diagnostics(self, itr, prefix='Diagnostics/'):
    with logger.tabular_prefix(prefix):
        logger.record_tabular('NewCompletedTrajs', self._new_completed_trajs)
        logger.record_tabular(
            'StepsInTrajWindow',
            sum(info["Length"] for info in self._traj_infos))
    super().log_diagnostics(itr, prefix=prefix)
    self._new_completed_trajs = 0

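# The tabular_prefix context used above only namespaces keys: every
# record_tabular() call inside it gets the prefix prepended, so the csv
# columns come out as e.g. "Diagnostics/NewCompletedTrajs". A minimal
# standalone sketch (assuming rlpyt's logger module path):
from rlpyt.utils.logging import logger

with logger.tabular_prefix('Diagnostics/'):
    logger.record_tabular('NewCompletedTrajs', 3)  # column: Diagnostics/NewCompletedTrajs
logger.record_tabular('Iteration', 0)              # column: Iteration (no prefix)
logger.dump_tabular()                              # writes one row with both columns
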
def _log_diagnostics(self, epoch_i):
    """Call the logger to dump all statistics to file.
    NOTE: Because this calls logger.dump_tabular, you must call this
    implementation after logging all of your custom information.
    """
    self._save_epoch_snapshot(epoch_i)
    logger.record_tabular("Optim_epoch", epoch_i, epoch_i)
    for k, v in self._train_infos.items():
        if not k.startswith("_"):
            logger.record_tabular_misc_stat(k, v, epoch_i)
    self._train_infos = {k: list() for k in self._train_infos}
    for k, v in self._env_infos.items():
        # NOTE: skip in case a value of info is not a numeric type.
        if not k.startswith("_") and len(v) > 0 and \
                (isinstance(v[0], numbers.Number) or isinstance(v[0], np.ndarray)):
            logger.record_tabular_misc_stat(k, v, epoch_i)
    self._env_infos = {k: list() for k in self._env_infos}
    logger.dump_tabular()

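# record_tabular_misc_stat() above fans one list of values out into several
# summary columns. In stock rlpyt the signature is
# record_tabular_misc_stat(key, values, placement='back') and it emits
# keyAverage, keyStd, keyMedian, keyMin, keyMax; the extra epoch argument in
# the snippet suggests a fork, so treat this sketch as an assumption:
from rlpyt.utils.logging import logger

episode_returns = [1.0, 2.0, 5.0]  # illustrative values
logger.record_tabular_misc_stat("Return", episode_returns)
logger.dump_tabular()  # row contains ReturnAverage, ReturnStd, ReturnMin, ...
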
def log_diagnostics(self, itr, sampler_itr, throttle_time, prefix='Diagnostics/'):
    if not self._traj_infos:
        logger.log("WARNING: had no complete trajectories in eval.")
    steps_in_eval = sum([info["Length"] for info in self._traj_infos])
    with logger.tabular_prefix(prefix):
        logger.record_tabular('StepsInEval', steps_in_eval)
        logger.record_tabular('TrajsInEval', len(self._traj_infos))
        logger.record_tabular('CumEvalTime', self.ctrl.eval_time.value)
    super().log_diagnostics(itr, sampler_itr, throttle_time, prefix=prefix)
    self._traj_infos = list()  # Clear after each eval.

def log_diagnostics(self, itr, eval_traj_infos, eval_time, prefix='Diagnostics/'):
    if not eval_traj_infos:
        logger.log("WARNING: had no complete trajectories in eval.")
    steps_in_eval = sum([info["Length"] for info in eval_traj_infos])
    with logger.tabular_prefix(prefix):
        logger.record_tabular('StepsInEval', steps_in_eval)
        logger.record_tabular('TrajsInEval', len(eval_traj_infos))
        self._cum_eval_time += eval_time
        logger.record_tabular('CumEvalTime', self._cum_eval_time)
    super().log_diagnostics(itr, eval_traj_infos, eval_time, prefix=prefix)

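# The two eval runners above (async and synchronous) share one pattern: log
# your own keys under the prefix, then defer to super() so the base runner
# appends its timing stats and finally dumps the row. A hedged sketch of
# extending rlpyt's MinibatchRlEval the same way; MyRunner and _my_stat are
# illustrative names, not rlpyt API:
from rlpyt.runners.minibatch_rl import MinibatchRlEval
from rlpyt.utils.logging import logger

class MyRunner(MinibatchRlEval):

    def log_diagnostics(self, itr, eval_traj_infos, eval_time, prefix='Diagnostics/'):
        with logger.tabular_prefix(prefix):
            logger.record_tabular('MyCustomStat', self._my_stat)  # hypothetical attribute
        # Super logs StepsInEval/TrajsInEval/CumEvalTime and dumps the row.
        super().log_diagnostics(itr, eval_traj_infos, eval_time, prefix=prefix)
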
def log_diagnostics(self, itr, traj_infos=None, eval_time=0, prefix='Diagnostics/'):
    """
    Write diagnostics (including stored ones) to csv via the logger.
    """
    if itr > 0:
        self.pbar.stop()
    self.save_itr_snapshot(itr)
    new_time = time.time()
    self._cum_time = new_time - self._start_time
    train_time_elapsed = new_time - self._last_time - eval_time
    new_updates = self.algo.update_counter - self._last_update_counter
    new_samples = (self.sampler.batch_size * self.world_size *
        self.log_interval_itrs)
    updates_per_second = (float('nan') if itr == 0 else
        new_updates / train_time_elapsed)
    samples_per_second = (float('nan') if itr == 0 else
        new_samples / train_time_elapsed)
    replay_ratio = (new_updates * self.algo.batch_size * self.world_size /
        new_samples)
    cum_replay_ratio = (self.algo.batch_size * self.algo.update_counter /
        ((itr + 1) * self.sampler.batch_size))  # world_size cancels.
    cum_steps = (itr + 1) * self.sampler.batch_size * self.world_size

    with logger.tabular_prefix(prefix):
        if self._eval:
            logger.record_tabular('CumTrainTime',
                self._cum_time - self._cum_eval_time)  # Already added new eval_time.
        logger.record_tabular('Iteration', itr)
        logger.record_tabular('CumTime (s)', self._cum_time)
        logger.record_tabular('CumSteps', cum_steps)
        logger.record_tabular('CumCompletedTrajs', self._cum_completed_trajs)
        logger.record_tabular('CumUpdates', self.algo.update_counter)
        logger.record_tabular('StepsPerSecond', samples_per_second)
        logger.record_tabular('UpdatesPerSecond', updates_per_second)
        logger.record_tabular('ReplayRatio', replay_ratio)
        logger.record_tabular('CumReplayRatio', cum_replay_ratio)
    self._log_infos(traj_infos)
    logger.dump_tabular(with_prefix=False)

    self._last_time = new_time
    self._last_update_counter = self.algo.update_counter
    if itr < self.n_itr - 1:
        logger.log(f"Optimizing over {self.log_interval_itrs} iterations.")
        self.pbar = ProgBarCounter(self.log_interval_itrs)

def log_diagnostics(self, itr, sampler_itr, throttle_time, prefix='Diagnostics/'):
    self.pbar.stop()
    self.save_itr_snapshot(itr, sampler_itr)
    new_time = time.time()
    time_elapsed = new_time - self._last_time
    new_updates = self.algo.update_counter - self._last_update_counter
    new_samples = self.sampler.batch_size * (sampler_itr -
        self._last_sampler_itr)
    updates_per_second = (float('nan') if itr == 0 else
        new_updates / time_elapsed)
    samples_per_second = (float('nan') if itr == 0 else
        new_samples / time_elapsed)
    if self._eval:
        new_eval_time = self.ctrl.eval_time.value
        eval_time_elapsed = new_eval_time - self._last_eval_time
        non_eval_time_elapsed = time_elapsed - eval_time_elapsed
        non_eval_samples_per_second = (float('nan') if itr == 0 else
            new_samples / non_eval_time_elapsed)
        self._last_eval_time = new_eval_time
    cum_steps = sampler_itr * self.sampler.batch_size  # No * world_size.
    replay_ratio = (new_updates * self.algo.batch_size * self.world_size /
        max(1, new_samples))
    cum_replay_ratio = (self.algo.update_counter * self.algo.batch_size *
        self.world_size / max(1, cum_steps))

    with logger.tabular_prefix(prefix):
        logger.record_tabular('Iteration', itr)
        logger.record_tabular('SamplerIteration', sampler_itr)
        logger.record_tabular('CumTime (s)', new_time - self._start_time)
        logger.record_tabular('CumSteps', cum_steps)
        logger.record_tabular('CumUpdates', self.algo.update_counter)
        logger.record_tabular('ReplayRatio', replay_ratio)
        logger.record_tabular('CumReplayRatio', cum_replay_ratio)
        logger.record_tabular('StepsPerSecond', samples_per_second)
        if self._eval:
            logger.record_tabular('NonEvalSamplesPerSecond',
                non_eval_samples_per_second)
        logger.record_tabular('UpdatesPerSecond', updates_per_second)
        logger.record_tabular('OptThrottle', (time_elapsed - throttle_time) /
            time_elapsed)
    self._log_infos()
    self._last_time = new_time
    self._last_itr = itr
    self._last_sampler_itr = sampler_itr
    self._last_update_counter = self.algo.update_counter
    logger.dump_tabular(with_prefix=False)
    logger.log(f"Optimizing over {self.log_interval_itrs} sampler "
        "iterations.")
    self.pbar = ProgBarCounter(self.log_interval_itrs)

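# The replay-ratio bookkeeping in the last two runners reduces to one formula:
# gradient samples consumed divided by environment samples generated over the
# logging window. A self-contained restatement with made-up numbers, just to
# make the arithmetic concrete (not rlpyt code):
def replay_ratio(new_updates, algo_batch_size, world_size, new_samples):
    # e.g. 100 updates of batch 256 in 1 process over 6400 new env steps -> 4.0,
    # i.e. each environment sample was replayed 4 times on average.
    return new_updates * algo_batch_size * world_size / max(1, new_samples)

assert replay_ratio(100, 256, 1, 6400) == 4.0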