def configure_logger(opt): """ Configures the logger. """ rlog.init(opt.experiment, path=opt.out_dir, tensorboard=True) train_log = rlog.getLogger(opt.experiment + ".train") train_log.addMetrics( rlog.AvgMetric("R_ep", metargs=["reward", "done"]), rlog.AvgMetric("V_step", metargs=["value", 1]), rlog.AvgMetric("v_mse_loss", metargs=["v_mse", 1]), rlog.AvgMetric("v_hub_loss", metargs=["v_hub", 1]), rlog.SumMetric("ep_cnt", resetable=False, metargs=["done"]), rlog.AvgMetric("steps_ep", metargs=["step_no", "done"]), rlog.FPSMetric("fps", metargs=["frame_no"]), ) train_log.log_fmt = ( "[{0:6d}/{ep_cnt:5d}] R/ep={R_ep:8.2f}, V/step={V_step:8.2f}" + " | steps/ep={steps_ep:8.2f}, fps={fps:8.2f}.") val_log = rlog.getLogger(opt.experiment + ".valid") val_log.addMetrics( rlog.AvgMetric("R_ep", metargs=["reward", "done"]), rlog.AvgMetric("RR_ep", resetable=False, eps=0.8, metargs=["reward", "done"]), rlog.AvgMetric("V_step", metargs=["value", 1]), rlog.AvgMetric("steps_ep", metargs=["frame_no", "done"]), rlog.FPSMetric("fps", metargs=["frame_no"]), ) if hasattr(opt.log, "detailed") and opt.log.detailed: val_log.addMetrics( rlog.ValueMetric("Vhist", metargs=["value"], tb_type="histogram")) val_log.log_fmt = ( "@{0:6d} R/ep={R_ep:8.2f}, RunR/ep={RR_ep:8.2f}" + " | steps/ep={steps_ep:8.2f}, fps={fps:8.2f}.")
def run(opt): """ Entry Point. """ rlog.init(opt.experiment, path=opt.out_dir, tensorboard=True) rlog.addMetrics( rlog.AvgMetric("trn_R_ep", metargs=["trn_reward", "trn_done"]), rlog.AvgMetric("trn_loss", metargs=["trn_loss", 1]), rlog.FPSMetric("lrn_tps", metargs=["lrn_steps"]), rlog.AvgMetric("val_R_ep", metargs=["reward", "done"]), rlog.AvgMetric("val_avg_step", metargs=[1, "done"]), rlog.FPSMetric("val_fps", metargs=["val_frames"]), ) opt = game_settings_(opt) env, agent = experiment_factory(opt) rlog.info(ioutil.config_to_string(opt)) ioutil.save_config(opt, opt.out_dir) steps = 0 for ep in range(1, opt.env.episodes + 1): steps = train_one_ep(env, agent, steps, opt.update_freq, opt.target_update_freq) if ep % opt.valid_freq == 0: rlog.traceAndLog(ep) validate(env, agent, opt.valid_episodes) rlog.traceAndLog(ep)
def run(opt): """ Entry point of the experiment """ # no need to run this for all the seeds if opt.run_id not in [0, 1, 2]: return # this is a bit of a hack, it would be nice to change it # when launching the experiment. It generally only affects the logger. if "JyxNorm" not in opt.experiment: opt.experiment += "--JyxNorm" rlog.init(opt.experiment, path=opt.out_dir, relative_time=True) rlog.addMetrics( rlog.AvgMetric("Jyx_norm_avg", metargs=["Jyx_norm", 1]), rlog.MaxMetric("Jyx_norm_max", metargs=["Jyx_norm"]), rlog.AvgMetric("val_R_ep", metargs=["reward", "done"]), rlog.SumMetric("val_ep_cnt", metargs=["done"]), rlog.AvgMetric("val_avg_step", metargs=[1, "done"]), rlog.FPSMetric("val_fps", metargs=["val_frames"]), ) opt.device = "cuda" if torch.cuda.is_available() else "cpu" root = Path(opt.out_dir) ckpt_paths = sorted(root.glob("**/checkpoint*")) rlog.info("Begin empirical estimation of norm(Jyx).") rlog.info("Runing experiment on {}.".format(opt.device)) rlog.info("Found {:3d} checkpoints.".format(len(ckpt_paths))) # Sample only every other third checkpoint if (Path(opt.out_dir) / "max_ckpt").exists(): ckpt_paths = [ p for p in ckpt_paths if int(p.stem.split("_")[1]) == int((Path(opt.out_dir) / "max_ckpt").read_text()) ] rlog.info("IMPORTANT! Found max_ckpt @{}.".format(ckpt_paths[0])) else: if "MinAtar" in opt.game: ckpt_paths = ckpt_paths[0::3] rlog.warning("IMPORTANT! Sampling only every other third checkpoint.") else: ckpt_paths = ckpt_paths[0::5] rlog.warning("IMPORTANT! Sampling only every other fifth checkpoint.") for ckpt_path in ckpt_paths: env = get_env(opt, mode="testing") policy, step = load_policy(env, ckpt_path, deepcopy(opt)) check_lipschitz_constant(policy, env, opt.valid_step_cnt) rlog.traceAndLog(step=step)
def main(cmdl): """ Entry point. """ opt = read_config(Path(cmdl.experiment_path) / "cfg.yaml") chkpt_paths = sorted( Path(cmdl.experiment_path).glob("*.pth"), key=lambda path: int(path.stem.split("_")[2]), ) chkpt_paths = [(int(path.stem.split("_")[2]), path) for path in chkpt_paths] print(config_to_string(cmdl)) print(config_to_string(opt)) if cmdl.build_val_dset: perf = [(torch.load(path)["R/ep"], path) for _, path in chkpt_paths] best_score, path = max(perf, key=lambda x: x[0]) print(f"Loading {path} with total return {best_score}.") env, policy = configure_eval(cmdl, opt, path) achlioptas = _get_achlioptas(8, 4) val_dset = build_validation_dset( env, policy, opt.gamma, partial(_hash, decimals=cmdl.decimals, rnd_proj=achlioptas), ) val_dset["meta"]["agent"] = path val_dset["meta"]["decimals"] = cmdl.decimals val_dset["meta"]["rnd_proj"] = achlioptas for k, v in val_dset["meta"].items(): print(f"{k:12}", v) torch.save(val_dset, f"./val_dsets/{env.spec.id}.pkl") elif cmdl.offline_validation: rlog.init(opt.experiment, path=opt.out_dir, tensorboard=True) log = rlog.getLogger(opt.experiment + ".off_valid") log.addMetrics([ rlog.AvgMetric("V_step", metargs=["value", 1]), rlog.AvgMetric("off_mse", metargs=["off_mse", 1]), ]) log.info("Loading dataset...") dset = torch.load(f"./val_dsets/{opt.env_name}.pkl") for step, path in chkpt_paths: env, policy = configure_eval(cmdl, opt, path) offline_validation(step, policy, dset, opt) else: for step, path in chkpt_paths: env, policy = configure_eval(cmdl, opt, path) avg_return = greedy_validation(env, policy, opt.gamma) print("[{0:8d}] R/ep={1:8.2f}.".format(step, avg_return))
def make_rlog(opt): """ Configure logger. """ rlog.init("pff", path=opt.path, tensorboard=True) train_log = rlog.getLogger("pff.train") train_log.fmt = ( "[{gen:03d}/{batch:04d}] acc={acc:2.2f}% | bestFit={bestFit:2.3f}" + ", unFit={unFit:2.3f} [μ={attnMean:2.3f}/σ={attnVar:2.3f}]" ) if opt.model == "baseline": train_log.fmt = "[{batch:04d}] acc={acc:2.2f}%, loss={loss:2.3f}" msg = "Configuration:\n" for k, v in vars(opt).items(): msg += f" {k:16}: {v}\n" rlog.info(msg) return train_log
def set_logger(opt): """ Configure logger. """ rlog.init(opt.experiment, path=opt.out_dir, tensorboard=True) trn_log = rlog.getLogger(opt.experiment + ".train") val_log = rlog.getLogger(opt.experiment + ".valid") trn_log.fmt = "[{:03d}][TRN] acc={:5.2f}% loss={:5.2f}" val_log.fmt = "[{:03d}][VAL] acc={:5.2f}% loss={:5.2f}" # add histogram support if hasattr(opt, "log") and opt.log.detailed: mdl_log = rlog.getLogger(opt.experiment + ".model") mdl_log.addMetrics( rlog.ValueMetric("std", metargs=["std"], tb_type="histogram"), rlog.ValueMetric("mu", metargs=["mu"], tb_type="histogram"), ) return trn_log, val_log
def configure_logger(opt):
    rlog.init(opt.experiment, path=opt.out_dir)
    train_log = rlog.getLogger(opt.experiment + ".train")
    train_log.addMetrics([
        rlog.AvgMetric("R/ep", metargs=["reward", "done"]),
        rlog.SumMetric("ep_cnt", resetable=False, metargs=["done"]),
        rlog.AvgMetric("steps/ep", metargs=["step_no", "done"]),
        rlog.FPSMetric("learning_fps", metargs=["frame_no"]),
    ])
    test_log = rlog.getLogger(opt.experiment + ".test")
    test_log.addMetrics([
        rlog.AvgMetric("R/ep", metargs=["reward", "done"]),
        rlog.SumMetric("ep_cnt", resetable=False, metargs=["done"]),
        rlog.AvgMetric("steps/ep", metargs=["frame_no", "done"]),
        rlog.FPSMetric("test_fps", metargs=["frame_no"]),
        rlog.MaxMetric("max_q", metargs=["qval"]),
    ])
def run(opt): """ Entry point """ if "sRank" not in opt.experiment: opt.experiment += "--sRank" rlog.init(opt.experiment, path=opt.out_dir, relative_time=True) rlog.addMetrics( rlog.AvgMetric("avg_rank", metargs=["rank", 1]), # rlog.ValueMetric("rank", metargs=["rank"]), rlog.AvgMetric("val_R_ep", metargs=["reward", "done"]), rlog.SumMetric("val_ep_cnt", metargs=["done"]), rlog.AvgMetric("val_avg_step", metargs=[1, "done"]), rlog.FPSMetric("val_fps", metargs=["val_frames"]), ) opt.device = "cuda" if torch.cuda.is_available() else "cpu" root = Path(opt.out_dir) ckpt_paths = sorted(root.glob("**/checkpoint*")) rlog.info("Begin empirical estimation of feature matrix rank.") rlog.info("Runing experiment on {}".format(opt.device)) rlog.info("Found {:3d} checkpoints.".format(len(ckpt_paths))) # Sample only every other third checkpoint if "MinAtar" in opt.game: ckpt_paths = ckpt_paths[0::3] rlog.warning("IMPORTANT! Sampling only every other third checkpoint.") else: ckpt_paths = ckpt_paths[0::5] rlog.warning("IMPORTANT! Sampling only every other fifth checkpoint.") sampled_steps = min(opt.valid_step_cnt, opt.train_step_cnt) rlog.info( "Sampling {:6d} steps from the environment".format(sampled_steps)) for ckpt_path in ckpt_paths: env = get_env(opt, mode="testing") policy, step = load_policy(env, ckpt_path, deepcopy(opt)) check_effective_features_rank(policy, env, sampled_steps) rlog.traceAndLog(step=step)
def run(opt): """ Entry point of the program. """ if __debug__: print( clr( "Code might have assertions. Use -O in liftoff when running stuff.", color="red", attrs=["bold"], )) ioutil.create_paths(opt) sticky_schedule = OrderedDict([(int(s), float(p)) for (s, p) in opt.sticky_schedule]) assert 1 in sticky_schedule rlog.init(opt.experiment, path=opt.out_dir, tensorboard=True) train_loggers = OrderedDict() for i, epoch in enumerate(sticky_schedule.keys()): train_loggers[epoch] = train_log = rlog.getLogger( f"{opt.experiment}.{i:d}") train_log.addMetrics( rlog.AvgMetric("trn_R_ep", metargs=["trn_reward", "trn_done"]), rlog.SumMetric("trn_ep_cnt", metargs=["trn_done"]), rlog.AvgMetric("trn_loss", metargs=["trn_loss", 1]), rlog.FPSMetric("trn_tps", metargs=["trn_steps"]), rlog.ValueMetric("trn_sticky_action_prob", metargs=["trn_sticky_action_prob"]), rlog.FPSMetric("lrn_tps", metargs=["lrn_steps"]), rlog.AvgMetric("val_R_ep", metargs=["reward", "done"]), rlog.SumMetric("val_ep_cnt", metargs=["done"]), rlog.AvgMetric("val_avg_step", metargs=[1, "done"]), rlog.FPSMetric("val_fps", metargs=["val_frames"]), rlog.ValueMetric("val_sticky_action_prob", metargs=["val_sticky_action_prob"]), ) # Initialize the objects we will use during training. env, (replay, policy_improvement, policy_evaluation) = experiment_factory(opt) rlog.info("\n\n{}\n\n{}\n\n{}".format(env, replay, policy_evaluation.estimator)) rlog.info("\n\n{}\n\n{}".format(policy_improvement, policy_evaluation)) if opt.estimator.args.get("spectral", None) is not None: for k in policy_evaluation.estimator.get_spectral_norms().keys(): # k = f"min{str(k)[1:]}" rlog.addMetrics(rlog.ValueMetric(k, metargs=[k])) # if we loaded a checkpoint if Path(opt.out_dir).joinpath("replay.gz").is_file(): # sometimes the experiment is intrerupted while saving the replay # buffer and it gets corrupted. Therefore we attempt restoring # from the previous checkpoint and replay. try: idx = replay.load(Path(opt.out_dir) / "replay.gz") ckpt = ioutil.load_checkpoint(opt.out_dir, idx=idx) rlog.info(f"Loaded most recent replay (step {idx}).") except: gc.collect() rlog.info("Last replay gzip is faulty.") idx = replay.load(Path(opt.out_dir) / "prev_replay.gz") ckpt = ioutil.load_checkpoint(opt.out_dir, idx=idx) rlog.info(f"Loading a previous snapshot (step {idx}).") # load state dicts # load state dicts ioutil.special_conv_uv_buffer_fix(policy_evaluation.estimator, ckpt["estimator_state"]) policy_evaluation.estimator.load_state_dict(ckpt["estimator_state"]) ioutil.special_conv_uv_buffer_fix(policy_evaluation.target_estimator, ckpt["target_estimator_state"]) policy_evaluation.target_estimator.load_state_dict( ckpt["target_estimator_state"]) policy_evaluation.optimizer.load_state_dict(ckpt["optim_state"]) last_epsilon = None for _ in range(ckpt["step"]): last_epsilon = next(policy_improvement.epsilon) rlog.info(f"Last epsilon: {last_epsilon}.") # some counters last_epoch = ckpt["step"] // opt.train_step_cnt rlog.info(f"Resuming from epoch {last_epoch}.") start_epoch = last_epoch + 1 steps = ckpt["step"] else: steps = 0 start_epoch = 1 # add some hardware and git info, log and save opt = ioutil.add_platform_info(opt) rlog.info("\n" + ioutil.config_to_string(opt)) ioutil.save_config(opt, opt.out_dir) # Start training last_state = None # used by train_one_epoch to know how to resume episode. 
for epoch in range(start_epoch, opt.epoch_cnt + 1): last_sched_epoch = max(ep for ep in sticky_schedule if ep <= epoch) print(f"StickyActProb goes from {env.sticky_action_prob}" f" to {sticky_schedule[last_sched_epoch]}") env.sticky_action_prob = sticky_schedule[last_sched_epoch] crt_logger = train_loggers[last_sched_epoch] # train for 250,000 steps steps, last_state = train_one_epoch( env, (replay, policy_improvement, policy_evaluation), opt.train_step_cnt, opt.update_freq, opt.target_update_freq, opt, crt_logger, total_steps=steps, last_state=last_state, ) crt_logger.put(trn_sticky_action_prob=env.sticky_action_prob) crt_logger.traceAndLog(epoch * opt.train_step_cnt) # validate for 125,000 steps for sched_epoch, eval_logger in train_loggers.items(): eval_env = get_env( # this doesn't work, fute-m-aș în ele de wrappere opt, mode="testing", sticky_action_prob=sticky_schedule[sched_epoch]) eval_env.sticky_action_prob = sticky_schedule[sched_epoch] print( f"Evaluating on the env with sticky={eval_env.sticky_action_prob}." ) validate( AGENTS[opt.agent.name]["policy_improvement"]( policy_improvement.estimator, opt.action_cnt, epsilon=opt.val_epsilon, ), eval_env, opt.valid_step_cnt, eval_logger, ) eval_logger.put( val_sticky_action_prob=eval_env.sticky_action_prob, ) eval_logger.traceAndLog(epoch * opt.train_step_cnt) # save the checkpoint if opt.agent.save: ioutil.checkpoint_agent( opt.out_dir, steps, estimator=policy_evaluation.estimator, target_estimator=policy_evaluation.target_estimator, optim=policy_evaluation.optimizer, cfg=opt, replay=replay, save_replay=(epoch % 8 == 0 or epoch == opt.epoch_cnt), )
def run(opt): """ Entry point of the program. """ if __debug__: print( clr( "Code might have assertions. Use -O in liftoff when running stuff.", color="red", attrs=["bold"], )) ioutil.create_paths(opt) rlog.init(opt.experiment, path=opt.out_dir, tensorboard=True, relative_time=True) rlog.addMetrics( rlog.AvgMetric("trn_R_ep", metargs=["trn_reward", "trn_done"]), rlog.SumMetric("trn_ep_cnt", metargs=["trn_done"]), rlog.AvgMetric("trn_loss", metargs=["trn_loss", 1]), rlog.FPSMetric("trn_tps", metargs=["trn_steps"]), rlog.FPSMetric("lrn_tps", metargs=["lrn_steps"]), rlog.AvgMetric("val_R_ep", metargs=["reward", "done"]), rlog.SumMetric("val_ep_cnt", metargs=["done"]), rlog.AvgMetric("val_avg_step", metargs=[1, "done"]), rlog.FPSMetric("val_fps", metargs=["val_frames"]), ) # Initialize the objects we will use during training. env, (replay, policy_improvement, policy_evaluation) = experiment_factory(opt) guts = [ env, replay, policy_evaluation.estimator, policy_evaluation.optimizer, policy_improvement, policy_evaluation, ] rlog.info(("\n\n{}" * len(guts)).format(*guts)) if opt.estimator.args.get("spectral", None) is not None: for k in policy_evaluation.estimator.get_spectral_norms().keys(): # k = f"min{str(k)[1:]}" rlog.addMetrics(rlog.ValueMetric(k, metargs=[k])) # if we loaded a checkpoint if Path(opt.out_dir).joinpath("replay.gz").is_file(): # sometimes the experiment is intrerupted while saving the replay # buffer and it gets corrupted. Therefore we attempt restoring # from the previous checkpoint and replay. try: idx = replay.load(Path(opt.out_dir) / "replay.gz") ckpt = ioutil.load_checkpoint(opt.out_dir, idx=idx) rlog.info(f"Loaded most recent replay (step {idx}).") except: gc.collect() rlog.info("Last replay gzip is faulty.") idx = replay.load(Path(opt.out_dir) / "prev_replay.gz") ckpt = ioutil.load_checkpoint(opt.out_dir, idx=idx) rlog.info(f"Loading a previous snapshot (step {idx}).") # load state dicts # load state dicts ioutil.special_conv_uv_buffer_fix(policy_evaluation.estimator, ckpt["estimator_state"]) policy_evaluation.estimator.load_state_dict(ckpt["estimator_state"]) ioutil.special_conv_uv_buffer_fix(policy_evaluation.target_estimator, ckpt["target_estimator_state"]) policy_evaluation.target_estimator.load_state_dict( ckpt["target_estimator_state"]) policy_evaluation.optimizer.load_state_dict(ckpt["optim_state"]) last_epsilon = None for _ in range(ckpt["step"]): last_epsilon = next(policy_improvement.epsilon) rlog.info(f"Last epsilon: {last_epsilon}.") # some counters last_epoch = ckpt["step"] // opt.train_step_cnt rlog.info(f"Resuming from epoch {last_epoch}.") start_epoch = last_epoch + 1 steps = ckpt["step"] else: steps = 0 start_epoch = 1 # add some hardware and git info, log and save opt = ioutil.add_platform_info(opt) rlog.info("\n" + ioutil.config_to_string(opt)) ioutil.save_config(opt, opt.out_dir) # Start training last_state = None # used by train_one_epoch to know how to resume episode. 
for epoch in range(start_epoch, opt.epoch_cnt + 1): # train for 250,000 steps steps, last_state = train_one_epoch( env, (replay, policy_improvement, policy_evaluation), opt.train_step_cnt, opt.update_freq, opt.target_update_freq, opt, rlog.getRootLogger(), total_steps=steps, last_state=last_state, ) rlog.traceAndLog(epoch * opt.train_step_cnt) # validate for 125,000 steps validate( AGENTS[opt.agent.name]["policy_improvement"]( policy_improvement.estimator, opt.action_cnt, epsilon=opt.val_epsilon), get_env(opt, mode="testing"), opt.valid_step_cnt, rlog.getRootLogger(), ) rlog.traceAndLog(epoch * opt.train_step_cnt) # save the checkpoint if opt.agent.save: ioutil.checkpoint_agent( opt.out_dir, steps, estimator=policy_evaluation.estimator, target_estimator=policy_evaluation.target_estimator, optim=policy_evaluation.optimizer, cfg=opt, replay=replay, save_replay=(epoch % 8 == 0 or epoch == opt.epoch_cnt), )
def main():
    # Get the root logger, preconfigured to log to the console,
    # to a text file, a pickle and a TensorBoard protobuf.
    experiment_path = get_experiment_path()
    rlog.init("dqn", path=experiment_path, tensorboard=True)
    rlog.info("Logging application level stuff.")
    rlog.info("Log artifacts will be saved in %s", experiment_path)

    rlog.addMetrics(
        # counts each time it receives a `done=True`, aka counts episodes
        rlog.SumMetric("ep_cnt", resetable=False, metargs=["done"]),
        # sums up all the `reward=value` it receives and divides it
        # by the number of `done=True`, aka mean reward per episode
        rlog.AvgMetric("R_per_ep", metargs=["reward", "done"]),
    )

    for step in range(5):
        # Probably not a good idea to call this every step if it is a hot loop?
        # Also this will not be logged to the console or to the text file,
        # since the default log level for these two is INFO.
        rlog.trace(step=step, aux_loss=7.23 - step)

    # But we can register metrics that will accumulate traced events
    # and summarize them. Each Metric accepts a name and some metargs
    # that tell it which arguments received by the `put` call below
    # to accumulate and summarize.
    rlog.addMetrics(
        # counts each time it receives a `done=True`, aka counts episodes
        rlog.SumMetric("ep_cnt", resetable=False, metargs=["done"]),
        # sums up all the `reward=value` it receives and divides it
        # by the number of `done=True`, aka mean reward per episode
        rlog.AvgMetric("R_per_ep", metargs=["reward", "done"]),
        # same but keeps a running average instead (experimental)
        rlog.AvgMetric("RunR", eps=0.9, metargs=["reward", "done"]),
        # same as above but now we divide by the number of rewards
        rlog.AvgMetric("R_per_step", metargs=["reward", 1]),
        # same but with clipped rewards (to +/- 1)
        rlog.AvgMetric("rw_per_ep", metargs=["clip(reward)", "done"]),
        # computes the number of frames per second
        rlog.FPSMetric("train_fps", metargs=["frame_no"]),
        # caches all the values it receives and inserts them into a
        # tensorboard.summary.histogram every time you call `log.trace`
        rlog.ValueMetric("gaussians", metargs=["sample"], tb_type="histogram"),
    )

    mean = 0
    for step in range(1, 300_001):
        # make a step in the "environment"
        reward, done = reward_following_policy(step)

        # sample from a Gaussian for showcasing the histogram
        sample = random.gauss(mean, 0.1)

        # Simply trace all the values you passed as `metargs` above.
        # The logger will know how to dispatch each argument.
        rlog.put(reward=reward, done=done, frame_no=1, sample=sample)

        if step % 10_000 == 0:
            # this is the call that dumps everything to the logger
            summary = rlog.summarize()
            rlog.trace(step=step, **summary)
            # rlog.info(
            #     "{0:6d}, ep {ep_cnt:3d}, RunR/ep{RunR:8.2f} | rw/ep{R_per_ep:8.2f}.".format(
            #         step, **summary
            #     )
            # )
            # rlog.reset()
            rlog.traceAndLog(step)
            mean += 1
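# Hypothetical helpers so the demo above can run stand-alone; the real example
# presumably defines its own versions. `reward_following_policy` fabricates a
# (reward, done) pair whose reward drifts upward with the step count, and
# `get_experiment_path` just picks a scratch directory.
import random
import tempfile


def get_experiment_path():
    return tempfile.mkdtemp(prefix="rlog_demo_")


def reward_following_policy(step):
    reward = random.random() + step / 300_000
    done = random.random() < 0.01  # end an episode roughly every 100 steps
    return reward, done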