# required imports for this block (normally at the top of the file); project-local
# helpers (types, get_logger, train, args, cfg) are assumed to be defined elsewhere
import glob
import os.path as osp
import re
import shutil

import natsort
from torch.utils.tensorboard import SummaryWriter

# generate work dir: runs/<arch>_<loss>_<optimizer>_<lr_schedule>_<idx>
run_id = osp.join(
    r'./runs',
    cfg['model']['arch'] + '_' + cfg['train']['loss']['name'] + '_'
    + cfg['train']['optimizer']['name'] + '_' + cfg['train']['lr_schedule']['name'] + '_')

# append the next free run index (..._0, ..._1, ...)
all_runs = glob.glob(run_id + '*')
all_runs = natsort.natsorted(all_runs)
if all_runs:
    run_id_cnt = re.search(r'_\d+', all_runs[-1])
    run_id_cnt = int(run_id_cnt.group(0)[1:])
    run_id = run_id + str(run_id_cnt + 1)
else:
    run_id = run_id + '0'

writer = SummaryWriter(log_dir=run_id)

# log the full config as a figure so it shows up in TensorBoard
config_fig = types.dict2fig(cfg)
writer.add_figure('config', config_fig, close=True)
writer.flush()

# logger
logger = get_logger(run_id)
logger.info(f'RUNDIR: {run_id}')

# keep a copy of the config file next to the logs
shutil.copy(args.config, run_id)

train(cfg, writer, logger, run_id)
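
# Hedged sketch: the run-dir numbering above could be wrapped in a small helper,
# similar to what the `utils.get_work_dir` call in the second script below is
# assumed to do. The helper name and exact behaviour here are illustrative, not
# the project's actual implementation.
def get_work_dir_sketch(run_prefix):
    """Append the next free integer suffix to `run_prefix` (which ends with '_')."""
    existing = natsort.natsorted(glob.glob(run_prefix + '*'))
    if not existing:
        return run_prefix + '0'
    last_idx = re.search(r'_(\d+)', existing[-1])
    return run_prefix + str(int(last_idx.group(1)) + 1 if last_idx else 0)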
        'best_macro_OA_iter_now': best_macro_OA_iter_now,
    }
    # save the final checkpoint next to the TensorBoard logs
    save_path = os.path.join(
        writer.file_writer.get_logdir(),
        "{}_{}_last_model.pkl".format(cfg.model.arch, cfg.data.dataloader))
    torch.save(state, save_path)


if __name__ == "__main__":
    cfg = args.get_argparser('configs/psr_siamdiff_polar_c3.yml')
    del cfg.test
    torch.backends.cudnn.benchmark = True

    # generate work dir: runs/<arch>_<loss>_<optimizer>_<idx>
    run_id = osp.join(
        r'runs',
        cfg.model.arch + '_' + cfg.train.loss.name + '_' + cfg.train.optimizer.name)
    run_id = utils.get_work_dir(run_id)

    writer = SummaryWriter(log_dir=run_id)

    # log the flattened config as a figure so it shows up in TensorBoard
    config_fig = types.dict2fig(cfg.to_flatten_dict())
    writer.add_figure('config', config_fig, close=True)
    writer.flush()

    # logger
    logger = get_logger(run_id)
    logger.info(f'RUNDIR: {run_id}')
    logger.info(f'using config file: {cfg.config_file}')

    # keep a copy of the config file next to the logs
    shutil.copy(cfg.config_file, run_id)

    train(cfg, writer, logger)
    logger.info(f'RUNDIR: {run_id}')
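
# Hedged sketch: reloading the "<arch>_<dataloader>_last_model.pkl" checkpoint
# saved in train() above. Only 'best_macro_OA_iter_now' is known from this file
# to be stored in `state`; any key holding the model weights is project-specific
# and therefore not assumed here.
def load_last_checkpoint_sketch(save_path):
    state = torch.load(save_path, map_location='cpu')
    print('best macro OA iteration:', state['best_macro_OA_iter_now'])
    return state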