def package(self, rbf_file, sw_dir, doc_dir, package_name):
    """Assemble the release archive for this AFU.

    A GBS generated from ``rbf_file`` and a copy of the AFU description
    file are staged under ``<work_dir>/image_0``.  That image directory
    — plus ``sw_dir`` and ``doc_dir`` when given — is archived into
    ``<work_dir>/package``, which is in turn archived as
    ``package_name``.  The scratch work directory is deleted at the end.

    :param rbf_file: raw bitstream handed to ``create_gbs``
    :param sw_dir: optional software collateral directory (skipped if falsy)
    :param doc_dir: optional documentation directory (skipped if falsy)
    :param package_name: base name (without extension) of the final archive
    """
    staging = os.path.join(utils.get_work_dir(), "image_0")
    if not os.path.exists(staging):
        os.makedirs(staging)

    # The GBS file takes its base name from the AFU description file.
    desc_base = os.path.basename(self.afu_desc_file)
    gbs_file = os.path.splitext(desc_base)[0] + GBS_EXT
    self.create_gbs(rbf_file, os.path.join(staging, gbs_file))

    # Ship the AFU description alongside the GBS inside the image.
    shutil.copyfile(self.afu_desc_file, os.path.join(staging, desc_base))

    pkg_dir = os.path.join(utils.get_work_dir(), "package")
    shutil.make_archive(os.path.join(pkg_dir, "image_0"),
                        ARCHIVE_FORMAT, staging)

    # Optional collateral: software then documentation, in that order.
    for sub_name, sub_dir in (("sw", sw_dir), ("docs", doc_dir)):
        if sub_dir:
            shutil.make_archive(os.path.join(pkg_dir, sub_name),
                                ARCHIVE_FORMAT, sub_dir)

    # Wrap everything into the final archive, then drop the scratch area.
    shutil.make_archive(package_name, ARCHIVE_FORMAT, pkg_dir)
    shutil.rmtree(utils.get_work_dir())
def package(self, rbf_file, sw_dir, doc_dir, package_name):
    """Build the AFU package: GBS image, optional sw/docs, one final archive.

    Stages the generated GBS and the AFU description file in
    ``<work_dir>/image_0``, archives each component into
    ``<work_dir>/package``, archives that directory as *package_name*,
    and finally removes the whole scratch work directory.

    :param rbf_file: raw bitstream converted to a GBS via ``create_gbs``
    :param sw_dir: software directory to bundle, or falsy to skip
    :param doc_dir: documentation directory to bundle, or falsy to skip
    :param package_name: base name (no extension) of the resulting archive
    """
    image_dir = os.path.join(utils.get_work_dir(), "image_0")
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)

    # GBS inherits the description file's stem; only the extension changes.
    afu_desc_name = os.path.basename(self.afu_desc_file)
    stem, _ = os.path.splitext(afu_desc_name)
    self.create_gbs(rbf_file, os.path.join(image_dir, stem + GBS_EXT))

    # Keep the AFU description next to the GBS inside the image tree.
    shutil.copyfile(self.afu_desc_file,
                    os.path.join(image_dir, afu_desc_name))

    package_dir = os.path.join(utils.get_work_dir(), "package")
    shutil.make_archive(os.path.join(package_dir, "image_0"),
                        ARCHIVE_FORMAT, image_dir)
    if sw_dir:
        shutil.make_archive(os.path.join(package_dir, "sw"),
                            ARCHIVE_FORMAT, sw_dir)
    if doc_dir:
        shutil.make_archive(os.path.join(package_dir, "docs"),
                            ARCHIVE_FORMAT, doc_dir)

    # Outer archive of the package dir, then discard all scratch state.
    shutil.make_archive(package_name, ARCHIVE_FORMAT, package_dir)
    shutil.rmtree(utils.get_work_dir())
"scheduler_state": scheduler.state_dict(), "best_macro_OA_now": best_macro_OA_now, 'best_macro_OA_iter_now':best_macro_OA_iter_now, } save_path = os.path.join(writer.file_writer.get_logdir(), "{}_{}_last_model.pkl".format(cfg.model.arch, cfg.data.dataloader)) torch.save(state, save_path) if __name__ == "__main__": cfg = args.get_argparser('configs/psr_siamdiff_polar_c3.yml') del cfg.test torch.backends.cudnn.benchmark = True # generate work dir run_id = osp.join(r'runs', cfg.model.arch + '_' + cfg.train.loss.name + '_' + cfg.train.optimizer.name) run_id = utils.get_work_dir(run_id) writer = SummaryWriter(log_dir=run_id) config_fig = types.dict2fig(cfg.to_flatten_dict()) # plt.savefig(r'./tmp/ff.png') writer.add_figure('config', config_fig, close=True) # writer.add_hparams(types.flatten_dict_summarWriter(cfg), {'a': 'b'}) writer.flush() # logger logger = get_logger(run_id) # print('-'*100) logger.info(f'RUNDIR: {run_id}') logger.info(f'using config file: {cfg.config_file}') shutil.copy(cfg.config_file, run_id)
# NOTE(review): chunk begins mid-statement — the leading "}" closes a
# checkpoint dict (presumably `state = {...}`) built inside a training
# function that starts outside this view; verify against the full file.
}
# Save the last-model checkpoint into the TensorBoard log directory,
# named after the configured architecture and dataloader.
save_path = os.path.join(
    writer.file_writer.get_logdir(),
    "{}_{}_last_model.pkl".format(cfg['model']['arch'], cfg['data']['dataloader']))
torch.save(state, save_path)


if __name__ == "__main__":
    # Load the experiment configuration; the test section is unused here.
    cfg = args.get_argparser(r'configs/tile.yml')
    del cfg.test
    # Let cuDNN benchmark kernels (fastest for fixed-size inputs).
    torch.backends.cudnn.benchmark = True
    # generate work dir named after architecture / loss / optimizer / lr schedule
    run_id = utils.get_work_dir(
        osp.join(
            r'runs',
            cfg.model.arch + '_' + cfg.train.loss.name + '_' + cfg.train.optimizer.name + '_' + cfg.train.lr.name))
    # TensorBoard writer; the flattened config is logged once as a figure.
    writer = SummaryWriter(log_dir=run_id)
    config_fig = types.dict2fig(cfg.to_flatten_dict())
    # plt.savefig(r'./tmp/ff.png')
    writer.add_figure('config', config_fig, close=True)
    # writer.add_hparams(types.flatten_dict_summarWriter(cfg), {'a': 'b'})
    writer.flush()
    # logger
    logger = get_logger(run_id)
    # print('RUNDIR: {}'.format(logdir))
    logger.info(f'RUNDIR: {run_id}')
    # Copy the config next to the outputs, then launch training.
    shutil.copy(cfg.config_file, run_id)
    train(cfg, writer, logger)
# score_val,_ = running_metrics_val.get_scores() acc = score['Acc'] # acc_train = score_train['Acc'] # acc_val = score_val['Acc'] logger.info(f'acc : {acc}\tOA:{acc.mean()}') micro_OA = score['Overall_Acc'] miou = score['Mean_IoU'] logger.info(f'overall acc: {micro_OA}, mean iou: {miou}') # logger.info(f'acc of train set: {acc_train} \nacc of val set: {acc_val}') if __name__ == '__main__': cfg = args.get_argparser('configs/psr_siamdiff_pauli.yml') del cfg.train torch.backends.cudnn.benchmark = True run_id = utils.get_work_dir( osp.join(cfg.test.out_path, osp.split(osp.split(cfg.test.pth)[0])[1])) # writer = SummaryWriter(log_dir=run_id) # config_fig = types.dict2fig(cfg.to_flatten_dict()) # writer.add_figure('config', config_fig, close=True) # writer.flush() shutil.copy(cfg.config_file, run_id) # logger logger = get_logger(run_id) logger.info(f'RUN DIR: {run_id}') test(cfg, logger, run_id) logger.info(f'RUN DIR: {run_id}')