logger.info("best OA iter now= %d" % (best_macro_OA_iter_now)) state = { "epoch": it, "model_state": model.state_dict(), "optimizer_state": optimizer.state_dict(), "scheduler_state": scheduler.state_dict(), "best_macro_OA_now": best_macro_OA_now, 'best_macro_OA_iter_now':best_macro_OA_iter_now, } save_path = os.path.join(writer.file_writer.get_logdir(), "{}_{}_last_model.pkl".format(cfg.model.arch, cfg.data.dataloader)) torch.save(state, save_path) if __name__ == "__main__": cfg = args.get_argparser('configs/psr_siamdiff_polar_c3.yml') del cfg.test torch.backends.cudnn.benchmark = True # generate work dir run_id = osp.join(r'runs', cfg.model.arch + '_' + cfg.train.loss.name + '_' + cfg.train.optimizer.name) run_id = utils.get_work_dir(run_id) writer = SummaryWriter(log_dir=run_id) config_fig = types.dict2fig(cfg.to_flatten_dict()) # plt.savefig(r'./tmp/ff.png') writer.add_figure('config', config_fig, close=True) # writer.add_hparams(types.flatten_dict_summarWriter(cfg), {'a': 'b'}) writer.flush() # logger logger = get_logger(run_id)
        state = {
            "epoch": it,
            "model_state": model.state_dict(),
            "optimizer_state": optimizer.state_dict(),
            "scheduler_state": scheduler.state_dict(),
            "best_macro_OA_now": best_macro_OA_now,
            "best_macro_OA_iter_now": best_macro_OA_iter_now,
        }
        save_path = os.path.join(
            writer.file_writer.get_logdir(),
            "{}_{}_last_model.pkl".format(cfg.model.arch, cfg.data.dataloader))
        torch.save(state, save_path)


if __name__ == "__main__":
    cfg = args.get_argparser('configs/psr_siamdiff_complex_c3.yml')
    del cfg.test
    torch.backends.cudnn.benchmark = True

    # generate work dir
    run_id = osp.join(
        r'runs',
        cfg.model.arch + '_' + cfg.train.loss.name + '_' +
        cfg.train.optimizer.name)
    run_id = utils.get_work_dir(run_id)

    writer = SummaryWriter(log_dir=run_id)
    config_fig = types.dict2fig(cfg.to_flatten_dict())
    # plt.savefig(r'./tmp/ff.png')
    writer.add_figure('config', config_fig, close=True)
    # writer.add_hparams(types.flatten_dict_summarWriter(cfg), {'a': 'b'})
    writer.flush()
        state = {
            "epoch": it,
            "model_state": model.state_dict(),
            "optimizer_state": optimizer.state_dict(),
            "scheduler_state": scheduler.state_dict(),
            "best_macro_OA_now": best_macro_OA_now,
            "best_macro_OA_iter_now": best_macro_OA_iter_now,
        }
        save_path = os.path.join(
            writer.file_writer.get_logdir(),
            "{}_{}_last_model.pkl".format(cfg.model.arch, cfg.data.dataloader))
        torch.save(state, save_path)


if __name__ == "__main__":
    cfg = args.get_argparser('configs/psr_siamdiff_pauli.yml')
    del cfg.test
    torch.backends.cudnn.benchmark = True

    # generate work dir
    run_id = osp.join(
        r'runs',
        cfg.model.arch + '_' + cfg.train.loss.name + '_' +
        cfg.train.optimizer.name)
    run_id = utils.get_work_dir(run_id)

    writer = SummaryWriter(log_dir=run_id)
    config_fig = types.dict2fig(cfg.to_flatten_dict())
    # plt.savefig(r'./tmp/ff.png')
    writer.add_figure('config', config_fig, close=True)
    # writer.add_hparams(types.flatten_dict_summarWriter(cfg), {'a': 'b'})
    writer.flush()
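# --- A minimal resume sketch, not part of the original scripts. It assumes
# the checkpoint layout written by the torch.save(state, save_path) calls
# above; ckpt_path and the function name are hypothetical, and the model,
# optimizer, and scheduler are expected to be rebuilt by the caller before
# their saved states are restored.
import torch


def resume_from_checkpoint(ckpt_path, model, optimizer, scheduler,
                           device="cuda"):
    """Restore the training state saved as *_last_model.pkl above."""
    state = torch.load(ckpt_path, map_location=device)
    model.load_state_dict(state["model_state"])
    optimizer.load_state_dict(state["optimizer_state"])
    scheduler.load_state_dict(state["scheduler_state"])
    # hand back the iteration counter and best-metric bookkeeping
    return (state["epoch"], state["best_macro_OA_now"],
            state["best_macro_OA_iter_now"])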
        logger.debug("lost connection with user")


async def main(bus_port, browser_port, host):
    # bind the two websocket endpoints: one receives bus coordinate updates,
    # the other pushes the current positions to connected browsers
    serve_data_receiving = partial(
        serve_websocket, listen_bus_route_data, host, bus_port,
        ssl_context=None)
    serve_browser_connection = partial(
        serve_websocket, handle_browser_connection, host, browser_port,
        ssl_context=None)
    async with trio.open_nursery() as nursery:
        nursery.start_soon(serve_data_receiving)
        nursery.start_soon(serve_browser_connection)


if __name__ == "__main__":
    args = get_argparser().parse_args()

    formatter = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.ERROR, format=formatter)
    logging_level = logging.DEBUG if args.verbose else logging.WARNING
    logger.setLevel(logging_level)

    with contextlib.suppress(KeyboardInterrupt):
        trio.run(main, args.bus_port, args.browser_port, args.host)
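# --- A plausible sketch of the get_argparser referenced above; its original
# definition is not shown. Option names follow the attributes actually used
# (args.host, args.bus_port, args.browser_port, args.verbose); the defaults
# and help strings are assumptions.
import argparse


def get_argparser():
    parser = argparse.ArgumentParser(
        description='Websocket server relaying bus positions to browsers')
    parser.add_argument('--host', default='127.0.0.1',
                        help='address to bind both websocket servers to')
    parser.add_argument('--bus_port', type=int, default=8080,
                        help='port that receives bus coordinate updates')
    parser.add_argument('--browser_port', type=int, default=8000,
                        help='port that serves connected browsers')
    parser.add_argument('--verbose', action='store_true',
                        help='enable debug logging')
    return parser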
        state = {
            "epoch": it + 1,
            "model_state": model.state_dict(),
            "optimizer_state": optimizer.state_dict(),
            "scheduler_state": scheduler.state_dict(),
            "best_fwIoU": best_fwIoU_now,
        }
        save_path = os.path.join(
            writer.file_writer.get_logdir(),
            "{}_{}_last_model.pkl".format(cfg.model.arch, cfg.data.dataloader))
        torch.save(state, save_path)


if __name__ == "__main__":
    cfg = args.get_argparser(r'configs/tile.yml')
    del cfg.test
    torch.backends.cudnn.benchmark = True

    # generate work dir
    run_id = utils.get_work_dir(
        osp.join(
            r'runs',
            cfg.model.arch + '_' + cfg.train.loss.name + '_' +
            cfg.train.optimizer.name + '_' + cfg.train.lr.name))

    writer = SummaryWriter(log_dir=run_id)
    config_fig = types.dict2fig(cfg.to_flatten_dict())
    # plt.savefig(r'./tmp/ff.png')
    writer.add_figure('config', config_fig, close=True)
    # writer.add_hparams(types.flatten_dict_summarWriter(cfg), {'a': 'b'})
    writer.flush()

    # logger
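# --- A hypothetical sketch of what utils.get_work_dir might do; its source is
# not shown in this section. The idea implied by its usage above: make each
# run directory unique by appending an incrementing suffix, so repeated runs
# with the same arch/loss/optimizer name do not overwrite each other.
import os


def get_work_dir(base_dir):
    """Return base_dir, or base_dir_1, base_dir_2, ... if it already exists."""
    run_dir, idx = base_dir, 0
    while os.path.exists(run_dir):
        idx += 1
        run_dir = '{}_{}'.format(base_dir, idx)
    os.makedirs(run_dir)
    return run_dir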