# --- Optimizer selection, data loading, and training-loop setup -------------
# NOTE(review): this fragment depends on names defined elsewhere in the file
# (cfg, model, k, lr, start_epoch, Timer, get_data_loader, tbx) and on a loop
# body that continues beyond this excerpt.

print('gradients --- ', k)

# Build the optimizer named by cfg.solver.
# NOTE(review): the original calls the optimizer constructors with no
# arguments (e.g. torch.optim.SGD()), which raises TypeError at runtime —
# they almost certainly need model.parameters() and a learning rate.
# TODO: confirm the intended hyper-parameters against cfg before changing.
if cfg.solver == 'SGD':
    optimizer = torch.optim.SGD()
elif cfg.solver == 'RMS':
    optimizer = torch.optim.RMSprop()
elif cfg.solver == 'Adam':
    optimizer = torch.optim.Adam()
else:
    # Fixed: the original assigned optimizer = '' immediately before raising
    # (a dead store) and raised a bare ValueError with no message.
    raise ValueError('unknown solver: {}'.format(cfg.solver))

model.cuda()

# DATA LOADER
get_loader = get_data_loader(cfg.datasetname)
train_data = get_loader()
class_names = train_data.dataset.classes
print('dataset len: {}'.format(len(train_data.dataset)))

# TensorBoard log directory: <train_dir>/<backbone>_<dataset>/<MonDD_HH>
tb_dir = os.path.join(cfg.train_dir,
                      cfg.backbone + '_' + cfg.datasetname,
                      time.strftime("%h%d_%H"))
writer = tbx.FileWriter(tb_dir)
summary_out = []
global_step = 0
timer = Timer()

# Training loop: decay the learning rate at the configured epochs (SGD only;
# the adaptive solvers manage their own step sizes).
for ep in range(start_epoch, cfg.max_epoch):
    if ep in cfg.lr_decay_epoches and cfg.solver == 'SGD':
        lr *= cfg.lr_decay
def parse_args():
    """Parse command-line arguments for the citypersons evaluation script.

    Returns:
        argparse.Namespace with a single attribute ``json`` — path to the
        citypersons detection-results JSON file to evaluate.

    Exits with status 1 (after printing help) when invoked with no arguments.
    """
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--json', dest='json',
                        # Fixed typo in user-facing help: 'citerpersons'
                        help='citypersons json file',
                        default='', type=str)
    args = parser.parse_args()
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return args


if __name__ == '__main__':
    # Fixed: parse_args() was called at module import time, before the
    # __main__ guard — parsing argv (and possibly calling sys.exit) whenever
    # this module was imported. It now runs only when executed as a script.
    args = parse_args()

    get_loader = get_data_loader('citypersons')
    test_data = get_loader('./data/citypersons', 'val',
                           is_training=False, batch_size=1,
                           num_workers=1, shuffle=False)
    dataset = test_data.dataset

    if not os.path.exists(args.json):
        # Fixed: the original printed this warning but then ran the
        # evaluation on the missing file anyway; abort instead.
        print('%s do not exist' % args.json)
        sys.exit(1)

    dataset.eval_over_scales(args.json)