def main():
    """Entry point: parse CLI args, build a pose-estimation network from the
    config, load its checkpoint, and evaluate it on the test/validation split.
    """
    args = parse_args()
    update_config(cfg, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    # NOTE(review): eval() on a config-derived string — safe only as long as
    # the config file is trusted input.
    model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(
        cfg, is_train=False
    )

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        # strict=False: tolerate checkpoints with missing/extra keys
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
    else:
        # fall back to the final snapshot written by the training run
        model_state_file = os.path.join(
            final_output_dir, 'final_state.pth'
        )
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()

    # define loss function (criterion) and optimizer
    criterion = JointsMSELoss(
        use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT
    ).cuda()

    # Data loading code — ImageNet mean/std normalization
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
    )
    valid_dataset = eval('dataset.'+cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=cfg.TEST.BATCH_SIZE_PER_GPU*len(cfg.GPUS),
        shuffle=False,
        num_workers=cfg.WORKERS,
        pin_memory=True
    )

    # evaluate on validation set (epoch index 0 — single evaluation pass)
    test(cfg, valid_loader, valid_dataset, model, criterion,
         0, final_output_dir, tb_log_dir)
def main():
    """Entry point: build the gsto_hrnet segmentation model, load its
    checkpoint, and run evaluation — ``testval`` (metrics) on a 'val' split,
    ``test`` (inference dump) on a 'test' split.
    """
    args = parse_args()

    logger, final_output_dir, _ = create_logger(
        config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    # build model
    model = models.gsto_hrnet.get_seg_model(config)

    dump_input = torch.rand(
        (1, 3, config.TRAIN.IMAGE_SIZE[1], config.TRAIN.IMAGE_SIZE[0])
    )
    logger.info(get_model_summary(model.cuda(), dump_input.cuda()))

    # BUG FIX: the original unconditionally re-assigned
    # ``model_state_file = config.TEST.MODEL_FILE`` AFTER this if/else,
    # which silently defeated the final_state.pth fallback (and produced an
    # empty path when MODEL_FILE was unset). The overwrite is removed.
    if config.TEST.MODEL_FILE:
        model_state_file = config.TEST.MODEL_FILE
    else:
        model_state_file = os.path.join(final_output_dir, 'final_state.pth')
    logger.info('=> loading model from {}'.format(model_state_file))

    pretrained_dict = torch.load(model_state_file)
    model_dict = model.state_dict()
    # Checkpoint keys carry a 6-char prefix (e.g. 'model.') — strip it and
    # keep only the entries the current model actually declares.
    pretrained_dict = {k[6:]: v for k, v in pretrained_dict.items()
                       if k[6:] in model_dict.keys()}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)

    gpus = list(config.GPUS)
    model = nn.DataParallel(model, device_ids=gpus).cuda()

    # prepare data: (height, width) crop size from the TEST image size
    test_size = (config.TEST.IMAGE_SIZE[1], config.TEST.IMAGE_SIZE[0])
    test_dataset = eval('datasets.' + config.DATASET.DATASET)(
        root=config.DATASET.ROOT,
        list_path=config.DATASET.TEST_SET,
        num_samples=None,
        num_classes=config.DATASET.NUM_CLASSES,
        multi_scale=False,
        flip=False,
        ignore_label=config.TRAIN.IGNORE_LABEL,
        base_size=config.TEST.BASE_SIZE,
        crop_size=test_size,
        downsample_rate=1)

    testloader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=True)

    start = timeit.default_timer()
    if 'val' in config.DATASET.TEST_SET:
        # metric evaluation against ground truth
        mean_IoU, IoU_array, pixel_acc, mean_acc = testval(config,
                                                           test_dataset,
                                                           testloader,
                                                           model)

        msg = 'MeanIU: {: 4.4f}, Pixel_Acc: {: 4.4f}, \
            Mean_Acc: {: 4.4f}, Class IoU: '.format(mean_IoU,
                                                    pixel_acc, mean_acc)
        logging.info(msg)
        logging.info(IoU_array)
    elif 'test' in config.DATASET.TEST_SET:
        # no labels available: just dump predictions to the output dir
        test(config, test_dataset, testloader, model,
             sv_dir=final_output_dir)

    end = timeit.default_timer()
    # FIX: np.int was removed in NumPy 1.24 — use the builtin int instead
    logger.info('Mins: %d' % int((end - start) / 60))
    logger.info('Done')
def main():
    """Entry point: build a segmentation model, load the best checkpoint, and
    run F/J evaluation on a 'val' split or inference dumping on a 'test'
    split.
    """
    args = parse_args()

    logger, final_output_dir, _ = create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    # build model: on PyTorch 1.x, swap the project's sync-BN class for the
    # stock torch.nn.BatchNorm2d before the model is constructed
    if torch.__version__.startswith('1'):
        module = eval('models.' + config.MODEL.NAME)
        module.BatchNorm2d_class = module.BatchNorm2d = torch.nn.BatchNorm2d
    model = eval('models.' + config.MODEL.NAME + '.get_seg_model')(config)

    dump_input = torch.rand(
        (1, 3, config.TRAIN.IMAGE_SIZE[1], config.TRAIN.IMAGE_SIZE[0]))
    logger.info(get_model_summary(model.cuda(), dump_input.cuda()))

    if config.TEST.MODEL_FILE:
        model_state_file = config.TEST.MODEL_FILE
    else:
        model_state_file = os.path.join(
            final_output_dir, 'best.pth')  # turn final_state.pth to best.pth
    logger.info('=> loading model from {}'.format(model_state_file))

    pretrained_dict = torch.load(model_state_file)
    if 'state_dict' in pretrained_dict:
        pretrained_dict = pretrained_dict['state_dict']
    model_dict = model.state_dict()
    # strip the 6-char key prefix (e.g. 'model.') and drop unknown entries
    pretrained_dict = {
        k[6:]: v for k, v in pretrained_dict.items() if k[6:] in model_dict.keys()
    }
    for k, _ in pretrained_dict.items():
        logger.info('=> loading {} from pretrained model'.format(k))
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)

    gpus = list(config.GPUS)
    model = nn.DataParallel(model, device_ids=gpus).cuda()

    # prepare data: (height, width) crop size from the TEST image size
    test_size = (config.TEST.IMAGE_SIZE[1], config.TEST.IMAGE_SIZE[0])
    test_dataset = eval('datasets.' + config.DATASET.DATASET)(
        root=config.DATASET.ROOT,
        list_path=config.DATASET.TEST_SET,
        num_samples=None,
        num_classes=config.DATASET.NUM_CLASSES,
        multi_scale=False,
        flip=False,
        ignore_label=config.TRAIN.IGNORE_LABEL,
        base_size=config.TEST.BASE_SIZE,
        crop_size=test_size,
        downsample_rate=1)

    testloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=config.WORKERS,
                                             pin_memory=True)

    start = timeit.default_timer()
    if 'val' in config.DATASET.TEST_SET:
        # segmentation F-measure / Jaccard evaluation
        F, J = myTestval(config, test_dataset, testloader, model)
        msg = 'F: {: 4.4f}, J: {: 4.4f} '.format(F, J)
        logging.info(msg)
    elif 'test' in config.DATASET.TEST_SET:
        # TODO(review): hard-coded absolute output path — should come from
        # the config or final_output_dir; kept to preserve behavior.
        test(config,
             test_dataset,
             testloader,
             model,
             sv_dir=r'/raid/wj/HRNet-Semantic-Segmentation/results/faceparse')

    end = timeit.default_timer()
    # FIX: np.int was removed in NumPy 1.24 — use the builtin int instead
    logger.info('Mins: %d' % int((end - start) / 60))
    logger.info('Done')
def main():
    """Entry point: train the face-X-ray pair (nnb backbone + nnc classifier)
    with an iteration-based schedule, evaluating and checkpointing every
    ``config.TRAIN.EVAL_ITER`` iterations.
    """
    args = parse_args()

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    # parameters not frozen; TODO: add parameters to the optimizer mid-training
    nnb = models.nnb.get_nnb(config)
    # nnb = models.ae.get_ae()
    # nnb = models.fcn.get_fcn(config)
    # during training, nnc's softmax is made inactive
    nnc = models.nnc.get_nnc(config)

    # tensorboard writer plus per-phase global step counters
    writer_dict = {
        'writer': SummaryWriter(log_dir='./output/facexray/tensorboard/tensorboard' + '_' + datetime.now().strftime('%Y%m%d_%H%M%S')),
        'train_global_steps': 0,
        'valid_global_steps': 0,
        'test_global_steps': 0,
    }

    # log init — refuse to reuse an existing run directory
    save_dir = os.path.join('./output/facexray/log/log' + '_' + datetime.now().strftime('%Y%m%d_%H%M%S'))
    if os.path.exists(save_dir):
        raise NameError('model dir exists!')
    os.makedirs(save_dir)
    logging = init_log(save_dir)
    _print = logging.info

    gpus = list(config.GPUS)
    # NOTE(review): device_ids is hard-coded to [0] although gpus is computed
    # from the config — confirm whether multi-GPU was intended here.
    nnb = torch.nn.DataParallel(nnb, device_ids=[0]).cuda()
    nnc = torch.nn.DataParallel(nnc, device_ids=[0]).cuda()

    # define loss function (criterion) and optimizer
    criterion = Loss()

    # hyper-parameters
    # initialize the optimizer; train the parameters other than nnb's
    # original HRNet ones
    optimizer = get_optimizer(config, [nnb, nnc])  # TODO: for now just initialize everything
    NNB_GRAD = False
    # freeze nnb's pretrained (HRNet) layers initially
    nnb.module.pretrained_grad(NNB_GRAD)
    last_iter = config.TRAIN.BEGIN_ITER
    best_perf = 0.0

    if isinstance(config.TRAIN.LR_STEP, list):
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR,
            last_iter - 1)
    else:
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                       config.TRAIN.LR_STEP,
                                                       config.TRAIN.LR_FACTOR,
                                                       last_iter - 1)

    # Data loading code
    # the transform does not yet support other input sizes; inputs should be
    # [256, 256, 3]
    # train_dataset = eval('dataset.' + config.DATASET.TRAIN_SET + '.' + config.DATASET.TRAIN_SET)(
    #     root=config.DATASET.TRAIN_ROOT, list_name=config.DATASET.TRAIN_LIST, mode='train', Transform='simple')
    # valid_dataset = eval('dataset.' + config.DATASET.EVAL_SET + '.' + config.DATASET.EVAL_SET)(
    #     root=config.DATASET.VALID_ROOT, list_name=config.DATASET.VALID_LIST, mode='valid', Transform='simple')
    # test_dataset = eval('dataset.' + config.DATASET.EVAL_SET + '.' + config.DATASET.EVAL_SET)(
    #     root=config.DATASET.TEST_ROOT, list_name=config.DATASET.TEST_LIST, mode='test', Transform='simple')
    # NOTE(review): datapath is presumably a module-level constant — not
    # visible in this chunk.
    train_dataset = mydataset(datapath + 'train15k', datapath + 'origin5k')
    valid_dataset = mydataset(datapath + 'generatorBlendedRandomGaussian', datapath + 'origin')
    test_dataset = mydataset(datapath + 'test1k', datapath + 'test_o500')

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.TRAIN.BATCH_SIZE_PER_GPU * len(gpus),
        shuffle=config.TRAIN.SHUFFLE,
        num_workers=config.WORKERS,
        pin_memory=config.PIN_MEMORY)

    def cycle(loader):
        # Infinite iterator over the loader; after each full pass, call the
        # dataset's generate() hook (if any) to regenerate blended samples.
        while True:
            for x in loader:
                yield x
            op = getattr(loader.dataset, "generate", None)
            if callable(op):
                op()

    train_generator = iter(cycle(train_loader))

    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=config.TEST.BATCH_SIZE_PER_GPU * len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=config.PIN_MEMORY)

    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=config.TEST.BATCH_SIZE_PER_GPU * len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=config.PIN_MEMORY)

    for iteration in range(last_iter, config.TRAIN.END_ITER, config.TRAIN.EVAL_ITER):
        # Freeze the original HRNet layer parameters for the first 50000
        # iterations; train all parameters afterwards.
        if not NNB_GRAD and iteration >= 50000:
            if len(gpus) > 0:
                nnb.module.pretrained_grad(True)
            else:
                nnb.pretrained_grad(True)
            NNB_GRAD = True

        # train for one epoch
        train(config, train_generator, nnb, nnc, criterion, optimizer,
              iteration, writer_dict, _print, lr_scheduler=lr_scheduler)
        # evaluate on validation set
        perf_indicator = validate(config, valid_loader, nnb, nnc, criterion,
                                  writer_dict, _print)
        test(config, test_loader, nnb, nnc, criterion, writer_dict, _print)

        # save the model with the best accuracy so far
        # if perf_indicator > best_perf:
        #     best_perf = perf_indicator
        #     torch.save(model.module.state_dict(), './output/BI_dataset/bestfaceXray_'+str(best_perf)+'.pth')
        #     _print('[Save best model] ./output/BI_dataset/bestfaceXray_'+str(best_perf)+'.pth\t')
        iter_now = iteration + config.TRAIN.EVAL_ITER
        # checkpoint every other evaluation interval
        if (iteration // config.TRAIN.EVAL_ITER) % 2 == 0:
            torch.save(
                nnb.module.state_dict(),
                './output/BI_dataset2/faceXray_' + str(iter_now) + '.pth')
            torch.save(nnc.module.state_dict(),
                       './output/BI_dataset2/nnc' + str(iter_now) + '.pth')
            _print('[Save model] ./output/BI_dataset2/faceXray_' +
                   str(iter_now) + '.pth\t')
            _print('[Save the last model] ./output/BI_dataset2/nnc' +
                   str(iter_now) + '.pth\t')
        # lr_scheduler.step()

    # final models
    torch.save(nnb.module.state_dict(), './output/BI_dataset/faceXray.pth')
    torch.save(nnc.module.state_dict(), './output/BI_dataset/nnc.pth')
    _print('[Save the last model] ./output/BI_dataset/faceXray.pth\t')
    _print('[Save the last model] ./output/BI_dataset/nnc.pth\t')
    writer_dict['writer'].close()
def main():
    """Entry point: load a pose network and run batched inference over every
    .jpg image found in ``args.dir``, renaming '.JPG' files to '.jpg' first.
    """
    args = parse_args()
    reset_config(config, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    model = eval('models.' + config.MODEL.NAME + '.get_pose_net')(
        config, is_train=False)

    gpus = [int(i) for i in config.GPUS.split(',')]
    model = torch.nn.DataParallel(model, device_ids=gpus).cuda()

    if config.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(config.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(config.TEST.MODEL_FILE))
    else:
        model_state_file = os.path.join(final_output_dir,
                                        'model_best.pth.tar')
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    # place torch.nn.DataParallel after model.load_state_dict if
    # config.TEST.MODEL_FILE is the models from the SimpleBaseline paper
    #gpus = [int(i) for i in config.GPUS.split(',')]
    #model = torch.nn.DataParallel(model, device_ids=gpus).cuda()

    # Hoisted out of the loop: the normalization transform is identical for
    # every image, so build it once.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    imgs = [os.path.join(args.dir, i) for i in os.listdir(args.dir)]
    cnt = 0   # images accumulated in the current batch
    cnt1 = 0  # number of completed batches (used as a batch index by test())
    for img in imgs:
        img_name = img.rsplit('/')[-1]
        print(img_name)
        # BUG FIX: the original used `if img_name.find('.JPG'):`, which is
        # truthy when '.JPG' is ABSENT (find returns -1) and falsy when it is
        # at position 0 — the opposite of the intent. Use a membership test.
        if '.JPG' in img_name:
            os.rename(img, img.replace('.JPG', '.jpg'))
            img = img.replace('.JPG', '.jpg')
        if img.find('.jpg') == -1:
            continue

        data_numpy = cv2.imread(
            img, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
        data_numpy = cv2.resize(
            data_numpy,
            (config.MODEL.IMAGE_SIZE[0], config.MODEL.IMAGE_SIZE[1]),
            interpolation=cv2.INTER_LINEAR)

        inp = transform(data_numpy)  # convert to tensor and normalize data

        # pick image to batch: stack into a (N, C, H, W) tensor
        if cnt == 0:
            input = inp.view(-1, inp.size()[0], inp.size()[1], inp.size()[2])
        else:
            inp = inp.view(-1, inp.size()[0], inp.size()[1], inp.size()[2])
            input = torch.cat((input, inp), 0)
        cnt += 1

        if cnt == config.TEST.BATCH_SIZE:
            print('Reached the batch size: {}. Test: {}'.format(
                config.TEST.BATCH_SIZE, cnt))
            test(config, input, model, final_output_dir, cnt1)
            cnt = 0
            cnt1 += 1

    # flush the final partial batch, if any
    if cnt > 0:
        print('Test: {}'.format(cnt))
        test(config, input, model, final_output_dir, cnt1)
def main():
    """Entry point: build a segmentation model on a single GPU, load the best
    checkpoint, and dump test-set predictions under
    ``final_output_dir/test_result``.
    """
    args = parse_args()

    logger, final_output_dir, _ = create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    # build model: on PyTorch 1.x, swap the project's sync-BN class for the
    # stock torch.nn.BatchNorm2d before the model is constructed
    if torch.__version__.startswith('1'):
        module = eval('models.' + config.MODEL.NAME)
        module.BatchNorm2d_class = module.BatchNorm2d = torch.nn.BatchNorm2d
    model = eval('models.' + config.MODEL.NAME + '.get_seg_model')(config)

    dump_input = torch.rand(
        (1, 3, config.TRAIN.IMAGE_SIZE[1], config.TRAIN.IMAGE_SIZE[0]))
    logger.info(get_model_summary(model.cuda(), dump_input.cuda()))

    if config.TEST.MODEL_FILE:
        model_state_file = config.TEST.MODEL_FILE
    else:
        # model_state_file = os.path.join(final_output_dir, 'best_0.7589.pth')
        model_state_file = os.path.join(final_output_dir, 'best.pth')
    logger.info('=> loading model from {}'.format(model_state_file))

    pretrained_dict = torch.load(model_state_file)
    if 'state_dict' in pretrained_dict:
        pretrained_dict = pretrained_dict['state_dict']
    model_dict = model.state_dict()
    # strip the 6-char key prefix (e.g. 'model.') and drop unknown entries
    pretrained_dict = {
        k[6:]: v for k, v in pretrained_dict.items() if k[6:] in model_dict.keys()
    }
    for k, _ in pretrained_dict.items():
        logger.info('=> loading {} from pretrained model'.format(k))
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)

    # single-GPU inference (DataParallel deliberately disabled)
    model = model.cuda()
    # gpus = list(config.GPUS)
    # model = nn.DataParallel(model, device_ids=gpus).cuda()

    # prepare data: (height, width) crop size from the TEST image size
    test_size = (config.TEST.IMAGE_SIZE[1], config.TEST.IMAGE_SIZE[0])
    test_dataset = eval('datasets.' + config.DATASET.DATASET)(
        root=config.DATASET.ROOT,
        list_path=config.DATASET.TEST_SET,
        num_samples=None,
        num_classes=config.DATASET.NUM_CLASSES,
        multi_scale=False,
        flip=False,
        ignore_label=config.TRAIN.IGNORE_LABEL,
        base_size=config.TEST.BASE_SIZE,
        crop_size=test_size,
        downsample_rate=1)

    testloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=config.WORKERS,
                                             pin_memory=True)

    start = timeit.default_timer()
    test(config,
         test_dataset,
         testloader,
         model,
         sv_dir=final_output_dir + '/test_result')
    end = timeit.default_timer()
    # FIX: np.int was removed in NumPy 1.24 — use the builtin int instead
    logger.info('Mins: %d' % int((end - start) / 60))
    logger.info('Done')
def main():
    """Entry point: train the face-X-ray pair (nnb backbone + nnc classifier)
    with an epoch-based schedule, validating, testing and checkpointing each
    epoch.
    """
    args = parse_args()

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    # parameters not frozen; TODO: add parameters to the optimizer mid-training
    nnb = models.nnb.get_nnb(config)
    # nnb = models.ae.get_ae()
    # nnb = models.fcn.get_fcn(config)
    # during training, nnc's softmax is made inactive
    nnc = models.nnc.get_nnc(config)

    # tensorboard writer plus per-phase global step counters
    writer_dict = {
        'writer': SummaryWriter(log_dir='./output/facexray/tensorboard/tensorboard' + '_' + datetime.now().strftime('%Y%m%d_%H%M%S')),
        'train_global_steps': 0,
        'valid_global_steps': 0,
        'test_global_steps': 0,
    }

    # log init — refuse to reuse an existing run directory
    save_dir = os.path.join('./output/facexray/log/log' + '_' + datetime.now().strftime('%Y%m%d_%H%M%S'))
    if os.path.exists(save_dir):
        raise NameError('model dir exists!')
    os.makedirs(save_dir)
    logging = init_log(save_dir)
    _print = logging.info

    gpus = list(config.GPUS)
    nnb = torch.nn.DataParallel(nnb, device_ids=gpus).cuda()
    nnc = torch.nn.DataParallel(nnc, device_ids=gpus).cuda()

    # define loss function (criterion) and optimizer
    criterion = Loss()

    # hyper-parameters
    # initialize the optimizer; train the parameters other than nnb's
    # original HRNet ones
    optimizer = get_optimizer(config, [nnb, nnc])  # TODO: for now just initialize everything
    last_epoch = config.TRAIN.BEGIN_EPOCH
    best_perf = 0.0

    if isinstance(config.TRAIN.LR_STEP, list):
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR,
            last_epoch - 1)
    else:
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                       config.TRAIN.LR_STEP,
                                                       config.TRAIN.LR_FACTOR,
                                                       last_epoch - 1)

    # Data loading code
    # the transform does not yet support other input sizes; inputs should be
    # [256, 256, 3]
    train_dataset = eval('dataset.' + config.DATASET.DATASET + '.' + config.DATASET.DATASET)(
        root=config.DATASET.TRAIN_ROOT, list_name=config.DATASET.TRAIN_LIST, mode='train', Transform='strong_pixel')
    valid_dataset = eval('dataset.' + config.DATASET.DATASET + '.' + config.DATASET.DATASET)(
        root=config.DATASET.VALID_ROOT, list_name=config.DATASET.VALID_LIST, mode='valid', Transform='easy')
    test_dataset = eval('dataset.' + config.DATASET.DATASET + '.' + config.DATASET.DATASET)(
        root=config.DATASET.TEST_ROOT, list_name=config.DATASET.TEST_LIST, mode='test', Transform='easy')

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.TRAIN.BATCH_SIZE_PER_GPU * len(gpus),
        shuffle=config.TRAIN.SHUFFLE,
        num_workers=config.WORKERS,
        pin_memory=config.PIN_MEMORY)

    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=config.TEST.BATCH_SIZE_PER_GPU * len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=config.PIN_MEMORY)

    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=config.TEST.BATCH_SIZE_PER_GPU * len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=config.PIN_MEMORY)

    for epoch in range(last_epoch, config.TRAIN.END_EPOCH):
        # Freeze the original HRNet layer parameters for the first 50000
        # iterations; train all parameters afterwards.
        # Warm-up is not handled for now.
        # if epoch == 25000:
        #     for k, v in nnb.named_parameters():
        #         v.requires_grad = True

        # train for one epoch
        train(config, train_loader, nnb, nnc, criterion, optimizer, epoch,
              writer_dict, _print)
        # evaluate on validation set
        perf_indicator = validate(config, valid_loader, nnb, nnc, criterion,
                                  writer_dict, _print)
        test(config, test_loader, nnb, nnc, criterion, writer_dict, _print)

        # save the model with the best accuracy so far
        # if perf_indicator > best_perf:
        #     best_perf = perf_indicator
        #     torch.save(model.module.state_dict(), './output/BI_dataset/bestfaceXray_'+str(best_perf)+'.pth')
        #     _print('[Save best model] ./output/BI_dataset/bestfaceXray_'+str(best_perf)+'.pth\t')
        # checkpoint every other epoch
        if epoch % 2 == 0:
            torch.save(nnb.module.state_dict(),
                       './output/BI_dataset2/faceXray_' + str(epoch) + '.pth')
            torch.save(nnc.module.state_dict(),
                       './output/BI_dataset2/nnc' + str(epoch) + '.pth')
            _print('[Save model] ./output/BI_dataset2/faceXray_' +
                   str(epoch) + '.pth\t')
            _print('[Save the last model] ./output/BI_dataset2/nnc' +
                   str(epoch) + '.pth\t')
        lr_scheduler.step()

    # final models
    torch.save(nnb.module.state_dict(), './output/BI_dataset/faceXray.pth')
    torch.save(nnc.module.state_dict(), './output/BI_dataset/nnc.pth')
    _print('[Save the last model] ./output/BI_dataset/faceXray.pth\t')
    _print('[Save the last model] ./output/BI_dataset/nnc.pth\t')
    writer_dict['writer'].close()
def main():
    """Entry point: build a pose/saliency model, load a checkpoint chosen via
    ``args.modelType``, then either validate on the 'val' split (with metric
    evaluation) or dump predictions for the 'test' split, depending on
    ``args.mode``.
    """
    args = parse_args()
    update_config(cfg, args)

    # restrict visible devices to the configured GPUs
    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join([str(x) for x in cfg.GPUS])

    logger, final_output_dir, tb_log_dir = create_logger(cfg,
                                                         args.cfg,
                                                         phase='valid')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                               is_train=False)

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        # strict=False: tolerate checkpoints with missing/extra keys
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
    else:
        # modelDict maps args.modelType to a checkpoint filename
        # (module-level; not visible in this chunk)
        model_state_file = os.path.join(final_output_dir,
                                       modelDict[args.modelType])
        logger.info('=> loading model from {}'.format(model_state_file))
        if 'current' == args.modelType:
            # a 'current' checkpoint must match the model exactly
            model.load_state_dict(
                torch.load(model_state_file)['best_state_dict'], strict=True)
        else:
            # model.load_state_dict({k.replace('gen_attention','nl_attention'):v for k,v in torch.load(model_state_file)['best_state_dict'].items()}, strict=True)
            model.load_state_dict(
                torch.load(model_state_file)['best_state_dict'], strict=False)

    # CUDA_VISIBLE_DEVICES already filtered the devices, so ids are 0..N-1
    model = torch.nn.DataParallel(model, device_ids=list(range(len(
        cfg.GPUS)))).cuda()

    # define loss function (criterion) and optimizer
    if cfg.LOSS.NAME == 'ModMSE_KL_CC_NSS_Loss':
        # NSS variant falls back to the KL_CC loss class here
        criterion = ModMSE_KL_CC_Loss(cfg).cuda()
    else:
        criterion = eval(cfg.LOSS.NAME)(cfg).cuda()

    # Data loading code — ImageNet mean/std normalization
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    if args.mode == 'val':
        valid_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
            cfg, cfg.DATASET.ROOT, 'val', False,
            transforms.Compose([
                transforms.ToTensor(),
                normalize,
            ]))
        if cfg.DATASET.SAMPLER == "":
            valid_loader = torch.utils.data.DataLoader(
                valid_dataset,
                batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
                shuffle=False,
                num_workers=cfg.WORKERS,
                pin_memory=True)
        elif cfg.DATASET.SAMPLER == "RandomIdentitySampler":
            valid_loader = torch.utils.data.DataLoader(
                valid_dataset,
                sampler=dataset.RandomIdentitySampler(
                    valid_dataset.images,
                    cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
                    cfg.DATASET.NUM_INSTANCES),
                batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
                shuffle=False,
                num_workers=cfg.WORKERS,
                pin_memory=True)
        else:
            # unsupported sampler name
            assert False

        # evaluate on validation set and run the dataset's own metric
        # evaluation on the returned results
        perf_indicator, res = validate(cfg,
                                       valid_loader,
                                       valid_dataset,
                                       model,
                                       criterion,
                                       final_output_dir,
                                       tb_log_dir,
                                       returnRes=True)
        valid_dataset.evaluate(final_output_dir, res,
                               modelDict[args.modelType].split('.')[0])
    else:
        test_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
            cfg, cfg.DATASET.ROOT, 'test', False,
            transforms.Compose([
                transforms.ToTensor(),
                normalize,
            ]))
        if cfg.DATASET.SAMPLER == "":
            test_loader = torch.utils.data.DataLoader(
                test_dataset,
                batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
                shuffle=False,
                num_workers=cfg.WORKERS,
                pin_memory=True)
        elif cfg.DATASET.SAMPLER == "RandomIdentitySampler":
            # NOTE(review): unlike the 'val' branch, batch_size is divided by
            # NUM_INSTANCES here — confirm this asymmetry is intentional.
            test_loader = torch.utils.data.DataLoader(
                test_dataset,
                sampler=dataset.RandomIdentitySampler(
                    test_dataset.images,
                    cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
                    cfg.DATASET.NUM_INSTANCES),
                batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS) //
                cfg.DATASET.NUM_INSTANCES,
                shuffle=False,
                num_workers=cfg.WORKERS,
                pin_memory=True)
        else:
            # unsupported sampler name
            assert False

        output_dir = os.path.join(final_output_dir, cfg.TEST.OUT_DIR)
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        test(cfg, test_loader, model, output_dir)