def main():
    args = get_args()

    # get student config
    student_cfg = get_student_cfg(cfg, args.student_file)
    student_cfg.LOG_DIR = args.log
    student_cfg.PRINT_FREQ = int(args.print_freq)

    if args.mode == 'test':
        student_cfg.DATASET.TEST = 'test2017'

    logger, final_output_dir, tb_log_dir = create_logger(
        student_cfg, args.student_file, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(student_cfg)

    # cudnn related setting
    cudnn.benchmark = student_cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = student_cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = student_cfg.CUDNN.ENABLED

    dev = 'cuda' if torch.cuda.is_available() else 'cpu'

    model = PoseHigherResolutionNet(student_cfg)
    model.load_state_dict(torch.load(args.model_file))

    dump_input = torch.rand(
        (1, 3, student_cfg.DATASET.INPUT_SIZE, student_cfg.DATASET.INPUT_SIZE))
    logger.info(
        get_model_summary(model, dump_input, verbose=student_cfg.VERBOSE))

    model = torch.nn.DataParallel(model, device_ids=student_cfg.GPUS).cuda()
    model.eval()

    data_loader, test_dataset = make_test_dataloader(student_cfg)

    transforms = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    ])

    parser = HeatmapParser(student_cfg)
    all_preds = []
    all_scores = []

    pbar = tqdm(
        total=len(test_dataset)) if student_cfg.TEST.LOG_PROGRESS else None
    for i, (images, annos) in enumerate(data_loader):
        assert 1 == images.size(0), 'Test batch size should be 1'

        image = images[0].cpu().numpy()
        # size at scale 1.0
        base_size, center, scale = get_multi_scale_size(
            image, student_cfg.DATASET.INPUT_SIZE, 1.0,
            min(student_cfg.TEST.SCALE_FACTOR))

        with torch.no_grad():
            final_heatmaps = None
            tags_list = []
            for idx, s in enumerate(
                    sorted(student_cfg.TEST.SCALE_FACTOR, reverse=True)):
                input_size = student_cfg.DATASET.INPUT_SIZE
                image_resized, center, scale = resize_align_multi_scale(
                    image, input_size, s, min(student_cfg.TEST.SCALE_FACTOR))
                image_resized = transforms(image_resized)
                image_resized = image_resized.unsqueeze(0).cuda()

                outputs, heatmaps, tags = get_multi_stage_outputs(
                    student_cfg, model, image_resized,
                    student_cfg.TEST.FLIP_TEST,
                    student_cfg.TEST.PROJECT2IMAGE, base_size)

                final_heatmaps, tags_list = aggregate_results(
                    student_cfg, s, final_heatmaps, tags_list, heatmaps, tags)

            final_heatmaps = final_heatmaps / float(
                len(student_cfg.TEST.SCALE_FACTOR))
            tags = torch.cat(tags_list, dim=4)
            grouped, scores = parser.parse(final_heatmaps, tags,
                                           student_cfg.TEST.ADJUST,
                                           student_cfg.TEST.REFINE)

            final_results = get_final_preds(
                grouped, center, scale,
                [final_heatmaps.size(3), final_heatmaps.size(2)])

        if student_cfg.TEST.LOG_PROGRESS:
            pbar.update()

        if i % student_cfg.PRINT_FREQ == 0:
            prefix = '{}_{}'.format(
                os.path.join(final_output_dir, 'result_valid'), i)
            # logger.info('=> write {}'.format(prefix))
            save_valid_image(image, final_results, '{}.jpg'.format(prefix),
                             dataset=test_dataset.name)
            # save_debug_images(cfg, image_resized, None, None, outputs, prefix)

        all_preds.append(final_results)
        all_scores.append(scores)

    if student_cfg.TEST.LOG_PROGRESS:
        pbar.close()

    name_values, _ = test_dataset.evaluate(cfg, all_preds, all_scores,
                                           final_output_dir)

    if isinstance(name_values, list):
        for name_value in name_values:
            _print_name_value(logger, name_value, cfg.MODEL.NAME)
    else:
        _print_name_value(logger, name_values, cfg.MODEL.NAME)
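
# --- Hedged sketch: a minimal get_args() matching the attributes used in the
# student-evaluation main() above. The real argument parser is defined
# elsewhere in the repo; the flag names, defaults, and help strings below are
# assumptions inferred only from the accesses args.student_file, args.log,
# args.print_freq, args.mode, and args.model_file.
import argparse


def get_args():
    parser = argparse.ArgumentParser(
        description='Evaluate a student pose network (hypothetical CLI sketch)')
    parser.add_argument('--student_file', required=True,
                        help='path to the student config file')
    parser.add_argument('--model_file', required=True,
                        help='path to the trained student checkpoint')
    parser.add_argument('--log', default='log',
                        help='log directory, written to student_cfg.LOG_DIR')
    parser.add_argument('--print_freq', default=100,
                        help='logging/visualization frequency (cast to int)')
    parser.add_argument('--mode', default='valid', choices=['valid', 'test'],
                        help="'test' switches DATASET.TEST to test2017")
    return parser.parse_args()
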
def main():
    args = parse_args()
    update_config(cfg, args)
    check_config(cfg)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(
        cfg, is_train=False)

    dump_input = torch.rand(
        (1, 3, cfg.DATASET.INPUT_SIZE, cfg.DATASET.INPUT_SIZE))
    logger.info(get_model_summary(model, dump_input, verbose=cfg.VERBOSE))

    if cfg.FP16.ENABLED:
        model = network_to_half(model)

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=True)
    else:
        model_state_file = os.path.join(final_output_dir, 'model_best.pth.tar')
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
    model.eval()

    data_loader, test_dataset = make_test_dataloader(cfg)

    if cfg.MODEL.NAME == 'pose_hourglass':
        transforms = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
        ])
    else:
        transforms = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                             std=[0.229, 0.224, 0.225])
        ])

    parser = HeatmapParser(cfg)
    all_preds = []
    all_scores = []

    # pbar = tqdm(total=len(test_dataset)) if cfg.TEST.LOG_PROGRESS else None
    pbar = tqdm(total=len(test_dataset))
    for i, (images, annos) in enumerate(data_loader):
        assert 1 == images.size(0), 'Test batch size should be 1'

        image = images[0].cpu().numpy()
        # size at scale 1.0
        base_size, center, scale = get_multi_scale_size(
            image, cfg.DATASET.INPUT_SIZE, 1.0, min(cfg.TEST.SCALE_FACTOR))

        with torch.no_grad():
            final_heatmaps = None
            tags_list = []
            for idx, s in enumerate(sorted(cfg.TEST.SCALE_FACTOR, reverse=True)):
                input_size = cfg.DATASET.INPUT_SIZE
                image_resized, center, scale = resize_align_multi_scale(
                    image, input_size, s, min(cfg.TEST.SCALE_FACTOR))
                image_resized = transforms(image_resized)
                image_resized = image_resized.unsqueeze(0).cuda()

                outputs, heatmaps, tags = get_multi_stage_outputs(
                    cfg, model, image_resized, cfg.TEST.FLIP_TEST,
                    cfg.TEST.PROJECT2IMAGE, base_size)

                final_heatmaps, tags_list = aggregate_results(
                    cfg, s, final_heatmaps, tags_list, heatmaps, tags)

            final_heatmaps = final_heatmaps / float(len(cfg.TEST.SCALE_FACTOR))
            tags = torch.cat(tags_list, dim=4)
            grouped, scores = parser.parse(final_heatmaps, tags,
                                           cfg.TEST.ADJUST, cfg.TEST.REFINE)

            final_results = get_final_preds(
                grouped, center, scale,
                [final_heatmaps.size(3), final_heatmaps.size(2)])

            if cfg.RESCORE.USE:
                try:
                    scores = rescore_valid(cfg, final_results, scores)
                except Exception:
                    print("got one.")

        # if cfg.TEST.LOG_PROGRESS:
        #     pbar.update()
        pbar.update()

        if i % cfg.PRINT_FREQ == 0:
            prefix = '{}_{}'.format(
                os.path.join(final_output_dir, 'result_valid'), i)
            # logger.info('=> write {}'.format(prefix))
            save_valid_image(image, final_results, '{}.jpg'.format(prefix),
                             dataset=test_dataset.name)
            # for scale_idx in range(len(outputs)):
            #     prefix_scale = prefix + '_output_{}'.format(
            #         # cfg.DATASET.OUTPUT_SIZE[scale_idx]
            #         scale_idx
            #     )
            #     save_debug_images(
            #         cfg, images, None, None,
            #         outputs[scale_idx], prefix_scale
            #     )

        all_preds.append(final_results)
        all_scores.append(scores)

    if cfg.TEST.LOG_PROGRESS:
        pbar.close()

    name_values, _ = test_dataset.evaluate(cfg, all_preds, all_scores,
                                           final_output_dir)

    if isinstance(name_values, list):
        for name_value in name_values:
            _print_name_value(logger, name_value, cfg.MODEL.NAME)
    else:
        _print_name_value(logger, name_values, cfg.MODEL.NAME)
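
# --- Hedged sketch: a standard script entry-point guard, assumed rather than
# taken from the source. Note that the two main() definitions above presumably
# live in separate scripts; if kept in one module, the second definition
# shadows the first, so this guard would run the cfg-based validation loop.
if __name__ == '__main__':
    main()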