def evaluate(args):
  assert torch.cuda.is_available(), 'CUDA is not available.'
  torch.backends.cudnn.enabled   = True
  torch.backends.cudnn.benchmark = True
  print('The image is {:}'.format(args.image))
  print('The model is {:}'.format(args.model))
  snapshot = Path(args.model)
  assert snapshot.exists(), 'The model path {:} does not exist'.format(snapshot)
  print('The face bounding box is {:}'.format(args.face))
  assert len(args.face) == 4, 'Invalid face input : {:}'.format(args.face)
  snapshot = torch.load(snapshot)

  # General data augmentation for evaluation
  mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]])
  normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

  param = snapshot['args']
  eval_transform = transforms.Compose([transforms.PreCrop(param.pre_crop_expand),
                                       transforms.TrainScale2WH((param.crop_width, param.crop_height)),
                                       transforms.ToTensor(), normalize])
  model_config = load_configure(param.model_config, None)
  dataset = Dataset(eval_transform, param.sigma, model_config.downsample, param.heatmap_type, param.data_indicator)
  dataset.reset(param.num_pts)

  net = obtain_model(model_config, param.num_pts + 1)
  net = net.cuda()
  weights = remove_module_dict(snapshot['detector'])
  net.load_state_dict(weights)

  print('Prepare input data')
  [image, _, _, _, _, _, cropped_size], meta = dataset.prepare_input(args.image, args.face)
  inputs = image.unsqueeze(0).cuda()

  # network forward
  with torch.no_grad():
    batch_heatmaps, batch_locs, batch_scos = net(inputs)

  # obtain the locations on the image in the original size
  cpu = torch.device('cpu')
  np_batch_locs, np_batch_scos, cropped_size = batch_locs.to(cpu).numpy(), batch_scos.to(cpu).numpy(), cropped_size.numpy()
  locations, scores = np_batch_locs[0, :-1, :], np.expand_dims(np_batch_scos[0, :-1], -1)
  scale_h, scale_w = cropped_size[0] * 1. / inputs.size(-2), cropped_size[1] * 1. / inputs.size(-1)
  locations[:, 0], locations[:, 1] = locations[:, 0] * scale_w + cropped_size[2], locations[:, 1] * scale_h + cropped_size[3]
  prediction = np.concatenate((locations, scores), axis=1).transpose(1, 0)

  print('the coordinates for {:} facial landmarks:'.format(param.num_pts))
  for i in range(param.num_pts):
    point = prediction[:, i]
    print('the {:02d}/{:02d}-th point : ({:.1f}, {:.1f}), score = {:.2f}'.format(i, param.num_pts, float(point[0]), float(point[1]), float(point[2])))

  if args.save:
    resize = 512
    image = draw_image_by_points(args.image, prediction, 2, (255, 0, 0), args.face, resize)
    image.save(args.save)
    print('save the visualization results into {:}'.format(args.save))
  else:
    print('ignore the visualization procedure')
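# remove_module_dict() is used by the loaders in this file but is not defined here.
# A minimal sketch of what it is assumed to do -- strip the 'module.' prefix that
# torch.nn.DataParallel adds to every key of a saved state dict:
def remove_module_dict_sketch(state_dict):
  new_state_dict = {}
  for key, value in state_dict.items():
    new_key = key[len('module.'):] if key.startswith('module.') else key
    new_state_dict[new_key] = value
  return new_state_dict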
def evaluate(args):
  assert torch.cuda.is_available(), 'CUDA is not available.'
  torch.backends.cudnn.enabled   = True
  torch.backends.cudnn.benchmark = True
  print('The image is {:}'.format(args.image))
  print('The model is {:}'.format(args.model))
  snapshot = Path(args.model)
  assert snapshot.exists(), 'The model path {:} does not exist'.format(snapshot)
  print('The face bounding box is {:}'.format(args.face))
  assert len(args.face) == 4, 'Invalid face input : {:}'.format(args.face)
  snapshot = torch.load(snapshot)

  # General data augmentation for evaluation
  mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]])
  normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

  param = snapshot['args']
  eval_transform = transforms.Compose([transforms.PreCrop(param.pre_crop_expand),
                                       transforms.TrainScale2WH((param.crop_width, param.crop_height)),
                                       transforms.ToTensor(), normalize])
  model_config = load_configure(param.model_config, None)
  dataset = Dataset(eval_transform, param.sigma, model_config.downsample, param.heatmap_type, param.data_indicator)
  dataset.reset(param.num_pts)

  net = obtain_model(model_config, param.num_pts + 1)
  net = net.cuda()
  weights = remove_module_dict(snapshot['state_dict'])
  net.load_state_dict(weights)

  print('Prepare input data')
  [image, _, _, _, _, _, cropped_size], meta = dataset.prepare_input(args.image, args.face)
  inputs = image.unsqueeze(0).cuda()

  # network forward
  with torch.no_grad():
    batch_heatmaps, batch_locs, batch_scos = net(inputs)

  # obtain the locations on the image in the original size
  cpu = torch.device('cpu')
  np_batch_locs, np_batch_scos, cropped_size = batch_locs.to(cpu).numpy(), batch_scos.to(cpu).numpy(), cropped_size.numpy()
  locations, scores = np_batch_locs[0, :-1, :], np.expand_dims(np_batch_scos[0, :-1], -1)
  scale_h, scale_w = cropped_size[0] * 1. / inputs.size(-2), cropped_size[1] * 1. / inputs.size(-1)
  locations[:, 0], locations[:, 1] = locations[:, 0] * scale_w + cropped_size[2], locations[:, 1] * scale_h + cropped_size[3]
  prediction = np.concatenate((locations, scores), axis=1).transpose(1, 0)

  print('the coordinates for {:} facial landmarks:'.format(param.num_pts))
  for i in range(param.num_pts):
    point = prediction[:, i]
    print('the {:02d}/{:02d}-th point : ({:.1f}, {:.1f}), score = {:.2f}'.format(i, param.num_pts, float(point[0]), float(point[1]), float(point[2])))

  if args.save:
    resize = 512
    image = draw_image_by_points(args.image, prediction, 2, (255, 0, 0), args.face, resize)
    image.save(args.save)
    print('save the visualization results into {:}'.format(args.save))
  else:
    print('ignore the visualization procedure')
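# A possible command-line driver for the evaluate() functions above; the argparse setup
# is not shown in this file, so this is only a sketch -- the flag names simply mirror the
# attributes that evaluate() reads (args.image, args.model, args.face, args.save).
if __name__ == '__main__':
  import argparse
  parser = argparse.ArgumentParser(description='Evaluate a facial landmark detector on one image.')
  parser.add_argument('--image', type=str, required=True, help='path to the input image')
  parser.add_argument('--model', type=str, required=True, help='path to the trained snapshot (.pth)')
  parser.add_argument('--face', type=float, nargs=4, required=True, help='face bounding box: x1 y1 x2 y2')
  parser.add_argument('--save', type=str, default=None, help='optional path to save the visualization')
  evaluate(parser.parse_args())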
def main(args): assert torch.cuda.is_available(), 'CUDA is not available.' torch.backends.cudnn.enabled = True torch.backends.cudnn.benchmark = True prepare_seed(args.rand_seed) logstr = 'seed-{:}-time-{:}'.format(args.rand_seed, time_for_file()) logger = Logger(args.save_path, logstr) logger.log('Main Function with logger : {:}'.format(logger)) logger.log('Arguments : -------------------------------') for name, value in args._get_kwargs(): logger.log('{:16} : {:}'.format(name, value)) logger.log("Python version : {}".format(sys.version.replace('\n', ' '))) logger.log("Pillow version : {}".format(PIL.__version__)) logger.log("PyTorch version : {}".format(torch.__version__)) logger.log("cuDNN version : {}".format(torch.backends.cudnn.version())) # General Data Argumentation mean_fill = tuple( [int(x*255) for x in [0.485, 0.456, 0.406] ] ) normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) assert args.arg_flip == False, 'The flip is : {}, rotate is {}'.format(args.arg_flip, args.rotate_max) train_transform = [transforms.PreCrop(args.pre_crop_expand)] train_transform += [transforms.TrainScale2WH((args.crop_width, args.crop_height))] train_transform += [transforms.AugScale(args.scale_prob, args.scale_min, args.scale_max)] #if args.arg_flip: # train_transform += [transforms.AugHorizontalFlip()] if args.rotate_max: train_transform += [transforms.AugRotate(args.rotate_max)] train_transform += [transforms.AugCrop(args.crop_width, args.crop_height, args.crop_perturb_max, mean_fill)] train_transform += [transforms.ToTensor(), normalize] train_transform = transforms.Compose( train_transform ) eval_transform = transforms.Compose([transforms.PreCrop(args.pre_crop_expand), transforms.TrainScale2WH((args.crop_width, args.crop_height)), transforms.ToTensor(), normalize]) assert (args.scale_min+args.scale_max) / 2 == args.scale_eval, 'The scale is not ok : {},{} vs {}'.format(args.scale_min, args.scale_max, args.scale_eval) # Model Configure Load model_config = load_configure(args.model_config, logger) args.sigma = args.sigma * args.scale_eval logger.log('Real Sigma : {:}'.format(args.sigma)) # Training Dataset train_data = Dataset(train_transform, args.sigma, model_config.downsample, args.heatmap_type, args.data_indicator) train_data.load_list(args.train_lists, args.num_pts, True) train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True) # Evaluation Dataloader eval_loaders = [] if args.eval_vlists is not None: for eval_vlist in args.eval_vlists: eval_vdata = Dataset(eval_transform, args.sigma, model_config.downsample, args.heatmap_type, args.data_indicator) eval_vdata.load_list(eval_vlist, args.num_pts, True) eval_vloader = torch.utils.data.DataLoader(eval_vdata, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True) eval_loaders.append((eval_vloader, True)) if args.eval_ilists is not None: for eval_ilist in args.eval_ilists: eval_idata = Dataset(eval_transform, args.sigma, model_config.downsample, args.heatmap_type, args.data_indicator) eval_idata.load_list(eval_ilist, args.num_pts, True) eval_iloader = torch.utils.data.DataLoader(eval_idata, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True) eval_loaders.append((eval_iloader, False)) # Define network logger.log('configure : {:}'.format(model_config)) net = obtain_model(model_config, args.num_pts + 1) assert model_config.downsample == net.downsample, 'downsample is not 
correct : {} vs {}'.format(model_config.downsample, net.downsample) logger.log("=> network :\n {}".format(net)) logger.log('Training-data : {:}'.format(train_data)) for i, eval_loader in enumerate(eval_loaders): eval_loader, is_video = eval_loader logger.log('The [{:2d}/{:2d}]-th testing-data [{:}] = {:}'.format(i, len(eval_loaders), 'video' if is_video else 'image', eval_loader.dataset)) logger.log('arguments : {:}'.format(args)) opt_config = load_configure(args.opt_config, logger) if hasattr(net, 'specify_parameter'): net_param_dict = net.specify_parameter(opt_config.LR, opt_config.Decay) else: net_param_dict = net.parameters() optimizer, scheduler, criterion = obtain_optimizer(net_param_dict, opt_config, logger) logger.log('criterion : {:}'.format(criterion)) net, criterion = net.cuda(), criterion.cuda() net = torch.nn.DataParallel(net) last_info = logger.last_info() if last_info.exists(): logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info)) last_info = torch.load(last_info) start_epoch = last_info['epoch'] + 1 checkpoint = torch.load(last_info['last_checkpoint']) assert last_info['epoch'] == checkpoint['epoch'], 'Last-Info is not right {:} vs {:}'.format(last_info, checkpoint['epoch']) net.load_state_dict(checkpoint['state_dict']) optimizer.load_state_dict(checkpoint['optimizer']) scheduler.load_state_dict(checkpoint['scheduler']) logger.log("=> load-ok checkpoint '{:}' (epoch {:}) done" .format(logger.last_info(), checkpoint['epoch'])) else: logger.log("=> do not find the last-info file : {:}".format(last_info)) start_epoch = 0 if args.eval_once: logger.log("=> only evaluate the model once") eval_results = eval_all(args, eval_loaders, net, criterion, 'eval-once', logger, opt_config) logger.close() ; return # Main Training and Evaluation Loop start_time = time.time() epoch_time = AverageMeter() for epoch in range(start_epoch, opt_config.epochs): scheduler.step() need_time = convert_secs2time(epoch_time.avg * (opt_config.epochs-epoch), True) epoch_str = 'epoch-{:03d}-{:03d}'.format(epoch, opt_config.epochs) LRs = scheduler.get_lr() logger.log('\n==>>{:s} [{:s}], [{:s}], LR : [{:.5f} ~ {:.5f}], Config : {:}'.format(time_string(), epoch_str, need_time, min(LRs), max(LRs), opt_config)) # train for one epoch train_loss, train_nme = train(args, train_loader, net, criterion, optimizer, epoch_str, logger, opt_config) # log the results logger.log('==>>{:s} Train [{:}] Average Loss = {:.6f}, NME = {:.2f}'.format(time_string(), epoch_str, train_loss, train_nme*100)) # remember best prec@1 and save checkpoint save_path = save_checkpoint({ 'epoch': epoch, 'args' : deepcopy(args), 'arch' : model_config.arch, 'state_dict': net.state_dict(), 'scheduler' : scheduler.state_dict(), 'optimizer' : optimizer.state_dict(), }, logger.path('model') / '{:}-{:}.pth'.format(model_config.arch, epoch_str), logger) last_info = save_checkpoint({ 'epoch': epoch, 'last_checkpoint': save_path, }, logger.last_info(), logger) eval_results = eval_all(args, eval_loaders, net, criterion, epoch_str, logger, opt_config) # measure elapsed time epoch_time.update(time.time() - start_time) start_time = time.time() logger.close()
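# The checkpoint-resume branch in main() above can be read as the following standalone
# sketch; it assumes the same keys ('epoch', 'last_checkpoint', 'state_dict', 'optimizer',
# 'scheduler') that main() writes through save_checkpoint().
def resume_from_last_info_sketch(last_info_path, net, optimizer, scheduler):
  import torch
  last_info = torch.load(last_info_path)                  # small file pointing at the latest checkpoint
  checkpoint = torch.load(last_info['last_checkpoint'])   # the full training state
  assert last_info['epoch'] == checkpoint['epoch'], 'inconsistent epoch records'
  net.load_state_dict(checkpoint['state_dict'])
  optimizer.load_state_dict(checkpoint['optimizer'])
  scheduler.load_state_dict(checkpoint['scheduler'])
  return checkpoint['epoch'] + 1                          # epoch to restart training from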
# load the snapshot and build the evaluation pipeline at module level
print('The model is {:}'.format(model_path))
snapshot = Path(model_path)
assert snapshot.exists(), 'The model path {:} does not exist'.format(snapshot)
snapshot = torch.load(snapshot)

mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]])
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

param = snapshot['args']
eval_transform = transforms.Compose([
    transforms.PreCrop(param.pre_crop_expand),
    transforms.TrainScale2WH((param.crop_width, param.crop_height)),
    transforms.ToTensor(), normalize
])
model_config = load_configure(param.model_config, None)
dataset = Dataset(eval_transform, param.sigma, model_config.downsample, param.heatmap_type, param.data_indicator)
dataset.reset(param.num_pts)

net = obtain_model(model_config, param.num_pts + 1)
net = net.cuda()
# older checkpoints store the detector weights under 'detector', newer ones under 'state_dict'
try:
    weights = remove_module_dict(snapshot['detector'])
except KeyError:
    weights = remove_module_dict(snapshot['state_dict'])
net.load_state_dict(weights)


def evaluate(args):
def evaluate(args):
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    model_name = os.path.split(args.model)[-1]
    onnx_name = os.path.splitext(model_name)[0] + ".onnx"
    print('The model is {:}'.format(args.model))
    print('Model name is {:} \nOutput onnx file is {:}'.format(model_name, onnx_name))
    snapshot = Path(args.model)
    assert snapshot.exists(), 'The model does not exist {:}'.format(snapshot)
    snapshot = torch.load(snapshot)

    # General data augmentation for evaluation
    mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]])
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    param = snapshot['args']
    print(param)
    eval_transform = transforms.Compose([
        transforms.PreCrop(param.pre_crop_expand),
        transforms.TrainScale2WH((param.crop_width, param.crop_height)),
        transforms.ToTensor(), normalize
    ])
    model_config = load_configure(param.model_config, None)
    print(model_config)
    dataset = Dataset(eval_transform, param.sigma, model_config.downsample,
                      param.heatmap_type, param.data_indicator)
    dataset.reset(param.num_pts)

    # keep the network on the CPU for the sanity-check forward pass; it is moved to GPU for the export below
    net = obtain_model(model_config, param.num_pts + 1)
    weights = remove_module_dict(snapshot['state_dict'])
    # strip the 'detector.' prefix from the checkpoint keys
    nu_weights = {}
    for key, val in weights.items():
        nu_weights[key.split('detector.')[-1]] = val
        print(key.split('detector.')[-1])
    weights = nu_weights
    net.load_state_dict(weights)

    input_name = ['image_in']
    output_name = ['locs', 'scors', 'crap']

    im = cv2.imread('Menpo51220/val/0000018.jpg')
    imshape = im.shape
    face = [0, 0, imshape[0], imshape[1]]
    [image, _, _, _, _, _, cropped_size], meta = dataset.prepare_input('Menpo51220/val/0000018.jpg', face)
    dummy_input = torch.randn(1, 3, 256, 256, requires_grad=True, dtype=torch.float32)

    inputs = image.unsqueeze(0)
    out_in = inputs.data.numpy()
    with open('pick.pick', 'wb') as crap:
        pickle.dump(out_in, crap)

    with torch.no_grad():
        batch_locs, batch_scos, heatmap = net(inputs)

    torch.onnx.export(net.cuda(), dummy_input.cuda(), onnx_name, verbose=True,
                      input_names=input_name, output_names=output_name,
                      export_params=True)

    print(batch_locs)
    print(batch_scos)
    print(heatmap)

    # obtain the locations on the image in the original size
    cpu = torch.device('cpu')
    np_batch_locs, np_batch_scos, cropped_size = batch_locs.to(cpu).numpy(), batch_scos.to(cpu).numpy(), cropped_size.numpy()
    locations = np_batch_locs[:-1, :]
    scores = np.expand_dims(np_batch_scos[:-1], -1)
    scale_h, scale_w = cropped_size[0] * 1. / inputs.size(-2), cropped_size[1] * 1. / inputs.size(-1)
    locations[:, 0], locations[:, 1] = locations[:, 0] * scale_w, locations[:, 1] * scale_h
    prediction = np.concatenate((locations, scores), axis=1).transpose(1, 0)
    pred_pts = np.transpose(prediction, [1, 0])
    pred_pts = pred_pts[:, :-1]

    sim = draw_pts(im, pred_pts=pred_pts, get_l1e=False)
    cv2.imwrite('py_0.jpg', sim)
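# After the export above, it can be useful to confirm that the ONNX graph reproduces the
# PyTorch outputs.  This sketch assumes the onnxruntime package is installed; the input
# name 'image_in' matches the name passed to torch.onnx.export().
def check_onnx_export_sketch(onnx_name, dummy_input, torch_outputs, tol=1e-4):
    import numpy as np
    import onnxruntime as ort
    sess = ort.InferenceSession(onnx_name)
    onnx_outputs = sess.run(None, {'image_in': dummy_input.detach().cpu().numpy()})
    for torch_out, onnx_out in zip(torch_outputs, onnx_outputs):
        np.testing.assert_allclose(torch_out.detach().cpu().numpy(), onnx_out, atol=tol)
    print('ONNX outputs match PyTorch outputs within atol={:}'.format(tol))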
def main(args): assert torch.cuda.is_available(), 'CUDA is not available.' torch.backends.cudnn.enabled = True torch.backends.cudnn.benchmark = True torch.set_num_threads(args.workers) print('Evaluate the Robustness of a Detector : prepare_seed : {:}'.format( args.rand_seed)) prepare_seed(args.rand_seed) assert args.init_model is not None and Path( args.init_model).exists(), 'invalid initial model path : {:}'.format( args.init_model) checkpoint = load_checkpoint(args.init_model) xargs = checkpoint['args'] eval_func = procedures[xargs.procedure] logger = prepare_logger(args) if xargs.use_gray == False: mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]]) normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) else: mean_fill = (0.5, ) normalize = transforms.Normalize(mean=[mean_fill[0]], std=[0.5]) robust_component = [ transforms.ToTensor(), normalize, transforms.PreCrop(xargs.pre_crop_expand) ] robust_component += [ transforms.RandomTrans(args.robust_scale, args.robust_offset, args.robust_rotate, args.robust_iters, args.robust_cache_dir, True) ] robust_transform = transforms.Compose3V(robust_component) logger.log('--- arguments --- : {:}'.format(args)) logger.log('robust_transform : {:}'.format(robust_transform)) recover = xvision.transforms2v.ToPILImage(normalize) model_config = load_configure(xargs.model_config, logger) shape = (xargs.height, xargs.width) logger.log('Model : {:} $$$$ Shape : {:}'.format(model_config, shape)) # Evaluation Dataloader assert args.eval_lists is not None and len( args.eval_lists) > 0, 'invalid args.eval_lists : {:}'.format( args.eval_lists) eval_loaders = [] for eval_list in args.eval_lists: eval_data = RobustDataset(robust_transform, xargs.sigma, model_config.downsample, xargs.heatmap_type, shape, xargs.use_gray, xargs.data_indicator) if xargs.x68to49: eval_data.load_list(eval_list, 68, xargs.boxindicator, True) convert68to49(eval_data) else: eval_data.load_list(eval_list, xargs.num_pts, xargs.boxindicator, True) eval_data.get_normalization_distance(None, True) if hasattr(xargs, 'batch_size'): batch_size = xargs.batch_size elif hasattr(xargs, 'i_batch_size') and xargs.i_batch_size > 0: batch_size = xargs.i_batch_size elif hasattr(xargs, 'v_batch_size') and xargs.v_batch_size > 0: batch_size = xargs.v_batch_size else: raise ValueError( 'can not find batch size information in xargs : {:}'.format( xargs)) eval_loader = torch.utils.data.DataLoader(eval_data, batch_size=batch_size, shuffle=False, num_workers=args.workers, pin_memory=True) eval_loaders.append(eval_loader) # define the detection network detector = obtain_pro_model(model_config, xargs.num_pts, xargs.sigma, xargs.use_gray) assert model_config.downsample == detector.downsample, 'downsample is not correct : {:} vs {:}'.format( model_config.downsample, detector.downsample) logger.log("=> detector :\n {:}".format(detector)) logger.log("=> Net-Parameters : {:} MB".format( count_parameters_in_MB(detector))) for i, eval_loader in enumerate(eval_loaders): logger.log('The [{:2d}/{:2d}]-th testing-data = {:}'.format( i, len(eval_loaders), eval_loader.dataset)) logger.log('basic-arguments : {:}\n'.format(xargs)) logger.log('xoxox-arguments : {:}\n'.format(args)) detector.load_state_dict(remove_module_dict(checkpoint['detector'])) detector = detector.cuda() for ieval, loader in enumerate(eval_loaders): errors, valids, meta = eval_func(detector, loader, args.print_freq, logger) logger.log( '[{:2d}/{:02d}] eval-data : error : mean={:.3f}, std={:.3f}'. 
format(ieval, len(eval_loaders), np.mean(errors), np.std(errors))) logger.log( '[{:2d}/{:02d}] eval-data : valid : mean={:.3f}, std={:.3f}'. format(ieval, len(eval_loaders), np.mean(valids), np.std(valids))) nme, auc, pck_curves = meta.compute_mse(loader.dataset.dataset_name, logger) logger.close()
def main(args): assert torch.cuda.is_available(), 'CUDA is not available.' torch.backends.cudnn.enabled = True torch.backends.cudnn.benchmark = True prepare_seed(args.rand_seed) logstr = 'seed-{:}-time-{:}'.format(args.rand_seed, time_for_file()) logger = Logger(args.save_path, logstr) logger.log('Main Function with logger : {:}'.format(logger)) logger.log('Arguments : -------------------------------') for name, value in args._get_kwargs(): logger.log('{:16} : {:}'.format(name, value)) logger.log("Python version : {}".format(sys.version.replace('\n', ' '))) logger.log("Pillow version : {}".format(PIL.__version__)) logger.log("PyTorch version : {}".format(torch.__version__)) logger.log("cuDNN version : {}".format(torch.backends.cudnn.version())) # General Data Argumentation mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]]) normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) assert args.arg_flip == False, 'The flip is : {}, rotate is {}'.format( args.arg_flip, args.rotate_max) train_transform = [transforms.PreCrop(args.pre_crop_expand)] train_transform += [ transforms.TrainScale2WH((args.crop_width, args.crop_height)) ] train_transform += [ transforms.AugScale(args.scale_prob, args.scale_min, args.scale_max) ] #if args.arg_flip: # train_transform += [transforms.AugHorizontalFlip()] if args.rotate_max: train_transform += [transforms.AugRotate(args.rotate_max)] train_transform += [ transforms.AugCrop(args.crop_width, args.crop_height, args.crop_perturb_max, mean_fill) ] train_transform += [transforms.ToTensor(), normalize] train_transform = transforms.Compose(train_transform) eval_transform = transforms.Compose([ transforms.PreCrop(args.pre_crop_expand), transforms.TrainScale2WH((args.crop_width, args.crop_height)), transforms.ToTensor(), normalize ]) assert ( args.scale_min + args.scale_max ) / 2 == args.scale_eval, 'The scale is not ok : {},{} vs {}'.format( args.scale_min, args.scale_max, args.scale_eval) # Model Configure Load model_config = load_configure(args.model_config, logger) args.sigma = args.sigma * args.scale_eval logger.log('Real Sigma : {:}'.format(args.sigma)) # Training Dataset train_data = VDataset(train_transform, args.sigma, model_config.downsample, args.heatmap_type, args.data_indicator, args.video_parser) train_data.load_list(args.train_lists, args.num_pts, True) train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True) # Evaluation Dataloader eval_loaders = [] if args.eval_vlists is not None: for eval_vlist in args.eval_vlists: eval_vdata = IDataset(eval_transform, args.sigma, model_config.downsample, args.heatmap_type, args.data_indicator) eval_vdata.load_list(eval_vlist, args.num_pts, True) eval_vloader = torch.utils.data.DataLoader( eval_vdata, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True) eval_loaders.append((eval_vloader, True)) if args.eval_ilists is not None: for eval_ilist in args.eval_ilists: eval_idata = IDataset(eval_transform, args.sigma, model_config.downsample, args.heatmap_type, args.data_indicator) eval_idata.load_list(eval_ilist, args.num_pts, True) eval_iloader = torch.utils.data.DataLoader( eval_idata, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True) eval_loaders.append((eval_iloader, False)) # Define network lk_config = load_configure(args.lk_config, logger) logger.log('model configure : {:}'.format(model_config)) logger.log('LK configure : 
{:}'.format(lk_config)) net = obtain_model(model_config, lk_config, args.num_pts + 1) assert model_config.downsample == net.downsample, 'downsample is not correct : {} vs {}'.format( model_config.downsample, net.downsample) logger.log("=> network :\n {}".format(net)) logger.log('Training-data : {:}'.format(train_data)) for i, eval_loader in enumerate(eval_loaders): eval_loader, is_video = eval_loader logger.log('The [{:2d}/{:2d}]-th testing-data [{:}] = {:}'.format( i, len(eval_loaders), 'video' if is_video else 'image', eval_loader.dataset)) logger.log('arguments : {:}'.format(args)) opt_config = load_configure(args.opt_config, logger) if hasattr(net, 'specify_parameter'): net_param_dict = net.specify_parameter(opt_config.LR, opt_config.Decay) else: net_param_dict = net.parameters() optimizer, scheduler, criterion = obtain_optimizer(net_param_dict, opt_config, logger) logger.log('criterion : {:}'.format(criterion)) net, criterion = net.cuda(), criterion.cuda() net = torch.nn.DataParallel(net) last_info = logger.last_info() if last_info.exists(): logger.log("=> loading checkpoint of the last-info '{:}' start".format( last_info)) last_info = torch.load(last_info) start_epoch = last_info['epoch'] + 1 checkpoint = torch.load(last_info['last_checkpoint']) assert last_info['epoch'] == checkpoint[ 'epoch'], 'Last-Info is not right {:} vs {:}'.format( last_info, checkpoint['epoch']) net.load_state_dict(checkpoint['state_dict']) optimizer.load_state_dict(checkpoint['optimizer']) scheduler.load_state_dict(checkpoint['scheduler']) logger.log("=> load-ok checkpoint '{:}' (epoch {:}) done".format( logger.last_info(), checkpoint['epoch'])) elif args.init_model is not None: init_model = Path(args.init_model) assert init_model.exists(), 'init-model {:} does not exist'.format( init_model) checkpoint = torch.load(init_model) checkpoint = remove_module_dict(checkpoint['state_dict'], True) net.module.detector.load_state_dict(checkpoint) logger.log("=> initialize the detector : {:}".format(init_model)) start_epoch = 0 else: logger.log("=> do not find the last-info file : {:}".format(last_info)) start_epoch = 0 detector = torch.nn.DataParallel(net.module.detector) eval_results = eval_all(args, eval_loaders, detector, criterion, 'start-eval', logger, opt_config) if args.eval_once: logger.log("=> only evaluate the model once") logger.close() return # Main Training and Evaluation Loop start_time = time.time() epoch_time = AverageMeter() for epoch in range(start_epoch, opt_config.epochs): scheduler.step() need_time = convert_secs2time( epoch_time.avg * (opt_config.epochs - epoch), True) epoch_str = 'epoch-{:03d}-{:03d}'.format(epoch, opt_config.epochs) LRs = scheduler.get_lr() logger.log( '\n==>>{:s} [{:s}], [{:s}], LR : [{:.5f} ~ {:.5f}], Config : {:}'. 
format(time_string(), epoch_str, need_time, min(LRs), max(LRs), opt_config)) # train for one epoch train_loss = train(args, train_loader, net, criterion, optimizer, epoch_str, logger, opt_config, lk_config, epoch >= lk_config.start) # log the results logger.log('==>>{:s} Train [{:}] Average Loss = {:.6f}'.format( time_string(), epoch_str, train_loss)) # remember best prec@1 and save checkpoint save_path = save_checkpoint( { 'epoch': epoch, 'args': deepcopy(args), 'arch': model_config.arch, 'state_dict': net.state_dict(), 'detector': detector.state_dict(), 'scheduler': scheduler.state_dict(), 'optimizer': optimizer.state_dict(), }, logger.path('model') / '{:}-{:}.pth'.format(model_config.arch, epoch_str), logger) last_info = save_checkpoint( { 'epoch': epoch, 'last_checkpoint': save_path, }, logger.last_info(), logger) eval_results = eval_all(args, eval_loaders, detector, criterion, epoch_str, logger, opt_config) # measure elapsed time epoch_time.update(time.time() - start_time) start_time = time.time() logger.close()
def evaluate(args):
    if args.cuda:
        assert torch.cuda.is_available(), 'CUDA is not available.'
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = True
    else:
        print('Use the CPU mode')
    print('The image is {:}'.format(args.image))
    print('The model is {:}'.format(args.model))
    last_info = Path(args.model)
    assert last_info.exists(), 'The model path {:} does not exist'.format(last_info)
    last_info = torch.load(last_info, map_location=torch.device('cpu'))
    snapshot = last_info['last_checkpoint']
    assert snapshot.exists(), 'The model path {:} does not exist'.format(snapshot)
    print('The face bounding box is {:}'.format(args.face))
    assert len(args.face) == 4, 'Invalid face input : {:}'.format(args.face)
    snapshot = torch.load(snapshot, map_location=torch.device('cpu'))
    param = snapshot['args']

    # General data augmentation for evaluation
    if param.use_gray == False:
        mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]])
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    else:
        mean_fill = (0.5, )
        normalize = transforms.Normalize(mean=[mean_fill[0]], std=[0.5])
    eval_transform = transforms.Compose2V([
        transforms.ToTensor(), normalize,
        transforms.PreCrop(param.pre_crop_expand),
        transforms.CenterCrop(param.crop_max)
    ])
    model_config = load_configure(param.model_config, None)
    dataset = Dataset(eval_transform, param.sigma, model_config.downsample,
                      param.heatmap_type, (120, 96), param.use_gray, None,
                      param.data_indicator)
    #dataset = Dataset(eval_transform, param.sigma, model_config.downsample, param.heatmap_type, (param.height, param.width), param.use_gray, None, param.data_indicator)
    dataset.reset(param.num_pts)

    net = obtain_pro_model(model_config, param.num_pts + 1, param.sigma, param.use_gray)
    net.load_state_dict(remove_module_dict(snapshot['state_dict']))
    if args.cuda:
        net = net.cuda()

    print('Processing the input face image.')
    face_meta = PointMeta(dataset.NUM_PTS, None, args.face, args.image, 'BASE-EVAL')
    face_img = pil_loader(args.image, dataset.use_gray)
    affineImage, heatmaps, mask, norm_trans_points, transthetas, _, _, _, shape = dataset._process_(face_img, face_meta, -1)

    # network forward
    with torch.no_grad():
        if args.cuda:
            inputs = affineImage.unsqueeze(0).cuda()
        else:
            inputs = affineImage.unsqueeze(0)
        _, _, batch_locs, batch_scos = net(inputs)
    batch_locs, batch_scos = batch_locs.cpu(), batch_scos.cpu()

    # map the predictions from the affine-normalized crop back to the original image
    (batch_size, C, H, W), num_pts = inputs.size(), param.num_pts
    locations, scores = batch_locs[0, :-1, :], batch_scos[:, :-1]
    norm_locs = normalize_points((H, W), locations.transpose(1, 0))
    norm_locs = torch.cat((norm_locs, torch.ones(1, num_pts)), dim=0)
    transtheta = transthetas[:2, :]
    norm_locs = torch.mm(transtheta, norm_locs)
    real_locs = denormalize_points(shape.tolist(), norm_locs)
    real_locs = torch.cat((real_locs, scores), dim=0)

    print('the coordinates for {:} facial landmarks:'.format(param.num_pts))
    for i in range(param.num_pts):
        point = real_locs[:, i]
        print('the {:02d}/{:02d}-th landmark : ({:.1f}, {:.1f}), score = {:.2f}'.format(
            i, param.num_pts, float(point[0]), float(point[1]), float(point[2])))

    if args.save:
        resize = 512
        image = draw_image_by_points(args.image, real_locs, 2, (255, 0, 0), args.face, resize)
        image.save(args.save)
        print('save the visualization results into {:}'.format(args.save))
    else:
        print('ignore the visualization procedure')
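# evaluate() above relies on normalize_points() / denormalize_points() (not defined in
# this file) to move between pixel coordinates and the normalized space of the affine
# theta.  A minimal sketch, assuming the usual affine-grid convention where pixel 0 maps
# to -1 and pixel (size - 1) maps to +1:
def normalize_points_sketch(shape, points):
    # shape : (H, W); points : 2 x N tensor of (x, y) pixel coordinates
    H, W = shape
    normed = points.clone().float()
    normed[0] = points[0] / (W - 1) * 2 - 1
    normed[1] = points[1] / (H - 1) * 2 - 1
    return normed

def denormalize_points_sketch(shape, points):
    # inverse mapping: from [-1, 1] coordinates back to pixels in an image of the given shape
    H, W = shape
    denormed = points.clone().float()
    denormed[0] = (points[0] + 1) / 2 * (W - 1)
    denormed[1] = (points[1] + 1) / 2 * (H - 1)
    return denormed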
def main(args): assert torch.cuda.is_available(), 'CUDA is not available.' torch.backends.cudnn.enabled = True torch.backends.cudnn.benchmark = True torch.set_num_threads( args.workers ) print ('Training Base Detector : prepare_seed : {:}'.format(args.rand_seed)) prepare_seed(args.rand_seed) temporal_main, eval_all = procedures['{:}-train'.format(args.procedure)], procedures['{:}-test'.format(args.procedure)] logger = prepare_logger(args) # General Data Argumentation normalize, train_transform, eval_transform, robust_transform = prepare_data_augmentation(transforms, args) recover = transforms.ToPILImage(normalize) args.tensor2imageF = recover assert (args.scale_min+args.scale_max) / 2 == 1, 'The scale is not ok : {:} ~ {:}'.format(args.scale_min, args.scale_max) # Model Configure Load model_config = load_configure(args.model_config, logger) sbr_config = load_configure(args.sbr_config, logger) shape = (args.height, args.width) logger.log('--> {:}\n--> Sigma : {:}, Shape : {:}'.format(model_config, args.sigma, shape)) logger.log('--> SBR Configuration : {:}\n'.format(sbr_config)) # Training Dataset train_data = VDataset(train_transform, args.sigma, model_config.downsample, args.heatmap_type, shape, args.use_gray, args.mean_point, \ args.data_indicator, sbr_config, transforms.ToPILImage(normalize, 'cv2gray')) train_data.load_list(args.train_lists, args.num_pts, args.boxindicator, args.normalizeL, True) batch_sampler = SbrBatchSampler(train_data, args.i_batch_size, args.v_batch_size, args.sbr_sampler_use_vid) train_loader = torch.utils.data.DataLoader(train_data, batch_sampler=batch_sampler, num_workers=args.workers, pin_memory=True) # Evaluation Dataloader eval_loaders = [] if args.eval_ilists is not None: for eval_ilist in args.eval_ilists: eval_idata = IDataset(eval_transform, args.sigma, model_config.downsample, args.heatmap_type, shape, args.use_gray, args.mean_point, args.data_indicator) eval_idata.load_list(eval_ilist, args.num_pts, args.boxindicator, args.normalizeL, True) eval_iloader = torch.utils.data.DataLoader(eval_idata, batch_size=args.i_batch_size+args.v_batch_size, shuffle=False, num_workers=args.workers, pin_memory=True) eval_loaders.append((eval_iloader, False)) if args.eval_vlists is not None: for eval_vlist in args.eval_vlists: eval_vdata = IDataset(eval_transform, args.sigma, model_config.downsample, args.heatmap_type, shape, args.use_gray, args.mean_point, args.data_indicator) eval_vdata.load_list(eval_vlist, args.num_pts, args.boxindicator, args.normalizeL, True) eval_vloader = torch.utils.data.DataLoader(eval_vdata, batch_size=args.i_batch_size+args.v_batch_size, shuffle=False, num_workers=args.workers, pin_memory=True) eval_loaders.append((eval_vloader, True)) # from 68 points to 49 points, removing the face contour if args.x68to49: assert args.num_pts == 68, 'args.num_pts is not 68 vs. 
{:}'.format(args.num_pts) if train_data is not None: train_data = convert68to49( train_data ) for eval_loader, is_video in eval_loaders: convert68to49( eval_loader.dataset ) args.num_pts = 49 # define the temporal model (accelerated SBR) net = obtain_pro_temporal(model_config, sbr_config, args.num_pts, args.sigma, args.use_gray) assert model_config.downsample == net.downsample, 'downsample is not correct : {:} vs {:}'.format(model_config.downsample, net.downsample) logger.log("=> network :\n {}".format(net)) logger.log('Training-data : {:}'.format(train_data)) for i, eval_loader in enumerate(eval_loaders): eval_loader, is_video = eval_loader logger.log('The [{:2d}/{:2d}]-th testing-data [{:}] = {:}'.format(i, len(eval_loaders), 'video' if is_video else 'image', eval_loader.dataset)) logger.log('arguments : {:}'.format(args)) opt_config = load_configure(args.opt_config, logger) if hasattr(net, 'specify_parameter'): net_param_dict = net.specify_parameter(opt_config.LR, opt_config.weight_decay) else : net_param_dict = net.parameters() optimizer, scheduler, criterion = obtain_optimizer(net_param_dict, opt_config, logger) logger.log('criterion : {:}'.format(criterion)) net, criterion = net.cuda(), criterion.cuda() net = torch.nn.DataParallel(net) last_info = logger.last_info() if last_info.exists(): logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info)) last_info = torch.load(last_info) start_epoch = last_info['epoch'] + 1 checkpoint = torch.load(last_info['last_checkpoint']) test_accuracies = checkpoint['test_accuracies'] assert last_info['epoch'] == checkpoint['epoch'], 'Last-Info is not right {:} vs {:}'.format(last_info, checkpoint['epoch']) net.load_state_dict(checkpoint['state_dict']) optimizer.load_state_dict(checkpoint['optimizer']) scheduler.load_state_dict(checkpoint['scheduler']) logger.log("=> load-ok checkpoint '{:}' (epoch {:}) done" .format(logger.last_info(), checkpoint['epoch'])) elif args.init_model is not None: last_checkpoint = load_checkpoint(args.init_model) checkpoint = remove_module_dict(last_checkpoint['state_dict'], False) net.module.detector.load_state_dict( checkpoint ) logger.log("=> initialize the detector : {:}".format(args.init_model)) start_epoch, test_accuracies = 0, {'best': 10000} else: logger.log("=> do not find the last-info file : {:}".format(last_info)) start_epoch, test_accuracies = 0, {'best': 10000} detector = torch.nn.DataParallel(net.module.detector) if args.skip_first_eval == False: logger.log('===>>> First Time Evaluation') eval_results, eval_metas = eval_all(args, eval_loaders, detector, criterion, 'Before-Training', logger, opt_config, None) save_path = save_checkpoint(eval_metas, logger.path('meta') / '{:}-first.pth'.format(model_config.arch), logger) logger.log('===>>> Before Training : {:}'.format(eval_results)) # Main Training and Evaluation Loop start_time = time.time() epoch_time = AverageMeter() for epoch in range(start_epoch, opt_config.epochs): need_time = convert_secs2time(epoch_time.avg * (opt_config.epochs-epoch), True) epoch_str = 'epoch-{:03d}-{:03d}'.format(epoch, opt_config.epochs) LRs = scheduler.get_lr() logger.log('\n==>>{:s} [{:s}], [{:s}], LR : [{:.5f} ~ {:.5f}], Config : {:}'.format(time_string(), epoch_str, need_time, min(LRs), max(LRs), opt_config)) # train for one epoch train_loss, train_nme = temporal_main(args, train_loader, net, criterion, optimizer, epoch_str, logger, opt_config, sbr_config, epoch>=sbr_config.start, 'train') scheduler.step() # log the results logger.log('==>>{:s} Train 
[{:}] Average Loss = {:.6f}, NME = {:.2f}'.format(time_string(), epoch_str, train_loss, train_nme*100)) save_path = save_checkpoint({ 'epoch': epoch, 'args' : deepcopy(args), 'arch' : model_config.arch, 'detector' : detector.state_dict(), 'test_accuracies': test_accuracies, 'state_dict': net.state_dict(), 'scheduler' : scheduler.state_dict(), 'optimizer' : optimizer.state_dict(), }, logger.path('model') / 'ckp-seed-{:}-last-{:}.pth'.format(args.rand_seed, model_config.arch), logger) last_info = save_checkpoint({ 'epoch': epoch, 'last_checkpoint': save_path, }, logger.last_info(), logger) if (args.eval_freq is None) or (epoch+1 == opt_config.epochs) or (epoch%args.eval_freq == 0): if epoch+1 == opt_config.epochs: _robust_transform = robust_transform else : _robust_transform = None logger.log('') eval_results, eval_metas = eval_all(args, eval_loaders, detector, criterion, epoch_str, logger, opt_config, _robust_transform) # check whether it is the best and save with copyfile(src, dst) try: cur_eval_nme = float( eval_results.split('NME = ')[1].split(' ')[0] ) except: cur_eval_nme = 1e9 test_accuracies[epoch] = cur_eval_nme if test_accuracies['best'] > cur_eval_nme: # find the lowest error dest_path = logger.path('model') / 'ckp-seed-{:}-best-{:}.pth'.format(args.rand_seed, model_config.arch) copyfile(save_path, dest_path) logger.log('==>> find lowest error = {:}, save into {:}'.format(cur_eval_nme, dest_path)) meta_save_path = save_checkpoint(eval_metas, logger.path('meta') / '{:}-{:}.pth'.format(model_config.arch, epoch_str), logger) logger.log('==>> evaluation results : {:}'.format(eval_results)) # measure elapsed time epoch_time.update(time.time() - start_time) start_time = time.time() logger.log('Final checkpoint into {:}'.format(logger.last_info())) logger.close()
def evaluate(args): assert torch.cuda.is_available(), 'CUDA is not available.' torch.backends.cudnn.enabled = True torch.backends.cudnn.benchmark = True print('The image is {:}'.format(args.image)) print('The model is {:}'.format(args.model)) snapshot = Path(args.model) assert snapshot.exists(), 'The model path {:} does not exist' snapshot = torch.load(snapshot) # General Data Argumentation mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]]) normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) param = snapshot['args'] eval_transform = transforms.Compose([ transforms.PreCrop(param.pre_crop_expand), transforms.TrainScale2WH((param.crop_width, param.crop_height)), transforms.ToTensor(), normalize ]) model_config = load_configure(param.model_config, None) dataset = Dataset(eval_transform, param.sigma, model_config.downsample, param.heatmap_type, param.data_indicator) dataset.reset(param.num_pts) net = obtain_model(model_config, param.num_pts + 1) net = net.cuda() weights = remove_module_dict(snapshot['state_dict']) nu_weights = {} for key, val in weights.items(): nu_weights[key.split('detector.')[-1]] = val print(key.split('detector.')[-1]) weights = nu_weights net.load_state_dict(weights) print('Prepare input data') l1 = [] record_writer = Collection_engine.produce_generator() total_images = len(images) for im_ind, aimage in enumerate(images): progressbar(im_ind, total_images) pts_name = os.path.splitext(aimage)[0] + '.pts' pts_full = _pts_path_ + pts_name gtpts = get_pts(pts_full, 90) aim = _image_path + aimage args.image = aim im = cv2.imread(aim) imshape = im.shape args.face = [0, 0, imshape[0], imshape[1]] [image, _, _, _, _, _, cropped_size], meta = dataset.prepare_input(args.image, args.face) inputs = image.unsqueeze(0).cuda() # network forward with torch.no_grad(): batch_heatmaps, batch_locs, batch_scos = net(inputs) # obtain the locations on the image in the orignial size cpu = torch.device('cpu') np_batch_locs, np_batch_scos, cropped_size = batch_locs.to( cpu).numpy(), batch_scos.to(cpu).numpy(), cropped_size.numpy() locations, scores = np_batch_locs[0, :-1, :], np.expand_dims( np_batch_scos[0, :-1], -1) scale_h, scale_w = cropped_size[0] * 1. / inputs.size( -2), cropped_size[1] * 1. / inputs.size(-1) locations[:, 0], locations[:, 1] = locations[:, 0] * scale_w + cropped_size[ 2], locations[:, 1] * scale_h + cropped_size[3] prediction = np.concatenate((locations, scores), axis=1).transpose(1, 0) #print ('the coordinates for {:} facial landmarks:'.format(param.num_pts)) for i in range(param.num_pts): point = prediction[:, i] #print ('the {:02d}/{:02d}-th point : ({:.1f}, {:.1f}), score = {:.2f}'.format(i, param.num_pts, float(point[0]), float(point[1]), float(point[2]))) if args.save: args.save = _output_path + aimage resize = 512 #image = draw_image_by_points(args.image, prediction, 2, (255, 0, 0), args.face, resize) #sim, l1e =draw_pts(im, gt_pts=gtpts, pred_pts=prediction, get_l1e=True) #print(np.mean(l1e)) #l1.append(np.mean(l1e)) pred_pts = np.transpose(prediction, [1, 0]) pred_pts = pred_pts[:, :-1] record_writer.consume_data(im, gt_pts=gtpts, pred_pts=pred_pts, name=aimage) #cv2.imwrite(_output_path+aimage, sim) #image.save(args.save) #print ('save the visualization results into {:}'.format(args.save)) else: print('ignore the visualization procedure') record_writer.post_process() record_writer.generate_output(output_path=_output_path, epochs=50, name='Supervision By Registration')
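# The same coordinate recovery appears in each evaluate() above: landmarks predicted in
# the resized crop are rescaled and shifted back into the original image.  A standalone
# sketch of that step, assuming cropped_size holds [crop_height, crop_width, offset_x,
# offset_y] as it is indexed in the code above:
def crop_coords_to_image_sketch(locations, cropped_size, input_h, input_w):
    import numpy as np
    locations = np.asarray(locations, dtype=np.float32).copy()  # N x 2 array of (x, y)
    scale_h = cropped_size[0] * 1.0 / input_h
    scale_w = cropped_size[1] * 1.0 / input_w
    locations[:, 0] = locations[:, 0] * scale_w + cropped_size[2]
    locations[:, 1] = locations[:, 1] * scale_h + cropped_size[3]
    return locations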
def main(args): assert torch.cuda.is_available(), 'CUDA is not available.' torch.backends.cudnn.enabled = True torch.backends.cudnn.benchmark = True torch.set_num_threads(args.workers) print('Training Base Detector : prepare_seed : {:}'.format(args.rand_seed)) prepare_seed(args.rand_seed) basic_main, eval_all = procedures['{:}-train'.format( args.procedure)], procedures['{:}-test'.format(args.procedure)] logger = prepare_logger(args) # General Data Augmentation normalize, train_transform, eval_transform, robust_transform = prepare_data_augmentation( transforms, args) #data_cache = get_path2image( args.shared_img_cache ) data_cache = None recover = transforms.ToPILImage(normalize) args.tensor2imageF = recover assert (args.scale_min + args.scale_max) / 2 == 1, 'The scale is not ok : {:} ~ {:}'.format( args.scale_min, args.scale_max) logger.log('robust_transform : {:}'.format(robust_transform)) # Model Configure Load model_config = load_configure(args.model_config, logger) shape = (args.height, args.width) logger.log('--> {:}\n--> Sigma : {:}, Shape : {:}'.format( model_config, args.sigma, shape)) # Training Dataset if args.train_lists: train_data = Dataset(train_transform, args.sigma, model_config.downsample, args.heatmap_type, shape, args.use_gray, args.mean_point, args.data_indicator, data_cache) safex_data = Dataset(eval_transform, args.sigma, model_config.downsample, args.heatmap_type, shape, args.use_gray, args.mean_point, args.data_indicator, data_cache) train_data.set_cutout(args.cutout_length) safex_data.set_cutout(args.cutout_length) train_data.load_list(args.train_lists, args.num_pts, args.boxindicator, args.normalizeL, True) safex_data.load_list(args.train_lists, args.num_pts, args.boxindicator, args.normalizeL, True) if args.sampler is None: train_loader = torch.utils.data.DataLoader( train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, drop_last=True, pin_memory=True) safex_loader = torch.utils.data.DataLoader( safex_data, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, drop_last=True, pin_memory=True) else: train_sampler = SpecialBatchSampler(train_data, args.batch_size, args.sampler) safex_sampler = SpecialBatchSampler(safex_data, args.batch_size, args.sampler) logger.log('Training-sampler : {:}'.format(train_sampler)) train_loader = torch.utils.data.DataLoader( train_data, batch_sampler=train_sampler, num_workers=args.workers, pin_memory=True) safex_loader = torch.utils.data.DataLoader( safex_data, batch_sampler=safex_sampler, num_workers=args.workers, pin_memory=True) logger.log('Training-data : {:}'.format(train_data)) else: train_data, safex_loader = None, None #train_data[0] # Evaluation Dataloader eval_loaders = [] if args.eval_ilists is not None: for eval_ilist in args.eval_ilists: eval_idata = Dataset(eval_transform, args.sigma, model_config.downsample, args.heatmap_type, shape, args.use_gray, args.mean_point, args.data_indicator, data_cache) eval_idata.load_list(eval_ilist, args.num_pts, args.boxindicator, args.normalizeL, True) eval_iloader = torch.utils.data.DataLoader( eval_idata, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True) eval_loaders.append((eval_iloader, False)) if args.eval_vlists is not None: for eval_vlist in args.eval_vlists: eval_vdata = Dataset(eval_transform, args.sigma, model_config.downsample, args.heatmap_type, shape, args.use_gray, args.mean_point, args.data_indicator, data_cache) eval_vdata.load_list(eval_vlist, args.num_pts, args.boxindicator, args.normalizeL, 
True) eval_vloader = torch.utils.data.DataLoader( eval_vdata, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True) eval_loaders.append((eval_vloader, True)) # from 68 points to 49 points, removing the face contour if args.x68to49: assert args.num_pts == 68, 'args.num_pts is not 68 vs. {:}'.format( args.num_pts) if train_data is not None: train_data = convert68to49(train_data) for eval_loader, is_video in eval_loaders: convert68to49(eval_loader.dataset) args.num_pts = 49 # define the detector detector = obtain_pro_model(model_config, args.num_pts, args.sigma, args.use_gray) assert model_config.downsample == detector.downsample, 'downsample is not correct : {:} vs {:}'.format( model_config.downsample, detector.downsample) logger.log("=> detector :\n {:}".format(detector)) logger.log("=> Net-Parameters : {:} MB".format( count_parameters_in_MB(detector))) for i, eval_loader in enumerate(eval_loaders): eval_loader, is_video = eval_loader logger.log('The [{:2d}/{:2d}]-th testing-data [{:}] = {:}'.format( i, len(eval_loaders), 'video' if is_video else 'image', eval_loader.dataset)) logger.log('arguments : {:}\n'.format(args)) logger.log('train_transform : {:}'.format(train_transform)) logger.log('eval_transform : {:}'.format(eval_transform)) opt_config = load_configure(args.opt_config, logger) if hasattr(detector, 'specify_parameter'): net_param_dict = detector.specify_parameter(opt_config.LR, opt_config.weight_decay) else: net_param_dict = detector.parameters() optimizer, scheduler, criterion = obtain_optimizer(net_param_dict, opt_config, logger) logger.log('criterion : {:}'.format(criterion)) detector, criterion = detector.cuda(), criterion.cuda() net = torch.nn.DataParallel(detector) last_info = logger.last_info() if last_info.exists(): logger.log("=> loading checkpoint of the last-info '{:}' start".format( last_info)) last_info = torch.load(last_info) start_epoch = last_info['epoch'] + 1 checkpoint = torch.load(last_info['last_checkpoint']) assert last_info['epoch'] == checkpoint[ 'epoch'], 'Last-Info is not right {:} vs {:}'.format( last_info, checkpoint['epoch']) net.load_state_dict(checkpoint['state_dict']) optimizer.load_state_dict(checkpoint['optimizer']) scheduler.load_state_dict(checkpoint['scheduler']) logger.log("=> load-ok checkpoint '{:}' (epoch {:}) done".format( logger.last_info(), checkpoint['epoch'])) elif args.init_model is not None: last_checkpoint = load_checkpoint(args.init_model) net.load_state_dict(last_checkpoint['detector']) logger.log("=> initialize the detector : {:}".format(args.init_model)) start_epoch = 0 else: logger.log("=> do not find the last-info file : {:}".format(last_info)) start_epoch = 0 if args.eval_once is not None: logger.log("=> only evaluate the model once") #if safex_loader is not None: # safe_results, safe_metas = eval_all(args, [(safex_loader, False)], net, criterion, 'eval-once-train', logger, opt_config, robust_transform) # logger.log('-'*50 + ' evaluate the training set') #import pdb; pdb.set_trace() eval_results, eval_metas = eval_all(args, eval_loaders, net, criterion, 'eval-once', logger, opt_config, robust_transform) all_predictions = [eval_meta.predictions for eval_meta in eval_metas] torch.save( all_predictions, osp.join(args.save_path, '{:}-predictions.pth'.format(args.eval_once))) logger.log('==>> evaluation results : {:}'.format(eval_results)) logger.log('==>> configuration : {:}'.format(model_config)) logger.close() return # Main Training and Evaluation Loop start_time = time.time() epoch_time = 
AverageMeter() for epoch in range(start_epoch, opt_config.epochs): need_time = convert_secs2time( epoch_time.avg * (opt_config.epochs - epoch), True) epoch_str = 'epoch-{:03d}-{:03d}'.format(epoch, opt_config.epochs) LRs = scheduler.get_lr() logger.log( '\n==>>{:s} [{:s}], [{:s}], LR : [{:.5f} ~ {:.5f}], Config : {:}'. format(time_string(), epoch_str, need_time, min(LRs), max(LRs), opt_config)) # train for one epoch train_loss, train_meta, train_nme = basic_main(args, train_loader, net, criterion, optimizer, epoch_str, logger, opt_config, 'train') scheduler.step() # log the results logger.log( '==>>{:s} Train [{:}] Average Loss = {:.6f}, NME = {:.2f}'.format( time_string(), epoch_str, train_loss, train_nme * 100)) save_path = save_checkpoint( { 'epoch': epoch, 'args': deepcopy(args), 'arch': model_config.arch, 'detector': net.state_dict(), 'state_dict': net.state_dict(), 'scheduler': scheduler.state_dict(), 'optimizer': optimizer.state_dict(), }, logger.path('model') / 'seed-{:}-{:}.pth'.format(args.rand_seed, model_config.arch), logger) last_info = save_checkpoint( { 'epoch': epoch, 'args': deepcopy(args), 'last_checkpoint': save_path, }, logger.last_info(), logger) if (args.eval_freq is None) or (epoch + 1 == opt_config.epochs) or ( epoch % args.eval_freq == 0): if epoch + 1 == opt_config.epochs: _robust_transform = robust_transform else: _robust_transform = None logger.log('') eval_results, eval_metas = eval_all(args, eval_loaders, net, criterion, epoch_str, logger, opt_config, _robust_transform) #save_path = save_checkpoint(eval_metas, logger.path('meta') / '{:}-{:}.pth'.format(model_config.arch, epoch_str), logger) save_path = save_checkpoint( eval_metas, logger.path('meta') / 'seed-{:}-{:}.pth'.format(args.rand_seed, model_config.arch), logger) logger.log( '==>> evaluation results : {:}\n==>> save evaluation results into {:}.' .format(eval_results, save_path)) # measure elapsed time epoch_time.update(time.time() - start_time) start_time = time.time() logger.log('Final checkpoint into {:}'.format(logger.last_info())) logger.close()
def main(args):
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    torch.set_num_threads(args.workers)
    print('Training Base Detector : prepare_seed : {:}'.format(args.rand_seed))
    prepare_seed(args.rand_seed)
    temporal_main, eval_all = procedures['{:}-train'.format(args.procedure)], procedures['{:}-test'.format(args.procedure)]

    logger = prepare_logger(args)

    # General Data Augmentation
    normalize, train_transform, eval_transform, robust_transform = prepare_data_augmentation(transforms, args)
    recover = transforms.ToPILImage(normalize)
    args.tensor2imageF = recover
    assert (args.scale_min + args.scale_max) / 2 == 1, 'The scale is not ok : {:} ~ {:}'.format(args.scale_min, args.scale_max)

    # Model Configure Load
    model_config = load_configure(args.model_config, logger)
    sbr_config = load_configure(args.sbr_config, logger)
    shape = (args.height, args.width)
    logger.log('--> {:}\n--> Sigma : {:}, Shape : {:}'.format(model_config, args.sigma, shape))
    logger.log('--> SBR Configuration : {:}\n'.format(sbr_config))

    # Training Dataset
    train_data = VDataset(eval_transform, args.sigma, model_config.downsample, args.heatmap_type,
                          shape, args.use_gray, args.mean_point, args.data_indicator,
                          sbr_config, transforms.ToPILImage(normalize, 'cv2gray'))
    train_data.load_list(args.train_lists, args.num_pts, args.boxindicator, args.normalizeL, True)

    if args.x68to49:
        assert args.num_pts == 68, 'args.num_pts is not 68 vs. {:}'.format(args.num_pts)
        if train_data is not None:
            train_data = convert68to49(train_data)
        args.num_pts = 49

    # define the temporal model (accelerated SBR)
    net = obtain_pro_temporal(model_config, sbr_config, args.num_pts, args.sigma, args.use_gray)
    assert model_config.downsample == net.downsample, 'downsample is not correct : {:} vs {:}'.format(model_config.downsample, net.downsample)
    logger.log("=> network :\n {}".format(net))

    logger.log('Training-data : {:}'.format(train_data))
    logger.log('arguments : {:}'.format(args))
    opt_config = load_configure(args.opt_config, logger)

    optimizer, scheduler, criterion = obtain_optimizer(net.parameters(), opt_config, logger)
    logger.log('criterion : {:}'.format(criterion))
    net, criterion = net.cuda(), criterion.cuda()
    net = torch.nn.DataParallel(net)

    last_info = logger.last_info()
    try:
        last_checkpoint = load_checkpoint(args.init_model)
        checkpoint = remove_module_dict(last_checkpoint['state_dict'], False)
        net.module.detector.load_state_dict(checkpoint)
    except Exception:
        last_checkpoint = load_checkpoint(args.init_model)
        net.load_state_dict(last_checkpoint['state_dict'])
    detector = torch.nn.DataParallel(net.module.detector)
    logger.log("=> initialize the detector : {:}".format(args.init_model))

    net.eval()
    detector.eval()

    logger.log('SBR Config : {:}'.format(sbr_config))
    save_xdir = logger.path('meta')
    random.seed(111)
    index_list = list(range(len(train_data)))
    random.shuffle(index_list)
    #selected_list = index_list[: min(200, len(index_list))]
    #selected_list = [7260, 11506, 39952, 75196, 51614, 41061, 37747, 41355]
    index_list.remove(47875)
    selected_list = [47875] + index_list

    type_error_1, type_error_2, type_error, misses = 0, 0, 0, 0
    type_error_pts, total_pts = 0, 0
    for iidx, i in enumerate(selected_list):
        frames, Fflows, Bflows, targets, masks, normpoints, transthetas, meanthetas, image_index, nopoints, shapes, is_images = train_data[i]
        frames, Fflows, Bflows, is_images = frames.unsqueeze(0), Fflows.unsqueeze(0), Bflows.unsqueeze(0), is_images.unsqueeze(0)
        # batch_heatmaps is a list of stage-predictions, each element should be [Batch, Sequence, PTS, H/Down, W/Down]
        with torch.no_grad():
            if args.procedure == 'heatmap':
                batch_heatmaps, batch_locs, batch_scos, batch_past2now, batch_future2now, batch_FBcheck = net(frames, Fflows, Bflows, is_images)
            else:
                batch_locs, batch_past2now, batch_future2now, batch_FBcheck = net(frames, Fflows, Bflows, is_images)

        (batch_size, frame_length, C, H, W), num_pts, annotate_index = frames.size(), args.num_pts, train_data.video_L
        batch_locs = batch_locs.cpu()[:, :, :num_pts]
        video_mask = masks.unsqueeze(0)[:, :num_pts]
        batch_past2now = batch_past2now.cpu()[:, :, :num_pts]
        batch_future2now = batch_future2now.cpu()[:, :, :num_pts]
        batch_FBcheck = batch_FBcheck[:, :num_pts].cpu()
        FB_check_oks = FB_communication(criterion, batch_locs, batch_past2now, batch_future2now, batch_FBcheck, video_mask, sbr_config)

        # locations : append a homogeneous row, undo the crop transform, then map back to the original image size
        norm_past_det_locs = torch.cat((batch_locs[0, annotate_index - 1, :num_pts].permute(1, 0), torch.ones(1, num_pts)), dim=0)
        norm_noww_det_locs = torch.cat((batch_locs[0, annotate_index, :num_pts].permute(1, 0), torch.ones(1, num_pts)), dim=0)
        norm_next_det_locs = torch.cat((batch_locs[0, annotate_index + 1, :num_pts].permute(1, 0), torch.ones(1, num_pts)), dim=0)
        norm_next_locs = torch.cat((batch_past2now[0, annotate_index, :num_pts].permute(1, 0), torch.ones(1, num_pts)), dim=0)
        norm_past_locs = torch.cat((batch_future2now[0, annotate_index - 1, :num_pts].permute(1, 0), torch.ones(1, num_pts)), dim=0)
        transtheta = transthetas[:2, :]
        norm_past_det_locs = torch.mm(transtheta, norm_past_det_locs)
        norm_noww_det_locs = torch.mm(transtheta, norm_noww_det_locs)
        norm_next_det_locs = torch.mm(transtheta, norm_next_det_locs)
        norm_next_locs = torch.mm(transtheta, norm_next_locs)
        norm_past_locs = torch.mm(transtheta, norm_past_locs)
        real_past_det_locs = denormalize_points(shapes.tolist(), norm_past_det_locs)
        real_noww_det_locs = denormalize_points(shapes.tolist(), norm_noww_det_locs)
        real_next_det_locs = denormalize_points(shapes.tolist(), norm_next_det_locs)
        real_next_locs = denormalize_points(shapes.tolist(), norm_next_locs)
        real_past_locs = denormalize_points(shapes.tolist(), norm_past_locs)
        gt_noww_points = train_data.labels[image_index.item()].get_points()
        gt_past_points = train_data.find_index(train_data.datas[image_index.item()][annotate_index - 1])
        gt_next_points = train_data.find_index(train_data.datas[image_index.item()][annotate_index + 1])

        FB_check_oks = FB_check_oks[:num_pts].squeeze()
        if FB_check_oks.sum().item() > 2:
            # type 1 error : detection at both (t) and (t-1) is wrong, yet the point passes the check
            is_type_1, (T_wrong, T_total) = check_is_1st_error(
                [real_past_det_locs, real_noww_det_locs, real_next_det_locs],
                [gt_past_points, gt_noww_points, gt_next_points], FB_check_oks, shapes)
            # type 2 error : detection at frame (t) is ok, while the tracked point and the detection at (t-1) are wrong
            spec_index, is_type_2 = check_is_2nd_error(real_noww_det_locs, gt_noww_points,
                                                       [real_past_locs, real_next_locs],
                                                       [gt_past_points, gt_next_points], FB_check_oks, shapes)
            type_error_1 += is_type_1
            type_error_2 += is_type_2
            type_error += is_type_1 or is_type_2
            type_error_pts, total_pts = type_error_pts + T_wrong, total_pts + T_total
            if is_type_2:
                RED, GREEN, BLUE = (255, 0, 0), (0, 255, 0), (0, 0, 255)
                [image_past, image_noww, image_next] = train_data.datas[image_index.item()]
                crop_box = train_data.labels[image_index.item()].get_box().tolist()
                point_index = FB_check_oks.nonzero().squeeze().tolist()
                colors = [GREEN if _i in point_index else RED for _i in range(num_pts)] + [BLUE for _i in range(num_pts)]
                I_past_det = draw_image_by_points(image_past, torch.cat((real_past_det_locs, gt_past_points[:2]), dim=1), 3, colors, crop_box, (400, 500))
                I_noww_det = draw_image_by_points(image_noww, torch.cat((real_noww_det_locs, gt_noww_points[:2]), dim=1), 3, colors, crop_box, (400, 500))
                I_next_det = draw_image_by_points(image_next, torch.cat((real_next_det_locs, gt_next_points[:2]), dim=1), 3, colors, crop_box, (400, 500))
                I_past = draw_image_by_points(image_past, torch.cat((real_past_locs, gt_past_points[:2]), dim=1), 3, colors, crop_box, (400, 500))
                I_next = draw_image_by_points(image_next, torch.cat((real_next_locs, gt_next_points[:2]), dim=1), 3, colors, crop_box, (400, 500))
                I_past.save(str(save_xdir / '{:05d}-v1-a-pastt.png'.format(i)))
                I_noww_det.save(str(save_xdir / '{:05d}-v1-b-curre.png'.format(i)))
                I_next.save(str(save_xdir / '{:05d}-v1-c-nextt.png'.format(i)))
                I_past_det.save(str(save_xdir / '{:05d}-v1-det-a-past.png'.format(i)))
                I_noww_det.save(str(save_xdir / '{:05d}-v1-det-b-curr.png'.format(i)))
                I_next_det.save(str(save_xdir / '{:05d}-v1-det-c-next.png'.format(i)))
                logger.log('TYPE-ERROR : {:}, landmark-index : {:}'.format(i, spec_index))
        else:
            misses += 1

        string = 'Handle {:05d}/{:05d} :: {:05d}'.format(iidx, len(selected_list), i)
        string += ', error-1 : {:} ({:.2f}%), error-2 : {:} ({:.2f}%)'.format(type_error_1, type_error_1 * 100.0 / (iidx + 1), type_error_2, type_error_2 * 100.0 / (iidx + 1))
        string += ', error : {:} ({:.2f}%), miss : {:}'.format(type_error, type_error * 100.0 / (iidx + 1), misses)
        # guard against division by zero before any points have been counted
        string += ', final-error : {:05d} / {:05d} = {:.2f}%'.format(type_error_pts, total_pts, type_error_pts * 100.0 / max(total_pts, 1))
        logger.log(string)

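# The per-frame location handling above repeats one pattern: append a row of ones to form
# homogeneous 3xN coordinates, map them back through the 2x3 crop/affine matrix
# (`transtheta`), and then rescale from normalized space to the original image size.
# Below is a minimal, self-contained sketch of that pipeline; it assumes normalized
# coordinates lie in [-1, 1] and `shape = (H, W)`, and the helper name
# `denormalize_points_sketch` is illustrative only, not the repository's own
# `denormalize_points`.
import torch

def denormalize_points_sketch(shape, norm_locs):
    # shape = (H, W); norm_locs is a 2 x N tensor with values in [-1, 1]
    H, W = float(shape[0]), float(shape[1])
    real_locs = norm_locs.clone()
    real_locs[0] = (norm_locs[0] + 1) / 2 * (W - 1)  # x -> pixel column
    real_locs[1] = (norm_locs[1] + 1) / 2 * (H - 1)  # y -> pixel row
    return real_locs

# toy example
num_pts = 68
norm_locs = torch.rand(2, num_pts) * 2 - 1                          # fake network outputs in [-1, 1]
homo_locs = torch.cat((norm_locs, torch.ones(1, num_pts)), dim=0)   # 3 x N homogeneous coordinates
transtheta = torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])       # identity crop transform for illustration
norm_in_image = torch.mm(transtheta, homo_locs)                     # undo the crop normalization
real_locs = denormalize_points_sketch((480, 640), norm_in_image)
print(real_locs.shape)  # torch.Size([2, 68])
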
def main(xargs):
    # create the logger and print basic information
    if not os.path.exists(xargs.log_dir):
        os.makedirs(xargs.log_dir)
    logger = Logger(xargs.log_dir, xargs.manual_seed)
    logger.print('args :\n{:}'.format(xargs))
    logger.print('PyTorch: {:}'.format(torch.__version__))
    assert torch.cuda.is_available(), 'You must have at least one GPU'

    # set the random seed
    #torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.deterministic = True
    random.seed(xargs.manual_seed)
    np.random.seed(xargs.manual_seed)
    torch.manual_seed(xargs.manual_seed)
    torch.cuda.manual_seed(xargs.manual_seed)

    logger.print('Start Main with this file : {:}'.format(__file__))
    graph_info = torch.load(Path(xargs.data_root))
    unseen_classes = graph_info['unseen_classes']
    train_classes = graph_info['train_classes']

    # all labels return the original value between 0-49
    train_dataset = AwA2_IMG_Rotate_Save(graph_info, 'train')
    batch_size = xargs.class_per_it * xargs.num_shot
    total_episode = int(len(train_dataset) / batch_size // 100 + 1) * 100
    #train_sampler = MetaSampler(train_dataset, total_episode, xargs.class_per_it, xargs.num_shot)
    train_sampler = DualMetaSampler(train_dataset, total_episode, xargs.class_per_it, xargs.num_shot)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_sampler=train_sampler, num_workers=xargs.num_workers)
    #train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=xargs.num_workers, drop_last=True)

    test_seen_dataset = AwA2_IMG_Rotate_Save(graph_info, 'test-seen')
    test_seen_dataset.set_return_img_mode('original')
    test_seen_loader = torch.utils.data.DataLoader(test_seen_dataset, batch_size=batch_size, shuffle=False, num_workers=xargs.num_workers)
    test_unseen_dataset = AwA2_IMG_Rotate_Save(graph_info, 'test-unseen')
    test_unseen_dataset.set_return_img_mode('original')
    test_unseen_loader = torch.utils.data.DataLoader(test_unseen_dataset, batch_size=batch_size, shuffle=False, num_workers=xargs.num_workers)
    all_class_sampler = AllClassSampler(train_dataset)
    all_class_loader = torch.utils.data.DataLoader(train_dataset, batch_sampler=all_class_sampler, num_workers=xargs.num_workers, pin_memory=True)

    logger.print('train-dataset : {:}'.format(train_dataset))
    #logger.print('train_sampler : {:}'.format(train_sampler))
    logger.print('test-seen-dataset : {:}'.format(test_seen_dataset))
    logger.print('test-unseen-dataset : {:}'.format(test_unseen_dataset))
    logger.print('all-class-train-sam : {:}'.format(all_class_sampler))

    features = graph_info['ori_attributes'].float().cuda()
    train_features = features[graph_info['train_classes'], :]
    logger.print('feature-shape={:}, train-feature-shape={:}'.format(list(features.shape), list(train_features.shape)))
    kmeans = KMeans(n_clusters=xargs.clusters, random_state=1337).fit(train_features.cpu().numpy())
    att_centers = torch.tensor(kmeans.cluster_centers_).float().cuda()
    for cls in range(xargs.clusters):
        logger.print('[cluster : {:}] has {:} elements.'.format(cls, (kmeans.labels_ == cls).sum()))
    logger.print('Train-Feature-Shape={:}, use {:} clusters, shape={:}'.format(train_features.shape, xargs.clusters, att_centers.shape))

    # build the adjacency matrix from attribute distances
    distances = distance_func(graph_info['attributes'], graph_info['attributes'], 'euclidean-pow').float().cuda()
    xallx_adj_dis = distances.clone()
    train_adj_dis = distances[graph_info['train_classes'], :][:, graph_info['train_classes']]

    network = obtain_combine_models_v2(xargs.semantic_name, xargs.relation_name, att_centers, 2048)
    network = network.cuda()
    #parameters = [{'params': list(C_Net.parameters()), 'lr': xargs.lr*5, 'weight_decay': xargs.weight_decay*0.1},
    #              {'params': list(R_Net.parameters()), 'lr': xargs.lr  , 'weight_decay': xargs.weight_decay}]
    parameters = network.parameters()
    optimizer = torch.optim.Adam(parameters, lr=xargs.lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=xargs.weight_decay, amsgrad=False)
    #optimizer = torch.optim.SGD(parameters, lr=xargs.lr, momentum=0.9, weight_decay=xargs.weight_decay, nesterov=True)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer, gamma=0.1, step_size=xargs.epochs * 2 // 3)
    logger.print('network : {:.2f} MB =>>>\n{:}'.format(count_parameters_in_MB(network), network))
    logger.print('optimizer : {:}'.format(optimizer))

    model_lst_path = logger.checkpoint('ckp-last-{:}.pth'.format(xargs.manual_seed))
    if os.path.isfile(model_lst_path):
        checkpoint = torch.load(model_lst_path)
        start_epoch = checkpoint['epoch'] + 1
        best_accs = checkpoint['best_accs']
        network.load_state_dict(checkpoint['network'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['scheduler'])
        logger.print('load checkpoint from {:}'.format(model_lst_path))
    else:
        start_epoch, best_accs = 0, {'train': -1, 'xtrain': -1, 'zs': -1, 'gzs-seen': -1, 'gzs-unseen': -1, 'gzs-H': -1, 'best-info': None}

    epoch_time, start_time = AverageMeter(), time.time()
    # training
    for iepoch in range(start_epoch, xargs.epochs):
        # set some classes as fake zero-shot classes
        time_str = convert_secs2time(epoch_time.val * (xargs.epochs - iepoch), True)
        epoch_str = '{:03d}/{:03d}'.format(iepoch, xargs.epochs)
        # last_lr = lr_scheduler.get_last_lr()
        last_lr = lr_scheduler.get_lr()
        logger.print('Train the {:}-th epoch, {:}, LR={:1.6f} ~ {:1.6f}'.format(epoch_str, time_str, min(last_lr), max(last_lr)))

        config_train = load_configure(None, {'epoch_str': epoch_str, 'log_interval': xargs.log_interval,
                                             'loss_type': xargs.loss_type, 'consistency_coef': xargs.consistency_coef,
                                             'consistency_type': xargs.consistency_type}, None)
        train_cls_loss, train_acc = train_model(train_loader, train_features, train_adj_dis, network, optimizer, config_train, logger)
        lr_scheduler.step()
        if train_acc > best_accs['train']:
            best_accs['train'] = train_acc
        logger.print('Train {:} done, cls-loss={:.3f}, accuracy={:.2f}%, (best={:.2f}).\n'.format(epoch_str, train_cls_loss, train_acc, best_accs['train']))

        if iepoch % xargs.test_interval == 0 or iepoch == xargs.epochs - 1:
            with torch.no_grad():
                xinfo = {'train_classes': graph_info['train_classes'], 'unseen_classes': graph_info['unseen_classes']}
                train_loader.dataset.set_return_img_mode('original')
                all_class_loader.dataset.set_return_label_mode('original')
                all_class_loader.dataset.set_return_img_mode('original')
                seen_protos, unseen_att = get_train_protos(network, features, train_classes, unseen_classes, all_class_loader, xargs)
                for test_topK in range(1, 2):
                    logger.print('-----test--init with top-{:} seen protos-------'.format(test_topK))
                    topkATT, topkIDX = torch.topk(unseen_att, test_topK, dim=1)
                    norm_att = F.softmax(topkATT, dim=1)
                    unseen_protos = norm_att.view(len(unseen_classes), test_topK, 1) * seen_protos[topkIDX]
                    unseen_protos = unseen_protos.mean(dim=1)
                    protos = []
                    for icls in range(features.size(0)):
                        if icls in train_classes:
                            protos.append(seen_protos[train_classes.index(icls)])
                        else:
                            protos.append(unseen_protos[unseen_classes.index(icls)])
                    protos = torch.stack(protos)
                    train_loader.dataset.set_return_img_mode('original')
                    evaluate_all_dual(epoch_str, train_loader, test_unseen_loader, test_seen_loader, features, protos, xallx_adj_dis, network, xinfo, best_accs, logger)

        semantic_lists = network.get_semantic_list(features)
        # save the info
        info = {'epoch': iepoch,
                'args': deepcopy(xargs),
                'finish': iepoch + 1 == xargs.epochs,
                'best_accs': best_accs,
                'semantic_lists': semantic_lists,
                'adj_distances': xallx_adj_dis,
                'network': network.state_dict(),
                'optimizer': optimizer.state_dict(),
                'scheduler': lr_scheduler.state_dict()}
        try:
            torch.save(info, model_lst_path)
            logger.print('--->>> joint-arch :: save into {:}.\n'.format(model_lst_path))
        except PermissionError:
            print('unsuccessful write log')
        # measure the elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    if 'info' in locals() or 'checkpoint' in locals():
        if 'checkpoint' in locals():
            semantic_lists = checkpoint['semantic_lists']
        else:
            semantic_lists = info['semantic_lists']
    '''
    # the final evaluation
    logger.print('final evaluation --->>>')
    with torch.no_grad():
        xinfo = {'train_classes': graph_info['train_classes'], 'unseen_classes': graph_info['unseen_classes']}
        train_loader.dataset.set_return_img_mode('original')
        evaluate_all('final-eval', train_loader, test_unseen_loader, test_seen_loader, features, xallx_adj_dis, network, xinfo, best_accs, logger)
        logger.print('-' * 200)
    '''
    logger.close()

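# The test-time branch above synthesizes a prototype for each unseen class from its
# top-K most related seen-class prototypes, weighting them by a softmax over the
# attribute affinities and then averaging over K. A minimal sketch of that step with
# made-up shapes is below; the function and variable names are illustrative, not the
# repository's API.
import torch
import torch.nn.functional as F

def build_unseen_protos(seen_protos, unseen_att, topk):
    # seen_protos : [num_seen, feat_dim] prototypes of seen classes
    # unseen_att  : [num_unseen, num_seen] affinity of each unseen class to each seen class
    topk_att, topk_idx = torch.topk(unseen_att, topk, dim=1)    # strongest K relations per unseen class
    norm_att = F.softmax(topk_att, dim=1)                       # normalize the K affinities into weights
    weighted = norm_att.unsqueeze(-1) * seen_protos[topk_idx]   # [num_unseen, K, feat_dim]
    return weighted.mean(dim=1)                                 # combine over the K seen prototypes

seen_protos = torch.randn(40, 2048)
unseen_att = torch.randn(10, 40)
unseen_protos = build_unseen_protos(seen_protos, unseen_att, topk=3)
print(unseen_protos.shape)  # torch.Size([10, 2048])
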
def main(args):
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    torch.set_num_threads(args.workers)
    print('Training Base Detector : prepare_seed : {:}'.format(args.rand_seed))
    prepare_seed(args.rand_seed)
    temporal_main, eval_all = procedures['{:}-train'.format(args.procedure)], procedures['{:}-test'.format(args.procedure)]

    logger = prepare_logger(args)

    # General Data Augmentation
    normalize, train_transform, eval_transform, robust_transform = prepare_data_augmentation(transforms, args)
    recover = transforms.ToPILImage(normalize)
    args.tensor2imageF = recover
    assert (args.scale_min + args.scale_max) / 2 == 1, 'The scale is not ok : {:} ~ {:}'.format(args.scale_min, args.scale_max)

    # Model Configure Load
    model_config = load_configure(args.model_config, logger)
    sbr_config = load_configure(args.sbr_config, logger)
    shape = (args.height, args.width)
    logger.log('--> {:}\n--> Sigma : {:}, Shape : {:}'.format(model_config, args.sigma, shape))
    logger.log('--> SBR Configuration : {:}\n'.format(sbr_config))

    # Training Dataset
    train_data = VDataset(eval_transform, args.sigma, model_config.downsample, args.heatmap_type,
                          shape, args.use_gray, args.mean_point, args.data_indicator,
                          sbr_config, transforms.ToPILImage(normalize, 'cv2gray'))
    train_data.load_list(args.train_lists, args.num_pts, args.boxindicator, args.normalizeL, True)

    # Evaluation Dataloader
    assert len(args.eval_ilists) == 1, 'invalid length of eval_ilists : {:}'.format(len(args.eval_ilists))
    eval_data = IDataset(eval_transform, args.sigma, model_config.downsample, args.heatmap_type,
                         shape, args.use_gray, args.mean_point, args.data_indicator)
    eval_data.load_list(args.eval_ilists[0], args.num_pts, args.boxindicator, args.normalizeL, True)

    if args.x68to49:
        assert args.num_pts == 68, 'args.num_pts is not 68 vs. {:}'.format(args.num_pts)
        if train_data is not None:
            train_data = convert68to49(train_data)
        eval_data = convert68to49(eval_data)
        args.num_pts = 49

    # define the temporal model (accelerated SBR)
    net = obtain_pro_temporal(model_config, sbr_config, args.num_pts, args.sigma, args.use_gray)
    assert model_config.downsample == net.downsample, 'downsample is not correct : {:} vs {:}'.format(model_config.downsample, net.downsample)
    logger.log("=> network :\n {}".format(net))

    logger.log('Training-data : {:}'.format(train_data))
    logger.log('Evaluate-data : {:}'.format(eval_data))
    logger.log('arguments : {:}'.format(args))
    opt_config = load_configure(args.opt_config, logger)

    optimizer, scheduler, criterion = obtain_optimizer(net.parameters(), opt_config, logger)
    logger.log('criterion : {:}'.format(criterion))
    net, criterion = net.cuda(), criterion.cuda()
    net = torch.nn.DataParallel(net)

    last_info = logger.last_info()
    try:
        last_checkpoint = load_checkpoint(args.init_model)
        checkpoint = remove_module_dict(last_checkpoint['state_dict'], False)
        net.module.detector.load_state_dict(checkpoint)
    except Exception:
        last_checkpoint = load_checkpoint(args.init_model)
        net.load_state_dict(last_checkpoint['state_dict'])
    detector = torch.nn.DataParallel(net.module.detector)
    logger.log("=> initialize the detector : {:}".format(args.init_model))

    net.eval()
    detector.eval()

    logger.log('SBR Config : {:}'.format(sbr_config))
    save_xdir = logger.path('meta')
    type_error = 0
    random.seed(111)
    index_list = list(range(len(train_data)))
    random.shuffle(index_list)
    #selected_list = index_list[: min(200, len(index_list))]
    selected_list = [7260, 11506, 39952, 75196, 51614, 41061, 37747, 41355, 47875]

    for iidx, i in enumerate(selected_list):
        frames, Fflows, Bflows, targets, masks, normpoints, transthetas, meanthetas, image_index, nopoints, shapes, is_images = train_data[i]
        frames, Fflows, Bflows, is_images = frames.unsqueeze(0), Fflows.unsqueeze(0), Bflows.unsqueeze(0), is_images.unsqueeze(0)
        # batch_heatmaps is a list of stage-predictions, each element should be [Batch, Sequence, PTS, H/Down, W/Down]
        if args.procedure == 'heatmap':
            batch_heatmaps, batch_locs, batch_scos, batch_past2now, batch_future2now, batch_FBcheck = net(frames, Fflows, Bflows, is_images)
        else:
            batch_locs, batch_past2now, batch_future2now, batch_FBcheck = net(frames, Fflows, Bflows, is_images)

        (batch_size, frame_length, C, H, W), num_pts, annotate_index = frames.size(), args.num_pts, train_data.video_L
        batch_locs = batch_locs.cpu()[:, :, :num_pts]
        video_mask = masks.unsqueeze(0)[:, :num_pts]
        batch_past2now = batch_past2now.cpu()[:, :, :num_pts]
        batch_future2now = batch_future2now.cpu()[:, :, :num_pts]
        batch_FBcheck = batch_FBcheck[:, :num_pts].cpu()
        FB_check_oks = FB_communication(criterion, batch_locs, batch_past2now, batch_future2now, batch_FBcheck, video_mask, sbr_config)

        # locations (see the forward-backward check sketch after this function)
        norm_past_det_locs = torch.cat((batch_locs[0, annotate_index - 1, :num_pts].permute(1, 0), torch.ones(1, num_pts)), dim=0)
        norm_noww_det_locs = torch.cat((batch_locs[0, annotate_index, :num_pts].permute(1, 0), torch.ones(1, num_pts)), dim=0)
        norm_next_det_locs = torch.cat((batch_locs[0, annotate_index + 1, :num_pts].permute(1, 0), torch.ones(1, num_pts)), dim=0)
        norm_next_locs = torch.cat((batch_past2now[0, annotate_index, :num_pts].permute(1, 0), torch.ones(1, num_pts)), dim=0)
        norm_past_locs = torch.cat((batch_future2now[0, annotate_index - 1, :num_pts].permute(1, 0), torch.ones(1, num_pts)), dim=0)
        transtheta = transthetas[:2, :]
        norm_past_det_locs = torch.mm(transtheta, norm_past_det_locs)
        norm_noww_det_locs = torch.mm(transtheta, norm_noww_det_locs)
        norm_next_det_locs = torch.mm(transtheta, norm_next_det_locs)
        norm_next_locs = torch.mm(transtheta, norm_next_locs)
        norm_past_locs = torch.mm(transtheta, norm_past_locs)
        real_past_det_locs = denormalize_points(shapes.tolist(), norm_past_det_locs)
        real_noww_det_locs = denormalize_points(shapes.tolist(), norm_noww_det_locs)
        real_next_det_locs = denormalize_points(shapes.tolist(), norm_next_det_locs)
        real_next_locs = denormalize_points(shapes.tolist(), norm_next_locs)
        real_past_locs = denormalize_points(shapes.tolist(), norm_past_locs)
        gt_noww_points = train_data.labels[image_index.item()].get_points()

        FB_check_oks = FB_check_oks[:num_pts].squeeze()
        if FB_check_oks.sum().item() > 2:
            point_index = FB_check_oks.nonzero().squeeze().tolist()
            something_wrong = False
            for pidx in point_index:
                real_now_det_loc = real_noww_det_locs[:, pidx]
                real_pst_det_loc = real_past_det_locs[:, pidx]
                real_net_det_loc = real_next_det_locs[:, pidx]
                real_nex_loc = real_next_locs[:, pidx]
                real_pst_loc = real_past_locs[:, pidx]
                grdt_now_loc = gt_noww_points[:2, pidx]
                #if torch.abs(real_now_loc - grdt_now_loc).max() > 5: something_wrong = True
                #if torch.abs(real_nex_loc - grdt_nex_loc).max() > 5: something_wrong = True
            #if something_wrong == True:
            if True:
                [image_past, image_noww, image_next] = train_data.datas[image_index.item()]
                try:
                    crop_box = train_data.labels[image_index.item()].get_box().tolist()
                    #crop_box = [crop_box[0]-20, crop_box[1]-20, crop_box[2]+20, crop_box[3]+20]
                except Exception:
                    crop_box = False
                RED, GREEN, BLUE = (255, 0, 0), (0, 255, 0), (0, 0, 255)
                colors = [GREEN if _i in point_index else RED for _i in range(num_pts)]
                if crop_box != False or True:
                    I_past_det = draw_image_by_points(image_past, real_past_det_locs[:], 3, colors, crop_box, (400, 500))
                    I_noww_det = draw_image_by_points(image_noww, real_noww_det_locs[:], 3, colors, crop_box, (400, 500))
                    I_next_det = draw_image_by_points(image_next, real_next_det_locs[:], 3, colors, crop_box, (400, 500))
                    I_next = draw_image_by_points(image_next, real_next_locs[:], 3, colors, crop_box, (400, 500))
                    I_past = draw_image_by_points(image_past, real_past_locs[:], 3, colors, crop_box, (400, 500))
                    I_past.save(str(save_xdir / '{:05d}-v1-a-pastt.png'.format(i)))
                    I_noww_det.save(str(save_xdir / '{:05d}-v1-b-curre.png'.format(i)))
                    I_next.save(str(save_xdir / '{:05d}-v1-c-nextt.png'.format(i)))
                    I_past_det.save(str(save_xdir / '{:05d}-v1-det-a-past.png'.format(i)))
                    I_noww_det.save(str(save_xdir / '{:05d}-v1-det-b-curr.png'.format(i)))
                    I_next_det.save(str(save_xdir / '{:05d}-v1-det-c-next.png'.format(i)))
                #[image_past, image_noww, image_next] = train_data.datas[image_index.item()]
                #image_noww = draw_image_by_points(image_noww, real_noww_locs[:], 2, colors, False, False)
                #image_next = draw_image_by_points(image_next, real_next_locs[:], 2, colors, False, False)
                #image_past = draw_image_by_points(image_past, real_past_locs[:], 2, colors, False, False)
                #image_noww.save(str(save_xdir / '{:05d}-v2-b-curre.png'.format(i)))
                #image_next.save(str(save_xdir / '{:05d}-v2-c-nextt.png'.format(i)))
                #image_past.save(str(save_xdir / '{:05d}-v2-a-pastt.png'.format(i)))
                #type_error += 1
        logger.log('Handle {:05d}/{:05d} :: {:05d}, ok-points={:.3f}, wrong data={:}'.format(iidx, len(selected_list), i, FB_check_oks.float().mean().item(), type_error))

    save_xx_dir = save_xdir.parent / 'image-data'
    save_xx_dir.mkdir(parents=True, exist_ok=True)
    selected_list = [100, 115, 200, 300, 400] + list(range(200, 220))
    for iidx, i in enumerate(selected_list):
        inputs, targets, masks, normpoints, transthetas, meanthetas, image_index, nopoints, shapes = eval_data[i]
        inputs = inputs.unsqueeze(0)
        (batch_size, C, H, W), num_pts = inputs.size(), args.num_pts
        _, _, batch_locs, batch_scos = detector(inputs)
        batch_locs, batch_scos = batch_locs.cpu(), batch_scos.cpu()
        norm_locs = normalize_points((H, W), batch_locs[0, :num_pts].transpose(1, 0))
        norm_det_locs = torch.cat((norm_locs, torch.ones(1, num_pts)), dim=0)
        norm_det_locs = torch.mm(transthetas[:2, :], norm_det_locs)
        real_det_locs = denormalize_points(shapes.tolist(), norm_det_locs)
        gt_now_points = eval_data.labels[image_index.item()].get_points()
        image_now = eval_data.datas[image_index.item()]
        crop_box = eval_data.labels[image_index.item()].get_box().tolist()
        RED, GREEN, BLUE = (255, 0, 0), (0, 255, 0), (0, 0, 255)
        Gcolors = [GREEN for _ in range(num_pts)]
        points = torch.cat((real_det_locs, gt_now_points[:2]), dim=1)
        colors = [GREEN for _ in range(num_pts)] + [BLUE for _ in range(num_pts)]
        image = draw_image_by_points(image_now, real_det_locs, 3, Gcolors, crop_box, (400, 500))
        image.save(str(save_xx_dir / '{:05d}-crop.png'.format(i)))
        image = draw_image_by_points(image_now, points, 3, colors, False, False)
        #image = draw_image_by_points(image_now, real_det_locs, 3, colors, False, False)
        image.save(str(save_xx_dir / '{:05d}-orig.png'.format(i)))

    logger.log('Finish drawing : {:}'.format(save_xdir))
    logger.log('Finish drawing : {:}'.format(save_xx_dir))
    logger.close()

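# FB_check_oks above marks which landmarks survive a forward-backward consistency test:
# a point tracked from the neighbouring frames into frame t should agree with the
# detection at frame t. A toy version of such a check is sketched below; it simply
# thresholds the pixel distance and is not the repository's FB_communication.
import torch

def toy_fb_check(det_now, past2now, future2now, pixel_threshold=3.0):
    # det_now, past2now, future2now : [num_pts, 2] locations in the same coordinate frame
    err_fwd = (past2now - det_now).norm(dim=1)    # error of the point tracked from the past frame
    err_bwd = (future2now - det_now).norm(dim=1)  # error of the point tracked from the future frame
    return (err_fwd < pixel_threshold) & (err_bwd < pixel_threshold)

det_now = torch.rand(68, 2) * 100
oks = toy_fb_check(det_now, det_now + 0.5, det_now + 4.0)
print(oks.sum().item())  # how many points pass the toy check
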
def main(args):
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    torch.set_num_threads(args.workers)
    print('Training Base Detector : prepare_seed : {:}'.format(args.rand_seed))
    prepare_seed(args.rand_seed)
    logger = prepare_logger(args)

    checkpoint = load_checkpoint(args.init_model)
    xargs = checkpoint['args']
    logger.log('Previous args : {:}'.format(xargs))

    # General Data Augmentation
    if xargs.use_gray == False:
        mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]])
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    else:
        mean_fill = (0.5,)
        normalize = transforms.Normalize(mean=[mean_fill[0]], std=[0.5])
    eval_transform = transforms.Compose2V([transforms.ToTensor(), normalize,
                                           transforms.PreCrop(xargs.pre_crop_expand),
                                           transforms.CenterCrop(xargs.crop_max)])

    # Model Configure Load
    model_config = load_configure(xargs.model_config, logger)
    shape = (xargs.height, xargs.width)
    logger.log('--> {:}\n--> Sigma : {:}, Shape : {:}'.format(model_config, xargs.sigma, shape))

    # Evaluation Dataloader
    eval_loaders = []
    if args.eval_ilists is not None:
        for eval_ilist in args.eval_ilists:
            eval_idata = EvalDataset(eval_transform, xargs.sigma, model_config.downsample, xargs.heatmap_type, shape, xargs.use_gray, xargs.data_indicator)
            eval_idata.load_list(eval_ilist, args.num_pts, xargs.boxindicator, xargs.normalizeL, True)
            eval_iloader = torch.utils.data.DataLoader(eval_idata, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
            eval_loaders.append((eval_iloader, False))
    if args.eval_vlists is not None:
        for eval_vlist in args.eval_vlists:
            eval_vdata = EvalDataset(eval_transform, xargs.sigma, model_config.downsample, xargs.heatmap_type, shape, xargs.use_gray, xargs.data_indicator)
            eval_vdata.load_list(eval_vlist, args.num_pts, xargs.boxindicator, xargs.normalizeL, True)
            eval_vloader = torch.utils.data.DataLoader(eval_vdata, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
            eval_loaders.append((eval_vloader, True))

    # define the detector
    detector = obtain_pro_model(model_config, xargs.num_pts, xargs.sigma, xargs.use_gray)
    assert model_config.downsample == detector.downsample, 'downsample is not correct : {:} vs {:}'.format(model_config.downsample, detector.downsample)
    logger.log("=> detector :\n {:}".format(detector))
    logger.log("=> Net-Parameters : {:} MB".format(count_parameters_in_MB(detector)))
    logger.log('=> Eval-Transform : {:}'.format(eval_transform))

    detector = detector.cuda()
    net = torch.nn.DataParallel(detector)
    net.eval()
    net.load_state_dict(checkpoint['detector'])
    cpu = torch.device('cpu')
    assert len(args.use_stable) == 2

    for iLOADER, (loader, is_video) in enumerate(eval_loaders):
        logger.log('{:} The [{:2d}/{:2d}]-th test set [{:}] = {:} with {:} batches.'.format(
            time_string(), iLOADER, len(eval_loaders), 'video' if is_video else 'image', loader.dataset, len(loader)))
        with torch.no_grad():
            all_points, all_results, all_image_ps = [], [], []
            for i, (inputs, targets, masks, normpoints, transthetas, image_index, nopoints, shapes) in enumerate(loader):
                image_index = image_index.squeeze(1).tolist()
                (batch_size, C, H, W), num_pts = inputs.size(), xargs.num_pts
                # batch_heatmaps is a list of stage-predictions, each element should be [Batch, C, H, W]
                if xargs.procedure == 'heatmap':
                    batch_features, batch_heatmaps, batch_locs, batch_scos = net(inputs)
                    batch_locs = batch_locs[:, :-1, :]
                else:
                    batch_locs = net(inputs)
                batch_locs = batch_locs.detach().to(cpu)
                # map every prediction back to the original image coordinates
                for ibatch, (imgidx, nopoint) in enumerate(zip(image_index, nopoints)):
                    if xargs.procedure == 'heatmap':
                        norm_locs = normalize_points((H, W), batch_locs[ibatch].transpose(1, 0))
                        norm_locs = torch.cat((norm_locs, torch.ones(1, num_pts)), dim=0)
                    else:
                        norm_locs = torch.cat((batch_locs[ibatch].permute(1, 0), torch.ones(1, num_pts)), dim=0)
                    transtheta = transthetas[ibatch][:2, :]
                    norm_locs = torch.mm(transtheta, norm_locs)
                    real_locs = denormalize_points(shapes[ibatch].tolist(), norm_locs)
                    #real_locs = torch.cat((real_locs, batch_scos[ibatch].permute(1, 0)), dim=0)
                    real_locs = torch.cat((real_locs, torch.ones(1, num_pts)), dim=0)
                    xpoints = loader.dataset.labels[imgidx].get_points().numpy()
                    image_path = loader.dataset.datas[imgidx]
                    # put into the lists
                    all_points.append(torch.from_numpy(xpoints))
                    all_results.append(real_locs)
                    all_image_ps.append(image_path)
        total = len(all_points)
        logger.log('{:} The [{:2d}/{:2d}]-th test set finishes evaluation : {:} frames/images'.format(time_string(), iLOADER, len(eval_loaders), total))
        """
        if args.use_stable[0] > 0:
            save_dir = Path(osp.join(args.save_path, '{:}-X-{:03d}'.format(args.model_name, iLOADER)))
            save_dir.mkdir(parents=True, exist_ok=True)
            wrap_parallel = WrapParallel(save_dir, all_image_ps, all_results, all_points, 180, (255, 0, 0))
            wrap_loader = torch.utils.data.DataLoader(wrap_parallel, batch_size=args.workers, shuffle=False, num_workers=args.workers, pin_memory=True)
            for iL, INDEXES in enumerate(wrap_loader): _ = INDEXES
            cmd = 'ffmpeg -y -i {:}/%06d.png -framerate 30 {:}.avi'.format(save_dir, save_dir)
            logger.log('{:} possible >>>>> : {:}'.format(time_string(), cmd))
            os.system(cmd)
        if args.use_stable[1] > 0:
            save_dir = Path(osp.join(args.save_path, '{:}-Y-{:03d}'.format(args.model_name, iLOADER)))
            save_dir.mkdir(parents=True, exist_ok=True)
            Xpredictions, Xgts = torch.stack(all_results), torch.stack(all_points)
            new_preds = fc_solve(Xgts, Xpredictions, is_cuda=True)
            wrap_parallel = WrapParallel(save_dir, all_image_ps, new_preds, all_points, 180, (0, 0, 255))
            wrap_loader = torch.utils.data.DataLoader(wrap_parallel, batch_size=args.workers, shuffle=False, num_workers=args.workers, pin_memory=True)
            for iL, INDEXES in enumerate(wrap_loader): _ = INDEXES
            cmd = 'ffmpeg -y -i {:}/%06d.png -framerate 30 {:}.avi'.format(save_dir, save_dir)
            logger.log('{:} possible >>>>> : {:}'.format(time_string(), cmd))
            os.system(cmd)
        """
        Xpredictions, Xgts = torch.stack(all_results), torch.stack(all_points)
        save_path = Path(osp.join(args.save_path, '{:}-result-{:03d}.pth'.format(args.model_name, iLOADER)))
        torch.save({'paths': all_image_ps, 'ground-truths': Xgts, 'predictions': all_results}, save_path)
        logger.log('{:} save into {:}'.format(time_string(), save_path))

        if False:
            new_preds = fc_solve_v2(Xgts, Xpredictions, is_cuda=True)
            # create the directory for the stabilized visualization
            save_dir = Path(osp.join(args.save_path, '{:}-T-{:03d}'.format(args.model_name, iLOADER)))
            save_dir.mkdir(parents=True, exist_ok=True)
            wrap_parallel = WrapParallelV2(save_dir, all_image_ps, Xgts, all_results, new_preds, all_points, 180, [args.model_name, 'SRT'])
            wrap_parallel[0]
            wrap_loader = torch.utils.data.DataLoader(wrap_parallel, batch_size=args.workers, shuffle=False, num_workers=args.workers, pin_memory=True)
            for iL, INDEXES in enumerate(wrap_loader):
                _ = INDEXES
            cmd = 'ffmpeg -y -i {:}/%06d.png -vb 5000k {:}.avi'.format(save_dir, save_dir)
            logger.log('{:} possible >>>>> : {:}'.format(time_string(), cmd))
            os.system(cmd)

    logger.close()
    return

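# The result files written above can be inspected offline. Below is a minimal sketch of
# reading one back and computing a mean Euclidean landmark error; the file name
# 'DET-result-000.pth' is hypothetical (it just follows the naming pattern above), and
# the sketch assumes both tensors store (x, y, flag) rows per point, which should be
# checked against the actual saved shapes.
import torch

result = torch.load('DET-result-000.pth')   # hypothetical file following '{model_name}-result-{iLOADER}.pth'
gts, preds = result['ground-truths'], result['predictions']
errors = []
for gt, pred in zip(gts, preds):
    dist = (gt[:2] - pred[:2]).norm(dim=0)   # per-point L2 error in pixels
    errors.append(dist.mean().item())
print('mean error over {:} images : {:.3f} pixels'.format(len(errors), sum(errors) / len(errors)))
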
def evaluate(args):
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    print('The model is {:}'.format(args.model))
    snapshot = Path(args.model)
    assert snapshot.exists(), 'The model path {:} does not exist'.format(snapshot)
    snapshot = torch.load(snapshot)

    # General Data Augmentation
    mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]])
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    param = snapshot['args']
    eval_transform = transforms.Compose([transforms.PreCrop(param.pre_crop_expand),
                                         transforms.TrainScale2WH((param.crop_width, param.crop_height)),
                                         transforms.ToTensor(), normalize])
    model_config = load_configure(param.model_config, None)
    dataset = Dataset(eval_transform, param.sigma, model_config.downsample, param.heatmap_type, param.data_indicator)
    dataset.reset(param.num_pts)

    net = obtain_model(model_config, param.num_pts + 1)
    net = net.cuda()
    weights = remove_module_dict(snapshot['state_dict'])
    # strip the 'detector.' prefix from the weight names before loading
    nu_weights = {}
    for key, val in weights.items():
        nu_weights[key.split('detector.')[-1]] = val
    weights = nu_weights
    net.load_state_dict(weights)

    print('Prepare input data')
    images = os.listdir(args.image_path)
    images = natsort.natsorted(images)
    total_images = len(images)
    for im_ind, aimage in enumerate(images):
        progressbar(im_ind, total_images)
        aim = os.path.join(args.image_path, aimage)
        args.image = aim
        im = cv2.imread(aim)
        imshape = im.shape
        # use the whole image as the face bounding box
        args.face = [0, 0, imshape[0], imshape[1]]
        [image, _, _, _, _, _, cropped_size], meta = dataset.prepare_input(args.image, args.face)
        inputs = image.unsqueeze(0).cuda()
        scale_h, scale_w = cropped_size[0] * 1. / inputs.size(-2), cropped_size[1] * 1. / inputs.size(-1)

        # network forward
        with torch.no_grad():
            batch_locs, batch_scos = net(inputs)
        # run the same input through the reference (Caffe) model for comparison;
        # `rep` is expected to be defined elsewhere in this script
        c_im = np.expand_dims(image.data.numpy(), 0)
        c_locs, c_scors = rep.run(c_im)

        # obtain the locations on the image in the original size
        cpu = torch.device('cpu')
        np_batch_locs, np_batch_scos, cropped_size = batch_locs.to(cpu).numpy(), batch_scos.to(cpu).numpy(), cropped_size.numpy()
        locations, scores = np_batch_locs[0, :-1, :], np.expand_dims(np_batch_scos[0, :-1], -1)
        locations[:, 0], locations[:, 1] = locations[:, 0] * scale_w + cropped_size[2], locations[:, 1] * scale_h + cropped_size[3]
        prediction = np.concatenate((locations, scores), axis=1).transpose(1, 0)

        c_locations = c_locs[0, :-1, :]
        c_locations[:, 0], c_locations[:, 1] = c_locations[:, 0] * scale_w + cropped_size[2], c_locations[:, 1] * scale_h + cropped_size[3]
        c_scores = np.expand_dims(c_scors[0, :-1], -1)
        c_pred_pts = np.concatenate((c_locations, c_scores), axis=1).transpose(1, 0)

        pred_pts = np.transpose(prediction, [1, 0])[:, :-1]
        c_pred_pts = np.transpose(c_pred_pts, [1, 0])[:, :-1]
        print(c_scors - np_batch_scos)

        if args.save:
            out_name = os.path.splitext(aimage)[0] + '.jpg'
            save_path = os.path.join(args.save, 'caf' + out_name)
            save_path2 = os.path.join(args.save, 'py_' + out_name)
            sim2 = draw_pts(im, pred_pts=pred_pts, get_l1e=False)
            sim = draw_pts(im, pred_pts=c_pred_pts, get_l1e=False)
            cv2.imwrite(save_path, sim)
            cv2.imwrite(save_path2, sim2)
        else:
            print('ignore the visualization procedure')

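# evaluate() above reads args.model, args.image_path, and args.save. A plausible
# command-line entry point is sketched below; the flag names simply mirror those
# attributes and are assumptions, not taken from the original script.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Run landmark detection on a folder of images.')
    parser.add_argument('--model', type=str, required=True, help='path to the trained snapshot (.pth)')
    parser.add_argument('--image_path', type=str, required=True, help='directory of input images')
    parser.add_argument('--save', type=str, default=None, help='directory to save visualizations (optional)')
    evaluate(parser.parse_args())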