def main(args):
    """Evaluate the robustness of a trained landmark detector.

    Loads a checkpoint (``args.init_model``), rebuilds the evaluation
    transform with random scale/offset/rotation perturbations
    (``RandomTrans``), runs the checkpoint's evaluation procedure over
    every list in ``args.eval_lists``, and logs mean/std of the errors.

    NOTE(review): this function requires CUDA and reads most model
    hyper-parameters from the checkpoint's saved ``args`` (``xargs``),
    not from the command-line ``args``.
    """
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    torch.set_num_threads(args.workers)
    print('Evaluate the Robustness of a Detector : prepare_seed : {:}'.format(
        args.rand_seed))
    prepare_seed(args.rand_seed)
    assert args.init_model is not None and Path(
        args.init_model).exists(), 'invalid initial model path : {:}'.format(
            args.init_model)
    checkpoint = load_checkpoint(args.init_model)
    # xargs: the argparse namespace that was saved when the model was trained.
    xargs = checkpoint['args']
    # Pick the evaluation function matching the training procedure
    # (e.g. heatmap- vs regression-based).
    eval_func = procedures[xargs.procedure]
    logger = prepare_logger(args)

    # General data normalization: ImageNet statistics for RGB, 0.5/0.5 for gray.
    if xargs.use_gray == False:
        # mean_fill is the per-channel fill color (0-255) used by the
        # geometric transforms for out-of-image pixels.
        mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]])
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    else:
        mean_fill = (0.5, )
        normalize = transforms.Normalize(mean=[mean_fill[0]], std=[0.5])
    robust_component = [
        transforms.ToTensor(), normalize,
        transforms.PreCrop(xargs.pre_crop_expand)
    ]
    # RandomTrans applies the robustness perturbations (scale/offset/rotate),
    # repeated robust_iters times; results may be cached in robust_cache_dir.
    robust_component += [
        transforms.RandomTrans(args.robust_scale, args.robust_offset,
                               args.robust_rotate, args.robust_iters,
                               args.robust_cache_dir, True)
    ]
    robust_transform = transforms.Compose3V(robust_component)
    logger.log('--- arguments --- : {:}'.format(args))
    logger.log('robust_transform : {:}'.format(robust_transform))
    # recover converts a normalized tensor back to a PIL image (debug helper;
    # unused below — presumably kept for interactive inspection).
    recover = xvision.transforms2v.ToPILImage(normalize)

    model_config = load_configure(xargs.model_config, logger)
    shape = (xargs.height, xargs.width)
    logger.log('Model : {:} $$$$ Shape : {:}'.format(model_config, shape))

    # Evaluation dataloaders: one RobustDataset per evaluation list file.
    assert args.eval_lists is not None and len(
        args.eval_lists) > 0, 'invalid args.eval_lists : {:}'.format(
            args.eval_lists)
    eval_loaders = []
    for eval_list in args.eval_lists:
        eval_data = RobustDataset(robust_transform, xargs.sigma,
                                  model_config.downsample, xargs.heatmap_type,
                                  shape, xargs.use_gray, xargs.data_indicator)
        if xargs.x68to49:
            # Model was trained on 49 points: load the 68-point annotation
            # then drop the face contour.
            eval_data.load_list(eval_list, 68, xargs.boxindicator, True)
            convert68to49(eval_data)
        else:
            eval_data.load_list(eval_list, xargs.num_pts,
                                xargs.boxindicator, True)
        eval_data.get_normalization_distance(None, True)
        # Older checkpoints store the batch size under different names;
        # try them in order of preference.
        if hasattr(xargs, 'batch_size'):
            batch_size = xargs.batch_size
        elif hasattr(xargs, 'i_batch_size') and xargs.i_batch_size > 0:
            batch_size = xargs.i_batch_size
        elif hasattr(xargs, 'v_batch_size') and xargs.v_batch_size > 0:
            batch_size = xargs.v_batch_size
        else:
            raise ValueError(
                'can not find batch size information in xargs : {:}'.format(
                    xargs))
        eval_loader = torch.utils.data.DataLoader(eval_data,
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  num_workers=args.workers,
                                                  pin_memory=True)
        eval_loaders.append(eval_loader)

    # Define the detection network from the saved configuration.
    detector = obtain_pro_model(model_config, xargs.num_pts, xargs.sigma,
                                xargs.use_gray)
    assert model_config.downsample == detector.downsample, 'downsample is not correct : {:} vs {:}'.format(
        model_config.downsample, detector.downsample)
    logger.log("=> detector :\n {:}".format(detector))
    logger.log("=> Net-Parameters : {:} MB".format(
        count_parameters_in_MB(detector)))

    for i, eval_loader in enumerate(eval_loaders):
        logger.log('The [{:2d}/{:2d}]-th testing-data = {:}'.format(
            i, len(eval_loaders), eval_loader.dataset))
    logger.log('basic-arguments : {:}\n'.format(xargs))
    logger.log('xoxox-arguments : {:}\n'.format(args))

    # Weights were saved from a DataParallel wrapper; strip 'module.' prefixes.
    detector.load_state_dict(remove_module_dict(checkpoint['detector']))
    detector = detector.cuda()

    # Run the evaluation procedure on every loader and log error statistics.
    for ieval, loader in enumerate(eval_loaders):
        errors, valids, meta = eval_func(detector, loader, args.print_freq,
                                         logger)
        logger.log(
            '[{:2d}/{:02d}] eval-data : error : mean={:.3f}, std={:.3f}'.
            format(ieval, len(eval_loaders), np.mean(errors), np.std(errors)))
        logger.log(
            '[{:2d}/{:02d}] eval-data : valid : mean={:.3f}, std={:.3f}'.
            format(ieval, len(eval_loaders), np.mean(valids), np.std(valids)))
        # compute_mse logs NME / AUC / PCK curves for this dataset.
        nme, auc, pck_curves = meta.compute_mse(loader.dataset.dataset_name,
                                                logger)
    logger.close()
def main(args):
    """Train a base facial-landmark detector and periodically evaluate it.

    Builds the training/evaluation dataloaders, constructs the detector
    from ``args.model_config``, resumes from the last checkpoint when one
    exists (or initializes from ``args.init_model``), then runs the main
    epoch loop: train one epoch, save checkpoints, and evaluate every
    ``args.eval_freq`` epochs.  With ``args.eval_once`` set, it only
    evaluates and returns without training.

    NOTE(review): requires CUDA; uses DataParallel over all visible GPUs.
    """
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    torch.set_num_threads(args.workers)
    print('Training Base Detector : prepare_seed : {:}'.format(args.rand_seed))
    prepare_seed(args.rand_seed)
    # Resolve the train / test procedures registered for this training scheme.
    basic_main, eval_all = procedures['{:}-train'.format(
        args.procedure)], procedures['{:}-test'.format(args.procedure)]
    logger = prepare_logger(args)

    # General data augmentation: normalization plus train/eval/robust pipelines.
    normalize, train_transform, eval_transform, robust_transform = prepare_data_augmentation(
        transforms, args)
    #data_cache = get_path2image( args.shared_img_cache )
    data_cache = None
    # recover converts a normalized tensor back to a PIL image; it is attached
    # to args so downstream procedures can visualize inputs.
    recover = transforms.ToPILImage(normalize)
    args.tensor2imageF = recover
    # The random-scale range must be centered at 1 so the expected scale is
    # the identity.
    assert (args.scale_min +
            args.scale_max) / 2 == 1, 'The scale is not ok : {:} ~ {:}'.format(
                args.scale_min, args.scale_max)
    logger.log('robust_transform : {:}'.format(robust_transform))

    # Model configuration.
    model_config = load_configure(args.model_config, logger)
    shape = (args.height, args.width)
    logger.log('--> {:}\n--> Sigma : {:}, Shape : {:}'.format(
        model_config, args.sigma, shape))

    # Training dataset: train_data uses the augmented transform, safex_data is
    # the same list under the *eval* transform (for evaluating on train data).
    if args.train_lists:
        train_data = Dataset(train_transform, args.sigma,
                             model_config.downsample, args.heatmap_type, shape,
                             args.use_gray, args.mean_point,
                             args.data_indicator, data_cache)
        safex_data = Dataset(eval_transform, args.sigma,
                             model_config.downsample, args.heatmap_type, shape,
                             args.use_gray, args.mean_point,
                             args.data_indicator, data_cache)
        train_data.set_cutout(args.cutout_length)
        safex_data.set_cutout(args.cutout_length)
        train_data.load_list(args.train_lists, args.num_pts, args.boxindicator,
                             args.normalizeL, True)
        safex_data.load_list(args.train_lists, args.num_pts, args.boxindicator,
                             args.normalizeL, True)
        if args.sampler is None:
            train_loader = torch.utils.data.DataLoader(
                train_data,
                batch_size=args.batch_size,
                shuffle=True,
                num_workers=args.workers,
                drop_last=True,
                pin_memory=True)
            safex_loader = torch.utils.data.DataLoader(
                safex_data,
                batch_size=args.batch_size,
                shuffle=True,
                num_workers=args.workers,
                drop_last=True,
                pin_memory=True)
        else:
            # Custom batch composition (project-defined sampler).
            train_sampler = SpecialBatchSampler(train_data, args.batch_size,
                                                args.sampler)
            safex_sampler = SpecialBatchSampler(safex_data, args.batch_size,
                                                args.sampler)
            logger.log('Training-sampler : {:}'.format(train_sampler))
            train_loader = torch.utils.data.DataLoader(
                train_data,
                batch_sampler=train_sampler,
                num_workers=args.workers,
                pin_memory=True)
            safex_loader = torch.utils.data.DataLoader(
                safex_data,
                batch_sampler=safex_sampler,
                num_workers=args.workers,
                pin_memory=True)
        logger.log('Training-data : {:}'.format(train_data))
    else:
        train_data, safex_loader = None, None
        #train_data[0]

    # Evaluation dataloaders: (loader, is_video) pairs — image lists first,
    # then video lists.
    eval_loaders = []
    if args.eval_ilists is not None:
        for eval_ilist in args.eval_ilists:
            eval_idata = Dataset(eval_transform, args.sigma,
                                 model_config.downsample, args.heatmap_type,
                                 shape, args.use_gray, args.mean_point,
                                 args.data_indicator, data_cache)
            eval_idata.load_list(eval_ilist, args.num_pts, args.boxindicator,
                                 args.normalizeL, True)
            eval_iloader = torch.utils.data.DataLoader(
                eval_idata,
                batch_size=args.batch_size,
                shuffle=False,
                num_workers=args.workers,
                pin_memory=True)
            eval_loaders.append((eval_iloader, False))
    if args.eval_vlists is not None:
        for eval_vlist in args.eval_vlists:
            eval_vdata = Dataset(eval_transform, args.sigma,
                                 model_config.downsample, args.heatmap_type,
                                 shape, args.use_gray, args.mean_point,
                                 args.data_indicator, data_cache)
            eval_vdata.load_list(eval_vlist, args.num_pts, args.boxindicator,
                                 args.normalizeL, True)
            eval_vloader = torch.utils.data.DataLoader(
                eval_vdata,
                batch_size=args.batch_size,
                shuffle=False,
                num_workers=args.workers,
                pin_memory=True)
            eval_loaders.append((eval_vloader, True))

    # From 68 points to 49 points, removing the face contour.
    if args.x68to49:
        assert args.num_pts == 68, 'args.num_pts is not 68 vs. {:}'.format(
            args.num_pts)
        if train_data is not None:
            train_data = convert68to49(train_data)
        for eval_loader, is_video in eval_loaders:
            convert68to49(eval_loader.dataset)
        args.num_pts = 49

    # Define the detector.
    detector = obtain_pro_model(model_config, args.num_pts, args.sigma,
                                args.use_gray)
    assert model_config.downsample == detector.downsample, 'downsample is not correct : {:} vs {:}'.format(
        model_config.downsample, detector.downsample)
    logger.log("=> detector :\n {:}".format(detector))
    logger.log("=> Net-Parameters : {:} MB".format(
        count_parameters_in_MB(detector)))

    for i, eval_loader in enumerate(eval_loaders):
        eval_loader, is_video = eval_loader
        logger.log('The [{:2d}/{:2d}]-th testing-data [{:}] = {:}'.format(
            i, len(eval_loaders), 'video' if is_video else 'image',
            eval_loader.dataset))
    logger.log('arguments : {:}\n'.format(args))
    logger.log('train_transform : {:}'.format(train_transform))
    logger.log('eval_transform : {:}'.format(eval_transform))

    # Optimizer / scheduler / loss from the optimization configuration.
    opt_config = load_configure(args.opt_config, logger)
    if hasattr(detector, 'specify_parameter'):
        # Some models provide per-parameter LR / weight-decay groups.
        net_param_dict = detector.specify_parameter(opt_config.LR,
                                                    opt_config.weight_decay)
    else:
        net_param_dict = detector.parameters()
    optimizer, scheduler, criterion = obtain_optimizer(net_param_dict,
                                                       opt_config, logger)
    logger.log('criterion : {:}'.format(criterion))
    detector, criterion = detector.cuda(), criterion.cuda()
    net = torch.nn.DataParallel(detector)

    # Resume logic: prefer the last-info file, else args.init_model, else fresh.
    last_info = logger.last_info()
    if last_info.exists():
        logger.log("=> loading checkpoint of the last-info '{:}' start".format(
            last_info))
        last_info = torch.load(last_info)
        start_epoch = last_info['epoch'] + 1
        checkpoint = torch.load(last_info['last_checkpoint'])
        assert last_info['epoch'] == checkpoint[
            'epoch'], 'Last-Info is not right {:} vs {:}'.format(
                last_info, checkpoint['epoch'])
        net.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        logger.log("=> load-ok checkpoint '{:}' (epoch {:}) done".format(
            logger.last_info(), checkpoint['epoch']))
    elif args.init_model is not None:
        last_checkpoint = load_checkpoint(args.init_model)
        net.load_state_dict(last_checkpoint['detector'])
        logger.log("=> initialize the detector : {:}".format(args.init_model))
        start_epoch = 0
    else:
        logger.log("=> do not find the last-info file : {:}".format(last_info))
        start_epoch = 0

    # Evaluation-only mode: evaluate, dump predictions, and return.
    if args.eval_once is not None:
        logger.log("=> only evaluate the model once")
        #if safex_loader is not None:
        #  safe_results, safe_metas = eval_all(args, [(safex_loader, False)], net, criterion, 'eval-once-train', logger, opt_config, robust_transform)
        #  logger.log('-'*50 + ' evaluate the training set')
        #import pdb; pdb.set_trace()
        eval_results, eval_metas = eval_all(args, eval_loaders, net, criterion,
                                            'eval-once', logger, opt_config,
                                            robust_transform)
        all_predictions = [eval_meta.predictions for eval_meta in eval_metas]
        torch.save(
            all_predictions,
            osp.join(args.save_path,
                     '{:}-predictions.pth'.format(args.eval_once)))
        logger.log('==>> evaluation results : {:}'.format(eval_results))
        logger.log('==>> configuration : {:}'.format(model_config))
        logger.close()
        return

    # Main training and evaluation loop.
    start_time = time.time()
    epoch_time = AverageMeter()
    for epoch in range(start_epoch, opt_config.epochs):
        # Estimate remaining wall-clock time from the average epoch duration.
        need_time = convert_secs2time(
            epoch_time.avg * (opt_config.epochs - epoch), True)
        epoch_str = 'epoch-{:03d}-{:03d}'.format(epoch, opt_config.epochs)
        LRs = scheduler.get_lr()
        logger.log(
            '\n==>>{:s} [{:s}], [{:s}], LR : [{:.5f} ~ {:.5f}], Config : {:}'.
            format(time_string(), epoch_str, need_time, min(LRs), max(LRs),
                   opt_config))

        # Train for one epoch.
        train_loss, train_meta, train_nme = basic_main(args, train_loader, net,
                                                       criterion, optimizer,
                                                       epoch_str, logger,
                                                       opt_config, 'train')
        scheduler.step()
        # Log the results.
        logger.log(
            '==>>{:s} Train [{:}] Average Loss = {:.6f}, NME = {:.2f}'.format(
                time_string(), epoch_str, train_loss, train_nme * 100))

        # Save the model checkpoint, then a small last-info file pointing to
        # it (used by the resume logic above).  'detector' and 'state_dict'
        # intentionally both hold the (DataParallel) weights.
        save_path = save_checkpoint(
            {
                'epoch': epoch,
                'args': deepcopy(args),
                'arch': model_config.arch,
                'detector': net.state_dict(),
                'state_dict': net.state_dict(),
                'scheduler': scheduler.state_dict(),
                'optimizer': optimizer.state_dict(),
            },
            logger.path('model') /
            'seed-{:}-{:}.pth'.format(args.rand_seed, model_config.arch),
            logger)
        last_info = save_checkpoint(
            {
                'epoch': epoch,
                'args': deepcopy(args),
                'last_checkpoint': save_path,
            }, logger.last_info(), logger)

        # Periodic evaluation; the robust transform is only applied on the
        # final epoch (it is expensive).
        if (args.eval_freq is None) or (epoch + 1 == opt_config.epochs) or (
                epoch % args.eval_freq == 0):
            if epoch + 1 == opt_config.epochs:
                _robust_transform = robust_transform
            else:
                _robust_transform = None
            logger.log('')
            eval_results, eval_metas = eval_all(args, eval_loaders, net,
                                                criterion, epoch_str, logger,
                                                opt_config, _robust_transform)
            #save_path = save_checkpoint(eval_metas, logger.path('meta') / '{:}-{:}.pth'.format(model_config.arch, epoch_str), logger)
            save_path = save_checkpoint(
                eval_metas,
                logger.path('meta') /
                'seed-{:}-{:}.pth'.format(args.rand_seed, model_config.arch),
                logger)
            logger.log(
                '==>> evaluation results : {:}\n==>> save evaluation results into {:}.'
                .format(eval_results, save_path))

        # Measure elapsed time.
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log('Final checkpoint into {:}'.format(logger.last_info()))
    logger.close()
def evaluate(args):
    """Run landmark detection on a single face image.

    Loads the last-info file (``args.model``) to find the real checkpoint,
    rebuilds the evaluation transform from the checkpoint's saved args,
    runs the network on the face crop defined by ``args.face`` (a 4-value
    bounding box), prints every predicted landmark, and optionally saves a
    visualization to ``args.save``.  Works on CPU or CUDA per ``args.cuda``.
    """
    if args.cuda:
        assert torch.cuda.is_available(), 'CUDA is not available.'
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = True
    else:
        print('Use the CPU mode')
    print('The image is {:}'.format(args.image))
    print('The model is {:}'.format(args.model))
    # args.model points at the *last-info* file, which stores the path of the
    # actual checkpoint under 'last_checkpoint'.
    last_info = Path(args.model)
    assert last_info.exists(), 'The model path {:} does not exist'.format(
        last_info)
    last_info = torch.load(last_info, map_location=torch.device('cpu'))
    snapshot = last_info['last_checkpoint']
    assert snapshot.exists(), 'The model path {:} does not exist'.format(
        snapshot)
    print('The face bounding box is {:}'.format(args.face))
    assert len(args.face) == 4, 'Invalid face input : {:}'.format(args.face)
    snapshot = torch.load(snapshot, map_location=torch.device('cpu'))
    # param: the argparse namespace saved at training time.
    param = snapshot['args']

    # General data transformation: ImageNet statistics for RGB, 0.5/0.5 gray.
    if param.use_gray == False:
        # Fill color (0-255) for out-of-image pixels in geometric transforms.
        mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]])
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    else:
        mean_fill = (0.5, )
        normalize = transforms.Normalize(mean=[mean_fill[0]], std=[0.5])
    eval_transform = transforms.Compose2V([transforms.ToTensor(), normalize, \
                                           transforms.PreCrop(param.pre_crop_expand), \
                                           transforms.CenterCrop(param.crop_max)])

    model_config = load_configure(param.model_config, None)
    # NOTE(review): input shape is hard-coded to (120, 96) here rather than
    # (param.height, param.width) — see the commented-out alternative below;
    # confirm this matches the trained model.
    dataset = Dataset(eval_transform, param.sigma, model_config.downsample,
                      param.heatmap_type, (120, 96), param.use_gray, None,
                      param.data_indicator)
    #dataset = Dataset(eval_transform, param.sigma, model_config.downsample, param.heatmap_type, (param.height,param.width), param.use_gray, None, param.data_indicator)
    dataset.reset(param.num_pts)

    # The network predicts num_pts + 1 channels; the extra channel is dropped
    # below when slicing with [:-1].
    net = obtain_pro_model(model_config, param.num_pts + 1, param.sigma,
                           param.use_gray)
    net.load_state_dict(remove_module_dict(snapshot['state_dict']))
    if args.cuda:
        net = net.cuda()

    print('Processing the input face image.')
    face_meta = PointMeta(dataset.NUM_PTS, None, args.face, args.image,
                          'BASE-EVAL')
    face_img = pil_loader(args.image, dataset.use_gray)
    # _process_ crops/normalizes the face and returns the affine parameters
    # (transthetas) needed to map predictions back to original coordinates.
    affineImage, heatmaps, mask, norm_trans_points, transthetas, _, _, _, shape = dataset._process_(
        face_img, face_meta, -1)
    #import cv2; cv2.imwrite('temp.png', transforms.ToPILImage(normalize, False)(affineImage))

    # Network forward.
    with torch.no_grad():
        if args.cuda:
            inputs = affineImage.unsqueeze(0).cuda()
        else:
            inputs = affineImage.unsqueeze(0)
        _, _, batch_locs, batch_scos = net(inputs)
        batch_locs, batch_scos = batch_locs.cpu(), batch_scos.cpu()
        (batch_size, C, H, W), num_pts = inputs.size(), param.num_pts
        # Drop the auxiliary (background) point appended by the network.
        locations, scores = batch_locs[0, :-1, :], batch_scos[:, :-1]
        # Map crop-space pixel coordinates -> normalized coords -> original
        # image coordinates via the inverse affine transform.
        norm_locs = normalize_points((H, W), locations.transpose(1, 0))
        norm_locs = torch.cat((norm_locs, torch.ones(1, num_pts)), dim=0)
        transtheta = transthetas[:2, :]
        norm_locs = torch.mm(transtheta, norm_locs)
        real_locs = denormalize_points(shape.tolist(), norm_locs)
        # real_locs rows: x, y, confidence score.
        real_locs = torch.cat((real_locs, scores), dim=0)

        print('the coordinates for {:} facial landmarks:'.format(
            param.num_pts))
        for i in range(param.num_pts):
            point = real_locs[:, i]
            print(
                'the {:02d}/{:02d}-th landmark : ({:.1f}, {:.1f}), score = {:.2f}'.
                format(i, param.num_pts, float(point[0]), float(point[1]),
                       float(point[2])))

        if args.save:
            resize = 512
            image = draw_image_by_points(args.image, real_locs, 2, (255, 0, 0),
                                         args.face, resize)
            image.save(args.save)
            print('save the visualization results into {:}'.format(args.save))
        else:
            print('ignore the visualization procedure')
def main(args):
    """Evaluate a trained detector on image/video lists and dump predictions.

    Loads the checkpoint in ``args.init_model``, rebuilds the evaluation
    transform from the checkpoint's saved args (``xargs``), runs inference
    over every evaluation list, maps predictions back to original image
    coordinates, and saves per-loader ``{paths, ground-truths, predictions}``
    dictionaries under ``args.save_path``.  The temporal-stabilization /
    video-rendering code is retained but disabled (string block and
    ``if False:``).
    """
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    torch.set_num_threads(args.workers)
    print('Training Base Detector : prepare_seed : {:}'.format(args.rand_seed))
    prepare_seed(args.rand_seed)
    logger = prepare_logger(args)

    checkpoint = load_checkpoint(args.init_model)
    # xargs: the argparse namespace saved when the model was trained.
    xargs = checkpoint['args']
    logger.log('Previous args : {:}'.format(xargs))

    # General data transformation: ImageNet statistics for RGB, 0.5/0.5 gray.
    if xargs.use_gray == False:
        # Fill color (0-255) for out-of-image pixels in geometric transforms.
        mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]])
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    else:
        mean_fill = (0.5, )
        normalize = transforms.Normalize(mean=[mean_fill[0]], std=[0.5])
    eval_transform = transforms.Compose2V([transforms.ToTensor(), normalize, \
                                           transforms.PreCrop(xargs.pre_crop_expand), \
                                           transforms.CenterCrop(xargs.crop_max)])

    # Model configuration.
    model_config = load_configure(xargs.model_config, logger)
    shape = (xargs.height, xargs.width)
    logger.log('--> {:}\n--> Sigma : {:}, Shape : {:}'.format(
        model_config, xargs.sigma, shape))

    # Evaluation dataloaders: (loader, is_video) pairs — image lists first,
    # then video lists.
    eval_loaders = []
    if args.eval_ilists is not None:
        for eval_ilist in args.eval_ilists:
            eval_idata = EvalDataset(eval_transform, xargs.sigma,
                                     model_config.downsample,
                                     xargs.heatmap_type, shape, xargs.use_gray,
                                     xargs.data_indicator)
            eval_idata.load_list(eval_ilist, args.num_pts, xargs.boxindicator,
                                 xargs.normalizeL, True)
            eval_iloader = torch.utils.data.DataLoader(
                eval_idata,
                batch_size=args.batch_size,
                shuffle=False,
                num_workers=args.workers,
                pin_memory=True)
            eval_loaders.append((eval_iloader, False))
    if args.eval_vlists is not None:
        for eval_vlist in args.eval_vlists:
            eval_vdata = EvalDataset(eval_transform, xargs.sigma,
                                     model_config.downsample,
                                     xargs.heatmap_type, shape, xargs.use_gray,
                                     xargs.data_indicator)
            eval_vdata.load_list(eval_vlist, args.num_pts, xargs.boxindicator,
                                 xargs.normalizeL, True)
            eval_vloader = torch.utils.data.DataLoader(
                eval_vdata,
                batch_size=args.batch_size,
                shuffle=False,
                num_workers=args.workers,
                pin_memory=True)
            eval_loaders.append((eval_vloader, True))

    # Define the detector and load the trained weights.
    detector = obtain_pro_model(model_config, xargs.num_pts, xargs.sigma,
                                xargs.use_gray)
    assert model_config.downsample == detector.downsample, 'downsample is not correct : {:} vs {:}'.format(
        model_config.downsample, detector.downsample)
    logger.log("=> detector :\n {:}".format(detector))
    logger.log("=> Net-Parameters : {:} MB".format(
        count_parameters_in_MB(detector)))
    logger.log('=> Eval-Transform : {:}'.format(eval_transform))

    detector = detector.cuda()
    net = torch.nn.DataParallel(detector)
    net.eval()
    # Checkpoint was saved from a DataParallel model, so keys match `net`.
    net.load_state_dict(checkpoint['detector'])
    cpu = torch.device('cpu')
    # use_stable holds two flags for the (currently disabled) X/Y
    # stabilization passes below.
    assert len(args.use_stable) == 2

    for iLOADER, (loader, is_video) in enumerate(eval_loaders):
        logger.log(
            '{:} The [{:2d}/{:2d}]-th test set [{:}] = {:} with {:} batches.'.
            format(time_string(), iLOADER, len(eval_loaders),
                   'video' if is_video else 'image', loader.dataset,
                   len(loader)))
        with torch.no_grad():
            # Per-frame ground-truth points, predictions, and image paths.
            all_points, all_results, all_image_ps = [], [], []
            for i, (inputs, targets, masks, normpoints, transthetas,
                    image_index, nopoints, shapes) in enumerate(loader):
                image_index = image_index.squeeze(1).tolist()
                (batch_size, C, H, W), num_pts = inputs.size(), xargs.num_pts
                # batch_heatmaps is a list for stage-predictions, each element
                # should be [Batch, C, H, W].
                if xargs.procedure == 'heatmap':
                    batch_features, batch_heatmaps, batch_locs, batch_scos = net(
                        inputs)
                    # Drop the auxiliary (background) point channel.
                    batch_locs = batch_locs[:, :-1, :]
                else:
                    batch_locs = net(inputs)
                batch_locs = batch_locs.detach().to(cpu)
                # Map each sample's predictions back to original image coords.
                for ibatch, (imgidx, nopoint) in enumerate(
                        zip(image_index, nopoints)):
                    if xargs.procedure == 'heatmap':
                        # Heatmap coords are in crop pixels: normalize first.
                        norm_locs = normalize_points(
                            (H, W), batch_locs[ibatch].transpose(1, 0))
                        norm_locs = torch.cat(
                            (norm_locs, torch.ones(1, num_pts)), dim=0)
                    else:
                        # Regression outputs are already normalized.
                        norm_locs = torch.cat((batch_locs[ibatch].permute(
                            1, 0), torch.ones(1, num_pts)),
                                              dim=0)
                    transtheta = transthetas[ibatch][:2, :]
                    norm_locs = torch.mm(transtheta, norm_locs)
                    real_locs = denormalize_points(shapes[ibatch].tolist(),
                                                   norm_locs)
                    #real_locs = torch.cat((real_locs, batch_scos[ibatch].permute(1,0)), dim=0)
                    # Append a dummy all-ones score row.
                    real_locs = torch.cat((real_locs, torch.ones(1, num_pts)),
                                          dim=0)
                    xpoints = loader.dataset.labels[imgidx].get_points().numpy(
                    )
                    image_path = loader.dataset.datas[imgidx]
                    # Put into the list.
                    all_points.append(torch.from_numpy(xpoints))
                    all_results.append(real_locs)
                    all_image_ps.append(image_path)
        total = len(all_points)
        logger.log(
            '{:} The [{:2d}/{:2d}]-th test set finishes evaluation : {:} frames/images'
            .format(time_string(), iLOADER, len(eval_loaders), total))
        # Disabled stabilization + video rendering passes (kept for reference).
        """
    if args.use_stable[0] > 0:
      save_dir = Path( osp.join(args.save_path, '{:}-X-{:03d}'.format(args.model_name, iLOADER)) )
      save_dir.mkdir(parents=True, exist_ok=True)
      wrap_parallel = WrapParallel(save_dir, all_image_ps, all_results, all_points, 180, (255, 0, 0))
      wrap_loader = torch.utils.data.DataLoader(wrap_parallel, batch_size=args.workers, shuffle=False, num_workers=args.workers, pin_memory=True)
      for iL, INDEXES in enumerate(wrap_loader): _ = INDEXES
      cmd = 'ffmpeg -y -i {:}/%06d.png -framerate 30 {:}.avi'.format(save_dir, save_dir)
      logger.log('{:} possible >>>>> : {:}'.format(time_string(), cmd))
      os.system( cmd )
    if args.use_stable[1] > 0:
      save_dir = Path( osp.join(args.save_path, '{:}-Y-{:03d}'.format(args.model_name, iLOADER)) )
      save_dir.mkdir(parents=True, exist_ok=True)
      Xpredictions, Xgts = torch.stack(all_results), torch.stack(all_points)
      new_preds = fc_solve(Xgts, Xpredictions, is_cuda=True)
      wrap_parallel = WrapParallel(save_dir, all_image_ps, new_preds, all_points, 180, (0, 0, 255))
      wrap_loader = torch.utils.data.DataLoader(wrap_parallel, batch_size=args.workers, shuffle=False, num_workers=args.workers, pin_memory=True)
      for iL, INDEXES in enumerate(wrap_loader): _ = INDEXES
      cmd = 'ffmpeg -y -i {:}/%06d.png -framerate 30 {:}.avi'.format(save_dir, save_dir)
      logger.log('{:} possible >>>>> : {:}'.format(time_string(), cmd))
      os.system( cmd )
    """
        # Save raw results for this loader.  NOTE(review): 'predictions' is
        # saved as the list `all_results`, not the stacked `Xpredictions` —
        # presumably intentional; confirm consumers expect a list.
        Xpredictions, Xgts = torch.stack(all_results), torch.stack(all_points)
        save_path = Path(
            osp.join(args.save_path,
                     '{:}-result-{:03d}.pth'.format(args.model_name, iLOADER)))
        torch.save(
            {
                'paths': all_image_ps,
                'ground-truths': Xgts,
                'predictions': all_results
            }, save_path)
        logger.log('{:} save into {:}'.format(time_string(), save_path))
        # Disabled: SRT-stabilized comparison video rendering.
        if False:
            new_preds = fc_solve_v2(Xgts, Xpredictions, is_cuda=True)
            # Create the dir.
            save_dir = Path(
                osp.join(args.save_path,
                         '{:}-T-{:03d}'.format(args.model_name, iLOADER)))
            save_dir.mkdir(parents=True, exist_ok=True)
            wrap_parallel = WrapParallelV2(save_dir, all_image_ps, Xgts,
                                           all_results, new_preds, all_points,
                                           180, [args.model_name, 'SRT'])
            wrap_parallel[0]
            wrap_loader = torch.utils.data.DataLoader(
                wrap_parallel,
                batch_size=args.workers,
                shuffle=False,
                num_workers=args.workers,
                pin_memory=True)
            # Iterating the loader triggers the parallel frame rendering.
            for iL, INDEXES in enumerate(wrap_loader):
                _ = INDEXES
            cmd = 'ffmpeg -y -i {:}/%06d.png -vb 5000k {:}.avi'.format(
                save_dir, save_dir)
            logger.log('{:} possible >>>>> : {:}'.format(time_string(), cmd))
            os.system(cmd)
    logger.close()
    return