# Assumes the module-level context of the original script: imports of
# torch, os, cudnn, tc, davis, Wrap, partial_load, and globals
# `params`, `args`, `use_cuda`, `test`.
def main():
    global best_loss
    start_epoch = 0  # start from epoch 0 or last checkpoint epoch

    val_loader = torch.utils.data.DataLoader(
        davis.DavisSet(params, is_train=False),
        batch_size=int(params['batchSize']), shuffle=False,
        num_workers=args.workers, pin_memory=True)

    model = tc.TimeCycle()
    model = Wrap(model, 'forward_affinity')
    model = torch.nn.DataParallel(model).cuda()

    cudnn.benchmark = False
    print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))

    # Load checkpoint.
    if os.path.isfile(args.resume):
        print('==> Resuming from checkpoint..')
        checkpoint = torch.load(args.resume)
        partial_load(checkpoint['state_dict'], model)
        del checkpoint

    model.eval()

    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    print('\nTesting')
    test_loss = test(val_loader, model, 1, use_cuda)
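# NOTE: `Wrap` is not defined in this excerpt. A minimal sketch of such an
# adapter, assuming it just dispatches forward() to a named method on the
# wrapped model; the name `func_name` and the `func` override are
# assumptions inferred from the call sites, not the repo's actual code.
class Wrap(torch.nn.Module):
    def __init__(self, model, func_name='forward'):
        super().__init__()
        self.model = model
        self.func_name = func_name

    def forward(self, *args, func=None, **kwargs):
        # An explicit `func` kwarg overrides the default method, matching
        # calls like model(x, None, True, func='forward') below.
        return getattr(self.model, func or self.func_name)(*args, **kwargs)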
def main():
    global best_loss
    start_epoch = 0  # start from epoch 0 or last checkpoint epoch

    # Fixed evaluation-time hyperparameters.
    args.kldv_coef = 1
    args.long_coef = 1
    args.frame_transforms = 'crop'
    args.frame_aug = 'grid'
    args.npatch = 49
    args.img_size = 256
    args.pstride = [0.5, 0.5]
    args.patch_size = [64, 64, 3]
    args.visualize = False

    model = tc.TimeCycle(args, vis=vis).cuda()

    # Infer the encoder's spatial stride empirically: push a dummy clip
    # through the model and compare input and feature-map sizes.
    params['mapScale'] = model(torch.zeros(1, 10, 3, 320, 320).cuda(),
                               just_feats=True)[1].shape[-2:]
    params['mapScale'] = 320 // np.array(params['mapScale'])

    val_loader = torch.utils.data.DataLoader(
        davis.DavisSet(params, is_train=False) if 'jhmdb' not in args.filelist
        else jhmdb.JhmdbSet(params, is_train=False),
        batch_size=int(params['batchSize']), shuffle=False,
        num_workers=args.workers, pin_memory=True)

    cudnn.benchmark = False
    print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))

    # Load checkpoint, skipping the training head.
    if os.path.isfile(args.resume):
        print('==> Resuming from checkpoint..')
        checkpoint = torch.load(args.resume)
        utils.partial_load(checkpoint['model'], model, skip_keys=['head'])
        del checkpoint

    model.eval()
    model = model.cuda()

    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    print('\nTesting')
    test_loss = test(val_loader, model, 1, use_cuda, args)
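# NOTE: `utils.partial_load` is not shown here. A sketch of the assumed
# behavior: copy checkpoint weights whose names match the model's state
# dict, skip any key containing a `skip_keys` substring, and leave the
# remaining parameters untouched.
def partial_load(pretrained_dict, model, skip_keys=()):
    model_dict = model.state_dict()
    filtered = {k: v for k, v in pretrained_dict.items()
                if k in model_dict and not any(s in k for s in skip_keys)}
    model_dict.update(filtered)
    model.load_state_dict(model_dict)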
def main():
    global best_loss
    start_epoch = 0  # start from epoch 0 or last checkpoint epoch

    model = tc.TimeCycle(args).cuda()
    model = Wrap(model)

    # Infer the encoder's spatial stride empirically, as above.
    params['mapScale'] = model(torch.zeros(1, 10, 3, 320, 320).cuda(),
                               None, True, func='forward')[1].shape[-2:]
    params['mapScale'] = 320 // np.array(params['mapScale'])

    val_loader = torch.utils.data.DataLoader(
        davis.DavisSet(params, is_train=False),
        batch_size=int(params['batchSize']), shuffle=False,
        num_workers=args.workers, pin_memory=True)

    cudnn.benchmark = False
    print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))

    # Load checkpoint into the wrapped model.
    if os.path.isfile(args.resume):
        print('==> Resuming from checkpoint..')
        checkpoint = torch.load(args.resume)
        utils.partial_load(checkpoint['model'], model.model)
        del checkpoint

    model.eval()
    model = torch.nn.DataParallel(model).cuda()

    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    print('\nTesting')
    with torch.no_grad():
        test_loss = test(val_loader, model, 1, use_cuda)
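# NOTE: all three variants read argparse-style attributes from a global
# `args` (resume, workers, save_path, filelist, ...). A minimal, assumed
# parser matching those attributes; the real script likely defines more
# flags and different defaults.
import argparse

parser = argparse.ArgumentParser(description='evaluation on DAVIS / JHMDB')
parser.add_argument('--resume', default='', type=str, help='path to checkpoint')
parser.add_argument('--workers', default=4, type=int, help='data-loading workers')
parser.add_argument('--save-path', default='./results', type=str)
parser.add_argument('--filelist', default='davis_vallist.txt', type=str)
args = parser.parse_args()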