def validate_video(self, loader, model, criterion, epoch, args):
    """Run video-level validation on the Charades test set.

    For each video, runs the model on all sampled clips, applies sigmoid and
    temporal window smoothing, then max-pools the per-clip scores over time to
    obtain one prediction vector per video.

    Args:
        loader: DataLoader yielding (input, target, meta, vid, Auxili_info, raw_test);
            all clips in a batch belong to the same video (asserted below).
        model: network, called as model(input, Auxili_info, key).
        criterion: loss module; only switched to eval mode (no loss computed here).
        epoch: current epoch, recorded into each batch's meta dict.
        args: run configuration (unused in this variant).

    Returns:
        Tuple (mAP, prec1, prec5) over the whole test set.
    """
    batch_time = AverageMeter()
    outputs = []
    gts = []
    ids = []

    # switch to evaluate mode
    model.eval()
    criterion.eval()

    # Extra flag tensor forwarded to the model; presumably selects an
    # inference branch inside the model -- TODO confirm with model code.
    key = torch.IntTensor([3])
    end = time.time()
    kernelsize = 1
    print('applying smoothing with kernelsize {}'.format(kernelsize))
    # Log roughly every 7% of the loader; clamp to >= 1 so short loaders
    # (len(loader) < 15 => int(...) == 0) do not raise ZeroDivisionError.
    print_interval = max(1, int(len(loader) * 0.07))
    # `volatile=True` was removed in PyTorch 0.4; torch.no_grad() is the
    # supported way to disable autograd during evaluation.
    with torch.no_grad():
        for i, (input, target, meta, vid, Auxili_info, raw_test) in enumerate(loader):
            gc.collect()
            meta['epoch'] = epoch
            # `async` is a reserved keyword since Python 3.7; PyTorch renamed
            # the argument to `non_blocking`.
            target = target.long().cuda(non_blocking=True)
            assert target[0, :].eq(target[1, :]).all(), "val_video not synced"
            input_var = input.cuda()
            output = model(input_var, Auxili_info, key)

            # store predictions: sigmoid scores per clip, smoothed over time,
            # then max-pooled over clips into one vector for the video
            output = torch.nn.Sigmoid()(output)
            output = smooth.winsmooth(output, kernelsize=kernelsize)
            output_video = output.max(dim=0)[0]
            outputs.append(output_video.cpu().numpy())
            # move targets to host so np.vstack below can consume them
            gts.append(target[0, :].cpu())
            ids.append(meta['id'][0])
            batch_time.update(time.time() - end)
            end = time.time()
            if i % print_interval == 0:
                print(
                    'Test2: [{0}/{1}]\t'
                    'Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
                        i, len(loader), batch_time=batch_time))
    mAP, _, ap = map.charades_map(np.vstack(outputs), np.vstack(gts))
    prec1, prec5 = accuracy(torch.Tensor(np.vstack(outputs)),
                            torch.Tensor(np.vstack(gts)), topk=(1, 5))
    # print(ap)
    print(' * mAP {:.3f}'.format(mAP))
    print(' * prec1 {:.3f} * prec5 {:.3f}'.format(prec1[0], prec5[0]))
    return mAP, prec1[0], prec5[0]
def validate_video(self, loader, model, criterion, epoch, args):
    """Run video-level validation on the Charades test set.

    Runs the model on each batch of clips, lets the criterion post-process the
    raw model outputs (and compute a loss), max-pools per-clip scores into one
    vector per video, and finally reports mAP / prec@1 / prec@5 and writes a
    submission file.

    Args:
        loader: DataLoader yielding (input, target, meta); all clips in a
            batch belong to the same video (asserted below).
        model: network, called as model(input).
        criterion: callable taking (*model_outputs, target, meta) and a
            `synchronous` flag; returns (processed_output, loss).
        epoch: current epoch, recorded into meta and used for the output filename.
        args: run configuration; uses args.print_freq and args.cache.

    Returns:
        Tuple (mAP, prec1, prec5) over the whole test set.
    """
    with torch.no_grad():
        batch_time = AverageMeter()
        outputs = []
        gts = []
        ids = []

        # switch to evaluate mode
        model.eval()
        criterion.eval()

        end = time.time()
        for i, (input, target, meta) in enumerate(loader):
            gc.collect()
            meta['epoch'] = epoch
            # `async` is a reserved keyword since Python 3.7; PyTorch renamed
            # the argument to `non_blocking`. `volatile=True` was removed in
            # PyTorch 0.4 and is superseded by the enclosing torch.no_grad().
            target = target.long().cuda(non_blocking=True)
            assert target[0, :].eq(
                target[1, :]).all(), "val_video not synced"
            input_var = input.cuda()
            target_var = target
            output = model(input_var)
            # criterion both post-processes the output tuple and computes loss
            output, loss = criterion(*(output + (target_var, meta)),
                                     synchronous=True)

            # store predictions: max-pool over clips to one vector per video
            output_video = output.max(dim=0)[0]
            outputs.append(output_video.data.cpu().numpy())
            # move targets to host so np.vstack below can consume them
            gts.append(target[0, :].cpu())
            ids.append(meta['id'][0])
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                print('Test2: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.
                      format(i, len(loader), batch_time=batch_time))
        mAP, _, ap = map.charades_map(np.vstack(outputs), np.vstack(gts))
        prec1, prec5 = accuracy(torch.Tensor(np.vstack(outputs)),
                                torch.Tensor(np.vstack(gts)), topk=(1, 5))
        print(ap)
        print(' * mAP {:.3f}'.format(mAP))
        print(' * prec1 {:.3f} * prec5 {:.3f}'.format(prec1[0], prec5[0]))
        submission_file(
            ids, outputs,
            '{}/epoch_{:03d}.txt'.format(args.cache, epoch + 1))
        return mAP, prec1[0], prec5[0]
def validate_video(model, model_dir, criterion, loader, args):
    """Load a trained checkpoint from disk and run video-level validation.

    Restores the weights at `model_dir` into `model`, wraps it in
    DataParallel, then evaluates exactly like the in-training validation:
    sigmoid + temporal smoothing per clip, max-pool over clips per video.

    Args:
        model: network instance whose state dict matches the checkpoint;
            called as model(input, Auxili_info).
        model_dir: path to a saved state_dict (torch.save output).
        criterion: loss module; only switched to eval mode (no loss computed).
        loader: DataLoader yielding (input, target, meta, vid, Auxili_info, raw_test).
        args: run configuration (unused in this variant).

    Returns:
        Tuple (mAP, prec1, prec5) over the whole test set.
    """
    outputs = []
    gts = []
    ids = []

    # restore trained weights before evaluating
    trained_model = torch.load(model_dir)
    model.load_state_dict(trained_model)
    model = torch.nn.DataParallel(model).cuda()

    # switch to evaluate mode
    model.eval()
    criterion.eval()

    kernelsize = 1
    print('applying smoothing with kernelsize {}'.format(kernelsize))
    # Log roughly every 7% of the loader; clamp to >= 1 so short loaders
    # (len(loader) < 15 => int(...) == 0) do not raise ZeroDivisionError.
    print_interval = max(1, int(len(loader) * 0.07))
    # `volatile=True` was removed in PyTorch 0.4; torch.no_grad() is the
    # supported way to disable autograd during evaluation.
    with torch.no_grad():
        for i, (input, target, meta, vid, Auxili_info, raw_test) in enumerate(loader):
            gc.collect()
            # `async` is a reserved keyword since Python 3.7; PyTorch renamed
            # the argument to `non_blocking`.
            target = target.long().cuda(non_blocking=True)
            assert target[0, :].eq(target[1, :]).all(), "val_video not synced"
            input_var = input.cuda()
            output = model(input_var, Auxili_info)

            # store predictions: sigmoid scores per clip, smoothed over time,
            # then max-pooled over clips into one vector for the video
            output = torch.nn.Sigmoid()(output)
            output = smooth.winsmooth(output, kernelsize=kernelsize)
            output_video = output.max(dim=0)[0]
            outputs.append(output_video.cpu().numpy())
            # move targets to host so np.vstack below can consume them
            gts.append(target[0, :].cpu())
            ids.append(meta['id'][0])
            if i % print_interval == 0:
                print('Test2: [{0}/{1}]\t'.format(i, len(loader)))
    mAP, _, ap = map.charades_map(np.vstack(outputs), np.vstack(gts))
    prec1, prec5 = accuracy(torch.Tensor(np.vstack(outputs)),
                            torch.Tensor(np.vstack(gts)), topk=(1, 5))
    # print(ap)
    print(' * mAP {:.3f}'.format(mAP))
    print(' * prec1 {:.3f} * prec5 {:.3f}'.format(prec1[0], prec5[0]))
    return mAP, prec1[0], prec5[0]
def validate_video(self, loader, model, epoch, args):
    """Run video-level validation on the Charades test set.

    Softmax-normalizes the model's classification output per clip, averages
    the per-clip distributions over the video, reports mAP, and writes a
    submission file.

    Args:
        loader: DataLoader whose batches are parsed by parse() into
            (inputs, target, meta); all clips in a batch belong to the same
            video (asserted below).
        model: network taking *inputs; its last output is the classification.
        epoch: current epoch, used for the submission filename.
        args: run configuration; uses args.print_freq and args.cache.

    Returns:
        Dict of metrics; currently only {'mAP': ...}.
    """
    timer = Timer()
    outputs, gts, ids = [], [], []
    metrics = {}

    # switch to evaluate mode
    model.eval()

    # `volatile=True` was removed in PyTorch 0.4; torch.no_grad() is the
    # supported way to disable autograd during evaluation.
    with torch.no_grad():
        for i, x in enumerate(loader):
            inputs, target, meta = parse(x)
            # `async` is a reserved keyword since Python 3.7; PyTorch renamed
            # the argument to `non_blocking`.
            target = target.long().cuda(non_blocking=True)
            assert target[0, :].eq(target[1, :]).all(), "val_video not synced"
            input_vars = [inp.cuda() for inp in inputs]
            output = model(*input_vars)[-1]  # classification should be last output
            output = torch.nn.Softmax(dim=1)(output)

            # store predictions: mean over clips -> one distribution per video
            output_video = output.mean(dim=0)
            outputs.append(output_video.data.cpu().numpy())
            # move targets to host so np.vstack below can consume them
            gts.append(target[0, :].cpu())
            ids.append(meta['id'][0])
            timer.tic()
            if i % args.print_freq == 0:
                print('Test2: [{0}/{1}]\t'
                      'Time {timer.val:.3f} ({timer.avg:.3f})'.format(
                          i, len(loader), timer=timer))
    # mAP, _, ap = meanap.map(np.vstack(outputs), np.vstack(gts))
    mAP, _, ap = meanap.charades_map(np.vstack(outputs), np.vstack(gts))
    metrics['mAP'] = mAP
    print(ap)
    print(' * mAP {:.3f}'.format(mAP))
    submission_file(ids, outputs,
                    '{}/epoch_{:03d}.txt'.format(args.cache, epoch + 1))
    return metrics
def validate_video(self, loader, model, epoch, args):
    """Run video-level validation on the Charades test set.

    Softmax-normalizes per-clip predictions, averages them over each video's
    clips, reports Charades mAP, and writes a submission file.

    Args:
        loader: DataLoader yielding (input, target, meta); all clips in a
            batch belong to the same video (asserted below).
        model: network, called as model(input).
        epoch: current epoch, used for the submission filename.
        args: run configuration; uses args.print_freq and args.cache.

    Returns:
        The video-level mAP.
    """
    batch_time = AverageMeter()
    outputs = []
    gts = []
    ids = []

    # switch to evaluate mode
    model.eval()

    end = time.time()
    # `volatile=True` was removed in PyTorch 0.4; torch.no_grad() is the
    # supported way to disable autograd during evaluation.
    with torch.no_grad():
        for i, (input, target, meta) in enumerate(loader):
            # `async` is a reserved keyword since Python 3.7; PyTorch renamed
            # the argument to `non_blocking`. target: (clips, n_classes).
            target = target.long().cuda(non_blocking=True)
            assert target[0, :].eq(target[1, :]).all(), "val_video not synced"
            input_var = input.cuda()
            output = model(input_var)
            # NOTE: removed leftover debugger breakpoint
            # (`import ipdb; ipdb.set_trace()`) that halted every run here.
            output = torch.nn.Softmax(dim=1)(output)

            # store predictions: mean over clips -> one distribution per video
            output_video = output.mean(dim=0)
            outputs.append(output_video.data.cpu().numpy())
            # move targets to host so np.vstack below can consume them.
            # Only the first row is kept -- all rows are asserted equal above,
            # so it represents the whole batch/video; same for meta['id'][0].
            gts.append(target[0, :].cpu())
            ids.append(meta['id'][0])
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                print('Test2: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
                          i, len(loader), batch_time=batch_time))
    # mAP, _, ap = map.map(np.vstack(outputs), np.vstack(gts))
    mAP, _, ap = map.charades_map(np.vstack(outputs), np.vstack(gts))
    print(ap)
    print(' * mAP {:.3f}'.format(mAP))
    submission_file(
        ids, outputs,
        '{}/epoch_{:03d}.txt'.format(args.cache, epoch + 1))
    return mAP
out_dict[V_id[j]]['out'].append((C_logits_d[j, :, :],int(data_info.data.cpu().numpy()[j,3]))) # add to mAP test if num_iter == num_steps_per_update: num_iter = 0 optimizer.step() optimizer.zero_grad() # warm = warm + num_steps_per_update warm = warm + 1 warm_up(0.0001, args.init_lr, warm, step_max=int((Epoch/num_steps_per_update))) # just pass_ if epoch % int(0.05*Epoch) == 0: print('Epoch: [{0}][{1}/{2}]\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'Loss_cls {loss_cls.val:.4f} ({loss_cls.avg:.4f})\t' 'Loss_loc {loss_loc.val:.4f} ({loss_loc.avg:.4f})\t' .format(round((epoch/Epoch),3), i, len(train_loader), loss=losses, loss_cls=loss_cls, loss_loc=loss_loc)) if epoch % int(0.2 * Epoch) == 0: print('This batch lr:\t', optimizer.param_groups[0]['lr']) Map = winsmooth(out_dict) mAP, _, ap = map.charades_map(np.vstack(Map[0]), np.vstack(Map[1])) print('The final mAp is:', mAP) print('Lr: {:.5f}\t'.format(optimizer.param_groups[0]['lr'])) checkpoint_name = "%03d_%s" % (int((epoch+1)/Epoch), args.dataname+"_model.pth") cur_path = os.path.join(model_dir, args.arch + checkpoint_name) torch.save(Model.module.state_dict(), cur_path) args.mAP = mAP save_log(args, optimizer.param_groups[0]['lr'], losses.avg, int((epoch+1)/Epoch), train_log_dir)