def train(train_loader, model, criterion, optimizer, epoch, use_cuda):
    """Run one training epoch.

    Args:
        train_loader: iterable of (inputs, targets) batches.
        model: network to train (switched to train mode here).
        criterion: loss function.
        optimizer: optimizer stepped once per batch.
        epoch: current epoch index (unused here; kept for caller compatibility).
        use_cuda: move batches to GPU when True.

    Returns:
        (average loss, average top-1 accuracy) over the epoch.
    """
    # switch to train mode
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()

    bar = Bar('Processing', max=len(train_loader))
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        if use_cuda:
            # Fix: `cuda(async=True)` is a SyntaxError on Python 3.7+
            # (`async` became a keyword); the modern spelling is non_blocking.
            inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)

        # compute output (Variable wrappers are unnecessary since PyTorch 0.4)
        outputs = model(inputs)
        loss = criterion(outputs, targets)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        # Fix: `loss.data[0]` / `prec[0]` indexing was removed in PyTorch 0.4;
        # use .item() to extract Python scalars.
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
            batch=batch_idx + 1,
            size=len(train_loader),
            data=data_time.val,
            bt=batch_time.val,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            top1=top1.avg,
            top5=top5.avg,
        )
        bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def test(testloader, net, model, criterion, epoch, use_cuda, device):
    """Evaluate `model` on features produced by `net` (conv2 outputs).

    Args:
        testloader: iterable of (inputs, targets) batches.
        net: feature extractor; called with out_feat_keys=['conv2'].
        model: classifier head evaluated on the first returned feature.
        criterion: loss function.
        epoch: current epoch index (unused; kept for caller compatibility).
        use_cuda: move batches to `device` when True.
        device: target device for the batches.

    Returns:
        (average loss, average top-1 accuracy).
    """
    global best_acc

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    net.eval()
    model.eval()

    end = time.time()
    bar = Bar('Processing', max=len(testloader))
    # Fix: evaluation previously tracked gradients, wasting memory/compute;
    # wrap the whole loop in no_grad.
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            # measure data loading time
            data_time.update(time.time() - end)

            if use_cuda:
                inputs, targets = inputs.to(device), targets.to(device)

            # compute output; f2 is produced by the extractor but unused here
            f1, f2 = net(inputs, inputs, out_feat_keys=['conv2'])
            outputs = model(f1)
            loss = criterion(outputs, targets)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                batch=batch_idx + 1,
                size=len(testloader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                top1=top1.avg,
                top5=top5.avg,
            )
            bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def test(val_loader, model, criterion, epoch, use_cuda):
    """Evaluate `model` on `val_loader`.

    Returns:
        (average loss, average top-1 accuracy).
    """
    global best_acc

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    bar = Bar('Processing', max=len(val_loader))
    # Fix: Variable(..., volatile=True) is deprecated and a no-op since
    # PyTorch 0.4; torch.no_grad() is the supported replacement.
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()

            # compute output
            outputs = model(inputs)
            loss = criterion(outputs, targets)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
            # Fix: loss.data[0] / prec[0] indexing removed in PyTorch 0.4.
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                batch=batch_idx + 1,
                size=len(val_loader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                top1=top1.avg,
                top5=top5.avg,
            )
            bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def test(args, testloader, enc, dec, cl, disc_l, disc_v, epoch, use_cuda):
    """Evaluate reconstruction-based anomaly detection.

    Scores each sample by mean squared reconstruction error and reports
    ROC-AUC (negated scores, so lower error => more "normal").

    Returns:
        Average ROC-AUC over batches (weighted by batch size).
    """
    global best_acc

    batch_time = AverageMeter()
    data_time = AverageMeter()
    top1 = AverageMeter()

    # switch to evaluate mode
    enc.eval()
    dec.eval()
    cl.eval()
    disc_l.eval()
    disc_v.eval()

    end = time.time()
    bar = Bar('Processing', max=len(testloader))
    # Fix: no_grad was commented out, so evaluation tracked gradients;
    # it is required here and replaces the deprecated Variable wrappers.
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            # measure data loading time
            data_time.update(time.time() - end)

            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()

            # compute per-sample MSE over channel/height/width
            recon = dec(enc(inputs))
            scores = torch.mean(torch.pow((inputs - recon), 2), dim=[1, 2, 3])

            prec1 = roc_auc_score(targets.cpu().numpy(),
                                  -scores.cpu().numpy())
            top1.update(prec1, inputs.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | top1: {top1: .4f} '.format(
                batch=batch_idx + 1,
                size=len(testloader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                top1=top1.avg,
            )
            bar.next()
    bar.finish()
    return top1.avg
def validate(val_loader, val_loader_len, model, criterion):
    """Evaluate `model` on `val_loader`.

    Returns:
        (average loss, average top-1 accuracy).
    """
    bar = Bar('Processing', max=val_loader_len)

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    # Fix: the original called loss.backward() during validation, which
    # accumulates gradients into the model and corrupts the next training
    # step; validation must not backprop. no_grad (previously commented
    # out) also saves memory.
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            target = target.cuda(non_blocking=True)

            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))
            top5.update(prec5.item(), input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                batch=i + 1,
                size=val_loader_len,
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                top1=top1.avg,
                top5=top5.avg,
            )
            bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def train(trainloader, model_pre, model_fcs, optimizer, epoch):
    """Train the `model_fcs` head for one epoch on (rgb, gel, label) batches.

    `model_pre` produces features from the two input modalities; only
    `model_fcs` is switched to train mode here. Returns the average loss.
    """
    model_fcs.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    avg_acc = AverageMeter()

    tick = time.time()
    bar = Bar('Processing', max=len(trainloader))
    for step, (rgb, gel, label) in enumerate(trainloader):
        # time spent waiting on the loader
        data_time.update(time.time() - tick)

        rgb, gel, label = rgb.cuda(), gel.cuda(), label.cuda()

        # forward: front-end features, then the trainable head
        features = model_pre(rgb, gel)
        logits = model_fcs(features)
        loss = F.cross_entropy(logits, label)

        # hard predictions for the running accuracy estimate
        predicted = logits.argmax(dim=1)
        acc = accuracy_score(predicted.cpu().data.numpy(),
                             label.cpu().data.numpy())

        # bookkeeping
        n = rgb.size(0)
        losses.update(loss.item(), n)
        avg_acc.update(acc, n)

        # backward + SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # wall-clock per batch
        batch_time.update(time.time() - tick)
        tick = time.time()

        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f}| ACC(input): {acc: .4f}'.format(
            batch=step + 1,
            size=len(trainloader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            acc=avg_acc.avg,
        )
        bar.next()
    bar.finish()
    return losses.avg
def test(testloader, model, criterion, epoch, use_cuda):
    """Evaluate `model` on `testloader`, logging to the module-level `writer`.

    Returns:
        (average loss, average top-1 accuracy).
    """
    global best_acc

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    DEBUG = False
    bar = Bar('Processing', max=len(testloader))
    # Fix: evaluation previously tracked gradients needlessly.
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            # measure data loading time
            data_time.update(time.time() - end)

            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()

            # compute output
            outputs = model(inputs)
            loss = criterion(outputs, targets)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))

            # NOTE(review): add_scalar is called once per batch with no
            # global_step, so every point lands on the same step — consider
            # passing a step (e.g. epoch) or logging once after the loop.
            writer.add_scalar('Loss/test', losses.avg)
            writer.add_scalar('Accuracy/test', top1.avg)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                batch=batch_idx + 1,
                size=len(testloader),
                data=data_time.avg,
                loss=losses.avg,
                top1=top1.avg,
                top5=top5.avg,
            )
            bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def validate(valloader, model, criterion, epoch, use_cuda, mode):
    """Evaluate `model` on `valloader` under no_grad.

    `mode` is only used as the progress-bar label. Returns
    (average loss, average top-1 accuracy).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # evaluation mode: freeze dropout/batch-norm statistics
    model.eval()

    tick = time.time()
    bar = Bar(f'{mode}', max=len(valloader))
    with torch.no_grad():
        for step, (batch_in, batch_tgt) in enumerate(valloader):
            # loader wait time
            data_time.update(time.time() - tick)

            if use_cuda:
                batch_in = batch_in.cuda()
                batch_tgt = batch_tgt.cuda(non_blocking=True)

            # forward pass and loss
            preds = model(batch_in)
            loss = criterion(preds, batch_tgt)

            # running statistics
            acc1, acc5 = accuracy(preds, batch_tgt, topk=(1, 5))
            n = batch_in.size(0)
            losses.update(loss.item(), n)
            top1.update(acc1.item(), n)
            top5.update(acc5.item(), n)

            # per-batch wall clock
            batch_time.update(time.time() - tick)
            tick = time.time()

            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                batch=step + 1,
                size=len(valloader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                top1=top1.avg,
                top5=top5.avg,
            )
            bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def train(args, model, trainloader, criterion, optimizer, epoch, use_cuda):
    """One epoch of supervised training.

    Returns (average loss, average top-1 accuracy) over the epoch.
    """
    # enable train-mode layers (dropout, batch-norm updates)
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    tick = time.time()
    bar = Bar('Processing', max=len(trainloader))
    for step, (image, target) in enumerate(trainloader):
        # loader wait time
        data_time.update(time.time() - tick)

        if use_cuda:
            image = image.cuda()
            target = target.cuda()

        # forward, loss, and SGD update
        outputs = model(image)
        loss = criterion(outputs, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # running train metrics
        acc1, acc5 = accuracy(outputs.data, target.data, topk=(1, 5))
        n = image.size(0)
        losses.update(loss.item(), n)
        top1.update(acc1.item(), n)
        top5.update(acc5.item(), n)

        # per-batch wall clock
        batch_time.update(time.time() - tick)
        tick = time.time()

        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
            batch=step + 1,
            size=len(trainloader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            top1=top1.avg,
            top5=top5.avg,
        )
        bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def test(val_loader, model, criterion, epoch, use_cuda):
    """Evaluate a multi-label model; reports loss and F2 score.

    Predictions are thresholded at 0.2 after a sigmoid. Returns
    (average loss, average F2).
    """
    global min_loss

    batch_time = AverageMeter()
    data_time = AverageMeter()
    test_losses = AverageMeter()
    test_f2 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    bar = Bar('Processing', max=len(val_loader))
    # Fix: volatile=True is deprecated/no-op since PyTorch 0.4;
    # torch.no_grad() is the supported replacement.
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()

            # compute output
            outputs = model(inputs)
            loss = criterion(outputs, targets)

            # measure accuracy and record loss
            # Fix: loss.data[0] was removed in PyTorch 0.4.
            test_losses.update(loss.item(), inputs.size(0))
            # Fix: F.sigmoid is deprecated in favour of torch.sigmoid.
            binary_out = torch.sigmoid(outputs)
            binary_out[binary_out >= 0.2] = 1
            binary_out[binary_out < 0.2] = 0
            test_f2.update(
                f2_score(binary_out.data.cpu().numpy(),
                         targets.data.cpu().numpy()))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | F2: {f2_score}'.format(
                batch=batch_idx + 1,
                size=len(val_loader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=test_losses.avg,
                f2_score=test_f2.avg)
            bar.next()
    bar.finish()
    return (test_losses.avg, test_f2.avg)
def train(train_loader, num_classes, model, optimizer, criterion, epoch,
          use_cuda):
    """One epoch of training over `train_loader`.

    Returns:
        (last batch index, average loss).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    end = time.time()
    # Fix: the bar was sized with args.val_iteration while the loop runs
    # over len(train_loader) batches, so the progress display was wrong;
    # size it (and the suffix) by the actual loader length.
    bar = Bar('Training', max=len(train_loader))
    t = tqdm(enumerate(train_loader), total=len(train_loader),
             desc='training')

    model.train()
    for batch_idx, (input, target) in t:
        if use_cuda:
            input, target = input.cuda(), target.cuda(non_blocking=True)

        # measure data loading time
        data_time.update(time.time() - end)

        # batch size
        batch_size = input.size(0)

        output = model(input)
        loss = criterion(output, target.squeeze(1))

        # record loss
        losses.update(loss.item(), input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} '.format(
            batch=batch_idx + 1,
            size=len(train_loader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg)
        bar.next()
    bar.finish()
    return (
        batch_idx,
        losses.avg,
    )
def valid(testloader, model_pre, model_fcs, epoch):
    """Validate the (model_pre, model_fcs) pipeline on (rgb, gel, label) data.

    Note: only the second element of the `rgb` batch tuple is used
    (rgb[1]), mirroring the training data layout — TODO confirm against
    the loader.

    Returns:
        (average loss, average accuracy, per-sample predictions,
         per-sample targets).
    """
    # switch the head to evaluate mode (model_pre's mode is left unchanged,
    # matching the original behaviour)
    model_fcs.eval()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    avg_acc = AverageMeter()

    end = time.time()
    preds = []
    targets_list = []
    bar = Bar('Processing', max=len(testloader))
    # Fix: validation previously tracked gradients, wasting memory.
    with torch.no_grad():
        for batch_idx, (rgb, gel, label) in enumerate(testloader):
            # measure data loading time
            data_time.update(time.time() - end)

            rgb, gel, label = rgb[1].cuda(), gel.cuda(), label.cuda()

            # compute output
            rgb = model_pre(rgb)
            outputs = model_fcs(rgb, gel)
            loss = F.cross_entropy(outputs, label)

            y_pred = torch.max(outputs, 1)[1]
            for i in range(outputs.size(0)):
                preds.append(y_pred[i].cpu().data.numpy())
                targets_list.append(label[i].cpu().data.numpy())
            acc = accuracy_score(y_pred.cpu().data.numpy(),
                                 label.cpu().data.numpy())

            # measure the result
            losses.update(loss.item(), rgb.size(0))
            avg_acc.update(acc, rgb.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f}| ACC(input): {acc: .4f}'.format(
                batch=batch_idx + 1,
                size=len(testloader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                acc=avg_acc.avg,
            )
            bar.next()
    bar.finish()
    return (losses.avg, avg_acc.avg, preds, targets_list)
def train(trainloader, model, criterion, optimizer, epoch, use_cuda):
    """One epoch of landmark-regression training.

    Batches are dicts with keys 'image' and 'landmarks'. Returns
    (average loss, 0) — the second slot is a placeholder for the disabled
    normalized-mean-error metric.
    """
    # switch to train mode
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    NormMS = AverageMeter()

    end = time.time()
    bar = Bar('Processing', max=len(trainloader))
    for batch_idx, batch_data in enumerate(trainloader):
        # measure data loading time
        data_time.update(time.time() - end)
        inputs = batch_data['image']
        targets = batch_data['landmarks']

        if use_cuda:
            # Fix: `cuda(async=True)` is a SyntaxError on Python 3.7+;
            # use non_blocking. Variable wrappers dropped (no-op since 0.4).
            inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)

        # compute output
        outputs = model(inputs)
        loss = criterion(outputs, targets)

        # measure accuracy and record loss
        # nms = normalizedME(outputs.data, targets.data, 64, 64)
        # Fix: loss.data[0] was removed in PyTorch 0.4.
        losses.update(loss.item(), inputs.size(0))
        # NormMS.update(nms[0], inputs.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f}'.format(
            batch=batch_idx + 1,
            size=len(trainloader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
        )
        bar.next()
    bar.finish()
    return (losses.avg, 0)
def test(testloader, model, criterion, args):
    """Evaluate `model` on `testloader` under no_grad.

    Returns (average loss, average top-1 accuracy).
    """
    global best_acc

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # inference mode: no dropout / frozen batch-norm statistics
    model.eval()

    with torch.no_grad():
        tick = time.time()
        bar = Bar('Processing', max=len(testloader))
        for step, (batch_in, batch_tgt) in enumerate(testloader):
            # loader wait time
            data_time.update(time.time() - tick)

            if args.gpu_id is not None:
                batch_in = batch_in.cuda()
                batch_tgt = batch_tgt.cuda()

            # forward pass and loss
            logits = model(batch_in)
            loss = criterion(logits, batch_tgt)

            # running statistics
            acc1, acc5 = accuracy(logits.data, batch_tgt.data, topk=(1, 5))
            n = batch_in.size(0)
            losses.update(loss.item(), n)
            top1.update(acc1.item(), n)
            top5.update(acc5.item(), n)

            # per-batch wall clock
            batch_time.update(time.time() - tick)
            tick = time.time()

            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                batch=step + 1,
                size=len(testloader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                top1=top1.avg,
                top5=top5.avg,
            )
            bar.next()
        bar.finish()
    return (losses.avg, top1.avg)
def train(labeled_trainloader, model, optimizer, criterion, epoch, use_cuda):
    """Train for `args.val_iteration` steps, cycling the labeled loader.

    The loader iterator is re-created whenever it is exhausted so the
    fixed step count is always met. Returns the average loss.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    end = time.time()
    size = args.val_iteration
    bar = Bar('Training', max=size)

    labeled_train_iter = iter(labeled_trainloader)
    model.train()
    for batch_idx in range(size):
        try:
            # Fix: iterator.next() is Python-2-only and raises
            # AttributeError on Python 3; use the next() builtin.
            inputs_x, targets_x = next(labeled_train_iter)
        except StopIteration:
            # Fix: catch only StopIteration — the bare except also hid
            # real loader errors. Restart the epoch over the loader.
            labeled_train_iter = iter(labeled_trainloader)
            inputs_x, targets_x = next(labeled_train_iter)
        data_time.update(time.time() - end)

        batch_size = inputs_x.size(0)
        if use_cuda:
            inputs_x = inputs_x.cuda()
            targets_x = targets_x.cuda(non_blocking=True)

        logits_x = model(inputs_x)
        loss = criterion(logits_x, targets_x)
        losses.update(loss.item(), inputs_x.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f}'.format(
            batch=batch_idx + 1,
            size=size,
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg
        )
        bar.next()
    bar.finish()
    return losses.avg
def test(val_loader, model, criterion, epoch, use_cuda):
    """Evaluate `model`, timing only the forward pass per batch.

    Prints progress every 10 batches. Returns
    (average loss, average top-1, average top-5).
    """
    global best_acc

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    bar = Bar('P', max=len(val_loader))
    # Fix: evaluation previously tracked gradients, inflating the timed
    # forward pass and memory use.
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()

            # time the forward pass only
            end = time.time()
            outputs = model(inputs)
            batch_time.update(time.time() - end)

            loss = criterion(outputs, targets)
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))

            # measure accuracy and record loss
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))

            # plot progress
            if (batch_idx + 1) % 10 == 0:
                print('({batch}/{size}) D: {data:.2f}s | B: {bt:.2f}s | T: {total:} | '
                      'E: {eta:} | L: {loss:.3f} | t1: {top1: .3f} | t5: {top5: .3f}'.format(
                          batch=batch_idx + 1,
                          size=len(val_loader),
                          data=data_time.avg,
                          bt=batch_time.avg,
                          total=bar.elapsed_td,
                          eta=bar.eta_td,
                          loss=losses.avg,
                          top1=top1.avg,
                          top5=top5.avg,
                      ))
            bar.next()
    bar.finish()
    return (losses.avg, top1.avg, top5.avg)
def validate(val_loader, model, generated_weights, criterion):
    """Evaluate `model` using externally generated novel-class classifiers.

    Returns (average loss, average top-1 accuracy).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # inference mode
    model.eval()

    bar = Bar('Testing ', max=len(val_loader))
    with torch.no_grad():
        tick = time.time()
        for step, (batch_in, batch_tgt) in enumerate(val_loader):
            # loader wait time
            data_time.update(time.time() - tick)

            batch_in = batch_in.cuda()
            batch_tgt = batch_tgt.cuda(non_blocking=True)

            # forward with the generated novel-class weights plugged in
            logits = model(batch_in,
                           base_class_indexes=None,
                           novel_class_classifiers=generated_weights)
            loss = criterion(logits, batch_tgt)

            # running statistics
            acc1, acc5 = accuracy(logits, batch_tgt, topk=(1, 5))
            n = batch_in.size(0)
            losses.update(loss.item(), n)
            top1.update(acc1.item(), n)
            top5.update(acc5.item(), n)

            # per-batch wall clock
            batch_time.update(time.time() - tick)
            tick = time.time()

            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                batch=step + 1,
                size=len(val_loader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                top1=top1.avg,
                top5=top5.avg,
            )
            bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def test(testloader, model, epoch, use_cuda):
    """Evaluate `model` (last head of a multi-output network) with
    cross-entropy.

    Returns (average loss, average top-1 accuracy).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    tick = time.time()

    # inference mode
    model.eval()

    with torch.no_grad():
        bar = Bar('Processing', max=len(testloader))
        for step, (batch_in, batch_tgt) in enumerate(testloader):
            data_time.update(time.time() - tick)

            if use_cuda:
                batch_in = batch_in.cuda()
                batch_tgt = batch_tgt.cuda()

            # the model returns several outputs; score the final one
            logits = model(batch_in)[-1]
            loss = F.cross_entropy(logits, batch_tgt)

            # running statistics
            acc1, acc5 = accuracy(logits.data, batch_tgt.data, topk=(1, 5))
            n = batch_in.shape[0]
            losses.update(loss.item(), n)
            top1.update(acc1.item(), n)
            top5.update(acc5.item(), n)

            # per-batch wall clock
            batch_time.update(time.time() - tick)
            tick = time.time()

            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                batch=step + 1,
                size=len(testloader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                top1=top1.avg,
                top5=top5.avg,
            )
            bar.next()
        bar.finish()
    return (losses.avg, top1.avg)
def train_adv(trainloader, net, criterion, optimizer, epoch, adversary):
    """Adversarial training: perturb each batch, then take `args.update`
    optimizer steps on the perturbed inputs.

    Uses the module-level `device` and `args`. Returns
    (average loss, average top-1 accuracy).
    """
    print('\nEpoch: %d' % epoch)
    net.train()

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    tick = time.time()

    bar = Bar('Processing', max=len(trainloader))
    for step, batch in enumerate(trainloader):
        clean_in = batch[0].to(device)
        labels = batch[1].long().to(device)

        # craft adversarial examples for this batch
        # adv_inputs = adv_train(inputs, targets, net, criterion, adversary)
        _, adv_in = adversary.perturb(clean_in, labels)

        net = net.train()
        for _ in range(args.update):
            # adv_inputs, targets = adv_inputs.to(device), targets.to(device)
            logits = net(adv_in)
            loss = criterion(logits, labels)

            # running statistics (top-5 computed but unused, as before)
            acc1, acc5 = accuracy(logits.data, labels.data, topk=(1, 5))
            losses.update(loss.item(), clean_in.size(0))
            top1.update(acc1.item(), clean_in.size(0))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        batch_time.update(time.time() - tick)
        tick = time.time()

        bar.suffix = '({batch}/{size}) Batch: {bt:.3f}s| Total:{total:}| ETA:{eta:}| Loss:{loss:.4f}| top1:{top1:.2f}'.format(
            batch=step + 1,
            size=len(trainloader),
            bt=batch_time.val,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            top1=top1.avg)
        bar.next()
    bar.finish()
    return losses.avg, top1.avg
def fine_tuning(train_loader, model, criterion, optimizer):
    """One fine-tuning epoch for a multi-label model.

    After each step the model's weights are re-normalised via
    `model.weight_norm()`. Returns (average loss, micro-F1, macro-F1).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    microF1 = AverageMeter()
    macroF1 = AverageMeter()

    model.train()
    tick = time.time()
    bar = Bar('Training', max=len(train_loader))
    for step, (batch_in, batch_tgt) in enumerate(train_loader):
        # loader wait time
        data_time.update(time.time() - tick)

        batch_in = batch_in.cuda()
        batch_tgt = batch_tgt.cuda()

        # forward + loss (targets cast to float for the criterion)
        logits = model(batch_in)
        loss = criterion(logits, batch_tgt.float())

        # F1 metrics on CPU copies
        cpu_tgt = batch_tgt.data.cpu().float()
        cpu_out = logits.data.cpu()
        micro, macro = calc_f1(cpu_tgt, cpu_out)

        n = batch_in.size(0)
        losses.update(loss.item(), n)
        microF1.update(micro.item(), n)
        macroF1.update(macro.item(), n)

        # backward + step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - tick)
        tick = time.time()

        # keep classifier weights normalised after each update
        model.weight_norm()

        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Micro-f1: {microF1: .4f} |Macro-f1: {macroF1: .4f}'.format(
            batch=step + 1,
            size=len(train_loader),
            data=data_time.val,
            bt=batch_time.val,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            microF1=microF1.avg,
            macroF1=macroF1.avg,
        )
        bar.next()
    bar.finish()
    return (losses.avg, microF1.avg, macroF1.avg)
def get_novel_weights(novel_loader, model, weight_generator,
                      num_novel=100, feat_dim=256, label_offset=100):
    """Imprint and generate classifier weights for novel classes.

    Extracts features for every sample in `novel_loader`, averages the
    features per novel class, L2-normalises each average, and passes the
    resulting matrix through `weight_generator`. If `args.random` is set,
    random vectors are imprinted instead of class means.

    Args:
        novel_loader: yields (input, target); targets are assumed to lie
            in [label_offset, label_offset + num_novel) — TODO confirm.
        model: feature extractor exposing `extract`.
        weight_generator: maps imprinted weights to generated weights.
        num_novel: number of novel classes (default 100, the original
            hard-coded value).
        feat_dim: feature dimensionality (default 256).
        label_offset: label of the first novel class (default 100).

    Returns:
        The generated weight tensor from `weight_generator`.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()

    # switch to evaluate mode
    model.eval()
    weight_generator.eval()

    end = time.time()
    bar = Bar('Imprinting', max=len(novel_loader))
    with torch.no_grad():
        # Collect per-batch outputs and concatenate once at the end;
        # the original re-concatenated every batch (O(n^2) copying).
        feature_chunks = []
        target_chunks = []
        for batch_idx, (input, target) in enumerate(novel_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input = input.cuda()
            # compute output
            feature_chunks.append(model.extract(input))
            target_chunks.append(target)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:}'.format(
                batch=batch_idx + 1,
                size=len(novel_loader),
                data=data_time.val,
                bt=batch_time.val,
                total=bar.elapsed_td,
                eta=bar.eta_td
            )
            bar.next()
        bar.finish()
        output_stack = torch.cat(feature_chunks, 0)
        target_stack = torch.cat(target_chunks, 0)

    new_weight = torch.zeros(num_novel, feat_dim)
    for i in range(num_novel):
        # class mean (or a random vector when args.random), L2-normalised
        tmp = output_stack[target_stack == (i + label_offset)].mean(
            0) if not args.random else torch.randn(feat_dim)
        new_weight[i] = tmp / tmp.norm(p=2)
    gen_weight = weight_generator(new_weight.cuda())
    return gen_weight
def train(trainloader, model, criterion, optimizer, epoch):
    """One training epoch; targets are squeezed before the loss.

    Uses the module-level `device`. Returns
    (average loss, average top-1 accuracy).
    """
    # enable train-mode layers
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    tick = time.time()

    bar = Bar('Processing', max=len(trainloader))
    for step, (batch_in, batch_tgt) in enumerate(trainloader):
        # loader wait time
        data_time.update(time.time() - tick)

        batch_in = batch_in.to(device)
        batch_tgt = batch_tgt.to(device).squeeze()

        # forward pass and loss
        logits = model(batch_in)
        loss = criterion(logits, batch_tgt)

        # running statistics (top-1 only)
        acc1 = accuracy(logits.data, batch_tgt.data, topk=(1, ))
        n = batch_in.size(0)
        losses.update(loss.item(), n)
        top1.update(acc1[0].item(), n)

        # backward + step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # per-batch wall clock
        batch_time.update(time.time() - tick)
        tick = time.time()

        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f}'.format(
            batch=step + 1,
            size=len(trainloader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            top1=top1.avg,
        )
        bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def test(testloader, model, criterion, use_cuda):
    """Evaluate a landmark-regression model.

    Batches are dicts with keys 'image' and 'landmarks'. Returns
    (average loss, 0) — the second slot is a metric placeholder.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    bar = Bar('Processing', max=len(testloader))
    # Evaluation should not build an autograd graph.
    with torch.no_grad():
        for batch_idx, batch_data in enumerate(testloader):
            # measure data loading time
            data_time.update(time.time() - end)
            inputs = batch_data['image']
            targets = batch_data['landmarks']

            if use_cuda:
                # Fix: `cuda(async=True)` is a SyntaxError on Python 3.7+;
                # use non_blocking. Variable wrappers dropped (no-op
                # since PyTorch 0.4).
                inputs, targets = inputs.cuda(), targets.cuda(
                    non_blocking=True)

            # compute output
            outputs = model(inputs)
            loss = criterion(outputs, targets.squeeze())

            # measure accuracy and record loss
            losses.update(loss.item(), inputs.size(0))

            # measure elapsed time
            batch_time.update(float(time.time() - end))
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} '.format(
                batch=batch_idx + 1,
                size=len(testloader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
            )
            bar.next()
    bar.finish()
    return (losses.avg, 0)
def output_attention(val_loader, model, epoch, use_cuda, save_dir):
    """Dump per-sample attention maps to `<save_dir>/attention.txt` and
    track top-1/top-5 accuracy along the way.

    Each sample produces one line of w*h space-separated attention values
    (channel-summed).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    model.eval()
    end = time.time()
    bar = Bar('Processing', max=len(val_loader))
    # `with` guarantees the file is closed even if a batch raises.
    with open(os.path.join(save_dir, 'attention.txt'), 'w') as fw:
        # Fix: volatile=True is deprecated/no-op since PyTorch 0.4.
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(val_loader):
                data_time.update(time.time() - end)

                if use_cuda:
                    inputs, targets = inputs.cuda(), targets.cuda()

                probs, attention = model(inputs)
                bs, c, w, h = attention.size()
                # sum over channels, then flatten spatially per sample
                attention = attention.sum(1)
                attention = attention.cpu().data.numpy()
                attention = attention.reshape((bs, -1))
                for index in range(bs):
                    hot = ''
                    for j in range(w * h):
                        hot += '{:.3f} '.format(attention[index][j])
                    hot += '\n'
                    fw.write(hot)

                prec1, prec5 = accuracy(probs.data, targets.data,
                                        topk=(1, 5))
                top1.update(prec1.item(), inputs.size(0))
                top5.update(prec5.item(), inputs.size(0))

                # Fix: the original wrote to `bar.shuffix` (a typo), so the
                # progress suffix was never displayed.
                bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                    batch=batch_idx + 1,
                    size=len(val_loader),
                    data=data_time.avg,
                    bt=batch_time.avg,
                    total=bar.elapsed_td,
                    eta=bar.eta_td,
                    top1=top1.avg,
                    top5=top5.avg
                )
                bar.next()
    bar.finish()
def test(testloader, model, use_cuda, loader_len):
    """Extract pooled feature representations for every sample.

    Model outputs are 4x4-average-pooled and flattened; returns a list of
    per-sample numpy feature vectors.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    data_represent_list = []

    # inference mode
    model.eval()

    with torch.no_grad():
        tick = time.time()
        bar = Bar('Processing', max=len(testloader))
        for step, (batch_in, batch_tgt) in enumerate(testloader):
            # loader wait time
            data_time.update(time.time() - tick)

            if use_cuda:
                batch_in, batch_tgt = batch_in.cuda(), batch_tgt.cuda()
            batch_in = torch.autograd.Variable(batch_in)
            batch_tgt = torch.autograd.Variable(batch_tgt)

            # forward, then pool 4x4 windows and flatten per sample
            feats = model(batch_in)
            feats = torch.nn.functional.avg_pool2d(
                feats, kernel_size=(4, 4), stride=(1, 1))
            feats = feats.view(feats.size(0), -1)
            data_represent_list.extend(feats.detach().cpu().numpy())

            # per-batch wall clock
            batch_time.update(time.time() - tick)
            tick = time.time()

            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:}'.format(
                batch=step + 1,
                size=len(testloader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td)
            bar.next()
        bar.finish()
    return data_represent_list
def get_p(evalloader, model, epoch, use_cuda):
    """Collect softmax probability vectors over `evalloader`.

    Returns a list with one numpy probability vector per batch.
    NOTE(review): only row 0 of each batch's output is kept, which is correct
    only if the loader uses batch_size == 1 — confirm against the caller.
    """
    # switch to evaluate mode
    model.eval()
    p_results = []
    bar = Bar('Evaluating', max=len(evalloader))
    # torch.no_grad() replaces `volatile=True`, which was removed in
    # PyTorch >= 0.4 (passing it raises an error on modern versions).
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(evalloader):
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()

            # compute output
            outputs = model(inputs)
            outputs = nn.functional.softmax(outputs, dim=-1)  # normalization
            p_results.append(outputs[0].cpu().numpy())

            bar.suffix = '({batch}/{size})'.format(batch=batch_idx + 1, size=len(evalloader))
            bar.next()
    bar.finish()
    return p_results
def test(testloader, model, use_cuda):
    """Run landmark inference over `testloader`.

    Returns a list of numpy arrays, one per batch of predicted landmarks.
    NOTE(review): the `losses` meter is never updated, so the progress bar
    always shows Loss: 0.0000 — preserved for output compatibility.
    """
    landmarks = []
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    bar = Bar('Processing', max=len(testloader))
    # no_grad avoids building the autograd graph during pure inference.
    with torch.no_grad():
        for batch_idx, batch_data in enumerate(testloader):
            # measure data loading time
            data_time.update(time.time() - end)

            inputs = batch_data
            if use_cuda:
                inputs = inputs.cuda()
            # Deprecated Variable wrapper removed (no-op since PyTorch 0.4).

            # compute output
            outputs = model(inputs)
            landmarks.append(outputs.cpu().numpy())

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} '.format(
                batch=batch_idx + 1,
                size=len(testloader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
            )
            bar.next()
    bar.finish()
    return landmarks
def attack_over_test(testloader, net, criterion, adversary):
    """Evaluate `net` on adversarially perturbed test data.

    Every batch is perturbed by `adversary` before scoring. Deliberately runs
    WITHOUT torch.no_grad(): the adversary needs gradients to craft inputs.
    Relies on a module-level `device`.

    Returns (average loss, average top-1 accuracy) over the perturbed set.
    """
    net.eval()

    timer = AverageMeter()
    loss_meter = AverageMeter()
    acc_meter = AverageMeter()

    tick = time.time()
    progress = Bar('Processing', max=len(testloader))
    for step, batch in enumerate(testloader):
        images = batch[0]
        labels = batch[1].long()
        images, labels = images.to(device), labels.to(device)

        # Craft the adversarial examples, then score the network on them.
        _, perturbed = adversary.perturb(images, labels)
        logits = net(perturbed)
        batch_loss = criterion(logits, labels)

        top1_acc, _ = accuracy(logits.data, labels.data, topk=(1, 5))
        loss_meter.update(batch_loss.item(), images.size(0))
        acc_meter.update(top1_acc.item(), images.size(0))

        timer.update(time.time() - tick)
        tick = time.time()

        progress.suffix = '({batch}/{size}) Batch: {bt:.3f}s| Total: {total:}| ETA: {eta:}| Loss:{loss:.4f}| top1: {top1:.2f}'.format(
            batch=step + 1,
            size=len(testloader),
            bt=timer.avg,
            total=progress.elapsed_td,
            eta=progress.eta_td,
            loss=loss_meter.avg,
            top1=acc_meter.avg)
        progress.next()
    progress.finish()
    return loss_meter.avg, acc_meter.avg
def train(train_loader, model, criterion, optimizer, epoch, use_cuda, logger):
    """One training epoch over a DALI-style loader.

    Batches arrive as data[0]["data"] / data[0]["label"] dicts (DALI output
    format). Per-batch timings are also folded into module-level global meters
    (skipping epoch 0, presumably to exclude warm-up — TODO confirm), and the
    progress-bar line is appended to `logger.file`.

    Returns (avg loss, avg top-1 accuracy, avg top-5 accuracy).
    """
    # Module-level meters that accumulate timing across epochs.
    global batch_time_global, data_time_global
    # switch to train mode
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()

    # DALI iterators expose `_size` (total samples) rather than __len__;
    # +1 accounts for a possible partial final batch.
    train_loader_len = int(train_loader._size / args.train_batch) + 1

    bar = Bar('Processing', max=train_loader_len)
    for batch_idx, data in enumerate(train_loader):
        # measure data loading time
        data_time_lap = time.time() - end
        data_time.update(data_time_lap)
        # Epoch 0 excluded from the global averages.
        if epoch > 0:
            data_time_global.update(data_time_lap)
        # DALI batch layout: a list with one dict holding "data" and "label".
        inputs = data[0]["data"]
        targets = data[0]["label"].squeeze().cuda().long()
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)
        inputs, targets = torch.autograd.Variable(
            inputs), torch.autograd.Variable(targets)

        # compute output
        outputs = model(inputs)
        loss = criterion(outputs, targets)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        losses.update(to_python_float(loss.data), inputs.size(0))
        top1.update(to_python_float(prec1), inputs.size(0))
        top5.update(to_python_float(prec5), inputs.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time_lap = time.time() - end
        batch_time.update(batch_time_lap)
        if epoch > 0:
            batch_time_global.update(batch_time_lap)
        end = time.time()

        # plot progress (per-batch and cross-epoch global timings side by side)
        bar.suffix = '(Epoch {epoch}, {batch}/{size}) Data: {data:.3f}s/{data_global:.3f}s | Batch: {bt:.3f}s/{bt_global:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
            epoch=epoch,
            batch=batch_idx + 1,
            size=train_loader_len,
            data=data_time.val,
            data_global=data_time_global.avg,
            bt=batch_time.val,
            bt_global=batch_time_global.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            top1=top1.avg,
            top5=top5.avg,
        )
        bar.next()
        # Mirror the progress line into the persistent log file.
        logger.file.write(bar.suffix)
    bar.finish()
    return (losses.avg, top1.avg, top5.avg)
def train(trainloader, model, criterion, optimizer, epoch, use_cuda):
    """One training epoch with pruning-mask-preserving SGD.

    Zeroed conv weights stay zero: after backward, every Conv2d weight
    gradient is multiplied by the mask (|w| > 0), so pruned connections never
    receive updates.

    Returns (avg loss, avg top-1 accuracy).
    """
    # switch to train mode
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()

    bar = Bar('Processing', max=len(trainloader))
    print(args)
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        # measure data loading time
        data_time.update(time.time() - end)

        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        # Deprecated Variable wrappers removed (no-op since PyTorch 0.4).

        # compute output
        outputs = model(inputs)
        loss = criterion(outputs, targets)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        # BUGFIX: `loss.data[0]` / `prec1[0]` raise IndexError on 0-dim
        # tensors in PyTorch >= 0.5; .item() is the supported accessor.
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        # Freeze pruned weights: zero the gradient wherever the weight is 0.
        # (`.abs()` already yields a fresh tensor, so no `.clone()` is needed.)
        for m in model.modules():
            if isinstance(m, nn.Conv2d):
                mask = m.weight.data.abs().gt(0).float().cuda()
                m.weight.grad.data.mul_(mask)
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
            batch=batch_idx + 1,
            size=len(trainloader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            top1=top1.avg,
            top5=top5.avg,
        )
        bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def train(train_loader, model, weight_generator, criterion, optimizer, epoch):
    """One few-shot training epoch for the classifier-weight generator.

    Per episode: average the (frozen-feature) embeddings of fake-novel support
    samples into per-class prototype weights, feed them through
    `weight_generator`, and train on a sampled 10-way classification problem
    mixing base samples and fake-novel queries.

    Assumes the loader yields 5 samples per each of the 10 episode classes
    (see np.repeat(range(10), 5) below) and a 256-dim feature space — confirm
    against the dataset/model config.

    Returns (avg loss, avg top-1 accuracy).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    bar = Bar('Training ', max=len(train_loader))
    for batch_idx, (base_samples, base_labels, fake_novel_samples, fake_novel_query, fake_novel_labels) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # Each element arrives as a list of tensors; concatenate into batches.
        base_samples = torch.cat(base_samples).cuda()
        base_labels = torch.cat(base_labels).cuda()
        fake_novel_samples = torch.cat(fake_novel_samples).cuda()
        fake_novel_labels = torch.cat(fake_novel_labels).cuda()
        fake_novel_query = torch.cat(fake_novel_query).cuda()

        # Embed the fake-novel support samples with the backbone.
        fake_train = model.extract(fake_novel_samples)
        unique_novel_labels = torch.unique(fake_novel_labels)
        # Build one L2-normalized prototype weight vector per novel class
        # (256 = feature dimension — TODO confirm).
        new_weight = torch.zeros(unique_novel_labels.shape[0], 256)
        for i, f_l in enumerate(unique_novel_labels):
            tmp = fake_train[fake_novel_labels == f_l].mean(0)
            new_weight[i] = tmp / tmp.norm(p=2)
        new_weight = new_weight.cuda()
        # Generate the novel-class classifier weights from the prototypes.
        gen_weight = weight_generator(new_weight)

        # compute output of the sampled 10-way classification problem
        input = torch.cat((base_samples, fake_novel_query))
        unique_base_labels = torch.unique(base_labels)
        # detach_feature=True: only the generator/classifier head is trained
        # here, not the feature extractor.
        output = model(input, base_class_indexes = unique_base_labels, novel_class_classifiers = gen_weight, detach_feature=True)
        # Episode-local targets: 10 classes x 5 samples each, in order.
        lst_lab = np.repeat(list(range(10)), 5)
        target = torch.LongTensor(lst_lab).cuda()
        loss = criterion(output, target)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        top5.update(prec5.item(), input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # Re-normalize classifier weights after the update.
        model.weight_norm()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
            batch=batch_idx + 1,
            size=len(train_loader),
            data=data_time.val,
            bt=batch_time.val,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            top1=top1.avg,
            top5=top5.avg,
        )
        bar.next()
    bar.finish()
    return (losses.avg, top1.avg)