def test(testloader, model, criterion, epoch, use_cuda):
    """Evaluate `model` on `testloader`.

    Returns (average loss, average top-1 accuracy) over the whole loader.
    `epoch` is unused here but kept for interface parity with `train`.
    """
    global best_acc

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    bar = Bar('Processing', max=len(testloader))
    # torch.no_grad() replaces the removed, long-deprecated
    # `Variable(..., volatile=True)` API: it disables autograd tracking
    # during inference, which is what `volatile` used to do.
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            # measure data loading time
            data_time.update(time.time() - end)

            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()

            # compute output
            outputs = model(inputs)
            loss = criterion(outputs, targets)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                batch=batch_idx + 1,
                size=len(testloader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                top1=top1.avg,
                top5=top5.avg,
            )
            bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def train(train_loader, model, criterion, optimizer, epoch, use_cuda):
    """Run one training epoch over `train_loader`.

    Returns (average loss, average top-5 accuracy).
    NOTE(review): the second return value is top5.avg, not top1.avg as in
    the sibling train loops — preserved as-is; confirm callers expect top-5.
    """
    # switch to train mode
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    end = time.time()
    bar = Bar('P', max=len(train_loader))
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        if use_cuda:
            # `async=True` is a SyntaxError on Python 3.7+ (`async` became a
            # keyword); `non_blocking=True` is the modern equivalent.
            inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)

        # compute output
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))

        # measure accuracy and record loss
        # (.item() replaces the 0.3-era `tensor[0]` scalar access)
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress every 10 batches
        if (batch_idx + 1) % 10 == 0:
            print(
                '({batch}/{size}) D: {data:.2f}s | B: {bt:.2f}s | T: {total:} | E: {eta:} | L: {loss:.3f} | t1: {top1: .3f} | t5: {top5: .3f}'
                .format(
                    batch=batch_idx + 1,
                    size=len(train_loader),
                    data=data_time.val,
                    bt=batch_time.val,
                    total=bar.elapsed_td,
                    eta=bar.eta_td,
                    loss=losses.avg,
                    top1=top1.avg,
                    top5=top5.avg,
                ))
        bar.next()
    bar.finish()
    return (losses.avg, top5.avg)
def test(val_loader, model, criterion, epoch, use_cuda):
    """Generate and draw class-activation maps (CAM) for one batch.

    Runs the backbone without its classification head, weights the feature
    maps by the fc-layer weights to build per-class CAMs, keeps only the
    CAMs of correctly-predicted samples, and hands them to `draw_CAM`.
    Processes a single batch (`break`) and returns nothing.
    `criterion` and `epoch` are unused.
    """
    global best_acc
    data_time = AverageMeter()
    # switch to evaluate mode
    model.eval()
    y_true = []   # NOTE(review): collected nowhere below — apparently vestigial
    y_pred = []   # NOTE(review): same
    avgpool = nn.AdaptiveAvgPool2d(1)
    with torch.no_grad():
        end = time.time()
        bar = Bar('Processing', max=len(val_loader))
        for batch_idx, (inputs, targets) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            inputs, targets = torch.autograd.Variable(
                inputs), torch.autograd.Variable(targets)
            # compute output: disable the classifier head so the model
            # returns raw feature maps instead of logits
            model.include_top = False
            outputs = model(inputs)  # [bs, num_feature, h, w]
            # fc weights of the (DataParallel-wrapped) model head
            weights = model.module.fc.weight.data.cuda(
            )  # [num_class, num_feature]
            num_class, num_feature = weights.size()
            # [bs, num_class, h, w]
            cam = torch.empty(outputs.size(0), num_class, outputs.size(2),
                              outputs.size(3)).cuda()
            # CAM for class i = feature maps weighted by fc row i, summed
            # over the feature dimension
            for i in range(num_class):
                tmp = outputs * weights[i].view(1, num_feature, 1,
                                                1)  # [bs, num_feature, h, w]
                cam[:, i, :, :] = tmp.sum(dim=1)
            cam = cam[torch.arange(
                cam.size(0)
            ), targets, :, :]  # select the ones corresponds to the correct label
            # select the ones which get predicted correctly:
            # recompute logits via global-average-pool + fc weights
            outputs = avgpool(outputs).view(cam.size(0), -1)
            outputs = torch.matmul(outputs, weights.t())
            _, pred = outputs.data.topk(1, 1, True, True)
            pred = pred.view(-1, )
            idx = pred.eq(targets)
            cam = cam[idx != 0]  # [bs, h, w]
            inputs = inputs[idx != 0]
            draw_CAM(cam, inputs)
            break  # only the first batch is visualized
        bar.next()
        bar.finish()
    return
def train(trainloader, model_pre, model_fcs, optimizer, epoch):
    """One training epoch for the fusion head `model_fcs` on top of the
    (frozen or pre-trained) feature extractor `model_pre`.

    Returns the epoch's average cross-entropy loss.
    """
    # only the head is put into train mode
    model_fcs.train()

    time_per_batch = AverageMeter()
    time_loading = AverageMeter()
    loss_meter = AverageMeter()
    acc_meter = AverageMeter()

    tick = time.time()
    bar = Bar('Processing', max=len(trainloader))
    for batch_idx, (rgb, gel, label) in enumerate(trainloader):
        # time spent waiting on the data pipeline
        time_loading.update(time.time() - tick)

        rgb, gel, label = rgb.cuda(), gel.cuda(), label.cuda()

        # forward: features from both modalities, then the fusion head
        features = model_pre(rgb, gel)
        outputs = model_fcs(features)
        loss = F.cross_entropy(outputs, label)

        # batch accuracy via sklearn on the argmax predictions
        y_pred = torch.max(outputs, 1)[1]
        acc = accuracy_score(y_pred.cpu().data.numpy(),
                             label.cpu().data.numpy())

        n = rgb.size(0)
        loss_meter.update(loss.item(), n)
        acc_meter.update(acc, n)

        # backward + SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        time_per_batch.update(time.time() - tick)
        tick = time.time()

        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f}| ACC(input): {acc: .4f}'.format(
            batch=batch_idx + 1,
            size=len(trainloader),
            data=time_loading.avg,
            bt=time_per_batch.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=loss_meter.avg,
            acc=acc_meter.avg,
        )
        bar.next()
    bar.finish()
    return loss_meter.avg
def test(testloader, model, criterion, epoch, use_cuda):
    """Evaluate `model` on `testloader`, logging loss/accuracy to `writer`.

    Returns (average loss, average top-1 accuracy).
    """
    global best_acc

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    DEBUG = False
    bar = Bar('Processing', max=len(testloader))
    # no_grad: evaluation needs no autograd graph; saves memory and time
    # without changing any computed value.
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            # measure data loading time
            data_time.update(time.time() - end)

            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()

            # compute output
            outputs = model(inputs)
            loss = criterion(outputs, targets)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))

            # NOTE(review): add_scalar is called per batch without a
            # global_step, so each call logs at step 0 — verify intended.
            writer.add_scalar('Loss/test', losses.avg)
            writer.add_scalar('Accuracy/test', top1.avg)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                batch=batch_idx + 1,
                size=len(testloader),
                data=data_time.avg,
                loss=losses.avg,
                top1=top1.avg,
                top5=top5.avg,
            )
            bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def test(args, testloader, enc, dec, cl, disc_l, disc_v, epoch, use_cuda):
    """Anomaly-detection evaluation: score samples by autoencoder
    reconstruction error and report ROC-AUC against `targets`.

    Returns the average AUC over batches. `cl`, `disc_l`, `disc_v` are put
    into eval mode but not otherwise used here.
    """
    global best_acc

    batch_time = AverageMeter()
    data_time = AverageMeter()
    top1 = AverageMeter()

    # switch every sub-network to evaluate mode
    for net in (enc, dec, cl, disc_l, disc_v):
        net.eval()

    tick = time.time()
    bar = Bar('Processing', max=len(testloader))
    for batch_idx, (inputs, targets) in enumerate(testloader):
        # time spent waiting on the data pipeline
        data_time.update(time.time() - tick)

        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        # with torch.no_grad():
        inputs, targets = torch.autograd.Variable(
            inputs), torch.autograd.Variable(targets)

        # per-sample reconstruction MSE is the anomaly score
        recon = dec(enc(inputs))
        scores = torch.mean(torch.pow((inputs - recon), 2), dim=[1, 2, 3])
        # lower error == more normal, hence the negated scores
        prec1 = roc_auc_score(targets.cpu().detach().numpy(),
                              -scores.cpu().detach().numpy())
        top1.update(prec1, inputs.size(0))

        batch_time.update(time.time() - tick)
        tick = time.time()

        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | top1: {top1: .4f} '.format(
            batch=batch_idx + 1,
            size=len(testloader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            top1=top1.avg,
        )
        bar.next()
    bar.finish()
    return top1.avg
def valid(testloader, model_pre, model_fcs, epoch):
    """Validation pass for the two-stage model (`model_pre` -> `model_fcs`).

    Returns (avg loss, avg accuracy, per-sample predictions, per-sample
    targets). Note: only the rgb stream's second element (`rgb[1]`) is used
    as network input — presumably a multi-view batch; confirm with loader.
    """
    # evaluate mode for the head
    model_fcs.eval()

    time_per_batch = AverageMeter()
    time_loading = AverageMeter()
    loss_meter = AverageMeter()
    acc_meter = AverageMeter()

    tick = time.time()
    preds = []
    targets_list = []
    bar = Bar('Processing', max=len(testloader))
    for batch_idx, (rgb, gel, label) in enumerate(testloader):
        # time spent waiting on the data pipeline
        time_loading.update(time.time() - tick)

        rgb, gel, label = rgb[1].cuda(), gel.cuda(), label.cuda()
        # print(x_tactile.shape)

        # forward through feature extractor, then fusion head
        rgb = model_pre(rgb)
        outputs = model_fcs(rgb, gel)
        loss = F.cross_entropy(outputs, label)

        # collect per-sample predictions/targets for downstream analysis
        y_pred = torch.max(outputs, 1)[1]
        for i in range(outputs.size(0)):
            preds.append(y_pred[i].cpu().data.numpy())
            targets_list.append(label[i].cpu().data.numpy())
        acc = accuracy_score(y_pred.cpu().data.numpy(),
                             label.cpu().data.numpy())

        # record batch statistics
        n = rgb.size(0)
        loss_meter.update(loss.item(), n)
        acc_meter.update(acc, n)

        time_per_batch.update(time.time() - tick)
        tick = time.time()

        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f}| ACC(input): {acc: .4f}'.format(
            batch=batch_idx + 1,
            size=len(testloader),
            data=time_loading.avg,
            bt=time_per_batch.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=loss_meter.avg,
            acc=acc_meter.avg,
        )
        bar.next()
    bar.finish()
    return (loss_meter.avg, acc_meter.avg, preds, targets_list)
def train(args, model, trainloader, criterion, optimizer, epoch, use_cuda):
    """One supervised training epoch.

    Returns (average loss, average top-1 accuracy) over the epoch.
    `args` and `epoch` are accepted for interface parity but unused here.
    """
    # switch to train mode
    model.train()

    meters = {name: AverageMeter()
              for name in ('batch', 'data', 'loss', 'top1', 'top5')}

    tick = time.time()
    bar = Bar('Processing', max=len(trainloader))
    for batch_idx, (image, target) in enumerate(trainloader):
        # time spent waiting on the data pipeline
        meters['data'].update(time.time() - tick)

        if use_cuda:
            image, target = image.cuda(), target.cuda()

        # forward, then SGD step
        outputs = model(image)
        loss = criterion(outputs, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # record loss and train accuracy
        prec1, prec5 = accuracy(outputs.data, target.data, topk=(1, 5))
        n = image.size(0)
        meters['loss'].update(loss.item(), n)
        meters['top1'].update(prec1.item(), n)
        meters['top5'].update(prec5.item(), n)

        meters['batch'].update(time.time() - tick)
        tick = time.time()

        # progress line
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
            batch=batch_idx + 1,
            size=len(trainloader),
            data=meters['data'].avg,
            bt=meters['batch'].avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=meters['loss'].avg,
            top1=meters['top1'].avg,
            top5=meters['top5'].avg,
        )
        bar.next()
    bar.finish()
    return (meters['loss'].avg, meters['top1'].avg)
def test(val_loader, model, criterion, epoch, use_cuda):
    """Multi-label evaluation: average loss and F2 score at threshold 0.2.

    Returns (average loss, average F2 score).
    """
    global min_loss

    batch_time = AverageMeter()
    data_time = AverageMeter()
    test_losses = AverageMeter()
    test_f2 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    bar = Bar('Processing', max=len(val_loader))
    # no_grad replaces the removed `Variable(..., volatile=True)` API.
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()

            # compute output
            outputs = model(inputs)
            loss = criterion(outputs, targets)

            # measure accuracy and record loss
            # (.item() replaces the 0.3-era `loss.data[0]`)
            test_losses.update(loss.item(), inputs.size(0))
            # binarize sigmoid probabilities at 0.2 for F2
            # (torch.sigmoid replaces the deprecated F.sigmoid)
            binary_out = torch.sigmoid(outputs)
            binary_out[binary_out >= 0.2] = 1
            binary_out[binary_out < 0.2] = 0
            test_f2.update(
                f2_score(binary_out.data.cpu().numpy(),
                         targets.data.cpu().numpy()))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | F2: {f2_score}'.format(
                batch=batch_idx + 1,
                size=len(val_loader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=test_losses.avg,
                f2_score=test_f2.avg)
            bar.next()
    bar.finish()
    return (test_losses.avg, test_f2.avg)
def validate(valloader, model, criterion, epoch, use_cuda, mode):
    """Evaluate a model whose forward returns (logits, extra).

    `mode` labels the progress bar (e.g. 'val'/'test').
    Returns (average loss, average top-1 accuracy).
    """
    meter_bt = AverageMeter()
    meter_dt = AverageMeter()
    meter_loss = AverageMeter()
    meter_top1 = AverageMeter()
    meter_top5 = AverageMeter()

    # evaluation mode: disables dropout / fixes batch-norm stats
    model.eval()

    tick = time.time()
    bar = Bar(f'{mode}', max=len(valloader))
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(valloader):
            # time spent waiting on the data pipeline
            meter_dt.update(time.time() - tick)

            if use_cuda:
                inputs = inputs.cuda()
                targets = targets.cuda(non_blocking=True)

            # forward; the model also returns auxiliary output we ignore
            outputs, _ = model(inputs)
            loss = criterion(outputs, targets)

            # record loss and accuracy
            prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
            n = inputs.size(0)
            meter_loss.update(loss.item(), n)
            meter_top1.update(prec1.item(), n)
            meter_top5.update(prec5.item(), n)

            meter_bt.update(time.time() - tick)
            tick = time.time()

            # progress line
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                batch=batch_idx + 1,
                size=len(valloader),
                data=meter_dt.avg,
                bt=meter_bt.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=meter_loss.avg,
                top1=meter_top1.avg,
                top5=meter_top5.avg,
            )
            bar.next()
    bar.finish()
    return (meter_loss.avg, meter_top1.avg)
def train(train_loader, num_classes, model, optimizer, criterion, epoch,
          use_cuda):
    """One training epoch; returns (last batch index, average loss).

    `num_classes` and `epoch` are unused here. Depends on a module-level
    `args` for `args.val_iteration`.
    NOTE(review): the progress bar is sized with args.val_iteration while
    the loop runs over len(train_loader), and both a `Bar` and a `tqdm`
    wrap the same loop — verify this double progress display is intended.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    end = time.time()
    bar = Bar('Training', max=args.val_iteration)
    t = tqdm(enumerate(train_loader),
             total=len(train_loader),
             desc='training')
    model.train()
    for batch_idx, (input, target) in t:
        if use_cuda:
            input, target = input.cuda(), target.cuda(non_blocking=True)
        # measure data loading time
        data_time.update(time.time() - end)
        # batch size
        batch_size = input.size(0)
        output = model(input)
        # targets arrive with an extra dim of size 1; squeeze it for the loss
        loss = criterion(output, target.squeeze(1))
        # record loss
        losses.update(loss.item(), input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} '.format(
            batch=batch_idx + 1,
            size=args.val_iteration,
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg)
        bar.next()
    bar.finish()
    return (
        batch_idx,
        losses.avg,
    )
def validate(val_loader, model, criterion):
    """Evaluate `model` over `val_loader`; returns (avg loss, avg top-1).

    `val_loader_len` sizes the progress bar (the loader itself may be an
    iterator without __len__).
    """
    # keep the original positional signature
    return None


def validate(val_loader, val_loader_len, model, criterion):
    """Evaluate `model` over `val_loader`; returns (avg loss, avg top-1).

    Fix: the original called loss.backward() here, silently accumulating
    gradients during validation (there is no optimizer step, so they leak
    into the next training step). Removed, and the evaluation is wrapped
    in torch.no_grad() — restoring the intent of the commented-out line.
    """
    bar = Bar('Processing', max=val_loader_len)
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            target = target.cuda(non_blocking=True)

            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))
            top5.update(prec5.item(), input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                batch=i + 1,
                size=val_loader_len,
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                top1=top1.avg,
                top5=top5.avg,
            )
            bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def train(train_loader, model, criterion, optimizer, epoch, use_cuda):
    """Despite the name, this does no training: it serializes the entire
    dataset from `train_loader` into five CIFAR-style pickle shards of
    250k samples each, written to hard-coded paths under
    /BS/database11/ILSVRC2012_imgsize64/.

    Images are de-normalized from [0, 1] floats back to uint8 (x*255).
    `model`, `criterion`, `optimizer`, `epoch`, `use_cuda` are unused.
    NOTE(review): assumes the loader yields CHW float tensors in [0, 1]
    and that the dataset has > 1,000,000 samples — confirm both.
    """
    bar = Bar('Processing', max=len(train_loader))
    pdata = dict()
    pdata['data'] = []
    pdata['labels'] = []
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        # print(type(inputs), type(targets))
        # print(inputs, targets)
        # undo the [0,1] normalization and store as raw uint8 pixels
        inputs = inputs*255
        inputs = inputs.to(torch.uint8).numpy()
        pdata['data'].extend(inputs)
        pdata['labels'].extend(targets.numpy())
        bar.next()
    bar.finish()
    pdata['data'] = np.asarray(pdata['data'], dtype=np.uint8)
    # print(pdata['data'].shape)
    # print(pdata)
    # from PIL import Image
    # x = pdata['data'][1]
    # x = np.transpose(x, (1, 2, 0))
    # im = Image.fromarray(x)
    # im.show()
    # split into five fixed-size shards of 250,000 samples each
    pdata1 = dict()
    pdata1['data'] = pdata['data'][:250000]
    pdata1['labels'] = pdata['labels'][:250000]
    pdata2 = dict()
    pdata2['data'] = pdata['data'][250000:250000*2]
    pdata2['labels'] = pdata['labels'][250000:250000*2]
    pdata3 = dict()
    pdata3['data'] = pdata['data'][250000*2:250000*3]
    pdata3['labels'] = pdata['labels'][250000*2:250000*3]
    pdata4 = dict()
    pdata4['data'] = pdata['data'][250000*3:250000*4]
    pdata4['labels'] = pdata['labels'][250000*3:250000*4]
    # the last shard takes whatever remains
    pdata5 = dict()
    pdata5['data'] = pdata['data'][250000 * 4:]
    pdata5['labels'] = pdata['labels'][250000 * 4:]
    with open(r"/BS/database11/ILSVRC2012_imgsize64/train0", "wb") as output_file:
        pickle.dump(pdata1, output_file)
    with open(r"/BS/database11/ILSVRC2012_imgsize64/train1", "wb") as output_file:
        pickle.dump(pdata2, output_file)
    with open(r"/BS/database11/ILSVRC2012_imgsize64/train2", "wb") as output_file:
        pickle.dump(pdata3, output_file)
    with open(r"/BS/database11/ILSVRC2012_imgsize64/train3", "wb") as output_file:
        pickle.dump(pdata4, output_file)
    with open(r"/BS/database11/ILSVRC2012_imgsize64/train4", "wb") as output_file:
        pickle.dump(pdata5, output_file)
    return
def train(trainloader, model, criterion, optimizer, epoch, use_cuda):
    """One training epoch for landmark regression; batches arrive as dicts
    with 'image' and 'landmarks' keys.

    Returns (average loss, 0) — the second slot is a placeholder for the
    (currently disabled) normalized-mean-error metric.
    """
    # switch to train mode
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    NormMS = AverageMeter()

    end = time.time()
    bar = Bar('Processing', max=len(trainloader))
    for batch_idx, batch_data in enumerate(trainloader):
        # measure data loading time
        data_time.update(time.time() - end)

        inputs = batch_data['image']
        targets = batch_data['landmarks']
        if use_cuda:
            # `async=True` is a SyntaxError on Python 3.7+; the modern
            # spelling is non_blocking=True.
            inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)
        inputs, targets = torch.autograd.Variable(
            inputs), torch.autograd.Variable(targets)

        # compute output
        outputs = model(inputs)
        loss = criterion(outputs, targets)

        # measure accuracy and record loss
        #nms= normalizedME(outputs.data,targets.data,64,64)
        # (.item() replaces the 0.3-era `loss.data[0]`)
        losses.update(loss.item(), inputs.size(0))
        #NormMS.update(nms[0], inputs.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f}'.format(
            batch=batch_idx + 1,
            size=len(trainloader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
        )
        bar.next()
    bar.finish()
    return (losses.avg, 0)
def validate(val_loader, model, generated_weights, criterion):
    """Evaluate a low-shot model using externally generated classifier
    weights for the novel classes.

    Returns (average loss, average top-1 accuracy).
    """
    meter_bt = AverageMeter()
    meter_dt = AverageMeter()
    meter_loss = AverageMeter()
    meter_top1 = AverageMeter()
    meter_top5 = AverageMeter()

    # evaluation mode
    model.eval()

    bar = Bar('Testing ', max=len(val_loader))
    with torch.no_grad():
        tick = time.time()
        for batch_idx, (input, target) in enumerate(val_loader):
            # time spent waiting on the data pipeline
            meter_dt.update(time.time() - tick)

            input = input.cuda()
            target = target.cuda(non_blocking=True)

            # forward with the generated novel-class classifiers plugged in
            output = model(input,
                           base_class_indexes=None,
                           novel_class_classifiers=generated_weights)
            loss = criterion(output, target)

            # record loss and accuracy
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            n = input.size(0)
            meter_loss.update(loss.item(), n)
            meter_top1.update(prec1.item(), n)
            meter_top5.update(prec5.item(), n)

            meter_bt.update(time.time() - tick)
            tick = time.time()

            # progress line
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                batch=batch_idx + 1,
                size=len(val_loader),
                data=meter_dt.avg,
                bt=meter_bt.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=meter_loss.avg,
                top1=meter_top1.avg,
                top5=meter_top5.avg,
            )
            bar.next()
    bar.finish()
    return (meter_loss.avg, meter_top1.avg)
def train(train_loader, model, criterion, optimizer, epoch, use_cuda):
    """One supervised training epoch.

    Returns (average loss, average top-1 accuracy).
    """
    # switch to train mode
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    end = time.time()
    bar = Bar('Processing', max=len(train_loader))
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        if use_cuda:
            # `async=True` is a SyntaxError on Python 3.7+; the modern
            # spelling is non_blocking=True.
            inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)
        inputs, targets = torch.autograd.Variable(
            inputs), torch.autograd.Variable(targets)

        # compute output
        outputs = model(inputs)
        loss = criterion(outputs, targets)

        # measure accuracy and record loss
        # (.item() replaces the 0.3-era `tensor[0]` scalar access)
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress (note: .val — the most recent batch, not the average)
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
            batch=batch_idx + 1,
            size=len(train_loader),
            data=data_time.val,
            bt=batch_time.val,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            top1=top1.avg,
            top5=top5.avg,
        )
        bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def train(self, examples):
    """
    examples: list of examples, each example is of form (board, pi, v)

    Runs args.epochs epochs of SGD on the TensorFlow graph held in
    self.nnet via self.sess. Each epoch draws len(examples)//batch_size
    random minibatches (with replacement — some examples may be seen
    multiple times or not at all per epoch). Returns nothing; progress is
    printed per batch.
    """
    for epoch in range(args.epochs):
        print('EPOCH ::: ' + str(epoch + 1) + ' len examples:' +
              str(len(examples)))
        data_time = AverageMeter()
        batch_time = AverageMeter()
        pi_losses = AverageMeter()   # policy-head loss
        v_losses = AverageMeter()    # value-head loss
        end = time.time()
        bar = Bar(f'({epoch}/{args.epochs})) / Training Net',
                  max=int(len(examples) / args.batch_size))
        batch_idx = 0
        # self.sess.run(tf.local_variables_initializer())
        while batch_idx < int(len(examples) / args.batch_size):
            # sample a random minibatch (with replacement)
            sample_ids = np.random.randint(len(examples),
                                           size=args.batch_size)
            boards, pis, vs = list(zip(*[examples[i] for i in sample_ids]))
            # predict and compute gradient and do SGD step
            input_dict = {self.nnet.input_boards: boards,
                          self.nnet.target_pis: pis,
                          self.nnet.target_vs: vs,
                          self.nnet.dropout: args.dropout,
                          self.nnet.isTraining: True}
            # measure data loading time
            data_time.update(time.time() - end)
            # record loss
            # NOTE(review): the loss ops run in a second sess.run after the
            # train step, i.e. an extra forward pass per batch.
            self.sess.run(self.nnet.train_step, feed_dict=input_dict)
            pi_loss, v_loss = self.sess.run(
                [self.nnet.loss_pi, self.nnet.loss_v], feed_dict=input_dict)
            pi_losses.update(pi_loss, len(boards))
            v_losses.update(v_loss, len(boards))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            batch_idx += 1
            # plot progress
            verbose = True
            if verbose:
                print('({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss_pi: {lpi:.4f} | Loss_v: {lv:.3f}'.format(
                    batch=batch_idx,
                    size=int(len(examples) / args.batch_size),
                    data=data_time.avg,
                    bt=batch_time.avg,
                    total=bar.elapsed_td,
                    eta=bar.eta_td,
                    lpi=pi_losses.avg,
                    lv=v_losses.avg,
                    )
                )
            bar.next()
        bar.finish()
def fine_tuning(train_loader, model, criterion, optimizer):
    """Fine-tune a multi-label classifier for one epoch, tracking micro/
    macro F1 and re-normalizing the model's weights after every step.

    Returns (average loss, average micro-F1, average macro-F1).
    """
    meter_bt = AverageMeter()
    meter_dt = AverageMeter()
    meter_loss = AverageMeter()
    meter_micro = AverageMeter()
    meter_macro = AverageMeter()

    model.train()
    tick = time.time()
    bar = Bar('Training', max=len(train_loader))
    for batch_idx, (input, target) in enumerate(train_loader):
        # time spent waiting on the data pipeline
        meter_dt.update(time.time() - tick)

        input = input.cuda()
        target = target.cuda()

        # forward; targets are cast to float for the multi-label loss
        output = model(input)
        loss = criterion(output, target.float())

        # F1 metrics on detached CPU copies
        target = target.data.cpu().float()
        output = output.data.cpu()
        micro, macro = calc_f1(target, output)

        n = input.size(0)
        meter_loss.update(loss.item(), n)
        meter_micro.update(micro.item(), n)
        meter_macro.update(macro.item(), n)

        # gradient step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        meter_bt.update(time.time() - tick)
        tick = time.time()

        # project weights back to the unit norm after every update
        model.weight_norm()

        # progress line (note: .val — most recent batch timings)
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Micro-f1: {microF1: .4f} |Macro-f1: {macroF1: .4f}'.format(
            batch=batch_idx + 1,
            size=len(train_loader),
            data=meter_dt.val,
            bt=meter_bt.val,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=meter_loss.avg,
            microF1=meter_micro.avg,
            macroF1=meter_macro.avg,
        )
        bar.next()
    bar.finish()
    return (meter_loss.avg, meter_micro.avg, meter_macro.avg)
def train_adv(trainloader, net, criterion, optimizer, epoch, adversary):
    """Adversarial training epoch: perturb each batch with `adversary`,
    then take args.update SGD steps on the perturbed batch.

    Relies on module-level `device` and `args`. Returns
    (average loss, average top-1 accuracy).
    """
    print('\nEpoch: %d' % epoch)
    net.train()

    meter_bt = AverageMeter()
    meter_loss = AverageMeter()
    meter_top1 = AverageMeter()

    tick = time.time()
    bar = Bar('Processing', max=len(trainloader))
    for batch_idx, data_info in enumerate(trainloader):
        inputs, targets = data_info[0], data_info[1].long()
        inputs, targets = inputs.to(device), targets.to(device)

        # craft adversarial examples for this batch
        # adv_inputs = adv_train(inputs, targets, net, criterion, adversary)
        _, adv_inputs = adversary.perturb(inputs, targets)

        net.train()
        # several updates on the same adversarial batch
        for update_idx in range(args.update):
            # adv_inputs, targets = adv_inputs.to(device), targets.to(device)
            outputs = net(adv_inputs)
            loss = criterion(outputs, targets)

            # record loss / accuracy (top-5 computed but unused)
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
            n = inputs.size(0)
            meter_loss.update(loss.item(), n)
            meter_top1.update(prec1.item(), n)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        meter_bt.update(time.time() - tick)
        tick = time.time()

        bar.suffix = '({batch}/{size}) Batch: {bt:.3f}s| Total:{total:}| ETA:{eta:}| Loss:{loss:.4f}| top1:{top1:.2f}'.format(
            batch=batch_idx + 1,
            size=len(trainloader),
            bt=meter_bt.val,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=meter_loss.avg,
            top1=meter_top1.avg)
        bar.next()
    bar.finish()
    return meter_loss.avg, meter_top1.avg
def get_novel_weights(novel_loader, model, weight_generator):
    """Imprint classifier weights for 100 novel classes.

    Extracts features for every novel sample, averages them per class
    (classes are offset by +100 in the labels), L2-normalizes each mean
    (or uses random vectors when args.random is set), and passes the
    resulting 100x256 matrix through `weight_generator`.

    Returns the generated weight tensor.
    """
    meter_bt = AverageMeter()
    meter_dt = AverageMeter()

    # switch to evaluate mode
    model.eval()
    weight_generator.eval()

    tick = time.time()
    bar = Bar('Imprinting', max=len(novel_loader))
    with torch.no_grad():
        for batch_idx, (input, target) in enumerate(novel_loader):
            # time spent waiting on the data pipeline
            meter_dt.update(time.time() - tick)

            input = input.cuda()

            # accumulate features and labels across the whole loader
            output = model.extract(input)
            if batch_idx == 0:
                output_stack, target_stack = output, target
            else:
                output_stack = torch.cat((output_stack, output), 0)
                target_stack = torch.cat((target_stack, target), 0)

            meter_bt.update(time.time() - tick)
            tick = time.time()

            # progress line (note: .val — most recent batch timings)
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:}'.format(
                batch=batch_idx + 1,
                size=len(novel_loader),
                data=meter_dt.val,
                bt=meter_bt.val,
                total=bar.elapsed_td,
                eta=bar.eta_td
            )
            bar.next()
        bar.finish()

        # class-mean (or random) prototype per novel class, unit-normalized
        new_weight = torch.zeros(100, 256)
        for i in range(100):
            if not args.random:
                tmp = output_stack[target_stack == (i + 100)].mean(0)
            else:
                tmp = torch.randn(256)
            new_weight[i] = tmp / tmp.norm(p=2)
        gen_weight = weight_generator(new_weight.cuda())
    return gen_weight
def test(val_loader, model, criterion, epoch, use_cuda):
    """Evaluate `model` on `val_loader`, timing the forward pass only.

    Returns (average loss, average top-1, average top-5).
    """
    global best_acc

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    bar = Bar('P', max=len(val_loader))
    # no_grad replaces the removed `Variable(..., volatile=True)` API.
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()

            # compute output; batch_time here measures the forward pass only
            end = time.time()
            outputs = model(inputs)
            batch_time.update(time.time() - end)
            loss = criterion(outputs, targets)
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))

            # measure accuracy and record loss
            # (.item() replaces the 0.3-era `tensor[0]` scalar access)
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))

            # plot progress every 10 batches
            if (batch_idx + 1) % 10 == 0:
                print(
                    '({batch}/{size}) D: {data:.2f}s | B: {bt:.2f}s | T: {total:} | E: {eta:} | L: {loss:.3f} | t1: {top1: .3f} | t5: {top5: .3f}'
                    .format(
                        batch=batch_idx + 1,
                        size=len(val_loader),
                        data=data_time.avg,
                        bt=batch_time.avg,
                        total=bar.elapsed_td,
                        eta=bar.eta_td,
                        loss=losses.avg,
                        top1=top1.avg,
                        top5=top5.avg,
                    ))
            bar.next()
    bar.finish()
    return (losses.avg, top1.avg, top5.avg)
def test(testloader, model, criterion, use_cuda):
    """Evaluate a landmark-regression model; batches arrive as dicts with
    'image' and 'landmarks' keys.

    Returns (average loss, 0) — the second slot is a metric placeholder.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    bar = Bar('Processing', max=len(testloader))
    for batch_idx, batch_data in enumerate(testloader):
        # measure data loading time
        data_time.update(time.time() - end)

        inputs = batch_data['image']
        targets = batch_data['landmarks']
        if use_cuda:
            # `async=True` is a SyntaxError on Python 3.7+; the modern
            # spelling is non_blocking=True.
            inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)
        inputs, targets = torch.autograd.Variable(
            inputs), torch.autograd.Variable(targets)

        # compute output
        outputs = model(inputs)
        loss = criterion(outputs, targets.squeeze())

        # measure accuracy and record loss
        losses.update(loss.item(), inputs.size(0))

        # measure elapsed time
        batch_time.update(float(time.time() - end))
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} '.format(
            batch=batch_idx + 1,
            size=len(testloader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
        )
        bar.next()
    bar.finish()
    return (losses.avg, 0)
def train(trainloader, model, criterion, optimizer, epoch):
    """One training epoch; uses the module-level `device`.

    Returns (average loss, average top-1 accuracy).
    """
    # switch to train mode
    model.train()

    meter_bt = AverageMeter()
    meter_dt = AverageMeter()
    meter_loss = AverageMeter()
    meter_top1 = AverageMeter()

    tick = time.time()
    bar = Bar('Processing', max=len(trainloader))
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        # time spent waiting on the data pipeline
        meter_dt.update(time.time() - tick)

        # targets carry a trailing singleton dim; squeeze it away
        inputs, targets = inputs.to(device), targets.to(device).squeeze()

        # forward
        outputs = model(inputs)
        loss = criterion(outputs, targets)

        # record loss and top-1 accuracy
        prec1 = accuracy(outputs.data, targets.data, topk=(1, ))
        n = inputs.size(0)
        meter_loss.update(loss.item(), n)
        meter_top1.update(prec1[0].item(), n)

        # gradient step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        meter_bt.update(time.time() - tick)
        tick = time.time()

        # progress line
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f}'.format(
            batch=batch_idx + 1,
            size=len(trainloader),
            data=meter_dt.avg,
            bt=meter_bt.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=meter_loss.avg,
            top1=meter_top1.avg,
        )
        bar.next()
    bar.finish()
    return (meter_loss.avg, meter_top1.avg)
def output_attention(val_loader, model, epoch, use_cuda, save_dir):
    """Dump per-sample attention maps to <save_dir>/attention.txt and track
    top-1/top-5 accuracy of the accompanying predictions.

    The model's forward is expected to return (probs, attention) where
    attention is [bs, c, w, h]; channels are summed before writing.
    Returns nothing.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    model.eval()
    end = time.time()
    bar = Bar('Processing', max=len(val_loader))
    fw = open(os.path.join(save_dir, 'attention.txt'), 'w')
    # no_grad replaces the removed `Variable(..., volatile=True)` API.
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(val_loader):
            data_time.update(time.time() - end)
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            probs, attention = model(inputs)
            bs, c, w, h = attention.size()
            # collapse the channel dimension into a single spatial map
            attention = attention.sum(1)
            attention = attention.cpu().data.numpy()
            attention = attention.reshape((bs, -1))
            # one whitespace-separated line of w*h values per sample
            for index in range(bs):
                hot = ''
                for j in range(w * h):
                    hot += '{:.3f} '.format(attention[index][j])
                hot += '\n'
                fw.write(hot)
            prec1, prec5 = accuracy(probs.data, targets.data, topk=(1, 5))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
            # fix: the original assigned `bar.shuffix` (typo), so the
            # progress text was never displayed
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                batch=batch_idx + 1,
                size=len(val_loader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                top1=top1.avg,
                top5=top5.avg
            )
            bar.next()
    bar.finish()
    fw.close()
def test(testloader, model, use_cuda, loader_len):
    """Extract a feature vector per sample: model output, 4x4 average-pooled
    (stride 1), flattened. Returns a list of numpy arrays.

    Note: ``loader_len`` is accepted for interface compatibility but unused;
    the loader's own length drives the progress bar.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    feature_list = []

    # Inference mode: no dropout, frozen batch-norm stats.
    model.eval()
    with torch.no_grad():
        tic = time.time()
        progress = Bar('Processing', max=len(testloader))
        for step, (inputs, targets) in enumerate(testloader):
            data_time.update(time.time() - tic)
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            inputs = torch.autograd.Variable(inputs)
            targets = torch.autograd.Variable(targets)

            # Forward pass, then pool and flatten to one vector per sample.
            feats = model(inputs)
            feats = torch.nn.functional.avg_pool2d(feats,
                                                   kernel_size=(4, 4),
                                                   stride=(1, 1))
            feats = feats.view(feats.size(0), -1)
            feature_list.extend(feats.detach().cpu().numpy())

            batch_time.update(time.time() - tic)
            tic = time.time()

            progress.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:}'.format(
                batch=step + 1,
                size=len(testloader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=progress.elapsed_td,
                eta=progress.eta_td)
            progress.next()
        progress.finish()
    return feature_list
def get_p(evalloader, model, epoch, use_cuda):
    """Collect softmax probability vectors over *evalloader*.

    Returns a list of numpy arrays, one per batch.

    NOTE(review): only element ``[0]`` of each batch is kept, which assumes
    ``batch_size == 1`` — confirm against the caller's DataLoader.
    """
    # Inference mode: no dropout, frozen batch-norm stats.
    model.eval()
    p_results = []
    bar = Bar('Evaluating', max=len(evalloader))
    # FIX: `Variable(..., volatile=True)` was removed in torch >= 0.4;
    # torch.no_grad() is the supported replacement (already used elsewhere
    # in this file).
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(evalloader):
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()

            outputs = model(inputs)
            # Normalize logits to probabilities over the last axis.
            outputs = nn.functional.softmax(outputs, dim=-1)
            p_results.append(outputs.cpu().data[0].numpy())

            bar.suffix = '({batch}/{size})'.format(batch=batch_idx + 1,
                                                   size=len(evalloader))
            bar.next()
    bar.finish()
    return p_results
def test(testloader, model, use_cuda):
    """Run inference over *testloader* and collect the raw model outputs
    (landmark predictions) as numpy arrays, one entry per batch.

    Note: the ``losses`` meter is never fed, so the progress line always
    reports Loss 0.0000 — preserved as-is from the original.
    """
    predictions = []
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    # Inference mode: no dropout, frozen batch-norm stats.
    model.eval()
    tic = time.time()
    progress = Bar('Processing', max=len(testloader))
    for step, batch_data in enumerate(testloader):
        data_time.update(time.time() - tic)

        # Loader yields bare inputs (no targets).
        inputs = batch_data
        if use_cuda:
            inputs = inputs.cuda()
        inputs = torch.autograd.Variable(inputs)

        outputs = model(inputs)
        predictions.append(outputs.cpu().data.numpy())

        batch_time.update(time.time() - tic)
        tic = time.time()

        progress.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} '.format(
            batch=step + 1,
            size=len(testloader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=progress.elapsed_td,
            eta=progress.eta_td,
            loss=losses.avg,
        )
        progress.next()
    progress.finish()
    return predictions
def attack_over_test(testloader, net, criterion, adversary):
    """Evaluate *net* on adversarially perturbed test batches.

    Each batch is attacked via ``adversary.perturb`` before the forward
    pass. Returns (avg adversarial loss, avg adversarial top-1 accuracy).
    """
    net.eval()
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    tic = time.time()
    progress = Bar('Processing', max=len(testloader))
    for step, data_info in enumerate(testloader):
        clean_inputs = data_info[0]
        targets = data_info[1].long()
        clean_inputs, targets = clean_inputs.to(device), targets.to(device)

        # Craft adversarial examples for this batch, then score them.
        _, adv_inputs = adversary.perturb(clean_inputs, targets)
        outputs = net(adv_inputs)
        loss = criterion(outputs, targets)

        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        n = clean_inputs.size(0)
        losses.update(loss.item(), n)
        top1.update(prec1.item(), n)

        batch_time.update(time.time() - tic)
        tic = time.time()

        progress.suffix = '({batch}/{size}) Batch: {bt:.3f}s| Total: {total:}| ETA: {eta:}| Loss:{loss:.4f}| top1: {top1:.2f}'.format(
            batch=step + 1,
            size=len(testloader),
            bt=batch_time.avg,
            total=progress.elapsed_td,
            eta=progress.eta_td,
            loss=losses.avg,
            top1=top1.avg)
        progress.next()
    progress.finish()
    return losses.avg, top1.avg