    def val_loss(self):

        if self.val_data is None:
            return None

        print('==> evaluating validation loss')
        self.net.eval()
        loss_meter = AverageMeter()

        for x, y in self.val_data:

            # print(x, y)
            x, y = x.float(), y.float()            
            if self.use_gpu is not False:
                x, y = x.cuda(), y.cuda()

            output = self.net(x)
            # regression loss
            loss = self.criterion(output.view(-1), y.view(-1)).detach()
            if self.use_gpu:
                loss = loss.cpu()
            loss_meter.update(loss.item())

        print('==> validation loss is %.3f' % loss_meter.avg)
        self.scheduler.step(loss_meter.avg)
        self.net.train()
        return loss_meter.avg
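Every snippet on this page leans on an AverageMeter helper with val / avg / sum / count attributes, but none of them defines it. Below is a minimal sketch of the common plain variant (in the style of the PyTorch ImageNet example); the exact class in each repository may differ, so treat this as an assumption.

class AverageMeter(object):
    """Tracks the latest value and a running, sample-weighted average."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        # n is typically the batch size, so avg is a per-sample average
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count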
Example #2
def train_net(train_data_layer, net, epoch, args):
    net.train()
    losses = AverageMeter()
    time1 = time.time()
    epoch_num = train_data_layer._num_instance // train_data_layer._batch_size
    for step in range(epoch_num):
        (image_blob, boxes, rel_boxes, SpatialFea, classes, ix1, ix2,
         rel_labels, rel_so_prior) = train_data_layer.forward()
        target = torch.from_numpy(rel_labels).long().cuda()
        rel_so_prior = -0.5 * (rel_so_prior + 1.0 / args.num_relations)
        rel_so_prior = torch.from_numpy(rel_so_prior).float().cuda()
        # forward
        args.optimizer.zero_grad()
        obj_score, rel_score = net(image_blob, boxes, rel_boxes, SpatialFea,
                                   classes, ix1, ix2, args)
        loss = args.criterion((rel_so_prior + rel_score).view(1, -1), target)
        losses.update(loss.item())
        loss.backward()
        args.optimizer.step()
        if step % args.print_freq == 0:
            time2 = time.time()
            print "TRAIN:%d, Total LOSS:%f, Time:%s" % (
                step, losses.avg,
                time.strftime('%H:%M:%S', time.gmtime(int(time2 - time1))))
            time1 = time.time()
            losses.reset()
Example #3
    def val_loss(self):

        if self.val_data is None:
            return None

        print('==> evaluating validation loss')
        self.net.eval()
        loss_meter = AverageMeter()

        for x, y in self.val_data:

            # print(x, y)
            if self.use_gpu is not False:
                x, y = to_cuda(x), to_cuda(y)

            output = self.net(x)
            # regression loss
            loss = self.criterion(output, y)
            if self.use_gpu:
                loss = loss.cpu()
            loss_meter.update(loss.item())

        print('==> validation loss is %.3f' % loss_meter.avg)
        self.scheduler.step(loss_meter.avg)
        self.net.train()
        return loss_meter.avg
Example #4
    def val_acc(self):

        # return avg validation accuracy
        if self.val_data is None:
            return None

        print('==> evaluating validation accuracy')
        acc = AverageMeter()
        for _ in range(self.n_iters_val):
            # d is (x, y) for mlp, (x, y, x_lengths) for rnn
            d = self.val_data.next_batch(1)  # 1 so don't need to mask out
            d = list(d)
            if self.use_gpu is not False:
                d[0], d[1] = d[0].cuda(), d[1].cuda()
            y = d[1]  # (seq_len, bs)

            output = self.net.eval_forward(*d)  # seq_len x bs x output_size
            if len(output.shape) < 3:  # seq_len x bs, single output
                max_dim = 1
            else:  # seq_len x bs x output_size, multi output
                max_dim = 2
            _, ans = torch.max(output, max_dim)

            n = len(y)
            for j in range(n):
                if y[j].item() == ans[j].item():
                    acc.update(1)  # correct
                else:
                    acc.update(0)  # incorrect
        print('==> validation accuracy is %d%%' % (acc.avg * 100))
        self.scheduler.step(acc.avg)
        self.net.train()
        return acc.avg
Example #5
    def run_dataset(self, dataset, out_path, speedrun=False, restart=None):
        """
        :param dataset:   Dataset to work with (See datasets.py)
        :param out_path:  Root path for storing label images. Sequences of label pngs will be created in subdirectories.
        :param speedrun:  [Optional] Whether to warm up PyTorch when measuring the run time. Default: False
        :param restart:   [Optional] Name of sequence to restart from. Useful for debugging. Default: None
        """
        out_path.mkdir(exist_ok=True, parents=True)

        dset_fps = AverageMeter()

        print('Evaluating', dataset.name)

        restarted = False
        for sequence in dataset:
            if restart is not None and not restarted:
                if sequence.name != restart:
                    continue
                restarted = True

            # We preload data because we cannot read from disk and upload to the GPU
            # in the background: in PyTorch it is hard, if not impossible, to upload
            # images to the GPU from a data loader running as a separate process.
            sequence.preload(self.device)
            # Mitigate out-of-memory errors that may occur on some YouTubeVOS
            # sequences on 11 GB devices.
            self.clear()
            outputs, seq_fps = self.run_sequence(sequence, speedrun)
            dset_fps.update(seq_fps)

            dst = out_path / sequence.name
            dst.mkdir(exist_ok=True)
            for lb, f in zip(outputs, sequence.frame_names):
                imwrite_indexed(dst / (f + ".png"), lb)

        print("Average frame rate: %.2f fps" % dset_fps.avg)
Example #6
def validate(val_loader, model, device='cpu', print_freq=100, prefix='test'):
    batch_time = AverageMeter()
    top1 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (data, target) in enumerate(val_loader):
            data, target = data.to(device), target.to(device)

            # compute output
            output = model(data)

            # measure accuracy (top-5 is computed, but only top-1 is tracked here)
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            top1.update(prec1[0], data.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % print_freq == 0:
                print(f'{prefix}: [{i}/{len(val_loader)}] '
                      f'Time {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                      f'Prec@1 {top1.val:.3f} ({top1.avg:.3f})')

        print(f' * {prefix} Prec@1 {top1.avg:.3f}')

    return top1.avg
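The accuracy(output, target, topk=(1, 5)) helper used throughout these examples is not shown either. A minimal sketch, assuming the convention of the PyTorch ImageNet reference script, where it returns one percentage tensor per requested k (hence the prec1[0] indexing above):

import torch

def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k."""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        # row r of pred holds the rank-(r+1) predicted class per sample
        _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res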
Example #7
def NN(net, lemniscate, trainloader, testloader, recompute_memory=0):
    net.eval()
    net_time = AverageMeter()
    cls_time = AverageMeter()
    losses = AverageMeter()
    correct = 0.
    total = 0
    testsize = testloader.dataset.__len__()

    trainFeatures = lemniscate.memory.t()
    if hasattr(trainloader.dataset, 'imgs'):
        trainLabels = torch.LongTensor([y for (p, y) in trainloader.dataset.imgs]).cuda()
    else:
        trainLabels = torch.LongTensor(trainloader.dataset.train_labels).cuda()

    if recompute_memory:
        transform_bak = trainloader.dataset.transform
        trainloader.dataset.transform = testloader.dataset.transform
        temploader = torch.utils.data.DataLoader(trainloader.dataset, batch_size=100, shuffle=False, num_workers=1)
        for batch_idx, (inputs, targets, indexes) in enumerate(temploader):
            inputs = inputs.cuda()  # the feature bank lives on the GPU
            targets = targets.cuda(non_blocking=True)  # `async` is a keyword in Python 3.7+
            batchSize = inputs.size(0)
            features = net(inputs)
            trainFeatures[:, batch_idx*batchSize:batch_idx*batchSize+batchSize] = features.data.t()
        trainLabels = torch.LongTensor(temploader.dataset.train_labels).cuda()
        trainloader.dataset.transform = transform_bak
    
    end = time.time()
    print_freq = 10
    with torch.no_grad():
        for batch_idx, (inputs, targets, indexes) in enumerate(testloader):
            inputs = inputs.cuda()
            targets = targets.cuda(non_blocking=True)
            batchSize = inputs.size(0)
            features = net(inputs)
            net_time.update(time.time() - end)
            end = time.time()

            dist = torch.mm(features, trainFeatures)

            yd, yi = dist.topk(1, dim=1, largest=True, sorted=True)
            candidates = trainLabels.view(1,-1).expand(batchSize, -1)
            retrieval = torch.gather(candidates, 1, yi)

            retrieval = retrieval.narrow(1, 0, 1).clone().view(-1)
            yd = yd.narrow(1, 0, 1)

            total += targets.size(0)
            correct += retrieval.eq(targets.data).sum().item()
            
            cls_time.update(time.time() - end)
            end = time.time()

            if batch_idx % print_freq == 0:
                print('Test [{}/{}]\t'
                      'Net Time {net_time.val:.3f} ({net_time.avg:.3f})\t'
                      'Cls Time {cls_time.val:.3f} ({cls_time.avg:.3f})\t'
                      'Top1: {:.2f}'.format(
                       batch_idx, len(testloader), correct*100./total, net_time=net_time, cls_time=cls_time))

    return correct/total
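Examples #7, #10, and #34 read lemniscate.memory, the non-parametric memory bank from the instance-discrimination line of work (Wu et al.): an N x D buffer of L2-normalized training features, so torch.mm(features, trainFeatures) is a cosine similarity against every training image. A minimal sketch of such a bank, assuming this interface rather than reproducing the repositories' exact NCEAverage module:

import torch

class MemoryBank(object):
    """N x D buffer of L2-normalized features; .memory mirrors lemniscate.memory."""

    def __init__(self, n, dim):
        memory = torch.randn(n, dim)
        self.memory = memory / memory.norm(dim=1, keepdim=True)

    def update(self, features, indexes, momentum=0.5):
        # exponential moving average of each sample's feature, then re-normalize
        new = momentum * self.memory[indexes] + (1 - momentum) * features
        self.memory[indexes] = new / new.norm(dim=1, keepdim=True)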
Example #8
    def forward(self, images, labels, meta):

        specs = SampleSpec.from_encoded(meta)

        losses = AverageMeter()
        iter_acc = 0
        n = 0

        cache_hits = self._initialize(images[0], labels[0], specs)

        for i in range(1, len(images)):

            s = self._forward(images[i].to(self.device))

            y = labels[i].to(self.device).float()
            acc = self.compute_accuracy(s.detach(), y)
            loss = self.compute_loss(s, y)
            loss.backward()

            losses.update(loss.item())
            iter_acc += acc.mean().cpu().numpy()
            n += 1

        stats = dict()
        stats['stats/loss'] = losses.avg
        stats['stats/accuracy'] = iter_acc / n
        stats['stats/fcache_hits'] = cache_hits

        return stats
Example #9
def train(round, epoch, trainloader, net, npc, structure, criterion, optimizer,
          device):
    # tracking variables
    train_loss = AverageMeter()

    # switch the model to train mode
    net.train()

    # adjust learning rate
    adjust_learning_rate(optimizer, epoch)
    optimizer.zero_grad()

    for batch_idx, (inputs, _, _, indexes) in enumerate(trainloader):
        inputs, indexes = inputs.to(device), indexes.to(device)

        features = net(inputs)
        neighbor_indexes = structure.neighbor_indexes_sim[indexes]
        outputs = npc(features, indexes, neighbor_indexes, round)

        loss = criterion(outputs, indexes, structure)
        loss.backward()
        train_loss.update(loss.item(), inputs.size(0))

        optimizer.step()
        optimizer.zero_grad()
        if batch_idx % 80 == 0:
            print('Round: {round} Epoch: [{epoch}][{elps_iters}/{tot_iters}] '
                  'Train loss: {train_loss.val:.4f} ({train_loss.avg:.4f}) '.
                  format(round=round,
                         epoch=epoch,
                         elps_iters=batch_idx,
                         tot_iters=len(trainloader),
                         train_loss=train_loss))
Example #10
def NN(epoch, net, lemniscate, trainloader, testloader, recompute_memory=0):
    net.eval()
    net_time = AverageMeter()
    cls_time = AverageMeter()
    losses = AverageMeter()
    correct = 0.
    total = 0
    testsize = testloader.dataset.__len__()

    trainFeatures = lemniscate.memory.t()
    if hasattr(trainloader.dataset, 'imgs'):
        trainLabels = torch.LongTensor([y for (p, y) in trainloader.dataset.imgs]).cuda()
    else:
        trainLabels = torch.LongTensor(trainloader.dataset.train_labels).cuda()

    if recompute_memory:
        transform_bak = trainloader.dataset.transform
        trainloader.dataset.transform = testloader.dataset.transform
        temploader = torch.utils.data.DataLoader(trainloader.dataset, batch_size=100, shuffle=False, num_workers=1)
        with torch.no_grad():  # replaces the removed Variable(..., volatile=True)
            for batch_idx, (inputs, targets, indexes) in enumerate(temploader):
                inputs = inputs.cuda()
                batchSize = inputs.size(0)
                features = net(inputs)
                trainFeatures[:, batch_idx*batchSize:batch_idx*batchSize+batchSize] = features.t()
        trainLabels = torch.LongTensor(temploader.dataset.train_labels).cuda()
        trainloader.dataset.transform = transform_bak
    
    end = time.time()
    with torch.no_grad():
        for batch_idx, (inputs, targets, indexes) in enumerate(testloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            batchSize = inputs.size(0)
            features = net(inputs)
            net_time.update(time.time() - end)
            end = time.time()

            dist = torch.mm(features, trainFeatures)

            yd, yi = dist.topk(1, dim=1, largest=True, sorted=True)
            candidates = trainLabels.view(1, -1).expand(batchSize, -1)
            retrieval = torch.gather(candidates, 1, yi)

            retrieval = retrieval.narrow(1, 0, 1).clone().view(-1)
            yd = yd.narrow(1, 0, 1)

            total += targets.size(0)
            correct += retrieval.eq(targets).cpu().sum().item()

            cls_time.update(time.time() - end)
            end = time.time()

            print('Test [{}/{}]\t'
                  'Net Time {net_time.val:.3f} ({net_time.avg:.3f})\t'
                  'Cls Time {cls_time.val:.3f} ({cls_time.avg:.3f})\t'
                  'Top1: {:.2f}'.format(
                  total, testsize, correct*100./total, net_time=net_time, cls_time=cls_time))

    return correct/total
Example #11
def train(train_loader, model, criterion, optimizer, scheduler, epoch,
          summary_writer, args):
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(train_loader),
                             [batch_time, data_time, losses, top1, top5],
                             prefix="Epoch: [{}]".format(epoch))

    # switch to train mode
    model.train()

    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        images = images.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)

        if args.mixup:
            images, target_a, target_b, lam = mixup_data(
                images, target, args.alpha)
            output = model(images)
            loss = mixup_criterion(criterion, output, target_a, target_b, lam)
        else:
            output = model(images)
            loss = criterion(output, target)

        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))

        step = epoch * len(train_loader) + i
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if scheduler is not None:
            scheduler.step(step)

        # log
        summary_writer.add_scalar('lr', optimizer.param_groups[0]['lr'], step)
        summary_writer.add_scalar('train_acc1', acc1.item(), step)
        summary_writer.add_scalar('train_loss', loss.item(), step)

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            progress.display(i)

    return top1.avg, losses.avg
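Note that Example #11 constructs AverageMeter('Time', ':6.3f'): a second, named variant that also carries a display format and pairs with ProgressMeter. A sketch of both in the style of the official PyTorch ImageNet example; the repository's own definitions may differ slightly.

class AverageMeter(object):
    """Named running average with a printf-style display format."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__)


class ProgressMeter(object):
    """Prints 'prefix [batch/total]' followed by every registered meter."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        print('\t'.join(entries))

    @staticmethod
    def _get_batch_fmtstr(num_batches):
        num_digits = len(str(num_batches))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'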
Example #12
    def test(self, epoch):
        print('Testing...')
        self.model.eval()
        loss_record = AverageMeter()
        data_loader = self.test_loader
        with torch.no_grad():
            for i_batch, batch in enumerate(data_loader):
                if cuda:
                    batch['image'] = batch['image'].cuda()
                    batch['sym_cor'] = batch['sym_cor'].cuda()
                    batch['mask'] = batch['mask'].cuda()
                    batch['pts2d_map'] = batch['pts2d_map'].cuda()
                    batch['graph'] = batch['graph'].cuda()
                sym_cor_pred, mask_pred, pts2d_map_pred, graph_pred, sym_cor_loss, mask_loss, pts2d_loss, graph_loss = \
                        self.model(batch['image'], batch['sym_cor'], batch['mask'], batch['pts2d_map'], batch['graph'])
                mask_pred[mask_pred > 0.5] = 1.
                mask_pred[mask_pred <= 0.5] = 0.

                # DataParallel returns one loss per replica; reduce each to a scalar
                sym_cor_loss = sym_cor_loss.mean()
                mask_loss = mask_loss.mean()
                pts2d_loss = pts2d_loss.mean()
                graph_loss = graph_loss.mean()
                current_loss = self.args.lambda_sym_cor * sym_cor_loss + \
                               self.args.lambda_mask * mask_loss + \
                               self.args.lambda_pts2d * pts2d_loss + \
                               self.args.lambda_graph * graph_loss
                if i_batch < 3:
                    # some visualizations
                    image = cv2.imread(batch['image_name'][0])
                    self.visualize_symmetry(sym_cor_pred[0], mask_pred[0],
                                            batch['sym_cor'][0],
                                            batch['mask'][0], image, epoch,
                                            i_batch)
                    self.visualize_mask(mask_pred[0], batch['mask'][0], epoch,
                                        i_batch)
                    self.visualize_votes(pts2d_map_pred[0],
                                         batch['pts2d_map'][0],
                                         batch['mask'][0], epoch, i_batch)
                    try:
                        self.visualize_keypoints(pts2d_map_pred[:1],
                                                 batch['pts2d'][0],
                                                 batch['mask'][:1], image,
                                                 epoch, i_batch)
                    except Exception:
                        # we may not be able to vote keypoints at early stages
                        pass
                    self.visualize_graph(graph_pred[0], batch['graph'][0],
                                         batch['pts2d'][0], mask_pred[0],
                                         batch['mask'][0], image, epoch,
                                         i_batch)
                loss_record.update(current_loss.detach().cpu().numpy(),
                                   len(batch['image']))

        print('Loss: {:.4f}'.format(loss_record.avg))
        return loss_record.avg
Example #13
def train(train_loader, model, reglog, criterion, optimizer, epoch, forward):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # freeze also batch norm layers
    model.eval()

    end = time.time()
    for i, (input, target, _) in enumerate(train_loader):

        # measure data loading time
        data_time.update(time.time() - end)

        #adjust learning rate
        learning_rate_decay(optimizer, len(train_loader) * epoch + i, args.lr)

        target = target.cuda(non_blocking=True)
        input_var = input.cuda()
        target_var = target
        # compute output

        output = forward(input_var, model, reglog.conv)
        output = reglog(output)
        loss = criterion(output, target_var)
        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if args.verbose and i % 100 == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'
                  .format(epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5))

    writer.add_scalar('train_loss', losses.avg, epoch)
Example #14
def train(train_loader, n_epoch):
    net.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()

    count_threshold = n_epoch * len(train_loader)
    count = 0

    for batch_idx, (inputs, targets) in enumerate(train_loader):
        data_time.update(time.time() - end)
        if use_cuda:
            inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)

        loss.backward()
        optimizer.step()

        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))
        # timing
        batch_time.update(time.time() - end)
        end = time.time()

        #progress_bar(batch_idx, len(train_loader), 'Loss: {:.3f} | Acc1: {:.3f}% | Acc5: {:.3f}%'
        #             .format(losses.avg, top1.avg, top5.avg))
        if batch_idx % 200 == 0:
            print('[{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      batch_idx,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      top1=top1,
                      top5=top5))

        count += 1
        if count >= count_threshold:
            break
    net.eval()
Example #15
    def train(self):
        self.model.train()
        self.optim.zero_grad()

        iteration = 0
        for epoch in range(cfg.SOLVER.MAX_EPOCH):
            if epoch == cfg.TRAIN.REINFORCEMENT.START:
                self.rl_stage = True
            self.setup_loader(epoch)

            start = time.time()
            data_time = AverageMeter()
            batch_time = AverageMeter()
            losses = AverageMeter()
            for _, (indices, input_seq, target_seq, gv_feat, att_feats,
                    att_mask) in enumerate(self.training_loader):
                data_time.update(time.time() - start)

                input_seq = input_seq.cuda()
                target_seq = target_seq.cuda()
                gv_feat = gv_feat.cuda()
                att_feats = att_feats.cuda()
                att_mask = att_mask.cuda()
                # att_mask = torch.ones(16,70).cuda()
                # print(att_mask.shape)

                kwargs = self.make_kwargs(indices, input_seq, target_seq,
                                          gv_feat, att_feats, att_mask)
                loss, loss_info = self.forward(kwargs)
                loss.backward()
                # utils.clip_gradient(self.optim.optimizer, self.model,
                #                     cfg.SOLVER.GRAD_CLIP_TYPE, cfg.SOLVER.GRAD_CLIP)
                self.optim.step()
                self.optim.zero_grad()
                # self.optim.scheduler_step('Iter')

                batch_time.update(time.time() - start)
                start = time.time()
                losses.update(loss.item())
                self.display(iteration, data_time, batch_time, losses,
                             loss_info)
                iteration += 1

                if self.distributed:
                    dist.barrier()

            self.save_model(epoch)
            val = self.eval(epoch)
            # self.optim.scheduler_step('Epoch', val)
            # self.scheduled_sampling(epoch)

            if self.distributed:
                dist.barrier()
Example #16
def div_train(train_loader, model, epoch, pred):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    # switch to train mode
    #    model.train()

    end = time.time()
    #    optimizer.zero_grad()
    for i, (input, target, index, label) in enumerate(train_loader):
        # remove completely black images at training time

        # measure data loading time
        data_time.update(time.time() - end)

        index = index.cuda(non_blocking=True)
        # compute output
        feature = model(input)
        for x in list(zip(feature.data, index, label)):
            pred.append(x)

        # The contrastive branch is disabled in this variant; the function only
        # collects features. For reference, the removed steps were:
        #     output = lemniscate(feature, index)
        #     loss = criterion(output, index) / args.iter_size
        #     optimizer.backward(loss)  # backprop through the Apex optimizer
        #     losses.update(loss.item() * args.iter_size, input.size(0))
        #     if (i + 1) % args.iter_size == 0:
        #         optimizer.step()
        #         optimizer.zero_grad()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time))
    return pred
Example #17
def test(epoch, test_loader, save=True):
    global best_acc
    net.eval()

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()

    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(test_loader):
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            outputs = net(inputs)
            loss = criterion(outputs, targets)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
            # timing
            batch_time.update(time.time() - end)
            end = time.time()

            progress_bar(
                batch_idx, len(test_loader),
                'Loss: {:.3f} | Acc1: {:.3f}% | Acc5: {:.3f}%'.format(
                    losses.avg, top1.avg, top5.avg))

    if save:
        writer.add_scalar('loss/test', losses.avg, epoch)
        writer.add_scalar('acc/test_top1', top1.avg, epoch)
        writer.add_scalar('acc/test_top5', top5.avg, epoch)

        is_best = False
        if top1.avg > best_acc:
            best_acc = top1.avg
            is_best = True

        print('Current best acc: {}'.format(best_acc))
        save_checkpoint(
            {
                'epoch': epoch,
                'model': args.model,
                'dataset': args.dataset,
                'state_dict': net.module.state_dict()
                if isinstance(net, nn.DataParallel) else net.state_dict(),
                'acc': top1.avg,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint_dir=log_dir)
Example #18
def validate(val_loader, model, criterion):
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        target = target.cuda()
        input = input.cuda()
        with torch.no_grad():
            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1[0], input.size(0))
            top5.update(prec5[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

    logging.info(
        "    ---------------------------------------------------------------")
    logging.info(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'.format(
        top1=top1, top5=top5))

    return top1.avg, top5.avg
Example #19
def test(epoch, test_loader, save=False):
    global best_acc
    net.eval()

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()
    device0 = 'cuda'
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(test_loader):
            if use_cuda:
                inputs, targets = inputs.to(device0), targets.to(device0)
            outputs = net(inputs)
            loss = criterion(outputs, targets)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
            # timing
            batch_time.update(time.time() - end)
            end = time.time()

            progress_bar(
                batch_idx, len(test_loader),
                'Loss: {:.3f} | Acc1: {:.3f}% | Acc5: {:.3f}%'.format(
                    losses.avg, top1.avg, top5.avg))

    return top1.avg
Example #20
def eval_train(net, train_loader):
    net.eval()
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()

    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(train_loader):

            if use_cuda:
                inputs, targets = inputs.to(device), targets.to(device)

            outputs = net(inputs)

            loss = criterion(outputs, targets)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
            # timing
            batch_time.update(time.time() - end)
            end = time.time()

    return losses.avg, top1.avg, top5.avg
Example #21
def train_loop(_print, cfg, model, train_loader, criterion, optimizer,
               scheduler, epoch):
    _print(f"\nEpoch {epoch + 1}")
    losses = AverageMeter()
    model.train()
    tbar = tqdm(train_loader)

    use_cutmix = cfg.DATA.CUTMIX
    use_mixup = cfg.DATA.MIXUP
    fl = SigmoidFocalLoss(gamma=2., alpha=0.25)
    # bce = torch.nn.BCEWithLogitsLoss()
    # for i, (image, mask) in enumerate(tbar):
    for i, (image, mask, label) in enumerate(tbar):
        image = image.cuda()
        mask = mask.cuda()
        label = label.cuda()
        # mixup / cutmix
        if use_mixup:
            image, mask = mixup_data(image, mask, alpha=cfg.DATA.CM_ALPHA)
        elif use_cutmix:
            image, mask = cutmix_data(image, mask, alpha=cfg.DATA.CM_ALPHA)

        # compute loss
        # output = model(image)
        # loss = criterion(output, mask)
        output, cls_output = model(image)
        # aux_loss = bce(cls_output, label)
        aux_loss = fl(cls_output, label)
        loss = criterion(output, mask) + aux_loss * 0.4

        # gradient accumulation
        loss = loss / cfg.OPT.GD_STEPS
        if cfg.SYSTEM.FP16:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        # lr scheduler and optim. step
        if (i + 1) % cfg.OPT.GD_STEPS == 0:
            scheduler(optimizer, i, epoch)
            optimizer.step()
            optimizer.zero_grad()
        # record loss
        losses.update(loss.item() * cfg.OPT.GD_STEPS, image.size(0))
        tbar.set_description("Train loss: %.5f, learning rate: %.6f" %
                             (losses.avg, optimizer.param_groups[-1]['lr']))

    _print("Train loss: %.5f, learning rate: %.6f" %
           (losses.avg, optimizer.param_groups[-1]['lr']))
Example #22
def train(epoch, net, trainloader, optimizer, npc, criterion, rlb, lr):
    train_loss = AverageMeter()
    net.train()
    adjust_learning_rate(optimizer, lr)
    for (inputs, _, indexes) in trainloader:
        optimizer.zero_grad()
        inputs, indexes = inputs.to(cfg.device), indexes.to(cfg.device)

        features = net(inputs)
        outputs = npc(features, indexes)
        loss = criterion(outputs, indexes, rlb)

        loss.backward()
        train_loss.update(loss.item(), inputs.size(0))

        optimizer.step()
    return train_loss.avg
Example #23
def compute_feature(trainloader, model, N, args):
    # from networks get features

    batch_time = AverageMeter()
    end = time.time()

    model.eval()

    trainFeatures = np.zeros((N, args.low_dim), dtype='float32')
    feature_index = np.zeros(N, dtype='int')

    with torch.no_grad():
        for batch_index, (inputs, _, targets,
                          indexes) in enumerate(trainloader):
            batchSize = inputs.size(0)
            features = model(inputs)
            # print(len(trainloader)) # 391
            # print(features.shape) #torch.Size([128, 128]) --> [batch,dim]
            if batch_index < len(trainloader) - 1:
                trainFeatures[batch_index * batchSize:batch_index * batchSize + batchSize, :] = features.data.cpu(). \
                    numpy().astype('float32')
                feature_index[batch_index * batchSize:batch_index * batchSize +
                              batchSize] = np.array(
                                  [x.item() for x in indexes])
                # print(batch_index * batchSize + batchSize)
            else:
                # the last batch may be smaller than args.batch_size
                trainFeatures[batch_index *
                              args.batch_size:, :] = features.data.cpu().numpy(
                              ).astype('float32')
                feature_index[batch_index * args.batch_size:] = np.array(
                    [x.item() for x in indexes])

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # print('{0} / {1}\t'
            #       'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})'
            #       .format(batch_index, len(trainloader), batch_time=batch_time))

    return trainFeatures, feature_index
Example #24
def train(args, model, criterion, optimizer, train_dataloader, epoch):
    torch.set_grad_enabled(True)
    model.train()

    losses = AverageMeter()
    accuracies = AverageMeter()
    for i, data in enumerate(train_dataloader, 1):
        # get inputs
        sampled_clips, u_clips, v_clips, targets, _ = data
        if args.modality == 'u':
            inputs = u_clips
        elif args.modality == 'v':
            inputs = v_clips
        else:  # rgb and res
            inputs = sampled_clips
        inputs = inputs.cuda()
        targets = targets.cuda()
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward and backward
        if args.modality == 'res':
            outputs = model(diff(inputs))
        else:
            outputs = model(inputs)

        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        acc = calculate_accuracy(outputs, targets)
        losses.update(loss.data.item(), inputs.size(0))
        accuracies.update(acc, inputs.size(0))

        print('Train epoch: [{0:3d}/{1:3d}][{2:4d}/{3:4d}]\t'
              'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
              'Acc {acc.val:.3f} ({acc.avg:.3f})\t'
              'lr: {lr}'.format(epoch,
                                args.epochs,
                                i,
                                len(train_dataloader),
                                loss=losses,
                                acc=accuracies,
                                lr=optimizer.param_groups[0]['lr']),
              end='\r')
    print('')
Example #25
def validate(val_loader, model, reglog, criterion, forward):
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()
    softmax = nn.Softmax(dim=1).cuda()
    end = time.time()
    for i, (input_tensor, target, _) in enumerate(val_loader):
        if args.tencrops:
            bs, ncrops, c, h, w = input_tensor.size()
            input_tensor = input_tensor.view(-1, c, h, w)
        target = target.cuda(non_blocking=True)

        with torch.no_grad():
            input_var = input_tensor.cuda()
            target_var = target

            output = reglog(forward(input_var, model, reglog.conv))

            if args.tencrops:
                # keep the central crop for the loss; average the softmax over all crops
                output_central = output.view(bs, ncrops, -1)[:, int(ncrops / 2 - 1), :]
                output = softmax(output)
                output = torch.squeeze(output.view(bs, ncrops, -1).mean(1))
            else:
                output_central = output

            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
            top1.update(prec1[0], input_tensor.size(0))
            top5.update(prec5[0], input_tensor.size(0))
            loss = criterion(output_central, target_var)
            losses.update(loss.data.item(), input_tensor.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if args.verbose and i % 100 == 0:
                print('Validation: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'
                      .format(i, len(val_loader), batch_time=batch_time,
                              loss=losses, top1=top1, top5=top5))

    return top1.avg, top5.avg, losses.avg
Example #26
def test(args, model, criterion, test_dataloader):
    torch.set_grad_enabled(False)
    model.eval()

    accuracies = AverageMeter()

    if args.modality == 'res':
        print("[Warning]: using residual frames as input")

    total_loss = 0.0
    for i, data in enumerate(test_dataloader, 1):
        # get inputs
        rgb_clips, u_clips, v_clips, targets, _ = data
        if args.modality == 'u':
            sampled_clips = u_clips
        elif args.modality == 'v':
            sampled_clips = v_clips
        else:  # rgb and res
            sampled_clips = rgb_clips
        sampled_clips = sampled_clips.cuda()
        targets = targets.cuda()
        outputs = []
        for clips in sampled_clips:
            inputs = clips.cuda()
            # forward
            if args.modality == 'res':
                o = model(diff(inputs))
            else:
                o = model(inputs)
            o = torch.mean(o, dim=0)
            outputs.append(o)
        outputs = torch.stack(outputs)
        loss = criterion(outputs, targets)
        # compute loss and acc
        total_loss += loss.item()
        acc = calculate_accuracy(outputs, targets)
        accuracies.update(acc, inputs.size(0))
        print('Test: [{}/{}], {acc.val:.3f} ({acc.avg:.3f})'.format(
            i, len(test_dataloader), acc=accuracies),
              end='\r')
    avg_loss = total_loss / len(test_dataloader)
    print('\n[TEST] loss: {:.3f}, acc: {:.3f}'.format(avg_loss,
                                                      accuracies.avg))
    return avg_loss
Example #27
    def train(self):

        for epoch in range(self.epoch + 1, self.max_epochs + 1):

            self.epoch = epoch
            self.stats = ddict(AverageMeter)

            dset = ConcatDataset(
                [eval(cls)(**params) for cls, params in self.dataset])

            loader = DataLoader(dset,
                                batch_size=self.batch_size,
                                num_workers=self.num_workers,
                                pin_memory=True,
                                shuffle=True)
            t0 = None
            runtime = AverageMeter()

            for i, batch in enumerate(loader, 1):
                # Ignore the loader startup pause
                t0 = time() if t0 is None else t0

                self.optimizer.zero_grad()
                stats = self.model(*batch)
                self.optimizer.step()

                runtime.update(time() - t0)
                t0 = time()

                stats['stats/lr'] = self.scheduler.get_last_lr()[0]
                self.update_stats(stats,
                                  i,
                                  len(loader),
                                  runtime,
                                  do_print=True)

            self.scheduler.step()

            if self.epoch % self.save_interval == 0:
                self.save_checkpoint()

            self.log_stats()

        print("%s done" % self.name)
Example #28
    def _validate(self, val_loader, model, verbose=False):
        '''
        Validate the performance on validation set
        :param val_loader:
        :param model:
        :param verbose:
        :return:
        '''
        batch_time = AverageMeter()
        losses = AverageMeter()
        top1 = AverageMeter()
        top5 = AverageMeter()
        if torch.cuda.is_available():
            criterion = nn.CrossEntropyLoss().cuda()
        else:
            criterion = nn.CrossEntropyLoss()
        # switch to evaluate mode
        model.eval()
        end = time.time()

        t1 = time.time()
        with torch.no_grad():
            for i, (input, target) in enumerate(val_loader):
                if torch.cuda.is_available():
                    target = target.cuda(non_blocking=True)
                    input_var = input.cuda()
                    target_var = target
                else:
                    input_var = input
                    target_var = target
                # compute output
                output = model(input_var)
                loss = criterion(output, target_var)

                # measure accuracy and record loss
                prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
                losses.update(loss.item(), input.size(0))
                top1.update(prec1.item(), input.size(0))
                top5.update(prec5.item(), input.size(0))

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()
        t2 = time.time()
        if verbose:
            print('* Test loss: %.3f    top1: %.3f    top5: %.3f    time: %.3f' %
                  (losses.avg, top1.avg, top5.avg, t2 - t1))
        if self.acc_metric == 'acc1':
            return top1.avg
        elif self.acc_metric == 'acc5':
            return top5.avg
        else:
            raise NotImplementedError
Example #29
def my_knn(net, lemniscate, trainloader, testloader, K, sigma, train_dataset,
           val_dataset):  # a former recompute_memory=0 parameter now lives in main
    net.eval()
    net.eval()
    net_time = AverageMeter()
    cls_time = AverageMeter()

    testsize = testloader.dataset.__len__()

    trainFeatures = lemniscate.memory.t()
    if hasattr(trainloader.dataset, 'imgs'):
        trainLabels = torch.LongTensor(
            [y for (p, y) in trainloader.dataset.imgs]).cuda()
    else:
        trainLabels = torch.LongTensor(trainloader.dataset.train_labels).cuda()
    C = int(trainLabels.max() + 1)

    end = time.time()
    with torch.no_grad():
        retrieval_one_hot = torch.zeros(K, C).cuda()
        i = 0
        for batch_idx, (inputs, targets, indexes,
                        path) in enumerate(tqdm.tqdm(testloader)):
            end = time.time()

            batchSize = inputs.size(0)
            features = net(inputs.cuda())  # the memory bank lives on the GPU
            net_time.update(time.time() - end)
            end = time.time()

            dist = torch.mm(features, trainFeatures)

            yd, yi = dist.topk(K, dim=1, largest=True, sorted=True)

            # note: this shadows the `path` unpacked from the loader above
            path = '/data/images_rgb/disc/'

            for num, inst in enumerate(yi):
                os.makedirs(path + 'view/{}'.format(i), exist_ok=True)
                for x in yi[num]:
                    img = train_dataset.__getitem__(x)[3]
                    copyfile(path + 'train/all/' + img,
                             path + 'view/{}/'.format(i) + img)
                i += 1
Example #30
def validate(args, model, criterion, val_dataloader, epoch):
    torch.set_grad_enabled(False)
    model.eval()

    losses = AverageMeter()
    accuracies = AverageMeter()
    for i, data in enumerate(val_dataloader):
        # get inputs
        sampled_clips, u_clips, v_clips, targets, _ = data
        if args.modality == 'u':
            inputs = u_clips
        elif args.modality == 'v':
            inputs = v_clips
        else:  # rgb and res
            inputs = sampled_clips
        inputs = inputs.cuda()
        targets = targets.cuda()
        # forward
        if args.modality == 'res':
            outputs = model(diff(inputs))
        else:
            outputs = model(inputs)

        loss = criterion(outputs, targets)
        # compute loss and acc
        acc = calculate_accuracy(outputs, targets)
        losses.update(loss.data.item(), inputs.size(0))
        accuracies.update(acc, inputs.size(0))

        print('Val epoch:   [{0:3d}/{1:3d}][{2:4d}/{3:4d}]\t'
              'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
              'Acc {acc.val:.3f} ({acc.avg:.3f})'.format(epoch,
                                                         args.epochs,
                                                         i + 1,
                                                         len(val_dataloader),
                                                         loss=losses,
                                                         acc=accuracies),
              end='\r')
    print('')
    return losses.avg
Example #31
def supervised_evaluation(model, val_loader):
    batch_time = AverageMeter()
    losses = AverageMeter()

    # switch to evaluate mode
    model.eval()

    prediction_box = []
    target_box = []
    with torch.no_grad():
        end = time.time()
        for i, (images, target, index, name) in enumerate(val_loader):

            images = images.cuda()
            target = target.cuda()
            output = model(images)

            output = torch.softmax(output, dim=1)
            output = output.data.cpu().numpy()
            output = np.argmax(output, axis=1)

            prediction_box += list(output)
            target_box += list(target.data.cpu().numpy())

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

    auc = roc_auc_score(target_box, prediction_box)
    corrects = np.equal(np.array(target_box), np.array(prediction_box))
    acc = float(sum(corrects)) / len(corrects)

    #  mean class
    precision = precision_score(target_box, prediction_box, average='macro')
    recall = recall_score(target_box, prediction_box, average='macro')
    f1score = f1_score(target_box, prediction_box, average='macro')

    return (losses.avg, round(auc, 4), round(acc, 4), round(precision, 4),
            round(recall, 4), round(f1score, 4))
Example #32
def train(train_loader, model, lemniscate, criterion, optimizer, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    optimizer.zero_grad()
    for i, (input, _, index) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        index = index.cuda(non_blocking=True)

        # compute output
        feature = model(input)
        output = lemniscate(feature, index)
        loss = criterion(output, index) / args.iter_size

        loss.backward()

        # measure accuracy and record loss
        losses.update(loss.item() * args.iter_size, input.size(0))

        if (i+1) % args.iter_size == 0:
            # compute gradient and do SGD step
            optimizer.step()
            optimizer.zero_grad()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      data_time=data_time, loss=losses))
Example #33
def train(epoch):
    print('\nEpoch: %d' % epoch)
    adjust_learning_rate(optimizer, epoch)
    train_loss = AverageMeter()
    data_time = AverageMeter()
    batch_time = AverageMeter()
    correct = 0
    total = 0

    # switch to train mode
    net.train()

    end = time.time()
    for batch_idx, (inputs, targets, indexes) in enumerate(trainloader):
        data_time.update(time.time() - end)
        if use_cuda:
            inputs, targets, indexes = inputs.cuda(), targets.cuda(), indexes.cuda()
        optimizer.zero_grad()

        features = net(inputs)
        outputs = lemniscate(features, indexes)
        loss = criterion(outputs, indexes)

        loss.backward()
        optimizer.step()

        train_loss.update(loss.item(), inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        print('Epoch: [{}][{}/{}]'
              'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
              'Data: {data_time.val:.3f} ({data_time.avg:.3f}) '
              'Loss: {train_loss.val:.4f} ({train_loss.avg:.4f})'.format(
              epoch, batch_idx, len(trainloader), batch_time=batch_time, data_time=data_time, train_loss=train_loss))
Example #34
def kNN(epoch, net, lemniscate, trainloader, testloader, K, sigma, recompute_memory=0):
    net.eval()
    net_time = AverageMeter()
    cls_time = AverageMeter()
    total = 0
    testsize = testloader.dataset.__len__()

    trainFeatures = lemniscate.memory.t()
    if hasattr(trainloader.dataset, 'imgs'):
        trainLabels = torch.LongTensor([y for (p, y) in trainloader.dataset.imgs]).cuda()
    else:
        trainLabels = torch.LongTensor(trainloader.dataset.train_labels).cuda()
    C = int(trainLabels.max().item() + 1)

    if recompute_memory:
        transform_bak = trainloader.dataset.transform
        trainloader.dataset.transform = testloader.dataset.transform
        temploader = torch.utils.data.DataLoader(trainloader.dataset, batch_size=100, shuffle=False, num_workers=1)
        with torch.no_grad():  # replaces the removed Variable(..., volatile=True)
            for batch_idx, (inputs, targets, indexes) in enumerate(temploader):
                inputs = inputs.cuda()
                batchSize = inputs.size(0)
                features = net(inputs)
                trainFeatures[:, batch_idx*batchSize:batch_idx*batchSize+batchSize] = features.t()
        trainLabels = torch.LongTensor(temploader.dataset.train_labels).cuda()
        trainloader.dataset.transform = transform_bak
    
    top1 = 0.
    top5 = 0.
    end = time.time()
    with torch.no_grad():
        for batch_idx, (inputs, targets, indexes) in enumerate(testloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            batchSize = inputs.size(0)
            features = net(inputs)
            net_time.update(time.time() - end)
            end = time.time()

            dist = torch.mm(features, trainFeatures)

            yd, yi = dist.topk(K, dim=1, largest=True, sorted=True)
            candidates = trainLabels.view(1, -1).expand(batchSize, -1)
            retrieval = torch.gather(candidates, 1, yi)

            retrieval_one_hot = torch.zeros(batchSize * K, C).cuda()
            retrieval_one_hot.scatter_(1, retrieval.view(-1, 1), 1)
            yd_transform = yd.clone().div_(sigma).exp_()
            probs = torch.sum(torch.mul(retrieval_one_hot.view(batchSize, -1, C),
                                        yd_transform.view(batchSize, -1, 1)), 1)
            _, predictions = probs.sort(1, True)

            # Find which predictions match the target
            correct = predictions.eq(targets.view(-1, 1))

            top1 = top1 + correct.narrow(1, 0, 1).sum().item()
            top5 = top5 + correct.narrow(1, 0, 5).sum().item()

            total += targets.size(0)

            cls_time.update(time.time() - end)
            end = time.time()

            print('Test [{}/{}]\t'
                  'Net Time {net_time.val:.3f} ({net_time.avg:.3f})\t'
                  'Cls Time {cls_time.val:.3f} ({cls_time.avg:.3f})\t'
                  'Top1: {:.2f}  Top5: {:.2f}'.format(
                  total, testsize, top1*100./total, top5*100./total, net_time=net_time, cls_time=cls_time))

    print(top1*100./total)

    return top1/total
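Example #34 is the weighted kNN evaluation used with a feature memory bank: each of the K retrieved neighbors votes for its class with weight exp(d / sigma), where d is the cosine similarity between the (L2-normalized) query feature and the neighbor. A minimal standalone sketch of just that voting step, with toy shapes and values chosen for illustration:

import torch

K, C, sigma = 4, 3, 0.1                      # neighbors, classes, kernel temperature
sims = torch.tensor([[0.9, 0.8, 0.7, 0.2]])  # cosine similarities of the K neighbors
labels = torch.tensor([[0, 0, 2, 1]])        # class labels of those neighbors

one_hot = torch.zeros(1 * K, C).scatter_(1, labels.view(-1, 1), 1)
weights = sims.div(sigma).exp()              # exp(d / sigma) voting weights
probs = (one_hot.view(1, K, C) * weights.view(1, K, 1)).sum(1)
prediction = probs.argmax(1)                 # class with the largest weighted vote: 0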