    def valid(self, batch):

        self.model.eval()

        with torch.no_grad():

            if isinstance(self.valid_loader.dataset, Loader):
                x_1, x_2, x_3, x_4, x_5, y = batch
                x = torch.cat([x_1, x_2, x_3, x_4, x_5], dim=0)
                y = torch.cat(5 * [y], dim=0).squeeze().contiguous()
            else:
                x, y = batch

            x = x.to(self.device, non_blocking=True)
            y = y.to(self.device, non_blocking=True)

            embeddings, out = self.model.forward(x)
            embeddings_norm = F.normalize(embeddings, p=2, dim=1)

            out = self.model.out_proj(embeddings_norm, y)

            pred = F.softmax(out, dim=1)
            (correct_1, correct_5) = correct_topk(pred, y, (1, 5))

            # Get all triplets now for bin classifier
            triplets_idx = self.harvester.get_triplets(embeddings.detach(), y)
            triplets_idx = triplets_idx.to(self.device, non_blocking=True)

            emb_a = torch.index_select(embeddings, 0, triplets_idx[:, 0])
            emb_p = torch.index_select(embeddings, 0, triplets_idx[:, 1])
            emb_n = torch.index_select(embeddings, 0, triplets_idx[:, 2])

            emb_ap = torch.cat([emb_a, emb_p], 1)
            emb_an = torch.cat([emb_a, emb_n], 1)

            e2e_scores_p = self.model.forward_bin(emb_ap).squeeze(-1)
            e2e_scores_n = self.model.forward_bin(emb_an).squeeze(-1)
            cos_scores_p = F.cosine_similarity(emb_a, emb_p)
            cos_scores_n = F.cosine_similarity(emb_a, emb_n)

            e2e_scores = np.concatenate([e2e_scores_p.detach().cpu().numpy(),
                                         e2e_scores_n.detach().cpu().numpy()], 0)
            cos_scores = np.concatenate([cos_scores_p.detach().cpu().numpy(),
                                         cos_scores_n.detach().cpu().numpy()], 0)
            labels = np.concatenate([np.ones(e2e_scores_p.size(0)),
                                     np.zeros(e2e_scores_n.size(0))], 0)

        return correct_1, correct_5, x.size(0), e2e_scores, cos_scores, labels
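
All of these examples call correct_topk but none of them defines it. A minimal sketch of what such a top-k counting helper typically looks like (an assumption; the real helper is not shown in these excerpts):

import torch

def correct_topk(output, target, topk=(1,)):
    # Hypothetical top-k helper: for each k, count how many samples have the
    # target class among the k highest-scoring predictions.
    maxk = max(topk)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)  # (N, maxk)
    pred = pred.t()                                                # (maxk, N)
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    return tuple(correct[:k].reshape(-1).float().sum().item() for k in topk)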
Example #2
    def valid(self, batch):

        self.model.eval()

        with torch.no_grad():

            if isinstance(self.valid_loader.dataset, Loader):
                x_1, x_2, x_3, x_4, y = batch
                x = torch.cat([x_1, x_2, x_3, x_4], dim=0)
                y = torch.cat(4 * [y], dim=0).squeeze().contiguous()
            else:
                x, y = batch

            if self.cuda_mode:
                x = x.to(self.device, non_blocking=True)
                y = y.to(self.device, non_blocking=True)

            embeddings = self.model.forward(x)
            embeddings_norm = F.normalize(embeddings, p=2, dim=1)

            out = self.model.out_proj(embeddings_norm, y)

            pred = F.softmax(out, dim=1)
            (correct_1, correct_5) = correct_topk(pred, y, (1, 5))

            triplets_idx = self.harvester_val.get_triplets(embeddings, y)

            embeddings = embeddings.cpu()

            emb_a = torch.index_select(embeddings, 0, triplets_idx[:, 0])
            emb_p = torch.index_select(embeddings, 0, triplets_idx[:, 1])
            emb_n = torch.index_select(embeddings, 0, triplets_idx[:, 2])

            scores_p = F.cosine_similarity(emb_a, emb_p)
            scores_n = F.cosine_similarity(emb_a, emb_n)

            scores = np.concatenate([scores_p.detach().cpu().numpy(),
                                     scores_n.detach().cpu().numpy()], 0)
            labels = np.concatenate([np.ones(scores_p.size(0)),
                                     np.zeros(scores_n.size(0))], 0)

        return correct_1, correct_5, x.size(0), scores, labels
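
Both valid methods return score arrays paired with a label array (1 for anchor-positive pairs, 0 for anchor-negative pairs), which is the layout expected by standard verification metrics. A minimal sketch of a downstream consumer computing the equal error rate (a hypothetical caller; scikit-learn's roc_curve is assumed available):

import numpy as np
from sklearn.metrics import roc_curve

def eer_from_scores(scores, labels):
    # 1 = genuine (anchor-positive) pair, 0 = impostor (anchor-negative) pair.
    fpr, tpr, _ = roc_curve(labels, scores)
    fnr = 1.0 - tpr
    idx = np.nanargmin(np.abs(fnr - fpr))  # operating point where FPR ~= FNR
    return (fpr[idx] + fnr[idx]) / 2.0     # equal error rate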
Example #3
def test(epoch, loader=testloader, msg='Test'):
    global best_acc
    net.eval()
    batch_norm1, batch_norm2, classifier1, classifier2, classifier = classifier_blocks
    for bn in [batch_norm1, batch_norm2]:
        if bn is not None:
            bn.eval()

    test_loss = 0
    correct_top1, correct_top5 = 0, 0
    total = 0
    outputs_list = []
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(loader):
            inputs, targets = inputs.to(device), targets.to(device)
            if len(kernel_convolution) > 1:
                outputs = []
                for i in range(len(kernel_convolution)):
                    outputs.append(
                        net(inputs, kernel_convolution[i], whitening_operator,
                            minus_whitened_patches_mean))
                outputs1 = torch.cat([out[0] for out in outputs], dim=1)
                outputs2 = torch.cat([out[1] for out in outputs], dim=1)
                del outputs
            else:
                outputs1, outputs2 = net(inputs, kernel_convolution[0],
                                         whitening_operator,
                                         minus_whitened_patches_mean)

            if args.normalize_net_outputs:
                outputs1 = (outputs1 - mean1) / std1
                outputs2 = (outputs2 - mean2) / std2

            outputs, targets = compute_classifier_outputs(outputs1,
                                                          outputs2,
                                                          targets,
                                                          args,
                                                          batch_norm1,
                                                          batch_norm2,
                                                          classifier1,
                                                          classifier2,
                                                          classifier,
                                                          train=False)
            loss = criterion(outputs, targets)

            outputs_list.append(outputs)

            test_loss += loss.item()
            cor_top1, cor_top5 = correct_topk(outputs, targets, topk=(1, 5))
            correct_top1 += cor_top1
            correct_top5 += cor_top5
            _, predicted = outputs.max(1)
            total += targets.size(0)

        test_loss /= (batch_idx + 1)
        acc1, acc5 = 100. * correct_top1 / total, 100. * correct_top5 / total

        print(
            f'{msg}, epoch: {epoch}; Loss: {test_loss:.2f} | Acc: {acc1:.1f} @1 {acc5:.1f} @5 ; threshold {args.bias:.3f}'
        )

        outputs = torch.cat(outputs_list, dim=0).cpu()

        return acc1, outputs
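
The args.normalize_net_outputs branch standardizes the two feature streams with precomputed mean1/std1 and mean2/std2. Their construction is outside this excerpt; a plausible sketch (a hypothetical helper, assuming the same two-output forward convention) would accumulate per-feature statistics over one pass of the training loader:

import torch

@torch.no_grad()
def feature_stats(loader, forward_fn, device):
    # Hypothetical estimation of the mean/std pairs used above: collect the
    # two output streams over the loader and reduce over the batch dimension.
    feats1, feats2 = [], []
    for inputs, _ in loader:
        out1, out2 = forward_fn(inputs.to(device))
        feats1.append(out1)
        feats2.append(out2)
    feats1, feats2 = torch.cat(feats1, dim=0), torch.cat(feats2, dim=0)
    return (feats1.mean(0), feats1.std(0)), (feats2.mean(0), feats2.std(0))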
Example #4
def test(epoch, loader=testloader, msg='Test'):
    global best_acc
    net.eval()
    batch_norm1, batch_norm2, classifier1, classifier2, classifier, batch_norm_bottleneck = classifier_blocks
    for bn in [batch_norm1, batch_norm2, batch_norm_bottleneck]:
        if bn is not None:
            bn.eval()

    test_loss = 0
    correct_top1, correct_top5 = 0, 0
    total = 0
    outputs_list = []
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(loader):
            inputs, targets = inputs.to(device), targets.to(device)

            patches_shape = kernel_convolution_2.shape
            operator = (V * ev_rescale) @ V.t()
            # operator = (V * torch.exp(ev_rescale)) @ V.t()
            zca_patches = kernel_convolution_2.view(patches_shape[0],
                                                    -1) @ operator
            # epsilon belongs inside the denominator to guard against zero-norm patches
            zca_patches_normalized = zca_patches / (
                zca_patches.norm(dim=1, keepdim=True) + 1e-8)
            # zca_patches_normalized = zca_patches
            kernel_conv = zca_patches_normalized.view(
                patches_shape).contiguous()

            # outputs1, outputs2 = net(inputs, kernel_convolution[0])
            outputs1, outputs2 = net(inputs, kernel_conv)

            outputs, targets = compute_classifier_outputs(
                outputs1,
                outputs2,
                targets,
                args,
                batch_norm1,
                batch_norm2,
                classifier1,
                classifier2,
                classifier,
                batch_norm_bottleneck,
                train=False)

            loss = criterion(outputs, targets)

            outputs_list.append(outputs)

            test_loss += loss.item()
            cor_top1, cor_top5 = correct_topk(outputs, targets, topk=(1, 5))
            correct_top1 += cor_top1
            correct_top5 += cor_top5
            _, predicted = outputs.max(1)
            total += targets.size(0)
            progress_bar(batch_idx,
                         len(loader),
                         'Test, epoch: %i; Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                         (epoch, test_loss / (batch_idx + 1),
                          100. * correct_top1 / total, correct_top1, total),
                         hide=args.no_progress_bar)
        test_loss /= (batch_idx + 1)
        acc1, acc5 = 100. * correct_top1 / total, 100. * correct_top5 / total
        if args.no_progress_bar:
            print(
                f'{msg}, epoch: {epoch}; Loss: {test_loss:.2f} | Acc: {acc1:.1f} @1 {acc5:.1f} @5 ; threshold {args.bias:.3f}'
            )
        outputs = torch.cat(outputs_list, dim=0).cpu()
        if args.lambda_1 > 0.:
            group_sparsity_norm = torch.norm(torch.cat(
                [classifier1.weight, classifier2.weight], dim=0),
                                             dim=0,
                                             p=2)
            print(f'non_zero groups {(group_sparsity_norm != 0).int().sum()}')

        return acc1, outputs
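
The line operator = (V * ev_rescale) @ V.t() applies an eigenbasis rescaling to the patches; V and ev_rescale are defined outside this excerpt (the commented-out torch.exp variant suggests ev_rescale may be a learned log-scale). A sketch of the classical ZCA-whitening construction that yields the same V diag(s) V^T form (an assumption, not the authors' code):

import torch

def zca_operator(patches, eps=1e-3):
    # Classical ZCA whitening: eigendecompose the patch covariance and
    # rescale each eigendirection by 1 / sqrt(lambda + eps).
    flat = patches.view(patches.size(0), -1)
    flat = flat - flat.mean(dim=0, keepdim=True)
    cov = flat.t() @ flat / (flat.size(0) - 1)
    eigvals, V = torch.linalg.eigh(cov)   # ascending eigenvalues
    ev_rescale = (eigvals + eps).rsqrt()
    return (V * ev_rescale) @ V.t()       # same form as `operator` above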
Example #5
def test(epoch, loader=testloader, msg='Test'):
    global best_acc
    net.eval()
    if n_channel_convolution_scale_1 > 0:
        net_1.eval()
    if not args.multi_classif:
        batch_norm1, batch_norm2, classifier1, classifier2, classifier, dropout_mask, batch_norm_bottleneck = classifier_blocks
        if args.batch_norm:
            batch_norm1.eval()
            batch_norm2.eval()
            if n_channel_convolution_scale_1 > 0:
                batch_norm1_1.eval()
                batch_norm2_1.eval()
        if args.batch_norm_bottleneck:
            batch_norm_bottleneck.eval()

    test_loss = 0
    correct_top1, correct_top5 = 0, 0
    total = 0
    outputs_list = []
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(loader):
            inputs, targets = inputs.to(device), targets.to(device)
            if not args.learn_patches and torch.cuda.is_available():
                inputs = inputs.half()
            if len(kernel_convolution) > 1:
                outputs = []
                for i in range(len(kernel_convolution)):
                    outputs.append(net(inputs, kernel_convolution[i]))
                if args.multi_classif:
                    outs = 0
                    for i in range(len(kernel_convolution)):
                        batch_norm1, batch_norm2, classifier1, classifier2, classifier, dropout_mask, batch_norm_bottleneck = classifier_blocks[
                            i]
                        batch_norm1.eval()
                        batch_norm2.eval()
                        outs += compute_classifier_outputs(
                            outputs[i][0].float(),
                            outputs[i][1].float(),
                            targets,
                            args,
                            batch_norm1,
                            batch_norm2,
                            classifier1,
                            classifier2,
                            classifier,
                            dropout_mask,
                            batch_norm_bottleneck,
                            train=False)[0]
                    outputs = outs
                else:
                    outputs1 = torch.cat([out[0] for out in outputs], dim=1)
                    outputs2 = torch.cat([out[1] for out in outputs], dim=1)
                    del outputs
            else:
                outputs1, outputs2 = net(inputs, kernel_convolution[0])
            if not args.multi_classif:
                outputs1 = outputs1.float()
                outputs2 = outputs2.float()

                if n_channel_convolution_scale_1 > 0:
                    outputs1_1, outputs2_1 = net_1(inputs,
                                                   kernel_convolution_1)
                    outputs1_1 = outputs1_1.float()
                    outputs2_1 = outputs2_1.float()

                outputs, targets = compute_classifier_outputs(
                    outputs1,
                    outputs2,
                    targets,
                    args,
                    batch_norm1,
                    batch_norm2,
                    classifier1,
                    classifier2,
                    classifier,
                    dropout_mask,
                    batch_norm_bottleneck,
                    train=False)

                if n_channel_convolution_scale_1 > 0:
                    outputs_1, _ = compute_classifier_outputs(
                        outputs1_1,
                        outputs2_1,
                        targets,
                        args,
                        batch_norm1_1,
                        batch_norm2_1,
                        classifier1_1,
                        classifier2_1,
                        classifier_1,
                        dropout_mask_1,
                        batch_norm_bottleneck,
                        train=False)
                    outputs += outputs_1

            loss = criterion(outputs, targets)

            outputs_list.append(outputs)

            test_loss += loss.item()
            cor_top1, cor_top5 = correct_topk(outputs, targets, topk=(1, 5))
            correct_top1 += cor_top1
            correct_top5 += cor_top5
            _, predicted = outputs.max(1)
            total += targets.size(0)
            progress_bar(batch_idx,
                         len(loader),
                         'Test, epoch: %i; Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                         (epoch, test_loss / (batch_idx + 1),
                          100. * correct_top1 / total, correct_top1, total),
                         hide=args.no_progress_bar)
        test_loss /= (batch_idx + 1)
        acc1, acc5 = 100. * correct_top1 / total, 100. * correct_top5 / total
        if args.no_progress_bar:
            print(
                f'{msg}, epoch: {epoch}; Loss: {test_loss:.2f} | Acc: {acc1:.1f} @1 {acc5:.1f} @5 ; threshold {args.bias:.3f}'
            )
        outputs = torch.cat(outputs_list, dim=0).cpu()
        if args.lambda_1 > 0.:
            group_sparsity_norm = torch.norm(torch.cat(
                [classifier1.weight, classifier2.weight], dim=0),
                                             dim=0,
                                             p=2)
            print(f'non_zero groups {(group_sparsity_norm != 0).int().sum()}')

        return acc1, outputs
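
The args.lambda_1 > 0 branch counts feature groups that survive group-lasso regularization: the two classifier weight matrices are stacked along the class dimension and each feature column's L2 norm is tested against zero. A standalone toy illustration (hypothetical weights, not the script's classifiers):

import torch

w1 = torch.tensor([[0., 1., 0.], [0., 2., 0.]])  # stand-in for classifier1.weight
w2 = torch.tensor([[0., 0., 3.], [0., 0., 0.]])  # stand-in for classifier2.weight
group_norm = torch.norm(torch.cat([w1, w2], dim=0), dim=0, p=2)
print((group_norm != 0).int().sum())  # tensor(2): two non-zero feature groups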
Example #6
def test(epoch, loader=testloader, msg='Test'):
    global best_acc
    net.eval()
    batch_norm1, batch_norm2, batch_norm, classifier1, classifier2, classifier = classifier_blocks
    for bn in [batch_norm1, batch_norm2, batch_norm]:
        if bn is not None:
            bn.eval()

    test_loss, correct_top1, correct_top5, total = 0, 0, 0, 0
    outputs_list = []
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(loader):
            if torch.cuda.is_available() and not args.learn_patches:
                inputs = inputs.half()
            targets = targets.to(device)
            if args.batchsize_net > 0:
                outputs = []
                for i in range(np.ceil(inputs.size(0)/args.batchsize_net).astype('int')):
                    start, end = i*args.batchsize_net, min((i+1)*args.batchsize_net, inputs.size(0))
                    inputs_batch = inputs[start:end].to(device)
                    outputs.append(net(inputs_batch))
                outputs1 = torch.cat([out[0] for out in outputs], dim=0)
                outputs2 = torch.cat([out[1] for out in outputs], dim=0)
            else:
                inputs = inputs.to(device)
                outputs1, outputs2 = net(inputs)

            if net_2 is not None:
                outputs1, outputs2 = net_2(torch.cat([outputs1, outputs2], dim=1).float())

            if args.feat_square:
                outputs1 = torch.cat([outputs1, outputs1**2], dim=1)
                outputs2 = torch.cat([outputs2, outputs2**2], dim=1)

            if args.resnet:
                outputs = torch.cat([outputs1, outputs2], dim=1).float()
                outputs = resnet(outputs)
            else:
                outputs1, outputs2 = outputs1.float(), outputs2.float()

                if args.normalize_net_outputs:
                    outputs1 = (outputs1 - mean1) / std1
                    outputs2 = (outputs2 - mean2) / std2

                outputs, targets = utils.compute_classifier_outputs(
                    outputs1, outputs2, targets, args, batch_norm1,
                    batch_norm2, batch_norm, classifier1, classifier2, classifier,
                    train=False)

            loss = criterion(outputs, targets)

            outputs_list.append(outputs)

            test_loss += loss.item()
            cor_top1, cor_top5 = utils.correct_topk(outputs, targets, topk=(1, 5))
            correct_top1 += cor_top1
            correct_top5 += cor_top5
            _, predicted = outputs.max(1)
            total += targets.size(0)

        test_loss /= (batch_idx + 1)
        acc1, acc5 = 100. * correct_top1 / total, 100. * correct_top5 / total

        print(f'{msg}, epoch: {epoch}; Loss: {test_loss:.2f} | Acc: {acc1:.1f} @1 {acc5:.1f} @5 ; kneighbors_fraction {args.kneighbors_fraction:.3f}')

        outputs = torch.cat(outputs_list, dim=0).cpu()

        return acc1, outputs
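
The args.batchsize_net branch in this last example chunks the inputs by hand before the forward pass. An equivalent sketch using torch.split (a hypothetical helper following the same two-output net convention; same behavior, slightly more idiomatic):

import torch

def chunked_forward(net, inputs, chunk_size, device):
    # Run `net` on fixed-size chunks of the batch and concatenate the two
    # output streams back along the batch dimension.
    outs = [net(chunk.to(device)) for chunk in torch.split(inputs, chunk_size)]
    outputs1 = torch.cat([o[0] for o in outs], dim=0)
    outputs2 = torch.cat([o[1] for o in outs], dim=0)
    return outputs1, outputs2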