Example #1
0
def val_process_unknown(classes_to_process, feature_dic, evm_model, args_evm,
                        gpu, Q, done_event):
    """Validate EVM rejection on unknown-class samples.

    For each probability batch from ``EVM_Inference``, prepend an
    "unknown" column holding ``1 - max(known probability)``; a sample is
    counted correct when that column (index 0) is the row argmax, i.e.
    the sample is rejected as unknown.  Pushes
    ``(gpu, average accuracy, sample count)`` onto ``Q``, then blocks on
    ``done_event`` before releasing tensors.

    NOTE(review): assumes ``pr[1][1]`` is a 2-D (batch, classes)
    probability tensor — confirm against ``EVM_Inference``.
    """
    with torch.no_grad():
        top1_Meter = Average_Meter()
        Pr_iterator = EVM_Inference(classes_to_process, feature_dic, args_evm,
                                    gpu, evm_model)

        for k, pr in enumerate(Pr_iterator):
            r = pr[1][1].cuda(gpu)
            m, m_i = torch.max(r, dim=1)
            u = (1 - m).view(-1, 1)
            q = torch.cat((u, r), 1)
            # BUG FIX: was `torch.max(1, dim=1)` — the input tensor was
            # missing, which raises a TypeError. The argmax must be taken
            # over the augmented probabilities `q`.
            _, y = torch.max(q, dim=1)
            acc = torch.sum(y == 0) / q.shape[0]
            # BUG FIX: `acc` is a 0-dim tensor, so `acc[0]` raised an
            # IndexError; read the scalar with .item() directly.
            top1_Meter.update(acc.item(), r.size(0))
        Q.put((gpu, top1_Meter.avg, top1_Meter.count))
        done_event.wait()
        # Explicitly drop tensor references so CUDA memory can be reclaimed.
        del r, m, m_i, u, q, y
        del acc, top1_Meter, Pr_iterator
def val_process(classes_to_process, feature_dic, evm_model, args_evm, gpu, Q,
                done_event):
    """Validate EVM top-1 accuracy on known-class samples.

    Each batch of EVM probabilities is augmented with an "unknown"
    column (``1 - max known probability``), L1-normalized per row, and
    scored against the class label carried by the iterator.  The result
    ``(gpu, average accuracy, sample count)`` is pushed onto ``Q``; the
    worker then blocks on ``done_event`` before releasing tensors.

    NOTE(review): assumes ``batch[1][1]`` is a 2-D (batch, classes)
    probability tensor and ``batch[1][0]`` a scalar class id — confirm
    against ``EVM_Inference``.
    """
    with torch.no_grad():
        meter = Average_Meter()
        batches = EVM_Inference(classes_to_process, feature_dic, args_evm,
                                gpu, evm_model)

        for batch in batches:
            probs = batch[1][1].cuda(gpu)
            max_prob, max_idx = torch.max(probs, dim=1)
            # Unknown-class score: complement of the best known probability.
            unknown_col = (1 - max_prob).view(-1, 1)
            augmented = torch.cat((unknown_col, probs), 1)
            # L1-normalize each row so it sums to one.
            row_sums = torch.norm(augmented, p=1, dim=1)
            normalized = augmented / row_sums[:, None]
            labels = ((batch[1][0]) * torch.ones(probs.shape[0])).long().cuda(gpu)
            top1 = accuracy(normalized, labels, topk=(1, ))
            meter.update(top1[0].item(), probs.size(0))
        Q.put((gpu, meter.avg, meter.count))
        done_event.wait()
        # Explicitly drop tensor references so CUDA memory can be reclaimed.
        del probs, max_prob, max_idx, unknown_col, augmented, row_sums
        del normalized, labels, top1, meter, batches
Example #3
0
        # Drop the raw inputs and backbone logits, then force allocator
        # cleanup before the next large CUDA allocation.
        del x_supervised, x_moco, Logit
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
        # FV_supervised = FV_supervised.cpu()
        # FV_moco_places = FV_moco_places.cpu()
        # Fused feature vector: supervised and MoCo(-places) backbone
        # features concatenated along the feature dimension.
        FV = torch.cat((FV_supervised, FV_moco_places), 1)

        Logit = linear_model(FV)
        softmax = SoftMax(Logit)
        # FV = FV.double()

        # Out-of-distribution gate: score novelty with the configured
        # detector and threshold it to flag samples as "known".
        if args_owl.ood == "evm":
            feature_dict = OrderedDict()
            feature_dict[0] = FV.detach().clone().double()
            # EVM_Inference over a single pseudo-class key (0); the loop
            # is expected to yield exactly one batch (asserted below).
            Pr_iterator = EVM_Inference([0], feature_dict, args_evm, 0, evm_model)
            for j, pr in enumerate(Pr_iterator):
                prob = pr[1][1]  # .cuda()
                assert j == 0
            del Pr_iterator, pr
            gc.collect()
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()
            # Known iff the best per-class EVM probability clears the threshold.
            P_max_all, _ = torch.max(prob, axis=1)
            predicted_known = P_max_all >= threshold_evm
        elif args_owl.ood == "energy":
            # Negative free energy: higher means more in-distribution.
            negatvie_energy = torch.logsumexp(Logit, dim=1)
            predicted_known = negatvie_energy >= threshold_energy
        elif args_owl.ood == "softmax":
            # Maximum softmax probability as the confidence score.
            sm, _ = torch.max(softmax, axis=1)
            predicted_known = sm >= threshold_softmax
Example #4
0
        # Known/unknown split by thresholding the maximum softmax probability.
        sm, _ = torch.max(softmax, axis=1)
        predicted_known = sm >= threshold_softmax
        predicted_unknown = ~predicted_known

        # Output layout: column 0 = unknown, columns 1..K = known classes,
        # remaining columns = discovered classes.
        n = 1 + number_of_known_classes + number_of_discovered_classes
        probability_tensor = torch.zeros(batch_size, n, dtype=torch.double)
        probability_tensor[predicted_known, 1 : (1 + number_of_known_classes)] = softmax[predicted_known, :].cpu()

        if torch.sum(predicted_unknown) > 0:
            if number_of_discovered_classes > 0:
                # Re-score the predicted-unknown samples against the
                # incremental EVM trained on discovered classes.
                FV_predicted_unknown = FV[predicted_unknown, :]
                feature_dict_predicted_unknown = OrderedDict()
                feature_dict_predicted_unknown[0] = FV_predicted_unknown.double()
                Pr_iterator_predicted_unknown = EVM_Inference(
                    [0], feature_dict_predicted_unknown, args_evm_incremental, 0, evm_model_incremental
                )
                # Single pseudo-class key, so exactly one batch is expected.
                for j_predicted_unknown, pr_predicted_unknown in enumerate(Pr_iterator_predicted_unknown):
                    prob_predicted_unknown = pr_predicted_unknown[1][1]  # .cuda()
                    assert j_predicted_unknown == 0
                del Pr_iterator_predicted_unknown, pr_predicted_unknown
                gc.collect()
                torch.cuda.empty_cache()
                torch.cuda.ipc_collect()
                # Unknown-column mass is the complement of the best
                # discovered-class probability.
                P_max_predicted_unknown, _ = torch.max(prob_predicted_unknown, axis=1)
                pu_predicted_unknown = 1 - P_max_predicted_unknown
                probability_tensor[predicted_unknown, 0] = pu_predicted_unknown
                probability_tensor[predicted_unknown, 1 + number_of_known_classes :] = prob_predicted_unknown
            else:
                # No discovered classes yet: all unknown mass goes to column 0.
                probability_tensor[predicted_unknown, 0] = 1.0
        # Forward both backbones; only the feature vectors are kept, the
        # logits from each call are discarded below.
        x_moco = x_moco.cuda()
        FV_supervised, Logit = cnn_model_supervised(x_supervised)
        FV_moco_places, Logit = cnn_model_moco_places(x_moco)
        del x_supervised, x_moco, Logit
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
        FV_supervised = FV_supervised.cpu()
        FV_moco_places = FV_moco_places.cpu()
        # Fused feature vector: both backbones concatenated feature-wise.
        FV = torch.cat((FV_supervised, FV_moco_places), 1)

        # Record this batch's features; assumes a fixed batch size of 100
        # with `i` as the batch index — TODO confirm against the enclosing loop.
        FV_track[(i * 100):((i + 1) * 100), :] = FV

        feature_dict = OrderedDict()
        feature_dict[0] = FV.double()
        # Single pseudo-class key (0); exactly one batch expected (asserted).
        Pr_iterator = EVM_Inference([0], feature_dict, args_evm, 0, evm_model)
        for j, pr in enumerate(Pr_iterator):
            prob = pr[1][1]  # .cuda()
            assert j == 0
        del Pr_iterator, pr
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
        # Output layout: column 0 = unknown, remaining columns = class
        # probabilities from the EVM.
        n = 1 + number_of_known_classes + number_of_discovered_classes
        probability_tensor = torch.zeros(prob.shape[0], n)
        probability_tensor[:, 1:] = prob
        # Unknown mass = complement of the best class probability.
        P_max_all, _ = torch.max(prob, axis=1)
        pu = 1 - P_max_all
        probability_tensor[:, 0] = pu
        # L1-normalize each row into a probability vector.
        norm = torch.norm(probability_tensor, p=1, dim=1)
        normalized_tensor = probability_tensor / norm[:, None]