Example #1
    def val(self, episodes, val_folders):
        self.net.eval()
        self.classifier.eval()
        accs = []
        for i in range(episodes):
            task = dataload.FewShotTask(val_folders, self.n_way, self.k_shot,
                                        self.q_query, self.dataset)
            spt_loader = dataload.get_Dataloader(task, self.k_shot, 'support')
            qry_loader = dataload.get_Dataloader(task,
                                                 self.q_query,
                                                 'query',
                                                 shuffle=True)
            # Draw the single support/query batch for this episode.
            spt, spt_y = next(iter(spt_loader))
            qry, qry_y = next(iter(qry_loader))
            if torch.cuda.is_available():
                spt, spt_y, qry, qry_y = self.datas_on_cuda(
                    spt, spt_y, qry, qry_y)

            spt_feature = self.net(spt)
            qry_feature = self.net(qry)
            dist = self.classifier(spt_feature, qry_feature)
            predict_label = torch.argmin(dist, dim=1)  # nearest support class wins
            score = [
                1 if predict_label[j] == qry_y[j].long() else 0
                for j in range(len(qry_y))
            ]
            acc = np.mean(score)
            accs.append(acc)
        acc, pacc = compute_confidence_interval(accs)
        print('acc is {:.4f} pacc is {:.4f}'.format(acc, pacc))
        self.logger.info('acc is {:.4f} pacc is {:.4f}'.format(acc, pacc))
        return acc, pacc
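
All five examples reduce a list of per-episode accuracies with compute_confidence_interval, whose body is not shown here. A minimal sketch under the usual few-shot convention (mean plus a 95% half-width of 1.96·std/√n, assuming a normal approximation over episodes):

import numpy as np

def compute_confidence_interval(data):
    # Mean accuracy over episodes and the 95% confidence half-width.
    a = np.asarray(data, dtype=np.float64)
    m = a.mean()
    pm = 1.96 * a.std() / np.sqrt(len(a))
    return m, pm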
Example #2
def evaluate(model, normalize, epoch, support_loader, n, k, q, device, logger):
    accs_l2 = []
    accs_cosine = []
    model.eval()

    with torch.no_grad():
        for data in tqdm(support_loader):
            imgs, labels = prepare_nshot_task(n, k, q, data, device)
            _, outputs, _ = model(imgs, norm=normalize)

            acc_l2 = evaluation(outputs, labels, n, k, q, 'l2')
            acc_cosine = evaluation(outputs, labels, n, k, q, 'cosine')
            accs_l2.append(acc_l2)
            accs_cosine.append(acc_cosine)

    m_l2, pm_l2 = compute_confidence_interval(accs_l2)
    m_cosine, pm_cosine = compute_confidence_interval(accs_cosine)
    logger.info(f'{epoch:3d}.pth: {n}-shot \t l2: {m_l2:.2f}+/-{pm_l2:.2f} \t '
                f'cosine: {m_cosine:.2f}+/-{pm_cosine:.2f}')
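
The evaluation helper called above is not defined in this snippet. A hypothetical nearest-centroid scorer consistent with the call site (the name, the class-major support ordering, and query labels in [0, n) are all assumptions, not the repository's actual code):

import torch
import torch.nn.functional as F

def evaluation(outputs, labels, n, k, q, metric):
    # Assumes outputs = [n*k support embeddings; n*q query embeddings],
    # supports ordered class-major, query labels in [0, n).
    support, queries = outputs[:n * k], outputs[n * k:]
    prototypes = support.reshape(n, k, -1).mean(dim=1)  # one centroid per class
    if metric == 'cosine':
        sims = F.normalize(queries, dim=1) @ F.normalize(prototypes, dim=1).t()
        pred = sims.argmax(dim=1)
    else:  # 'l2'
        pred = torch.cdist(queries, prototypes).argmin(dim=1)
    return (pred == labels).float().mean().item()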
Example #3
def evaluate(model, normalize, epoch, support_loader, n, k, q, device, logger,
             nodes, desc_embeddings, id_to_class_name, classFile_to_wikiID):
    accs_l2 = []
    accs_cosine = []
    model.eval()

    with torch.no_grad():
        for data in tqdm(support_loader):
            imgs, labels = prepare_nshot_task(n, k, q, data, device)
            support_corr_nodeIndexs = find_nodeIndex_by_imgLabels(
                nodes, data[1][:n * k], id_to_class_name, classFile_to_wikiID)
            support_imgs, _, _, _, _ = model(imgs[:n * k],
                                             desc_embeddings,
                                             support_corr_nodeIndexs,
                                             norm=normalize)

            queries = []
            for i in range(k):
                query_corr_nodeIndexs = find_nodeIndex_by_imgLabels(
                    nodes, (q * k) * [data[1][0 + i * n]], id_to_class_name,
                    classFile_to_wikiID)
                query_imgs, _, _, _, _ = model(imgs[n * k:],
                                               desc_embeddings,
                                               query_corr_nodeIndexs,
                                               norm=normalize)
                queries.append(query_imgs)

            acc_l2 = argmax_evaluation(support_imgs, queries, labels, n, k, q,
                                       'l2')
            acc_cosine = argmax_evaluation(support_imgs, queries, labels, n, k,
                                           q, 'cosine')
            accs_l2.append(acc_l2)
            accs_cosine.append(acc_cosine)
    m_l2, pm_l2 = compute_confidence_interval(accs_l2)
    m_cosine, pm_cosine = compute_confidence_interval(accs_cosine)
    logger.info(f'{epoch:3d}.pth: {n}-shot \t l2: {m_l2:.2f}+/-{pm_l2:.2f} \t '
                f'cosine: {m_cosine:.2f}+/-{pm_cosine:.2f}')
Example #4
def test(model, label, args, few_shot_params):
    if args.debug:
        n_test = 10
        print_freq = 2
    else:
        n_test = 1000
        print_freq = 100
    test_file = osp.join(args.dataset_dir, 'test.json')
    test_datamgr = SetDataManager(test_file, args.dataset_dir, args.image_size,
                                  mode='val', n_episode=n_test, **few_shot_params)
    loader = test_datamgr.get_data_loader(aug=False)

    test_acc_record = np.zeros((n_test,))

    warmup_state = torch.load(osp.join(args.checkpoint_dir, 'max_acc.pth'))['params']
    model.load_state_dict(warmup_state, strict=False)
    model.eval()

    ave_acc = Averager()
    with torch.no_grad():
        for i, batch in enumerate(loader, 1):
            # index_label is unused: the fixed `label` argument supplies episode labels.
            data, index_label = batch[0].cuda(), batch[1].cuda()
            logits = model(data, 'test')
            acc = count_acc(logits, label)
            ave_acc.add(acc)
            test_acc_record[i - 1] = acc
            if i % print_freq == 0:
                print('batch {}: {:.2f}({:.2f})'.format(i, ave_acc.item() * 100, acc * 100))

    m, pm = compute_confidence_interval(test_acc_record)
    print('Test Acc {:.4f} +- {:.4f}'.format(m, pm))
    acc_str = '%4.2f' % (m * 100)
    with open(osp.join(args.save_dir, 'result.txt'), 'a') as f:
        f.write('%s %s\n' % (acc_str, args.name))
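
Averager and count_acc are small utilities this snippet (and the next) assumes; a sketch consistent with how they are used, though the exact bodies may differ in the source repository:

import torch

class Averager:
    # Running mean of the per-episode accuracies.
    def __init__(self):
        self.n = 0
        self.v = 0.0

    def add(self, x):
        self.v = (self.v * self.n + x) / (self.n + 1)
        self.n += 1

    def item(self):
        return self.v

def count_acc(logits, label):
    # Fraction of queries whose argmax logit matches the episode label.
    pred = torch.argmax(logits, dim=1)
    return (pred == label).float().mean().item()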
Example #5
    ave_acc = Averager()
    label = torch.arange(args.validation_way).repeat(args.query)
    if torch.cuda.is_available():
        label = label.type(torch.cuda.LongTensor)
    else:
        label = label.type(torch.LongTensor)

    for i, batch in enumerate(loader, 1):
        if torch.cuda.is_available():
            data, _ = [_.cuda() for _ in batch]
        else:
            data = batch[0]
        k = args.validation_way * args.shot
        data_shot, data_query = data[:k], data[k:]

        logits = model(data_shot, data_query)
        acc = count_acc(logits, label)
        ave_acc.add(acc)
        test_acc_record[i - 1] = acc
        print('batch {}: {:.2f}({:.2f})'.format(i,
                                                ave_acc.item() * 100,
                                                acc * 100))

    m, pm = compute_confidence_interval(test_acc_record)
    print('Val Best Acc {:.4f}, Test Acc {:.4f}'.format(
        trlog['max_acc'], ave_acc.item()))
    print('Test Acc {:.4f} +- {:.4f}'.format(m, pm))

    print(trlog)
    # Evaluate 1-NN, graph-filtered 1-NN, and NCM classifiers on SVD-reduced features.
    test_data = SVD.transform(test_data)

    x_train = torch.cuda.FloatTensor(train_data)

    x_test = torch.nn.functional.normalize(torch.cuda.FloatTensor(test_data), dim=1, p=2)

    adj = utils.generate_graphs(
        x_train, k=examples_per_class, examples_per_class=examples_per_class, num_classes=num_classes)
    x_train = torch.nn.functional.normalize(x_train, dim=1, p=2)

    x_train_filtered, y_train_filtered = utils.generate_filtered_features(
        x_train, adj, args.filter, alpha, num_classes=num_classes, examples_per_class=examples_per_class)
    x_train_filtered = torch.nn.functional.normalize(x_train_filtered, dim=1, p=2)

    results_1nn.append(utils.nearest_neighbor_classifier(
        x_train, x_test, y_train, y_test))
    results_filtered.append(utils.nearest_neighbor_classifier(
        x_train_filtered, x_test, y_train_filtered, y_test))
    results_ncm.append(utils.nearest_mean_classifier(
        torch.nn.functional.normalize(x_train, dim=1, p=2),
        torch.cuda.LongTensor(y_train),
        torch.nn.functional.normalize(x_test, dim=1, p=2),
        torch.cuda.LongTensor(y_test)))
    if run == 0:
        print("Shape train {}, shape test {}".format(x_train_filtered.shape,x_test.shape))

mean_1nn, confiance_1nn = utils.compute_confidence_interval(results_1nn)
mean_filtered, confiance_filtered = utils.compute_confidence_interval(results_filtered)
mean_ncm, confiance_ncm = utils.compute_confidence_interval(results_ncm)


print("Results 1nn {}+-{}, Filtered {}+-{}, NCM {}+-{}".format(
    mean_1nn, confiance_1nn, mean_filtered, confiance_filtered, mean_ncm, confiance_ncm))
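
utils.nearest_mean_classifier is used above but not defined in the snippet. A hypothetical NCM consistent with the call, classifying each test point by the closest class mean of the normalized training features:

import torch

def nearest_mean_classifier(x_train, y_train, x_test, y_test):
    # Class means of the (normalized) training features.
    classes = torch.unique(y_train)
    means = torch.stack([x_train[y_train == c].mean(dim=0) for c in classes])
    # Assign each test point to the nearest mean and return the accuracy.
    pred = classes[torch.cdist(x_test, means).argmin(dim=1)]
    return (pred == y_test).float().mean().item()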