def ensemble_meta_test(nets, testloader, only_base=True, is_norm=True, classifier='LR', num_workers=None):
    """Evaluate an ensemble of networks over few-shot episodes.

    For each episode every network embeds the support and query sets, a
    multinomial logistic-regression head is fit per network on that
    network's support features, and the per-network class probabilities
    are fused by their geometric mean (mean in log-space) before argmax.

    Args:
        nets: iterable of feature-extractor networks; each is put in eval mode.
        testloader: yields (support_xs, support_ys, query_xs, query_ys) episodes.
        only_base, is_norm: forwarded to ``loop_get_features``.
        classifier: unused here — an LR head is always fit; kept for
            interface parity with ``meta_test``.
        num_workers: ``n_jobs`` for LogisticRegression; None / non-positive
            values fall back to 1.

    Returns:
        ``mean_confidence_interval`` over the per-episode accuracies.
    """
    nets = [net.eval() for net in nets]
    # BUG FIX: the original `num_workers > 0` raised TypeError when the
    # default (None) was passed; guard against None before comparing.
    num_workers = num_workers if num_workers is not None and num_workers > 0 else 1
    acc1 = []  # dropped the unused `acc` / `acc5` accumulators
    # Feature extraction only — disable autograd, matching meta_test.
    with torch.no_grad():
        for idx, data in tqdm(enumerate(testloader)):
            support_xs, support_ys, query_xs, query_ys = data
            support_features_list, support_ys_list = map(list, zip(
                *[loop_get_features(net, support_xs, support_ys, only_base, is_norm, batch_size=1024)
                  for net in nets]))
            query_features_list, query_ys_list = map(list, zip(
                *[loop_get_features(net, query_xs, query_ys, only_base, is_norm, batch_size=1024)
                  for net in nets]))
            # One LR head per ensemble member, fit on that member's features.
            classifiers = [
                LogisticRegression(random_state=0, solver='lbfgs', max_iter=1000,
                                   multi_class='multinomial', n_jobs=num_workers)
                .fit(support_features_list[i], support_ys_list[i])
                for i in range(len(support_features_list))
            ]
            probas = np.array([clf.predict_proba(query_features_list[i])
                               for i, clf in enumerate(classifiers)])
            # Geometric mean of the per-network probabilities, computed in
            # log-space for numerical stability.
            log_probas = np.log(probas)
            gmean = np.exp(log_probas.sum(axis=0) / log_probas.shape[0])
            query_ys_pred = np.argmax(gmean.squeeze(), axis=1)
            acc1.append(metrics.accuracy_score(query_ys_list[0], query_ys_pred))
    return mean_confidence_interval(acc1)
def meta_test(net, testloader, only_base=True, is_norm=True, classifier='LR'):
    """Evaluate a single network over few-shot episodes.

    Each episode's support set is embedded and used to fit/apply the
    requested ``classifier``; the query predictions are scored against the
    query labels and the per-episode accuracies are summarized.

    Returns:
        ``mean_confidence_interval`` over the per-episode accuracies.
    """
    net = net.eval()
    episode_accs = []
    with torch.no_grad():
        for idx, episode in tqdm(enumerate(testloader)):
            support_xs, support_ys, query_xs, query_ys = episode
            s_feats, s_labels = loop_get_features(
                net, support_xs, support_ys, only_base, is_norm, batch_size=512)
            q_feats, q_labels = loop_get_features(
                net, query_xs, query_ys, only_base, is_norm, batch_size=512)
            preds = classify(classifier, q_feats, s_feats, s_labels)
            episode_accs.append(metrics.accuracy_score(q_labels, preds))
    return mean_confidence_interval(episode_accs)
def cascade_meta_test(nets, testloader, only_base=True, is_norm=True, classifier='LR'):
    """Two-stage (coarse -> fine) few-shot evaluation.

    ``nets[0]`` (coarse) predicts a coarse class for every support and
    query sample; ``nets[1]`` (fine) provides the features actually used
    for classification. Each query is classified against only the support
    samples sharing its coarse prediction, falling back to the full
    support set when no support sample matches.

    Returns:
        ``mean_confidence_interval`` over the per-episode accuracies.
    """
    coarse_net = nets[0].eval()
    fine_net = nets[1].eval()
    acc1 = []
    # Feature extraction only — disable autograd, matching meta_test.
    with torch.no_grad():
        for idx, data in tqdm(enumerate(testloader)):
            support_xs, support_ys, query_xs, query_ys = data
            _, _, support_coarse_preds = loop_get_features_with_class(
                coarse_net, support_xs, support_ys, is_norm, batch_size=1024)
            _, _, query_coarse_preds = loop_get_features_with_class(
                coarse_net, query_xs, query_ys, is_norm, batch_size=1024)
            support_features, support_ys = loop_get_features(
                fine_net, support_xs, support_ys, only_base, is_norm, batch_size=1024)
            query_features, query_ys = loop_get_features(
                fine_net, query_xs, query_ys, only_base, is_norm, batch_size=1024)
            # PERF: group support indices by coarse prediction once — O(n + m)
            # instead of the original O(n * m) pairwise comparison loop.
            # The resulting index lists are identical to the original's.
            support_by_coarse = {}
            for j, coarse in enumerate(support_coarse_preds):
                support_by_coarse.setdefault(coarse, []).append(j)
            preds = []
            for i, coarse in enumerate(query_coarse_preds):
                matched = support_by_coarse.get(coarse)
                if matched is not None:
                    # Classify within the shared coarse class only.
                    preds.append(classify(classifier, query_features[i],
                                          support_features[matched], support_ys[matched]))
                else:
                    # No support sample shares this coarse class: use everything.
                    preds.append(classify(classifier, query_features[i],
                                          support_features, support_ys))
            query_ys_pred = np.array(preds)
            acc1.append(metrics.accuracy_score(query_ys, query_ys_pred))
    return mean_confidence_interval(acc1)
def concat_meta_test(nets, testloader, only_base=True, is_norm=True, classifier='LR'):
    """Few-shot evaluation on the concatenated features of several networks.

    Every network embeds the support and query sets; the per-network
    feature vectors are concatenated along the last axis and a single
    ``classifier`` is applied to the fused representation.

    Returns:
        ``mean_confidence_interval`` over the per-episode accuracies.
    """
    nets = [net.eval() for net in nets]
    episode_accs = []
    for idx, episode in tqdm(enumerate(testloader)):
        support_xs, support_ys, query_xs, query_ys = episode
        # One (features, labels) pair per ensemble member.
        s_parts = [loop_get_features(net, support_xs, support_ys, only_base,
                                     is_norm, batch_size=1024) for net in nets]
        q_parts = [loop_get_features(net, query_xs, query_ys, only_base,
                                     is_norm, batch_size=1024) for net in nets]
        s_feats = np.concatenate([feats for feats, _ in s_parts], axis=-1)
        q_feats = np.concatenate([feats for feats, _ in q_parts], axis=-1)
        # Labels are identical across members; take the first network's copy.
        s_labels = s_parts[0][1]
        q_labels = q_parts[0][1]
        preds = classify(classifier, q_feats, s_feats, s_labels)
        episode_accs.append(metrics.accuracy_score(q_labels, preds))
    return mean_confidence_interval(episode_accs)