Example #1
0
def plot_total_energy(models, best_combo, X_test, y_test, save=False):
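    """Plot the system's mean total energy and F1-score as white noise of
    increasing sigma corrupts the whole test input."""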
    energy_system = []
    noises = np.linspace(0, 2, 50)
    f1 = []
    for noise in noises:
        X = X_test.clone()
        # Apply white noise at this level to all features; keep X a float tensor
        X = torch.tensor(white_noise(X.data.numpy(), noise)).float()
        modes = [X[:, :4], X[:, 4:]]
        energies = models['model-with'][best_combo][0].total_energy(
            modes).data.numpy()
        energy_system.append(np.mean(energies))
        F1, _, _, _ = evaluation(models['model-with'][best_combo],
                                 'model-with', X, y_test)
        f1.append(F1)
    plt.plot(noises, energy_system)
    plt.xlabel(r'$\sigma$ corruption', fontsize=30)
    plt.ylabel('Total Energy', fontsize=30)
    plt.tick_params(axis='both', which='major', labelsize=25)
    if save: plt.savefig('results/noise-vs-system-energy')
    plt.show()

    plt.plot(noises, f1)
    plt.xlabel(r'$\sigma$ corruption', fontsize=30)
    plt.ylabel('F1-score', fontsize=30)
    plt.tick_params(axis='both', which='major', labelsize=25)
    if save: plt.savefig('results/noise-vs-system-f1')
    plt.show()
Example #2
0
def plot_heatmap(models, meta, base_F1, cut_noise, X_test, y_test, save=False):
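    """Heatmap of relative F1 gain over base_F1 on a half-corrupted test set:
    rows sweep coldness, columns sweep lambda_capacity, and each cell keeps
    the best score over lambda_regul."""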
    X = X_test.clone()
    mid = int(X.size(0) / 2)
    # Corrupt one mode in each half of the test set, leaving the rest clean
    X[:mid, :4] = torch.tensor(
        white_noise(X_test[:mid, :4].data.numpy(), cut_noise)).float()
    X[mid:, 4:] = torch.tensor(
        white_noise(X_test[mid:, 4:].data.numpy(), cut_noise)).float()
    coldness = meta['coldness']
    lambda_regul = meta['lambda_regul']
    lambda_capacity = meta['lambda_capacity']
    score = np.zeros((len(coldness), len(lambda_capacity)))
    for i, tau in enumerate(coldness):
        for j, l_cap in enumerate(lambda_capacity):
            best_F1 = -float("Inf")
            for l_reg in lambda_regul:
                F1, _, _, _ = evaluation(
                    models['model-with'][(tau, l_reg, l_cap)], 'model-with', X,
                    y_test)
                if F1 > best_F1:
                    best_F1 = F1
            score[i, j] = best_F1
    score = (score - base_F1) / base_F1
    ax = sns.heatmap(score,
                     annot=True,
                     xticklabels=lambda_capacity,
                     yticklabels=coldness,
                     cmap="YlGnBu")
    plt.xlabel('lambda capacity', fontsize=17)
    plt.ylabel('coldness', fontsize=17)
    if save: plt.savefig('results/heatmap')
    plt.show()
Example #3
0
def compare_graph_to_baseline(Dtest, Qtest, Rtest, threshold, priors):
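    """Compare the 'vanilla' and 'extended' re-ranking variants against the
    baseline BM25 ranking via interpolated average precision plots."""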
    p1.topics = topics
    # Reuse the existing index when possible; rebuild it if that fails
    try:
        p1_ranking = p1.evaluation(topics, (doc_index['p'], doc_index['n']), ('test', docs['test']),
                                   (p1.stem_analyzer,), (p1.NamedBM25F(K1=2, B=1),), skip_indexing=True)
    except Exception:
        p1_ranking = p1.evaluation(topics, (doc_index['p'], doc_index['n']), ('test', docs['test']),
                                   (p1.stem_analyzer,), (p1.NamedBM25F(K1=2, B=1),), skip_indexing=False)
    p1_ranking = list(p1_ranking.values())[0]


    pk_results_vanilla = get_pk_results(p1_ranking, Dtest, Qtest, Rtest, type='vanilla', threshold=threshold, priors=priors)
    pk_results_extended = get_pk_results(p1_ranking, Dtest, Qtest, Rtest, type='extended', threshold=threshold, priors=priors)

    plot_iap_for_models({'Baseline IR System': p1_ranking, 'Vanilla': pk_results_vanilla})
    plot_iap_for_models({'Baseline IR System': p1_ranking, 'Personalized': pk_results_extended})
Example #4
0
    def selective_retrain(self,
                          x_train,
                          y_train,
                          loss,
                          optimizer,
                          n_epochs=10,
                          mu=.1):
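        """Selective retraining: fit the output layer alone (param_norm
        regulariser of order 1), sparsify it, hook the subnetwork selected by
        the breadth-first pass, then retrain that subnetwork (order 2)."""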
        # Retrain output layer
        out_params = self.layers[-1].parameters()
        output_optimizer = t.optim.SGD(out_params, lr=0.01)
        old_l = float('inf')
        for e in range(n_epochs):
            l = self.batch_pass(x_train,
                                y_train,
                                loss,
                                output_optimizer,
                                mu=mu,
                                reg_list=[self.param_norm],
                                args_reg=[[1]])
            # Early stopping
            if (old_l - l < 1e-3):
                print("SR_last:", e, "epochs")
                break
            old_l = l
        self.sparsify_thres()
        # Breadth-first pass over the sparsified weights to select the
        # subnetwork to retrain
        self.compute_hooks()
        self.register_hooks()

        # Train the selected subnetwork
        # Init book-keeping
        train_losses = []
        train_accs = []
        old_l = float('inf')
        for e in range(n_epochs):
            l = self.batch_pass(x_train,
                                y_train,
                                loss,
                                optimizer,
                                mu=mu,
                                reg_list=[self.param_norm],
                                args_reg=[[2]])
            # Early stopping
            if (old_l - l < 0):
                print("SR:", e, "epochs")
                break
            old_l = l
            #eval network's loss and acc
            train_l, _, train_acc = helper.evaluation(self,
                                                      loss,
                                                      x_train,
                                                      y_train,
                                                      2,
                                                      use_cuda=self.use_cuda)
            train_accs.append(train_acc)
            train_losses.append(train_l)

        self.unhook()
        # Guard against an empty history when early stopping fires immediately
        return train_losses[-1] if train_losses else l
Example #5
0
def plot_yerkes_dodson(coldness, models, best_combo, X, y, save=False):
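    """Plot F1-score against coldness, holding the other two hyperparameters
    of best_combo fixed."""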
    f1 = []
    for tau in coldness:
        model = models['model-with'][(tau, best_combo[1], best_combo[2])]
        F1, _, _, _ = evaluation(model, 'model-with', X, y)
        f1.append(F1)
    plt.plot(coldness, f1, marker='o')
    plt.xscale('log')
    plt.xlabel('coldness', fontsize=17)
    plt.ylabel('F1-score', fontsize=17)
    plt.ylim([0, 1])
    plt.tick_params(axis='both', which='major', labelsize=11)
    if save: plt.savefig('results/yerkes-dodson')
    plt.show()
Example #6
0
def plot_specific(models, X_test, y_test, save=False):
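    """Corrupt one mode at increasing noise-to-signal ratios and plot F1 for
    the with/without/base models, the mean mode weights beta, and the effect
    of three energy-regularisation strengths."""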
    best_combo = (1e-4, 1e-2, 1e-3)
    f1_with = []
    f1_without = []
    f1_base = []
    noises = np.linspace(-20, 5, 50)
    beta_ip, beta_dm = [], []
    for noise in noises:
        X = X_test.clone()
        noise = 10**(noise / 20)  # convert NSR from dB to a linear scale
        X[:, 4:] = white_noise(X[:, 4:].data.numpy(), noise)
        F1, _, recall, _ = evaluation(models['model-with'][best_combo],
                                      'model-with', X, y_test)
        f1_with.append(F1)
        F1, _, recall, _ = evaluation(models['model-without'], 'model-without',
                                      X, y_test)
        f1_without.append(F1)
        F1, _, recall, _ = evaluation(models['base-model'], 'base-model', X,
                                      y_test)
        f1_base.append(F1)
        modes = [X[:, :4], X[:, 4:]]
        _, betas = models['model-with'][best_combo][0].get_alpha_beta(modes)
        beta_ip.append(torch.mean(betas[:, 0]).data.numpy())
        beta_dm.append(torch.mean(betas[:, 1]).data.numpy())
    plt.plot(noises, f1_with, label='with', c='b')
    plt.plot(noises, f1_without, label='without')
    plt.plot(noises, f1_base, label='base')
    plt.axvline(x=20 * np.log10(0.5), ls='--', c='darkgrey')
    plt.axhline(y=0.82, ls='--', c='grey')
    plt.xlabel('NSR (dB)', fontsize=30)
    plt.ylabel('F1-score', fontsize=30)
    plt.ylim([0, 1])
    plt.tick_params(axis='both', which='major', labelsize=25)
    plt.legend(loc='best')
    if save: plt.savefig('results/presentation/noise-generalisation-dm-noisy')
    plt.show()

    plt.plot(noises, beta_ip, label=r'$\beta$ corrupted mode', c='m')
    plt.plot(noises, beta_dm, label=r'$\beta$ other mode', c='orange')
    plt.xlabel('NSR (dB)', fontsize=30)
    plt.ylabel(r'$\beta$', fontsize=30)
    plt.ylim([-0.1, 1])
    plt.tick_params(axis='both', which='major', labelsize=25)
    plt.legend(loc='best')
    if save: plt.savefig('results/presentation/noise-generalisation-dm-noisy-beta')
    plt.show()

    h_combo = (1e-4, 1e-2, 1e-1)
    m_combo = (1e-4, 1e-2, 1e-3)
    l_combo = (1e-4, 1e-2, 0)

    # h_combo = (1e-4, 1e-1, 1e-3)
    # m_combo = (1e-4, 1e-2, 1e-3)
    # l_combo = (1e-4, 0, 1e-3)

    # # h_combo = (1, 1e-2, 1e-3)
    # # m_combo = (1e-2, 1e-2, 1e-3)
    # # l_combo = (1e-4, 1e-2, 1e-3)

    f1_highcap = []
    f1_midcap = []
    f1_lowcap = []
    noises = np.linspace(-20, 5, 50)
    h_beta_ip, h_beta_dm = [], []
    m_beta_ip, m_beta_dm = [], []
    l_beta_ip, l_beta_dm = [], []
    for noise in noises:
        X = X_test.clone()
        noise = 10**(noise / 20)  # convert NSR from dB to a linear scale
        X[:, :4] = white_noise(X[:, :4].data.numpy(), noise)
        F1, _, recall, _ = evaluation(models['model-with'][h_combo],
                                      'model-with', X, y_test)
        f1_highcap.append(F1)
        F1, _, recall, _ = evaluation(models['model-with'][m_combo],
                                      'model-with', X, y_test)
        f1_midcap.append(F1)
        F1, _, recall, _ = evaluation(models['model-with'][l_combo],
                                      'model-with', X, y_test)
        f1_lowcap.append(F1)
        modes = [X[:, :4], X[:, 4:]]

        _, betas = models['model-with'][h_combo][0].get_alpha_beta(modes)
        h_beta_ip.append(torch.mean(betas[:, 0]).data.numpy())
        h_beta_dm.append(torch.mean(betas[:, 1]).data.numpy())

        _, betas = models['model-with'][m_combo][0].get_alpha_beta(modes)
        m_beta_ip.append(torch.mean(betas[:, 0]).data.numpy())
        m_beta_dm.append(torch.mean(betas[:, 1]).data.numpy())

        _, betas = models['model-with'][l_combo][0].get_alpha_beta(modes)
        l_beta_ip.append(torch.mean(betas[:, 0]).data.numpy())
        l_beta_dm.append(torch.mean(betas[:, 1]).data.numpy())
    plt.plot(noises, f1_highcap, label='high energy regul.', c='purple')
    plt.plot(noises, f1_midcap, label='good energy regul.', c='b')
    plt.plot(noises, f1_lowcap, label='no energy regul.', c='skyblue')
    plt.axvline(x=20 * np.log10(0.5), ls='--', c='darkgrey')
    plt.axhline(y=0.82, ls='--', c='grey')
    plt.xlabel('NSR (dB)', fontsize=30)
    plt.ylabel('F1-score', fontsize=30)
    plt.ylim([0, 1])
    plt.tick_params(axis='both', which='major', labelsize=25)
    plt.legend(loc='best')
    if save: plt.savefig('results/presentation/energy-dm-noisy')
    plt.show()

    plt.plot(noises, h_beta_ip, label=r'$\beta$ corrupted mode', c='m')
    plt.plot(noises, h_beta_dm, label=r'$\beta$ other mode', c='orange')
    plt.axvline(x=20 * np.log10(0.5), ls='--', c='darkgrey')
    plt.xlabel('NSR (dB)', fontsize=30)
    plt.ylabel(r'$\beta$', fontsize=30)
    plt.ylim([-0.1, 1])
    plt.tick_params(axis='both', which='major', labelsize=25)
    plt.legend(loc='best')
    if save: plt.savefig('results/presentation/high-capacity-ip-noisy-beta')
    plt.show()

    plt.plot(noises, m_beta_ip, label=r'$\beta$ corrupted mode', c='m')
    plt.plot(noises, m_beta_dm, label=r'$\beta$ other mode', c='orange')
    plt.axvline(x=20 * np.log10(0.5), ls='--', c='darkgrey')
    plt.xlabel('NSR (dB)', fontsize=30)
    plt.ylabel(r'$\beta$', fontsize=30)
    plt.ylim([-0.1, 1])
    plt.tick_params(axis='both', which='major', labelsize=25)
    plt.legend(loc='best')
    if save: plt.savefig('results/presentation/normal-ip-noisy-beta')
    plt.show()

    plt.plot(noises, l_beta_ip, label=r'$\beta$ corrupted mode', c='m')
    plt.plot(noises, l_beta_dm, label=r'$\beta$ other mode', c='orange')
    plt.axvline(x=20 * np.log10(0.5), ls='--', c='darkgrey')
    plt.xlabel('NSR (dB)', fontsize=30)
    plt.ylabel(r'$\beta$', fontsize=30)
    plt.ylim([-0.1, 1])
    plt.tick_params(axis='both', which='major', labelsize=25)
    plt.legend(loc='best')
    if save: plt.savefig('results/presentation/no-capacity-ip-noisy-beta')
    plt.show()
Example #7
0
def plot_noise_generalisation(models,
                              best_combo,
                              X_test,
                              y_test,
                              save=False,
                              idx=None):
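    """Corrupt each mode in turn with white noise of increasing sigma,
    comparing F1 of the with/without/base models and the mean beta weights,
    then map both betas over a 2-D grid of per-mode noise levels."""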
    if idx is None:
        suffix = ""
    else:
        suffix = "-" + str(idx)

    f1_with = []
    f1_without = []
    f1_base = []
    noises = np.linspace(0, 2, 50)
    beta_ip, beta_dm = [], []
    for noise in noises:
        X = X_test.clone()
        X[:, :4] = white_noise(X[:, :4].data.numpy(), noise)
        F1, _, recall, _ = evaluation(models['model-with'][best_combo],
                                      'model-with', X, y_test)
        f1_with.append(F1)
        F1, _, recall, _ = evaluation(models['model-without'], 'model-without',
                                      X, y_test)
        f1_without.append(F1)
        F1, _, recall, _ = evaluation(models['base-model'], 'base-model', X,
                                      y_test)
        f1_base.append(F1)
        modes = [X[:, :4], X[:, 4:]]
        _, betas = models['model-with'][best_combo][0].get_alpha_beta(modes)
        beta_ip.append(torch.mean(betas[:, 0]).data.numpy())
        beta_dm.append(torch.mean(betas[:, 1]).data.numpy())
    plt.plot(noises, f1_with, label='with')
    plt.plot(noises, f1_without, label='without')
    plt.plot(noises, f1_base, label='base')
    plt.axvline(x=0.5, ls='--', c='green')
    plt.xlabel(r'$\sigma$ corruption', fontsize=30)
    plt.ylabel('F1-score', fontsize=30)
    plt.ylim([0, 1])
    plt.tick_params(axis='both', which='major', labelsize=25)
    plt.legend(loc='best')
    if save: plt.savefig('results/noise-generalisation-ip-noisy' + suffix)
    plt.show()

    plt.plot(noises, beta_ip, label=r'$\beta$-ip')
    plt.plot(noises, beta_dm, label=r'$\beta$-dm')
    plt.xlabel(r'$\sigma$ corruption', fontsize=30)
    plt.ylabel(r'$\beta$', fontsize=30)
    plt.ylim([-0.1, 1])
    plt.tick_params(axis='both', which='major', labelsize=25)
    plt.legend(loc='best')
    if save: plt.savefig('results/noise-generalisation-ip-noisy-beta' + suffix)
    plt.show()

    beta_ip, beta_dm = [], []
    f1_with = []
    f1_without = []
    f1_base = []
    noises = np.linspace(0, 2, 50)
    for noise in noises:
        X = X_test.clone()
        X[:, 4:] = white_noise(X[:, 4:].data.numpy(), noise)
        F1, _, recall, _ = evaluation(models['model-with'][best_combo],
                                      'model-with', X, y_test)
        f1_with.append(F1)
        F1, _, recall, _ = evaluation(models['model-without'], 'model-without',
                                      X, y_test)
        f1_without.append(F1)
        F1, _, recall, _ = evaluation(models['base-model'], 'base-model', X,
                                      y_test)
        f1_base.append(F1)
        modes = [X[:, :4], X[:, 4:]]
        _, betas = models['model-with'][best_combo][0].get_alpha_beta(modes)
        beta_ip.append(torch.mean(betas[:, 0]).data.numpy())
        beta_dm.append(torch.mean(betas[:, 1]).data.numpy())
    plt.plot(noises, f1_with, label='with')
    plt.plot(noises, f1_without, label='without')
    plt.plot(noises, f1_base, label='base')
    plt.axvline(x=0.5, ls='--', c='green')
    plt.xlabel(r'$\sigma$ corruption', fontsize=30)
    plt.ylabel('F1-score', fontsize=30)
    plt.ylim([0, 1])
    plt.tick_params(axis='both', which='major', labelsize=25)
    plt.legend(loc='best')
    if save: plt.savefig('results/noise-generalisation-dm-noisy' + suffix)
    plt.show()

    plt.plot(noises, beta_ip, label=r'$\beta$-ip')
    plt.plot(noises, beta_dm, label=r'$\beta$-dm')
    plt.xlabel(r'$\sigma$ corruption', fontsize=30)
    plt.ylabel(r'$\beta$', fontsize=30)
    plt.ylim([-0.1, 1])
    plt.tick_params(axis='both', which='major', labelsize=25)
    plt.legend(loc='best')
    if save: plt.savefig('results/noise-generalisation-dm-noisy-beta' + suffix)
    plt.show()

    x = np.linspace(0, 2, 20)
    y = np.linspace(0, 2, 20)
    lx = len(x)
    ly = len(y)
    beta_ip, beta_dm = np.zeros((20, 20)), np.zeros((20, 20))

    for i in range(lx):
        noise_ip = x[i]
        X = X_test.clone()
        X[:, :4] = white_noise(X_test[:, :4].data.numpy(), noise_ip)
        for j in range(ly):
            noise_dm = y[j]
            # Corrupt the second mode from the clean data so noise does not
            # accumulate across inner iterations
            X[:, 4:] = white_noise(X_test[:, 4:].data.numpy(), noise_dm)
            modes = [X[:, :4], X[:, 4:]]
            _, betas = models['model-with'][best_combo][0].get_alpha_beta(
                modes)
            beta_ip[i, j] = torch.mean(betas[:, 0]).data.numpy()
            beta_dm[i, j] = torch.mean(betas[:, 1]).data.numpy()

    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    # 'ij' indexing matches beta_ip[i, j] = f(x[i], y[j])
    x, y = np.meshgrid(x, y, indexing='ij')
    surf = ax.plot_surface(x, y, beta_ip, rstride=1, cstride=1, cmap='hot')
    fig.colorbar(surf, shrink=0.5, aspect=5)
    plt.show()

    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    surf = ax.plot_surface(x, y, beta_dm, rstride=1, cstride=1, cmap='hot')
    fig.colorbar(surf, shrink=0.5, aspect=5)
    plt.show()
Example #8
0
def main(experiment=Experiment.compare_simple):
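    """Run the selected Experiment: build the baseline BM25 ranking/retrieval
    results, then evaluate the requested vectorizer/classifier combinations
    against them."""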
    global docs, topics, topic_index, doc_index
    docs, topics, topic_index, doc_index = setup()
    p1.topics = topics
    # Reuse the existing index when possible; rebuild it if that fails
    try:
        p1_results = p1.evaluation(topics, (doc_index['p'], doc_index['n']),
                                   ('test', docs['test']),
                                   (p1.stem_analyzer, ),
                                   (p1.NamedBM25F(K1=2, B=1), ),
                                   'tfidf',
                                   skip_indexing=True)
    except Exception:
        p1_results = p1.evaluation(topics, (doc_index['p'], doc_index['n']),
                                   ('test', docs['test']),
                                   (p1.stem_analyzer, ),
                                   (p1.NamedBM25F(K1=2, B=1), ),
                                   'tfidf',
                                   skip_indexing=False)

    p1_ranking = list(p1_results[0].values())[0]
    p1_retrieval = list(p1_results[1].values())[0]

    experimental_evaluate = lambda models: evaluate(topics,
                                                    docs['test'],
                                                    topic_index,
                                                    models=models,
                                                    ranking_results=p1_ranking,
                                                    retrieval_results=p1_retrieval)
    if experiment == Experiment.mlp:  # Experiment.mlp is assumed here; this branch runs the untuned MLP
        experimental_evaluate(
            get_all_combinations(simple_vectorizers, [mlp_classifier]))

    elif experiment == Experiment.tuned_mlp:
        experimental_evaluate(
            get_all_combinations(simple_vectorizers, [tuned_mlp_classifier]))

    elif experiment == Experiment.knn:
        experimental_evaluate(
            get_all_combinations(simple_vectorizers, [knn_classifier]))

    elif experiment == Experiment.tuned_knn:
        experimental_evaluate(
            get_all_combinations(simple_vectorizers, [tuned_knn_classifier]))

    elif experiment == Experiment.compare_simple:
        experimental_evaluate([(tfidf_vectorizer, tuned_mlp_classifier),
                               (tfidf_vectorizer, tuned_knn_classifier)])

    elif experiment == Experiment.emsembles_knn:
        experimental_evaluate(
            get_all_combinations(emsembled_vectorizers + (tfidf_vectorizer, ),
                                 [tuned_knn_classifier]))

    elif experiment == Experiment.emsembles_mlp:
        experimental_evaluate(
            get_all_combinations(emsembled_vectorizers + (tfidf_vectorizer, ),
                                 [tuned_mlp_classifier]))

    elif experiment == Experiment.ablation_knn_neighbours:
        knn_n_neighbour_variant_classifiers = [
            NamedClassifier(
                GridSearchCV(KNeighborsClassifier(n_neighbors=k),
                             {'metric': TESTED_KNN_DISTANCES},
                             verbose=0,
                             cv=3), f'Tuned {k}NN')
            for k in TESTED_N_NEIGHBOURS
        ]
        experimental_evaluate(
            get_all_combinations((tfidf_vectorizer, ),
                                 knn_n_neighbour_variant_classifiers))

    elif experiment == Experiment.ablation_knn_distances:
        knn_distance_variant_classifiers = [
            NamedClassifier(
                GridSearchCV(KNeighborsClassifier(metric=distance),
                             {'n_neighbors': TESTED_N_NEIGHBOURS},
                             verbose=0,
                             cv=3), f'Tuned KNN {distance}')
            for distance in TESTED_KNN_DISTANCES
        ]
        experimental_evaluate(
            get_all_combinations((tfidf_vectorizer, ),
                                 knn_distance_variant_classifiers))

    elif experiment == Experiment.ablation_mlp_1_layer_comps:
        mlp_1_layer_comp_variant_classifiers = [
            NamedClassifier(
                MLPClassifier(hidden_layer_sizes=layer_comp,
                              random_state=1,
                              max_iter=1000), f'MLP {layer_comp}',
                f'mlp_{layer_comp[0]}') for layer_comp in TESTED_LAYER_COMPS
            if len(layer_comp) == 1
        ]
        experimental_evaluate(
            get_all_combinations((tfidf_vectorizer, ),
                                 mlp_1_layer_comp_variant_classifiers))

    elif experiment == Experiment.ablation_mlp_2_layer_comps:
        mlp_2_layer_comp_variant_classifiers = [
            NamedClassifier(
                MLPClassifier(hidden_layer_sizes=layer_comp,
                              random_state=1,
                              max_iter=1000), f'MLP {layer_comp}',
                f'mlp_{layer_comp[0]}_{layer_comp[1]}')
            for layer_comp in TESTED_LAYER_COMPS if len(layer_comp) == 2
        ]
        experimental_evaluate(
            get_all_combinations((tfidf_vectorizer, ),
                                 mlp_2_layer_comp_variant_classifiers))

    elif experiment == Experiment.ablation_mlp_3_layer_comps:
        mlp_3_layer_comp_variant_classifiers = [
            NamedClassifier(
                MLPClassifier(hidden_layer_sizes=layer_comp,
                              random_state=1,
                              max_iter=1000), f'MLP {layer_comp}',
                f'mlp_{layer_comp[0]}_{layer_comp[1]}_{layer_comp[2]}')
            for layer_comp in TESTED_LAYER_COMPS if len(layer_comp) == 3
        ]
        experimental_evaluate(
            get_all_combinations((tfidf_vectorizer, ),
                                 mlp_3_layer_comp_variant_classifiers))

    else:
        print("Insert a valid experiment")