# Example #1
# 0
def __kernel_knn_cv_trainset(dataset_all, ds_name, knn_options, kernel_options,
                             gram_matrix_unnorm, time_precompute_gm,
                             train_examples, save_results, dir_save,
                             fn_output_detail, fn_output_summary):
    """Run k-NN cross-validation using the whole training split as the
    reference set.

    Distances come from the kernel-induced distance matrix of the
    normalized precomputed Gram matrix. Per-trial accuracies and a final
    summary row are optionally appended to CSV files.

    knn_options must provide 'n_neighbors', 'n_splits' and 'test_size';
    gram_matrix_unnorm is the unnormalized Gram matrix over all graphs in
    dataset_all. time_precompute_gm is only recorded in the summary row.
    """
    y_all = dataset_all.targets
    n_neighbors = knn_options['n_neighbors']
    n_splits = knn_options['n_splits']
    test_size = knn_options['test_size']

    # Normalize the Gram matrix and convert it to a distance matrix once;
    # it is shared by every trial.
    gram_matrix = normalize_gram_matrix(gram_matrix_unnorm.copy())
    dis_mat, _, _, _ = compute_distance_matrix(gram_matrix)

    # Get the train/test shuffles (one pair of index lists per trial).
    train_indices, test_indices, _, _ = __get_shuffles(y_all, n_splits,
                                                       test_size)

    accuracies = []
    for trial in range(len(train_indices)):
        print('\ntrial =', trial)

        train_index = train_indices[trial]
        test_index = test_indices[trial]
        y_app = [y_all[i] for i in train_index]
        y_test = [y_all[i] for i in test_index]

        # Train-vs-train and train-vs-test sub-matrices of the distance
        # matrix. np.ix_ builds the cross-product index grid, replacing
        # the original element-by-element copy loops.
        d_app = dis_mat[np.ix_(train_index, train_index)].copy()
        d_test = dis_mat[np.ix_(train_index, test_index)].copy()

        accuracies.append(
            knn_classification(d_app,
                               d_test,
                               y_app,
                               y_test,
                               n_neighbors,
                               verbose=True,
                               text=train_examples))

        # Append the per-trial result; "with" guarantees the file is
        # closed even if the CSV write raises.
        if save_results:
            print('writing results to files...')
            with open(dir_save + fn_output_detail, 'a') as f_detail:
                csv.writer(f_detail).writerow([
                    ds_name, kernel_options['name'], train_examples, trial,
                    knn_options['n_neighbors'],
                    len(gram_matrix), knn_options['test_size'],
                    accuracies[-1][0], accuracies[-1][1]
                ])

    # Aggregate train/test accuracies over trials (sample std, ddof=1).
    results = {}
    results['ave_perf_train'] = np.mean([i[0] for i in accuracies], axis=0)
    results['std_perf_train'] = np.std([i[0] for i in accuracies],
                                       axis=0,
                                       ddof=1)
    results['ave_perf_test'] = np.mean([i[1] for i in accuracies], axis=0)
    results['std_perf_test'] = np.std([i[1] for i in accuracies],
                                      axis=0,
                                      ddof=1)

    # write result summary for each letter.
    if save_results:
        with open(dir_save + fn_output_summary, 'a') as f_summary:
            csv.writer(f_summary).writerow([
                ds_name, kernel_options['name'], train_examples,
                knn_options['n_neighbors'], knn_options['test_size'],
                results['ave_perf_train'], results['ave_perf_test'],
                results['std_perf_train'], results['std_perf_test'],
                time_precompute_gm
            ])
# Example #2
# 0
def __kernel_knn_cv_median(dataset_all, ds_name, knn_options, mpg_options,
                           kernel_options, mge_options, ged_options,
                           gram_matrix_unnorm, time_precompute_gm,
                           train_examples, save_results, dir_save,
                           fn_output_detail, fn_output_summary):
    """Run k-NN cross-validation using per-class median preimage graphs
    as the reference set.

    For every trial and every class, three medians are generated from the
    class's training graphs (set-median, gen-median, gen-median with
    update_order); each median family is then used as the k-NN reference
    set against the test graphs. Per-trial and summary accuracies are
    optionally appended to CSV files.
    """
    Gn = dataset_all.graphs
    y_all = dataset_all.targets
    n_neighbors = knn_options['n_neighbors']
    n_splits = knn_options['n_splits']
    test_size = knn_options['test_size']

    # get shuffles. y_app holds one label per class, matching the order
    # in which medians are generated below.
    train_indices, test_indices, train_nums, y_app = __get_shuffles(
        y_all, n_splits, test_size)

    # One accuracy list per median type:
    # [set-median, gen-median, gen-median update_order].
    accuracies = [[], [], []]
    for trial in range(len(train_indices)):
        print('\ntrial =', trial)

        train_index = train_indices[trial]
        test_index = test_indices[trial]
        G_app = [Gn[i] for i in train_index]
        G_test = [Gn[i] for i in test_index]
        y_test = [y_all[i] for i in test_index]
        gm_unnorm_trial = gram_matrix_unnorm[
            train_index, :][:, train_index].copy()

        # compute pre-images for each class. train_nums gives the class
        # block sizes inside G_app, so prefix sums locate each class.
        medians = [[], [], []]
        train_nums_tmp = [0] + train_nums
        print('\ncomputing pre-image for each class...\n')
        for i_class in range(len(train_nums_tmp) - 1):
            print(i_class + 1, 'of', len(train_nums_tmp) - 1, 'classes:')
            i_start = int(np.sum(train_nums_tmp[0:i_class + 1]))
            i_end = i_start + train_nums_tmp[i_class + 1]
            median_set = G_app[i_start:i_end]

            dataset = dataset_all.copy()
            dataset.load_graphs([g.copy() for g in median_set], targets=None)
            # First pass with update_order=True yields the gen-median-uo;
            # second pass with update_order=False yields the plain
            # gen-median. The set-median is identical in both passes.
            mge_options['update_order'] = True
            mpg_options['gram_matrix_unnorm'] = gm_unnorm_trial[
                i_start:i_end, i_start:i_end].copy()
            mpg_options['runtime_precompute_gm'] = 0
            set_median, gen_median_uo = __generate_median_preimages(
                dataset, mpg_options, kernel_options, ged_options, mge_options)
            mge_options['update_order'] = False
            mpg_options['gram_matrix_unnorm'] = gm_unnorm_trial[
                i_start:i_end, i_start:i_end].copy()
            mpg_options['runtime_precompute_gm'] = 0
            _, gen_median = __generate_median_preimages(
                dataset, mpg_options, kernel_options, ged_options, mge_options)
            medians[0].append(set_median)
            medians[1].append(gen_median)
            medians[2].append(gen_median_uo)

        # for each set of medians. (Renamed loop variable: the original
        # shadowed G_app, the training graph list.)
        print('\nperforming k-nn...')
        for i_app, G_medians in enumerate(medians):
            # compute dis_mat between medians.
            dataset = dataset_all.copy()
            dataset.load_graphs([g.copy() for g in G_medians], targets=None)
            gm_app_unnorm, _ = __compute_gram_matrix_unnorm(
                dataset, kernel_options.copy())

            # Assemble the entire (N + n_test) x (N + n_test) Gram
            # matrix: medians occupy rows/cols 0..N-1, test graphs the
            # remainder.
            graph_kernel = __get_graph_kernel(dataset.copy(),
                                              kernel_options.copy())
            kernels_to_medians = []
            for g in G_medians:
                kernels_to_median, _ = graph_kernel.compute(
                    g, G_test, **kernel_options.copy())
                kernels_to_medians.append(kernels_to_median)
            kernels_to_medians = np.array(kernels_to_medians)
            gm_all = np.concatenate((gm_app_unnorm, kernels_to_medians),
                                    axis=1)
            gm_all = np.concatenate(
                (gm_all,
                 np.concatenate(
                     (kernels_to_medians.T,
                      gram_matrix_unnorm[test_index, :][:, test_index].copy()),
                     axis=1)),
                axis=0)

            gm_all = normalize_gram_matrix(gm_all.copy())
            dis_mat, _, _, _ = compute_distance_matrix(gm_all)

            N = len(G_medians)

            # Median-to-median block.
            d_app = dis_mat[0:N, 0:N].copy()
            # BUG FIX: test graphs occupy columns N..N+n_test-1 of
            # dis_mat. The original read dis_mat[i, j], which for j < N
            # copied median-to-median distances into d_test.
            d_test = dis_mat[0:N, N:].copy()

            accuracies[i_app].append(
                knn_classification(d_app,
                                   d_test,
                                   y_app,
                                   y_test,
                                   n_neighbors,
                                   verbose=True,
                                   text=train_examples))

        # write result detail.
        if save_results:
            print('writing results to files...')
            with open(dir_save + fn_output_detail, 'a') as f_detail:
                for i, median_type in enumerate(
                    ['set-median', 'gen median', 'gen median uo']):
                    csv.writer(f_detail).writerow([
                        ds_name, kernel_options['name'],
                        train_examples + ': ' + median_type, trial,
                        knn_options['n_neighbors'],
                        len(gm_all), knn_options['test_size'],
                        accuracies[i][-1][0], accuracies[i][-1][1]
                    ])

    # Aggregate per median type over trials (sample std, ddof=1).
    results = {}
    results['ave_perf_train'] = [
        np.mean([i[0] for i in j], axis=0) for j in accuracies
    ]
    results['std_perf_train'] = [
        np.std([i[0] for i in j], axis=0, ddof=1) for j in accuracies
    ]
    results['ave_perf_test'] = [
        np.mean([i[1] for i in j], axis=0) for j in accuracies
    ]
    results['std_perf_test'] = [
        np.std([i[1] for i in j], axis=0, ddof=1) for j in accuracies
    ]

    # write result summary for each letter.
    if save_results:
        with open(dir_save + fn_output_summary, 'a') as f_summary:
            for i, median_type in enumerate(
                ['set-median', 'gen median', 'gen median uo']):
                csv.writer(f_summary).writerow([
                    ds_name, kernel_options['name'],
                    train_examples + ': ' + median_type,
                    knn_options['n_neighbors'], knn_options['test_size'],
                    results['ave_perf_train'][i], results['ave_perf_test'][i],
                    results['std_perf_train'][i], results['std_perf_test'][i],
                    time_precompute_gm
                ])
# Example #3
# 0
def __kernel_knn_cv_best_ds(dataset_all, ds_name, knn_options, kernel_options,
                            gram_matrix_unnorm, time_precompute_gm,
                            train_examples, save_results, dir_save,
                            fn_output_detail, fn_output_summary):
    """Run k-NN cross-validation using, for each class, the training
    graph closest (in kernel space) to the class mean as the single
    reference graph.

    For every trial, one "best" graph per class is selected by minimizing
    the kernel distance to the class's mean embedding; these graphs form
    the k-NN reference set against the test graphs. Per-trial and summary
    accuracies are optionally appended to CSV files.
    """
    Gn = dataset_all.graphs
    y_all = dataset_all.targets
    n_neighbors = knn_options['n_neighbors']
    n_splits = knn_options['n_splits']
    test_size = knn_options['test_size']

    # get shuffles. y_app holds one label per class, matching the order
    # in which best graphs are selected below.
    train_indices, test_indices, train_nums, y_app = __get_shuffles(
        y_all, n_splits, test_size)

    accuracies = []
    for trial in range(len(train_indices)):
        print('\ntrial =', trial)

        train_index = train_indices[trial]
        test_index = test_indices[trial]
        G_app = [Gn[i] for i in train_index]
        G_test = [Gn[i] for i in test_index]
        y_test = [y_all[i] for i in test_index]
        gm_unnorm_trial = gram_matrix_unnorm[
            train_index, :][:, train_index].copy()

        # get best graph from trainset according to distance in kernel space for each class.
        # train_nums gives the class block sizes inside G_app, so prefix
        # sums locate each class slice.
        best_graphs = []
        train_nums_tmp = [0] + train_nums
        print('\ngetting best graph from trainset for each class...')
        for i_class in range(len(train_nums_tmp) - 1):
            print(i_class + 1, 'of', len(train_nums_tmp) - 1, 'classes.')
            i_start = int(np.sum(train_nums_tmp[0:i_class + 1]))
            i_end = i_start + train_nums_tmp[i_class + 1]
            G_class = G_app[i_start:i_end]
            gm_unnorm_class = gm_unnorm_trial[i_start:i_end, i_start:i_end]
            gm_class = normalize_gram_matrix(gm_unnorm_class.copy())

            # Distance of each class member to the uniform-weight class
            # mean in kernel space; the minimizer is the "best" graph.
            k_dis_list = []
            for idx in range(len(G_class)):
                k_dis_list.append(
                    compute_k_dis(idx,
                                  range(0, len(G_class)),
                                  [1 / len(G_class)] * len(G_class),
                                  gm_class,
                                  withterm3=False))
            idx_k_dis_min = np.argmin(k_dis_list)
            best_graphs.append(G_class[idx_k_dis_min].copy())

        # perform k-nn.
        print('\nperforming k-nn...')
        # compute dis_mat between medians.
        dataset = dataset_all.copy()
        dataset.load_graphs([g.copy() for g in best_graphs], targets=None)
        gm_app_unnorm, _ = __compute_gram_matrix_unnorm(
            dataset, kernel_options.copy())

        # Assemble the entire (N + n_test) x (N + n_test) Gram matrix:
        # best graphs occupy rows/cols 0..N-1, test graphs the remainder.
        graph_kernel = __get_graph_kernel(dataset.copy(),
                                          kernel_options.copy())
        kernels_to_best_graphs = []
        for g in best_graphs:
            kernels_to_best_graph, _ = graph_kernel.compute(
                g, G_test, **kernel_options.copy())
            kernels_to_best_graphs.append(kernels_to_best_graph)
        kernels_to_best_graphs = np.array(kernels_to_best_graphs)
        gm_all = np.concatenate((gm_app_unnorm, kernels_to_best_graphs),
                                axis=1)
        gm_all = np.concatenate(
            (gm_all,
             np.concatenate(
                 (kernels_to_best_graphs.T,
                  gram_matrix_unnorm[test_index, :][:, test_index].copy()),
                 axis=1)),
            axis=0)

        gm_all = normalize_gram_matrix(gm_all.copy())
        dis_mat, _, _, _ = compute_distance_matrix(gm_all)

        N = len(best_graphs)

        # Best-graph-to-best-graph block.
        d_app = dis_mat[0:N, 0:N].copy()
        # BUG FIX: test graphs occupy columns N..N+n_test-1 of dis_mat.
        # The original read dis_mat[i, j], which for j < N copied
        # best-graph-to-best-graph distances into d_test.
        d_test = dis_mat[0:N, N:].copy()

        accuracies.append(
            knn_classification(d_app,
                               d_test,
                               y_app,
                               y_test,
                               n_neighbors,
                               verbose=True,
                               text=train_examples))

        # write result detail.
        if save_results:
            print('writing results to files...')
            with open(dir_save + fn_output_detail, 'a') as f_detail:
                csv.writer(f_detail).writerow([
                    ds_name, kernel_options['name'], train_examples, trial,
                    knn_options['n_neighbors'],
                    len(gm_all), knn_options['test_size'],
                    accuracies[-1][0], accuracies[-1][1]
                ])

    # Aggregate train/test accuracies over trials (sample std, ddof=1).
    results = {}
    results['ave_perf_train'] = np.mean([i[0] for i in accuracies], axis=0)
    results['std_perf_train'] = np.std([i[0] for i in accuracies],
                                       axis=0,
                                       ddof=1)
    results['ave_perf_test'] = np.mean([i[1] for i in accuracies], axis=0)
    results['std_perf_test'] = np.std([i[1] for i in accuracies],
                                      axis=0,
                                      ddof=1)

    # write result summary for each letter.
    if save_results:
        with open(dir_save + fn_output_summary, 'a') as f_summary:
            csv.writer(f_summary).writerow([
                ds_name, kernel_options['name'], train_examples,
                knn_options['n_neighbors'], knn_options['test_size'],
                results['ave_perf_train'], results['ave_perf_test'],
                results['std_perf_train'], results['std_perf_test'],
                time_precompute_gm
            ])