Example #1
import numpy as np

# get_layer_weigh_list and plot_heatmap are project-local helpers assumed
# to be in scope.


def impact_heatmap():
    kernel_weights, layer, sizes, weights = get_layer_weigh_list()
    limit = 20
    for i in range(1, 4):
        previous = weights[i - 1]
        previous_kernel = []
        y_axis = []
        for n in range(previous.shape[0]):
            y_axis.append(f'L{i - 1}, K{n}')
            previous_kernel.append(np.mean(np.abs(previous[n])))
        data = np.zeros((sizes[i], previous.shape[0]))
        x_axis = []
        for j in range(sizes[i]):
            x_axis.append(f'L{i}, K{j}')
            to_analyze = np.abs(kernel_weights[f'{layer[i]}_kernel{j}'])
            total = np.sum(to_analyze)
            for k in range(to_analyze.shape[0]):
                # Fraction of kernel j's absolute weight mass that falls on
                # input channel k, i.e. on kernel k of the previous layer.
                data[j, k] = np.sum(to_analyze[k]) / total
                # Alternative weighting, scaling the channel mean by the mean
                # magnitude of the corresponding previous-layer kernel:
                # data[j, k] = previous_kernel[k] * np.mean(to_analyze[k])
        plot_heatmap(data[0:limit, 0:limit],
                     x_axis[0:limit],
                     y_axis[0:limit],
                     'Heat map of kernel influence from previous layer (sum)',
                     vmax=0.06)
def plot_over_layer(prev, this, name):
    # Majority cluster type of each previous-layer kernel.
    types = []
    for row in prev:
        counts = np.bincount(row)
        types.append(np.argmax(counts))
    # Reorder the columns of `this` so kernels of the same type are adjacent.
    index = 0
    new_this = np.zeros(this.shape)
    for i in range(np.max(prev) + 1):
        for j in range(this.shape[1]):
            if types[j] == i:
                new_this[:, index] = this[:, j]
                index += 1
    plot_heatmap(new_this, 'Channel', 'Kernel', f'{name}_sorted')
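Both functions in this example call a project-local `plot_heatmap` helper that is not part of the snippet. Below is a minimal sketch of what such a helper could look like, assuming a seaborn-based wrapper whose signature is inferred from the call sites; the real helper may differ and also supports project-specific keywords (e.g. `percent`) that are not reproduced here.

import matplotlib.pyplot as plt
import seaborn as sns


def plot_heatmap(data, col_labels='', row_labels='', title='', **kwargs):
    # Hypothetical stand-in for the project-local helper used throughout
    # these examples; inferred from call sites, not the original source.
    ax = kwargs.pop('ax', None)
    if ax is None:
        _, ax = plt.subplots()
    # String label arguments become axis titles, list arguments tick labels.
    xticks = col_labels if not isinstance(col_labels, str) else 'auto'
    yticks = row_labels if not isinstance(row_labels, str) else 'auto'
    sns.heatmap(data, ax=ax, xticklabels=xticks, yticklabels=yticks, **kwargs)
    if isinstance(col_labels, str):
        ax.set_xlabel(col_labels)
    if isinstance(row_labels, str):
        ax.set_ylabel(row_labels)
    ax.set_title(title)
    plt.show()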
Example #3
import numpy as np
import torch.nn as nn
from sklearn import feature_selection

# get_model and plot_heatmap are project-local helpers assumed to be in scope.


def mutual_information():
    model = get_model('CORnet-S_base', True)
    # Alternative checkpoints analyzed with the same code:
    # model = get_model('CORnet-S_random', False)
    # model = get_model('CORnet-S_train_second_kernel_conv_epoch_00', True)
    for name, m in model.named_modules():
        if isinstance(m, nn.Conv2d) and 'V1' not in name:
            weights = m.weight.data.squeeze().numpy()
            kernels = weights.shape[0]
            # Pairwise mutual information between flattened kernels.
            scores = np.zeros([kernels, kernels])
            for i in range(kernels):
                for j in range(kernels):
                    scores[i, j] = feature_selection.mutual_info_regression(
                        weights[i].flatten().reshape(-1, 1),
                        weights[j].flatten())[0]
            print(f'Kernel mean mutual information {np.mean(scores)}')
            plot_heatmap(scores,
                         title=f'Kernel mutual information {name}',
                         col_labels='Kernels',
                         row_labels='Kernels')
            if len(weights.shape) > 2:
                # Pairwise channel mutual information, averaged over kernels.
                channels = weights.shape[1]
                scores = np.zeros([kernels, channels, channels])
                for i in range(kernels):
                    for j in range(channels):
                        for k in range(channels):
                            scores[i, j, k] = (
                                feature_selection.mutual_info_regression(
                                    weights[i, j].flatten().reshape(-1, 1),
                                    weights[i, k].flatten())[0])
                scores = np.mean(scores, axis=0)
                print(f'Channel mean mutual information {np.mean(scores)}')
                plot_heatmap(scores,
                             title=f'Channel mean mutual information {name}',
                             col_labels='Channel',
                             row_labels='Channel')
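For reference, here is the pairwise call in isolation, on synthetic weights; a standalone sketch, not part of the original code.

import numpy as np
from sklearn import feature_selection

# Two synthetic 3x3 "kernels"; mutual_info_regression expects a 2-D X and a
# 1-D y and returns one estimate per X column, hence the trailing [0].
rng = np.random.RandomState(0)
w_i, w_j = rng.randn(3, 3), rng.randn(3, 3)
score = feature_selection.mutual_info_regression(
    w_i.flatten().reshape(-1, 1), w_j.flatten())[0]
print(f'MI estimate between the two kernels: {score:.3f}')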
from scipy.stats import pearsonr


def plot_correlation(prev, this, name):
    # Pearson correlation between each kernel of this layer (rows of `this`)
    # and each kernel of the previous layer (columns of `prev`).
    corr_matrix = np.zeros([this.shape[0], prev.shape[1]])
    for i in range(this.shape[0]):
        for j in range(prev.shape[1]):
            corr_matrix[i, j] = pearsonr(this[i], prev[:, j])[0]
    plot_heatmap(corr_matrix, 'Kernel next', 'Kernel prev',
                 f'{name}_kernel_corr')
from heapq import nlargest

from sklearn.linear_model import LinearRegression


def predict_next_kernel_to_kernel(prev, nxt, name):
    # For every kernel of the next layer, fit a linear map from each kernel
    # of the previous layer and record the three best-scoring predictors.
    predictability = np.zeros([nxt.shape[0], prev.shape[0]])
    prev = prev.reshape(prev.shape[0], -1)
    nxt = nxt.reshape(nxt.shape[0], -1)
    n = min(prev.shape[0], nxt.shape[0])
    for i in range(nxt.shape[0]):
        high_scores = {}
        kernel = nxt[i]
        kernels = np.repeat(kernel.reshape(1, -1), n, axis=0)
        for j in range(prev.shape[0]):
            # The fit sees a single (previous kernel -> next kernel) sample;
            # the score is then taken against all previous kernels mapped to
            # this target kernel.
            reg = LinearRegression()
            reg.fit(prev[j].reshape(1, -1), kernel.reshape(1, -1))
            high_scores[j] = reg.score(prev[:n], kernels)
        best_k = nlargest(3, high_scores, key=high_scores.get)
        predictability[i, best_k] = [high_scores[key] for key in best_k]
    plot_heatmap(predictability, 'L+1 kernel', 'L kernel',
                 f'{name}_predictability')
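Note that each `reg.fit` above sees exactly one training sample, so the fitted map is degenerate and all variation comes from scoring against `prev[:n]`. A more conventional per-pair fit treats the individual weight entries as observations; a sketch of that alternative formulation (not the original method):

import numpy as np
from sklearn.linear_model import LinearRegression


def kernel_pair_r2(prev_kernel, next_kernel):
    # Treat every flattened weight entry as one (x, y) observation and ask
    # how well a 1-D linear map of the previous kernel explains the next.
    x = prev_kernel.reshape(-1, 1)
    y = next_kernel.ravel()
    reg = LinearRegression().fit(x, y)
    return reg.score(x, y)


rng = np.random.RandomState(0)
print(f'R^2: {kernel_pair_r2(rng.randn(3, 3), rng.randn(3, 3)):.3f}')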
def delta_heatmap(model1, model2, imgs, epochs, selection=[], title='', ax=None):
    # get_connection, load_scores, calc_dif, benchmarks, convergence_images
    # and convergence_epoch are project-local helpers/lookup tables.
    names = []
    conn = get_connection()
    for model in [model1, model2]:
        for img in imgs:
            name = f'{model}_img{img}'
            for epoch in epochs:
                names.append(f'{name}_epoch_{epoch:02d}')
            names.append(f'{name}_epoch_{convergence_images[name]:02d}')
        names.append(f'{model}_epoch_{convergence_epoch[model]:02d}')
        for epoch in epochs:
            names.append(f'{model}_epoch_{epoch:02d}')
    names.append('CORnet-S_full_epoch_43')
    model_dict = load_scores(conn, names, benchmarks)
    full = np.mean(model_dict['CORnet-S_full_epoch_43'][selection])
    matrix = np.zeros([len(imgs) + 1, len(epochs) + 1])
    for i in range(len(imgs)):
        for j in range(len(epochs)):
            name1 = f'{model1}_img{imgs[i]}_epoch_{epochs[j]:02d}'
            name2 = f'{model2}_img{imgs[i]}_epoch_{epochs[j]:02d}'
            matrix[i, j] = calc_dif(name1, name2, model_dict, full, selection)
        # Last column: the converged runs for this image budget.
        name1 = f'{model1}_img{imgs[i]}'
        name1 = f'{name1}_epoch_{convergence_images[name1]:02d}'
        name2 = f'{model2}_img{imgs[i]}'
        name2 = f'{name2}_epoch_{convergence_images[name2]:02d}'
        matrix[i, -1] = calc_dif(name1, name2, model_dict, full, selection)
    # Last row: the full image budget.
    for j in range(len(epochs)):
        name1 = f'{model1}_epoch_{epochs[j]:02d}'
        name2 = f'{model2}_epoch_{epochs[j]:02d}'
        matrix[-1, j] = calc_dif(name1, name2, model_dict, full, selection)
    name1 = f'{model1}_epoch_{convergence_epoch[model1]:02d}'
    name2 = f'{model2}_epoch_{convergence_epoch[model2]:02d}'
    matrix[-1, -1] = calc_dif(name1, name2, model_dict, full, selection)
    plot_heatmap(matrix, r'\textbf{Epochs}', r'\textbf{Images}',
                 title=title, annot=True, ax=ax,
                 cbar=False, cmap='RdYlGn', percent=False, alpha=0.6,
                 fmt='.0%', vmin=-0.30, vmax=0.30, yticklabels=imgs + ['All'], xticklabels=epochs + ['Convergence'])
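`calc_dif` is another project-local helper not shown here. Judging only from how it is called and from the `.0%` formatting with `vmin=-0.30`/`vmax=0.30`, it plausibly returns a signed score difference normalized by the full model's score; a hypothetical reconstruction:

import numpy as np


def calc_dif(name1, name2, model_dict, full, selection):
    # Hypothetical version of the project helper: difference of the two
    # runs' mean benchmark scores, as a fraction of the full model's score.
    score1 = np.mean(model_dict[name1][selection])
    score2 = np.mean(model_dict[name2][selection])
    return (score1 - score2) / full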
def image_epoch_heatmap(model, imgs, epochs, selection=[],
                        title=r'\textbf{Standard training epochs/images trade-off}',
                        ax=None):
    names = []
    conn = get_connection()
    for img in imgs:
        names.append(f'{model}_img{img}')
    names.append(model)
    names.append('CORnet-S_full')
    model_dict = load_error_bared(conn, names, epochs=epochs, benchmarks=benchmarks)
    full = np.mean(model_dict['CORnet-S_full'][selection])
    matrix = np.zeros([len(imgs) + 1, len(epochs) + 1])
    for i in range(len(imgs)):
        for j in range(len(epochs)):
            name1 = f'{model}_img{imgs[i]}_epoch_{epochs[j]:02d}'
            matrix[i, j] = np.mean(model_dict[name1][selection]) / full
        # Last column: the converged run for this image budget.
        name = f'{model}_img{imgs[i]}'
        matrix[i, -1] = np.mean(model_dict[name][selection]) / full
    # Last row: the full image budget.
    for j in range(len(epochs)):
        name1 = f'{model}_epoch_{epochs[j]:02d}'
        matrix[-1, j] = np.mean(model_dict[name1][selection]) / full
    matrix[-1, -1] = np.mean(model_dict[model][selection]) / full
    plot_heatmap(matrix, r'\textbf{Epochs}', r'\textbf{Images}', title=title, annot=True, ax=ax,
                 cbar=False, cmap='YlOrRd', percent=False,
                 fmt='.0%', vmin=0, vmax=1, yticklabels=imgs + ['All'], xticklabels=epochs + ['Convergence'], alpha=0.8)
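The normalization in `image_epoch_heatmap` is simple: every cell holds a run's mean benchmark score divided by the fully trained model's mean score, so 1.0 means "matches full training". A toy, self-contained illustration with synthetic scores:

import numpy as np

rng = np.random.RandomState(0)
full = 0.42                                          # full model's mean score
scores = full * rng.uniform(0.3, 1.0, size=(3, 4))   # rows: imgs, cols: epochs
matrix = scores / full
print(np.round(matrix, 2))                           # fractions of full score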
Example #8
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from sklearn.metrics import mean_squared_error

# pca, fit_data, multivariate_gaussian, gabor_kernel_3, plot_2d, plot_3d,
# generate_correlation_map and plot_heatmap are project-local helpers.


def analyze_param_dist(name, plot=False):
    params = np.load(f'{name}.npy')
    names = [
        'Frequency', 'Theta', 'Sigma X', 'Sigma Y', 'Offset', 'Center X',
        'Center Y'
    ]
    # One row per gabor parameter, one column per filter (192 filters).
    variables = np.zeros((7, 192))
    for i in range(params.shape[2] - 1):
        variables[i] = params[:, :, i].flatten()
    # Drop the trailing entry per filter and treat every filter as one
    # sample of the seven gabor parameters.
    param = params[:, :, :-1].reshape(-1, 7)

    pca_res = pca(param, n_components=7)
    principal_components = pca_res.transform(param)  # shape (192, 7)
    if plot:
        plot_2d(principal_components[:, 0], principal_components[:, 1],
                'PC 1 & 2')
        plot_2d(principal_components[:, 1], principal_components[:, 2],
                'PC 2 & 3')
        plot_2d(principal_components[:, 0], principal_components[:, 2],
                'PC 1 & 3')
        plot_2d(principal_components[:, 0], principal_components[:, 3],
                'PC 1 & 4')
        plot_2d(principal_components[:, 0], principal_components[:, 4],
                'PC 1 & 5')
        plot_3d(principal_components[:, 0], principal_components[:, 1],
                principal_components[:, 2],
                'Principal components of gabor filter params')
    # Linear map from PCA space back to the original parameter space.
    reg = fit_data(principal_components, variables.T)
    small_samples = multivariate_gaussian(principal_components.T, 10)
    # small_samples shape: (n_components, 10)
    full_params = reg.predict(small_samples.T)  # shape (10, 7)
    small_samples_hat = pca_res.transform(full_params)  # shape (10, n_components)
    full_params_hat = reg.predict(small_samples_hat)
    print(mean_squared_error(small_samples.T, small_samples_hat))
    plt.figure(figsize=(5, 10))
    gs = gridspec.GridSpec(20,
                           3,
                           width_ratios=[1] * 3,
                           wspace=0.5,
                           hspace=0.5,
                           top=0.95,
                           bottom=0.05,
                           left=0.1,
                           right=0.95)
    for i in range(10):
        alpha = full_params[i]      # sampled parameter set
        beta = full_params_hat[i]   # reconstruction after the PCA round trip
        kernel1 = gabor_kernel_3(alpha[0],
                                 theta=alpha[1],
                                 sigma_x=alpha[2],
                                 sigma_y=alpha[3],
                                 offset=alpha[4],
                                 x_c=alpha[5],
                                 y_c=alpha[6],
                                 ks=7)
        kernel2 = gabor_kernel_3(beta[0],
                                 theta=beta[1],
                                 sigma_x=beta[2],
                                 sigma_y=beta[3],
                                 offset=beta[4],
                                 x_c=beta[5],
                                 y_c=beta[6],
                                 ks=7)
        ax = plt.subplot(gs[i, 0])
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_title('Sampled parameter set', pad=3, fontsize=5)
        plt.imshow(kernel1, cmap='gray')
        ax = plt.subplot(gs[i, 1])
        ax.set_title('Reconstruction', pad=10, fontsize=5)
        ax.set_xticks([])
        ax.set_yticks([])
        plt.imshow(kernel2, cmap='gray')
    plt.tight_layout()
    plt.savefig('reconstructions.png')
    plt.show()

    if plot:
        principal_components = principal_components.T
        corr_pca = generate_correlation_map(principal_components,
                                            principal_components)
        corr_map = generate_correlation_map(variables, variables)
        mask = np.zeros_like(corr_map)
        mask[np.triu_indices_from(mask)] = True
        plot_heatmap(corr_map,
                     names,
                     names,
                     title='Gabor parameter correlation',
                     mask=mask)
        mask = np.zeros_like(corr_pca)
        mask[np.triu_indices_from(mask)] = True
        plot_heatmap(corr_pca,
                     names,
                     names,
                     title='PCA correlations',
                     mask=mask)
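The `multivariate_gaussian` helper is not shown either. From the call site above (input of shape `(n_components, n_observations)`, output consumed as `(n_components, 10)`), a plausible minimal implementation fits a multivariate normal to the PCA scores and samples fresh points from it:

import numpy as np


def multivariate_gaussian(components, n_samples):
    # Hypothetical reconstruction of the sampling helper: fit a multivariate
    # normal to the PCA scores (rows = components, columns = observations)
    # and draw new points in PCA space.
    mean = components.mean(axis=1)
    cov = np.cov(components)
    rng = np.random.RandomState(0)
    return rng.multivariate_normal(mean, cov, size=n_samples).T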
def plot_type_count(this, name):
    # Count how often each cluster type occurs within each kernel.
    counts = np.zeros([this.shape[0], np.max(this) + 1])
    for i in range(this.shape[0]):
        counts[i] = np.bincount(this[i], minlength=np.max(this) + 1)
    plot_heatmap(counts, 'Kernel', 'Type', f'{name}_type_count')