Code Example #1
def get_strongest_filters(img_id, layer, top=3):
    filters = AlexNet.channels[layer]

    # Get activations for shortened model
    activation_img = AlexNet(layer).predict(get_path_from_id(img_id))

    # Make sure that dimensions 2 and 3 are spatial (the image is square)
    assert activation_img.shape[2] == activation_img.shape[3], "Index ordering incorrect"
    assert activation_img.shape[1] == filters

    # Find maximum activation for each filter for a given image
    activation_img = np.nanmax(activation_img, axis=3)
    activation_img = np.nanmax(activation_img, axis=2)

    # Remove batch size dimension
    assert activation_img.shape[0] == 1
    activation_img = activation_img.sum(0)

    # Make activations 1-based indexing
    activation_img = np.insert(activation_img, 0, 0.0)

    # activation_img is now a vector whose length equals the number of filters (plus one for 1-based indexing);
    # each entry is the maximum activation of the corresponding filter for the given image

    top_filters = activation_img.argsort()[-top:]
    return list(top_filters)
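
For reference, the top-k selection above can be exercised in isolation. The sketch below is a standalone illustration using a random stand-in for the per-filter maxima (the 256-filter count is an assumption, not taken from the model):

import numpy as np

# Stand-in for the per-filter maxima of one image (assumed 256 filters)
activation_img = np.random.rand(256)

# Shift to 1-based filter indexing, as in get_strongest_filters
activation_img = np.insert(activation_img, 0, 0.0)

# Indices of the three strongest filters (ascending by activation)
top_filters = activation_img.argsort()[-3:]
print(list(top_filters))
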
Code Example #2
def project_top_layer_filters(img_id=None, deconv_base_model=None):
    if img_id is None:
        img_id = randint(1, 50000)
    if deconv_base_model is None:
        deconv_base_model = Deconvolution(AlexNet().model)

    path = get_path_from_id(img_id)
    save_to_folder = 'TopFilters'

    projections = []
    box_borders = []
    layer = 5
    for max_filter in get_strongest_filters(img_id, layer, top=3):
        projection = deconv_base_model.project_down(path, layer, max_filter)

        # Boost contrast: scale so the 99th-percentile value maps to 20
        percentile = 99
        max_val = np.percentile(projection, percentile)
        projection *= (20 / max_val)
        box_borders.append(get_bounding_box_coordinates(projection))
        projections.append(projection)

    superposed_projections = np.maximum.reduce(projections)
    # superposed_projections = sum(projections)
    assert superposed_projections.shape == projections[0].shape

    DeconvOutput(superposed_projections).save_as(
        save_to_folder, '{}_activations.JPEG'.format(img_id))

    original_image = preprocess_image_batch(path)
    original_image = draw_bounding_box(original_image, box_borders)
    DeconvOutput(original_image).save_as(save_to_folder,
                                         '{}.JPEG'.format(img_id))
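
The contrast boost and superposition used above can be tried on their own. This is a minimal standalone sketch with random arrays in place of the deconvolution projections (the 227x227x3 shape is assumed purely for illustration):

import numpy as np

# Random stand-ins for three deconvolution projections
projections = [np.random.rand(227, 227, 3) for _ in range(3)]

boosted = []
for projection in projections:
    # Scale so the 99th-percentile value maps to 20, as in project_top_layer_filters
    max_val = np.percentile(projection, 99)
    boosted.append(projection * (20 / max_val))

# Pixel-wise maximum over the boosted projections
superposed_projections = np.maximum.reduce(boosted)
assert superposed_projections.shape == boosted[0].shape
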
Code Example #3
def project_multiple_layer_filters(img_id=None, deconv_base_model=None):
    if img_id is None:
        img_id = randint(1, 50000)
    if deconv_base_model is None:
        deconv_base_model = Deconvolution(AlexNet().model)

    path = get_path_from_id(img_id)
    save_to_folder = 'MultipleLayers'

    projections = []
    box_borders = []
    contrast = [None, 1, 3, 7, 13, 22]  # per-layer contrast factor; index 0 unused (layers are 1-based)
    for layer in (5, ):  # 1, 2, 3, 4,
        assert get_strongest_filter(img_id,
                                    layer) == get_strongest_filters(img_id,
                                                                    layer,
                                                                    top=1)
        max_filter = get_strongest_filter(img_id, layer)
        if layer == 1: print(img_id, ': ', max_filter)
        projection = deconv_base_model.project_down(
            path, layer, max_filter)  # reconstruction from deconvolving this layer's feature map

        if layer != 1:
            # Increase contrast
            percentile = 99
            # max_val = np.nanargmax(unarranged_array)
            # percentile: see stackflow.com/questions/2374640/how-do-i-calculate-percentiles-with-python-numpy
            max_val = np.percentile(projection, percentile)
            projection *= (contrast[layer] / max_val)
        else:
            projection *= 0.3

        box_borders.append(get_bounding_box_coordinates(projection))

        # x_diff[layer].append(box_borders[-1][1] - box_borders[-1][0])
        # y_diff[layer].append(box_borders[-1][3] - box_borders[-1][2])

        projections.append(projection)
    superposed_projections = np.maximum.reduce(projections)
    # superposed_projections = sum(projections)
    assert superposed_projections.shape == projections[0].shape
    DeconvOutput(superposed_projections).save_as(
        save_to_folder, '{}_activations.JPEG'.format(img_id))

    original_image = preprocess_image_batch(path)
    original_image = draw_bounding_box(original_image, box_borders)
    DeconvOutput(original_image).save_as(save_to_folder,
                                         '{}.JPEG'.format(img_id))
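
Because building the deconvolution model is the expensive step, both projection functions accept an existing deconv_base_model so it can be reused across images. A usage sketch (it assumes the repository's AlexNet and Deconvolution classes are available, so it will not run outside this codebase):

# Build the deconvolution model once and reuse it for several images
deconv = Deconvolution(AlexNet().model)
for img_id in (1, 2, 3):
    project_top_layer_filters(img_id=img_id, deconv_base_model=deconv)
    project_multiple_layer_filters(img_id=img_id, deconv_base_model=deconv)
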
Code Example #4
def get_heatmaps(img_id, alexnet, title):
    base_model = alexnet.base_model
    top_layer_model = alexnet.model
    labels = get_labels()

    path = get_path_from_id(img_id)

    strongest_filter = get_strongest_filter(img_id, layer=5)
    true_label = labels[img_id]
    print(strongest_filter, true_label)

    predictions = AlexNet(base_model=base_model).predict(path)
    print(decode_classnames_json(predictions))
    print(decode_classnumber(predictions))
    print(true_label)

    # DeconvOutput(preprocess_image_batch_grey_square(image_paths=path, square_x=50, square_y=50)).save_as('Occlusion',
    # title + '.JPEG')
    # 30x30 occlusion grid: per-filter activation and true-class probability at each position
    activations = np.zeros((30, 30))
    class_prop = np.zeros((30, 30))

    # Slide a grey square across the image in 7-px steps
    for x in range(0, 30):
        print(x)  # progress indicator
        for y in range(0, 30):
            prep_image = preprocess_image_batch_grey_square(
                path, 13 + x * 7, 13 + y * 7)
            activation = get_summed_activation_of_feature_map(
                top_layer_model, strongest_filter, prep_image)
            prediction = base_model.predict(prep_image)
            activations[x, y] = activation
            class_prop[x, y] = prediction[0][true_label]
    print('done')

    fig, ax = plt.subplots()
    cax = ax.imshow(activations, interpolation='nearest', cmap='plasma')
    plt.axis('off')
    # Add a colorbar
    cbar = fig.colorbar(cax)
    plt.show()

    fig, ax = plt.subplots()
    cax = ax.imshow(class_prop, interpolation='nearest', cmap='plasma')
    plt.axis('off')
    # Add a colorbar
    cbar = fig.colorbar(cax)
    plt.show()
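
The heatmap rendering at the end of get_heatmaps can be previewed in isolation. A standalone sketch with random data standing in for the occlusion results:

import numpy as np
import matplotlib.pyplot as plt

heatmap = np.random.rand(30, 30)  # stand-in for activations or class_prop

fig, ax = plt.subplots()
cax = ax.imshow(heatmap, interpolation='nearest', cmap='plasma')
ax.axis('off')
fig.colorbar(cax)
plt.show()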