Example #1
def standard_test(model, layer, unit_index, preferred_stimulus):
    colours = Colours()
    bg_colour_name = 'Light gray (background)'
    bg_colour = colours.get_RGB(bg_colour_name, 0)

    preferred_colour = preferred_stimulus['colour']

    square_shape = (100, 100)

    angle = preferred_stimulus['angle']
    rotation = [[np.cos(angle), -np.sin(angle)],
                [np.sin(angle), np.cos(angle)]]
    position_1 = np.add(np.dot(rotation,
                               np.array([-50, 0]).transpose()),
                        [200, 200]).astype(int)  # np.int was removed from NumPy; use int
    position_2 = np.add(np.dot(rotation,
                               np.array([50, 0]).transpose()),
                        [200, 200]).astype(int)

    # Stimuli as in panels A-D of Zhou et al. Figure 2
    stimulus_A = get_image((400, 400, 3), preferred_colour)
    add_rectangle(stimulus_A, position_1, square_shape, angle, bg_colour)

    stimulus_B = get_image((400, 400, 3), bg_colour)
    add_rectangle(stimulus_B, position_2, square_shape, angle,
                  preferred_colour)

    stimulus_C = get_image((400, 400, 3), bg_colour)
    add_rectangle(stimulus_C, position_1, square_shape, angle,
                  preferred_colour)

    stimulus_D = get_image((400, 400, 3), preferred_colour)
    add_rectangle(stimulus_D, position_2, square_shape, angle, bg_colour)

    stimulus_pref = get_image((400, 400, 3), bg_colour)
    add_rectangle(stimulus_pref, [200, 200],
                  (preferred_stimulus['width'], preferred_stimulus['length']),
                  preferred_stimulus['angle'], preferred_colour)

    A = run_mask_net(model, stimulus_A, layer, unit_index)
    B = run_mask_net(model, stimulus_B, layer, unit_index)
    C = run_mask_net(model, stimulus_C, layer, unit_index)
    D = run_mask_net(model, stimulus_D, layer, unit_index)
    responses = [A, B, C, D]
    m = np.mean(responses)
    side = np.abs((A + C) / 2 - (B + D) / 2) / m * 100
    contrast = np.abs((A + B) / 2 - (C + D) / 2) / m * 100
    print('side: {} contrast: {}'.format(side, contrast))

    return {
        'responses': responses,
        'side': side,
        'contrast': contrast,
        'mean': m
    }
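For reference, a minimal usage sketch; the model handle, layer name, unit index, and stimulus parameters below are hypothetical placeholders, and run_mask_net is assumed to be defined as in this example:

import numpy as np

# Placeholder preferred stimulus, in the format produced by find_optimal_bars below
preferred_stimulus = {
    'colour': (0.8, 0.2, 0.2),
    'length': 80,
    'width': 8,
    'angle': np.pi / 4
}

# 'model', the layer name, and the unit index are placeholders too
result = standard_test(model, 'conv4_3', 17, preferred_stimulus)
print('side modulation: {:.1f}%'.format(result['side']))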
Example #2
def inner_product_differences(optimal_stimulus, preferred_stimulus, im_width):
    colours = Colours()
    bg_colour_name = 'Light gray (background)'
    bg_colour = colours.get_RGB(bg_colour_name, 0)
    pref_colour = preferred_stimulus['colour']

    # unrealistically large negative effect of surround if we don't centre this
    optimal_stimulus = optimal_stimulus - np.mean(optimal_stimulus)

    bg_image = get_image(
        (im_width, im_width, 3), bg_colour) - .5  # centring these too
    pref_image = get_image((im_width, im_width, 3), pref_colour) - .5

    pref_left = inner_product(optimal_stimulus,
                              pref_image,
                              mask=get_mask(preferred_stimulus, im_width, True,
                                            False, False))
    pref_right = inner_product(optimal_stimulus,
                               pref_image,
                               mask=get_mask(preferred_stimulus, im_width,
                                             False, True, False))
    pref_surround = inner_product(optimal_stimulus,
                                  pref_image,
                                  mask=get_mask(preferred_stimulus, im_width,
                                                False, False, True))
    bg_left = inner_product(optimal_stimulus,
                            bg_image,
                            mask=get_mask(preferred_stimulus, im_width, True,
                                          False, False))
    bg_right = inner_product(optimal_stimulus,
                             bg_image,
                             mask=get_mask(preferred_stimulus, im_width, False,
                                           True, False))
    bg_surround = inner_product(optimal_stimulus,
                                bg_image,
                                mask=get_mask(preferred_stimulus, im_width,
                                              False, False, True))

    side = (pref_left + bg_right) - (pref_right + bg_left)
    surround = pref_surround - bg_surround

    # print('pref {} {} {}'.format(pref_left, pref_right, pref_surround))
    # print('bg {} {} {}'.format(bg_left, bg_right, bg_surround))
    # print('side {} surround {}'.format(side, surround))

    return side, surround
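The inner_product helper is not shown in these examples. A plausible minimal reconstruction, assuming it sums the element-wise product of the two images over the masked pixels, is:

import numpy as np

def inner_product(a, b, mask=None):
    # a, b: (h, w, channels) images; mask: optional (h, w) boolean array
    product = np.multiply(a, b)
    if mask is not None:
        product = product[mask]  # keep only pixels where mask is True
    return np.sum(product)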
Example #3
def get_mask(preferred_stimulus, im_width, include_left, include_right,
             include_background):
    square_shape = (im_width // 4, im_width // 4)  # integer division keeps pixel sizes integral
    angle = preferred_stimulus['angle']
    rotation = [[np.cos(angle), -np.sin(angle)],
                [np.sin(angle), np.cos(angle)]]
    offset = im_width / 8
    centre = im_width / 2
    position_1 = np.add(np.dot(rotation,
                               np.array([-offset, 0]).transpose()),
                        [centre, centre]).astype(int)
    position_2 = np.add(np.dot(rotation,
                               np.array([offset, 0]).transpose()),
                        [centre, centre]).astype(int)

    stimulus_left = get_image((im_width, im_width, 3), (0, 0, 0))
    add_rectangle(stimulus_left, position_1, square_shape, angle, (1, 1, 1))
    mask_left = stimulus_left[:, :, 0] > .5

    stimulus_right = get_image((im_width, im_width, 3), (0, 0, 0))
    add_rectangle(stimulus_right, position_2, square_shape, angle, (1, 1, 1))
    mask_right = stimulus_right[:, :, 0] > .5

    mask_both = np.logical_or(mask_left, mask_right)
    mask_bg = np.logical_not(mask_both)

    result = np.full((im_width, im_width), False)

    if include_left:
        result = np.logical_or(result, mask_left)

    if include_right:
        result = np.logical_or(result, mask_right)

    if include_background:
        result = np.logical_or(result, mask_bg)

    return result
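Since the background mask is the complement of the two squares, the three regions together cover the whole image; a quick sanity check (the stimulus angle is a placeholder):

import numpy as np

stim = {'angle': np.pi / 4}  # get_mask only reads 'angle'
im_width = 400

left = get_mask(stim, im_width, True, False, False)
right = get_mask(stim, im_width, False, True, False)
bg = get_mask(stim, im_width, False, False, True)

# Every pixel falls in at least one region; left and right may share a
# one-pixel border depending on how add_rectangle rasterises edges
assert np.logical_or(np.logical_or(left, right), bg).all()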
Example #4
def get_stimuli(preferred_stimulus, im_width):
    # TODO: extract this method in experiment

    colours = Colours()
    bg_colour_name = 'Light gray (background)'
    bg_colour = colours.get_RGB(bg_colour_name, 0)

    preferred_colour = preferred_stimulus['colour']

    square_shape = (im_width // 4, im_width // 4)

    angle = preferred_stimulus['angle']
    rotation = [[np.cos(angle), -np.sin(angle)],
                [np.sin(angle), np.cos(angle)]]
    offset = im_width / 8
    centre = im_width / 2
    position_1 = np.add(np.dot(rotation,
                               np.array([-offset, 0]).transpose()),
                        [centre, centre]).astype(int)
    position_2 = np.add(np.dot(rotation,
                               np.array([offset, 0]).transpose()),
                        [centre, centre]).astype(int)

    stimulus_A = get_image((im_width, im_width, 3), preferred_colour)
    add_rectangle(stimulus_A, position_1, square_shape, angle, bg_colour)

    stimulus_B = get_image((im_width, im_width, 3), bg_colour)
    add_rectangle(stimulus_B, position_2, square_shape, angle,
                  preferred_colour)

    stimulus_C = get_image((im_width, im_width, 3), bg_colour)
    add_rectangle(stimulus_C, position_1, square_shape, angle,
                  preferred_colour)

    stimulus_D = get_image((im_width, im_width, 3), preferred_colour)
    add_rectangle(stimulus_D, position_2, square_shape, angle, bg_colour)

    return stimulus_A, stimulus_B, stimulus_C, stimulus_D
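A short sketch for inspecting the four conditions with matplotlib, as the later examples do; the preferred-stimulus dict here is a placeholder (get_stimuli only reads 'colour' and 'angle'):

import numpy as np
import matplotlib.pyplot as plt

stim = {'colour': (0.8, 0.2, 0.2), 'angle': 0}  # placeholder values
A, B, C, D = get_stimuli(stim, 400)

plt.figure(figsize=(8, 2))
for i, (image, label) in enumerate(zip((A, B, C, D), 'ABCD')):
    plt.subplot(1, 4, i + 1)
    plt.imshow(image)
    plt.title(label)
    plt.axis('off')
plt.tight_layout()
plt.show()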
Example #5
def make_bar_stimuli(directory='.'):
    """
    Creates and saves bar stimulus images. These are used for finding optimal bar
    stimuli for units in a CNN, approximating the procedure in:

    H. Zhou, H. S. Friedman, and R. von der Heydt, "Coding of border ownership in monkey visual
    cortex.," J. Neurosci., vol. 20, no. 17, pp. 6594-6611, 2000.
    """

    colours = Colours()

    bg_colour_name = 'Light gray (background)'
    bg_colour = colours.get_RGB(bg_colour_name, 0)

    fg_colour_names = [
        key for key in colours.colours.keys() if key != bg_colour_name
    ]

    # TODO: probably need more sizes and angles
    lengths = [40, 80]
    widths = [4, 8]
    angles = np.pi * np.array([0, .25, .5, .75])

    parameters = []

    for fg_colour_name in fg_colour_names:
        n_luminances = colours.get_num_luminances(fg_colour_name)
        n_stimuli = len(lengths) * len(widths) * len(angles) * n_luminances
        print('Creating {} {} stimuli'.format(n_stimuli, fg_colour_name))
        for i in range(n_luminances):
            RGB = colours.get_RGB(fg_colour_name, i)
            for length in lengths:
                for width in widths:
                    for angle in angles:
                        parameters.append({
                            'colour': RGB,
                            'length': length,
                            'width': width,
                            'angle': angle
                        })

                        stimulus = get_image((400, 400, 3), bg_colour)
                        add_rectangle(stimulus, (200, 200), (width, length),
                                      angle, RGB)

                        filename = 'bar{}.jpg'.format(len(parameters) - 1)
                        sio.imsave(os.path.join(directory, filename), stimulus)

    return parameters
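Usage might look like the following; persisting the returned parameter list keeps each saved bar<i>.jpg interpretable later (pickle is an assumption here, since the original storage format is not shown):

import os
import pickle

os.makedirs('bar_stimuli', exist_ok=True)
parameters = make_bar_stimuli('bar_stimuli')

# bar<i>.jpg in the directory corresponds to parameters[i]
with open(os.path.join('bar_stimuli', 'parameters.pkl'), 'wb') as f:
    pickle.dump(parameters, f)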
Example #6
def find_optimal_bars(input, layers, im_shape=(400, 400)):
    """
    Finds bar stimuli that optimally activate each of the feature maps in a single layer of a
    convolutional network, approximating the procedure in:

    H. Zhou, H. S. Friedman, and R. von der Heydt, "Coding of border ownership in monkey visual
    cortex.," J. Neurosci., vol. 20, no. 17, pp. 6594-6611, 2000.

    Their description of the procedure is: "After isolation of a cell, the receptive field was
    examined with rectangular bars, and the optimal stimulus parameters were
    determined by varying the length, width, color, orientation ..."

    We approximate this by applying a variety of bar stimuli, and finding which one most strongly
    activates the centre unit in each feature map. Testing the whole layer at once is more
    efficient than testing each feature map individually, since the whole network up to that point
    must be run whether we record a single unit or all of them.

    :param input: Input to TensorFlow model (Placeholder node)
    :param layers: Layers of convolutional network to record from
    :return: parameters, responses, preferred_stimuli
    """

    colours = Colours()

    bg_colour_name = 'Light gray (background)'
    bg_colour = colours.get_RGB(bg_colour_name, 0)

    fg_colour_names = [
        key for key in colours.colours.keys() if key != bg_colour_name
    ]

    # TODO: probably need more sizes and angles, also shift bar laterally
    lengths = [40, 80]
    widths = [4, 8]
    angles = np.pi * np.array([0, .25, .5, .75])

    parameters = []
    responses = {}
    preferred_stimuli = {}

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)

        model_tf = ()
        for layer in layers:
            model_tf += (sess.graph.get_tensor_by_name(layer), )
            responses[layer] = []

        for fg_colour_name in fg_colour_names:
            input_data = None
            n_luminances = colours.get_num_luminances(fg_colour_name)
            n_stimuli = len(lengths) * len(widths) * len(angles) * n_luminances
            print('Testing {} {} stimuli'.format(n_stimuli, fg_colour_name))
            for i in range(n_luminances):
                RGB = colours.get_RGB(fg_colour_name, i)
                for length in lengths:
                    for width in widths:
                        for angle in angles:
                            parameters.append({
                                'colour': RGB,
                                'length': length,
                                'width': width,
                                'angle': angle
                            })

                            stimulus = get_image((im_shape[0], im_shape[1], 3),
                                                 bg_colour)
                            add_rectangle(stimulus,
                                          (im_shape[0] // 2, im_shape[1] // 2),
                                          (width, length), angle, RGB)

                            # plt.imshow(stimulus)
                            # plt.show()

                            if input_data is None:
                                input_data = np.expand_dims(stimulus, 0)
                            else:
                                input_data = np.concatenate(
                                    (input_data, np.expand_dims(stimulus, 0)),
                                    0)

            activities = sess.run(model_tf, feed_dict={input: input_data})
            # activities is a tuple of arrays, one per layer, each with shape stim x h x w x feats
            for i, activity in enumerate(activities):
                centre = (int(activity.shape[1] / 2),
                          int(activity.shape[2] / 2))

                responses[layers[i]].append(activity[:, centre[0],
                                                     centre[1], :])

    for layer in layers:
        # concatenate each layer's responses to stim x feats, then take argmax over stimuli
        responses[layer] = np.concatenate(responses[layer])
        preferred_stimuli[layer] = np.argmax(responses[layer], axis=0)

    return parameters, responses, preferred_stimuli
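The returned preferred_stimuli[layer] holds, per feature map, the index of the stimulus that most strongly drove the centre unit, so the winning bar parameters can be looked up directly (the tensor name and unit index are placeholders):

# parameters, responses, preferred_stimuli = find_optimal_bars(input, layers)

layer = 'conv4_3/Relu:0'  # placeholder tensor name
unit_index = 17           # placeholder feature-map index

best = parameters[preferred_stimuli[layer][unit_index]]
print('unit {} prefers: {}'.format(unit_index, best))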
Example #7
def standard_test(input, layer, unit_index, preferred_stimulus):
    # Note: we put edge of square on centre of preferred-stimulus bar
    # Zhou et al. determined significance of the effects of contrast and border ownership with
    # a 3-factor ANOVA, significance .01. The factors were side-of-ownership, contrast polarity,
    # and time. Having no time component, we use a two-factor ANOVA (sketched after this example).
    # "In the standard test, sizes
    # of 4 or 6° were used for cells of V1 and V2, and sizes between 4 and 17°
    # were used for cells of V4, depending on response field size."
    # I don't see where they mention the number of reps per condition, but there are 10 reps
    # in Figure 4.

    colours = Colours()
    bg_colour_name = 'Light gray (background)'
    bg_colour = colours.get_RGB(bg_colour_name, 0)

    preferred_colour = preferred_stimulus['colour']

    square_shape = (100, 100)

    angle = preferred_stimulus['angle']
    rotation = [[np.cos(angle), -np.sin(angle)],
                [np.sin(angle), np.cos(angle)]]
    position_1 = np.add(np.dot(rotation,
                               np.array([-50, 0]).transpose()),
                        [200, 200]).astype(int)
    position_2 = np.add(np.dot(rotation,
                               np.array([50, 0]).transpose()),
                        [200, 200]).astype(int)

    # preferred_shape = (preferred_stimulus['width'], preferred_stimulus['length'])
    # add_rectangle(stimulus, (200,200), preferred_shape, angle, preferred_colour)

    # Stimuli as in panels A-D of Zhou et al. Figure 2
    stimulus_A = get_image((400, 400, 3), preferred_colour)
    add_rectangle(stimulus_A, position_1, square_shape, angle, bg_colour)

    stimulus_B = get_image((400, 400, 3), bg_colour)
    add_rectangle(stimulus_B, position_2, square_shape, angle, preferred_colour)

    stimulus_C = get_image((400, 400, 3), bg_colour)
    add_rectangle(stimulus_C, position_1, square_shape, angle, preferred_colour)

    stimulus_D = get_image((400, 400, 3), preferred_colour)
    add_rectangle(stimulus_D, position_2, square_shape, angle, bg_colour)
    
    # Stimulus of different size
    square_shape = (150, 150)
    
    stimulus_A2 = get_image((400, 400, 3), preferred_colour)
    add_rectangle(stimulus_A2, position_1, square_shape, angle, bg_colour)

    stimulus_B2 = get_image((400, 400, 3), bg_colour)
    add_rectangle(stimulus_B2, position_2, square_shape, angle, preferred_colour)

    stimulus_C2 = get_image((400, 400, 3), bg_colour)
    add_rectangle(stimulus_C2, position_1, square_shape, angle, preferred_colour)

    stimulus_D2 = get_image((400, 400, 3), preferred_colour)
    add_rectangle(stimulus_D2, position_2, square_shape, angle, bg_colour)

    input_data = np.stack((stimulus_A, stimulus_B, stimulus_C, stimulus_D,
                           stimulus_A2, stimulus_B2, stimulus_C2, stimulus_D2))

    # print(input_data.shape)
    # plt.imshow(stimulus_D)
    # plt.show()

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)

        model_tf = sess.graph.get_tensor_by_name(layer)
        activities = sess.run(
            model_tf, feed_dict={input: input_data})

        centre = (int(activities.shape[1] / 2), int(activities.shape[2] / 2))
        responses = activities[:, centre[0], centre[1], unit_index]

    m = np.mean(responses[:4])
    m2 = np.mean(responses[4:])

    A, B, C, D, A2, B2, C2, D2 = responses
    side = np.abs((A + C) / 2 - (B + D) / 2) / m * 100
    side2 = np.abs((A2 + C2) / 2 - (B2 + D2) / 2) / m2 * 100

    return {
        'responses': responses,
        'side': side,
        'side2': side2,
        'mean': m,
        'mean2': m2
    }
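As a sketch of the two-factor ANOVA mentioned in the comments above (side-of-ownership x contrast polarity), assuming repeated presentations per condition so the interaction leaves residual degrees of freedom; the response values are random placeholders purely to demonstrate the statsmodels call:

import numpy as np
import pandas as pd
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm

# Conditions follow Zhou et al. Figure 2: A and C share a side of ownership,
# B and D share the other; A and B share a contrast polarity, C and D the other
conditions = [('A', 'side1', 'polarity1'), ('B', 'side2', 'polarity1'),
              ('C', 'side1', 'polarity2'), ('D', 'side2', 'polarity2')]

rng = np.random.default_rng(0)
rows = []
for _, side_label, polarity in conditions:
    for _ in range(10):  # 10 reps per condition, as in their Figure 4
        rows.append({'side': side_label, 'contrast': polarity,
                     'response': rng.normal()})  # placeholder responses
df = pd.DataFrame(rows)

fit = ols('response ~ C(side) * C(contrast)', data=df).fit()
print(anova_lm(fit))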
Example #8
def standard_test(doc_converted, hed_converted, preferred_stimulus):
    # Note: we put edge of square on centre of preferred-stimulus bar
    # Zhou et al. determined significance of the effects of contrast and border ownership with
    # a 3-factor ANOVA, significance .01. The factors were side-of-ownership, contrast polarity,
    # and time. Having no time component, we use a two-factor ANOVA.
    # "In the standard test, sizes
    # of 4 or 6° were used for cells of V1 and V2, and sizes between 4 and 17°
    # were used for cells of V4, depending on response field size."
    # I don't see where they mention the number of reps per condition, but there are 10 reps
    # in Figure 4.

    colours = Colours()
    bg_colour_name = 'Light gray (background)'
    bg_colour = colours.get_RGB(bg_colour_name, 0)

    # Use contour (HED) response; 'parameters' is assumed to be the module-level
    # stimulus list returned by find_optimal_bars
    preferred_stimulus = parameters[preferred_stimulus['hed'][0]]
    preferred_colour = preferred_stimulus['colour']

    square_shape = (100, 100)

    angle = preferred_stimulus['angle']
    rotation = [[np.cos(angle), -np.sin(angle)],
                [np.sin(angle), np.cos(angle)]]
    position_1 = np.add(np.dot(rotation,
                               np.array([-50, 0]).transpose()),
                        [200, 200]).astype(int)
    position_2 = np.add(np.dot(rotation,
                               np.array([50, 0]).transpose()),
                        [200, 200]).astype(int)

    # preferred_shape = (preferred_stimulus['width'], preferred_stimulus['length'])
    # add_rectangle(stimulus, (200,200), preferred_shape, angle, preferred_colour)

    # Stimuli as in panels A-D of Zhou et al. Figure 2
    stimulus_A = get_image((400, 400, 3), preferred_colour)
    add_rectangle(stimulus_A, position_1, square_shape, angle, bg_colour)

    stimulus_B = get_image((400, 400, 3), bg_colour)
    add_rectangle(stimulus_B, position_2, square_shape, angle,
                  preferred_colour)

    stimulus_C = get_image((400, 400, 3), bg_colour)
    add_rectangle(stimulus_C, position_1, square_shape, angle,
                  preferred_colour)

    stimulus_D = get_image((400, 400, 3), preferred_colour)
    add_rectangle(stimulus_D, position_2, square_shape, angle, bg_colour)

    input_data = np.stack((stimulus_A, stimulus_B, stimulus_C, stimulus_D))

    #     print(input_data.shape)
    fig = plt.figure(figsize=(12, 8))
    plt.subplot(2, 6, 1)
    plt.imshow(stimulus_A)
    plt.title('A')
    plt.subplot(2, 6, 2)
    plt.imshow(stimulus_C)
    plt.title('C')
    plt.subplot(2, 6, 7)
    plt.imshow(stimulus_B)
    plt.title('B')
    plt.subplot(2, 6, 8)
    plt.imshow(stimulus_D)
    plt.title('D')
    #     plt.show()

    layers = ['doc', 'hed']
    responses = {'doc': [], 'hed': []}
    side = {'doc': [], 'hed': []}
    contrast = {'doc': [], 'hed': []}
    m = {'doc': [], 'hed': []}

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)

        # Get input and output tensors
        doc_input, doc_output = doc_converted
        hed_input, hed_output = hed_converted

        activities = sess.run((doc_output, hed_output),
                              feed_dict={
                                  doc_input: input_data,
                                  hed_input: input_data
                              })

        # Compute the metrics for each output map (doc, hed) separately
        for i, activity in enumerate(activities):
            centre = (int(activity.shape[1] / 2), int(activity.shape[2] / 2))
            responses[layers[i]] = activity[:, centre[0], centre[1]]

            m[layers[i]] = np.mean(responses[layers[i]])
            A, B, C, D = responses[layers[i]]
            side[layers[i]] = np.abs((A + C) / 2 -
                                     (B + D) / 2) / m[layers[i]] * 100
            contrast[layers[i]] = np.abs((A + B) / 2 -
                                         (C + D) / 2) / m[layers[i]] * 100
            # print('side: {} contrast: {}'.format(side, contrast))

    # Orientation (DOC) output maps
    #     plt.figure()
    plt.subplot(2, 6, 3)
    plt.imshow(activities[0][0].squeeze())
    plt.title('A')
    plt.subplot(2, 6, 4)
    plt.imshow(activities[0][2].squeeze())
    plt.title('C')
    plt.subplot(2, 6, 9)
    plt.imshow(activities[0][1].squeeze())
    plt.title('B')
    plt.subplot(2, 6, 10)
    plt.imshow(activities[0][3].squeeze())
    plt.title('D')

    # Contour (HED) output maps
    #     plt.figure()
    plt.subplot(2, 6, 5)
    plt.imshow(activities[1][0].squeeze())
    plt.title('A')
    plt.subplot(2, 6, 6)
    plt.imshow(activities[1][2].squeeze())
    plt.title('C')
    plt.subplot(2, 6, 11)
    plt.imshow(activities[1][1].squeeze())
    plt.title('B')
    plt.subplot(2, 6, 12)
    plt.imshow(activities[1][3].squeeze())
    plt.title('D')
    plt.tight_layout()
    #     fig.savefig('doc_preferred.png', dpi=200)

    #     # Show the different conditions
    #     plt.figure(figsize=(7,3))
    #     plt.subplot(1, 2, 1)
    #     plt.bar([0, 1, 2, 3], responses['hed'].squeeze())
    #     plt.xticks([0, 1, 2, 3], ('A', 'B', 'C', 'D'))
    #     plt.ylabel('BOS')
    #     plt.title('HED')

    #     plt.subplot(1, 2, 2)
    #     plt.bar([0, 1, 2, 3], responses['doc'].squeeze())
    #     plt.xticks([0, 1, 2, 3], ('A', 'B', 'C', 'D'))
    #     plt.ylabel('BOS')
    #     plt.title('DOC')

    return {
        'responses': responses,
        'side': side,
        'contrast': contrast,
        'mean': m
    }