Example #1
def test_integration(decorrelate, fft, inceptionv1):
    # First pass: a quick 16px render of neuron 0 in "mixed3a_pre_relu".
    obj = objectives.neuron("mixed3a_pre_relu", 0)
    param_f = lambda: param.image(16, decorrelate=decorrelate, fft=fft)
    rendering = render.render_vis(
        inceptionv1,
        obj,
        param_f=param_f,
        thresholds=(1, 2),
        verbose=False,
        transforms=[],
    )
    start_image = rendering[0]
    end_image = rendering[-1]

    # Second pass: a 64px render of neuron 177 in "mixed3a"; its two snapshots
    # replace the images captured above before the assertion runs.
    objective_f = objectives.neuron("mixed3a", 177)
    param_f = lambda: param.image(64, decorrelate=decorrelate, fft=fft)
    rendering = render.render_vis(
        inceptionv1,
        objective_f,
        param_f,
        verbose=False,
        thresholds=(0, 64),
        use_fixed_seed=True,
    )
    start_image, end_image = rendering

    assert (start_image != end_image).any()
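The test assumes the usual lucid imports plus pytest fixtures for the model and the decorrelate/fft flags; a minimal sketch of that setup (the fixture body and parametrizations are assumptions, not the project's actual conftest) might look like:

import pytest
import lucid.optvis.objectives as objectives
import lucid.optvis.param as param
import lucid.optvis.render as render
from lucid.modelzoo.vision_models import InceptionV1

@pytest.fixture
def inceptionv1():
    # Download and load the InceptionV1 (GoogLeNet) graph from lucid's model zoo.
    model = InceptionV1()
    model.load_graphdef()
    return model

# decorrelate and fft are presumably boolean parametrizations, e.g.:
#   @pytest.mark.parametrize("decorrelate", [True, False])
#   @pytest.mark.parametrize("fft", [True, False])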
Example #2
def test_integration(decorrelate, fft):
    obj = objectives.neuron("mixed3a_pre_relu", 0)
    param_f = lambda: param.image(16, decorrelate=decorrelate, fft=fft)
    rendering = render.render_vis(model,
                                  obj,
                                  param_f=param_f,
                                  thresholds=(1, 2),
                                  verbose=False,
                                  transforms=[])
    start_image = rendering[0]
    end_image = rendering[-1]
    assert (start_image != end_image).any()
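Here model is assumed to be a module-level InceptionV1 loaded as in the sketch above. render_vis returns one snapshot per entry in thresholds, which is why rendering[0] and rendering[-1] pick out the earlier and later snapshots; a quick check under that assumption:

# Two thresholds -> two snapshots of the optimized image.
obj = objectives.neuron("mixed3a_pre_relu", 0)
rendering = render.render_vis(model, obj,
                              param_f=lambda: param.image(16),
                              thresholds=(1, 2), verbose=False, transforms=[])
assert len(rendering) == 2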
Example #3
def visualization(learning_rate, neuron, channel, contrast, NRO_IMG, SAVE_P):
    LEARNING_RATE = learning_rate

    optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
    obj  = objectives.neuron(neuron, channel)
    imgs = render.render_vis(model,  obj,
                             optimizer=optimizer,
                             transforms=[],
                             param_f=lambda: param.image(256, fft=True, decorrelate=True, init_val=NRO_IMG),  # 256 is the image size
                             thresholds=(0,2), verbose=False)


    # Plot the contrast-amplified difference between the two snapshots to make artifacts more obvious
    plt.figure()
    plt.imshow(imgs[0][0])
    plt.axis('off')
    contraste = contrast  # tune this number until the difference looks reasonable
    plt.imshow(contraste*(imgs[1][0]-imgs[0][0]) + 0.5)
    plt.savefig(SAVE_P, bbox_inches='tight')
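The plotting half of this function stands on its own; a minimal sketch with placeholder arrays in place of the two render_vis snapshots (so it runs without a model) shows how the contrast-amplified difference is composed:

import numpy as np
import matplotlib.pyplot as plt

# Placeholder snapshots standing in for imgs[0] and imgs[1] from render_vis,
# each of shape (batch, height, width, 3) with values in [0, 1].
start = np.random.uniform(0.45, 0.55, size=(1, 256, 256, 3))
end = np.clip(start + np.random.normal(0, 0.02, size=start.shape), 0, 1)

contrast = 10  # amplification factor
plt.figure()
plt.axis('off')
plt.imshow(np.clip(contrast * (end[0] - start[0]) + 0.5, 0, 1))
plt.savefig("difference.png", bbox_inches='tight')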
Example #4
def test_integration_any_channels():
    inceptionv1 = InceptionV1()
    objectives_f = [
        objectives.deepdream("mixed4a_pre_relu"),
        objectives.channel("mixed4a_pre_relu", 360),
        objectives.neuron("mixed3a", 177)
    ]
    params_f = [
        lambda: param.grayscale_image_rgb(128),
        lambda: arbitrary_channels_to_rgb(128, channels=10)
    ]
    # Render every objective with every parameterization.
    for objective_f in objectives_f:
        for param_f in params_f:
            rendering = render.render_vis(
                inceptionv1,
                objective_f,
                param_f,
                verbose=False,
                thresholds=(0, 64),
                use_fixed_seed=True,
            )
            start_image, end_image = rendering

            assert (start_image != end_image).any()
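arbitrary_channels_to_rgb is a helper whose definition is not shown here; a hypothetical sketch, assuming it simply parameterizes an N-channel image and projects it down to RGB so render_vis can feed it to the model, could be:

import tensorflow as tf
import lucid.optvis.param as param

def arbitrary_channels_to_rgb(w, channels=10):
    # Hypothetical helper: build a `channels`-channel image parameterization
    # and collapse it to 3 channels by averaging interleaved channel groups.
    image_t = param.image(w, channels=channels)
    rgb = tf.stack([
        tf.reduce_mean(image_t[..., 0::3], axis=-1),
        tf.reduce_mean(image_t[..., 1::3], axis=-1),
        tf.reduce_mean(image_t[..., 2::3], axis=-1),
    ], axis=-1)
    return rgb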
Example #5
def test_neuron(inceptionv1):
    objective = objectives.neuron("mixed4a_pre_relu", 42)
    assert_gradient_ascent(objective, inceptionv1)
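assert_gradient_ascent is a shared test helper whose body is not shown; a hypothetical re-creation, assuming it just takes a few optimization steps with render.make_vis_T and checks that the objective value increased (TF1-style sessions, as in the examples above):

import tensorflow as tf
import lucid.optvis.param as param
import lucid.optvis.render as render

def assert_gradient_ascent(objective, model, steps=16):
    # Build the visualization graph, optimize briefly, and verify that the
    # objective value actually went up.
    with tf.Graph().as_default(), tf.Session() as sess:
        T = render.make_vis_T(model, objective,
                              param_f=lambda: param.image(64),
                              transforms=[])
        loss_t, vis_op = T("loss"), T("vis_op")
        tf.global_variables_initializer().run()
        start_loss = sess.run(loss_t)
        for _ in range(steps):
            sess.run(vis_op)
        assert sess.run(loss_t) > start_loss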