Example #1
    def get_layer_outputs(layer_name, input_path):
        is_grayscale = (input_channels == 1)
        input_img = load_img(input_path, single_input_shape, grayscale=is_grayscale)

        output_generator = get_outputs_generator(model, layer_name)

        with get_evaluation_context():

            layer_outputs = output_generator(input_img)[0]
            output_files = []

            if keras.backend.backend() == 'theano':
                # correct for channel location difference between TF and Theano
                layer_outputs = np.rollaxis(layer_outputs, 0, 3)
            for z in range(0, layer_outputs.shape[2]):
                img = layer_outputs[:, :, z]
                deprocessed = deprocess_image(img)
                filename = get_output_name(temp_folder, layer_name, input_path, z)
                output_files.append(
                    relpath(
                        filename,
                        abspath(temp_folder)
                    )
                )
                imsave(filename, deprocessed)

        return jsonify(output_files)
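The `np.rollaxis` call above only corrects for backend layout: with the Theano backend the activation volume comes back channels-first, `(channels, height, width)`, while the saving loop expects the TensorFlow channels-last layout, `(height, width, channels)`. A minimal, self-contained sketch of that axis move (the shape here is illustrative, not taken from the example):

    import numpy as np

    # Channels-first activation volume: 8 feature maps of size 28x28.
    channels_first = np.zeros((8, 28, 28))

    # Roll axis 0 (channels) to the end: (8, 28, 28) -> (28, 28, 8).
    channels_last = np.rollaxis(channels_first, 0, 3)
    print(channels_last.shape)  # (28, 28, 8)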
Example #2
    def get_layer_outputs(layer_name, input_path):

        input_img = load_img(input_path, single_input_shape, grayscale)
        output_generator = get_outputs_generator(model, layer_name)

        with get_evaluation_context():

            layer_outputs = output_generator(input_img)[0]
            output_files = []
            print('layer_output SHAPE: ', layer_outputs.shape)

            for z in range(0, layer_outputs.shape[0]):

                img = layer_outputs[z, :, :]
                deprocessed = deprocess_image(img)
                filename = get_output_name(temp_folder, layer_name, input_path, z)
                output_files.append(
                    relpath(
                        filename,
                        abspath(temp_folder)
                    )
                )
                imsave(filename, deprocessed)

        return jsonify(output_files)
Example #3
    def get_layer_outputs(layer_name, input_path):

        input_img = load_img(input_path, single_input_shape)
        output_generator = get_outputs_generator(model, layer_name)

        with graph.as_default():

            layer_outputs = output_generator(input_img)[0]
            output_files = []

            for z in range(0, layer_outputs.shape[2]):

                img = layer_outputs[:, :, z]
                deprocessed = deprocess_image(img)
                filename = get_output_name(temp_folder, layer_name, input_path,
                                           z)
                output_files.append(relpath(filename, abspath(temp_folder)))
                imsave(filename, deprocessed)

        return jsonify(output_files)
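Example #3 runs the forward pass inside `graph.as_default()`, a pattern usually needed when a TF1-era Keras model is loaded once at startup and then invoked from a different thread (for instance inside a web request handler). A minimal sketch of how such a module-level `graph` is typically captured at load time; the model path, helper name, and TF1 API use are assumptions, not taken from this example:

    import tensorflow as tf
    from keras.models import load_model

    model = load_model('model.h5')    # hypothetical model file
    graph = tf.get_default_graph()    # remember the graph the model was built in

    def predict_threadsafe(batch):
        # Re-enter the captured graph before calling the model from another thread.
        with graph.as_default():
            return model.predict(batch)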
Example #4
def class_visualization(target_y):
    L2_REG = 1e-6
    LEARNING_RATE = 20000
    NUM_ITERATIONS = 200
    MAX_JITTER = 4

    solver_path = './DeepFaceNetDeploy.prototxt'
    weights_path = './snapshots/_iter_42000.caffemodel'
    mean_image = np.load("../data/mean_image.npy").astype(np.uint8)

    caffe.set_mode_gpu()

    # Load the network
    net = caffe.Net(solver_path, weights_path, caffe.TRAIN)

    # Start with a random image
    # X = np.random.randint(0, 256, size=(224,224,3)).astype(np.float)
    # X -= mean_image
    # X = X[:,:,::-1]

    mean_image_bgr = mean_image[:, :, ::-1].astype(np.float)
    # print mean_image_bgr.flatten()[0:50]

    if not os.path.exists('outputs-v1/'):
        os.makedirs('outputs-v1/')

    X = np.random.normal(0, 10, (224, 224, 3))
    plt.clf()
    plt.imshow(mean_image)
    plt.axis('off')
    plt.savefig('outputs-v1/mean-image.png')
    # out=Image.fromarray(mean_image,mode="RGB")
    # out.save('outputs/mean-image.png')

    # Set up blob data
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2, 0, 1))

    data_blob_shape = net.blobs['data'].data.shape
    data_blob_shape = list(data_blob_shape)
    net.blobs['data'].reshape(1, data_blob_shape[1], data_blob_shape[2],
                              data_blob_shape[3])
    net.blobs['data'].data[...] = transformer.preprocess('data', X)

    # Set the target diffs at fc8 layers
    one_hots = []
    for i in range(1, 43):
        diff = net.blobs['fc8-%d' % (i)].diff
        one_hot = np.zeros_like(diff)
        one_hot[0, target_y[i - 1]] = 1

        net.blobs['fc8-%d' % (i)].diff[...] = one_hot

        one_hots.append(one_hot)

    # print 'Before'
    # print net.blobs['fc8-1'].diff
    # print net.blobs['fc8-42'].diff
    # _ = net.forward()
    # dX = net.backward(start='fc8-42')
    # print 'After'
    # print net.blobs['fc8-1'].diff
    # print net.blobs['fc8-42'].diff

    print('Saving image %d' % 0)
    plt.clf()
    plt.imshow(util.deprocess_image(X, mean_image))
    plt.axis('off')
    plt.savefig('outputs-v1/image-%d.png' % (0))

    # print mean_image.flatten()[0:10]
    for t in range(1, NUM_ITERATIONS + 1):
        # As a regularizer, add random jitter to the image
        ox, oy = np.random.randint(-MAX_JITTER, MAX_JITTER + 1, 2)
        X = np.roll(np.roll(X, ox, -1), oy, -2)

        print('Performing iteration %d...' % t)
        net.blobs['data'].data[...] = transformer.preprocess('data', X)
        for i in range(1, 43):
            net.blobs['fc8-%d' % (i)].diff[...] = one_hots[i - 1]

        _ = net.forward()
        dX = net.backward(start='fc8-42')
        dX = dX['data']
        dX = dX[0, :, :, :]
        dX = np.transpose(dX, (1, 2, 0))

        dX -= 2 * L2_REG * X
        # print dX.flatten()[0:50]
        X += LEARNING_RATE * dX

        # Undo the jitter
        X = np.roll(np.roll(X, -ox, -1), -oy, -2)

        # As a regularizer, clip the image
        # print X.flatten()[0:50]
        X = np.clip(X, -mean_image_bgr, 255.0 - mean_image_bgr)
        # print X.flatten()[0:50]
        # print '--------------'

        # As a regularizer, periodically blur the image
        # if t % blur_every == 0:
        # 	X = blur_image(X)

        if t % 10 == 0 or t == NUM_ITERATIONS:
            print('Saving image %d' % t)
            plt.clf()
            plt.imshow(util.deprocess_image(X, mean_image))
            plt.axis('off')
            plt.savefig('outputs-v1/image-%d.png' % (t))
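`util.deprocess_image(X, mean_image)` is not shown in this example. For a mean-subtracted BGR image like `X` above, deprocessing typically adds the dataset mean back, reorders BGR to RGB, and clips to a displayable `uint8` range; the helper below is only a guess at that behaviour, not the author's code:

import numpy as np

def deprocess_image_sketch(x_bgr, mean_image_rgb):
    # Assumed steps: add the mean back (in BGR order), flip BGR -> RGB, clip to [0, 255].
    mean_bgr = mean_image_rgb[:, :, ::-1].astype(np.float64)
    img = x_bgr + mean_bgr
    img = img[:, :, ::-1]
    return np.clip(img, 0, 255).astype(np.uint8)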
Example #5
def show_filter_responses(model, layer_index, input_img_path, save_dir=None, filter_index=None, dpi=100.0, save_original=False):
    """
    Show and save the filter responses for all filters, or a selected filter, at a given layer.
    :param model: pre-trained model object
    :param layer_index: index of the layer
    :param input_img_path: path of the input image
    :param save_dir: path of directory to save the filters/original image. Filters are only displayed but not saved if this is None
    :param filter_index: index of the filter in the given layer. All filter responses are displayed if this is None
    :param dpi: DPI of the display
    :param save_original: if True and save_dir is given, also save the deprocessed input image
    """
    input_img = np.array(imageio.imread(input_img_path), dtype=np.float32)
    if K.image_dim_ordering() == "th":
        input_img = input_img.transpose((2, 0, 1))
    
    layer = model.layers[layer_index]
    inputs = [K.learning_phase()] + model.inputs
    _layer_f = K.function(inputs, [layer.output])

    def layer_f(X):
        # The [0] is to disable the training phase flag
        return _layer_f([0] + [X])

    if K.image_dim_ordering() == "th":
        display_image = input_img.transpose((1, 2, 0))
    else:
        display_image = np.copy(input_img)
    display_image = util.deprocess_image(display_image, alter_dim=False)

    pl.figure(
        figsize=(display_image.shape[0] / dpi, display_image.shape[1] / dpi),
        dpi=dpi
    )
    util.nice_imshow(pl.gca(), display_image, cmap=cm.binary)

    images = np.array([input_img])
    c1 = layer_f(images)
    c1 = np.squeeze(c1)

    if K.image_dim_ordering() == "tf":
        c1 = c1.transpose((2, 0, 1))

    print("c1 shape : ", c1.shape)

    if filter_index is None:
        grid_dim = int(math.ceil(math.sqrt(c1.shape[0])))
        out_img = util.make_mosaic(c1, grid_dim, grid_dim)
    else:
        out_img = c1[filter_index]
    
    if save_dir is not None:
        prefix = "layer_" + str(layer_index)
        if filter_index is not None:
            prefix += "_filter_" + str(filter_index)
        if save_original:
            imageio.imsave(os.path.join(save_dir, prefix + "_input.png"), display_image)
        imageio.imsave(os.path.join(save_dir, prefix + "_output.png"), util.deprocess_image(out_img, alter_dim=False))

    pl.figure(
        figsize=(out_img.shape[0] / dpi, out_img.shape[1] / dpi),
        dpi=dpi
    )
    pl.suptitle(layer.name)
    util.nice_imshow(pl.gca(), out_img, cmap=cm.binary)
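A possible call pattern for `show_filter_responses`, based only on its signature and docstring; the VGG16 model and file paths below are placeholders, not part of the original example:

from keras.applications.vgg16 import VGG16

model = VGG16(weights='imagenet')

# Mosaic of every filter response in layer 3, saved alongside the input image.
show_filter_responses(model, layer_index=3, input_img_path='cat.png',
                      save_dir='responses/', save_original=True)

# Single filter (index 7) from the same layer, display only.
show_filter_responses(model, layer_index=3, input_img_path='cat.png',
                      filter_index=7)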
Example #6
outputs += grads
f_outputs = K.function([combination_image], outputs)

evaluator = Evaluator(height, width, f_outputs)

x = np.random.uniform(0, 255, (1, height, width, 3)) - 128.

prev_loss = None
loss_percentage = 100
i = 0

while (loss_percentage > 5 and i < 9):
    print('Start of iteration', i)
    start_time = time.time()
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss,
                                     x.flatten(),
                                     fprime=evaluator.grads,
                                     maxfun=20)
    print('Current loss value:', min_val)
    end_time = time.time()
    print('Iteration %d completed in %ds' % (i, end_time - start_time))
    if prev_loss is not None:
        loss_percentage = util.calculate_loss_drop_percentage(
            min_val, prev_loss)
        print('Loss drop percentage: ', loss_percentage, '%')
    prev_loss = min_val
    i += 1

x = util.deprocess_image(x, height, width)
util.imshow(x)
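`util.calculate_loss_drop_percentage` is not shown in this snippet. The stopping condition `loss_percentage > 5` suggests it returns the relative decrease of the loss between two consecutive L-BFGS runs, expressed as a percentage; a minimal sketch under that assumption (not the author's implementation):

def calculate_loss_drop_percentage(current_loss, previous_loss):
    # Assumed behaviour: how much the loss dropped since the last iteration, in percent.
    return (previous_loss - current_loss) / previous_loss * 100.0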