def reconstruct_style_image(img, conv_layer, file_name, plot_name):
    # convert image to array and preprocess for vgg
    x = Tools.convert_image_to_array_vgg(img)

    # these shapes are used throughout the rest of this function
    batch_shape = x.shape
    shape = x.shape[1:]

    vgg = Tools.VGG19_AvgPool(shape)
    style_layers_features_outputs, symbolic_conv_outputs, style_features_extractor_model = get_style_image_features(
        x, conv_layer, vgg)

    # calculate the total style loss
    loss = 0
    for symbolic, actual in zip(symbolic_conv_outputs,
                                style_layers_features_outputs):
        # gram_matrix() expects a (H, W, C) tensor as input
        if conv_layer == 1:
            loss += style_loss(symbolic[0], actual)
        else:
            loss += style_loss(symbolic[0], actual[0])

    grads = K.gradients(loss, style_features_extractor_model.input)

    # just like theano.function
    get_loss_and_grads = K.function(
        inputs=[style_features_extractor_model.input], outputs=[loss] + grads)

    def get_loss_and_grads_wrapper(x_vec):
        l, g = get_loss_and_grads([x_vec.reshape(*batch_shape)])
        return l.astype(np.float64), g.flatten().astype(np.float64)

    final_image, losses = Tools.LBFGS_Optimizer(get_loss_and_grads_wrapper, 10,
                                                batch_shape)

    # plot loss
    plt.plot(losses)
    plt.savefig(plot_name)
    plt.show()

    # save image
    final_image = Tools.scale_img(final_image)
    plt.imshow(final_image)
    plt.imsave(file_name, final_image)
    plt.show()
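

# style_loss() (and Tools.style_loss used in style_transfer below) is not
# defined in this snippet.  Based on the "(H, W, C)" comment above, it is
# assumed to compare Gram matrices of the generated and style features; the
# following is a minimal sketch of that assumption (not the actual Tools
# implementation), reusing the `K` backend alias imported elsewhere in this file.
def gram_matrix_sketch(feature_map):
    # feature_map: a single (H, W, C) activation map
    F = K.batch_flatten(K.permute_dimensions(feature_map, (2, 0, 1)))  # (C, H*W)
    G = K.dot(F, K.transpose(F))
    # normalise by the number of elements so layers of different sizes are comparable
    return G / K.cast(K.prod(K.shape(feature_map)), K.floatx())


def style_loss_sketch(y, t):
    # y: generated-image features, t: precomputed style-image features, both (H, W, C)
    return K.mean(K.square(gram_matrix_sketch(y) - gram_matrix_sketch(t)))
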

# Example 2

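
# Tools.convert_image_to_array_vgg is another helper that is not shown here.
# The "preprocess for vgg" comments suggest it turns a (H, W, 3) image into
# the (1, H, W, 3) batch that VGG19 expects and applies Keras' VGG
# preprocessing (BGR channel order, ImageNet mean subtraction).  A sketch
# under that assumption:
from keras.applications.vgg19 import preprocess_input
from keras.preprocessing.image import img_to_array


def convert_image_to_array_vgg_sketch(img):
    x = img_to_array(img)          # (H, W, 3) float array
    x = np.expand_dims(x, axis=0)  # add a batch dimension -> (1, H, W, 3)
    return preprocess_input(x)     # BGR + ImageNet mean subtraction
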
def reconstruct_content_image(img, conv_layer, file_name, plot_name):
    """:param
    This function reconstructs the input image from the given convolution layer number
    in VGG19 architecture
    """
    # convert image to array and preprocess for vgg
    x = Tools.convert_image_to_array_vgg(img)

    # these shapes are used throughout the rest of this function
    batch_shape = x.shape
    shape = x.shape[1:]
    # see the image
    # plt.imshow(img)
    # plt.show()

    # make a content model
    # try different cutoffs to see the images that result
    vgg = Tools.VGG19_AvgPool(shape)
    content_features, content_model_extractor = get_content_image_features(
        x, conv_layer, vgg)

    # define our loss in keras
    loss = K.mean(K.square(content_features - content_model_extractor.output))

    # gradients which are needed by the optimizer
    grads = K.gradients(loss, content_model_extractor.input)

    # just like theano.function
    get_loss_and_grads = K.function(inputs=[content_model_extractor.input],
                                    outputs=[loss] + grads)

    def get_loss_and_grads_wrapper(x_vec):
        # scipy's minimizer allows us to pass back
        # function value f(x) and its gradient f'(x)
        # simultaneously, rather than using the fprime arg
        #
        # we cannot use get_loss_and_grads() directly
        # input to minimizer func must be a 1-D array
        # input to get_loss_and_grads must be [batch_of_images]
        #
        # gradient must also be a 1-D array
        # and both loss and gradient must be np.float64
        # will get an error otherwise

        l, g = get_loss_and_grads([x_vec.reshape(*batch_shape)])
        return l.astype(np.float64), g.flatten().astype(np.float64)

    final_image, losses = Tools.LBFGS_Optimizer(get_loss_and_grads_wrapper, 10,
                                                batch_shape)

    # plot loss
    plt.plot(losses)
    plt.savefig(plot_name)
    plt.show()

    # save image
    final_image = Tools.scale_img(final_image)
    plt.imshow(final_image)
    plt.imsave(file_name, final_image)
    plt.show()
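

# Tools.LBFGS_Optimizer is external to this snippet.  Based on how it is
# called (a loss-and-gradient callable, an iteration count of 10, and
# batch_shape) and on the contract spelled out in get_loss_and_grads_wrapper
# above (1-D float64 input, returns loss and flattened gradient), it is
# assumed to wrap scipy's L-BFGS routine roughly like this sketch:
from scipy.optimize import fmin_l_bfgs_b


def lbfgs_optimizer_sketch(fn, epochs, batch_shape):
    # start from a small random image and let L-BFGS refine it
    losses = []
    x = np.random.randn(np.prod(batch_shape))
    for i in range(epochs):
        x, l, _ = fmin_l_bfgs_b(func=fn, x0=x, maxfun=20)
        x = np.clip(x, -127, 127)  # keep values in a sensible range for VGG inputs
        print("iteration %d, loss %.2f" % (i, l))
        losses.append(l)
    # reshape the flat vector back to an image and drop the batch dimension
    return x.reshape(*batch_shape)[0], losses
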

# Example 3

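
# Tools.VGG19_AvgPool is also defined outside this snippet.  The name suggests
# the usual Gatys-style modification: a VGG19 without the classifier head in
# which every max-pooling layer is swapped for average pooling.  A sketch
# under that assumption (not the actual Tools code):
from keras.applications.vgg19 import VGG19
from keras.layers import AveragePooling2D, MaxPooling2D
from keras.models import Sequential


def vgg19_avgpool_sketch(shape):
    vgg = VGG19(input_shape=shape, weights='imagenet', include_top=False)
    model = Sequential()
    for layer in vgg.layers:
        if isinstance(layer, MaxPooling2D):
            # replace max pooling with average pooling for smoother gradients
            model.add(AveragePooling2D())
        else:
            model.add(layer)
    return model
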
def style_transfer(content_image_path, style_image_path):
    mydir = './Outputs/style_transfer_details'

    # 1: load the content and style images, then rescale the style image to the content image's dimensions
    content_img = Tools.load_img_and_preprocess_resize(content_image_path, resize=512)
    h, w = content_img.shape[1:3]

    # test_content_reconstruction(content_img[0], mydir, [16, 17, 18, 19], 0)

    style_img = Tools.load_img_and_preprocess_shape(style_image_path, (h, w))
    # show all blocks output
    # test_style_reconstruction(style_img[0], mydir, 1)

    batch_shape = content_img.shape
    shape = content_img.shape[1:]

    vgg = Tools.VGG19_AvgPool(shape)
    vgg.summary()  # summary() prints directly; wrapping it in print() just adds a stray "None"

    # 2: get content and style features + features extractor model
    content_features, content_features_extractor_model = \
        ContentReconstruction.get_content_image_features(content_img, 14, vgg)
    style_layers_features_outputs, symbolic_conv_outputs, style_features_extractor_model = \
        StyleReconstruction.get_style_image_features(style_img, 5, vgg)

    # we will assume the weight of the content loss is 1
    # and only weight the style losses
    style_weights = [0.2, 0.4, 0.3, 0.5, 0.2]
    # style_weights = [0.4, 0.6, 0.6, 0.7, 0.4]

    # create the total loss which is the sum of content + style loss
    loss = 1 * K.mean(K.square(content_features_extractor_model.output - content_features))

    # note: 'weight', not 'w', so we don't shadow the image width computed above
    for weight, symbolic, actual in zip(style_weights, symbolic_conv_outputs, style_layers_features_outputs):
        # gram_matrix() expects a (H, W, C) tensor as input
        loss += weight * Tools.style_loss(symbolic[0], actual[0])

    # loss += 0.0001 * tf.image.total_variation(vgg.input)

    # once again, create the gradients and loss + grads function
    # note: it doesn't matter which model's input you use
    # they are both pointing to the same keras Input layer in memory
    grads = K.gradients(loss, vgg.input)

    # just like theano.function
    get_loss_and_grads = K.function(
        inputs=[vgg.input],
        outputs=[loss] + grads
    )

    def get_loss_and_grads_wrapper(x_vec):
        l, g = get_loss_and_grads([x_vec.reshape(*batch_shape)])
        return l.astype(np.float64), g.flatten().astype(np.float64)

    final_image, losses = Tools.LBFGS_Optimizer(get_loss_and_grads_wrapper, 10, batch_shape)

    # plot loss
    plt.plot(losses)
    # plt.savefig(plot_name)
    plt.show()

    # save image
    final_image = Tools.scale_img(final_image)
    plt.imshow(final_image)
    # plt.imsave(file_name, final_image)
    plt.show()
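

# Tools.scale_img is assumed to min-max rescale the raw optimiser output to
# [0, 1] so that plt.imshow/plt.imsave can handle it.  A sketch of that
# assumption, followed by a hypothetical invocation (the image paths are
# placeholders, not files from the original project):
def scale_img_sketch(x):
    x = x - x.min()
    return x / x.max()


if __name__ == '__main__':
    # hypothetical example usage
    style_transfer('content.jpg', 'style.jpg')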