style_model = Model(vgg.input, symbolic_conv_outputs)

# calculate the targets that are output at each layer
style_layers_outputs = [K.variable(y) for y in style_model.predict(style_img)]

# we will assume the weight of the content loss is 1
# and only weight the style losses
style_weights = [0.2, 0.4, 0.3, 0.5, 0.2]
# create the total loss, which is the sum of the content and style losses
loss = K.mean(K.square(content_model.output - content_target))

for w, symbolic, actual in zip(style_weights, symbolic_conv_outputs, style_layers_outputs):
    # gram_matrix() expects an (H, W, C) tensor as input
    loss += w * style_loss(symbolic[0], actual[0])

# once again, create the gradients and the loss + grads function
# note: it doesn't matter which model's input you use,
# since they both point to the same Keras Input layer in memory
grads = K.gradients(loss, vgg.input)

# just like theano.function
get_loss_and_grads = K.function(
    inputs=[vgg.input],
    outputs=[loss] + grads
)

def get_loss_and_grads_wrapper(x_vec):
    # scipy's optimizer passes in a flat vector and expects
    # a float64 loss and a flat float64 gradient back
    l, g = get_loss_and_grads([x_vec.reshape(*batch_shape)])
    return l.astype(np.float64), g.flatten().astype(np.float64)

final_img = minimize(get_loss_and_grads_wrapper, 10, batch_shape)
plt.imshow(scale_img(final_img))
plt.show()
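The listing uses gram_matrix() and style_loss() without defining them. A minimal sketch of what they could look like, consistent with how they are used above (style_loss compares the Gram matrices of two (H, W, C) feature maps); the normalization constant here is an assumption, not necessarily the exact definition used elsewhere in the script:

from keras import backend as K

def gram_matrix(img):
    # img is an (H, W, C) feature map; move channels first and
    # flatten the spatial dimensions to get a (C, H*W) matrix
    X = K.batch_flatten(K.permute_dimensions(img, (2, 0, 1)))
    # Gram matrix: correlations between channels; dividing by the
    # number of elements is one common normalization choice
    return K.dot(X, K.transpose(X)) / K.cast(K.prod(K.shape(img)), 'float32')

def style_loss(y, t):
    # mean squared difference between the Gram matrix of the generated
    # features (y) and that of the precomputed style targets (t)
    return K.mean(K.square(gram_matrix(y) - gram_matrix(t)))

The Gram matrix records which channels tend to activate together, which is why it serves as a summary of texture/style rather than spatial content.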
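Likewise, minimize() and scale_img() are assumed to be defined earlier in the script. A rough sketch under the assumption that minimize() runs scipy's L-BFGS-B on a flat image vector, which matches the float64 loss/gradient that get_loss_and_grads_wrapper returns; the random starting point, clipping range, and per-step function-evaluation limit are illustrative guesses:

import numpy as np
from scipy.optimize import fmin_l_bfgs_b

def minimize(fn, epochs, batch_shape):
    # start from random noise and repeatedly take L-BFGS-B steps;
    # fn must return (loss, gradient), both float64
    x = np.random.randn(np.prod(batch_shape))
    for i in range(epochs):
        x, l, _ = fmin_l_bfgs_b(func=fn, x0=x, maxfun=20)
        # keep pixel values in a reasonable range for VGG-preprocessed images
        x = np.clip(x, -127, 127)
        print("iter=%s, loss=%s" % (i, l))
    # drop the batch dimension so the result is a single (H, W, 3) image
    return x.reshape(*batch_shape)[0]

def scale_img(x):
    # rescale to [0, 1] so plt.imshow displays it correctly
    x = x - x.min()
    return x / x.max()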