Code example #1
0
    def train_step(self,
                   image,
                   extractor,
                   loss_function,
                   opt,
                   total_variation_weight=0):
        """Run one optimization step, mutating ``image`` in place.

        Records the forward pass under a gradient tape, combines the
        style/content loss with an optional total-variation penalty,
        applies the resulting gradient via ``opt``, and finally clips the
        image back into the valid [0, 1] range.

        Args:
            image: trainable image tensor (assumed to be a ``tf.Variable``,
                since it is updated via ``assign`` — TODO confirm).
            extractor: callable producing the feature outputs fed to
                ``loss_function``.
            loss_function: maps extractor outputs to a scalar loss.
            opt: optimizer whose ``apply_gradients`` performs the update.
            total_variation_weight: weight of the smoothness penalty;
                0 disables it.
        """
        with tf.GradientTape() as tape:
            features = extractor(image)
            # Fold the optional total-variation term into a single loss value.
            step_loss = (loss_function(features)
                         + total_variation_weight * tf.image.total_variation(image))

        gradients = tape.gradient(step_loss, image)
        opt.apply_gradients([(gradients, image)])
        # Keep pixel values in a displayable range after the update.
        image.assign(ImageUtils.clip_0_1(image))
Code example #2
0
        # Trying face detection with OpenCV
        x, y, w, h = detectAnime(path_to_pics + filename, i)
        if x is not None:
            print(x, y, w, h)
        style_image = ImageUtils.grab_image("out.png")
        # style_image = tf.image.resize_with_crop_or_pad(style_image, 1000,1000)

        # Testing code ends here

        style_orig = style_image
        style_image = ImageUtils.image_op(
            images=[content_image, style_image],
            # Change this from a + b to something else
            # May use CNN to map features
            op=lambda a, b: ImageUtils.clip_0_1(a + b))

        print(" - Generating image", i)
        stylized_image = hub_model(tf.constant(content_image),
                                   tf.constant(style_image))[0]

        # Uncomment this and comment out previous stylized_image definition to
        # map style image on content image without style transfer
        # stylized_image = style_image

        # Clear figure and update images
        fig.clf()
        plotset1 = (  # This shows 2x3 5-plot layout
            ((2, 3, 1), content_image, 'Content Image'),
            ((2, 3, 4), stylized_image, 'New Image'),
            ((1, 3, 2),
Code example #3
0
File: driver.py  Project: vchen8761/AI-Style-Transfer
# Also, this high frequency component is basically an edge-detector.


def high_pass_x_y(image):
    """Return first-order horizontal and vertical differences of ``image``.

    Acts as a crude edge detector, used here to visualize high-frequency
    artifacts. ``image`` is indexed as a 4-D batch tensor
    (batch, height, width, channels) — confirmed by the slicing below.

    Returns:
        Tuple ``(x_var, y_var)`` where ``x_var`` differences along the
        width axis and ``y_var`` along the height axis; each output is one
        element shorter than the input along its differenced axis.
    """
    right, left = image[:, :, 1:, :], image[:, :, :-1, :]
    below, above = image[:, 1:, :, :], image[:, :-1, :, :]
    return right - left, below - above


def _show_delta_panel(position, deltas, title):
    """Draw one clipped delta image in the 2x2 grid at `position`."""
    plt.subplot(2, 2, position)
    ImageUtils.imshow(ImageUtils.clip_0_1(2 * deltas + 0.5), title)


# Compare high-frequency (edge) content of the original content image
# against the styled result, as a 2x2 grid of delta visualizations.
x_deltas, y_deltas = high_pass_x_y(transferer.content_image)

plt.figure(figsize=(14, 10))
_show_delta_panel(1, y_deltas, "Horizontal Deltas: Original")
_show_delta_panel(2, x_deltas, "Vertical Deltas: Original")

# NOTE(review): `image` is defined outside this chunk — presumably the
# styled output; confirm against the surrounding script.
x_deltas, y_deltas = high_pass_x_y(image)

_show_delta_panel(3, y_deltas, "Horizontal Deltas: Styled")
_show_delta_panel(4, x_deltas, "Vertical Deltas: Styled")