Example #1
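The listings below show only the inference entry points; their imports are not part of the page. A plausible set, with the project-specific pieces hedged, would be:

# Standard imports the snippets rely on; `nets` and the image helpers
# (preprocess_reflect_image, preprocess_reflect_video, preprocess_reflect_layer2,
# crop_image, median_filter_all_colours, blend, original_colors, dummy_loss)
# come from the surrounding fast-style-transfer project, whose module layout
# is not shown here.
import time
import argparse

from keras.optimizers import Adam
from scipy.misc import imsave            # older SciPy; use imageio.imwrite on newer stacks
from matplotlib.pyplot import imshow     # or skimage.io.imshow

import nets                              # project module that builds the networks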
def main(args):
    style = args.style
    #img_width = img_height =  args.image_size
    # Derive the output name from a fixed slice of the input path
    output_file = args.input[-14:-4]

    print("Output file stored in", output_file)
    input_file = args.input
    original_color = args.original_color
    blend_alpha = args.blend
    media_filter = args.media_filter

    aspect_ratio, x = preprocess_reflect_image(input_file, size_multiple=4)
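    # preprocess_reflect_image appears to return the source aspect ratio and a
    # reflect-padded batch whose spatial size is a multiple of size_multiple.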

    img_width = img_height = x.shape[1]
    net = nets.image_transform_net(img_width, img_height)
    model = nets.loss_net(net.output, net.input, img_width, img_height, "", 0,
                          0)
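    # The loss network wraps the transform net so the pretrained weights can be
    # loaded into it; only `net`, the feed-forward generator, is used to predict.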

    #model.summary()

    model.compile(
        Adam(),
        dummy_loss)  # Dummy loss since the real objectives are defined as regularizers

    model.load_weights("pretrained/" + style + '_weights.h5', by_name=False)
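    # by_name=False loads weights in topological order, so the checkpoint's
    # architecture must match the network built above.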

    t1 = time.time()
    y = net.predict(x)[0]
    y = crop_image(y, aspect_ratio)

    print("process: %s" % (time.time() - t1))

    ox = crop_image(x[0], aspect_ratio)

    y = median_filter_all_colours(y, media_filter)

    if blend_alpha > 0:
        y = blend(ox, y, blend_alpha)

    if original_color > 0:
        y = original_colors(ox, y, original_color)
    print(output_file)
    imsave('%s_output.png' % output_file, y)
    return y
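
A minimal way to drive this entry point, with flag names inferred from the args.* accesses above (the real parser lives elsewhere in the project), might be:

if __name__ == "__main__":
    # Hypothetical parser; every flag name here is an assumption.
    parser = argparse.ArgumentParser(description="Fast style transfer inference")
    parser.add_argument("--style", required=True, help="name of the pretrained style")
    parser.add_argument("--input", required=True, help="path to the content image")
    parser.add_argument("--original_color", type=float, default=0.0)
    parser.add_argument("--blend", type=float, default=0.0)
    parser.add_argument("--media_filter", type=int, default=3)
    main(parser.parse_args())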
Example #2

# The method below references self.net, so it presumably sits on a class that
# builds the transform network and loads the style weights elsewhere; the class
# name here is a placeholder, since only the method appeared in the source.
class StyleTransformer:
    def pred(self, image, media_filter, blend_alpha, original_color, style,
             output_file):
        aspect_ratio, x = preprocess_reflect_video(image, size_multiple=4)
        t1 = time.time()
        y = self.net.predict(x)[0]
        y = crop_image(y, aspect_ratio)

        print("process: %s" % (time.time() - t1))

        ox = crop_image(x[0], aspect_ratio)

        y = median_filter_all_colours(y, media_filter)

        if blend_alpha > 0:
            y = blend(ox, y, blend_alpha)

        if original_color > 0:
            y = original_colors(ox, y, original_color)

        #imsave('%s_output.png' % output_file, y)
        #imshow(y)
        return y
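
A sketch of how this class might be wired up, mirroring the setup in Example #1 (the real constructor is not shown, so every name below is an assumption):

def build_transformer(style, width, height):
    # Hypothetical helper: attach a transform net to the placeholder class and
    # load pretrained style weights through the loss-net wrapper.
    transformer = StyleTransformer()
    transformer.net = nets.image_transform_net(width, height)
    model = nets.loss_net(transformer.net.output, transformer.net.input,
                          width, height, "", 0, 0)
    model.compile(Adam(), dummy_loss)
    model.load_weights("pretrained/" + style + "_weights.h5", by_name=False)
    return transformer

Each video frame can then be stylized with transformer.pred(frame, media_filter=3, blend_alpha=0.0, original_color=0.0, style=style, output_file="frame").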
Example #3

def main(args):
    texture = args.texture
    style = args.style

    #img_width = img_height =  args.image_size
    output_file = args.output
    input_file = args.input
    original_color = args.original_color
    blend_alpha = args.blend
    media_filter = args.media_filter

    #processing for texture model
    aspect_ratio, x = preprocess_reflect_image(input_file, size_multiple=4)

    img_width = img_height = x.shape[1]
    net = nets.image_transform_net(img_width, img_height)

    model = nets.loss_net(net.output, net.input, img_width, img_height, "", 0,
                          0)

    model.compile(
        Adam(),
        dummy_loss)  # Dummy loss since the real objectives are defined as regularizers

    #load texture model
    model.load_weights(texture, by_name=False)
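    # Unlike Example #1, args.texture is used directly as the checkpoint path
    # (and `style` below is likewise a path to weights, not a style name).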

    t1 = time.time()
    y = net.predict(x)[0]
    y = crop_image(y, aspect_ratio)

    print("process: %s" % (time.time() - t1))

    ox = crop_image(x[0], aspect_ratio)

    y = median_filter_all_colours(y, media_filter)

    if blend_alpha > 0:
        y = blend(ox, y, blend_alpha)

    if original_color > 0:
        y = original_colors(ox, y, original_color)

    imsave('%s_texture.png' % output_file, y)
    imshow(y)

    #processing for second style transform
    aspect_ratio2, x2 = preprocess_reflect_layer2(y, size_multiple=4)
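    # The second pass feeds the stylized array from the texture pass (not a file
    # path) into preprocess_reflect_layer2 before running the style network.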

    img_width2 = img_height2 = x2.shape[1]

    net2 = nets.image_transform_net(img_width2, img_height2)
    model2 = nets.loss_net(net2.output, net2.input, img_width2, img_height2,
                           "", 0, 0)
    model2.compile(Adam(), dummy_loss)
    #load style model
    model2.load_weights(style, by_name=False)

    y2 = net2.predict(x2)[0]
    # Crop with the second pass's aspect ratio so y2 lines up with ox2 below
    y2 = crop_image(y2, aspect_ratio2)

    print("process: %s" % (time.time() - t1))  # cumulative time across both passes

    ox2 = crop_image(x2[0], aspect_ratio2)

    y2 = median_filter_all_colours(y2, media_filter)

    if blend_alpha > 0:
        y2 = blend(ox2, y2, blend_alpha)

    if original_color > 0:
        y2 = original_colors(ox2, y2, original_color)

    #save and display the transformed image
    imsave('%s_output.png' % output_file, y2)
    imshow(y2)
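
For this two-pass variant, a hypothetical parser would need the texture checkpoint and an explicit output prefix in addition to the flags sketched after Example #1:

if __name__ == "__main__":
    # Hypothetical parser for the two-pass variant; flag names are inferred
    # from the args.* accesses above, not taken from the project.
    parser = argparse.ArgumentParser(description="Texture pass followed by style pass")
    parser.add_argument("--texture", required=True, help="path to the texture-model weights (.h5)")
    parser.add_argument("--style", required=True, help="path to the style-model weights (.h5)")
    parser.add_argument("--input", required=True, help="path to the content image")
    parser.add_argument("--output", required=True, help="prefix for _texture.png and _output.png")
    parser.add_argument("--original_color", type=float, default=0.0)
    parser.add_argument("--blend", type=float, default=0.0)
    parser.add_argument("--media_filter", type=int, default=3)
    main(parser.parse_args())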