def apply_style(content, style, media_filter=3):
    """Apply a pretrained fast-style-transfer model to a content image.

    Params
        ======
            content (ndarray): content image passed by user
            style (str): style name; selects the pretrained weights file
            media_filter (int): size of the median filter applied to the output

    Returns the stylized image as an ndarray.
    """
    # Reflect-pad so the sides are multiples of 4 (required by the network).
    aspect_ratio, x = preprocess_reflect_image(content, size_multiple=4)

    img_width = img_height = x.shape[1]
    model = image_transform_net(img_width, img_height)

    model.compile(
        Adam(),
        dummy_loss)  # Dummy loss since we are learning from regularizes

    model.load_weights(STYLE_MODEL_DIR + style + '_weights.h5', by_name=True)

    t1 = time.time()
    y = model.predict(x)[0]
    y = crop_image(y, aspect_ratio)

    print("process: %s" % (time.time() - t1))

    # Smooth per-channel noise introduced by the network.
    y = median_filter_all_colours(y, media_filter)

    # Release model/GPU memory between requests.
    del model
    K.clear_session()
    gc.collect()
    return y
# Example #2
def transfer(base_image,
             syle_image,
             original_color=0,
             blend_alpha=0,
             media_filter=3):
    """Style-swap transfer: re-render *base_image* using *syle_image* features.

    Params
        ======
            base_image (ndarray): content image passed by user
            syle_image (ndarray): style image (misspelled name kept for
                backward compatibility with existing callers)
            original_color (float): > 0 re-applies the original colors
            blend_alpha (float): > 0 blends the result with the original image
            media_filter (int): unused here; median filtering is disabled

    Returns the stylized image encoded as JPEG bytes.
    """
    # Output is produced at the network's native size; no aspect correction.
    aspect_ratio = 1

    content_processed = style_swap_preprocess_image(base_image)
    style_processed = style_swap_preprocess_image(syle_image)

    img_width = img_height = content_processed.shape[1]
    print(img_width, img_height)

    # Encoder performs the style swap on VGG feature maps.
    encode_net = build_encode_net_with_swap_3_1((img_width, img_height, 3))
    print('encode_net')

    # Inverse net decodes the swapped features back to image space
    # (feature maps are 1/4 the spatial size, 256 channels).
    inverse_net = InverseNet_3_1(
        (int(img_width / 4), int(img_height / 4), 256))
    inverse_net.load_weights(file_path.MODELS_PATH +
                             "/style_swap/pretrained/inverse_net_vgg19.h5",
                             by_name=True)
    print('Model loaded')

    inverse_net.compile(optimizer="adam", loss='mse')

    image_feature = encode_net.predict([content_processed, style_processed])

    t1 = time.time()
    y = inverse_net.predict([image_feature])[0]
    y = crop_image(y, aspect_ratio)

    print("process: %s" % (time.time() - t1))

    ox = crop_image(content_processed[0], aspect_ratio)

    if blend_alpha > 0:
        y = blend(ox, y, blend_alpha)

    if original_color > 0:
        y = original_colors(ox, y, original_color)

    output = BytesIO()
    im = toimage(y)
    im.save(output, format='JPEG')

    # Release model/GPU memory between requests.
    del encode_net
    del inverse_net
    K.clear_session()
    gc.collect()
    return output.getvalue()
# Example #3
def transfer(base_image,
             syle_image_path,
             color_adjusting_mode=0,
             blending_alpha=0,
             median_filter_size=3):
    """Fast style transformation.

    Params
        ======
            base_image (ndarray): content image passed by user
            syle_image_path (str): style image stored in server
            color_adjusting_mode (float): Color adjusting mode.
                0: None,
                1: Preserve color,
                2: Blend with original color
            blending_alpha (float): the degree of the blending images, from 0 to 100
            median_filter_size (int): the size of the median filter

    Returns the stylized image encoded as JPEG bytes.
    """
    style = split_path(syle_image_path)
    input_file = base_image
    color_adjusting_mode = int(color_adjusting_mode)
    blending_alpha = float(blending_alpha) / 100  # scale to 0 ~ 1
    """ Preprocessing """
    # Reflect-pad so the sides are multiples of 4 (required by the network).
    aspect_ratio, x = preprocess_reflect_image(input_file, size_multiple=4)
    img_width = img_height = x.shape[1]
    """ Load Model """
    model = nets.depthwise_image_transform_net(img_width, img_height)
    model.compile(Adam(), dummy_loss)  # dummy loss: inference only
    model.load_weights(file_path.MODELS_PATH +
                       "/fast_style_transfer/pretrained/" + style +
                       '_weights.h5')
    print('Model loaded')
    """ Start transfer """
    t1 = time.time()
    y = model.predict(x)[0]
    """ Post processing """
    y = crop_image(y, aspect_ratio)
    print("process: %s" % (time.time() - t1))
    ox = crop_image(x[0], aspect_ratio)
    y = median_filter_all_colours(y, median_filter_size)
    """ Color adjusting """
    if color_adjusting_mode == 1:
        y = original_color_transform(ox, y)
    elif color_adjusting_mode == 2:
        y = blend_with_original_colors(ox, y, blending_alpha)
    """ Return the processed image """
    output = BytesIO()
    im = toimage(y)
    im.save(output, format='JPEG')
    # Release model/GPU memory between requests.
    del model
    K.clear_session()
    gc.collect()
    return output.getvalue()
# Example #4
def transfer(base_image,
             syle_image_path,
             original_color=0,
             blend=0,
             media_filter=3):
    """Fast style transformation with optional blending / color restore.

    Params
        ======
            base_image (ndarray): content image passed by user
            syle_image_path (str): style image stored in server
            original_color (float): > 0 re-applies the original colors
            blend (float): > 0 blends the result with the original image
            media_filter (int): the size of the median filter

    Returns the stylized image encoded as JPEG bytes.
    """
    style = split_path(syle_image_path)
    # BUG FIX: the *blend* parameter shadows the module-level blend()
    # function, so calling blend(...) below used to raise TypeError whenever
    # blend > 0.  Rebind the function before the name is reused.  (Parameter
    # name kept for backward compatibility with keyword callers.)
    blend_alpha = blend
    blend_fn = globals().get('blend')  # presumably the module-level blend()

    # Reflect-pad so the sides are multiples of 4 (required by the network).
    aspect_ratio, x = preprocess_reflect_image(base_image, size_multiple=4)

    img_width = img_height = x.shape[1]
    model = nets.image_transform_net(img_width, img_height)

    model.compile(
        Adam(),
        dummy_loss)  # Dummy loss since we are learning from regularizes

    model.load_weights(file_path.MODELS_PATH +
                       "/fast_style_transfer/pretrained/" + style +
                       '_weights.h5',
                       by_name=True)
    print('Model loaded')

    t1 = time.time()
    y = model.predict(x)[0]
    y = crop_image(y, aspect_ratio)

    print("process: %s" % (time.time() - t1))

    ox = crop_image(x[0], aspect_ratio)

    y = median_filter_all_colours(y, media_filter)

    if blend_alpha > 0:
        y = blend_fn(ox, y, blend_alpha)

    if original_color > 0:
        y = original_colors(ox, y, original_color)

    output = BytesIO()
    im = toimage(y)
    im.save(output, format='JPEG')
    # Release model/GPU memory between requests.
    del model
    K.clear_session()
    gc.collect()
    return output.getvalue()
# Example #5
def apply_style(content, style, median_filter_size=3):
    """Apply style using fast style transformation.

    Params
        ======
            content (ndarray): content image passed by user
            style (str): style image stored in server
            median_filter_size (int): the size of the median filter

    Returns the stylized image as an ndarray.
    """
    """ Preprocessing """
    # Reflect-pad so the sides are multiples of 4 (required by the network).
    aspect_ratio, x = preprocess_reflect_image(content, size_multiple=4)
    img_width = img_height = x.shape[1]
    """ Load Model """
    model = depthwise_image_transform_net(img_width, img_height)
    model.compile(
        Adam(),
        dummy_loss)  # Dummy loss since we are learning from regularizes
    model.load_weights(STYLE_MODEL_DIR + style + '_weights.h5')
    """ Start transfer """
    t1 = time.time()
    y = model.predict(x)[0]
    print("process: %s" % (time.time() - t1))
    """ Post processing """
    y = crop_image(y, aspect_ratio)
    y = median_filter_all_colours(y, median_filter_size)

    # Release model/GPU memory between requests.
    del model
    K.clear_session()
    gc.collect()
    return y
# Example #6
def main(args):
    """Command-line entry point: stylize *args.input* and save a PNG.

    Reads style name, input/output paths and post-processing options from
    the parsed *args* namespace; writes '<output>_output.png' to disk.
    """
    style = args.style
    output_file = args.output
    input_file = args.input
    original_color = args.original_color
    blend_alpha = args.blend
    media_filter = args.media_filter

    # Reflect-pad so the sides are multiples of 4 (required by the network).
    aspect_ratio, x = preprocess_reflect_image(input_file, size_multiple=4)

    img_width = img_height = x.shape[1]
    net = nets.image_transform_net(img_width, img_height)
    # loss_net wraps the transform net; only the transform net is used for
    # prediction below.
    model = nets.loss_net(net.output, net.input, img_width, img_height, "", 0,
                          0)

    model.compile(
        Adam(),
        dummy_loss)  # Dummy loss since we are learning from regularizes

    model.load_weights("pretrained/" + style + '_weights.h5', by_name=False)

    t1 = time.time()
    y = net.predict(x)[0]
    y = crop_image(y, aspect_ratio)

    print("process: %s" % (time.time() - t1))

    ox = crop_image(x[0], aspect_ratio)

    y = median_filter_all_colours(y, media_filter)

    if blend_alpha > 0:
        y = blend(ox, y, blend_alpha)

    if original_color > 0:
        y = original_colors(ox, y, original_color)

    imsave('%s_output.png' % output_file, y)
def transfer(base_image, style_image, color_adjusting_mode=0, blending_alpha=0, median_filter_size=3):
    """Style swap transformation.

    Params
        ======
            base_image (ndarray): content image passed by user
            style_image (ndarray): style image passed by user
            color_adjusting_mode (float): Color adjusting mode.
                0: None,
                1: Preserve color,
                2: Blend with original color
            blending_alpha (float): the degree of the blending images, from 0 to 100
            median_filter_size (int): the size of the median filter

    Returns the stylized image encoded as JPEG bytes.
    """
    IMG_WIDTH = IMG_HEIGHT = 700

    color_adjusting_mode = int(color_adjusting_mode)
    blending_alpha = float(blending_alpha) / 100  # scale to 0 ~ 1

    # Images are resized to a fixed square; no aspect correction needed.
    aspect_ratio = 1

    """ Preprocessing """
    # Keep an unnormalized copy of the content for color adjusting below.
    content_original = style_swap_preprocess_image(base_image, IMG_HEIGHT, IMG_WIDTH, preserve_original=True)
    content_processed = style_swap_preprocess_image(base_image, IMG_HEIGHT, IMG_WIDTH)
    style_processed = style_swap_preprocess_image(style_image, IMG_HEIGHT, IMG_WIDTH)

    img_width = img_height = content_processed.shape[1]
    print(img_width, img_height)

    """ Load Model """
    # Encoder performs the style swap on VGG feature maps.
    encode_net = build_encode_net_with_swap_3_1((img_height, img_width, 3))
    print('Encode Model loaded')
    # Inverse net decodes the swapped features (1/4 spatial size, 256 ch).
    inverse_net = InverseNet_3_1((int(img_height / 4), int(img_width / 4), 256))
    inverse_net.load_weights(file_path.MODELS_PATH + "/style_swap/pretrained/inverse_net_vgg19.h5", by_name=True)
    print('Model loaded')
    inverse_net.compile(optimizer="adam", loss='mse')

    """ Start transfer """
    t1 = time.time()
    image_feature = encode_net.predict([content_processed, style_processed])
    y = inverse_net.predict([image_feature])[0]
    print("process: %s" % (time.time() - t1))

    """ Post processing """
    y = crop_image(y, aspect_ratio)
    y = median_filter_all_colours(y, median_filter_size)
    ox = crop_image(content_original[0], aspect_ratio)

    """ Color adjusting """
    if color_adjusting_mode == 1:
        y = original_color_transform(ox, y)
    elif color_adjusting_mode == 2:
        y = blend_with_original_colors(ox, y, blending_alpha)

    # Trim a 10-pixel border to hide edge artifacts from the decoder.
    y = y[10:-10, 10:-10]
    """ Return the processed image """
    output = BytesIO()
    im = toimage(y)
    im.save(output, format='JPEG')
    # Release model/GPU memory between requests.
    del encode_net
    del inverse_net
    K.clear_session()
    gc.collect()
    return output.getvalue()
# Example #8
def transfer(content_image,
             foresyle_image_path,
             backstyle_image_path,
             color_adjusting_mode=0,
             blending_alpha=0,
             median_filter_size=3):
    """Mask style transformation.

    Params
        ======
            content_image (ndarray): content image passed by user
            foresyle_image_path (str): foreground style image stored in server
            backstyle_image_path (str): background style image stored in server
            color_adjusting_mode (float): Color adjusting mode.
                0: None,
                1: Preserve color,
                2: Blend with original color
            blending_alpha (float): the degree of the blending images, from 0 to 100
            median_filter_size (int): unused; kept for interface compatibility

    Returns the stylized image encoded as JPEG bytes.
    """
    content_image = imread(content_image, mode="RGB")
    color_adjusting_mode = int(color_adjusting_mode)
    blending_alpha = float(blending_alpha) / 100  # scale to 0 ~ 1
    """ Preprocessing """
    content_image = check_resize_img(content_image)
    # Reflect-pad so the sides are multiples of 4 (required by the network).
    aspect_ratio, x = preprocess_reflect_image(content_image, size_multiple=4)
    img_width = img_height = x.shape[1]

    forestyle = split_path(foresyle_image_path)
    backstyle = split_path(backstyle_image_path)

    print(forestyle)
    print(backstyle)
    """ Start transfer """
    start_time = time.time()
    # Stylize the whole image twice, then composite using the detected mask:
    # foreground style where the mask is set, background style elsewhere.
    mask_i = detect_mask(content_image)
    generated_1 = apply_style(style=forestyle, content=content_image)
    print("generated_1", generated_1.shape)
    generated_2 = apply_style(style=backstyle, content=content_image)
    print("generated_2", generated_2.shape)

    generated_3 = apply_style_mask(content=generated_1,
                                   generated=generated_2,
                                   mask=mask_i[:, :, 0])
    print("generated_3", generated_3.shape)
    print("total_transfer_time:", time.time() - start_time)

    ox = crop_image(x[0], aspect_ratio)
    """ Color adjusting """
    print(color_adjusting_mode)
    if color_adjusting_mode == 1:
        y = original_color_transform(ox, generated_3)
    elif color_adjusting_mode == 2:
        y = blend_with_original_colors(ox, generated_3, blending_alpha)
    else:
        y = generated_3

    output = BytesIO()
    im = toimage(y)
    im.save(output, format='JPEG')

    # Release model/GPU memory between requests.
    K.clear_session()
    gc.collect()
    return output.getvalue()