Example #1
def main(args):
    weights = os.listdir("./models/fast_style_transfer/original_pretrained/")
    styles = [filename.split("_weights")[0] for filename in weights]
    if len(styles) == 0:
        print(
            "You have no original pretrained models; please check the link on GitHub to download them"
        )
    else:
        print("You have {} pretrained models that can be used to produce images".
              format(len(styles)))

    datagen = image.ImageDataGenerator()

    image_path = args.path
    epoch = 82783  # number of content images to run through each model (the size of the MS COCO train2014 set)
    batch_size = 1
    img_width = img_height = args.image_size

    content_gen = datagen.flow_from_directory(image_path,
                                              shuffle=False,
                                              target_size=(img_width,
                                                           img_height),
                                              batch_size=batch_size,
                                              class_mode=None)

    start_time = time.time()

    for style in styles:
        print("\nProcessing {}".format(style))
        single_model_start_time = time.time()
        model = nets.image_transform_net(img_width, img_height)
        model.load_weights(
            "./models/fast_style_transfer/original_pretrained/" + style +
            '_weights.h5')
        for i in range(epoch):
            content = content_gen.next()

            result = model.predict(content)

            image_name = content_gen.filenames[i][2:]  # drop the "<class-dir>/" prefix (e.g. "0/") that flow_from_directory prepends

            if i % 10 == 0:
                print("\repoch: %d" % (i), end="")

            save_without_deprocess(result,
                                   img_width,
                                   img_height,
                                   save=True,
                                   name=image_name,
                                   iterate=i,
                                   style=style)
        print("\nProcessing {} finish in {}".format(
            style,
            time.time() - single_model_start_time))
        gc.collect()

    end_time = time.time() - start_time
    print("\nTotal Time cost: ", end_time)
Example #2
def apply_style(content, style):
    print(style)

    input_file = content

    media_filter = 3  # kernel size for the post-processing median filter

    aspect_ratio, x = preprocess_reflect_image(input_file, size_multiple=4)

    img_width = img_height = x.shape[1]
    model = image_transform_net(img_width, img_height)

    model.compile(
        Adam(),
        dummy_loss)  # Dummy loss since we are learning from regularizers

    model.load_weights(STYLE_MODEL_DIR + style + '_weights.h5', by_name=True)

    t1 = time.time()
    y = model.predict(x)[0]
    y = crop_image(y, aspect_ratio)

    print("process: %s" % (time.time() - t1))

    y = median_filter_all_colours(y, media_filter)

    del model
    K.clear_session()
    gc.collect()
    return y
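
A hedged usage sketch for apply_style: the first argument is assumed to be an image path accepted by preprocess_reflect_image, and "wave" stands in for whatever <style>_weights.h5 files actually exist under STYLE_MODEL_DIR.

stylized = apply_style("content.jpg", "wave")  # hypothetical file name and style name

# The result is a cropped, median-filtered image array; assuming its values are
# already in the 0-255 range, it could be saved with something like
# imageio.imwrite("stylized.jpg", stylized.astype("uint8")).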
Example #3
def transfer(base_image,
             style_image_path,
             original_color=0,
             blend_alpha=0,
             media_filter=3):
    style = split_path(style_image_path)
    input_file = base_image

    aspect_ratio, x = preprocess_reflect_image(input_file, size_multiple=4)

    img_width = img_height = x.shape[1]
    model = nets.image_transform_net(img_width, img_height)

    model.compile(
        Adam(),
        dummy_loss)  # Dummy loss since we are learning from regularizers

    model.load_weights(file_path.MODELS_PATH +
                       "/fast_style_transfer/pretrained/" + style +
                       '_weights.h5',
                       by_name=True)
    print('Model loaded')

    t1 = time.time()
    y = model.predict(x)[0]
    y = crop_image(y, aspect_ratio)

    print("process: %s" % (time.time() - t1))

    ox = crop_image(x[0], aspect_ratio)

    y = median_filter_all_colours(y, media_filter)

    if blend_alpha > 0:
        y = blend(ox, y, blend_alpha)

    if original_color > 0:
        y = original_colors(ox, y, original_color)

    output = BytesIO()
    im = toimage(y)  # toimage (from scipy.misc in code of this vintage) was removed in SciPy >= 1.2
    im.save(output, format='JPEG')
    del model
    K.clear_session()
    gc.collect()
    return output.getvalue()
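
A hedged usage sketch for transfer: it returns encoded JPEG bytes, so the result can be written straight to disk or returned from a web handler. The file names and parameter values below are illustrative only.

jpeg_bytes = transfer("content.jpg", "/path/to/styles/wave.jpg",
                      original_color=0.3, blend_alpha=0.5)
with open("stylized.jpg", "wb") as f:
    f.write(jpeg_bytes)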
Example #4
def main(args):
    style = args.style
    #img_width = img_height =  args.image_size
    output_file = args.output
    input_file = args.input
    original_color = args.original_color
    blend_alpha = args.blend
    media_filter = args.media_filter

    aspect_ratio, x = preprocess_reflect_image(input_file, size_multiple=4)

    img_width = img_height = x.shape[1]
    net = nets.image_transform_net(img_width, img_height)
    model = nets.loss_net(net.output, net.input, img_width, img_height, "", 0,
                          0)

    #model.summary()

    model.compile(
        Adam(),
        dummy_loss)  # Dummy loss since we are learning from regularizers

    model.load_weights("pretrained/" + style + '_weights.h5', by_name=False)

    t1 = time.time()
    y = net.predict(x)[0]
    y = crop_image(y, aspect_ratio)

    print("process: %s" % (time.time() - t1))

    ox = crop_image(x[0], aspect_ratio)

    y = median_filter_all_colours(y, media_filter)

    if blend_alpha > 0:
        y = blend(ox, y, blend_alpha)

    if original_color > 0:
        y = original_colors(ox, y, original_color)

    imsave('%s_output.png' % output_file, y)
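
A minimal command-line driver for this entry point might look like the sketch below; the flag names simply mirror the attributes read from args above and are an assumption, not the original repository's parser.

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Apply one pretrained style to a single image")
    parser.add_argument("--style", required=True, help="style name matching pretrained/<style>_weights.h5")
    parser.add_argument("--input", required=True, help="path to the content image")
    parser.add_argument("--output", required=True, help="prefix for the '<output>_output.png' result file")
    parser.add_argument("--original_color", type=float, default=0, help="0-1, how much of the original colors to keep")
    parser.add_argument("--blend", type=float, default=0, help="0-1, alpha for blending the stylized result with the original")
    parser.add_argument("--media_filter", type=int, default=3, help="median filter kernel size")
    main(parser.parse_args())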
Example #5
def main(args):
    style_weight = args.style_weight
    content_weight = args.content_weight
    tv_weight = args.tv_weight
    style = args.style
    img_width = img_height = args.image_size

    style_image_path = get_style_img_path(style)

    net = nets.image_transform_net(img_width, img_height, tv_weight)
    model = nets.loss_net(net.output, net.input, img_width, img_height,
                          style_image_path, content_weight, style_weight)
    model.summary()

    nb_epoch = 82785 * 2  # roughly two passes over the training images
    train_batchsize = 1
    train_image_path = r"E:\cocodataset"  # raw string so the backslash is not treated as an escape

    learning_rate = 1e-3  # not currently passed to the optimizer below
    optimizer = Adam()  # Adam(lr=learning_rate, beta_1=0.99)

    model.compile(
        optimizer,
        dummy_loss)  # Dummy loss since we are learning from regularizers

    datagen = ImageDataGenerator()

    dummy_y = np.zeros(
        (train_batchsize, img_width, img_height,
         3))  # Dummy output, not used since we use regularizers to train

    #model.load_weights(style+'_weights.h5',by_name=False)

    skip_to = 0  # set to a positive image index to resume training partway through the dataset

    i = 0
    t1 = time.time()
    for x in datagen.flow_from_directory(train_image_path,
                                         class_mode=None,
                                         batch_size=train_batchsize,
                                         target_size=(img_width, img_height),
                                         shuffle=False):
        if i > nb_epoch:
            break

        if i < skip_to:
            i += train_batchsize
            if i % 1000 == 0:
                print("skip to: %d" % i)

            continue

        hist = model.train_on_batch(x, dummy_y)

        if i % 50 == 0:
            print(hist, (time.time() - t1))
            t1 = time.time()

        if i % 500 == 0:
            print("epoc: ", i)
            val_x = net.predict(x)

            display_img(i, x[0], style)
            display_img(i, val_x[0], style, True)
            model.save_weights(style + '_weights.h5')

        i += train_batchsize
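
Every compile() call above uses dummy_loss because the actual content, style, and total-variation losses are attached inside the loss network as regularizers. The snippet below is only an assumption about how such a placeholder loss is commonly written in Keras, not code taken from this repository.

from keras import backend as K

def dummy_loss(y_true, y_pred):
    # The returned value is irrelevant for learning; the real training signal
    # comes from the regularization losses added inside nets.loss_net.
    return K.variable(0.0)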