def transform(args):
    style = args.style
    # img_width = img_height = args.image_size
    output_file = args.output
    input_file = args.input
    original_color = args.original_color
    blend_alpha = args.blend
    media_filter = args.media_filter

    aspect_ratio, x = img_util.preprocess_reflect_image(input_file, size_multiple=4)
    img_width = img_height = x.shape[1]

    net = nets.image_transform_net(img_width, img_height)
    model = nets.loss_net(net, img_width, img_height, "", 0, 0)
    model.compile("adam", loss.dummy_loss)  # Dummy loss since we are learning from regularizers
    model.load_weights("pretrained/" + style + '_weights.h5', by_name=False)

    t1 = time.time()
    y = net.predict(x)[0]
    y = crop_image(y, aspect_ratio)
    print("process: %s" % (time.time() - t1))

    ox = crop_image(x[0], aspect_ratio)
    y = median_filter_all_colours(y, media_filter)

    if blend_alpha > 0:
        y = blend(ox, y, blend_alpha)
    if original_color > 0:
        y = original_colors(ox, y, original_color)

    imsave('%s_output.png' % output_file, y)
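# For reference, a minimal sketch of the CLI wiring that transform(args) appears to
# expect, based on the attributes it reads above. The flag defaults and help strings
# are assumptions, not part of the original script.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Apply a pretrained style to an image")
    parser.add_argument("--style", required=True, help="name of the pretrained style weights")
    parser.add_argument("--input", required=True, help="path to the content image")
    parser.add_argument("--output", default="result", help="prefix for the saved *_output.png")
    parser.add_argument("--original_color", type=float, default=0, help="0-1, keep original colors")
    parser.add_argument("--blend", type=float, default=0, help="0-1, blend with the original image")
    parser.add_argument("--media_filter", type=int, default=3, help="median filter kernel size")
    transform(parser.parse_args())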
def main(args):
    style = args.style
    # img_width = img_height = args.image_size
    output_file = args.output
    input_file = args.input
    original_color = args.original_color
    blend_alpha = args.blend
    media_filter = args.media_filter

    aspect_ratio, x = preprocess_reflect_image(input_file, size_multiple=4)
    img_width = img_height = x.shape[1]

    net = nets.image_transform_net(img_width, img_height)
    z = concatenate([net.output, net.input], axis=0)
    model = VGG16(include_top=False, input_tensor=z)
    # model.summary()
    # model.compile(Adam(), dummy_loss)  # Dummy loss since we are learning from regularizers
    model.load_weights("pretrained/" + style + '_weights.h5', by_name=False)

    t1 = time.time()
    y = net.predict(x)[0]
    print("process: %s" % (time.time() - t1))

    imsave('%s_output.png' % output_file, y)
    net.save('keras.h5')
def transfer_image(self, img_input, style):
    aspect_ratio, x = ImageUtil.reflect(img_input, size_multiple=4)
    img_width = img_height = x.shape[1]

    net = nets.image_transform_net(img_width, img_height)
    model = nets.loss_net(net.output, net.input, img_width, img_height, "", 0, 0)
    model.summary()

    # Dummy loss since we are learning from regularizers
    model.compile(Adam(), dummy_loss)
    model.load_weights(self.style_util.get_style_weight_path(style), by_name=False)

    t1 = time.time()
    y = net.predict(x)[0]
    y = ImageUtil.crop(y, aspect_ratio)
    print("process: %s" % (time.time() - t1))

    ox = ImageUtil.crop(x[0], aspect_ratio)

    # Median filter
    img_output = self._median_filter_colors(y, self.size_media_filter)

    if self.blend_alpha > 0:
        img_output = self._blend(ox, img_output, self.blend_alpha)
    if self.original_color > 0:
        img_output = self._original_colors(ox, img_output, self.original_color)

    return img_output
def train(args):
    style_weight = args.style_weight
    content_weight = args.content_weight
    tv_weight = args.tv_weight
    output_path = args.output
    img_width, img_height = [int(x) for x in args.image_size.split("*")]

    style = args.style
    style_image_path = "images/style/" + style + ".jpg"

    net = nets.image_transform_net(img_width, img_height, tv_weight)
    model = nets.loss_net(net, img_width, img_height, style_image_path, content_weight, style_weight)
    model.summary()

    nb_epoch = 2000
    train_batchsize = 1
    train_image_path = "images/train/"

    model.compile(optimizer="adam", loss=loss.dummy_loss)  # Dummy loss since we are learning from regularizers

    datagen = ImageDataGenerator()
    # Dummy output, not used since we use regularizers to train
    dummy_y = np.zeros((train_batchsize, img_width, img_height, 3))

    skip_to = 0
    i = 0
    t1 = time.time()

    # Old manual training loop, kept disabled in favor of fit_generator below.
    """
    for x in datagen.flow_from_directory(train_image_path, class_mode=None,
                                         batch_size=train_batchsize,
                                         target_size=(img_width, img_height), shuffle=True):
        if i > nb_epoch:
            break
        if i < skip_to:
            i += train_batchsize
            if i % 1000 == 0:
                print("skip to: %d" % i)
            continue

        hist = model.train_on_batch(x, dummy_y)
        if i % 50 == 0:
            print(hist, (time.time() - t1))
            t1 = time.time()
        if i % 500 == 0:
            print("epoch: ", i)
            val_x = net.predict(x)
            display_img(i, x[0], output_path, style)
            display_img(i, val_x[0], output_path, style, True)
            model.save_weights("pretrained/" + style + '_weights.h5')
        i += train_batchsize
    """

    x_generator = datagen.flow_from_directory(train_image_path, class_mode=None,
                                              batch_size=train_batchsize,
                                              target_size=(img_width, img_height), shuffle=True)
    dummy_y = np.zeros((train_batchsize, img_width, img_height, 3))
    model.fit_generator(batch_generator(x_generator, dummy_y),
                        steps_per_epoch=2000, epochs=2000)
    model.save_weights("pretrained/" + style + '_weights.h5')
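# fit_generator above relies on a batch_generator helper that is not shown in this
# excerpt. A minimal sketch of what it might look like, assuming it only pairs each
# image batch from the directory iterator with the constant dummy target (the name
# is taken from the call site; the body is an assumption, not the original code):
def batch_generator(x_generator, dummy_y):
    # Yield (inputs, targets) tuples indefinitely; the target is ignored by
    # dummy_loss because the real losses are attached as regularizers in loss_net.
    for x in x_generator:
        yield x, dummy_y[:x.shape[0]]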
def __init__(self, image, media_filter, blend_alpha, original_color, style, output_file):
    aspect_ratio, x = preprocess_reflect_video(image, size_multiple=4)
    img_width = img_height = x.shape[1]
    print("img_width = ", img_width)

    self.net = nets.image_transform_net(img_width, img_height)
    self.model = nets.loss_net(self.net.output, self.net.input, img_width, img_height, "", 0, 0)
    self.model.compile(Adam(), dummy_loss)  # Dummy loss since we are learning from regularizers
    self.model.load_weights(style, by_name=False)
def __init__(self, style_weight, content_weight, tv_weight, style_p, img_size):
    self.style_w = style_weight
    self.content_w = content_weight
    self.tv_w = tv_weight
    self.style_p = style_p
    self.img_size = img_size
    self.img_w, self.img_h = self.img_size

    # Build the image transform net and its loss net
    self.net = nets.image_transform_net(self.img_w, self.img_h, self.tv_w)
    self.model = nets.loss_net(self.net.output, self.net.input, self.img_w, self.img_h,
                               self.style_p, self.content_w, self.style_w)
def main(args):
    texture = args.texture
    style = args.style
    # img_width = img_height = args.image_size
    output_file = args.output
    input_file = args.input
    original_color = args.original_color
    blend_alpha = args.blend
    media_filter = args.media_filter

    # Processing for the texture model
    aspect_ratio, x = preprocess_reflect_image(input_file, size_multiple=4)
    img_width = img_height = x.shape[1]

    net = nets.image_transform_net(img_width, img_height)
    model = nets.loss_net(net.output, net.input, img_width, img_height, "", 0, 0)
    model.compile(Adam(), dummy_loss)  # Dummy loss since we are learning from regularizers

    # Load the texture model weights
    model.load_weights(texture, by_name=False)

    t1 = time.time()
    y = net.predict(x)[0]
    y = crop_image(y, aspect_ratio)
    print("process: %s" % (time.time() - t1))

    ox = crop_image(x[0], aspect_ratio)
    y = median_filter_all_colours(y, media_filter)

    if blend_alpha > 0:
        y = blend(ox, y, blend_alpha)
    if original_color > 0:
        y = original_colors(ox, y, original_color)

    imsave('%s_texture.png' % output_file, y)
    imshow(y)

    # Processing for the second (style) transform
    aspect_ratio2, x2 = preprocess_reflect_layer2(y, size_multiple=4)
    img_width2 = img_height2 = x2.shape[1]

    net2 = nets.image_transform_net(img_width2, img_height2)
    model2 = nets.loss_net(net2.output, net2.input, img_width2, img_height2, "", 0, 0)
    model2.compile(Adam(), dummy_loss)

    # Load the style model weights
    model2.load_weights(style, by_name=False)

    y2 = net2.predict(x2)[0]
    y2 = crop_image(y2, aspect_ratio2)
    print("process: %s" % (time.time() - t1))

    ox2 = crop_image(x2[0], aspect_ratio2)
    y2 = median_filter_all_colours(y2, media_filter)

    if blend_alpha > 0:
        y2 = blend(ox2, y2, blend_alpha)
    if original_color > 0:
        y2 = original_colors(ox2, y2, original_color)

    # Save and display the transformed image
    imsave('%s_output.png' % output_file, y2)
    imshow(y2)
def main(args):
    style_weight = args.style_weight
    content_weight = args.content_weight
    tv_weight = args.tv_weight
    style = args.style
    img_width = img_height = args.image_size

    style_image_path = get_style_img_path(style)

    net = nets.image_transform_net(img_width, img_height, tv_weight)
    model = nets.loss_net(net.output, net.input, img_width, img_height,
                          style_image_path, content_weight, style_weight)
    model.summary()

    nb_epoch = 82785 * 2
    train_batchsize = 1
    train_image_path = "images/train/"

    learning_rate = 1e-3  # 1e-3
    optimizer = Adam()  # Adam(lr=learning_rate, beta_1=0.99)
    model.compile(optimizer, dummy_loss)  # Dummy loss since we are learning from regularizers

    datagen = ImageDataGenerator()
    # Dummy output, not used since we use regularizers to train
    dummy_y = np.zeros((train_batchsize, img_width, img_height, 3))

    # model.load_weights(style + '_weights.h5', by_name=False)

    skip_to = 0
    i = 0
    t1 = time.time()
    for x in datagen.flow_from_directory(train_image_path, class_mode=None,
                                         batch_size=train_batchsize,
                                         target_size=(img_width, img_height), shuffle=False):
        if i > nb_epoch:
            break
        if i < skip_to:
            i += train_batchsize
            if i % 1000 == 0:
                print("skip to: %d" % i)
            continue

        hist = model.train_on_batch(x, dummy_y)
        if i % 50 == 0:
            print(hist, (time.time() - t1))
            t1 = time.time()
        if i % 500 == 0:
            print("epoch: ", i)
            val_x = net.predict(x)
            display_img(i, x[0], style)
            display_img(i, val_x[0], style, True)
            model.save_weights(style + '_weights.h5')
        i += train_batchsize
def main(args): """ Main. """ # Extract CLA values style = args.style tv_weight = args.tv_weight style_weight = args.style_weight content_weight = args.content_weight img_width = img_height = args.image_size # Get relative path of style image style_image_path = get_style_img_path(style) # Create image transform network model. # Also note that the style and content losses # are already added when creating the image network # model net = nets.image_transform_net(img_width, img_height, tv_weight) model = nets.loss_net(net.output, net.input, img_width, img_height, style_image_path, content_weight, style_weight) model.summary() # Epochs nb_epoch = 40000 train_batchsize = 4 # train_image_path = "images/style/" train_image_path = "/home/data/MSCOCO/train2014" learning_rate = 1e-3 #1e-3 optimizer = Adam(learning_rate=learning_rate) # Dummy loss since we are learning from regularizes model.compile(optimizer, dummy_loss) # Keras data generator datagen = ImageDataGenerator() # Dummy output, not used since we use regularizers to train dummy_y = np.zeros((train_batchsize, img_width, img_height, 3)) # Uncomment the line below if you want to keep # training a previously saved model # model.load_weights(style+'_weights.h5',by_name=False) # Skip to a particular # epoch in case you wanna # resume from that epoch skip_to = 0 # Starting epoch i = 0 # Time is essential t1 = time.time() # Loop over generate data (MSCOCO dataset) for x in datagen.flow_from_directory(train_image_path, class_mode=None, batch_size=train_batchsize, target_size=(img_width, img_height), shuffle=False): # Break if over epochs if i > nb_epoch: break # Skip to particular epoch if i < skip_to: i += train_batchsize if i % 1000 == 0: print("skip to: %d" % i) continue hist = model.train_on_batch(x, dummy_y) if i % 50 == 0: print(hist,(time.time() -t1)) t1 = time.time() if i % 500 == 0: print("epoc: ", i) val_x = net.predict(x) display_img(i, x[0], style) display_img(i, val_x[0],style, True) # Save model model.save_weights(style+'_weights.h5') # Save model (to be removed, just check if works) model.save_weights(f"pidgeots_{style}_weights.h5") i += train_batchsize
import tensorflow as tf
graph = tf.get_default_graph()

from flask import Flask, render_template, request, jsonify
from PIL import Image
import os, io, sys
import numpy as np
import cv2
import base64
from flask_cors import CORS

from utils import create_mask, get_masks, merge
import nets

dim = 512
folder = "saved_weights/"

model = nets.image_transform_net(dim, dim, 1e-6)
model.load_weights(folder + "colorfull.h5")

app = Flask(__name__)


@app.after_request
def after_request(response):
    print("log: setting cors", file=sys.stderr)
    response.headers.add('Access-Control-Allow-Origin', '*')
    response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
    response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')
    return response

# cors = CORS(app, resources={r"/*": {"origins": "*"}})
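# The app's actual routes are defined elsewhere in the original file. A minimal sketch
# of how the loaded transform net could be exposed over HTTP; the route name, JSON
# field names, and the pre/post-processing here are assumptions, not the original API.
@app.route("/stylize", methods=["POST"])
def stylize():
    # Decode a base64-encoded image from the JSON body and resize to the model input size
    data = base64.b64decode(request.json["image"])
    img = np.array(Image.open(io.BytesIO(data)).convert("RGB").resize((dim, dim)), dtype=np.float32)

    # Run the transform net inside the captured default graph (TF1-style Keras serving)
    with graph.as_default():
        out = model.predict(np.expand_dims(img, axis=0))[0]

    # Re-encode the stylized frame as base64 PNG
    out = np.clip(out, 0, 255).astype(np.uint8)
    buf = io.BytesIO()
    Image.fromarray(out).save(buf, format="PNG")
    return jsonify({"image": base64.b64encode(buf.getvalue()).decode("ascii")})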
def execute(self, args):
    style_weight = args.style_weight
    content_weight = args.content_weight
    tv_weight = args.tv_weight
    style = args.style
    img_width = img_height = args.image_size

    style_image_path = self.style_util.get_style_img_path(style)

    net = nets.image_transform_net(img_width, img_height, tv_weight)
    model = nets.loss_net(net.output, net.input, img_width, img_height,
                          style_image_path, content_weight, style_weight)
    model.summary()

    # nb_epoch = 82785 * 2
    nb_epoch = self.epochs
    train_batchsize = 1

    learning_rate = 1e-3
    optimizer = Adam()  # Adam(lr=learning_rate, beta_1=0.99)
    # Dummy loss since we are learning from regularizers
    model.compile(optimizer, dummy_loss)

    datagen = ImageDataGenerator()
    # Dummy output, not used since we use regularizers to train
    dummy_y = np.zeros((train_batchsize, img_width, img_height, 3))
    print("dummy_y", type(dummy_y))

    # model.load_weights(style + '_weights.h5', by_name=False)

    print("dir_train:", self.dir_train, img_width, img_height)

    skip_to = 0
    i = 0
    t1 = time.time()

    img_ary = datagen.flow_from_directory(self.dir_train,
                                          target_size=(img_width, img_height),
                                          color_mode='rgb',
                                          classes=None,
                                          class_mode=None,
                                          batch_size=train_batchsize,
                                          shuffle=False,
                                          seed=None,
                                          save_to_dir=None,
                                          save_prefix='',
                                          save_format='jpg',
                                          follow_links=False,
                                          subset=None,
                                          interpolation='nearest')
    if len(img_ary) == 0:
        print("training images not found:", self.dir_train)
        return

    for x in img_ary:
        print("epoch:", i)
        if i > nb_epoch:
            break
        if i < skip_to:
            i += train_batchsize
            if i % 1000 == 0:
                print("skip to: %d" % i)
            continue

        hist = model.train_on_batch(x, dummy_y)
        if i % 10 == 0:
            print(hist, (time.time() - t1))
            t1 = time.time()
        if i % 10 == 0:
            print("epoch: ", i)
            val_x = net.predict(x)
            self._display_img(i, x[0], style)
            self._display_img(i, val_x[0], style, True)
            model.save_weights(style + '_weights.h5')
        i += train_batchsize