def model_nn(sess, input_image, num_iterations=200, is_print_info=True, is_plot=True,
             is_save_process_image=True, save_last_image_to="output/generated_image.jpg"):
    # Initialize global variables
    sess.run(tf.global_variables_initializer())
    # Run the noisy input image through the model
    sess.run(model["input"].assign(input_image))

    for i in range(num_iterations):
        # Run the minimization objective
        sess.run(train_step)
        # Get the image generated from the current model input
        generated_image = sess.run(model["input"])
        if is_print_info and i % 20 == 0:
            Jt, Jc, Js = sess.run([J, J_content, J_style])
            print("Iteration " + str(i) + ":"
                  + " total cost = " + str(Jt)
                  + " content cost = " + str(Jc)
                  + " style cost = " + str(Js))
        if is_save_process_image:
            nst_utils.save_image("output/" + str(i) + ".png", generated_image)

    nst_utils.save_image(save_last_image_to, generated_image)
    return generated_image
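# --- Assumed cost helpers (sketch, not part of the snippets in this section) ---
# model_nn above, and the variants that follow, reference J, J_content, J_style and
# train_step without defining them. This is a minimal sketch of the usual helpers,
# following the standard Gatys-style formulation these snippets appear to be based
# on; the actual helper names and details in each snippet may differ.
import tensorflow as tf

def compute_content_cost(a_C, a_G):
    # Squared difference between content and generated activations of one layer,
    # normalized by 4 * n_H * n_W * n_C.
    m, n_H, n_W, n_C = a_G.get_shape().as_list()
    return tf.reduce_sum(tf.square(tf.subtract(a_C, a_G))) / (4 * n_H * n_W * n_C)

def gram_matrix(A):
    # A has shape (n_C, n_H * n_W); the Gram matrix captures feature correlations.
    return tf.matmul(A, tf.transpose(A))

def compute_layer_style_cost(a_S, a_G):
    # Squared difference between the Gram matrices of style and generated activations.
    m, n_H, n_W, n_C = a_G.get_shape().as_list()
    a_S = tf.reshape(tf.transpose(a_S, perm=[0, 3, 1, 2]), [n_C, n_H * n_W])
    a_G = tf.reshape(tf.transpose(a_G, perm=[0, 3, 1, 2]), [n_C, n_H * n_W])
    GS = gram_matrix(a_S)
    GG = gram_matrix(a_G)
    return tf.reduce_sum(tf.square(tf.subtract(GS, GG))) / (4 * (n_C ** 2) * ((n_H * n_W) ** 2))

def total_cost(J_content, J_style, alpha=10, beta=40):
    # Weighted sum of content and style costs.
    return alpha * J_content + beta * J_style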
def NST_model(num_iter=1000):
    content_img = load_img("con_niu.jpg")
    content_img = nu.reshape_and_normalize_image(content_img)
    style_img = load_img("style_cloud.jpg")
    style_img = nu.reshape_and_normalize_image(style_img)
    generated_img = nu.generate_noise_image(content_img)
    print(np.shape(content_img))
    print(np.shape(style_img))
    print(np.shape(generated_img))

    with tf.Session() as sess:
        model = nu.load_vgg_model("pretrained-model/imagenet-vgg-verydeep-19.mat")

        sess.run(model['input'].assign(content_img))
        out = model['conv4_2']
        a_c = sess.run(out)
        a_g = out
        J_content = content_cost(a_c, a_g)

        STYLE_LAYERS = [('conv1_1', 0.2), ('conv2_1', 0.2), ('conv3_1', 0.2),
                        ('conv4_1', 0.2), ('conv5_1', 0.2)]
        sess.run(model['input'].assign(style_img))
        J_style = style_cost(model, STYLE_LAYERS, sess)

        J = total_cost(J_content, J_style)
        optimizer = tf.train.AdamOptimizer(2.0)
        train_step = optimizer.minimize(J)

        tf.global_variables_initializer().run()
        sess.run(model['input'].assign(generated_img))
        for i in range(num_iter):
            sess.run(train_step)
            generated_img = sess.run(model['input'])
            if i % 20 == 0:
                Jt, Jc, Js = sess.run([J, J_content, J_style])
                print("Iteration " + str(i) + " :")
                print("total cost = " + str(Jt))
                print("content cost = " + str(Jc))
                print("style cost = " + str(Js))
                print(generated_img.shape)
                nu.save_image("output4/" + str(i) + ".png", generated_img)
        nu.save_image("output4/generated_image.png", generated_img)
        return generated_img
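# --- Assumed multi-layer style cost (sketch) ---
# NST_model above calls style_cost(model, STYLE_LAYERS, sess) after assigning the
# style image to model['input']; a plausible implementation is a weighted sum of
# per-layer style costs, using the per-layer cost sketched earlier. Argument order
# varies across the snippets in this section (e.g. compute_style_cost(sess, model,
# STYLE_LAYERS) further below).
def style_cost(model, style_layers, sess):
    J_style = 0
    for layer_name, weight in style_layers:
        out = model[layer_name]
        a_S = sess.run(out)   # style activations, evaluated now on the style image
        a_G = out             # same tensor, later evaluated on the generated image
        J_style += weight * compute_layer_style_cost(a_S, a_G)
    return J_style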
def nn_model(sess, input_image, num_iterations=300):
    sess.run(tf.global_variables_initializer())
    sess.run(model['input'].assign(input_image))
    for i in range(num_iterations):
        sess.run(train_step)
        generated_image = sess.run(model['input'])
        if i % 20 == 0:
            Jt, Jc, Js = sess.run([J, J_content, J_style])
            print("Iteration " + str(i) + " :")
            print("total cost = " + str(Jt))
            print("content cost = " + str(Jc))
            print("style cost = " + str(Js))
            save_image("./a_" + str(i) + ".png", generated_image)
    save_image('./generated_image_2.jpg', generated_image)
    return generated_image
def model_nst(self, input_image, optimizer, content_img, style_img, output_name,
              graph, sess, print_output=False):
    sess.run(self.model['input'].assign(content_img))
    out = self.model['conv4_2']
    a_c = sess.run(out)
    a_g = out
    j_content = self.compute_content_cost(a_c, a_g)

    sess.run(self.model['input'].assign(style_img))
    j_style = self.compute_style_cost(self.model, self.style_layers, sess)

    j = self.total_cost(j_content, j_style)
    train_step = optimizer.minimize(j)

    sess.run(tf.global_variables_initializer())
    sess.run(self.model['input'].assign(input_image))
    for i in range(self.num_iterations):
        sess.run(train_step)
        generated_image = sess.run(self.model['input'])
        if i % 20 == 0 and print_output:
            jt, jc, js = sess.run([j, j_content, j_style])
            print("Iteration " + str(i) + " :")
            print("Total cost = " + str(jt))
            print("Content cost = " + str(jc))
            print("Style cost = " + str(js))
            save_image(self.output_dir + output_name + '_' + str(i) + ".png",
                       generated_image)
    sess.close()
    path_generated_img = self.output_dir + output_name + '.png'
    save_image(path_generated_img, generated_image)
    return generated_image, path_generated_img
def fit(self, learning_rate=2.0, num_iterations=200):
    history = []
    # Assign the content image to be the input of the VGG model.
    # sess.run(self.model['input'].assign(self.content_image))
    # out = self.model['conv4_2']
    # a_C = sess.run(out)
    # a_G = out
    # J_content = self.compute_content_cost(a_C, a_G)
    J_content = self.content_cost()
    J_style = self.style_cost()
    J = self.total_cost(J_content, J_style)

    optimizer = tf.train.AdamOptimizer(learning_rate)
    self.train_step = optimizer.minimize(J)

    # init global variables
    sess.run(tf.global_variables_initializer())
    # run the noisy input image, use assign()
    sess.run(self.model['input'].assign(self.init_image))

    if not os.path.isdir('output'):
        os.mkdir('output')

    for i in range(num_iterations):
        sess.run(self.train_step)
        generated_image = sess.run(self.model['input'])
        # Print every 20 iterations.
        if i % 20 == 0:
            Jt, Jc, Js = sess.run([J, J_content, J_style])
            print("Iteration " + str(i) + " :")
            print("total cost = " + str(Jt))
            print("content cost = " + str(Jc))
            print("style cost = " + str(Js))
            # save current generated image in the "/output" directory
            nst_utils.save_image("output/" + str(i) + ".png", generated_image)
            history.append((Jt, Jc, Js))

    # save last generated image
    nst_utils.save_image('output/generated_image.jpg', generated_image)
    return history
def model_nn(sess, input_image, num_iterations=400):
    # Initialize global variables (you need to run the session on the initializer)
    ### START CODE HERE ### (1 line)
    sess.run(tf.global_variables_initializer())
    ### END CODE HERE ###

    # Run the noisy input image (initial generated image) through the model. Use assign().
    ### START CODE HERE ### (1 line)
    generated_image = input_image
    sess.run(model['input'].assign(input_image))
    ### END CODE HERE ###

    for i in range(num_iterations):
        # Run the session on the train_step to minimize the total cost
        ### START CODE HERE ### (1 line)
        sess.run(train_step)
        ### END CODE HERE ###

        # Compute the generated image by running the session on the current model['input']
        ### START CODE HERE ### (1 line)
        generated_image = sess.run(model['input'])
        ### END CODE HERE ###

        # Print every 20 iterations.
        if i % 20 == 0:
            Jt, Jc, Js = sess.run([J, J_content, J_style])
            print("Iteration " + str(i) + " :")
            print("total cost = " + str(Jt))
            print("content cost = " + str(Jc))
            print("style cost = " + str(Js))
            # save current generated image in the "/output" directory
            save_image("output/" + str(i) + ".png", generated_image)

    # save last generated image
    save_image('output/generated_image.jpg', generated_image)
    return generated_image
def generate_noise_image2(content_image, noise_ratio=0.6):
    """
    Generates a noisy image by adding random noise to the content_image
    """
    height = content_image.shape[1]
    width = content_image.shape[2]

    # Generate a random noise_image
    noise_image = np.random.uniform(-20, 20, (1, height, width, 3)).astype('float32')
    # range_ = np.max(content_image) - np.min(content_image)
    # noise_image = np.random.normal(loc=np.mean(content_image), scale=0.7*range_, size=(1, height, width, 3)).astype('float32')

    # Set the input_image to be a weighted average of the content_image and a noise_image
    input_image = noise_image * noise_ratio + content_image * (1 - noise_ratio)
    return input_image


STYLE_LAYERS = [('conv1_1', 0.2), ('conv2_1', 0.2), ('conv3_1', 0.2),
                ('conv4_1', 0.2), ('conv5_1', 0.2)]

# Reset the graph
tf.reset_default_graph()
# Start interactive session
sess = tf.InteractiveSession()

content_image = scipy.misc.imread("images/z_me3.jpg")
content_image = reshape_and_normalize_image(content_image)
style_image = scipy.misc.imread("images/constable.jpg")
style_image = reshape_and_normalize_image(style_image)
generated_image = generate_noise_image2(content_image)
# print(generated_image.shape)
imshow(generated_image[0])

model = load_vgg_model("pretrained-model/imagenet-vgg-verydeep-19.mat")

# Assign the content image to be the input of the VGG model.
sess.run(model['input'].assign(content_image))
# Select the output tensor of layer conv4_2
out = model['conv4_2']
# Set a_C to be the hidden layer activation from the layer we have selected
a_C = sess.run(out)
# Set a_G to be the hidden layer activation from the same layer. Here, a_G references model['conv4_2']
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out
# Compute the content cost
J_content = compute_content_cost(a_C, a_G)

# Assign the input of the model to be the "style" image
sess.run(model['input'].assign(style_image))
# Compute the style cost
J_style = compute_style_cost(sess, model, STYLE_LAYERS)

### START CODE HERE ### (1 line)
J = total_cost(J_content, J_style, alpha=10, beta=40)
### END CODE HERE ###

# define optimizer (1 line)
optimizer = tf.train.AdamOptimizer(1.0)
# define train_step (1 line)
train_step = optimizer.minimize(J)

# Initialize global variables (you need to run the session on the initializer)
### START CODE HERE ### (1 line)
sess.run(tf.global_variables_initializer())
### END CODE HERE ###

# Run the noisy input image (initial generated image) through the model. Use assign().
### START CODE HERE ### (1 line)
sess.run(model['input'].assign(generated_image))
### END CODE HERE ###

for i in range(200):
    # Run the session on the train_step to minimize the total cost
    ### START CODE HERE ### (1 line)
    sess.run(train_step)
    ### END CODE HERE ###

    # Compute the generated image by running the session on the current model['input']
    ### START CODE HERE ### (1 line)
    generated_image = sess.run(model['input'])
    ### END CODE HERE ###

    # Print every 2 iterations.
    if i % 2 == 0:
        Jt, Jc, Js = sess.run([J, J_content, J_style])
        print("Iteration " + str(i) + " :")
        print("total cost = " + str(Jt))
        print("content cost = " + str(Jc))
        print("style cost = " + str(Js))
        # save current generated image in the "/output" directory
        save_image("output/z_m3_3_constable_" + str(i) + ".png", generated_image)

# save last generated image
save_image('output/z_me_3_constable_generated_image.jpg', generated_image)
def Generate(cls, content, style, alpha=10, beta=40, no_iter=100, display=False):
    """
    call signature : Generate(content, style, alpha=10, beta=40, no_iter=100)

    input  -- content : content image,
              style : style image,
              alpha : content cost multiplier,
              beta : style cost multiplier,
              no_iter : number of iterations

    return -- a dictionary with
              total_cost : array of total cost at each 10th iteration,
              content_cost : array of content cost at each 10th iteration,
              style_cost : array of style cost at each 10th iteration,
              image : np array of the generated image of shape (1, h, w, c)

    side effect -- saves the generated image in the output dir
    """
    J_content = cls.calculateTotalContentCost(content, cls.content)
    J_style = cls.calculateTotalStyleCost(style, cls.style)
    # Compute total cost
    J = alpha * J_content + beta * J_style

    # Initialize noisy generated image
    image = generate_noise_image(content)

    # Set optimizer
    optimizer = tf.train.AdamOptimizer(cls.learning_rate)
    train_step = optimizer.minimize(J)

    # Initialize plot
    if display:
        fig, ax1, ax2 = cls._createFig()
    J_show = []
    J_C_show = []
    J_S_show = []

    # Start the session
    with tf.Session() as sess:
        # Initialize variables
        sess.run(tf.global_variables_initializer())
        sess.run(model['input'].assign(image))
        # Run optimization
        for i in range(no_iter):
            sess.run(train_step)
            generated_image = sess.run(model['input'])
            # Print information
            if i % 10 == 0:
                temp_1, temp_2, temp_3 = sess.run([J, J_content, J_style])
                J_show.append(temp_1)
                J_C_show.append(temp_2)
                J_S_show.append(temp_3)
                print('iter : {}, J : {}'.format(i, temp_1))
                if display:
                    cls._updateFig(fig, ax1, ax2, generated_image,
                                   (J_show, J_C_show, J_S_show), no_iter)

    mat = {
        'total_cost': J_show,
        'content_cost': J_C_show,
        'style_cost': J_S_show,
        'image': generated_image
    }
    save_image(CONFIG.OUTPUT_DIR + cls.name + '.jpg', generated_image)
    scipy.io.savemat(CONFIG.OUTPUT_DIR + cls.name + '.mat', mat)
    return mat
optimizer = tf.train.AdamOptimizer(CONFIG.LEARNING_RATE)
train_step = optimizer.minimize(J)

# Step 7: Run the graph for a large number of iterations, updating the generated image at every step

# Initialize global variables
sess.run(tf.global_variables_initializer())
# Run the noisy initial generated image through the model.
sess.run(model['input'].assign(generated_image))

for i in range(CONFIG.NUM_ITERATIONS):
    # Run the session on the train_step to minimize the total cost
    sess.run(train_step)
    # Compute the generated image by running the session on the current model['input']
    generated_image = sess.run(model['input'])
    # Print every 20 iterations.
    if i % 20 == 0:
        Jt, Jc, Js = sess.run([J, J_content, J_style])
        print("Iteration " + str(i) + " :")
        print("total cost = " + str(Jt))
        print("content cost = " + str(Jc))
        print("style cost = " + str(Js))
        # save current generated image in the "/output" directory
        save_image("output/" + str(i) + ".png", generated_image)

# save last generated image
save_image('output/generated_image.jpg', generated_image)
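# --- Assumed image helpers (sketch) ---
# Most snippets above lean on nst_utils-style helpers (imported as nst_utils, nu, or
# directly): reshape_and_normalize_image, generate_noise_image and save_image. This
# is a minimal version for reference; the VGG mean values and the use of imageio in
# place of the era-typical scipy.misc.imsave are assumptions.
import numpy as np
import imageio

VGG_MEANS = np.array([123.68, 116.779, 103.939]).reshape((1, 1, 1, 3))

def reshape_and_normalize_image(image):
    # Add a batch dimension and subtract the VGG training means.
    image = np.reshape(image, ((1,) + image.shape))
    return image - VGG_MEANS

def generate_noise_image(content_image, noise_ratio=0.6):
    # Weighted blend of uniform noise and the (already normalized) content image.
    noise = np.random.uniform(-20, 20, content_image.shape).astype('float32')
    return noise * noise_ratio + content_image * (1 - noise_ratio)

def save_image(path, image):
    # Undo the mean subtraction, clip to the valid pixel range and write to disk.
    image = image + VGG_MEANS
    image = np.clip(image[0], 0, 255).astype('uint8')
    imageio.imwrite(path, image)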