def do_for_file(self, file_path, output_folder="output"):
    """Apply the super-resolution model to a single image file.

    Saves a copy of the original image and the upscaled result under
    `output_folder`. For an RGB input with a single-channel model, only
    the luminance (Y) channel is super-resolved; the chroma channels are
    bicubically upscaled and the three are recombined into RGB.

    Args:
        file_path: Path of the input image file.
        output_folder: Directory prefix for the saved images.

    Returns:
        0 on completion.
    """
    filename, extension = os.path.splitext(file_path)
    output_folder += "/"

    org_image = util.load_image(file_path)
    util.save_image(output_folder + file_path, org_image)

    if len(org_image.shape) >= 3 and org_image.shape[2] == 3 and self.channels == 1:
        # Color input, single-channel model: run the network on Y only,
        # then merge with bicubically scaled CbCr channels.
        input_y_image = util.convert_rgb_to_y(org_image, jpeg_mode=self.jpeg_mode)
        output_y_image = self.do(input_y_image)
        scaled_ycbcr_image = util.convert_rgb_to_ycbcr(
            util.resize_image_by_pil(org_image, self.scale), jpeg_mode=self.jpeg_mode)
        image = util.convert_y_and_cbcr_to_rgb(output_y_image, scaled_ycbcr_image[:, :, 1:3],
                                               jpeg_mode=self.jpeg_mode)
    else:
        # Channel counts already match: feed the image to the model directly.
        # (The original also computed a bicubically scaled copy here and in the
        # branch above, but never used it — removed as dead work.)
        image = self.do(org_image)

    util.save_image(output_folder + filename + "_result" + extension, image)
    return 0
def stylize_image(content_image, style_image):
    """Run neural style transfer: optimise a generated image so it keeps the
    content of `content_image` while adopting the style of `style_image`.

    Intermediate results are written to 'output/<step>.png' and loss
    summaries to '../logs'.

    Note: converted from Python-2 `print` statements (a SyntaxError under
    Python 3) to `print()` calls; TF API calls are left untouched.
    """
    layer_weights = [0.5, 1, 1.5, 3.0, 4.0]
    weights_path = 'vgg16_weights.npz'

    with tf.Session() as sess:
        # Start the optimisation from the content image itself (rather than
        # random noise) so it converges faster.
        generated_image = tf.Variable(tf.constant(np.array(content_image)),
                                      name='generated_image')

        # Capture the VGG activations of the content and style images once;
        # they remain fixed during optimisation.
        print("Running the network on the content image")
        vgg_net = vgg16(tf.constant(content_image), weights_path)
        content_layers = sess.run(vgg_net.get_layers())

        print("Running the network on the style image")
        vgg_net = vgg16(tf.constant(style_image), weights_path)
        style_layers = sess.run(vgg_net.get_layers())

        print("Time to style the image")
        # NOTE(review): initialize_all_variables / scalar_summary /
        # merge_all_summaries / SummaryWriter are deprecated TF<=0.12 names;
        # left as-is to preserve behavior on the TF version this targets.
        sess.run(tf.initialize_all_variables())
        vgg_net = vgg16(generated_image, weights_path)
        generated_layers = vgg_net.get_layers()

        print("calculate loss functions")
        c_loss = content_loss(content_layers[4], generated_layers[4])
        s_loss = style_loss(style_layers, generated_layers, layer_weights)
        overall_loss = FLAGS.alpha * c_loss + FLAGS.beta * s_loss

        # Use the Adam optimizer to minimize the combined cost.
        print("optimize")
        optimizer = tf.train.AdamOptimizer(2.0)
        train_step = optimizer.minimize(overall_loss)

        # Re-initialize so the optimizer's slot variables are also created.
        sess.run(tf.initialize_all_variables())
        tf.scalar_summary("content_loss", c_loss)
        tf.scalar_summary("style_loss", s_loss)
        summary_op = tf.merge_all_summaries()
        summary_writer = tf.train.SummaryWriter('../logs', sess.graph)

        for step in range(FLAGS.iterations):
            sess.run(train_step)
            print("iteration: ", step)
            if (step) % 10 == 0 or (step + 1) == FLAGS.iterations:
                print("content cost", sess.run(c_loss))
                print("style cost", sess.run(s_loss))
                print("Overall cost", sess.run(overall_loss))
                filename = 'output/%d.png' % (step)
                save_image(filename, sess.run(generated_image))
def text_reply(msg):
    """Handle an incoming chat message.

    Every message is echoed to stdout. For the watched contact, picture
    messages are downloaded and recorded via the helper; text messages are
    recorded as-is. Messages from anyone else are ignored after printing.
    """
    print(msg)
    if msg.actualNickName != "诸隆隆":
        return
    if msg.type == 'Picture':
        download_result = msg.download(fileName=msg.fileName)
        helper.save_image({'fileName': msg.fileName})
        print(download_result)
    elif msg.type == 'Text':
        helper.save_text({'text': msg.text})
def show_active_img_and_save_denoise_filter2(name, img, path):
    """Invert an activation map, convert it to 8-bit, median-filter and save.

    Args:
        name: Window title — unused (the cv2.imshow call is commented out).
        img: Activation map as a float array; values below 0.1 are suppressed,
            so input is presumably in roughly [0, 1] — TODO confirm.
        path: NOTE(review): unused — output is hardcoded to 'sketch.jpg';
            confirm whether this should be the save target (the commented-out
            cv2.imwrite used `path`).
    """
    # `np.float` was removed in NumPy 1.24; it was an alias for the builtin
    # float (i.e. float64), so np.float64 preserves the original dtype.
    mat = img.astype(np.float64)
    mat[mat < 0.1] = 0      # suppress weak activations
    mat = - mat + 1         # invert so strong activations become dark
    mat = mat * 255.0       # scale to the 8-bit range
    mat[mat < 0] = 0
    mat[mat > 255] = 255
    mat = mat.astype(np.uint8)
    # NOTE(review): a median filter with size 1 is a no-op; confirm intent.
    mat = ndimage.median_filter(mat, 1)
    #cv2.imshow(name,mat)
    #cv2.imwrite(path,mat)
    save_image(mat, 'sketch.jpg')
    print('SAVE IMAGE')
    return
def main(content_features, style_grams, target, model, learning_rate, alpha, beta, steps, result_path):
    '''
    A function which handles the forward and backward pass as well as the optimisation of the generated image.\n
    `content_features`: Dictionary which stores the content image features.\n
    `style_grams`: Dictionary which stores the style image gram matrices.\n
    `target`: Tensor which is to be optimised to get the stylised image.\n
    `model`: Model which is used for the style transfer.\n
    `learning_rate`: The learning rate for the optimizer.\n
    `alpha`: The weight of the content loss.\n
    `beta`: The weight of the style loss.\n
    `steps`: Number of optimisation steps to run.\n
    `result_path:` Path where the image is supposed to be stored.
    '''
    # Only the pixels of `target` are optimised; the model weights are untouched.
    optimizer = optim.Adam([target], lr=learning_rate)
    for step in range(1, steps + 1):
        total_loss = content_loss = style_loss = 0
        optimizer.zero_grad()
        # Activations of the current target image at the layers of interest.
        target_features = get_features(target, model, layers)
        # Content loss: MSE between target and content activations at conv4_2.
        content_loss = torch.mean(
            (target_features['conv4_2'] - content_features['conv4_2'])**2)
        for layer in style_weights:
            target_feature = target_features[layer]
            _, c, w, h = target_feature.size()
            target_gram = gram_matrix(target_feature)
            style_gram = style_grams[layer]
            # Weighted MSE between the gram matrices of target and style.
            layer_style_loss = style_weights[layer] * torch.mean(
                (target_gram - style_gram)**2)
            # Normalise by the feature-map size so layers contribute comparably.
            style_loss += layer_style_loss / (c * w * h)
        total_loss = alpha * content_loss + beta * style_loss
        # NOTE(review): retain_graph=True keeps the whole autograd graph alive
        # across steps — confirm it is actually required here.
        total_loss.backward(retain_graph=True)
        optimizer.step()
        if step % 400 == 0:
            print(f'Step({step}/{steps}) => Loss: {total_loss.item():.4f}')
    # NOTE(review): statement grouping reconstructed from a collapsed source
    # line — the save is placed once after the loop, matching the docstring's
    # "Path where the image is supposed to be stored"; confirm against the
    # original layout.
    save_image(target, result_path)
async def post_resp(self, request):
    """Create a Dish from a POSTed form, saving an uploaded image if present.

    The optional 'image' field is written to settings.IMAGE_DIRECTORY and its
    public URL stored in the document as 'image_url'.

    Returns:
        dict: {'message': ...} on success, {'error': ...} on validation failure.
    """
    data = await request.post()
    data_dict = self.post_data_to_dict(data)

    # Fix: the original indexed data['image'] directly, which raises KeyError
    # when the form has no image field; .get() makes the field optional.
    image = data.get('image')
    if image:
        # The raw file field must not be passed to the Dish constructor.
        data_dict.pop('image')
        image_file = image.file
        filename = image.filename
        path = '%s/%s' % (settings.IMAGE_DIRECTORY.rstrip('/'), filename)
        save_image(image_file, path)
        # NOTE(review): assumes settings.IMAGE_URL ends with '/', otherwise the
        # URL path and filename are concatenated without a separator — confirm.
        data_dict['image_url'] = '%s://%s:%s/%s%s' % (
            request.url.scheme, request.url.host, request.url.port,
            settings.IMAGE_URL.lstrip('/'), filename)

    try:
        dish = Dish(**data_dict)
        await dish.save()
    except InvalidOperation as e:
        return {'error': 'validation %s' % e}
    else:
        return {'message': 'Item successfully added'}
def do_for_evaluate(self, file_path, output_directory="output", output=True, print_console=True):
    """Evaluate the model on one ground-truth image and compute PSNR / SSIM.

    Args:
        file_path: Path of the ground-truth image.
        output_directory: Directory prefix where result images are written.
        output: When True, also save input / result / diagnostic images.
        print_console: When True, print the PSNR / SSIM to stdout.

    Returns:
        (psnr, ssim) for supported image/channel combinations, otherwise
        (None, None).
    """
    filename, extension = os.path.splitext(file_path)
    output_directory += "/"

    # Align the true image so its dimensions are a multiple of the scale.
    true_image = util.set_image_alignment(util.load_image(file_path), self.scale)

    if true_image.shape[2] == 3 and self.channels == 1:
        # Color image evaluated with a single-channel (Y) model.
        input_y_image = util.build_input_image(true_image, channels=self.channels, scale=self.scale,
                                               alignment=self.scale, convert_ycbcr=True, jpeg_mode=self.jpeg_mode)
        if output:
            input_bicubic_y_image = util.resize_image_by_pil(input_y_image, self.scale)
            true_ycbcr_image = util.convert_rgb_to_ycbcr(true_image, jpeg_mode=self.jpeg_mode)

            output_y_image = self.do(input_y_image, input_bicubic_y_image)
            psnr, ssim = util.compute_psnr_and_ssim(true_ycbcr_image[:, :, 0:1], output_y_image,
                                                    border_size=self.scale)
            loss_image = util.get_loss_image(true_ycbcr_image[:, :, 0:1], output_y_image,
                                             border_size=self.scale)
            # Recombine the super-resolved Y with the true chroma channels.
            output_color_image = util.convert_y_and_cbcr_to_rgb(output_y_image, true_ycbcr_image[:, :, 1:3],
                                                                jpeg_mode=self.jpeg_mode)

            util.save_image(output_directory + file_path, true_image)
            util.save_image(output_directory + filename + "_input" + extension, input_y_image)
            util.save_image(output_directory + filename + "_input_bicubic" + extension, input_bicubic_y_image)
            util.save_image(output_directory + filename + "_true_y" + extension, true_ycbcr_image[:, :, 0:1])
            util.save_image(output_directory + filename + "_result" + extension, output_y_image)
            util.save_image(output_directory + filename + "_result_c" + extension, output_color_image)
            util.save_image(output_directory + filename + "_loss" + extension, loss_image)
        else:
            true_y_image = util.convert_rgb_to_y(true_image, jpeg_mode=self.jpeg_mode)
            # NOTE(review): this path calls self.do() without the bicubic
            # image, unlike the output branch above — confirm both call
            # forms are valid for self.do.
            output_y_image = self.do(input_y_image)
            psnr, ssim = util.compute_psnr_and_ssim(true_y_image, output_y_image, border_size=self.scale)
    elif true_image.shape[2] == 1 and self.channels == 1:
        # Monochrome image evaluated with a single-channel model.
        input_image = util.build_input_image(true_image, channels=self.channels, scale=self.scale,
                                             alignment=self.scale)
        output_image = self.do(input_image)
        psnr, ssim = util.compute_psnr_and_ssim(true_image, output_image, border_size=self.scale)
        if output:
            util.save_image(output_directory + file_path, true_image)
            util.save_image(output_directory + filename + "_result" + extension, output_image)
    else:
        psnr = ssim = None

    if print_console:
        # Fix: "%f" % None raised TypeError for unsupported channel
        # combinations; guard before formatting.
        if psnr is None:
            print("PSNR/SSIM not available for this image / channel combination")
        else:
            print("PSNR:%f SSIM:%f" % (psnr, ssim))
    return psnr, ssim