예제 #1
0
def unfreeze_model():
    """Run inference through a frozen GraphDef model and save the result.

    Loads the frozen graph from FLAGS.model/FLAGS.out_graph_name, feeds the
    content image through the named input/output nodes baked in by
    freeze_model(), and writes the stylized image under
    output/<model-dir-name>/.
    """
    output_graph_path = os.path.join(FLAGS.model, FLAGS.out_graph_name)

    # Output path: output/<last component of the model directory>.
    model_dir = FLAGS.model.rstrip("/")
    output_path = os.path.join("output", os.path.basename(model_dir))
    # BUG FIX: the directory must exist before misc.imsave writes into it;
    # the original never created it.
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    with tf.Graph().as_default():
        output_graph_def = tf.GraphDef()
        with open(output_graph_path, "rb") as f:
            output_graph_def.ParseFromString(f.read())
            _ = tf.import_graph_def(output_graph_def, name="")

        content_images = reader.get_image(FLAGS.content_image,
                                          FLAGS.image_size)
        images = tf.pack([content_images])

        with tf.Session() as sess:
            # Tensor names established by freeze_model().
            input_node = sess.graph.get_tensor_by_name("input_node:0")
            output_node = sess.graph.get_tensor_by_name("output_node:0")

            im_images = sess.run(images)
            output = sess.run(output_node, feed_dict={input_node: im_images})

            out_path = os.path.join(output_path,
                                    FLAGS.output + '-unfreeze.png')
            print("Save result in: ", out_path)
            print('------------------------------------')
            print('Finished!')

            misc.imsave(out_path, output[0])
예제 #2
0
 def execute(self, data, batch_size):
     """Stylize the first `batch_size` image files in `data`.

     Each image is decoded to discover its size, preprocessed, run through
     the style network, and the JPEG result is written to generated/res.jpg
     (each iteration overwrites the previous file).

     Args:
         data: list of image file paths.
         batch_size: number of entries of `data` to process.
     """
     sess = self.output['sess']
     graph = self.output['graph']
     for i in range(batch_size):
         height = 0
         width = 0
         with open(data[i], 'rb') as img:
             if data[i].lower().endswith('png'):
                 im = sess.run(tf.image.decode_png(img.read()))
             else:
                 im = sess.run(tf.image.decode_jpeg(img.read()))
             height = im.shape[0]
             # BUG FIX: width is the second dimension; the original read
             # im.shape[0] here, so width silently duplicated height.
             width = im.shape[1]
         tf.logging.info('Image size: %dx%d' % (width, height))
         image = reader.get_image(data[i], height, width,
                                  self.output['image_preprocessing_fn'])
         image = tf.expand_dims(image, 0)
         generated = model.net(image, training=False)
         generated = tf.cast(generated, tf.uint8)
         generated = tf.squeeze(generated, [0])
         # Save it to UCloud
         generated_file = 'generated/res.jpg'
         if os.path.exists('generated') is False:
             os.makedirs('generated')
         with open(generated_file, 'wb') as img:
             start_time = time.time()
             img.write(sess.run(tf.image.encode_jpeg(generated)))
             end_time = time.time()
             tf.logging.info('Elapsed time: %fs' % (end_time - start_time))
             tf.logging.info('Done. Please check %s.' % generated_file)
def main(_):
    """Stylize FLAGS.image_file with the model in FLAGS.model_file.

    The output path mirrors the input path with "content" replaced by "res";
    the final graph is also exported as a text MetaGraph to model.meta.
    """

    # Get image's height and width.
    height = 0
    width = 0
    with open(FLAGS.image_file, 'rb') as img:
        with tf.Session().as_default() as sess:
            if FLAGS.image_file.lower().endswith('png'):
                image = sess.run(tf.image.decode_png(img.read()))
            else:
                image = sess.run(tf.image.decode_jpeg(img.read()))
            height = image.shape[0]
            width = image.shape[1]
    tf.logging.info('Image size: %dx%d' % (width, height))

    with tf.Graph().as_default():
        with tf.Session().as_default() as sess:

            # Read image data.
            image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(
                FLAGS.loss_model, is_training=False)
            image = reader.get_image(FLAGS.image_file, height, width,
                                     image_preprocessing_fn)

            # Add batch dimension
            image = tf.expand_dims(image, 0)

            generated = model.net(image, training=False)
            generated = tf.cast(generated, tf.uint8)

            # Remove batch dimension
            generated = tf.squeeze(generated, [0])

            # Restore model variables.
            saver = tf.train.Saver(tf.global_variables(),
                                   write_version=tf.train.SaverDef.V1)
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])
            # Use absolute path
            FLAGS.model_file = os.path.abspath(FLAGS.model_file)
            saver.restore(sess, FLAGS.model_file)

            # Output name mirrors the input path with "content" -> "res".
            generated_file = FLAGS.image_file.replace("content", "res")
            # BUG FIX: the original only ensured a 'generated' directory, but
            # generated_file is derived from the *input* path and may live
            # elsewhere. Create the actual target directory instead.
            out_dir = os.path.dirname(generated_file)
            if out_dir and not os.path.exists(out_dir):
                os.makedirs(out_dir)

            # Generate and write image data to file.
            with open(generated_file, 'wb') as img:
                start_time = time.time()
                img.write(sess.run(tf.image.encode_jpeg(generated)))
                end_time = time.time()
                tf.logging.info('Elapsed time: %fs' % (end_time - start_time))
                tf.logging.info('Done. Please check %s.' % generated_file)
            tf.train.export_meta_graph(filename='model.meta', as_text=True)
예제 #4
0
def gen(imagename, png = False):
    """Stylize one image and write a PNG-encoded copy under generated/.

    Args:
        imagename: path to the input image; the output goes to
            generated/<parent-dir>/<filename>.
        png: True to decode the input as PNG, otherwise JPEG.
    """

    # Get image's height and width.
    height = 0
    width = 0
    with open(imagename, 'rb') as img:
        with tf.Session().as_default() as sess:
            if png:
                image = sess.run(tf.image.decode_png(img.read()))
            else:
                image = sess.run(tf.image.decode_jpeg(img.read()))
            height = image.shape[0]
            width = image.shape[1]
    tf.logging.info('Image size: %dx%d' % (width, height))

    with tf.Graph().as_default():
        with tf.Session().as_default() as sess:

            # Read image data.
            image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(
                FLAGS.loss_model,
                is_training=False)
            image = reader.get_image(imagename, height, width, image_preprocessing_fn)

            # Add batch dimension
            image = tf.expand_dims(image, 0)

            generated = model.net(image, training=False)
            generated = tf.cast(generated, tf.uint8)

            # Remove batch dimension
            generated = tf.squeeze(generated, [0])

            # Restore model variables.
            saver = tf.train.Saver(tf.global_variables(), write_version=tf.train.SaverDef.V1)
            sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
            # Use absolute path
            FLAGS.model_file = os.path.abspath(FLAGS.model_file)
            saver.restore(sess, FLAGS.model_file)

            # Make sure 'generated' directory exists.
            # NOTE(review): this assumes imagename has at least two '/'-separated
            # components ("dir/file"); a bare filename would make split("/")[-2]
            # pick an unrelated component — confirm against callers.
            generated_file = 'generated/' + imagename.split("/")[-2]+'/'+imagename[imagename.rfind("/")+1:]
            print(imagename)
            print(generated_file)
            if os.path.exists('generated') is False:
                os.makedirs('generated')
            if os.path.exists( 'generated/' + imagename.split("/")[-2]) is False:
                os.makedirs('generated/' + imagename.split("/")[-2])

            # Generate and write image data to file.
            # NOTE(review): the output keeps the input's extension but contains
            # PNG bytes (encode_png) — confirm downstream readers sniff content.
            with open(generated_file, 'wb') as img:
                start_time = time.time()
                img.write(sess.run(tf.image.encode_png(generated)))
                end_time = time.time()
                tf.logging.info('Elapsed time: %fs' % (end_time - start_time))

                tf.logging.info('Done. Please check %s.' % generated_file)
예제 #5
0
def generation(image_file, model_file, generated_file):
    """Stylize image_file with the checkpoint model_file.

    Writes JPEG bytes to generated_file (whose directory must already exist).

    Returns:
        dict with keys 'time' (seconds spent encoding + writing the result)
        and 'path' (generated_file).
    """

    result = dict()

    # Get image's height and width.
    height = 0
    width = 0
    with open(image_file, 'rb') as img:
        with tf.Session().as_default() as sess:
            if image_file.lower().endswith('png'):
                image = sess.run(tf.image.decode_png(img.read()))
            else:
                image = sess.run(tf.image.decode_jpeg(img.read()))
            height = image.shape[0]
            width = image.shape[1]
    print('Image size: %dx%d' % (width, height))

    with tf.Graph().as_default():
        with tf.Session().as_default() as sess:

            # Read image data.
            image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(
                loss_model, is_training=False)
            image = reader.get_image(image_file, height, width,
                                     image_preprocessing_fn)

            # Add batch dimension
            image = tf.expand_dims(image, 0)

            generated = model.net(image, training=False)
            generated = tf.cast(generated, tf.uint8)

            # Remove batch dimension
            generated = tf.squeeze(generated, [0])

            # Restore model variables.
            saver = tf.train.Saver(tf.global_variables(),
                                   write_version=tf.train.SaverDef.V1)
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])

            # Use absolute path
            model_file = os.path.abspath(model_file)
            saver.restore(sess, model_file)

            # Generate and write image data to file.
            with open(generated_file, 'wb') as img:
                start_time = time.time()
                img.write(sess.run(tf.image.encode_jpeg(generated)))
                end_time = time.time()
                result['time'] = (end_time - start_time)
                result['path'] = generated_file

        return result
예제 #6
0
def main(models, img_c):
    """Stylize image file img_c using checkpoint `models`.

    Builds a FLAGS-like object via arguement('vgg_16', models, img_c) instead
    of tf.app.flags (see the commented-out lines below) and writes the result
    to generated/res.jpg.
    """
    #tf.app.flags.DEFINE_string("image_file",img_c, "")

    #FLAGS = tf.app.flags.FLAGS
    FLAGS = arguement('vgg_16', models, img_c)
    # Get image's height and width.
    height = 0
    width = 0
    with open(FLAGS.image_file, 'rb') as img:
        with tf.Session().as_default() as sess:
            if FLAGS.image_file.lower().endswith('png'):
                image = sess.run(tf.image.decode_png(img.read()))
            else:
                image = sess.run(tf.image.decode_jpeg(img.read()))
            height = image.shape[0]
            width = image.shape[1]
    tf.logging.info('Image size: %dx%d' % (width, height))

    with tf.Graph().as_default():
        with tf.Session().as_default() as sess:

            # Read image data.
            image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(
                FLAGS.loss_model, is_training=False)
            image = reader.get_image(FLAGS.image_file, height, width,
                                     image_preprocessing_fn)

            # Add batch dimension
            image = tf.expand_dims(image, 0)

            generated = model.net(image, training=False)
            generated = tf.cast(generated, tf.uint8)

            # Remove batch dimension
            generated = tf.squeeze(generated, [0])

            # Restore model variables.
            saver = tf.train.Saver(tf.global_variables(),
                                   write_version=tf.train.SaverDef.V1)
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])
            # Use absolute path
            FLAGS.model_file = os.path.abspath(FLAGS.model_file)
            saver.restore(sess, FLAGS.model_file)

            # Make sure 'generated' directory exists.
            generated_file = 'generated/res.jpg'
            if os.path.exists('generated') is False:
                os.makedirs('generated')

            # Generate and write image data to file.
            with open(generated_file, 'wb') as img:
                #start_time = time.time()
                img.write(sess.run(tf.image.encode_jpeg(generated)))
예제 #7
0
def main(_):
    """Stylize FLAGS.image_file and write the JPEG result to generated/res.jpg."""
    # Get the image's height and width
    height = 0
    width = 0
    with open(FLAGS.image_file, 'rb') as img:
        with tf.Session().as_default() as sess:
            if FLAGS.image_file.lower().endswith('png'):
                image = sess.run(tf.image.decode_png(img.read()))
            else:
                image = sess.run(tf.image.decode_jpeg(img.read()))
            height = image.shape[0]
            width = image.shape[1]
    tf.logging.info('图片尺寸为: %dx%d' % (width, height))

    with tf.Graph().as_default():
        with tf.Session().as_default() as sess:

            # Read the image data
            image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(
                FLAGS.loss_model, is_training=False)
            image = reader.get_image(FLAGS.image_file, height, width,
                                     image_preprocessing_fn)

            # Add a batch dimension
            image = tf.expand_dims(image, 0)

            generated = model.net(image, training=False)
            generated = tf.cast(generated, tf.uint8)

            # Drop the listed size-1 dimensions (here: the batch axis)
            generated = tf.squeeze(generated, [0])

            # Saver for restoring the trained model variables
            saver = tf.train.Saver(tf.global_variables(),
                                   write_version=tf.train.SaverDef.V1)
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])
            """获取已训练好的model"""
            FLAGS.model_file = os.path.abspath(FLAGS.model_file)
            saver.restore(sess, FLAGS.model_file)
            """生成转换style后的image"""
            # Make sure the 'generated' directory exists.
            generated_file = 'generated/res.jpg'
            if os.path.exists('generated') is False:
                os.makedirs('generated')

            # Write the stylized image data to file
            with open(generated_file, 'wb') as img:
                start_time = time.time()
                img.write(sess.run(tf.image.encode_jpeg(generated)))
                end_time = time.time()
                tf.logging.info('Elapsed time: %fs' % (end_time - start_time))

                tf.logging.info('Done. Please check %s.' % generated_file)
예제 #8
0
def main(_):
    """Stylize FLAGS.image_file with FLAGS.model_file; output generated/res.jpg."""
    # Get image's height and width.
    height = 0
    width = 0
    with open(FLAGS.image_file, 'rb') as img:
        with tf.Session().as_default() as sess:
            if FLAGS.image_file.lower().endswith('png'):
                image = sess.run(tf.image.decode_png(img.read()))
            else:
                image = sess.run(tf.image.decode_jpeg(img.read()))
            height = image.shape[0]
            width = image.shape[1]
    tf.logging.info('Image size %d X %d' % (width, height))

    with tf.Graph().as_default():
        with tf.Session().as_default() as sess:
            #Read image data
            image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(
                FLAGS.loss_model, is_training=False)
            image = reader.get_image(FLAGS.image_file, height, width,
                                     image_preprocessing_fn)

            # Add batch dimension
            image = tf.expand_dims(image, 0)

            generated = model.net(image, training=False)
            generated = tf.cast(generated, tf.uint8)

            # Remove batch dimension
            generated = tf.squeeze(generated, [0])

            # Restore model variables.
            saver = tf.train.Saver(tf.global_variables(),
                                   write_version=tf.train.SaverDef.V1)
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])

            print('load model parameters...')
            # Use an absolute checkpoint path for the restore.
            FLAGS.model_file = os.path.abspath(FLAGS.model_file)
            saver.restore(sess, FLAGS.model_file)

            # Make sure 'generated' directory exists.
            generated_file = 'generated/res.jpg'
            if os.path.exists('generated') is False:
                os.mkdir('generated')

            # Generate and write image data to file.
            with open(generated_file, 'wb') as img:
                start_time = time.time()
                img.write(sess.run(tf.image.encode_jpeg(generated)))
                end_time = time.time()
                tf.logging.info('Elapsed time %fs' % (end_time - start_time))

                tf.logging.info('Done. please check %s.' % generated_file)
def get_content_features(content_path, content_layers):
    """Evaluate VGG activations of the content image at the given layers.

    Returns the activations for each name in content_layers, followed by
    the batched content image itself as the final element.
    """
    with tf.Graph().as_default() as g:
        content = reader.get_image(content_path, FLAGS.IMAGE_SIZE)
        batched = tf.expand_dims(content, 0)
        net, _ = vgg.net(FLAGS.VGG_PATH, batched)
        wanted = [net[name] for name in content_layers]
        with tf.Session() as sess:
            return sess.run(wanted + [batched])
예제 #10
0
def get_style_features(style_paths, style_layers):
    """Compute Gram-matrix style features for the given style images.

    Args:
        style_paths: iterable of style image file paths.
        style_layers: VGG layer names whose Gram matrices are returned.

    Returns:
        list of evaluated Gram matrices, one per entry in style_layers.
    """
    with tf.Graph().as_default() as g:
        # BUG FIX: style_paths is a list of paths, but the original passed the
        # whole list to reader.get_image in a single call; load each image and
        # stack them, matching the sibling get_style_features implementations.
        images = tf.stack(
            [reader.get_image(path, 256) for path in style_paths])
        net, _ = vgg.net(images - reader.mean_pixel)
        features = [gram(net[layer]) for layer in style_layers]

        with tf.Session() as sess:
            return sess.run(features)
def get_content_features(content_path, content_layers):
    """Run the content image through VGG and return the requested layer
    activations, with the batched image tensor appended as the last element."""
    with tf.Graph().as_default() as g:
        raw = reader.get_image(content_path, FLAGS.IMAGE_SIZE)
        image = tf.expand_dims(raw, 0)
        net, _ = vgg.net(FLAGS.VGG_PATH, image)
        fetches = []
        for name in content_layers:
            fetches.append(net[name])
        fetches.append(image)
        with tf.Session() as sess:
            return sess.run(fetches)
예제 #12
0
def gen_single():
    """ Transfer an image.

    Loads the latest checkpoint from models/<model_name><suffix>, runs the
    style network over FLAGS.content_image, and saves the result under
    output/<model_name><suffix>/<output><step>-<timestamp>.jpg.
    """

    content_images = reader.get_image(FLAGS.content_image, FLAGS.image_size)
    images = tf.stack([content_images])
    generated_images = model.net(images / 255., if_train=False)

    # Add the dataset mean back and clamp to valid uint8 range.
    output_format = tf.saturate_cast(generated_images + reader.mean_pixel,
                                     tf.uint8)

    # Output path

    model_path = os.path.join('models',
                              FLAGS.model_name + utils.get_model_suffix())
    ### model_p = model_p if not model_p.endswith("/") else model_p[:-1]
    ### model_p = os.path.split(model_p)
    output_path = os.path.join("output",
                               FLAGS.model_name + utils.get_model_suffix())

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    with tf.Session() as sess:
        file_ = tf.train.latest_checkpoint(model_path)
        if not file_:
            print('Could not find trained model in {}'.format(model_path))
            return
        print('Using model from {}'.format(file_))

        # Get trained step (keeps the leading '-' from the checkpoint name)
        index = file_.rfind("-")
        trained_step = file_[index:]

        saver = tf.train.Saver()
        saver.restore(sess, file_)

        print("Style image:", FLAGS.content_image)
        start_time = time.time()

        # Run inference
        images_t = sess.run(output_format)

        elapsed = time.time() - start_time
        print('Time: {}'.format(elapsed))

        out_path = os.path.join(
            output_path,
            FLAGS.output + trained_step + '-' + str(int(time.time())) + '.jpg')
        print("Save result in: ", out_path)
        misc.imsave(out_path, images_t[0])

        print('------------------------------------')
        print('Finished!')

    return
def get_style_features(style_paths, style_layers):
    """Gram-matrix style features of the style images at the given VGG layers."""
    with tf.Graph().as_default() as g:
        size = int(round(FLAGS.IMAGE_SIZE * FLAGS.STYLE_SCALE))
        loaded = [reader.get_image(p, size) for p in style_paths]
        images = tf.pack(loaded)
        net, _ = vgg.net(FLAGS.VGG_PATH, images)
        grams = [gram(net[name]) for name in style_layers]
        with tf.Session() as sess:
            return sess.run(grams)
def get_style_features(style_paths, style_layers):
    """Evaluate the Gram matrices of VGG responses to the style images."""
    with tf.Graph().as_default() as g:
        scaled_size = int(round(FLAGS.IMAGE_SIZE * FLAGS.STYLE_SCALE))
        images = tf.pack([reader.get_image(path, scaled_size)
                          for path in style_paths])
        net, _ = vgg.net(FLAGS.VGG_PATH, images)
        features = []
        for name in style_layers:
            features.append(gram(net[name]))
        with tf.Session() as sess:
            result = sess.run(features)
        return result
예제 #15
0
def get_style_features(style_paths, style_layers, net_type):
    """Gram-matrix style targets for the given style images and VGG layers."""
    with tf.Graph().as_default() as g:
        edge = int(round(FLAGS.image_size * FLAGS.style_scale))
        stacked = tf.stack([reader.get_image(p, edge) for p in style_paths])
        net, _ = vgg.net(FLAGS.vgg_path, stacked, net_type)
        grams = [model.gram(net[name], FLAGS.batch_size)
                 for name in style_layers]
        with tf.Session() as sess:
            return sess.run(grams)
예제 #16
0
def main(_):
    """Stylize FLAGS.image_file with FLAGS.model_file; output generated/res.jpg."""

    # Get image's height and width.
    height = 0
    width = 0
    # Decode once only to learn the spatial dimensions.
    with open(FLAGS.image_file, 'rb') as img:
        with tf.Session().as_default() as sess:
            if FLAGS.image_file.lower().endswith('png'):
                image = sess.run(tf.image.decode_png(img.read()))
            else:
                image = sess.run(tf.image.decode_jpeg(img.read()))
            height = image.shape[0]
            width = image.shape[1]
    tf.logging.info('Image size: %dx%d' % (width, height))

    with tf.Graph().as_default():
        with tf.Session().as_default() as sess:

            # Read image data.
            image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(
                FLAGS.loss_model,
                is_training=False)
            image = reader.get_image(FLAGS.image_file, height, width, image_preprocessing_fn)

            # Add batch dimension
            image = tf.expand_dims(image, 0)

            generated = model.net(image, training=False)
            generated = tf.cast(generated, tf.uint8)

            # Remove batch dimension
            generated = tf.squeeze(generated, [0])

            # Restore model variables.
            saver = tf.train.Saver(tf.global_variables(), write_version=tf.train.SaverDef.V1)
            sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
            # Use absolute path
            FLAGS.model_file = os.path.abspath(FLAGS.model_file)
            saver.restore(sess, FLAGS.model_file)

            # Make sure 'generated' directory exists.
            generated_file = 'generated/res.jpg'
            if os.path.exists('generated') is False:
                os.makedirs('generated')

            # Generate and write image data to file.
            with open(generated_file, 'wb') as img:
                start_time = time.time()
                img.write(sess.run(tf.image.encode_jpeg(generated)))
                end_time = time.time()
                tf.logging.info('Elapsed time: %fs' % (end_time - start_time))

                tf.logging.info('Done. Please check %s.' % generated_file)
예제 #17
0
def get_style_features(style_paths, style_layers, image_size, style_scale,
                       vgg_path):
    """Compute Gram matrices of mean-subtracted VGG activations for the
    given style images, scaled to image_size * style_scale."""
    with tf.Graph().as_default(), tf.Session() as sess:
        size = int(round(image_size * style_scale))
        batch = [reader.get_image(path, size) for path in style_paths]
        images = tf.stack(batch)
        net, _ = vgg.net(vgg_path, images - vgg.MEAN_PIXEL)
        grams = [gram(net[name]) for name in style_layers]
        return sess.run(grams)
예제 #18
0
def freeze_model():
    """ freeze graph.

    Builds the style network behind named input/output nodes, restores the
    latest checkpoint, runs a sanity-check inference, writes the GraphDef to
    disk, and then calls freeze_graph to bake the variables into constants.
    """
    input_node_names = "input_node"
    output_node_names = "output_node"

    content_images = reader.get_image(FLAGS.content_image, FLAGS.image_size)
    images = tf.pack([content_images])

    # Named placeholder so unfreeze_model() can look up "input_node:0" later.
    input_images = tf.placeholder(dtype=tf.float32, name=input_node_names)
    generated_images = model.net(input_images / 255., if_train=False)

    output_format = tf.saturate_cast(generated_images + reader.mean_pixel,
                                     tf.uint8,
                                     name=output_node_names)

    with tf.Session() as sess:
        checkpoint_file = tf.train.latest_checkpoint(FLAGS.model)
        if not checkpoint_file:
            print('Could not find trained model in {}'.format(FLAGS.model))
            return
        print('Using model from {}'.format(checkpoint_file))

        saver = tf.train.Saver()
        saver.restore(sess, checkpoint_file)

        # Smoke-test inference; the result itself is discarded.
        in_images = sess.run(images)
        images_t = sess.run(output_format, feed_dict={input_images: in_images})

        # Save graph
        tf.train.write_graph(sess.graph.as_graph_def(), FLAGS.model,
                             FLAGS.in_graph_name)

    checkpoint_prefix = os.path.join(FLAGS.model, "saved_checkpoint")
    checkpoint_state_name = "checkpoint_state"

    # We save out the graph to disk, and then call the const conversion
    # routine.
    input_graph_path = os.path.join(FLAGS.model, FLAGS.in_graph_name)
    input_saver_def_path = ""
    # write_graph above emits a text-format graph, so input_binary is False.
    input_binary = False

    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_graph_path = os.path.join(FLAGS.model, FLAGS.out_graph_name)
    clear_devices = False

    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                              input_binary, checkpoint_file, output_node_names,
                              restore_op_name, filename_tensor_name,
                              output_graph_path, clear_devices, "")

    print('------------------------------------')
    print('Finished!')
예제 #19
0
def style_transform(style, model_file, img_file, img_filename, result_file):
    """Stylize img_file with model_file, save it under static/img/generated/,
    and record the transformation in the Cassandra `imagerecord` table.

    Args:
        style: style name stored in the DB record.
        model_file: checkpoint path of the trained style model.
        img_file: path of the input image to transform.
        img_filename: original input file name stored in the DB record.
        result_file: output file name (relative to static/img/generated/).
    """
    # Probe the input image for its dimensions.
    height = 0
    width = 0
    with open(img_file, 'rb') as img:
        with tf.Session().as_default() as sess:
            if img_file.lower().endswith('png'):
                image = sess.run(tf.image.decode_png(img.read()))
            else:
                image = sess.run(tf.image.decode_jpeg(img.read()))
            height = image.shape[0]
            width = image.shape[1]
    print('Image size: %dx%d' % (width, height))

    with tf.Graph().as_default():
        with tf.Session().as_default() as sess:
            image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(
                FLAGS.loss_model, is_training=False)
            image = reader.get_image(img_file, height, width,
                                     image_preprocessing_fn)
            image = tf.expand_dims(image, 0)
            generated = model.net(image, training=False)
            generated = tf.cast(generated, tf.uint8)
            generated = tf.squeeze(generated, [0])
            saver = tf.train.Saver(tf.global_variables())
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])
            # Use an absolute checkpoint path; mirrored into FLAGS to match
            # the original behavior other code may rely on.
            FLAGS.model_file = os.path.abspath(model_file)
            saver.restore(sess, FLAGS.model_file)
            generated_file = 'static/img/generated/' + result_file
            if os.path.exists('static/img/generated') is False:
                os.makedirs('static/img/generated')
            # (Removed unused locals img_data/result_data from the original.)
            with open(generated_file, 'wb') as img:
                start_time = time.time()
                img.write(sess.run(tf.image.encode_jpeg(generated)))
                end_time = time.time()
                now = datetime.datetime.now()
                print('Elapsed time: %fs' % (end_time - start_time))
                print('Done. Please check %s.' % generated_file)

    # Record the transformation; `now` was captured right after encoding.
    cluster = Cluster(contact_points=['127.0.0.1'], port=9042)
    session = cluster.connect()
    log.info("setting keyspace...")
    session.set_keyspace(KEYSPACE)
    session.execute(
        """
        INSERT INTO imagerecord (time,style,input_name,output_name)
        VALUES (%s,%s,%s,%s)
        """, (now, style, img_filename, result_file))
예제 #20
0
def get_content_features(content_layers):
    """Return VGG activations of the module-level content image.

    Reads the globals ``content_path`` and ``IMAGE_SIZE``, feeds the image
    through ``vgg.net`` and returns the activations for each requested layer
    followed by the image tensor itself.
    """
    with tf.Graph().as_default() as g:
        # tf.expand_dims adds a batch axis at position 0: [2,3] -> [1,2,3]
        image = tf.expand_dims(reader.get_image(content_path, IMAGE_SIZE), 0)
        net, _ = vgg.net(image)
        layers = []
        for layer in content_layers:
            # net is a dict, e.g. net["conv1_1"] -> activations of conv1_1
            layers.append(net[layer])

        with tf.Session() as sess:
            # Return the features plus the image
            return sess.run(layers + [image])
예제 #21
0
def get_style_features(style_paths, style_layers):
    """Gram-matrix style features, evaluated in a session configured to use
    the full per-process GPU memory fraction."""
    with tf.Graph().as_default() as g:
        size = int(round(FLAGS.IMAGE_SIZE * FLAGS.STYLE_SCALE))
        images = tf.pack([reader.get_image(p, size) for p in style_paths])
        net, _ = vgg.net(FLAGS.VGG_PATH, images)
        grams = []
        for name in style_layers:
            grams.append(gram(net[name]))
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 1.0
        with tf.Session(config=config) as sess:
            return sess.run(grams)
예제 #22
0
def get_style_features(style_paths, style_layers):
    """Return the Gram matrices of mean-subtracted VGG activations for the
    style images, resized to IMAGE_SIZE."""
    with tf.Graph().as_default() as g:
        side = int(round(IMAGE_SIZE))
        images = tf.stack([reader.get_image(path, side)
                           for path in style_paths])

        net, _ = vgg.net(images - reader.mean_pixel)
        fetches = [gram(net[name]) for name in style_layers]

        with tf.Session() as sess:
            return sess.run(fetches)
예제 #23
0
def neualstyle(model_file, image_file):
    """Stylize image_file with the checkpoint in model_file and save the JPEG
    result under <cwd>/wx_robot/handler/neuralstyle/neuralpic/."""

    # Initialize parameters
    loss_model = 'vgg_16'
    height = 0
    width = 0

    # Read the image and decode it into a tensor
    with open(image_file, 'rb') as img:
        with tf.Session().as_default() as sess:
            if image_file.lower().endswith('png'):
                # Decode to a tensor of shape [height, width, channels]
                image = sess.run(tf.image.decode_png(img.read()))
            else:
                image = sess.run(tf.image.decode_jpeg(img.read()))
            height = image.shape[0]
            width = image.shape[1]

    with tf.Graph().as_default():
        with tf.Session().as_default() as sess:
            # Obtain the preprocessing function for the loss model
            image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(
                loss_model, is_training=False)
            # Get the preprocessed image tensor
            image = reader.get_image(image_file, height, width,
                                     image_preprocessing_fn)
            # Add a batch dimension: [height,width,3] -> [1,height,width,3]
            image = tf.expand_dims(image, 0)

            # Build the style-transfer network
            generated = model.net(image, training=False)
            generated = tf.cast(generated, tf.uint8)

            # Remove the added batch dimension
            generated = tf.squeeze(generated, [0])

            # Saver for restoring the model variables
            saver = tf.train.Saver(tf.global_variables(),
                                   write_version=tf.train.SaverDef.V1)
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])
            saver.restore(sess, model_file)

            # Save the stylized image
            generated_name = image_file.split('/')[-1]
            generated_file = os.getcwd(
            ) + '/wx_robot/handler/neuralstyle/neuralpic/' + generated_name
            with open(generated_file, 'wb') as img:
                img.write(sess.run(tf.image.encode_jpeg(generated)))
예제 #24
0
def main(_):
    """Stylize FLAGS.image_file with a trained transform net; save generated/res.jpg.

    The underscore parameter is the argv list that tf.app.run() passes to
    main; it is unused here.
    """
    # Decode the image once, only to obtain its height and width.
    width = 0
    height = 0
    with open(FLAGS.image_file, 'rb') as img:
        # BUG FIX: as_default is a method and must be called; the original
        # passed the bound method object to the with-statement.
        with tf.Session().as_default() as sess:
            if FLAGS.image_file.lower().endswith('png'):
                image = sess.run(tf.image.decode_png(img.read()))
            else:
                # BUG FIX: decode_jpge -> decode_jpeg (typo, AttributeError).
                image = sess.run(tf.image.decode_jpeg(img.read()))
            height = image.shape[0]
            width = image.shape[1]
    tf.logging.info('image size is : %dx%d' % (height, width))

    with tf.Graph().as_default():
        # BUG FIX: the original wrote tf.Session.as_default() without ever
        # constructing the Session.
        with tf.Session().as_default() as sess:
            # Preprocessing function matched to the evaluation loss network.
            image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(
                FLAGS.loss_model, is_training=False)
            image = reader.get_image(FLAGS.image_file, height, width,
                                     image_preprocessing_fn)

            # Add batch dimension.  BUG FIX: tf.expand_dims, not
            # np.expand_dims -- the input is a tf.Tensor, not an ndarray.
            image = tf.expand_dims(image, 0)

            # Transform network in inference mode; cast to uint8 pixels.
            generated = model.net(image, training=False)
            generated = tf.cast(generated, tf.uint8)

            # Remove batch dimension.  BUG FIX: tf.squeeze, not np.squeeze.
            generated = tf.squeeze(generated, [0])

            # Restore model variables.  BUG FIX: tf.train.Saver (the class),
            # not tf.train.saver.  Initialize first so every variable exists
            # before the checkpoint restore overwrites it (same pattern as
            # the sibling inference scripts).
            saver = tf.train.Saver(tf.global_variables(),
                                   write_version=tf.train.SaverDef.V1)
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])

            # Use an absolute path for the checkpoint file.
            FLAGS.model_file = os.path.abspath(FLAGS.model_file)
            saver.restore(sess, FLAGS.model_file)

            # Make sure the output directory exists.
            generated_file = 'generated/res.jpg'
            if not os.path.exists('generated'):
                os.makedirs('generated')

            # Generate and write image data to file.
            # BUG FIX: tf.image.encode_jpeg, not tf.encode_jpeg.
            with open(generated_file, 'wb') as img:
                start_time = time.time()
                img.write(sess.run(tf.image.encode_jpeg(generated)))
                end_time = time.time()
                tf.logging.info('Elapsed time: %fs' % (end_time - start_time))

                tf.logging.info('Done. Please check %s.' % generated_file)
예제 #25
0
def generate():
    """Run the image-transform network over FLAGS.input_image.

    Decodes FLAGS.image_path/FLAGS.input_image to learn its dimensions,
    restores the trained weights from FLAGS.model_path + FLAGS.model_file,
    and writes the stylized JPEG to FLAGS.image_path + FLAGS.output_image.
    """
    # First pass: decode the file only to learn its pixel dimensions.
    height, width = 0, 0
    src_path = FLAGS.image_path + FLAGS.input_image
    with open(src_path, 'rb') as img:
        with tf.Session().as_default() as sess:
            raw = img.read()
            if FLAGS.input_image.lower().endswith('png'):
                decoded = sess.run(tf.image.decode_png(raw))
            else:
                decoded = sess.run(tf.image.decode_jpeg(raw))
            height, width = decoded.shape[0], decoded.shape[1]
    tf.logging.info('Image size: %dx%d' % (width, height))

    with tf.Graph().as_default():
        with tf.Session().as_default() as sess:
            # Preprocessing matched to the loss network.
            img_prep_fn, _ = preprocessing_factory.get_preprocessing(
                FLAGS.loss_model, is_training=False)
            net_input = reader.get_image(src_path, height, width, img_prep_fn)
            # Batch dimension in, transform network, batch dimension out.
            net_input = tf.expand_dims(net_input, 0)
            gen_img = tf.cast(model.img_trans_net(net_input, training=False),
                              tf.uint8)
            gen_img = tf.squeeze(gen_img, [0])

            # Initialize then restore so the checkpoint defines every weight.
            saver = tf.train.Saver(tf.global_variables(),
                                   write_version=tf.train.SaverDef.V2)
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])
            # Use an absolute path for the checkpoint.
            FLAGS.model_file = os.path.abspath(FLAGS.model_path +
                                               FLAGS.model_file)
            saver.restore(sess, FLAGS.model_file)

            # Make sure the output directory exists before writing.
            gen_file = FLAGS.image_path + FLAGS.output_image
            if not os.path.exists(FLAGS.image_path):
                os.makedirs(FLAGS.image_path)

            # Encode and write the stylized image, timing the whole step.
            with open(gen_file, 'wb') as img:
                start = time.time()
                img.write(sess.run(tf.image.encode_jpeg(gen_img)))
                elapsed = time.time() - start
                print('Elapsed time: {}s'.format(elapsed))
                tf.logging.info('Elapsed time: {}s'.format(elapsed))
                tf.logging.info('Done. Please check {}.'.format(gen_file))
예제 #26
0
 def infer(self,model_file,image_file):
     """Stylize a base64-encoded image and return the result as base64 text.

     image_file is first materialized on disk via decode_img (defined
     elsewhere -- presumably it writes the payload to a file and returns
     its path; TODO confirm).  The stylized JPEG is written under ./tmp/,
     read back, base64-encoded, and the temporary input file is removed.
     """
     image_file=decode_img(image_file)
     # OpenCV returns an array of shape (height, width, channels).
     image1 = cv2.imread(image_file)
     height = image1.shape[0]
     width = image1.shape[1]
     # Grow GPU memory on demand instead of reserving it all up front.
     config = tf.ConfigProto()
     config.gpu_options.allow_growth = True
     print('Image size: %dx%d' % (width, height))
     with tf.Graph().as_default():
         with tf.Session(config=config).as_default() as sess:

             # Read image data.
             image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(
                 self.loss_model,
                 is_training=False)
             image = reader.get_image(image_file, height, width, image_preprocessing_fn)

             # Add batch dimension
             image = tf.expand_dims(image, 0)

             generated = model.net(image, training=False)
             generated = tf.cast(generated, tf.uint8)

             # Remove batch dimension
             generated = tf.squeeze(generated, [0])

             # Restore model variables (initialize first so restore can
             # overwrite every variable with checkpoint values).
             saver = tf.train.Saver(tf.global_variables(), write_version=tf.train.SaverDef.V1)
             sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
             # Use absolute path
             model_file = os.path.abspath(model_file)
             saver.restore(sess, model_file)

             # Make sure 'generated' directory exists.
             # Output name is a second-resolution timestamp, so concurrent
             # calls within the same second would collide.
             generated_file ='./tmp/'+time.strftime('%Y_%m_%d_%H_%M_%S',time.localtime(time.time()))+'.jpg'
             if os.path.exists('tmp') is False:
                 os.makedirs('tmp')

             # Generate and write image data to file.
             with open(generated_file, 'wb') as img:
                 start_time = time.time()
                 img.write(sess.run(tf.image.encode_jpeg(generated)))
                 end_time = time.time()
                 print('Elapsed time: %fs' % (end_time - start_time))
             os.remove(image_file)
             # Read the stylized file back and return it base64-encoded.
             with open(generated_file,'rb') as f:
                 style_base64=base64.b64encode(f.read())
                 s = style_base64.decode()
                 #str(style_base64, encoding = "utf8")  
         return s  
예제 #27
0
def main(argv=None):
    """Optimize an image directly (Gatys-style) to match content and style.

    Builds VGG-16 as the loss network, defines content, style and
    total-variation losses over a trainable generated image, runs Adam for
    FLAGS.NUM_ITERATIONS steps, and writes the result to out.png.
    """
    network_fn = nets_factory.get_network_fn('vgg_16',
                                             num_classes=1,
                                             is_training=False)
    image_preprocessing_fn, image_unprocessing_fn = preprocessing_factory.get_preprocessing(
        'vgg_16', is_training=False)

    preprocess_content_image = reader.get_image(FLAGS.CONTENT_IMAGE,
                                                FLAGS.IMAGE_SIZE)

    # Add batch dimension for the VGG loss network.
    preprocess_content_image = tf.expand_dims(preprocess_content_image, 0)
    _, endpoints_dict = network_fn(preprocess_content_image,
                                   spatial_squeeze=False)

    # Log the structure of loss network
    tf.logging.info(
        'Loss network layers(You can define them in "content_layers" and "style_layers"):'
    )
    for key in endpoints_dict:
        tf.logging.info(key)
    """Build Losses"""
    # style_features_t = losses.get_style_features(endpoints_dict, FLAGS.STYLE_LAYERS)
    content_loss, generated_image = losses.content_loss(
        endpoints_dict, FLAGS.CONTENT_LAYERS, FLAGS.CONTENT_IMAGE)
    # BUG FIX: FLAGS.style_layers -> FLAGS.STYLE_LAYERS.  Every other flag
    # in this function is upper-case (CONTENT_LAYERS, STYLE_IMAGE, ...), so
    # the lower-case spelling would raise an AttributeError.
    style_loss, style_loss_summary = losses.style_loss(endpoints_dict,
                                                       FLAGS.STYLE_LAYERS,
                                                       FLAGS.STYLE_IMAGE)
    tv_loss = losses.total_variation_loss(
        generated_image)  # use the unprocessed image

    loss = FLAGS.STYLE_WEIGHT * style_loss + FLAGS.CONTENT_WEIGHT * content_loss + FLAGS.TV_WEIGHT * tv_loss
    train_op = tf.train.AdamOptimizer(FLAGS.LEARNING_RATE).minimize(loss)

    # Undo the mean-pixel preprocessing and encode the final image as PNG.
    output_image = tf.image.encode_png(
        tf.saturate_cast(
            tf.squeeze(generated_image) + reader.mean_pixel, tf.uint8))

    with tf.Session() as sess:
        # Modernized: global_variables_initializer replaces the deprecated
        # initialize_all_variables already avoided elsewhere in this file.
        sess.run(tf.global_variables_initializer())
        start_time = time.time()
        for step in range(FLAGS.NUM_ITERATIONS):
            _, loss_t, cl, sl = sess.run(
                [train_op, loss, content_loss, style_loss])
            # Report per-step wall time alongside the loss components.
            elapsed = time.time() - start_time
            start_time = time.time()
            print(step, elapsed, loss_t, cl, sl)
        image_t = sess.run(output_image)
        with open('out.png', 'wb') as f:
            f.write(image_t)
예제 #28
0
def get_style_features():
    """Compute the Gram matrix of the style image at each STYLE_LAYERS layer.

    Builds VGG over the module-level style image (style_path, IMAGE_SIZE)
    and returns the evaluated Gram matrices, one per configured style layer.
    """
    with tf.Graph().as_default() as g:
        side = int(round(IMAGE_SIZE))
        # Batch containing the single style image.
        batch = tf.stack([reader.get_image(style_path, side)])
        net, _ = vgg.net(batch)
        # net maps layer names (e.g. "relu1_1") to their activations;
        # collect the Gram matrix for every configured style layer.
        grams = [gram(net[layer]) for layer in STYLE_LAYERS]

        with tf.Session() as sess:
            return sess.run(grams)
예제 #29
0
def main(_):
    """Stylize FLAGS.image_file and save the result under generated/.

    The output name combines the input image's base name with the
    checkpoint's base name, keeping the input's file extension.
    """
    # Decode once, only to learn the source dimensions.
    height = 0
    width = 0
    with open(FLAGS.image_file, 'rb') as img:
        with tf.Session().as_default() as sess:
            data = img.read()
            if FLAGS.image_file.lower().endswith('png'):
                frame = sess.run(tf.image.decode_png(data))
            else:
                frame = sess.run(tf.image.decode_jpeg(data))
            height, width = frame.shape[0], frame.shape[1]
    tf.logging.info('Image size: %dx%d' % (width, height))

    with tf.Graph().as_default():
        with tf.Session().as_default() as sess:
            # Build the preprocessed input and the transform network.
            image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(
                FLAGS.loss_model, is_training=False)
            net_in = reader.get_image(FLAGS.image_file, height, width,
                                      image_preprocessing_fn)
            net_in = tf.expand_dims(net_in, 0)
            generated = model.net(net_in, training=False)
            generated = tf.squeeze(generated, [0])

            # Initialize everything, then restore the trained weights on top.
            saver = tf.train.Saver(tf.global_variables())
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])
            FLAGS.model_file = os.path.abspath(FLAGS.model_file)
            saver.restore(sess, FLAGS.model_file)

            # Run the network, then cast the resulting array to uint8.
            start_time = time.time()
            generated = sess.run(generated)
            generated = tf.cast(generated, tf.uint8)
            end_time = time.time()
            tf.logging.info('Elapsed time: %fs' % (end_time - start_time))

            # Output name: <image-stem>-<model-stem><image-ext>.
            filename_image, file_extension = os.path.splitext(
                os.path.basename(FLAGS.image_file))
            filename_model, _model_ext = os.path.splitext(
                os.path.basename(FLAGS.model_file))
            generated_file = 'generated/' + filename_image + "-" + filename_model + file_extension
            if not os.path.exists('generated'):
                os.makedirs('generated')
            with open(generated_file, 'wb') as img:
                img.write(sess.run(tf.image.encode_jpeg(generated)))
                tf.logging.info('Done. Please check %s.' % generated_file)
예제 #30
0
def img(image_file='img/test.jpg',
        loss_model='vgg_16',
        image_size=256,
        model_file='models/wave.ckpt-done'):
    """Stylize image_file with model_file and write generated/test3.jpg.

    image_size is accepted for interface compatibility; the network runs at
    the source image's native resolution.
    """
    # Decode the file once only to learn its native resolution.
    height = 0
    width = 0
    with open(image_file, 'rb') as img:
        with tf.Session().as_default() as sess:
            contents = img.read()
            if image_file.lower().endswith('png'):
                decoded = sess.run(tf.image.decode_png(contents))
            else:
                decoded = sess.run(tf.image.decode_jpeg(contents))
            height, width = decoded.shape[0], decoded.shape[1]
    with tf.Graph().as_default():
        with tf.Session().as_default() as sess:
            # Preprocess to the loss network's expectations.
            image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(
                loss_model, is_training=False)
            tensor = reader.get_image(image_file, height, width,
                                      image_preprocessing_fn)
            # Batch dimension in, transform, uint8, batch dimension out.
            tensor = tf.expand_dims(tensor, 0)
            generated = tf.cast(model.net(tensor, training=False), tf.uint8)
            generated = tf.squeeze(generated, [0])
            # Initialize everything, then restore the checkpoint weights.
            saver = tf.train.Saver(tf.global_variables(),
                                   write_version=tf.train.SaverDef.V1)
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])
            saver.restore(sess, os.path.abspath(model_file))
            # Ensure the output directory exists, then write the JPEG.
            generated_file = 'generated/test3.jpg'
            if not os.path.exists('generated'):
                os.makedirs('generated')
            with open(generated_file, 'wb') as img:
                img.write(sess.run(tf.image.encode_jpeg(generated)))
예제 #31
0
def get_style_feature():
    """Return (features, grams) of the style image for every style layer.

    Runs the configured loss network over the preprocessed style image and
    evaluates both the raw activations and their Gram matrices (batch
    dimension removed) for each layer in loss_config.style_loss_dict.
    """
    with tf.Graph().as_default():
        # Preprocess the style image the way the loss network expects.
        preprocess_func, unprocess_func = preprocessing.preprocessing_factory.get_preprocessing(
            args.loss_model, is_training=False)
        style_img = reader.get_image(args.style_dir, args.size, args.size,
                                     preprocess_func)
        # Batch of one.
        style_img = tf.expand_dims(style_img, 0)

        # Loss network in inference mode; keep spatial dims for the Grams.
        loss_model = nets.nets_factory.get_network_fn(args.loss_model,
                                                      1,
                                                      is_training=False)
        _, end_dict = loss_model(style_img, spatial_squeeze=False)

        # Callable that restores the pretrained loss-net weights into a session.
        init_loss_model = load_pretrained_weight(args.loss_model)

        features = []
        feature_grams = []
        for layer in loss_config.style_loss_dict[args.loss_model]:
            activation = end_dict[layer]
            layer_gram = losses.gram_matrix(activation)
            # Drop the batch dimension from both tensors.
            features.append(tf.squeeze(activation, [0]))
            feature_grams.append(tf.squeeze(layer_gram, [0]))

        with tf.Session() as sess:
            init_loss_model(sess)
            ff, gg = sess.run([features, feature_grams])
            return ff, gg
예제 #32
0
def main(_):
    """Stylize FLAGS.image_file with every model listed in demo_list.

    For each head, restores models/<head>/fast-style-model.ckpt-2000 and
    writes the stylized image to generated/result_0338/<head>.jpg.
    """
    height = 0
    width = 0
    for head in demo_list:
        model_file = 'models/%s/fast-style-model.ckpt-2000' % head
        # Decode the source image (per model) to learn its dimensions.
        with open(FLAGS.image_file, 'rb') as img:
            with tf.Session().as_default() as sess:
                if FLAGS.image_file.lower().endswith('png'):
                    image = sess.run(tf.image.decode_png(img.read()))
                else:
                    image = sess.run(tf.image.decode_jpeg(img.read()))
                height = image.shape[0]
                width = image.shape[1]
        tf.logging.info('Image size: %dx%d' % (width, height))

        # A fresh graph per model so checkpoints never clash.
        with tf.Graph().as_default():
            with tf.Session(config=tf.ConfigProto(
                    gpu_options=gpu_options)).as_default() as sess:
                image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(
                    FLAGS.loss_model, is_training=False)
                image = reader.get_image(FLAGS.image_file, height, width,
                                         image_preprocessing_fn)
                image = tf.expand_dims(image, 0)
                generated = model.net(image, training=False)
                generated = tf.squeeze(generated, [0])
                # Modernized: tf.all_variables / tf.initialize_all_variables /
                # tf.initialize_local_variables are deprecated aliases of the
                # calls below (used by the other inference scripts here).
                saver = tf.train.Saver(tf.global_variables())
                sess.run([
                    tf.global_variables_initializer(),
                    tf.local_variables_initializer()
                ])
                #name = FLAGS.model_file.split('/')[1]
                model_file = os.path.abspath(model_file)
                saver.restore(sess, model_file)

                # Forward pass, then cast the ndarray result to uint8 pixels.
                start_time = time.time()
                generated = sess.run(generated)
                generated = tf.cast(generated, tf.uint8)
                end_time = time.time()
                tf.logging.info('Elapsed time: %fs' % (end_time - start_time))
                generated_file = 'generated/result_0338/%s.jpg' % head
                #generated_file = 'generated/%s.jpg'%head
                # BUG FIX: the original only created 'generated', but the
                # output lives in generated/result_0338/ -- create the
                # actual parent directory of the output file.
                out_dir = os.path.dirname(generated_file)
                if os.path.exists(out_dir) is False:
                    os.makedirs(out_dir)
                with open(generated_file, 'wb') as img:
                    img.write(sess.run(tf.image.encode_jpeg(generated)))
                    tf.logging.info('Done. Please check %s.' % generated_file)
예제 #33
0
def get_content_features(content_layers):
    """Collect Gram matrices of content_layers for GrayPics/0..999.jpg.

    For each of the 1000 grayscale images, runs VGG over the image and
    appends the Gram matrix of every requested content layer; returns the
    full list evaluated in one session run.
    """
    with tf.Graph().as_default() as g:
        fea_content = []
        for idx in range(1000):
            img_path = path.join("GrayPics/", str(idx) + ".jpg")
            # Batch of one image (tf.stack adds the batch dimension).
            batch = tf.stack([reader.get_image(img_path, 256)])
            # net maps layer names (e.g. "conv1_1") to their activations.
            net, _ = vgg.net(batch)
            fea_content.extend(gram(net[layer]) for layer in content_layers)

        with tf.Session() as sess:
            # Evaluate every collected Gram matrix at once.
            return sess.run(fea_content)
예제 #34
0
def main(_):
    """Stylize FLAGS.image_file and write the result to FLAGS.target_file."""
    # Decode the file once, only to obtain the source dimensions.
    height = 0
    width = 0
    with open(FLAGS.image_file, 'rb') as img:
        with tf.Session().as_default() as sess:
            raw = img.read()
            decoder = (tf.image.decode_png
                       if FLAGS.image_file.lower().endswith('png')
                       else tf.image.decode_jpeg)
            frame = sess.run(decoder(raw))
            height, width = frame.shape[0], frame.shape[1]
    tf.logging.info('Image size: %dx%d' % (width, height))

    with tf.Graph().as_default():
        with tf.Session().as_default() as sess:
            # Preprocessed input used to feed the transform network.
            image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(
                FLAGS.loss_model, is_training=False)
            net_in = reader.get_image(FLAGS.image_file, height, width,
                                      image_preprocessing_fn)
            net_in = tf.expand_dims(net_in, 0)
            generated = model.transform_network(net_in, training=False)
            generated = tf.squeeze(generated, [0])

            # Initialize everything, then load the trained model on top.
            saver = tf.train.Saver(tf.global_variables(),
                                   write_version=tf.train.SaverDef.V1)
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])
            FLAGS.model_file = os.path.abspath(FLAGS.model_file)
            saver.restore(sess, FLAGS.model_file)

            # Run the style transfer, timing the forward pass.
            start_time = time.time()
            generated = sess.run(generated)
            generated = tf.cast(generated, tf.uint8)
            end_time = time.time()
            tf.logging.info('Elapsed time: %fs' % (end_time - start_time))

            # Ensure the static output directory exists before writing.
            generated_file = FLAGS.target_file
            if not os.path.exists('static/img/generated'):
                os.makedirs('static/img/generated')
            with open(generated_file, 'wb') as img:
                img.write(sess.run(tf.image.encode_jpeg(generated)))
                tf.logging.info('Done. Please check %s.' % generated_file)
예제 #35
0
def style_transform(style, model_file, img_file, result_file):
    """Stylize img_file with model_file; save to static/img/generated/result_file.

    The style argument is accepted for interface compatibility but unused.
    """
    # Decode once to learn the source dimensions.
    height = 0
    width = 0
    with open(img_file, 'rb') as img:
        with tf.Session().as_default() as sess:
            payload = img.read()
            if img_file.lower().endswith('png'):
                picture = sess.run(tf.image.decode_png(payload))
            else:
                picture = sess.run(tf.image.decode_jpeg(payload))
            height, width = picture.shape[0], picture.shape[1]
    print('Image size: %dx%d' % (width, height))

    with tf.Graph().as_default():
        with tf.Session().as_default() as sess:
            # Preprocessed input feeding the transform network.
            image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(
                FLAGS.loss_model, is_training=False)
            source = reader.get_image(img_file, height, width,
                                      image_preprocessing_fn)
            source = tf.expand_dims(source, 0)
            generated = model.transform_network(source, training=False)
            generated = tf.squeeze(generated, [0])

            # Initialize, then restore trained weights from the checkpoint.
            saver = tf.train.Saver(tf.global_variables())
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])
            FLAGS.model_file = os.path.abspath(model_file)
            saver.restore(sess, FLAGS.model_file)

            # Run the transfer, then cast the ndarray output to uint8.
            start_time = time.time()
            generated = sess.run(generated)
            generated = tf.cast(generated, tf.uint8)
            end_time = time.time()
            print('Elapsed time: %fs' % (end_time - start_time))

            # Ensure the output directory exists, then write the JPEG.
            generated_file = 'static/img/generated/' + result_file
            if not os.path.exists('static/img/generated'):
                os.makedirs('static/img/generated')
            with open(generated_file, 'wb') as img:
                img.write(sess.run(tf.image.encode_jpeg(generated)))
                print('Done. Please check %s.' % generated_file)
예제 #36
0
def main(_):
    """Stylize FLAGS.image_file with FLAGS.model_file; save generated/res.jpg."""
    # Decode once, only to obtain the image dimensions.
    height = 0
    width = 0
    with open(FLAGS.image_file, 'rb') as img:
        with tf.Session().as_default() as sess:
            if FLAGS.image_file.lower().endswith('png'):
                image = sess.run(tf.image.decode_png(img.read()))
            else:
                image = sess.run(tf.image.decode_jpeg(img.read()))
            height = image.shape[0]
            width = image.shape[1]
    tf.logging.info('Image size: %dx%d' % (width, height))

    with tf.Graph().as_default():
        with tf.Session().as_default() as sess:
            image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(
                FLAGS.loss_model,
                is_training=False)
            image = reader.get_image(FLAGS.image_file, height, width, image_preprocessing_fn)
            image = tf.expand_dims(image, 0)
            generated = model.net(image, training=False)
            generated = tf.squeeze(generated, [0])
            # Modernized: tf.all_variables / tf.initialize_all_variables /
            # tf.initialize_local_variables are deprecated aliases of the
            # calls below, which the sibling scripts in this file use.
            saver = tf.train.Saver(tf.global_variables())
            sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
            FLAGS.model_file = os.path.abspath(FLAGS.model_file)
            saver.restore(sess, FLAGS.model_file)

            # Forward pass, then cast the ndarray result to uint8 pixels.
            start_time = time.time()
            generated = sess.run(generated)
            generated = tf.cast(generated, tf.uint8)
            end_time = time.time()
            tf.logging.info('Elapsed time: %fs' % (end_time - start_time))
            generated_file = 'generated/res.jpg'
            if os.path.exists('generated') is False:
                os.makedirs('generated')
            with open(generated_file, 'wb') as img:
                img.write(sess.run(tf.image.encode_jpeg(generated)))
                tf.logging.info('Done. Please check %s.' % generated_file)