Example #1
def transform_forward(path_in, path_out, checkpoint_dir):
    image_input = get_image(path_in)
    image_input = np.array([image_input])
    image_shape = image_input.shape

    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    with tf.Graph().as_default(), tf.device('/cpu:0'), tf.Session(config=config) as sess:
        initial_image = tf.placeholder(tf.float32,
                                       shape=image_shape,
                                       name='initial_image')
        with tf.variable_scope("Transform_net"):
            transformed_image_tensor = transform.net(initial_image)
        saver = tf.train.Saver()
        if os.path.isdir(checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, checkpoint_dir)

        transformed_image = sess.run(transformed_image_tensor,
                                     feed_dict={initial_image: image_input})

        save_img(path_out, transformed_image[0])
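
These snippets all lean on small I/O helpers (get_image/get_img, save_img) that are not shown. A minimal sketch of what they might look like, assuming Pillow and NumPy (the names and behavior here are assumptions, not the original helpers):

import numpy as np
from PIL import Image

def get_img(src, img_size=None):
    # load as an RGB array; optionally resize to (H, W, C) img_size
    img = Image.open(src).convert('RGB')
    if img_size is not None:
        img = img.resize((img_size[1], img_size[0]))  # PIL expects (W, H)
    return np.array(img, dtype=np.float32)

def save_img(path, img):
    # clip network output into the valid pixel range before writing
    Image.fromarray(np.clip(img, 0, 255).astype(np.uint8)).save(path)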
Example #2
def optimize(content_targets, style_target, content_weight, style_weight,
             tv_weight, vgg_path, epochs=2, print_iterations=1000,
             batch_size=4, save_path='saver/fns.ckpt', slow=False,
             learning_rate=1e-3, debug=False):
             
    mod = len(content_targets) % batch_size      # with batch_size=4, 7 content samples form only one full batch,
    if mod > 0:                                  # so the trailing samples must be dropped; this block does exactly that
        content_targets = content_targets[:-mod] # [:-mod] keeps everything except the last mod samples

    style_features = {}
    batch_shape = (batch_size, 256, 256, 3)      # batch_size x H x W x C; TensorFlow's standard NHWC layout
    style_shape = (1,) + style_target.shape      # the leading 1 is the style image's batch size

    # specify the graph, the device to run on, and the Session
    with tf.Graph().as_default(), tf.device('/gpu:0'), tf.Session() as sess:
        style_image = tf.placeholder(tf.float32, shape=style_shape, name='style_image')
        style_image_pre = vgg.preprocess(style_image)
        net = vgg.net(vgg_path, style_image_pre)    # run the preprocessed style image through VGG once
        style_pre = np.array([style_target])        # add the batch dimension expected by style_shape
        for layer in STYLE_LAYERS:
            features = net[layer].eval(feed_dict={style_image: style_pre})  # pull out this layer's feature map
            features = np.reshape(features, (-1, features.shape[3]))
            gram = np.matmul(features.T, features)  # these three lines implement the Gram matrix from the paper
            style_features[layer] = gram

    with tf.Graph().as_default(), tf.Session() as sess:
        x_content = tf.placeholder(tf.float32, shape=batch_shape, name='x_content')
        x_pre = vgg.preprocess(x_content)
        content_features = {}
        content_net = vgg.net(vgg_path, x_pre)
        content_features[CONTENT_LAYER] = content_net[CONTENT_LAYER]

        preds = transform.net(x_content / 255.0)
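
A minimal NumPy sketch of the Gram computation above, under an assumed feature-map shape (note that Example #33 later in this listing additionally divides by features.size to normalize):

import numpy as np

feature_map = np.random.rand(1, 32, 32, 64).astype(np.float32)  # (batch, H, W, C), shape assumed
F = feature_map.reshape(-1, feature_map.shape[3])               # flatten to (H*W, C)
gram = np.matmul(F.T, F)                                        # (C, C) channel-correlation matrix

Example #3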
    def __init__(self, vgg_path, style_image, content_shape, content_weight,
                 style_weight, tv_weight, batch_size, device, log_f):
        with tf.device(device):
            self.log_file = log_f
            vgg = vgg_network.VGG(vgg_path)
            self.style_image = style_image
            self.batch_size = batch_size
            self.batch_shape = (batch_size, ) + content_shape

            self.input_batch = tf.placeholder(tf.float32,
                                              shape=self.batch_shape,
                                              name="input_batch")

            self.stylized_image = transform.net(self.input_batch, _vgg=vgg)

            loss_calculator = LossCalculator(vgg, self.stylized_image)

            self.content_loss = loss_calculator.content_loss(
                self.input_batch, self.CONTENT_LAYER,
                content_weight) / self.batch_size

            self.style_loss = loss_calculator.style_loss(
                self.style_image, self.STYLE_LAYERS,
                style_weight) / self.batch_size

            self.total_variation_loss = loss_calculator.tv_loss(
                self.stylized_image, tv_weight) / batch_size

            self.loss = self.content_loss + self.style_loss + self.total_variation_loss
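Example #4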
def protobuf_from_checkpoint(ckpt_file, image_shape, batch_size, output_name):

    if not os.path.isfile(ckpt_file):
        raise ValueError(f'File "{ckpt_file}" does not exist or is not a file.')

    # create the tf Session
    sess            = tf.Session()
    # Compute the shape of the input placeholder. This shape is what the serialized model can
    # process. For other input shapes you will have to resize the images or make a new model export
    batch_shape     = [batch_size] + image_shape
    img_placeholder = tf.placeholder(tf.float32, shape=batch_shape, name='img_placeholder')
    # create the network so that the variables live in the global scope
    preds           = transform.net(img_placeholder)  # noqa
    saver           = tf.train.Saver()
    # load our checkpoint into the variables
    saver.restore(sess, ckpt_file)

    # get the tf graph and retrieve operation names
    graph    = tf.get_default_graph()
    op_names = [op.name for op in graph.get_operations()]
    # convert the protobuf GraphDef to a GraphDef that has no variables but just constants with the
    # current values.
    output_graph_def = graph_util.convert_variables_to_constants(
        sess,
        graph.as_graph_def(), op_names)

    # dump GraphDef to file
    graph_io.write_graph(output_graph_def, './', output_name, as_text=False)
    sess.close()
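
A quick sanity check that the exported GraphDef loads back, as a sketch using the TF1 API (the file name here is an assumption):

import tensorflow as tf

with tf.gfile.GFile('model.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as g:
    tf.import_graph_def(graph_def, name='')
    # the placeholder created above is now reachable by name
    img_in = g.get_tensor_by_name('img_placeholder:0')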
Example #5
def ffwd(data_in, paths_out, checkpoint_dir, device_t='/cpu:0', bw=False):

    img_shape = get_img(data_in).shape

    g = tf.Graph()
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True

    with g.as_default(), g.device(device_t), tf.Session(config=soft_config) as sess:
        batch_shape = (1,) + img_shape
        img_placeholder = tf.placeholder(tf.float32, shape=batch_shape,
                                         name='img_placeholder')

        preds = transform.net(img_placeholder)

        saver = tf.train.Saver()
        if os.path.isdir(checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, checkpoint_dir)

        X = np.zeros(batch_shape, dtype=np.float32)

        if bw:
            img = get_bw_img(data_in)
        else:
            img = get_img(data_in)

        X[0] = img

        _preds = sess.run(preds, feed_dict={img_placeholder:X})
        save_img(paths_out, _preds[0])
Example #6
def rundeeplearning(img_in, checkpoint_dir, batch_size=1):

    img = get_rgb_np(img_in)
    img_shape = img.shape

    g = tf.Graph()
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    config_mu = tf.ConfigProto(intra_op_parallelism_threads=16)
    config_ = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1, \
                        allow_soft_placement=True, device_count = {'CPU': 1})
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), \
            tf.Session(config=config_mu) as sess:
        batch_shape = (batch_size, ) + img_shape
        img_placeholder = tf.placeholder(tf.float32,
                                         shape=batch_shape,
                                         name='img_placeholder')
        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()
        # Load
        saver.restore(sess, checkpoint_dir)
        X = np.zeros(batch_shape, dtype=np.float32)
        X[0] = img

        _preds = sess.run(preds, feed_dict={img_placeholder: X})
        img_np = np.clip(_preds[0], 0, 255).astype(np.uint8)
        img_out = Image.fromarray(img_np)
        buffer = io.BytesIO()
        img_out.save(buffer, format="PNG")

        return buffer
Example #7
def ckpt2pb(ckpt_path):
    graph = tf.Graph()
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    sess = tf.Session(config=soft_config)
    
    # construct the model first
    batch_shape = (1, 256, 256, 3)
    img_placeholder = tf.placeholder(tf.float32, shape=batch_shape,
                                         name='img_placeholder')
    # transform.net is the func which build and return the model in this case
    # replace it use your model -> output = your_model(img_placeholder)
    output = transform.net(img_placeholder)

    graph_def = sess.graph.as_graph_def()
    for node in graph_def.node:
        print(_node_name(node.name))

    # find the output tensor name of the model in the printout above;
    # assume it is the last one
    final_tensor_name = _node_name(graph_def.node[-1].name)

    saver = tf.train.Saver()
    saver.restore(sess, ckpt_path)

    output_filename = ckpt_path.replace('ckpt', 'pb')
    output_graph_def = graph_util.convert_variables_to_constants(sess,
                           sess.graph.as_graph_def(), [final_tensor_name])

    with gfile.FastGFile(output_filename, 'wb') as f:
        f.write(output_graph_def.SerializeToString())
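Example #8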
def simple_evaluate(image_array, checkpoint_dir, device_t='/cpu:0'):
    img_shape = image_array.shape

    reshaped_image = image_array.reshape((1, img_shape[0], img_shape[1], img_shape[2]))

    g = tf.Graph()
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), g.device(device_t), tf.Session(config=soft_config) as sess:
        batch_shape = (1,) + img_shape
        img_placeholder = tf.placeholder(tf.float32, shape=batch_shape,
                                         name='img_placeholder')
        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()

        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            raise Exception("No checkpoint found...")

        _preds = sess.run(preds, feed_dict={img_placeholder: reshaped_image})

    result = np.clip(_preds[0], 0, 255).astype(np.uint8)
    # Check if shapes match and act accordingly
    if result.shape == img_shape:
        return result
    else:
        return result[0: img_shape[0], 0: img_shape[1], 0: img_shape[2]]
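Example #9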
def get_ckpt_model():
    models_dir = os.listdir("temp")

    for i, style_name in enumerate(models_dir):
        style_name, _ = os.path.splitext(style_name)
        batch_shape = (None, None, None, 3)
        img_placeholder = tf.placeholder(tf.float32,
                                         shape=batch_shape,
                                         name='img_placeholder')
        preds = transform.net(img_placeholder)
        with tf.Session() as sess:
            saver = tf.train.Saver()
            print("process {}, {}/{}".format(style_name, i, len(models_dir)))
            old_model_dir = "temp/" + style_name + ".ckpt"
            new_model_dir = "./temp2/" + style_name + "/"
            if os.path.isdir(old_model_dir):
                ckpt = tf.train.get_checkpoint_state(old_model_dir)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                else:
                    raise Exception("No checkpoint found...")
            else:
                saver.restore(sess, old_model_dir)
            print(saver.save(sess, new_model_dir))
        tf.compat.v1.reset_default_graph()
Example #10
def main():
    batch_size = 1
    checkpoint_dir = 'style/wave.ckpt'
    device_t = '/gpu:0'
    data_in = 'in/chicago.jpg'
    path_out = 'out/chicago.jpg'
    img_shape = get_img(data_in).shape

    g = tf.Graph()
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), g.device(device_t), tf.Session(
            config=soft_config) as sess:
        batch_shape = (batch_size, ) + img_shape
        img_placeholder = tf.placeholder(tf.float32,
                                         shape=batch_shape,
                                         name='img_placeholder')
        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()

        if os.path.isdir(checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, checkpoint_dir)

        X = np.zeros(batch_shape, dtype=np.float32)
        img = get_img(data_in)
        X[0] = img
        _preds = sess.run(preds, feed_dict={img_placeholder: X})
        save_img(path_out, _preds[0])
Example #11
def style_transfer(style, in_path, out_path, device_t='/gpu:0', batch_size=1):
    print("Log: Begin - style transfer")
    curr_style = STYLE_MODEL_MAP[style]

    img_shape = get_img(in_path).shape
    graph = tf.Graph()
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with graph.as_default(), graph.device(device_t), tf.Session(config=soft_config) as sess:
        img_val = tf.placeholder(tf.float32, shape=(1,) + img_shape, name='img_val')
        preds = transform.net(img_val)
        saver = tf.train.Saver()
        if os.path.isdir(curr_style):
            ckpt = tf.train.get_checkpoint_state(curr_style)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, curr_style)

        img = get_img(in_path)
        _preds = sess.run(preds, feed_dict={img_val:[img]})
        save_img(out_path, _preds[0])
    print("Log: End - style transfer - Success√")
    return _preds[0]
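
STYLE_MODEL_MAP is not defined in this snippet; it is presumably a mapping from style names to checkpoint paths, e.g. (the paths below are assumptions):

STYLE_MODEL_MAP = {
    'wave': 'models/wave.ckpt',
    'udnie': 'models/udnie.ckpt',
}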
Example #12
    def ffwd_video(path_in,
                   path_out,
                   checkpoint_dir,
                   device_t='/gpu:0',
                   batch_size=4):
        video_clip = VideoFileClip(path_in, audio=False)
        video_writer = ffmpeg_writer.FFMPEG_VideoWriter(path_out,
                                                        video_clip.size,
                                                        video_clip.fps,
                                                        codec="libx264",
                                                        preset="medium",
                                                        bitrate="2000k",
                                                        audiofile=path_in,
                                                        threads=None,
                                                        ffmpeg_params=None)

        g = tf.Graph()
        soft_config = tf.ConfigProto(allow_soft_placement=True)
        soft_config.gpu_options.allow_growth = True
        with g.as_default(), g.device(device_t), \
                tf.Session(config=soft_config) as sess:
            batch_shape = (batch_size, video_clip.size[1], video_clip.size[0],
                           3)
            img_placeholder = tf.placeholder(tf.float32,
                                             shape=batch_shape,
                                             name='img_placeholder')

            preds = transform.net(img_placeholder)
            saver = tf.train.Saver()
            if os.path.isdir(checkpoint_dir):
                ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                else:
                    raise Exception("No checkpoint found...")
            else:
                saver.restore(sess, checkpoint_dir)

            X = np.zeros(batch_shape, dtype=np.float32)

            def style_and_write(count):
                for i in range(count, batch_size):
                    X[i] = X[count - 1]  # Use last frame to fill X
                _preds = sess.run(preds, feed_dict={img_placeholder: X})
                for i in range(0, count):
                    video_writer.write_frame(
                        np.clip(_preds[i], 0, 255).astype(np.uint8))

            frame_count = 0  # The frame count that written to X
            for frame in video_clip.iter_frames():
                X[frame_count] = frame
                frame_count += 1
                if frame_count == batch_size:
                    style_and_write(frame_count)
                    frame_count = 0

            if frame_count != 0:
                style_and_write(frame_count)

            video_writer.close()
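
The video examples assume moviepy's ffmpeg bindings are imported roughly as follows (a sketch; this matches moviepy's public module layout):

from moviepy.video.io.VideoFileClip import VideoFileClip
import moviepy.video.io.ffmpeg_writer as ffmpeg_writer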
Example #13
def ffwd(data_in, paths_out, checkpoint_dir, device_t='/gpu:0', batch_size=4):
    assert len(paths_out) > 0
    is_paths = type(data_in[0]) == str
    if is_paths:
        assert len(data_in) == len(paths_out)
        img_shape = get_img(data_in[0]).shape
    else:
        assert data_in.shape[0] == len(paths_out)
        img_shape = data_in[0].shape

    g = tf.Graph()
    batch_size = min(len(paths_out), batch_size)
    curr_num = 0
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), g.device(device_t), \
            tf.Session(config=soft_config) as sess:
        batch_shape = (batch_size,) + img_shape
        img_placeholder = tf.placeholder(tf.float32, shape=batch_shape,
                                         name='img_placeholder')

        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()
        if os.path.isdir(checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, checkpoint_dir)

        num_iters = int(len(paths_out)/batch_size)
        for i in range(num_iters):
            pos = i * batch_size
            curr_batch_out = paths_out[pos:pos+batch_size]
            if is_paths:
                curr_batch_in = data_in[pos:pos+batch_size]
                X = np.zeros(batch_shape, dtype=np.float32)
                for j, path_in in enumerate(curr_batch_in):
                    img = get_img(path_in)
                    assert img.shape == img_shape, \
                        'Images have different dimensions. ' +  \
                        'Resize images or use --allow-different-dimensions.'
                    X[j] = img
            else:
                X = data_in[pos:pos+batch_size]

            _preds = sess.run(preds, feed_dict={img_placeholder:X})
            for j, path_out in enumerate(curr_batch_out):
                save_img(path_out, _preds[j])
                
        remaining_in = data_in[num_iters*batch_size:]
        remaining_out = paths_out[num_iters*batch_size:]
    if len(remaining_in) > 0:
        ffwd(remaining_in, remaining_out, checkpoint_dir, 
            device_t=device_t, batch_size=1)
Example #14
def deep_art(img, style):
    # check if style is valid
    if not style in [
            "la_muse", "rain_princess", "scream", "udnie", "wave", "wreck"
    ]:
        return "Please enter a valid style from the list: [la_muse, rain_princess, scream, udnie, wave, wreck]"

    # whether to use GPU or CPU, setup batch size, and select which style to use
    device_t = '/gpu:0'
    batch_size = 1
    checkpoint_dir = f"./fast-style-transfer/models/{style}.ckpt"

    img_shape = img.shape
    g = tf.Graph()
    curr_num = 0

    # setup GPU configurations for model
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True

    # setup batch shape from image size to be compatible with model input shape
    with g.as_default(), g.device(device_t), \
            tf.Session(config=soft_config) as sess:
        batch_shape = (batch_size, ) + img_shape
        img_placeholder = tf.placeholder(tf.float32,
                                         shape=batch_shape,
                                         name='img_placeholder')

        # 'predict' the output stylized image and save it
        pred = transform.net(img_placeholder)
        saver = tf.train.Saver()

        # use pretrained model as the checkpoint to start from
        if os.path.isdir(checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, checkpoint_dir)

        # in this case we process 1 image at a time

        # change image dimensions to 1xNxMx3 from NxMx3 then run the model
        styled = sess.run(pred,
                          feed_dict={img_placeholder: img[np.newaxis, ...]})

        # take the single NxMx3 output out of the batch and convert to an image
        im = Image.fromarray(np.clip(styled[0], 0, 255).astype('uint8'), 'RGB')

        return im
Example #15
def evaluate_img(img_in, img_path, ckpt):
    img_shape = (256, 256, 3)
    batch_shape = (1, 256, 256, 3)

    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with tf.Graph().as_default(), tf.Session(config=soft_config) as sess:
        # Declare placeholders we'll feed into the graph
        X_inputs = tf.placeholder(
            tf.float32, shape=batch_shape, name='X_inputs')

        # Define output node
        preds = transform.net(X_inputs)  # (1, 256, 256, 3)
        tf.identity(preds[0], name='output')

        # Restore the training checkpoint (important)
        saver = tf.train.Saver()
        saver.restore(sess, ckpt)  # run
        """
        saver = tf.train.Saver()
        if os.path.isdir(FLAGS.checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)  # run
            else:
                raise Exception("No checkpoint found...")
        else:
            ckpt = saver.restore(sess, FLAGS.checkpoint_dir)
        """

        X = np.zeros(batch_shape, dtype=np.float32)  # feed

        img = get_img(img_in, img_shape)
        X[0] = img

        _preds = sess.run(preds, feed_dict={X_inputs: X})
        save_img(img_path, _preds[0])

        # Write graph.
        start_time = time.time()
        tf.train.write_graph(
            sess.graph.as_graph_def(),
            FLAGS.model_dir,
            FLAGS.model_name + '.pb',
            as_text=False)
        tf.train.write_graph(
            sess.graph.as_graph_def(),
            FLAGS.model_dir,
            FLAGS.model_name + '.pb.txt',
            as_text=True)
        end_time = time.time()
        delta_time = end_time - start_time
        print('Save pb and pb.txt done!, time:', delta_time)
Example #16
def ffwd(content, network_path):
    with tf.Session() as sess:
        content_placeholder = tf.placeholder(tf.float32,
                                             shape=content.shape,
                                             name='content_placeholder')
        network = transform.net(content_placeholder / 255.0)

        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(network_path)
        saver.restore(sess, ckpt.model_checkpoint_path)
        prediction = sess.run(network,
                              feed_dict={content_placeholder: content})
    return prediction[0]
Example #17
    def __init__(self, image_resolution=(300, 250)):

        self._image_resolution = image_resolution

        self._graph = tf.Graph()
        self._session = tf.Session(graph=self._graph)

        with self._graph.as_default():
            self._image_placeholder = tf.placeholder(dtype=tf.float32,
                                                     shape=(1, 300, 250, 3),
                                                     name='img_placeholder')

            self._output = transform.net(self._image_placeholder)
Example #18
def generate():
    if not FLAGS.CONTENT_IMAGE:
        tf.logging.info("generating with a fast neural style model requires setting the content image path")
        return

    if not os.path.exists(FLAGS.OUTPUT_FOLDER):
        os.mkdir(FLAGS.OUTPUT_FOLDER)

    # Get the image dimensions
    height = 0
    width = 0
    with open(FLAGS.CONTENT_IMAGE, 'rb') as img:
        with tf.Session().as_default() as sess:
            if FLAGS.CONTENT_IMAGE.lower().endswith('png'):
                image = sess.run(tf.image.decode_png(img.read()))
            else:
                image = sess.run(tf.image.decode_jpeg(img.read()))
            height = image.shape[0]
            width = image.shape[1]
    tf.logging.info('Image size: %dx%d' % (width, height))

    with tf.Graph().as_default(), tf.Session() as sess:
        # Read the image file
        path = FLAGS.CONTENT_IMAGE
        png = path.lower().endswith('png')
        img_bytes = tf.read_file(path)

        # Decode the image
        content_image = tf.image.decode_png(img_bytes, channels=3) if png else tf.image.decode_jpeg(img_bytes, channels=3)
        content_image = tf.image.convert_image_dtype(content_image, tf.float32) * 255.0
        content_image = tf.expand_dims(content_image, 0)

        generated_images = transform.net(content_image - vgg.MEAN_PIXEL, training=False)
        output_format = tf.saturate_cast(generated_images, tf.uint8)

        # Run the conversion
        saver = tf.train.Saver(tf.global_variables(), write_version=tf.train.SaverDef.V1)
        sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
        model_path = os.path.abspath(FLAGS.MODEL_PATH)
        tf.logging.info('Using model {}'.format(model_path))
        saver.restore(sess, model_path)

        filename = os.path.basename(FLAGS.CONTENT_IMAGE)
        (shortname, extension) = os.path.splitext(filename)
        filename = shortname + '-' + os.path.basename(FLAGS.MODEL_PATH) + extension

        tf.logging.info("image {}".format(filename))
        images_t = sess.run(output_format)

        assert len(images_t) == 1
        misc.imsave(os.path.join(FLAGS.OUTPUT_FOLDER, filename), images_t[0])
Example #19
    def __init__(self, checkpoint_dir, device_t):
        # Create local graph and use it in the session
        self.graph = tf.Graph()
        soft_config = tf.ConfigProto(allow_soft_placement=True)
        soft_config.gpu_options.allow_growth = True
        self.sess = tf.Session(graph=self.graph, config=soft_config)
        with self.graph.as_default(), self.graph.device(device_t):
            batch_shape = (1,) + (256, 256, 3)
            self.img_placeholder = tf.placeholder(tf.float32, shape=batch_shape, name='img_placeholder')
            self.pred = transform.net(self.img_placeholder)

            # Import saved model from location 'checkpoint_dir' into local graph
            saver = tf.train.Saver()
            saver.restore(self.sess, checkpoint_dir)
Example #20
def setup(options):
    global sess
    global img_placeholder
    global preds
    global g
    h, w = 480, 640
    img_shape = (h, w, 3)
    batch_shape = (1,) + img_shape
    g = tf.get_default_graph()
    sess = tf.Session(graph=g)
    img_placeholder = tf.placeholder(tf.float32, shape=batch_shape, name='img_placeholder')
    preds = transform.net(img_placeholder)
    load_checkpoint(os.path.join(options['checkpoint_path'], 'fns.ckpt'), sess)
    return sess
Example #21
def forward_prop(data_in,
                 paths_out,
                 checkpoint_dir,
                 device_t='/cpu:0',
                 batch_size=1):
    assert len(paths_out) > 0
    is_paths = type(data_in) == str

    img_shape = get_img(data_in).shape

    #print("Batch size: ", batch_size)

    g = tf.Graph()

    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), g.device(device_t), tf.Session(
            config=soft_config) as sess:
        batch_shape = (batch_size, ) + img_shape
        #print("Batch_shape: ", batch_shape)
        img_placeholder = tf.placeholder(tf.float32,
                                         shape=batch_shape,
                                         name='img_placeholder')

        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()

        #Restore checkpoint in session
        saver.restore(sess, checkpoint_dir)

        curr_batch_out = paths_out

        if is_paths:
            curr_batch_in = data_in
            print("curr_batch_in: ", curr_batch_in)
            print("curr_batch_out: ", curr_batch_out)
            X = np.zeros(batch_shape, dtype=np.float32)

            img = get_img(curr_batch_in)
            assert img.shape == img_shape, 'Images have different dimensions. ' + 'Resize images'
            X[0] = img
            #print("Shape: ", X.shape)  #(1,960,960,3)

        _preds = sess.run(preds, feed_dict={img_placeholder: X})

        save_img(curr_batch_out, _preds[0])
        sess.close()
    print("Done!!")
    return curr_batch_out
Example #22
def main(checkpoint_path, input_shape, out_graph_name):
    # Init graph and session to be used
    g = tf.Graph()
    soft_config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
    with g.as_default(), g.device('/cpu'), tf.compat.v1.Session(
            config=soft_config) as sess:
        # Placeholder variable for graph input
        img_placeholder = tf.compat.v1.placeholder(tf.float32,
                                                   shape=input_shape,
                                                   name='img_placeholder')
        # The model from the repo
        transform.net(img_placeholder)

        # Restore model from checkpoint
        saver = tf.compat.v1.train.Saver()
        saver.restore(sess, checkpoint_path)

        # Freeze graph from the session.
        # "add_37" is the actual last operation of graph
        frozen = tf.compat.v1.graph_util.convert_variables_to_constants(
            sess, sess.graph_def, ["add_37"])
        # Write frozen graph to a file
        graph_io.write_graph(frozen, './', out_graph_name, as_text=False)
        print(f'Frozen graph {out_graph_name} is saved!')
Example #23
def ffwd_video(path_in, path_out, checkpoint_dir, device_t='/gpu:0', batch_size=4):
    video_clip = VideoFileClip(path_in, audio=False)
    video_writer = ffmpeg_writer.FFMPEG_VideoWriter(path_out, video_clip.size, video_clip.fps, codec="libx264",
                                                    preset="medium", bitrate="2000k",
                                                    audiofile=path_in, threads=None,
                                                    ffmpeg_params=None)

    g = tf.Graph()
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), g.device(device_t), \
            tf.Session(config=soft_config) as sess:
        batch_shape = (batch_size, video_clip.size[1], video_clip.size[0], 3)
        img_placeholder = tf.placeholder(tf.float32, shape=batch_shape,
                                         name='img_placeholder')

        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()
        if os.path.isdir(checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, checkpoint_dir)

        X = np.zeros(batch_shape, dtype=np.float32)

        def style_and_write(count):
            for i in range(count, batch_size):
                X[i] = X[count - 1]  # Use last frame to fill X
            _preds = sess.run(preds, feed_dict={img_placeholder: X})
            for i in range(0, count):
                video_writer.write_frame(np.clip(_preds[i], 0, 255).astype(np.uint8))

        frame_count = 0  # The frame count that written to X
        for frame in video_clip.iter_frames():
            X[frame_count] = frame
            frame_count += 1
            if frame_count == batch_size:
                style_and_write(frame_count)
                frame_count = 0

        if frame_count != 0:
            style_and_write(frame_count)

        video_writer.close()
Example #24
def ffwd(content, network_path, cin):
    with tf.Session() as sess:
        img_placeholder = tf.placeholder(tf.float32,
                                         shape=content.shape,
                                         name='img_placeholder')
        network = transform.net(img_placeholder)
        # initialize only after the variables exist; restore() then overwrites them
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()

        saver.restore(sess, tf.train.latest_checkpoint(network_path))

        prediction = sess.run(network, feed_dict={img_placeholder: content})

        sess.close()
        return prediction[0]
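Example #25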
def setup(options):
    global sess
    global img_placeholder
    global preds
    h, w = 480, 640
    img_shape = (h, w, 3)
    batch_shape = (1,) + img_shape
    g = tf.Graph()
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    sess = tf.Session(graph=g, config=soft_config)
    # as_default()/device() return context managers; calling them without `with` is a no-op
    with g.as_default(), g.device('/gpu:0'):
        img_placeholder = tf.placeholder(tf.float32, shape=batch_shape, name='img_placeholder')
        preds = transform.net(img_placeholder)
        load_checkpoint(options['checkpoint_path'], sess)
    return sess
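Example #26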
def create_transfer(img, checkpoint_dir, device_t='/gpu:0'):
    g = tf.Graph()
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), g.device(device_t), \
             tf.Session(config=soft_config) as sess:
        batch_shape = (1,) + img.shape
        img_placeholder = tf.placeholder(tf.float32, shape=batch_shape, name='img_placeholder')

        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        saver.restore(sess, ckpt.model_checkpoint_path)
        X = np.zeros(batch_shape, dtype=np.float32)
        X[0] = img
        _preds = sess.run(preds, feed_dict={img_placeholder:X})
        return _preds
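Example #27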
def ffwd(content, network_path):
    with tf.Session() as sess:
        img_placeholder = tf.placeholder(tf.float32,
                                         shape=content.shape,
                                         name='img_placeholder')

        network = transform.net(img_placeholder)
        saver = tf.train.Saver()

        ckpt = tf.train.get_checkpoint_state(network_path)

        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            raise Exception("No checkpoint found...")

        prediction = sess.run(network, feed_dict={img_placeholder: content})
        return prediction[0]
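Example #28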
def convert(model_path, input_paths, output_paths):
    config = tf.ConfigProto(allow_soft_placement=True)
    sess = tf.Session(config=config)
    batch_shape = (None, None, None, 3)
    img_placeholder = tf.placeholder(tf.float32,
                                     shape=batch_shape,
                                     name='img_placeholder')
    preds = transform.net(img_placeholder)
    saver = tf.train.Saver()
    saver.restore(sess, model_path)
    for input_path, output_path in zip(input_paths, output_paths):
        # Transform image
        x_input = get_img(input_path)
        x_input = np.expand_dims(x_input, 0)
        y_out = sess.run(preds, feed_dict={img_placeholder: x_input})
        save_img(output_path, y_out[0])
    sess.close()
    tf.compat.v1.reset_default_graph()
    gc.collect()
    return
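Example #29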
def ffwd(content, mask, model):
    with tf.Session() as sess:
        shape = list(content.shape)
        img_placeholder = tf.placeholder(tf.float32,
                                         shape=shape,
                                         name='img_placeholder')
        print(img_placeholder.shape)
        mask_placeholder = tf.placeholder(tf.float32, shape=[1, 64, 64, 1])
        network = transform.net(img_placeholder, mask_placeholder)
        saver = tf.train.Saver()

        saver.restore(sess, model)

        prediction = sess.run(network,
                              feed_dict={
                                  img_placeholder: content,
                                  mask_placeholder: mask
                              })
        return prediction[0]
Example #30
def export(checkpoint_dir, batch_shape):
    with tf.Graph().as_default(), tf.Session() as sess:
        images = tf.placeholder(tf.float32, shape=batch_shape, name='images')
        preds = transform.net(images / 255.0)
        tensor_info_images = tf.saved_model.utils.build_tensor_info(images)
        tensor_info_preds = tf.saved_model.utils.build_tensor_info(preds)
        prediction_signature = (
            tf.saved_model.signature_def_utils.build_signature_def(
                inputs={'images': tensor_info_images},
                outputs={'preds': tensor_info_preds},
                method_name=tf.saved_model.signature_constants.
                PREDICT_METHOD_NAME))
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))
        export_path = os.path.join(checkpoint_dir, "1")
        builder = tf.saved_model.builder.SavedModelBuilder(export_path)
        builder.add_meta_graph_and_variables(
            sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map={'predict_images': prediction_signature})
        builder.save()
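
A sketch of the matching load side for the SavedModel exported above, using the TF1 loader API (export_path is assumed to be the "1" directory written by the function):

import os
import tensorflow as tf

export_path = os.path.join(checkpoint_dir, "1")  # assumes checkpoint_dir from above
with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], export_path)
    # tensors can then be fetched by the names bound in the signature, e.g. 'images:0'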
Example #31
def ffwd(checkpoint_dir, device_t='/gpu:0'):

    img_shape = (500, 500, 3)

    g = tf.Graph()
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), g.device(device_t), \
            tf.Session(config=soft_config) as sess:
        batch_shape = (1, ) + img_shape
        img_placeholder = tf.placeholder(tf.float32,
                                         shape=batch_shape,
                                         name='img_placeholder')

        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()
        saver.restore(sess, checkpoint_dir)

        import cv2
        video_stream = cv2.VideoCapture(0)

        while True:

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            ret, frame = video_stream.read()

            img = cv2.resize(frame, (img_shape[0], img_shape[1]))
            cv2.imshow('frame', img)
            #img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            X = [img]

            _preds = sess.run(preds, feed_dict={img_placeholder: X})
            # save_img(path_out, _preds[j
            #print(_preds[0].shape)
            result = np.clip(_preds[0], 0, 255).astype(np.uint8)
            cv2.imshow('frame', cv2.resize(result,
                                           (img_shape[0], img_shape[1])))

        video_stream.release()
        cv2.destroyAllWindows()
Example #32
def from_pipe(opts):
    command = ["ffprobe",
               '-v', "quiet",
               '-print_format', 'json',
               '-show_streams', opts.in_path]

    info = json.loads(str(subprocess.check_output(command), encoding="utf8"))
    width = int(info["streams"][0]["width"])
    height = int(info["streams"][0]["height"])
    fps = round(eval(info["streams"][0]["r_frame_rate"]))

    command = ["ffmpeg",
               '-loglevel', "quiet",
               '-i', opts.in_path,
               '-f', 'image2pipe',
               '-pix_fmt', 'rgb24',
               '-vcodec', 'rawvideo', '-']

    pipe_in = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=10 ** 9, stdin=None, stderr=None)

    command = ["ffmpeg",
               '-loglevel', "info",
               '-y',  # (optional) overwrite output file if it exists
               '-f', 'rawvideo',
               '-vcodec', 'rawvideo',
               '-s', str(width) + 'x' + str(height),  # size of one frame
               '-pix_fmt', 'rgb24',
               '-r', str(fps),  # frames per second
               '-i', '-',  # The input comes from a pipe
               '-an',  # Tells FFMPEG not to expect any audio
               '-c:v', 'libx264',
               '-preset', 'slow',
               '-crf', '18',
               opts.out]

    pipe_out = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=None, stderr=subprocess.PIPE)
    g = tf.Graph()
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True

    with g.as_default(), g.device(opts.device), \
         tf.Session(config=soft_config) as sess:
        batch_shape = (opts.batch_size, height, width, 3)
        img_placeholder = tf.placeholder(tf.float32, shape=batch_shape,
                                         name='img_placeholder')
        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()
        if os.path.isdir(opts.checkpoint):
            ckpt = tf.train.get_checkpoint_state(opts.checkpoint)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, opts.checkpoint)

        X = np.zeros(batch_shape, dtype=np.float32)
        nbytes = 3 * width * height
        read_input = True
        last = False

        while read_input:
            count = 0
            while count < opts.batch_size:
                raw_image = pipe_in.stdout.read(width * height * 3)

                if len(raw_image) != nbytes:
                    if count == 0:
                        read_input = False
                    else:
                        last = True
                        X = X[:count]
                        batch_shape = (count, height, width, 3)
                        img_placeholder = tf.placeholder(tf.float32, shape=batch_shape,
                                                     name='img_placeholder')
                        preds = transform.net(img_placeholder)
                    break

                image = np.frombuffer(raw_image, dtype='uint8')
                image = image.reshape((height, width, 3))
                X[count] = image
                count += 1

            if read_input:
                if last:
                    read_input = False
                _preds = sess.run(preds, feed_dict={img_placeholder: X})

                for i in range(0, batch_shape[0]):
                    img = np.clip(_preds[i], 0, 255).astype(np.uint8)
                    try:
                        pipe_out.stdin.write(img)
                    except IOError as err:
                        ffmpeg_error = pipe_out.stderr.read()
                        error = (str(err) + ("\n\nFFMPEG encountered "
                                             "the following error while writing file:"
                                             "\n\n%s" % ffmpeg_error))
                        read_input = False
                        print(error)
        pipe_out.terminate()
        pipe_in.terminate()
        pipe_out.stdin.close()
        pipe_in.stdout.close()
        del pipe_in
        del pipe_out
Example #33
def optimize(content_targets, style_target, content_weight, style_weight,
             tv_weight, vgg_path, epochs=2, print_iterations=1000,
             batch_size=4, save_path='saver/fns.ckpt', slow=False,
             learning_rate=1e-3, debug=False):
    if slow:
        batch_size = 1
    mod = len(content_targets) % batch_size
    if mod > 0:
        print("Train set has been trimmed slightly..")
        content_targets = content_targets[:-mod] 

    style_features = {}

    batch_shape = (batch_size,256,256,3)
    style_shape = (1,) + style_target.shape
    print(style_shape)

    # precompute style features
    with tf.Graph().as_default(), tf.device('/cpu:0'), tf.Session() as sess:
        style_image = tf.placeholder(tf.float32, shape=style_shape, name='style_image')
        style_image_pre = vgg.preprocess(style_image)
        net = vgg.net(vgg_path, style_image_pre)
        style_pre = np.array([style_target])
        for layer in STYLE_LAYERS:
            features = net[layer].eval(feed_dict={style_image:style_pre})
            features = np.reshape(features, (-1, features.shape[3]))
            gram = np.matmul(features.T, features) / features.size
            style_features[layer] = gram

    with tf.Graph().as_default(), tf.Session() as sess:
        X_content = tf.placeholder(tf.float32, shape=batch_shape, name="X_content")
        X_pre = vgg.preprocess(X_content)

        # precompute content features
        content_features = {}
        content_net = vgg.net(vgg_path, X_pre)
        content_features[CONTENT_LAYER] = content_net[CONTENT_LAYER]

        if slow:
            preds = tf.Variable(
                tf.random_normal(X_content.get_shape()) * 0.256
            )
            preds_pre = preds
        else:
            preds = transform.net(X_content/255.0)
            preds_pre = vgg.preprocess(preds)

        net = vgg.net(vgg_path, preds_pre)

        content_size = _tensor_size(content_features[CONTENT_LAYER])*batch_size
        assert _tensor_size(content_features[CONTENT_LAYER]) == _tensor_size(net[CONTENT_LAYER])
        content_loss = content_weight * (2 * tf.nn.l2_loss(
            net[CONTENT_LAYER] - content_features[CONTENT_LAYER]) / content_size
        )

        style_losses = []
        for style_layer in STYLE_LAYERS:
            layer = net[style_layer]
            bs, height, width, filters = map(lambda i:i.value,layer.get_shape())
            size = height * width * filters
            feats = tf.reshape(layer, (bs, height * width, filters))
            feats_T = tf.transpose(feats, perm=[0,2,1])
            grams = tf.matmul(feats_T, feats) / size
            style_gram = style_features[style_layer]
            style_losses.append(2 * tf.nn.l2_loss(grams - style_gram)/style_gram.size)

        style_loss = style_weight * functools.reduce(tf.add, style_losses) / batch_size

        # total variation denoising
        tv_y_size = _tensor_size(preds[:,1:,:,:])
        tv_x_size = _tensor_size(preds[:,:,1:,:])
        y_tv = tf.nn.l2_loss(preds[:,1:,:,:] - preds[:,:batch_shape[1]-1,:,:])
        x_tv = tf.nn.l2_loss(preds[:,:,1:,:] - preds[:,:,:batch_shape[2]-1,:])
        tv_loss = tv_weight*2*(x_tv/tv_x_size + y_tv/tv_y_size)/batch_size

        loss = content_loss + style_loss + tv_loss

        # overall loss
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
        sess.run(tf.global_variables_initializer())
        import random
        uid = random.randint(1, 100)
        print("UID: %s" % uid)
        for epoch in range(epochs):
            num_examples = len(content_targets)
            iterations = 0
            while iterations * batch_size < num_examples:
                start_time = time.time()
                curr = iterations * batch_size
                step = curr + batch_size
                X_batch = np.zeros(batch_shape, dtype=np.float32)
                for j, img_p in enumerate(content_targets[curr:step]):
                   X_batch[j] = get_img(img_p, (256,256,3)).astype(np.float32)

                iterations += 1
                assert X_batch.shape[0] == batch_size

                feed_dict = {
                   X_content:X_batch
                }

                train_step.run(feed_dict=feed_dict)
                end_time = time.time()
                delta_time = end_time - start_time
                if debug:
                    print("UID: %s, batch time: %s" % (uid, delta_time))
                is_print_iter = int(iterations) % print_iterations == 0
                if slow:
                    is_print_iter = epoch % print_iterations == 0
                is_last = epoch == epochs - 1 and iterations * batch_size >= num_examples
                should_print = is_print_iter or is_last
                if should_print:
                    to_get = [style_loss, content_loss, tv_loss, loss, preds]
                    test_feed_dict = {
                       X_content:X_batch
                    }

                    tup = sess.run(to_get, feed_dict = test_feed_dict)
                    _style_loss,_content_loss,_tv_loss,_loss,_preds = tup
                    losses = (_style_loss, _content_loss, _tv_loss, _loss)
                    if slow:
                       _preds = vgg.unprocess(_preds)
                    else:
                       saver = tf.train.Saver()
                       res = saver.save(sess, save_path)
                    yield(_preds, losses, iterations, epoch)
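
A hypothetical driver loop for the optimize() generator above; the weight values shown are common defaults for this kind of training and the paths are assumptions:

for preds, losses, iteration, epoch in optimize(content_paths, style_img,
                                                content_weight=7.5,
                                                style_weight=100.0,
                                                tv_weight=200.0,
                                                vgg_path='data/imagenet-vgg-verydeep-19.mat'):
    style_loss, content_loss, tv_loss, total_loss = losses
    print('epoch %d, iter %d, loss %s' % (epoch, iteration, total_loss))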