def inference(from_file_path, args):
    with tf.Graph().as_default(), tf.Session() as sess:
        alpha = args[0]

        encoder = Encoder()
        decoder = Decoder()

        content_input = tf.placeholder(tf.float32,
                                       shape=(1, None, None, 3),
                                       name='content_input')
        style_input = tf.placeholder(tf.float32,
                                     shape=(1, None, None, 3),
                                     name='style_input')
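        # both placeholders take shape (1, None, None, 3): a batch of one
        # image with arbitrary height and width and three color channels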

        # switch RGB to BGR
        content = tf.reverse(content_input, axis=[-1])
        style = tf.reverse(style_input, axis=[-1])
        # preprocess image
        content = encoder.preprocess(content)
        style = encoder.preprocess(style)

        # encode image
        # global variables must be initialized before the model is restored
        enc_c_net = encoder.encode(content, 'content/')
        enc_s_net = encoder.encode(style, 'style/')

        # pass the encoded images to AdaIN
        target_features = AdaIN(enc_c_net.outputs,
                                enc_s_net.outputs,
                                alpha=alpha)

        # decode target features back to image
        dec_net = decoder.decode(target_features, prefix="decoder/")

        generated_img = dec_net.outputs

        # deprocess image
        generated_img = encoder.deprocess(generated_img)

        # switch BGR back to RGB
        generated_img = tf.reverse(generated_img, axis=[-1])

        # clip to 0..255
        generated_img = tf.clip_by_value(generated_img, 0.0, 255.0)

        sess.run(tf.global_variables_initializer())

        encoder.restore_model(sess, ENCODER_PATH, enc_c_net)
        encoder.restore_model(sess, ENCODER_PATH, enc_s_net)
        decoder.restore_model(sess, DECODER_PATH, dec_net)

        model_args = (sess, generated_img, content_input, style_input)
        if from_file_path:
            run_from_file_paths(model_args, args)
        else:
            return run_from_layers(model_args, args)
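
None of these snippets includes the AdaIN function itself. Below is a minimal sketch of what it presumably computes, based on the original AdaIN formulation (Huang and Belongie, 2017): the content features are re-normalized to the per-channel mean and standard deviation of the style features, and alpha blends the result back toward the original content features. The epsilon value is an assumption for numerical stability, not code taken from these projects.

import tensorflow as tf

def AdaIN(content_features, style_features, alpha=1.0, epsilon=1e-5):
    # per-channel mean/variance over the spatial axes (NHWC layout)
    c_mean, c_var = tf.nn.moments(content_features, axes=[1, 2], keep_dims=True)
    s_mean, s_var = tf.nn.moments(style_features, axes=[1, 2], keep_dims=True)
    c_std = tf.sqrt(c_var + epsilon)
    s_std = tf.sqrt(s_var + epsilon)
    # re-normalize the content features to the style statistics
    normalized = s_std * (content_features - c_mean) / c_std + s_mean
    # alpha blends the stylized features with the original content features
    return alpha * normalized + (1.0 - alpha) * content_features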
Example #2
File: views.py  Project: lmxia/education
    def init_session_handler(self):
        self.sess = tf.Session()

        encoder = Encoder()
        decoder = Decoder()

        self.content_input = tf.placeholder(tf.float32,
                                            shape=(1, None, None, 3),
                                            name='content_input')
        self.style_input = tf.placeholder(tf.float32,
                                          shape=(1, None, None, 3),
                                          name='style_input')

        # switch RGB to BGR
        content = tf.reverse(self.content_input, axis=[-1])
        style = tf.reverse(self.style_input, axis=[-1])
        # preprocess image
        content = encoder.preprocess(content)
        style = encoder.preprocess(style)

        # encode image
        # global variables must be initialized before the model is restored
        enc_c_net = encoder.encode(content, 'content/')
        enc_s_net = encoder.encode(style, 'style/')

        # pass the encoded images to AdaIN (alpha is assumed to be stored on
        # the handler, e.g. set in __init__, since no local alpha exists here)
        target_features = transfer_util.AdaIN(enc_c_net.outputs,
                                              enc_s_net.outputs,
                                              alpha=self.alpha)

        # decode target features back to image
        dec_net = decoder.decode(target_features, prefix="decoder/")

        self.generated_img = dec_net.outputs

        # deprocess image
        self.generated_img = encoder.deprocess(self.generated_img)

        # switch BGR back to RGB
        self.generated_img = tf.reverse(self.generated_img, axis=[-1])

        # clip to 0..255
        self.generated_img = tf.clip_by_value(self.generated_img, 0.0, 255.0)
        self.sess.run(tf.global_variables_initializer())

        encoder.restore_model(self.sess, self.encode_path, enc_c_net)
        encoder.restore_model(self.sess, self.encode_path, enc_s_net)
        decoder.restore_model(self.sess, self.decode_path, dec_net)
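
A hypothetical convenience method (not part of the original snippet) showing how this handler's graph would be fed once init_session_handler has run; the method name, the uint8 RGB input convention, and the use of NumPy are assumptions:

    def stylize(self, content_rgb, style_rgb):
        import numpy as np  # imported here only to keep the sketch self-contained
        # add a batch dimension to the (H, W, 3) inputs and run the graph
        output = self.sess.run(
            self.generated_img,
            feed_dict={
                self.content_input: content_rgb[np.newaxis].astype(np.float32),
                self.style_input: style_rgb[np.newaxis].astype(np.float32),
            })
        # generated_img is already clipped to 0..255, so uint8 conversion is safe
        return output[0].astype(np.uint8)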
Example #3
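        # (this excerpt begins mid-function; as in the other examples, an
        #  Encoder and a Decoder are assumed to have been constructed as
        #  `encoder` and `decoder` before this point)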
        content_input = tf.placeholder(tf.float32,
                                       shape=INPUT_SHAPE,
                                       name='content_input')
        style_input = tf.placeholder(tf.float32,
                                     shape=INPUT_SHAPE,
                                     name='style_input')

        # switch RGB to BGR
        content = tf.reverse(content_input, axis=[-1])
        style = tf.reverse(style_input, axis=[-1])
        # preprocess image
        content = encoder.preprocess(content)
        style = encoder.preprocess(style)

        content_enc_net = encoder.encode(content, 'content/')
        style_enc_net = encoder.encode(style, 'style/')

        adain_features = utils.AdaIN(content_enc_net.outputs,
                                     style_enc_net.outputs)

        # decode the AdaIN features back to an image
        stylized_dec_net = decoder.decode(adain_features, 'stylized_dec/')

        # add the mean values back
        stylized_image = encoder.deprocess(stylized_dec_net.outputs)

        # switch BGR back to RGB
        stylized_image = tf.reverse(stylized_image, axis=[-1])

        # clip to 0..255
        stylized_image = tf.clip_by_value(stylized_image, 0.0, 255.0)
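
encoder.preprocess and encoder.deprocess are not shown in any of these examples. In VGG19-based AdaIN implementations they typically just subtract and re-add the ImageNet per-channel means in BGR order, which is also why every example reverses RGB to BGR first. A sketch under that assumption (the constants are the standard VGG channel means, not code from these projects):

VGG_BGR_MEANS = tf.constant([103.939, 116.779, 123.68], dtype=tf.float32)

def preprocess(image_bgr):
    # center the input around the means of the VGG training data
    return image_bgr - VGG_BGR_MEANS

def deprocess(image_bgr):
    # undo the centering before converting back to displayable pixels
    return image_bgr + VGG_BGR_MEANS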
Example #4
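        # (this excerpt begins mid-statement; the opening of the content
        #  placeholder is restored from the parallel examples)
        content_input = tf.placeholder(tf.float32,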
                                       shape=(1, None, None, 3),
                                       name='content_input')
        style_input = tf.placeholder(tf.float32,
                                     shape=(1, None, None, 3),
                                     name='style_input')

        # switch RGB to BGR
        content = tf.reverse(content_input, axis=[-1])
        style = tf.reverse(style_input, axis=[-1])
        # preprocess image
        content = encoder.preprocess(content)
        style = encoder.preprocess(style)

        # encode image
        # global variables must be initialized before the model is restored
        enc_c_net = encoder.encode(content, 'content/')
        enc_s_net = encoder.encode(style, 'style/')

        # pass the encoded images to AdaIN
        target_features = utils.AdaIN(enc_c_net.outputs, enc_s_net.outputs)

        # decode target features back to image
        dec_net = decoder.decode(target_features, prefix="decoder/")

        generated_img = dec_net.outputs

        # deprocess image
        generated_img = encoder.deprocess(generated_img)

        # switch BGR back to RGB
        generated_img = tf.reverse(generated_img, axis=[-1])
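
        # clip to 0..255 (the excerpt ends mid-pipeline; this final step is
        # restored from the parallel examples)
        generated_img = tf.clip_by_value(generated_img, 0.0, 255.0)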