def test_test_model_single_call():
    from vgg import vgg19, vgg19_rev
    import os.path as osp
    import tensorlayer as tl
    import utils  # project-local helper module (imread/imsave/AdaIN)
    VGG19_PARTIAL_WEIGHTS_PATH = 'pretrained_models/predefined_vgg19_endwith(conv4_1)_weights.h5'
    DEC_BEST_WEIGHTS_PATH = 'pretrained_models/dec_best_weights.h5'
    CONTENT_DATA_PATH = './test_images/content'
    STYLE_DATA_PATH = './test_images/style'
    test_content_filenames = ['brad_pitt_01.jpg']
    test_style_filenames = ['cat.jpg']
    TEST_INPUT_CONSTRAINED_SIZE = 800
    TEST_OUTPUT_PATH = './test_images/output'

    tl.logging.set_verbosity(tl.logging.DEBUG)
    enc_net = vgg19(pretrained=False, end_with='conv4_1')
    enc_net.load_weights(VGG19_PARTIAL_WEIGHTS_PATH, in_order=False)
    dec_net = vgg19_rev(pretrained=False, batch_norm=False, input_depth=512)
    dec_net.load_weights(DEC_BEST_WEIGHTS_PATH, skip=True)

    i = 0  # test only one pair of inputs
    test_content = utils.imread(
        osp.join(CONTENT_DATA_PATH, test_content_filenames[i]))
    test_style = utils.imread(
        osp.join(STYLE_DATA_PATH, test_style_filenames[i]))
    # import cv2
    # test_content = cv2.cvtColor(test_content, cv2.COLOR_BGR2RGB)  # <- moved to utils.imread
    # test_style = cv2.cvtColor(test_style, cv2.COLOR_BGR2RGB)      # <- moved to utils.imread

    content_features = enc_net(test_content, is_train=False)
    style_features = enc_net(test_style, is_train=False)
    target_features = utils.AdaIN(content_features, style_features, alpha=1)
    del content_features, style_features
    generated = dec_net(target_features, is_train=False)
    import tensorflow as tf
    if isinstance(generated, tf.Tensor):
        if generated.dtype == tf.float32:
            generated = tf.cast(generated, tf.uint8)
        generated = generated[0].numpy()
    saved_path = f"{osp.splitext(test_style_filenames[i])[0]}+{osp.splitext(test_content_filenames[i])[0]}"
    saved_path = osp.join(TEST_OUTPUT_PATH, f"{saved_path}.jpg")
    # generated = cv2.cvtColor(generated, cv2.COLOR_RGB2BGR)  # <- moved to utils.imsave
    utils.imsave(saved_path, generated)
    tl.logging.info(f"saved_path = {saved_path}")
    tl.logging.info(f"generated.shape = {generated.shape}")
def test_test_arbitrary_sized_inputs():
    from vgg import vgg19, vgg19_rev
    import os.path as osp
    import tensorlayer as tl
    import utils  # project-local helpers: single_inputs_generator/imsave/AdaIN
    DEC_LATEST_WEIGHTS_PATH = 'pretrained_models/dec_latest_weights.h5'
    STYLE_LAYERS = ('conv1_1', 'conv2_1', 'conv3_1', 'conv4_1')  # for Encoders
    CONTENT_DATA_PATH = './dataset/content_samples'  # e.g. COCO_train_2014/
    STYLE_DATA_PATH = './dataset/style_samples'  # e.g. wiki_all_images/
    test_content_filenames = ['000000532397.jpg']  # , '000000048289.jpg', '000000526781.jpg'
    test_style_filenames = ['53154.jpg']  # , '2821.jpg', '216.jpg'
    TEST_INPUT_CONSTRAINED_SIZE = 800
    TEMP_IMAGE_PATH = './temp_images/'

    tl.logging.set_verbosity(tl.logging.DEBUG)
    enc_net = vgg19(pretrained=True, end_with='conv4_1')
    # NOTE: batch_norm=True lowers the quality of the generated image and would require retraining
    dec_net = vgg19_rev(pretrained=False, batch_norm=False, input_depth=512)
    if osp.exists(DEC_LATEST_WEIGHTS_PATH):
        dec_net.load_weights(DEC_LATEST_WEIGHTS_PATH, skip=True)

    enc_net.eval()
    dec_net.eval()
    for epoch in range(1):  # loop to test the generator's validity on reuse
        # Note: the generator must be recreated before each reuse
        test_inputs_gen = utils.single_inputs_generator(
            list(zip(test_content_filenames, test_style_filenames)),
            CONTENT_DATA_PATH, STYLE_DATA_PATH, TEST_INPUT_CONSTRAINED_SIZE)
        for i, (test_content, test_style) in enumerate(test_inputs_gen):
            # shape=[1, w, h, c], so arbitrarily sized test images can be fed one by one
            content_features = enc_net(test_content)
            style_features = enc_net(test_style)
            target_features = utils.AdaIN(content_features,
                                          style_features,
                                          alpha=1)
            del content_features, style_features
            generated_images = dec_net(target_features)
            paired_name = f"{osp.splitext(test_style_filenames[i])[0]}+{osp.splitext(test_content_filenames[i])[0]}"
            utils.imsave(
                osp.join(TEMP_IMAGE_PATH,
                         f"temp_{paired_name}_epoch{epoch}.jpg"),
                generated_images[0].numpy())
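

# utils.single_inputs_generator is not shown on this page. A hypothetical
# sketch of such a generator, assuming it loads each (content, style) pair as
# RGB, shrinks any side exceeding the constrained size, and yields batches of
# one so arbitrarily sized images can be processed individually:
def single_inputs_generator_sketch(filename_pairs, content_dir, style_dir, max_size):
    import os.path as osp
    import cv2
    import numpy as np
    for content_name, style_name in filename_pairs:
        pair = []
        for directory, name in ((content_dir, content_name), (style_dir, style_name)):
            img = cv2.cvtColor(cv2.imread(osp.join(directory, name)), cv2.COLOR_BGR2RGB)
            h, w = img.shape[:2]
            scale = min(1.0, max_size / max(h, w))  # shrink only, never upscale
            if scale < 1.0:
                img = cv2.resize(img, (int(w * scale), int(h * scale)))
            pair.append(img[np.newaxis].astype(np.float32))  # batch of one: [1, h, w, c]
        yield pair[0], pair[1]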
Example #3
        def forward(self, inputs, training=None, alpha=1):
            """
            :param inputs: [content_batch, style_batch], both have shape as [batch_size, w, h, c]
            :param training:
            :param alpha:
            :return:
            """
            # TL1to2: preprocessing and reverse -> vgg forward() will handle it
            # # switch RGB to BGR
            # content = tf.reverse(content_input, axis=[-1])
            # style = tf.reverse(style_input, axis=[-1])
            # # preprocess image
            # content = Encoder.preprocess(content_input)
            # style = Encoder.preprocess(style_input)
            content, style = inputs

            # encode image
            # we should initialize global variables before restoring the model
            content_features = self.enc_net(content)
            style_features = self.enc_net(style)

            # pass the encoded images to AdaIN  # IMPROVE: try alpha gradients
            target_features = utils.AdaIN(content_features,
                                          style_features,
                                          alpha=alpha)

            # decode target features back to image
            generated_img = self.dec_net(target_features)

            # # deprocess image
            # generated_img = Encoder.reverse_preprocess(generated_img)
            # # switch BGR back to RGB
            # generated_img = tf.reverse(generated_img, axis=[-1])
            # # clip to 0..255
            # generated_img = tf.clip_by_value(generated_img, 0.0, 255.0)

            return generated_img
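
        # Hypothetical usage of a model wrapping this forward() (the enclosing
        # class is not shown on this page; `StyleTransferModel` is an
        # illustrative name, not confirmed by the source):
        #
        #   model = StyleTransferModel()
        #   model.eval()
        #   stylized = model([content_batch, style_batch], alpha=0.8)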
Example #4
        content_input = tf.placeholder(tf.float32,
                                       shape=INPUT_SHAPE,
                                       name='content_input')
        style_input = tf.placeholder(tf.float32,
                                     shape=INPUT_SHAPE,
                                     name='style_input')

        # switch RGB to BGR
        content = tf.reverse(content_input, axis=[-1])
        style = tf.reverse(style_input, axis=[-1])
        # preprocess image
        content = encoder.preprocess(content)
        style = encoder.preprocess(style)

        content_enc_net = encoder.encode(content, 'content/')
        style_enc_net = encoder.encode(style, 'style/')

        adain_features = utils.AdaIN(content_enc_net.outputs,
                                     style_enc_net.outputs)

        stylized_dec_net = decoder.decode(adain_features, 'stylized_dec/')

        # add the mean values back
        stylized_image = encoder.deprocess(stylized_dec_net.outputs)

        # switch BGR back to RGB
        stylized_image = tf.reverse(stylized_image, axis=[-1])

        # clip to 0..255
        stylized_image = tf.clip_by_value(stylized_image, 0.0, 255.0)

        # switch RGB back to BGR (e.g. for OpenCV-style output)
        stylized_image = tf.reverse(stylized_image, axis=[-1])
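
        # A minimal TF1-style driver for the graph built above (a sketch that
        # assumes INPUT_SHAPE is a fully specified static shape; a real run
        # would restore trained weights rather than merely initialize variables):
        import numpy as np
        content_batch = np.random.uniform(0, 255, INPUT_SHAPE).astype(np.float32)
        style_batch = np.random.uniform(0, 255, INPUT_SHAPE).astype(np.float32)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            out = sess.run(stylized_image,
                           feed_dict={content_input: content_batch,
                                      style_input: style_batch})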
Example #5
        def forward(self, inputs: list, training=None, alpha=1):
            """
            :param inputs: [content_batch, style_batch], both have shape as [batch_size, w, h, c]
            :param training:
            :param alpha:
            :return:
            """
            # TL1to2: preprocessing and reverse -> vgg forward() will handle it
            # # switch RGB to BGR
            # content = tf.reverse(content_input, axis=[-1])
            # style = tf.reverse(style_input, axis=[-1])
            # # preprocess image
            # content = Encoder.preprocess(content_input)
            # style = Encoder.preprocess(style_input)
            content, style = inputs

            # 1. encode images: get content features, plus style features observed at intermediate layers
            c_content_features = self.enc_net(content)
            s_content_features, s_style_feats_in_layers = self.enc_net(style, observed_layer_names=STYLE_LAYERS)

            # 2. pass the encoded content and style to AdaIN
            target_features = utils.AdaIN(c_content_features, s_content_features, alpha=alpha)

            # 3. decode target features back into a generated image
            generated_images = self.dec_net(target_features)

            # # de-preprocess image
            # generated_images = Encoder.reverse_preprocess(generated_images)
            # # switch BGR back to RGB
            # generated_images = tf.reverse(generated_images, axis=[-1])
            # # clip to 0..255
            # generated_images = tf.clip_by_value(generated_images, 0.0, 255.0)

            # 4. compute content and style features of the generated image
            g_content_features, g_style_feats_in_layers = self.enc_net(generated_images, observed_layer_names=STYLE_LAYERS)
            tl.logging.info(
                f"c_c_feat:{c_content_features.shape}, s_c_feat:{s_content_features.shape}, "
                f"t_feat:{target_features.shape}, g:{generated_images.shape}, g_c_feat:{g_content_features.shape}")

            # 5. compute losses
            self.content_loss = tf.reduce_sum(
                tf.reduce_mean(tf.square(g_content_features - target_features), axis=[1, 2]))

            style_layer_loss = []
            for idx, layer_name in enumerate(STYLE_LAYERS):
                # TL1to2: tl.layers.get_layers_with_name -> observe intermediate outputs through model.__call__
                # s_style_feat = tl.layers.get_layers_with_name(self.enc_s_net, 'style/' + layer, True)[0]
                # g_style_feat = tl.layers.get_layers_with_name(self.enc_net, 'stylized_enc/' + layer, True)[0]
                s_style_feat = s_style_feats_in_layers[idx]
                g_style_feat = g_style_feats_in_layers[idx]
                mean_s, var_s = tf.nn.moments(s_style_feat, [1, 2])
                mean_g, var_g = tf.nn.moments(g_style_feat, [1, 2])
                sigma_s = tf.sqrt(var_s + EPSILON)
                sigma_g = tf.sqrt(var_g + EPSILON)
                l2_mean = tf.reduce_sum(tf.square(mean_g - mean_s))
                l2_sigma = tf.reduce_sum(tf.square(sigma_g - sigma_s))
                style_layer_loss.append(l2_mean + l2_sigma)
            self.style_loss = tf.reduce_sum(style_layer_loss)

            self.loss = self.content_loss + self.style_weight * self.style_loss  # IMPROVE: tune STYLE_WEIGHT

            return generated_images
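
        # A hypothetical training step built around this forward() (sketch only;
        # the enclosing model class is not shown, and `model`, `optimizer`,
        # `content_batch`, `style_batch` are illustrative names). Only the
        # decoder is trained; the frozen VGG encoder just supplies features:
        #
        #   optimizer = tf.optimizers.Adam(1e-4)
        #   with tf.GradientTape() as tape:
        #       model([content_batch, style_batch], training=True)
        #   grads = tape.gradient(model.loss, model.dec_net.trainable_weights)
        #   optimizer.apply_gradients(zip(grads, model.dec_net.trainable_weights))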
Example #6
        content_input = tf.placeholder(tf.float32,
                                       shape=INPUT_SHAPE,
                                       name='content_input')
        style_input = tf.placeholder(tf.float32,
                                     shape=INPUT_SHAPE,
                                     name='style_input')

        # switch RGB to BGR
        content = tf.reverse(content_input, axis=[-1])
        style = tf.reverse(style_input, axis=[-1])
        # preprocess image
        content = encoder.preprocess(content)
        style = encoder.preprocess(style)

        # encode image
        # we should initialize global variables before restoring the model
        enc_c_net = encoder.encode(content, 'content/')
        enc_s_net = encoder.encode(style, 'style/')

        # pass the encoded images to AdaIN
        target_features = utils.AdaIN(enc_c_net.outputs, enc_s_net.outputs)

        # decode target features back to image
        dec_net = decoder.decode(target_features, prefix="decoder/")

        generated_img = dec_net.outputs

        # deprocess image
        generated_img = encoder.deprocess(generated_img)

        # switch BGR back to RGB
        generated_img = tf.reverse(generated_img, axis=[-1])

        # clip to 0..255
        generated_img = tf.clip_by_value(generated_img, 0.0, 255.0)
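
        # encoder.preprocess/deprocess are not shown on this page. VGG-family
        # encoders conventionally subtract/add the ImageNet per-channel BGR
        # means; a sketch under that assumption (this repo's exact values may differ):
        VGG_MEAN_BGR = (103.939, 116.779, 123.68)

        def preprocess_sketch(bgr_image):
            return bgr_image - tf.constant(VGG_MEAN_BGR, dtype=tf.float32)

        def deprocess_sketch(bgr_image):
            return bgr_image + tf.constant(VGG_MEAN_BGR, dtype=tf.float32)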
Example #7
def main(*args, **kwargs):
    INFO('--- Experiment begins: {}---------------'.format(Config.ExperimentName))
    config_experiment = ConfigSerializer.load(Path.ExperimentConfigAbs)

    init_logging(VerbosityLevel.DEBUG)

    from helpers.tf_helper import async_preload_gpu_devices
    async_preload_gpu_devices()

    # TODO: explain terms or key concepts in comments, ref: overlook.vsd
    from modules.data.data_manager import DataManager
    x_train, y_train = None, None
    x_test, y_test = None, None
    data = None
    from modules.models.model_manager import ModelManager
    model = None

    if config_experiment.train.enabled:
        INFO('--- Training begins ---------')
        config_data: Params = config_experiment.data_set.data
        data = DataManager.load_data(config_data.signature, **config_data)
        INFO(f"data loaded: {dump_iterable_data(data)}")
        if not isinstance(data, tuple) or any(not isinstance(_, tuple) for _ in data):
            raise ValueError("data loaded must be in type of ((x_train, y_train), (x_test, y_test))")
        (x_train, y_train), (x_test, y_test) = data  # unpack: tuple of 4 np.ndarrays
        # e.g.((60000,28,28), (60000,)), ((10000,28,28), (10000,))

        config_model: Params = config_experiment.model_set.model_base
        model = ModelManager.load_model(config_model.signature, **config_model)
        model = ModelManager.model_train(model, data=(x_train, y_train), **config_experiment.train)
        eval_metrics = ModelManager.model_evaluate(model, data=(x_test, y_test))

    else:
        INFO('--- Training was disabled ---------')

    if config_experiment.predict.enabled:
        INFO('--- Prediction begins ---------')
        predictions = None
        while True:
            if 'meta_info' not in vars():
                meta_info = {}  # retrieve meta info from DataManager
            if x_test is None or y_test is None:  # not config_experiment.train.enabled
                data_key = str(config_experiment.predict.data_inputs)
                config_data_test: Params = config_experiment.data_set[data_key]
                # the test signature is "ui_web_files"; keep compatibility with other data types
                data = DataManager.load_data(config_data_test.signature,
                                             meta_info=meta_info, **config_data_test)
                INFO(f"data loaded: {dump_iterable_data(data)}")
                import tensorflow as tf
                from helpers.tf_helper import is_tfdataset
                if isinstance(data, tf.data.Dataset):
                    from helpers.tf_helper import tf_obj_to_np_array
                    if type(data.element_spec) is tuple:
                        # x_test = data.map(lambda x, y: x)
                        # y_test = data.map(lambda x, y: y)

                        # IMPROVE: unzip the ZipDataset by dataset.map(lambda). Any tf API for unzip?
                        x_test = tf_obj_to_np_array(data.map(lambda x, y: x))
                        y_test = tf_obj_to_np_array(data.map(lambda x, y: y))
                    else:
                        # data = data.batch(1)  # TODO: read config `batch_size` in model_train()
                        # data = data.prefetch(1)
                        x_test, y_test = tf_obj_to_np_array(data), None

            enc, dec = None, None
            if enc is None or dec is None:  # not config_experiment.train.enabled
                config_model_enc: Params = config_experiment.model_set.model_enc
                config_model_dec: Params = config_experiment.model_set.model_dec
                if not (config_model_enc.is_defined() and config_model_dec.is_defined()):
                    raise ValueError('Config error: `model_trained` node is not defined')
                enc = ModelManager.load_model(config_model_enc.signature, **config_model_enc)
                dec = ModelManager.load_model(config_model_dec.signature, **config_model_dec)

            generated = None
            generated_saved = False
            webapp = ensure_web_app()

            @webapp.on_task_query(namespace="main::model_predict", onetime=False)
            def handle_task_query(task_id):
                nonlocal generated
                handler_result = {}
                if not generated_saved:
                    handler_result.update({'status': 'processing'})
                else:
                    # return abspath to webapp
                    handler_result.update({'status': 'finished', 'result': generated_save_path})
                    generated = None
                return handler_result
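
            # The handler above implements a simple polling protocol: clients
            # query the task until it returns {'status': 'finished', 'result':
            # <abspath>}. A hypothetical client-side loop (illustrative names):
            #
            #   while True:
            #       reply = query_task(task_id)
            #       if reply['status'] == 'finished':
            #           fetch(reply['result']); break
            #       time.sleep(0.5)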

            c_feature = ModelManager.model_predict(enc, x_test[0], **config_experiment.predict_enc)
            s_feature = ModelManager.model_predict(enc, x_test[1], **config_experiment.predict_enc)
            target_features = utils.AdaIN(c_feature, s_feature, alpha=1)
            generated = ModelManager.model_predict(dec, target_features, **config_experiment.predict_dec)
            import tensorflow as tf
            if isinstance(generated, tf.Tensor):
                if generated.dtype == tf.float32:
                    generated = tf.cast(generated, tf.uint8)
                generated = generated.numpy()
            # show_image_mat(generated[0])
            generated_save_path = osp.join(Path.ExperimentFolderAbs, tmp_filename_by_time('jpg'))
            save_image_mat(generated[0], generated_save_path)
            generated_saved = True
        # INFO(f"generated: {', '.join([str(_) for _ in generated])}")
    else:
        INFO('--- Prediction was disabled ---------')

    INFO('--- Experiment ends: {}---------------'.format(Config.ExperimentName))