def transfer(content, weights, max_dim, result):
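    """Apply style transfer to an image or video with a trained network.

    Args:
        content: path to the content image or video file.
        weights: path to the trained feed-forward network weights.
        max_dim: maximum dimension used when loading the content image.
        result: path where the stylized output is saved.
    """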

    if content[-3:] in image_type:
        # Build the feed-forward network and load the weights.
        network = feed_forward()
        network.load_weights(weights).expect_partial()

        # Load content image.
        image = load_img(path_to_img=content, max_dim=max_dim, resize=False)

        print('Transferring image...')
        # Generate the stylized image
        image = network(image)

        # Clip pixel values to the [0, 1] range
        image = clip_0_1(image)

        # Save the stylized image
        tensor_to_image(image).save(result)

    else:
        network = feed_forward()
        network.load_weights(weights)

        resolve_video(network, path_to_video=content, result=result)
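
The helpers referenced above (feed_forward, load_img, tensor_to_image, image_type, clip_0_1) are defined elsewhere in the repository. A minimal sketch of the two simplest ones, assuming TensorFlow conventions; the extension list is a guess based on the content[-3:] check:

import tensorflow as tf

# Hypothetical: extensions matched by the content[-3:] check above, so
# only three-letter suffixes can work (e.g. '.jpeg' would not match).
image_type = ['jpg', 'png', 'bmp']

def clip_0_1(image):
    # Keep pixel values inside the valid [0, 1] range.
    return tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)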
Example #2
def main(mode, data_root, filename=None):
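    """Run GANonymizer in the given mode.

    Args:
        mode: one of 'img', 'demo', 'video', 'dir', or 'pmd'.
        data_root: root directory containing the 'input' folder and config.
        filename: input file name; required for 'img', 'demo', and 'video'.
    """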
    # validate the mode / filename combination
    if filename:
        assert mode in ('img', 'demo', 'video')
        filepath = os.path.join(data_root, 'input', filename)
    else:
        assert mode in ('pmd', 'dir')

    # set configs
    config = Config(get_config(data_root))
    config.mode = mode

    # set modules root path
    if os.path.basename(os.getcwd()) in ['src', 'app']:
        config.mod_root = os.path.join(os.getcwd(), 'modules')
    elif os.path.basename(os.getcwd()) == 'ganonymizerv2':
        config.mod_root = os.path.join(os.getcwd(), 'src', 'modules')

    # create directories
    if config.mode in ('img', 'demo'):
        filepath = create_dir(filepath)
    else:
        dirpath = create_dir(data_root)

    # setup environment
    device = torch.device(
        'cuda:{}'.format(config.cuda) if torch.cuda.is_available() else 'cpu')

    # In 'demo' and 'video' modes, every module's mode is switched to 'exec'.
    if config.mode in ('demo', 'video'):
        config = demo_config(config)

    # define the model
    model = GANonymizer(config, device)

    if config.mode in ('img', 'demo'):
        print('Loading "{}"'.format(filepath))
        img, fname, fext = load_img(filepath)
        if config.mode == 'demo':
            img = demo_resize(img)
        config.fname = fname
        config.fext = fext
        # model prediction
        model.predict(img)

    elif config.mode == 'dir':
        inpath = os.path.join(dirpath, 'input')
        files = os.listdir(inpath)
        files = [
            os.path.join(inpath, f) for f in files
            if os.path.isfile(os.path.join(inpath, f)) and f[0] != '.'
        ]
        for f in files:
            print('Loading "{}"'.format(f))
            img, fname, fext = load_img(f)
            config.fname = fname
            config.fext = fext
            # model prediction
            model.predict(img)

    elif config.mode == 'pmd':
        inpath = os.path.join(dirpath, 'input')
        files = os.listdir(inpath)
        files = [
            os.path.join(inpath, f) for f in files
            if os.path.isfile(os.path.join(inpath, f)) and f[0] != '.'
        ]
        for f in files:
            print('Loading "{}"'.format(f))

            # with pmd
            config = pmd_mode_change(config, 'on')
            model.reload_config(config)
            img, fname, fext = load_img(f)
            config.fname = fname
            config.fext = fext
            # model prediction
            out_on = model.predict(img)

            # without pmd
            config = pmd_mode_change(config, 'off')
            model.reload_config(config)
            img, fname, fext = load_img(f)
            config.fname = fname
            config.fext = fext
            # model prediction
            out_off = model.predict(img)

    elif config.mode == "video":
        print("Loading '{}'".format(filepath))
        count = 1
        # Load the video
        fname, cap, origin_fps, frames, width, height = load_video(filepath)
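        # The writer frame is twice the input height because the original
        # frame and the model output are stacked vertically before writing.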
        writer = video_writer(filepath, origin_fps, width, height * 2)
        config.fname, config.fext = "0", "0"

        while cap.isOpened():
            print('')
            ret, frame = cap.read()
            if ret:
                print('-----------------------------------------------------')
                print('[INFO] Count: {}/{}'.format(count, frames))

                # process
                img = copy.deepcopy(frame)
                output = model.predict(img)
                concat = np.concatenate([frame, output], axis=0)
                writer.write(concat)
                count += 1

            else:
                break

        # Stop video process
        cap.release()
        writer.release()
        cv2.destroyAllWindows()
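
A minimal sketch of invoking main from the command line; the flag names and defaults here are assumptions for illustration, not the repository's actual CLI:

if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', default='img',
                        choices=['img', 'demo', 'video', 'dir', 'pmd'])
    parser.add_argument('--data_root', default='./data')
    parser.add_argument('--filename', default=None)
    args = parser.parse_args()

    main(args.mode, args.data_root, filename=args.filename)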
Example #3
def trainer(style_file, dataset_path, weights_path, content_weight,
            style_weight, tv_weight, learning_rate, batch_size, epochs, debug):
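    """Train the feed-forward style transfer network.

    Args:
        style_file: path to the style target image.
        dataset_path: directory of .jpg content images for training.
        weights_path: path where checkpoints and final weights are saved.
        content_weight: weight of the content loss term.
        style_weight: weight of the style loss term.
        tv_weight: weight of the total variation loss term.
        learning_rate: Adam learning rate.
        batch_size: number of 256x256 content images per step.
        epochs: number of passes over the dataset.
        debug: if True, print loss values at every checkpoint.
    """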

    # Setup the given layers
    content_layers = ['block4_conv2']

    style_layers = [
        'block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1',
        'block5_conv1'
    ]

    # Build Feed-forward transformer
    network = feed_forward()

    # Build VGG-19 Loss network
    extractor = StyleContentModel(style_layers, content_layers)

    # Load style target image
    style_image = load_img(style_file, resize=False)

    # Initialize content target images
    batch_shape = (batch_size, 256, 256, 3)
    X_batch = np.zeros(batch_shape, dtype=np.float32)

    # Extract style target
    style_target = extractor(style_image * 255.0)['style']

    # Build optimizer
    opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)

    loss_metric = tf.keras.metrics.Mean()
    sloss_metric = tf.keras.metrics.Mean()
    closs_metric = tf.keras.metrics.Mean()
    tloss_metric = tf.keras.metrics.Mean()

    @tf.function()
    def train_step(X_batch):
        with tf.GradientTape() as tape:
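            # The VGG-based extractor expects pixels in [0, 255], so the
            # [0, 1] content batch is rescaled here; the transformer's
            # output is presumably already in that range.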

            content_target = extractor(X_batch * 255.0)['content']
            image = network(X_batch)
            outputs = extractor(image)

            s_loss = style_weight * style_loss(outputs['style'], style_target)
            c_loss = content_weight * content_loss(outputs['content'],
                                                   content_target)
            t_loss = tv_weight * total_variation_loss(image)
            loss = s_loss + c_loss + t_loss

        grad = tape.gradient(loss, network.trainable_variables)
        opt.apply_gradients(zip(grad, network.trainable_variables))

        loss_metric(loss)
        sloss_metric(s_loss)
        closs_metric(c_loss)
        tloss_metric(t_loss)

    train_dataset = tf.data.Dataset.list_files(dataset_path + '/*.jpg')
    train_dataset = train_dataset.map(
        load_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    train_dataset = train_dataset.shuffle(1024)
    train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
    train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)

    import time
    start = time.time()

    for e in range(epochs):
        print('Epoch {}'.format(e))
        iteration = 0

        for img in train_dataset:

            for j, img_p in enumerate(img):
                X_batch[j] = img_p

            iteration += 1

            train_step(X_batch)

            if iteration % 3000 == 0:
                # Save checkpoints
                network.save_weights(weights_path, save_format='tf')
                print('=====================================')
                print('            Weights saved!           ')
                print('=====================================\n')

                if debug:
                    print('step %s: loss = %s' %
                          (iteration, loss_metric.result()))
                    print('s_loss={}, c_loss={}, t_loss={}'.format(
                        sloss_metric.result(), closs_metric.result(),
                        tloss_metric.result()))

    end = time.time()
    print("Total time: {:.1f}".format(end - start))

    # Training is done !
    network.save_weights(weights_path, save_format='tf')
    print('=====================================')
    print('             All saved!              ')
    print('=====================================\n')
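
A hedged example of calling trainer; the paths are placeholders and the loss weights are illustrative values common for feed-forward style transfer, not values taken from this code:

trainer(style_file='style/wave.jpg',    # hypothetical path
        dataset_path='data/train2014',  # hypothetical path
        weights_path='weights/transfer',
        content_weight=1e4,
        style_weight=1e-2,
        tv_weight=30,
        learning_rate=1e-3,
        batch_size=4,
        epochs=2,
        debug=True)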