Example #1
import numpy as np
from lucid.misc.gl import glrenderer


def test_glrenderer():
    w, h = 400, 200
    renderer = glrenderer.MeshRenderer((w, h))
    renderer.fovy = 90
    position = [[0, 1, -1], [-2, -1, -1], [2, -1, -1]]
    color = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    img = renderer.render_mesh(position, color)
    img, alpha = img[..., :3], img[..., 3]

    assert all(img[0, 0] == 0)  # black corner
    assert all(img[0, -1] == 0)  # black corner
    assert img[10, w // 2].argmax() == 0  # red vertex (top center)
    assert img[-1, 10].argmax() == 1  # green vertex (bottom left)
    assert img[-1, -10].argmax() == 2  # blue vertex (bottom right)
    assert np.abs(img.sum(-1) - alpha).max() < 1e-5
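The final assertion holds because each vertex color is one-hot, so the rasterizer's barycentric interpolation makes the RGB channels sum to the total interpolation weight, which is exactly the coverage value written to alpha. A minimal numpy check of that identity (independent of the renderer; the weights below are arbitrary):

import numpy as np

w = np.array([0.2, 0.5, 0.3])  # barycentric weights of some interior pixel
colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], np.float32)

rgb = w @ colors   # interpolated pixel color
alpha = w.sum()    # coverage written to the alpha channel
assert abs(rgb.sum() - alpha) < 1e-6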
Example #2
    def textureStyle3D(self):
        print("Importing tensorflow...")
        import tensorflow as tf

        print("Checking that GPU is visible for tensorflow...")
        if not tf.test.is_gpu_available():
            raise Exception("No GPU available for tensorflow!")

        print("Importing other libraries...")
        import os
        import io
        import sys
        from string import Template
        from pathlib import Path

        import numpy as np
        import PIL.Image
        # import matplotlib.pylab as pl

        from IPython.display import clear_output, display, Image, HTML

        # if os.name != 'nt':
        #     from lucid.misc.gl.glcontext import create_opengl_context
        import OpenGL.GL as gl

        from lucid.misc.gl import meshutil
        from lucid.misc.gl import glrenderer
        import lucid.misc.io.showing as show
        import lucid.misc.io as lucid_io
        from lucid.misc.tfutil import create_session

        from lucid.modelzoo import vision_models
        from lucid.optvis import objectives
        from lucid.optvis import param
        from lucid.optvis.style import StyleLoss, mean_l1_loss
        from lucid.optvis.param.spatial import sample_bilinear

        # if os.name != 'nt':
        #     print("Creating OpenGL context...")
        #     create_opengl_context()
        gl.glGetString(gl.GL_VERSION)

        print("Loading vision model...")
        model = vision_models.InceptionV1()
        model.load_graphdef()

        def prepare_image(fn, size=None):
            data = lucid_io.reading.read(fn)
            im = PIL.Image.open(io.BytesIO(data)).convert('RGB')
            if size:
                im = im.resize(size, PIL.Image.ANTIALIAS)
            return np.float32(im) / 255.0

        self.loadCameras()

        print("Loading input model from '{}'...".format(self.input_model_path))
        mesh = meshutil.load_obj(self.input_model_path)
        if self.cameras is None:
            mesh = meshutil.normalize_mesh(mesh)

        print("Loading input texture from '{}'...".format(
            self.input_texture_path))
        original_texture = prepare_image(
            self.input_texture_path, (self.texture_size, self.texture_size))

        print("Loading style from '{}'...".format(self.style_path))
        style = prepare_image(self.style_path)

        rendering_width = self.rendering_width
        rendering_height = int(rendering_width // self.aspect_ratio)

        print("Creating renderer with resolution {}x{}...".format(
            rendering_width, rendering_height))
        renderer = glrenderer.MeshRenderer((rendering_width, rendering_height))
        if self.cameras is not None:
            print("  renderer fovy: {:.2f} degrees".format(self.max_fovy))
            renderer.fovy = self.max_fovy

        sess = create_session(timeout_sec=0)

        # t_fragments is used to feed rasterized UV coordinates for the current view.
        # Channels: [U, V, _, Alpha]. Alpha is 1 for pixels covered by the object, and
        # 0 for background.
        t_fragments = tf.placeholder(tf.float32, [None, None, 4])
        t_uv = t_fragments[..., :2]
        t_alpha = t_fragments[..., 3:]
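        # e.g. a fragment [0.25, 0.75, 0.0, 1.0] means "sample the texture at
        # (u=0.25, v=0.75)" for a pixel that is covered by the object.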

        # Texture atlas to optimize
        t_texture = param.image(self.texture_size, fft=True,
                                decorrelate=True)[0]

        # Variable to store the original mesh texture used to render content views
        content_var = tf.Variable(tf.zeros(
            [self.texture_size, self.texture_size, 3]),
                                  trainable=False)

        # Sample current and original textures with provided pixel data
        t_joined_texture = tf.concat([t_texture, content_var], -1)
        t_joined_frame = sample_bilinear(t_joined_texture, t_uv) * t_alpha
        t_frame_current, t_frame_content = t_joined_frame[
            ..., :3], t_joined_frame[..., 3:]
        t_joined_frame = tf.stack([t_frame_current, t_frame_content], 0)
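        # The stacked batch pairs the two renders: index 0 is the frame from
        # the texture being optimized, index 1 the original-texture frame.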

        # Feeding the rendered frames to the Neural Network
        t_input = tf.placeholder_with_default(t_joined_frame,
                                              [None, None, None, 3])
        model.import_graph(t_input)

        # style loss
        style_layers = [
            sess.graph.get_tensor_by_name('import/%s:0' % s)[0]
            for s in self.googlenet_style_layers
        ]
        # L1-loss seems to be more stable for GoogleNet
        # Note that we use style_decay>0 to average style-describing Gram matrices
        # over the recent viewports. Please refer to StyleLoss for the details.
        sl = StyleLoss(style_layers, self.style_decay, loss_func=mean_l1_loss)

        # content loss
        content_layer = sess.graph.get_tensor_by_name(
            'import/%s:0' % self.googlenet_content_layer)
        content_loss = mean_l1_loss(content_layer[0],
                                    content_layer[1]) * self.content_weight

        # setup optimization
        total_loss = content_loss + sl.style_loss
        t_lr = tf.constant(0.05)
        trainer = tf.train.AdamOptimizer(t_lr)
        train_op = trainer.minimize(total_loss)

        init_op = tf.global_variables_initializer()
        loss_log = []

        def reset(style_img, content_texture):
            del loss_log[:]
            init_op.run()
            sl.set_style({t_input: style_img[None, ...]})
            content_var.load(content_texture)

        def sample_random_view():
            if self.cameras is None:
                return meshutil.sample_view(10.0, 12.0)
            else:
                rand_m = self.cameras[np.random.randint(0, len(
                    self.cameras))]["transformToCamera"].copy()
                return rand_m

        def run(mesh, step_n=400):
            # QtWidgets is expected to be imported at module level (Metashape
            # ships PySide2 for its scripting environment).
            app = QtWidgets.QApplication.instance()

            for i in range(step_n):
                fragments = renderer.render_mesh(
                    modelview=sample_random_view(),
                    position=mesh['position'],
                    uv=mesh['uv'],
                    face=mesh['face'])
                _, loss = sess.run([train_op, [content_loss, sl.style_loss]],
                                   {t_fragments: fragments})
                loss_log.append(loss)
                if i == 0 or (i + 1) % 50 == 0:
                    # clear_output()
                    last_frame, last_content = sess.run(
                        [t_frame_current, t_frame_content],
                        {t_fragments: fragments})
                    # show.images([last_frame, last_content], ['current frame', 'content'])
                if i == 0 or (i + 1) % 10 == 0:
                    print(len(loss_log), loss)

                # Show progress
                self.pBar.setValue(int(
                    (i + step_n // 10 + 1) / (step_n + step_n // 10) * 100))
                app.processEvents()

        reset(style, original_texture)

        print("Running {} iterations...".format(self.steps_number))
        run(mesh, step_n=self.steps_number)

        print("Finished!")
        texture = t_texture.eval()
        print("Exporting result texture to '{}'...".format(
            self.output_texture_path))
        lucid_io.save(texture, self.output_texture_path, quality=90)

        sess.close()

        print("Importing result model to Metashape '{}'...".format(
            self.result_model_path))
        chunk.model = None
        chunk.importModel(self.result_model_path)
        chunk.model.label = self.style_name

        Metashape.app.messageBox(
            "Everything worked fine!\n"
            "Please save project and RESTART Metashape!\n"
            "Because video memory was not released by TensorFlow!")
Example #3
# parse_args, load_and_tile_images, create_textures, create_rendered_images,
# create_model, create_attack, create_evaluation, and batch_accumulate are
# helpers defined elsewhere in this script, alongside the usual imports
# (tf, np, os, sys, time, logging, functools.partial, and lucid's gl modules).
def main():
    # Parse args
    args = parse_args(object_yaw_min=0,
                      object_yaw_max=360,
                      object_pitch_min=-10,
                      object_pitch_max=30,
                      object_z_min=15,
                      object_z_max=60)

    # Setup logging
    logger = logging.getLogger()
    logger.setLevel(logging.ERROR)
    tf.logging.set_verbosity(tf.logging.ERROR)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    log = logging.getLogger('shapeshifter')
    log.setLevel(logging.DEBUG if args.verbose else logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s: %(levelname)s %(name)s: %(message)s')
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    log.addHandler(handler)
    log.propagate = False

    # Set seeds from the start
    if args.seed:
        log.debug("Setting seed")
        np.random.seed(args.seed)
        tf.set_random_seed(args.seed)

    # Load textures, textures_masks, backgrounds, and mesh
    log.debug("Loading textures, textures masks, backgrounds, and mesh")
    backgrounds = load_and_tile_images(args.backgrounds)

    textures = load_and_tile_images(args.textures)
    textures_masks = (load_and_tile_images(args.textures_masks)[:, :, :, :1] >=
                      0.5).astype(np.float32)
    assert (textures.shape[:3] == textures_masks.shape[:3])

    objects = [
        meshutil.normalize_mesh(meshutil.load_obj(obj)) for obj in args.objects
    ]
    objects_masks = None

    # Create OpenGL context and mesh renderer
    log.debug("Creating renderer")
    create_opengl_context((backgrounds.shape[2], backgrounds.shape[1]))
    renderer = glrenderer.MeshRenderer(
        (backgrounds.shape[2], backgrounds.shape[1]))

    # Create test data
    generate_data_partial = partial(generate_data,
                                    renderer=renderer,
                                    backgrounds=backgrounds,
                                    objects=objects,
                                    objects_masks=objects_masks,
                                    objects_class=args.target_class,
                                    objects_transforms={
                                        'yaw_range': args.object_yaw_range,
                                        'yaw_bins': args.object_yaw_bins,
                                        'yaw_fn': args.object_yaw_fn,
                                        'pitch_range': args.object_pitch_range,
                                        'pitch_bins': args.object_pitch_bins,
                                        'pitch_fn': args.object_pitch_fn,
                                        'roll_range': args.object_roll_range,
                                        'roll_bins': args.object_roll_bins,
                                        'roll_fn': args.object_roll_fn,
                                        'x_range': args.object_x_range,
                                        'x_bins': args.object_x_bins,
                                        'x_fn': args.object_x_fn,
                                        'y_range': args.object_y_range,
                                        'y_bins': args.object_y_bins,
                                        'y_fn': args.object_y_fn,
                                        'z_range': args.object_z_range,
                                        'z_bins': args.object_z_bins,
                                        'z_fn': args.object_z_fn
                                    },
                                    textures_transforms={
                                        'yaw_range': args.texture_yaw_range,
                                        'yaw_bins': args.texture_yaw_bins,
                                        'yaw_fn': args.texture_yaw_fn,
                                        'pitch_range':
                                        args.texture_pitch_range,
                                        'pitch_bins': args.texture_pitch_bins,
                                        'pitch_fn': args.texture_pitch_fn,
                                        'roll_range': args.texture_roll_range,
                                        'roll_bins': args.texture_roll_bins,
                                        'roll_fn': args.texture_roll_fn,
                                        'x_range': args.texture_x_range,
                                        'x_bins': args.texture_x_bins,
                                        'x_fn': args.texture_x_fn,
                                        'y_range': args.texture_y_range,
                                        'y_bins': args.texture_y_bins,
                                        'y_fn': args.texture_y_fn,
                                        'z_range': args.texture_z_range,
                                        'z_bins': args.texture_z_bins,
                                        'z_fn': args.texture_z_fn
                                    },
                                    seed=args.seed)

    # Create adversarial textures, render the mesh using them, and feed the
    # rendered images to the model. Finally, create summary statistics.
    log.debug("Creating perturbable texture")
    textures_var_, textures_ = create_textures(
        textures,
        textures_masks,
        use_spectral=args.spectral,
        soft_clipping=args.soft_clipping)

    log.debug("Creating rendered input images")
    input_images_ = create_rendered_images(args.batch_size, textures_)

    log.debug("Creating object detection model")
    predictions, detections, losses = create_model(input_images_,
                                                   args.model_config,
                                                   args.model_checkpoint,
                                                   is_training=True)

    log.debug("Creating attack losses")
    victim_class_, target_class_, losses_summary_ = create_attack(
        textures_,
        textures_var_,
        predictions,
        losses,
        optimizer_name=args.optimizer,
        clip=args.sign_gradients)

    log.debug("Creating evaluation metrics")
    metrics_summary_, texture_summary_ = create_evaluation(
        victim_class_, target_class_, textures_, textures_masks, input_images_,
        detections)

    summaries_ = tf.summary.merge([losses_summary_, metrics_summary_])

    global_init_op_ = tf.global_variables_initializer()
    local_init_op_ = tf.local_variables_initializer()

    # Create tensorboard file writer for train and test evaluations
    saver = tf.train.Saver([textures_var_, tf.train.get_global_step()])
    train_writer = None
    test_writer = None

    if args.logdir is not None:
        log.debug(f"Tensorboard logging: {args.logdir}")
        os.makedirs(args.logdir, exist_ok=True)

        arguments_summary_ = tf.summary.text(
            'Arguments', tf.constant('```' + ' '.join(sys.argv[1:]) + '```'))
        # TODO: Save argparse

        graph = None
        if args.save_graph:
            log.debug("Graph will be saved to tensorboard")
            graph = tf.get_default_graph()

        train_writer = tf.summary.FileWriter(args.logdir + '/train',
                                             graph=graph)
        test_writer = tf.summary.FileWriter(args.logdir + '/test')

        # Find existing checkpoint
        os.makedirs(args.logdir + '/checkpoints', exist_ok=True)
        checkpoint_path = tf.train.latest_checkpoint(args.logdir +
                                                     '/checkpoints')
        args.checkpoint = checkpoint_path

    # Create session
    log.debug("Creating session")
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    sess = tf.Session(config=config)
    sess.run(global_init_op_)
    sess.run(local_init_op_)

    # Set initial texture
    if args.checkpoint is not None:
        log.debug(f"Restoring from checkpoint: {args.checkpoint}")
        saver.restore(sess, args.checkpoint)
    else:
        if args.gray_start:
            log.debug("Setting texture to gray")
            textures = np.zeros_like(textures) + 128 / 255

        if args.random_start > 0:
            log.debug(
                f"Adding uniform random perturbation texture with at most {args.random_start}/255 per pixel"
            )
            textures = textures + np.random.randint(
                size=textures.shape,
                low=-args.random_start,
                high=args.random_start) / 255

        sess.run('project_op', {'textures:0': textures})

    # Get global step
    step = sess.run('global_step:0')

    if train_writer is not None:
        log.debug("Running arguments summary")
        summary = sess.run(arguments_summary_)

        train_writer.add_summary(summary, step)
        test_writer.add_summary(summary, step)

    loss_tensors = [
        'total_loss:0', 'total_rpn_cls_loss:0', 'total_rpn_loc_loss:0',
        'total_rpn_foreground_loss:0', 'total_rpn_background_loss:0',
        'total_rpn_cw_loss:0', 'total_box_cls_loss:0', 'total_box_loc_loss:0',
        'total_box_target_loss:0', 'total_box_victim_loss:0',
        'total_box_target_cw_loss:0', 'total_box_victim_cw_loss:0',
        'total_sim_loss:0', 'grad_l2:0', 'grad_linf:0'
    ]

    metric_tensors = [
        'proposal_average_precision:0', 'victim_average_precision:0',
        'target_average_precision:0'
    ]

    output_tensors = loss_tensors + metric_tensors

    log.info(
        'global_step [%s]', ', '.join([
            tensor.replace(':0', '').replace('total_', '')
            for tensor in output_tensors
        ]))

    test_feed_dict = {
        'learning_rate:0': args.learning_rate,
        'momentum:0': args.momentum,
        'decay:0': args.decay,
        'rpn_iou_thresh:0': args.rpn_iou_threshold,
        'rpn_cls_weight:0': args.rpn_cls_weight,
        'rpn_loc_weight:0': args.rpn_loc_weight,
        'rpn_foreground_weight:0': args.rpn_foreground_weight,
        'rpn_background_weight:0': args.rpn_background_weight,
        'rpn_cw_weight:0': args.rpn_cw_weight,
        'rpn_cw_conf:0': args.rpn_cw_conf,
        'box_iou_thresh:0': args.box_iou_threshold,
        'box_cls_weight:0': args.box_cls_weight,
        'box_loc_weight:0': args.box_loc_weight,
        'box_target_weight:0': args.box_target_weight,
        'box_victim_weight:0': args.box_victim_weight,
        'box_target_cw_weight:0': args.box_target_cw_weight,
        'box_target_cw_conf:0': args.box_target_cw_conf,
        'box_victim_cw_weight:0': args.box_victim_cw_weight,
        'box_victim_cw_conf:0': args.box_victim_cw_conf,
        'sim_weight:0': args.sim_weight,
        'victim_class:0': args.victim_class,
        'target_class:0': args.target_class
    }
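    # Keys are tensor names: 'learning_rate:0' denotes the first output of the
    # op named 'learning_rate', so no Python handles to the graph are needed.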

    # Keep attacking until Ctrl+C; note the interrupt may land in the middle
    # of an operation.
    try:
        log.debug("Entering attacking loop (use ctrl+c to exit)")
        while True:
            # Run summaries as necessary
            if args.logdir and step % args.save_checkpoint_every == 0:
                log.debug("Saving checkpoint")
                saver.save(sess,
                           args.logdir + '/checkpoints/texture',
                           global_step=step,
                           write_meta_graph=False,
                           write_state=True)

            if step % args.save_texture_every == 0 and test_writer is not None:
                log.debug("Writing texture summary")
                test_texture = sess.run(texture_summary_)
                test_writer.add_summary(test_texture, step)

            if step % args.save_test_every == 0:
                log.debug("Runnning test summaries")
                start_time = time.time()

                sess.run(local_init_op_)
                batch_accumulate(sess, test_feed_dict, args.test_batch_size,
                                 args.batch_size, generate_data_partial,
                                 detections, predictions, args.categories)

                end_time = time.time()
                log.debug(
                    f"Loss accumulation took {end_time - start_time} seconds")

                test_output = sess.run(output_tensors, test_feed_dict)
                log.info('test %d %s', step, test_output)

                if test_writer is not None:
                    log.debug("Writing test summaries")
                    test_summaries = sess.run(summaries_, test_feed_dict)
                    test_writer.add_summary(test_summaries, step)

            # Create train feed_dict
            train_feed_dict = test_feed_dict.copy()

            train_feed_dict[
                'image_channel_multiplicative_noise:0'] = args.image_multiplicative_channel_noise_range
            train_feed_dict[
                'image_channel_additive_noise:0'] = args.image_additive_channel_noise_range
            train_feed_dict[
                'image_pixel_multiplicative_noise:0'] = args.image_multiplicative_pixel_noise_range
            train_feed_dict[
                'image_pixel_additive_noise:0'] = args.image_additive_pixel_noise_range
            train_feed_dict[
                'image_gaussian_noise_stddev:0'] = args.image_gaussian_noise_stddev_range

            train_feed_dict[
                'texture_channel_multiplicative_noise:0'] = args.texture_multiplicative_channel_noise_range
            train_feed_dict[
                'texture_channel_additive_noise:0'] = args.texture_additive_channel_noise_range
            train_feed_dict[
                'texture_pixel_multiplicative_noise:0'] = args.texture_multiplicative_pixel_noise_range
            train_feed_dict[
                'texture_pixel_additive_noise:0'] = args.texture_additive_pixel_noise_range
            train_feed_dict[
                'texture_gaussian_noise_stddev:0'] = args.texture_gaussian_noise_stddev_range

            # Zero out gradient accumulation, losses, and metrics, then accumulate batches
            log.debug("Starting gradient accumulation...")
            start_time = time.time()

            sess.run(local_init_op_)
            batch_accumulate(sess, train_feed_dict, args.train_batch_size,
                             args.batch_size, generate_data_partial,
                             detections, predictions, args.categories)

            end_time = time.time()
            log.debug(
                f"Gradient accumulation took {end_time - start_time} seconds")

            train_output = sess.run(output_tensors, train_feed_dict)
            log.info('train %d %s', step, train_output)

            if step % args.save_train_every == 0 and train_writer is not None:
                log.debug("Writing train summaries")
                    train_summaries = sess.run(summaries_, train_feed_dict)
                train_writer.add_summary(train_summaries, step)

            # Update textures and project texture to feasible set
            # TODO: We can probably run these together but probably need some control dependency
            log.debug("Projecting attack")
            sess.run('attack_op', train_feed_dict)
            sess.run('project_op')
            step = sess.run('global_step:0')

    except KeyboardInterrupt:
        log.warning('Interrupted')

    finally:
        if test_writer is not None:
            test_writer.close()

        if train_writer is not None:
            train_writer.close()

        if sess is not None:
            sess.close()
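The training loop drives accumulator ops ('attack_op', 'project_op', and whatever batch_accumulate runs) that are built elsewhere in this script. As a generic TF1 sketch of the zero/accumulate/apply pattern it follows (all names below are illustrative, not the repo's):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 3], name='x')
w = tf.Variable(tf.ones([3]))
loss = tf.reduce_mean(tf.square(tf.reduce_sum(x * w, -1) - 1.0))

grad = tf.gradients(loss, w)[0]
grad_acc = tf.Variable(tf.zeros([3]), trainable=False)
zero_op = grad_acc.assign(tf.zeros([3]))  # reset between steps
accum_op = grad_acc.assign_add(grad)      # run once per sub-batch
apply_op = w.assign_sub(0.05 * grad_acc)  # one update from the accumulated sum

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(zero_op)
    for _ in range(4):  # accumulate gradients over several small batches
        sess.run(accum_op, {x: [[0.1, 0.2, 0.3]]})
    sess.run(apply_op)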
Example #4
def index():
    # prepare_image, reset, and run are assumed to be defined at module level
    # (see Example #2), along with tf, np, and the lucid imports.
    # return 'Hello World!'
    model = vision_models.InceptionV1()
    model.load_graphdef()
    TEXTURE_SIZE = 1024
    mesh = meshutil.load_obj('article_models/bunny.obj')
    mesh = meshutil.normalize_mesh(mesh)
    original_texture = prepare_image('article_models/bunny.png',
                                     (TEXTURE_SIZE, TEXTURE_SIZE))
    style_url = 'https://upload.wikimedia.org/wikipedia/commons/d/db/RIAN_archive_409362_Literaturnaya_Gazeta_article_about_YuriGagarin%2C_first_man_in_space.jpg'
    style = prepare_image(style_url)
    renderer = glrenderer.MeshRenderer((512, 512))
    googlenet_style_layers = [
        'conv2d2',
        'mixed3a',
        'mixed3b',
        'mixed4a',
        'mixed4b',
        'mixed4c',
    ]
    googlenet_content_layer = 'mixed3b'
    content_weight = 100.0
    # Style Gram matrix weighted average decay coefficient
    style_decay = 0.95

    sess = create_session(timeout_sec=0)

    # t_fragments is used to feed rasterized UV coordinates for the current view.
    # Channels: [U, V, _, Alpha]. Alpha is 1 for pixels covered by the object, and
    # 0 for background.
    t_fragments = tf.placeholder(tf.float32, [None, None, 4])
    t_uv = t_fragments[..., :2]
    t_alpha = t_fragments[..., 3:]

    # Texture atlas to optimize
    t_texture = param.image(TEXTURE_SIZE, fft=True, decorrelate=True)[0]

    # Variable to store the original mesh texture used to render content views
    content_var = tf.Variable(tf.zeros([TEXTURE_SIZE, TEXTURE_SIZE, 3]),
                              trainable=False)

    # Sample current and original textures with provided pixel data
    t_joined_texture = tf.concat([t_texture, content_var], -1)
    t_joined_frame = sample_bilinear(t_joined_texture, t_uv) * t_alpha
    t_frame_current, t_frame_content = t_joined_frame[..., :3], t_joined_frame[
        ..., 3:]
    t_joined_frame = tf.stack([t_frame_current, t_frame_content], 0)

    # Feeding the rendered frames to the Neural Network
    t_input = tf.placeholder_with_default(t_joined_frame,
                                          [None, None, None, 3])
    model.import_graph(t_input)

    # style loss
    style_layers = [
        sess.graph.get_tensor_by_name('import/%s:0' % s)[0]
        for s in googlenet_style_layers
    ]
    # L1-loss seems to be more stable for GoogleNet
    # Note that we use style_decay>0 to average style-describing Gram matrices
    # over the recent viewports. Please refer to StyleLoss for the details.
    sl = StyleLoss(style_layers, style_decay, loss_func=mean_l1_loss)

    # content loss
    content_layer = sess.graph.get_tensor_by_name('import/%s:0' %
                                                  googlenet_content_layer)
    content_loss = mean_l1_loss(content_layer[0],
                                content_layer[1]) * content_weight

    # setup optimization
    total_loss = content_loss + sl.style_loss
    t_lr = tf.constant(0.05)
    trainer = tf.train.AdamOptimizer(t_lr)
    train_op = trainer.minimize(total_loss)

    init_op = tf.global_variables_initializer()
    loss_log = []

    reset(style, original_texture)
    run(mesh)
    texture = t_texture.eval()
    return show.textured_mesh(mesh, texture)
def prepare_image(fn, size=None):
  data = lucid_io.reading.read(fn)
  im = PIL.Image.open(io.BytesIO(data)).convert('RGB')
  if size:
    im = im.resize(size, PIL.Image.ANTIALIAS)
  return np.float32(im)/255.0

mesh = meshutil.load_obj('article_models/bunny.obj')
mesh = meshutil.normalize_mesh(mesh)

original_texture = prepare_image('article_models/bunny.png', (TEXTURE_SIZE, TEXTURE_SIZE))

style_url = 'https://upload.wikimedia.org/wikipedia/commons/d/db/RIAN_archive_409362_Literaturnaya_Gazeta_article_about_YuriGagarin%2C_first_man_in_space.jpg'
style = prepare_image(style_url)
show.image(style, 'jpeg')

"""## Texture Synthesis"""

renderer = glrenderer.MeshRenderer((512, 512))

googlenet_style_layers = [
    'conv2d2',
    'mixed3a',
    'mixed3b',
    'mixed4a',
    'mixed4b',
    'mixed4c',
]

googlenet_content_layer = 'mixed3b'

content_weight = 100.0
# Style Gram matrix weighted average decay coefficient
style_decay = 0.95
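style_decay sets the weight of an exponential moving average over the Gram matrices gathered from successive random viewports; the actual bookkeeping lives in lucid's StyleLoss. Schematically, with random arrays standing in for layer activations:

import numpy as np

def gram(feats):
    # feats: [N, C] flattened activations of one layer for one viewport.
    return feats.T @ feats / len(feats)

style_decay = 0.95
gram_avg = None
for _ in range(10):  # stand-in for successive randomly sampled viewports
    g = gram(np.random.rand(64, 8).astype(np.float32))
    gram_avg = g if gram_avg is None else style_decay * gram_avg + (1 - style_decay) * g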