Example #1
def compileShader(self):
    # Create an offscreen OpenGL context matching this object's dimensions
    # and clear the color buffer.
    create_opengl_context((self.width, self.height))
    glClear(GL_COLOR_BUFFER_BIT)
    # Compile and link the vertex and fragment shaders into a single program.
    self.shader = shaders.compileProgram(self.compileVertexShader(), self.compileFragmentShader())
    # Cache uniform locations so they can be fed on each draw.
    self.xpos = glGetUniformLocation(self.shader, 'xpos')
    self.ypos = glGetUniformLocation(self.shader, 'ypos')
    self.vdir_x = glGetUniformLocation(self.shader, 'vdir_x')
    self.vdir_y = glGetUniformLocation(self.shader, 'vdir_y')
    self.vdir_z = glGetUniformLocation(self.shader, 'vdir_z')
    self.arrow_size = glGetUniformLocation(self.shader, 'size')
    self.res_loc = glGetUniformLocation(self.shader, 'iResolution')
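
# A hedged sketch of how the uniform locations cached above might be fed when
# drawing a frame: glUseProgram and glUniform* are standard PyOpenGL calls, but
# the drawFrame method, the example values, and the vec2 iResolution uniform
# are assumptions, not part of the original class.
def drawFrame(self):
    glUseProgram(self.shader)
    # Placeholder values for the uniforms located in compileShader().
    glUniform1f(self.xpos, 0.0)
    glUniform1f(self.ypos, 0.0)
    glUniform1f(self.vdir_x, 0.0)
    glUniform1f(self.vdir_y, 0.0)
    glUniform1f(self.vdir_z, 1.0)
    glUniform1f(self.arrow_size, 1.0)
    glUniform2f(self.res_loc, self.width, self.height)  # assumes iResolution is a vec2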
Example #2
def main():
    # Parse args
    args = parse_args(object_yaw_min=0,
                      object_yaw_max=360,
                      object_pitch_min=-10,
                      object_pitch_max=30,
                      object_z_min=15,
                      object_z_max=60)

    # Setup logging
    logger = logging.getLogger()
    logger.setLevel(logging.ERROR)
    tf.logging.set_verbosity(tf.logging.ERROR)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    log = logging.getLogger('shapeshifter')
    log.setLevel(logging.DEBUG if args.verbose else logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s: %(levelname)s %(name)s: %(message)s')
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    log.addHandler(handler)
    log.propagate = False

    # Set seeds from the start
    if args.seed:
        log.debug("Setting seed")
        np.random.seed(args.seed)
        tf.set_random_seed(args.seed)

    # Load textures, textures_masks, backgrounds, and mesh
    log.debug("Loading textures, textures masks, backgrounds, and mesh")
    backgrounds = load_and_tile_images(args.backgrounds)

    textures = load_and_tile_images(args.textures)
    textures_masks = (load_and_tile_images(args.textures_masks)[:, :, :, :1] >=
                      0.5).astype(np.float32)
    assert (textures.shape[:3] == textures_masks.shape[:3])

    objects = [
        meshutil.normalize_mesh(meshutil.load_obj(obj)) for obj in args.objects
    ]
    objects_masks = None

    # Create OpenGL context and mesh renderer
    log.debug("Creating renderer")
    create_opengl_context((backgrounds.shape[2], backgrounds.shape[1]))
    renderer = glrenderer.MeshRenderer(
        (backgrounds.shape[2], backgrounds.shape[1]))

    # Create test data
    generate_data_partial = partial(generate_data,
                                    renderer=renderer,
                                    backgrounds=backgrounds,
                                    objects=objects,
                                    objects_masks=objects_masks,
                                    objects_class=args.target_class,
                                    objects_transforms={
                                        'yaw_range': args.object_yaw_range,
                                        'yaw_bins': args.object_yaw_bins,
                                        'yaw_fn': args.object_yaw_fn,
                                        'pitch_range': args.object_pitch_range,
                                        'pitch_bins': args.object_pitch_bins,
                                        'pitch_fn': args.object_pitch_fn,
                                        'roll_range': args.object_roll_range,
                                        'roll_bins': args.object_roll_bins,
                                        'roll_fn': args.object_roll_fn,
                                        'x_range': args.object_x_range,
                                        'x_bins': args.object_x_bins,
                                        'x_fn': args.object_x_fn,
                                        'y_range': args.object_y_range,
                                        'y_bins': args.object_y_bins,
                                        'y_fn': args.object_y_fn,
                                        'z_range': args.object_z_range,
                                        'z_bins': args.object_z_bins,
                                        'z_fn': args.object_z_fn
                                    },
                                    textures_transforms={
                                        'yaw_range': args.texture_yaw_range,
                                        'yaw_bins': args.texture_yaw_bins,
                                        'yaw_fn': args.texture_yaw_fn,
                                        'pitch_range':
                                        args.texture_pitch_range,
                                        'pitch_bins': args.texture_pitch_bins,
                                        'pitch_fn': args.texture_pitch_fn,
                                        'roll_range': args.texture_roll_range,
                                        'roll_bins': args.texture_roll_bins,
                                        'roll_fn': args.texture_roll_fn,
                                        'x_range': args.texture_x_range,
                                        'x_bins': args.texture_x_bins,
                                        'x_fn': args.texture_x_fn,
                                        'y_range': args.texture_y_range,
                                        'y_bins': args.texture_y_bins,
                                        'y_fn': args.texture_y_fn,
                                        'z_range': args.texture_z_range,
                                        'z_bins': args.texture_z_bins,
                                        'z_fn': args.texture_z_fn
                                    },
                                    seed=args.seed)

    # Create adversarial textures, render mesh using them, and pass rendered images into model. Finally, create summary statistics.
    log.debug("Creating perturbable texture")
    textures_var_, textures_ = create_textures(
        textures,
        textures_masks,
        use_spectral=args.spectral,
        soft_clipping=args.soft_clipping)

    log.debug("Creating rendered input images")
    input_images_ = create_rendered_images(args.batch_size, textures_)

    log.debug("Creating object detection model")
    predictions, detections, losses = create_model(input_images_,
                                                   args.model_config,
                                                   args.model_checkpoint,
                                                   is_training=True)

    log.debug("Creating attack losses")
    victim_class_, target_class_, losses_summary_ = create_attack(
        textures_,
        textures_var_,
        predictions,
        losses,
        optimizer_name=args.optimizer,
        clip=args.sign_gradients)

    log.debug("Creating evaluation metrics")
    metrics_summary_, texture_summary_ = create_evaluation(
        victim_class_, target_class_, textures_, textures_masks, input_images_,
        detections)

    summaries_ = tf.summary.merge([losses_summary_, metrics_summary_])

    global_init_op_ = tf.global_variables_initializer()
    local_init_op_ = tf.local_variables_initializer()

    # Create tensorboard file writer for train and test evaluations
    saver = tf.train.Saver([textures_var_, tf.train.get_global_step()])
    train_writer = None
    test_writer = None

    if args.logdir is not None:
        log.debug(f"Tensorboard logging: {args.logdir}")
        os.makedirs(args.logdir, exist_ok=True)

        arguments_summary_ = tf.summary.text(
            'Arguments', tf.constant('```' + ' '.join(sys.argv[1:]) + '```'))
        # TODO: Save argparse

        graph = None
        if args.save_graph:
            log.debug("Graph will be saved to tensorboard")
            graph = tf.get_default_graph()

        train_writer = tf.summary.FileWriter(args.logdir + '/train',
                                             graph=graph)
        test_writer = tf.summary.FileWriter(args.logdir + '/test')

        # Find existing checkpoint
        os.makedirs(args.logdir + '/checkpoints', exist_ok=True)
        checkpoint_path = tf.train.latest_checkpoint(args.logdir +
                                                     '/checkpoints')
        args.checkpoint = checkpoint_path

    # Create session
    log.debug("Creating session")
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    sess = tf.Session(config=config)
    sess.run(global_init_op_)
    sess.run(local_init_op_)

    # Set initial texture
    if args.checkpoint is not None:
        log.debug(f"Restoring from checkpoint: {args.checkpoint}")
        saver.restore(sess, args.checkpoint)
    else:
        if args.gray_start:
            log.debug("Setting texture to gray")
            textures = np.zeros_like(textures) + 128 / 255

        if args.random_start > 0:
            log.debug(
                f"Adding uniform random perturbation texture with at most {args.random_start}/255 per pixel"
            )
            textures = textures + np.random.randint(
                size=textures.shape,
                low=-args.random_start,
                high=args.random_start) / 255

        sess.run('project_op', {'textures:0': textures})

    # Get global step
    step = sess.run('global_step:0')

    if train_writer is not None:
        log.debug("Running arguments summary")
        summary = sess.run(arguments_summary_)

        train_writer.add_summary(summary, step)
        test_writer.add_summary(summary, step)

    loss_tensors = [
        'total_loss:0', 'total_rpn_cls_loss:0', 'total_rpn_loc_loss:0',
        'total_rpn_foreground_loss:0', 'total_rpn_background_loss:0',
        'total_rpn_cw_loss:0', 'total_box_cls_loss:0', 'total_box_loc_loss:0',
        'total_box_target_loss:0', 'total_box_victim_loss:0',
        'total_box_target_cw_loss:0', 'total_box_victim_cw_loss:0',
        'total_sim_loss:0', 'grad_l2:0', 'grad_linf:0'
    ]

    metric_tensors = [
        'proposal_average_precision:0', 'victim_average_precision:0',
        'target_average_precision:0'
    ]

    output_tensors = loss_tensors + metric_tensors

    log.info(
        'global_step [%s]', ', '.join([
            tensor.replace(':0', '').replace('total_', '')
            for tensor in output_tensors
        ]))

    test_feed_dict = {
        'learning_rate:0': args.learning_rate,
        'momentum:0': args.momentum,
        'decay:0': args.decay,
        'rpn_iou_thresh:0': args.rpn_iou_threshold,
        'rpn_cls_weight:0': args.rpn_cls_weight,
        'rpn_loc_weight:0': args.rpn_loc_weight,
        'rpn_foreground_weight:0': args.rpn_foreground_weight,
        'rpn_background_weight:0': args.rpn_background_weight,
        'rpn_cw_weight:0': args.rpn_cw_weight,
        'rpn_cw_conf:0': args.rpn_cw_conf,
        'box_iou_thresh:0': args.box_iou_threshold,
        'box_cls_weight:0': args.box_cls_weight,
        'box_loc_weight:0': args.box_loc_weight,
        'box_target_weight:0': args.box_target_weight,
        'box_victim_weight:0': args.box_victim_weight,
        'box_target_cw_weight:0': args.box_target_cw_weight,
        'box_target_cw_conf:0': args.box_target_cw_conf,
        'box_victim_cw_weight:0': args.box_victim_cw_weight,
        'box_victim_cw_conf:0': args.box_victim_cw_conf,
        'sim_weight:0': args.sim_weight,
        'victim_class:0': args.victim_class,
        'target_class:0': args.target_class
    }

    # Keep attacking until CTRL+C. The only issue is that we may be in the middle of some operation.
    try:
        log.debug("Entering attacking loop (use ctrl+c to exit)")
        while True:
            # Run summaries as necessary
            if args.logdir and step % args.save_checkpoint_every == 0:
                log.debug("Saving checkpoint")
                saver.save(sess,
                           args.logdir + '/checkpoints/texture',
                           global_step=step,
                           write_meta_graph=False,
                           write_state=True)

            if step % args.save_texture_every == 0 and test_writer is not None:
                log.debug("Writing texture summary")
                test_texture = sess.run(texture_summary_)
                test_writer.add_summary(test_texture, step)

            if step % args.save_test_every == 0:
                log.debug("Runnning test summaries")
                start_time = time.time()

                sess.run(local_init_op_)
                batch_accumulate(sess, test_feed_dict, args.test_batch_size,
                                 args.batch_size, generate_data_partial,
                                 detections, predictions, args.categories)

                end_time = time.time()
                log.debug(
                    f"Loss accumulation took {end_time - start_time} seconds")

                test_output = sess.run(output_tensors, test_feed_dict)
                log.info('test %d %s', step, test_output)

                if test_writer is not None:
                    log.debug("Writing test summaries")
                    test_summaries = sess.run(summaries_, test_feed_dict)
                    test_writer.add_summary(test_summaries, step)

            # Create train feed_dict
            train_feed_dict = test_feed_dict.copy()

            train_feed_dict[
                'image_channel_multiplicative_noise:0'] = args.image_multiplicative_channel_noise_range
            train_feed_dict[
                'image_channel_additive_noise:0'] = args.image_additive_channel_noise_range
            train_feed_dict[
                'image_pixel_multiplicative_noise:0'] = args.image_multiplicative_pixel_noise_range
            train_feed_dict[
                'image_pixel_additive_noise:0'] = args.image_additive_pixel_noise_range
            train_feed_dict[
                'image_gaussian_noise_stddev:0'] = args.image_gaussian_noise_stddev_range

            train_feed_dict[
                'texture_channel_multiplicative_noise:0'] = args.texture_multiplicative_channel_noise_range
            train_feed_dict[
                'texture_channel_additive_noise:0'] = args.texture_additive_channel_noise_range
            train_feed_dict[
                'texture_pixel_multiplicative_noise:0'] = args.texture_multiplicative_pixel_noise_range
            train_feed_dict[
                'texture_pixel_additive_noise:0'] = args.texture_additive_pixel_noise_range
            train_feed_dict[
                'texture_gaussian_noise_stddev:0'] = args.texture_gaussian_noise_stddev_range

            # Zero out gradient accumulation, losses, and metrics, then accumulate batches
            log.debug("Starting gradient accumulation...")
            start_time = time.time()

            sess.run(local_init_op_)
            batch_accumulate(sess, train_feed_dict, args.train_batch_size,
                             args.batch_size, generate_data_partial,
                             detections, predictions, args.categories)

            end_time = time.time()
            log.debug(
                f"Gradient accumulation took {end_time - start_time} seconds")

            train_output = sess.run(output_tensors, train_feed_dict)
            log.info('train %d %s', step, train_output)

            if step % args.save_train_every == 0 and train_writer is not None:
                log.debug("Writing train summaries")
                train_summaries = sess.run(summaries_, train_feed_dict)
                train_writer.add_summary(train_summaries, step)

            # Update textures and project texture to feasible set
            # TODO: We can probably run these together but probably need some control dependency
            log.debug("Projecting attack")
            sess.run('attack_op', train_feed_dict)
            sess.run('project_op')
            step = sess.run('global_step:0')

    except KeyboardInterrupt:
        log.warning('Interrupted')

    finally:
        if test_writer is not None:
            test_writer.close()

        if train_writer is not None:
            train_writer.close()

        if sess is not None:
            sess.close()
Example #3
# create_opengl_context and gl are used further down in this example;
# glcontext must be imported before OpenGL.GL.
from lucid.misc.gl.glcontext import create_opengl_context
import OpenGL.GL as gl

from lucid.misc.gl import meshutil
from lucid.misc.gl import glrenderer
import lucid.misc.io.showing as show
import lucid.misc.io as lucid_io
from lucid.misc.tfutil import create_session

from lucid.modelzoo import vision_models
from lucid.optvis import objectives
from lucid.optvis import param
from lucid.optvis.style import StyleLoss, mean_l1_loss
from lucid.optvis.param.spatial import sample_bilinear

"""You can check the installed version of OpenGL:"""

create_opengl_context()
gl.glGetString(gl.GL_VERSION)

model = vision_models.InceptionV1()
model.load_graphdef()

"""## Loading 3D model

Let's download some 3D models first. This is similar to the steps in the [3D Feature Visualization notebook](https://colab.research.google.com/github/tensorflow/lucid/blob/master/notebooks/differentiable-parameterizations/texture_synth_3d.ipynb) if you're keen on the details and haven't completed that notebook yet.
"""

TEXTURE_SIZE = 1024

!gsutil cp gs://deepdream/article_models.zip . && \
 unzip -qo article_models.zip && \
 ls -al article_models && \
Example #4
import pytest

import os
import numpy as np

HAVE_COLAB_NVIDIA = (os.path.exists('/usr/lib64-nvidia/')
                     and os.path.exists('/opt/bin/nvidia-smi'))

WIDTH, HEIGHT = 200, 100

if HAVE_COLAB_NVIDIA:
    from lucid.misc.gl import glcontext  # must be imported before OpenGL.GL
    import OpenGL.GL as gl
    from lucid.misc.gl import glrenderer

    glcontext.create_opengl_context((WIDTH, HEIGHT))


@pytest.mark.skipif(not HAVE_COLAB_NVIDIA, reason="GPU Colab kernel only")
def test_gl_context():
    # Render triangle
    gl.glClear(gl.GL_COLOR_BUFFER_BIT)
    gl.glBegin(gl.GL_TRIANGLES)
    gl.glColor3f(1, 0, 0)
    gl.glVertex2f(0, 1)

    gl.glColor3f(0, 1, 0)
    gl.glVertex2f(-1, -1)

    gl.glColor3f(0, 0, 1)
    gl.glVertex2f(1, -1)