Code Example #1
# ## Visualize Activations
#
# In some cases it helps to look at the output from different layers and visualize them to catch issues and odd patterns.

# In[32]:

# Get activations of a few sample layers
activations = model.run_graph(
    [image],
    [
        ("input_image", model.keras_model.get_layer("input_image").output),
        ("res2c_out", model.keras_model.get_layer("res2c_out").output),
        ("res3c_out", model.keras_model.get_layer("res3c_out").output),
        ("res4w_out",
         model.keras_model.get_layer("res4w_out").output),  # for resnet100
        ("rpn_bbox", model.keras_model.get_layer("rpn_bbox").output),
        ("roi", model.keras_model.get_layer("ROI").output),
    ])

# In[33]:

# Input image (normalized)
_ = plt.imshow(modellib.unmold_image(activations["input_image"][0], config))

# In[34]:

# Backbone feature map
display_images(np.transpose(activations["res2c_out"][0, :, :, :4], [2, 0, 1]),
               cols=4)
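
# Beyond eyeballing the feature maps, the captured activations can be
# summarized numerically to spot dead or exploding layers. A minimal sketch,
# assuming run_graph returns a dict of layer name -> numpy array (as it is
# indexed above):
for name, act in activations.items():
    act = np.asarray(act)
    print("{:15s} shape={} mean={:.4f} std={:.4f} NaNs={}".format(
        name, act.shape, float(act.mean()), float(act.std()),
        int(np.isnan(act).sum())))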
Code Example #2
def train_adversarial_batch(model,
                            datagenerator,
                            target_attack=False,
                            show_perturbation=False,
                            use_mask=False,
                            save_adversarials_to_logs=False):
    for inputs in datagenerator:
        images = inputs[0]
        image_metas = inputs[1]
        rpn_match = inputs[2]
        rpn_bbox = inputs[3]
        gt_class_ids = inputs[4]
        gt_boxes = inputs[5]
        gt_masks = inputs[6]

        # image_metas as numpy array
        image_metas = image_metas.numpy()

        # To GPU
        if model.config.GPU_COUNT:
            images = images.cuda()
            rpn_match = rpn_match.cuda()
            rpn_bbox = rpn_bbox.cuda()
            gt_class_ids = gt_class_ids.cuda()
            gt_boxes = gt_boxes.cuda()
            gt_masks = gt_masks.cuda()

        # SETTINGS

        steps = 20
        max_perturbation = 15

        # Wrap in variables
        images_orig = images.clone()
        images = Variable(images, requires_grad=True)
        rpn_match = Variable(rpn_match)
        rpn_bbox = Variable(rpn_bbox)
        gt_class_ids = Variable(gt_class_ids)
        gt_boxes = Variable(gt_boxes)
        gt_masks = Variable(gt_masks)

        # Create a mask covering the first ground-truth box (limits where the perturbation is applied)
        mask = create_mask(images_orig.shape, gt_boxes[0][0])

        for step in range(steps):
            model.zero_grad()
            zero_gradients(images)
            # Run object detection
            rpn_class_logits, rpn_pred_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask = \
                model.predict([images, image_metas, gt_class_ids, gt_boxes, gt_masks], mode='training')

            # Compute losses
            rpn_class_loss, rpn_bbox_loss, mrcnn_class_loss, mrcnn_bbox_loss, mrcnn_mask_loss = compute_losses(
                rpn_match, rpn_bbox, rpn_class_logits, rpn_pred_bbox,
                target_class_ids, mrcnn_class_logits, target_deltas,
                mrcnn_bbox, target_mask, mrcnn_mask)
            loss = rpn_class_loss + rpn_bbox_loss + mrcnn_class_loss + mrcnn_bbox_loss + mrcnn_mask_loss
            loss.backward()
            print("step {}: loss={}".format(step, loss.data.cpu().numpy()[0]))

            # Calculate gradient
            #grad = images.grad * 10000
            grad = torch.sign(images.grad)

            # Restrict the perturbation to the masked region if enabled
            if use_mask:
                grad.data = grad.data * mask

            # Clamp max perturbation per step
            grad = torch.clamp(grad, -max_perturbation / steps,
                               max_perturbation / steps)

            # Add/Subtract perturbation based on attack
            if target_attack:
                images_tmp = unmold_image_tensor(images.data - grad.data,
                                                 model.config)
            else:
                images_tmp = unmold_image_tensor(images.data + grad.data,
                                                 model.config)

            # Clamp to reasonable image values
            images_tmp = torch.clamp(images_tmp, 0, 255)
            images_data = mold_image_tensor(images_tmp, model.config)

            # Set adversarial image as new input
            images.data = images_data

        # Keep a copy of the final adversarial image tensor
        a = images[0].data.clone()

        class_names = [
            'BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
            'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
            'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
            'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
            'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
            'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
            'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
            'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
            'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
            'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
            'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
            'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
            'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
            'scissors', 'teddy bear', 'hair drier', 'toothbrush'
        ]

        # Run detection
        image_org = unmold_image(img_to_np(images_orig[0]), model.config)
        results = model.detect([image_org])

        # Visualize original
        r = results[0]
        display_instances(image_org, r['rois'], r['masks'], r['class_ids'],
                          class_names, r['scores'])

        # Run detection
        image_adv = unmold_image(img_to_np(a), model.config)
        results = model.detect([image_adv])

        # Visualize adversarial
        r = results[0]
        display_instances(image_adv, r['rois'], r['masks'], r['class_ids'],
                          class_names, r['scores'])

        if save_adversarials_to_logs:
            path = os.path.join(DEFAULT_LOGS_DIR, "adversarial_examples")
            pathlib.Path(path).mkdir(parents=True, exist_ok=True)

            path = os.path.join(
                path, "adversarial_example_" + str(int(time.time())) + ".jpg")
            skimage.io.imsave(path, image_adv)
            print("Adversarial exaple saved to: " + path)

        # Visualize perturbation
        if show_perturbation:
            image_tmp = image_adv.astype(np.int16)
            perturbation = image_tmp - image_org
            perturbation = perturbation.clip(0, 255)
            scale_factor = 255 / perturbation.max()

            # Show Plot
            _, ax = plt.subplots(1, figsize=(16, 16))
            height, width = perturbation.shape[:2]
            ax.set_ylim(height + 10, -10)
            ax.set_xlim(-10, width + 10)
            ax.axis('off')
            ax.set_title('Attacking noise (x{0:4.2f})'.format(scale_factor))
            ax.imshow(perturbation * scale_factor)
            plt.show()
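
# The create_mask helper called above is not defined in this snippet. The
# following is a hypothetical sketch of what it could look like, inferred
# from how its result is multiplied with the image gradient; the box is
# assumed to be (y1, x1, y2, x2) in pixel coordinates.
def create_mask(image_shape, box):
    # Binary mask: 1 inside the ground-truth box, 0 elsewhere, laid out like
    # the image tensor (batch, channels, height, width).
    y1, x1, y2, x2 = [int(round(float(c))) for c in box]
    mask = torch.zeros(image_shape)
    mask[:, :, y1:y2, x1:x2] = 1.0
    # Note: if the model runs on the GPU, the mask would also need .cuda()
    # before being multiplied with the gradient.
    return mask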
Code Example #3
def train_adversarial_batch(model,
                            datagenerator,
                            target_attack=False,
                            use_mask=False):
    for i, inputs in tqdm(enumerate(datagenerator), total=len(datagenerator)):
        images = inputs[0]
        image_metas = inputs[1]
        rpn_match = inputs[2]
        rpn_bbox = inputs[3]
        gt_class_ids = inputs[4]
        gt_boxes = inputs[5]
        gt_masks = inputs[6]

        # image_metas as numpy array
        image_metas = image_metas.numpy()

        # To GPU
        if model.config.GPU_COUNT:
            images = images.cuda()
            rpn_match = rpn_match.cuda()
            rpn_bbox = rpn_bbox.cuda()
            gt_class_ids = gt_class_ids.cuda()
            gt_boxes = gt_boxes.cuda()
            gt_masks = gt_masks.cuda()

        # SETTINGS
        steps = 30
        max_perturbation = 15

        # Wrap in variables
        images_orig = images.clone()
        images = Variable(images, requires_grad=True)
        rpn_match = Variable(rpn_match)
        rpn_bbox = Variable(rpn_bbox)
        gt_class_ids = Variable(gt_class_ids)
        gt_boxes = Variable(gt_boxes)
        gt_masks = Variable(gt_masks)

        # Create a mask covering the first ground-truth box (limits where the perturbation is applied)
        mask = create_mask(images_orig.shape, gt_boxes[0][0])

        for step in range(steps):
            model.zero_grad()
            zero_gradients(images)
            # Run object detection
            rpn_class_logits, rpn_pred_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask = \
                model.predict([images, image_metas, gt_class_ids, gt_boxes, gt_masks], mode='training')

            # Compute losses
            rpn_class_loss, rpn_bbox_loss, mrcnn_class_loss, mrcnn_bbox_loss, mrcnn_mask_loss = compute_losses(
                rpn_match, rpn_bbox, rpn_class_logits, rpn_pred_bbox,
                target_class_ids, mrcnn_class_logits, target_deltas,
                mrcnn_bbox, target_mask, mrcnn_mask)
            loss = rpn_class_loss + rpn_bbox_loss + mrcnn_class_loss + mrcnn_bbox_loss + mrcnn_mask_loss
            loss.backward()

            # Calculate gradient
            # grad = images.grad * 10000
            grad = torch.sign(images.grad)

            # Restrict the perturbation to the masked region if enabled
            if use_mask:
                grad.data = grad.data * mask

            # Clamp max perturbation per step
            grad = torch.clamp(grad, -max_perturbation / steps,
                               max_perturbation / steps)

            # Add/Subtract perturbation based on attack
            if target_attack:
                images_tmp = unmold_image_tensor(images.data - grad.data,
                                                 model.config)
            else:
                images_tmp = unmold_image_tensor(images.data + grad.data,
                                                 model.config)

            # Clamp to reasonable image values
            images_tmp = torch.clamp(images_tmp, 0, 255)
            images_data = mold_image_tensor(images_tmp, model.config)

            # Set adversarial image as new input
            images.data = images_data

        # Keep a copy of the final adversarial image tensor
        a = images[0].data.clone()
        """

        # Run detection
        image = unmold_image(img_to_np(images_orig[0]), model.config)
        results = model.detect([image])
        # Visualize results
        r = results[0]

        plt = display_instances(image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'])

        #plt.show()
        #plt.close()
        plt.savefig('./data/video/results/{}.png'.format(i))
        """
        # Run detection
        image = unmold_image(img_to_np(a), model.config)
        results = model.detect([image])

        # Visualize results
        r = results[0]
        plt = display_instances(image, r['rois'], r['masks'], r['class_ids'],
                                class_names, r['scores'])
        #plt.show()
        #plt.close()
        plt.savefig('./data/video/results_adversarial/{}.png'.format(i))
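
# A hypothetical invocation of the function above, assuming `datagenerator`
# yields the seven tensors unpacked at the top of the loop and that
# class_names and the output directory already exist. It runs an untargeted,
# mask-restricted attack and writes one visualization per frame to
# ./data/video/results_adversarial/.
train_adversarial_batch(model,
                        datagenerator,
                        target_attack=False,
                        use_mask=True)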
Code Example #4
log("rpn_match", rpn_match)
log("rpn_bbox", rpn_bbox)
image_id = image_meta[0][0]
print("image_id: ", image_id, dataset.image_reference(image_id))

# Remove the last dim in mrcnn_class_ids. It's only added
# to satisfy Keras restriction on target shape.
mrcnn_class_ids = mrcnn_class_ids[:, :, 0]

# In[16]:

b = 0

# Restore original image (reverse normalization)
sample_image = modellib.unmold_image(normalized_images[b], config)

# Compute anchor shifts.
indices = np.where(rpn_match[b] == 1)[0]
refined_anchors = utils.apply_box_deltas(
    anchors[indices], rpn_bbox[b, :len(indices)] * config.RPN_BBOX_STD_DEV)
log("anchors", anchors)
log("refined_anchors", refined_anchors)

# Get list of positive anchors
positive_anchor_ids = np.where(rpn_match[b] == 1)[0]
print("Positive anchors: {}".format(len(positive_anchor_ids)))
negative_anchor_ids = np.where(rpn_match[b] == -1)[0]
print("Negative anchors: {}".format(len(negative_anchor_ids)))
neutral_anchor_ids = np.where(rpn_match[b] == 0)[0]
print("Neutral anchors: {}".format(len(neutral_anchor_ids)))
Code Example #5
log("det_masks", det_masks)

display_images(det_mask_specific[:4] * 255, cmap="Blues", interpolation="none")

display_images(det_masks[:4] * 255, cmap="Blues", interpolation="none")

# Get activations of a few sample layers
activations = model.run_graph([image], [
    ("input_image",        model.keras_model.get_layer("input_image").output),
    ("res4w_out",          model.keras_model.get_layer("res4w_out").output),  # for resnet100
    ("rpn_bbox",           model.keras_model.get_layer("rpn_bbox").output),
    ("roi",                model.keras_model.get_layer("ROI").output),
])

# Input image (normalized)
_ = plt.imshow(modellib.unmold_image(activations["input_image"][0], config))

# Backbone feature map
display_images(np.transpose(activations["res4w_out"][0,:,:,:4], [2, 0, 1]))

# Histograms of RPN bounding box deltas
plt.figure(figsize=(12, 3))
plt.subplot(1, 4, 1)
plt.title("dy")
_ = plt.hist(activations["rpn_bbox"][0,:,0], 50)
plt.subplot(1, 4, 2)
plt.title("dx")
_ = plt.hist(activations["rpn_bbox"][0,:,1], 50)
plt.subplot(1, 4, 3)
plt.title("dw")
_ = plt.hist(activations["rpn_bbox"][0,:,2], 50)