Code Example #1
# Imports needed to run this test standalone; art_warning and get_mnist_dataset
# are pytest fixtures provided by the ART test suite.
import numpy as np
import tensorflow as tf  # TF1.x-style API (tf.placeholder, tf.reset_default_graph)

from tests.utils import ARTTestException, master_seed


def test_tf_faster_rcnn(art_warning, get_mnist_dataset):
    try:
        master_seed(seed=1234, set_tensorflow=True)

        # Only import if object detection module is available
        from art.estimators.object_detection.tensorflow_faster_rcnn import TensorFlowFasterRCNN

        # Define object detector
        images = tf.placeholder(tf.float32, shape=[1, 28, 28, 1])
        obj_dec = TensorFlowFasterRCNN(images=images)

        # Get test data
        (_, _), (x_test_mnist, y_test_mnist) = get_mnist_dataset
        x_test_mnist = x_test_mnist[:1]

        # First test predict
        result = obj_dec.predict(x_test_mnist)

        assert list(result[0].keys()) == ["boxes", "labels", "scores"]

        assert result[0]["boxes"].shape == (300, 4)
        expected_detection_boxes = np.asarray(
            [0.008862, 0.003788, 0.070454, 0.175931])
        np.testing.assert_array_almost_equal(result[0]["boxes"][2, :],
                                             expected_detection_boxes,
                                             decimal=3)

        assert result[0]["scores"].shape == (300, )
        expected_detection_scores = np.asarray([
            2.196349e-04,
            7.968055e-05,
            7.811916e-05,
            7.334248e-05,
            6.868376e-05,
            6.861838e-05,
            6.756858e-05,
            6.331169e-05,
            6.313509e-05,
            6.222352e-05,
        ])
        np.testing.assert_array_almost_equal(result[0]["scores"][:10],
                                             expected_detection_scores,
                                             decimal=3)

        assert result[0]["labels"].shape == (300, )
        # expected_detection_classes = np.asarray([37, 15, 15, 66, 15, 15, 15, 63, 2, 66])
        # np.testing.assert_array_almost_equal(result[0]["labels"][:10], expected_detection_classes)

        # Then test loss gradient
        # Create labels
        y = [{
            "boxes": result[0]["boxes"],
            "labels": result[0]["labels"],
            "scores": np.ones_like(result[0]["labels"])
        }]

        # Compute gradients
        grads = obj_dec.loss_gradient(x_test_mnist[:1], y)

        assert grads.shape == (1, 28, 28, 1)

        expected_gradients = np.asarray([
            [-0.00298723],
            [-0.0039893],
            [-0.00036253],
            [0.01038542],
            [0.01455704],
            [0.00995643],
            [0.00424966],
            [0.00470569],
            [0.00666382],
            [0.0028694],
            [0.00525351],
            [0.00889174],
            [0.0071413],
            [0.00618231],
            [0.00598106],
            [0.0072665],
            [0.00708815],
            [0.00286943],
            [0.00411595],
            [0.00788978],
            [0.00587319],
            [0.00808631],
            [0.01018151],
            [0.00867905],
            [0.00820272],
            [0.00124911],
            [-0.0042593],
            [0.02380728],
        ])
        np.testing.assert_array_almost_equal(grads[0, 0, :, :],
                                             expected_gradients,
                                             decimal=2)

        # Then test loss gradient with standard format
        # Create labels
        result_tf = obj_dec.predict(x_test_mnist, standardise_output=False)
        result = obj_dec.predict(x_test_mnist, standardise_output=True)

        from art.estimators.object_detection.utils import convert_tf_to_pt

        result_pt = convert_tf_to_pt(y=result_tf,
                                     height=x_test_mnist.shape[1],
                                     width=x_test_mnist.shape[2])

        np.testing.assert_array_equal(result[0]["boxes"],
                                      result_pt[0]["boxes"])
        np.testing.assert_array_equal(result[0]["labels"],
                                      result_pt[0]["labels"])
        np.testing.assert_array_equal(result[0]["scores"],
                                      result_pt[0]["scores"])

        y = [{
            "boxes": result[0]["boxes"],
            "labels": result[0]["labels"],
            "scores": np.ones_like(result[0]["labels"])
        }]

        # Compute gradients
        grads = obj_dec.loss_gradient(x_test_mnist[:1],
                                      y,
                                      standardise_output=True)

        assert grads.shape == (1, 28, 28, 1)

        expected_gradients = np.asarray([
            [-0.00095965],
            [-0.00265362],
            [-0.00031886],
            [0.01132964],
            [0.01674244],
            [0.01262039],
            [0.0063345],
            [0.00673249],
            [0.00618648],
            [0.00422678],
            [0.00542425],
            [0.00814896],
            [0.00919153],
            [0.01068758],
            [0.00929435],
            [0.00877143],
            [0.00747379],
            [0.0050377],
            [0.00656254],
            [0.00799547],
            [0.0051057],
            [0.00714598],
            [0.01090685],
            [0.00787637],
            [0.00709959],
            [0.00047201],
            [-0.00460457],
            [0.02629307],
        ])
        np.testing.assert_array_almost_equal(grads[0, 0, :, :],
                                             expected_gradients,
                                             decimal=2)

        obj_dec._sess.close()
        tf.reset_default_graph()

    except ARTTestException as e:
        art_warning(e)
Code Example #2
    # Fragment of a unittest.TestCase method; self.obj_dec and self.x_test_mnist
    # are assumed to be created in the test class's setUp/setUpClass.
    def test_loss_gradient_standard_format(self):
        # Create labels
        result_tf = self.obj_dec.predict(self.x_test_mnist[:2],
                                         standardise_output=False)
        result = self.obj_dec.predict(self.x_test_mnist[:2],
                                      standardise_output=True)

        from art.estimators.object_detection.utils import convert_tf_to_pt

        result_pt = convert_tf_to_pt(y=result_tf,
                                     height=self.x_test_mnist.shape[1],
                                     width=self.x_test_mnist.shape[2])
        for i in range(2):
            np.testing.assert_array_equal(result[i]["boxes"],
                                          result_pt[i]["boxes"])
            np.testing.assert_array_equal(result[i]["labels"],
                                          result_pt[i]["labels"])
            np.testing.assert_array_equal(result[i]["scores"],
                                          result_pt[i]["scores"])

        y = [
            {
                "boxes": result[0]["boxes"],
                "labels": result[0]["labels"],
                "scores": np.ones_like(result[0]["labels"]),
            },
            {
                "boxes": result[1]["boxes"],
                "labels": result[1]["labels"],
                "scores": np.ones_like(result[1]["labels"]),
            },
        ]

        # Compute gradients
        grads = self.obj_dec.loss_gradient(self.x_test_mnist[:2],
                                           y,
                                           standardise_output=True)

        self.assertTrue(grads.shape == (2, 28, 28, 1))

        expected_gradients1 = np.asarray([
            [-6.1982083e-03],
            [9.2188769e-04],
            [2.2715484e-03],
            [3.0439291e-03],
            [3.9350586e-03],
            [1.3214475e-03],
            [-1.9790903e-03],
            [-1.8616641e-03],
            [-1.7762191e-03],
            [-2.4208077e-03],
            [-2.1795963e-03],
            [-1.3475846e-03],
            [-1.7141351e-04],
            [5.3379539e-04],
            [6.1705662e-04],
            [9.1885449e-05],
            [-2.4936342e-04],
            [-7.8056828e-04],
            [-2.4509570e-04],
            [-1.3246380e-04],
            [-6.9344416e-04],
            [-2.8356430e-04],
            [1.1605137e-03],
            [2.7452575e-03],
            [2.9905243e-03],
            [2.2033940e-03],
            [1.7121597e-03],
            [8.4455572e-03],
        ])
        np.testing.assert_array_almost_equal(grads[0, 0, :, :],
                                             expected_gradients1,
                                             decimal=2)

        expected_gradients2 = np.asarray([
            [-8.14103708e-03],
            [-5.78497676e-03],
            [-1.93702651e-03],
            [-1.10854053e-04],
            [-3.13712610e-03],
            [-2.40660645e-03],
            [-2.33814842e-03],
            [-1.18874465e-04],
            [-8.61960289e-05],
            [-8.44302267e-05],
            [1.16928865e-03],
            [8.52172205e-04],
            [1.50172669e-03],
            [9.76039213e-04],
            [6.99639553e-04],
            [1.55441079e-03],
            [1.99828879e-03],
            [2.53868615e-03],
            [3.47398920e-03],
            [3.55495396e-03],
            [3.40546807e-03],
            [5.23657538e-03],
            [9.50821862e-03],
            [8.31787288e-03],
            [4.75075701e-03],
            [8.02019704e-03],
            [1.00337435e-02],
            [6.10247999e-03],
        ])
        np.testing.assert_array_almost_equal(grads[1, :, 0, :],
                                             expected_gradients2,
                                             decimal=2)
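
Both tests above rely on `convert_tf_to_pt` to map TensorFlow-style detections to the
PyTorch convention. Going by the `predict` docstring in Code Example #3 below (boxes
scaled from [0, 1] to image dimensions, label indices shifted by 1 for COCO, box order
changed from [y1, x1, y2, x2] to [x1, y1, x2, y2]), a minimal sketch of that conversion
could look as follows; this illustrates the documented behaviour only and is not ART's
actual implementation, so prefer `art.estimators.object_detection.utils.convert_tf_to_pt`.

import numpy as np

def convert_tf_to_pt_sketch(y, height, width):
    # Sketch only: TF boxes are [y1, x1, y2, x2] in [0, 1]; PyTorch-style boxes
    # are [x1, y1, x2, y2] in pixel coordinates, with labels shifted by 1.
    y_pt = []
    for y_i in y:
        boxes = y_i["boxes"][:, [1, 0, 3, 2]] * np.array([width, height, width, height])
        d = {"boxes": boxes, "labels": y_i["labels"] + 1}
        if "scores" in y_i:  # "scores" may be absent, as in Code Example #3
            d["scores"] = y_i["scores"]
        y_pt.append(d)
    return y_pt
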
Code Example #3
    def predict(  # pylint: disable=W0221
        self, x: np.ndarray, batch_size: int = 128, standardise_output: bool = False, **kwargs
    ) -> List[Dict[str, np.ndarray]]:
        """
        Perform prediction for a batch of inputs.

        :param x: Samples of shape (nb_samples, height, width, nb_channels).
        :param batch_size: Batch size.
        :param standardise_output: True if output should be standardised to PyTorch format. Box coordinates will be
                                   scaled from [0, 1] to image dimensions, label index will be increased by 1 to adhere
                                   to COCO categories and the boxes will be changed to [x1, y1, x2, y2] format, with
                                   0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H.

        :return: Predictions of format `List[Dict[str, np.ndarray]]`, one for each input image. The
                 fields of the Dict are as follows:

                 - boxes [N, 4]: the boxes in [y1, x1, y2, x2] format, with coordinates scaled to [0, 1] relative
                                 to the image size. Can be changed to PyTorch format with `standardise_output=True`.
                 - labels [N]: the labels for each image in TensorFlow format. Can be changed to PyTorch format with
                               `standardise_output=True`.
                 - scores [N]: the scores of each prediction.
        """
        # Only do prediction if is_training is False
        if self.is_training:
            raise NotImplementedError(
                "This object detector was loaded in training mode and therefore does not support prediction."
            )

        # Apply preprocessing
        x, _ = self._apply_preprocessing(x, y=None, fit=False)

        # Check if batch processing is appropriately set
        if self.images is not None and self.images.shape[0].value is not None:
            if x.shape[0] % self.images.shape[0].value != 0:  # pragma: no cover
                raise ValueError("Number of prediction samples must be a multiple of input size.")

            logger.warning("Reset batch size to input size.")
            batch_size = self.images.shape[0].value

        # Run prediction with batch processing
        num_samples = x.shape[0]
        num_batch = int(np.ceil(num_samples / float(batch_size)))
        results = list()

        for m in range(num_batch):
            # Batch indexes
            begin, end = m * batch_size, min((m + 1) * batch_size, num_samples)

            # Create feed_dict
            feed_dict = {self.images: x[begin:end]}

            # Run prediction
            batch_results = self._sess.run(self._detections, feed_dict=feed_dict)

            for i in range(end - begin):
                d_sample = dict()

                d_sample["boxes"] = batch_results["detection_boxes"][i]
                d_sample["labels"] = batch_results["detection_classes"][i].astype(np.int32)

                if standardise_output:
                    from art.estimators.object_detection.utils import convert_tf_to_pt

                    d_sample = convert_tf_to_pt(y=[d_sample], height=x.shape[1], width=x.shape[2])[0]

                d_sample["scores"] = batch_results["detection_scores"][i]

                results.append(d_sample)

        return results
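
A short usage sketch of this predict API, assuming a fixed-size TF1 placeholder as in
Code Example #1 (the variable names and dummy input here are illustrative only):

import numpy as np
import tensorflow as tf  # TF1.x API
from art.estimators.object_detection.tensorflow_faster_rcnn import TensorFlowFasterRCNN

images = tf.placeholder(tf.float32, shape=[1, 28, 28, 1])
detector = TensorFlowFasterRCNN(images=images)

x = np.zeros((1, 28, 28, 1), dtype=np.float32)  # dummy batch of one image

# Native TensorFlow format: boxes [y1, x1, y2, x2] in [0, 1], TF label indices.
preds_tf = detector.predict(x, standardise_output=False)

# PyTorch/COCO format: boxes [x1, y1, x2, y2] in pixels, labels shifted by 1.
preds_pt = detector.predict(x, standardise_output=True)
print(preds_pt[0]["boxes"].shape, preds_pt[0]["labels"].shape, preds_pt[0]["scores"].shape)
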
Code Example #4
    def generate(self, x: np.ndarray, y: Optional[List[Dict[str, np.ndarray]]] = None, **kwargs) -> np.ndarray:
        """
        Generate RobustDPatch.

        :param x: Sample images.
        :param y: Target labels for object detector.
        :return: Adversarial patch.
        """
        channel_index = 1 if self.estimator.channels_first else x.ndim - 1
        if x.shape[channel_index] != self.patch_shape[channel_index - 1]:
            raise ValueError("The color channel index of the images and the patch have to be identical.")
        if y is None and self.targeted:
            raise ValueError("The targeted version of RobustDPatch attack requires target labels provided to `y`.")
        if y is not None and not self.targeted:
            raise ValueError("The RobustDPatch attack does not use target labels.")
        if x.ndim != 4:  # pragma: no cover
            raise ValueError("The adversarial patch can only be applied to images.")

        # Check whether patch fits into the cropped images:
        if self.estimator.channels_first:
            image_height, image_width = x.shape[2:4]
        else:
            image_height, image_width = x.shape[1:3]

        if not self.estimator.native_label_is_pytorch_format and y is not None:
            from art.estimators.object_detection.utils import convert_tf_to_pt

            # Use the image height/width computed above (handles channels_first inputs).
            y = convert_tf_to_pt(y=y, height=image_height, width=image_width)

        if y is not None:
            for i_image in range(x.shape[0]):
                y_i = y[i_image]["boxes"]
                for i_box in range(y_i.shape[0]):
                    x_1, y_1, x_2, y_2 = y_i[i_box]
                    if (  # pragma: no cover
                        x_1 < self.crop_range[1]
                        or y_1 < self.crop_range[0]
                        or x_2 > image_width - self.crop_range[1] + 1
                        or y_2 > image_height - self.crop_range[0] + 1
                    ):
                        raise ValueError("Cropping is intersecting with at least one box, reduce `crop_range`.")

        if (  # pragma: no cover
            self.patch_location[0] + self.patch_shape[0] > image_height - self.crop_range[0]
            or self.patch_location[1] + self.patch_shape[1] > image_width - self.crop_range[1]
        ):
            raise ValueError("The patch (partially) lies outside the cropped image.")

        for i_step in trange(self.max_iter, desc="RobustDPatch iteration", disable=not self.verbose):
            if i_step == 0 or (i_step + 1) % 100 == 0:
                logger.info("Training Step: %i", i_step + 1)

            num_batches = math.ceil(x.shape[0] / self.batch_size)
            patch_gradients_old = np.zeros_like(self._patch)

            for e_step in range(self.sample_size):
                if e_step == 0 or (e_step + 1) % 100 == 0:
                    logger.info("EOT Step: %i", e_step + 1)

                for i_batch in range(num_batches):
                    i_batch_start = i_batch * self.batch_size
                    i_batch_end = min((i_batch + 1) * self.batch_size, x.shape[0])

                    if y is None:
                        y_batch = y
                    else:
                        y_batch = y[i_batch_start:i_batch_end]

                    # Sample and apply the random transformations:
                    patched_images, patch_target, transforms = self._augment_images_with_patch(
                        x[i_batch_start:i_batch_end], y_batch, self._patch, channels_first=self.estimator.channels_first
                    )

                    gradients = self.estimator.loss_gradient(
                        x=patched_images,
                        y=patch_target,
                        standardise_output=True,
                    )

                    gradients = self._untransform_gradients(
                        gradients, transforms, channels_first=self.estimator.channels_first
                    )

                    patch_gradients = patch_gradients_old + np.sum(gradients, axis=0)
                    logger.debug(
                        "Gradient percentage diff: %f",
                        np.mean(np.sign(patch_gradients) != np.sign(patch_gradients_old)),
                    )

                    patch_gradients_old = patch_gradients

            self._patch = self._patch + np.sign(patch_gradients) * (1 - 2 * int(self.targeted)) * self.learning_rate

            if self.estimator.clip_values is not None:
                self._patch = np.clip(
                    self._patch,
                    a_min=self.estimator.clip_values[0],
                    a_max=self.estimator.clip_values[1],
                )

        return self._patch
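
For context, a minimal sketch of how this generate method is typically driven. The
constructor arguments shown are assumptions inferred from the attributes used above
(patch_shape, patch_location, crop_range, sample_size, learning_rate, max_iter,
batch_size, targeted); check the RobustDPatch signature in your ART version.

import numpy as np
import tensorflow as tf  # TF1.x API
from art.attacks.evasion import RobustDPatch
from art.estimators.object_detection.tensorflow_faster_rcnn import TensorFlowFasterRCNN

# An ART object detector, e.g. the TensorFlowFasterRCNN from Code Example #1.
images = tf.placeholder(tf.float32, shape=[2, 28, 28, 1])
detector = TensorFlowFasterRCNN(images=images)

attack = RobustDPatch(
    estimator=detector,
    patch_shape=(8, 8, 1),   # channel count must match the images (see check above)
    patch_location=(2, 2),
    crop_range=(0, 0),
    sample_size=1,           # EOT samples per iteration
    learning_rate=1.0,
    max_iter=10,
    batch_size=2,
    targeted=False,          # untargeted: y must be None (see the checks above)
)

x = np.zeros((2, 28, 28, 1), dtype=np.float32)
patch = attack.generate(x=x)  # returns the optimised patch, shape == patch_shape
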