Example #1
 def test_taylor_approximation(self, x, params):
     """Taylor encodings of the hand-written ZX rotation and of the
     equivalent two-step composition must both be sound relaxations."""
     hand_written = RotationZX()
     composed = Composition(RotationZ(), RotationX())
     for transformation in (hand_written, composed):
         bounds = taylor.encode(transformation, x, params)
         self.assertSound(bounds, params,
                          partial(transformation.transform, x))
Example #2
    def test_taylor_approximation(self, x, params):
        """The hand-written tapering rotation and the equivalent composition
        must both yield sound Taylor bounds, and those bounds must agree."""
        reference = TaperingRotation()
        composed = Composition(TaperingZ(), RotationZ())

        reference_bounds = taylor.encode(reference, x, params)
        composed_bounds = taylor.encode(composed, x, params)

        self.assertSound(reference_bounds, params,
                         partial(reference.transform, x))
        self.assertSound(composed_bounds, params,
                         partial(composed.transform, x))
        # Manual and composed encodings should produce (nearly) equal bounds.
        self.assertAlmostEqualBounds(reference_bounds, composed_bounds)
Example #3
    def concretize(self, x, A, sign=-1, aux=None):
        """Concretize linear output coefficients ``A`` into a lower
        (``sign == -1``) or upper (``sign == 1``) bound, by back-substituting
        through Taylor relaxations of ``self.transformation`` and then
        maximizing/minimizing over the valid parameter box.

        Args:
            x: input points, shape ``(n_batch, n_points, n_coords)``.
            A: linear coefficients, shape ``(n_batch, n_outputs, n_values)``
               with ``n_values == n_points * n_coords``.
            sign: ``-1`` for a lower bound, ``1`` for an upper bound.
            aux: unused; kept for interface compatibility.

        Returns:
            Tensor of shape ``(n_batch, n_outputs)`` with the concretized bound.

        Raises:
            RuntimeError: if ``sign`` is neither ``-1`` nor ``1``.
        """
        n_batch = A.shape[0]
        n_outputs = A.shape[1]
        n_values = A.shape[2]
        n_points = x.shape[1]
        n_coords = x.shape[2]
        n_params = len(self.params)
        assert n_values == n_points * n_coords

        # Linear constraints based on Taylor relaxations of the transformation.
        x_np = x.cpu().numpy().reshape((n_batch * n_points, n_coords))
        bounds = taylor.encode(self.transformation, x_np, self.params)

        def _to_tensor(arr, shape):
            # Fix: pin dtype to x.dtype. taylor.encode works in numpy
            # (typically float64); without an explicit dtype the resulting
            # tensors would stay float64 and the matmuls below could fail or
            # silently promote against a float32 ``A``. This also matches the
            # explicit dtype already used for ``lb``/``ub`` below.
            return torch.tensor(arr.reshape(shape),
                                dtype=x.dtype,
                                device=x.device)

        lower_offset = _to_tensor(bounds.lower_offset, (n_batch, n_values, 1))
        upper_offset = _to_tensor(bounds.upper_offset, (n_batch, n_values, 1))
        lower_slope = _to_tensor(bounds.lower_slope,
                                 (n_batch, n_values, n_params))
        upper_slope = _to_tensor(bounds.upper_slope,
                                 (n_batch, n_values, n_params))

        # Backwards-propagate the coefficients through the linear relaxation:
        # positive coefficients pick the relaxation of the same side,
        # negative coefficients pick the opposite side.
        A_pos = A.clamp(min=0.0)
        A_neg = A.clamp(max=0.0)
        if sign == -1:  # computing lower bound
            new_A = torch.matmul(A_pos, lower_slope) + torch.matmul(
                A_neg, upper_slope)
            offset = torch.matmul(A_pos, lower_offset) + torch.matmul(
                A_neg, upper_offset)
        elif sign == 1:  # computing upper bound
            new_A = torch.matmul(A_pos, upper_slope) + torch.matmul(
                A_neg, lower_slope)
            offset = torch.matmul(A_pos, upper_offset) + torch.matmul(
                A_neg, lower_offset)
        else:
            raise RuntimeError(f"Invalid sign value: {sign}")

        # Instantiate bounds over the valid parameter ranges. Same scheme as
        # for the L-inf perturbation in PerturbationLpNorm.
        lb = torch.tensor([[p.lower_bound] for p in self.params],
                          dtype=x.dtype,
                          device=x.device).reshape((1, n_params, 1))
        ub = torch.tensor([[p.upper_bound] for p in self.params],
                          dtype=x.dtype,
                          device=x.device).reshape((1, n_params, 1))

        center = (ub + lb) / 2.0
        diff = (ub - lb) / 2.0

        # Worst case of a linear function over a box: center term plus
        # sign-weighted absolute deviation.
        bound = new_A.matmul(center) + sign * new_A.abs().matmul(diff)

        result = bound + offset
        assert result.shape == (n_batch, n_outputs, 1)
        return result.squeeze(-1)
            # NOTE(review): this span is a fragment — the enclosing ``def`` and
            # the body of the final ``else`` branch are not visible here.
            if settings.relaxation == 'interval':
                # Interval relaxation: box bounds on the transformed points only.
                bounds = transformation.transform(np_points, params)

                if settings.implicit_intervals > 1:
                    # Tighten the box by implicitly splitting the parameter
                    # range into sub-intervals.
                    bounds = split_implicitly(
                        bounds=bounds, intervals=settings.implicit_intervals,
                        encode_rotation=partial(transformation.transform, np_points),
                        params=params
                    )

                (dominant_classes, nlb, nub) = eran.analyze_segmentation_box(bounds, label, valid_classes, num_total_classes)

            elif settings.relaxation == 'taylor':
                # Taylor relaxation: box bounds plus linear constraints on the
                # transformation parameters.
                bounds = transformation.transform(np_points, params)
                constraints = taylor.encode(transformation, np_points, params)

                if settings.implicit_intervals > 1:
                    bounds = split_implicitly(
                        bounds=bounds, intervals=settings.implicit_intervals,
                        encode_rotation=partial(transformation.transform, np_points),
                        params=params
                    )

                (dominant_classes, nlb, nub) = eran.analyze_segmentation_linear(bounds, constraints, params, label, valid_classes, num_total_classes)
                dominant_classes = np.array(dominant_classes)

                # Sanity check: each certified entry matches its label; -1
                # entries are allowed — presumably "no dominant class", TODO
                # confirm against eran's API.
                assert np.all(np.logical_or(label == dominant_classes, dominant_classes == -1)), \
                    f"Wrong dominant class! label {label}, dominant_class: {dominant_classes}"

            else:
Example #5
 def test_transformation_taylor(self, x, params):
     """The Taylor encoding of TwistingZ must be a sound relaxation."""
     twisting = TwistingZ()
     bounds = taylor.encode(twisting, x, params)
     self.assertSound(bounds, params, partial(twisting.transform, x))
Example #6
 def test_taylor_twisting_rotation(self, x, params):
     """The composed twisting-then-rotation map must have a sound
     Taylor encoding."""
     composed = Composition(TwistingZ(), RotationZ())
     bounds = taylor.encode(composed, x, params)
     self.assertSound(bounds, params, partial(composed.transform, x))
Example #7
 def test_taylor_rot_zyx(self, x, params):
     """A nested composition of Z, Y and X rotations must have a sound
     Taylor encoding."""
     rot_zyx = Composition(RotationZ(),
                           Composition(RotationY(), RotationX()))
     bounds = taylor.encode(rot_zyx, x, params)
     self.assertSound(bounds, params, partial(rot_zyx.transform, x))