def split_implicitly(bounds, intervals, encode_rotation, params):
    """Refine `bounds` by splitting each parameter interval into `intervals`
    equally sized pieces, re-encoding every combination of sub-intervals with
    `encode_rotation`, and taking the hull of the resulting bounds."""
    split_inc = [(param.upper_bound - param.lower_bound) / intervals for param in params]
    refined_lb = np.full_like(bounds.lower_bound, float('inf'))
    refined_ub = np.full_like(bounds.upper_bound, float('-inf'))
    for idx in itertools.product(range(intervals), repeat=len(params)):
        split_min = [
            param.lower_bound + idx[i] * split_inc[i]
            for i, param in enumerate(params)
        ]
        split_max = [
            param.lower_bound + (idx[i] + 1) * split_inc[i]
            for i, param in enumerate(params)
        ]
        split_intervals = [Interval(lb, ub) for lb, ub in zip(split_min, split_max)]
        candidate_bounds = encode_rotation(split_intervals)
        # Hull of all candidate bounds: element-wise minimum of the lower
        # bounds and element-wise maximum of the upper bounds.
        np.minimum(refined_lb, candidate_bounds.lower_bound, out=refined_lb)
        np.maximum(refined_ub, candidate_bounds.upper_bound, out=refined_ub)
    # The refined bounds must be contained in the original bounds (up to
    # floating-point tolerance) and must form a valid interval.
    assert np.logical_or(bounds.lower_bound <= refined_lb,
                         np.isclose(bounds.lower_bound, refined_lb)).all()
    assert np.logical_or(refined_ub <= bounds.upper_bound,
                         np.isclose(refined_ub, bounds.upper_bound)).all()
    assert (refined_lb <= refined_ub).all()
    return Interval(refined_lb, refined_ub)
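# --- Usage sketch (illustrative only, not part of the original pipeline) ---
# It assumes the Interval class and the iv.cos / iv.sin helpers from this code
# base; the fixed 2D point and the encode_rotation function below are made up
# to show how split_implicitly tightens the bounds of an interval-encoded
# rotation with a single angle parameter.
def _example_split_implicitly():
    point = np.array([1.0, 0.0])

    def encode_rotation(split_params):
        # split_params is a list with a single Interval for the angle theta.
        theta = split_params[0]
        x = iv.cos(theta) * point[0] - iv.sin(theta) * point[1]
        y = iv.sin(theta) * point[0] + iv.cos(theta) * point[1]
        return Interval(np.array([x.lower_bound, y.lower_bound]),
                        np.array([x.upper_bound, y.upper_bound]))

    params = [Interval(-np.pi / 4, np.pi / 4)]
    coarse = encode_rotation(params)
    # Splitting the angle range into 8 pieces yields bounds that are never
    # looser than the coarse ones; split_implicitly asserts this internally.
    refined = split_implicitly(bounds=coarse, intervals=8,
                               encode_rotation=encode_rotation, params=params)
    return coarse, refined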
def load_spec(spec_dir: Path, counter: int) -> List[Tuple[List[Interval], Interval, LinearBounds]]:
    """Load the specification splits stored in ``spec_dir/<counter>.csv``.

    Each split consists of parameter rows (space-separated floats), interval
    bounds (a tuple literal on a line starting with ``(``), and linear
    constraint rows of the form ``<lower bias> <lower weights...> | <upper
    bias> <upper weights...>``; a split is terminated by a ``SPEC_FINISHED``
    line.
    """
    parameters = list()
    interval_bounds = list()
    lower_biases = list()
    upper_biases = list()
    lower_weights = list()
    upper_weights = list()
    with (spec_dir / f'{counter}.csv').open('r') as f:
        split_parameters = list()
        split_interval_bounds = list()
        split_lower_biases = list()
        split_upper_biases = list()
        split_lower_weights = list()
        split_upper_weights = list()
        for line in f.readlines():
            if '|' in line:
                lower, upper = line.strip().split(' | ')
                lower = [float(v) for v in lower.split(' ')]
                upper = [float(v) for v in upper.split(' ')]
                split_lower_biases.append(lower[0])
                split_upper_biases.append(upper[0])
                split_lower_weights.append(lower[1:])
                split_upper_weights.append(upper[1:])
            elif 'SPEC_FINISHED' in line:
                parameters.append(np.asarray(split_parameters))
                interval_bounds.append(np.asarray(split_interval_bounds))
                lower_biases.append(np.asarray(split_lower_biases))
                upper_biases.append(np.asarray(split_upper_biases))
                lower_weights.append(np.asarray(split_lower_weights))
                upper_weights.append(np.asarray(split_upper_weights))
                split_parameters = list()
                split_interval_bounds = list()
                split_lower_biases = list()
                split_upper_biases = list()
                split_lower_weights = list()
                split_upper_weights = list()
            elif line.startswith('('):
                split_interval_bounds.extend(eval(line))
            else:
                split_parameters.append([float(v) for v in line.strip().split(' ')])

    parameters = np.array(parameters)
    interval_bounds = np.asarray(interval_bounds)
    lower_biases = np.asarray(lower_biases)
    upper_biases = np.asarray(upper_biases)
    lower_weights = np.asarray(lower_weights)
    upper_weights = np.asarray(upper_weights)

    result = list()
    for i in range(len(parameters)):
        params = [Interval(param[0], param[1]) for param in parameters[i]]
        bounds = Interval(
            lower_bound=interval_bounds[i][:, 0],
            upper_bound=interval_bounds[i][:, 1]
        )
        constraints = LinearBounds(
            upper_slope=upper_weights[i],
            upper_offset=upper_biases[i],
            lower_slope=lower_weights[i],
            lower_offset=lower_biases[i]
        )
        result.append((params, bounds, constraints))
    return result
intervals_per_dimension = [settings.intervals] * num_params
theta_inc = [2.0 * param / float(intervals)
             for param, intervals in zip(parameters, intervals_per_dimension)]
checkpoints_sample = checkpoints[str(i)] if str(i) in checkpoints else {}
valid_classes = np.unique(label)
certified_points = np.full(settings.num_points, True)
timer.start()
parameter_iterator = itertools.product(
    *(range(intervals) for intervals in intervals_per_dimension))
progress_bar = tqdm(parameter_iterator, desc=f"Object {counter}", unit="interval",
                    total=settings.intervals ** num_params)
for idx in progress_bar:
    assert len(idx) == len(parameters) and len(theta_inc) == len(parameters)
    params = [Interval(j * inc - theta, (j + 1) * inc - theta)
              for j, inc, theta in zip(idx, theta_inc, parameters)]
    interval_key = 'x'.join([f"[{i.lower_bound:.4f},{i.upper_bound:.4f}]" for i in params])
    if interval_key in checkpoints_sample:
        interval_certified = np.array(checkpoints_sample[interval_key])
    else:
        assert np_points.shape[0] == settings.num_points, \
            f"invalid points shape {np_points.shape}, expected ({settings.num_points}, x)"
        if settings.relaxation == 'interval':
            bounds = transformation.transform(np_points, params)
            if settings.implicit_intervals > 1:
                bounds = split_implicitly(
                    bounds=bounds,
                    intervals=settings.implicit_intervals,
def evaluate_at_2d(self, x, y) -> Interval:
    return Interval(
        lower_bound=self.lower_offset
        + self.lower_slope[:, :, 0] * x
        + self.lower_slope[:, :, 1] * y,
        upper_bound=self.upper_offset
        + self.upper_slope[:, :, 0] * x
        + self.upper_slope[:, :, 1] * y,
    )
def evaluate_at(self, x) -> Interval:
    return Interval(self.lower_offset + self.lower_slope[:, :, 0] * x,
                    self.upper_offset + self.upper_slope[:, :, 0] * x)
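# --- Usage sketch (illustrative only) ---
# This assumes evaluate_at_2d is a method of the LinearBounds container used
# elsewhere in this code base and that its slope arrays have shape
# (num_points, num_outputs, num_params), as suggested by the indexing
# [:, :, 0] / [:, :, 1] above. With identical lower and upper constraints,
# evaluating at any (x, y) yields a degenerate interval whose bounds coincide.
def _example_evaluate_linear_bounds():
    num_points, num_outputs = 4, 3
    slope = np.random.uniform(-1.0, 1.0, (num_points, num_outputs, 2))
    offset = np.random.uniform(-1.0, 1.0, (num_points, num_outputs))
    bounds = LinearBounds(upper_slope=slope, upper_offset=offset,
                          lower_slope=slope, lower_offset=offset)
    result = bounds.evaluate_at_2d(0.1, -0.2)
    assert np.allclose(result.lower_bound, result.upper_bound)
    return result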
def test_interval_soundness(self):
    self.assertSound(Interval(-1.0, 1.0), [Interval(-1.0, 1.0)], lambda x: x)

def test_lower_bound_violation(self):
    with self.assertRaises(AssertionError):
        self.assertSound(Interval(-1.0, 1.0), [Interval(-1.0 - 1e-8, 1.0)], lambda x: x)

def test_approximate_interval_soundness(self):
    self.assertSound(Interval(-1.0, 1.0), [Interval(-1.0 - 1e-9, 1 + 1e-9)], lambda x: x)
class TestIntervalMultiplication(RelaxationTestCase):
    @parameterized.expand([
        [Interval(0, 0), Interval(0, 0), Interval(0, 0)],
        [Interval(1, 1), Interval(1, 1), Interval(1, 1)],
        [Interval(-1, -1), Interval(-2, -2), Interval(2, 2)],
        [Interval(-2, 2), Interval(-2, 2), Interval(-4, 4)],
        [Interval(-2, 5), Interval(1, 2), Interval(-4, 10)],
    ])
    def test_float_intervals(self, a, b, expected):
        self.assertEqual(expected, a * b)
        self.assertEqual(expected, b * a)

    @parameterized.expand(zip(sample_intervals(), sample_intervals()))
    def test_ndarray_intervals(self, a, b):
        self.assertSound(a * b, [a, b], lambda p: p[0] * p[1])
        self.assertSound(b * a, [a, b], lambda p: p[0] * p[1])

    @parameterized.expand([
        [0, Interval(0, 0), Interval(0, 0)],
        [1, Interval(1, 1), Interval(1, 1)],
        [-1, Interval(-2, -2), Interval(2, 2)],
        [2, Interval(-2, 5), Interval(-4, 10)],
    ])
    def test_with_constants(self, a, b, expected):
        self.assertEqual(expected, a * b)
        self.assertEqual(expected, b * a)

    @parameterized.expand(zip(np.random.uniform(-100, 100, (100, 100)), sample_intervals()))
    def test_with_constant_ndarray(self, a, b):
        self.assertSound(a * b, [b], lambda b: a * b)
        self.assertSound(b * a, [b], lambda b: a * b)

    def test_broadcast_multiplication(self):
        a = Interval(-1., 1.)
        b = np.random.uniform(-100, 100, (100,))
        self.assertAlmostEqualNumpy(np.minimum(-b, b), (a * b).lower_bound)
        self.assertAlmostEqualNumpy(np.maximum(-b, b), (a * b).upper_bound)
        self.assertAlmostEqualNumpy(np.minimum(-b, b), (b * a).lower_bound)
        self.assertAlmostEqualNumpy(np.maximum(-b, b), (b * a).upper_bound)
def sample_intervals(num_batches=100, size_intervals=100):
    scaling = np.linspace(0, 1, num_batches).reshape((num_batches, 1)).repeat(size_intervals, axis=1)
    center = np.random.uniform(-100 * scaling, 100 * scaling, (num_batches, size_intervals)).squeeze()
    width = np.random.uniform(0, 200 * scaling, (num_batches, size_intervals)).squeeze()
    return [Interval(center[i] - width[i], center[i] + width[i]) for i in range(num_batches)]
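# --- Illustrative check (not one of the original tests) ---
# sample_intervals scales both the centre range and the width range linearly
# with the batch index, so every sampled Interval is valid (lower <= upper)
# and the very first batch collapses to point intervals.
def _example_sample_intervals():
    batches = sample_intervals(num_batches=5, size_intervals=10)
    for interval in batches:
        assert (interval.lower_bound <= interval.upper_bound).all()
    # scaling is 0 for the first batch, so centres and widths are all zero.
    assert np.allclose(batches[0].lower_bound, batches[0].upper_bound)
    return batches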
certified = True
timer.start(TOTAL_TIMER)
elapsed_bounds = 0.0
parameter_iterator = itertools.product(
    *(range(intervals) for intervals in intervals_per_dimension))
progress_bar = tqdm(parameter_iterator, desc=f"Object {counter}", unit="interval",
                    total=settings.intervals ** num_params)
for idx in progress_bar:
    assert len(idx) == len(parameters) and len(theta_inc) == len(parameters)
    params = [Interval(j * inc - theta, (j + 1) * inc - theta)
              for j, inc, theta in zip(idx, theta_inc, parameters)]
    interval_key = 'x'.join([f"[{i.lower_bound:.4f},{i.upper_bound:.4f}]" for i in params])
    if interval_key in checkpoints_sample:
        interval_certified = checkpoints_sample[interval_key]
    else:
        assert np_points.shape[0] == settings.num_points, \
            f"invalid points shape {np_points.shape}, expected ({settings.num_points}, x)"
        if settings.relaxation == 'interval':
            timer.start(BOUND_TIMER)
            bounds = transformation.transform(np_points, params)
class TestIntervalSubtraction(RelaxationTestCase):
    @parameterized.expand([
        [Interval(0, 0), Interval(0, 0), Interval(0, 0)],
        [Interval(1, 1), Interval(1, 1), Interval(0, 0)],
        [Interval(1, 1), Interval(-2, -2), Interval(3, 3)],
        [Interval(-2, 5), Interval(1, 2), Interval(-4, 4)],
    ])
    def test_float_intervals(self, a, b, expected):
        self.assertEqual(expected, a - b)

    @parameterized.expand(zip(sample_intervals(), sample_intervals()))
    def test_ndarray_intervals(self, a, b):
        self.assertAlmostEqualNumpy(a.lower_bound - b.upper_bound, (a - b).lower_bound)
        self.assertAlmostEqualNumpy(a.upper_bound - b.lower_bound, (a - b).upper_bound)
        self.assertSound(a - b, [a, b], lambda p: p[0] - p[1])

    @parameterized.expand([
        [0, Interval(0, 0), Interval(0, 0)],
        [1, Interval(1, 1), Interval(0, 0)],
        [-1, Interval(-2, -2), Interval(1, 1)],
        [2, Interval(-2, 5), Interval(-3, 4)],
    ])
    def test_with_constants(self, a, b, expected):
        self.assertEqual(expected, a - b)

    @parameterized.expand(zip(np.random.uniform(-100, 100, (100, 100)), sample_intervals()))
    def test_with_constant_ndarray(self, a, b):
        self.assertAlmostEqualNumpy(a - b.upper_bound, (a - b).lower_bound)
        self.assertAlmostEqualNumpy(a - b.lower_bound, (a - b).upper_bound)
        self.assertSound(a - b, [b], lambda b: a - b)
        self.assertAlmostEqualNumpy(b.lower_bound - a, (b - a).lower_bound)
        self.assertAlmostEqualNumpy(b.upper_bound - a, (b - a).upper_bound)
        self.assertSound(b - a, [b], lambda b: b - a)

    def test_broadcast(self):
        a = Interval(-1, 1)
        b = np.random.uniform(-100, 100, (100,))
        self.assertAlmostEqualNumpy(-b - 1, (a - b).lower_bound)
        self.assertAlmostEqualNumpy(-b + 1, (a - b).upper_bound)
        self.assertAlmostEqualNumpy(b - 1, (b - a).lower_bound)
        self.assertAlmostEqualNumpy(b + 1, (b - a).upper_bound)

    @parameterized.expand([
        [Interval(0, 0), Interval(0, 0)],
        [Interval(1, 1), Interval(-1, -1)],
        [Interval(-2, 2), Interval(-2, 2)],
        [Interval(-2, 5), Interval(-5, 2)],
    ])
    def test_interval_inversion(self, a, expected):
        self.assertEqual(expected, -a)
class TestIntervalAddition(RelaxationTestCase):
    @parameterized.expand([
        [Interval(0, 0), Interval(0, 0), Interval(0, 0)],
        [Interval(1, 1), Interval(1, 1), Interval(2, 2)],
        [Interval(-1, -1), Interval(-2, -2), Interval(-3, -3)],
        [Interval(-2, 5), Interval(1, 2), Interval(-1, 7)],
    ])
    def test_float_intervals(self, a, b, expected):
        self.assertEqual(expected, a + b)
        self.assertEqual(expected, b + a)

    @parameterized.expand(zip(sample_intervals(), sample_intervals()))
    def test_ndarray_intervals(self, a, b):
        self.assertAlmostEqualNumpy(a.lower_bound + b.lower_bound, (a + b).lower_bound)
        self.assertAlmostEqualNumpy(a.lower_bound + b.lower_bound, (b + a).lower_bound)
        self.assertSound(a + b, [a, b], lambda p: p[0] + p[1])

    @parameterized.expand([
        [0, Interval(0, 0), Interval(0, 0)],
        [1, Interval(1, 1), Interval(2, 2)],
        [-1, Interval(-2, -2), Interval(-3, -3)],
        [2, Interval(-2, 5), Interval(0, 7)],
    ])
    def test_with_constants(self, a, b, expected):
        self.assertEqual(expected, a + b)
        self.assertEqual(expected, b + a)

    @parameterized.expand(zip(np.random.uniform(-100, 100, (100, 100)), sample_intervals()))
    def test_with_constant_ndarray(self, a, b):
        self.assertAlmostEqualNumpy(a + b.lower_bound, (a + b).lower_bound)
        self.assertAlmostEqualNumpy(a + b.lower_bound, (b + a).lower_bound)
        self.assertSound(a + b, [b], lambda b: a + b)

    def test_broadcast_addition(self):
        a = Interval(-1, 1)
        b = np.random.uniform(-100, 100, (100,))
        self.assertAlmostEqualNumpy(b - 1, (a + b).lower_bound)
        self.assertAlmostEqualNumpy(b + 1, (a + b).upper_bound)
        self.assertAlmostEqualNumpy(b - 1, (b + a).lower_bound)
        self.assertAlmostEqualNumpy(b + 1, (b + a).upper_bound)
class TestIntervalSquare(RelaxationTestCase):
    @parameterized.expand([
        [Interval(0, 0), Interval(0, 0)],
        [Interval(1, 1), Interval(1, 1)],
        [Interval(-2, -2), Interval(4, 4)],
        [Interval(2, 3), Interval(4, 9)],
        [Interval(-3, -2), Interval(4, 9)],
        [Interval(-2, 3), Interval(0, 9)]
    ])
    def test_single_interval(self, a: Interval, expected: Interval):
        self.assertAlmostEqual(expected.lower_bound, iv.square(a).lower_bound)
        self.assertAlmostEqual(expected.upper_bound, iv.square(a).upper_bound)
        self.assertSound(iv.square(a), [a], np.square)

    @parameterized.expand([
        [0, 0],
        [1, 1],
        [-1, 1],
        [2, 4],
        [-3, 9],
    ])
    def test_single_scalar(self, a: float, expected: float):
        self.assertAlmostEqual(expected, iv.square(a))

    @parameterized.expand([[i] for i in sample_intervals()])
    def test_nd_interval(self, a):
        self.assertSound(iv.square(a), [a], np.square)

    @parameterized.expand([[i] for i in np.random.uniform(-100, 100, (100, 100))])
    def test_nd_array(self, a):
        self.assertAlmostEqualNumpy(np.square(a), iv.square(a))
class TestIntervalCosine(RelaxationTestCase):
    @parameterized.expand([
        [Interval(0, 0), Interval(1, 1)],
        [Interval(np.pi / 2, np.pi / 2), Interval(0, 0)],
        [Interval(-np.pi / 2, -np.pi / 2), Interval(0, 0)],
        [Interval(np.pi * 5 / 2, np.pi * 5 / 2), Interval(0, 0)],
        [Interval(np.pi * 9 / 2, np.pi * 9 / 2), Interval(0, 0)],
        [Interval(0, np.pi / 2), Interval(0, 1)]
    ])
    def test_single_interval(self, a: Interval, expected: Interval):
        self.assertAlmostEqual(expected.lower_bound, iv.cos(a).lower_bound)
        self.assertAlmostEqual(expected.upper_bound, iv.cos(a).upper_bound)
        self.assertSound(iv.cos(a), [a], np.cos)

    @parameterized.expand([
        [0, 1],
        [np.pi / 2, 0],
        [-np.pi / 2, 0],
        [np.pi * 5 / 2, 0],
        [np.pi * 3, -1],
    ])
    def test_single_scalar(self, a: float, expected: float):
        self.assertAlmostEqual(expected, iv.cos(a))

    @parameterized.expand([[i] for i in sample_intervals()])
    def test_nd_interval(self, a):
        self.assertSound(iv.cos(a), [a], np.cos)

    @parameterized.expand([[i] for i in np.random.uniform(-100, 100, (100, 100))])
    def test_nd_array(self, a):
        self.assertAlmostEqualNumpy(np.cos(a), iv.cos(a))
class TestIntervalSine(RelaxationTestCase):
    @parameterized.expand([
        [Interval(0, 0), Interval(0, 0)],
        [Interval(np.pi / 2, np.pi / 2), Interval(1, 1)],
        [Interval(-np.pi / 2, -np.pi / 2), Interval(-1, -1)],
        [Interval(np.pi * 5 / 2, np.pi * 5 / 2), Interval(1, 1)],
        [Interval(np.pi * 9 / 2, np.pi * 9 / 2), Interval(1, 1)],
        [Interval(0, np.pi / 2), Interval(0, 1)]
    ])
    def test_single_interval(self, a, expected):
        self.assertEqual(expected, iv.sin(a))
        self.assertSound(iv.sin(a), [a], np.sin)

    @parameterized.expand([
        [0, 0],
        [np.pi / 2, 1],
        [-np.pi / 2, -1],
        [np.pi * 5 / 2, 1],
        [np.pi * 9 / 2, 1],
    ])
    def test_single_scalar(self, a, expected):
        self.assertAlmostEqual(expected, iv.sin(a))

    @parameterized.expand([[i] for i in sample_intervals()])
    def test_nd_interval(self, a):
        self.assertSound(iv.sin(a), [a], np.sin)

    @parameterized.expand([[i] for i in np.random.uniform(-100, 100, (100, 100))])
    def test_nd_array(self, a):
        self.assertAlmostEqualNumpy(np.sin(a), iv.sin(a))
def sample_params(num_params=1, num_batches=100):
    scaling = np.linspace(0, 1, num_batches).reshape((num_batches, 1)).repeat(num_params, axis=1)
    center = np.random.uniform(-100 * scaling, 100 * scaling, (num_batches, num_params))
    width = np.random.uniform(0, 4 * scaling, (num_batches, num_params))
    return [[Interval(center[i, j] - width[i, j], center[i, j] + width[i, j])
             for j in range(num_params)]
            for i in range(num_batches)]
inputs = {session.get_inputs()[0].name: input_data}
outputs = session.run(None, inputs)

assert np.all(lower_bound <= upper_bound)

logger.info("Solving network...")
assert lower_bound.shape[0] == settings.num_points, \
    f"invalid lower bound shape {lower_bound.shape}, expected ({settings.num_points}, x)"
assert upper_bound.shape[0] == settings.num_points, \
    f"invalid upper bound shape {upper_bound.shape}, expected ({settings.num_points}, x)"
assert np_points.shape[0] == settings.num_points, \
    f"invalid points shape {np_points.shape}, expected ({settings.num_points}, x)"

start = timer()
(dominant_class, nlb, nub) = eran.analyze_classification_box(Interval(lower_bound, upper_bound))
end = timer()
elapsed = end - start
total_time += elapsed

certified = dominant_class == label.item()
if certified:
    logger.info(f"Successfully verified class {dominant_class}")
    verified_same += 1
elif dominant_class == -1:
    logger.info(f"Failed to verify class {label.item()}")
    not_verified += 1
else:
    logger.info(
        f"Wrongly verified class {dominant_class} instead of {label.item()}"