def test_m3_strategy_with_ground_truth(self):
    data1 = HeterogeneousImpressionGenerator(1000, gamma_shape=1.0, gamma_scale=2)()
    publisher1 = PublisherData(FixedPriceGenerator(0.1)(data1))
    data2 = HeterogeneousImpressionGenerator(1000, gamma_shape=1.0, gamma_scale=3)()
    publisher2 = PublisherData(FixedPriceGenerator(0.05)(data2))
    dataset = DataSet([publisher1, publisher2], "dataset")
    params = SystemParameters(
        [100.0, 100.0], LiquidLegionsParameters(), np.random.default_rng(seed=1)
    )
    halo = HaloSimulator(dataset, params, PrivacyTracker())
    budget = PrivacyBudget(1.0, 1e-5)
    m3strategy = M3Strategy(
        GammaPoissonModel,
        {},
        RestrictedPairwiseUnionReachSurface,
        {},
        use_ground_truth_for_reach_curves=True,
    )
    surface = m3strategy.fit(halo, params, budget)
    expected0 = surface.by_spend([10.0, 0.0]).reach(1)
    actual0 = dataset.reach_by_spend([10.0, 0.0]).reach(1)
    self.assertAlmostEqual(expected0, actual0, delta=1)
    expected1 = surface.by_spend([0.0, 10.0]).reach(1)
    actual1 = dataset.reach_by_spend([0.0, 10.0]).reach(1)
    self.assertAlmostEqual(expected1, actual1, delta=1)
    expected2 = surface.by_spend([10.0, 10.0]).reach(1)
    actual2 = dataset.reach_by_spend([10.0, 10.0]).reach(1)
    self.assertAlmostEqual(expected2, actual2, delta=10)
def add(self, data_set: DataSet) -> None:
    """Adds a DataSet to this DataDesign."""
    data_set_path = self._filesystem.joinpath(self._dirpath, data_set.name)
    if self._filesystem.exists(data_set_path):
        raise ValueError(
            "This DataDesign already contains a DataSet with name {}".format(
                data_set.name
            )
        )
    data_set.write_data_set(self._dirpath, filesystem=self._filesystem)
    self._data_set_names.add(data_set.name)
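# For context, a minimal usage sketch of add(). It reuses the PublisherData/
# DataSet construction that appears throughout these tests; the duplicate-name
# ValueError is the behavior asserted in the method above.
def _data_design_add_usage_sketch():
    from os.path import join
    from tempfile import TemporaryDirectory

    with TemporaryDirectory() as d:
        data_set = DataSet([PublisherData([(1, 0.01)], "pdf1")], "ds")
        design = DataDesign(join(d, "data_design"))
        design.add(data_set)  # writes the DataSet under data_design/ds
        try:
            design.add(data_set)  # adding the same name a second time
        except ValueError:
            pass  # "This DataDesign already contains a DataSet with name ds"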
@classmethod
def setUpClass(cls):
    pdf11 = PublisherData([(1, 0.01), (2, 0.02), (1, 0.04), (3, 0.05)], "pdf11")
    pdf12 = PublisherData([(2, 0.03), (4, 0.06)], "pdf12")
    cls.data_set1 = DataSet([pdf11, pdf12], "ds1")
    pdf21 = PublisherData([(1, 0.01), (2, 0.02), (2, 0.04), (3, 0.05)], "pdf21")
    pdf22 = PublisherData([(2, 0.03), (3, 0.06)], "pdf22")
    cls.data_set2 = DataSet([pdf21, pdf22], "ds2")
def test_fit_frequency_one(self):
    data = HeterogeneousImpressionGenerator(10000, gamma_shape=1.0, gamma_scale=3)()
    publisher = PublisherData(FixedPriceGenerator(0.1)(data))
    dataset = DataSet([publisher], "Exponential-poisson")
    spend_fraction = 0.5
    spend = dataset._data[0].max_spend * spend_fraction
    point = dataset.reach_by_spend([spend], max_frequency=1)
    gm = GoergModel([point])
    gm_reach = gm.by_spend([spend]).reach()
    kgpm = KInflatedGammaPoissonModel([point])
    kgpm._fit()
    kgpm_reach = kgpm.by_spend([spend]).reach()
    self.assertAlmostEqual(gm_reach, kgpm_reach, delta=1)
def test_compute_trial_results_path(self):
    with TemporaryDirectory() as d:
        pdf1 = PublisherData([(1, 0.01), (2, 0.02), (1, 0.04), (3, 0.05)], "pdf1")
        pdf2 = PublisherData([(2, 0.03), (4, 0.06)], "pdf2")
        data_set = DataSet([pdf1, pdf2], "dataset")
        data_design = DataDesign(join(d, "data_design"))
        data_design.add(data_set)
        msd = ModelingStrategyDescriptor(
            "strategy", {}, "single_pub_model", {}, "multi_pub_model", {}
        )
        sparams = SystemParameters(
            [0.03, 0.05],
            LiquidLegionsParameters(13, 1e6, 1),
            np.random.default_rng(),
        )
        eparams = ExperimentParameters(PrivacyBudget(1.0, 0.01), 3, 5, "tps")
        trial_descriptor = TrialDescriptor(msd, sparams, eparams)
        trial = ExperimentalTrial("edir", data_design, "dataset", trial_descriptor)
        actual = trial._compute_trial_results_path()
        expected = "{}/{}/{},{},{},{}".format(
            "edir",
            "dataset",
            "strategy,single_pub_model,multi_pub_model",
            "spends=(0.03,0.05),decay_rate=13,sketch_size=1000000.0",
            "epsilon=1.0,delta=0.01,replica_id=3,max_frequency=5",
            "test_point_strategy=tps.csv",
        )
        self.assertEqual(actual, expected)
def test_simulated_venn_diagram_reach_by_spend_without_active_pub(self):
    pdfs = [
        PublisherData([(1, 0.01), (2, 0.02), (1, 0.04), (3, 0.05)], "pdf1"),
        PublisherData([(2, 0.03), (4, 0.06)], "pdf2"),
        PublisherData([(2, 0.01), (3, 0.03), (4, 0.05)], "pdf3"),
    ]
    data_set = DataSet(pdfs, "test")
    params = SystemParameters(
        [0.4, 0.5, 0.4],
        LiquidLegionsParameters(),
        FakeRandomGenerator(),
    )
    privacy_tracker = PrivacyTracker()
    halo = HaloSimulator(data_set, params, privacy_tracker)
    spends = [0, 0, 0]
    budget = PrivacyBudget(0.2, 0.4)
    privacy_budget_split = 0.5
    max_freq = 1
    reach_points = halo.simulated_venn_diagram_reach_by_spend(
        spends, budget, privacy_budget_split, max_freq
    )
    expected_reach_points = []
    self.assertEqual(expected_reach_points, reach_points)
    self.assertEqual(halo.privacy_tracker.privacy_consumption.epsilon, 0)
    self.assertEqual(halo.privacy_tracker.privacy_consumption.delta, 0)
    self.assertEqual(len(halo.privacy_tracker._noising_events), 0)
def test_sample_venn_diagram(self, regions, sample_size, expected):
    params = SystemParameters([0], LiquidLegionsParameters(), FakeRandomGenerator())
    halo = HaloSimulator(DataSet([], "test"), params, PrivacyTracker())
    self.assertEqual(
        halo._sample_venn_diagram(regions, sample_size),
        expected,
    )
@classmethod
def setUpClass(cls):
    pdf1 = PublisherData([(1, 0.01), (2, 0.02), (1, 0.04), (3, 0.05)], "pdf1")
    pdf2 = PublisherData([(2, 0.03), (4, 0.06)], "pdf2")
    data_set = DataSet([pdf1, pdf2], "test")
    cls.data_set = data_set
    cls.curve1 = GroundTruthReachCurveModel(data_set, 0)
    cls.curve2 = GroundTruthReachCurveModel(data_set, 1)
def test_one_publisher(self):
    pdf = PublisherData([(1, 100.0)], "pdf")
    data_set = DataSet([pdf], "test")
    generator = GridTestPointGenerator(data_set, np.random.default_rng(1), grid_size=4)
    values = [int(x[0]) for x in generator.test_points()]
    self.assertLen(values, 4)
    self.assertEqual(values, [20, 40, 60, 80])
def test_fit_gamma_poisson(self):
    # Shifted Gamma-Poisson with shape alpha and scale beta:
    # mean = 1 + alpha * beta = 5, var = alpha * beta * (1 + beta) = 6.
    N, alpha, beta = 10000, 8, 0.5
    data = HeterogeneousImpressionGenerator(N, gamma_shape=alpha, gamma_scale=beta)()
    publisher = PublisherData(FixedPriceGenerator(0.1)(data))
    dataset = DataSet([publisher], "Exponential-poisson")
    spend_fraction = 0.5
    spend = dataset._data[0].max_spend * spend_fraction
    point = dataset.reach_by_spend([spend])
    gm = GoergModel([point])
    gm_reach = gm.by_spend([spend]).reach()
    kgpm = KInflatedGammaPoissonModel([point])
    kgpm.print_fit_header()
    kgpm.print_fit("true", 0.0, N, alpha, beta, [])
    kgpm._fit()
    kgpm_reach = kgpm.by_spend([spend]).reach()
    self.assertAlmostEqual(gm_reach, kgpm_reach, delta=100)
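# The moment comment above can be verified numerically. This sketch assumes,
# as those figures imply, that HeterogeneousImpressionGenerator draws
# 1 + Poisson(lam) impressions per user with lam ~ Gamma(shape, scale).
def _gamma_poisson_moment_check():
    import numpy as np

    rng = np.random.default_rng(1)
    lam = rng.gamma(shape=8, scale=0.5, size=1_000_000)  # alpha=8, beta=0.5
    impressions = 1 + rng.poisson(lam)  # shifted Gamma-Poisson mixture
    print(impressions.mean())  # ~5.0 = 1 + alpha * beta
    print(impressions.var())   # ~6.0 = alpha * beta * (1 + beta)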
@classmethod
def setUpClass(cls):
    pdf1 = PublisherData([(1, 0.01), (2, 0.02), (1, 0.04), (3, 0.05)], "pdf1")
    pdf2 = PublisherData([(2, 0.03), (4, 0.06)], "pdf2")
    data_set = DataSet([pdf1, pdf2], "test")
    cls.params = SystemParameters(
        [0.4, 0.5], LiquidLegionsParameters(), np.random.default_rng(1)
    )
    cls.privacy_tracker = PrivacyTracker()
    cls.halo = HaloSimulator(data_set, cls.params, cls.privacy_tracker)
def test_two_publishers(self):
    pdf1 = PublisherData([(1, 3.0)], "pdf1")
    pdf2 = PublisherData([(1, 6.0)], "pdf2")
    data_set = DataSet([pdf1, pdf2], "test")
    generator = GridTestPointGenerator(data_set, np.random.default_rng(1), grid_size=2)
    values = [(int(x[0]), int(x[1])) for x in generator.test_points()]
    self.assertLen(values, 4)
    self.assertEqual(values, [(1, 2), (1, 4), (2, 2), (2, 4)])
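# Both grid expectations above are consistent with an interior grid placed at
# k / (grid_size + 1) of each publisher's maximum spend, crossed over
# publishers. The sketch below illustrates that interpretation; it is an
# assumption about GridTestPointGenerator's behavior, not its actual code.
def _grid_points_sketch(max_spends, grid_size):
    import itertools

    axes = [
        [s * k / (grid_size + 1) for k in range(1, grid_size + 1)]
        for s in max_spends
    ]
    return list(itertools.product(*axes))

# _grid_points_sketch([100.0], 4)    -> [(20.0,), (40.0,), (60.0,), (80.0,)]
# _grid_points_sketch([3.0, 6.0], 2) -> [(1.0, 2.0), (1.0, 4.0), (2.0, 2.0), (2.0, 4.0)]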
def test_fifteen_publishers(self):
    pdf_list = []
    for i in range(15):
        pdf = PublisherData(
            [(1, 0.01), (2, 0.02), (1, 0.04), (3, 0.05)], "pdf{}".format(i)
        )
        pdf_list.append(pdf)
    data_set = DataSet(pdf_list, "test")
    generator = LatinHypercubeRandomTestPointGenerator(
        data_set, np.random.default_rng(1), npoints=225
    )
    values = [x for x in generator.test_points()]
    self.assertLen(values, 225)
def test_read_and_write_data_set(self):
    with TemporaryDirectory() as d:
        self.data_set.write_data_set(d)
        new_data_set = DataSet.read_data_set("{}/test".format(d))
        self.assertEqual(new_data_set.publisher_count, 2)
        self.assertEqual(new_data_set.name, "test")
        self.assertEqual(new_data_set.reach_by_impressions([4, 0]).reach(), 3)
        self.assertEqual(new_data_set.reach_by_impressions([0, 2]).reach(), 2)
        self.assertEqual(new_data_set.reach_by_impressions([4, 2]).reach(), 4)
def test_npoints_generator(self):
    pdf1 = PublisherData([(1, 0.01), (2, 0.02), (1, 0.04), (3, 0.05)], "pdf1")
    pdf2 = PublisherData([(1, 0.02), (2, 0.04), (1, 0.08), (3, 0.10)], "pdf2")
    data_set = DataSet([pdf1, pdf2], "test")
    generator = LatinHypercubeRandomTestPointGenerator(
        data_set,
        np.random.default_rng(1),
        npublishers=2,
        minimum_points_per_publisher=200,
    )
    values = [x for x in generator.test_points()]
    self.assertLen(values, 400)
def test_one_publisher(self):
    pdf = PublisherData([(1, 0.01), (2, 0.02), (1, 0.04), (3, 0.05)], "pdf")
    data_set = DataSet([pdf], "test")
    generator = LatinHypercubeRandomTestPointGenerator(
        data_set, np.random.default_rng(1), npoints=100
    )
    values = [x for x in generator.test_points()]
    self.assertLen(values, 100)
    for i, v in enumerate(values):
        self.assertLen(v, 1)
        self.assertTrue(v[0] >= 0.0, "Item {} is negative: {}".format(i, v))
        self.assertTrue(v[0] < 0.05, "Item {} is too large: {}".format(i, v))
def test_class_setup_with_campaign_spend_fractions_generator(self):
    pdf1 = PublisherData([(1, 0.01), (2, 0.02), (1, 0.04), (3, 0.05)], "pdf1")
    pdf2 = PublisherData([(2, 0.03), (4, 0.06)], "pdf2")
    data_set = DataSet([pdf1, pdf2], "test")
    params = SystemParameters(
        liquid_legions=LiquidLegionsParameters(),
        generator=np.random.default_rng(1),
        campaign_spend_fractions_generator=(
            lambda dataset: [0.2] * dataset.publisher_count
        ),
    )
    params = params.update_from_dataset(data_set)
    privacy_tracker = PrivacyTracker()
    halo = HaloSimulator(data_set, params, privacy_tracker)
    # assertAlmostEqual is used here because of floating-point rounding error.
    self.assertAlmostEqual(halo._campaign_spends[0], 0.01, 7)
    self.assertAlmostEqual(halo._campaign_spends[1], 0.012, 7)
def test_form_venn_diagram_regions_with_publishers_more_than_limit(self):
    num_publishers = MAX_ACTIVE_PUBLISHERS + 1
    data_set = DataSet(
        [
            PublisherData([(1, 0.01)], f"pdf{i + 1}")
            for i in range(num_publishers)
        ],
        "test",
    )
    params = SystemParameters(
        [0.4] * num_publishers,
        LiquidLegionsParameters(),
        np.random.default_rng(1),
    )
    privacy_tracker = PrivacyTracker()
    halo = HaloSimulator(data_set, params, privacy_tracker)
    spends = [0.01] * num_publishers
    with self.assertRaises(ValueError):
        halo._form_venn_diagram_regions(spends)
def test_evaluate(self):
    with TemporaryDirectory() as d:
        pdf1 = PublisherData([(1, 0.01), (2, 0.02), (1, 0.04), (3, 0.05)], "pdf1")
        pdf2 = PublisherData([(2, 0.03), (4, 0.06)], "pdf2")
        data_set = DataSet([pdf1, pdf2], "dataset")
        data_design_dir = join(d, "data_design")
        experiment_dir = join(d, "experiments")
        data_design = DataDesign(data_design_dir)
        data_design.add(data_set)
        MODELING_STRATEGIES["fake"] = FakeModelingStrategy
        TEST_POINT_STRATEGIES["fake_tps"] = (
            lambda ds, rng: FakeTestPointGenerator().test_points()
        )
        msd = ModelingStrategyDescriptor(
            "fake", {"x": 1}, "goerg", {}, "pairwise_union", {}
        )
        sparams1 = SystemParameters(
            [0.03, 0.05],
            LiquidLegionsParameters(13, 1e6, 1),
            np.random.default_rng(),
        )
        sparams2 = SystemParameters(
            [0.05, 0.03],
            LiquidLegionsParameters(13, 1e6, 1),
            np.random.default_rng(),
        )
        eparams1 = ExperimentParameters(PrivacyBudget(1.0, 0.01), 1, 5, "fake_tps")
        eparams2 = ExperimentParameters(PrivacyBudget(0.5, 0.001), 1, 5, "fake_tps")
        trial_descriptors = [
            TrialDescriptor(msd, sparams1, eparams1),
            TrialDescriptor(msd, sparams1, eparams2),
            TrialDescriptor(msd, sparams2, eparams1),
            TrialDescriptor(msd, sparams2, eparams2),
        ]
        exp = Experiment(experiment_dir, data_design, "dataset", trial_descriptors)
        trials = exp.generate_trials()
        self.assertLen(trials, 4)
def test_make_independent_vars_dataframe(self):
    with TemporaryDirectory() as d:
        pdf1 = PublisherData([(1, 0.01), (2, 0.02), (1, 0.04), (3, 0.05)], "pdf1")
        pdf2 = PublisherData([(2, 0.03), (4, 0.06)], "pdf2")
        data_set = DataSet([pdf1, pdf2], "dataset")
        data_design = DataDesign(join(d, "data_design"))
        data_design.add(data_set)
        msd = ModelingStrategyDescriptor(
            "strategy", {}, "single_pub_model", {}, "multi_pub_model", {}
        )
        sparams = SystemParameters(
            [0.03, 0.05],
            LiquidLegionsParameters(13, 1e6, 1),
            np.random.default_rng(),
        )
        eparams = ExperimentParameters(
            PrivacyBudget(1.0, 0.01), 3, 5, "test_point_strategy"
        )
        trial_descriptor = TrialDescriptor(msd, sparams, eparams)
        trial = ExperimentalTrial("edir", data_design, "dataset", trial_descriptor)
        actual = trial._make_independent_vars_dataframe()
        expected_trial_name = (
            "strategy,single_pub_model,multi_pub_model,"
            "spends=(0.03,0.05),decay_rate=13,sketch_size=1000000.0,"
            "epsilon=1.0,delta=0.01,replica_id=3,max_frequency=5,"
            "test_point_strategy=test_point_strategy"
        )
        expected = pd.DataFrame({
            "dataset": ["dataset"],
            "trial": [expected_trial_name],
            "replica_id": [3],
            "single_pub_model": ["single_pub_model"],
            "multi_pub_model": ["multi_pub_model"],
            "strategy": ["strategy"],
            "liquid_legions_sketch_size": [1e6],
            "liquid_legions_decay_rate": [13],
            "maximum_reach": [4],
            "ncampaigns": [2],
            "largest_pub_reach": [3],
            "max_frequency": [5],
            "average_spend_fraction": [0.04],
        })
        pd.testing.assert_frame_equal(actual, expected)
def test_form_venn_diagram_regions(self, num_publishers, spends, max_freq, expected):
    pdfs = [
        PublisherData([(1, 0.01), (2, 0.02), (1, 0.04), (3, 0.05)], "pdf1"),
        PublisherData([(2, 0.03), (4, 0.06)], "pdf2"),
        PublisherData([(2, 0.01), (3, 0.03), (4, 0.05)], "pdf3"),
    ]
    data_set = DataSet(pdfs[:num_publishers], "test")
    params = SystemParameters(
        [0.4] * num_publishers,
        LiquidLegionsParameters(),
        np.random.default_rng(1),
    )
    privacy_tracker = PrivacyTracker()
    halo = HaloSimulator(data_set, params, privacy_tracker)
    regions = halo._form_venn_diagram_regions(spends, max_freq)
    self.assertEqual(expected, regions)
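# For intuition: primitive Venn diagram regions over n publishers can be
# indexed by a bitmask of the publishers that reached each user. The sketch
# below is an illustrative encoding under that assumption, not the
# simulator's actual data structure.
def _venn_regions_sketch(users_by_publisher):
    """Counts users per primitive region; bit p of the key is set iff
    publisher p reached the user."""
    from collections import defaultdict

    regions = defaultdict(int)
    for user in set().union(*users_by_publisher):
        key = sum(
            1 << p for p, users in enumerate(users_by_publisher) if user in users
        )
        regions[key] += 1
    return dict(regions)

# Users 1 and 2 reached only by pub 0, user 3 by both, user 4 only by pub 1:
# _venn_regions_sketch([{1, 2, 3}, {3, 4}]) counts region 1 -> 2 users,
# region 2 -> 1 user, region 3 (the overlap) -> 1 user.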
def test_scale_up_reach_in_primitive_regions(
    self,
    mock_geometric_estimate_noiser,
    regions,
    true_cardinality,
    std,
    budget,
    privacy_budget_split,
    fixed_noise,
    expected,
):
    mock_geometric_estimate_noiser.return_value = FakeNoiser(fixed_noise)
    params = SystemParameters([0], LiquidLegionsParameters(), FakeRandomGenerator())
    halo = HaloSimulator(DataSet([], "test"), params, PrivacyTracker())
    scaled_regions = halo._scale_up_reach_in_primitive_regions(
        regions, true_cardinality, std, budget, privacy_budget_split
    )
    self.assertEqual(scaled_regions, expected)
    self.assertEqual(
        halo.privacy_tracker.privacy_consumption.epsilon,
        budget.epsilon * privacy_budget_split,
    )
    self.assertEqual(
        halo.privacy_tracker.privacy_consumption.delta,
        budget.delta * privacy_budget_split,
    )
    self.assertEqual(len(halo.privacy_tracker._noising_events), 1)
    self.assertEqual(
        halo.privacy_tracker._noising_events[0].budget.epsilon,
        budget.epsilon * privacy_budget_split,
    )
    self.assertEqual(
        halo.privacy_tracker._noising_events[0].budget.delta,
        budget.delta * privacy_budget_split,
    )
    self.assertEqual(
        halo.privacy_tracker._noising_events[0].mechanism,
        DP_NOISE_MECHANISM_DISCRETE_LAPLACE,
    )
    self.assertEqual(
        halo.privacy_tracker._noising_events[0].params,
        {"privacy_budget_split": privacy_budget_split},
    )
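# The mocked geometric noiser stands in for the discrete Laplace mechanism
# recorded as DP_NOISE_MECHANISM_DISCRETE_LAPLACE. Below is a minimal sketch
# of such a noiser for sensitivity-1 counts; an illustrative stand-in, not
# the project's implementation.
import numpy as np

class _TwoSidedGeometricNoiserSketch:
    """Adds discrete Laplace noise: P(noise = k) proportional to exp(-epsilon * |k|)."""

    def __init__(self, epsilon, rng=None):
        self._p = 1.0 - np.exp(-epsilon)
        self._rng = rng or np.random.default_rng()

    def __call__(self, value):
        # The difference of two i.i.d. geometric draws is two-sided geometric.
        return value + int(self._rng.geometric(self._p) - self._rng.geometric(self._p))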
def test_generate_reach_points_from_venn_diagram(
    self, num_publishers, spends, regions, expected
):
    pdfs = [
        PublisherData([(1, 0.01), (2, 0.02), (1, 0.04), (3, 0.05)], "pdf1"),
        PublisherData([(2, 0.03), (4, 0.06)], "pdf2"),
        PublisherData([(2, 0.01), (3, 0.03), (4, 0.05)], "pdf3"),
    ]
    data_set = DataSet(pdfs[:num_publishers], "test")
    params = SystemParameters(
        [0.4] * num_publishers,
        LiquidLegionsParameters(),
        np.random.default_rng(1),
    )
    privacy_tracker = PrivacyTracker()
    halo = HaloSimulator(data_set, params, privacy_tracker)
    # Note that the reach points generated from the Venn diagram only
    # contain 1+ reaches.
    reach_points = halo._generate_reach_points_from_venn_diagram(spends, regions)
    self.assertEqual(len(reach_points), len(expected))
    for i, (r_pt, expected_r_pt) in enumerate(zip(reach_points, expected)):
        self.assertEqual(
            r_pt.impressions,
            expected_r_pt.impressions,
            msg=f"The impressions of reach point #{i + 1} are not correct",
        )
        self.assertEqual(
            r_pt.reach(1),
            expected_r_pt.reach(1),
            msg=f"The reach of reach point #{i + 1} is not correct",
        )
        self.assertEqual(
            r_pt.spends,
            expected_r_pt.spends,
            msg=f"The spends of reach point #{i + 1} are not correct",
        )
def test_two_publishers(self):
    pdf1 = PublisherData([(1, 0.01), (2, 0.02), (1, 0.04), (3, 0.05)], "pdf1")
    pdf2 = PublisherData([(1, 0.02), (2, 0.04), (1, 0.08), (3, 0.10)], "pdf2")
    data_set = DataSet([pdf1, pdf2], "test")
    generator = UniformlyRandomTestPointGenerator(
        data_set, np.random.default_rng(1), npoints=100
    )
    values = [x for x in generator.test_points()]
    self.assertLen(values, 100)
    for i, v in enumerate(values):
        self.assertLen(v, 2)
        self.assertTrue(v[0] >= 0.0, "Item {} is negative: {}".format(i, v))
        self.assertTrue(v[0] < 0.05, "Item {} is too large: {}".format(i, v))
        self.assertTrue(v[1] >= 0.0, "Item {} is negative: {}".format(i, v))
        self.assertTrue(v[1] < 0.10, "Item {} is too large: {}".format(i, v))
def test_evaluate_single_publisher_model(self):
    with TemporaryDirectory() as d:
        data1 = HeterogeneousImpressionGenerator(
            1000, gamma_shape=1.0, gamma_scale=3.0
        )()
        pdf1 = PublisherData(FixedPriceGenerator(0.1)(data1))
        data_set = DataSet([pdf1], "dataset")
        data_design_dir = join(d, "data_design")
        experiment_dir = join(d, "experiments")
        data_design = DataDesign(data_design_dir)
        data_design.add(data_set)
        msd = ModelingStrategyDescriptor(
            "single_publisher", {}, "goerg", {}, "pairwise_union", {}
        )
        sparams = SystemParameters(
            [0.5],
            LiquidLegionsParameters(13, 1e6, 1),
            np.random.default_rng(),
        )
        eparams = ExperimentParameters(
            PrivacyBudget(1.0, 0.01), 3, 5, "grid", {"grid_size": 5}
        )
        trial_descriptor = TrialDescriptor(msd, sparams, eparams)
        trial = ExperimentalTrial(
            experiment_dir,
            data_design,
            "dataset",
            trial_descriptor,
            analysis_type="single_pub",
        )
        result = trial.evaluate(seed=1)
        # We don't check each column in the resulting dataframe, because these
        # have been checked by the preceding unit tests. However, we make a few
        # strategic probes.
        self.assertEqual(result.shape[0], 1)
        self.assertAlmostEqual(result["relative_error_at_100"][0], 0.0, delta=0.01)
        self.assertGreater(result["max_nonzero_frequency_from_halo"][0], 0)
        self.assertEqual(result["max_nonzero_frequency_from_data"][0], 5)
def test_add_dp_noise_to_primitive_regions(
    self,
    mock_geometric_estimate_noiser,
    regions,
    budget,
    privacy_budget_split,
    fixed_noise,
    expected_regions,
):
    mock_geometric_estimate_noiser.return_value = FakeNoiser(fixed_noise)
    halo = HaloSimulator(DataSet([], "test"), SystemParameters(), PrivacyTracker())
    noised_regions = halo._add_dp_noise_to_primitive_regions(
        regions, budget, privacy_budget_split
    )
    self.assertEqual(noised_regions, expected_regions)
    self.assertEqual(
        halo.privacy_tracker.privacy_consumption.epsilon,
        budget.epsilon * privacy_budget_split,
    )
    self.assertEqual(
        halo.privacy_tracker.privacy_consumption.delta,
        budget.delta * privacy_budget_split,
    )
    self.assertEqual(len(halo.privacy_tracker._noising_events), 1)
    self.assertEqual(
        halo.privacy_tracker._noising_events[0].budget.epsilon,
        budget.epsilon * privacy_budget_split,
    )
    self.assertEqual(
        halo.privacy_tracker._noising_events[0].budget.delta,
        budget.delta * privacy_budget_split,
    )
    self.assertEqual(
        halo.privacy_tracker._noising_events[0].mechanism,
        DP_NOISE_MECHANISM_DISCRETE_LAPLACE,
    )
    self.assertEqual(
        halo.privacy_tracker._noising_events[0].params,
        {"privacy_budget_split": privacy_budget_split},
    )
def test_evaluate_when_there_is_a_modeling_exception(self):
    with TemporaryDirectory() as d:
        pdf1 = PublisherData([(1, 0.01), (2, 0.02), (3, 0.04), (4, 0.05)], "pdf1")
        data_set = DataSet([pdf1], "dataset")
        data_design_dir = join(d, "data_design")
        experiment_dir = join(d, "experiments")
        data_design = DataDesign(data_design_dir)
        data_design.add(data_set)
        MODELING_STRATEGIES["fake"] = GoergModelingStrategy
        TEST_POINT_STRATEGIES["fake_tps"] = GoergTestPointGenerator
        msd = ModelingStrategyDescriptor(
            "fake", {}, "goerg", {}, "pairwise_union", {}
        )
        sparams = SystemParameters(
            [0.5],
            LiquidLegionsParameters(13, 1e6, 1),
            np.random.default_rng(),
        )
        eparams = ExperimentParameters(PrivacyBudget(1.0, 0.01), 3, 5, "fake_tps")
        trial_descriptor = TrialDescriptor(msd, sparams, eparams)
        trial = ExperimentalTrial(
            experiment_dir, data_design, "dataset", trial_descriptor
        )
        result = trial.evaluate(seed=1)
        # We don't check each column in the resulting dataframe, because these
        # have been checked by the preceding unit tests. However, we make a few
        # strategic probes.
        self.assertEqual(result.shape[0], 1)
        self.assertEqual(result["dataset"][0], "dataset")
        self.assertEqual(result["replica_id"][0], 3)
        self.assertEqual(result["privacy_budget_epsilon"][0], 1.0)
        self.assertEqual(result["model_succeeded"][0], 0)
        self.assertEqual(
            result["model_exception"][0],
            "Cannot fit Goerg model when impressions <= reach.",
        )
def test_latin_hypercube_definition(self):
    """Checks that the points satisfy the definition of a Latin hypercube.

    Tests that the generated test points project onto equally spaced
    cells along each dimension.
    """
    pdf1 = PublisherData([(1, 0.01), (2, 0.02), (1, 0.04), (3, 0.05)], "pdf1")
    pdf2 = PublisherData([(1, 0.02), (2, 0.04), (1, 0.08), (3, 0.10)], "pdf2")
    pdf3 = PublisherData([(1, 0.02), (2, 0.04), (1, 0.01), (3, 0.06)], "pdf3")
    data_set = DataSet([pdf1, pdf2, pdf3], "test")
    generator = LatinHypercubeRandomTestPointGenerator(
        data_set, np.random.default_rng(1), npoints=100
    )
    design = np.stack([x for x in generator.test_points()])
    equally_spaced = set(range(100))
    self.assertEqual(
        set((design[:, 0] / 0.05 * 100).astype("int32")), equally_spaced
    )
    self.assertEqual(
        set((design[:, 1] / 0.10 * 100).astype("int32")), equally_spaced
    )
    self.assertEqual(
        set((design[:, 2] / 0.06 * 100).astype("int32")), equally_spaced
    )
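# For reference, a minimal Latin hypercube sampler exhibiting exactly the
# property this test checks: each dimension's points occupy each of the
# npoints equally spaced cells once. Illustrative sketch, not the generator
# under test.
def _latin_hypercube_sketch(npoints, ndims, rng):
    import numpy as np

    # Independently permute the cell indices 0..npoints-1 per dimension,
    # then jitter uniformly within each cell; the result lies in [0, 1)^ndims.
    cells = rng.permuted(np.tile(np.arange(npoints), (ndims, 1)), axis=1).T
    return (cells + rng.random((npoints, ndims))) / npoints

# design = _latin_hypercube_sketch(100, 3, np.random.default_rng(1))
# set((design[:, 0] * 100).astype(int)) == set(range(100))  # True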
def test_evaluate_single_publisher_model_with_exception(self):
    with TemporaryDirectory() as d:
        pdf1 = PublisherData([(1, 0.01), (2, 0.02), (3, 0.04), (4, 0.05)], "pdf1")
        data_set = DataSet([pdf1], "dataset")
        data_design_dir = join(d, "data_design")
        experiment_dir = join(d, "experiments")
        data_design = DataDesign(data_design_dir)
        data_design.add(data_set)
        MODELING_STRATEGIES["fake"] = GoergModelingStrategy
        TEST_POINT_STRATEGIES["fake_tps"] = GoergTestPointGenerator
        msd = ModelingStrategyDescriptor(
            "fake", {}, "goerg", {}, "pairwise_union", {}
        )
        sparams = SystemParameters(
            [0.5],
            LiquidLegionsParameters(13, 1e6, 1),
            np.random.default_rng(),
        )
        eparams = ExperimentParameters(PrivacyBudget(1.0, 0.01), 3, 5, "fake_tps")
        trial_descriptor = TrialDescriptor(msd, sparams, eparams)
        trial = ExperimentalTrial(
            experiment_dir,
            data_design,
            "dataset",
            trial_descriptor,
            analysis_type="single_pub",
        )
        result = trial.evaluate(seed=1)
        # We don't check each column in the resulting dataframe, because these
        # have been checked by the preceding unit tests. However, we make a few
        # strategic probes.
        self.assertEqual(result.shape[0], 1)
        self.assertTrue(math.isnan(result["relative_error_at_100"][0]))
@classmethod
def setUpClass(cls):
    pdf1 = PublisherData([(1, 0.01), (2, 0.02), (1, 0.04), (3, 0.05)], "pdf1")
    pdf2 = PublisherData([(2, 0.03), (4, 0.06)], "pdf2")
    data_set = DataSet([pdf1, pdf2], "test")
    cls.data_set = data_set