def main(self, algo, param, privacy, e_iter=100000, d_iter=500000, test_range=0.1, n_checks=3):
    # list of tasks to test; each tuple contains (function, extra_args, sensitivity)
    tasks = [(generic_method_pydp,
              {'algorithm': algo, 'param_for_algorithm': param},
              ALL_DIFFER)]

    # claimed privacy levels to check
    claimed_privacy = privacy

    for i, (algorithm, kwargs, sensitivity) in enumerate(tasks):
        start_time = time.time()
        results = {}
        flag_file = time.ctime().replace(' ', '_')

        for privacy_budget in claimed_privacy:
            # privacy levels to test: the claimed epsilon plus values spaced
            # `test_range` above and below it (n_checks values in total)
            test_privacy = arr_n_check(privacy_budget, test_range, n_checks)

            # set the second argument of the function (assumed to be `epsilon`)
            # to the claimed privacy level
            kwargs[algorithm.__code__.co_varnames[1]] = privacy_budget

            results[privacy_budget] = self.detect_counterexample(
                algorithm, test_privacy, kwargs, sensitivity=sensitivity,
                event_iterations=e_iter, detect_iterations=d_iter)

        # dump the results to file
        json_file = Path.cwd() / f'{algorithm.__name__}_{flag_file}.json'
        with json_file.open('w') as f:
            json.dump(encode(results, unpicklable=False), f)

        # plot and save to file
        plot_file = Path.cwd() / f'{algorithm.__name__}_{flag_file}.pdf'
        self.plot_result(results, r'Test $\epsilon$', 'P Value',
                         algorithm.__name__.replace('_', ' ').title(), plot_file)

        # timing summary; assumes every claimed budget produced the same number of test points
        total_time = time.time() - start_time
        total_detections = len(claimed_privacy) * len(test_privacy)
        logger.info(
            f'[{i + 1} / {len(tasks)}]: {algorithm.__name__} | Time elapsed: {total_time:5.3f}s | '
            f'Average time per detection: {total_time / total_detections:5.3f}s')

    return 0
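# Hypothetical driver sketch (not the project's actual entry point): the class name
# `Detector`, the algorithm name, and the parameter values below are assumptions chosen
# only to illustrate how `main` is called.  `main` itself relies on module-level imports
# of time, json, pathlib.Path, a configured `logger`, an `encode` serializer, and the
# counterexample-detection helpers it references (generic_method_pydp, ALL_DIFFER,
# arr_n_check).
if __name__ == '__main__':
    detector = Detector()
    detector.main(
        algo='BoundedMean',       # forwarded to generic_method_pydp as `algorithm`
        param=1.0,                # forwarded as `param_for_algorithm`
        privacy=[0.2, 0.7, 1.5],  # claimed epsilon values to check
        e_iter=100_000,           # passed through as event_iterations
        d_iter=500_000,           # passed through as detect_iterations
        test_range=0.1,           # spacing between probed epsilon values
        n_checks=3,               # number of probed values around each claimed epsilon
    )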
def test_negative_privacy():
    assert arr_n_check(-0.9, 0.5, 3) == (0.4, 0.9, 1.4)


def test_very_large_test_range():
    assert arr_n_check(0.9, 1, 5) == (0.9,)
def test_inappropriate_n_checks1():
    assert arr_n_check(0.9, 0.5, 4) == (0.4, 0.9, 1.4)
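# The tests above pin down arr_n_check's expected behaviour: the claimed epsilon sits in
# the middle, neighbours are spaced `test_range` apart, a negative centre is taken by its
# magnitude, an even `n_checks` falls back to the next lower odd count, and any symmetric
# pair whose lower value would drop to zero or below is omitted.  The following is a
# minimal sketch consistent with those tests; it is an illustrative assumption, not
# necessarily the project's actual implementation.
def arr_n_check(center, test_range, n_checks):
    center = abs(center)       # treat a negative claimed epsilon by its magnitude
    if n_checks % 2 == 0:      # even counts cannot be centred; fall back to n_checks - 1
        n_checks -= 1
    half = n_checks // 2
    values = []
    for k in range(-half, half + 1):
        if center - abs(k) * test_range <= 0:
            continue           # drop the symmetric pair that would reach epsilon <= 0
        values.append(round(center + k * test_range, 10))
    return tuple(sorted(values))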