def actual_release(self, dataset: object):
    """
    Returns the non-private, exact response of the algorithm.
    """
    actual_res = {}
    if self.algorithm == wn.dp_mean:
        with wn.Analysis(filter_level="all") as analysis:
            data = wn.to_float(wn.Dataset(value=dataset))
            agg = wn.mean(data=data,
                          data_lower=float(min(dataset)),
                          data_upper=float(max(dataset)),
                          data_rows=len(dataset),
                          data_columns=1)
            analysis.release()
            actual_res["__key__"] = agg.value
    elif self.algorithm == wn.dp_sum:
        with wn.Analysis(filter_level="all") as analysis:
            data = wn.to_float(wn.Dataset(value=dataset))
            agg = wn.sum(data=data,
                         data_lower=float(min(dataset)),
                         data_upper=float(max(dataset)),
                         data_rows=len(dataset),
                         data_columns=1)
            analysis.release()
            actual_res["__key__"] = agg.value
    return Report(actual_res)
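# A minimal standalone sketch of the exact-statistic pattern used in
# actual_release above: the same wn.Analysis / wn.mean pipeline on a toy
# in-memory list, without the evaluator plumbing. The import path is an
# assumption based on the `wn` alias used throughout this module, and the
# function name and data values are illustrative only.
import opendp.whitenoise.core as wn

def exact_mean_sketch():
    dataset = [1., 2., 3., 4., 5.]
    with wn.Analysis(filter_level="all") as analysis:
        data = wn.to_float(wn.Dataset(value=dataset))
        agg = wn.mean(data=data,
                      data_lower=float(min(dataset)),
                      data_upper=float(max(dataset)),
                      data_rows=len(dataset),
                      data_columns=1)
        analysis.release()
    return agg.value  # exact (non-private) mean: 3.0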
def test_multilayer_analysis(run=True):
    with wn.Analysis() as analysis:
        PUMS = wn.Dataset(path=TEST_CSV_PATH, column_names=test_csv_names)

        age = wn.to_float(PUMS['age'])
        sex = wn.to_bool(PUMS['sex'], true_label="TRUE")

        age_clamped = wn.clamp(age, lower=0., upper=150.)
        age_resized = wn.resize(age_clamped, n=1000)

        mean_age = wn.dp_mean(data=wn.to_float(PUMS['race']),
                              privacy_usage={'epsilon': .65},
                              data_lower=0.,
                              data_upper=100.,
                              data_n=500)
        analysis.release()

        sex_plus_22 = wn.add(wn.to_float(sex), 22.,
                             left_n=1000, left_lower=0., left_upper=1.)

        wn.dp_mean(age_resized / 2. + sex_plus_22,
                   privacy_usage={'epsilon': .1},
                   data_lower=mean_age - 5.2,
                   data_upper=102.,
                   data_n=500) + 5.

        wn.dp_variance(data=wn.to_float(PUMS['educ']),
                       privacy_usage={'epsilon': .15},
                       data_n=1000,
                       data_lower=0.,
                       data_upper=12.)

        # wn.dp_moment_raw(
        #     wn.to_float(PUMS['married']),
        #     privacy_usage={'epsilon': .15},
        #     data_n=1000000,
        #     data_lower=0.,
        #     data_upper=12.,
        #     order=3
        # )
        #
        # wn.dp_covariance(
        #     left=wn.to_float(PUMS['age']),
        #     right=wn.to_float(PUMS['married']),
        #     privacy_usage={'epsilon': .15},
        #     left_n=1000,
        #     right_n=1000,
        #     left_lower=0.,
        #     left_upper=1.,
        #     right_lower=0.,
        #     right_upper=1.
        # )

    if run:
        analysis.release()

    return analysis
def mean(self, priv_usage, data, *args, **kwargs):
    # Enter the analysis context so the cast is registered on this analysis,
    # then exit it. (The original mixed __enter__() with exit(); the paired
    # enter()/exit() calls are used here for consistency.)
    self._analysis.enter()
    data = wn.to_float(data)
    self._analysis.exit()

    # If the remaining budget is smaller than the requested usage,
    # trim the request down to whatever budget remains.
    if abs(self.priv_used - self.priv_budget) < priv_usage:
        priv_usage = self.priv_budget - self.priv_used

    return self._internalexec(priv_usage, wn.dp_mean, data, *args, **kwargs)
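# A hedged sketch of the budget-capping rule in mean() above, isolated as a
# pure function: if the remaining budget is smaller than the requested
# epsilon, the request is trimmed to whatever remains. The function name is
# illustrative, not part of this module.
def cap_epsilon(requested, budget, used):
    remaining = budget - used
    return remaining if remaining < requested else requested

# e.g. cap_epsilon(0.5, budget=1.0, used=0.8) returns 0.2 (capped to the remaining budget)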
def whitenoise_core_dp_multi_agg(self, f, dataset_path, col_names, args,
                                 epsilon, kwargs):
    releases = []
    with wn.Analysis() as analysis:
        # Repeat the two-column aggregation to collect multiple noisy samples.
        for _ in range(self.repeat_count):
            df = wn.Dataset(path=dataset_path, column_names=col_names)
            release = f(data=wn.to_float(df[[args[0], args[1]]]),
                        privacy_usage={'epsilon': epsilon},
                        **kwargs)
            releases.append(release)
        analysis.release()
    # Extract the scalar value from each released matrix.
    noisy_values = [release.value[0][0] for release in releases]
    return np.array(noisy_values)
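# A hypothetical invocation of whitenoise_core_dp_multi_agg, assuming an
# owning object `obj` with repeat_count set; `obj`, the column pair, and the
# bounds are placeholders, not names defined in this module.
#
#   noisy = obj.whitenoise_core_dp_multi_agg(
#       f=wn.dp_covariance,
#       dataset_path=TEST_CSV_PATH,
#       col_names=test_csv_names,
#       args=['age', 'income'],
#       epsilon=1.0,
#       kwargs={'data_lower': [0., 0.],
#               'data_upper': [150., 150000.],
#               'data_n': 1000})
#   # noisy is a numpy array of repeat_count noisy scalar releases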
def test_covariance():
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt

    data = np.genfromtxt(TEST_CSV_PATH, delimiter=',', names=True)

    with wn.Analysis() as analysis:
        wn_data = wn.Dataset(path=TEST_CSV_PATH, column_names=test_csv_names)

        # get full covariance matrix
        cov = wn.dp_covariance(data=wn.to_float(wn_data['age', 'sex', 'educ', 'income', 'married']),
                               privacy_usage={'epsilon': 10},
                               data_lower=[0., 0., 1., 0., 0.],
                               data_upper=[100., 1., 16., 500_000., 1.],
                               data_n=1000)
        analysis.release()

    # store DP covariance and correlation matrix
    dp_cov = cov.value
    print(dp_cov)
    dp_corr = dp_cov / np.outer(np.sqrt(np.diag(dp_cov)),
                                np.sqrt(np.diag(dp_cov)))

    # get non-DP covariance/correlation matrices
    age = list(data[:]['age'])
    sex = list(data[:]['sex'])
    educ = list(data[:]['educ'])
    income = list(data[:]['income'])
    married = list(data[:]['married'])
    non_dp_cov = np.cov([age, sex, educ, income, married])
    non_dp_corr = non_dp_cov / np.outer(np.sqrt(np.diag(non_dp_cov)),
                                        np.sqrt(np.diag(non_dp_cov)))

    print('Non-DP Covariance Matrix:\n{0}\n\n'.format(pd.DataFrame(non_dp_cov)))
    print('Non-DP Correlation Matrix:\n{0}\n\n'.format(pd.DataFrame(non_dp_corr)))
    print('DP Correlation Matrix:\n{0}'.format(pd.DataFrame(dp_corr)))

    # skip plot step in CI builds
    if IS_CI_BUILD:
        return

    plt.imshow(non_dp_corr - dp_corr, interpolation='nearest')
    plt.colorbar()
    plt.show()
def release(self, dataset: object) -> Report:
    """
    Releases a report by applying OpenDP Core functions on the dataset,
    or returns the actual report if `actual` is set to True.
    """
    noisy_res = {"__key__": []}
    # Repeat the analysis multiple times to collect enough samples for evaluation
    for _ in range(self.eval_params.repeat_count):
        with wn.Analysis() as analysis:
            data = wn.to_float(wn.Dataset(value=dataset))
            agg = self.algorithm(data=data,
                                 privacy_usage={'epsilon': self.privacy_params.epsilon},
                                 data_lower=float(min(dataset)),
                                 data_upper=float(max(dataset)),
                                 data_rows=len(dataset),
                                 data_columns=1)
            analysis.release()
            noisy_res["__key__"].append(agg.value)
    return Report(noisy_res)
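# A minimal sketch of a single noisy release on an in-memory list, mirroring
# one iteration of the loop in release() above; the epsilon and data values
# are illustrative only, and `wn` is assumed to be the whitenoise-core alias
# used throughout this module.
def noisy_mean_sketch(epsilon=1.0):
    dataset = [1., 2., 3., 4., 5.]
    with wn.Analysis() as analysis:
        data = wn.to_float(wn.Dataset(value=dataset))
        agg = wn.dp_mean(data=data,
                         privacy_usage={'epsilon': epsilon},
                         data_lower=float(min(dataset)),
                         data_upper=float(max(dataset)),
                         data_rows=len(dataset),
                         data_columns=1)
        analysis.release()
    return agg.value  # noisy mean; varies from run to run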
def test_insertion_simple():
    """
    Conduct a differentially private analysis with values inserted from other systems
    :return:
    """
    with wn.Analysis() as analysis:
        # construct a fake dataset that describes your actual data (will never be run)
        data = wn.Dataset(path="", column_names=["A", "B", "C", "D"])

        # pull a column out
        col_a = wn.to_float(data['A'])

        # describe the preprocessing you actually perform on the data
        col_a_clamped = wn.impute(wn.clamp(col_a, lower=0., upper=10.))
        col_a_resized = wn.resize(col_a_clamped, n=1000000)

        # run a fake aggregation
        actual_mean = wn.mean(col_a_resized)

        # insert aggregated data from an external system
        actual_mean.set(10)

        # describe the differentially private operation
        gaussian_mean = wn.gaussian_mechanism(actual_mean,
                                              privacy_usage={"epsilon": .4, "delta": 1e-6})

        # check if the analysis is permissible
        analysis.validate()

        # compute the missing releasable nodes - in this case, only the gaussian mean
        analysis.release()

        # retrieve the noised mean
        print("gaussian mean", gaussian_mean.value)

        # release a couple other statistics using other mechanisms in the same batch
        actual_sum = wn.sum(col_a_clamped)
        actual_sum.set(123456)
        laplace_sum = wn.laplace_mechanism(actual_sum, privacy_usage={"epsilon": .1})

        actual_count = wn.count(col_a)
        actual_count.set(9876)
        geo_count = wn.simple_geometric_mechanism(actual_count, 0, 10000,
                                                  privacy_usage={"epsilon": .1})

        analysis.release()
        print("laplace sum", laplace_sum.value)
        print("geometric count", geo_count.value)

        actual_histogram_b = wn.histogram(
            wn.clamp(data['B'], categories=['X', 'Y', 'Z'], null_value="W"))
        actual_histogram_b.set([12, 1280, 2345, 12])
        geo_histogram_b = wn.simple_geometric_mechanism(actual_histogram_b, 0, 10000,
                                                        privacy_usage={"epsilon": .1})

        col_c = wn.to_bool(data['C'], true_label="T")
        actual_histogram_c = wn.histogram(col_c)
        actual_histogram_c.set([5000, 5000])
        lap_histogram_c = wn.laplace_mechanism(actual_histogram_c,
                                               privacy_usage={"epsilon": .1})

        analysis.release()
        print("noised histogram b", geo_histogram_b.value)
        print("noised histogram c", lap_histogram_c.value)
        print("C dimensionality", col_c.dimensionality)
        print("C categories", col_c.categories)

        # multicolumnar insertion
        # pull a column out
        col_rest = wn.to_float(data[['C', 'D']])

        # describe the preprocessing you actually perform on the data
        col_rest_resized = wn.resize(
            wn.impute(wn.clamp(col_rest, lower=[0., 5.], upper=1000.)), n=10000)

        # run a fake aggregation
        actual_mean = wn.mean(col_rest_resized)

        # insert aggregated data from an external system
        actual_mean.set([[10., 12.]])

        # describe the differentially private operation
        gaussian_mean = wn.gaussian_mechanism(actual_mean,
                                              privacy_usage={"epsilon": .4, "delta": 1e-6})

        # check if the analysis is permissible
        analysis.validate()

        # compute the missing releasable nodes - in this case, only the gaussian mean
        analysis.release()

        # retrieve the noised mean
        print("rest gaussian mean", gaussian_mean.value)
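# A condensed sketch of the insertion workflow exercised above: describe the
# data and preprocessing, set() the externally computed aggregate, then
# release only the mechanism node. The inserted value 42.0, the column name,
# and the function name are placeholders.
def insertion_sketch():
    with wn.Analysis() as analysis:
        # fake dataset describing the external data (never actually read)
        data = wn.Dataset(path="", column_names=["A"])
        col = wn.resize(wn.impute(wn.clamp(wn.to_float(data['A']),
                                           lower=0., upper=10.)), n=1000)
        stat = wn.mean(col)      # describe the aggregation
        stat.set(42.0)           # insert the value computed elsewhere
        dp_stat = wn.laplace_mechanism(stat, privacy_usage={"epsilon": .1})
        analysis.release()       # computes only the mechanism node
    return dp_stat.value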
def test_dp_linear_stats(run=True):
    with wn.Analysis() as analysis:
        dataset_pums = wn.Dataset(path=TEST_CSV_PATH, column_names=test_csv_names)

        age = dataset_pums['age']
        analysis.release()

        num_records = wn.dp_count(age,
                                  privacy_usage={'epsilon': .5},
                                  lower=0,
                                  upper=10000)
        analysis.release()
        print("number of records:", num_records.value)

        vars = wn.to_float(dataset_pums[["age", "income"]])

        covariance = wn.dp_covariance(data=vars,
                                      privacy_usage={'epsilon': .5},
                                      data_lower=[0., 0.],
                                      data_upper=[150., 150000.],
                                      data_n=num_records)
        print("covariance released")

        num_means = wn.dp_mean(data=vars,
                               privacy_usage={'epsilon': .5},
                               data_lower=[0., 0.],
                               data_upper=[150., 150000.],
                               data_n=num_records)

        analysis.release()
        print("covariance:\n", covariance.value)
        print("means:\n", num_means.value)

        age = wn.to_float(age)

        age_variance = wn.dp_variance(age,
                                      privacy_usage={'epsilon': .5},
                                      data_lower=0.,
                                      data_upper=150.,
                                      data_n=num_records)

        analysis.release()
        print("age variance:", age_variance.value)

        # If I clamp, impute, resize, then I can reuse their properties
        # for multiple statistics
        clamped_age = wn.clamp(age, lower=0., upper=100.)
        imputed_age = wn.impute(clamped_age)
        preprocessed_age = wn.resize(imputed_age, n=num_records)

        # properties necessary for mean are statically known
        mean = wn.dp_mean(preprocessed_age, privacy_usage={'epsilon': .5})

        # properties necessary for variance are statically known
        variance = wn.dp_variance(preprocessed_age, privacy_usage={'epsilon': .5})

        # sum doesn't need n, so I pass the data in before resizing
        age_sum = wn.dp_sum(imputed_age, privacy_usage={'epsilon': .5})

        # mean with lower, upper properties propagated up from prior bounds
        transformed_mean = wn.dp_mean(-(preprocessed_age + 2.),
                                      privacy_usage={'epsilon': .5})
        analysis.release()
        print("age transformed mean:", transformed_mean.value)

        # releases may be pieced together from combinations of smaller components
        custom_mean = wn.laplace_mechanism(wn.mean(preprocessed_age),
                                           privacy_usage={'epsilon': .5})
        custom_maximum = wn.laplace_mechanism(wn.maximum(preprocessed_age),
                                              privacy_usage={'epsilon': .5})
        custom_quantile = wn.laplace_mechanism(wn.quantile(preprocessed_age, alpha=.5),
                                               privacy_usage={'epsilon': 500})

        income = wn.to_float(dataset_pums['income'])
        income_max = wn.laplace_mechanism(wn.maximum(income,
                                                     data_lower=0.,
                                                     data_upper=1000000.),
                                          privacy_usage={'epsilon': 10})

        # releases may also be postprocessed and reused as arguments to more components
        age_sum + custom_maximum * 23.

        analysis.release()
        print("laplace quantile:", custom_quantile.value)

        age_histogram = wn.dp_histogram(wn.to_int(age, lower=0, upper=100),
                                        edges=list(range(0, 100, 25)),
                                        null_value=150,
                                        privacy_usage={'epsilon': 2.})
        sex_histogram = wn.dp_histogram(wn.to_bool(dataset_pums['sex'], true_label="1"),
                                        privacy_usage={'epsilon': 2.})
        education_histogram = wn.dp_histogram(dataset_pums['educ'],
                                              categories=["5", "7", "10"],
                                              null_value="-1",
                                              privacy_usage={'epsilon': 2.})
        analysis.release()

        print("age histogram: ", age_histogram.value)
        print("sex histogram: ", sex_histogram.value)
        print("education histogram: ", education_histogram.value)

    if run:
        analysis.release()

    # get the mean computed when release() was called
    print(mean.value)
    print(variance.value)

    return analysis
def test_everything(run=True):
    with wn.Analysis(dynamic=True) as analysis:
        data = wn.Dataset(path=TEST_CSV_PATH, column_names=test_csv_names)

        age_int = wn.to_int(data['age'], 0, 150)
        sex = wn.to_bool(data['sex'], "1")
        educ = wn.to_float(data['educ'])
        race = data['race']
        income = wn.to_float(data['income'])
        married = wn.to_bool(data['married'], "1")

        numerics = wn.to_float(data[['age', 'income']])

        # intentionally busted component
        # print("invalid component id ", (sex + "a").component_id)

        # broadcast scalar over 2d, broadcast scalar over 1d, columnar broadcasting, left and right mul
        numerics * 2. + 2. * educ

        # add different values for each column
        numerics + [[1., 2.]]

        # index into first column
        age = numerics[0]
        income = numerics[[False, True]]

        # boolean ops and broadcasting
        mask = sex & married | (~married ^ False) | (age > 50.) | (age_int == 25)

        # numerical clamping
        wn.clamp(numerics, 0., [150., 150_000.])
        wn.clamp(data['educ'], categories=[str(i) for i in range(8, 10)], null_value="-1")

        wn.count(mask)
        wn.covariance(age, income)
        wn.digitize(educ, edges=[1., 3., 10.], null_value=-1)

        # checks for safety against division by zero
        income / 2.
        income / wn.clamp(educ, 5., 20.)

        wn.dp_count(data, privacy_usage={"epsilon": 0.5})
        wn.dp_count(mask, privacy_usage={"epsilon": 0.5})

        wn.dp_histogram(mask, privacy_usage={"epsilon": 0.5})
        age = wn.impute(wn.clamp(age, 0., 150.))
        wn.dp_maximum(age, privacy_usage={"epsilon": 0.5})
        wn.dp_minimum(age, privacy_usage={"epsilon": 0.5})
        wn.dp_median(age, privacy_usage={"epsilon": 0.5})

        age_n = wn.resize(age, n=800)
        wn.dp_mean(age_n, privacy_usage={"epsilon": 0.5})
        wn.dp_moment_raw(age_n, order=3, privacy_usage={"epsilon": 0.5})

        wn.dp_sum(age, privacy_usage={"epsilon": 0.5})
        wn.dp_variance(age_n, privacy_usage={"epsilon": 0.5})

        wn.filter(income, mask)
        race_histogram = wn.histogram(race, categories=["1", "2", "3"], null_value="3")
        wn.histogram(income, edges=[0., 10000., 50000.], null_value=-1)

        wn.dp_histogram(married, privacy_usage={"epsilon": 0.5})

        wn.gaussian_mechanism(race_histogram,
                              privacy_usage={"epsilon": 0.5, "delta": .000001})
        wn.laplace_mechanism(race_histogram,
                             privacy_usage={"epsilon": 0.5, "delta": .000001})

        wn.kth_raw_sample_moment(educ, k=3)

        wn.log(wn.clamp(educ, 0.001, 50.))
        wn.maximum(educ)
        wn.mean(educ)
        wn.minimum(educ)

        educ % 2.
        educ ** 2.

        wn.quantile(educ, .32)

        wn.resize(educ, 1200, 0., 50.)
        wn.resize(race, 1200, categories=["1", "2"], weights=[1, 2])
        wn.resize(data[["age", "sex"]], 1200,
                  categories=[["1", "2"], ["a", "b"]],
                  weights=[1, 2])
        wn.resize(data[["age", "sex"]], 1200,
                  categories=[["1", "2"], ["a", "b", "c"]],
                  weights=[[1, 2], [3, 7, 2]])

        wn.sum(educ)
        wn.variance(educ)

    if run:
        analysis.release()

    return analysis