# Shared imports; the test fixtures referenced below (TEST_PUMS_PATH,
# TEST_PUMS_NAMES, TEST_DATA_PATH, TEST_DATA_COLUMNS, TEST_EDUC_PATH,
# TEST_EDUC_NAMES, generate_synthetic, PrivateClampedSum, PrivateClampedMean)
# are assumed to be provided by the surrounding test package.
import math
import os
import random
from pprint import pprint

import numpy as np
import pandas as pd
import pytest

import opendp.smartnoise.core as sn


def snapping_similarity():
    snapping_estimates = []
    laplace_estimates = []
    with sn.Analysis(strict_parameter_checks=False):
        PUMS = sn.Dataset(path=TEST_PUMS_PATH, column_names=TEST_PUMS_NAMES)

        age = sn.impute(sn.to_float(PUMS['age']),
                        data_lower=0.,
                        data_upper=100.,
                        data_rows=1000)

        for i in range(100):
            snapping_component = sn.dp_mean(age,
                                            mechanism="snapping",
                                            privacy_usage={
                                                "epsilon": 1.0,
                                                "delta": 1E-6
                                            })
            laplace_component = sn.dp_mean(age,
                                           mechanism="laplace",
                                           privacy_usage={
                                               "epsilon": 1.0,
                                               "delta": 1E-6
                                           })

            snapping_estimates.append(snapping_component.value)
            laplace_estimates.append(laplace_component.value)

    print(sum(snapping_estimates) / len(snapping_estimates))
    print(sum(laplace_estimates) / len(laplace_estimates))
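
# The snapping mechanism (Mironov, 2012) is, roughly, Laplace noise whose
# result is rounded onto a fixed grid so that floating-point artifacts cannot
# leak information. A minimal conceptual sketch, not the library's exact
# implementation; `bound` and `grid` are illustrative parameters:
def snapping_sketch(value, epsilon, bound=100., grid=2.**-4):
    value = max(-bound, min(bound, value))                   # clamp the input
    sign = 1. if random.random() < .5 else -1.
    noise = sign * math.log(1. - random.random()) / epsilon  # Laplace(1/epsilon)
    snapped = round((value + noise) / grid) * grid           # snap to the grid
    return max(-bound, min(bound, snapped))                  # clamp the output
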
def test_properties():
    with sn.Analysis():
        # load data
        data = sn.Dataset(path=TEST_PUMS_PATH, column_names=TEST_PUMS_NAMES)

        # establish data
        age_dt = sn.cast(data['age'], 'FLOAT')

        # ensure data are non-null
        non_null_age_dt = sn.impute(age_dt,
                                    distribution='Uniform',
                                    lower=0.,
                                    upper=100.)
        clamped = sn.clamp(age_dt, lower=0., upper=100.)

        # create potential for null data again
        potentially_null_age_dt = non_null_age_dt / 0.

        # print('original properties:\n{0}\n\n'.format(age_dt.properties))
        print('nullity after imputation:\n{0}\n\n'.format(
            non_null_age_dt.nullity))
        print('nullity after division by zero:\n{0}\n\n'.format(
            potentially_null_age_dt.nullity))

        print("lower", clamped.lower)
        print("upper", clamped.upper)
        print("releasable", clamped.releasable)
        # print("props", clamped.properties)
        print("data_type", clamped.data_type)
        print("categories", clamped.categories)
def analytic_gaussian_similarity():
    analytic_gauss_estimates = []
    gauss_estimates = []
    with sn.Analysis(strict_parameter_checks=False):
        PUMS = sn.Dataset(path=TEST_PUMS_PATH, column_names=TEST_PUMS_NAMES)

        age = sn.impute(sn.to_float(PUMS['age']),
                        data_lower=0.,
                        data_upper=100.,
                        data_rows=1000)

        for i in range(100):
            an_gauss_component = sn.dp_mean(age,
                                            mechanism="AnalyticGaussian",
                                            privacy_usage={
                                                "epsilon": 1.0,
                                                "delta": 1E-6
                                            })
            gauss_component = sn.dp_mean(age,
                                         mechanism="Gaussian",
                                         privacy_usage={
                                             "epsilon": 1.0,
                                             "delta": 1E-6
                                         })

            # this triggers an analysis.release (which also computes gauss_component)
            analytic_gauss_estimates.append(an_gauss_component.value)
            gauss_estimates.append(gauss_component.value)

    print(sum(analytic_gauss_estimates) / len(analytic_gauss_estimates))
    print(sum(gauss_estimates) / len(gauss_estimates))
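
# Both components add Gaussian noise; they differ in how the standard
# deviation is calibrated from (epsilon, delta). A sketch of the classical
# calibration, which is only valid for epsilon < 1; the analytic Gaussian
# mechanism (Balle & Wang, 2018) instead solves for the smallest sufficient
# sigma numerically, which is why its estimates are typically tighter:
def classical_gaussian_sigma(epsilon, delta, sensitivity=1.):
    return math.sqrt(2. * math.log(1.25 / delta)) * sensitivity / epsilon
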
def test_private_clamped_sum_helpers():
    # Compute the CI with smartnoise
    with sn.Analysis() as analysis:
        data = sn.Dataset(path=TEST_DATA_PATH, column_names=TEST_DATA_COLUMNS)
        D = sn.to_float(data["age"])
        D_tilde = sn.resize(sn.clamp(data=D, lower=0.0, upper=100.0),
                            number_rows=1000)
        release = sn.dp_sum(data=sn.impute(D_tilde), privacy_usage={"epsilon": 1.0})
    smartnoise_ci = release.get_accuracy(0.05)

    op = PrivateClampedSum(lower_bound=0, upper_bound=100)
    eeprivacy_ci = op.confidence_interval(epsilon=1, confidence=0.95)

    assert pytest.approx(smartnoise_ci, abs=0.001) == eeprivacy_ci
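
# For a clamped sum released with Laplace noise the scale is
# (upper - lower) / epsilon, and the tail bound P(|noise| > t) = exp(-t / scale)
# gives a (1 - alpha) accuracy radius of scale * ln(1 / alpha). A sketch of the
# quantity both libraries should agree on (assuming the Laplace mechanism):
def laplace_sum_accuracy(lower, upper, epsilon, alpha=0.05):
    scale = (upper - lower) / epsilon
    return scale * math.log(1. / alpha)
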
def try_sn():
    # establish data information
    #data_path = 'https://raw.githubusercontent.com/opendp/smartnoise-samples/86-requirements-fix/analysis/data/PUMS_california_demographics_1000/data.csv'
    data_path = os.path.join('.', 'data', 'PUMS_california_demographics_1000',
                             'data.csv')
    data_path = os.path.abspath(data_path)
    print('data_path', data_path)
    var_names = ["age", "sex", "educ", "race", "income", "married", "pid"]
    D = pd.read_csv(data_path)['age']
    D_mean_age = np.mean(D)
    print('D_mean_age', D_mean_age)

    # establish extra information for this simulation
    age_lower_bound = 0.
    age_upper_bound = 100.
    D_tilde = np.clip(D, age_lower_bound, age_upper_bound)
    D_tilde_mean_age = np.mean(D_tilde)
    data_size = 1000

    df = pd.read_csv(data_path)
    # index=False so the DataFrame index is not prepended to each row
    df_as_array = [list(row) for row in df.itertuples(index=False)]
    # df.values.tolist() is an equivalent alternative
    print('df_as_array', df_as_array)

    n_sims = 2
    releases = []
    with sn.Analysis(dynamic=True) as analysis:
        data = sn.Dataset(path=data_path, column_names=var_names)
        #data = sn.Dataset(value=df_as_array, column_names=var_names)
        D = sn.to_float(data['age'])
        # preprocess data (resize is a no-op because we have the correct data size)
        D_tilde = sn.resize(sn.clamp(data=D, lower=0., upper=100.),
                            number_rows=data_size)

        for index in range(n_sims):
            # get DP mean of age
            releases.append(
                sn.dp_mean(data=sn.impute(D_tilde),
                           privacy_usage={'epsilon': 1}))

    accuracy = releases[0].get_accuracy(0.05)

    analysis.release()
    dp_values = [release.value for release in releases]
    print(
        'Accuracy interval (with accuracy value {0}) contains the true mean on D_tilde with probability {1}'
        .format(
            round(accuracy, 4),
            np.mean([(D_tilde_mean_age >= val - accuracy) &
                     (D_tilde_mean_age <= val + accuracy)
                     for val in dp_values])))
def test_divide():
    with sn.Analysis():
        data_A = generate_synthetic(float, variants=['Random'])

        f_random = data_A['F_Random']
        imputed = sn.impute(f_random, lower=0., upper=10.)
        clamped_nonzero = sn.clamp(imputed, lower=1., upper=10.)
        clamped_zero = sn.clamp(imputed, lower=0., upper=10.)

        # test properties
        assert f_random.nullity
        assert not imputed.nullity
        # dividing by a value whose range includes zero reintroduces nullity
        assert (2. / imputed).nullity
        assert (f_random / imputed).nullity
        assert not (2. / clamped_nonzero).nullity
        assert (2. / clamped_zero).nullity
def test_median_education():
    # import pandas as pd
    # print(pd.read_csv(data_path)['value'].median())
    with sn.Analysis(filter_level="all") as analysis:
        data = sn.Dataset(path=TEST_EDUC_PATH, column_names=TEST_EDUC_NAMES)
        candidates = list(map(float, range(1, 200, 2)))
        median_scores = sn.median(
            sn.impute(sn.to_float(data['value']), 100., 200.),
            candidates=candidates)

        # print(list(zip(candidates, median_scores.value[0])))

        dp_median = sn.exponential_mechanism(median_scores,
                                             candidates=candidates,
                                             privacy_usage={"epsilon": 100.})
        print(dp_median.value)
    analysis.release()
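
# sn.exponential_mechanism draws one candidate with probability proportional
# to exp(epsilon * score / (2 * sensitivity)). A minimal standalone sketch of
# that selection rule (unit sensitivity assumed; scores are shifted by their
# max for numerical stability):
def exponential_mechanism_sketch(candidates, scores, epsilon):
    best = max(scores)
    weights = np.array([np.exp(epsilon * (s - best) / 2.) for s in scores])
    return np.random.choice(candidates, p=weights / weights.sum())
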
def test_mechanism(args, constructor):
    with sn.Analysis() as analysis:
        PUMS = sn.Dataset(path=TEST_PUMS_PATH, column_names=TEST_PUMS_NAMES)
        categorical = sn.resize(sn.clamp(PUMS['sex'],
                                         categories=["0", "1"],
                                         null_value="0"),
                                number_rows=1000)

        numeric = sn.impute(sn.to_float(PUMS['age']),
                            data_lower=0.,
                            data_upper=100.,
                            data_rows=1000)

        # `stats` rather than `all`, to avoid shadowing the builtin
        stats = constructor(numeric, categorical, args)

        analysis.release()
        stat_values = {stat: stats[stat].value for stat in stats}
        print()
        pprint(stat_values)

        for value in stat_values.values():
            assert value is not None
def test_private_clamped_mean_helpers():
    # Compute the CI with smartnoise
    with sn.Analysis() as analysis:
        data = sn.Dataset(path=TEST_DATA_PATH, column_names=TEST_DATA_COLUMNS)
        D = sn.to_float(data["age"])
        D_tilde = sn.resize(sn.clamp(data=D, lower=0.0, upper=100.0),
                            number_rows=1000)
        release = sn.dp_mean(data=sn.impute(D_tilde), privacy_usage={"epsilon": 1.0})
    smartnoise_ci = release.get_accuracy(0.05)

    # Compute the CI with eeprivacy
    op = PrivateClampedMean(lower_bound=0, upper_bound=100)
    eeprivacy_ci = op.confidence_interval(epsilon=1, N=1000, confidence=0.95)

    # Compare computed confidence intervals
    assert pytest.approx(smartnoise_ci, abs=0.001) == eeprivacy_ci

    smartnoise_epsilon = release.from_accuracy(value=1, alpha=0.05)[0]["epsilon"]
    eeprivacy_epsilon = op.epsilon_for_confidence_interval(
        target_ci=1, N=1000, confidence=0.95
    )

    # Compare computed epsilons for confidence interval
    assert pytest.approx(smartnoise_epsilon, abs=0.001) == eeprivacy_epsilon
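
# from_accuracy and epsilon_for_confidence_interval invert the same closed
# form: for a Laplace-noised clamped mean, accuracy = ((upper - lower) /
# (N * epsilon)) * ln(1 / alpha), so the epsilon needed for a target accuracy
# follows directly. A sketch of that inversion (assuming the Laplace mechanism):
def epsilon_for_mean_accuracy(lower, upper, n, target_accuracy, alpha=0.05):
    return (upper - lower) * math.log(1. / alpha) / (n * target_accuracy)
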
def test_dp_linear_stats(run=True):
    with sn.Analysis() as analysis:
        dataset_pums = sn.Dataset(path=TEST_PUMS_PATH,
                                  column_names=TEST_PUMS_NAMES)

        age = dataset_pums['age']
        analysis.release()

        num_records = sn.dp_count(age,
                                  privacy_usage={'epsilon': .5},
                                  lower=0,
                                  upper=10000)
        analysis.release()

        print("number of records:", num_records.value)

        # `age_income` rather than `vars`, to avoid shadowing the builtin
        age_income = sn.to_float(dataset_pums[["age", "income"]])

        covariance = sn.dp_covariance(data=age_income,
                                      privacy_usage={'epsilon': .5},
                                      data_lower=[0., 0.],
                                      data_upper=[150., 150000.],
                                      data_rows=num_records)
        print("covariance released")

        num_means = sn.dp_mean(data=age_income,
                               privacy_usage={'epsilon': .5},
                               data_lower=[0., 0.],
                               data_upper=[150., 150000.],
                               data_rows=num_records)

        analysis.release()
        print("covariance:\n", covariance.value)
        print("means:\n", num_means.value)

        age = sn.to_float(age)

        age_variance = sn.dp_variance(age,
                                      privacy_usage={'epsilon': .5},
                                      data_lower=0.,
                                      data_upper=150.,
                                      data_rows=num_records)

        analysis.release()

        print("age variance:", age_variance.value)

        # If I clamp, impute, resize, then I can reuse their properties for multiple statistics
        clamped_age = sn.clamp(age, lower=0., upper=100.)
        imputed_age = sn.impute(clamped_age)
        preprocessed_age = sn.resize(imputed_age, number_rows=num_records)

        # properties necessary for mean are statically known
        mean = sn.dp_mean(preprocessed_age, privacy_usage={'epsilon': .5})

        # properties necessary for variance are statically known
        variance = sn.dp_variance(preprocessed_age,
                                  privacy_usage={'epsilon': .5})

        # sum doesn't need n, so I pass the data in before resizing
        age_sum = sn.dp_sum(imputed_age, privacy_usage={'epsilon': .5})

        # mean with lower, upper properties propagated up from prior bounds
        transformed_mean = sn.dp_mean(-(preprocessed_age + 2.),
                                      privacy_usage={'epsilon': .5})

        analysis.release()
        print("age transformed mean:", transformed_mean.value)

        # releases may be pieced together from combinations of smaller components
        custom_mean = sn.laplace_mechanism(sn.mean(preprocessed_age),
                                           privacy_usage={'epsilon': .5})

        custom_maximum = sn.laplace_mechanism(sn.maximum(preprocessed_age),
                                              privacy_usage={'epsilon': .5})

        custom_quantile = sn.laplace_mechanism(sn.quantile(preprocessed_age,
                                                           alpha=.5),
                                               privacy_usage={'epsilon': 500})

        income = sn.to_float(dataset_pums['income'])
        income_max = sn.laplace_mechanism(sn.maximum(income,
                                                     data_lower=0.,
                                                     data_upper=1000000.),
                                          privacy_usage={'epsilon': 10})

        # releases may also be postprocessed and reused as arguments to more components
        age_sum + custom_maximum * 23.

        analysis.release()
        print("laplace quantile:", custom_quantile.value)

        age_histogram = sn.dp_histogram(sn.to_int(age, lower=0, upper=100),
                                        edges=list(range(0, 100, 25)),
                                        null_value=150,
                                        privacy_usage={'epsilon': 2.})

        sex_histogram = sn.dp_histogram(sn.to_bool(dataset_pums['sex'],
                                                   true_label="1"),
                                        privacy_usage={'epsilon': 2.})

        education_histogram = sn.dp_histogram(dataset_pums['educ'],
                                              categories=["5", "7", "10"],
                                              null_value="-1",
                                              privacy_usage={'epsilon': 2.})

        analysis.release()

        print("age histogram: ", age_histogram.value)
        print("sex histogram: ", sex_histogram.value)
        print("education histogram: ", education_histogram.value)

    if run:
        analysis.release()

        # get the mean computed when release() was called
        print(mean.value)
        print(variance.value)

    return analysis
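
# Each dp_* component and mechanism above consumes privacy budget; under basic
# sequential composition the epsilons of the individual releases simply add.
# A sketch of that bookkeeping (pure-epsilon accounting only, an assumption):
def total_epsilon(privacy_usages):
    return sum(usage.get('epsilon', 0.) for usage in privacy_usages)

# e.g. total_epsilon([{'epsilon': .5}] * 4) == 2.0
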
def test_everything(run=True):
    with sn.Analysis() as analysis:
        data = sn.Dataset(path=TEST_PUMS_PATH, column_names=TEST_PUMS_NAMES)

        age_int = sn.to_int(data['age'], 0, 150)
        sex = sn.to_bool(data['sex'], "1")
        educ = sn.to_float(data['educ'])
        race = data['race']
        income = sn.to_float(data['income'])
        married = sn.to_bool(data['married'], "1")

        numerics = sn.to_float(data[['age', 'income']])

        # intentionally busted component
        # print("invalid component id ", (sex + "a").component_id)

        # broadcast scalar over 2d, broadcast scalar over 1d, columnar broadcasting, left and right mul
        numerics * 2. + 2. * educ

        # add different values for each column
        numerics + [[1., 2.]]

        # index into first column
        age = sn.index(numerics, indices=0)
        income = sn.index(numerics, mask=[False, True])

        # boolean ops and broadcasting
        mask = sex & married | (~married ^ False) | (age > 50.) | (age_int == 25)

        # numerical clamping
        sn.clamp(numerics, 0., [150., 150_000.])
        sn.clamp(data['educ'],
                 categories=[str(i) for i in range(8, 10)],
                 null_value="-1")

        sn.count(mask)
        sn.covariance(age, income)
        sn.digitize(educ, edges=[1., 3., 10.], null_value=-1)

        # checks for safety against division by zero
        income / 2.
        income / sn.clamp(educ, 5., 20.)

        sn.dp_count(data, privacy_usage={"epsilon": 0.5})
        sn.dp_count(mask, privacy_usage={"epsilon": 0.5})

        sn.dp_histogram(mask, privacy_usage={"epsilon": 0.5})
        age = sn.impute(sn.clamp(age, 0., 150.))
        sn.dp_maximum(age, privacy_usage={"epsilon": 0.5})
        sn.dp_minimum(age, privacy_usage={"epsilon": 0.5})
        sn.dp_median(age, privacy_usage={"epsilon": 0.5})

        age_n = sn.resize(age, number_rows=800)
        sn.dp_mean(age_n, privacy_usage={"epsilon": 0.5})
        sn.dp_raw_moment(age_n, order=3, privacy_usage={"epsilon": 0.5})

        sn.dp_sum(age, privacy_usage={"epsilon": 0.5})
        sn.dp_variance(age_n, privacy_usage={"epsilon": 0.5})

        sn.filter(income, mask)
        race_histogram = sn.histogram(race,
                                      categories=["1", "2", "3"],
                                      null_value="3")
        sn.histogram(income, edges=[0., 10000., 50000.], null_value=-1)

        sn.dp_histogram(married, privacy_usage={"epsilon": 0.5})

        sn.gaussian_mechanism(race_histogram,
                              privacy_usage={
                                  "epsilon": 0.5,
                                  "delta": .000001
                              })
        sn.laplace_mechanism(race_histogram,
                             privacy_usage={
                                 "epsilon": 0.5,
                                 "delta": .000001
                             })

        sn.raw_moment(educ, order=3)

        sn.log(sn.clamp(educ, 0.001, 50.))
        sn.maximum(educ)
        sn.mean(educ)
        sn.minimum(educ)

        educ % 2.
        educ**2.

        sn.quantile(educ, .32)

        sn.resize(educ, number_rows=1200, lower=0., upper=50.)
        sn.resize(race,
                  number_rows=1200,
                  categories=["1", "2"],
                  weights=[1, 2])
        sn.resize(data[["age", "sex"]],
                  1200,
                  categories=[["1", "2"], ["a", "b"]],
                  weights=[1, 2])
        sn.resize(data[["age", "sex"]],
                  1200,
                  categories=[["1", "2"], ["a", "b", "c"]],
                  weights=[[1, 2], [3, 7, 2]])

        sn.sum(educ)
        sn.variance(educ)

    if run:
        analysis.release()

    return analysis