コード例 #1
0
 def test_ok1(self):
     """GROUP BY on a column with no upper bound should still execute."""
     # Bug fix: copy.copy is shallow, so mutating the nested column object
     # below also mutated the shared module-level `schema` and could leak
     # into other tests. deepcopy keeps the change local to this test.
     s = copy.deepcopy(schema)
     s["PUMS.PUMS"]["income"].upper = None
     reader = PandasReader(df, s)
     private_reader = PrivateReader(reader, s, privacy=Privacy(epsilon=4.0))
     rs = private_reader.execute_df(
         "SELECT income FROM PUMS.PUMS GROUP BY income")
コード例 #2
0
ファイル: _aggregation.py プロジェクト: opendp/smartnoise-sdk
    def run_agg_query(self, df, metadata, query, confidence, get_exact=True):
        """
        Run the query using the private reader and input query
        Get query response back

        :param df: pandas DataFrame holding the raw data
        :param metadata: table metadata describing `df`
        :param query: SQL aggregation query producing a single scalar
        :param confidence: confidence level for intervals — currently unused,
            since the interval collection below is disabled
        :param get_exact: when True, run the query once non-privately to
            capture the exact scalar for comparison
        :return: (noisy values as np.array, exact value, low bounds, high
            bounds) — the bounds lists stay empty while interval reporting
            is disabled
        """
        reader = PandasReader(df, metadata)
        actual = 0.0
        # VAR not supported in Pandas Reader. So not needed to fetch actual on every aggregation
        if get_exact:
            # execute() returns the header row first; [1:][0][0] is the
            # first data cell (a single scalar aggregate is expected)
            actual = reader.execute(query)[1:][0][0]
        private_reader = PrivateReader(reader,
                                       metadata,
                                       privacy=Privacy(epsilon=self.epsilon))
        query_ast = private_reader.parse_query_string(query)

        noisy_values = []
        low_bounds = []
        high_bounds = []
        # Re-run the private query repeat_count times to sample the noise
        # distribution around the exact answer.
        for idx in range(self.repeat_count):
            res = private_reader._execute_ast(query_ast, True)
            # Disabled because confidence interval not available in report
            # interval = res.report[res.colnames[0]].intervals[confidence]
            # low_bounds.append(interval[0].low)
            # high_bounds.append(interval[0].high)
            noisy_values.append(res[1:][0][0])
        return np.array(noisy_values), actual, low_bounds, high_bounds
コード例 #3
0
 def test_err1(self):
     """SUM over a column with no upper bound must raise ValueError."""
     # Bug fix: copy.copy is shallow; clearing `upper` on the nested column
     # object would also alter the shared module-level `schema`. deepcopy
     # isolates the mutation.
     s = copy.deepcopy(schema)
     s["PUMS.PUMS"]["income"].upper = None
     reader = PandasReader(df, s)
     private_reader = PrivateReader(reader, s, privacy=Privacy(epsilon=4.0))
     with pytest.raises(ValueError):
         rs = private_reader.execute_df("SELECT SUM(income) FROM PUMS.PUMS")
コード例 #4
0
 def test_check_thresholds_gauss(self):
     """Round-trip the tau threshold against closed-form Gaussian formulas."""
     # check tau for various privacy parameters
     epsilons = [0.1, 2.0]
     max_contribs = [1, 3]
     deltas = [10E-5, 10E-15]
     query = "SELECT COUNT(*) FROM PUMS.PUMS GROUP BY married"
     reader = PandasReader(df, schema)
     qp = QueryParser(schema)
     q = qp.query(query)
     for eps in epsilons:
         for d in max_contribs:
             for delta in deltas:
                 privacy = Privacy(epsilon=eps, delta=delta)
                 privacy.mechanisms.map[Stat.threshold] = Mechanism.gaussian
                 # using slightly different formulations of same formula from different papers
                 # make sure private_reader round-trips
                 gaus_scale = math.sqrt(d) * math.sqrt(
                     2 * math.log(1.25 / delta)) / eps
                 gaus_rho = 1 + gaus_scale * math.sqrt(
                     2 * math.log(d / math.sqrt(2 * math.pi * delta)))
                 # Bug fix: copy.copy is shallow, so setting max_ids below
                 # mutated the shared module-level schema and leaked into
                 # later iterations and other tests. deepcopy isolates
                 # each iteration's metadata.
                 schema_c = copy.deepcopy(schema)
                 schema_c["PUMS.PUMS"].max_ids = d
                 private_reader = PrivateReader(reader,
                                                metadata=schema_c,
                                                privacy=privacy)
                 assert (private_reader._options.max_contrib == d)
                 r = private_reader._execute_ast(q)
                 assert (math.isclose(private_reader.tau,
                                      gaus_rho,
                                      rel_tol=0.03,
                                      abs_tol=2))
コード例 #5
0
 def QuerytoAST(self, query, meta, data):
     """Parse `query` into a private AST; return None if parsing fails.

     :param query: SQL query string to parse
     :param meta: dataset metadata
     :param data: the underlying data frame
     :return: parsed AST, or None when the query cannot be parsed
     """
     # NOTE(review): argument order here differs from the other call sites,
     # which use PandasReader(df, metadata) and PrivateReader(reader,
     # metadata, ...). Confirm against the constructor signatures in use.
     reader = PandasReader(meta, data)
     private_reader = PrivateReader(meta, reader, self.pp.epsilon)
     # query =  'SELECT Usage AS Usage, SUM(Usage) + 3 AS u FROM dataset.dataset GROUP BY Role'
     try:
         ast = private_reader.parse_query_string(query)
     except Exception:
         # Bug fix: a bare `except:` also swallows KeyboardInterrupt and
         # SystemExit; catch Exception so only genuine parse failures
         # result in a None return.
         return None
     return ast
コード例 #6
0
 def test_execute_with_dpsu(self):
     """With use_dpsu enabled, the private reader must swap in a DPSU reader."""
     # Bug fix: copy.copy is shallow, so flipping use_dpsu on the nested
     # table object also flipped it on the shared module-level schema.
     schema_dpsu = copy.deepcopy(schema)
     schema_dpsu["PUMS.PUMS"].use_dpsu = True
     reader = PandasReader(df, schema_dpsu)
     private_reader = PrivateReader(reader, schema_dpsu, 1.0)
     assert (private_reader._options.use_dpsu == True)
     query = QueryParser(schema_dpsu).queries(
         "SELECT COUNT(*) AS c FROM PUMS.PUMS GROUP BY married")[0]
     # A different (DPSU) reader should be selected for this query.
     assert (private_reader._get_reader(query) is not private_reader.reader)
コード例 #7
0
 def test_viz_child_nodes(self):
     """Every AggFunction node in the rewritten outer query should render."""
     query = "SELECT AVG(age) AS my_sum FROM PUMS.PUMS GROUP BY age"
     rdr = PandasReader(df, schema)
     priv = PrivateReader(rdr,
                          schema,
                          privacy=Privacy(epsilon=1.0))
     inner, outer = priv._rewrite(query)
     for node in outer.find_nodes(AggFunction):
         rendered = node.visualize(n_trunc=30)
         assert isinstance(rendered, Digraph)
コード例 #8
0
 def test_execute_without_dpsu(self):
     """With use_dpsu disabled, the original reader must be used as-is."""
     # Bug fix: copy.copy is shallow, so setting use_dpsu on the nested
     # table object also mutated the shared module-level schema.
     schema_no_dpsu = copy.deepcopy(schema)
     schema_no_dpsu["PUMS.PUMS"].use_dpsu = False
     reader = PandasReader(df, schema_no_dpsu)
     private_reader = PrivateReader(reader,
                                    schema_no_dpsu,
                                    privacy=Privacy(epsilon=1.0))
     assert (private_reader._options.use_dpsu == False)
     query = QueryParser(schema_no_dpsu).queries(
         "SELECT COUNT(*) AS c FROM PUMS.PUMS GROUP BY married")[0]
     # No DPSU substitution: the same reader instance is returned.
     assert (private_reader._get_reader(query) is private_reader.reader)
コード例 #9
0
 def test_viz_query_rewritten(self):
     """Both halves of the rewritten AST should render as Digraph objects."""
     query = "SELECT SUM(age) AS my_sum FROM PUMS.PUMS GROUP BY age"
     parsed_query = QueryParser(schema).query(query)
     rdr = PandasReader(df, schema)
     priv = PrivateReader(rdr,
                          schema,
                          privacy=Privacy(epsilon=1.0))
     inner, outer = priv._rewrite_ast(parsed_query)
     # Visualize outer first, then inner, as the original test did.
     #graph.render('ast_digraph', view=True, cleanup=True)
     for ast_part in (outer, inner):
         rendered = ast_part.visualize(n_trunc=30)
         assert isinstance(rendered, Digraph)
コード例 #10
0
 def test_with_censor_dims(self):
     """COUNT(DISTINCT key) should be rewritten to reuse the keycount column."""
     meta = Metadata.from_file(meta_path)
     frame = pd.read_csv(csv_path)
     rdr = PandasReader(frame, meta)
     priv = PrivateReader(rdr,
                          meta,
                          privacy=Privacy(epsilon=3.0))
     query = "SELECT COUNT (*) AS foo, COUNT(DISTINCT pid) AS bar FROM PUMS.PUMS"
     parsed = QueryParser(meta).query(query)
     inner, outer = priv._rewrite_ast(parsed)
     exprs = outer.select.namedExpressions
     # COUNT(*) must not alias keycount; COUNT(DISTINCT pid) must.
     assert exprs[0].expression.expression.name != 'keycount'
     assert exprs[1].expression.expression.name == 'keycount'
コード例 #11
0
 def test_case_sensitive(self):
     """A quoted "PiD" column must not be conflated with the key column pid."""
     sample = Table(
         "PUMS", "PUMS",
         [Int('pid', is_key=True), Int('"PiD"')], 150)
     meta = Metadata([sample], "csv")
     rdr = PostgresReader("localhost", "PUMS", "admin", "password")
     priv = PrivateReader(rdr,
                          meta,
                          privacy=Privacy(epsilon=3.0))
     query = 'SELECT COUNT (DISTINCT pid) AS foo, COUNT(DISTINCT "PiD") AS bar FROM PUMS.PUMS'
     inner, outer = priv._rewrite(query)
     exprs = outer.select.namedExpressions
     # Only the key column's distinct count becomes keycount.
     assert exprs[0].expression.expression.name == 'keycount'
     assert exprs[1].expression.expression.name != 'keycount'
コード例 #12
0
 def test_reuse_expression(self):
     """AVG/SUM/COUNT over the same column should share rewritten columns."""
     meta = Metadata.from_file(meta_path)
     df = pd.read_csv(csv_path)
     reader = PandasReader(df, meta)
     private_reader = PrivateReader(reader,
                                    meta,
                                    privacy=Privacy(epsilon=3.0))
     query = 'SELECT AVG(age), SUM(age), COUNT(age) FROM PUMS.PUMS'
     # Cleanup: the original also built `q = QueryParser(meta).query(query)`
     # but never used it — _rewrite parses the query string itself.
     inner, outer = private_reader._rewrite(query)
     names = unique(
         [f.name for f in outer.select.namedExpressions.find_nodes(Column)])
     # AVG is derived from sum_age / count_age, so only two columns remain.
     assert (len(names) == 2)
     assert ('count_age' in names)
     assert ('sum_age' in names)
コード例 #13
0
 def test_empty_result_count_typed_notau_prepost(self):
     """A filter matching no rows should still return header plus one row."""
     schema_all = copy.deepcopy(schema)
     schema_all['PUMS.PUMS'].censor_dims = False
     # NOTE(review): the reader and parser use the unmodified `schema`
     # while the private reader gets `schema_all` — confirm intentional.
     reader = PandasReader(df, schema)
     query = QueryParser(schema).queries(
         "SELECT COUNT(*) as c FROM PUMS.PUMS WHERE age > 100")[0]
     private_reader = PrivateReader(reader,
                                    schema_all,
                                    privacy=Privacy(epsilon=1.0))
     # Prime once, then check the result shape is stable across runs.
     private_reader._execute_ast(query, True)
     for _ in range(3):
         print(private_reader._options)
         result = private_reader._execute_ast(query, True)
         print("empty query")
         print(result)
         assert len(result) == 2
コード例 #14
0
 def setup_class(cls):
     """Build one shared PrivateReader over PUMS with censoring disabled."""
     meta = Metadata.from_file(meta_path)
     meta["PUMS.PUMS"].censor_dims = False
     frame = pd.read_csv(csv_path)
     pandas_reader = PandasReader(frame, meta)
     cls.reader = PrivateReader(
         pandas_reader, meta, privacy=Privacy(epsilon=10.0, delta=0.1))
コード例 #15
0
 def release(self, dataset: object) -> Report:
     """
     Dataset is a collection of [Dataset Metadata, PandasReader]
     Releases response to SQL query based on the number of repetitions
     requested by eval_params if actual is set to False.

     Returns a Report keyed "__key__" holding either the list of noisy
     scalar results, or the marker string "noisy_values_empty" when any
     private run produces no data rows.
     """
     # NOTE(review): PrivateReader is called as (dataset[0], dataset[1]);
     # other examples pass (reader, metadata) — confirm the element order
     # of `dataset` matches this constructor's signature.
     private_reader = PrivateReader(dataset[0], dataset[1],
                                    self.privacy_params.epsilon)
     query_ast = private_reader.parse_query_string(self.algorithm)
     # Exact (non-private) result of the same AST, kept for reference.
     srs_orig = private_reader.reader._execute_ast_df(query_ast)
     noisy_values = []
     for idx in range(self.eval_params.repeat_count):
         res = private_reader._execute_ast(query_ast, True)
         # res[1:] strips the header row; empty means no data rows came back.
         if not res[1:]:
             return Report({"__key__": "noisy_values_empty"})
         else:
             noisy_values.append(res[1:][0][0])
     return Report({"__key__": noisy_values})
コード例 #16
0
ファイル: test_having.py プロジェクト: opendp/smartnoise-sdk
 def setup_class(self):
     """Shared PrivateReader with typed columns for the HAVING tests."""
     meta = Metadata.from_file(meta_path)
     meta["PUMS.PUMS"].censor_dims = False
     # Coerce column types so HAVING comparisons behave predictably.
     for col, ctype in (("sex", "int"), ("educ", "int"), ("married", "bool")):
         meta["PUMS.PUMS"][col].type = ctype
     frame = pd.read_csv(csv_path)
     pandas_reader = PandasReader(frame, meta)
     self.reader = PrivateReader(
         pandas_reader, meta,
         privacy=Privacy(epsilon=10.0, delta=10e-3))
コード例 #17
0
 def get_private_reader(self, *ignore, metadata, privacy, database, **kwargs):
     """Return a PrivateReader for `database`, or None when it is unknown."""
     if database not in self.connections:
         return None
     from snsql.sql import PrivateReader
     conn = self.connections[database]
     is_spark = self.engine.lower() == "spark"
     if is_spark:
         # Spark exposes the frame as a temp view, except for the large set.
         if database.lower() != 'pums_large':
             conn.createOrReplaceTempView("PUMS")
         conn = self.session
     priv = PrivateReader.from_connection(
         conn,
         metadata=metadata,
         privacy=privacy
     )
     if is_spark:
         priv.reader.compare.search_path = ["PUMS"]
     return priv
コード例 #18
0
    def test_dpsu_vs_korolova(self):
        """DPSU should keep more GROUP BY keys than Korolova thresholding."""
        query = "SELECT ngram, COUNT(*) as n FROM reddit.reddit GROUP BY ngram ORDER BY n desc"
        reader = PandasReader(df, schema)
        private_reader = PrivateReader(reader,
                                       schema,
                                       privacy=Privacy(epsilon=3.0))
        private_reader.options.max_contrib = 10
        result = private_reader.execute_df(query)

        private_reader_korolova = PrivateReader(reader,
                                                schema,
                                                privacy=Privacy(epsilon=3.0))
        private_reader_korolova.options.dpsu = False
        private_reader_korolova.options.max_contrib = 10
        korolova_result = private_reader_korolova.execute_df(query)

        assert len(result['n']) > len(korolova_result['n'])
        # Bug fix: the original ended with `assert len(final_df) < len(df)`,
        # but `final_df` is never defined in this test, so that line always
        # raised NameError. The assertion above already captures the
        # intended DPSU-vs-Korolova comparison, so the broken line is
        # removed.
コード例 #19
0
ファイル: test_query.py プロジェクト: opendp/smartnoise-sdk
 def test_group_by_noisy_typed_order_desc(self):
     """ORDER BY c DESC must still hold after noise is applied."""
     rdr = PandasReader(df, schema)
     priv = PrivateReader(rdr, schema, privacy=Privacy(epsilon=4.0))
     rs = priv.execute_df("SELECT COUNT(*) AS c, married AS m FROM PUMS.PUMS GROUP BY married ORDER BY c DESC")
     first, second = rs['c'][0], rs['c'][1]
     assert first > second
コード例 #20
0
ファイル: test_query.py プロジェクト: opendp/smartnoise-sdk
 def test_sum_noisy_postprocess(self):
     """POWER post-processing should apply after the noisy SUM."""
     rdr = PandasReader(df, schema)
     priv = PrivateReader(rdr, schema, privacy=Privacy(epsilon=1.0))
     trs = priv.execute_df("SELECT POWER(SUM(age), 2) as age_total FROM PUMS.PUMS")
     total = trs['age_total'][0]
     assert total > 1000 ** 2
コード例 #21
0
ファイル: _aggregation.py プロジェクト: opendp/smartnoise-sdk
    def run_agg_query_df(self,
                         df,
                         metadata,
                         query,
                         confidence,
                         file_name="d1"):
        """
        Run the query using the private reader and input query
        Get query response back for multiple dimensions and aggregations

        :param df: pandas DataFrame holding the raw data
        :param metadata: table metadata describing `df`
        :param query: SQL query with dimension and aggregate columns
        :param confidence: confidence level (unused in this implementation)
        :param file_name: label for the dataset variant (unused here)
        :return: (noisy_df, exact_df, dim_cols, num_cols) — noisy results
            accumulated over repeat_count runs, the exact result, and the
            output column names partitioned into dimensions and measures
        """
        # Getting exact result
        reader = PandasReader(df, metadata)
        # execute() returns the header row first; [1:] keeps data rows only.
        exact_res = reader.execute(query)[1:]

        private_reader = PrivateReader(reader,
                                       metadata,
                                       privacy=Privacy(epsilon=self.epsilon))
        query_ast = private_reader.parse_query_string(query)

        # Distinguishing dimension and measure columns

        # One sample run just to capture the output header names.
        sample_res = private_reader._execute_ast(query_ast, True)
        headers = sample_res[0]

        dim_cols = []
        num_cols = []

        # Partition output columns by type: strings are dimensions,
        # everything else is a numeric measure.
        out_syms = query_ast.all_symbols()
        out_types = [s[1].type() for s in out_syms]
        out_col_names = [s[0] for s in out_syms]

        for col, ctype in zip(out_col_names, out_types):
            if ctype == "string":
                dim_cols.append(col)
            else:
                num_cols.append(col)

        # Repeated query and store results
        res = []
        for idx in range(self.repeat_count):
            dim_rows = []
            num_rows = []
            # cache_exact=True reuses the exact inner result across repeats,
            # so only the noise is re-sampled each iteration.
            singleres = private_reader._execute_ast_df(query_ast,
                                                       cache_exact=True)
            # values = singleres[col]
            for col in dim_cols:
                dim_rows.append(singleres[col].tolist())
            for col in num_cols:
                values = singleres[col].tolist()
                num_rows.append(list(zip(values)))

            # Zip per-column lists back into per-row tuples.
            res.extend(list(zip(*dim_rows, *num_rows)))

        exact_df = pd.DataFrame(exact_res, columns=headers)
        noisy_df = pd.DataFrame(res, columns=headers)

        # Add a dummy dimension column for cases where no dimensions available for merging D1 and D2
        if len(dim_cols) == 0:
            dim_cols.append("__dim__")

        if dim_cols[0] == "__dim__":
            exact_df[dim_cols[0]] = ["key"] * len(exact_df)
            noisy_df[dim_cols[0]] = ["key"] * len(noisy_df)

        return noisy_df, exact_df, dim_cols, num_cols
コード例 #22
0
from snsql.sql.privacy import Privacy
from snsql.sql.parse import QueryParser

# Resolve dataset paths relative to the repository root so the tests can
# run from any working directory.
git_root_dir = subprocess.check_output(
    "git rev-parse --show-toplevel".split(" ")).decode("utf-8").strip()
meta_path = os.path.join(git_root_dir, os.path.join("datasets",
                                                    "PUMS_pid.yaml"))
csv_path = os.path.join(git_root_dir, os.path.join("datasets", "PUMS_pid.csv"))

# Shared fixtures: one parsed multi-aggregate query and an Accuracy helper
# built from its rewritten (subquery, root) form.
meta = Metadata.from_file(meta_path)
pums = pd.read_csv(csv_path)
query = 'SELECT AVG(age), STD(age), VAR(age), SUM(age), COUNT(age) FROM PUMS.PUMS GROUP BY sex'
q = QueryParser(meta).query(query)

privacy = Privacy(alphas=[0.01, 0.05], delta=1 / (math.sqrt(100) * 100))
priv = PrivateReader.from_connection(pums, privacy=privacy, metadata=meta)
subquery, root = priv._rewrite(query)

acc = Accuracy(root, subquery, privacy)


class TestAccuracy:
    """Sanity bounds on count-accuracy estimates at several alphas."""

    def test_count_accuracy(self):
        """Tighter alpha should widen the error bound."""
        err = acc.count(alpha=0.05)
        assert 0.5 < err < 7.53978
        err_wide = acc.count(alpha=0.01)
        assert err_wide < 9.909
        assert err_wide > err

    def test_count_accuracy_small_delta(self):
        # Rebuild the accuracy helper with a much larger delta budget.
        # NOTE(review): this test computes `acc` but asserts nothing —
        # possibly truncated in this excerpt.
        acc = Accuracy(root, subquery, privacy=Privacy(epsilon=1.0, delta=0.1))
コード例 #23
0
ファイル: test_xpath.py プロジェクト: opendp/smartnoise-sdk
# Resolve dataset paths relative to the repository root so the tests can
# run from any working directory.
git_root_dir = subprocess.check_output(
    "git rev-parse --show-toplevel".split(" ")).decode("utf-8").strip()

meta_path = os.path.join(git_root_dir, os.path.join("datasets",
                                                    "PUMS_pid.yaml"))
csv_path = os.path.join(git_root_dir, os.path.join("datasets", "PUMS_pid.csv"))

from snsql.xpath.parse import XPath
# Shared XPath parser used by all test methods below.
p = XPath()

# Shared fixtures: a parsed multi-aggregate query (q) and its rewritten
# (subquery, root) form produced by the private reader.
meta = Metadata.from_file(meta_path)
pums = pd.read_csv(csv_path)
query = 'SELECT AVG(age) + 3, STD(age), VAR(age), SUM(age) / 10, COUNT(age) + 2 FROM PUMS.PUMS'
q = QueryParser(meta).query(query)
reader = SqlReader.from_connection(pums, "pandas", metadata=meta)
priv = PrivateReader(reader, meta, privacy=Privacy(epsilon=1.0))
subquery, root = priv._rewrite(query)


class TestXPathExecutionNoRewrite:
    def test_all_root_descend(self):
        """'//*' should match a large slice of the AST and round-trip to text."""
        path = '//*'  # returns value
        parsed = p.parse(path)
        matches = parsed.evaluate(q)
        assert len(matches) > 40
        assert str(parsed) == path

    def test_all_with_condition(self):
        path = '//*[@left]'  # returns value
        xx = p.parse(path)
        res = xx.evaluate(q)