def check_equal(self, actual_code, correct_code, msg=None, equal=None):
    """Assert that ACTUAL_CODE and CORRECT_CODE parse to the same datalog.

    EQUAL, if supplied, is a binary equality predicate (possibly
    asymmetric); it defaults to ==.  Differences are reported via
    self.output_diffs.
    """
    def one_sided_diff(source, reference, invert=False):
        # Items of SOURCE with no EQUAL-match in REFERENCE.  INVERT
        # flips the argument order so that the 'actual' item is always
        # EQUAL's first argument (EQUAL may be asymmetric).
        unmatched = []
        for item in source:
            if invert:
                matched = any(equal(ref, item) for ref in reference)
            else:
                matched = any(equal(item, ref) for ref in reference)
            if not matched:
                unmatched.append(item)
        return unmatched

    if equal is None:
        equal = lambda x, y: x == y
    LOG.debug("** Checking equality: %s **", msg)
    actual = compile.parse(actual_code)
    correct = compile.parse(correct_code)
    extra = one_sided_diff(actual, correct)
    # in case EQUAL is asymmetric, always supply actual as the first arg
    missing = one_sided_diff(correct, actual, invert=True)
    self.output_diffs(extra, missing, msg)
    LOG.debug("** Finished equality: %s **", msg)
def check_equal(self, actual_code, correct_code, msg=None, equal=None):
    """Assert that ACTUAL_CODE and CORRECT_CODE parse to the same datalog.

    EQUAL, if supplied, is a binary equality predicate (possibly
    asymmetric); it defaults to ==.  Differences are reported via
    self.output_diffs.
    """
    def minus(iter1, iter2, invert=False):
        # Items of ITER1 with no EQUAL-match in ITER2.  INVERT flips
        # the argument order for asymmetric equality checks.
        extra = []
        for i1 in iter1:
            found = False
            for i2 in iter2:
                # for asymmetric equality checks
                if invert:
                    test_result = equal(i2, i1)
                else:
                    test_result = equal(i1, i2)
                if test_result:
                    found = True
                    break
            if not found:
                extra.append(i1)
        return extra

    if equal is None:
        equal = lambda x, y: x == y
    # Fix: use lazy %-style logging args (consistent with the other
    # logging calls in this file) instead of eager str.format().
    LOG.debug("** Checking equality: %s **", msg)
    actual = compile.parse(actual_code)
    correct = compile.parse(correct_code)
    extra = minus(actual, correct)
    # in case EQUAL is asymmetric, always supply actual as the first arg
    missing = minus(correct, actual, invert=True)
    self.output_diffs(extra, missing, msg)
    LOG.debug("** Finished equality: %s **", msg)
def test_column_references_multiple_atoms(self):
    """Test column references occurring in multiple atoms in a rule."""
    ms = compile.ModuleSchemas()
    ms['nova'] = compile.Schema({'q': ('id', 'name', 'status'),
                                 'r': ('id', 'age', 'weight')})

    def check(code, correct, note):
        # Parse CODE under the nova schema and verify that the column
        # references expand to the positional form CORRECT.
        actual = compile.parse(code, ms)
        eq = helper.datalog_same(helper.pol2str(actual), correct)
        self.assertTrue(eq, note)

    # Multiple atoms
    check("p(x) :- nova:q(id=x, 2=y), nova:r(id=x)",
          "p(x) :- nova:q(x, x0, y), nova:r(x, y0, y1)",
          'Multiple atoms')
    # Multiple atoms sharing column name but different variables
    check("p(x) :- nova:q(id=x), nova:r(id=y)",
          "p(x) :- nova:q(x, x0, x1), nova:r(y, y0, y1)",
          'Multiple atoms shared column name')
    # Multiple atoms, same table
    check("p(x) :- nova:q(id=x, 2=y), nova:q(id=x)",
          "p(x) :- nova:q(x, x0, y), nova:q(x, y0, y1)",
          'Multiple atoms, same table')
def datalog_equal(actual_code, correct_code,
                  msg=None, equal=None, module_schemas=None,
                  output_diff=True):
    """Check if the strings given by actual_code and CORRECT_CODE
    represent the same datalog.
    """
    def one_sided_diff(source, reference, invert=False):
        # Items of SOURCE with no EQUAL-match in REFERENCE.  INVERT
        # flips the argument order so the 'actual' item is always
        # EQUAL's first argument (EQUAL may be asymmetric).
        unmatched = []
        for item in source:
            if invert:
                matched = any(equal(ref, item) for ref in reference)
            else:
                matched = any(equal(item, ref) for ref in reference)
            if not matched:
                unmatched.append(item)
        return unmatched

    if equal is None:
        equal = lambda x, y: x == y
    LOG.debug("** Checking equality: %s **", msg)
    actual = compile.parse(actual_code, module_schemas=module_schemas)
    correct = compile.parse(correct_code, module_schemas=module_schemas)
    extra = one_sided_diff(actual, correct)
    # in case EQUAL is asymmetric, always supply actual as the first arg
    # and set INVERT to true
    missing = one_sided_diff(correct, actual, invert=True)
    if output_diff:
        output_diffs(extra, missing, msg)
    LOG.debug("** Finished equality: %s **", msg)
    return len(extra) == 0 and len(missing) == 0
def check_err(code, errmsg, msg):
    # Parsing CODE must raise CongressException whose message
    # contains ERRMSG; otherwise the test fails with MSG.
    # (ms and self are free variables from the enclosing scope.)
    try:
        compile.parse(code, ms)
    except compile.CongressException as e:
        detail = "Err message '{}' should include '{}'".format(
            str(e), errmsg)
        self.assertTrue(errmsg in str(e), msg + ": " + detail)
    else:
        self.fail("Error should have been thrown but was not: " + msg)
def create_unify(self, atom_string1, atom_string2, msg, change_num,
                 unifier1=None, unifier2=None, recursive_str=False):
    """Create unification and check basic results.

    Bi-unifies the atoms parsed from ATOM_STRING1 and ATOM_STRING2,
    asserts unification succeeded, that plugging both unifiers yields
    equal atoms, and (when CHANGE_NUM is not None) that exactly
    CHANGE_NUM changes were recorded.

    Returns (p1, unifier1, p2, unifier2, changes).
    """
    def str_uni(u):
        # Optionally use the recursive string form of a unifier.
        if recursive_str:
            return u.recur_str()
        else:
            return str(u)

    def print_unifiers(changes=None):
        LOG.debug("unifier1: %s", str_uni(unifier1))
        LOG.debug("unifier2: %s", str_uni(unifier2))
        if changes is not None:
            LOG.debug("changes: %s",
                      ";".join(str(x) for x in changes))

    def log_failure():
        # Shared failure dump for both assertion paths below
        # (was duplicated verbatim twice).  Uses lazy %-style
        # logging args, consistent with the rest of this file.
        LOG.debug("Failure: bi-unify(%s, %s) produced %s and %s",
                  p1, p2, str_uni(unifier1), str_uni(unifier2))
        LOG.debug("plug(%s, %s) = %s", p1, str_uni(unifier1), p1p)
        LOG.debug("plug(%s, %s) = %s", p2, str_uni(unifier2), p2p)

    if msg is not None:
        self.open(msg)
    if unifier1 is None:
        unifier1 = runtime.TopDownTheory.new_bi_unifier()
    if unifier2 is None:
        unifier2 = runtime.TopDownTheory.new_bi_unifier()
    p1 = compile.parse(atom_string1)[0]
    p2 = compile.parse(atom_string2)[0]
    changes = unify.bi_unify_atoms(p1, unifier1, p2, unifier2)
    self.assertTrue(changes is not None)
    print_unifiers(changes)
    p1p = p1.plug(unifier1)
    p2p = p2.plug(unifier2)
    print_unifiers(changes)
    if not p1p == p2p:
        log_failure()
        self.fail()
    if change_num is not None and len(changes) != change_num:
        log_failure()
        LOG.debug("Expected %s changes; computed %s changes",
                  change_num, len(changes))
        self.fail()
    LOG.debug("unifier1: %s", str_uni(unifier1))
    LOG.debug("unifier2: %s", str_uni(unifier2))
    if msg is not None:
        # NOTE(review): this calls open() a second time; check_unify_fail
        # pairs open() with close() -- possibly should be self.close(msg).
        # Preserved as-is to avoid changing test bookkeeping.
        self.open(msg)
    return (p1, unifier1, p2, unifier2, changes)
def create_unify(self, atom_string1, atom_string2, msg, change_num,
                 unifier1=None, unifier2=None, recursive_str=False):
    """Create unification and check basic results.

    Bi-unifies the atoms parsed from ATOM_STRING1 and ATOM_STRING2,
    asserts unification succeeded, that plugging both unifiers yields
    equal atoms, and (when CHANGE_NUM is not None) that exactly
    CHANGE_NUM changes were recorded.

    Returns (p1, unifier1, p2, unifier2, changes).
    """
    def str_uni(u):
        # Optionally use the recursive string form of a unifier.
        if recursive_str:
            return u.recur_str()
        else:
            return str(u)

    def print_unifiers(changes=None):
        LOG.debug("unifier1: %s", str_uni(unifier1))
        LOG.debug("unifier2: %s", str_uni(unifier2))
        if changes is not None:
            LOG.debug("changes: %s", ";".join([str(x) for x in changes]))

    def log_failure():
        # Shared failure dump for both assertion paths below
        # (was duplicated verbatim twice).
        LOG.debug("Failure: bi-unify(%s, %s) produced %s and %s",
                  p1, p2, str_uni(unifier1), str_uni(unifier2))
        LOG.debug("plug(%s, %s) = %s", p1, str_uni(unifier1), p1p)
        LOG.debug("plug(%s, %s) = %s", p2, str_uni(unifier2), p2p)

    if msg is not None:
        self.open(msg)
    if unifier1 is None:
        unifier1 = TopDownTheory.new_bi_unifier()
    if unifier2 is None:
        unifier2 = TopDownTheory.new_bi_unifier()
    p1 = compile.parse(atom_string1)[0]
    p2 = compile.parse(atom_string2)[0]
    changes = unify.bi_unify_atoms(p1, unifier1, p2, unifier2)
    self.assertTrue(changes is not None)
    print_unifiers(changes)
    p1p = p1.plug(unifier1)
    p2p = p2.plug(unifier2)
    print_unifiers(changes)
    if not p1p == p2p:
        log_failure()
        self.fail()
    if change_num is not None and len(changes) != change_num:
        log_failure()
        LOG.debug("Expected %s changes; computed %s changes",
                  change_num, len(changes))
        self.fail()
    LOG.debug("unifier1: %s", str_uni(unifier1))
    LOG.debug("unifier2: %s", str_uni(unifier2))
    if msg is not None:
        # NOTE(review): this calls open() a second time; check_unify_fail
        # pairs open() with close() -- possibly should be self.close(msg).
        # Preserved as-is to avoid changing test bookkeeping.
        self.open(msg)
    return (p1, unifier1, p2, unifier2, changes)
def test_rule_recursion(self):
    """Check recursion detection over several rule sets."""
    # Rule sets that must NOT be flagged as recursive.
    non_recursive = [
        'p(x) :- q(x), r(x) q(x) :- r(x) r(x) :- t(x)',
    ]
    # Rule sets that MUST be flagged as recursive (direct, mutual,
    # and through negation).
    recursive = [
        'p(x) :- p(x)',
        'p(x) :- q(x) q(x) :- r(x) r(x) :- p(x)',
        'p(x) :- q(x) q(x) :- not p(x)',
        'p(x) :- q(x), s(x) q(x) :- t(x) s(x) :- p(x)',
    ]
    for code in non_recursive:
        self.assertFalse(compile.is_recursive(compile.parse(code)))
    for code in recursive:
        self.assertTrue(compile.is_recursive(compile.parse(code)))
def check_unify_fail(self, atom_string1, atom_string2, msg):
    """Check that the bi-unification fails."""
    self.open(msg)
    uni1 = TopDownTheory.new_bi_unifier()
    uni2 = TopDownTheory.new_bi_unifier()
    atom1 = compile.parse(atom_string1)[0]
    atom2 = compile.parse(atom_string2)[0]
    result = unify.bi_unify_atoms(atom1, uni1, atom2, uni2)
    if result is not None:
        # Unification unexpectedly succeeded: dump state, then fail.
        LOG.debug("Failure failure: bi-unify(%s, %s) produced %s and %s",
                  atom1, atom2, uni1, uni2)
        LOG.debug("plug(%s, %s) = %s", atom1, uni1, atom1.plug(uni1))
        LOG.debug("plug(%s, %s) = %s", atom2, uni2, atom2.plug(uni2))
        self.fail()
    self.close(msg)
def check_unify_fail(self, atom_string1, atom_string2, msg):
    """Check that the bi-unification fails."""
    self.open(msg)
    uni1 = runtime.TopDownTheory.new_bi_unifier()
    uni2 = runtime.TopDownTheory.new_bi_unifier()
    atom1 = compile.parse(atom_string1)[0]
    atom2 = compile.parse(atom_string2)[0]
    result = unify.bi_unify_atoms(atom1, uni1, atom2, uni2)
    if result is not None:
        # Unification unexpectedly succeeded: dump state, then fail.
        LOG.debug("Failure failure: bi-unify(%s, %s) produced %s and %s",
                  atom1, atom2, uni1, uni2)
        LOG.debug("plug(%s, %s) = %s", atom1, uni1, atom1.plug(uni1))
        LOG.debug("plug(%s, %s) = %s", atom2, uni2, atom2.plug(uni2))
        self.fail()
    self.close(msg)
def datalog_equal(actual_code, correct_code,
                  msg=None, equal=None, theories=None,
                  output_diff=True):
    """Check equality.

    Check if the strings given by actual_code and CORRECT_CODE
    represent the same datalog.
    """
    def one_sided_diff(source, reference, invert=False):
        # Items of SOURCE with no EQUAL-match in REFERENCE.  INVERT
        # flips the argument order so the 'actual' item is always
        # EQUAL's first argument (EQUAL may be asymmetric).
        unmatched = []
        for item in source:
            if invert:
                matched = any(equal(ref, item) for ref in reference)
            else:
                matched = any(equal(item, ref) for ref in reference)
            if not matched:
                unmatched.append(item)
        return unmatched

    if equal is None:
        equal = lambda x, y: x == y
    LOG.debug("** Checking equality: %s **", msg)
    actual = compile.parse(actual_code, theories=theories)
    correct = compile.parse(correct_code, theories=theories)
    extra = one_sided_diff(actual, correct)
    # in case EQUAL is asymmetric, always supply actual as the first arg
    # and set INVERT to true
    missing = one_sided_diff(correct, actual, invert=True)
    if output_diff:
        output_diffs(extra, missing, msg)
    LOG.debug("** Finished equality: %s **", msg)
    is_equal = not extra and not missing
    if not is_equal:
        LOG.debug('datalog_equal failed, extras: %s, missing: %s',
                  extra, missing)
    return is_equal
def _parse_rules(self, string, errmsg=''):
    """Parse STRING into rules, wrapping syntax errors for the API.

    ERRMSG, if non-empty, is prefixed to the error detail.
    """
    prefix = errmsg + ":: " if errmsg else errmsg
    # basic parsing
    try:
        return compile.parse(string)
    except compile.CongressException as e:
        (num, desc) = error_codes.get('rule_syntax')
        raise webservice.DataModelException(
            num, desc + ":: " + prefix + str(e))
def add_item(self, item, params, id_=None, context=None):
    """Add item to model.

    Args:
        item: The item to add to the model
        params: A dict-like object containing parameters from the
            request query string and body.
        id_: The ID of the item, or None if an ID should be generated
        context: Key-values providing frame of reference of request

    Returns:
        Tuple of (ID, newly_created_item)

    Raises:
        KeyError: ID already exists.
    """
    # Rule IDs are generated, never supplied by the caller.
    if id_ is not None:
        LOG.debug("add_item error: should not be given ID")
        raise webservice.DataModelException(
            *error_codes.get('add_item_id'))
    str_rule = item['rule']
    try:
        # Parse against the engine's schemas; exactly one rule expected.
        rule = compile.parse(str_rule, self.engine.module_schemas)
        if len(rule) == 1:
            rule = rule[0]
        else:
            LOG.debug("add_item error: given too many rules")
            (num, desc) = error_codes.get('multiple_rules')
            raise webservice.DataModelException(
                num, desc + ":: Received multiple rules: " +
                "; ".join(str(x) for x in rule))
        changes = self.change_rule(rule, context)
    except compile.CongressException as e:
        LOG.debug("add_item error: invalid rule syntax")
        (num, desc) = error_codes.get('rule_syntax')
        raise webservice.DataModelException(num, desc + "::" + str(e))
    # Persist the rule only when the engine reports it as an applied
    # change; otherwise the rule already existed.
    for change in changes:
        if change.formula == rule:
            d = {'rule': rule.pretty_str(),
                 'id': rule.id,
                 'comment': None}
            policy_name = self.policy_name(context)
            db_policy_rules.add_policy_rule(d['id'], policy_name,
                                            str_rule, d['comment'])
            return (rule.id, d)
    # Duplicate rule: report as an HTTP 409 conflict.
    num, desc = error_codes.get('rule_already_exists')
    raise webservice.DataModelException(
        num, desc, http_status_code=httplib.CONFLICT)
def add_item(self, item, params, id_=None, context=None):
    """Add item to model.

    Args:
        item: The item to add to the model
        params: A dict-like object containing parameters from the
            request query string and body.
        id_: The ID of the item, or None if an ID should be generated
        context: Key-values providing frame of reference of request

    Returns:
        Tuple of (ID, newly_created_item)

    Raises:
        KeyError: ID already exists.
    """
    # TODO(thinrichs): add comment property to rule
    if id_ is not None:
        # Fix: error_codes.get returns a (num, desc) pair (it is
        # tuple-unpacked everywhere else in this file), so it must be
        # splatted positionally with *, not as keyword args with **
        # (which raises TypeError on a non-mapping).
        raise webservice.DataModelException(
            *error_codes.get('add_item_id'))
    str_rule = item['rule']
    try:
        # Exactly one rule expected from the parse.
        rule = compile.parse(str_rule)
        if len(rule) == 1:
            rule = rule[0]
        else:
            (num, desc) = error_codes.get('add_item_multiple_rules')
            raise webservice.DataModelException(
                num, desc + ":: Received multiple rules: " +
                "; ".join(str(x) for x in rule))
        changes = self.change_rule(rule, context)
    except compile.CongressException as e:
        (num, desc) = error_codes.get('rule_syntax')
        raise webservice.DataModelException(num, desc + "::" + str(e))
    # If the engine applied the rule as a change, report it as new.
    for change in changes:
        if change.formula == rule:
            d = {'rule': str(rule),
                 'id': rule.id,
                 'comment': None}
            return (rule.id, d)
    # rule already existed
    policy_name = self.policy_name(context)
    for p in self.engine.theory[policy_name].policy():
        if p == rule:
            d = {'rule': str(rule),
                 'id': rule.id,
                 'comment': 'None'}
            return (rule.id, d)
    raise Exception("add_item added a rule but then could not find it.")
def test_column_references_lowlevel(self):
    """Test column-references with low-level checks."""
    # Do the first one the painful way, to ensure the parser
    # is doing something reasonable.
    ms = compile.ModuleSchemas()
    ms['nova'] = compile.Schema({'q': ('id', 'name', 'status')})
    rules = compile.parse("p(x) :- nova:q(id=x)", ms)
    self.assertEqual(len(rules), 1)
    rule = rules[0]
    # Head must be exactly p(x).
    self.assertEqual(len(rule.heads), 1)
    self.assertEqual(rule.head.table, "p")
    self.assertEqual(len(rule.head.arguments), 1)
    self.assertEqual(rule.head.arguments[0].name, 'x')
    # Body literal must be nova:q expanded to all three columns.
    self.assertEqual(len(rule.body), 1)
    lit = rule.body[0]
    self.assertFalse(lit.is_negated())
    self.assertEqual(lit.table, "nova:q")
    args = lit.arguments
    self.assertEqual(len(args), 3)
    self.assertEqual(args[0].name, 'x')
    # The generated argument names must be pairwise distinct.
    self.assertNotEqual(args[0].name, args[1].name)
    self.assertNotEqual(args[0].name, args[2].name)
    self.assertNotEqual(args[1].name, args[2].name)
def parse(self, string):
    """Parse STRING into policy, supplying this object's theories."""
    return compile.parse(string, theories=self.theory)
def str2pol(policy_string):
    """Parse POLICY_STRING into its compiled policy representation."""
    return compile.parse(policy_string)
def str2pol(policy_string, theories=None):
    """Parse POLICY_STRING into compiled policy, optionally with THEORIES."""
    return compile.parse(policy_string, theories=theories)
def test_column_references_atom(self):
    """Test column references occurring in a single atom in a rule."""
    ms = compile.ModuleSchemas()
    ms['nova'] = compile.Schema({'q': ('id', 'name', 'status')})

    def check(code, correct, note, schemas=ms):
        # Parse CODE under SCHEMAS and verify the column references
        # expand to the positional form CORRECT.
        actual = compile.parse(code, schemas)
        eq = helper.datalog_same(helper.pol2str(actual), correct)
        self.assertTrue(eq, note)

    # Multiple column names
    check("p(x) :- nova:q(id=x, status=y)",
          "p(x) :- nova:q(x, w, y)",
          'Multiple column names')
    # Multiple column numbers
    check("p(x) :- nova:q(0=x, 1=y, 2=z)",
          "p(x) :- nova:q(x, y, z)",
          'Multiple column numbers')
    # Mix column names and numbers
    check("p(x) :- nova:q(id=x, 2=y)",
          "p(x) :- nova:q(x, w, y)",
          'Mix names and numbers')
    # Object constants
    check("p(x) :- nova:q(id=3, 2=2)",
          "p(x) :- nova:q(3, w, 2)",
          'Object constants')
    # Out of order
    check("p(x, y) :- nova:q(status=y, id=x)",
          "p(x, y) :- nova:q(x, z, y)",
          'Out of order')
    # Out of order with numbers
    check("p(x, y) :- nova:q(1=y, 0=x)",
          "p(x, y) :- nova:q(x, y, z)",
          'Out of order with numbers')
    # Positional plus named
    check("p(x, y) :- nova:q(x, status=y)",
          "p(x, y) :- nova:q(x, z, y)",
          'Positional plus named')
    # Positional plus named 2
    check("p(x, y, z) :- nova:q(x, y, 2=z)",
          "p(x, y, z) :- nova:q(x, y, z)",
          'Positional plus named 2')
    # Pure positional (different since we are providing schema)
    check("p(x, y, z) :- nova:q(x, y, z)",
          "p(x, y, z) :- nova:q(x, y, z)",
          'Pure positional')
    # Pure positional (without schema)
    check("p(x) :- nova:q(x, y, z)",
          "p(x) :- nova:q(x, y, z)",
          'Pure positional without schema',
          schemas=compile.ModuleSchemas())
def test_rule_stratification(self):
    """Check stratification detection over several rule sets."""
    # Rule sets that MUST be stratified (including recursion without
    # negation and chains of negation without a cycle through it).
    stratified = [
        'p(x) :- not q(x)',
        'p(x) :- p(x)',
        'p(x) :- q(x) q(x) :- p(x)',
        'p(x) :- q(x) q(x) :- not r(x)',
        'p(x) :- not q(x) q(x) :- not r(x)',
        'p(x) :- not q(x) q(x) :- not r(x) r(x) :- not s(x)',
        'p(x) :- q(x), r(x) q(x) :- not t(x) r(x) :- not s(x)',
    ]
    # Rule sets with a cycle through negation: NOT stratified.
    unstratified = [
        'p(x) :- not p(x)',
        'p(x) :- q(x) q(x) :- not p(x)',
        'p(x) :- q(x),r(x) r(x) :- not p(x)',
        'p(x) :- q(x), r(x) q(x) :- not t(x) r(x) :- not s(x) '
        't(x) :- p(x)',
    ]
    for code in stratified:
        self.assertTrue(compile.is_stratified(compile.parse(code)))
    for code in unstratified:
        self.assertFalse(compile.is_stratified(compile.parse(code)))
def create(rootdir, statedir, config_file, config_override=None):
    """Get Congress up and running when src is installed in rootdir,
    i.e. ROOTDIR=/path/to/congress/congress.
    CONFIG_OVERRIDE is a dictionary of dictionaries with configuration
    values that overrides those provided in CONFIG_FILE.  The top-level
    dictionary has keys for the CONFIG_FILE sections, and the
    second-level dictionaries store values for that section.
    """
    LOG.debug("Starting Congress with rootdir=%s, statedir=%s, "
              "datasource_config=%s, config_override=%s",
              rootdir, statedir, config_file, config_override)

    # create message bus
    cage = d6cage.d6Cage()
    cage.system_service_names.add(cage.name)

    # read in datasource configurations
    cage.config = initialize_config(config_file, config_override)

    # path to congress source dir
    src_path = os.path.join(rootdir, "congress")

    # add policy engine
    engine_path = os.path.join(src_path, "policy/dsepolicy.py")
    LOG.info("main::start() engine_path: %s", engine_path)
    cage.loadModule("PolicyEngine", engine_path)
    cage.createservice(
        name="engine",
        moduleName="PolicyEngine",
        description="Policy Engine (DseRuntime instance)",
        args={'d6cage': cage, 'rootdir': src_path})
    engine = cage.service_object('engine')
    if statedir is not None:
        engine.load_dir(statedir)
    engine.initialize_table_subscriptions()
    cage.system_service_names.add(engine.name)
    engine.debug_mode()  # should take this out for production

    # add the API model services; each one follows the identical wiring
    # pattern, so the seven formerly copy-pasted stanzas are collapsed
    # into a single helper.
    # TODO(thinrichs): change to real API path.
    def load_api_service(short_name):
        # e.g. 'policy' -> module API-policy, service api-policy,
        # source file api/policy_model.py
        api_path = os.path.join(src_path, "api/%s_model.py" % short_name)
        LOG.info("main::start() api_path: %s", api_path)
        module_name = "API-" + short_name
        service_name = "api-" + short_name
        cage.loadModule(module_name, api_path)
        cage.createservice(
            name=service_name,
            moduleName=module_name,
            description="%s DSE instance" % module_name,
            args={'policy_engine': engine})
        cage.system_service_names.add(service_name)

    for short_name in ('policy', 'rule', 'table', 'row', 'datasource',
                       'status', 'schema'):
        load_api_service(short_name)

    # have policy-engine subscribe to api calls
    # TODO(thinrichs): either have API publish everything to DSE bus and
    # have policy engine subscribe to all those messages
    # OR have API interact with individual components directly
    # and change all tests so that the policy engine does not need to be
    # subscribed to 'policy-update'
    engine.subscribe('api-rule', 'policy-update',
                     callback=engine.receive_policy_update)

    # spin up all the configured services, if we have configured them
    if cage.config:
        for name in cage.config:
            if 'module' in cage.config[name]:
                load_data_service(name, cage.config[name], cage, src_path)
                # inform policy engine about schema
                service = cage.service_object(name)
                engine.set_schema(name, service.get_schema())

    # populate rule api data, needs to be done after models are loaded.
    # FIXME(arosen): refactor how we're loading data and api.
    rules = db_policy_rules.get_policy_rules()
    for rule in rules:
        parsed_rule = compile.parse(rule.rule)[0]
        cage.services['api-rule']['object'].change_rule(
            parsed_rule,
            {'policy_id': rule.policy_name})
    return cage
def str2pol(policy_string, module_schemas=None):
    """Parse POLICY_STRING into compiled policy, optionally with schemas."""
    return compile.parse(policy_string, module_schemas=module_schemas)
def string_to_database(string, theories=None):
    """Parse STRING (optionally with THEORIES) and build a database."""
    return list_to_database(compile.parse(
        string, theories=theories))