def makerules(rulesfile):
    rerules = {}
    rules = loadrules(rulesfile)
    for rule in rules:
        rerules[rule_engine.Rule(rule)] = rules[rule]
    return rerules
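# A minimal usage sketch for makerules() above, assuming loadrules() returns a
# mapping of rule text to a value (here, a tag name); the rule texts and the
# record are illustrative.
import rule_engine

rerules = {
    rule_engine.Rule('status == "open"'): 'needs-triage',
    rule_engine.Rule('severity >= 7'): 'high-priority',
}
record = {'status': 'open', 'severity': 9}
print([tag for rule, tag in rerules.items() if rule.matches(record)])
# ['needs-triage', 'high-priority']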
def select(self, *names, from_=None, where='true', limit=None):
    data = self.data
    if from_ is not None:
        data = rule_engine.Rule(from_, context=self._rule_context).evaluate(data)
    if isinstance(data, collections.abc.Mapping):
        data = data.values()
    if not isiterable(data):
        raise ValueError('data source is not iterable')
    rule = rule_engine.Rule(where, context=self._rule_context)
    count = 0
    for match in rule.filter(data):
        if count == limit:
            break
        yield tuple(
            rule_engine.Rule(name, context=self._rule_context).evaluate(match)
            for name in names
        )
        count += 1
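# A self-contained sketch of the pattern select() implements: the 'where' rule
# filters the rows and one rule per requested name projects the columns; the
# data below is illustrative.
import rule_engine

data = [
    {'name': 'alice', 'age': 34},
    {'name': 'bob', 'age': 19},
]
where = rule_engine.Rule('age >= 21')
columns = [rule_engine.Rule(name) for name in ('name', 'age')]
for match in where.filter(data):
    print(tuple(column.evaluate(match) for column in columns))
# prints one tuple per matching row ('bob' is filtered out)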
def main():
    parser = argparse.ArgumentParser(
        conflict_handler='resolve',
        description=DESCRIPTION,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument('-d', '--depth', default=2, type=int, help='the depth to pretty print')
    parser.add_argument('--gzip', action='store_true', default=False, help='decompress the file')
    parser.add_argument('--regex-case-sensitive', default=False, action='store_true', help='use case-sensitive regular expressions')
    parser.add_argument('json_file', type=argparse.FileType('rb'), help='the JSON file to filter')
    parser.add_argument('rule', help='the rule to apply')
    parser.epilog = EPILOG
    arguments = parser.parse_args()

    # OR IGNORECASE into the flags unless case sensitivity was requested
    re_flags = re.MULTILINE
    if not arguments.regex_case_sensitive:
        re_flags |= re.IGNORECASE
    context = rule_engine.Context(default_value=None, regex_flags=re_flags)
    try:
        rule = rule_engine.Rule(arguments.rule, context=context)
    except rule_engine.RuleSyntaxError as error:
        print(error.message)
        return 0

    file_object = arguments.json_file
    if arguments.gzip:
        file_object = gzip.GzipFile(fileobj=file_object)

    total = 0
    matches = 0
    for line in file_object:
        result = json.loads(line.decode('utf-8'))
        total += 1
        if not rule.matches(result):
            continue
        matches += 1
        print(result_to_url(result))
        if arguments.depth > 0:
            for key in BLACKLIST:
                result.pop(key, None)
            pprint.pprint(result, depth=arguments.depth)
    print("rule matched {:,} of {:,} results ({:.2f}%)".format(
        matches, total, (matches / total) * 100))
    return 0
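# The Context(default_value=None) used above is what lets one rule run against
# heterogeneous JSON records: unknown symbols resolve to null instead of
# raising a SymbolResolutionError. A small demonstration:
import rule_engine

context = rule_engine.Context(default_value=None)
rule = rule_engine.Rule('port == 443', context=context)
print(rule.matches({'port': 443}))        # True
print(rule.matches({'host': 'a.b.c'}))    # False -- 'port' resolves to null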
def parse(self, text):
    when, *then = text.split('=>')
    if not then:
        raise ValueError('Invalid rule: ' + text)
    try:
        self.rule = rule_engine.Rule(when.strip())
    except rule_engine.EngineError as e:
        print('Invalid rule: ' + e.message)
    self.tags = self.mapTags(then[0])
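# The format parse() expects is '<rule expression> => <tags>', so an input
# string might look like the following (the tag syntax consumed by mapTags is
# an assumption):
#
#   severity >= 3 and host =~ "prod-.*" => urgent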
def _entry_from_raw(self, entry, index=None):
    entry = entry.copy()
    if 'rule' in entry:
        entry['rule'] = rule_engine.Rule(entry['rule'], context=self._context)
    elif 'source' in entry:
        entry['source'] = ipaddress.ip_network(entry['source'])
    else:
        raise RuntimeError(
            "rule {}contains neither a rule nor a source key".format(
                '' if index is None else '#' + str(index) + ' '
            )
        )
    entry['permanent'] = entry.get('permanent', True)
    return entry
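# A sketch of the raw entries _entry_from_raw() accepts: each one carries
# either a 'rule' expression or a 'source' network, plus an optional
# 'permanent' flag defaulting to true; the values are illustrative.
raw_entries = [
    {'rule': 'port == 22 and protocol == "tcp"', 'permanent': False},
    {'source': '10.0.0.0/8'},
]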
def __init__(self, stack, planetName, description=None, icon=None):
    super().__init__(
        stack,
        f'@{planetName}',
        description=description,
        icon=getPlanetIcon(planetName.lower())
    )
    self._planet = planetName
    self.wsTagRules.append(rule_engine.Rule(
        f'tag =~ "@{planetName}#.*"'
    ))
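# The appended rule uses the =~ regex-match operator, so it accepts any tag
# beginning with '@<planet>#'; a quick check with an illustrative tag:
import rule_engine

print(rule_engine.Rule('tag =~ "@Mars#.*"').matches({'tag': '@Mars#rover'}))  # True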
def modules(module_type=None):
    modules = tuple(msf_modules.values())
    if module_type is not None:
        modules = tuple(module for module in modules if module.get('type') == module_type)
    alert = None
    if filter_expression := (request.args.get('filter') or g.session.get('filter')):
        try:
            rule = rule_engine.Rule(filter_expression, context=rule_context)
            # materialize the generator so evaluation errors are caught here
            modules = tuple(rule.filter(modules))
        except rule_engine.RuleSyntaxError:
            alert = 'The filter expression contained a syntax error.'
        except rule_engine.EngineError:
            alert = 'The filter expression contained an error.'
        else:
            g.session['filter'] = filter_expression
def main():
    parser = argparse.ArgumentParser(
        conflict_handler='resolve',
        description=DESCRIPTION,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument('--gzip', action='store_true', default=False, help='decompress the file')
    parser.add_argument('json_file', type=argparse.FileType('rb'), help='the JSON file to filter')
    arguments = parser.parse_args()

    re_flags = re.IGNORECASE | re.MULTILINE
    context = rule_engine.Context(default_value=None, regex_flags=re_flags)

    file_object = arguments.json_file
    if arguments.gzip:
        file_object = gzip.GzipFile(fileobj=file_object)
    results = [json.loads(line.decode('utf-8')) for line in file_object]

    with open(RULES_FILE, 'r') as file_h:
        rules = yaml.load(file_h, Loader=yaml.FullLoader)
    for vulnerability in rules['rules']:
        try:
            rule = rule_engine.Rule(vulnerability['rule'], context=context)
        except rule_engine.RuleSyntaxError as error:
            print(error.message)
            return 0
        matches = tuple(rule.filter(results))
        if not matches:
            continue
        print(vulnerability['description'])
        references = vulnerability.get('references', {})
        _print_references(references)
        print('Hosts:')
        for match in matches:
            print(" * {}".format(results_filter.result_to_url(match)))
        print()
    return 0
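# The RULES_FILE consumed above would look something like this: a top-level
# 'rules' list whose entries carry 'rule', 'description', and an optional
# 'references' mapping (the specific checks are illustrative).
import yaml

SAMPLE_RULES = """
rules:
  - description: Telnet service exposed
    rule: port == 23
  - description: Expired TLS certificate
    rule: ssl.cert.expired == true
    references:
      cwe: CWE-298
"""
for vulnerability in yaml.safe_load(SAMPLE_RULES)['rules']:
    print(vulnerability['description'], '->', vulnerability['rule'])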
def signal_entry_changed_filter(self, entry):
    text = entry.get_text()
    self._rule = None
    label = self.gobjects['label_filter_summary']
    if text:
        try:
            self._rule = rule_engine.Rule(text, context=self._rule_context)
        except rule_engine.EngineError:
            entry.set_property('secondary-icon-stock', 'gtk-dialog-warning')
            return
    entry.set_property('secondary-icon-stock', None)
    self._tv_model_filter.refilter()
    visible_records = len(self._tv_model_filter)
    all_records = len(self._tv_model)
    label.set_text("Showing {:,} of {:,} {} ({:.1f}%)".format(
        visible_records,
        all_records,
        self.label_text.lower(),
        ((visible_records / all_records) if all_records > 0 else 1.0) * 100
    ))
def flex_check_block_criteria(self, offer):
    try:
        # calculate the relative time between the start and end time
        block_start, block_end, block_relative_time = self.flex_calculate_block_duration(
            offer['startTime'], offer['endTime']
        )
        # add block_relative_time.hours to the offer dict so the rule can evaluate it
        offer['block_relative_hours'] = block_relative_time.hours
        # add the list of acceptable service area ids to evaluate
        offer['accepted_service_area_ids'] = self.criteria_block_service_ids

        criteria_met = True
        today = datetime.now()
        # check whether same-day blocks are acceptable; if not, the block must
        # start on a later day of the current month to pass the criteria
        if not self.criteria_accept_block_same_day:
            if block_start.day <= today.day and block_start.month == today.month:
                self.logger.info(f'block falls outside criteria {block_start.day} {today.day} {block_start}')
                criteria_met = False

        # the criteria the offer must meet for the block to be accepted
        if criteria_met:
            rule = rule_engine.Rule(
                f"""
                rateInfo.currency == '{self.criteria_block_currency}'
                and rateInfo.priceAmount {self.criteria_block_price}
                and block_relative_hours {self.criteria_block_duration_hours}
                and serviceAreaId in accepted_service_area_ids
                """
            )
            criteria_met = rule.matches(offer)
        return criteria_met
    except Exception as e:
        self.logger.error(f'failed to evaluate block criteria {e}')
        raise
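# A sketch of the rule the method above assembles once the criteria
# placeholders are filled in; the threshold values and the offer are
# illustrative.
import rule_engine

rule = rule_engine.Rule("""
    rateInfo.currency == 'USD'
    and rateInfo.priceAmount >= 54.0
    and block_relative_hours <= 5
    and serviceAreaId in accepted_service_area_ids
""")
offer = {
    'rateInfo': {'currency': 'USD', 'priceAmount': 72.0},
    'block_relative_hours': 3,
    'serviceAreaId': 'A1',
    'accepted_service_area_ids': ['A1', 'B2'],
}
print(rule.matches(offer))  # True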
def main():
    parser = argparse.ArgumentParser(
        conflict_handler='resolve',
        description=DESCRIPTION,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    auth_type_parser_group = parser.add_mutually_exclusive_group()
    auth_type_parser_group.add_argument('--auth-token', dest='auth_token', help='authenticate to github with a token')
    auth_type_parser_group.add_argument('--auth-user', dest='auth_user', help='authenticate to github with credentials')
    parser.add_argument('repo_slug', help='the repository to filter')
    parser.add_argument('type', choices=('issues', 'pulls'), help='thing to filter')
    parser.add_argument('rule', help='the rule to apply')
    parser.epilog = EPILOG
    arguments = parser.parse_args()

    # need to define a custom context to use a custom resolver function
    context = rule_engine.Context(resolver=rule_engine.engine.resolve_attribute)
    try:
        rule = rule_engine.Rule(arguments.rule, context=context)
    except rule_engine.RuleSyntaxError as error:
        print(error.message)
        return 0

    gh = _get_github(arguments)
    repo = gh.get_repo(arguments.repo_slug)
    things = tuple(getattr(repo, 'get_' + arguments.type)(state='all'))
    for thing in rule.filter(things):
        print("{0}#{1: <4} - {2}".format(arguments.repo_slug, thing.number, thing.title))
    return 0
def main():
    parser = argparse.ArgumentParser(
        conflict_handler='resolve',
        description=DESCRIPTION,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument('csv_file', type=argparse.FileType('r'), help='the CSV file to filter')
    parser.add_argument('rule', help='the rule to apply')
    arguments = parser.parse_args()

    # need to define a custom context to use a custom resolver function
    context = rule_engine.Context(resolver=resolve_item)
    try:
        rule = rule_engine.Rule(arguments.rule, context=context)
    except rule_engine.RuleSyntaxError as error:
        print(error.message)
        return 0

    csv_reader = csv.DictReader(arguments.csv_file)
    csv_writer = csv.DictWriter(sys.stdout, csv_reader.fieldnames, dialect=csv_reader.dialect)
    # emit the header row so the output is itself a valid CSV document
    csv_writer.writeheader()
    for row in rule.filter(csv_reader):
        csv_writer.writerow(row)
    return 0
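# Because DictReader yields every field as a string, comparisons in the rule
# are string comparisons. A self-contained run of the same pipeline over
# illustrative in-memory data (the default context already resolves keys on
# mappings, so no custom resolver is needed here):
import csv
import io
import sys
import rule_engine

csv_reader = csv.DictReader(io.StringIO('name,dept\nalice,eng\nbob,sales\n'))
csv_writer = csv.DictWriter(sys.stdout, csv_reader.fieldnames)
csv_writer.writeheader()
rule = rule_engine.Rule('dept == "eng"')
for row in rule.filter(csv_reader):
    csv_writer.writerow(row)
# name,dept
# alice,eng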
def __init__(self):
    # all of the rules resolve symbols as object attributes, so one shared
    # context is sufficient
    context = rule_engine.Context(resolver=rule_engine.resolve_attribute)
    self.rule_1 = rule_engine.Rule('main_genre == "" and platform == ""', context=context)
    self.rule_2 = rule_engine.Rule('main_genre != "" and platform == ""', context=context)
    self.rule_3 = rule_engine.Rule('main_genre == "" and platform != ""', context=context)
    self.rule_4 = rule_engine.Rule('main_genre != "" and platform != ""', context=context)
    self.rule_5 = rule_engine.Rule('main_genres_length == 0 and platforms_length != 0', context=context)
    self.rule_6 = rule_engine.Rule('main_genres_length != 0 and platforms_length == 0', context=context)
    self.rule_7 = rule_engine.Rule('main_genres_length == 0 and platforms_length == 0', context=context)
    self.rule_8 = rule_engine.Rule('main_genres_length != 0 and platforms_length != 0', context=context)
    self.rule_9 = rule_engine.Rule('recommendation_num > 20', context=context)
    self.rule_10 = rule_engine.Rule('results_length > 60', context=context)
    self.rule_11 = rule_engine.Rule('recommendation_num > results_length', context=context)
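# The resolve_attribute resolver shared by the rules above reads symbols as
# object attributes rather than mapping keys. A minimal check with a stand-in
# object (the class and values are illustrative):
import types
import rule_engine

context = rule_engine.Context(resolver=rule_engine.resolve_attribute)
rule = rule_engine.Rule('main_genre != "" and platform != ""', context=context)
game = types.SimpleNamespace(main_genre='RPG', platform='PC')
print(rule.matches(game))  # True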
        lambda: increasing(per_user_stats['updates_for_PRO'])
    })
    users_all[user_id] = per_user_stats


if __name__ == "__main__":
    import rule_engine
    from datetime import datetime

    data = get_raw_latest_activity_data()
    rule_readable_data = create_aggregations(data)

    # match users whose PRO update count increased and whose 'since' field
    # matches the rebels.org address pattern
    rule = rule_engine.Rule(
        'PRO_increase == "True" and since =~ ".*@rebels.org$"'
    )
    rule.matches({
        'PRO_increase': 'True',
        'since': '*****@*****.**'
    })  # => True when 'since' ends in @rebels.org
    rule.matches({
        'PRO_increase': 'False',
        'since': '*****@*****.**'
    })  # => False

    is_dev = os.environ.get('USERNAME', 'root') != 'root'
    app.run(debug=is_dev, host="0.0.0.0",
def calculate(self, obj):
    """
    :return: the value calculated when the condition expression is set
    """
    # guard first: compiling an empty rule string would raise a syntax error
    if not self.JXFSJS:
        return 0.0
    return rule_engine.Rule(self.JXFSJS, context=context).evaluate(obj)
def match(self, obj):
    """
    :return: whether the rule condition is true or false
    """
    # guard first: compiling an empty rule string would raise a syntax error
    if not self.GZTJ:
        return False
    return rule_engine.Rule(self.GZTJ, context=context).matches(obj)
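# calculate() and match() above show the two halves of the Rule API:
# evaluate() returns the expression's value while matches() coerces the result
# to a boolean. An illustrative pair (rule-engine represents numbers as
# Decimal internally):
import rule_engine

print(rule_engine.Rule('hours * rate').evaluate({'hours': 10, 'rate': 1.5}))  # 15.0
print(rule_engine.Rule('hours > 8').matches({'hours': 10}))                   # True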
import rule_engine

from cava.models.correlation import base_rule, resolver_context, rule_types

# This will be a list of instantiated base_rule classes.
rules = [
    # Is it snowing? If so, turn on the driveway heater.
    base_rule(
        rule_engine.Rule("model.snowing", context=resolver_context),
        "turn_on_driveway_heater",
        rule_types.trigger,
    ),
]
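# A sketch of how a trigger list like this might be evaluated; base_rule's
# internals and the event object are assumptions, so a stand-in object with
# the same 'model.snowing' shape is used instead of the cava types.
import types
import rule_engine

context = rule_engine.Context(resolver=rule_engine.resolve_attribute)
weather_rule = rule_engine.Rule('model.snowing', context=context)
event = types.SimpleNamespace(model=types.SimpleNamespace(snowing=True))
if weather_rule.matches(event):
    print('turn_on_driveway_heater')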