def sched_timeout(self):
    """Timer-thread loop: after each timeout window, add more rules to the
    running filter until every defined rule is active, then exit.

    NOTE(review): reconstructed from a collapsed one-line source; the exact
    extent of the `with self.lock:` block is an assumption — confirm.
    """
    #start timer operations after the filter has started actual filtering
    self.start_timer_event.wait()
    while True:
        #wait
        time.sleep(self.config.timeout)
        #wait for signal from main thread: the signal is send if the specified no. of packets is met
        self.notif_timer_event.wait()
        if self.config.rules_count < Rule.num_of_rules():
            with self.lock:
                # Clamp the increment so we never add more rules than remain.
                self.config.incr_rules_count = \
                    self.config.incr_rules_count if((Rule.num_of_rules() - self.config.rules_count) > self.config.incr_rules_count) \
                    else (Rule.num_of_rules() - self.config.rules_count)
                self.config.update()
                #clear event
                self.notif_timer_event.clear()
                #reset pkt_count
                self.pkt_count = 0
                if self.config.rules_count == Rule.num_of_rules():
                    print(" # all rules defined have been added.")
                #stop timer incase all rules have been added to self._rules
                #resume capture
                print(' # resuming capture.\n')
                print("    ", end="")
                print("src".ljust(20) + "dst".ljust(20) + "protocol".ljust(10) + "ttl".ljust(10) + "action".ljust(15) + \
                      "memory(MBs)".ljust(15) + "cummulative cpu time(secs)")
                print("    ", end="")
                print(('-' * 116))
        else:
            #stop timer, all defined rules have been added
            break
def __init__(self, rules_db):
    # Built-in rule with head "atom_rule" matching the expression atom(X);
    # empty premise list, provenance string "(built-in)".
    Rule.__init__(
        self, [],
        expression_from_list(
            [rules_db.add_constant_by_name("atom"),
             Variable("X")]),
        rules_db.add_constant_by_name("atom_rule"), "(built-in)", rules_db)
def get_mode_one_config(parser, p_result):
    """Validate mode-one CLI options and build a ModeOneFilterConfig.

    Prints usage and exits (status 0) when required options are missing.
    """
    problems = []
    if not p_result.rules_count:
        problems.append(" - no. of rules not provided.\n")
    if not p_result.rule_nums:
        problems.append(" - rule numbers (comma-seperated) not provided.\n")
    if problems:
        print('[-] issues:\n' + ''.join(problems))
        parser.print_usage()
        sys.exit(0)
    if p_result.rules_count == 'all':
        # Shortcut: use every defined rule.
        p_result.rules_count = Rule.num_of_rules()
        p_result.rules = list(Rule.get_all_rules().keys())
    else:
        tokens = [tok for tok in p_result.rule_nums.split(',') if tok]
        p_result.rule_nums = [int(tok) for tok in tokens]
        p_result.rules_count = len(p_result.rule_nums)
        print(' # initializing rules', end='')
        p_result.rules = Rule.get_rules_from_nums(p_result.rule_nums)
    return ModeOneFilterConfig(p_result)
def update_rules(self):
    """ create a new list of random rules. """
    # Draw self.rules_count fresh random rule numbers, then materialize
    # the corresponding Rule objects.
    self.rule_nums = Rule.get_rules_randomly(self.rules_count)
    print(' # initializing rules.', end=' ')
    self.rules = Rule.get_rules_from_nums(self.rule_nums)
def execute(self):
    """Interactively collect rules and an action, then apply the action to
    the union (Any) or intersection (All) of the rules' matched messages.

    BUG FIX: set.union()/set.intersection() return NEW sets; the original
    discarded their results, so result_ids never accumulated matches.
    """
    ListRules().execute()
    operator = input(
        "Choose operator to apply to rules(number).\n1. Any \n2. All\nChoose: "
    )
    try:
        operator = int(operator)
    except ValueError:
        print("Choose numbers\n 1. Any \n2. All\n ")
    rules_to_apply = []
    while True:
        ListRules().execute()
        rule = input(
            "Rule Name to apply or done in case you wish to execute now: ")
        rule = rule.strip()
        if rule == "done":
            break
        if self.fetch_rule(rule):
            # Avoid applying the same rule twice.
            if rule not in rules_to_apply:
                rules_to_apply.append(rule)
        else:
            print("Could not find rule")
    ListAction().execute()
    action_name = input("Action to apply. Choose by name: ")
    action_types = [
        action.action_name for action in ActionType.__subclasses__()
    ]
    if action_name not in action_types:
        print("Could not find the action")
    if action_name == 'move':
        move_folder = input("Where should I move the emails: ")
    else:
        move_folder = None
    result_ids = set()
    for rule in rules_to_apply:
        rule = Rule(rule)
        rule.from_json(rule.rule_file)
        result = rule.fetch_for_rule()
        new_result_ids = {message.gmail_id for message in result}
        if operator == 1:
            # Any: accumulate the union in place.
            result_ids |= new_result_ids
        else:
            # All: seed with the first rule's matches, then intersect.
            if len(result_ids) == 0:
                result_ids = new_result_ids
            else:
                result_ids &= new_result_ids
    actions = [
        action for action in ActionType.__subclasses__()
        if action.action_name == action_name
    ]
    action = actions[0]
    action().apply(result_ids, move_folder=move_folder)
def backup(args):
    """Run the rsync backup described by the JSON config at args.config and
    send a DSM notification with the outcome.

    BUG FIX: the error-log file is opened in text mode, so readlines()
    already yields str — the original called .decode() on a str, which
    raises AttributeError whenever rsync reported errors.
    """
    with open(args.config) as config_file:
        config = json.load(config_file)
    rule = Rule(config)
    if args.dryrun:
        # dryrun does not work anymore ('it is dry-run now)
        raise NotImplementedError('dryrun does not work anymore (it is dry-run now)')
    rule.options.dryrun = args.dryrun
    now = datetime.datetime.now()
    rsync_command = ['rsync'] + rule.get_optional_args(now) + rule.get_positional_args(now)
    # create dirs for log files
    success_filepath = rule.log.get_sucess_filepath(now)
    if not args.debug and success_filepath:
        make_dirpath(success_filepath)
    progress_filepath = rule.log.get_progress_filepath(now)
    if args.verbose or args.debug or not progress_filepath:
        progress_file = sys.stdout
    else:
        make_dirpath(progress_filepath)
        progress_file = open(progress_filepath, "w")
    error_filepath = rule.log.get_error_filepath(now)
    if args.verbose or args.debug or not error_filepath:
        error_file = sys.stderr
    else:
        make_dirpath(error_filepath)
        error_file = open(error_filepath, 'w') if error_filepath else PIPE
    # process
    if args.debug:
        print(' '.join(rsync_command))
    else:
        rsync_process = Popen(rsync_command, stdout=progress_file, stderr=error_file)
        rsync_process.wait()
        # some cleaning
        if progress_filepath and exists(progress_filepath):
            remove(progress_filepath)
        if error_filepath and exists(error_filepath):
            is_errors = not stat(error_filepath).st_size == 0
            if not is_errors:
                remove(error_filepath)
                user_message = 'sucessfull save of {}.'.format(rule.source.dirpath)
            else:
                with open(error_filepath, 'r') as error_file:
                    # text mode: lines are already str, no .decode() needed
                    last_error = error_file.readlines()[-1]
                user_message = 'finished with errors on save of {}: {}'.format(rule.source.dirpath, last_error)
        else:
            user_message = 'done'
        notify_command = ['synodsmnotify', '@administrators', 'Backup {}'.format(basename(realpath(rule.source.dirpath))), user_message]
        call(notify_command)
    logging.debug('done')
class Test(object):
    # default: four Rule fields covering every combination of the
    # required / allowed_none flags, each mapped to a private target field.
    f1 = Rule("f1", to_field="__f1", required=False, allowed_none=True)   # optional, may be None
    f2 = Rule("f2", to_field="__f2", required=False, allowed_none=False)  # optional, must not be None
    f3 = Rule("f3", to_field="__f3", required=True, allowed_none=False)   # required, must not be None
    f4 = Rule("f4", to_field="__f4", required=True, allowed_none=True)    # required, may be None
def get_mode_four_config(parser, p_result):
    """Validate mode-four CLI ranges, draw a random initial timeout and
    rules count from them, and build a ModeFourFilterConfig.

    Prints usage and exits (status 0) when options are missing/invalid.
    """
    issues = ''
    if not p_result.rules_count_range:
        issues += ' - rules count range not provided.\n'
    else:
        try:
            bounds = [int(part) for part in p_result.rules_count_range.split('-')]
            if bounds[0] < 1 or bounds[1] > Rule.num_of_rules():
                issues += ' - invalid rules count range.\n'
            else:
                # set ranges
                p_result.rules_count_range = tuple(bounds)
        except ValueError:
            issues += ' - invalid rules count range.\n'
    if not p_result.timeout_range:
        issues += ' - timeout range not provided.\n'
    else:
        try:
            bounds = [int(part) for part in p_result.timeout_range.split('-')]
            if bounds[0] < 1:
                issues += ' - invalid timeout range.\n'
            else:
                # set ranges
                p_result.timeout_range = tuple(bounds)
        except ValueError:
            issues += ' - invalid timeout range.\n'
    if issues:
        print('[-] issues:\n' + issues)
        parser.print_usage()
        sys.exit(0)
    # initial timeout
    print(' # generating random timeout.')
    p_result.timeout = random.randint(p_result.timeout_range[0],
                                      p_result.timeout_range[1])
    # initial rules count - random from range
    print(' # generating random rules count (no. of rules).')
    p_result.rules_count = random.randint(p_result.rules_count_range[0],
                                          p_result.rules_count_range[1])
    # initial rule numbers (adds a new member variable)
    print(' # generating rule numbers randomly.')
    p_result.rule_nums = Rule.get_rules_randomly(p_result.rules_count)
    # initial list of rules
    print(' # initializing rules.', end=' ')
    p_result.rules = Rule.get_rules_from_nums(p_result.rule_nums)
    return ModeFourFilterConfig(p_result)
def update_rules(self):
    """
    Set rules count to a new random value and create a new list of random rules.
    """
    print(' # generating new rules count.')
    # New count drawn uniformly from the configured inclusive range.
    self.rules_count = random.randint(self.rules_count_range[0],
                                      self.rules_count_range[1])
    #print(' # generating %d r')
    self.rule_nums = Rule.get_rules_randomly(self.rules_count)
    print(' # initializing rules.', end=' ')
    self.rules = Rule.get_rules_from_nums(self.rule_nums)
def execute(self):
    """Prompt for a rule name, load it from its JSON file, and render it."""
    listing = ListRules()
    print("The list of rules to choose from, Please choose the name:")
    listing.execute()
    name = input("Describe the rule: ")
    rule_file = name + '.json'
    if not os.path.isfile(rule_file):
        # No <name>.json on disk — bail out.
        print("Could not find the rule by the name {}".format(name))
        exit()
    loaded = Rule(name).from_json(rule_file)
    print(loaded.render())
def update_rules(message):
    """Replace the global rule list with `message`, execute each rule, and
    broadcast the new state to admins."""
    global rules
    rules = message
    rules_given_id.clear()
    print("update rules given id {}".format(rules_given_id))
    for entry in rules:
        print(entry)
        current = Rule(entry, enabled=entry['enabled'])
        print(current.root)
        print(current.evaluate())
        current.execute()
    print('updating rules')
    print(rules)
    update_admin()  # to send the new rules status
def __init__(self, list_of_players):
    '''
    @param: list_of_players: a list of players (3 to 5 supported)
    @return: none
    '''
    # FIX: validate before assigning any state — the original raised only
    # after attributes were already set, leaving a half-initialized object.
    if len(list_of_players) > 5 or len(list_of_players) < 3:
        raise Exception("Incorrect number of players")
    self.lastgame_round_players = list_of_players
    self.players = list_of_players
    self.rule_checker = Rule()
    self.board = Board([], [])
    self.loser = []
    self.log = []
    # One distinct avatar color per player (count is capped at 5 above).
    colors = ["white", "black", "red", "green", "blue"]
    self.avatar_player = {}
    for i, player in enumerate(list_of_players):
        self.avatar_player[player] = Avatar(colors[i], None)
def __init__(self, rules: list, implications: list, membership_function: dict, domain: tuple):
    """Mamdani inference setup: store the fuzzy-system parts and parse the
    textual rules."""
    self.implication = implications
    self.membership_function = membership_function
    self.domain = domain
    AggregationMethods.__init__(self, Mamdani.consequent_filter)

    def negate(x):
        # Standard fuzzy complement.
        return 1 - x

    # Each rule string is tokenized; max/min serve as OR/AND operators.
    self.rules = [Rule(text.split(), max, min, negate) for text in rules]
def __init__(self, learning_rate, no_rules, max_iter):
    """Store hyperparameters, create `no_rules` rules, and build the
    8x8 training grid over [-4, 4) x [-4, 4)."""
    self.learning_rate = learning_rate
    self.max_iter = max_iter
    self.rules = [Rule() for _ in range(no_rules)]
    grid = range(-4, 4)
    self.samples = [SampleDataFunction(x, y) for x in grid for y in grid]
    # Running accumulators for rule weights and weighted outputs.
    self.sum_w = self.sum_wz = 0
def generate_rules(self):
    '''Generate rules from frequent itemsets'''
    # 1-element itemsets cannot split into non-empty antecedent/consequent,
    # so start from the 2-element level.
    for level in self.itemsets[1:]:
        for itemset in level:
            for antecedent in Rule.get_subsets(itemset):
                consequent = itemset.difference(antecedent)
                if not (antecedent and consequent):
                    continue
                support = self.support[itemset]
                confidence = support / self.support[antecedent]
                if confidence > self.min_confidence:
                    self.rules.append(
                        Rule(antecedent, consequent, support, confidence))
    # Ascending order of confidence.
    self.rules.sort(key=lambda rule: rule.confidence)
def get_mode_three_config(parser, p_result):
    """Validate mode-three CLI options, draw random rule numbers, and
    build a ModeThreeFilterConfig.

    Prints usage and exits (status 0) when options are missing/invalid.
    """
    issues = ''
    if not p_result.timeout:
        issues += ' - filter timeout not provided.\n'
    if not p_result.rules_count:
        issues += ' - initial no. of rules not provided.\n'
    else:
        p_result.rules_count = int(p_result.rules_count)
    if not p_result.proto:
        issues += ' - protocol not provided.\n'
    elif p_result.proto not in PROTOCOLS.keys():
        issues += ' - invalid protocol.\n'
    if not p_result.max_pkts_count:
        issues += ' - max. packet count before incrementing rules not provided.\n'
    if not p_result.incr_rules_count:
        issues += ' - no. of rules to be incremented after timeout not provided.\n'
    elif p_result.rules_count:
        # Increment may never exceed the rules left undrawn.
        remaining = Rule.num_of_rules() - p_result.rules_count
        if p_result.incr_rules_count > remaining:
            issues += ' - no. of increment rules too high.\n'
    if issues:
        print('[-] issues:\n' + issues)
        parser.print_usage()
        sys.exit(0)
    print(' # generating random rule numbers.')
    # adds a new member variable
    p_result.rule_nums = Rule.get_rules_randomly(p_result.rules_count)
    print(' # initializing rules', end=' ')
    p_result.rules = Rule.get_rules_from_nums(p_result.rule_nums)
    p_result.proto = PROTOCOLS[p_result.proto]
    return ModeThreeFilterConfig(p_result)
def test_metaclass(self):
    # NOTE(review): `__metaclass__` is Python 2 syntax; under Python 3 it
    # is silently ignored and the class would need
    # `class Test(object, metaclass=MetaRules)` — confirm which
    # interpreter runs this suite.
    rule = Rule("Field")

    class Test(object):
        __metaclass__ = MetaRules
        field = rule

    # The metaclass is expected to collect Rule attributes into `_fields`,
    # visible on both the class and its instances.
    self.assertTrue(hasattr(Test, "_fields"))
    self.assertEqual(Test._fields, {'field': rule})
    self.assertEqual(Test()._fields, {'field': rule})
def get_rules(self):
    '''Generate association rules from frequent itemsets'''
    rules = []
    for itemset in self.itemsets:
        # A one-element itemset would leave the antecedent or consequent
        # empty, so skip it.
        if len(itemset) <= 1:
            continue
        for antecedent in Rule.get_subsets(itemset):
            consequent = itemset.difference(antecedent)
            if not (antecedent and consequent):
                continue
            support = self.get_support(itemset)
            confidence = support / self.get_support(antecedent)
            if confidence > self.min_confidence:
                rules.append(Rule(antecedent, consequent, support, confidence))
    # Ascending order of confidence.
    rules.sort(key=lambda rule: rule.confidence)
    return rules
def _load_rules(self, path):
    """Parse one Rule per non-blank line of the UTF-8 file at `path` and
    wrap them in a RuleSet."""
    with open(path, encoding="utf-8") as f:
        parsed = [Rule.from_string(line) for line in f if not line.isspace()]
    return RuleSet(parsed)
def __initialize_rules(self):
    """Load rules from rules.json, best-effort: a missing or malformed
    file yields an empty rule list instead of an error."""
    self.rules = []
    raw_rules = []
    try:
        with open("rules.json") as rules_file:
            raw_rules = json.load(rules_file)
    except (OSError, ValueError):
        # FIX: was a bare `except: pass`, which also swallowed
        # KeyboardInterrupt/SystemExit. OSError covers I/O failures;
        # json.JSONDecodeError subclasses ValueError, so parse errors
        # remain best-effort.
        pass
    for rule in raw_rules:
        self.rules.append(Rule(rule))
def get_mode_five_config(parser, p_result):
    """Validate mode-five CLI options, pick disjoint ACTIVE and
    INACTIVE_CHECKED rule sets, and build a ModeFiveFilterConfig."""
    issues = ''
    if not p_result.t_win_trig:
        issues += " - THRESHOLD_WIN_TRIG not specified.\n"
    if not p_result.win_size:
        issues += " - WINDOW_SIZE not specified.\n"
    if not p_result.inactive_cp:
        issues += " - INACTIVE_CHECKED% not specified.\n"
    if issues:
        print('[-] issues:\n' + issues)
        parser.print_usage()
        sys.exit(0)
    # Active percentage is random within what the inactive share leaves over.
    p_result.active_p = random.randint(1, (100 - p_result.inactive_cp))
    total = float(Rule.num_of_rules())
    p_result.active_set = Rule.get_rules_randomly(
        round(total * p_result.active_p / 100.0))
    print(' # initializing ACTIVE rules.', end=' ')
    p_result.active_rules = dict(
        zip(p_result.active_set, Rule.get_rules_from_nums(p_result.active_set)))
    # Inactive set is drawn from rule numbers NOT already active.
    p_result.inactive_set = Rule.get_new_rules_randomly(
        round(total * p_result.inactive_cp / 100.0), p_result.active_set)
    print(' # initializing INACTIVE_CHECKED rules.', end=' ')
    p_result.inactive_rules = dict(
        zip(p_result.inactive_set, Rule.get_rules_from_nums(p_result.inactive_set)))
    return ModeFiveFilterConfig(p_result)
def get_mode_two_config(parser, p_result):
    """Validate mode-two CLI options, draw random rule numbers, and build
    a ModeTwoFilterConfig.

    Prints usage and exits (status 0) when required options are missing.
    """
    problems = []
    if not p_result.timeout:
        problems.append(' - filter timeout not provided.\n')
    if not p_result.rules_count:
        problems.append(' - no. of rules not provided.\n')
    if problems:
        print('[-] issues:\n' + ''.join(problems))
        parser.print_usage()
        sys.exit(0)
    p_result.rules_count = int(p_result.rules_count)
    # adds a new member variable
    p_result.rule_nums = Rule.get_rules_randomly(p_result.rules_count)
    print(' # initializing rules', end=' ')
    p_result.rules = Rule.get_rules_from_nums(p_result.rule_nums)
    return ModeTwoFilterConfig(p_result)
def add_builtins(rules_db):
    # Register the built-in rules with the rule database.
    # is_rule: expression "X is X" — reflexive identity over any term X,
    # built inline with an empty premise list and "(built-in)" provenance.
    rules_db.add_rule(
        Rule([], expression_from_list([
            Variable("X"),
            rules_db.add_constant_by_name("is"),
            Variable("X")
        ]), rules_db.add_constant_by_name("is_rule"), "(built-in)", rules_db))
    # The remaining built-ins are dedicated Rule subclasses.
    rules_db.add_rule(RuleIsNot(rules_db))
    rules_db.add_rule(RuleAtom(rules_db))
    rules_db.add_rule(RuleNewAtom(rules_db))
def test(definitions):
    # Ad-hoc pattern-matching experiment (Python 2 — print statements):
    # sets Flat/Orderless attributes, parses an expression and a pattern,
    # then applies a Rule that rewrites any match to the literal 3.
    from rules import Rule
    definitions.set_attribute('Plus', 'Flat')
    definitions.set_attribute('Plus', 'Orderless')
    definitions.set_attribute('g', 'Orderless')
    eval = Evaluation(Symbol('unused'), definitions)
    # Earlier trial expression/pattern pairs kept for reference:
    #expr = 'Plus[a,b,c,d,e,f,g,h,i,1,2,3]'
    #expr = 'a+b+a+e+a+b+a+e+a+b+a+e+a+b+a+e+f'
    #expr = 'a+b+c+d+e+f+g+h+i+j+k+l+a+b'
    #pattern = 'x_+x_+b+c+d+e+f+g+h+i+j+k+l'
    #expr = 'a+b+c+d+e+f+g+h+i+j+k+l+a+b+b+1+2+3'
    #pattern = 'x_+x_+c+d+e+f+g+h+i+j+k+l'
    #pattern = 'Plus[x_]'
    #expr = 'g[a+b+c+d+e,b,e,d,c,a]'
    #pattern = 'g[x__+y_,y_,x__]'
    #expr = 'g[a+b+c+d+e,b]'
    #pattern = 'g[x__+y_,y_]'
    #expr = 'a+a+b'
    #pattern = 'x__+x__'
    #expr = 'a+b+c'
    #pattern = 'x_+y_'
    expr = 'Format[a+b]'
    pattern = 'Format[Plus[items__]]'
    #expr = 'a*b+c*d+e*f+g*h+i*j+k*l+m*n'
    #pattern = 'a_*x_+b_*x_+a_*x_+b_*x_'
    expr = parse(expr).evaluate(eval)
    pattern = parse(pattern).evaluate(eval)
    print expr
    print pattern
    rule = Rule(pattern, parse('3'))
    #evaluation = Evaluation(Expression)
    print '%s' % rule.apply(expr, eval)
def update_rules(self):
    """Grow the active rule set by `incr_rules_count` new random rules and
    refresh the bookkeeping (sorted numbers, count, progress output)."""
    print('\n ' + '#' * 90)
    # Draw rule numbers not already present in self.rule_nums.
    new_rule_nums = Rule.get_new_rules_randomly(
        self.incr_rules_count, self.rule_nums)
    print(" # adding", self.incr_rules_count, " more rule(s)")
    # IDIOM FIX: the original used list comprehensions purely for their
    # .append side effects; extend() does the same in place.
    self.rule_nums.extend(new_rule_nums)
    print(' # initializing rules.', end=' ')
    self.rules.extend(Rule.get_rules_from_nums(new_rule_nums))
    self.rule_nums.sort()
    self.rules_count = len(self.rule_nums)
    print(" # rules added ==> ", ' '.join(map(str, new_rule_nums)))
    print(" # new rules list ==> %s" % ' '.join(map(str, self.rule_nums)))
    print(" # new rules count ==> %d" % self.rules_count)
def update_admin():
    """Broadcast the full device/rule state to every connected admin."""
    devices = [{
        'id': dev_id,
        'state': device.get_state(),
        'state_label': device.get_state_label(),
        'is_actuator': isinstance(device, Actuator),
        'type': device.type,
    } for dev_id, device in device_ids.items()]
    builder_rules = [device.build_rule() for device in device_ids.values()]
    from state import device_names
    # Re-evaluate each rule so clients see fresh statuses.
    for r in rules:
        r['status'] = Rule(r, enabled=r['enabled']).evaluate()
    payload = {
        'devices': devices,
        'builder_rules': builder_rules,
        'rules': rules,
        'device_names': device_names,
    }
    for admin_id in app.admin_id:
        emit('update', payload, room=admin_id)
def result():
    """Clean the uploaded attendance data, run the rules, and return the
    result URL as JSON."""
    delete_target_dir('./static')
    os.mkdir('./static')
    # Clean the data first; bail out if no spreadsheet was uploaded.
    if not os.path.exists('./datas/1.xlsx'):
        return jsonify({'code': 0, 'info': '未上传excel'})
    from clean_data import Duty
    duty = Duty()
    datas = duty.get_duty_datas()
    # pprint(datas)
    with open('./datas/results.json', 'w', encoding='utf-8') as f:
        f.write(json.dumps(datas, ensure_ascii=False))
    # Apply the rules.
    from rules import Rule
    Rule()
    # Return the API result.
    data = {'code': 1, 'info': 'http://192.168.10.129:5000/static/考勤结果.xlsx'}
    print('执行完毕')
    return jsonify(data)
def __init__(self, response):
    # Normalize the raw proxy response before anything else.
    self._response = self.convert_proxy_response(response)
    # Resolve the matching rule from the response URL only if no rule is
    # set yet — assumes self._rule may already be populated elsewhere
    # (e.g. by a subclass or prior assignment); TODO confirm.
    if not self._rule:
        self._rule = Rule.get_url_match(Url({'_id': self._response.url}))
params['moves_amount'] = len(solution) end_time = time.time() delta = end_time - start_time print(str(solution)) print('time complexity=', _open.time_complexity) print('size complexity=', params.get('size_complexity')) print(f'Moves: {params.get("moves_amount")}') print('seconds: ', delta) try: solution.to_file('res.json') except: pass exit() _close.append(min_state) neighbours = Rule.neighbours(min_state) for neighbour in neighbours: if neighbour in _close: continue g = min_state.g + Rule.distance(args.greedy) if neighbour not in _open: neighbour.parent = min_state neighbour.set_metrics(g=g, heuristic=heuristics.get_total_h) _open.put_nowait(neighbour) elif g <= neighbour.g: i = _open.queue.index(neighbour) neighbour = _open.queue[i] neighbour.parent = min_state
class TestRules(Rules):
    # Declarative mapping: the "Id" field is mandatory in the source data.
    id_field = Rule("Id", required=True)
def set_ownvalue(self, name, value):
    """Bind `value` as the own-value rewrite rule for the symbol `name`."""
    from expression import Symbol
    from rules import Rule
    ownvalue_rule = Rule(Symbol(name), value)
    self.add_rule(name, ownvalue_rule)
def learn_one_rule(records, attributes, clss):
    """ Determine a rule that best covers the input. Returns the rule.
    Input: records - set of records that the rule should try to cover best;
    attributes - set of records' attributes; clss - the class the rule
    should precdict.

    BUG FIX: on Python 3 `filter()` returns a lazy iterator, so the
    original `matches != []` was always true and `len(matches)` raised
    TypeError; wrapping every filter() in list() restores the Python 2
    behavior on both interpreters.
    """
    def aux_nominal(name, value):
        """ Creates the condition for a nominal attribute. Returns a
        function from record to boolean representing the condition.
        Input: name - name of the attribute; value - value for the attribute. """
        return lambda r: r[name] == value  # name's value is equal to value

    def aux_continuous(name, value):
        """ Creates the condition for a continuous attribute. Returns a
        function from record to boolean representing the condition.
        Input: name - name of the attribute; value - value for the attribute. """
        return lambda r: r[name] < value  # name's value is less than value

    k = len(attribute_ocurrences(records, CLASS)[0])  # number of classes
    rule = Rule(clss)  # the generated rule
    conditions = []  # all desired attribute value relations
    strings = []  # the textual representation of each element in conditions
    for name, cont, values in attributes:  # each attribute
        if cont:  # continuous attribute
            # the different values assumed by this attribute in records
            values = attribute_ocurrences(records, name)[0]
            values.sort()  # ascending order
            # candidate thresholds: midpoints of consecutive values
            values = [(values[i - 1] + values[i]) / 2 for i in range(1, len(values))]
            for value in values:  # each midpoint
                strings.append(name + ' < ' + str(value))
                conditions.append(aux_continuous(name, value))
        else:
            for value in values:  # each nominal value
                value = value[:]  # copy of the attribute value
                strings.append(name + ' = ' + str(value))
                conditions.append(aux_nominal(name, value))
    better = True  # adding the best condition improved rule
    covered = records  # the records covered by the current rule
    current = 0  # the laplace of rule's current form
    while better:
        best = None  # index of the best condition
        laplace = 0  # laplace if the best condition were to be added
        for i, condition in enumerate(conditions):  # each conjunct and its index
            matches = list(filter(condition, covered))  # covered records matched by this condition
            if matches:  # some records in covered match the condition
                # covered records correctly classified
                correct = len(list(filter(lambda r: r[CLASS] == clss, matches)))
                total = len(matches)  # number of covered records
                l = float(correct + 1) / (total + k)  # laplace with this condition
                if l >= laplace:
                    best = i  # update best
                    laplace = l  # update laplace
        better = laplace > current  # update better
        if better:  # a best condition was found and improves laplace
            rule.add_conjunct(conditions[best], strings[best])  # add it to rule
            c = conditions.pop(best)  # remove the best condition
            strings.pop(best)  # remove its textual form
            covered = list(filter(c, covered))  # keep records matching c
            current = laplace  # update current
    return rule