Example #1
    def reply(self, data, cb):
        info = data
        if not isinstance(data, Info):
            info = Info(data)

        if not self.config.get('keepBlank', False) and info.text:
            info.text = info.text.strip()  # Python str has strip(), not trim()

        rule_list = self.routes
        waiter = self.wait_rules.get(info.user, None)

        if waiter:
            rule_list = list(waiter) + list(self.routes)  # list.extend() returns None, so chaining it never produced a list
            self.last_wait_rules[info.user] = waiter
            self.wait_rules[info.user] = None

        for i, rule in enumerate(rule_list):
            if Rule.is_match(info, rule):
                weixinlogger.info("match %s" % rule.name)
                conversationlogger.info("match %s" % rule.name)
                rule.count = i
                result = Rule.execute(info, rule, cb)
                if isinstance(result, (str, unicode)):
                    result = BuildConfig(MessageBuilder.TYPE_RAW_TEXT, None, result)
                if result:
                    if rule.replies:
                        self.wait(info.user, Rule.convert(rule.replies, rule.name))
                    return cb(None, result)

            else:
                logger.debug("not match %s" % rule.name)

        return cb('404', BuildConfig(MessageBuilder.TYPE_RAW_TEXT, None, self.get_status('404') + info.text))
Example #2
def max(brd, bb=64, depth=10, best=()):
    if depth == 0:
        return ((), calc(brd))  
    bestmove = () 
    val = -64  
    moves = brd.getMove(brd.b)
    if len(moves) == 0:
        return (bestmove, -64)
    if len(best) > 0:
        moves.append(best)
    for i in range(len(moves)-1, -1, -1):
        if best and i < len(moves)-1 and moves[i] == best:
            continue
        b = move(moves[i][0], moves[i][1], brd.b)
        tmp = Brd(b, brd.w)
        Rule.refresh(moves[i][1], tmp.b, tmp.w, tmp)
        response = min(tmp, val, depth - 1, best=())[1]
        if response > bb:  # beta cutoff: the minimizing caller already has a better line
            bestmove = moves[i]
            val = response
            break
        if response > val:
            bestmove = moves[i]
            val = response
    if val == -64:  # black loses anyway
        bestmove = moves[0]
    return (bestmove, val)
Example #3
def min(brd, aa=-64, depth=10, best=()):
    if depth == 0:
        return ((), calc(brd))
    val = 64
    bestmove = ()
    moves = brd.getMove(brd.w)
    if len(moves) == 0:
        return (bestmove, 64)
    if best:
        moves.append(best)
    for i in range(len(moves)-1, -1, -1):
        if best and i < len(moves)-1 and moves[i] == best:
            continue
        w = move(moves[i][0], moves[i][1], brd.w)
        tmp = Brd(brd.b, w)
        Rule.refresh(moves[i][1], tmp.w, tmp.b, tmp)
        response = max(tmp, val, depth - 1, best=())[1]
        if response < aa:  # alpha cutoff: the maximizing caller already has a better line
            bestmove = moves[i]
            val = response
            break
        if response < val:
            bestmove = moves[i]
            val = response
    if val == 64:  # white loses anyway
        bestmove = moves[0]
    return (bestmove, val)
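
Examples #2 and #3 above are the two halves of an alpha-beta search: bb and aa are the beta and alpha bounds handed down by the opposite player, and the early break is the pruning cutoff. A minimal self-contained sketch of the same pattern, with illustrative names only (none of the Brd/move/Rule helpers used above):

def alphabeta(node, depth, alpha, beta, maximizing):
    # node is a (value, children) pair; a leaf has an empty children list
    value, children = node
    if depth == 0 or not children:
        return value
    if maximizing:
        best = float('-inf')
        for child in children:
            best = max(best, alphabeta(child, depth - 1, alpha, beta, False))
            alpha = max(alpha, best)
            if alpha >= beta:  # beta cutoff: the minimizer avoids this branch
                break
        return best
    best = float('inf')
    for child in children:
        best = min(best, alphabeta(child, depth - 1, alpha, beta, True))
        beta = min(beta, best)
        if alpha >= beta:  # alpha cutoff
            break
    return best

# e.g. alphabeta((0, [(3, []), (5, [])]), 2, float('-inf'), float('inf'), True) == 5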
Example #4
def p_rule_single(p):
    'rule_single : atom COLONDASH atoms PERIOD'
    global aux_index
    c = Rule(p[1], p[3], aux_index)
    aux_index = aux_index + 1
    rule_map.update(c.rule_map)
    p[0] = str(c)
Example #5
    def __init__(self, d, sip, sp, dip, dp, p, i, action, style, ts=None, flag="None"):

        Rule.__init__(self, d, sip, sp, dip, dp, p, i, action, style, ts, flag)
        self.action = "block"
        self.numPortsScanned = len(self.dPorts.elements)
        sortedValues = sorted(self.dPorts.elements.values())
        self.startPort = sortedValues[0]
        self.endPort = sortedValues[-1]
Example #6
 def make_grammar(self):
     grammar = Grammar()
     r1 = Rule(
         Symbol("NP", {"AGR": "?a"}), [
             Symbol("ART", {"AGR": "?a"}), Symbol("N", {"AGR": "?a"})])
     r1.set_variable_code("?a", -1L)
     # -1L should be default for any undefined variable
     # that is referenced while constructing
     grammar.add_rule(r1)
     return grammar
Example #7
def parseRule(fp,dataset):
    """"""
    rule=Rule()      
    
    #rule name 
    name=geti(fp)   

    #for
    token=geti(fp)
    if token!='FOR':
        log.error('Rule has no FOR symbol, line %d'%common.counter)
        return

    #(
    token=geti(fp)
    if token!='(':
        log.error('Rule FOR has no ( symbol, line %d'%common.counter)
        return

    #for..
    token=geti(fp)
    rule.target=token

    #)
    token=geti(fp)
    if token!=')':
        log.error('Rule FOR brackets do not match, line %d'%common.counter)
        return
    
    #;
    token=geti(fp)
    if token!=';':
        log.error('Rule FOR does not end with ;, line %d'%common.counter)
        return

    #local
    parseLocal(fp,rule)

    #code
    parseCode(fp,rule)

    # where clause
    rule.where=parseWhere(fp)       

    #parse END_RULE
    token=geti(fp)
    if token!='END_RULE':
        log.error('RULE definition has no END_RULE, line %d'%common.counter)
        return
    token=geti(fp)#skip ;   
    if token!=';':
        log.error('RULE definition does not end with ;, line %d'%common.counter)
        return

    dataset.rules[name]=rule
Example #8
    def translate(self, raw_rule):
        parser = EZRuleParser()
        ez_rule = parser.parse(raw_rule)

        if not EZTranslator._is_valid_rule(ez_rule):
            return None

        ios_rule = Rule()
        ios_rule.action = EZTranslator._action_from_ez(ez_rule)
        ios_rule.trigger = EZTranslator._trigger_from_ez(ez_rule)

        return ios_rule
Example #9
    def __init__(self):
        self.rules = []
        self.debug_info = []
        self.selected_rule_index = 0

        # Initialise the rules; this domain is small and fully known, so every rule can be enumerated explicitly
        for last in range(1, 4):
            for second_to_last in range(1, 4):
                for consequent in range(1, 4):
                    rule = Rule()
                    rule.set(last, second_to_last, consequent)  # consequent is already an int
                    self.rules.append(rule)
Example #10
 def test19(self):
     r = Rule("/before/<int:x>/<alphanum:y>/<path:path>")
     
     s = r.test("/before/123/xyz123/some/file.jpg")
     self.assertTrue(s.match())
     self.assertEqual(s.param("x"), 123)
     self.assertEqual(s.param("y"), "xyz123")
     self.assertEqual(s.param("path"), "some/file.jpg")
     
     s = r.test("/before/456/qrs789/another/file.jpg")
     self.assertTrue(s.match())
     self.assertEqual(s.param("x"), 456)
     self.assertEqual(s.param("y"), "qrs789")
     self.assertEqual(s.param("path"), "another/file.jpg")
Example #11
    def __init__(self, service_get_caps_uri):
        #
        #   Rule details
        #
        self.rule_id = 'web_service_name'
        self.rule_name = 'Web Service Name'
        self.rule_business_definition = 'The name is a summarised title (see above) of the service.\n' + \
            'The primary purpose of service name is to support machine-to-machine communications.\n' + \
            'Where possible, the service name should be meaningful to humans.\n' + \
            'The name should be a reflection of the service title (see above), sanitized to be machine readable.\n' + \
            'Describes the Service, not the data.\n' + \
            'Acronyms are allowed ONLY if they are a widely understood standard or an official name (e.g. DEM).\n' + \
            'New service names should be consistent with existing service names.\n' + \
            'Must not duplicate an existing GA web service name.\n'
        self.rule_authority = 'http://pid-dev.ga.gov.au/organisation/ga'
        self.rule_functional_definition = '120 character limit\n' +\
            'alphanumeric characters and underscores only\n' +\
            'machine-readable reflection of the web service title\n' + \
            'acronyms from controlled list only\n' + \
            'not a duplicate of existing name'
        self.component_name = 'string'
        self.passed = True
        self.fail_reasons = []
        self.components_total_count = 1
        self.components_failed_count = 0
        self.failed_components = None

        #
        #   Rule code
        #

        # get the name, from where?

        #
        #   Call the base Rule constructor
        #
        Rule.__init__(self,
                      self.rule_id,
                      self.rule_name,
                      self.rule_business_definition,
                      self.rule_authority,
                      self.rule_functional_definition,
                      self.component_name,
                      self.passed,
                      self.fail_reasons,
                      self.components_total_count,
                      self.components_failed_count,
                      self.failed_components)
Example #12
    def test_turkish_blah(self):
        self.initialise_simulation(turkish_vowel_harmony_new_weights)
        Q2s = [
            'in', 'ler', 'siz', 'i', 'ten', 'sel', 'lik', 'li', 'e', EPSILON
        ]
        hmm_dict = {
            'q0': ['q1'],
            'q1': (['q2'], [
                'el', 'j1l', 'ek', 'ip', 'renk', 'son', 'et', 'josun', 'kedi',
                'kent', 'k0j', 'k0k', 'sokak', 'tuz', 'dal', 'gyn', 'kirpi',
                'k1z', 's1rtlan', 'g0z', 'kurt', 'aj', 'arp'
            ]),
            'q2': (['qf'], Q2s),
        }
        some_hmm = HMM(deepcopy(hmm_dict))
        some_rules = RuleSet([
            Rule([{
                "syll": "+"
            }], [{
                "back": "+"
            }], [{
                "cont": "+",
                "back": "+"
            }, {
                "syll": "-",
                "kleene": True
            }], [], True)
        ])

        some_hypo = Hypothesis(Grammar(some_hmm, some_rules))

        #
        self.assert_equal_no_infs(self.get_target_hypo().get_energy(),
                                  some_hypo.get_energy())
Example #13
    def proposeRule(self, facet, description, category, relation, oldCodelet):
        """Creates a proposed rule, and posts a rule-strength-tester codelet.

        The new codelet has urgency a function of the degree of conceptual-depth of the descriptions in the rule
        """
        from rule import Rule

        rule = Rule(facet, description, category, relation)
        rule.updateStrength()
        if description and relation:
            depths = description.conceptualDepth + relation.conceptualDepth
            depths /= 200.0
            urgency = math.sqrt(depths) * 100.0
        else:
            urgency = 0
        self.newCodelet('rule-strength-tester', oldCodelet, urgency, rule)
Example #14
def main(args):
    wcfg = WCFG(read_grammar_rules(args.grammar))
    #print 'GRAMMAR'
    #print wcfg

    for input_str in args.input:
        wfsa = make_linear_fsa(input_str)
        #print 'FSA'
        #print wfsa
        parser = Earley(wcfg, wfsa)
        forest = parser.do('[S]', '[GOAL]')
        if not forest:
            print 'NO PARSE FOUND'
            continue
        new_rules = []
        for rule in forest:
            if len(rule.rhs) > 1 and all(map(is_nonterminal, rule.rhs)):
                new_rules.append(
                    Rule(rule.lhs, reversed(rule.rhs), rule.log_prob))
        for rule in new_rules:
            forest.add(rule)
        print '# FOREST'
        print forest
        print

        if args.show_permutations:
            print '# PERMUTATIONS'
            counts = count_derivations(forest, '[GOAL]')
            total = 0
            for p, n in sorted(counts['p'].iteritems(), key=lambda (k, v): k):
                print 'permutation=(%s) derivations=%d' % (' '.join(
                    str(i) for i in p), n)
                total += n
            print 'permutations=%d derivations=%d' % (len(
                counts['p'].keys()), total)
            print
Example #15
 def test_different_symbols(self):
     """
     Rules with identical derivations but different symbols should not eq
     """
     c_nonterminal = NonterminalSymbol('c')
     c_rule = Rule(c_nonterminal, self.a_derivation)
     self.assertNotEqual(c_rule, self.a_rule)
Example #16
    def _crossover(self, list_of_rules):
        """
        Crea cruces entre las reglas en la lista `list_of_rules`.

        TODO: A DEFINIRSE.
            - Parametros
            - Eleccion
            - Forma de cruzamiento
        """
        """
        Implementacion α. Suponiendo que se obtienen 4 reglas, y se quieren
        10 en total se crean hijos a partir de las combinaciones de 4 tomadas
        de a dos (6) y se lo suma a los padres para un total de 10.
        """
        if len(list_of_rules) > 1:
            combinations = [x for x in itertools.combinations(list_of_rules, 2)]
            # Uso itertools.combinations para crear sets de combinaciones de las reglas.
            while len(list_of_rules) < self.size_rule_generation:
                for rule1, rule2 in combinations:
                    new_rule = Rule.crossover(rule1['rule'], rule2['rule'])
                    rule_map = {'fitness': (0, 0), 'rule': new_rule}
                    list_of_rules.append(rule_map)
        else:
            print "Invalid number of rules to crossover (" + str(len(list_of_rules)) + ")."
            exit(0)

        return list_of_rules[:self.size_rule_generation]
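
As a quick sanity check on the arithmetic in the "Implementation α" note above (math.comb requires Python 3.8+):

from math import comb
assert comb(4, 2) + 4 == 10  # 6 pair-children plus the 4 parent rules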
Example #17
 def get_energy(self, simulation_case):
     case_name = simulation_case.case_name
     configuration.configurations_dict["case_name"] = case_name
     if isinstance(simulation_case.hmm_dict, HMM):
         hmm = simulation_case.hmm_dict
     else:
         hmm = HMM(simulation_case.hmm_dict)
     if isinstance(simulation_case.flat_rule_set_list, RuleSet):
         rule_set = simulation_case.flat_rule_set_list
     else:
         rule_set_list = []
         for flat_rule in simulation_case.flat_rule_set_list:
             rule_set_list.append(Rule(*flat_rule))
         rule_set = RuleSet(rule_set_list)
     grammar = Grammar(hmm, rule_set)
     self.write_to_dot_to_file(hmm, "hmm_" + case_name)
     self.write_to_dot_to_file(grammar.get_nfa(),
                               "grammar_nfa_" + case_name)
     hypothesis = Hypothesis(grammar, self.data)
     energy = hypothesis.get_energy()
     if self.target_energy:
         print("{}: {} distance from target: {}".format(
             case_name, hypothesis.get_recent_energy_signature(),
             energy - self.target_energy))
     else:
         print("{}: {}".format(case_name,
                               hypothesis.get_recent_energy_signature()))
     return energy
Example #18
 def load(cls, rule_set_file_name):
     rules = []
     with codecs.open(rule_set_file_name, "r") as f:
         rules_list = json.load(f)
         for flat_rule_list in rules_list:
             rules.append(Rule.load(flat_rule_list))
         return cls(rules)
Example #19
    def readInput(self):
        try:
            # Break up by line
            lines = self.inputFile.read().split('\n')
            lines = [line.strip('\r\n') for line in lines]  # the original loop only reassigned the loop variable and had no effect

            # Get number of queries to make to the KB
            self.numQueries = int(lines[0])

            # Read each query and store it as a predicate in self.queryList
            for i in range(1, self.numQueries + 1):
                newQuery = Predicate(lines[i])
                self.queryList.append(newQuery)

            # Read in number of statements for the KB.
            self.numStatements = int(lines[self.numQueries + 1])

            for j in range(self.numQueries + 2,
                           self.numQueries + 2 + self.numStatements):
                statement = lines[j]

                # Check if statement is a rule or a fact
                if "=>" in statement:
                    newRule = Rule(statement)
                    self.kb.addRule(newRule)
                else:
                    newFact = Predicate(statement)
                    self.kb.addFact(newFact)

        except Exception as e:
            print "\nError reading input : ", e
Example #20
    def remove_eps_rules(self):
        g = Grammar()

        g.terminals = self.terminals.copy()
        g.non_terminals = self.non_terminals.copy()
        g.rules = self.rules.copy()
        g.start = self.start

        eps_non_terms = self.find_eps_rules()
        for rule in self.rules:
            rule_eps = eps_non_terms.intersection(rule.right_part)
            for i in range(len(rule_eps)):
                for combo in itertools.combinations(rule_eps, i + 1):
                    new_rule = Rule(rule.left_part, ' '.join(rule.right_part))
                    for ch in combo:
                        new_rule.right_part.remove(ch)
                    if len(new_rule.right_part) > 0:
                        g.rules.append(new_rule)

        eps_rules = []
        for rule in g.rules:
            if rule.right_part[0] == EPS:
                eps_rules.append(rule)
        for rule in eps_rules:
            g.rules.remove(rule)

        self.non_terminals = g.non_terminals
        self.terminals = g.terminals
        self.rules = g.rules
        self.start = g.start
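
The nested loops above enumerate every non-empty subset of the nullable symbols occurring in a right-hand side and emit one shortened variant per subset. A standalone illustration of just that step, with made-up symbols (and, for simplicity, dropping every occurrence of a chosen symbol):

import itertools

nullable = {'A', 'B'}
rhs = ['A', 'b', 'B']
occurring = nullable.intersection(rhs)
for i in range(len(occurring)):
    for combo in itertools.combinations(occurring, i + 1):
        print([s for s in rhs if s not in combo])
# prints, in some order: ['b', 'B'], ['A', 'b'], ['b']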
Example #21
    def test_abadnese(self):
        self.initialise_segment_table("abd_segment_table.txt")
        data = [
            'bbabbba', 'baabbba', 'babbbba', 'bbabadba', 'baabadba',
            'babbadba', 'bbabbad', 'baabbad', 'babbbad', 'bbabadad',
            'baabadad', 'babbadad', 'bbabbab', 'baabbab', 'babbbab',
            'bbabadab', 'baabadab', 'babbadab'
        ]

        hmm = HMM({
            'q0': ['q1'],
            'q1': (['q2',
                    'qf'], ['bbab', 'baab', 'babb', 'bbaba', 'baaba',
                            'babba']),
            'q2': (['qf'], ['dba', 'dad', 'dab'])
        })
        rule = Rule.load([[{
            "cons": "+"
        }], [{
            "labial": "+"
        }], [{
            "labial": "+"
        }], [], True])
        rule_set = RuleSet([rule])

        grammar = Grammar(hmm, rule_set)
        hypothesis = Hypothesis(grammar, data)
        self.assertEqual(hypothesis.get_energy(), 245)
Example #22
    def test_target(self):
        hmm = HMM({'q0': ['q1'],
         'q1': (['qf'], ['ba', 'ab', 'di', 'id', 'bFi', 'dFa', 'da', 'ad', 'bi', 'ib', 'dFi', 'bFa', 'bad', 'dab', 'did', 'bFid', 'bFib', 'dFab', 'bab', 'dib', 'dFid', 'dFad'])
          })

        rule = Rule([], [{"voiceless": "+"}], [{"cons": "+"}], [{"bound": "+"}], True)
        return self.get_energy(hmm, [rule], "target")
Example #23
    def predict(self, curr_index, seq_index, detector):
        # no random subsequences
        if not self.simulator.random_subsequences:
            if (curr_index >=
                    self.simulator.sequence_size * self.simulator.predict_ratio
                    and seq_index
                    == self.simulator.rules_detector.target_seq_index
                    and detector.is_change_detected):

                self.predict_sequence_no_random(curr_index)

        else:
            first_pred = True
            if self.simulator.rules_detector:
                first_pred = self.predicted_rule == Rule(None, None)

            if (curr_index >= self.simulator.sequence_size *
                    self.simulator.predict_ratio):

                if first_pred:
                    if seq_index == self.simulator.rules_detector.target_seq_index and detector.is_change_detected:
                        self.predict_sequence(seq_index, curr_index)

                elif curr_index % self.simulator.rules_detector.round_to == 0 and seq_index == 0:
                    self.predict_sequence(seq_index, curr_index)
Example #24
    def decode_rule(self, rule):
        name = rule['rule']['name']
        key = rule['rule']['key']
        alert_by = rule['alerts']
        action = Webhook(rule['rule']['action']['url'])

        conditions = []
        for c in rule['rule']['conditions']:
            trigger = Trigger(c['trigger']['name'], c['trigger']['arguments'])
            filters = []
            for f in c['filter']['and']:
                _filter = Filter(f['operation'], f['type'], f['arguments'])
                filters.append(_filter)
            condition = Condition(filters, trigger)
            conditions.append(condition)

        selection = {
            'devices':
            decode_selection(rule['search']['filters']['devices'],
                             selection_type='devices'),
            'sensors':
            decode_selection(rule['search']['filters']['sensors'],
                             selection_type='sensors'),
        }

        return Rule(name,
                    alert_by=alert_by,
                    key=key,
                    conditions=conditions,
                    action=action,
                    selection=selection)
Example #25
def read_file(filename: str) -> Tuple[List[Fact], List[Rule]]:
    """
    Чтение фактов и правил из файла
    :param filename: имя файла
    :return: Список фактов и список правил
    """
    facts = []
    rules = []
    # Читаем все из файла
    with open(filename, 'r') as f:
        file_content = f.read()
    # Разделяем факты от правил
    file_facts, file_rules = file_content.split('\n\n')
    # Инициализируем список фактов
    for file_fact in file_facts.split('\n'):
        if not file_fact:
            continue
        new_fact = create_fact(fact_line=file_fact)
        facts.append(new_fact)
    # Инициализируем список правил
    for file_rule in file_rules.split('\n'):
        if not file_rule:
            continue
        lhs, rhs = file_rule.split('->')
        target_fact = create_fact(fact_line=rhs)
        lhs_facts = [
            create_fact(fact_line=fact_str) for fact_str in lhs.split('^')
        ]
        new_rule = Rule(facts=lhs_facts, result=target_fact)
        rules.append(new_rule)
    return facts, rules
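
Reconstructed from the split calls above, the expected file layout is: one fact per line, a single blank line, then one rule per line written as facts joined by ^ with -> before the resulting fact (the concrete fact syntax depends on create_fact, which is not shown):

fact_a
fact_b

fact_a ^ fact_b -> fact_c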
Example #26
def _rule_filter(rules_,rules_dict,recall_min_c1,precision_min_c1,recall_min_c0,precision_min_c0):
    # Factorize rules before semantic tree filtering
    rules_ = [
        tuple(rule)
        for rule in
        [Rule(r, args=args) for r, args in rules_]]
    
    # Filter by recall/precision on class-1 and class-0 samples; for rules that appear repeatedly, update their recorded performance.
    for rule, score in rules_:
        if score[0] >= recall_min_c1 and score[1] >= precision_min_c1 and score[2]>=recall_min_c0 and score[3]>=precision_min_c0:

            if rule in rules_dict:
                # update the score to the new mean
                # Moving Average Calculation
                e = rules_dict[rule][4] + 1  # counter
                d = rules_dict[rule][3] + 1. / e * (
                    score[3] - rules_dict[rule][3])
                c = rules_dict[rule][2] + 1. / e * (
                    score[2] - rules_dict[rule][2])          
                b = rules_dict[rule][1] + 1. / e * (
                    score[1] - rules_dict[rule][1])
                a = rules_dict[rule][0] + 1. / e * (
                    score[0] - rules_dict[rule][0])

                rules_dict[rule] = (a, b, c, d, e)
            else:
                rules_dict[rule] = (score[0], score[1], score[2], score[3], 1)
                
    return rules_dict
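
The a/b/c/d updates above are the standard incremental-mean identity mean_n = mean_{n-1} + (x_n - mean_{n-1}) / n applied to each score component, so no per-rule history has to be kept. A tiny standalone check of that identity:

def running_mean(values):
    mean, n = 0.0, 0
    for x in values:
        n += 1
        mean += (x - mean) / n  # same update shape as the a/b/c/d lines above
    return mean

assert abs(running_mean([1, 2, 3, 4]) - 2.5) < 1e-9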
Example #27
    def test_parser_kleene(self):
        hmm = HMM({
            INITIAL_STATE: ['q1'],
            'q1': (['q2', FINAL_STATE], ['at', 'attstktttt', 'st']),
            'q2': ([FINAL_STATE], ['o'])
        })

        hmm_transducer = hmm.get_transducer()
        self.write_to_dot_to_file(hmm_transducer, "test_hmm_transducer_kleene")

        assimilation_rule_with_kleene = Rule([{
            "cons": "-"
        }], [{
            "low": "+"
        }], [{
            "cons": "-"
        }, {
            "cons": "+",
            "kleene": True
        }], [],
                                             obligatory=True)

        rule_set_with_kleene = RuleSet([assimilation_rule_with_kleene])
        grammar = Grammar(hmm, rule_set_with_kleene)

        nfa = grammar.get_nfa()
        self.write_to_dot_to_file(nfa, "test_parser_nfa_kleene")
Example #28
 def __init__(self, *modifiers):
     self._rule = Rule()
     for modifier in modifiers:
         modifier(self._rule)
     self._fields = dict(self._rule.get_target_fields() or {})
     self.__dict__.update(self._fields)
     self._all_instances.append(self)
Example #29
 def addPenaltyRules(self):
     """as the name says"""
     self.penaltyRules.add(
         Rule(
             'False Naming of Discard, Claimed for Mah Jongg and False Declaration of Mah Jongg',
             'Oabsolute payers=2 payees=2',
             points=-300))
Example #30
def mine_implications(rminer,cn):
    "cn closure of sufficient suppratio, find implications there"
    warn_potential_deprecation()
    global heappushcnt
    mingens = []
    for m in transv(_faces(cn,rminer.latt.immpreds[cn])).hyedges:
        mingens.append(ItSet(m))
    if len(cn) == len(mingens[0]):
        "o/w no rules as cn is a free set and its own unique mingen"
        pass
    else:
        for an in mingens:
            if an in rminer.latt.supps:
                print("ALREADY IN SUPPS") #, 
                print(an, rminer.latt.supps[cn], rminer.latt.supps[an])
            else: 
                rminer.latt.supps[an] = rminer.latt.supps[cn]
                rul = Rule(an,cn,rminer.latt)
                ch = checkrule(rul,rminer)
                if ch == rminer.DISCARD:
                    pass
                elif ch < rminer.latt.boosthr:
                    heappushcnt += 1
                    heappush(rminer.reserved,(-rul.supp,heappushcnt,rul))
                else:
                    rminer.count += 1
                    yield rul
Example #31
	def make_max_rules(self, all_preds):
		rules = []
		ncs = all_preds - self.preds
		for a, c in self.__children.items():
			comment = 'Originally generated from state %s -%s-> %s' % (''.join(self.preds), a, ''.join(c.preds))
			rules.append(Rule(self.preds, ncs, a, c.preds, comment))
		return rules
Example #32
def mine_partial_rules(rminer, cn):
    "check boost wrt smaller antecedents only"
    global heappushcnt
    for an in rminer.latt.allpreds(
            cn, (rminer.latt.supps[cn] * statics.scale) / statics.confthr):
        rul = Rule(an, cn, rminer.latt)
        if len(an) == 1:  # and len(cn) == 2:
            "boost revision may require to fish back in reserved rules"
            if 1 < rul.lift < rminer.latt.boosthr:
                rminer.addlift(rul.lift)
                rminer.latt.reviseboost(rminer.sumlifts, rminer.numlifts)
                rereserved = []
                while rminer.reserved:
                    (negs, _,
                     rul2) = heappop(rminer.reserved)  # for _ see implminer
                    if rul2.cboo < rminer.latt.boosthr:
                        heappushcnt -= 1
                        heappush(rereserved, (negs, heappushcnt, rul2))
                    else:
                        rminer.count += 1
                        yield rul2
                rminer.reserved = rereserved
        ch = checkrule(rul, rminer)
        if ch == rminer.DISCARD:
            pass
        elif ch < rminer.latt.boosthr:
            heappushcnt -= 1
            heappush(rminer.reserved, (-rul.supp, heappushcnt, rul))
        else:
            rminer.count += 1
            yield rul
Example #33
    def generate_rules(self):
        rules = []
        for i in range(self.number_of_rules):
            rules.append(Rule(
                MemberShipFunction(random.uniform(0, 1), random.uniform(0, 1)),
                MemberShipFunction(random.uniform(0, 1), random.uniform(0, 1)),
                ConsequenceFunction(random.uniform(0, 1), random.uniform(0, 1),
                                    random.uniform(0, 1))))

        return rules
Example #34
    def randomize_grammar(cls, nodes_by_type, vocabulary, possible_segments):
        # Randomize rules
        min_rules_num = 1
        max_rules_num = 2
        rules_num = random.randint(min_rules_num, max_rules_num)
        max_num_feature_nodes_per_rule = 1
        max_num_affix_segments_per_rule = 1
        max_num_environment_roots = len(nodes_by_type[SyntacticNode.TYPE_ROOT])

        rules = []
        for i in range(rules_num):
            num_feature_nodes = random.randint(1,
                                               max_num_feature_nodes_per_rule)
            feature_nodes = random.choices(
                nodes_by_type[SyntacticNode.TYPE_FEATURE], k=num_feature_nodes)

            num_affix_segments = random.randint(
                0, max_num_affix_segments_per_rule)
            output_affix = ''
            for _ in range(
                    num_affix_segments
            ):  # We don't use choices because segments in affix can repeat
                output_affix += random.choice(possible_segments)

            num_environment_roots = random.randint(0,
                                                   max_num_environment_roots)
            environment_roots = random.sample(
                nodes_by_type[SyntacticNode.TYPE_ROOT],
                k=num_environment_roots)
            rules.append(Rule(feature_nodes, output_affix, environment_roots))

        return Grammar(nodes_by_type, vocabulary, rules)
Example #36
    def gen_rules(self):
        self.WM_rule = pd.DataFrame(
            np.zeros([len(self.data_set), self.col_num]))
        for i in range(len(self.data_set)):
            for j in range(self.col_num):
                self.WM_rule.loc[i][j] = self.ux[j].idxmax(axis=1)[i]  #x fuzzy

        self.rules = []
        self.not_dup_rules = []
        for i in range(len(self.data_set)):
            #fuzzy_label = [" "," "," "," "]

            fuzzy_label = [" "] * self.col_num
            for j in range(self.col_num):
                for k in range(self.set):
                    if self.WM_rule.loc[i][j] == k:
                        fuzzy_label[j] = self.set_fuzzy[k]

            rule_gen = Rule(rule=fuzzy_label, target=self.targets[i])

            self.rules.append(rule_gen)

        self.not_dup_rules = gf.remove_doubles(self.rules)  # deduplicate once, after the loop

        self.target_categories = gf.remove_doubles(self.targets)
Example #37
 def test_insertion_with_right_context_only2(self):
     configurations["SINGLE_CONTEXT_TRANSDUCER_FLAG"] = True
     self.initialise_segment_table("abd_segment_table.txt")
     rule = Rule([], [{"cons": "-"}], [], [{"cons": "+", "labial": "+"}, {"cons": "+", "labial": "-"}],
                 obligatory=True)
     rule_set = RuleSet([rule])
     self.assertCountEqual(rule_set.get_outputs_of_word('bdbd'), ['abdabd'])
Example #38
def main(args):

    for input_str in args.input:
        fsa = make_linear_fsa(input_str)
        cfg = make_grammar(fsa)
        parser = Earley(cfg, fsa)
        forest = parser.do('[S]', '[GOAL]')
        if not forest:
            print 'NO PARSE FOUND'
            continue
        new_rules = []
        for rule in forest:
            if len(rule.rhs) > 1 and all(map(is_nonterminal, rule.rhs)):
                new_rules.append(
                    Rule(rule.lhs, reversed(rule.rhs), rule.log_prob))
        for rule in new_rules:
            forest.add(rule)
        print '# FOREST'
        print forest
        print

        if args.show_permutations:
            counts = count_derivations(forest, '[GOAL]')
            total = 0
            for p, n in sorted(counts['p'].iteritems(), key=lambda (k, v): k):
                print p, n
                total += n
            print len(counts['p'].keys()), total
Example #39
def main():
    rule = Rule()
    hcc = HandCompleteChecker(rule)
    print("Winning-hand score calculator")
    while True:
        hand_code = input("Enter a hand code (type exit to quit): ")
        if hand_code == "exit":
            return
        try:
            hand = PlayerHand(hand_code)
        except Exception:
            print("Invalid code.")
            continue
        print(hand)

        agari_tile_code = input("Enter the winning-tile code: ")
        try:
            agari_tile = Tile(Tile.calc_tile_num(agari_tile_code))
        except Exception:
            print("Invalid code.")
            continue
        print(agari_tile)

        agari_type_num = input("Ron or tsumo? (0/1): ")
        try:
            agari_type = HandForPointCalcConstant.AGARI_TYPES[int(agari_type_num)]
        except Exception:
            print("Invalid code.")
            continue
        print(agari_type)

        hand_pc = HandForPointCalc(hand, agari_tile, agari_type,
                                   HandForPointCalcConstant.EAST,
                                   HandForPointCalcConstant.EAST)

        print("Score:", PointCalculator(rule).calc_agari_status(hand_pc))
Example #40
 def generate(self):
   """ generates the rules base from all possible combinations of input 
       variable functions
   """
   self.rules = self.generate_helper()
   for k, i in enumerate(self.rules):
     self.rules[k] = Rule(i)
Example #41
 def fromString(self, string):
     policy=Policy()
     elements = Grammar.parsePolicy(string)
     for ruleElements in elements:
         rule = Rule.fromElements(ruleElements)
         policy.rules.append(rule)
     return policy
Example #42
 def get_random_rule_simple(self):
     """
     generate a simple random rule
     variable (+/-/*) variable (>/</=/!=) number
     """
     var1 = self.variables[ri(0, self.n - 1)]
     var2 = self.variables[ri(0, self.n - 1)]
     op1 = ["+", "-", "*", ][ri(0, 2)]
     lhs = {
         "op": op1,
         "lhs": {
             "value": var1,
         },
         "rhs": {
             "value": var2,
         },
     }
     val = ri(1, 10)
     rhs = {
         "value": val,
     }
     op0 = [">", "<", "=", "!=", ][ri(0, 3)]
     if op0 == "<":
         op0 = ">"
         lhs, rhs = rhs, lhs
     return Rule(rule_structured={
         "op": op0,
         "lhs": lhs,
         "rhs": rhs,
     })
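
When "<" is drawn, the operands are swapped so that every emitted rule is expressed with ">". For example, a draw corresponding to x0 + x1 < 7 would come out structured like this (variable names hypothetical):

rule_structured = {
    "op": ">",
    "lhs": {"value": 7},
    "rhs": {
        "op": "+",
        "lhs": {"value": "x0"},
        "rhs": {"value": "x1"},
    },
}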
Example #43
    def __init__(self, config, environ, logdispatch, statechglogger):
        '''Constructor'''
        Rule.__init__(self, config, environ, logdispatch, statechglogger)

        self.logger = logdispatch
        self.rulenumber = 363
        self.rulename = "STIGConfigureRestrictionsPolicy"
        self.formatDetailedResults("initialize")
        self.rootrequired = True
        self.applicable = {'type': 'white',
                           'os': {'Mac OS X': ['10.15', 'r', '10.15.10']},
                           'fisma': 'high'}
        datatype = "bool"
        key = "RESTRICTIONS"
        instructions = "To disable the installation of the restrictions " + \
            "profile set the value of RESTRICTIONS to False"
        default = True
        self.ci = self.initCi(datatype, key, instructions, default)
        self.iditerator = 0
        self.identifier = "mil.disa.STIG.Restrictions.alacarte"
        if search("10\.10.*", self.environ.getosver()):
            self.profile = "/Applications/stonix4mac.app/Contents/" + \
                         "Resources/stonix.app/Contents/MacOS/" + \
                         "stonix_resources/files/" + \
                         "U_Apple_OS_X_10-10_Workstation_V1R2_STIG_Restrictions_Policy.mobileconfig"
            '''These directories for testing purposes only'''
#             self.profile = "/Users/username/stonix/src/" + \
#                 "stonix_resources/files/" + \
#                 "U_Apple_OS_X_10-10_Workstation_V1R2_STIG_Restrictions_Policy.mobileconfig"
        elif search("10\.11\.*", self.environ.getosver()):
            self.profile = "/Applications/stonix4mac.app/Contents/" + \
                         "Resources/stonix.app/Contents/MacOS/" + \
                         "stonix_resources/files/" + \
                         "U_Apple_OS_X_10-11_V1R1_STIG_Restrictions_Policy.mobileconfig"
            '''These directories for testing purposes only'''
#             self.profile = "/Users/username/stonix/src/" + \
#                          "stonix_resources/files/" + \
#                          "U_Apple_OS_X_10-11_V1R1_STIG_Restrictions_Policy.mobileconfig"
        else:
            self.profile = "/Applications/stonix4mac.app/Contents/" + \
                         "Resources/stonix.app/Contents/MacOS/" + \
                         "stonix_resources/files/" + \
                         "U_Apple_macOS_10-12_V1R1_STIG_Restrictions_Policy.mobileconfig"
#             self.profile = "/Users/username/stonix/src/" + \
#                 "stonix_resources/files/" + \
#                 "U_Apple_macOS_10-12_V1R1_STIG_Restrictions_Policy.mobileconfig"
        self.sethelptext()
Example #44
def main():
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    _create_inputs_folder()
    parameter_store = ParameterStore(
        DEPLOYMENT_ACCOUNT_REGION,
        boto3
    )
    deployment_map = DeploymentMap(
        parameter_store,
        ADF_PIPELINE_PREFIX
    )
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-readonly'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')
        ), 'pipeline'
    )
    organizations = Organizations(role)
    clean(parameter_store, deployment_map)
    ensure_event_bus_status(ORGANIZATION_ID)
    try:
        auto_create_repositories = parameter_store.fetch_parameter('auto_create_repositories')
    except ParameterNotFoundError:
        auto_create_repositories = 'enabled'
    threads = []
    _cache = Cache()
    for p in deployment_map.map_contents.get('pipelines', []):
        _source_account_id = p.get('default_providers', {}).get('source', {}).get('properties', {}).get('account_id', {})
        if _source_account_id and int(_source_account_id) != int(DEPLOYMENT_ACCOUNT_ID) and not _cache.check(_source_account_id):
            rule = Rule(p['default_providers']['source']['properties']['account_id'])
            rule.create_update()
            _cache.add(p['default_providers']['source']['properties']['account_id'], True)
        thread = PropagatingThread(target=worker_thread, args=(
            p,
            organizations,
            auto_create_repositories,
            deployment_map,
            parameter_store
        ))
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()
Example #45
    def test_turkish__only_syll_is_the_correct_context(self):
        self.initialise_simulation(turkish_vowel_harmony_new_weights)

        # +syll --> +back
        hmm_dict = {
            'q0': ['q1'],
            'q1': (['q2'], [
                'el', 'j1l', 'ek', 'ip', 'renk', 'son', 'et', 'josun', 'kedi',
                'kent', 'k0j', 'k0k', 'sokak', 'tuz', 'dal', 'gyn', 'kirpi',
                'k1z', 's1rtlan', 'g0z', 'kurt', 'aj', 'arp'
            ]),
            'q2': (['qf'], [
                'in', 'ler', 'siz', 'i', 'ten', 'sel', 'lik', 'li', 'e',
                EPSILON
            ]),
        }
        rule_change = ([{"syll": "+"}], [{"back": "+"}])

        # +syll --> -back
        hmm_dict2 = {
            'q0': ['q1'],
            'q1': (['q2'], [
                'el', 'j1l', 'ek', 'ip', 'renk', 'son', 'et', 'josun', 'kedi',
                'kent', 'k0j', 'k0k', 'sokak', 'tuz', 'dal', 'gyn', 'kirpi',
                'k1z', 's1rtlan', 'g0z', 'kurt', 'aj', 'arp'
            ]),
            'q2': (['qf'], [
                '1n', 'lar', 's1z', '1', 'tan', 'sal', 'l1k', 'l1', 'a',
                EPSILON
            ]),
        }
        rule_change2 = ([{"syll": "+"}], [{"back": "-"}])

        target_energy = self.get_target_hypo().get_energy()
        unexpected_context = []
        for feat in 'syll,back,round,high,voice,cont,lateral,son'.split(','):
            for val in ['+', '-']:
                if (feat, val) == ('syll', '-'):
                    continue
                for r, change in enumerate([rule_change, rule_change2],
                                           start=1):
                    for h, hmm in enumerate([hmm_dict, hmm_dict2], start=1):
                        some_hmm = HMM(deepcopy(hmm))
                        rule = change + ([{
                            "syll": "+",
                            "back": change[1][0]['back']
                        }, {
                            feat: val,
                            "kleene": True
                        }], [], True)
                        some_rules = RuleSet([Rule(*rule)])
                        some_hypo = Hypothesis(Grammar(some_hmm, some_rules))
                        if some_hypo.get_energy() <= target_energy:
                            unexpected_context.append(
                                {f"hmm{h} rule {r}": {
                                    feat: val
                                }})

        assert unexpected_context == [], f"Unexpected kleene context for rule: {unexpected_context}"
Example #46
 def _add_debug_rule(self, salience, preceding_rules):
     rule_names = ', '.join(rule.name or '->{}'.format(rule.target._name) for rule in preceding_rules)
     def debug_func(*args, **kwargs):
         self.logger.debug('Ran rule(s): {} at salience: {}.'.format(rule_names, salience))
     rule = Rule()
     rule.set_salience(salience-1)
     rule.add_python_action(debug_func)
     rule.build(self.engine)
Example #47
def main(argv):
	try:
		options, args = getopt.getopt(argv, "r:n:c:g:f:lh", [
						'rule=', 'name=', 'cell=', 'grid=', 'fill=', 'list', 'help'])
	except getopt.GetoptError:
		usage()
		exit(2)
	else:
		def_grid = 350
		def_cell = 10
		def_alive = 'black'
		def_death = 'white'
		for opt, arg in sorted(options):
			if opt in ('-h', '--help'):
				usage()
				exit(2)
			elif opt in ('-l', '--list'):
				plist()
			elif opt in ('-c', '--cell'):
				if int(arg) > 0:
					def_cell = int(arg)
				else:
					print "Sure, cells of 0x0."
					exit(2)
			elif opt in ('-g', '--grid'):
				if int(arg) > 0:
					def_grid = int(arg)
				else:
					print "Good luck with a grid of 0x0."
					exit(2)
			elif opt in ('-f', '--fill'):
				colour = arg
				if colour[:colour.find(':')] != colour[colour.find(':') + 1:]:
					def_alive = colour[:colour.find(':')].replace('-', ' ')
					def_death = colour[colour.find(':') + 1:].replace('-', ' ')
			elif opt in ('-r', '--rule'):
				rule = arg
				born = [int(i) for i in rule[:rule.find(':')]]
				survive = [int(i) for i in rule[rule.find(':') + 1:]]
				thegrid = Grid(def_grid, def_grid, def_cell, alive=def_alive,
							death=def_death)
				aut = Rule(thegrid, born, survive)
				CellularAutomaton.generate_random(250, thegrid,
					aut.get_cell_matrix(), thegrid.get_alive_colour())
				aut.start()
				exit(2)
			elif opt in ('-n', '--name'):
				name = arg
				thegrid = Grid(def_grid, def_grid, def_cell, alive=def_alive,
							death=def_death)
				aut = CellularAutomaton.factory(name, thegrid)
				CellularAutomaton.generate_random(250, thegrid,
					aut.get_cell_matrix(), thegrid.get_alive_colour())
				aut.start()
				exit(2)
			else:
				usage()
				exit(2)
Example #48
def run_rule(rule_id):
	""" This runs the given rule against the currently active record """
	rule = Rule.rule_named(rule_id)
	if rule is None:
		bottle.abort(404)
	
	record = _testrecord_from_request(bottle.request)
	if record is None:
		bottle.abort(400)
	
	return 'match' if rule.match_against(record) else 'ok'
Example #49
 def set(self, pattern, handler=None, replies=None):    
     if pattern and handler is None and replies is None:
         r = pattern
     else:
         r = {
             'name' : pattern,
             'pattern' : pattern,
             'handler' : handler,
             'replies' : replies
         }
     if r is not None:
         r = Rule.convert(r)
         self.routes.extend(r)
Example #50
class Grammar(object):
    """docstring for Grammar"""
    def __init__(self):
        super(Grammar, self).__init__()
        self.digram_index = {}
        self.root_production = Rule(self)

    def train_string(self, string):
        """docstring for train_string"""
        input_sequence = list(string)
        if input_sequence:
            self.root_production.last().insert_after(Symbol.factory(self, input_sequence.pop(0)))
        while input_sequence:
            self.root_production.last().insert_after(Symbol.factory(self, input_sequence.pop(0)))
            match = self.get_index(self.root_production.last().prev)
            if not match:
                self.add_index(self.root_production.last().prev)
            elif match.next != self.root_production.last().prev:
                self.root_production.last().prev.process_match(match)

    def add_index(self, digram):
        """docstring for index"""
        self.digram_index[digram.hash_value()] = digram

    def get_index(self, digram):
        """docstring for get"""
        return self.digram_index.get(digram.hash_value())

    def clear_index(self, digram):
        """docstring for clear_index"""
        if self.digram_index.get(digram.hash_value()) == digram:
            self.digram_index[digram.hash_value()] = None

    def print_grammar(self):
        """docstring for print_grammar"""
        output_array = []
        rule_set = [self.root_production]
    
        i = 0
        for rule in rule_set:
            output_array.append("%s --(%d)--> " % (i, rule.reference_count))
            line_length = rule.print_rule(rule_set, output_array, len("%s --(%d)--> " % (i, rule.reference_count)))
        
            if i > 0:
                output_array.append(' ' * (57 - line_length))
                line_length = rule.print_rule_expansion(rule_set, output_array, line_length)
            output_array.append('\n')
            i += 1
        return "".join(output_array)
Example #51
 def generate_rules(self):
     """"""
     for reaction_container in self.reaction_pool:
         rule_container = RuleContainer(reaction_container)
         rule_container.sp_state = reaction_container.sp_state
         common_cont = reaction_container.get_common_contingencies()
         rule_container.common_reqs = common_cont
         if reaction_container.name in self.contingency_pool:
             gen = RequirementsGenerator(self.contingency_pool[reaction_container.name])
             rule_container.contingencies = gen
         else:
             rule_container.contingencies = None
         # add source and product states and reqs change name.
         header = len(reaction_container) > 1
         for reaction in reaction_container:
             rule = Rule(reaction)
             rule.header = header
             # add specific reqs and cont.
             rule_container.add_rule(rule)
             if header:
                 rule.specific_reqs = reaction.get_specific_contingencies(common_cont)
         self.rule_pool[reaction_container.name] = rule_container
Example #52
    def __init__(self, tderiv, tree=False, level=0, options=None):
        self.level = level
        self.options = options if options is not None else []  # avoid a shared mutable default argument

        if tree:
            self.derivation = treeToDeriv(tderiv)
        else:
            self.derivation = []

            tderiv = tderiv.lstrip("[").rstrip("]")
            rules = tderiv.split(",")
            for ruleStr in rules:
                rule = Rule(ruleStr)
                self.derivation.append(rule)

        for rule in self.derivation:
            if rule.rhs[0] == "EPSILON":
                rule.rhs = []

        print >>sys.stderr, "Debug: tracking derivation:"
        for rule in self.derivation:
            print >>sys.stderr, rule
Example #53
    def start_state(root):
        ''' None -> <s>^{g-1} . TOP </s>^{g-1} '''

##        LMState.cache = {}

        lmstr = LMState.lm.raw_startsyms()
        lhsstr = lmstr + [root] + LMState.lm.raw_stopsyms()
        
        edge = Hyperedge(None, [root], Vector(), lhsstr)
        edge.lmlhsstr = LMState.lm.startsyms() + [root] + LMState.lm.stopsyms()
        edge.rule = Rule.parse("ROOT(TOP) -> x0 ### ")
        sc = root.bestres[0] if FLAGS.futurecost else 0
        return LMState(None, [DottedRule(edge, dot=len(lmstr))], LMState.lm.startsyms(),
                       step=0, score=sc) # future cost
Example #54
    def process_match(self, match):
        """Deal with a matching digram"""
        from rule import Rule
        rule = None
        if (match.prev.is_guard() and match.next.next.is_guard()):
            # reuse an existing rule
            rule = match.prev.rule
            self.substitute(rule)
            self.prev.propagate_change()
        else:
            # create a new rule
            rule = Rule(self.grammar)
            rule.last().insert_after(Symbol.factory(self.grammar, self))
            rule.last().insert_after(Symbol.factory(self.grammar, self.next))
            self.grammar.add_index(rule.first())
            
            match.substitute(rule)
            match.prev.propagate_change()
            self.substitute(rule)
            self.prev.propagate_change()

        # Check for an under-used rule
        if (NonTerminal == type(rule.first()) and (rule.first().rule.reference_count == 1)):
            rule.first().expand()
Example #55
def treeToDeriv(tree):
    if type(tree[1]) is not tuple:
        r = Rule()
        r.setup(tree[0], [tree[1],], 1.0)
        return [r,]
    else:
        r = Rule()
        rhs = [x[0] for x in tree[1:]]
        assert(len(rhs) <= 2), "Non-binary rule: %s" % str(rhs)
        r.setup(tree[0], rhs, 1.0)
        res = [r,]
        for subt in tree[1:]:
            res += treeToDeriv(subt)
        return res
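
treeToDeriv walks a nested-tuple parse tree whose first element is the label and whose remaining elements are subtrees. An assumed input, given the Rule.setup(lhs, rhs, prob) calls above:

tree = ('S', ('NP', 'John'), ('VP', 'runs'))
derivation = treeToDeriv(tree)
# yields rules S -> NP VP, NP -> John, VP -> runs, each with probability 1.0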
Example #56
 def POST(self):
     """Add a new rule to the database."""
     i = web.input()
     web.header('Content-Type', 'application/json')
     try:
         rule = Rule.parse(i.rule_string)
         INSTANCE['language'].validate_rule(rule)
         INSTANCE['game'].add_rule(rule)
     except ValueError:
         LOGGER.error('syntax error in rule "%s"', i.rule_string)
         return error_response(SEMANTIC_ERROR,
                               'RULE is MODAL VERB PREPOSITION OBJECT')
     except InvalidRuleException:
         LOGGER.error('invalid token in rule "%s"', i.rule_string)
         return error_response(SYNTAX_ERROR, 'invalid token in rule')
     except DuplicateRuleException:
         LOGGER.error('attempt to add duplicate rule "%s"', rule)
         return error_response(LOGICAL_ERROR, 'rule exists')
     else:
         LOGGER.info('successfully added "%s"', rule)
         return success_response('Rule added.')
Example #57
 def POST(self):
     """Remove a rule from the database."""
     i = web.input()
     web.header('Content-Type', 'application/json')
     try:
         rule = Rule.parse(i.rule_string)
         INSTANCE['language'].validate_rule(rule)
         INSTANCE['game'].remove_rule(rule)
     except ValueError:
         LOGGER.error('syntax error in rule "%s"', i.rule_string)
         return error_response(SEMANTIC_ERROR,
                               'RULE is MODAL VERB PREPOSITION OBJECT')
     except InvalidRuleException:
         LOGGER.error('invalid token in rule "%s"', i.rule_string)
         return error_response(SYNTAX_ERROR, 'invalid token in rule')
     except NonexistentRuleException:
         LOGGER.info('attempt to remove nonexistent rule "%s"', rule)
         return error_response(LOGICAL_ERROR, 'rule does not exist')
     else:
         LOGGER.info('successfully removed "%s"', rule)
         return success_response('Rule removed.')
Example #58
    def __init__(self):
        firstRule = Rule()

        # reset number of rules and hashtable
        Rule.numRules = 0
        #symbol.theDigrams.clear()
        # loop until done
        done = False
        input_string = "abracadabraarbadacarba"
        for ch in input_string:
            #print "before: " + str(Rule.numRules)
            firstRule.last().insertAfter(Terminal(ch))
            #print "middle: " + str(Rule.numRules)
            firstRule.last().p.check()
            #print "after: " + str(Rule.numRules)
        print firstRule.getRules()
        print firstRule.theGuard.n
        print firstRule.theGuard.n.value
        print firstRule.theGuard.p.value
Example #59
 def _create_reduce_rule(self, map_rule):
     r1 = map_rule.target[0]
     r2 = map_rule.target[1]
     self.reduce_rule = Rule()
     self.reduce_rule.set_name('_Reduce')
     for field_name in map_rule.groupby:
         self.reduce_rule.add_variable(getattr(r1, field_name), getattr(r2, field_name))
     self.reduce_rule.add_condition(fact_index(r1) != fact_index(r2))
     
     reduce_kwargs = {}
     for field_name, (value, subfields) in self.map_reduce_values.iteritems():
         v1 = [getattr(r1, field_name) for field_name in subfields]
         v2 = [getattr(r2, field_name) for field_name in subfields]
         if len(subfields) == 1: 
             v1 = v1[0]
             v2 = v2[0]
         reduced = value._reduce(v1, v2)
         if len(subfields) == 1:
             reduced = [reduced]
         reduce_kwargs.update(dict(zip(subfields, reduced)))
     self.reduce_rule.add_action(Delete(r2))
     self.reduce_rule.add_action(Update(r1, **reduce_kwargs))
     self.reduce_rule.set_salience(map_rule.salience)
     map_rule.add_secondary_rule(self.reduce_rule)
Example #60
 def test14(self):
     r = Rule("/pages/<alphanum:x>/")
     s = r.test("/pages/first/")
     
     self.assertTrue(s.match())
     self.assertEqual(s.param("x"), "first")