Example #1
import networkx as nx

# list_functions, SemTokenizer, Atoms, Tag, NDPDA_FSM, and semantic_rules
# are assumed to come from this project's own modules.

class Semantics:
    def __init__(self):
        self.lists = list_functions()

        self.tokens = SemTokenizer()
        self.graph = Atoms()   # project-specific atom graph
        self.nxG = nx.Graph()  # parallel NetworkX view of tag/word edges
        
    def semanticRules(self):
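        # Yield (rule_name, regex_pattern) pairs for every entry in the
        # module-level semantic_rules dict that defines a 'regex' list.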
        for k, v in semantic_rules.items():
            if 'regex' in v:
                for x in v['regex']:
                    yield k, x
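
    # For illustration only, an assumed shape for a semantic_rules entry
    # (the real dict lives elsewhere in the project):
    #
    # semantic_rules = {
    #     'rule_name': {
    #         'regex': ['= SYMBOL'],  # '= ' patterns become FSM transitions
    #         'match': ['...'],
    #         'set':   ['...'],
    #     },
    # }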
                    
    def semanticsToAtoms(self, sentence):
        # Guard before touching sentence.words.
        if not sentence:
            return

        words = sentence.words
        item_counter = 0

        for word in words:
            next_index = item_counter + 1
            if next_index >= len(words):
                # The final word has no successor to pair with.
                continue
            next_word = words[next_index]
            item_counter += 1

            # Add both words (deduplicated) and link each to its immediate
            # successor, building a word-adjacency graph.
            self.graph.add_node(word, ignore_dupes=True)
            self.graph.add_node(next_word, ignore_dupes=True)

            self.graph.add_edge(word, next_word)
            
            # Look up the tag first so the fallback below can still use it;
            # originally it was assigned inside the try block and could be
            # unbound in the except branch.
            try:
                tag = sentence.tags[item_counter]
            except IndexError:
                continue

            try:
                right_index = item_counter + sentence.spans[item_counter]
                current = Tag(sentence.words[item_counter],
                              sentence.words[right_index], tag)
                sentence.tag_set.append(current)
            except IndexError:
                # No span target: record the tag with an empty right side.
                current = Tag(sentence.words[item_counter], None, tag)
                sentence.tag_set.append(current)
                continue

            # The bounds check belongs on words, which is what gets indexed
            # below; right_word is currently unused after this point.
            if len(sentence.words) > right_index:
                right_word = sentence.words[right_index]
            else:
                continue

            # Mirror the tag-word links into both the NetworkX graph and
            # the Atoms graph.
            self.nxG.add_edge(tag, word)
            self.nxG.add_edge(tag, next_word)
            self.graph.add_node(tag, ignore_dupes=True)
            self.graph.add_edge(tag, word)
            self.graph.add_edge(tag, next_word)

        return self.graph

    def handleSemantics(self, sentence):
        if not sentence:
            return

        # Seed the FSM in its 'INIT' state with the first element of the
        # sentence structure (indexed here, unlike the attribute access
        # used in semanticsToAtoms).
        self.ndpda = NDPDA_FSM('INIT', sentence[0])
        # The original called self.semanticsToGraph, which is not defined
        # on this class; semanticsToAtoms appears to be the intended method.
        self.semanticsToAtoms(sentence)

        self.tokens.fsm_setup()

        for k, x in self.semanticRules():
            match_rules = {}
            set_rules   = {}
            
            # Patterns that start with '= ' define FSM transitions; the
            # remainder of the pattern is the transition symbol.
            if x.startswith('= '):
                current_rule = semantic_rules[k]
                for m in current_rule['match']:
                    # Each pass overwrites match_rules[k], so only the
                    # last tokenized 'match' pattern is kept per rule.
                    tokenizer_out = self.tokens.fsm_tokenizer(m)
                    match_rules[k] = tokenizer_out
                for n in current_rule['set']:
                    tokenizer_out = self.tokens.fsm_tokenizer(n)
                    set_rules[k] = tokenizer_out

                #match_dict = self.tokens.tokenize(m[0])
                #set_dict = self.tokens.tokenize(current_rule['set'])
                self.ndpda.add_transition(x[2:], match_rules, k, set_rules)
                
        processed = self.ndpda.process_list(sentence[3])
        
        return processed
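
A minimal, self-contained sketch of the graph-building pattern used in semanticsToAtoms above. The SentenceStub class and the sample words, tags, and spans are illustrative assumptions, and the two graphs (Atoms and nx.Graph) are collapsed into a single NetworkX graph for brevity:

import networkx as nx

class SentenceStub:
    # Assumed sentence shape: .words, .tags, .spans, .tag_set.
    def __init__(self, words, tags, spans):
        self.words = words
        self.tags = tags
        self.spans = spans
        self.tag_set = []

nxG = nx.Graph()
sent = SentenceStub(words=['the', 'cat', 'sat'],
                    tags=['DT', 'NN', 'VBD'],
                    spans=[1, 1, 0])

# Pair each word with its successor and connect both to the successor's
# tag, mirroring the item_counter bookkeeping in semanticsToAtoms.
for i in range(len(sent.words) - 1):
    word, next_word = sent.words[i], sent.words[i + 1]
    tag = sent.tags[i + 1]
    nxG.add_edge(word, next_word)
    nxG.add_edge(tag, word)
    nxG.add_edge(tag, next_word)

print(sorted(nxG.edges()))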