Example No. 1
import os
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import PrologLexer

def ruby2html(ruby_file):
    # Highlight the source file with Pygments' PrologLexer and write a sibling .html file.
    with open(ruby_file, encoding='utf-8') as prologf:
        code = prologf.read()
    html = highlight(code, PrologLexer(), HtmlFormatter())
    html_name = os.path.splitext(ruby_file)[0] + '.html'
    with open(html_name, 'w', encoding='utf-8') as hf:
        hf.write(html)
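
A minimal usage sketch (the file name here is hypothetical): the helper reads the given file, highlights it, and writes the markup to a matching .html file next to the input.

ruby2html('queens.pl')  # hypothetical input; produces queens.html alongside it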
Example No. 2
from pygments.lexers import PrologLexer
from pygments.token import Token

# Note: `Rule` is defined elsewhere in the surrounding project and is not shown here.


class PrologParser(object):
    def __init__(self):
        # Lexer used to turn raw Prolog source into a token stream.
        self.pro_lexer = PrologLexer()

    def parse(self, text):
        """
        Parse the given text into a dictionary of rules keyed by rule name.
        """
        rules = {}
        tokens = self.__parse_into_tokens(text)
        rules_unrefined = self.__get_rules_with_lazy_body(tokens)
        # merge rules that share a name into a single dictionary entry
        self.__merge_duplicates(rules, rules_unrefined)
        return rules

    def __parse_into_tokens(self, text):
        return list(self.pro_lexer.get_tokens(text))

    def __merge_duplicates(self, rule_dictionary, rules_with_possible_duplicates):
        for rule in rules_with_possible_duplicates:
            rulename = rule.rule_name()
            if rulename in rule_dictionary:
                rule_dictionary[rulename].add_to_root_operation(rule.body.root_operation)
            else:
                rule_dictionary[rulename] = rule
        return rule_dictionary


    def __get_rules_with_lazy_body(self, tokens):
        rules = []
        curr_rule = []
        for token in tokens:
            if token[0] is not Token.Text:
                curr_rule.append(token)

                # a '.' punctuation token ends the rule: build a Rule from the accumulated tokens
                if token[0] is Token.Punctuation and token[1] == '.':
                    if curr_rule:
                        rules.append(Rule(curr_rule))
                        curr_rule = []

        return rules
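
A minimal usage sketch, assuming the project's Rule class is importable alongside PrologParser; the Prolog snippet below is made up for illustration.

parser = PrologParser()
rules = parser.parse("likes(mary, wine). likes(john, X) :- likes(X, wine).")
# The dictionary keys depend on how the project's Rule.rule_name() is implemented.
print(sorted(rules.keys()))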
Example No. 3
    def __init__(self):
        # Constructor excerpt: create the PrologLexer used to tokenize input.
        self.pro_lexer = PrologLexer()