Example #1
    def build(self):
        g = Grammar(self.tokens)

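        # Register operator precedence: levels start at 1, and later entries
        # in self.precedence bind tighter.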
        for level, (assoc, terms) in enumerate(self.precedence, 1):
            for term in terms:
                g.set_precedence(term, assoc, level)

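        # Add every production along with its semantic action and optional
        # precedence override.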
        for prod_name, syms, func, precedence in self.productions:
            g.add_production(prod_name, syms, func, precedence)

        g.set_start()

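        # Warn about declared tokens and productions the grammar never uses.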
        for unused_term in g.unused_terminals():
            warnings.warn(
                "Token %r is unused" % unused_term,
                ParserGeneratorWarning,
                stacklevel=2
            )
        for unused_prod in g.unused_productions():
            warnings.warn(
                "Production %r is not reachable" % unused_prod,
                ParserGeneratorWarning,
                stacklevel=2
            )

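        # Precompute LR items and the FIRST/FOLLOW sets needed to build the
        # parsing table.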
        g.build_lritems()
        g.compute_first()
        g.compute_follow()

        # win32 temp directories are already per-user
        if os.name == "nt":
            cache_file = os.path.join(
                tempfile.gettempdir(),
                "rply-%s-%s-%s.json" % (
                    self.VERSION, self.cache_id, self.compute_grammar_hash(g)
                )
            )
        else:
            cache_file = os.path.join(
                tempfile.gettempdir(),
                "rply-%s-%s-%s-%s.json" % (
                    self.VERSION,
                    os.getuid(),
                    self.cache_id,
                    self.compute_grammar_hash(g)
                )
            )
        table = None
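        # Trust an existing cache file only if it is owned by the current user
        # and has mode 0600; on Windows the ownership check is skipped because
        # the temp directory is already per-user.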
        if os.path.exists(cache_file):
            with open(cache_file) as f:
                data = json.load(f)
                stat_result = os.fstat(f.fileno())
            if (
                os.name == "nt" or (
                    stat_result.st_uid == os.getuid() and
                    stat.S_IMODE(stat_result.st_mode) == 0o0600
                )
            ):
                if self.data_is_valid(g, data):
                    table = LRTable.from_cache(g, data)
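        # On a cache miss (or an untrusted cache file), rebuild the table and
        # write it with exclusive create and mode 0600; O_EXCL means this open
        # fails with FileExistsError if a stale cache file is still present.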
        if table is None:
            table = LRTable.from_grammar(g)
            fd = os.open(
                cache_file, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0o0600
            )
            with os.fdopen(fd, "w") as f:
                json.dump(self.serialize_table(table), f)
        if table.sr_conflicts:
            warnings.warn(
                "%d shift/reduce conflict%s" % (
                    len(table.sr_conflicts),
                    "s" if len(table.sr_conflicts) > 1 else ""
                ),
                ParserGeneratorWarning,
                stacklevel=2,
            )
        if table.rr_conflicts:
            warnings.warn(
                "%d reduce/reduce conflict%s" % (
                    len(table.rr_conflicts),
                    "s" if len(table.rr_conflicts) > 1 else ""
                ),
                ParserGeneratorWarning,
                stacklevel=2,
            )
        return LRParser(table, self.error_handler)
Example #2
    def build(self):
        g = Grammar(self.tokens)

        for level, (assoc, terms) in enumerate(self.precedence, 1):
            for term in terms:
                g.set_precedence(term, assoc, level)

        for prod_name, syms, func, precedence in self.productions:
            g.add_production(prod_name, syms, func, precedence)

        g.set_start()

        for unused_term in g.unused_terminals():
            warnings.warn(
                "Token %r is unused" % unused_term,
                ParserGeneratorWarning,
                stacklevel=2
            )
        for unused_prod in g.unused_productions():
            warnings.warn(
                "Production %r is not reachable" % unused_prod,
                ParserGeneratorWarning,
                stacklevel=2
            )

        g.build_lritems()
        g.compute_first()
        g.compute_follow()

        # win32 temp directories are already per-user
        if os.name == "nt":
            cache_file = os.path.join(
                tempfile.gettempdir(),
                "rply-%s-%s-%s.json" % (self.VERSION, self.cache_id, self.compute_grammar_hash(g))
            )
        else:
            cache_file = os.path.join(
                tempfile.gettempdir(),
                "rply-%s-%s-%s-%s.json" % (self.VERSION, os.getuid(), self.cache_id, self.compute_grammar_hash(g))
            )
        table = None
        if os.path.exists(cache_file):
            with open(cache_file) as f:
                data = json.load(f)
                stat_result = os.fstat(f.fileno())
            if (
                os.name == "nt" or (
                    stat_result.st_uid == os.getuid() and
                    stat.S_IMODE(stat_result.st_mode) == 0o0600
                )
            ):
                if self.data_is_valid(g, data):
                    table = LRTable.from_cache(g, data)
        if table is None:
            table = LRTable.from_grammar(g)
            fd = os.open(cache_file, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0o0600)
            with os.fdopen(fd, "w") as f:
                json.dump(self.serialize_table(table), f)
        if table.sr_conflicts:
            warnings.warn(
                "%d shift/reduce conflict%s" % (len(table.sr_conflicts), "s" if len(table.sr_conflicts) > 1 else ""),
                ParserGeneratorWarning,
                stacklevel=2,
            )
        if table.rr_conflicts:
            warnings.warn(
                "%d reduce/reduce conflict%s" % (len(table.rr_conflicts), "s" if len(table.rr_conflicts) > 1 else ""),
                ParserGeneratorWarning,
                stacklevel=2,
            )
        return LRParser(table, self.error_handler)
Example #3
    def build(self):
        g = Grammar(self.tokens)

        for level, (assoc, terms) in enumerate(self.precedence, 1):
            for term in terms:
                g.set_precedence(term, assoc, level)

        for prod_name, syms, func, precedence in self.productions:
            g.add_production(prod_name, syms, func, precedence)

        g.set_start()

        for unused_term in g.unused_terminals():
            warnings.warn(
                "Token %r is unused" % unused_term,
                ParserGeneratorWarning,
                stacklevel=2
            )
        for unused_prod in g.unused_productions():
            warnings.warn(
                "Production %r is not reachable" % unused_prod,
                ParserGeneratorWarning,
                stacklevel=2
            )

        g.build_lritems()
        g.compute_first()
        g.compute_follow()

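        # This variant caches the table in the per-user application cache
        # directory (AppDirs), keyed by cache_id, VERSION and the grammar
        # hash, so no ownership or permission check is needed.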
        table = None
        if self.cache_id is not None:
            cache_dir = AppDirs("rply").user_cache_dir
            cache_file = os.path.join(
                cache_dir,
                "%s-%s-%s.json" % (
                    self.cache_id, self.VERSION, self.compute_grammar_hash(g)
                )
            )

            if os.path.exists(cache_file):
                with open(cache_file) as f:
                    data = json.load(f)
                if self.data_is_valid(g, data):
                    table = LRTable.from_cache(g, data)
        if table is None:
            table = LRTable.from_grammar(g)

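            # Create the per-user cache directory with mode 0700 on first use,
            # then write the serialized table.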
            if self.cache_id is not None:
                if not os.path.exists(cache_dir):
                    os.makedirs(cache_dir, mode=0o0700)

                with open(cache_file, "w") as f:
                    json.dump(self.serialize_table(table), f)

        if table.sr_conflicts:
            warnings.warn(
                "%d shift/reduce conflict%s" % (
                    len(table.sr_conflicts),
                    "s" if len(table.sr_conflicts) > 1 else ""
                ),
                ParserGeneratorWarning,
                stacklevel=2,
            )
        if table.rr_conflicts:
            warnings.warn(
                "%d reduce/reduce conflict%s" % (
                    len(table.rr_conflicts),
                    "s" if len(table.rr_conflicts) > 1 else ""
                ),
                ParserGeneratorWarning,
                stacklevel=2,
            )
        return LRParser(table, self.error_handler)
Example #4
    def build(self):
        g = Grammar(self.tokens)

        for level, (assoc, terms) in enumerate(self.precedence, 1):
            for term in terms:
                g.set_precedence(term, assoc, level)

        for prod_name, syms, func, precedence in self.productions:
            g.add_production(prod_name, syms, func, precedence)

        g.set_start()

        for unused_term in g.unused_terminals():
            warnings.warn(
                "Token %r is unused" % unused_term,
                ParserGeneratorWarning,
                stacklevel=2
            )
        for unused_prod in g.unused_productions():
            warnings.warn(
                "Production %r is not reachable" % unused_prod,
                ParserGeneratorWarning,
                stacklevel=2
            )

        g.build_lritems()
        g.compute_first()
        g.compute_follow()

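        # This variant does no on-disk caching: the LR table is constructed
        # directly from the grammar on every call.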
        table = LRTable(g)
        if table.sr_conflicts:
            warnings.warn(
                "%d shift/reduce conflict%s" % (len(table.sr_conflicts), "s" if len(table.sr_conflicts) > 1 else ""),
                ParserGeneratorWarning,
                stacklevel=2,
            )
        if table.rr_conflicts:
            warnings.warn(
                "%d reduce/reduce conflict%s" % (len(table.rr_conflicts), "s" if len(table.rr_conflicts) > 1 else ""),
                ParserGeneratorWarning,
                stacklevel=2,
            )
        return LRParser(table, self.error_handler)
Example #5
    def build(self):
        g = Grammar(self.tokens)

        for level, (assoc, terms) in enumerate(self.precedence, 1):
            for term in terms:
                g.set_precedence(term, assoc, level)

        for prod_name, syms, func, precedence in self.productions:
            g.add_production(prod_name, syms, func, precedence)

        g.set_start()

        for unused_term in g.unused_terminals():
            warnings.warn("Token %r is unused" % unused_term,
                          ParserGeneratorWarning,
                          stacklevel=2)
        for unused_prod in g.unused_productions():
            warnings.warn("Production %r is not reachable" % unused_prod,
                          ParserGeneratorWarning,
                          stacklevel=2)

        g.build_lritems()
        g.compute_first()
        g.compute_follow()

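        # Look up a previously serialized table in the per-user cache
        # directory, keyed by cache_id, VERSION and the grammar hash.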
        table = None
        if self.cache_id is not None:
            cache_dir = AppDirs("rply").user_cache_dir
            cache_file = os.path.join(
                cache_dir, "%s-%s-%s.json" %
                (self.cache_id, self.VERSION, self.compute_grammar_hash(g)))

            if os.path.exists(cache_file):
                with open(cache_file) as f:
                    data = json.load(f)
                if self.data_is_valid(g, data):
                    table = LRTable.from_cache(g, data)
        if table is None:
            table = LRTable.from_grammar(g)

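            # Creating the cache directory and writing the serialized table is
            # delegated to the _write_cache helper (not shown here).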
            if self.cache_id is not None:
                self._write_cache(cache_dir, cache_file, table)

        if table.sr_conflicts:
            warnings.warn(
                "%d shift/reduce conflict%s" %
                (len(table.sr_conflicts),
                 "s" if len(table.sr_conflicts) > 1 else ""),
                ParserGeneratorWarning,
                stacklevel=2,
            )
        if table.rr_conflicts:
            warnings.warn(
                "%d reduce/reduce conflict%s" %
                (len(table.rr_conflicts),
                 "s" if len(table.rr_conflicts) > 1 else ""),
                ParserGeneratorWarning,
                stacklevel=2,
            )
        return LRParser(table, self.error_handler)
Example #6
    def build(self):
        g = Grammar(self.tokens)

        for level, (assoc, terms) in enumerate(self.precedence, 1):
            for term in terms:
                g.set_precedence(term, assoc, level)

        for prod_name, syms, func, precedence in self.productions:
            g.add_production(prod_name, syms, func, precedence)

        g.set_start()

        for unused_term in g.unused_terminals():
            warnings.warn(
                "Token %r is unused" % unused_term,
                ParserGeneratorWarning,
                stacklevel=2
            )
        for unused_prod in g.unused_productions():
            warnings.warn(
                "Production %r is not reachable" % unused_prod,
                ParserGeneratorWarning,
                stacklevel=2
            )

        g.build_lritems()
        g.compute_first()
        g.compute_follow()

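        # Caching is delegated to a pluggable self.cache object: load() returns
        # previously dumped data for this grammar hash, or None on a miss.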
        table = None
        grammar_hash = self.compute_grammar_hash(g)
        data = self.cache.load(grammar_hash)
        if data is not None and self.data_is_valid(g, data):
            table = LRTable.from_cache(g, data)
        if table is None:
            table = LRTable.from_grammar(g)
            self.cache.dump(grammar_hash, self.serialize_table(table))
        if table.sr_conflicts:
            warnings.warn(
                "%d shift/reduce conflict%s" % (
                    len(table.sr_conflicts),
                    "s" if len(table.sr_conflicts) > 1 else ""
                ),
                ParserGeneratorWarning,
                stacklevel=2,
            )
        if table.rr_conflicts:
            warnings.warn(
                "%d reduce/reduce conflict%s" % (
                    len(table.rr_conflicts),
                    "s" if len(table.rr_conflicts) > 1 else ""
                ),
                ParserGeneratorWarning,
                stacklevel=2,
            )
        return LRParser(table, self.error_handler)
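
The six variants above all appear to be bodies of rply's ParserGenerator.build(), differing mainly in how (and whether) the generated LR table is cached on disk. The sketch below shows how build() is normally reached through rply's public API; the token names, grammar and expected output are illustrative and are not taken from the examples above.

    from rply import LexerGenerator, ParserGenerator

    # Tokenizer for a toy addition language.
    lg = LexerGenerator()
    lg.add("NUMBER", r"\d+")
    lg.add("PLUS", r"\+")
    lg.ignore(r"\s+")
    lexer = lg.build()

    # The token list and precedence table feed the loops at the top of build().
    pg = ParserGenerator(["NUMBER", "PLUS"], precedence=[("left", ["PLUS"])])

    @pg.production("expr : expr PLUS expr")
    def expr_plus(p):
        return p[0] + p[2]

    @pg.production("expr : NUMBER")
    def expr_number(p):
        return int(p[0].getstr())

    # build() runs one of the variants shown above; it may emit
    # ParserGeneratorWarning for unused tokens or parser conflicts.
    parser = pg.build()
    print(parser.parse(lexer.lex("1 + 2 + 3")))  # expected output: 6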