Example #1
def _write_to_zip(zf, dest, src, ns):
    pyc = _py_temp_compile(src, ns)
    if pyc:
        try:
            zf.write(str(pyc), dest.with_suffix(".pyc"))
        finally:
            try:
                pyc.unlink()
            except:
                log_exception("Failed to delete {}", pyc)
        return

    if src in LIB2TO3_GRAMMAR_FILES:
        from lib2to3.pgen2.driver import load_grammar

        tmp = ns.temp / src.name
        try:
            shutil.copy(src, tmp)
            load_grammar(str(tmp))
            for f in ns.temp.glob(src.stem + "*.pickle"):
                zf.write(str(f), str(dest.parent / f.name))
                try:
                    f.unlink()
                except:
                    log_exception("Failed to delete {}", f)
        except:
            log_exception("Failed to compile {}", src)
        finally:
            try:
                tmp.unlink()
            except:
                log_exception("Failed to delete {}", tmp)

    zf.write(str(src), str(dest))
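
For reference, every example on this page centers on the same call. A minimal, hedged sketch of the basic pattern (the grammar path is hypothetical, not taken from the project above):

from lib2to3.pgen2 import driver

# load_grammar() compiles the grammar text file and, with save=True, caches the
# result as a version-stamped .pickle next to it (the path below is illustrative).
grammar = driver.load_grammar("Grammar.txt", save=True, force=False)
print(len(grammar.symbol2number), "nonterminals loaded")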
Example #3
    def test_load_grammar_from_pickle(self):
        tmpdir = tempfile.mkdtemp()
        try:
            grammar_copy = os.path.join(tmpdir,
                                        os.path.basename(support.grammar_path))
            shutil.copy(support.grammar_path, grammar_copy)
            pickle_name = pgen2_driver._generate_pickle_name(grammar_copy)
            pgen2_driver.load_grammar(grammar_copy, save=True, force=True)
            self.assertTrue(os.path.exists(pickle_name))
            os.unlink(grammar_copy)
            pgen2_driver.load_grammar(grammar_copy, save=False, force=False)
        finally:
            shutil.rmtree(tmpdir)
Example #4
    def test_load_grammar_from_pickle(self):
        # Make a copy of the grammar file in a temp directory we are
        # guaranteed to be able to write to.
        tmpdir = tempfile.mkdtemp()
        try:
            grammar_copy = os.path.join(
                    tmpdir, os.path.basename(support.grammar_path))
            shutil.copy(support.grammar_path, grammar_copy)
            pickle_name = pgen2_driver._generate_pickle_name(grammar_copy)

            pgen2_driver.load_grammar(grammar_copy, save=True, force=True)
            self.assertTrue(os.path.exists(pickle_name))

            os.unlink(grammar_copy)  # Only the pickle remains...
            pgen2_driver.load_grammar(grammar_copy, save=False, force=False)
        finally:
            shutil.rmtree(tmpdir)
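
The pickle_name checked above comes from the driver's private naming helper. A quick illustrative probe (the output shown is only an example, and _generate_pickle_name is a private API that may change):

from lib2to3.pgen2 import driver as pgen2_driver

# The cache file name embeds sys.version_info, so pickles written by different
# Python versions do not collide.
print(pgen2_driver._generate_pickle_name("Grammar.txt"))  # path is illustrative
# e.g. "Grammar3.8.10.final.0.pickle" -- the exact name depends on the interpreter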
Example #5
def patch_grammar(grammar_file):
    """Patch in the given lib2to3 grammar."""
    grammar = driver.load_grammar(grammar_file)
    for name, symbol in pygram.python_grammar.symbol2number.items():
        delattr(pygram.python_symbols, name)
    for name, symbol in grammar.symbol2number.items():
        setattr(pygram.python_symbols, name, symbol)
    pygram.python_grammar = grammar
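
A hedged usage sketch for patch_grammar(), assuming the function above is in scope and using a hypothetical alternative grammar file:

from lib2to3 import pygram, pytree
from lib2to3.pgen2 import driver

patch_grammar("Grammar_custom.txt")   # hypothetical grammar file
d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
tree = d.parse_string("x = 1\n")      # parsed with the patched grammar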
Example #7
    def test_load_grammar_from_subprocess(self):
        tmpdir = tempfile.mkdtemp()
        tmpsubdir = os.path.join(tmpdir, 'subdir')
        try:
            os.mkdir(tmpsubdir)
            grammar_base = os.path.basename(support.grammar_path)
            grammar_copy = os.path.join(tmpdir, grammar_base)
            grammar_sub_copy = os.path.join(tmpsubdir, grammar_base)
            shutil.copy(support.grammar_path, grammar_copy)
            shutil.copy(support.grammar_path, grammar_sub_copy)
            pickle_name = pgen2_driver._generate_pickle_name(grammar_copy)
            pickle_sub_name = pgen2_driver._generate_pickle_name(
                grammar_sub_copy)
            self.assertNotEqual(pickle_name, pickle_sub_name)

            # Generate a pickle file from this process.
            pgen2_driver.load_grammar(grammar_copy, save=True, force=True)
            self.assertTrue(os.path.exists(pickle_name))

            # Generate a new pickle file in a subprocess with a most likely
            # different hash randomization seed.
            sub_env = dict(os.environ)
            sub_env['PYTHONHASHSEED'] = 'random'
            code = """
from lib2to3.pgen2 import driver as pgen2_driver
pgen2_driver.load_grammar(%r, save=True, force=True)
            """ % (grammar_sub_copy, )
            msg = ("lib2to3 package is deprecated and may not be able "
                   "to parse Python 3.10+")
            cmd = [
                sys.executable, f'-Wignore:{msg}:PendingDeprecationWarning',
                '-c', code
            ]
            subprocess.check_call(cmd, env=sub_env)
            self.assertTrue(os.path.exists(pickle_sub_name))

            with open(pickle_name, 'rb') as pickle_f_1, \
                    open(pickle_sub_name, 'rb') as pickle_f_2:
                self.assertEqual(
                    pickle_f_1.read(),
                    pickle_f_2.read(),
                    msg='Grammar caches generated using different hash seeds'
                    ' were not identical.')
        finally:
            shutil.rmtree(tmpdir)
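
A related detail (used positionally in Example #24 below): load_grammar() also accepts an explicit pickle path as its second argument, which lets a caller direct the cache to a location it controls. Paths in this sketch are illustrative:

from lib2to3.pgen2 import driver as pgen2_driver

# gp= names the pickle file explicitly instead of deriving it from the grammar
# path; both paths here are hypothetical.
pgen2_driver.load_grammar("Grammar.txt", gp="/tmp/Grammar.pickle",
                          save=True, force=True)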
Example #8
def main():
    gr = driver.load_grammar("Grammar.txt")
    dr = driver.Driver(gr, convert=pytree.convert)

    fn = "example.py"
    tree = dr.parse_file(fn, debug=True)
    if not diff(fn, tree):
        print("No diffs.")
    if not sys.argv[1:]:
        return  # Pass a dummy argument to run the complete test suite below

    problems = []

    # Process every imported module
    for name in sys.modules:
        mod = sys.modules[name]
        if mod is None or not hasattr(mod, "__file__"):
            continue
        fn = mod.__file__
        if fn.endswith(".pyc"):
            fn = fn[:-1]
        if not fn.endswith(".py"):
            continue
        print("Parsing", fn, file=sys.stderr)
        tree = dr.parse_file(fn, debug=True)
        if diff(fn, tree):
            problems.append(fn)

    # Process every single module on sys.path (but not in packages)
    for dir in sys.path:
        try:
            names = os.listdir(dir)
        except OSError:
            continue
        print("Scanning", dir, "...", file=sys.stderr)
        for name in names:
            if not name.endswith(".py"):
                continue
            print("Parsing", name, file=sys.stderr)
            fn = os.path.join(dir, name)
            try:
                tree = dr.parse_file(fn, debug=True)
            except pgen2.parse.ParseError as err:
                print("ParseError:", err)
            else:
                if diff(fn, tree):
                    problems.append(fn)

    # Show summary of problem files
    if not problems:
        print("No problems.  Congratulations!")
    else:
        print("Problems in following files:")
        for fn in problems:
            print("***", fn)
Example #9
    def test_load_grammar_from_subprocess(self):
        tmpdir = tempfile.mkdtemp()
        tmpsubdir = os.path.join(tmpdir, 'subdir')
        try:
            os.mkdir(tmpsubdir)
            grammar_base = os.path.basename(support.grammar_path)
            grammar_copy = os.path.join(tmpdir, grammar_base)
            grammar_sub_copy = os.path.join(tmpsubdir, grammar_base)
            shutil.copy(support.grammar_path, grammar_copy)
            shutil.copy(support.grammar_path, grammar_sub_copy)
            pickle_name = pgen2_driver._generate_pickle_name(grammar_copy)
            pickle_sub_name = pgen2_driver._generate_pickle_name(
                     grammar_sub_copy)
            self.assertNotEqual(pickle_name, pickle_sub_name)

            # Generate a pickle file from this process.
            pgen2_driver.load_grammar(grammar_copy, save=True, force=True)
            self.assertTrue(os.path.exists(pickle_name))

            # Generate a new pickle file in a subprocess with a most likely
            # different hash randomization seed.
            sub_env = dict(os.environ)
            sub_env['PYTHONHASHSEED'] = 'random'
            subprocess.check_call(
                    [sys.executable, '-c', """
from lib2to3.pgen2 import driver as pgen2_driver
pgen2_driver.load_grammar(%r, save=True, force=True)
                    """ % (grammar_sub_copy,)],
                    env=sub_env)
            self.assertTrue(os.path.exists(pickle_sub_name))

            with open(pickle_name, 'rb') as pickle_f_1, \
                    open(pickle_sub_name, 'rb') as pickle_f_2:
                self.assertEqual(
                    pickle_f_1.read(), pickle_f_2.read(),
                    msg='Grammar caches generated using different hash seeds'
                    ' were not identical.')
        finally:
            shutil.rmtree(tmpdir)
Example #10
    def test_load_grammar_from_subprocess(self):
        tmpdir = tempfile.mkdtemp()
        tmpsubdir = os.path.join(tmpdir, 'subdir')
        try:
            os.mkdir(tmpsubdir)
            grammar_base = os.path.basename(support.grammar_path)
            grammar_copy = os.path.join(tmpdir, grammar_base)
            grammar_sub_copy = os.path.join(tmpsubdir, grammar_base)
            shutil.copy(support.grammar_path, grammar_copy)
            shutil.copy(support.grammar_path, grammar_sub_copy)
            pickle_name = pgen2_driver._generate_pickle_name(grammar_copy)
            pickle_sub_name = pgen2_driver._generate_pickle_name(
                grammar_sub_copy)
            self.assertNotEqual(pickle_name, pickle_sub_name)
            pgen2_driver.load_grammar(grammar_copy, save=True, force=True)
            self.assertTrue(os.path.exists(pickle_name))
            sub_env = dict(os.environ)
            sub_env['PYTHONHASHSEED'] = 'random'
            subprocess.check_call(
                [sys.executable, '-c', """
from lib2to3.pgen2 import driver as pgen2_driver
pgen2_driver.load_grammar(%r, save=True, force=True)
""" % (grammar_sub_copy,)],
                env=sub_env)
            self.assertTrue(os.path.exists(pickle_sub_name))
            with open(pickle_name, 'rb') as pickle_f_1, \
                    open(pickle_sub_name, 'rb') as pickle_f_2:
                self.assertEqual(
                    pickle_f_1.read(), pickle_f_2.read(),
                    msg='Grammar caches generated using different hash seeds'
                        ' were not identical.')
        finally:
            shutil.rmtree(tmpdir)
Example #11
Should also produce token.py

"""
from string import Template
import datetime
import sys
from lib2to3.compiler import parser
from lib2to3.pgen2.driver import load_grammar, grammar as grammar_module

gen_date = datetime.datetime.now()
python_version = sys.version.split('\n')[0]

print "Generating grammar2x.py"

opmap = grammar_module.opmap
g = load_grammar('grammar2x.txt', force=True)
#g3 = load_grammar('grammar3x.txt', force=True)
g_templ = Template(open('grammar2x.py.templ').read())
attrs_assign = []
attrs = ['symbol2number', 'number2symbol', 'states', 'dfas', 'labels', 'keywords', 'tokens', 'symbol2label', 'start']

for attr in attrs:
    attrs_assign.append(''.join([attr, " = ", repr(getattr(g, attr))]))

attrs_assign_str = ("\n" + " "*4).join(attrs_assign)

opmap_assign = "opmap = {0!r}".format(opmap)

out = g_templ.substitute(gen_date=gen_date,
                         python_version=python_version,
                         grammar_attrs_assign=attrs_assign_str, 
Example #12
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Export the Python grammar and symbols."""

# Python imports
import os

# Local imports
from lib2to3.pgen2 import token
from lib2to3.pgen2 import driver
from lib2to3 import pytree

# The grammar file
_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")


class Symbols(object):
    def __init__(self, grammar):
        """Initializer.

        Creates an attribute for each grammar symbol (nonterminal),
        whose value is the symbol's type (an int >= 256).
        """
        for name, symbol in grammar.symbol2number.iteritems():
            setattr(self, name, symbol)


python_grammar = driver.load_grammar(_GRAMMAR_FILE)
python_symbols = Symbols(python_grammar)
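
This excerpt is Python 2 code (dict.iteritems() no longer exists). Under Python 3 the same module would spell the loop with items(), roughly:

from lib2to3.pgen2 import driver

class Symbols(object):
    def __init__(self, grammar):
        # Python 3 spelling: items() instead of iteritems().
        for name, symbol in grammar.symbol2number.items():
            setattr(self, name, symbol)

python_symbols = Symbols(driver.load_grammar("Grammar.txt"))  # path illustrative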
Example #13
# Python imports
import unittest
import sys
import os
import os.path
import re
from textwrap import dedent

# Local imports
from lib2to3 import pytree, refactor
from lib2to3.pgen2 import driver

test_dir = os.path.dirname(__file__)
proj_dir = os.path.normpath(os.path.join(test_dir, ".."))
grammar_path = os.path.join(test_dir, "..", "Grammar.txt")
grammar = driver.load_grammar(grammar_path)
driver = driver.Driver(grammar, convert=pytree.convert)

def parse_string(string):
    return driver.parse_string(reformat(string), debug=True)

def run_all_tests(test_mod=None, tests=None):
    if tests is None:
        tests = unittest.TestLoader().loadTestsFromModule(test_mod)
    unittest.TextTestRunner(verbosity=2).run(tests)

def reformat(string):
    return dedent(string) + u"\n\n"

def get_refactorer(fixer_pkg="lib2to3", fixers=None, options=None):
    """
Example #15
# Author: Collin Winter

# Python imports
import unittest
import os
import os.path
from textwrap import dedent

# Local imports
from lib2to3 import pytree, refactor
from lib2to3.pgen2 import driver as pgen2_driver

test_dir = os.path.dirname(__file__)
proj_dir = os.path.normpath(os.path.join(test_dir, ".."))
grammar_path = os.path.join(test_dir, "..", "Grammar.txt")
grammar = pgen2_driver.load_grammar(grammar_path)
driver = pgen2_driver.Driver(grammar, convert=pytree.convert)

def parse_string(string):
    return driver.parse_string(reformat(string), debug=True)

def run_all_tests(test_mod=None, tests=None):
    if tests is None:
        tests = unittest.TestLoader().loadTestsFromModule(test_mod)
    unittest.TextTestRunner(verbosity=2).run(tests)

def reformat(string):
    return dedent(string) + "\n\n"

def get_refactorer(fixer_pkg="lib2to3", fixers=None, options=None):
    """
Example #16
# Author: Collin Winter

# Python imports
import unittest
import os
import os.path
from textwrap import dedent

# Local imports
from lib2to3 import pytree, refactor
from lib2to3.pgen2 import driver as pgen2_driver

test_dir = os.path.dirname(__file__)
proj_dir = os.path.normpath(os.path.join(test_dir, ".."))
grammar_path = os.path.join(test_dir, "..", "Grammar.txt")
grammar = pgen2_driver.load_grammar(grammar_path)
grammar_no_print_statement = pgen2_driver.load_grammar(grammar_path)
del grammar_no_print_statement.keywords["print"]
driver = pgen2_driver.Driver(grammar, convert=pytree.convert)
driver_no_print_statement = pgen2_driver.Driver(grammar_no_print_statement,
                                                convert=pytree.convert)


def parse_string(string):
    return driver.parse_string(reformat(string), debug=True)


def run_all_tests(test_mod=None, tests=None):
    if tests is None:
        tests = unittest.TestLoader().loadTestsFromModule(test_mod)
    unittest.TextTestRunner(verbosity=2).run(tests)
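
Why two drivers: with "print" in the grammar's keyword table, a Python 3 style call such as print('x', file=f) cannot be parsed, because keyword arguments are not valid inside a print statement; deleting the keyword lets "print" be read as an ordinary name. A minimal sketch, assuming the definitions above are in scope:

# Parses only with the patched grammar: 'print' is a plain NAME here, so the
# line is an ordinary call with a keyword argument.
tree = driver_no_print_statement.parse_string("print('x', file=f)\n", debug=True)
print(tree)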
Example #17
    def test_load_grammar_from_txt_file(self):
        pgen2_driver.load_grammar(support.grammar_path, save=False, force=True)
Example #20
# Author: Collin Winter

# Python imports
import unittest
import os
import os.path
from textwrap import dedent

# Local imports
from lib2to3 import pytree, refactor
from lib2to3.pgen2 import driver as pgen2_driver

test_dir = os.path.dirname(__file__)
proj_dir = os.path.normpath(os.path.join(test_dir, ".."))
grammar_path = os.path.join(test_dir, "..", "Grammar.txt")
grammar = pgen2_driver.load_grammar(grammar_path)
grammar_no_print_statement = pgen2_driver.load_grammar(grammar_path)
del grammar_no_print_statement.keywords["print"]
driver = pgen2_driver.Driver(grammar, convert=pytree.convert)
driver_no_print_statement = pgen2_driver.Driver(
    grammar_no_print_statement,
    convert=pytree.convert
)

def parse_string(string):
    return driver.parse_string(reformat(string), debug=True)

def run_all_tests(test_mod=None, tests=None):
    if tests is None:
        tests = unittest.TestLoader().loadTestsFromModule(test_mod)
    unittest.TextTestRunner(verbosity=2).run(tests)
Example #21
from lib2to3.pgen2.driver import load_grammar
from lib2to3.pgen2.driver import Driver
import os

gpath = os.path.join(os.path.abspath(os.path.dirname(__file__)), "Grammar.txt")
g = load_grammar(gpath)


def suite(text):
    d = Driver(g)
    return d.parse_string(text)


# dummy
def st2tuple(tree, line_info=1):
    return tree
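
A short usage sketch for the suite() helper above (the input text is illustrative; lib2to3 expects source that ends with a newline):

tree = suite("x = 1\n")
print(tree)  # pgen2's raw tuple-based result, since Driver() got no convert=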
Example #22
# Python imports
import unittest
import sys
import os
import os.path
import re
from textwrap import dedent

# Local imports
from lib2to3 import pytree, refactor
from lib2to3.pgen2 import driver as pgen2_driver

test_dir = os.path.dirname(__file__)
proj_dir = os.path.normpath(os.path.join(test_dir, ".."))
grammar_path = os.path.join(test_dir, "..", "Grammar.txt")
grammar = pgen2_driver.load_grammar(grammar_path)
driver = pgen2_driver.Driver(grammar, convert=pytree.convert)


def parse_string(string):
    return driver.parse_string(reformat(string), debug=True)


def run_all_tests(test_mod=None, tests=None):
    if tests is None:
        tests = unittest.TestLoader().loadTestsFromModule(test_mod)
    unittest.TextTestRunner(verbosity=2).run(tests)


def reformat(string):
    return dedent(string) + "\n\n"
Example #24
# gen_2to3_grammar.py: input

import sys

from lib2to3.pgen2 import driver

gp = driver._generate_pickle_name(sys.argv[1])
driver.load_grammar(sys.argv[1], gp, force=True)
print(gp)
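
The same regeneration step, wrapped as a reusable function (a sketch; the wrapper name is hypothetical and _generate_pickle_name is a private API):

import sys

from lib2to3.pgen2 import driver

def regenerate_grammar_pickle(grammar_txt):
    # Derive the version-stamped pickle path, force a rebuild, and return the
    # path so callers (e.g. build scripts) can pick it up.
    pickle_path = driver._generate_pickle_name(grammar_txt)
    driver.load_grammar(grammar_txt, pickle_path, force=True)
    return pickle_path

if __name__ == "__main__":
    print(regenerate_grammar_pickle(sys.argv[1]))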
Example #25
File: parser.py Project: Afey/pyjs
from lib2to3.pgen2.driver import load_grammar
from lib2to3.pgen2.driver import Driver
import os

gpath = os.path.join(os.path.abspath(os.path.dirname(__file__)), "Grammar.txt")
g = load_grammar(gpath)

def suite(text):
    d = Driver(g)
    return d.parse_string(text)

# dummy
def st2tuple(tree, line_info=1):
    return tree