Example #1
def convert_dataset_metadata(in_dir, out_dir):
    meta_dict = OrderedDict()
    meta_dict["BIDSVersion"] = "1.0.0"
    
    study_key_file = os.path.join(in_dir, "study_key.txt")
    if os.path.exists(study_key_file):
        meta_dict["Name"] = tokenize.open(study_key_file).read().strip()
    else:
        if in_dir.endswith(os.sep):
            meta_dict["Name"] = in_dir.split(os.sep)[-1]
        else:
            meta_dict["Name"] = in_dir.split(os.sep)[-2]
        
    ref_file = os.path.join(in_dir, "references.txt")
    if os.path.exists(ref_file):
        meta_dict["ReferencesAndLinks"] = tokenize.open(ref_file).read().strip()
        
    lic_file = os.path.join(in_dir, "license.txt")
    if os.path.exists(lic_file):
        meta_dict["License"] = tokenize.open(lic_file).read().strip()
        
    json.dump(meta_dict, open(os.path.join(out_dir,
                                           "dataset_description.json"), "w"),
                  sort_keys=True, indent=4, separators=(',', ': '))
              
    readme = os.path.join(in_dir, "README")
    if os.path.exists(readme):
        shutil.copy(readme, os.path.join(out_dir,"README"))
    elif os.path.exists(readme + ".txt"):
        shutil.copy(readme + ".txt", os.path.join(out_dir,"README"))
Example #2
    def investigate_pep8_status(self, filename):
        sys.stdout.write("%s: " % (filename,))
        sys.stdout.flush()
        with tokenize.open(filename) as i:
            source = i.read()
        if not is_valid_source(source):
            return
        errors = self.errors_in_source(source)

        if errors:
            print(', '.join(errors))
        else:
            print('clean')
            return

        changed = True
        while changed:
            changed = False
            for error, source in list(self.best_examples.items()):
                self.note_source(source)
                target = self.example_file_for_error(error)
                if os.path.exists(target):
                    existing_length = len(tokenize.open(target).read())
                    if existing_length <= len(source):
                        continue
                    else:
                        print((
                            "A smaller example for %s (%d < %d). Simplifying "
                            "an example from %s"
                        ) % (
                            error,
                            len(source), existing_length,
                            self.trash_file(source)))

                else:

                    print(
                        '%s is new. Simplifying an example from %s' % (
                            error, self.trash_file(source)))

                changed = True
                example = self.find_minimal_example_from_source(
                    source,
                    is_example=lambda source:
                        error in self.errors_in_source(
                            source),
                )
                assert len(example) <= len(source)
                with open(target, 'w') as o:
                    o.write(example)
Example #3
File: embedding.py Project: m-labs/artiq
def main():
    if len(sys.argv) > 1 and sys.argv[1] == "+diag":
        del sys.argv[1]
        diag = True
    else:
        diag = False

    if len(sys.argv) > 1 and sys.argv[1] == "+compile":
        del sys.argv[1]
        compile_only = True
    else:
        compile_only = False

    ddb_path = os.path.join(os.path.dirname(sys.argv[1]), "device_db.py")
    dmgr = DeviceManager(DeviceDB(ddb_path))

    with tokenize.open(sys.argv[1]) as f:
        testcase_code = compile(f.read(), f.name, "exec")
        testcase_vars = {'__name__': 'testbench', 'dmgr': dmgr}
        exec(testcase_code, testcase_vars)

    try:
        core = dmgr.get("core")
        if compile_only:
            core.compile(testcase_vars["entrypoint"], (), {})
        else:
            core.run(testcase_vars["entrypoint"], (), {})
    except CompileError as error:
        if not diag:
            exit(1)
Example #4
 def _open(self, filename):
     if filename.endswith('.py') and hasattr(tokenize, 'open'):
         # On Python 3.2 and newer, open Python files with tokenize.open().
         # This function uses the encoding cookie to get the encoding.
         return tokenize.open(filename)
     else:
         return open(filename)
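A minimal, self-contained sketch of what the comment in Example #4 describes (nothing here comes from the project above; the file is created via tempfile): tokenize.open() reads the PEP 263 coding cookie and decodes the file with it, which plain open() would not do by default.

import tempfile
import tokenize

# A source file whose coding cookie declares latin-1; byte 0xE9 is 'é' in that encoding.
source_bytes = b"# -*- coding: latin-1 -*-\nname = '\xe9'\n"
with tempfile.NamedTemporaryFile(suffix=".py", delete=False) as tmp:
    tmp.write(source_bytes)

with tokenize.open(tmp.name) as f:
    print(f.encoding)  # normalized cookie name, e.g. 'iso-8859-1'
    print(f.read())    # decoded using the cookie, so the 0xE9 byte comes back as 'é'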
Example #5
def main():
    def make_callback(text):
        return count_calls_decorator(
            lambda file_, start, stop: log(text, file_, start, stop)
        )

    nci_callback = make_callback('None-coalescing `if` block')
    nco_callback = make_callback('[Possible] None-coalescing `or`')
    nct_callback = make_callback('None-coalescing ternary')
    sna_callback = make_callback('Safe navigation `and`')
    sni_callback = make_callback('Safe navigation `if` block')
    snt_callback = make_callback('Safe navigation ternary')

    files = sys.argv[1:]
    if files:
        expanded_files = []
        for file_ in files:
            if '*' in file_:
                expanded_files.extend(glob.glob(file_))
            else:
                expanded_files.append(file_)
        files = expanded_files  # use the expanded list below
    else:
        files = glob.glob(os.path.join(sys.prefix, 'Lib', '**', '*.py'))

    for file_ in files:
        try:
            source = tokenize.open(file_)
        except (SyntaxError, UnicodeDecodeError):
            continue

        with source:
            try:
                tree = ast.parse(source.read(), filename=file_)
            except SyntaxError:
                continue

            NoneCoalesceIfBlockVisitor(file_, nci_callback).visit(tree)
            NoneCoalesceOrVisitor(file_, nco_callback).visit(tree)
            NoneCoalesceTernaryVisitor(file_, nct_callback).visit(tree)
            SafeNavAndVisitor(file_, sna_callback).visit(tree)
            SafeNavIfBlockVisitor(file_, sni_callback).visit(tree)
            SafeNavTernaryVisitor(file_, snt_callback).visit(tree)

    print('Total None-coalescing `if` blocks: {}'
          .format(get_call_count(nci_callback)))

    print('Total [possible] None-coalescing `or`: {}'
          .format(get_call_count(nco_callback)))

    print('Total None-coalescing ternaries: {}'
          .format(get_call_count(nct_callback)))

    print('Total Safe navigation `and`: {}'
          .format(get_call_count(sna_callback)))

    print('Total Safe navigation `if` blocks: {}'
          .format(get_call_count(sni_callback)))

    print('Total Safe navigation ternaries: {}'
          .format(get_call_count(snt_callback)))
Example #6
def check_spelling(target):
    """Check commonly misspelled words."""
    # Words which I often misspell
    words = {'behaviour', 'quitted', 'likelyhood', 'sucessfully',
             'occur[^r .]', 'seperator', 'explicitely', 'resetted',
             'auxillary', 'accidentaly', 'ambigious', 'loosly',
             'initialis', 'convienence', 'similiar', 'uncommited',
             'reproducable'}

    # Words which look better when splitted, but might need some fine tuning.
    words |= {'keystrings', 'webelements', 'mouseevent', 'keysequence',
              'normalmode', 'eventloops', 'sizehint', 'statemachine',
              'metaobject', 'logrecord', 'monkeypatch', 'filetype'}

    seen = collections.defaultdict(list)
    try:
        ok = True
        for fn in _py_files(target):
            with tokenize.open(fn) as f:
                if fn == os.path.join('scripts', 'misc_checks.py'):
                    continue
                for line in f:
                    for w in words:
                        if re.search(w, line) and fn not in seen[w]:
                            print("Found '{}' in {}!".format(w, fn))
                            seen[w].append(fn)
                            ok = False
        print()
        return ok
    except Exception:
        traceback.print_exc()
        return None
Example #7
File: sixer.py Project: mscuthbert/sixer
    def patch(self, filename):
        self.current_file = filename

        with tokenize.open(filename) as fp:
            content = fp.read()

        old_content = content
        for operation in self.operations:
            content = operation.patch(content)

        if content == old_content:
            # no change
            self.check(content)
            if self.options.to_stdout:
                self.write_stdout(content)
            return False

        with open(filename, "rb") as fp:
            encoding, _ = tokenize.detect_encoding(fp.readline)

        if not self.options.quiet:
            print("Patch %s" % filename)
        if not self.options.to_stdout:
            with open(filename, "w", encoding=encoding) as fp:
                fp.write(content)
        else:
            self.write_stdout(content)
        self.check(content)
        return True
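A hedged side note on the write-back step in Example #7 ("example.py" below is only a placeholder path): tokenize.detect_encoding() consumes at most two lines from the readline callable and returns the detected encoding together with the raw lines it read, so the caller can re-open the file for writing with the same encoding.

import tokenize

with open("example.py", "rb") as fp:
    encoding, consumed_lines = tokenize.detect_encoding(fp.readline)

print(encoding)        # e.g. 'utf-8' when no cookie or BOM is present
print(consumed_lines)  # the byte lines detect_encoding had to read (at most two)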
Example #8
def read_py_file(filepath):
    if sys.version_info < (3, ):
        return open(filepath, 'rU').read()
    else:
        # see https://docs.python.org/3/library/tokenize.html#tokenize.detect_encoding
        # first just see if the file is properly encoded
        try:
            with open(filepath, 'rb') as f:
                tokenize.detect_encoding(f.readline)
        except SyntaxError as err:
            # this warning is issued:
            #   (1) in badly authored files (contains non-utf8 in a comment line)
            #   (2) a coding is specified, but wrong and
            #   (3) no coding is specified, and the default
            #       'utf8' fails to decode.
            #   (4) the encoding specified by a pep263 declaration did not match
            #       with the encoding detected by inspecting the BOM
            raise CouldNotHandleEncoding(filepath, err)

        try:
            return tokenize.open(filepath).read()
            # this warning is issued:
            #   (1) if utf-8 is specified, but latin1 is used with something like \x0e9 appearing
            #       (see http://stackoverflow.com/a/5552623)
        except UnicodeDecodeError as err:
            raise CouldNotHandleEncoding(filepath, err)
Example #9
def check_spelling():
    """Check commonly misspelled words."""
    # Words which I often misspell
    words = {'[Bb]ehaviour', '[Qq]uitted', '[Ll]ikelyhood', '[Ss]ucessfully',
             '[Oo]ccur[^r .]', '[Ss]eperator', '[Ee]xplicitely', '[Rr]esetted',
             '[Aa]uxillary', '[Aa]ccidentaly', '[Aa]mbigious', '[Ll]oosly',
             '[Ii]nitialis', '[Cc]onvienence', '[Ss]imiliar', '[Uu]ncommited',
             '[Rr]eproducable'}

    # Words which look better when splitted, but might need some fine tuning.
    words |= {'[Kk]eystrings', '[Ww]ebelements', '[Mm]ouseevent',
              '[Kk]eysequence', '[Nn]ormalmode', '[Ee]ventloops',
              '[Ss]izehint', '[Ss]tatemachine', '[Mm]etaobject',
              '[Ll]ogrecord', '[Ff]iletype'}

    seen = collections.defaultdict(list)
    try:
        ok = True
        for fn in _py_files():
            with tokenize.open(fn) as f:
                if fn == os.path.join('.', 'scripts', 'misc_checks.py'):
                    continue
                for line in f:
                    for w in words:
                        if re.search(w, line) and fn not in seen[w]:
                            print('Found "{}" in {}!'.format(w, fn))
                            seen[w].append(fn)
                            ok = False
        print()
        return ok
    except Exception:
        traceback.print_exc()
        return None
Example #10
File: __init__.py Project: nigef/pyta
def _verify_pre_check(filepath):
    """Check student code for certain issues."""
    # Make sure the program doesn't crash for students.
    # Could use some improvement for better logging and error reporting.
    try:
        # Check for inline "pylint:" comment, which may indicate a student
        # trying to disable a check.
        with tokenize.open(os.path.expanduser(filepath)) as f:
            for tok_type, content, _, _, _ in tokenize.generate_tokens(f.readline):
                if tok_type != tokenize.COMMENT:
                    continue
                match = pylint.utils.OPTION_RGX.search(content)
                if match is not None:
                    print('ERROR: string "pylint:" found in comment. ' +
                          'No check run on file `{}`\n'.format(filepath))
                    return False
    except IndentationError as e:
        print('ERROR: python_ta could not check your code due to an ' +
              'indentation error at line {}'.format(e.lineno))
        return False
    except tokenize.TokenError as e:
        print('ERROR: python_ta could not check your code due to a ' +
              'syntax error in your file')
        return False
    return True
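The same pattern as Example #10, reduced to a generic sketch (iter_comments is a hypothetical helper, not part of python_ta): pair tokenize.open() with tokenize.generate_tokens() and keep only COMMENT tokens.

import tokenize

def iter_comments(filepath):
    """Yield (line number, text) for every comment token in a Python file."""
    with tokenize.open(filepath) as f:
        for tok in tokenize.generate_tokens(f.readline):
            if tok.type == tokenize.COMMENT:
                yield tok.start[0], tok.string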
Example #11
File: report.py Project: alexandrul/mypy
    def on_file(self, tree: MypyFile, type_map: Dict[Expression, Type]) -> None:
        self.last_xml = None
        path = os.path.relpath(tree.path)
        if stats.is_special_module(path):
            return
        if path.startswith('..'):
            return
        if 'stubs' in path.split('/'):
            return

        visitor = stats.StatisticsVisitor(inferred=True, typemap=type_map, all_nodes=True)
        tree.accept(visitor)

        root = etree.Element('mypy-report-file', name=path, module=tree._fullname)
        doc = etree.ElementTree(root)
        file_info = FileInfo(path, tree._fullname)

        with tokenize.open(path) as input_file:
            for lineno, line_text in enumerate(input_file, 1):
                status = visitor.line_map.get(lineno, stats.TYPE_EMPTY)
                file_info.counts[status] += 1
                etree.SubElement(root, 'line',
                                 number=str(lineno),
                                 precision=stats.precision_names[status],
                                 content=line_text[:-1])
        # Assumes a layout similar to what XmlReporter uses.
        xslt_path = os.path.relpath('mypy-html.xslt', path)
        transform_pi = etree.ProcessingInstruction('xml-stylesheet',
                'type="text/xsl" href="%s"' % cgi.escape(xslt_path, True))
        root.addprevious(transform_pi)
        self.schema.assertValid(doc)

        self.last_xml = doc
        self.files.append(file_info)
Example #12
def updatecache(filename, module_globals=None):
    """Update a cache entry and return its list of lines.
    If something's wrong, print a message, discard the cache entry,
    and return an empty list."""

    if filename in cache:
        if len(cache[filename]) != 1:
            del cache[filename]
    if not filename or (filename.startswith("<") and filename.endswith(">")):
        return []

    fullname = filename
    try:
        stat = os.stat(fullname)
    except OSError:
        basename = filename

        # Realise a lazy loader based lookup if there is one
        # otherwise try to lookup right now.
        if lazycache(filename, module_globals):
            try:
                data = cache[filename][0]()
            except (ImportError, OSError):
                pass
            else:
                if data is None:
                    # No luck, the PEP302 loader cannot find the source
                    # for this module.
                    return []
                cache[filename] = (len(data), None, [line + "\n" for line in data.splitlines()], fullname)
                return cache[filename][2]

        # Try looking through the module search path, which is only useful
        # when handling a relative filename.
        if os.path.isabs(filename):
            return []

        for dirname in sys.path:
            try:
                fullname = os.path.join(dirname, basename)
            except (TypeError, AttributeError):
                # Not sufficiently string-like to do anything useful with.
                continue
            try:
                stat = os.stat(fullname)
                break
            except OSError:
                pass
        else:
            return []
    try:
        with tokenize.open(fullname) as fp:
            lines = fp.readlines()
    except OSError:
        return []
    if lines and not lines[-1].endswith("\n"):
        lines[-1] += "\n"
    size, mtime = stat.st_size, stat.st_mtime
    cache[filename] = size, mtime, lines, fullname
    return lines
Example #13
def check(file):
    """check(file_or_dir)

    If file_or_dir is a directory and not a symbolic link, then recursively
    descend the directory tree named by file_or_dir, checking all .py files
    along the way. If file_or_dir is an ordinary Python source file, it is
    checked for whitespace related problems. The diagnostic messages are
    written to standard output using the print statement.
    """

    if os.path.isdir(file) and not os.path.islink(file):
        if verbose:
            print("%r: listing directory" % (file,))
        names = os.listdir(file)
        for name in names:
            fullname = os.path.join(file, name)
            if (os.path.isdir(fullname) and
                not os.path.islink(fullname) or
                os.path.normcase(name[-3:]) == ".py"):
                check(fullname)
        return

    try:
        f = tokenize.open(file)
    except IOError as msg:
        errprint("%r: I/O Error: %s" % (file, msg))
        return

    if verbose > 1:
        print("checking %r ..." % file)

    try:
        process_tokens(tokenize.generate_tokens(f.readline))

    except tokenize.TokenError as msg:
        errprint("%r: Token Error: %s" % (file, msg))
        return

    except IndentationError as msg:
        errprint("%r: Indentation Error: %s" % (file, msg))
        return

    except NannyNag as nag:
        badline = nag.get_lineno()
        line = nag.get_line()
        if verbose:
            print("%r: *** Line %d: trouble in tab city! ***" % (file, badline))
            print("offending line: %r" % (line,))
            print(nag.get_msg())
        else:
            if ' ' in file: file = '"' + file + '"'
            if filename_only: print(file)
            else: print(file, badline, repr(line))
        return

    finally:
        f.close()

    if verbose:
        print("%r: Clean bill of health." % (file,))
Example #14
File: report.py Project: chadrik/mypy
    def on_file(self,
                tree: MypyFile,
                type_map: Dict[Expression, Type],
                options: Options) -> None:
        path = os.path.relpath(tree.path)
        visitor = stats.StatisticsVisitor(inferred=True, filename=tree.fullname(),
                                          typemap=type_map, all_nodes=True)
        tree.accept(visitor)

        class_name = os.path.basename(path)
        file_info = FileInfo(path, tree._fullname)
        class_element = etree.Element('class',
                                      filename=path,
                                      complexity='1.0',
                                      name=class_name)
        etree.SubElement(class_element, 'methods')
        lines_element = etree.SubElement(class_element, 'lines')

        with tokenize.open(path) as input_file:
            class_lines_covered = 0
            class_total_lines = 0
            for lineno, _ in enumerate(input_file, 1):
                status = visitor.line_map.get(lineno, stats.TYPE_EMPTY)
                hits = 0
                branch = False
                if status == stats.TYPE_EMPTY:
                    continue
                class_total_lines += 1
                if status != stats.TYPE_ANY:
                    class_lines_covered += 1
                    hits = 1
                if status == stats.TYPE_IMPRECISE:
                    branch = True
                file_info.counts[status] += 1
                line_element = etree.SubElement(lines_element, 'line',
                                                number=str(lineno),
                                                precision=stats.precision_names[status],
                                                hits=str(hits),
                                                branch=str(branch).lower())
                if branch:
                    line_element.attrib['condition-coverage'] = '50% (1/2)'
            class_element.attrib['branch-rate'] = '0'
            class_element.attrib['line-rate'] = get_line_rate(class_lines_covered,
                                                              class_total_lines)
            # parent_module is set to whichever module contains this file.  For most files, we want
            # to simply strip the last element off of the module.  But for __init__.py files,
            # the module == the parent module.
            parent_module = file_info.module.rsplit('.', 1)[0]
            if file_info.name.endswith('__init__.py'):
                parent_module = file_info.module

            if parent_module not in self.root_package.packages:
                self.root_package.packages[parent_module] = CoberturaPackage(parent_module)
            current_package = self.root_package.packages[parent_module]
            packages_to_update = [self.root_package, current_package]
            for package in packages_to_update:
                package.total_lines += class_total_lines
                package.covered_lines += class_lines_covered
            current_package.classes[class_name] = class_element
Example #15
 def build_from_file(self, filename):
     self.filename = filename
     modtxt = ""
     with tokenize.open(filename) as f:
         modtxt = f.read()
     self.source = modtxt
     modast = ast.parse(modtxt, mode="exec")
     self.build_from_ast(modast)
Example #16
    def test_getline(self):
        with tokenize.open(self.file_name) as fp:
            for index, line in enumerate(fp):
                if not line.endswith('\n'):
                    line += '\n'

                cached_line = linecache.getline(self.file_name, index + 1)
                self.assertEqual(line, cached_line)
Example #17
def main():
    if not len(sys.argv) == 2:
        print("Expected exactly one module filename", file=sys.stderr)
        exit(1)

    def process_diagnostic(diag):
        print("\n".join(diag.render()), file=sys.stderr)
        if diag.level in ("fatal", "error"):
            exit(1)

    engine = diagnostic.Engine()
    engine.process = process_diagnostic

    with tokenize.open(sys.argv[1]) as f:
        testcase_code = compile(f.read(), f.name, "exec")
        testcase_vars = {'__name__': 'testbench'}
        exec(testcase_code, testcase_vars)

    device_db_path = os.path.join(os.path.dirname(sys.argv[1]), "device_db.py")
    device_mgr = DeviceManager(DeviceDB(device_db_path))

    dataset_db_path = os.path.join(os.path.dirname(sys.argv[1]), "dataset_db.pyon")
    dataset_mgr = DatasetManager(DatasetDB(dataset_db_path))

    argument_mgr = ProcessArgumentManager({})

    def embed():
        experiment = testcase_vars["Benchmark"]((device_mgr, dataset_mgr, argument_mgr))

        stitcher = Stitcher(core=experiment.core, dmgr=device_mgr)
        stitcher.stitch_call(experiment.run, (), {})
        stitcher.finalize()
        return stitcher

    stitcher = embed()
    module = Module(stitcher)
    target = OR1KTarget()
    llvm_ir = target.compile(module)
    elf_obj = target.assemble(llvm_ir)
    elf_shlib = target.link([elf_obj])

    benchmark(lambda: embed(),
              "ARTIQ embedding")

    benchmark(lambda: Module(stitcher),
              "ARTIQ transforms and validators")

    benchmark(lambda: target.compile(module),
              "LLVM optimizations")

    benchmark(lambda: target.assemble(llvm_ir),
              "LLVM machine code emission")

    benchmark(lambda: target.link([elf_obj]),
              "Linking")

    benchmark(lambda: target.strip(elf_shlib),
              "Stripping debug information")
Example #18
def check_spelling():
    """Check commonly misspelled words."""
    # Words which I often misspell
    words = {
        "[Bb]ehaviour",
        "[Qq]uitted",
        "Ll]ikelyhood",
        "[Ss]ucessfully",
        "[Oo]ccur[^r .]",
        "[Ss]eperator",
        "[Ee]xplicitely",
        "[Rr]esetted",
        "[Aa]uxillary",
        "[Aa]ccidentaly",
        "[Aa]mbigious",
        "[Ll]oosly",
        "[Ii]nitialis",
        "[Cc]onvienence",
        "[Ss]imiliar",
        "[Uu]ncommited",
        "[Rr]eproducable",
        "[Aa]n [Uu]ser",
    }

    # Words which look better when splitted, but might need some fine tuning.
    words |= {
        "[Ww]ebelements",
        "[Mm]ouseevent",
        "[Kk]eysequence",
        "[Nn]ormalmode",
        "[Ee]ventloops",
        "[Ss]izehint",
        "[Ss]tatemachine",
        "[Mm]etaobject",
        "[Ll]ogrecord",
        "[Ff]iletype",
    }

    seen = collections.defaultdict(list)
    try:
        ok = True
        for fn in _get_files():
            with tokenize.open(fn) as f:
                if fn == os.path.join(".", "scripts", "dev", "misc_checks.py"):
                    continue
                for line in f:
                    for w in words:
                        if re.search(w, line) and fn not in seen[w]:
                            print('Found "{}" in {}!'.format(w, fn))
                            seen[w].append(fn)
                            ok = False
        print()
        return ok
    except Exception:
        traceback.print_exc()
        return None
Example #19
def check(filenames, select=None, ignore=None, ignore_decorators=None):
    """Generate docstring errors that exist in `filenames` iterable.

    By default, the PEP-257 convention is checked. To specifically define the
    set of error codes to check for, supply either `select` or `ignore` (but
    not both). In either case, the parameter should be a collection of error
    code strings, e.g., {'D100', 'D404'}.

    When supplying `select`, only specified error codes will be reported.
    When supplying `ignore`, all error codes which were not specified will be
    reported.

    Note that ignored error codes refer to the entire set of possible
    error codes, which is larger than just the PEP-257 convention. For your
    convenience, you may use `pydocstyle.violations.conventions.pep257` as
    a base set to add or remove errors from.

    Examples
    ---------
    >>> check(['pydocstyle.py'])
    <generator object check at 0x...>

    >>> check(['pydocstyle.py'], select=['D100'])
    <generator object check at 0x...>

    >>> check(['pydocstyle.py'], ignore=conventions.pep257 - {'D100'})
    <generator object check at 0x...>

    """
    if select is not None and ignore is not None:
        raise IllegalConfiguration('Cannot pass both select and ignore. '
                                   'They are mutually exclusive.')
    elif select is not None:
        checked_codes = select
    elif ignore is not None:
        checked_codes = list(set(violations.ErrorRegistry.get_error_codes()) -
                             set(ignore))
    else:
        checked_codes = violations.conventions.pep257

    for filename in filenames:
        log.info('Checking file %s.', filename)
        try:
            with tk.open(filename) as file:
                source = file.read()
            for error in ConventionChecker().check_source(source, filename,
                                                          ignore_decorators):
                code = getattr(error, 'code', None)
                if code in checked_codes:
                    yield error
        except (EnvironmentError, AllError, ParseError) as error:
            log.warning('Error in file %s: %s', filename, error)
            yield error
        except tk.TokenError:
            yield SyntaxError('invalid syntax in file %s' % filename)
Example #20
def _find_executable_linenos(filename):
    try:
        with tokenize.open(filename) as f:
            prog = f.read()
            encoding = f.encoding
    except IOError as err:
        print('Not printing coverage data for %r: %s' % (filename, err), file=sys.stderr)
        return {}
    code = compile(prog, filename, 'exec')
    strs = _find_strings(filename, encoding)
    return _find_lines(code, strs)
Example #21
 def openFile(self, path=None):
     if not self._maybeSaveBeforeExit():
         return
     if path is None:
         path = self._ioDialog(QFileDialog.AcceptOpen)
         if path is None:
             return
         self.fileChooser.setCurrentFolder(os.path.dirname(path))
     with tokenize.open(path) as inputFile:
         self.editor.setPlainText(inputFile.read())
     self.currentPath = path
Example #22
def _find_executable_linenos(filename):
    """Return dict where keys are line numbers in the line number table."""
    try:
        with tokenize.open(filename) as f:
            prog = f.read()
            encoding = f.encoding
    except IOError as err:
        print(("Not printing coverage data for %r: %s" % (filename, err)), file=sys.stderr)
        return {}
    code = compile(prog, filename, "exec")
    strs = _find_strings(filename, encoding)
    return _find_lines(code, strs)
Example #23
 def test_fftpack_import(self):
     base = Path(scipy.__file__).parent
     regexp = r"\s*from.+\.fftpack import .*\n"
     for path in base.rglob("*.py"):
         if base / "fftpack" in path.parents:
             continue
         # use tokenize to auto-detect encoding on systems where no
         # default encoding is defined (e.g. LANG='C')
         with tokenize.open(str(path)) as file:
             assert_(all(not re.fullmatch(regexp, line)
                         for line in file),
                     "{0} contains an import from fftpack".format(path))
Example #24
def get_code(path):
    with tokenize.open(path) as f:  # opens with detected source encoding
        source = f.read()

    try:
        code = compile(source, path, "exec")
    except UnicodeEncodeError:
        code = compile(source, "<encoding error>", "exec")
        code = update_code_recursively(code, filename=path)
        # so code contains correct filename (even if it contains Unicode)
        # and tracebacks show contents of code lines

    return code
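A short usage sketch for get_code() above ("script.py" is a placeholder, not a file defined anywhere in these examples): the returned code object can be executed much as the interpreter would run the file.

code = get_code("script.py")
exec(code, {"__name__": "__main__", "__file__": "script.py"})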
Example #25
File: slicer.py Project: patrickod/peppy
def main():
    args = parser.parse_args()
    if not os.path.exists(args.target):
        os.makedirs(args.target)

    for filename in all_python_files(args.src):
        with tokenize.open(filename) as f:
            source = f.read()

        for i in range(len(source)):
            for j in range(i + 1, len(source)):
                write_if_valid(args.target, source[i:j])
                write_if_valid(args.target, source[:i] + source[j:])
Example #26
def _find_executable_linenos(filename):
    """Return dict where keys are line numbers in the line number table."""
    try:
        with tokenize.open(filename) as f:
            prog = f.read()
            encoding = f.encoding
    except OSError as err:
        print(("Not printing coverage data for %r: %s" % (filename, err)),
              file=sys.stderr)
        return {}
    code = compile(prog, filename, "exec")
    strs = _find_strings(filename, encoding)
    return _find_lines(code, strs)
Example #27
def get_code(path):
	with tokenize.open(path) as f:	# opens with detected source encoding
		source = f.read()
	
	try:
		code = compile(source, path, "exec")
	except UnicodeEncodeError:
		code = compile(source, "<encoding error>", "exec")
		code = update_code_recursively(code, filename=path)
			# so code contains correct filename (even if it contains Unicode)
			# and tracebacks show contents of code lines
	
	return code
Example #28
 def for_file(cls, filename: str, modname: str) -> "ModuleAnalyzer":
     if ('file', filename) in cls.cache:
         return cls.cache['file', filename]
     try:
         with tokenize.open(filename) as f:
             obj = cls(f, modname, filename)
             cls.cache['file', filename] = obj
     except Exception as err:
         if '.egg' + path.sep in filename:
             obj = cls.cache['file', filename] = cls.for_egg(filename, modname)
         else:
             raise PycodeError('error opening %r' % filename, err) from err
     return obj
Example #29
 def _file_to_tree(filename):
     with tokenize.open(filename) as file:
         parsed = ast.parse(file.read(), filename=filename)
     classes = intervaltree.IntervalTree()
     tree = intervaltree.IntervalTree()
     for node in ast.walk(parsed):
         if isinstance(node, (ast.ClassDef)):
             start, end = Main._compute_interval(node)
             classes[start:end] = node
         if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
             start, end = Main._compute_interval(node)
             tree[start:end] = node
     return classes, tree
Example #30
def create_frozen_lib(name, mods, aliases={}):
    """Create a new *frozen library* called `name`.

    `mods` is a dictionary of module descriptions indexed by module
    name.  A module description is a triple of
      (filename, short_filename, is_pkg).

    This is the same format as returned by `find_modules`.

    A frozen library can contain a number of aliases. Specifically
    that means that a given Python module may be importable under its
    normal name, plus any number of alias names. The alias is a
    dictionary indexed by the alias name, with the value being the
    name of the underlying module.

    """
    extern_fmt = 'extern unsigned char {}[];\n'
    struct_fmt = '    {{"{}", {}, {}}},\n'
    c_filename = '{}.c'.format(name)
    h_struct_filename = '{}_struct.h'.format(name)
    h_extern_filename = '{}_extern.h'.format(name)

    # Although easier for the user to specify {alias: module}
    # the code is simpler if we have {module: [aliases]}
    aliases = dict_inverse(aliases)

    with open(h_struct_filename, 'w') as h_struct, \
            open(h_extern_filename, 'w') as h_ext, \
            open(c_filename, 'w') as c_out:

        for mod_name in sorted(mods.keys()):
            (filename, short_filename, is_pkg) = mods[mod_name]
            with tokenize.open(filename) as f:
                code = compile(f.read(), short_filename, 'exec', optimize=2)
            var_name = "M_" + "__".join(mod_name.split("."))
            raw_code = marshal.dumps(code)
            size = len(raw_code)
            # Packages are indicated by negative size; this is part of
            # the Python frozen library internals.
            if is_pkg:
                size = -size

            write_byte_code(c_out, var_name, raw_code)

            h_ext.write(extern_fmt.format(var_name))

            h_struct.write(struct_fmt.format(mod_name, var_name, size))

            # Write out all the aliases
            for alias in aliases.get(mod_name, []):
                h_struct.write(struct_fmt.format(alias, var_name, size))
Example #32
def main():
    parser = ArgumentParser(description=__doc__)
    parser.add_argument(
        "--backend",
        action="append",
        help=("backend to test; can be passed multiple times; defaults to the "
              "default backend"))
    parser.add_argument("--include-sgskip",
                        action="store_true",
                        help="do not filter out *_sgskip.py examples")
    parser.add_argument(
        "--rundir",
        type=Path,
        help=("directory from where the tests are run; defaults to a "
              "temporary directory"))
    parser.add_argument(
        "paths",
        nargs="*",
        type=Path,
        help="examples to run; defaults to all examples (except *_sgskip.py)")
    args = parser.parse_args()

    root = Path(__file__).resolve().parent.parent / "examples"
    paths = args.paths if args.paths else sorted(root.glob("**/*.py"))
    if not args.include_sgskip:
        paths = [path for path in paths if not path.stem.endswith("sgskip")]
    relpaths = [path.resolve().relative_to(root) for path in paths]
    width = max(len(str(relpath)) for relpath in relpaths)
    for relpath in relpaths:
        print(str(relpath).ljust(width + 1), end="", flush=True)
        runinfos = []
        with ExitStack() as stack:
            if args.rundir:
                cwd = args.rundir / relpath.with_suffix("")
                cwd.mkdir(parents=True)
            else:
                cwd = stack.enter_context(TemporaryDirectory())
            with tokenize.open(root / relpath) as src:
                Path(cwd, relpath.name).write_text(_preamble + src.read(),
                                                   encoding="utf-8")
            for backend in args.backend or [None]:
                env = {**os.environ}
                if backend is not None:
                    env["MPLBACKEND"] = backend
                start = time.perf_counter()
                proc = subprocess.run([sys.executable, relpath.name],
                                      cwd=cwd,
                                      env=env)
                elapsed = round(1000 * (time.perf_counter() - start))
                runinfos.append(RunInfo(backend, elapsed, proc.returncode))
        print("\t".join(map(str, runinfos)))
Example #33
def read_python_source(filename):
    """Read the Python source text from `filename`.

    Returns a str: unicode on Python 3, bytes on Python 2.

    """
    # Python 3.2 provides `tokenize.open`, the best way to open source files.
    if sys.version_info >= (3, 2):
        f = tokenize.open(filename)
    else:
        f = open(filename, "rU")

    with f:
        return f.read()
Example #34
def main():
    args = parse_arguments()

    module_name = (args.modulename if args.modulename else path.splitext(
        path.basename(args.filename))[0])
    module_directory = args.directory if args.directory else module_name

    if args.clear:
        clear_c_module(module_directory, module_name)
        if args.make:
            return

    if args.make and module_exists(module_directory):
        return

    with tokenize.open(args.filename) as f:
        tokens = list(
            remove_tokens(tokenize.generate_tokens(f.readline),
                          [tokenize.COMMENT, tokenize.NL]))
        constants = []
        dictionarys = []

        i = 0
        while i < len(tokens):
            if tokens[i].exact_type == tokenize.ENDMARKER:
                break
            elif Constant.match(tokens[i:]):
                c = Constant(tokens[i:])
                i += c.length()
                constants.append(c)
                continue
            elif Dictionary.match(tokens[i:]):
                d = Dictionary(tokens[i:], constants)
                i += d.length()
                dictionarys.append(d)
            else:
                print(token.tok_name[tokens[i].exact_type], tokens[i].string)
            i += 1

        code = ""
        rom_table = ""
        for i in constants:
            rom_table += i.generate_rom_constant()
        rom_table += "\n"

        for i in dictionarys:
            code += i.generate_code()
            rom_table += i.generate_rom_constant()

        create_c_module(module_directory, module_name, code, rom_table)
Example #35
def process(filename):
    tokenized = tokenize.generate_tokens(tokenize.open(filename).readline)
    tokens_after = []

    for token in tokenized:
        if token.type == tokenize.NUMBER:
            new_number = int(token.string) * 2
            tokens_after.append((tokenize.NUMBER, str(new_number), token.start,
                                 token.end, token.line))
        else:
            tokens_after.append(token)

    with open(filename, 'wt') as f_out:
        f_out.write(tokenize.untokenize(tokens_after))
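A hedged usage sketch for the process() function above ("numbers.py" is a throwaway file created on the spot): tokenize.untokenize() rebuilds the source text from the token stream, so only the NUMBER tokens change and the surrounding layout is preserved.

with open("numbers.py", "w") as f:
    f.write("x = 2 + 40\n")

process("numbers.py")

with open("numbers.py") as f:
    print(f.read())  # x = 4 + 80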
Example #36
def open_python_file(filename):
    """Open a read-only Python file taking proper care of its encoding.

    In Python 3, we would like all files to be opened with utf-8 encoding. However, some author like to specify PEP263
    headers in their source files with their own encodings. In that case, we should respect the author's encoding.
    """
    import tokenize

    if hasattr(tokenize, "open"):  # Added in Python 3.2
        # Open file respecting PEP263 encoding. If no encoding header is
        # found, opens as utf-8.
        return tokenize.open(filename)
    else:
        return open(filename, encoding="utf-8")
Example #37
def updatecache(filename, module_globals=None):
    """Update a cache entry and return its list of lines.
    If something's wrong, print a message, discard the cache entry,
    and return an empty list."""
    if filename in cache:
        if len(cache[filename]) != 1:
            del cache[filename]
    if not filename or filename.startswith('<') and filename.endswith('>'):
        return []
    fullname = filename
    try:
        stat = os.stat(fullname)
    except OSError:
        basename = filename
        if lazycache(filename, module_globals):
            try:
                data = cache[filename][0]()
            except (ImportError, OSError):
                pass
            else:
                if data is None:
                    return []
                cache[filename] = len(data), None, [
                    (line + '\n') for line in data.splitlines()
                ], fullname
                return cache[filename][2]
        if os.path.isabs(filename):
            return []
        for dirname in sys.path:
            try:
                fullname = os.path.join(dirname, basename)
            except (TypeError, AttributeError):
                continue
            try:
                stat = os.stat(fullname)
                break
            except OSError:
                pass
        else:
            return []
    try:
        with tokenize.open(fullname) as fp:
            lines = fp.readlines()
    except OSError:
        return []
    if lines and not lines[-1].endswith('\n'):
        lines[-1] += '\n'
    size, mtime = stat.st_size, stat.st_mtime
    cache[filename] = size, mtime, lines, fullname
    return lines
Example #38
def source_code(path: Path):
    try:
        with tokenize.open(path) as file:
            source = file.read()
        ast.parse(source)
    except (SyntaxError, UnicodeDecodeError):
        return False
    except Exception:
        # not a file [directories with .py extension, exist :(]
        # or any other issue, but we don't know whether it is
        # syntax related or not, so return True
        return True
    else:
        return True
Example #39
 def include(fileName):
     searchedIncludePaths = []
     for searchDir in self.module.program.moduleSearchDirs:
         filePath = '{}/{}'.format(searchDir, fileName)
         if os.path.isfile(filePath):
             return tokenize.open(filePath).read()
         else:
             searchedIncludePaths.append(filePath)
     else:
         raise utils.Error(
             lineNr=self.lineNr,
             message=
             '\n\tAttempt to include file: {}\n\tCan\'t find any of:\n\t\t{}\n'
             .format(node.args[0], '\n\t\t'.join(searchedIncludePaths)))
Example #40
File: code.py Project: vinniec/thonny
    def _load_file(self, filename, keep_undo=False):
        with tokenize.open(filename) as fp:  # TODO: support also text files
            source = fp.read()

        # Make sure Windows filenames have proper format
        filename = normpath_with_actual_case(filename)
        self._filename = filename
        self._last_known_mtime = os.path.getmtime(self._filename)

        get_workbench().event_generate("Open", editor=self, filename=filename)
        self._code_view.set_content(source, keep_undo)
        self.get_text_widget().edit_modified(False)
        self._code_view.focus_set()
        self.master.remember_recent_file(filename)
Example #41
def check_spelling():
    """Check commonly misspelled words."""
    # Words which I often misspell
    words = {
        '[Bb]ehaviour', '[Qq]uitted', '[Ll]ikelyhood', '[Ss]ucessfully',
        '[Oo]ccur[^rs .!]', '[Ss]eperator', '[Ee]xplicitely', '[Aa]uxillary',
        '[Aa]ccidentaly', '[Aa]mbigious', '[Ll]oosly', '[Ii]nitialis',
        '[Cc]onvienence', '[Ss]imiliar', '[Uu]ncommited', '[Rr]eproducable',
        '[Aa]n [Uu]ser', '[Cc]onvienience', '[Ww]ether', '[Pp]rogramatically',
        '[Ss]plitted', '[Ee]xitted', '[Mm]ininum', '[Rr]esett?ed',
        '[Rr]ecieved', '[Rr]egularily', '[Uu]nderlaying', '[Ii]nexistant',
        '[Ee]lipsis', 'commiting', 'existant', '[Rr]esetted', '[Ss]imilarily',
        '[Ii]nformations', '[Aa]n [Uu][Rr][Ll]', '[Tt]reshold'
    }

    # Words which look better when splitted, but might need some fine tuning.
    words |= {
        '[Ww]ebelements', '[Mm]ouseevent', '[Kk]eysequence', '[Nn]ormalmode',
        '[Ee]ventloops', '[Ss]izehint', '[Ss]tatemachine', '[Mm]etaobject',
        '[Ll]ogrecord', '[Ff]iletype'
    }

    # Files which should be ignored, e.g. because they come from another
    # package
    ignored = [
        os.path.join('.', 'scripts', 'dev', 'misc_checks.py'),
        os.path.join('.', 'qutebrowser', '3rdparty', 'pdfjs'),
        os.path.join('.', 'tests', 'end2end', 'data', 'hints', 'ace',
                     'ace.js'),
    ]

    seen = collections.defaultdict(list)
    try:
        ok = True
        for fn in _get_files():
            with tokenize.open(fn) as f:
                if any(fn.startswith(i) for i in ignored):
                    continue
                for line in f:
                    for w in words:
                        if (re.search(w, line) and fn not in seen[w]
                                and '# pragma: no spellcheck' not in line):
                            print('Found "{}" in {}!'.format(w, fn))
                            seen[w].append(fn)
                            ok = False
        print()
        return ok
    except Exception:
        traceback.print_exc()
        return None
Example #42
 def execfile(self, filename, source=None):
     "Execute an existing file"
     if source is None:
         with tokenize.open(filename) as fp:
             source = fp.read()
     try:
         code = compile(source, filename, "exec")
     except (OverflowError, SyntaxError):
         self.resetoutput()
         print('*** Error in script or command!\n'
              'Traceback (most recent call last):')
         InteractiveInterpreter.showsyntaxerror(self, filename)
     else:
         self.runcode(code)
Example #43
def check_spelling():
    """Check commonly misspelled words."""
    # Words which I often misspell
    words = {'[Bb]ehaviour', '[Qq]uitted', '[Ll]ikelyhood', '[Ss]ucessfully',
             '[Oo]ccur[^rs .]', '[Ss]eperator', '[Ee]xplicitely',
             '[Aa]uxillary', '[Aa]ccidentaly', '[Aa]mbigious', '[Ll]oosly',
             '[Ii]nitialis', '[Cc]onvienence', '[Ss]imiliar', '[Uu]ncommited',
             '[Rr]eproducable', '[Aa]n [Uu]ser', '[Cc]onvienience',
             '[Ww]ether', '[Pp]rogramatically', '[Ss]plitted', '[Ee]xitted',
             '[Mm]ininum', '[Rr]esett?ed', '[Rr]ecieved', '[Rr]egularily',
             '[Uu]nderlaying', '[Ii]nexistant', '[Ee]lipsis', 'commiting',
             'existant', '[Rr]esetted', '[Ss]imilarily', '[Ii]nformations',
             '[Aa]n [Uu][Rr][Ll]'}

    # Words which look better when splitted, but might need some fine tuning.
    words |= {'[Ww]ebelements', '[Mm]ouseevent', '[Kk]eysequence',
              '[Nn]ormalmode', '[Ee]ventloops', '[Ss]izehint',
              '[Ss]tatemachine', '[Mm]etaobject', '[Ll]ogrecord',
              '[Ff]iletype'}

    # Files which should be ignored, e.g. because they come from another
    # package
    ignored = [
        os.path.join('.', 'scripts', 'dev', 'misc_checks.py'),
        os.path.join('.', 'qutebrowser', '3rdparty', 'pdfjs'),
        os.path.join('.', 'tests', 'end2end', 'data', 'hints', 'ace',
                     'ace.js'),
    ]

    seen = collections.defaultdict(list)
    try:
        ok = True
        for fn in _get_files():
            with tokenize.open(fn) as f:
                if any(fn.startswith(i) for i in ignored):
                    continue
                for line in f:
                    for w in words:
                        if (re.search(w, line) and
                                fn not in seen[w] and
                                '# pragma: no spellcheck' not in line):
                            print('Found "{}" in {}!'.format(w, fn))
                            seen[w].append(fn)
                            ok = False
        print()
        return ok
    except Exception:
        traceback.print_exc()
        return None
Example #44
    def _copy_script(self, script, outfiles, updated_files):
        shebang_match = None
        script = convert_path(script)
        outfile = os.path.join(self.build_dir, os.path.basename(script))
        outfiles.append(outfile)

        if not self.force and not newer(script, outfile):
            log.debug("not copying %s (up-to-date)", script)
            return

        # Always open the file, but ignore failures in dry-run mode
        # in order to attempt to copy directly.
        try:
            f = tokenize.open(script)
        except OSError:
            if not self.dry_run:
                raise
            f = None
        else:
            first_line = f.readline()
            if not first_line:
                self.warn("%s is an empty file (skipping)" % script)
                return

            shebang_match = shebang_pattern.match(first_line)

        updated_files.append(outfile)
        if shebang_match:
            log.info("copying and adjusting %s -> %s", script, self.build_dir)
            if not self.dry_run:
                if not sysconfig.python_build:
                    executable = self.executable
                else:
                    executable = os.path.join(
                        sysconfig.get_config_var("BINDIR"),
                        "python%s%s" % (sysconfig.get_config_var("VERSION"),
                                        sysconfig.get_config_var("EXE")))
                post_interp = shebang_match.group(1) or ''
                shebang = "#!" + executable + post_interp + "\n"
                self._validate_shebang(shebang, f.encoding)
                with open(outfile, "w", encoding=f.encoding) as outf:
                    outf.write(shebang)
                    outf.writelines(f.readlines())
            if f:
                f.close()
        else:
            if f:
                f.close()
            self.copy_file(script, outfile)
Example #45
def check_spelling():
    """Check commonly misspelled words."""
    # Words which I often misspell
    words = {
        'behaviour', 'quitted', 'likelyhood', 'sucessfully', 'occur[^rs .!]',
        'seperator', 'explicitely', 'auxillary', 'accidentaly', 'ambigious',
        'loosly', 'initialis', 'convienence', 'similiar', 'uncommited',
        'reproducable', 'an user', 'convienience', 'wether', 'programatically',
        'splitted', 'exitted', 'mininum', 'resett?ed', 'recieved',
        'regularily', 'underlaying', 'inexistant', 'elipsis', 'commiting',
        'existant', 'resetted', 'similarily', 'informations', 'an url',
        'treshold', 'artefact'
    }

    # Words which look better when splitted, but might need some fine tuning.
    words |= {
        'webelements', 'mouseevent', 'keysequence', 'normalmode', 'eventloops',
        'sizehint', 'statemachine', 'metaobject', 'logrecord', 'filetype'
    }

    # Files which should be ignored, e.g. because they come from another
    # package
    ignored = [
        os.path.join('.', 'scripts', 'dev', 'misc_checks.py'),
        os.path.join('.', 'qutebrowser', '3rdparty', 'pdfjs'),
        os.path.join('.', 'tests', 'end2end', 'data', 'hints', 'ace',
                     'ace.js'),
    ]

    seen = collections.defaultdict(list)
    try:
        ok = True
        for fn in _get_files():
            with tokenize.open(fn) as f:
                if any(fn.startswith(i) for i in ignored):
                    continue
                for line in f:
                    for w in words:
                        pattern = '[{}{}]{}'.format(w[0], w[0].upper(), w[1:])
                        if (re.search(pattern, line) and fn not in seen[w]
                                and '# pragma: no spellcheck' not in line):
                            print('Found "{}" in {}!'.format(w, fn))
                            seen[w].append(fn)
                            ok = False
        print()
        return ok
    except Exception:
        traceback.print_exc()
        return None
Example #46
def get_gitignore(root: Path, no_gitignore: bool = False) -> PathSpec:
    """Return a PathSpec matching gitignore content, if present.

    :param root: root path to search for `.gitignore`.
    :param no_gitignore: `config.no_gitignore` value (default=False).
    :returns: PathSpec matching gitignore content, if present.
    """
    lines: List[str] = []
    if not no_gitignore:
        path = os.path.join(root, GITIGNORE)
        if os.path.isfile(path):
            if os.access(path, os.R_OK):
                with tokenize.open(path) as ignore_file:
                    lines = ignore_file.readlines()
    return PathSpec.from_lines(GitWildMatchPattern, lines)
Example #47
def check_vcs_conflict():
    """Check VCS conflict markers."""
    try:
        ok = True
        for fn in _get_files(only_py=True):
            with tokenize.open(fn) as f:
                for line in f:
                    if any(line.startswith(c * 7) for c in '<>=|'):
                        print("Found conflict marker in {}".format(fn))
                        ok = False
        print()
        return ok
    except Exception:
        traceback.print_exc()
        return None
Example #48
 def tabnanny(self, filename):
     with tokenize.open(filename) as f:
         try:
             tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
         except tokenize.TokenError as msg:
             msgtxt, (lineno, start) = msg.args
             self.editwin.gotoline(lineno)
             self.errorbox('Tabnanny Tokenizing Error',
                           'Token Error: %s' % msgtxt)
             return False
         except tabnanny.NannyNag as nag:
             self.editwin.gotoline(nag.get_lineno())
             self.errorbox('Tab/space error', indent_message)
             return False
     return True
Example #49
def check_vcs_conflict():
    """Check VCS conflict markers."""
    try:
        ok = True
        for fn in _py_files():
            with tokenize.open(fn) as f:
                for line in f:
                    if any(line.startswith(c * 7) for c in '<>=|'):
                        print("Found conflict marker in {}".format(fn))
                        ok = False
        print()
        return ok
    except Exception:
        traceback.print_exc()
        return None
Example #50
def main(opcode_py, outfile='Include/opcode.h'):
    opcode = {}
    if hasattr(tokenize, 'open'):
        fp = tokenize.open(opcode_py)  # Python 3.2+
    else:
        fp = open(opcode_py)  # Python 2.7
    with fp:
        code = fp.read()
    exec(code, opcode)
    opmap = opcode['opmap']
    hasconst = opcode['hasconst']
    hasjrel = opcode['hasjrel']
    hasjabs = opcode['hasjabs']
    used = [False] * 256
    next_op = 1
    for name, op in opmap.items():
        used[op] = True
    with open(outfile, 'w') as fobj:
        fobj.write(header)
        for name in opcode['opname']:
            if name in opmap:
                fobj.write("#define %-23s %3s\n" % (name, opmap[name]))
            if name == 'POP_EXCEPT':  # Special entry for HAVE_ARGUMENT
                fobj.write("#define %-23s %3d\n" %
                           ('HAVE_ARGUMENT', opcode['HAVE_ARGUMENT']))

        for name in opcode['_specialized_instructions']:
            while used[next_op]:
                next_op += 1
            fobj.write("#define %-23s %3s\n" % (name, next_op))
            used[next_op] = True

        fobj.write("#ifdef NEED_OPCODE_JUMP_TABLES\n")
        write_int_array_from_ops("_PyOpcode_RelativeJump", opcode['hasjrel'],
                                 fobj)
        write_int_array_from_ops("_PyOpcode_Jump",
                                 opcode['hasjrel'] + opcode['hasjabs'], fobj)
        fobj.write("#endif /* OPCODE_TABLES */\n")

        fobj.write("\n")
        fobj.write("#define HAS_CONST(op) (false\\")
        for op in hasconst:
            fobj.write(f"\n    || ((op) == {op}) \\")
        fobj.write("\n    )\n")

        fobj.write(footer)

    print("%s regenerated from %s" % (outfile, opcode_py))
Example #51
def updatecache(filename, module_globals=None):
    if filename in cache:
        del cache[filename]
    if not filename or filename.startswith('<') and filename.endswith('>'):
        return []
    fullname = filename
    try:
        stat = os.stat(fullname)
    except OSError:
        basename = filename
        if module_globals and '__loader__' in module_globals:
            name = module_globals.get('__name__')
            loader = module_globals['__loader__']
            get_source = getattr(loader, 'get_source', None)
            if name and get_source:
                try:
                    data = get_source(name)
                except (ImportError, IOError):
                    pass
                else:
                    if data is None:
                        return []
                    cache[filename] = (len(data), None,
                                       [line + '\n'
                                        for line in data.splitlines()], fullname)
                    return cache[filename][2]
        if os.path.isabs(filename):
            return []
        for dirname in sys.path:
            try:
                fullname = os.path.join(dirname, basename)
            except (TypeError, AttributeError):
                continue
            try:
                stat = os.stat(fullname)
                break
            except os.error:
                pass
        else:
            return []
    try:
        with tokenize.open(fullname) as fp:
            lines = fp.readlines()
    except IOError:
        return []
    if lines and not lines[-1].endswith('\n'):
        lines[-1] += '\n'
    (size, mtime) = (stat.st_size, stat.st_mtime)
    cache[filename] = (size, mtime, lines, fullname)
    return lines
Example #52
def main():
    global OPTIONS
    import optparse
    parser = optparse.OptionParser(USAGE)
    parser.add_option('-v',
                      '--verbose',
                      action='count',
                      dest='verbose',
                      default=0,
                      help="enable extra status output")
    parser.add_option('-b',
                      '--allow-bases',
                      action='store_true',
                      help="allow base classes")
    OPTIONS, args = parser.parse_args()
    lazy = set()
    eager = set()
    for fn in args:
        try:
            fp = tokenize.open(fn)
        except SyntaxError:
            continue
        try:
            buf = fp.read()
        except UnicodeDecodeError:
            continue
        finally:
            fp.close()
        try:
            node = parse(buf)
        except SyntaxError:
            continue
        a = analyze(node, fn)
        if not a.safe:
            eager.add(fn)
        else:
            lazy.add(fn)
    total = len(lazy) + len(eager)
    if not total:
        print('warning: no Python modules parsed.')
        return
    print('Eager modules:')
    for fn in sorted(eager):
        print(f'    {fn}')
    print('Lazy modules:')
    for fn in sorted(lazy):
        print(f'    {fn}')
    print(f'{len(lazy) / total * 100:.1f}% - total: {total}')
Example #53
    def test_warning_calls():
        # combined "ignore" and stacklevel error
        base = Path(numpy.__file__).parent

        for path in base.rglob("*.py"):
            if base / "testing" in path.parents:
                continue
            if path == base / "__init__.py":
                continue
            if path == base / "random" / "__init__.py":
                continue
            # use tokenize to auto-detect encoding on systems where no
            # default encoding is defined (e.g. LANG='C')
            with tokenize.open(str(path)) as file:
                tree = ast.parse(file.read())
                FindFuncs(path).visit(tree)
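As the comment in the test above notes, tokenize.open picks up the file's own coding declaration instead of the locale encoding. A minimal standalone sketch (the temporary file exists only for illustration):

import tempfile
import tokenize

# Write a latin-1 encoded module with an explicit coding cookie.
with tempfile.NamedTemporaryFile(mode="wb", suffix=".py", delete=False) as tmp:
    tmp.write(b"# -*- coding: latin-1 -*-\nname = '\xe9'\n")

with tokenize.open(tmp.name) as f:
    # The declared encoding wins even when LANG='C' would default to ASCII.
    print(f.encoding)  # 'iso-8859-1'
    print(f.read())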
Example #54
    def _getSourceCode(cls):
        '''Get the source code of Class so we can record it into database.'''
        module = sys.modules[cls.__module__]
        if module.__name__ == '__main__' and hasattr(module, 'In'):
            code = module.In[-1]
        elif cls.__DBDocument__ is not None:
            try:
                code = cls.__DBDocument__.source
            except:
                raise QuLabTypeError('Document %r has no attribute `source`')
        elif hasattr(module, '__file__'):
            with tokenize.open(module.__file__) as f:
                code = f.read()
        else:
            code = ''
        return code
Example #55
def format_file(src: Path, line_length: int,
                fast: bool) -> Tuple[FileContent, Encoding]:
    """Reformats a file and returns its contents and encoding."""
    with tokenize.open(src) as src_buffer:
        src_contents = src_buffer.read()
    if src_contents.strip() == '':
        raise NothingChanged(src)

    dst_contents = format_str(src_contents, line_length=line_length)
    if src_contents == dst_contents:
        raise NothingChanged(src)

    if not fast:
        assert_equivalent(src_contents, dst_contents)
        assert_stable(src_contents, dst_contents, line_length=line_length)
    return dst_contents, src_buffer.encoding
Example #57
def process_package(directory):
    number = Counter()
    for file in directory.glob("**/*.py"):
        try:
            with tokenize.open(file) as stream:
                tree = ast.parse(stream.read())
        except:
            continue
        for call in filter(lambda node: isinstance(node, ast.Call),
                           ast.walk(tree)):
            if (type(call.func) is ast.Attribute
                    and type(call.func.value) is ast.Name
                    and call.func.value.id in MODULES):
                number[call.func.value.id + call.func.attr] += 1

    return number
Example #58
    def loadfile(self, filename):
        try:
            try:
                with tokenize.open(filename) as f:
                    chars = f.read()
                    fileencoding = f.encoding
                    eol_convention = f.newlines
                    converted = False
            except (UnicodeDecodeError, SyntaxError):
                # Wait for the editor window to appear
                self.editwin.text.update()
                enc = askstring(
                    "Specify file encoding",
                    "The file's encoding is invalid for Python 3.x.\n"
                    "IDLE will convert it to UTF-8.\n"
                    "What is the current encoding of the file?",
                    initialvalue='utf-8',
                    parent=self.editwin.text)
                with open(filename, encoding=enc) as f:
                    chars = f.read()
                    fileencoding = f.encoding
                    eol_convention = f.newlines
                    converted = True
        except OSError as err:
            tkMessageBox.showerror("I/O Error", str(err), parent=self.text)
            return False
        except UnicodeDecodeError:
            tkMessageBox.showerror("Decoding Error",
                                   "File %s\nFailed to Decode" % filename,
                                   parent=self.text)
            return False

        self.text.delete("1.0", "end")
        self.set_filename(None)
        self.fileencoding = fileencoding
        self.eol_convention = eol_convention
        self.text.insert("1.0", chars)
        self.reset_undo()
        self.set_filename(filename)
        if converted:
            # We need to save the conversion results first
            # before being able to execute the code
            self.set_saved(False)
        self.text.mark_set("insert", "1.0")
        self.text.yview("insert")
        self.updaterecentfileslist(filename)
        return True
Example #59
def tokenize_py(file_path):
    all_token_text_list = []
    python_key_words = []
    count = 0
    tokens_dict = {}

    list_of_tokens = []
    with tokenize.open(file_path) as f:
        tokens = tokenize.generate_tokens(f.readline)
        for token in tokens:
            if tok_name[token.type] == "NAME":
                if token.string not in [
                        'True', 'False', 'Null', 'None'
                ] and token.string not in keyword.kwlist:
                    # list_of_tokens.append("ID:" + token.string)
                    tokens_dict[token.string] = "ID:"
                    # string_of_tokens.append()
                elif token.string in ['True', 'False', 'Null', 'None']:
                    # list_of_tokens += "LIT:" + token.string + "$%#~"
                    # list_of_tokens.append("LIT:" + token.string)
                    tokens_dict[token.string] = "LIT:"
                elif token.string in keyword.kwlist:
                    # list_of_tokens += "STD:" + token.string + "$%#~"
                    # list_of_tokens.append("STD:" + token.string)
                    tokens_dict[token.string] = "STD:"
            elif tok_name[token.type] in ["NUMBER", "STRING"]:
                # list_of_tokens += "LIT:" + token.string + "$%#~"
                # list_of_tokens.append("LIT:" + token.string)
                tokens_dict[token.string] = "LIT:"

            else:
                if token.string == '\n':
                    # list_of_tokens.append(r"STD:\n")
                    tokens_dict[r"\n"] = "STD:"
                elif tok_name[token.type] == "INDENT":
                    # list_of_tokens.append(r"STD:\t")
                    tokens_dict[r"\t"] = "STD:"
                elif tok_name[token.type] == "DEDENT":
                    # list_of_tokens.append(r"STD:DEDENT")
                    tokens_dict["DEDENT"] = "STD:"
                elif tok_name[token.type] == "ENDMARKER":
                    # list_of_tokens.append(r"STD:ENDMARKER")
                    tokens_dict["ENDMARKER"] = "STD:"
                else:
                    # list_of_tokens.append("STD:" + token.string)
                    tokens_dict[token.string] = "STD:"
    return tokens_dict
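A possible call to the function above (the path given is hypothetical); each key is a token string and each value its category prefix:

# Hypothetical invocation: categorise the tokens of one source file.
tokens = tokenize_py("scripts/example.py")
for text, category in tokens.items():
    print(category + text)  # e.g. "STD:def", "ID:main", "LIT:42"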