Example 1
def parse_issues(config, output, paths, *, log):
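    # Parse formatter output (black-style "would reformat" / "reformatted" /
    # "error: cannot format" lines) into mozlint results; the "All done!" /
    # "Oh no!" summary line ends the scan.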
    would_reformat = re.compile("^would reformat (.*)$", re.I)
    reformatted = re.compile("^reformatted (.*)$", re.I)
    cannot_reformat = re.compile("^error: cannot format (.*?): (.*)$", re.I)
    results = []
    for line in output:
        line = line.decode("utf-8")
        if line.startswith("All done!") or line.startswith("Oh no!"):
            break

        match = would_reformat.match(line)
        if match:
            res = {"path": match.group(1), "level": "error"}
            results.append(result.from_config(config, **res))
            continue

        match = reformatted.match(line)
        if match:
            res = {"path": match.group(1), "level": "warning", "message": "reformatted"}
            results.append(result.from_config(config, **res))
            continue

        match = cannot_reformat.match(line)
        if match:
            res = {"path": match.group(1), "level": "error", "message": match.group(2)}
            results.append(result.from_config(config, **res))
            continue

        log.debug("Unhandled line", line)
    return results
Example 2
def lint(paths, config, fix=None, **lintargs):
    results = []
    files = list(expand_exclusions(paths, config, lintargs["root"]))
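    # Two checks per file: trailing whitespace, line by line, and Windows
    # \r\n line endings on the whole content. With fix=True the offending
    # bytes are rewritten in place instead of being reported.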

    for f in files:
        with open(f, "rb") as open_file:
            hasFix = False
            content_to_write = []
            for i, line in enumerate(open_file):
                if line.endswith(b" \n"):
                    # We found a trailing whitespace
                    if fix:
                        # We want to fix it, strip the trailing spaces
                        content_to_write.append(line.rstrip() + b"\n")
                        hasFix = True
                    else:
                        res = {
                            "path": f,
                            "message": "Trailing whitespace",
                            "level": "error",
                            "lineno": i + 1,
                        }
                        results.append(result.from_config(config, **res))
                else:
                    if fix:
                        content_to_write.append(line)
            if hasFix:
                # Only update the file when we found a change to make
                with open(f, "wb") as open_file_to_write:
                    open_file_to_write.write(b"".join(content_to_write))

            # We are still using the same fp, let's return to the first
            # line
            open_file.seek(0)
            # Open it as once as we just need to know if there is
            # at least one \r\n
            content = open_file.read()

            if b"\r\n" in content:
                if fix:
                    # replace \r\n by \n
                    content = content.replace(b"\r\n", b"\n")
                    with open(f, "wb") as open_file_to_write:
                        open_file_to_write.write(content)
                else:
                    res = {
                        "path": f,
                        "message": "Windows line return",
                        "level": "error",
                    }
                    results.append(result.from_config(config, **res))

    return results
Example 3
def api_lint(config, **lintargs):
    topsrcdir = lintargs["root"]
    topobjdir = lintargs["topobjdir"]

    gradle(
        lintargs["log"],
        topsrcdir=topsrcdir,
        topobjdir=topobjdir,
        tasks=lintargs["substs"]["GRADLE_ANDROID_API_LINT_TASKS"],
        extra_args=lintargs.get("extra_args") or [],
    )

    folder = lintargs["substs"]["GRADLE_ANDROID_GECKOVIEW_APILINT_FOLDER"]

    results = []

    with open(os.path.join(topobjdir, folder, "apilint-result.json")) as f:
        issues = json.load(f)
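        # apilint-result.json groups issues under "compat_failures",
        # "failures" and "api_changes"; every entry carries file, line and
        # column fields.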

        for rule in ("compat_failures", "failures"):
            for r in issues[rule]:
                err = {
                    "rule": r["rule"] if rule == "failures" else "compat_failures",
                    "path": mozpath.relpath(r["file"], topsrcdir),
                    "lineno": int(r["line"]),
                    "column": int(r.get("column") or 0),
                    "message": r["msg"],
                    "level": "error" if r["error"] else "warning",
                }
                results.append(result.from_config(config, **err))

        for r in issues["api_changes"]:
            err = {
                "rule": "api_changes",
                "path": mozpath.relpath(r["file"], topsrcdir),
                "lineno": int(r["line"]),
                "column": int(r.get("column") or 0),
                "message": "Unexpected api change. Please run ./gradlew {} for more "
                "information".format(
                    " ".join(lintargs["substs"]["GRADLE_ANDROID_API_LINT_TASKS"])
                ),
            }
            results.append(result.from_config(config, **err))

    return results
Example 4
def lint(paths, config, fix=None, **lintargs):
    results = []

    if platform.system() == 'Windows':
        # Windows doesn't have permissions in files
        # Exit now
        return results

    files = list(expand_exclusions(paths, config, lintargs['root']))
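    # Flag source files that carry the executable bit; files whose first
    # line is a shebang are treated as scripts and skipped. With fix=True
    # the mode is simply reset to 0o644.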
    for f in files:
        if os.access(f, os.X_OK):
            with open(f, 'r+') as content:
                # Some source files have +x permissions
                line = content.readline()
                if line.startswith("#!"):
                    # If the file starts with a shebang it is meant to be
                    # executable, so this is not a warning
                    continue

            if fix:
                # We want to fix it, do it and leave
                os.chmod(f, 0o644)
                continue

            res = {
                'path': f,
                'message': "Execution permissions on a source file",
                'level': 'error'
            }
            results.append(result.from_config(config, **res))
    return results
Example 5
def javadoc(config, **lintargs):
    topsrcdir = lintargs['root']
    topobjdir = lintargs['topobjdir']

    gradle(lintargs['log'],
           topsrcdir=topsrcdir,
           topobjdir=topobjdir,
           tasks=lintargs['substs']['GRADLE_ANDROID_GECKOVIEW_DOCS_TASKS'],
           extra_args=lintargs.get('extra_args') or [])
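    # The docs tasks are expected to leave one JSON report per entry of
    # GRADLE_ANDROID_GECKOVIEW_DOCS_OUTPUT_FILES under topobjdir.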

    output_files = lintargs['substs'][
        'GRADLE_ANDROID_GECKOVIEW_DOCS_OUTPUT_FILES']

    results = []

    for output_file in output_files:
        with open(os.path.join(topobjdir, output_file)) as f:
            # Like: '[{"path":"/absolute/path/to/topsrcdir/mobile/android/geckoview/src/main/java/org/mozilla/geckoview/ContentBlocking.java","lineno":"462","level":"warning","message":"no @return"}]'.  # NOQA: E501
            issues = json.load(f)

            for issue in issues:
                issue['path'] = issue['path'].replace(lintargs['root'], '')
                # We want warnings to be errors for linting purposes.
                issue['level'] = 'error'
                results.append(result.from_config(config, **issue))

    return results
Example 6
def lint(config, **lintargs):
    topsrcdir = lintargs['root']
    topobjdir = lintargs['topobjdir']

    gradle(lintargs['log'],
           topsrcdir=topsrcdir,
           topobjdir=topobjdir,
           tasks=lintargs['substs']['GRADLE_ANDROID_LINT_TASKS'],
           extra_args=lintargs.get('extra_args') or [])

    # It's surprising that this is the App variant name, but this is "withoutGeckoBinariesDebug"
    # right now and the GeckoView variant name is "withGeckoBinariesDebug".  This will be addressed
    # as we unify variants.
    path = os.path.join(
        lintargs['topobjdir'], 'gradle/build/mobile/android/geckoview/reports',
        'lint-results-{}.xml'.format(
            lintargs['substs']['GRADLE_ANDROID_GECKOVIEW_VARIANT_NAME']))
    tree = ET.parse(open(path, 'rt'))
    root = tree.getroot()
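    # Each <issue> element in the Android lint XML report carries severity,
    # id and message attributes plus a child <location> with the offending
    # file and line.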

    results = []

    for issue in root.findall('issue'):
        location = issue[0]
        err = {
            'level': issue.get('severity').lower(),
            'rule': issue.get('id'),
            'message': issue.get('message'),
            'path': location.get('file').replace(lintargs['root'], ''),
            'lineno': int(location.get('line') or 0),
        }
        results.append(result.from_config(config, **err))

    return results
Example 7
    def lint(self, files, get_reference_and_tests):
        return [
            result.from_config(self.lintconfig, **result_data)
            for result_data in super(MozL10nLinter, self).lint(
                files, get_reference_and_tests
            )
        ]
Example 8
File: wpt.py Project: urrytr/gecko
def lint(files, config, **kwargs):
    results = []
    tests_dir = os.path.join(kwargs['root'], 'testing', 'web-platform', 'tests')

    def process_line(line):
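        # `wpt lint --json` prints one JSON object per line, one per issue;
        # anything that is not valid JSON is ignored.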
        try:
            data = json.loads(line)
        except ValueError:
            return

        data["level"] = "error"
        data["path"] = os.path.relpath(os.path.join(tests_dir, data["path"]), kwargs['root'])
        data.setdefault("lineno", 0)
        results.append(result.from_config(config, **data))

    if files == [tests_dir]:
        print("No specific files specified, running the full wpt lint"
              " (this is slow)", file=sys.stderr)
        files = ["--all"]
    cmd = [os.path.join(tests_dir, 'wpt'), 'lint', '--json'] + files
    if platform.system() == 'Windows':
        cmd.insert(0, sys.executable)

    proc = ProcessHandler(cmd, env=os.environ, processOutputLine=process_line)
    proc.run()
    try:
        proc.wait()
        if proc.returncode != 0:
            results.append(
                result.from_config(config,
                                   message="Lint process exited with return code %s" %
                                   proc.returncode))
    except KeyboardInterrupt:
        proc.kill()

    return results
Example 9
def parse_issues(log, config, issues, base_path, onlyIn):
    results = []
    if onlyIn:
        onlyIn = os.path.normcase(os.path.normpath(onlyIn))
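    # Each issue is one line of cargo's JSON diagnostics; the interesting
    # records carry a "message" object with "level", "code", "spans" and a
    # "rendered" hint.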
    for issue in issues:

        try:
            detail = json.loads(six.ensure_text(issue))
            if "message" in detail:
                p = detail["target"]["src_path"]
                detail = detail["message"]
                if "level" in detail:
                    if (
                        detail["level"] == "error" or detail["level"] == "failure-note"
                    ) and not detail["code"]:
                        log.debug(
                            "Error outside of clippy. "
                            "This means that the build failed. Therefore, skipping this"
                        )
                        log.debug("File = {} / Detail = {}".format(p, detail))
                        continue
                    # We are in a clippy warning
                    if len(detail["spans"]) == 0:
                        # For some reason, at the end of the summary, we can
                        # get the following line
                        # {'rendered': 'warning: 5 warnings emitted\n\n', 'children':
                        # [], 'code': None, 'level': 'warning', 'message':
                        # '5 warnings emitted', 'spans': []}
                        # if this is the case, skip it
                        log.debug(
                            "Skipping the summary line {} for file {}".format(detail, p)
                        )
                        continue

                    l = detail["spans"][0]
                    p = os.path.join(base_path, l["file_name"])
                    if onlyIn and onlyIn not in os.path.normcase(os.path.normpath(p)):
                        # Case when we have a .rs in the include list in the yaml file
                        log.debug(
                            "{} is not part of the list of files '{}'".format(p, onlyIn)
                        )
                        continue
                    res = {
                        "path": p,
                        "level": detail["level"],
                        "lineno": l["line_start"],
                        "column": l["column_start"],
                        "message": detail["message"],
                        "hint": detail["rendered"],
                        "rule": detail["code"]["code"],
                        "lineoffset": l["line_end"] - l["line_start"],
                    }
                    results.append(result.from_config(config, **res))

        except json.decoder.JSONDecodeError:
            log.debug("Could not parse the output:")
            log.debug("clippy output: {}".format(issue))
            continue

    return results
Example 10
def validate_linter_includes(lintconfig, l10nconfigs, lintargs):
    '''Check l10n.yml config against l10n.toml configs.'''
    reference_paths = set(
        mozpath.relpath(p['reference'].prefix, lintargs['root'])
        for project in l10nconfigs
        for config in project.configs
        for p in config.paths
    )
    # Just check for directories
    reference_dirs = sorted(p for p in reference_paths if os.path.isdir(p))
    missing_in_yml = [
        refd for refd in reference_dirs if refd not in lintconfig['include']
    ]
    # These might be subdirectories in the config, though
    missing_in_yml = [
        d for d in missing_in_yml
        if not any(d.startswith(parent + '/') for parent in lintconfig['include'])
    ]
    if missing_in_yml:
        dirs = ', '.join(missing_in_yml)
        return [result.from_config(
            lintconfig, path=lintconfig['path'],
            message='l10n.yml out of sync with l10n.toml, add: ' + dirs
        )]
    return []
Example 11
def lint(files, config, **lintargs):

    config['root'] = lintargs['root']
    paths = expand_exclusions(files, config, config['root'])
    paths = list(paths)
    results = []
    chunk_size = 50
    binary = get_rstcheck_binary()

    while paths:
        cmdargs = [
            binary,
        ] + paths[:chunk_size]
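        # rstcheck reports its findings on stderr, one per line, so the
        # stderr stream (communicate()[1]) is what gets parsed below.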
        proc = subprocess.Popen(cmdargs,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=os.environ,
                                universal_newlines=True)
        all_errors = proc.communicate()[1]
        for errors in all_errors.split("\n"):
            if len(errors) > 1:
                filename, lineno, level, message = parse_with_split(errors)
                res = {
                    'path': filename,
                    'message': message,
                    'lineno': lineno,
                    'level': "error" if int(level) >= 2 else "warning",
                }
                results.append(result.from_config(config, **res))
        paths = paths[chunk_size:]

    return results
Example 12
def parse_issues(config, output, paths):
    RustfmtDiff = namedtuple("RustfmtDiff", ["file", "line", "diff"])
    issues = []
    diff_line = re.compile("^Diff in (.*) at line ([0-9]*):")
    file = ""
    line_no = 0
    diff = ""
    for line in output:
        line = six.ensure_text(line)
        match = diff_line.match(line)
        if match:
            if diff:
                issues.append(RustfmtDiff(file, line_no, diff.rstrip("\n")))
                diff = ""
            file, line_no = match.groups()
        else:
            diff += line + "\n"
    # the algorithm above will always skip adding the last issue
    issues.append(RustfmtDiff(file, line_no, diff))
    results = []
    for issue in issues:
        # rustfmt cannot be supplied the paths to the files we want to analyze;
        # therefore, for each issue detected, we check if any of the paths
        # supplied are part of the file name.
        # This just filters out the issues that are not part of paths.
        if any([path in file for path in paths]):
            res = {
                "path": issue.file,
                "diff": issue.diff,
                "level": "warning",
                "lineno": issue.line,
            }
            results.append(result.from_config(config, **res))
    return results
Example 13
def lint(files, config, **kwargs):
    results = []
    tests_dir = os.path.join(kwargs['root'], 'testing', 'web-platform', 'tests')

    def process_line(line):
        try:
            data = json.loads(line)
        except ValueError:
            return
        data["level"] = "error"
        data["path"] = os.path.relpath(os.path.join(tests_dir, data["path"]), kwargs['root'])
        results.append(result.from_config(config, **data))

    if files == [tests_dir]:
        print("No specific files specified, running the full wpt lint"
              " (this is slow)", file=sys.stderr)
        files = ["--all"]
    cmd = [os.path.join(tests_dir, 'wpt'), 'lint', '--json'] + files
    if platform.system() == 'Windows':
        cmd.insert(0, sys.executable)

    proc = ProcessHandler(cmd, env=os.environ, processOutputLine=process_line)
    proc.run()
    try:
        proc.wait()
        if proc.returncode != 0:
            results.append(
                result.from_config(config,
                                   message="Lint process exited with return code %s" %
                                   proc.returncode))
    except KeyboardInterrupt:
        proc.kill()

    return results
Example 14
def lint(paths, config, fix=None, **lintargs):
    results = []

    if platform.system() == "Windows":
        # Windows doesn't have permissions in files
        # Exit now
        return results

    files = list(expand_exclusions(paths, config, lintargs["root"]))
    for f in files:
        if os.access(f, os.X_OK):
            if config.get("allow-shebang"):
                with open(f, "r+") as content:
                    # Some source files have +x permissions
                    line = content.readline()
                    if line.startswith("#!"):
                        # If the file starts with a shebang it is meant to be
                        # executable, so this is not a warning
                        continue

            if fix:
                # We want to fix it, do it and leave
                os.chmod(f, 0o644)
                continue

            res = {
                "path": f,
                "message": "Execution permissions on a source file",
                "level": "error",
            }
            results.append(result.from_config(config, **res))
    return results
Example 15
def lint(files, config, **lintargs):
    log = lintargs["log"]
    config["root"] = lintargs["root"]
    paths = expand_exclusions(files, config, config["root"])
    paths = list(paths)
    results = []
    chunk_size = 50
    binary = get_rstcheck_binary()
    rstcheck_options = "--ignore-language=cpp,json"

    while paths:
        cmdargs = [which("python"), binary, rstcheck_options
                   ] + paths[:chunk_size]
        log.debug("Command: {}".format(" ".join(cmdargs)))

        proc = subprocess.Popen(
            cmdargs,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=os.environ,
            universal_newlines=True,
        )
        all_errors = proc.communicate()[1]
        for errors in all_errors.split("\n"):
            if len(errors) > 1:
                filename, lineno, level, message = parse_with_split(errors)
                res = {
                    "path": filename,
                    "message": message,
                    "lineno": lineno,
                    "level": "error" if int(level) >= 2 else "warning",
                }
                results.append(result.from_config(config, **res))
        paths = paths[chunk_size:]

    return results
Example 16
def validate_linter_includes(lintconfig, l10nconfigs, lintargs):
    """Check l10n.yml config against l10n.toml configs."""
    reference_paths = set(
        mozpath.relpath(p["reference"].prefix, lintargs["root"])
        for project in l10nconfigs
        for config in project.configs
        for p in config.paths
    )
    # Just check for directories
    reference_dirs = sorted(p for p in reference_paths if os.path.isdir(p))
    missing_in_yml = [
        refd for refd in reference_dirs if refd not in lintconfig["include"]
    ]
    # These might be subdirectories in the config, though
    missing_in_yml = [
        d
        for d in missing_in_yml
        if not any(d.startswith(parent + "/") for parent in lintconfig["include"])
    ]
    if missing_in_yml:
        dirs = ", ".join(missing_in_yml)
        return [
            result.from_config(
                lintconfig,
                path=lintconfig["path"],
                message="l10n.yml out of sync with l10n.toml, add: " + dirs,
            )
        ]
    return []
Example 17
def api_lint(config, **lintargs):
    topsrcdir = lintargs['root']
    topobjdir = lintargs['topobjdir']

    gradle(lintargs['log'],
           topsrcdir=topsrcdir,
           topobjdir=topobjdir,
           tasks=lintargs['substs']['GRADLE_ANDROID_API_LINT_TASKS'],
           extra_args=lintargs.get('extra_args') or [])

    folder = lintargs['substs']['GRADLE_ANDROID_GECKOVIEW_APILINT_FOLDER']

    results = []

    with open(os.path.join(topobjdir, folder, 'apilint-result.json')) as f:
        issues = json.load(f)

        for rule in ('compat_failures', 'failures'):
            for r in issues[rule]:
                err = {
                    'rule': r['rule'] if rule == 'failures' else 'compat_failures',
                    'path': mozpath.relpath(r['file'], topsrcdir),
                    'lineno': int(r['line']),
                    'column': int(r.get('column') or 0),
                    'message': r['msg'],
                }
                results.append(result.from_config(config, **err))

        for r in issues['api_changes']:
            err = {
                'rule': 'api_changes',
                'path': mozpath.relpath(r['file'], topsrcdir),
                'lineno': int(r['line']),
                'column': int(r.get('column') or 0),
                'message': 'Unexpected api change. Please run ./gradlew {} for more '
                'information'.format(' '.join(
                    lintargs['substs']['GRADLE_ANDROID_API_LINT_TASKS'])),
            }
            results.append(result.from_config(config, **err))

    return results
Example 18
    def process_line(line):
        try:
            data = json.loads(line)
        except ValueError:
            return
        data["level"] = "error"
        data["path"] = os.path.relpath(os.path.join(tests_dir, data["path"]), kwargs['root'])
        results.append(result.from_config(config, **data))
Example 19
def checkdupes(paths, config, **kwargs):
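    # Collect the pref names declared in StaticPrefList.yaml and report any
    # duplicate definitions that check_against() finds in all.js.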
    results = []
    topdir = os.path.join(kwargs['root'], "modules", "libpref", "init")
    pref_names = get_names(os.path.join(topdir, "StaticPrefList.yaml"))
    errors = check_against(os.path.join(topdir, "all.js"), pref_names)
    for error in errors:
        results.append(result.from_config(config, **error))
    return results
Example 20
    def process_line(self, line):
        try:
            res = json.loads(line)
        except ValueError:
            print('Non JSON output from linter, will not be processed: {}'.format(line))
            return

        res['level'] = 'error'
        results.append(result.from_config(self.config, **res))
Example 21
def external(files, config, **lintargs):
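    # Minimal example linter: every line containing "foobar" is reported
    # under the "no-foobar" rule with its 1-based line number.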
    results = []
    for path in files:
        with open(path, 'r') as fh:
            for i, line in enumerate(fh.readlines()):
                if 'foobar' in line:
                    results.append(result.from_config(
                        config, path=path, lineno=i+1, column=1, rule="no-foobar"))
    return results
Example 22
def _parse_android_test_results(config, topsrcdir=None, report_dir=None):
    # A brute force way to turn a Java FQN into a path on disk.  Assumes Java
    # and Kotlin sources are in mobile/android for performance and simplicity.
    sourcepath_finder = FileFinder(os.path.join(topsrcdir, 'mobile',
                                                'android'))

    finder = FileFinder(report_dir)
    reports = list(finder.find('TEST-*.xml'))
    if not reports:
        raise RuntimeError('No reports found under {}'.format(report_dir))

    for report, _ in reports:
        tree = ET.parse(open(os.path.join(finder.base, report), 'rt'))
        root = tree.getroot()

        # Like 'org.mozilla.gecko.permissions.TestPermissions'.
        class_name = root.get('name')
        # Like '**/org/mozilla/gecko/permissions/TestPermissions.*'.
        path = '**/' + class_name.replace('.', '/') + '.*'

        for testcase in root.findall('testcase'):
            function_name = testcase.get('name')

            # Schema cribbed from http://llg.cubic.org/docs/junit/.
            for unexpected in itertools.chain(testcase.findall('error'),
                                              testcase.findall('failure')):
                sourcepaths = list(sourcepath_finder.find(path))
                if not sourcepaths:
                    raise RuntimeError(
                        'No sourcepath found for class {class_name}'.format(
                            class_name=class_name))

                for sourcepath, _ in sourcepaths:
                    lineno = 0
                    message = unexpected.get('message')
                    # Turn '... at org.mozilla.gecko.permissions.TestPermissions.testMultipleRequestsAreQueuedAndDispatchedSequentially(TestPermissions.java:118)' into 118.  # NOQA: E501
                    pattern = r'at {class_name}\.{function_name}\(.*:(\d+)\)'
                    pattern = pattern.format(class_name=class_name,
                                             function_name=function_name)
                    match = re.search(pattern, message)
                    if match:
                        lineno = int(match.group(1))
                    else:
                        msg = 'No source line found for {class_name}.{function_name}'.format(
                            class_name=class_name, function_name=function_name)
                        raise RuntimeError(msg)

                    err = {
                        'level': 'error',
                        'rule': unexpected.get('type'),
                        'message': message,
                        'path': os.path.join('mobile', 'android', sourcepath),
                        'lineno': lineno,
                    }
                    yield result.from_config(config, **err)
Example 23
def _parse_android_test_results(config, topsrcdir=None, report_dir=None):
    # A brute force way to turn a Java FQN into a path on disk.  Assumes Java
    # and Kotlin sources are in mobile/android for performance and simplicity.
    sourcepath_finder = FileFinder(os.path.join(topsrcdir, "mobile",
                                                "android"))

    finder = FileFinder(report_dir)
    reports = list(finder.find("TEST-*.xml"))
    if not reports:
        raise RuntimeError("No reports found under {}".format(report_dir))

    for report, _ in reports:
        tree = ET.parse(open(os.path.join(finder.base, report), "rt"))
        root = tree.getroot()

        # Like 'org.mozilla.gecko.permissions.TestPermissions'.
        class_name = root.get("name")
        # Like '**/org/mozilla/gecko/permissions/TestPermissions.*'.
        path = "**/" + class_name.replace(".", "/") + ".*"

        for testcase in root.findall("testcase"):
            function_name = testcase.get("name")

            # Schema cribbed from http://llg.cubic.org/docs/junit/.
            for unexpected in itertools.chain(testcase.findall("error"),
                                              testcase.findall("failure")):
                sourcepaths = list(sourcepath_finder.find(path))
                if not sourcepaths:
                    raise RuntimeError(
                        "No sourcepath found for class {class_name}".format(
                            class_name=class_name))

                for sourcepath, _ in sourcepaths:
                    lineno = 0
                    message = unexpected.get("message")
                    # Turn '... at org.mozilla.gecko.permissions.TestPermissions.testMultipleRequestsAreQueuedAndDispatchedSequentially(TestPermissions.java:118)' into 118.  # NOQA: E501
                    pattern = r"at {class_name}\.{function_name}\(.*:(\d+)\)"
                    pattern = pattern.format(class_name=class_name,
                                             function_name=function_name)
                    match = re.search(pattern, message)
                    if match:
                        lineno = int(match.group(1))
                    else:
                        msg = "No source line found for {class_name}.{function_name}".format(
                            class_name=class_name, function_name=function_name)
                        raise RuntimeError(msg)

                    err = {
                        "level": "error",
                        "rule": unexpected.get("type"),
                        "message": message,
                        "path": os.path.join("mobile", "android", sourcepath),
                        "lineno": lineno,
                    }
                    yield result.from_config(config, **err)
Example 24
def lint(paths, lintconfig, **lintargs):
    l10n_base = mb_util.get_state_dir()
    root = lintargs["root"]
    exclude = lintconfig.get("exclude")
    extensions = lintconfig.get("extensions")

    # Load l10n.toml configs
    l10nconfigs = load_configs(lintconfig, root, l10n_base)

    # Check include paths in l10n.yml if it's in our given paths
    # Only the l10n.yml will show up here, but if the l10n.toml files
    # change, we also get the l10n.yml as the toml files are listed as
    # support files.
    if lintconfig["path"] in paths:
        results = validate_linter_includes(lintconfig, l10nconfigs, lintargs)
        paths.remove(lintconfig["path"])
    else:
        results = []

    all_files = []
    for p in paths:
        fp = pathutils.FilterPath(p)
        if fp.isdir:
            for _, fileobj in fp.finder:
                all_files.append(fileobj.path)
        if fp.isfile:
            all_files.append(p)
    # Filter again; our directories might have picked up files that are
    # explicitly excluded in the l10n.yml configuration.
    # `browser/locales/en-US/firefox-l10n.js` is a good example.
    all_files, _ = pathutils.filterpaths(
        lintargs["root"],
        all_files,
        lintconfig["include"],
        exclude=exclude,
        extensions=extensions,
    )
    # These should be excluded in l10n.yml
    skips = {p for p in all_files if not parser.hasParser(p)}
    results.extend(
        result.from_config(
            lintconfig,
            level="warning",
            path=path,
            message="file format not supported in compare-locales",
        )
        for path in skips
    )
    all_files = [p for p in all_files if p not in skips]
    files = ProjectFiles(LOCALE, l10nconfigs)

    get_reference_and_tests = l10n_base_reference_and_tests(files)

    linter = MozL10nLinter(lintconfig)
    results += linter.lint(all_files, get_reference_and_tests)
    return results
Example 25
    def add_error(self, node, rule, msg):
        (col, line) = self.span_to_line_and_col(node.span)
        res = {
            "path": self.path,
            "lineno": line,
            "column": col,
            "rule": rule,
            "message": msg,
        }
        self.results.append(result.from_config(self.config, **res))
Example 26
def checkdupes(paths, config, **kwargs):
    results = []
    errors = []
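    # Pref names come from the first support file listed in the linter
    # config; every expanded path is then checked against that list.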
    pref_names = get_names(config["support-files"][0])
    files = list(expand_exclusions(paths, config, kwargs["root"]))
    for file in files:
        errors.extend(check_against(file, pref_names))
    for error in errors:
        results.append(result.from_config(config, **error))
    return results
Example 27
    def process_line(self, line):
        try:
            res = json.loads(line)
        except ValueError:
            print(
                "Non JSON output from {} linter: {}".format(self.config["name"], line)
            )
            return

        res["level"] = "error"
        results.append(result.from_config(self.config, **res))
Example 28
def run(cmd_args, config):

    shell = False
    if os.environ.get("MSYSTEM") in ("MINGW32", "MINGW64"):
        # The eslint binary needs to be run from a shell with msys
        shell = True
    encoding = "utf-8"

    orig = signal.signal(signal.SIGINT, signal.SIG_IGN)
    proc = subprocess.Popen(cmd_args,
                            shell=shell,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    signal.signal(signal.SIGINT, orig)

    try:
        output, errors = proc.communicate()
    except KeyboardInterrupt:
        proc.kill()
        return []

    if errors:
        errors = errors.decode(encoding, "replace")
        print(ESLINT_ERROR_MESSAGE.format(errors))

    if proc.returncode >= 2:
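        # eslint exits with 1 when lint errors were found and with 2 or
        # higher on crashes or configuration problems, so only the latter is
        # treated as a failure of the linter itself.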
        return 1

    if not output:
        return []  # no output means success
    output = output.decode(encoding, "replace")
    try:
        jsonresult = json.loads(output)
    except ValueError:
        print(ESLINT_ERROR_MESSAGE.format(output))
        return 1

    results = []
    for obj in jsonresult:
        errors = obj["messages"]

        for err in errors:
            err.update({
                "hint": err.get("fix"),
                "level": "error" if err["severity"] == 2 else "warning",
                "lineno": err.get("line") or 0,
                "path": obj["filePath"],
                "rule": err.get("ruleId"),
            })
            results.append(result.from_config(config, **err))

    return results
Example 29
    def process_line(line):
        # Escape slashes otherwise JSON conversion will not work
        line = line.replace('\\', '\\\\')
        try:
            res = json.loads(line)
        except ValueError:
            print('Non JSON output from linter, will not be processed: {}'.format(line))
            return

        if res.get('code') in LINE_OFFSETS:
            res['lineoffset'] = LINE_OFFSETS[res['code']]

        results.append(result.from_config(config, **res))
Example 30
def findbugs(config, **lintargs):
    topsrcdir = lintargs['root']
    topobjdir = lintargs['topobjdir']

    # A brute force way to turn a Java FQN into a path on disk.  Assumes Java and Kotlin sources
    # are in mobile/android for performance and simplicity.
    sourcepath_finder = FileFinder(os.path.join(topsrcdir, 'mobile',
                                                'android'))

    gradle(topsrcdir=topsrcdir,
           topobjdir=topobjdir,
           tasks=lintargs['substs']['GRADLE_ANDROID_FINDBUGS_TASKS'],
           extra_args=lintargs.get('extra_args') or [])

    path = os.path.join(
        lintargs['topobjdir'],
        'gradle/build/mobile/android/app/reports/findbugs',
        'findbugs-{}-output.xml'.format(
            lintargs['substs']['GRADLE_ANDROID_APP_VARIANT_NAME']))
    tree = ET.parse(open(path, 'rt'))
    root = tree.getroot()

    results = []

    for issue in root.findall('./BugInstance'):
        location = issue.find('./SourceLine')
        # Like 'org/mozilla/gecko/sync/repositories/android/FennecTabsRepository.java'.
        unanchored_sourcepath = location.get('sourcepath')

        sourcepaths = list(
            sourcepath_finder.find('**/{}'.format(unanchored_sourcepath)))
        if not len(sourcepaths) == 1:
            raise RuntimeError(
                'No sourcepath found for unanchored sourcepath {path}'.format(
                    path=unanchored_sourcepath))

        sourcepath, _ = sourcepaths[0]

        err = {
            'level': 'error',
            'rule': issue.get('type'),
            'message': ET.tostring(issue),
            'path': os.path.join('mobile', 'android', sourcepath),
            'lineno': int(location.get('start') or 0),
        }
        results.append(result.from_config(config, **err))

    return results
Example 31
def external(files, config, **lintargs):
    if lintargs.get('fix'):
        # mimics no results because they got fixed
        return []

    results = []
    for path in files:
        if os.path.isdir(path):
            continue

        with open(path, 'r') as fh:
            for i, line in enumerate(fh.readlines()):
                if 'foobar' in line:
                    results.append(result.from_config(
                        config, path=path, lineno=i+1, column=1, rule="no-foobar"))
    return results
Example 32
    def process_line(self, line):
        # Escape slashes otherwise JSON conversion will not work
        line = line.replace('\\', '\\\\')
        try:
            res = json.loads(line)
        except ValueError:
            print('Non JSON output from linter, will not be processed: {}'.format(line))
            return

        if 'code' in res:
            if res['code'].startswith('W'):
                res['level'] = 'warning'

            if res['code'] in LINE_OFFSETS:
                res['lineoffset'] = LINE_OFFSETS[res['code']]

        results.append(result.from_config(self.config, **res))
Example 33
    def process_line(self, line):
        try:
            data = json.loads(line)
        except JSONDecodeError as e:
            print('Unable to load shellcheck output ({}): {}'.format(e, line))
            return

        for entry in data:
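            # Each finding in shellcheck's JSON output maps directly onto a
            # mozlint result via its file, line, column, code and message
            # fields.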
            res = {
                'path': entry['file'],
                'message': entry['message'],
                'level': 'error',
                'lineno': entry['line'],
                'column': entry['column'],
                'rule': entry['code'],
            }
            results.append(result.from_config(self.config, **res))
Example 34
    def process_line(self, line):
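        # codespell prints "path:line: typo ==> correction" lines;
        # CODESPELL_FORMAT_REGEX captures those four pieces.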
        try:
            match = CODESPELL_FORMAT_REGEX.match(line)
            abspath, line, typo, correct = match.groups()
        except AttributeError:
            print('Unable to match regex against output: {}'.format(line))
            return

        # Ignore false positive like aParent (which would be fixed to apparent)
        # See https://github.com/lucasdemarchi/codespell/issues/314
        m = re.match(r'^[a-z][A-Z][a-z]*', typo)
        if m:
            return
        res = {'path': os.path.relpath(abspath, self.config['root']),
               'message': typo + " ==> " + correct,
               'level': "warning",
               'lineno': line,
               }
        results.append(result.from_config(self.config, **res))
Example 35
def lint(paths, config, binary=None, fix=None, setup=None, **lintargs):
    """Run eslint."""
    setup_helper.set_project_root(lintargs['root'])

    module_path = setup_helper.get_project_root()

    if not setup_helper.check_node_executables_valid():
        return 1

    if setup:
        return setup_helper.eslint_setup()

    setup_helper.eslint_maybe_setup()

    # Valid binaries are:
    #  - Any provided by the binary argument.
    #  - Any pointed at by the ESLINT environmental variable.
    #  - Those provided by mach eslint --setup.
    #
    #  eslint --setup installs some mozilla specific plugins and installs
    #  all node modules locally. This is the preferred method of
    #  installation.

    if not binary:
        binary = os.environ.get('ESLINT', None)

        if not binary:
            binary = os.path.join(module_path, "node_modules", ".bin", "eslint")
            if not os.path.isfile(binary):
                binary = None

    if not binary:
        print(ESLINT_NOT_FOUND_MESSAGE)
        return 1

    extra_args = lintargs.get('extra_args') or []
    cmd_args = [binary,
                # Enable the HTML plugin.
                # We can't currently enable this in the global config file
                # because it has bad interactions with the SublimeText
                # ESLint plugin (bug 1229874).
                '--plugin', 'html',
                # This keeps ext as a single argument.
                '--ext', '[{}]'.format(','.join(config['extensions'])),
                '--format', 'json',
                ] + extra_args + paths

    # eslint requires that --fix be set before the --ext argument.
    if fix:
        cmd_args.insert(1, '--fix')

    shell = False
    if os.environ.get('MSYSTEM') in ('MINGW32', 'MINGW64'):
        # The eslint binary needs to be run from a shell with msys
        shell = True

    orig = signal.signal(signal.SIGINT, signal.SIG_IGN)
    proc = ProcessHandler(cmd_args, env=os.environ, stream=None, shell=shell)
    proc.run()
    signal.signal(signal.SIGINT, orig)

    try:
        proc.wait()
    except KeyboardInterrupt:
        proc.kill()
        return []

    if not proc.output:
        return []  # no output means success

    try:
        jsonresult = json.loads(proc.output[0])
    except ValueError:
        print(ESLINT_ERROR_MESSAGE.format("\n".join(proc.output)))
        return 1

    results = []
    for obj in jsonresult:
        errors = obj['messages']

        for err in errors:
            err.update({
                'hint': err.get('fix'),
                'level': 'error' if err['severity'] == 2 else 'warning',
                'lineno': err.get('line'),
                'path': obj['filePath'],
                'rule': err.get('ruleId'),
            })
            results.append(result.from_config(config, **err))

    return results
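
All of the examples above follow the same payload contract: collect issues, describe each one as a dict of keyword arguments, and turn it into a lint result with result.from_config(config, **res). The sketch below shows that skeleton in isolation. It assumes the usual mozlint imports (from mozlint import result and from mozlint.pathutils import expand_exclusions), and the "no-tab" rule and its message are hypothetical, used purely for illustration.

# Assumed import paths for the mozlint result helper and path expansion.
from mozlint import result
from mozlint.pathutils import expand_exclusions


def lint(paths, config, **lintargs):
    results = []
    # Expand the given paths against the linter's include/exclude config.
    files = list(expand_exclusions(paths, config, lintargs["root"]))
    for path in files:
        with open(path, "r") as fh:
            for i, line in enumerate(fh):
                if "\t" in line:
                    res = {
                        "path": path,
                        "lineno": i + 1,  # mozlint line numbers are 1-based
                        "level": "error",
                        "rule": "no-tab",
                        "message": "Hard tab found",
                    }
                    results.append(result.from_config(config, **res))
    return results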