Example #1
def formatcode(projroot: Path, full: bool) -> None:
    """Run clang-format on all of our source code (multithreaded)."""
    import time
    import concurrent.futures
    from efrotools import get_files_hash
    from multiprocessing import cpu_count
    os.chdir(projroot)
    cachepath = Path(projroot, 'config/.cache-formatcode')
    if full and cachepath.exists():
        cachepath.unlink()
    cache = FileCache(cachepath)
    cfconfig = Path(projroot, '.clang-format')

    filenames = get_code_filenames(projroot)
    confighash = get_files_hash([cfconfig])
    cache.update(filenames, confighash)

    dirtyfiles = cache.get_dirty_files()

    def format_file(filename: str) -> Dict[str, Any]:
        start_time = time.time()

        # Note: os.system does not seem to release the GIL,
        # so make sure to use subprocess here.
        result = subprocess.call(['clang-format', '-i', filename])
        if result != 0:
            raise Exception(f'Formatting failed for {filename}')
        duration = time.time() - start_time
        print(f'Formatted {filename} in {duration:.2f} seconds.')
        sys.stdout.flush()
        return {'f': filename, 't': duration}

    # NOTE: using fewer workers than we have logical procs for now;
    # we're bottlenecked by one or two long-running instances,
    # so it actually helps to lighten the load around them.
    # May want to revisit this once everything is chopped up better.
    with concurrent.futures.ThreadPoolExecutor(
            max_workers=cpu_count() // 2) as executor:
        # Converting this to a list will propagate any errors.
        list(executor.map(format_file, dirtyfiles))

    if dirtyfiles:
        # Since we changed files, need to update hashes again.
        cache.update(filenames, confighash)
        cache.mark_clean(filenames)
        cache.write()
    print(f'Formatting is up to date for {len(filenames)} code files.',
          flush=True)
Example #2
def cpplint(projroot: Path, full: bool) -> None:
    """Run lint-checking on all code deemed lint-able."""
    from concurrent.futures import ThreadPoolExecutor
    from multiprocessing import cpu_count
    from efrotools import get_config
    from efro.terminal import Clr

    os.chdir(projroot)
    filenames = get_code_filenames(projroot)
    if any(' ' in name for name in filenames):
        raise Exception('found space in path; unexpected')

    # Check the config for a list of ones to ignore.
    code_blacklist: List[str] = get_config(projroot).get(
        'cpplint_blacklist', [])

    # Just pretend blacklisted ones don't exist.
    filenames = [f for f in filenames if f not in code_blacklist]
    filenames = [f for f in filenames if not f.endswith('.mm')]

    cachepath = Path(projroot, 'config/.cache-lintcode')
    if full and cachepath.exists():
        cachepath.unlink()

    cache = FileCache(cachepath)

    # Clear out entries and hashes for files that have changed/etc.
    cache.update(filenames, '')
    dirtyfiles = cache.get_dirty_files()

    if dirtyfiles:
        print(f'{Clr.BLU}CppLint checking'
              f' {len(dirtyfiles)} file(s)...{Clr.RST}')

    def lint_file(filename: str) -> None:
        result = subprocess.call(['cpplint', '--root=src', filename])
        if result != 0:
            raise Exception(f'Linting failed for {filename}')

    with ThreadPoolExecutor(max_workers=cpu_count()) as executor:
        # Converting this to a list will propagate any errors.
        list(executor.map(lint_file, dirtyfiles))

    if dirtyfiles:
        cache.mark_clean(filenames)
        cache.write()
    print(
        f'{Clr.GRN}CppLint: all {len(filenames)} files are passing.{Clr.RST}',
        flush=True)
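
Both the formatting and lint passes call get_code_filenames(projroot) to collect the C/C++ sources to operate on; that helper is not shown in these examples. A rough sketch of what such a function might look like, assuming sources live under src/ (the real efrotools version may pull its paths from project config instead):

from pathlib import Path
from typing import List


def get_code_filenames_sketch(projroot: Path) -> List[str]:
    """Hypothetical illustration of gathering format/lint-able sources."""
    exts = ('.h', '.c', '.cc', '.cpp', '.cxx', '.m', '.mm')
    out: List[str] = []
    for path in sorted(Path(projroot, 'src').rglob('*')):
        if path.is_file() and path.suffix in exts:
            out.append(str(path.relative_to(projroot)))
    return out
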
Example #3
def formatscripts(projroot: Path, full: bool) -> None:
    """Runs yapf on all our scripts (multithreaded)."""
    import time
    from concurrent.futures import ThreadPoolExecutor
    from efrotools import get_files_hash
    from multiprocessing import cpu_count
    os.chdir(projroot)
    cachepath = Path(projroot, 'config/.cache-formatscripts')
    if full and cachepath.exists():
        cachepath.unlink()

    cache = FileCache(cachepath)
    yapfconfig = Path(projroot, '.style.yapf')

    filenames = get_script_filenames(projroot)
    confighash = get_files_hash([yapfconfig])
    cache.update(filenames, confighash)

    dirtyfiles = cache.get_dirty_files()

    def format_file(filename: str) -> None:
        start_time = time.time()
        result = subprocess.call(['yapf', '--in-place', filename])
        if result != 0:
            raise Exception(f'Formatting failed for {filename}')
        duration = time.time() - start_time
        print(f'Formatted {filename} in {duration:.2f} seconds.')
        sys.stdout.flush()

    # NOTE: using fewer workers than we have logical procs for now;
    # we're bottlenecked by one or two long-running instances,
    # so it actually helps to lighten the load around them.
    # May want to revisit this once everything is chopped up better.
    with ThreadPoolExecutor(max_workers=cpu_count() // 2) as executor:
        # Convert the map() iterator to a list to propagate any errors,
        # even though we don't use the return values.
        list(executor.map(format_file, dirtyfiles))

    if dirtyfiles:
        # Since we changed files, need to update hashes again.
        cache.update(filenames, confighash)
        cache.mark_clean(filenames)
        cache.write()
    print(f'Formatting is up to date for {len(filenames)} script files.',
          flush=True)
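
The formatters invalidate their caches whenever the relevant config file (.clang-format or .style.yapf) changes by passing get_files_hash([config]) into cache.update(). A plausible shape for that helper, again an assumption rather than the actual efrotools code:

import hashlib
from pathlib import Path
from typing import List, Union


def get_files_hash_sketch(filenames: List[Union[Path, str]]) -> str:
    """Hypothetical illustration: a combined digest over a set of files."""
    md5 = hashlib.md5()
    for fname in filenames:
        md5.update(Path(fname).read_bytes())
    return md5.hexdigest()
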
Example #4
def format_clang_format(projroot: Path, full: bool) -> None:
    """Run clang-format on all of our source code (multithreaded)."""
    import time
    import concurrent.futures
    from multiprocessing import cpu_count
    from efrotools import get_files_hash
    os.chdir(projroot)
    cachepath = Path(projroot, '.cache/format_clang_format')
    if full and cachepath.exists():
        cachepath.unlink()
    cache = FileCache(cachepath)
    cfconfig = Path(projroot, '.clang-format')

    filenames = get_code_filenames(projroot)
    confighash = get_files_hash([cfconfig])
    cache.update(filenames, confighash)

    dirtyfiles = cache.get_dirty_files()

    def format_file(filename: str) -> dict[str, Any]:
        start_time = time.time()

        # Note: os.system does not seem to release the GIL,
        # so make sure to use subprocess here.
        result = subprocess.call(['clang-format', '-i', filename])
        if result != 0:
            raise Exception(f'Formatting failed for {filename}')
        duration = time.time() - start_time
        print(f'Formatted {filename} in {duration:.2f} seconds.')
        sys.stdout.flush()
        return {'f': filename, 't': duration}

    with concurrent.futures.ThreadPoolExecutor(
            max_workers=cpu_count()) as executor:
        # Converting this to a list will propagate any errors.
        list(executor.map(format_file, dirtyfiles))

    if dirtyfiles:
        # Since we changed files, need to update hashes again.
        cache.update(filenames, confighash)
        cache.mark_clean(filenames)
        cache.write()
    print(f'Formatting is up to date for {len(filenames)} code files.',
          flush=True)
Example #5
def formatscripts(projroot: Path, full: bool) -> None:
    """Runs yapf on all our scripts (multithreaded)."""
    import time
    from concurrent.futures import ThreadPoolExecutor
    from multiprocessing import cpu_count
    from efrotools import get_files_hash, PYVER
    os.chdir(projroot)
    cachepath = Path(projroot, 'config/.cache-formatscripts')
    if full and cachepath.exists():
        cachepath.unlink()

    cache = FileCache(cachepath)
    yapfconfig = Path(projroot, '.style.yapf')

    filenames = get_script_filenames(projroot)
    confighash = get_files_hash([yapfconfig])
    cache.update(filenames, confighash)

    dirtyfiles = cache.get_dirty_files()

    def format_file(filename: str) -> None:
        start_time = time.time()
        result = subprocess.call(
            [f'python{PYVER}', '-m', 'yapf', '--in-place', filename])
        if result != 0:
            raise Exception(f'Formatting failed for {filename}')
        duration = time.time() - start_time
        print(f'Formatted {filename} in {duration:.2f} seconds.')
        sys.stdout.flush()

    with ThreadPoolExecutor(max_workers=cpu_count()) as executor:
        # Convert the map() iterator to a list to propagate any errors,
        # even though we don't use the return values.
        list(executor.map(format_file, dirtyfiles))

    if dirtyfiles:
        # Since we changed files, need to update hashes again.
        cache.update(filenames, confighash)
        cache.mark_clean(filenames)
        cache.write()
    print(f'Formatting is up to date for {len(filenames)} script files.',
          flush=True)
Example #6
def cpplint(projroot: Path, full: bool) -> None:
    """Run lint-checking on all code deemed lint-able."""
    # pylint: disable=too-many-locals
    import tempfile
    from concurrent.futures import ThreadPoolExecutor
    from multiprocessing import cpu_count
    from efrotools import getconfig, PYVER
    from efro.terminal import Clr
    from efro.error import CleanError

    os.chdir(projroot)
    filenames = get_code_filenames(projroot)
    for fpath in filenames:
        if ' ' in fpath:
            raise Exception(f'Found space in path {fpath}; unexpected.')

    # Check the config for a list of ones to ignore.
    code_blacklist: List[str] = getconfig(projroot).get(
        'cpplint_blacklist', [])

    # Just pretend blacklisted ones don't exist.
    filenames = [f for f in filenames if f not in code_blacklist]
    filenames = [f for f in filenames if not f.endswith('.mm')]

    cachepath = Path(projroot, 'config/.cache-lintcode')
    if full and cachepath.exists():
        cachepath.unlink()

    cache = FileCache(cachepath)

    # Clear out entries and hashes for files that have changed/etc.
    cache.update(filenames, '')
    dirtyfiles = cache.get_dirty_files()

    if dirtyfiles:
        print(f'{Clr.BLU}CppLint checking'
              f' {len(dirtyfiles)} file(s)...{Clr.RST}')

    # We want to do a few custom modifications to the cpplint module...
    try:
        import cpplint as cpplintmodule
    except Exception as exc:
        raise CleanError('Unable to import cpplint') from exc
    with open(cpplintmodule.__file__) as infile:
        codelines = infile.read().splitlines()
    cheadersline = codelines.index('_C_HEADERS = frozenset([')

    # Extra headers we consider as valid C system headers.
    c_headers = [
        'malloc.h', 'tchar.h', 'jni.h', 'android/log.h', 'EGL/egl.h',
        'libgen.h', 'linux/netlink.h', 'linux/rtnetlink.h',
        'android/bitmap.h', 'uuid/uuid.h', 'cxxabi.h', 'direct.h',
        'shellapi.h', 'rpc.h', 'io.h'
    ]
    codelines.insert(cheadersline + 1, ''.join(f"'{h}'," for h in c_headers))

    # Skip unapproved C++ headers check (it flags <mutex>, <thread>, etc.)
    headercheckline = codelines.index(
        "  if include and include.group(1) in ('cfenv',")
    codelines[headercheckline] = (
        "  if False and include and include.group(1) in ('cfenv',")

    # Don't complain about unknown NOLINT categories.
    # (we use them for clang-tidy)
    unknownlintline = codelines.index(
        '        elif category not in _LEGACY_ERROR_CATEGORIES:')
    codelines[unknownlintline] = '        elif False:'

    def lint_file(filename: str) -> None:
        result = subprocess.call(
            [f'python{PYVER}', '-m', 'cpplint', '--root=src', filename],
            env=env)
        if result != 0:
            raise CleanError(
                f'{Clr.RED}Cpplint failed for {filename}.{Clr.RST}')

    with tempfile.TemporaryDirectory() as tmpdir:

        # Write our replacement module, make it discoverable, then run.
        with open(tmpdir + '/cpplint.py', 'w') as outfile:
            outfile.write('\n'.join(codelines))
        env = os.environ.copy()
        env['PYTHONPATH'] = tmpdir

        with ThreadPoolExecutor(max_workers=cpu_count()) as executor:
            # Converting this to a list will propagate any errors.
            list(executor.map(lint_file, dirtyfiles))

    if dirtyfiles:
        cache.mark_clean(filenames)
        cache.write()
    print(
        f'{Clr.GRN}CppLint: all {len(filenames)} files are passing.{Clr.RST}',
        flush=True)
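
The module-patching approach in this example works because the modified cpplint.py is written to a temporary directory that is put on PYTHONPATH, and PYTHONPATH entries are searched before installed site-packages, so the child interpreter picks up the patched copy. A tiny standalone demonstration of that mechanism (demo_module is a made-up name used only for illustration):

import os
import subprocess
import sys
import tempfile

with tempfile.TemporaryDirectory() as tmpdir:
    # Write a trivial module into the temp dir.
    with open(os.path.join(tmpdir, 'demo_module.py'), 'w') as outfile:
        outfile.write("VALUE = 'patched'\n")
    env = os.environ.copy()
    env['PYTHONPATH'] = tmpdir
    # The child interpreter resolves 'demo_module' via PYTHONPATH,
    # shadowing any installed module of the same name.
    subprocess.run(
        [sys.executable, '-c', 'import demo_module; print(demo_module.VALUE)'],
        env=env, check=True)
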
Example #7
def check_cpplint(projroot: Path, full: bool) -> None:
    """Run cpplint on all our applicable code."""
    # pylint: disable=too-many-locals
    from concurrent.futures import ThreadPoolExecutor
    from multiprocessing import cpu_count
    from efrotools import getconfig, PYVER
    from efro.terminal import Clr
    from efro.error import CleanError

    os.chdir(projroot)
    filenames = get_code_filenames(projroot)
    for fpath in filenames:
        if ' ' in fpath:
            raise Exception(f'Found space in path {fpath}; unexpected.')

    # Check the config for a list of ones to ignore.
    code_blacklist: list[str] = getconfig(projroot).get(
        'cpplint_blacklist', [])

    # Just pretend blacklisted ones don't exist.
    filenames = [f for f in filenames if f not in code_blacklist]
    filenames = [f for f in filenames if not f.endswith('.mm')]

    cachepath = Path(projroot, '.cache/check_cpplint')
    if full and cachepath.exists():
        cachepath.unlink()

    cache = FileCache(cachepath)

    # Clear out entries and hashes for files that have changed/etc.
    cache.update(filenames, '')
    dirtyfiles = cache.get_dirty_files()

    if dirtyfiles:
        print(f'{Clr.BLU}CppLint checking'
              f' {len(dirtyfiles)} file(s)...{Clr.RST}')

    disabled_filters: list[str] = [
        'build/include_what_you_use',
        'build/c++11',
        'readability/nolint',
        'legal/copyright',
    ]
    filterstr = ','.join(f'-{x}' for x in disabled_filters)

    def lint_file(filename: str) -> None:
        result = subprocess.call([
            f'python{PYVER}', '-m', 'cpplint', '--root=src',
            f'--filter={filterstr}', filename
        ])
        if result != 0:
            raise CleanError(
                f'{Clr.RED}Cpplint failed for {filename}.{Clr.RST}')

    with ThreadPoolExecutor(max_workers=cpu_count()) as executor:
        # Converting this to a list will propagate any errors.
        list(executor.map(lint_file, dirtyfiles))

    if dirtyfiles:
        cache.mark_clean(filenames)
        cache.write()
    print(
        f'{Clr.GRN}CppLint: all {len(filenames)} files are passing.{Clr.RST}',
        flush=True)