Example 1
def _dummy_module_dirty() -> tuple[bool, str]:
    """Test hashes on the dummy module to see if it needs updates."""

    # Let's generate a hash from all sources under the python source dir.
    pysources = []
    exts = ['.cc', '.c', '.h']
    for root, _dirs, files in os.walk('src/ballistica/python'):
        for fname in files:
            if any(fname.endswith(ext) for ext in exts):
                pysources.append(os.path.join(root, fname))

    # Also add this script itself so we re-create when it changes.
    pysources.append(__file__)

    outpath = 'assets/src/ba_data/python/._ba_sources_hash'
    if not os.path.exists(outpath):
        existing_hash = ''
    else:
        with open(outpath, encoding='utf-8') as infile:
            existing_hash = infile.read()

    # Important to keep this deterministic...
    pysources.sort()

    # Note: going with plain integers instead of hex so linters
    # don't see words and whine about spelling errors.
    pysources_hash = get_files_hash(pysources, int_only=True)
    dirty = existing_hash != pysources_hash
    return dirty, pysources_hash
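
Note on the helper above: get_files_hash is imported from efrotools in the later examples but its implementation is not shown here. A minimal sketch of what such a helper could look like, assuming it simply hashes file contents in order; the int_only flag and extrahash parameter are modeled on the call sites in these examples:

import hashlib
from typing import Sequence

def get_files_hash(filenames: Sequence[str],
                   extrahash: str = '',
                   int_only: bool = False) -> str:
    """Hypothetical sketch: hash the given files' contents in order."""
    md5 = hashlib.md5()
    for fname in filenames:
        with open(fname, 'rb') as infile:
            md5.update(infile.read())
    md5.update(extrahash.encode())
    if int_only:
        # Plain digits avoid the spell-checker complaints mentioned
        # in Example 1 (hex digests can contain word-like runs).
        return str(int.from_bytes(md5.digest(), byteorder='big'))
    return md5.hexdigest()
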
Example 2
    def update(self, filenames: Sequence[str], extrahash: str) -> None:
        """Update the cache for the provided files and hash type.

        Hashes will be checked for all files (incorporating extrahash)
        and mismatched hash values cleared. Entries for no-longer-existing
        files will be cleared as well.
        """

        # First, completely prune entries for nonexistent files.
        self.entries = {
            path: val
            for path, val in self.entries.items() if os.path.isfile(path)
        }

        # Also remove any not in our passed list.
        self.entries = {
            path: val
            for path, val in self.entries.items() if path in filenames
        }

        # Add empty entries for files that lack them.
        # Also check and store current hashes for all files and clear
        # any entry hashes that differ so we know they're dirty.
        for filename in filenames:
            if filename not in self.entries:
                self.entries[filename] = {}
            self.curhashes[filename] = curhash = get_files_hash(
                [filename], extrahash)
            # Also store modtimes; we'll abort cache writes if
            # anything changed.
            self.mtimes[filename] = os.path.getmtime(filename)
            entry = self.entries[filename]
            if 'hash' in entry and entry['hash'] != curhash:
                del entry['hash']
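
A brief usage sketch of the update() flow above, assuming the entries layout shown (a cleared 'hash' key marks a dirty file) and the hypothetical get_files_hash from earlier; the paths here are illustrative only:

from pathlib import Path

cache = FileCache(Path('.cache/example'))  # Hypothetical location.
files = ['tools/foo.py', 'tools/bar.py']

# Mix a config-file hash into every entry so that config changes
# dirty everything (mirrors the pylint/format examples below).
cache.update(files, get_files_hash(['.pylintrc']))

# After update(), entries whose stored hash mismatched have had the
# hash cleared; that is how later code detects dirty files.
dirty = [f for f, entry in cache.entries.items() if 'hash' not in entry]
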
Example 3
def update_docs_md(check: bool) -> None:
    """Updates docs markdown files if necessary."""
    # pylint: disable=too-many-locals
    from efrotools import get_files_hash, run

    docs_path = 'docs/ba_module.md'

    # We store the hash in a separate file that exists only in the
    # private repo, so the public repo isn't full of constant
    # hash-change commits. (We don't mind those so much on private.)
    docs_hash_path = '.cache/ba_module_hash'

    # Generate a hash from all c/c++ sources under the python subdir
    # as well as all python scripts.
    pysources = []
    exts = ['.cc', '.c', '.h', '.py']
    for basedir in [
            'src/ballistica/python',
            'tools/efro',
            'tools/bacommon',
            'assets/src/ba_data/python/ba',
    ]:
        assert os.path.isdir(basedir), f'{basedir} is not a dir.'
        for root, _dirs, files in os.walk(basedir):
            for fname in files:
                if any(fname.endswith(ext) for ext in exts):
                    pysources.append(os.path.join(root, fname))
    pysources.sort()
    storedhash: Optional[str]
    curhash = get_files_hash(pysources)

    # Extract the current embedded hash.
    if os.path.exists(docs_hash_path):
        with open(docs_hash_path, encoding='utf-8') as infile:
            storedhash = infile.read()
    else:
        storedhash = None

    if (storedhash is None or curhash != storedhash
            or not os.path.exists(docs_path)):
        if check:
            raise RuntimeError('Docs markdown is out of date.')

        print(f'Updating {docs_path}...', flush=True)
        run('make docs')

        # Our docs markdown is just the docs html with a few added
        # bits at the top.
        with open('build/docs.html', encoding='utf-8') as infile:
            docs = infile.read()
        docs = ('<!-- THIS FILE IS AUTO GENERATED; DO NOT EDIT BY HAND -->\n'
                + docs)
        os.makedirs(os.path.dirname(docs_path), exist_ok=True)
        with open(docs_path, 'w', encoding='utf-8') as outfile:
            outfile.write(docs)
        with open(docs_hash_path, 'w', encoding='utf-8') as outfile:
            outfile.write(curhash)
    print(f'{docs_path} is up to date.')
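
A small sketch of how the check flag might be driven from a CI-style wrapper; the entry-point name and argument handling below are assumptions, not the project's actual command plumbing:

import sys

def docs_cmd() -> None:
    """Hypothetical entry point: pass --check to fail instead of update."""
    check = '--check' in sys.argv
    try:
        update_docs_md(check=check)
    except RuntimeError as exc:
        print(f'Check failed: {exc}', file=sys.stderr)
        sys.exit(1)
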
Example 4
def pylint(projroot: Path, full: bool, fast: bool) -> None:
    """Run lint-checking on all scripts deemed lint-able."""
    from efrotools import get_files_hash
    pylintrc = Path(projroot, '.pylintrc')
    if not os.path.isfile(pylintrc):
        raise Exception('pylintrc not found where expected')
    filenames = get_script_filenames(projroot)

    if any(' ' in name for name in filenames):
        raise Exception('found space in path; unexpected')
    script_blacklist: List[str] = []
    filenames = [f for f in filenames if f not in script_blacklist]

    cachebasename = '.cache-lintscriptsfast' if fast else '.cache-lintscripts'
    cachepath = Path(projroot, 'config', cachebasename)
    if full and cachepath.exists():
        cachepath.unlink()
    cache = FileCache(cachepath)

    # Clear out entries and hashes for files that have changed/etc.
    cache.update(filenames, get_files_hash([pylintrc]))

    # Do a recursive dependency check and mark all files that are
    # either dirty themselves or have a dirty dependency.
    filestates: Dict[str, bool] = {}
    for fname in filenames:
        _dirty_dep_check(fname, filestates, cache, fast, 0)

    dirtyfiles = [k for k, v in filestates.items() if v]

    # Sort by modification time so the files we're actively working
    # on get linted first and we see remaining errors sooner.
    dirtyfiles.sort(reverse=True, key=lambda f: os.stat(f).st_mtime)

    if dirtyfiles:
        print(f'Pylint checking {len(dirtyfiles)} file(s)...', flush=True)
        try:
            _run_script_lint(projroot, pylintrc, cache, dirtyfiles, filenames)
        except Exception:
            # Note: even if we fail here, we still want to
            # update our disk cache (since some lints may have passed).
            print('Pylint failed.', flush=True)

            # Printing the traceback here can be handy sometimes;
            # perhaps add an env var to control it? (bool(False)
            # rather than plain False likely sidesteps
            # constant-condition lint warnings.)
            if bool(False):
                import traceback
                traceback.print_exc()
            cache.write()
            sys.exit(255)
    print(f'Pylint: all {len(filenames)} files are passing.', flush=True)

    cache.write()
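
_dirty_dep_check() is referenced but not shown in these examples. A rough sketch of what such a recursive check could look like, assuming each cache entry carries a 'deps' dependency list alongside its hash; everything beyond the 'hash' key is an assumption:

from typing import Dict

def _dirty_dep_check(fname: str, filestates: Dict[str, bool],
                     cache: FileCache, fast: bool, depth: int) -> bool:
    """Hypothetical: mark fname dirty if it or any dependency is dirty."""
    if fname in filestates:
        return filestates[fname]

    # Tentatively mark clean so dependency cycles terminate.
    filestates[fname] = False

    entry = cache.entries.get(fname, {})
    dirty = 'hash' not in entry  # A cleared hash means the file changed.
    if not dirty and not fast:
        # 'deps' is an assumed per-entry dependency list; depth merely
        # mirrors the real signature in this sketch.
        for dep in entry.get('deps', []):
            if _dirty_dep_check(dep, filestates, cache, fast, depth + 1):
                dirty = True
                break
    filestates[fname] = dirty
    return dirty
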
Example 5
def lazy_increment_build() -> None:
    """Increment build number only if C++ sources have changed.

    This is convenient to place in automatic commit/push scripts.
    It could make sense to auto-update the build number when scripts or
    assets change too, but a build-number change requires rebuilding all
    binaries, so I'll leave that as an explicit choice to save work.
    """
    import os
    import subprocess
    from efro.terminal import Clr
    from efro.error import CleanError
    from efrotools import get_files_hash
    from efrotools.code import get_code_filenames
    if sys.argv[2:] not in [[], ['--update-hash-only']]:
        raise CleanError('Invalid arguments')
    update_hash_only = '--update-hash-only' in sys.argv
    codefiles = get_code_filenames(PROJROOT)
    codehash = get_files_hash(codefiles)
    hashfilename = '.cache/lazy_increment_build'
    try:
        with open(hashfilename, encoding='utf-8') as infile:
            lasthash = infile.read()
    except FileNotFoundError:
        lasthash = ''
    if codehash != lasthash:
        print(f'{Clr.SMAG}Source(s) changed; incrementing build...{Clr.RST}')

        if not update_hash_only:
            # Increment the build number directly; the old approach was
            # a full bless, which increments the build as needed:
            # subprocess.run(['make', 'bless'], check=True)
            subprocess.run(
                ['tools/pcommand', 'version_utils', 'incrementbuild'],
                check=True)

        # The increment itself probably changed source files, so
        # recalculate the hash before storing it.
        codehash = get_files_hash(codefiles)
        os.makedirs(os.path.dirname(hashfilename), exist_ok=True)
        with open(hashfilename, 'w', encoding='utf-8') as outfile:
            outfile.write(codehash)
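
The hash-guard pattern in Examples 1, 3, and 5 (hash some inputs, compare against a stored hash, act and re-store on mismatch) generalizes easily. A minimal sketch built on the hypothetical get_files_hash from earlier; the helper name and signature are assumptions:

import os
from typing import Callable, Sequence

def run_if_changed(files: Sequence[str], hashfile: str,
                   action: Callable[[], None]) -> None:
    """Hypothetical helper: run action only when the files' hash changed."""
    curhash = get_files_hash(files)
    try:
        with open(hashfile, encoding='utf-8') as infile:
            lasthash = infile.read()
    except FileNotFoundError:
        lasthash = ''
    if curhash != lasthash:
        action()
        # Re-hash in case the action itself modified the inputs.
        curhash = get_files_hash(files)
        if os.path.dirname(hashfile):
            os.makedirs(os.path.dirname(hashfile), exist_ok=True)
        with open(hashfile, 'w', encoding='utf-8') as outfile:
            outfile.write(curhash)
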
Example 6
def pylint(projroot: Path, full: bool, fast: bool) -> None:
    """Run Pylint on all scripts in our project (with smart dep tracking)."""
    from efrotools import get_files_hash
    from efro.terminal import Clr
    pylintrc = Path(projroot, '.pylintrc')
    if not os.path.isfile(pylintrc):
        raise Exception('pylintrc not found where expected')
    filenames = get_script_filenames(projroot)

    if any(' ' in name for name in filenames):
        raise Exception('found space in path; unexpected')
    script_blacklist: List[str] = []
    filenames = [f for f in filenames if f not in script_blacklist]

    cachebasename = '.cache-lintscriptsfast' if fast else '.cache-lintscripts'
    cachepath = Path(projroot, 'config', cachebasename)
    if full and cachepath.exists():
        cachepath.unlink()
    cache = FileCache(cachepath)

    # Clear out entries and hashes for files that have changed/etc.
    cache.update(filenames, get_files_hash([pylintrc]))

    # Do a recursive dependency check and mark all files that are
    # either dirty themselves or have a dirty dependency.
    filestates: Dict[str, bool] = {}
    for fname in filenames:
        _dirty_dep_check(fname, filestates, cache, fast, 0)

    dirtyfiles = [k for k, v in filestates.items() if v]

    # Sort by modification time so the files we're actively working
    # on get linted first and we see remaining errors sooner.
    dirtyfiles.sort(reverse=True, key=lambda f: os.stat(f).st_mtime)

    if dirtyfiles:
        print(
            f'{Clr.BLU}Pylint checking {len(dirtyfiles)} file(s)...{Clr.RST}',
            flush=True)
        try:
            _run_pylint(projroot, pylintrc, cache, dirtyfiles, filenames)
        finally:
            # No matter what happens, we still want to
            # update our disk cache (since some lints may have passed).
            cache.write()
    print(f'{Clr.GRN}Pylint: all {len(filenames)} files are passing.{Clr.RST}',
          flush=True)

    cache.write()
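
get_script_filenames() appears throughout these examples but is not shown. A plausible sketch, assuming it simply walks a few known script directories; the directory list below is an assumption:

import os
from pathlib import Path
from typing import List

def get_script_filenames(projroot: Path) -> List[str]:
    """Hypothetical: collect .py files under the project's script dirs."""
    script_dirs = ['tools', 'tests']  # Assumed locations.
    filenames: List[str] = []
    for basedir in script_dirs:
        for root, _dirs, files in os.walk(Path(projroot, basedir)):
            for fname in files:
                if fname.endswith('.py'):
                    filenames.append(os.path.join(root, fname))
    return sorted(filenames)
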
Example 7
def formatcode(projroot: Path, full: bool) -> None:
    """Run clang-format on all of our source code (multithreaded)."""
    import time
    import concurrent.futures
    from efrotools import get_files_hash
    from multiprocessing import cpu_count
    os.chdir(projroot)
    cachepath = Path(projroot, 'config/.cache-formatcode')
    if full and cachepath.exists():
        cachepath.unlink()
    cache = FileCache(cachepath)
    cfconfig = Path(projroot, '.clang-format')

    filenames = get_code_filenames(projroot)
    confighash = get_files_hash([cfconfig])
    cache.update(filenames, confighash)

    dirtyfiles = cache.get_dirty_files()

    def format_file(filename: str) -> Dict[str, Any]:
        start_time = time.time()

        # Note: os.system() does not seem to release the GIL;
        # make sure to use subprocess here.
        result = subprocess.call(['clang-format', '-i', filename])
        if result != 0:
            raise Exception(f'Formatting failed for {filename}')
        duration = time.time() - start_time
        print(f'Formatted {filename} in {duration:.2f} seconds.')
        sys.stdout.flush()
        return {'f': filename, 't': duration}

    # NOTE: Using fewer workers than we have logical procs for now;
    # we're bottlenecked by one or two long-running instances, so it
    # actually helps to lighten the load around them. May be worth
    # revisiting once everything is chopped up better.
    with concurrent.futures.ThreadPoolExecutor(
            max_workers=cpu_count() // 2) as executor:
        # Converting this to a list will propagate any errors.
        list(executor.map(format_file, dirtyfiles))

    if dirtyfiles:
        # Since we changed files, need to update hashes again.
        cache.update(filenames, confighash)
        cache.mark_clean(filenames)
        cache.write()
    print(f'Formatting is up to date for {len(filenames)} code files.',
          flush=True)
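
get_dirty_files() and mark_clean() are used here but not defined in these examples. Given the entry layout from Example 2 (update() clears the 'hash' key of changed entries), plausible sketches could be:

from typing import List, Sequence

class FileCache:
    # ...extending the structure seen in Example 2...

    def get_dirty_files(self) -> List[str]:
        """Hypothetical: files whose stored hash was cleared by update()."""
        return [
            path for path, entry in self.entries.items()
            if 'hash' not in entry
        ]

    def mark_clean(self, filenames: Sequence[str]) -> None:
        """Hypothetical: store current hashes so files read as clean."""
        for filename in filenames:
            self.entries[filename]['hash'] = self.curhashes[filename]
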
Example 8
def formatscripts(projroot: Path, full: bool) -> None:
    """Runs yapf on all our scripts (multithreaded)."""
    import time
    from concurrent.futures import ThreadPoolExecutor
    from efrotools import get_files_hash
    from multiprocessing import cpu_count
    os.chdir(projroot)
    cachepath = Path(projroot, 'config/.cache-formatscripts')
    if full and cachepath.exists():
        cachepath.unlink()

    cache = FileCache(cachepath)
    yapfconfig = Path(projroot, '.style.yapf')

    filenames = get_script_filenames(projroot)
    confighash = get_files_hash([yapfconfig])
    cache.update(filenames, confighash)

    dirtyfiles = cache.get_dirty_files()

    def format_file(filename: str) -> None:
        start_time = time.time()
        result = subprocess.call(['yapf', '--in-place', filename])
        if result != 0:
            raise Exception(f'Formatting failed for {filename}')
        duration = time.time() - start_time
        print(f'Formatted {filename} in {duration:.2f} seconds.')
        sys.stdout.flush()

    # NOTE: Using fewer workers than we have logical procs for now;
    # we're bottlenecked by one or two long-running instances, so it
    # actually helps to lighten the load around them. May be worth
    # revisiting once everything is chopped up better.
    with ThreadPoolExecutor(max_workers=cpu_count() // 2) as executor:
        # Convert the futures to a list to propagate any errors even
        # though there are no return values we use.
        list(executor.map(format_file, dirtyfiles))

    if dirtyfiles:
        # Since we changed files, need to update hashes again.
        cache.update(filenames, confighash)
        cache.mark_clean(filenames)
        cache.write()
    print(f'Formatting is up to date for {len(filenames)} script files.',
          flush=True)
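
The list(executor.map(...)) idiom in these formatting examples matters: executor.map() returns a lazy iterator, and a worker's exception only surfaces when its result is consumed. A minimal standalone demonstration:

from concurrent.futures import ThreadPoolExecutor

def work(item: int) -> int:
    if item == 2:
        raise ValueError(f'failed on {item}')
    return item * 10

with ThreadPoolExecutor(max_workers=2) as executor:
    results = executor.map(work, [1, 2, 3])  # No error raised yet.
    try:
        list(results)  # Consuming the results re-raises the ValueError.
    except ValueError as exc:
        print(f'Propagated: {exc}')
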
Example 9
def format_clang_format(projroot: Path, full: bool) -> None:
    """Run clang-format on all of our source code (multithreaded)."""
    import time
    import concurrent.futures
    from multiprocessing import cpu_count
    from efrotools import get_files_hash
    os.chdir(projroot)
    cachepath = Path(projroot, '.cache/format_clang_format')
    if full and cachepath.exists():
        cachepath.unlink()
    cache = FileCache(cachepath)
    cfconfig = Path(projroot, '.clang-format')

    filenames = get_code_filenames(projroot)
    confighash = get_files_hash([cfconfig])
    cache.update(filenames, confighash)

    dirtyfiles = cache.get_dirty_files()

    def format_file(filename: str) -> dict[str, Any]:
        start_time = time.time()

        # Note: os.system() does not seem to release the GIL;
        # make sure to use subprocess here.
        result = subprocess.call(['clang-format', '-i', filename])
        if result != 0:
            raise Exception(f'Formatting failed for {filename}')
        duration = time.time() - start_time
        print(f'Formatted {filename} in {duration:.2f} seconds.')
        sys.stdout.flush()
        return {'f': filename, 't': duration}

    with concurrent.futures.ThreadPoolExecutor(
            max_workers=cpu_count()) as executor:
        # Converting this to a list will propagate any errors.
        list(executor.map(format_file, dirtyfiles))

    if dirtyfiles:
        # Since we changed files, need to update hashes again.
        cache.update(filenames, confighash)
        cache.mark_clean(filenames)
        cache.write()
    print(f'Formatting is up to date for {len(filenames)} code files.',
          flush=True)
Example 10
def formatscripts(projroot: Path, full: bool) -> None:
    """Runs yapf on all our scripts (multithreaded)."""
    import time
    from concurrent.futures import ThreadPoolExecutor
    from multiprocessing import cpu_count
    from efrotools import get_files_hash, PYVER
    os.chdir(projroot)
    cachepath = Path(projroot, 'config/.cache-formatscripts')
    if full and cachepath.exists():
        cachepath.unlink()

    cache = FileCache(cachepath)
    yapfconfig = Path(projroot, '.style.yapf')

    filenames = get_script_filenames(projroot)
    confighash = get_files_hash([yapfconfig])
    cache.update(filenames, confighash)

    dirtyfiles = cache.get_dirty_files()

    def format_file(filename: str) -> None:
        start_time = time.time()
        result = subprocess.call(
            [f'python{PYVER}', '-m', 'yapf', '--in-place', filename])
        if result != 0:
            raise Exception(f'Formatting failed for {filename}')
        duration = time.time() - start_time
        print(f'Formatted {filename} in {duration:.2f} seconds.')
        sys.stdout.flush()

    with ThreadPoolExecutor(max_workers=cpu_count()) as executor:
        # Convert the futures to a list to propagate any errors even
        # though there are no return values we use.
        list(executor.map(format_file, dirtyfiles))

    if dirtyfiles:
        # Since we changed files, need to update hashes again.
        cache.update(filenames, confighash)
        cache.mark_clean(filenames)
        cache.write()
    print(f'Formatting is up to date for {len(filenames)} script files.',
          flush=True)
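
Note the invocation difference in this last variant: yapf runs via an explicit python{PYVER} interpreter rather than a bare yapf executable, pinning which environment's yapf is used. When the formatter should simply match the interpreter running the script, sys.executable is a common alternative; the sketch below is an assumption, not the project's actual choice:

import subprocess
import sys

def format_file(filename: str) -> None:
    # Run yapf under the same interpreter executing this script.
    result = subprocess.call(
        [sys.executable, '-m', 'yapf', '--in-place', filename])
    if result != 0:
        raise Exception(f'Formatting failed for {filename}')
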