Example 1
    def __init__(self, project_path=None):

        # If no project path is given, find the closest.
        if not project_path:
            project_path = Project.closest()

        # If no Project found on scan.
        if not project_path:
            exit_with_message(
                "No gitml project found. Use 'gitml init' "
                "command to create one.", tag=True)

        self.project_path = project_path

        self.git = Git(self.project_path)
        self.workspace = Workspace(self.project_path, self.CODE_ARCHIVE_IGNORE)

        self.db = DataModel(self.project_path, "iteration")()
        self.commit_db = DataModel(self.project_path, "commit")()
        self.query = Query()

        self.dir = _path_join(self.project_path, Project.VML_DIR_NAME,
                              self.DIR_NAME)

        self.commit_dir = _path_join(self.project_path, Project.VML_DIR_NAME,
                                     self.COMMIT_DIR)
Example 2
    def stash(self):
        if self.is_empty():
            exit_with_message("Empty workspace. Nothing to stash.")

        _list = self.list()

        for l in _list:
            move(_path_join(self.path, l), _path_join(self.stash_path, l))

        exit_with_message("Stashed successfully.")
Example 3
    def setup(cls, project_path):
        """Setup task for creating db files for all model.
		"""
        # Create data directory if not exist.
        data_dir = _path_join(project_path, cls.DATA_DIR)
        create_dir_if_not_exist(data_dir)
        # Create db file if not exist.
        for model_name in cls.MODELS:
            db_path = _path_join(data_dir, "%s.json" % model_name)
            if not _path_exists(db_path):
                open(db_path, "a", "utf-8").close()
        return data_dir
Example 4
def _all_files_to_copy_abspaths(source_dir, o, listener):
    from glob import glob
    for (typ, val) in o.file_copying_directives:
        if 'copy_these_files' == typ:
            glob_path = _path_join(source_dir, val)
            these = glob(glob_path)
            if 0 == len(these):
                raise _stop_because_glob_made_no_files(listener, glob_path)
            for this in these:
                yield this
            continue
        assert 'copy_this_file' == typ
        yield _path_join(source_dir, val)
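Both this generator and the _copy_files helper in Example 26 assume only that `o` exposes a file_copying_directives iterable of (type, value) pairs. A minimal, hypothetical stand-in (not the project's real options class) that would satisfy them:

from collections import namedtuple

# Hypothetical stand-in for `o`; only `file_copying_directives` is required.
_Options = namedtuple('_Options', ['file_copying_directives'])

o = _Options(file_copying_directives=(
    ('copy_these_files', '*.css'),      # glob, resolved relative to source_dir
    ('copy_this_file', 'favicon.ico'),  # single file relative to source_dir
))

# `listener` is only consulted when a glob matches nothing, so None suffices here.
paths = list(_all_files_to_copy_abspaths('/some/source/dir', o, listener=None))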
Example 5
def _default_theme_path():
    from os.path import dirname as dn
    from sys import modules
    here = modules[__name__].__file__
    mono_repo_dir = dn(dn(dn(dn(here))))
    return _path_join(
        mono_repo_dir, 'pho-themes', 'for-pelican', 'alabaster-for-pelican')
Example 6
class DataModel(object):

    MODELS = ["iteration", "commit"]

    DATA_DIR = _path_join(".gitml", ".data")

    @classmethod
    def setup(cls, project_path):
        """Setup task for creating db files for all model.
		"""
        # Create data directory if not exist.
        data_dir = _path_join(project_path, cls.DATA_DIR)
        create_dir_if_not_exist(data_dir)
        # Create db file if not exist.
        for model_name in cls.MODELS:
            db_path = _path_join(data_dir, "%s.json" % model_name)
            if not _path_exists(db_path):
                open(db_path, "a", "utf-8").close()
        return data_dir

    def db_path(self, project_path, model_name):
        model_name = model_name.strip().lower()
        return _path_join(project_path, self.DATA_DIR, "%s.json" % model_name)

    def __init__(self, project_path, model_name):
        self.model = model_name.strip().lower()
        self.project = project_path
        # TinyDB instance using db file.
        self.path = self.db_path(self.project, self.model)
        self.db = TinyDB(self.path)

    def __call__(self):
        return self.db
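A minimal usage sketch, mirroring how Example 1 consumes this class; the project path is a hypothetical placeholder, and TinyDB plus the helper functions used above are assumed importable:

project_path = "/tmp/demo-project"                       # hypothetical path
DataModel.setup(project_path)                            # creates .gitml/.data/*.json files
iteration_db = DataModel(project_path, "iteration")()    # __call__ returns the TinyDB handle
iteration_db.insert({"message": "first iteration"})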
Example 7
    def pprint( self,
        dump_files_path = None, pformat_config = _PrettyFormatConfig( )
    ):
        """ Dumps all loaded data to stdout or to files in a directory. """

        # A dump_files_path of None means "print to stdout" (see below), so the
        # directory checks only apply when a path was actually given.
        if dump_files_path is not None:
            if not _path_exists( dump_files_path ):
                _os.mkdir( dump_files_path, 0o700 )
            elif not _path_is_directory( dump_files_path ):
                raise IOError( "Not a directory: {0}".format(
                    dump_files_path
                ) )
            elif not _os.access(
                dump_files_path, _os.R_OK | _os.W_OK | _os.X_OK
            ):
                raise IOError( "Could not access directory: {0}".format(
                    dump_files_path
                ) )

        tables = self._tables
        for table in tables.values( ):
            if None is dump_files_path:
                table.pprint( tables, pformat_config )
            else:
                dump_file_path = _path_join(
                    dump_files_path,
                    table.FILE_NAME_BASE( ) + _path_extsep + "txt"
                )
                with open( dump_file_path, "w" ) as dump_file:
                    stream_print \
                    = _functools.partial( print, file = dump_file )
                    table.pprint( tables, pformat_config, stream_print )
Example 8
 def find_module(self, fullname, return_path=False):
     if fullname in flags.IGNORED_MODULES:
         return None
     tail_module = fullname.rpartition('.')[2]
     base_path = _path_join(self.path, tail_module)
     if _path_isdir(base_path) and _case_ok(self.path, tail_module):
         init_filename = '__init__.py'
         full_path = _path_join(base_path, init_filename)
         if (_path_isfile(full_path) and
             _case_ok(base_path, init_filename)):
             return full_path if return_path else self
     mod_filename = tail_module + '.py'
     full_path = _path_join(self.path, mod_filename)
     if _path_isfile(full_path) and _case_ok(self.path, mod_filename):
         return full_path if return_path else self
     return None
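A brief sketch of exercising this method directly; the enclosing finder class is not shown above, so `finder` is assumed to be an instance whose self.path points at the directory to search:

path = finder.find_module('mypackage.utils', return_path=True)
if path is None:
    print('not importable from', finder.path)
else:
    print('would be loaded from', path)   # .../utils.py or .../utils/__init__.py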
Example 9
    def RECEIVE_FILESYSTEM_CHANGED(self, fcev, listener):
        assert 'file_created_or_saved' == fcev.change_type

        # Make intermediate directory (which is a whole peloocan project)
        diro = self._source_directory
        fkv = self._freeform_name_value_pairs

        rc = _touch_intermediate_project(diro, fkv, listener)
        if rc:
            return rc

        # Derive title and native lines from abstract (normalized) lines
        ad = fcev.TO_ABSTRACT_DOCUMENT(listener)
        if ad is None:
            return 123

        two = _entry_and_lines_via_abstract_document(ad, listener)
        if two is None:
            return 122

        entry, wlines = two

        # Write the intermediate file (maybe it's create, maybe clobber)
        wpath = _path_join(diro.path, 'pages', entry)  # [#882.B]
        with open(wpath, 'w') as fh:
            for line in wlines:
                fh.write(line)

        # Generate the final output file from the intermediate file!! WHEW
        return self.generate_file(listener, entry)
Example 10
    def from_dir(cls, path):
        self = cls()
        self.logger.info("Opening all files in path: %s" % path)
        for file in listdir(path):
            file_path = _path_join(path, file)
            self.add_file(file_path)

        return self
Example 11
 def find_module(self, fullname, return_path=False):
     tail_module = fullname.rpartition('.')[2]
     from . import imports
     if tail_module in imports.HOSTILE_IMPORTS:
         return None
     base_path = _path_join(self.path, tail_module)
     if _path_isdir(base_path) and _case_ok(self.path, tail_module):
         init_filename = '__init__.py'
         full_path = _path_join(base_path, init_filename)
         if (_path_isfile(full_path) and
             _case_ok(base_path, init_filename)):
             return full_path if return_path else self
     mod_filename = tail_module + '.py'
     full_path = _path_join(self.path, mod_filename)
     if _path_isfile(full_path) and _case_ok(self.path, mod_filename):
         return full_path if return_path else self
     return None
Example 12
def _output_path_via(output_directory, source_directory_entry):
    # Output path via

    from os.path import splitext as _splitext
    base, ext = _splitext(source_directory_entry)
    assert '.md' == ext
    tail = ''.join((base, '.html'))

    return _path_join(output_directory, 'pages', tail)  # [#882.B] pages
Example 13
 def run(self):                  
     try:
         print("removing tosdb/_tosdb.py ...")
         _remove(_OUTPUT_PATH)
     except:
         pass        
     try:
         print("removing ./build ...")
         _rmtree( _path_join(_OUR_PATH,'build') )
     except:
         pass              
     super().run()  
Example 14
 def run(self):
     try:
         print("removing tosdb/_tosdb.py ...")
         _remove(_OUTPUT_PATH)
     except:
         pass
     try:
         print("removing ./build ...")
         _rmtree(_path_join(_OUR_PATH, 'build'))
     except:
         pass
     super().run()
Example 15
    def restore(self):
        if not _path_exists(self.stash_path):
            exit_with_message("No stash found.")

        contents = listdir(self.stash_path)

        if len(contents) == 0:
            exit_with_message("Nothing to restore. Stash is empty.")

        for content in contents:
            move(_path_join(self.stash_path, content), self.path)

        exit_with_message("Stash restored.")
Example 16
def _generate_step_assignments(task_prototype, param_set, num_task_per_step, temp_path):
    '''
    Creates len(param_set) tasks, each a clone of task_prototype with a different element of
    param_set set as its parameters. The output of each task is redirected to a temporary
    location.

    Returns a list of tasks organized into steps (i.e. inner lists), along with the mapping
    of param values to output locations.
    '''
    param_set = _copy(param_set)
    param_set.reverse() # Preserve order; we'll be 'popping' in reverse order.

    tasks, params_to_outputs, step_index = [], [], 0

    while param_set:
        tasks.append([])
        num_in_cur_step = 0
        while num_in_cur_step < num_task_per_step and param_set:

            cur_params = param_set.pop()
            cur_name = '-'.join([task_prototype.name, str(step_index), str(num_in_cur_step)])

            cur_task = task_prototype._clone(cur_name, session_aware = False)
            cur_task.set_params(cur_params)

            output_base_path = _path_join(temp_path, str(step_index), str(num_in_cur_step))
            params_to_outputs.append((cur_params, output_base_path))

            # Set each output to a temporary location under temp_path.
            for name in cur_task.get_outputs():
                output_path = _path_join(output_base_path, name)
                cur_task.set_outputs({name: output_path})

            tasks[-1].append(cur_task)
            num_in_cur_step += 1
        step_index += 1


    return tasks, params_to_outputs
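A self-contained sketch (using a stub task class rather than the real framework's task type) of how the helper above splits a parameter set into steps; it assumes the module-level _copy and _path_join imports that the function itself relies on:

class _StubTask:
    def __init__(self, name):
        self.name = name
        self._outputs = {'result': None}

    def _clone(self, name, session_aware=False):
        return _StubTask(name)

    def set_params(self, params):
        self.params = params

    def get_outputs(self):
        return list(self._outputs)

    def set_outputs(self, mapping):
        self._outputs.update(mapping)


proto = _StubTask('grid-search')
params = [{'alpha': a} for a in (0.1, 0.5, 1.0, 5.0, 10.0)]
steps, mapping = _generate_step_assignments(proto, params, 2, '/tmp/outputs')
print([len(step) for step in steps])   # -> [2, 2, 1]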
Example 17
class IPythonStore:
    """
    A connector for getting (one-way) items stored with IPython/Jupyter
    %store magic.

    It is a thin wrapper around the underlying PickleStoreDB (_db), stripping
    out the 'autorestore/' namespace added by %store magic.
    """
    _db = _InteractiveShell.instance().db  # IPython's PickleStore
    _NAMESPACE = 'autorestore/'  # IPython StoreMagic's "namespace"

    root = _path_join(
        str(_db.root),
        _NAMESPACE)  # The root directory of the store, used for watching

    def _trim(self, key: str, _ns=_NAMESPACE):
        # _ns = self._NAMESPACE
        return key[len(_ns):] if key.startswith(_ns) else key

    def keys(self):
        return (self._trim(key) for key in self._db.keys())

    def items(self):
        for key in self._db.keys():
            try:
                yield self._trim(key), self._db[key]
            except KeyError:  # Object unpickleable in this env; skip
                pass

    def get(self, key: str, default=None):
        return self._db.get(self._NAMESPACE + key, self._db.get(key, default))

    def __getitem__(self, key):
        return self._db[self._NAMESPACE + key]

    def __delitem__(self, key):
        del self._db[self._NAMESPACE + key]

    def __contains__(self, key):
        return (self._NAMESPACE + key) in self._db

    def __iter__(self):
        return iter(self.keys())

    def __len__(self):
        return len(list(self.keys()))
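A minimal usage sketch, assuming an IPython/Jupyter environment where some variables were previously saved with %store ('my_df' below is a hypothetical stored name):

store = IPythonStore()
print(list(store))           # stored names, with the 'autorestore/' prefix stripped
if 'my_df' in store:
    my_df = store['my_df']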
Example 18
    def from_program_and_data_files(
        cls, program_path, constants_path_base
    ):
        """ Instantiates from a Dominions executable
            and supporting data files. """

        tables = _OrderedDict( )

        with open( program_path, "rb" ) as program_file:
            with _mmap.mmap(
                program_file.fileno( ), 0, prot = _mmap.PROT_READ
#                Use this line on Windows
#                program_file.fileno( ), 0, access = _mmap.ACCESS_READ
            ) as program_image:

                dominions_version \
                = _DominionsVersion.from_program_image( program_image )

                # Load tables of constants from CSV files.
                for table_type in cls._LOADABLE_TABLE_TYPES:
                    table = table_type.from_csv_file(
                        _path_join(
                            constants_path_base,
                              table_type.FILE_NAME_BASE( )
                            + _path_extsep + "csv"
                        ),
                        dominions_version
                    )
                    tables[ table_type.LABEL( ) ] = table

                # Extract other tables from the Dominions executable.
                for table_type in cls._EXTRACTABLE_TABLE_TYPES:
                    table = table_type.from_program_image(
                        program_image, dominions_version
                    )
                    tables[ table_type.LABEL( ) ] = table

                # TODO: Implement other extractions.

                self = cls( dominions_version, tables )

        return self
Example 19
def _combine(task):
    '''
    The actual code that will be run inside a task to combine all results and add the parameter
    column to the final SFrame(s).
    '''
    # Initialize empty SFrames for each output.
    for out_name in task.get_outputs():
        task.outputs[out_name] = _SFrame()

    params_to_outputs = task.params[_COMBINE_PARAMETER_NAME]
    for params, path in params_to_outputs:
        for out_name in task.get_outputs():

            try: 
                cur_result = _SFrame(_path_join(path, out_name))
            except IOError:
                _log.info("No output for %s with parameters: %s " % (out_name, str(params)))
                continue

            # Add the 'Parameters' column and append to previous results.
            cur_result['parameters'] = _SArray.from_const(params, len(cur_result))
            cur_result.__materialize__()
            task.outputs[out_name] = task.outputs[out_name].append(cur_result)
Example 20
def _database_path_via_collection_path(coll_path):
    from os.path import join as _path_join
    return _path_join(coll_path, 'document-history-cache.sqlite3')
Example 21
def path_join(*paths):
	return _path_join(*paths).replace('\\','/')
Example 22
from urllib.parse import ParseResult
from os.path import realpath, dirname, join as _path_join
import requests
from json import load as json_load

script_loc = realpath(__file__)
script_dir = dirname(script_loc)
del dirname
del realpath

mime_types: dict
with open(_path_join(script_dir, "mimes.json")) as f:
    mime_types = json_load(f)

UA_m = "Mozilla/5.0 (Linux; Android 8.1.0; Pixel Build/OPM2.171019.029; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/68.0.3325.109 Mobile Safari/537.36"
UA_d = "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3526.73 Safari/537.36"
basic_headers = {
    "Accept-Encoding": "gzip, deflate",
    "User-Agent": UA_d,
    "Upgrade-Insecure-Requests": "1",
    "Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8",
    "dnt": "1",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
}
Example 23
                scan_line_length = from_be_uint16(image, offset)
                offset += 2
                offset += 2  # Skip empty word.

                print("File Format Version: {0}".format(file_version))
                print("Number of Sprites: {0}".format(sprites_count))
                print("Scan Line Length: {0}".format(scan_line_length))

                for sprite_num in range(sprites_count):

                    sprite_metadata = SpriteMetadata.from_bytearray(
                        image, offset)
                    offset += sprite_metadata.size

                    sprite_metadata.print_indexed_summary(sprite_num)
                    path_base = _path_join(directory_path, basename)
                    sprite_metadata.save_sprite_image_as(
                        "{path_base}_{nbr:04d}{sep}{ext}".format(
                            path_base=path_base,
                            nbr=sprite_num,
                            sep=_path_extsep,
                            ext=output_format.lower(),
                        ),
                        output_format.upper(),
                        scan_line_length=scan_line_length,
                        fluff_lo_bits=clargs.fluff_lo_bits,
                        generate_alpha_channel=clargs.generate_alpha_channel,
                    )

    raise SystemExit(rc)
Example 24
import sys as _sys
from os.path import join as _path_join, dirname as _dirname, realpath as _realpath
from os import system as _system, getcwd as _getcwd, remove as _remove
from shutil import rmtree as _rmtree

NAME = 'tosdb'
VERSION = '0.8'
DESCRIPTION = "Python Front-End / Wrapper for TOSDataBridge"
AUTHOR = "Jonathon Ogden"
AUTHOR_EMAIL = "*****@*****.**"
PACKAGES = ['tosdb', 'tosdb/cli_scripts', 'tosdb/intervalize']

_AUTO_EXT = '_tosdb'
# everything should be relative to the python/setup.py
_OUR_PATH = _dirname(_realpath(__file__))
_HEADER_NAME = 'tos_databridge.h'
_HEADER_PATH = _path_join(_OUR_PATH, '..', 'include', _HEADER_NAME)
_OUTPUT_PATH = _path_join(_OUR_PATH, NAME, _AUTO_EXT + '.py')

if _OUR_PATH != _getcwd():
    _sys.stderr.write(
        "fatal: setup.py must be run from its own directory(python/)\n")
    exit(1)

#string that should bookmark the topics in Topic_Enum_Wrapper::TOPICS<T>
_MAGIC_TOPIC_STR = 'ksxaw9834hr84hf;esij?><'

#regex for finding our header #define consts
#TODO: adjust so we can pull non-ints
_REGEX_HEADER_CONST = r"#define[\s]+([\w]+)[\s]+.*?(-?[\d][\w]*)"

#adjust for topics we had to permute to form valid enum vars
Example 25
import sys as _sys
from os.path import join as _path_join, dirname as _dirname, realpath as _realpath
from os import getcwd as _getcwd
from shutil import rmtree as _rmtree


NAME = 'tosdb'
VERSION = '0.9'
DESCRIPTION = "Python Front-End / Wrapper for TOSDataBridge"
AUTHOR = "Jonathon Ogden"
AUTHOR_EMAIL = "*****@*****.**"
PACKAGES = ['tosdb','tosdb/cli_scripts','tosdb/intervalize', 'tosdb/streaming']  


_AUTO_EXT = '_tosdb' 
# everything should be relative to the python/setup.py
_OUR_PATH = _dirname(_realpath(__file__))
_HEADER_NAME = 'tos_databridge.h'
_HEADER_PATH = _path_join(_OUR_PATH, '..', 'include', _HEADER_NAME)
_OUTPUT_PATH = _path_join(_OUR_PATH, NAME, _AUTO_EXT + '.py')

if _OUR_PATH != _getcwd():
    _sys.stderr.write("fatal: setup.py must be run from its own directory(python/)\n")
    exit(1)


#string that should bookmark the topics in Topic_Enum_Wrapper::TOPICS<T> 
_MAGIC_TOPIC_STR = 'ksxaw9834hr84hf;esij?><'

#regex for finding our header #define consts 
#TODO: adjust so we can pull non-ints
_REGEX_HEADER_CONST = r"#define[\s]+([\w]+)[\s]+.*?(-?[\d][\w]*)"

#adjust for topics we had to permute to form valid enum vars
Example 26
def _copy_files(tmpdir, source_dir, o, listener):
    from shutil import copyfile
    from os.path import basename
    for source_path in _all_files_to_copy_abspaths(source_dir, o, listener):
        dest = _path_join(tmpdir, basename(source_path))
        copyfile(source_path, dest)
Example 27
 def produce_dotfile_path(self):
     env = self.environ
     from os.path import join as _path_join
     return _path_join(env['HOME'], '.tmx-pho-issues.rec')
Example 28
                offset += 2
                offset += 2     # Skip empty word.

                print( "File Format Version: {0}".format( file_version ) )
                print( "Number of Sprites: {0}".format( sprites_count ) )
                print( "Scan Line Length: {0}".format( scan_line_length ) )

                for sprite_num in range( sprites_count ):
                    
                    sprite_metadata = SpriteMetadata.from_bytearray(
                        image, offset
                    )
                    offset += sprite_metadata.size

                    sprite_metadata.print_indexed_summary( sprite_num )
                    path_base = _path_join( directory_path, basename )
                    sprite_metadata.save_sprite_image_as(
                        "{path_base}_{nbr:04d}{sep}{ext}".format(
                            path_base = path_base, nbr = sprite_num,
                            sep = _path_extsep, ext = output_format.lower( ),
                        ),
                        output_format.upper( ),
                        scan_line_length = scan_line_length,
                        fluff_lo_bits = clargs.fluff_lo_bits,
                        generate_alpha_channel = 
                        clargs.generate_alpha_channel,
                    )

    raise SystemExit( rc )

Example 29
 def lines():
     yikes = ''.join((_path_join(*ptup), '.md'))  # eew
     yield f'SKIPPING because of above - {yikes}'
Example 30
 def exists_file_dir(cls, path):
     file_path = _path_join(path, cls.VML_FILE_NAME)
     dir_path = _path_join(path, cls.VML_DIR_NAME)
     return (_path_exists(file_path) and _path_exists(dir_path))
Example 31
 def db_path(self, project_path, model_name):
     model_name = model_name.strip().lower()
     return _path_join(project_path, self.DATA_DIR, "%s.json" % model_name)
Example 32
def _build_schema_path():
    from os.path import dirname as dn, join as _path_join
    mono_repo = dn(dn(dn(__file__)))
    return _path_join(mono_repo, 'pho-doc', 'documents',
                      '429.4-document-history-schema.dot')
Example 33
            else:
                use_v = v
            yield f"{k}={use_v}"

    def extra_settings_raw():
        for k in _turn_off_all_feeds():
            yield k, ''
        for k, v in other_settings.items():
            yield k, v

    if theme_path is None:
        theme_path = _default_theme_path()

    write_selected = _output_path_via(output_directory, source_directory_entry)

    settings_file_path = _path_join(source_directory, 'pconf.py')

    return tuple(
        s for row in argv_tokens() for s in
        ((row,) if isinstance(row, str) else row))


def _output_path_via(output_directory, source_directory_entry):
    # Output path via

    from os.path import splitext as _splitext
    base, ext = _splitext(source_directory_entry)
    assert '.md' == ext
    tail = ''.join((base, '.html'))

    return _path_join(output_directory, 'pages', tail)  # [#882.B] pages
Example 34
 def test_120_path_of_thing_looks_a_way(self):
     c = self.config
     c = c._components[same_key]
     from os.path import join as _path_join
     exp = _path_join('zz', same_key)
     assert c.path == exp
Example 35
def _touch_this_one_file(tmpdir):
    # This is provided so that client scripts can make a sanity check

    from pathlib import Path
    Path(_path_join(tmpdir, '_THIS_IS_A_TEMPORARY_DIRECTORY_')).touch()
Example 36
def _CLI_for_markdown(sin, sout, serr, bash_argv, efx):
    """Generate markdown tailored to the specific SSG.

    If you specify '-' for the output directory, lines are written to
    STDOUT (with each filename written in a line to STDERR)

    Specifying an SSG adapter will show more options.
    Try "-t md:help" or "-t md:list".
    """

    tup, rc = _this_is_a_lot(sout, serr, bash_argv, efx,
                             _base_formals_for_markdown(efx),
                             _CLI_for_markdown)
    if tup is None:
        return rc
    adapter_func, vals, mon = tup  # #here5

    # ==

    path_head = vals.pop('output_directory')
    is_dry = vals.pop('dry_run', False)
    be_verbose = vals.pop('verbose', False)

    do_output_to_stdout = False
    if '-' == path_head:
        do_output_to_stdout = True
        path_head = '.'

    # ==

    if is_dry and do_output_to_stdout:
        serr.write("-n and collection path of '-' are mutually exclusive\n")
        return 123

    # ==

    if do_output_to_stdout:

        def open_file(wpath):
            serr.write(f"MARKDOWN FILE: {wpath}\n")
            from contextlib import nullcontext as func
            return func(write)

        def write(s):
            return sout.write(s)

        write.write = write  # #watch-the-world-burn
    elif is_dry:

        def open_file(wpath):
            from contextlib import nullcontext as func
            return func(write)

        def write(s):
            return len(s)

        write.write = write  # #watch-the-world-burn
    else:

        def open_file(wpath):
            return open(wpath, 'w')

    from os.path import join as _path_join

    tot_files, tot_lines, tot_something = 0, 0, 0
    did_error = False

    for tup in adapter_func(**vals):
        typ = tup[0]
        if 'adapter_error' == typ:
            did_error = True
            continue  # or w/e
        if 'markdown_file' != typ:
            xx(f"ok neato have fun: {typ!r}")

        path_tail, lines = tup[1:]

        wpath = _path_join(path_head, path_tail)
        with open_file(wpath) as io:
            local_tot_something = 0
            for line in lines:
                local_tot_something += io.write(line)
                tot_lines += 1

            if be_verbose:
                serr.write(f"wrote {wpath} ( ~ {local_tot_something} bytes)\n")

            tot_something += local_tot_something

        tot_files += 1

        for line in lines:
            sout.write(line)

    do_summary = not (did_error and 0 == tot_something)
    # (do summary unless we errored AND no bytes were written)

    if do_summary:
        serr.write(f"wrote {tot_files} file(s), "
                   f"{tot_lines} lines, ~ {tot_something} bytes\n")
    return mon.returncode
Example 37
 def run(self):
     print("removing tosdb/_tosdb.py ...")
     _system('rm ' + _OUTPUT_PATH)
     print("removing ./build ...")
     _system('rm -r ' + _path_join(_OUR_PATH, 'build'))
     super().run()