Ejemplo n.º 1
0
 def start(self, context: VerediContext) -> None:
     '''
     Invoke our entry function with the supplied `context`.
     '''
     log.group_multi(_LOG_INIT, self.dotted,
                     f"{self.klass}: Starting {self.name}...")
     self._entry_fn(self, context)
Ejemplo n.º 2
0
def _tear_down_end(proc: ProcToSubComm,
                   logger: Optional[log.PyLogType]) -> ExitCodeTuple:
    '''
    Checks that process finished shutdown. If not, we terminate it immediately.

    In any case, we return its exit code (as an ExitCodeTuple of
    (proc.name, exitcode)).
    '''
    _log_dotted = label.normalize(_DOTTED_FUNCS, '_tear_down.end')

    # Make sure it shut down and gave a good exit code.
    # NOTE: Guard on `proc` itself as well - the exitcode read below already
    # did, and dereferencing `proc.process` on a None proc would raise.
    if (proc and proc.process and proc.process.is_alive()
            and proc.process.exitcode is None):
        # Still not exited; terminate it.
        log.group_multi(_LOG_KILL,
                        _log_dotted, "_tear_down_end({}): "
                        "'{}' has still not exited; terminate it... "
                        "Immediately.",
                        proc.name,
                        proc.name,
                        veredi_logger=logger)
        proc.process.terminate()

    # Exit code is None if we have no process to ask.
    exitcode = (proc.process.exitcode if (proc and proc.process) else None)
    log.group_multi(_LOG_KILL,
                    _log_dotted, "_tear_down_end({}): "
                    "'{}' has exited with exit code: {}",
                    proc.name,
                    proc.name,
                    str(exitcode),
                    veredi_logger=logger)
    return ExitCodeTuple(proc.name, exitcode)
Ejemplo n.º 3
0
    def _load(self) -> VerediHealth:
        '''
        Load our configuration data from its file.

        Returns VerediHealth.HEALTHY on success.

        Raises LoadError (and logs it) on any deserialization failure;
        unexpected exceptions are re-raised as LoadError (chained).
        '''
        log_groups = [log.Group.START_UP, log.Group.DATA_PROCESSING]
        log.group_multi(log_groups, self.dotted, "Configuration load...")

        # Spawn a context from what we know, and ask the config repo to load
        # something based on that.
        ctx = DataBareContext(self.dotted, ConfigContext.KEY, self._path,
                              DataAction.LOAD, self._meta())
        log.group_multi(log_groups, self.dotted,
                        "Configuration loading from repo...")
        with self._repo.load(ctx) as stream:
            # Decode w/ serdes.
            # Can raise an error - we'll let it.
            try:
                log.group_multi(log_groups, self.dotted,
                                "Configuration deserializing with serdes...")
                # BUGFIX: corrected "Confgig" typos in the debug message.
                log.debug(
                    "Config Load Context: {}, "
                    "Config Repo: {}, "
                    "Config Serdes: {}", ctx, self._repo, self._serdes)
                # Feed each deserialized document into our config data.
                for each in self._serdes.deserialize_all(
                        stream, self._codec, ctx):
                    log.debug("Config Loading Doc: {}", each)
                    self._load_doc(each)

            except LoadError as error:
                log.group_multi(log_groups,
                                self.dotted,
                                "Configuration load/deserialization failed "
                                "with a LoadError. Erroring out.",
                                log_success=False)
                # Log exception and let bubble up as-is.
                raise log.exception(
                    error, "Configuration init load/deserialization failed "
                    "with a LoadError:  type: {}, str: {}", type(error),
                    str(error))

            except Exception as error:
                log.group_multi(log_groups,
                                self.dotted,
                                "Configuration load/deserialization failed "
                                "with an error of type {}. Erroring out.",
                                type(error),
                                log_success=False)
                # Complain that we found an exception we don't handle.
                # ...then let it bubble up as-is.
                raise log.exception(LoadError,
                                    "Unhandled exception! type: {}, str: {}",
                                    type(error), str(error)) from error

        return VerediHealth.HEALTHY
Ejemplo n.º 4
0
    def start(self, time_sec: Optional[float] = None) -> None:
        '''
        Kick off the sub-process.

        Stores `time_sec` into `self.timer_val` and the current (UTC) time
        into `self.time_start`, then starts `self.process`.
        '''
        log.group_multi(_LOG_INIT, self.dotted,
                        f"{self.klass}: Starting {self.name} sub-process...")
        # Record timing info before launching the process.
        self.timer_val = time_sec
        self.time_start = veredi.time.machine.utcnow()
        self.process.start()
Ejemplo n.º 5
0
    def registrar(registry: Type['BaseRegistrar'], log_groups: List[log.Group],
                  context: 'ConfigContext') -> 'BaseRegistrar':
        '''
        Instantiate the `registry` class with `context` and return the new
        registrar instance (logging before/after).
        '''
        log.group_multi(log_groups, registry.dotted, "Creating {} ({})...",
                        registry.klass, registry.dotted)

        # Instantiate it.
        created = registry(context)

        log.group_multi(log_groups, registry.dotted, "{} ({}) created.",
                        created.klass, created.dotted)
        return created
Ejemplo n.º 6
0
def _tear_down_start(
    proc: ProcToSubComm,
    logger: Optional[log.PyLogType],
) -> None:
    '''
    Flag the sub-process for shutdown via its shutdown event. The proc should
    notice soon (not immediately) and start its shutdown.
    '''
    dotted = label.normalize(_DOTTED_FUNCS, '_tear_down.start')
    log.group_multi(_LOG_KILL,
                    dotted,
                    "_tear_down_start({}): "
                    "Asking '{}' to end gracefully...",
                    proc.name,
                    proc.name,
                    veredi_logger=logger)
    proc.shutdown.set()
Ejemplo n.º 7
0
def nonblocking_tear_down_start(
        proc: ProcToSubComm) -> Optional[ExitCodeTuple]:
    '''
    Kicks off tear-down. Caller will have to loop calling
    `nonblocking_tear_down_wait` for however long they want to wait for a clean
    shutdown, then call `nonblocking_tear_down_end` to finish.

    Returns an ExitCodeTuple early if the sanity check says the process is
    already stopped (or doesn't exist); otherwise returns None.
    '''
    _log_dotted = label.normalize(_DOTTED_FUNCS, 'tear_down.nonblocking.start')
    logger = log.get_logger(proc.name)

    log.group_multi(_LOG_KILL,
                    _log_dotted,
                    "nonblocking_tear_down_start({}): Begin.",
                    proc.name,
                    veredi_logger=logger)

    # ------------------------------
    # Sanity Check, Early Out.
    # ------------------------------
    result = _tear_down_check(proc, logger)
    if result:
        # BUGFIX: a stray comma used to split this message in two, making the
        # second half a positional format arg and garbling the log output.
        log.group_multi(_LOG_KILL,
                        _log_dotted,
                        "nonblocking_tear_down_start({}): "
                        "Check returned exit code: {}",
                        proc.name,
                        result,
                        veredi_logger=logger)
        return result

    # ------------------------------
    # Kick off tear-down.
    # ------------------------------
    log.group_multi(_LOG_KILL,
                    _log_dotted,
                    "nonblocking_tear_down_start({}): "
                    "Starting tear-down...",
                    proc.name,
                    veredi_logger=logger)
    _tear_down_start(proc, logger)
    # No return value for `_tear_down_start()`; can't check anything.

    log.group_multi(_LOG_KILL,
                    _log_dotted,
                    "nonblocking_tear_down_start({}): Done.",
                    proc.name,
                    veredi_logger=logger)
Ejemplo n.º 8
0
def registrar(reg_type: Type['BaseRegistrar'], log_groups: List[log.Group],
              context: 'ConfigContext',
              instance: Optional['BaseRegistrar']) -> 'BaseRegistrar':
    '''
    Create a BaseRegistrar sub-class instance.

    `instance` should be where you store the registrar after creation.
    It will be checked to ensure one doesn't already exist.

    Raises RegistryError if `instance` already exists.
    '''
    log.group_multi(log_groups, reg_type.dotted,
                    "Create requested for {} ({})...", reg_type.klass,
                    reg_type.dotted)

    # ------------------------------
    # Error: Do we already have one?
    # ------------------------------
    if instance:
        msg = f"{reg_type} already exists! Should not be recreating it!"
        log.registration(reg_type.dotted, msg,
                         log_minimum=log.Level.ERROR,
                         log_success=False)
        bg_data, _ = instance._background()
        raise log.exception(
            RegistryError, msg,
            context=context,
            data={
                'background': bg_data,
                'type': reg_type,
                'existing': instance,
            })

    # ------------------------------
    # Create registrar.
    # ------------------------------
    created = reg_type.registrar(log_groups, context)

    log.group_multi(log_groups, reg_type.dotted,
                    f"Create request completed for {reg_type.klass} "
                    f"({created.dotted})...")
    return created
Ejemplo n.º 9
0
def nonblocking_tear_down_end(proc: ProcToSubComm) -> Optional[ExitCodeTuple]:
    '''
    Finishes tear-down. Checks that process finished shutdown. If not, we
    terminate it immediately.

    In any case, we return its exit code.
    '''
    dotted = label.normalize(_DOTTED_FUNCS, 'tear_down.nonblocking.end')
    proc_logger = log.get_logger(proc.name)

    # Finish tear-down; report and return whatever exit code we got.
    exit_code = _tear_down_end(proc, proc_logger)
    log.group_multi(_LOG_KILL,
                    dotted,
                    "nonblocking_tear_down_end({}): "
                    "_tear_down_end returned exit code: {}",
                    proc.name,
                    exit_code,
                    veredi_logger=proc_logger)
    return exit_code
Ejemplo n.º 10
0
def _import(module: str, log_dotted: label.DotStr) -> ModuleType:
    '''
    Tries to import `module` (a dotted module name string).

    Logs to start-up group on success/failure.

    If failure, an exception of whatever type will be allowed to bubble up
    (ModuleNotFoundError is logged before re-raising).
    '''
    try:
        log.group_multi(_LOG_INIT, log_dotted, f"Importing {module}...")
        imported = importlib.import_module(module)
        log.group_multi(_LOG_INIT,
                        log_dotted,
                        f"Imported {module}: {imported}",
                        log_success=(log.SuccessType.SUCCESS
                                     if imported else log.SuccessType.FAILURE))
        return imported

    except ModuleNotFoundError as error:
        log.group_multi(_LOG_INIT,
                        log_dotted,
                        f"Failed to import {module}: {error}",
                        log_success=log.SuccessType.FAILURE)
        raise
    # NOTE: removed unreachable `return None` - the try block always either
    # returns or raises.
Ejemplo n.º 11
0
def _tear_down_check(
        proc: ProcToSubComm,
        logger: Optional[log.PyLogType]) -> Optional[ExitCodeTuple]:
    '''
    Checks that process exists, then if process has good exit code.

    Returns an ExitCodeTuple for "already done" cases (no proc/process, or
    process already exited cleanly); returns None if tear-down still needs
    to happen.
    '''
    _log_dotted = label.normalize(_DOTTED_FUNCS, '_tear_down.check')

    if not proc or not proc.process:
        if proc:
            log.group_multi(_LOG_KILL,
                            _log_dotted, "_tear_down_check({}): "
                            "No {} to stop.",
                            proc.name,
                            proc.name,
                            veredi_logger=logger)
        else:
            log.group_multi(_LOG_KILL,
                            _log_dotted, "_tear_down_check(): "
                            "Cannot stop None/Null sub-process: {}",
                            proc,
                            veredi_logger=logger)
        # Pretend it exited with good exit code?
        # BUGFIX: `proc` can be None/Null on this branch (that's what the
        # else-log above handles), so don't dereference `.name` blindly.
        return ExitCodeTuple(proc.name if proc else None, 0)

    if proc.process.exitcode == 0:
        log.group_multi(_LOG_KILL,
                        _log_dotted, "_tear_down_check({}): "
                        "Process '{}' is already stopped.",
                        proc.name,
                        proc.name,
                        veredi_logger=logger)
        return ExitCodeTuple(proc.name, proc.process.exitcode)

    # Still needs tearing down.
    return None
Ejemplo n.º 12
0
    def rules(self, context: 'VerediContext') -> Nullable[RulesGame]:
        '''
        Creates and returns the proper RulesGame object for this specific game
        with its game definition and saved data.

        Raises a ConfigError if no rules label or game id.
        '''
        log_groups = [log.Group.START_UP, log.Group.DATA_PROCESSING]
        log.group_multi(log_groups, self.dotted,
                        "rules: Creating game rules object "
                        "from rules: {}, id: {}",
                        self._rules, self._id)

        # ---
        # Sanity: need both a rules label and a game id.
        # ---
        if not (self._rules and self._id):
            log.group_multi(log_groups, self.dotted,
                            "rules: Failed to create game rules... missing "
                            "our rules or id: rules {}, id: {}",
                            self._rules, self._id,
                            log_success=False)
            raise log.exception(
                ConfigError,
                "No rules label or id for game; cannot create the "
                "RulesGame object. rules: {}, id: {}",
                self._rules, self._id)

        # ---
        # Context
        # ---
        # Caller may supply their own; otherwise build a default from our
        # path/rules/id, which should be good in most cases.
        if not context:
            context = ConfigContext(self._path,
                                    self.dotted,
                                    key=self._rules,
                                    id=self._id)

        # ---
        # Create the rules.
        # ---
        # '<rules-dotted>.game' is our full dotted string.
        game_rules = self.create_from_label(
            label.normalize(self._rules, 'game'),
            context=context)
        log.group_multi(log_groups, self.dotted,
                        "rules: Created game rules.",
                        log_success=True)
        return game_rules
Ejemplo n.º 13
0
    def _load_doc(self, document: 'DeserializeTypes') -> None:
        '''
        Load a document from our config file into our config data.

        Dict documents with a doc-type key get stored in `self._config` under
        their Document type; anything else raises.

        Raises LoadError
        '''
        log_groups = [log.Group.START_UP, log.Group.DATA_PROCESSING]
        log.group_multi(log_groups, self.dotted,
                        "Configuration loading document...")

        if isinstance(document, list):
            log.group_multi(log_groups,
                            self.dotted,
                            "Configuration loaded a list instead of a dict?! "
                            "Erroring out.",
                            log_success=False)
            raise log.exception(
                LoadError, "TODO: How do we deal with list document? {}: {}",
                type(document), str(document))

        elif (isinstance(document, dict)
              and Hierarchy.VKEY_DOC_TYPE in document):
            # Save these to our config dict under their doc-type key.
            doc_type_str = document[Hierarchy.VKEY_DOC_TYPE]
            doc_type = Document.get(doc_type_str)
            log.group_multi(log_groups, self.dotted,
                            "Configuration loaded doc_type: {}", doc_type_str)
            self._config[doc_type] = document

        else:
            # BUGFIX: corrected "unknow" typo in log/error messages.
            log.group_multi(log_groups,
                            self.dotted,
                            "Configuration cannot load unknown document. "
                            "Erroring out.",
                            log_success=False)
            raise log.exception(
                LoadError, "Unknown document while loading! "
                "Does it have a '{}' field? "
                "{}: {}", Hierarchy.VKEY_DOC_TYPE, type(document),
                str(document))
Ejemplo n.º 14
0
def nonblocking_tear_down_wait(
        proc: ProcToSubComm,
        graceful_wait: float = 0.1,
        log_enter: bool = False,
        log_wait_timeout: bool = False,
        log_exit: bool = False) -> Optional[ExitCodeTuple]:
    '''
    Wait for `graceful_wait` seconds for process to end gracefully.

    `log_<something>` flags are for help when looping for a small wait so other
    systems can do things. Logs are guarded by `log_<something>`, so a caller
    can have enter logged once, then just loop logging exit (for example).

    Returns the ExitCodeTuple if the process exited during the wait; None
    otherwise.
    '''
    _log_dotted = label.normalize(_DOTTED_FUNCS, 'tear_down.nonblocking.wait')
    logger = log.get_logger(proc.name)
    # BUGFIX: these logs used to claim "nonblocking_tear_down_start" -
    # misleading when reading logs for this wait function.
    log.group_multi(_LOG_KILL,
                    _log_dotted,
                    "nonblocking_tear_down_wait({}): Begin.",
                    proc.name,
                    veredi_logger=logger)

    # ------------------------------
    # Wait for tear-down.
    # ------------------------------
    result = _tear_down_wait(proc,
                             logger,
                             graceful_wait,
                             log_enter=log_enter,
                             log_wait_timeout=log_wait_timeout,
                             log_exit=log_exit)
    if result:
        log.group_multi(_LOG_KILL,
                        _log_dotted,
                        "_tear_down_wait({}): Returned exit code: {}",
                        proc.name,
                        result,
                        veredi_logger=logger)
        return result

    log.group_multi(_LOG_KILL,
                    _log_dotted,
                    "nonblocking_tear_down_wait({}): No exit yet...",
                    proc.name,
                    veredi_logger=logger)
Ejemplo n.º 15
0
def _find_modules(
    root: paths.Path,
    registrars: List[str] = [],
    registrars_ut: List[str] = [],
    registrees: List[str] = [],
    registrees_ut: List[str] = [],
    log_dotted: Optional[label.DotStr] = None,
    ignores: Optional[Set[Union[str, re.Pattern]]] = None,
    ignore_dirs: Optional[Set[Union[str, re.Pattern]]] = None,
    find_ut: Optional[bool] = None,
) -> Tuple[List[str], List[str], List[str]]:
    '''
    Finds all modules in `root` and subdirectories that match our
    requirements for being a place to put "Register me plz!!!" code for
    registry entries.

    `registrees` will be set to `[_REGISTREE_INIT_MODULE_NAME]` if not
    provided. String must match file-name-sans-extension exactly.

    `registrees_ut` will be set to `[_REGISTREE_INIT_UT_MODULE_NAME]` if not
    provided. String must match file-name-sans-extension exactly.
      - This can be disabled if `find_ut` is set explicitly to False, or forced
        to be enabled if `find_ut` is set explicitly to True. The default value
        of `None` will obey the `background.testing.get_unit_testing()` flag.

    `registrars` will be set to `[_REGISTRAR_INIT_MODULE_NAME]` if not
    provided. String must match file-name-sans-extension exactly.

    `registrars_ut` will be set to `[_REGISTRAR_INIT_UT_MODULE_NAME]` if not
    provided. String must match file-name-sans-extension exactly.
      - This can be disabled if `find_ut` is set explicitly to False, or forced
        to be enabled if `find_ut` is set explicitly to True. The default value
        of `None` will obey the `background.testing.get_unit_testing()` flag.

    `log_dotted` is only used for logging and will be
    `{_DOTTED}._find_modules' if not provided.

    Raises NotADirectoryError if `root` does not exist or is not a directory.

    Returns a 3-tuple of lists of strings of module names:
      - Tuple is:
        - Tuple[0]: Registrars found.
        - Tuple[1]: Registrees found.
        - Tuple[2]: Unknowns found.
          - Didn't get ignored but also not registrar/registree files.
      - Each tuple item is a list of strings, e.g.:
        [
          'veredi.__register__',
          'veredi.config.__register__',
          ...
        ]
    '''
    # ------------------------------
    # Set up vars...
    # ------------------------------
    log_dotted = log_dotted or label.normalize(_DOTTED, '_find_modules')

    # What should we import? Are we importing unit-testing stuff too?
    #
    # BUGFIX: Work on copies of the list parameters. They have mutable default
    # values (`[]`), which Python shares across calls - the old code appended
    # to them (and to caller-supplied lists) in-place, so the defaults
    # accumulated entries over repeated calls.
    import_registrars = list(registrars)
    if not import_registrars:
        # Can put this in imports directly - will always want it.
        import_registrars.append(_REGISTRAR_INIT_MODULE_NAME)
    find_registrars_ut = list(registrars_ut)
    if not find_registrars_ut:
        find_registrars_ut.append(_REGISTRAR_INIT_UT_MODULE_NAME)

    import_registrees = list(registrees)
    if not import_registrees:
        # Can put this in imports directly - will always want it.
        import_registrees.append(_REGISTREE_INIT_MODULE_NAME)
    find_registrees_ut = list(registrees_ut)
    if not find_registrees_ut:
        # BUGFIX: This branch used to (incorrectly) check `registrars_ut`, so
        # a supplied `registrees_ut` could suppress the default or vice versa.
        find_registrees_ut.append(_REGISTREE_INIT_UT_MODULE_NAME)

    if find_ut is True:
        # Explicitly want to find unit-test class registrations.
        import_registrars.extend(find_registrars_ut)
        import_registrees.extend(find_registrees_ut)
    elif find_ut is False:
        # Explicitly /do not/ want to find unit-test class registrations.
        pass
    elif background.testing.get_unit_testing():
        # Implicitly want to find unit-test class registrations - we're in
        # unit-testing run mode.
        import_registrars.extend(find_registrars_ut)
        import_registrees.extend(find_registrees_ut)
    # Else, implicitly don't want unit-testing - we're a normal run.

    ignore_dirs = ignore_dirs or _FIND_MODULE_IGNORES_DIRS
    ignores = ignores or _FIND_MODULE_IGNORES

    base_path = root
    base_name = base_path.name

    # ------------------------------
    # Validate the root.
    # ------------------------------
    if root.exists() and root.is_dir():
        log.group_multi(
            _LOG_INIT, log_dotted, "Find module root is valid.\n"
            "  base: {}\n"
            "  path: {}\n"
            "  find: \n"
            "    registrars: {}\n"
            "    registrees: {}", base_name, base_path, import_registrars,
            import_registrees)
    else:
        # Merged the two (previously duplicated) failure branches: same
        # log/exception shape, only the message differs.
        msg = ("Find module root is not a directory!"
               if root.exists() else
               "Find module root does not exist!")
        log.group_multi(_LOG_INIT,
                        log_dotted,
                        msg + "\n"
                        "  base: {}\n"
                        "  path: {}\n"
                        "  find: \n"
                        "    registrars: {}\n"
                        "    registrees: {}",
                        base_name,
                        base_path,
                        import_registrars,
                        import_registrees,
                        log_minimum=log.Level.ERROR)
        data = {
            'root': root,
            'registrars': registrars,
            'registrars-unit-test': registrars_ut,
            'registrees': registrees,
            'registrees-unit-test': registrees_ut,
            'ignore_dirs': ignore_dirs,
            'ignores': ignores,
            'import_registrars': import_registrars,
            'import_registrees': import_registrees,
            'base_path': base_path,
            'base_name': base_name,
        }
        error = NotADirectoryError(msg, data)
        raise log.exception(error, msg)

    # ------------------------------
    # Find the modules.
    # ------------------------------
    return _scan_tree(log_dotted, base_name, base_path, import_registrars,
                      import_registrees, ignores, ignore_dirs, find_ut)
Ejemplo n.º 16
0
def set_up(
    proc_name: str,
    config: Configuration,
    context: VerediContext,
    entry_fn: StartProcFn,
    t_proc_to_sub: Type['ProcToSubComm'] = ProcToSubComm,
    t_sub_to_proc: Type['SubToProcComm'] = SubToProcComm,
    finalize_fn: FinalizeInitFn = None,
    initial_log_level: Optional[log.Level] = None,
    debug_flags: Optional[DebugFlag] = None,
    unit_testing: Optional[bool] = False,
    proc_test: Optional[ProcTest] = None,
    shutdown: Optional[multiprocessing.Event] = None
) -> Optional[ProcToSubComm]:
    '''
    Get a process ready for _run_proc().

    Parameters:
      - `proc_name`: Name used for the sub-process's logger, the
        multiprocessing.Process, and both comms objects.
      - `config`: Configuration handed to the sub-process's SubToProcComm.
      - `context`: Updated here with the log level and SubToProcComm, then
        given to the sub-process as its entry args.
      - `entry_fn`: The actual sub-process entry; `_subproc_entry()` is the
        multiprocessing target and will call this after set-up.
      - `t_proc_to_sub` / `t_sub_to_proc`: If not default, those classes
        will be instantiated instead of ProcToSubComm / SubToProcComm.
      - `initial_log_level`: Minimum log level for our logger; also saved
        into the ConfigContext.
      - `debug_flags`: Stored on the SubToProcComm.
      - `proc_test`: If it has ProcTest.DNE, no process is created and None
        is returned.

    If `unit_testing`, creates the ut_pipe side-channel (and stores
    `proc_test` in the context).

    If `finalize_fn`, sends both ProcToSubComm and SubToProcComm objects in to
    be processed just before set-up is complete.

    `shutdown` is an optional param in case caller wants multiple sub-processes
    to share the same shutdown flag; one is created here if not supplied.

    Returns a `t_proc_to_sub` (default: ProcToSubComm) object, or None if
    `proc_test` says this process Does Not Exist. When ready to start/run the
    subprocess, call start() on it.
    '''
    logger = log.get_logger(proc_name, min_log_level=initial_log_level)
    log_dotted = label.normalize(_DOTTED_FUNCS, 'set_up')

    if proc_test and proc_test.has(ProcTest.DNE):
        # This process 'Does Not Exist' right now.
        # Should we downgrade this to debug, or error out more heavily?
        # (i.e. exception?)
        log.group_multi(_LOG_INIT,
                        log_dotted,
                        "'{}' has {}. Skipping creation.",
                        proc_name,
                        proc_test,
                        veredi_logger=logger,
                        log_minimum=log.Level.ERROR,
                        log_success=False)
        return None

    # ------------------------------
    # Create multiproc IPC stuff.
    # ------------------------------
    log.group_multi(_LOG_INIT,
                    log_dotted,
                    "'{}': Creating inter-process communication...",
                    proc_name,
                    veredi_logger=logger)

    # The official us<->them IPC pipe.
    # (child end goes to SubToProcComm, parent end to ProcToSubComm.)
    child_pipe, parent_pipe = multiprocessing.Pipe()

    # The side-channel/unit-test us<->them IPC pipe.
    # Only created when `unit_testing`; otherwise both ends stay None.
    ut_child_pipe, ut_parent_pipe = None, None
    if unit_testing:
        log.group_multi(_LOG_INIT,
                        log_dotted, "'{}': Creating unit-testing "
                        "inter-process communication...",
                        proc_name,
                        veredi_logger=logger)
        ut_child_pipe, ut_parent_pipe = multiprocessing.Pipe()
        # Save proc_test so it's available in the context the sub-process
        # receives.
        context.add('proc-test', proc_test)

    # multiproc shutdown flag
    # (only create our own if the caller didn't supply a shared one)
    if not shutdown:
        log.group_multi(_LOG_INIT,
                        log_dotted, "'{}': Creating shutdown inter-process "
                        "event flag...",
                        proc_name,
                        veredi_logger=logger)
        shutdown = multiprocessing.Event()

    # ------------------------------
    # Create the process's private info.
    # ------------------------------
    log.group_multi(_LOG_INIT,
                    log_dotted,
                    "'{}': Creating process comms objects...",
                    proc_name,
                    veredi_logger=logger)

    # Info for the proc itself to own.
    comms = t_sub_to_proc(name=proc_name,
                          config=config,
                          entry_fn=entry_fn,
                          pipe=child_pipe,
                          shutdown=shutdown,
                          debug_flags=debug_flags,
                          ut_pipe=ut_child_pipe)

    # ---
    # Updated Context w/ start-up info (SubToProcComm, etc).
    # ---
    log.group_multi(_LOG_INIT,
                    log_dotted,
                    "'{}': Saving into the ConfigContext...",
                    proc_name,
                    veredi_logger=logger)
    ConfigContext.set_log_level(context, initial_log_level)
    ConfigContext.set_subproc(context, comms)

    # ------------------------------
    # Create the Process, ProcToSubComm
    # ------------------------------
    # The context (now holding comms) is the only arg the sub-process entry
    # receives.
    subp_args = [context]
    subp_kwargs = {}

    log.group_multi(_LOG_INIT,
                    log_dotted,
                    "'{}': Creating the sub-process object...",
                    proc_name,
                    veredi_logger=logger)

    # Create the process object (doesn't start the process).
    subprocess = multiprocessing.Process(
        # _subproc_entry() is always the target; it will do some setup and then
        # call the actual target: `entry_fn`.
        target=_subproc_entry,
        name=proc_name,
        args=subp_args,
        kwargs=subp_kwargs)

    # Info for the caller about the proc and how to talk to.
    proc = t_proc_to_sub(name=proc_name,
                         process=subprocess,
                         pipe=parent_pipe,
                         shutdown=shutdown,
                         ut_pipe=ut_parent_pipe)

    # ------------------------------
    # Use Finalize Callback, if supplied.
    # ------------------------------
    if finalize_fn:
        log.group_multi(_LOG_INIT,
                        log_dotted, "'{}': Finalize function supplied. "
                        "Calling {}...",
                        proc_name,
                        finalize_fn,
                        veredi_logger=logger)
        # Both sides of the comms are handed over for final adjustments.
        finalize_fn(proc, comms)

    # ------------------------------
    # Return ProcToSubComm for caller to use to communicate to sub-proc.
    # ------------------------------
    log.group_multi(_LOG_INIT,
                    log_dotted,
                    "'{}': Set-up complete.",
                    proc_name,
                    veredi_logger=logger)
    return proc
Ejemplo n.º 17
0
def _register_entry(configuration: Configuration, entry: Dict[str, Any],
                    log_dotted: label.DotStr) -> (bool, label.DotStr):
    '''
    Run a registration sweep for one registration entry in the configuration.

    `configuration` - Configuration to pull settings & a context from.
    `entry`         - One 'registration' entry (dict) from the config data.
    `log_dotted`    - Dotted label to use for log output.

    Returns a tuple of:
      - bool: True if modules were imported and no module's init function
        reported failure.
      - label.DotStr: the entry's dotted name.
    '''

    # ---
    # Get settings...
    # ---
    # Required: every entry must provide a name and a dotted label.
    name = ConfigRegistration.name(entry)
    dotted = ConfigRegistration.dotted(entry)
    if not name or not dotted:
        msg = (f"Invalid 'registration' entry in configuration file. "
               "At a minimum, "
               f"'{ConfigRegistration.NAME.full_key()}' and "
               f"'{ConfigRegistration.DOTTED.full_key()}' "
               f"must be provided. All non-'{VEREDI_NAME_DISPLAY}' "
               "registrations must also, at a minimum, provide: "
               f"'{ConfigRegistration.PATH_ROOT.full_key()}'.")
        log.group_multi(_LOG_INIT, log_dotted, msg)
        # NOTE(review): assumes this raises; if it can return normally,
        # `name`/`dotted` are falsy below (e.g. `name.lower()`) - confirm.
        background.config.exception(None,
                                    msg,
                                    error_data={
                                        'config-reg-entry': entry,
                                    })

    log.group_multi(_LOG_INIT, log_dotted,
                    f"Getting registration settings for {name} ({dotted})...")

    # Quantum Required: either an explicit root path, or the entry must be
    # Veredi itself (whose root we already know).
    root = ConfigRegistration.path_root(entry, configuration)
    if not root:
        # If no root supplied, we must be dealing with ourself - otherwise no
        # idea what to do.
        if (name.lower() != VEREDI_NAME_CODE
                or dotted.lower() != VEREDI_NAME_CODE):
            msg = (f"Don't know how to register {name} ({dotted}). "
                   "At a minimum, "
                   f"'{ConfigRegistration.PATH_ROOT.full_key()}' "
                   "must be provided along with "
                   f"'{ConfigRegistration.NAME.full_key()}' and "
                   f"'{ConfigRegistration.DOTTED.full_key()}'.")
            log.group_multi(_LOG_INIT, log_dotted, msg)
            background.config.exception(None,
                                        msg,
                                        error_data={
                                            'config-reg-entry': entry,
                                        })
        # We know a default to use for Veredi. Our root.
        else:
            root = LIB_VEREDI_ROOT

    # Optional: file-name filters & ignore rules for the module search.
    registrars = (ConfigRegistration.path_run(entry, True) or None)
    registrars_ut = (ConfigRegistration.path_test(entry, True) or None)
    registrees = (ConfigRegistration.path_run(entry, False) or None)
    registrees_ut = (ConfigRegistration.path_test(entry, False) or None)
    ignores = (ConfigRegistration.path_ignore_files(entry) or None)
    ignore_dirs = (ConfigRegistration.path_ignore_dirs(entry) or None)
    find_ut = (ConfigRegistration.force_test(entry) or None)

    log.group_multi(
        _LOG_INIT,
        log_dotted,
        "Settings for {} ({}): {}",
        name,
        dotted,
        # TODO v://future/2021-03-14T12:27:54
        # data={,
        {
            'name': name,
            'dotted': dotted,
            'root': root,
            'registrars': registrars,
            'registrars_test': registrars_ut,
            'registrees': registrees,
            'registrees_test': registrees_ut,
            'ignores': ignores,
            # Was gathered but previously omitted from this settings log.
            'ignore-dirs': ignore_dirs,
            'unit-test': find_ut,
        })

    # ---
    # Search w/ settings.
    # ---
    log.group_multi(_LOG_INIT, log_dotted,
                    "Searching {} ({}) for registration...\n"
                    "  root: {}", name, dotted, root)

    module_names = _find_modules(root, registrars, registrars_ut, registrees,
                                 registrees_ut, log_dotted, ignores,
                                 ignore_dirs, find_ut)
    registrar_names, registree_names, unknown_names = module_names

    # TODO v://future/2021-03-14T12:27:54
    # add registrar_names, registree_names, unknowns to log as 'data'.
    log.group_multi(
        _LOG_INIT, log_dotted, f"{len(registrar_names)} registrar "
        f"{text.plural(registrar_names, 'module')} "
        f"found for {name} ({dotted}).")
    log.group_multi(
        _LOG_INIT, log_dotted, f"{len(registree_names)} registree "
        f"{text.plural(registree_names, 'module')} "
        f"found for {name} ({dotted}).")
    if unknown_names:
        log.group_multi(_LOG_INIT,
                        log_dotted, f"{len(unknown_names)} unknown "
                        f"{text.plural(unknown_names, 'module')} "
                        f"found for {name} ({dotted})! "
                        "Modules did not get ignored but also do not "
                        "match any filenames for registrars/registrees?",
                        log_minimum=log.Level.WARNING)

    # ---
    # Load all registry modules.
    # ---
    # Now load the modules we found.
    # NOTE: the loop variable must NOT be `name` - it previously shadowed the
    # entry's `name`, corrupting the "imported for {name}" log below.
    log.group_multi(_LOG_INIT, log_dotted,
                    f"Loading {name} ({dotted}) registrar modules...")
    imported = []
    for module_name in registrar_names:
        imported.append(_import(module_name, log_dotted))

    for module_name in registree_names:
        imported.append(_import(module_name, log_dotted))

    log.group_multi(
        _LOG_INIT, log_dotted, f"{len(imported)} "
        f"{text.plural(imported, 'module')} "
        f"imported for {name} ({dotted}).")

    # If we imported nothing... that's probably a fail.
    if len(imported) <= 0:
        return False, dotted

    # ---
    # Set-up modules?
    # ---
    # Look for the function. Call it with the args defined in
    # RegistrationFunc if it exists.
    log.group_multi(
        _LOG_INIT, log_dotted, "Checking for module initialization functions "
        f"(`{_REGISTRATION_FUNC_NAME}()`)...")

    context = configuration.make_config_context()
    module_successes = []
    module_failures = []
    module_noop = []
    for module in imported:
        # `getattr` w/ default instead of try/except AttributeError.
        module_set_up = getattr(module, _REGISTRATION_FUNC_NAME, None)
        if not module_set_up:
            # No init function; that's allowed - just note it.
            module_noop.append(module.__name__)
            continue

        log.group_multi(
            _LOG_INIT, log_dotted,
            f"Running `{module.__name__}.{_REGISTRATION_FUNC_NAME}()`...")

        # Call registration function with config.
        success = module_set_up(configuration, context)
        if success:
            module_successes.append(module.__name__)
        else:
            module_failures.append(module.__name__)

        log.group_multi(
            _LOG_INIT,
            log_dotted,
            f"`{module.__name__}.{_REGISTRATION_FUNC_NAME}()` done.",
            log_success=success)

    log.group_multi(_LOG_INIT,
                    log_dotted,
                    "Done initializing modules.",
                    data={
                        'successes': module_successes,
                        'failures': module_failures,
                        'no-init': module_noop,
                    })

    # ---
    # Success or Failure, and list of module names imported.
    # ---
    # We'll assume that no module failures is success.
    return (len(module_failures) == 0), dotted
Ejemplo n.º 18
0
def registration(configuration: Configuration) -> None:
    '''
    Searches for all of Veredi's required registries, registrars, registrees,
    and invisible elephants.

    Eagerly loads them so they are available at run-time when needed.

    `configuration` - the Configuration whose 'registration' entries drive
    the sweep; must be provided.

    Raises a ConfigError (via `log.exception`) if `configuration` is falsy
    or contains no registration settings.
    '''
    log_dotted = label.normalize(_DOTTED, 'registration')
    log.group_multi(
        _LOG_INIT, log_dotted, "Importing and loading registries, "
        "registrars & registrees...")

    # ---
    # Sanity.
    # ---
    if not configuration:
        msg = "Configuration must be provided."
        log.group_multi(_LOG_INIT, log_dotted, msg)
        error = ConfigError(msg, data={
            'configuration': str(configuration),
        })
        raise log.exception(error, msg)

    # ---
    # Find all registry modules.
    # ---
    log.group_multi(_LOG_INIT, log_dotted, "Finding registry modules...")

    # Track each entry's dotted name by registration outcome.
    successes = []
    failures = []
    registrations = configuration.get(ConfigRegistration.KEY.value)
    if null_or_none(registrations):
        # No 'registration' section at all - fatal for this run.
        cfg_str, cfg_data = configuration.info_for_error()
        msg = ("No registration settings in configuration. "
               "Registration settings required if running registration.")
        log.group_multi(_LOG_INIT,
                        log_dotted,
                        msg + "\n  {}",
                        data={
                            'configuration': cfg_str,
                            'settings': cfg_data,
                        },
                        log_minimum=log.Level.ERROR,
                        log_success=False)
        error = ConfigError(msg,
                            data={
                                'configuration': str(configuration),
                                'registrations': registrations,
                            })
        raise log.exception(error, msg)

    log.group_multi(_LOG_INIT, log_dotted,
                    f"{len(registrations)} registration entries to run.")
    for entry in registrations:
        # If a config exception is raised, ok. Otherwise track success/failure.
        registered, dotted = _register_entry(configuration, entry, log_dotted)

        if registered:
            successes.append(dotted)
        else:
            failures.append(dotted)

    log.group_multi(
        _LOG_INIT,
        log_dotted,
        "Registration completed.\n"
        f"  Attempted: {len(registrations)}\n"
        f"  Succeeded: {len(successes)}\n"
        f"     Failed: {len(failures)}\n",
        # NOTE(review): the trailing comma above makes "{data}" a separate
        # *positional* format arg rather than part of the message -
        # presumably benign, but confirm against `log.group_multi`'s
        # formatting behavior.
        "{data}",
        # TODO v://future/2021-03-14T12:27:54
        # And get rid of that '\n'
        data={
            'success': successes,
            'failure': failures,
        })

    # ---
    # Done.
    # ---
    # Did we completely succeed?
    success = log.SuccessType.success_or_failure(successes, failures)
    log.group_multi(_LOG_INIT,
                    log_dotted,
                    "Done with registration importing & loading.",
                    log_success=success)
Ejemplo n.º 19
0
def _scan_tree(log_dotted: Optional[label.DotStr], root_name: str,
               root_path: paths.Path, import_registrars: List[str],
               import_registrees: List[str],
               ignore_files: Set[Union[str, re.Pattern]],
               ignore_dirs: Set[re.Pattern],
               find_ut: bool) -> Tuple[List[str], List[str], List[str]]:
    '''
    Find the import modules using os's `scandir()`, which is much faster than
    `pathlib.iterdir()` and `os.walk()`.

    `iterdir()`:
      - Just took too long.
      - Had too many calls to `posix.stat()`.
    `os.walk()` uses `scandir()`, so it had potential... but:
      - No way to stop it from walking all of ".git/", or other 'ignore' dirs.
      - Doesn't return DirEntry, so had to do additional `posix.stat()` to
        figure out file/dir.

    `log_dotted`        - dotted label for log output.
    `root_name`         - name of the module/library being scanned (logs).
    `root_path`         - directory the scan starts from.
    `import_registrars` - file names (sans extension) that are registrars.
    `import_registrees` - file names (sans extension) that are registrees.
    `ignore_files`      - file names/patterns to skip.
    `ignore_dirs`       - directory patterns to skip entirely.
    `find_ut`           - whether unit-testing modules should be included.

    Returns a 3-tuple of module-name lists:
      (registrars, registrees, unknowns)
    '''
    # Original idea from https://stackoverflow.com/a/5135444/425816
    # But using os.walk, which uses os.scandir, which is much much more
    # performant than my original pathlib.iterdir attempt.
    export_registrars = []
    export_registrees = []
    # Files that somehow got past ignore checks but are also not matching
    # registrar/registree names. /Should/ never happen...
    unknowns = []

    # Get module info from root path.
    log.group_multi(
        _LOG_INIT, log_dotted, "Finding modules...\n"
        "  unit-testing?: {}\n"
        "         module: {}\n"
        "           path: {}\n"
        "  find: \n"
        "     registrars: {}\n"
        "     registrees: {}", find_ut, root_name, root_path,
        import_registrars, import_registrees)

    # Start off with the root dir. Append more dir scans as we find valid ones.
    scans = [
        root_path,
    ]
    scanned_paths = 0
    # Pull the next directory string off of `scans` and do a scan of it for
    # files/dirs we want.
    # NOTE: appending to `scans` while iterating it is intentional - Python's
    # list iteration picks up the appended sub-dirs, so this walks the whole
    # tree breadth-first without recursion.
    for directory in scans:
        with os.scandir(directory) as entries:
            for entry in entries:
                scanned_paths += 1

                # ------------------------------
                # Directories
                # ------------------------------
                # Non-ignored dirs get queued for their own scan pass.
                if (entry.is_dir() and
                        not _ignore_dir(log_dotted, entry.path, ignore_dirs)):
                    # Add to our list of dirs to scan.
                    scans.append(entry.path)
                    continue

                # ------------------------------
                # Files
                # ------------------------------

                # ---
                # Set-up for checking files.
                # ---
                # All file checks work on the path relative to the scan root.
                path_relative = paths.cast(entry.path).relative_to(root_path)

                # ---
                # Check each module file.
                # ---
                if _ignore(log_dotted, root_path, path_relative, ignore_files,
                           import_registrars, import_registrees):
                    continue

                # Alright; sort this guy into an import list.
                _sort(log_dotted, root_path, path_relative, import_registrars,
                      import_registrees, export_registrars, export_registrees,
                      unknowns)

    # ---
    # Done; log info and return.
    # ---
    # Summary logs are guarded so the string building only happens when the
    # START_UP log group will actually output.
    if log.will_output(log.Group.START_UP):
        log.group_multi(
            _LOG_INIT, log_dotted, "Done scanning for modules.\n"
            "  scanned: {}\n"
            "  matches: {}\n", scanned_paths,
            len(export_registrars) + len(export_registrees))

    if export_registrars and log.will_output(log.Group.START_UP):
        module_log = []
        for module in export_registrars:
            module_log.append("    - " + module)
        log.group_multi(
            _LOG_INIT, log_dotted, "Done finding registrar modules.\n"
            "   module: {}\n"
            "  matches: {}\n"
            "{}", root_name, len(export_registrars), '\n'.join(module_log))

    if export_registrees and log.will_output(log.Group.START_UP):
        module_log = []
        for module in export_registrees:
            module_log.append("    - " + module)
        log.group_multi(
            _LOG_INIT, log_dotted, "Done finding registree modules.\n"
            "   module: {}\n"
            "  matches: {}\n"
            "{}", root_name, len(export_registrees), '\n'.join(module_log))

    if unknowns:
        # Shouldn't happen; warn so the filter lists can be fixed.
        file_log = []
        for file in unknowns:
            file_log.append("    - " + file)
        log.group_multi(_LOG_INIT,
                        log_dotted, "Found unknown but matching files?!\n"
                        "    module: {}\n"
                        "  unknowns: {}\n"
                        "{}",
                        root_name,
                        len(unknowns),
                        '\n'.join(file_log),
                        log_minimum=log.Level.WARNING)

    return (export_registrars, export_registrees, unknowns)
Ejemplo n.º 20
0
def _sort(log_dotted: label.DotStr, path_root: paths.Path,
          path_relative: paths.Path, import_registrars: List[str],
          import_registrees: List[str], export_registrars: List[str],
          export_registrees: List[str], unknowns: List[str]) -> None:
    '''
    Figures out which of the export/unknown output lists that `path_relative`
    belongs to.

    Given `path_root`, `path_relative`, and the input lists
    (`import_registrars` & `import_registrees`), figure out which output list
    to place it in:
      - `export_registrars`
      - `export_registrees`
      - `unknowns`

    Appends `submodule()` to the correct list; returns None.
    '''
    # filename (sans extension)
    module_name = path_relative.stem
    # path -> DotStr
    submod = submodule(path_relative)

    match = True
    if module_name in import_registrars:
        export_registrars.append(submod)

    elif module_name in import_registrees:
        export_registrees.append(submod)

    else:
        match = False
        unknowns.append(submod)

        if log.will_output(*_LOG_INIT):
            module_ext = path_relative.suffix
            # NOTE(review): arg order puts `path_relative` on the headline
            # and `submod` on the 'path:' line - looks swapped; confirm.
            log.group_multi(_LOG_INIT,
                            log_dotted, "Found unknown module: {}\n"
                            "    path: {}\n"
                            "  module: {}\n"
                            "     ext: {}\n"
                            "  - Does not match registrar or registree names "
                            "but also wasn't ignored?!\n"
                            "    + registrars: {}\n"
                            "    + registrees: {}",
                            path_relative,
                            submod,
                            module_name,
                            module_ext,
                            import_registrars,
                            import_registrees,
                            log_minimum=log.Level.INFO)

    if match and log.will_output(*_LOG_INIT):
        module_ext = path_relative.suffix
        log.group_multi(_LOG_INIT,
                        log_dotted,
                        "Found matching module: {}\n"
                        "    path: {}\n"
                        "  module: {}" +
                        ("\n     ext: {}" if module_ext else ""),
                        path_relative,
                        # Fix: was the `submodule` *function object*; pass
                        # the computed DotStr instead.
                        submod,
                        module_name,
                        module_ext,
                        log_minimum=log.Level.INFO)
Ejemplo n.º 21
0
def encodable(klass: Type[EnumEncode],
              name_dotted: Optional[label.LabelInput] = None,
              name_string: Optional[str] = None,
              name_klass: Optional[str] = None,
              enum_encode_type: 'EnumWrap' = None) -> Type['EnumWrap']:
    '''
    Helper for creating an EnumWrap subclass for a specific Enum that needs to
    be Encodable. The enum itself cannot be an Encodable, but it will use this
    wrapper class to provide its Encodable functionality.

    Required:
      - `name_dotted`
      - `name_string`
      - `enum_encode_type`

    Optional:
      - `name_klass`
        + Will be `Wrap{wrapped_class_name}` if not supplied.

    Several helper/wrapper classes exist to be supplied as `enum_encode_type`:
      - FlagEncodeValue
      - FlagEncodeName
      - EnumEncodeName

    Does not exist yet; can be quickly made from EnumEncodeName
    and FlagEncodeValue:
      - EnumEncodeValue

    Raises a ValueError (via `log.exception`) if `klass` is not a wrappable
    enum type, or if `enum_encode_type` is missing or not an EnumWrap.

    Returns the newly created (and globally recorded) wrapper class.
    '''
    # ------------------------------
    # Sanity Checks
    # ------------------------------
    def _invalid(msg: str) -> None:
        '''Log & raise a ValueError with the standard decorator error data.'''
        error = ValueError(msg, klass, enum_encode_type)
        raise log.exception(error,
                            msg,
                            data={
                                'class': klass,
                                'dotted': name_dotted,
                                'name': name_string,
                                'klass': name_klass,
                                'wrapper': enum_encode_type,
                            })

    if not issubclass(klass, EnumWrapTypesTuple):
        _invalid(f"{klass.klass}: `encodable` decorator should only be "
                 f"used on enum classes: {EnumWrapTypesTuple}")

    if not enum_encode_type:
        _invalid(f"{klass.klass}: `encodable` decorator needs an "
                 "`enum_encode_type` class to use for the wrapper.")

    if not issubclass(enum_encode_type, EnumWrap):
        _invalid(f"{klass.klass}: `encodable` decorator needs an "
                 "`enum_encode_type` that is an EnumWrap "
                 "or a subclass.")

    # ------------------------------
    # Define Wrapper Class
    # ------------------------------
    class Wrapper(enum_encode_type,
                  name_dotted=name_dotted,
                  name_string=name_string,
                  name_klass=name_klass):
        '''
        Wrapper class for an enum that wants to be encodable.
        '''
        enum: EnumDescriptor = EnumDescriptor(None, klass, None)
        '''Init EnumDescriptor with the wrapper's class type.'''
        type: Type[py_enum.Enum] = klass
        '''Wrapped enum's type.'''

    # Dynamically set class name to something more specific
    # than `Wrapper`.
    name = (
        # Prefer user supplied.
        name_klass if name_klass else
        # Else build one using our formatting string.
        _WRAPPER_CLASS_FMT.format(name=klass.__name__))

    Wrapper.__name__ = name
    Wrapper.__qualname__ = name

    # Record the wrapper so the wrapped enum can be looked up later.
    global _WRAPPED_ENUMS
    _WRAPPED_ENUMS[klass] = Wrapper

    log.group_multi([log.Group.REGISTRATION, log.Group.DATA_PROCESSING],
                    label.normalize(_DOTTED, 'encodable'), "encodable enum:\n"
                    "             klass: {}\n"
                    "       name_dotted: {}\n"
                    "       name_string: {}\n"
                    "        name_klass: {}\n"
                    "  enum_encode_type: {}\n"
                    "      <--  wrapper: {}", klass, name_dotted, name_string,
                    name_klass, enum_encode_type, Wrapper)

    # ------------------------------
    # Done; return the new Wrapper.
    # ------------------------------
    return Wrapper
Ejemplo n.º 22
0
    def stop(self,
             wait_timeout: float = GRACEFUL_SHUTDOWN_TIME_SEC,
             time_sec: Optional[float] = None) -> ExitCodeTuple:
        '''
        Asks this sub-process to stop/shutdown.

        Will first ask for a graceful shutdown via shutdown flag. This waits
        for a result until timed out (a `wait_timeout` of None will never time
        out).

        If graceful shutdown times out, this will kill the process.

        NOTE: `time_sec` is accepted but currently unused - TODO: confirm
        whether it should feed `self.timer_val` as previously documented.

        Only sets `self.time_end` if this function stops the process. If it
        was already non-existant or stopped it will not be set.

        Returns an ExitCodeTuple of (process name, exit code).
        '''
        # No process at all? Log it and claim a successful exit.
        if not self.process:
            # Not sure this should be an exception. Have as a log for now.
            # raise log.exception(
            #     MultiProcError,
            #     "ProcToSubComm.process is null for {self.name};"
            #     "cannot stop process.")
            log.group_multi(_LOG_KILL,
                            self.dotted, f"{self.klass}: "
                            # Fix: was a plain string, so `self.name` was
                            # never interpolated.
                            f"process is null for {self.name}; "
                            "cannot stop process. Returning successful "
                            "exit anyways.",
                            log_minimum=log.Level.WARNING,
                            log_success=False)
            # Could return fail code if appropriate.
            return ExitCodeTuple(self.name, 0)

        # Already exited cleanly? Nothing to do.
        if self.process.exitcode == 0:
            log.group_multi(
                _LOG_KILL, self.dotted, f"{self.klass}: "
                # Fix: was a plain string; `self.name` never interpolated.
                f"{self.name} process already stopped.")
            return ExitCodeTuple(self.name, self.process.exitcode)

        # Set our process's shutdown flag. It should notice soon and start
        # doing its shutdown.
        log.group_multi(
            _LOG_KILL, self.dotted, f"{self.klass}: "
            f"Asking {self.name} to end gracefully...")
        self.shutdown.set()

        # Wait for our process to be done.
        if self.process.is_alive():
            log.group_multi(
                _LOG_KILL, self.dotted, f"{self.klass}: "
                f"Waiting for {self.name} to complete "
                "structured shutdown...")
            self.process.join(wait_timeout)
            log.group_multi(
                _LOG_KILL, self.dotted, f"{self.klass}: "
                f"{self.name} exit code: "
                f"{str(self.process.exitcode)}")
        else:
            log.group_multi(
                _LOG_KILL, self.dotted, f"{self.klass}: "
                f"{self.name} isn't alive; "
                "skip shutdown...")

        # Make sure it shut down and gave a good exit code.
        if (self.process.is_alive() and self.process.exitcode is None):
            # Still not exited; terminate it.
            log.group_multi(
                _LOG_KILL, self.dotted, f"{self.klass}: "
                f"{self.name} still not exited; terminating...")
            self.process.terminate()

        # We stopped it so we know what time_end to set.
        self.time_end = veredi.time.machine.utcnow()
        log.group_multi(_LOG_KILL, self.dotted, f"{self.klass}: "
                        f"{self.name} stopped.")
        return ExitCodeTuple(self.name, self.process.exitcode)
Ejemplo n.º 23
0
def _ignore_dir(log_dotted: label.DotStr, path: paths.PathType,
                ignores: Set[Union[str, re.Pattern]]) -> bool:
    '''
    Decide whether the directory at `path` should be skipped during the
    module scan, based on the `ignores` set (exact-name strings and/or
    compiled regex patterns searched against the full path string).

    Returns True to ignore the directory, False to scan it.

    Don't call this for the root - you should not ignore that.
    '''
    # Only the final path component matters for string comparisons; it's also
    # handy for the logs.
    dir_name = paths.cast(path).stem

    # Details about whichever ignore rule fired (for logging):
    matched_on = None  # "string" or "regex"
    matching = None    # the rule itself (the str, or the pattern's text)
    matched = None     # what it matched (dir name, or regex groups)

    # Return value: should this directory be ignored?
    ignore = False

    # ------------------------------
    # Check list of explicit ignores.
    # ------------------------------
    for rule in ignores:
        if isinstance(rule, str):
            # Strings must match the directory name exactly.
            if dir_name == rule:
                ignore = True
                matched_on = "string"
                matching = rule
                matched = dir_name
                break

        elif isinstance(rule, re.Pattern):
            # Regexes are searched against the full path string.
            found = rule.search(str(path))
            if found:
                ignore = True
                matched_on = "regex"
                matching = rule.pattern
                matched = found.groups()
                break

    # ------------------------------
    # Result?
    # ------------------------------
    if log.will_output(*_LOG_INIT):
        if ignore:
            log.group_multi(_LOG_INIT,
                            log_dotted, "Ignoring Directory:\n"
                            "          path: {}\n"
                            "     directory: {}\n"
                            "   ignore type: {}\n"
                            "  ignore match: {}\n"
                            "    matched on: {}",
                            path,
                            dir_name,
                            matched_on,
                            matching,
                            matched,
                            log_minimum=log.Level.DEBUG)
        else:
            log.group_multi(_LOG_INIT,
                            log_dotted, "Directory To Scan:\n"
                            "         path: {}\n"
                            "    directory: {}\n",
                            path,
                            dir_name,
                            log_minimum=log.Level.DEBUG)
    return ignore
Ejemplo n.º 24
0
def _subproc_entry(context: VerediContext) -> None:
    '''
    Init and run a multiprocessing process.

    This is the sub-process side entry point: it validates the supplied
    `context`, sets up the sub-process's logger, SIGINT handling, and log
    client, then hands control to `proc.start(context)` until shutdown.

    Raises a MultiProcError (via `log.exception`) if the context or any of
    its required pieces (SubToProcComm, pipe, config, shutdown flag) are
    missing.
    '''
    _log_dotted = label.normalize(_DOTTED_FUNCS, 'entry')

    # ------------------------------
    # Basic Sanity
    # ------------------------------
    if not context:
        log.group_multi(
            _LOG_INIT, _log_dotted, "_subproc_entry: "
            "Require a context to run sub-process. Got nothing.")
        # NOTE(review): the exception *class* is passed here, whereas some
        # other call sites pass an instance - confirm `log.exception`
        # supports both.
        raise log.exception(
            MultiProcError,
            "Require a context to run sub-process. Got nothing.")

    proc = ConfigContext.subproc(context)
    if not proc:
        log.group_multi(
            _LOG_INIT, _log_dotted, "_subproc_entry: "
            "Require SubToProcComm to run sub-process. Got nothing.")
        raise log.exception(
            MultiProcError,
            "Require SubToProcComm to run sub-process. Got nothing.",
            context=context)

    # ------------------------------
    # Set-Up Logger & Signals
    # ------------------------------
    initial_log_level = ConfigContext.log_level(context)
    # TODO [2020-08-10]: Logging init should take care of level... Try to
    # get rid of this setLevel().
    proc_log = log.get_logger(proc.name)
    proc_log.setLevel(initial_log_level)

    # Sub-proc will ignore sig-int; primarily pay attention to shutdown flag.
    _sigint_ignore()

    log.group_multi(_LOG_INIT,
                    _log_dotted,
                    "Initializing sub-process '{}'",
                    proc.name,
                    veredi_logger=proc_log)

    # Start up the logging client (only when we aren't the log server itself).
    log_is_server = ConfigContext.log_is_server(context)
    if not log_is_server:
        log.group_multi(_LOG_INIT,
                        _log_dotted,
                        "Initializing log_client for '{}'",
                        proc.name,
                        veredi_logger=proc_log)
        log_client.init(proc.name, initial_log_level)

    # ------------------------------
    # More Sanity
    # ------------------------------
    # The pipe is our only channel back to the parent; required.
    if not proc.pipe:
        log.group_multi(_LOG_INIT,
                        _log_dotted,
                        "Process '{}' requires a pipe connection; has None.",
                        proc.name,
                        veredi_logger=proc_log)
        raise log.exception(
            MultiProcError,
            "Process '{}' requires a pipe connection; has None.",
            proc.name,
            veredi_logger=proc_log)
    # Not all procs will require a config, maybe? Require until that's true
    # though.
    if not proc.config:
        log.group_multi(_LOG_INIT,
                        _log_dotted,
                        "Process '{}' requires a configuration; has None.",
                        proc.name,
                        veredi_logger=proc_log)
        raise log.exception(MultiProcError,
                            "Process '{}' requires a configuration; has None.",
                            proc.name,
                            veredi_logger=proc_log)
    # If no log level, allow it to be default?
    # if not initial_log_level:
    #     raise log.exception(
    #         MultiProcError,
    #         "Process '{}' requires a default log level (int); "
    #         "received None.",
    #         proc.name,
    #         veredi_logger=proc_log)
    # The shutdown flag is how the parent asks us to stop; required.
    if not proc.shutdown:
        log.group_multi(_LOG_INIT,
                        _log_dotted,
                        "Process '{}' requires a shutdown flag; has None.",
                        proc.name,
                        veredi_logger=proc_log)
        raise log.exception(MultiProcError,
                            "Process '{}' requires a shutdown flag; has None.",
                            proc.name,
                            veredi_logger=proc_log)

    # ------------------------------
    # Actually run the thing...
    # ------------------------------
    log.group_multi(_LOG_INIT,
                    _log_dotted,
                    "Process '{}' starting...",
                    proc.name,
                    veredi_logger=proc_log)
    # Blocks until the sub-process is done (shutdown or death).
    proc.start(context)

    # DONE WITH '_LOG_INIT'; SWITCH TO '_LOG_KILL'!

    # ------------------------------
    # Won't reach here until sub-proc is shutdown or dies.
    # ------------------------------
    if not log_is_server:
        log.group_multi(_LOG_KILL,
                        _log_dotted,
                        "Closing log_client for '{}' log_client.close().",
                        proc.name,
                        veredi_logger=proc_log)
        log_client.close()

    log.group_multi(_LOG_KILL,
                    _log_dotted,
                    "Process '{}' done.",
                    proc.name,
                    veredi_logger=proc_log)
Ejemplo n.º 25
0
def blocking_tear_down(proc: ProcToSubComm,
                       graceful_wait: Optional[float] = -1) -> ExitCodeTuple:
    '''
    Stops process. First tries to ask it to stop (i.e. stop gracefully). If
    that takes too long, terminates the process.

    If `graceful_wait` is set to:
      - positive number: This will block for that many seconds for the
        multiprocessing.join() call to finish.
      - `None`: This will block forever until the process stops gracefully.
      - negative number: It will block for `GRACEFUL_SHUTDOWN_TIME_SEC` by
        default.

    Returns an ExitCodeTuple (the proc name and its exit code).
    '''
    # A negative wait means "use the default grace period".
    if isinstance(graceful_wait, (int, float)) and graceful_wait < 0:
        graceful_wait = GRACEFUL_SHUTDOWN_TIME_SEC

    log_dotted = label.normalize(_DOTTED_FUNCS, 'tear_down.blocking.full')
    proc_logger = log.get_logger(proc.name)

    def _trace(message: str, *args) -> None:
        # Every log in this function shares the same group, dotted label,
        # and logger; funnel them through here to avoid repetition.
        log.group_multi(_LOG_KILL,
                        log_dotted,
                        message,
                        *args,
                        veredi_logger=proc_logger)

    _trace("blocking_tear_down({}): "
           "graceful_wait: {}, shutdown? {}",
           proc.name,
           graceful_wait,
           proc.shutdown.is_set())

    # ------------------------------
    # Sanity Check, Early Out.
    # ------------------------------
    exit_tuple = _tear_down_check(proc, proc_logger)
    _trace("blocking_tear_down({}): tear_down_check: {}, "
           "shutdown? {}",
           proc.name,
           exit_tuple,
           proc.shutdown.is_set())
    if exit_tuple:
        _trace("blocking_tear_down({}): finished with: {}, "
               "shutdown? {}",
               proc.name,
               exit_tuple,
               proc.shutdown.is_set())
        return exit_tuple

    # ------------------------------
    # Kick off tear-down.
    # ------------------------------
    # `_tear_down_start()` has no return value - nothing to check here.
    _tear_down_start(proc, proc_logger)

    # ------------------------------
    # Wait for tear-down.
    # ------------------------------
    exit_tuple = _tear_down_wait(proc,
                                 proc_logger,
                                 graceful_wait,
                                 log_enter=True,
                                 log_wait_timeout=True,
                                 log_exit=True)
    _trace("blocking_tear_down({}): tear_down_wait: {}, "
           "shutdown? {}",
           proc.name,
           exit_tuple,
           proc.shutdown.is_set())
    if exit_tuple:
        _trace("blocking_tear_down({}): finished with: {}, "
               "shutdown? {}",
               proc.name,
               exit_tuple,
               proc.shutdown.is_set())
        return exit_tuple

    # ------------------------------
    # Finish tear-down.
    # ------------------------------
    exit_tuple = _tear_down_end(proc, proc_logger)
    _trace("blocking_tear_down({}): tear_down_end: {}, "
           "shutdown? {}",
           proc.name,
           exit_tuple,
           proc.shutdown.is_set())
    _trace("blocking_tear_down({}): completed with: {}, "
           "shutdown? {}",
           proc.name,
           exit_tuple,
           proc.shutdown.is_set())
    return exit_tuple
Ejemplo n.º 26
0
def _ignore(log_dotted: label.DotStr, path_root: paths.Path,
            path_relative: paths.Path, ignores: Set[Union[str, re.Pattern]],
            import_registrars: List[str],
            import_registrees: List[str]) -> bool:
    '''
    Checks if the file `path_relative` (relative to `path_root`), should be
    ignored or not according to the ignore set and import lists.
    '''
    full_path = path_root / path_relative
    name = full_path.stem
    extension = full_path.suffix

    # ------------------------------
    # Check list of explicit ignores.
    # ------------------------------
    rule_kind = None  # "string" or "regex" when an explicit rule matched
    rule_text = None  # the rule that matched (string or regex pattern)
    for rule in ignores:
        if isinstance(rule, str) and rule == name:
            # Strings only ignore on an exact, full match.
            rule_kind = "string"
            rule_text = rule
            break
        if isinstance(rule, re.Pattern) and rule.match(name):
            # Regexes ignore whenever they match.
            rule_kind = "regex"
            rule_text = rule.pattern
            break

    # ------------------------------
    # Early out?
    # ------------------------------
    if rule_kind is not None:
        # An explicit rule said to ignore this path.
        log.group_multi(_LOG_INIT,
                        log_dotted, "Ignoring:\n"
                        "          path: {}\n"
                        "        module: {}\n"
                        "   ignore type: {}\n"
                        "  ignore match: {}",
                        path_relative,
                        name,
                        rule_kind,
                        rule_text,
                        log_minimum=log.Level.DEBUG)
        return True

    # ------------------------------
    # Check for implicit ignoring conditions.
    # ------------------------------
    if (name not in import_registrars
            and name not in import_registrees):
        # Filename isn't one of the modules we import.
        log.group_multi(_LOG_INIT,
                        log_dotted, "Ignoring sub-module to process: {}\n"
                        "    path: {}\n"
                        "  module: {}\n"
                        "  reason: no filename match",
                        submodule(path_relative),
                        path_relative,
                        name,
                        log_minimum=log.Level.DEBUG)
        return True

    if not full_path.is_file():
        # Name matched, but only actual files can be imported.
        log.group_multi(_LOG_INIT,
                        log_dotted, "Ignoring (matching): {}\n"
                        "    path: {}\n"
                        "  module: {}\n"
                        "  reason: Not a file.",
                        submodule(path_relative),
                        path_relative,
                        name,
                        log_minimum=log.Level.DEBUG)
        return True

    if extension not in ('.py', '.pyw'):
        # Name matched and it's a file, but not a Python module file.
        log.group_multi(_LOG_INIT,
                        log_dotted, "Ignoring (matching): {}\n"
                        "    path: {}\n"
                        "  module: {}\n"
                        "  reason: Not a python module file extension "
                        "(.py, .pyw).",
                        submodule(path_relative),
                        path_relative,
                        name,
                        log_minimum=log.Level.DEBUG)
        return True

    # ------------------------------
    # Failed all ignore conditions - do not ignore.
    # ------------------------------
    return False
Ejemplo n.º 27
0
def _tear_down_wait(proc: ProcToSubComm,
                    logger: Optional[log.PyLogType],
                    graceful_wait: Optional[float] = -1,
                    log_enter: bool = True,
                    log_wait_timeout: bool = True,
                    log_exit: bool = True) -> Optional[ExitCodeTuple]:
    '''
    Waits for process to stop gracefully.

    If `graceful_wait` is set to:
      - positive number: This will block for that many seconds for the
        multiprocessing.join() call to finish.
      - `None`: This will block forever until the process stops gracefully.
      - negative number: It will block for `GRACEFUL_SHUTDOWN_TIME_SEC` by
        default.

    `log_<something>` flags are for help when looping for a small wait so other
    systems can do things. Logs are guarded by `log_<something>`, so a caller
    can have enter logged once, then just loop logging exit (for example).

    Returns an ExitCodeTuple (the proc name and its exit code) if the process
    exited during the wait; returns None if it is still alive (timed out) or
    never ran at all.
    '''
    # Negative numbers mean "use the default grace period".
    if isinstance(graceful_wait, (int, float)) and graceful_wait < 0:
        graceful_wait = GRACEFUL_SHUTDOWN_TIME_SEC

    _log_dotted = label.normalize(_DOTTED_FUNCS, '_tear_down.wait')

    # Wait for process to be done.
    if proc.process.is_alive():
        if log_enter:
            log.group_multi(_LOG_KILL,
                            _log_dotted, "_tear_down_wait({}): "
                            "Waiting for '{}' to complete "
                            "structured shutdown...",
                            proc.name,
                            proc.name,
                            veredi_logger=logger)
        # BUGFIX: honor the caller's `graceful_wait` (None blocks forever,
        # per the docstring) instead of always joining for the default
        # GRACEFUL_SHUTDOWN_TIME_SEC.
        proc.process.join(graceful_wait)
        if log_wait_timeout and proc.process.exitcode is None:
            # BUGFIX: '{proc.name}' was a literal f-string leftover; use a
            # '{}' placeholder so the second positional `proc.name` is used.
            log.group_multi(_LOG_KILL,
                            _log_dotted, "_tear_down_wait({}): "
                            "'{}' timed out of this wait; "
                            "not dead yet.",
                            proc.name,
                            proc.name,
                            veredi_logger=logger)
        elif log_exit and proc.process.exitcode is not None:
            log.group_multi(_LOG_KILL,
                            _log_dotted, "_tear_down_wait({}): "
                            "'{}' has exited with exit code: {}",
                            proc.name,
                            proc.name,
                            str(proc.process.exitcode),
                            veredi_logger=logger)
            return ExitCodeTuple(proc.name, proc.process.exitcode)
    else:
        if log_enter:
            log.group_multi(_LOG_KILL,
                            _log_dotted, "_tear_down_wait({}): "
                            "'{}' didn't run; skip shutdown...",
                            proc.name,
                            proc.name,
                            veredi_logger=logger)

    return None