Example #1
def run_mediator(conn: multiprocessing.connection.Connection = None,
                 config_path: Union[pathlib.Path, str] = None,
                 log_level: Union[log.Level, int] = None,
                 shutdown_flag: multiprocessing.Event = None) -> None:
    '''
    Init and run client/engine IO mediator.
    '''
    _sigint_ignore()
    log_client.init("outdated mediator", log_level)

    if not conn:
        lumberjack = log.get_logger(ProcessType.MEDIATOR.value)
        raise log.exception(
            ConfigError,
            "Mediator requires a pipe connection; received None.",
            veredi_logger=lumberjack)
    if not config_path:
        lumberjack = log.get_logger(ProcessType.MEDIATOR.value)
        raise log.exception(
            ConfigError,
            "Mediator requires a config file; received no path to one.",
            veredi_logger=lumberjack)
    if not log_level:
        lumberjack = log.get_logger(ProcessType.MEDIATOR.value)
        raise log.exception(
            ConfigError,
            "Mediator requires a default log level (int); received None.",
            veredi_logger=lumberjack)

    log.get_logger(
        ProcessType.MEDIATOR.value).critical("todo... server/mediator")
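
A minimal parent-side launcher sketch for run_mediator() above, using only
standard-library multiprocessing primitives. It assumes run_mediator,
ProcessType, and log from this example are importable, and takes the config
path and log level as arguments rather than inventing values.

import multiprocessing

def launch_mediator_sketch(config_path, log_level):
    # Parent keeps one end of the pipe; the mediator child gets the other.
    parent_conn, child_conn = multiprocessing.Pipe()
    shutdown = multiprocessing.Event()
    proc = multiprocessing.Process(
        target=run_mediator,
        name=ProcessType.MEDIATOR.value,
        kwargs={'conn':          child_conn,
                'config_path':   config_path,
                'log_level':     log_level,
                'shutdown_flag': shutdown})
    proc.start()
    return proc, parent_conn, shutdown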
Example #2
    def set_up(self, log_level: log.Level, proc_flags_logs: ProcTest) -> None:
        # ---
        # Print out start of new test debugging separator lines.
        # ---
        if self._ut_is_verbose and log_level == log.Level.DEBUG:
            # Hope this is enough for log_server to finish printing from
            # previous test...
            py_time.sleep(0.1)

            # Give ourselves a visible output split.
            print('\n\n' + 'v' * 60)
            print('v' * 60)
            print('start:  ', self._timing.start_str)
            print('\n')

        # ---
        # Wanted ASAP.
        # ---
        self.log_level = log_level
        self.delay_log_level = proc_flags_logs.has(ProcTest.LOG_LEVEL_DELAY)
        if not self.delay_log_level:
            log.set_level(log_level)
        self.lumberjack = log.get_logger(self.NAME_MAIN)

        # ------------------------------
        # Let parent do stuff.
        # ------------------------------
        super().set_up()

        # ---
        # Finish our things.
        # ---
        self._set_up_log(proc_flags_logs, log_level)
Example #3
def _game_over(processes: Mapping[str, multiprocessing.Process]) -> None:
    '''
    Sets the game_end flag. Engine and Mediator should notice and go into
    graceful shutdown.
    '''
    lumberjack = log.get_logger(ProcessType.MAIN.value)

    # Set the game_end flag. They should notice soon and start doing
    # their shutdown.
    log.info("Asking engine/mediator to end the game gracefully...",
             veredi_logger=lumberjack)
    processes.game_end.set()

    # Wait on the engine and mediator processes to be done.
    # Wait on the mediator first, since it should take less time.
    log.info("Waiting for mediator to complete structured shutdown...",
             veredi_logger=lumberjack)
    processes.proc[ProcessType.MEDIATOR].join(GRACEFUL_SHUTDOWN_TIME_SEC)
    if processes.proc[ProcessType.MEDIATOR].exitcode is None:
        log.error("Mediator did not shut down in time. Data may be lost...",
                  veredi_logger=lumberjack)
    else:
        log.info("Mediator shut down complete.", veredi_logger=lumberjack)

    # Now wait on the engine.
    log.info("Waiting for engine to complete structured shutdown...",
             veredi_logger=lumberjack)
    processes.proc[ProcessType.ENGINE].join(GRACEFUL_SHUTDOWN_TIME_SEC)
    if processes.proc[ProcessType.ENGINE].exitcode is None:
        log.error("Engine did not shut down in time. Data may be lost...",
                  veredi_logger=lumberjack)
    else:
        log.info("Engine shut down complete.", veredi_logger=lumberjack)
Example #4
def _start_server(comms: multiproc.SubToProcComm,
                  context: VerediContext) -> None:
    '''
    Entry function for our mediator server.

    Basically create mediator from config and call its `start()`.
    '''

    # ------------------------------
    # Set-Up
    # ------------------------------
    log_level = ConfigContext.log_level(context)
    lumberjack = log.get_logger(comms.name,
                                min_log_level=log_level)
    lumberjack.setLevel(log_level)
    log.debug(f"_start_server: {comms.name} {log_level}",
              veredi_logger=lumberjack)

    # log.set_group_level(log.Group.DATA_PROCESSING, log.Level.DEBUG)
    # log.set_group_level(log.Group.PARALLEL, log.Level.DEBUG)

    # ---
    # Config
    # ---
    comms = ConfigContext.subproc(context)
    if not comms:
        raise log.exception(
            TypeError,
            "MediatorServer requires a SubToProcComm; received None.")

    config = background.config.config(
        '_start_server',
        'veredi.interface.mediator._start_server',
        context)

    # ---
    # Ignore Ctrl-C. Have parent process deal with it and us.
    # ---
    multiproc._sigint_ignore()

    # ---
    # Logging
    # ---
    # Do not set up log_client here - multiproc does that.

    # ------------------------------
    # Create & Start
    # ------------------------------

    log.debug(f"MediatorSystem's _start_server for {comms.name} "
              "starting MediatorServer...",
              veredi_logger=lumberjack)
    mediator = config.create_from_config('server',
                                         'mediator',
                                         'type',
                                         context=context)
    mediator.start()
    log.debug(f"MediatorSystem's _start_server for {comms.name} done.",
              veredi_logger=lumberjack)
Example #5
def nonblocking_tear_down_start(
        proc: ProcToSubComm) -> Optional[ExitCodeTuple]:
    '''
    Kicks off tear-down. Caller will have to loop calling
    `nonblocking_tear_down_wait` for however long they want to wait for a clean
    shutdown, then call `nonblocking_tear_down_end` to finish.
    '''
    _log_dotted = label.normalize(_DOTTED_FUNCS, 'tear_down.nonblocking.start')
    logger = log.get_logger(proc.name)

    log.group_multi(_LOG_KILL,
                    _log_dotted,
                    "nonblocking_tear_down_start({}): Begin.",
                    proc.name,
                    veredi_logger=logger)

    # ------------------------------
    # Sanity Check, Early Out.
    # ------------------------------
    result = _tear_down_check(proc, logger)
    if result:
        log.group_multi(_LOG_KILL,
                        _log_dotted,
                        "nonblocking_tear_down_start({}): ",
                        "Check returned exit code: {}",
                        proc.name,
                        result,
                        veredi_logger=logger)
        return result

    # ------------------------------
    # Kick off tear-down.
    # ------------------------------
    log.group_multi(_LOG_KILL,
                    _log_dotted,
                    "nonblocking_tear_down_start({}): ",
                    "Starting tear-down...",
                    proc.name,
                    veredi_logger=logger)
    _tear_down_start(proc, logger)
    # No return value for `_tear_down_start()`; can't check anything.
    # if result:
    #     log.group_multi(_LOG_KILL,
    #                     _log_dotted,
    #                     "nonblocking_tear_down_start({}): ",
    #                     "_tear_down_start returned exit code: {}",
    #                     proc.name, result,
    #                     veredi_logger=logger)
    #     return result

    log.group_multi(_LOG_KILL,
                    _log_dotted,
                    "nonblocking_tear_down_start({}): Done.",
                    proc.name,
                    veredi_logger=logger)
Example #6
def wait(processes: Mapping[str, multiprocessing.Process]) -> Optional[int]:
    '''
    Waits forever. Kills server on Ctrl-C/SIGINT.

    Returns 0 if all exit codes are 0.
    Returns None if any process is still alive after termination; otherwise
    returns the last non-zero exit code seen.
    '''
    lumberjack = log.get_logger(ProcessType.MAIN.value)
    log.info("Waiting for game to finish...", veredi_logger=lumberjack)

    try:
        game_running = _game_running(processes)
        while game_running:
            # Do nothing and take naps forever until SIGINT received or game
            # finished.
            game_running = _game_running(processes)

    except KeyboardInterrupt:
        # First, ask for a gentle, graceful shutdown...
        log.warning("Received SIGINT.", veredi_logger=lumberjack)

    # Finally, end the game.
    _game_over(processes)
    _logs_over(processes)

    # Give up and ask for the terminator... If necessary.
    for each in processes.proc:
        if processes.proc[each].exitcode is None:
            # Still not exited; terminate them.
            processes.proc[each].terminate()

    # Figure out our exitcode return value.
    time.sleep(0.1)  # Short nap for our kids to clean up...
    retval = 0
    for each in processes.proc:
        exited = processes.proc[each].exitcode
        if exited is None:
            # Might have to print instead of log this?
            log.warning(
                "Process '{}' is still running slightly after termination...",
                each.value,
                veredi_logger=lumberjack)

            retval = None
        elif exited == 0:
            # Do nothing; only get a retval exit code of 0 if all of them
            # hit this case and do nothing and leave it at its original 0.
            pass
        elif retval is not None:
            # Don't override 'None'... that indicates someone's still alive and
            # kicking...
            retval = exited

    return retval
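
A sketch of how a top-level entry point might consume wait()'s return
contract; mapping the "still running" None case to exit code 1 is an
assumption, not something the examples specify.

import sys

def main_exit_sketch(processes) -> None:
    retval = wait(processes)
    if retval is None:
        # A child survived terminate(); report failure.
        sys.exit(1)
    # 0 when every child exited cleanly, else a child's non-zero exit code.
    sys.exit(retval)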
Example #7
def nonblocking_tear_down_wait(
        proc: ProcToSubComm,
        graceful_wait: float = 0.1,
        log_enter: bool = False,
        log_wait_timeout: bool = False,
        log_exit: bool = False) -> Optional[ExitCodeTuple]:
    '''
    Wait for `graceful_wait` seconds for process to end gracefully.

    `log_<something>` flags are for help when looping for a small wait so other
    systems can do things. Logs are guarded by `log_<something>`, so a caller
    can have enter logged once, then just loop logging exit (for example).
    '''
    _log_dotted = label.normalize(_DOTTED_FUNCS, 'tear_down.nonblocking.wait')
    logger = log.get_logger(proc.name)
    log.group_multi(_LOG_KILL,
                    _log_dotted,
                    "nonblocking_tear_down_start({}): Begin.",
                    proc.name,
                    veredi_logger=logger)

    # ------------------------------
    # Wait for tear-down.
    # ------------------------------
    result = _tear_down_wait(proc,
                             logger,
                             graceful_wait,
                             log_enter=log_enter,
                             log_wait_timeout=log_wait_timeout,
                             log_exit=log_exit)
    if result:
        log.group_multi(_LOG_KILL,
                        _log_dotted,
                        "_tear_down_wait({}): Returned exit code: {}",
                        proc.name,
                        result,
                        veredi_logger=logger)
        return result

    log.group_multi(_LOG_KILL,
                    _log_dotted,
                    "nonblocking_tear_down_start({}): No exit yet...",
                    proc.name,
                    veredi_logger=logger)
Example #8
def nonblocking_tear_down_end(proc: ProcToSubComm) -> Optional[ExitCodeTuple]:
    '''
    Finishes tear-down. Checks that process finished shutdown. If not, we
    terminate it immediately.

    In any case, we return its exit code.
    '''
    _log_dotted = label.normalize(_DOTTED_FUNCS, 'tear_down.nonblocking.end')
    logger = log.get_logger(proc.name)

    # ------------------------------
    # Finish tear-down.
    # ------------------------------
    result = _tear_down_end(proc, logger)
    log.group_multi(_LOG_KILL,
                    _log_dotted, "nonblocking_tear_down_end({}): "
                    "_tear_down_end returned exit code: {}",
                    proc.name,
                    result,
                    veredi_logger=logger)
    return result
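
Examples #5, #7, and #8 form a start/wait/end trio; per the docstring in
Example #5, the caller loops on the wait step and then finishes with the end
step. A sketch of that loop, with an arbitrary attempt count and a
caller-supplied do_other_work callable standing in for whatever gets
interleaved:

def nonblocking_tear_down_loop_sketch(proc, do_other_work, attempts=10):
    # Early out if the process has already stopped.
    result = nonblocking_tear_down_start(proc)
    if result:
        return result

    # Poll in short slices so other work can run between checks.
    for i in range(attempts):
        result = nonblocking_tear_down_wait(proc,
                                            graceful_wait=0.1,
                                            log_enter=(i == 0),
                                            log_exit=True)
        if result:
            return result
        do_other_work()

    # Out of patience: finish tear-down (terminates if still running) and
    # return the exit code.
    return nonblocking_tear_down_end(proc)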
Example #9
def _logs_over(processes: Mapping[str, multiprocessing.Process]) -> None:
    '''
    Sets the logs_end flag. Logs server should notice and gracefully shut down.
    '''
    lumberjack = log.get_logger(ProcessType.MAIN.value)

    # Set the logs_end flag. The logs server should notice soon and start
    # its shutdown.
    log.info("Asking logs server to end gracefully...",
             veredi_logger=lumberjack)
    processes.logs_end.set()

    # Wait on the logs server process to be done.
    log.info("Waiting for logs server to complete structured shutdown...",
             veredi_logger=lumberjack)
    processes.proc[ProcessType.LOGS].join(GRACEFUL_SHUTDOWN_TIME_SEC)
    if processes.proc[ProcessType.LOGS].exitcode is None:
        log.error(
            "Logs server did not shut down in time. "
            "Logs may be lost? IDK...",
            veredi_logger=lumberjack)
    else:
        log.info("Logs server shut down complete.", veredi_logger=lumberjack)
Example #10
def run_engine(conn: multiprocessing.connection.Connection = None,
               config_path: Union[pathlib.Path, str] = None,
               game_data: Mapping[str, str] = None,
               log_level: Union[log.Level, int] = None,
               shutdown_flag: multiprocessing.Event = None) -> None:
    '''
    Init, start, and run the engine.
    '''
    _sigint_ignore()
    log_client.init("outdated engine", log_level)
    lumberjack = log.get_logger(ProcessType.ENGINE.value)

    if not conn:
        raise log.exception(
            ConfigError,
            "Engine requires a pipe connection; received None.",
            veredi_logger=lumberjack)
    if not config_path:
        raise log.exception(
            ConfigError,
            "Engine requires a config file; received no path to one.",
            veredi_logger=lumberjack)
    if not log_level:
        raise log.exception(
            ConfigError,
            "Engine requires a default log level (int); received None.",
            veredi_logger=lumberjack)

    # Make our config object...
    config = Configuration(config_path=config_path)

    # TODO [2020-07-19]: Better game_data fields? A context or something
    # engine can use.
    owner = game_data.get('owner', None)
    campaign = game_data.get('campaign', None)
    debug_list = game_data.get('debug', [])
    debug_flags = None
    for each in debug_list:
        # Create or add to debug_flags
        if not debug_flags:
            debug_flags = DebugFlag[each.upper()]
        else:
            debug_flags |= DebugFlag[each.upper()]

    log.info("Game engine starting with: debug: {}, meta: {}",
             debug_flags,
             game_data,
             veredi_logger=lumberjack)

    # The engine will create the ECS managers and required ECS systems.
    engine = Engine(owner, campaign, config, debug=debug_flags)

    # Do each stage of engine's life.
    cycle = EngineTickCycle.START
    log.info("Game engine running {}...", cycle, veredi_logger=lumberjack)
    engine.run(cycle)
    log.info("Game engine finished {}.", cycle, veredi_logger=lumberjack)

    # We should be stuck in this one for a good while...
    cycle = EngineTickCycle.RUN
    log.info("Game engine running {}...", cycle, veredi_logger=lumberjack)
    engine.run(cycle)
    log.info("Game engine finished {}.", cycle, veredi_logger=lumberjack)

    # And finally on to a structured shut-down when the engine decides it's
    # done running.
    cycle = EngineTickCycle.STOP
    log.info("Game engine running {}...", cycle, veredi_logger=lumberjack)
    engine.run(cycle)
    log.info("Game engine finished {}.", cycle, veredi_logger=lumberjack)
Example #11
def run_server(comms: multiproc.SubToProcComm,
               context: VerediContext) -> None:
    '''
    Init and run server-side client/engine IO mediator.
    '''
    # ------------------------------
    # Set Up Logging, Get from Context
    # ------------------------------
    comms = ConfigContext.subproc(context)
    if not comms:
        raise log.exception(
            TypeError,
            "MediatorServer requires a SubToProcComm; received None.")

    proc_test = context.sub.get('proc-test', ProcTest.NONE)
    delay_log_level = proc_test.has(ProcTest.LOG_LEVEL_DELAY)
    log_level = ConfigContext.log_level(context)
    log_level_init = (None if delay_log_level else log_level)

    logger_server = log.get_logger(comms.name, min_log_level=log_level_init)

    multiproc._sigint_ignore()

    # ------------------------------
    # Sanity Check
    # ------------------------------
    # It's a test - and the first multiprocess test - so... don't assume the
    # basics.
    if not comms.pipe:
        raise log.exception(
            TypeError,
            "MediatorServer requires a pipe connection; received None.",
            veredi_logger=logger_server)
    if not comms.config:
        raise log.exception(
            TypeError,
            "MediatorServer requires a configuration; received None.",
            veredi_logger=logger_server)
    if not log_level:
        raise log.exception(
            TypeError, "MediatorServer requires a default log level (int); "
            "received None.",
            veredi_logger=logger_server)
    if not comms.shutdown:
        raise log.exception(
            TypeError,
            "MediatorServer requires a shutdown flag; received None.",
            veredi_logger=logger_server)

    # ------------------------------
    # Finish Set-Up and Start It.
    # ------------------------------

    # Always set the LOG_SKIP flag in case it's wanted.
    comms.debug_flags = comms.debug_flags | DebugFlag.LOG_SKIP

    logger_server.debug(f"Starting WebSocketServer '{comms.name}'...")
    mediator = WebSocketServer(context)
    mediator.start()

    # We've delayed as long as we can; set log level.
    if delay_log_level:
        log.set_level_min(log_level, logger_server)

    # ------------------------------
    # Sub-Process is done now.
    # ------------------------------
    logger_server.debug(f"MediatorServer '{comms.name}' done.")
Example #12
def blocking_tear_down(proc: ProcToSubComm,
                       graceful_wait: Optional[float] = -1) -> ExitCodeTuple:
    '''
    Stops process. First tries to ask it to stop (i.e. stop gracefully). If
    that takes too long, terminates the process.

    If `graceful_wait` is set to:
      - positive number: This will block for that many seconds for the
        multiprocessing.join() call to finish.
      - `None`: This will block forever until the process stops gracefully.
      - negative number: It will block for `GRACEFUL_SHUTDOWN_TIME_SEC` by
        default.

    Returns an ExitCodeTuple (the proc name and its exit code).
    '''
    if isinstance(graceful_wait, (int, float)) and graceful_wait < 0:
        graceful_wait = GRACEFUL_SHUTDOWN_TIME_SEC

    _log_dotted = label.normalize(_DOTTED_FUNCS, 'tear_down.blocking.full')
    logger = log.get_logger(proc.name)
    log.group_multi(_LOG_KILL,
                    _log_dotted, "blocking_tear_down({}): "
                    "graceful_wait: {}, shutdown? {}",
                    proc.name,
                    graceful_wait,
                    proc.shutdown.is_set(),
                    veredi_logger=logger)

    # ------------------------------
    # Sanity Check, Early Out.
    # ------------------------------
    result = _tear_down_check(proc, logger)
    log.group_multi(_LOG_KILL,
                    _log_dotted,
                    "blocking_tear_down({}): tear_down_check: {}, "
                    "shutdown? {}",
                    proc.name,
                    result,
                    proc.shutdown.is_set(),
                    veredi_logger=logger)
    if result:
        log.group_multi(_LOG_KILL,
                        _log_dotted,
                        "blocking_tear_down({}): finished with: {}, "
                        "shutdown? {}",
                        proc.name,
                        result,
                        proc.shutdown.is_set(),
                        veredi_logger=logger)
        return result

    # ------------------------------
    # Kick off tear-down.
    # ------------------------------
    _tear_down_start(proc, logger)
    # `_tear_down_start()` doesn't have a return - can't check it.
    # if result:
    #     log.debug(f"blocking_tear_down({proc.name}): finished with: {result}, "
    #               f"shutdown? {proc.shutdown.is_set()}",
    #               veredi_logger=logger)
    #     return result

    # ------------------------------
    # Wait for tear-down.
    # ------------------------------
    result = _tear_down_wait(proc,
                             logger,
                             graceful_wait,
                             log_enter=True,
                             log_wait_timeout=True,
                             log_exit=True)
    log.group_multi(_LOG_KILL,
                    _log_dotted, "blocking_tear_down({}): tear_down_wait: {}, "
                    "shutdown? {}",
                    proc.name,
                    result,
                    proc.shutdown.is_set(),
                    veredi_logger=logger)
    if result:
        log.group_multi(_LOG_KILL,
                        _log_dotted,
                        "blocking_tear_down({}): finished with: {}, "
                        "shutdown? {}",
                        proc.name,
                        result,
                        proc.shutdown.is_set(),
                        veredi_logger=logger)
        return result

    # ------------------------------
    # Finish tear-down.
    # ------------------------------
    result = _tear_down_end(proc, logger)
    log.group_multi(_LOG_KILL,
                    _log_dotted, "blocking_tear_down({}): tear_down_end: {}, "
                    "shutdown? {}",
                    proc.name,
                    result,
                    proc.shutdown.is_set(),
                    veredi_logger=logger)
    log.group_multi(_LOG_KILL,
                    _log_dotted, "blocking_tear_down({}): completed with: {}, "
                    "shutdown? {}",
                    proc.name,
                    result,
                    proc.shutdown.is_set(),
                    veredi_logger=logger)
    return result
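
Per the docstring, graceful_wait selects one of three behaviours; a sketch of
the three call forms (the 5.0-second bound is an arbitrary example value):

def blocking_tear_down_forms_sketch(proc, style='default'):
    # Negative sentinel (the default): wait GRACEFUL_SHUTDOWN_TIME_SEC.
    if style == 'default':
        return blocking_tear_down(proc)
    # Positive number: wait at most that many seconds before terminating.
    if style == 'bounded':
        return blocking_tear_down(proc, graceful_wait=5.0)
    # None: block until the process stops gracefully on its own.
    return blocking_tear_down(proc, graceful_wait=None)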
Example #13
def _subproc_entry(context: VerediContext) -> None:
    '''
    Init and run a multiprocessing process.
    '''
    _log_dotted = label.normalize(_DOTTED_FUNCS, 'entry')

    # ------------------------------
    # Basic Sanity
    # ------------------------------
    if not context:
        log.group_multi(
            _LOG_INIT, _log_dotted, "_subproc_entry: "
            "Require a context to run sub-process. Got nothing.")
        raise log.exception(
            MultiProcError,
            "Require a context to run sub-process. Got nothing.")

    proc = ConfigContext.subproc(context)
    if not proc:
        log.group_multi(
            _LOG_INIT, _log_dotted, "_subproc_entry: "
            "Require SubToProcComm to run sub-process. Got nothing.")
        raise log.exception(
            MultiProcError,
            "Require SubToProcComm to run sub-process. Got nothing.",
            context=context)

    # ------------------------------
    # Set-Up Logger & Signals
    # ------------------------------
    initial_log_level = ConfigContext.log_level(context)
    # TODO [2020-08-10]: Logging init should take care of level... Try to
    # get rid of this setLevel().
    proc_log = log.get_logger(proc.name)
    proc_log.setLevel(initial_log_level)

    # Sub-proc will ignore sig-int; primarily pay attention to shutdown flag.
    _sigint_ignore()

    log.group_multi(_LOG_INIT,
                    _log_dotted,
                    "Initializing sub-process '{}'",
                    proc.name,
                    veredi_logger=proc_log)

    # Start up the logging client
    log_is_server = ConfigContext.log_is_server(context)
    if not log_is_server:
        log.group_multi(_LOG_INIT,
                        _log_dotted,
                        "Initializing log_client for '{}'",
                        proc.name,
                        veredi_logger=proc_log)
        log_client.init(proc.name, initial_log_level)

    # ------------------------------
    # More Sanity
    # ------------------------------
    if not proc.pipe:
        log.group_multi(_LOG_INIT,
                        _log_dotted,
                        "Process '{}' requires a pipe connection; has None.",
                        proc.name,
                        veredi_logger=proc_log)
        raise log.exception(
            MultiProcError,
            "Process '{}' requires a pipe connection; has None.",
            proc.name,
            veredi_logger=proc_log)
    # Maybe not all procs will require a config eventually? Require one
    # until that's the case, though.
    if not proc.config:
        log.group_multi(_LOG_INIT,
                        _log_dotted,
                        "Process '{}' requires a configuration; has None.",
                        proc.name,
                        veredi_logger=proc_log)
        raise log.exception(MultiProcError,
                            "Process '{}' requires a configuration; has None.",
                            proc.name,
                            veredi_logger=proc_log)
    # If no log level, allow it to be default?
    # if not initial_log_level:
    #     raise log.exception(
    #         MultiProcError,
    #         "Process '{}' requires a default log level (int); "
    #         "received None.",
    #         proc.name,
    #         veredi_logger=proc_log)
    if not proc.shutdown:
        log.group_multi(_LOG_INIT,
                        _log_dotted,
                        "Process '{}' requires a shutdown flag; has None.",
                        proc.name,
                        veredi_logger=proc_log)
        raise log.exception(MultiProcError,
                            "Process '{}' requires a shutdown flag; has None.",
                            proc.name,
                            veredi_logger=proc_log)

    # ------------------------------
    # Actually run the thing...
    # ------------------------------
    log.group_multi(_LOG_INIT,
                    _log_dotted,
                    "Process '{}' starting...",
                    proc.name,
                    veredi_logger=proc_log)
    proc.start(context)

    # DONE WITH '_LOG_INIT'; SWITCH TO '_LOG_KILL'!

    # ------------------------------
    # Won't reach here until sub-proc is shutdown or dies.
    # ------------------------------
    if not log_is_server:
        log.group_multi(_LOG_KILL,
                        _log_dotted,
                        "Closing log_client for '{}' log_client.close().",
                        proc.name,
                        veredi_logger=proc_log)
        log_client.close()

    log.group_multi(_LOG_KILL,
                    _log_dotted,
                    "Process '{}' done.",
                    proc.name,
                    veredi_logger=proc_log)
Example #14
def set_up(
    proc_name: str,
    config: Configuration,
    context: VerediContext,
    entry_fn: StartProcFn,
    t_proc_to_sub: Type['ProcToSubComm'] = ProcToSubComm,
    t_sub_to_proc: Type['SubToProcComm'] = SubToProcComm,
    finalize_fn: FinalizeInitFn = None,
    initial_log_level: Optional[log.Level] = None,
    debug_flags: Optional[DebugFlag] = None,
    unit_testing: Optional[bool] = False,
    proc_test: Optional[ProcTest] = None,
    shutdown: Optional[multiprocessing.Event] = None
) -> Optional[ProcToSubComm]:
    '''
    Get a process ready for _run_proc().

    If `t_proc_to_sub` and/or `t_sub_to_proc` are not default, those classes
    will be instantiated instead of ProcToSubComm / SubToProcComm.

    If `unit_testing`, creates the ut_pipe side-channel.

    If `finalize_fn`, sends both ProcToSubComm and SubToProcComm objects in to
    be processed just before set-up is complete.

    `shutdown` is an optional param in case caller wants multiple sub-processes
    to share the same shutdown flag.

    Returns a `t_proc_to_sub` (default: ProcToSubComm) object. When ready to
    start/run the subprocess, call start() on it.
    '''
    logger = log.get_logger(proc_name, min_log_level=initial_log_level)
    log_dotted = label.normalize(_DOTTED_FUNCS, 'set_up')

    if proc_test and proc_test.has(ProcTest.DNE):
        # This process 'Does Not Exist' right now.
        # Should we downgrade this to debug, or error out more heavily?
        # (i.e. exception?)
        log.group_multi(_LOG_INIT,
                        log_dotted,
                        "'{}' has {}. Skipping creation.",
                        proc_name,
                        proc_test,
                        veredi_logger=logger,
                        log_minimum=log.Level.ERROR,
                        log_success=False)
        return None

    # ------------------------------
    # Create multiproc IPC stuff.
    # ------------------------------
    log.group_multi(_LOG_INIT,
                    log_dotted,
                    "'{}': Creating inter-process communication...",
                    proc_name,
                    veredi_logger=logger)

    # The official us<->them IPC pipe.
    child_pipe, parent_pipe = multiprocessing.Pipe()

    # The side-channel/unit-test us<->them IPC pipe.
    ut_child_pipe, ut_parent_pipe = None, None
    if unit_testing:
        log.group_multi(_LOG_INIT,
                        log_dotted, "'{}': Creating unit-testing "
                        "inter-process communication...",
                        proc_name,
                        veredi_logger=logger)
        ut_child_pipe, ut_parent_pipe = multiprocessing.Pipe()
        context.add('proc-test', proc_test)

    # multiproc shutdown flag
    if not shutdown:
        log.group_multi(_LOG_INIT,
                        log_dotted, "'{}': Creating shutdown inter-process "
                        "event flag...",
                        proc_name,
                        veredi_logger=logger)
        shutdown = multiprocessing.Event()

    # ------------------------------
    # Create the process's private info.
    # ------------------------------
    log.group_multi(_LOG_INIT,
                    log_dotted,
                    "'{}': Creating process comms objects...",
                    proc_name,
                    veredi_logger=logger)

    # Info for the proc itself to own.
    comms = t_sub_to_proc(name=proc_name,
                          config=config,
                          entry_fn=entry_fn,
                          pipe=child_pipe,
                          shutdown=shutdown,
                          debug_flags=debug_flags,
                          ut_pipe=ut_child_pipe)

    # ---
    # Updated Context w/ start-up info (SubToProcComm, etc).
    # ---
    log.group_multi(_LOG_INIT,
                    log_dotted,
                    "'{}': Saving into the ConfigContext...",
                    proc_name,
                    veredi_logger=logger)
    ConfigContext.set_log_level(context, initial_log_level)
    ConfigContext.set_subproc(context, comms)

    # ------------------------------
    # Create the Process, ProcToSubComm
    # ------------------------------
    subp_args = [context]
    subp_kwargs = {}

    log.group_multi(_LOG_INIT,
                    log_dotted,
                    "'{}': Creating the sub-process object...",
                    proc_name,
                    veredi_logger=logger)

    # Create the process object (doesn't start the process).
    subprocess = multiprocessing.Process(
        # _subproc_entry() is always the target; it will do some setup and then
        # call the actual target: `entry_fn`.
        target=_subproc_entry,
        name=proc_name,
        args=subp_args,
        kwargs=subp_kwargs)

    # Info for the caller about the proc and how to talk to it.
    proc = t_proc_to_sub(name=proc_name,
                         process=subprocess,
                         pipe=parent_pipe,
                         shutdown=shutdown,
                         ut_pipe=ut_parent_pipe)

    # ------------------------------
    # Use Finalize Callback, if supplied.
    # ------------------------------
    if finalize_fn:
        log.group_multi(_LOG_INIT,
                        log_dotted, "'{}': Finalize function supplied. "
                        "Calling {}...",
                        proc_name,
                        finalize_fn,
                        veredi_logger=logger)
        finalize_fn(proc, comms)

    # ------------------------------
    # Return ProcToSubComm for caller to use to communicate to sub-proc.
    # ------------------------------
    log.group_multi(_LOG_INIT,
                    log_dotted,
                    "'{}': Set-up complete.",
                    proc_name,
                    veredi_logger=logger)
    return proc
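
Tying Examples #12 and #14 together, a sketch of the lifecycle the set_up()
docstring describes: create, call start() on the returned ProcToSubComm, then
tear down. The process name here is a placeholder, and entry_fn is any
function with the (comms, context) shape of Examples #4 and #11.

def proc_lifecycle_sketch(config, context, entry_fn, log_level=None):
    # Create the sub-process and its comms; a ProcTest.DNE proc_test would
    # make set_up() return None instead.
    proc = set_up(proc_name='example-proc',  # placeholder name
                  config=config,
                  context=context,
                  entry_fn=entry_fn,
                  initial_log_level=log_level)
    if not proc:
        return None

    # Per the docstring: call start() when ready to run the sub-process.
    proc.start()

    # ... let the sub-process run for as long as needed ...

    # Blocking tear-down (Example #12) returns an ExitCodeTuple.
    return blocking_tear_down(proc)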