Example #1
    async def ping(self,
                   msg: Message,
                   context: MediatorContext) -> Optional[float]:
        '''
        Send out a ping, wait for pong (response) back. Returns the time it
        took in fractional seconds.
        '''
        if not self._socket:
            log.error(f"Cannot ping; no socket connection: {self._socket}")
            return None

        if msg.type != MsgType.PING:
            error = ValueError("Requested ping of non-ping message.", msg)
            raise log.exception(error,
                                f"Requested ping of non-ping message: {msg}")

        timer = MonotonicTimer()  # Timer starts timing on creation.

        # Run our actual ping.
        self.debug('ping pinging...')
        pong = await self._socket.ping()
        self.debug('ping ponging...')
        await pong
        self.debug('ping ponged.')

        # Return the ping time.
        self.debug('ping: {}', timer.elapsed_str)
        return timer.elapsed
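
The `MonotonicTimer` above starts timing on creation; a minimal sketch of that behavior with the standard library (an assumed API - the real veredi class may differ):

import time


class MonotonicTimer:
    '''Sketch of a timer that starts timing on construction (assumed API).'''

    def __init__(self) -> None:
        # time.monotonic() cannot jump backwards with system clock
        # adjustments, so it is safe for measuring a ping round-trip.
        self._start = time.monotonic()

    @property
    def elapsed(self) -> float:
        '''Elapsed time in fractional seconds.'''
        return time.monotonic() - self._start

    @property
    def elapsed_str(self) -> str:
        '''Elapsed time as a human-readable string.'''
        return f'{self.elapsed:.6f}s'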
Example #2
    def _load_file(self, path, error_context):
        '''Load a single data file from path.

        Raises:
          - exceptions.LoadError
            - wrapped error from self.data_serdes._load()
              - e.g. JSONDecodeError
        '''

        data = None
        with open(path, 'r') as f:
            # Can raise an error - we'll let it.
            try:
                data = self.data_serdes._load(f, error_context)
            except exceptions.LoadError:
                # Let this one bubble up as-is.
                data = None
                raise
            except Exception as e:
                # Complain that we found an exception we don't handle.
                # ...then let it bubble up as-is.
                log.error("Unhandled exception:", e)
                data = None
                raise

        return data
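
The docstring's promise that decode errors arrive wrapped in `exceptions.LoadError` implies a serdes `_load()` roughly like this sketch (JSON and the wrapping behavior are assumptions, not the actual veredi serdes):

import json


class LoadError(Exception):
    '''Stand-in for exceptions.LoadError.'''


def _load(stream, error_context):
    '''Decode `stream`, wrapping decode failures so callers only need to
    catch LoadError.'''
    try:
        return json.load(stream)
    except json.JSONDecodeError as error:
        # Chain with 'from' so the original traceback survives the wrap.
        raise LoadError(f"Failed to load data: {error_context}") from error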
Example #3
    def _is_type_field(klass: Type['Encodable'],
                       data: Optional[EncodedEither]) -> bool:
        '''
        Returns False if `klass._get_type_field()` or `klass.type_field()`
        return None.

        Returns True if `data` has type field (via `klass._get_type_field()`)
        and it matches the expected type field (via `klass.type_field()`).

        Returns False otherwise.
        '''
        data_type_field = klass._get_type_field(data)
        if data_type_field is None:
            # This may be common for simply encoded stuff. Not sure. If so
            # change to debug level.
            log.warning("No type field in data. {}", data)
            return False

        class_type_field = klass.type_field()
        if class_type_field is None:
            msg = (f"Class {klass} returned 'None' from type_field(). "
                   "type_field() is a required function. Cannot determine "
                   f"type of data: {data}")
            error = EncodableError(msg, None, None, data)
            log.exception(error, msg)
            return False

        return class_type_field == data_type_field
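
The contract `_is_type_field()` relies on is small: the class declares its expected tag via `type_field()`, and `_get_type_field()` digs the actual tag out of the encoded data. A hedged sketch of a conforming class (the key name and return values are invented for illustration):

from typing import Any, Mapping, Optional


class SomeEncodable:
    '''Sketch of the two hooks used by _is_type_field().'''

    @classmethod
    def type_field(cls) -> Optional[str]:
        # The tag this class expects in its encoded data.
        return 'some.encodable'

    @classmethod
    def _get_type_field(cls,
                        data: Optional[Mapping[str, Any]]) -> Optional[str]:
        # Pull the actual tag out of the encoded data, if present.
        if not data:
            return None
        return data.get('type-field')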
Example #4
    def _attach(self, id_or_comp: Union[ComponentId, Component]) -> None:
        '''
        DO NOT CALL THIS UNLESS YOUR NAME IS EntityManager!

        Attaches the component to this entity.
        '''
        component = id_or_comp
        if isinstance(id_or_comp, ComponentId):
            component = self._component_manager.get(id_or_comp)
        if not component:
            log.error(
                "Ignoring 'attach' requested for a non-existing "
                "component. ID or instance: {} -> {}", id_or_comp, component)
            return

        existing = self._components.get(type(component), None)
        if not existing:
            self._components[type(component)] = component
        else:
            # Entity has component already and it cannot
            # be replaced.
            log.warning(
                "Ignoring 'attach' requested for component type already "
                "existing on entity. entity_id: {}, existing: {}, "
                "requested: {}", self.id, existing, component)
Example #5
    async def _hrx_connect(
            self, match: re.Match, path: str, msg: Message,
            context: Optional[MediatorClientContext]) -> Optional[Message]:
        '''
        Receive connect ack/response from server.
        '''
        self.debug("_hrx_connect: Received connect response: {}: {}",
                   type(msg.payload), msg.payload)

        # Have Message check that this msg is an ACK_CONNECT and tell us if it
        # succeeded or not.
        success, reason = msg.verify_connected()
        if success:
            self.debug("_hrx_connect: Verified message: {}", msg)
            self._connection_attempt_success()
        else:
            log.error("{self.name}: Failed to connect to server! "
                      f"match: {match}, path: {path}, msg: {msg},"
                      f"Connection failure: {reason}")
            self._connection_attempt_failure()

        self.debug("_hrx_connect: Connection successful.")
        return await self._hrx_generic(
            match,
            path,
            msg,
            context,
            # Don't ack the ack back.
            send_ack=False,
            log_type='connect')
Example #6
def _game_over(processes: Mapping[str, multiprocessing.Process]) -> None:
    '''
    Sets the game_end flag. Engine and Mediator should notice and go into
    graceful shutdown.
    '''
    lumberjack = log.get_logger(ProcessType.MAIN.value)

    # Set the game_end flag. They should notice soon and start doing
    # their shutdown.
    log.info("Asking engine/mediator to end the game gracefully...",
             veredi_logger=lumberjack)
    processes.game_end.set()

    # Wait on engine and mediator processes to be done.
    # Wait on mediator first, since I think it'll take less long?
    log.info("Waiting for mediator to complete structured shutdown...",
             veredi_logger=lumberjack)
    processes.proc[ProcessType.MEDIATOR].join(GRACEFUL_SHUTDOWN_TIME_SEC)
    if processes.proc[ProcessType.MEDIATOR].exitcode is None:
        log.error("Mediator did not shut down in time. Data may be lost...",
                  veredi_logger=lumberjack)
    else:
        log.info("Mediator shut down complete.", veredi_logger=lumberjack)

    # Now wait on the engine.
    log.info("Waiting for engine to complete structured shutdown...",
             veredi_logger=lumberjack)
    processes.proc[ProcessType.ENGINE].join(GRACEFUL_SHUTDOWN_TIME_SEC)
    if processes.proc[ProcessType.ENGINE].exitcode is None:
        log.error("Engine did not shut down in time. Data may be lost...",
                  veredi_logger=lumberjack)
    else:
        log.info("Engine shut down complete.", veredi_logger=lumberjack)
Example #7
    async def _handle_produce(self, conn: UserConnToken) -> Optional[Message]:
        '''
        Loop waiting for messages from our multiprocessing connection to
        send onward to the mediator on the other end.
        '''
        while True:
            # Die if requested.
            if self.any_shutdown():
                break

            # Check for something in connection to send.
            msg, ctx = await self._handle_produce_get_msg(conn)
            # Checks for actually having a message below...

            # Don't send nothing, please.
            if msg is False and ctx is False:
                # Ignore. Default return from _handle_produce_get_msg().
                await self._continuing()
                continue
            if not msg or msg.type == MsgType.IGNORE:
                debug_fn = log.warning if not msg else self.debug
                debug_fn(
                    "_handle_produce: "
                    "Produced nothing for sending. "
                    "Ignoring msg: {}, ctx: {}", msg, ctx)
                await self._continuing()
                continue

            # Have something to send!
            self.debug(
                "_handle_produce: "
                "Produced for sending: msg: {}, ctx: {}", msg, ctx)

            # Default to (None, None) so the tuple unpack can't TypeError.
            sender, _ = self._hp_paths_type.get(msg.type, (None, None))
            if not sender:
                log.error(
                    "_handle_produce: "
                    "No handlers for msg type? "
                    "Ignoring msg: {}, ctx: {}", msg, ctx)
                await self._continuing()
                continue

            self.debug("_handle_produce: "
                       "Producing result from send processor...")
            result = await sender(msg, ctx, conn)

            # Only send out to socket if actually produced anything.
            if result:
                self.debug("_handle_produce: " "Sending {}...", result)
                result = await self._hook_produce(result, conn)
                return result

            else:
                self.debug("_handle_produce: " "No result to send; done.")

            # reloop
            await self._continuing()
            continue
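
The handler lookup above needs its tuple default because `dict.get`'s usual `None` default cannot be tuple-unpacked. A minimal sketch of the pattern (table contents are hypothetical):

def send_ping(msg):
    '''Placeholder sender; the real one writes to the socket.'''
    print(f'sending {msg}')


# Message type -> (sender, receiver) pair, mirroring _hp_paths_type.
handlers = {'ping': (send_ping, None)}

# A default of plain None would raise TypeError on unpacking;
# (None, None) keeps the "no handler" branch reachable.
sender, _ = handlers.get('unknown-type', (None, None))
if not sender:
    print('no handler registered; ignoring message')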
Example #8
    def _event_entity_life(self, event: EntityLifeEvent) -> None:
        '''
        Entity Life-cycle has changed enough that EntityManager has produced an
        event for it. See if we should add/remove from our dictionaries.
        '''
        # Doctor checkup.
        if not self._health_ok_event(event):
            return

        # ---
        # Deal with life-cycle transition.
        # ---
        entity_id = event.id
        entity_cycle = event.type
        if entity_cycle == EntityLifeCycle.INVALID:
            # INVALID should never come up, so... complain.
            log.error("EntityManager pushed Entity {} into {} life cycle. "
                      "Do not know how to handle this.",
                      entity_id, entity_cycle)
            self.health = VerediHealth.UNHEALTHY
            return

        elif entity_cycle == EntityLifeCycle.CREATING:
            # Don't care about CREATING - waiting for the ALIVE.
            pass

        elif entity_cycle == EntityLifeCycle.ALIVE:
            # They are now alive. Add to dictionaries.
            id_comp = self.component(entity_id)

            if not id_comp:
                # No identity; just store as anonymous for now...
                log.debug("Entity {} has entered life-cycle '{}' without any "
                          "identity_component. We have no current solution to "
                          "this conundrum... Recording as 'anonymous'.",
                          entity_id, entity_cycle)
                self._anonymous.add(entity_id)
                return

            # Now they have an IdentityComponent - update our dicts.
            self._user_ident_update(entity_id,
                                    user_id=id_comp.user_id,
                                    user_key=id_comp.user_key)

        elif (entity_cycle == EntityLifeCycle.DESTROYING
              or entity_cycle == EntityLifeCycle.DEAD):
            # Remove 'em from our dicts.
            self._user_ident_update(entity_id,
                                    delete=True)

        else:
            # Ignore.
            log.debug("Entity {} has entered life-cycle: {}. "
                      "We have nothing to do for that cycle.",
                      entity_id, entity_cycle)
            return
Example #9
    def _update_apoptosis(self) -> VerediHealth:
        '''
        Structured death phase. We actually shut down our MediatorServer now.
        '''
        timed_out = self._manager.time.is_timed_out(
            None,
            self.timeout_desired(SystemTick.APOPTOSIS))

        # Set to failure state if over time.
        if timed_out:
            # Don't care about tear_down_end result; we'll check it with
            # exitcode_healthy().
            multiproc.nonblocking_tear_down_end(self.server)

            # Get the exitcode's health.
            exit_health = self.server.exitcode_healthy(
                VerediHealth.APOPTOSIS_DONE,
                VerediHealth.FATAL)
            if exit_health == VerediHealth.FATAL:
                log.error("MediatorServer exit failure. "
                          f"Exitcode: {self.server.process.exitcode}")

            # Technically, we're already overtime, so updating our health with
            # both exit_health (whatever it is) and an overtime health of
            # FATAL makes sense. However, if we successfully exited just now,
            # that minor bit of overtime can be ignored. So just use
            # exit_health.
            self._health = self._health.update(exit_health)
            return self.health

        # Else we still have time to wait.
        multiproc.nonblocking_tear_down_wait(self.server,
                                             log_enter=True)
        done = self._apoptosis_done_check()
        if done is not False:
            # Update with done's health since we're done.
            self._health = self._health.update(done)
        else:
            # Update health with APOPTOSIS, since we're in progress.
            self._health = self._health.update(VerediHealth.APOPTOSIS)

        return self.health
Example #10
    def add(self, key: Any, value: Any) -> bool:
        '''
        Adds to /our/ sub-context.

        That is, this is a shortcut for:
          self.sub[key] = value
        with added checks.

        Returns success/failure bool.
        '''
        sub = self.sub
        if key in sub:
            log.error(
                "Skipping add key '{}' to our sub-context - the key "
                "already exists. desired value: {}, current value: {}, "
                "subcontext: {}", key, value, sub[key], sub)
            return False
        sub[key] = value
        return True
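
Usage is first-write-wins; a self-contained stand-in demonstrating the contract (the real class also logs the collision and carries more context machinery):

class ExampleContext:
    '''Minimal stand-in exposing the same first-write-wins add().'''

    def __init__(self):
        self.sub = {}

    def add(self, key, value):
        if key in self.sub:
            return False  # the real version logs the collision here
        self.sub[key] = value
        return True


context = ExampleContext()
assert context.add('dotted', 'veredi.example') is True
assert context.add('dotted', 'other.value') is False  # key already set
assert context.sub['dotted'] == 'veredi.example'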
Example #11
    async def _handle_consume(self, msg: Message, path: str,
                              context: Optional[MediatorContext],
                              conn: UserConnToken) -> Optional[Message]:
        '''
        Handles a `VebSocketServer.serve_parallel` consume data callback.
        '''
        self.debug("_handle_consume: "
                   "Consuming a message on path: {}: {}", path, msg)
        match, processor = self._hrx_path_processor(path)
        self.debug("_handle_consume: "
                   "match: {}, processor: {}", match, processor)
        if not processor:
            # TODO [2020-07-29]: Log info about client too.
            log.error(
                "_handle_consume: "
                "Tried to consume message for unhandled path: {}, {}", msg,
                path)
            return None

        self.debug("_handle_consume: "
                   "Sending to path processor to consume...")
        result = await processor(match, path, msg, context)
        result = await self._hook_consume(result, conn)
        return result
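
`_hrx_path_processor()` is not shown here, but its `(match, processor)` return shape implies a regex-keyed routing table; a hedged sketch (routes and handlers are invented):

import re

# Path regex -> handler; the real table is built by the mediator.
_paths = [
    (re.compile(r'^/connect$'), lambda *args: 'connect-handler'),
    (re.compile(r'^/ping$'),    lambda *args: 'ping-handler'),
]


def _hrx_path_processor(path):
    '''Return (match, processor) for the first regex matching `path`,
    or (None, None) when the path is unhandled.'''
    for regex, processor in _paths:
        match = regex.match(path)
        if match:
            return match, processor
    return None, None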
Example #12
def _logs_over(processes: Mapping[str, multiprocessing.Process]) -> None:
    '''
    Sets the logs_end flag. Logs server should notice and gracefully shut down.
    '''
    lumberjack = log.get_logger(ProcessType.MAIN.value)

    # Set the logs_end flag. The logs server should notice soon and start
    # its shutdown.
    log.info("Asking logs server to end gracefully...",
             veredi_logger=lumberjack)
    processes.logs_end.set()

    # Wait on the logs server process to be done.
    log.info("Waiting for logs server to complete structured shutdown...",
             veredi_logger=lumberjack)
    processes.proc[ProcessType.LOGS].join(GRACEFUL_SHUTDOWN_TIME_SEC)
    if processes.proc[ProcessType.LOGS].exitcode is None:
        log.error(
            "Logs server did not shut down in time. "
            "Logs may be lost? IDK...",
            veredi_logger=lumberjack)
    else:
        log.info("Logs server shut down complete.", veredi_logger=lumberjack)
Example #13
    def _event_to_cmd(self,
                      string_unsafe: str,
                      entity:        'Entity',
                      event:         'Event',
                      context:       'VerediContext') -> None:
        '''
        Take args, verify, and send on to commander for further processing.
        '''
        ident = entity.get(IdentityComponent)
        if not ident:
            log.debug("No IdentityComponent for entity - cannot process "
                      "input event. Entity '{}'. input-string: '{}', "
                      "event: {}",
                      entity, string_unsafe, event)
            return

        string_unsafe = None
        try:
            string_unsafe = event.payload
        except AttributeError:
            try:
                string_unsafe = event.string_unsafe
            except AttributeError as err:
                log.exception(err,
                              "Event {} does not have 'payload' or "
                              "'string_unsafe' property - input system "
                              "cannot process it as a command.",
                              event,
                              context=context)
                # Nothing to sanitize or execute; bail out.
                return

        log.debug("Input from '{}' (by '{}'). input-string: '{}', event: {}",
                  ident.log_name, ident.log_extra,
                  string_unsafe, event)

        string_safe, string_valid = sanitize.validate(string_unsafe,
                                                      ident.log_name,
                                                      ident.log_extra,
                                                      event.context)

        if string_valid != sanitize.InputValid.VALID:
            log.info("Input from '{}' (by '{}'): "
                     "Dropping event {} - input failed validation.",
                     ident.log_name, ident.log_extra,
                     event,
                     context=event.context)
            # TODO [2020-06-11]: Keep track of how many times user was
            # potentially naughty?
            return

        command_safe = self._commander.maybe_command(string_safe)
        if not command_safe:
            log.info("Input from '{}' (by '{}'): "
                     "Dropping event {} - input failed `maybe_command()`.",
                     ident.log_name, ident.log_extra,
                     event,
                     context=event.context)
            # TODO [2020-06-11]: Keep track of how many times user was
            # potentially naughty?
            return

        # Create history, generate ID.
        input_id = self._historian.add_text(entity, string_safe)

        # Get the command processed.
        cmd_ctx = InputContext(input_id, command_safe,
                               entity.id,
                               ident.log_name,
                               self.dotted)
        cmd_ctx.pull(event.context)
        status = self._commander.execute(entity, command_safe, cmd_ctx)
        # Update history w/ status.
        self._historian.update_executed(input_id, status)

        # TODO [2020-06-21]: Success/Failure OutputEvent?

        if not status.success:
            log.error("Failed to execute command: {}",
                      string_safe,
                      context=cmd_ctx)
            return
Example #14
    def _canon_make(self,
                    names: label.LabelInput,
                    no_error_log: bool = False,
                    raise_error: bool = True) -> Nullable[str]:
        '''
        The actual canonicalize step for self.canonical().

        If `no_error_log`, skips logging an error if encountered.

        If `raise_error` is True, raises a KeyError if it falls off the data
        while trying to canonicalize.
          - This can be undesirable when e.g. creating commands from aliases.
            See AbilitySystem for how it deals with things so that its 'mod'
            alias doesn't get registered as an ability command.
        '''

        canon = []
        length = len(names)
        bookmark = self[self._key_prime]
        for i in range(length):
            name = names[i]
            resolved = False
            if isinstance(name, str):
                # Normal case, just look for aliases to replace.
                standard = self.get(self.ALIAS).get(name, None)
                if standard:
                    canon.append(standard)
                    bookmark = bookmark.get(standard, Null())
                    resolved = True
            elif isinstance(name, list):
                # Special case - look for 'this' to resolve. [[0], 1, 2]
                peek = names[i + 1] if (i < (length - 1)) else None
                standard_list = self._canon_this(canon, name, peek)
                for each in standard_list:
                    canon.append(each)
                    bookmark = bookmark.get(each, Null())
                resolved = True

            if not resolved:
                # It was fine all along as-is.
                standard = name
                canon.append(standard)
                bookmark = bookmark.get(standard, Null())

            # Did we get off track somehow?
            if not bookmark:
                if raise_error:
                    raise KeyError(("Canonicalizing and we fell off the "
                                    "definitions data? "
                                    f"input: {names}, current: {canon}"),
                                   names, canon)
                # Else just log the error and give 'em Null().
                # Well, maybe log.
                if not no_error_log:
                    log.error(
                        "Canonicalizing and we fell off the definitions data? "
                        "input: {}, current: {}", names, canon)
                return Null()

        # We've canonicalized. Are we at a leaf, or do we need maybe a default
        # value thrown in?
        if not isinstance(bookmark, (str, int, float)):
            self._append_default(canon)

        return label.normalize(*canon)
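
The heart of the alias step above is a dictionary lookup that falls back to the name itself; a stripped-down sketch (the alias table contents are invented, and the real method also walks the definitions data):

# Hypothetical alias table; real entries come from definitions data.
ALIAS = {'str': 'strength', 'dex': 'dexterity'}


def canonicalize(names):
    '''Replace each alias with its standard form; dot the results.'''
    return '.'.join(ALIAS.get(name, name) for name in names)


print(canonicalize(['str', 'score']))  # -> 'strength.score'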
Example #15
    def healthy(self, life_cycle: SystemLifeCycle) -> VerediHealth:
        '''
        Returns a health value based on sub-process's status.

        If `life_cycle` is SystemLifeCycle.AUTOPHAGY, this will return
        AUTOPHAGY_FAILURE, AUTOPHAGY_SUCCESSFUL, etc instead of FATAL, HEALTHY,
        DYING, etc.
        '''
        # ------------------------------
        # Process DNE.
        # ------------------------------
        if not self.process:
            # If trying to die and have no process... uh...
            # You should have a process.
            if life_cycle == SystemLifeCycle.AUTOPHAGY:
                return VerediHealth.AUTOPHAGY_FAILURE

            # No process is pretty bad for a multiprocess thing.
            return VerediHealth.FATAL

        # ------------------------------
        # Process Exists.
        # ------------------------------

        # ---
        # Shutdown?
        # ---
        # Did we tell it to shut down?
        if self.shutdown.is_set():
            # Do we want it to shut down?
            if life_cycle != SystemLifeCycle.AUTOPHAGY:
                # Let's indicate something that says we want to let the
                # shutdown continue, but that it's also during the wrong
                # SystemLifeCycle...
                return VerediHealth.DYING

            # Ok: SystemLifeCycle is autophagy. A nice structured death.
            if self.process.is_alive():
                # We're still in autophagy?
                # TODO: time-out at system manager and/or game engine level.
                return VerediHealth.AUTOPHAGY

            # Healthy Exit Code == A Good Death
            elif self.process.exitcode == 0:
                return VerediHealth.AUTOPHAGY_SUCCESSFUL

            # Unhealthy Exit Code == Not So Good of a Death
            return VerediHealth.AUTOPHAGY_FAILURE

        # ---
        # Not Running but not Shutdown?!
        # ---
        if not self.process.is_alive():
            if life_cycle == SystemLifeCycle.AUTOPHAGY:
                log.error(
                    "Process '{}' is in "
                    "SystemLifeCycle.AUTOPHAGY and "
                    "process is not alive, but shutdown flag isn't set "
                    "and it should be.", self.name)
                # Might be a successful autophagy from the multiproc standpoint
                # but we don't know. Someone changed the shutdown flag we want
                # to check.
                return VerediHealth.AUTOPHAGY_FAILURE

            # Not running and not in autophagy life-cycle... Dunno but not
            # healthy.
            return VerediHealth.UNHEALTHY

        # ---
        # Running and should be, so... Healthy.
        # ---
        return VerediHealth.HEALTHY
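
The checks above lean on two `multiprocessing.Process` facts: `is_alive()` reports whether the child is still running, and `exitcode` is `None` while running, `0` on a clean exit, and `-N` when killed by signal `N`. A quick illustration:

import multiprocessing


def _work():
    pass  # exits immediately with code 0


if __name__ == '__main__':
    proc = multiprocessing.Process(target=_work)
    proc.start()
    proc.join()

    print(proc.is_alive())  # False: the process has finished
    print(proc.exitcode)    # 0: clean exit -> "a good death"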