class AgentGroup(HasTraits):
    """Group of agents

    Examples:
        >>> group = AgentGroup(
        ...     size=10,
        ...     agent_type=Circular,
        ...     attributes=...,
        ... )

    """
    agent_type = Type(AgentType,
                      allow_none=True,
                      help='AgentType subclass for generating agents from attributes.')
    size = Int(
        default_value=0,
        help='Size of the agent group. Optional if attributes is an instance '
        'of a collection.')
    attributes = Union(
        (Instance(Collection), Instance(Generator), Instance(Callable)),
        allow_none=True,
        help='Attributes of the chosen agent type.')
    members = List(Instance(AgentType),
                   help='Agents generated from the attributes.')

    @observe('size', 'agent_type', 'attributes')
    def _observe_members(self, change):
        if self.size > 0 and self.attributes is not None and self.agent_type is not None:
            if isinstance(self.attributes, Collection):
                self.members = [self.agent_type(**a) for a in self.attributes]
            elif isinstance(self.attributes, Generator):
                self.members = [
                    self.agent_type(**next(self.attributes))
                    for _ in range(self.size)
                ]
            elif isinstance(self.attributes, Callable):
                self.members = [
                    self.agent_type(**self.attributes())
                    for _ in range(self.size)
                ]
            else:
                raise TraitError('attributes must be a Collection, Generator '
                                 'or Callable')
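
# Usage sketch for AgentGroup: `attributes` may be a collection of dicts, a
# generator, or a callable producing keyword arguments for `agent_type`. The
# field names below are assumptions about what Circular accepts (the values
# come from the spawn code further down).
group = AgentGroup(
    size=10,
    agent_type=Circular,
    attributes=lambda: dict(radius=0.27, mass=80.0, target_velocity=1.15))
# group.members now holds 10 Circular agents built from the factory.
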
class Agents(AgentsBase):
    """Set groups of agents

    Examples:
        >>> agent = Agents(agent_type=Circular)
        >>> agent.add_non_overlapping_group(...)

    """
    agent_type = Type(
        klass=AgentType,
        help='Subclass of AgentType. This will be used to create attributes '
        'for the agent.')
    size_max = Int(
        allow_none=True,
        help='Maximum number of agents that can be created. None allows the '
        'size to grow dynamically.')
    cell_size = Float(
        default_value=0.6,
        min=0,
        help='Cell size for the block list. Value should be slightly over '
        'the maximum of the agent radii.')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.index = 0
        self.array = np.zeros(0, dtype=self.agent_type.dtype())
        # Block list for speeding up overlapping checks
        self._neighbours = MutableBlockList(cell_size=self.cell_size)

    def add_non_overlapping_group(self,
                                  speed,
                                  groupname,
                                  group,
                                  position_gen,
                                  position_iter,
                                  spawn,
                                  obstacles=None):
        """Add group of agents

        Args:
            group (AgentGroup):
            position_gen (Generator|Callable):
            obstacles (numpy.ndarray):
        """
        if self.agent_type is not group.agent_type:
            raise CrowdDynamicsException

        # resize self.array to fit new agents
        array = np.zeros(group.size, dtype=group.agent_type.dtype())
        self.array = np.concatenate((self.array, array))

        index = 0

        spawn_data = np.load("complex/spawn_complex.npy")

        while index < group.size:

            new_agent = group.members[index]

            if position_gen:
                new_agent.position = next(position_iter)
                new_agent.radius = 0.27
                new_agent.mass = 80.0
                new_agent.target_velocity = 1.15

            else:
                # Pre-computed spawn records: rows are grouped in blocks of
                # 50 per spawn area; column 7 holds the radius, 11 the mass,
                # 13 a stored target velocity (unused), and 15 the position.
                offsets = {"spawn_left": 0, "spawn_lower": 50,
                           "spawn_right": 100, "spawn_upper": 150}
                if groupname in offsets:
                    row = spawn_data[index + offsets[groupname]]
                    new_agent.radius = row[7]
                    new_agent.mass = row[11]
                    #new_agent.target_velocity = row[13]
                    if speed == "slow":
                        new_agent.target_velocity = 0.5
                    elif speed == "fast":
                        new_agent.target_velocity = 1.55
                    new_agent.position = row[15]

            # Agent can be successfully placed
            self.array[self.index] = np.array(new_agent)
            self._neighbours[new_agent.position] = self.index
            self.index += 1
            index += 1

        # TODO: remove agents that didn't fit from self.array
        #if self.index + 1 < self.array.size:
        #    pass

        # Array should remain contiguous
        assert self.array.flags.c_contiguous
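
# Call sketch for the variant above; Circular and the attribute factory are
# assumptions, and position_gen=None selects the pre-computed spawn-data
# branch (position_iter is then unused).
agents = Agents(agent_type=Circular)
group = AgentGroup(size=50, agent_type=Circular,
                   attributes=lambda: dict(radius=0.27, mass=80.0))
agents.add_non_overlapping_group(speed="fast", groupname="spawn_left",
                                 group=group, position_gen=None,
                                 position_iter=iter([]), spawn=None)
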
Example 3
class LinePlotManager(Configurable):
    """
    Manage the line plots for one FigureManager.
    """
    omit_single_point_plot = Bool(True, config=True)
    line_class = Type()

    @default('line_class')
    def default_line_class(self):
        # By defining the default value of line_class dynamically here, we
        # avoid importing matplotlib if some non-matplotlib line_class is
        # specified by configuration.
        from ..artists.mpl.line import Line
        return Line

    def __init__(self, fig_manager, dimensions):
        self.update_config(load_config())
        self.fig_manager = fig_manager
        self.start_doc = None
        self.dimensions = dimensions
        self.dim_streams = set(stream for _, stream in self.dimensions)
        if len(self.dim_streams) > 1:
            raise NotImplementedError

    def __call__(self, name, start_doc):
        self.start_doc = start_doc
        return [], [self.subfactory]

    def subfactory(self, name, descriptor_doc):
        if self.omit_single_point_plot and self.start_doc.get(
                'num_points') == 1:
            return []
        if len(self.dimensions) > 1:
            return []  # This is a job for Grid.
        fields = set(hinted_fields(descriptor_doc))
        # Filter out the fields with a data type or shape that we cannot
        # represent in a line plot.
        for field in list(fields):
            dtype = descriptor_doc['data_keys'][field]['dtype']
            if dtype not in ('number', 'integer'):
                fields.discard(field)
            ndim = len(descriptor_doc['data_keys'][field]['shape'] or [])
            if ndim != 0:
                fields.discard(field)

        callbacks = []
        dim_stream, = self.dim_streams  # TODO Handle multiple dim_streams.
        if descriptor_doc.get('name') == dim_stream:
            dimension, = self.dimensions
            x_keys, stream_name = dimension
            fields -= set(x_keys)
            assert stream_name == dim_stream  # TODO Handle multiple dim_streams.
            for x_key in x_keys:
                figure_label = f'Scalars v {x_key}'
                fig = self.fig_manager.get_figure(
                    ('line', x_key, tuple(fields)),
                    figure_label,
                    len(fields),
                    sharex=True)
                for y_key, ax in zip(fields, fig.axes):

                    log.debug('plot %s against %s', y_key, x_key)

                    ylabel = y_key
                    y_units = descriptor_doc['data_keys'][y_key].get('units')
                    if y_units:
                        ylabel += f' [{y_units}]'
                    ax.set_ylabel(ylabel)
                    # Set xlabel only on lowest axes, outside for loop below.

                    def func(event_page, y_key=y_key, x_key=x_key):
                        """
                        Extract x points and y points to plot out of an EventPage.

                        This will be passed to LineWithPeaks.
                        """
                        y_data = event_page['data'][y_key]
                        if x_key == 'time':
                            t0 = self.start_doc['time']
                            x_data = numpy.asarray(event_page['time']) - t0
                        elif x_key == 'seq_num':
                            x_data = event_page['seq_num']
                        else:
                            x_data = event_page['data'][x_key]
                        return x_data, y_data

                    line = self.line_class(func, ax=ax)
                    callbacks.append(line)

                if fields:
                    # Set the xlabel on the bottom-most axis.
                    if x_key == 'time':
                        xlabel = x_key
                        x_units = 's'
                    elif x_key == 'seq_num':
                        xlabel = 'sequence number'
                        x_units = None
                    else:
                        xlabel = x_key
                        x_units = descriptor_doc['data_keys'][x_key].get(
                            'units')
                    if x_units:
                        xlabel += f' [{x_units}]'
                    ax.set_xlabel(xlabel)
                    fig.tight_layout()
            # TODO Plot other streams against time.
        for callback in callbacks:
            callback('start', self.start_doc)
            callback('descriptor', descriptor_doc)
        return callbacks
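
# Sketch of the extraction contract built in subfactory: each Line receives a
# func(event_page) -> (x_data, y_data). An EventPage batches events
# column-wise, so 'data' maps field names to lists; the field names here are
# hypothetical.
event_page = {
    'time': [1690000000.0, 1690000000.5],
    'seq_num': [1, 2],
    'data': {'motor': [0.0, 0.1], 'det': [7.2, 7.4]},
}
x_data, y_data = event_page['data']['motor'], event_page['data']['det']
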
Example 4
class Agents(AgentsBase):
    """Set groups of agents

    Examples:
        >>> agent = Agents(agent_type=Circular)
        >>> agent.add_non_overlapping_group(...)

    """
    agent_type = Type(
        klass=AgentType,
        help='Subclass of AgentType. This will be used to create attributes '
        'for the agent.')
    size_max = Int(
        allow_none=True,
        help='Maximum number of agents that can be created. None allows the '
        'size to grow dynamically.')
    cell_size = Float(
        default_value=0.6,
        min=0,
        help='Cell size for the block list. Value should be slightly over '
        'the maximum of the agent radii.')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.index = 0
        self.array = np.zeros(0, dtype=self.agent_type.dtype())
        # Block list for speeding up overlapping checks
        self._neighbours = MutableBlockList(cell_size=self.cell_size)

    def add_non_overlapping_group(self, group, position_gen, obstacles=None):
        """Add group of agents

        Args:
            group (AgentGroup):
            position_gen (Generator|Callable):
            obstacles (numpy.ndarray):
        """
        if self.agent_type is not group.agent_type:
            raise CrowdDynamicsException

        # resize self.array to fit new agents
        array = np.zeros(group.size, dtype=group.agent_type.dtype())
        self.array = np.concatenate((self.array, array))

        index = 0
        overlaps = 0
        overlaps_max = 10 * group.size

        while index < group.size and overlaps < overlaps_max:
            new_agent = group.members[index]
            new_agent.position = position_gen() if callable(position_gen) \
                else next(position_gen)

            # Overlapping check
            neighbours = self._neighbours.nearest(new_agent.position, radius=1)
            if new_agent.overlapping(self.array[neighbours]):
                # Agent is overlapping other agent.
                overlaps += 1
                continue

            if obstacles is not None and new_agent.overlapping_obstacles(
                    obstacles):
                # Agent is overlapping with an obstacle.
                overlaps += 1
                continue

            # Agent can be successfully placed
            self.array[self.index] = np.array(new_agent)
            self._neighbours[new_agent.position] = self.index
            self.index += 1
            index += 1

        # TODO: remove agents that didn't fit from self.array
        if self.index + 1 < self.array.size:
            pass

        # Array should remain contiguous
        assert self.array.flags.c_contiguous
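
# Usage sketch for the rejection-sampling placement above; Circular and the
# uniform position generator are assumptions, not part of the shown API.
import random

def position_gen():
    # Uniform positions over a hypothetical 10 m x 10 m spawn area.
    return (10 * random.random(), 10 * random.random())

agents = Agents(agent_type=Circular)
group = AgentGroup(size=10, agent_type=Circular,
                   attributes=lambda: dict(radius=0.27, mass=80.0))
agents.add_non_overlapping_group(group, position_gen)
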
Example 5
class IPKernelApp(BaseIPythonApplication, InteractiveShellApp, ConnectionFileMixin):
    name = "ipython-kernel"
    aliases = Dict(kernel_aliases)
    flags = Dict(kernel_flags)
    classes = [IPythonKernel, ZMQInteractiveShell, ProfileDir, Session]
    # the kernel class, as an importstring
    kernel_class = Type(
        "ipykernel.ipkernel.IPythonKernel",
        klass="ipykernel.kernelbase.Kernel",
        help="""The Kernel subclass to be used.

    This should allow easy re-use of the IPKernelApp entry point
    to configure and launch kernels other than IPython's own.
    """,
    ).tag(config=True)
    kernel = Any()
    poller = Any()  # don't restrict this even though current pollers are all Threads
    heartbeat = Instance(Heartbeat, allow_none=True)

    context = Any()
    shell_socket = Any()
    control_socket = Any()
    debugpy_socket = Any()
    debug_shell_socket = Any()
    stdin_socket = Any()
    iopub_socket = Any()
    iopub_thread = Any()
    control_thread = Any()

    _ports = Dict()

    subcommands = {
        "install": (
            "ipykernel.kernelspec.InstallIPythonKernelSpecApp",
            "Install the IPython kernel",
        ),
    }

    # connection info:
    connection_dir = Unicode()

    @default("connection_dir")
    def _default_connection_dir(self):
        return jupyter_runtime_dir()

    @property
    def abs_connection_file(self):
        if os.path.basename(self.connection_file) == self.connection_file:
            return os.path.join(self.connection_dir, self.connection_file)
        else:
            return self.connection_file

    # streams, etc.
    no_stdout = Bool(False, help="redirect stdout to the null device").tag(config=True)
    no_stderr = Bool(False, help="redirect stderr to the null device").tag(config=True)
    trio_loop = Bool(False, help="Set main event loop.").tag(config=True)
    quiet = Bool(True, help="Only send stdout/stderr to output stream").tag(config=True)
    outstream_class = DottedObjectName(
        "ipykernel.iostream.OutStream", help="The importstring for the OutStream factory"
    ).tag(config=True)
    displayhook_class = DottedObjectName(
        "ipykernel.displayhook.ZMQDisplayHook", help="The importstring for the DisplayHook factory"
    ).tag(config=True)

    capture_fd_output = Bool(
        True,
        help="""Attempt to capture and forward low-level output, e.g. produced by Extension libraries.
    """,
    ).tag(config=True)

    # polling
    parent_handle = Integer(
        int(os.environ.get("JPY_PARENT_PID") or 0),
        help="""kill this process if its parent dies.  On Windows, the argument
        specifies the HANDLE of the parent process, otherwise it is simply boolean.
        """,
    ).tag(config=True)
    interrupt = Integer(
        int(os.environ.get("JPY_INTERRUPT_EVENT") or 0),
        help="""ONLY USED ON WINDOWS
        Interrupt this process when the parent is signaled.
        """,
    ).tag(config=True)

    def init_crash_handler(self):
        sys.excepthook = self.excepthook

    def excepthook(self, etype, evalue, tb):
        # write uncaught traceback to 'real' stderr, not zmq-forwarder
        traceback.print_exception(etype, evalue, tb, file=sys.__stderr__)

    def init_poller(self):
        if sys.platform == "win32":
            if self.interrupt or self.parent_handle:
                self.poller = ParentPollerWindows(self.interrupt, self.parent_handle)
        elif self.parent_handle and self.parent_handle != 1:
            # PID 1 (init) is special and will never go away,
            # only be reassigned.
            # Parent polling doesn't work if ppid == 1 to start with.
            self.poller = ParentPollerUnix()

    def _try_bind_socket(self, s, port):
        iface = "%s://%s" % (self.transport, self.ip)
        if self.transport == "tcp":
            if port <= 0:
                port = s.bind_to_random_port(iface)
            else:
                s.bind("tcp://%s:%i" % (self.ip, port))
        elif self.transport == "ipc":
            if port <= 0:
                port = 1
                path = "%s-%i" % (self.ip, port)
                while os.path.exists(path):
                    port = port + 1
                    path = "%s-%i" % (self.ip, port)
            else:
                path = "%s-%i" % (self.ip, port)
            s.bind("ipc://%s" % path)
        return port

    def _bind_socket(self, s, port):
        try:
            win_in_use = errno.WSAEADDRINUSE  # type:ignore[attr-defined]
        except AttributeError:
            win_in_use = None

        # Try up to 100 times to bind a port when in conflict to avoid
        # infinite attempts in bad setups
        max_attempts = 1 if port else 100
        for attempt in range(max_attempts):
            try:
                return self._try_bind_socket(s, port)
            except zmq.ZMQError as ze:
                # Raise if we have any error not related to socket binding
                if ze.errno != errno.EADDRINUSE and ze.errno != win_in_use:
                    raise
                if attempt == max_attempts - 1:
                    raise

    def write_connection_file(self):
        """write connection info to JSON file"""
        cf = self.abs_connection_file
        self.log.debug("Writing connection file: %s", cf)
        write_connection_file(
            cf,
            ip=self.ip,
            key=self.session.key,
            transport=self.transport,
            shell_port=self.shell_port,
            stdin_port=self.stdin_port,
            hb_port=self.hb_port,
            iopub_port=self.iopub_port,
            control_port=self.control_port,
        )

    def cleanup_connection_file(self):
        cf = self.abs_connection_file
        self.log.debug("Cleaning up connection file: %s", cf)
        try:
            os.remove(cf)
        except OSError:
            pass

        self.cleanup_ipc_files()

    def init_connection_file(self):
        if not self.connection_file:
            self.connection_file = "kernel-%s.json" % os.getpid()
        try:
            self.connection_file = filefind(self.connection_file, [".", self.connection_dir])
        except OSError:
            self.log.debug("Connection file not found: %s", self.connection_file)
            # This means I own it, and I'll create it in this directory:
            os.makedirs(os.path.dirname(self.abs_connection_file), mode=0o700, exist_ok=True)
            # Also, I will clean it up:
            atexit.register(self.cleanup_connection_file)
            return
        try:
            self.load_connection_file()
        except Exception:
            self.log.error(
                "Failed to load connection file: %r", self.connection_file, exc_info=True
            )
            self.exit(1)

    def init_sockets(self):
        # Create a context, a session, and the kernel sockets.
        self.log.info("Starting the kernel at pid: %i", os.getpid())
        assert self.context is None, "init_sockets cannot be called twice!"
        self.context = context = zmq.Context()
        atexit.register(self.close)

        self.shell_socket = context.socket(zmq.ROUTER)
        self.shell_socket.linger = 1000
        self.shell_port = self._bind_socket(self.shell_socket, self.shell_port)
        self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port)

        self.stdin_socket = context.socket(zmq.ROUTER)
        self.stdin_socket.linger = 1000
        self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port)
        self.log.debug("stdin ROUTER Channel on port: %i" % self.stdin_port)

        if hasattr(zmq, "ROUTER_HANDOVER"):
            # set router-handover to workaround zeromq reconnect problems
            # in certain rare circumstances
            # see ipython/ipykernel#270 and zeromq/libzmq#2892
            self.shell_socket.router_handover = self.stdin_socket.router_handover = 1

        self.init_control(context)
        self.init_iopub(context)

    def init_control(self, context):
        self.control_socket = context.socket(zmq.ROUTER)
        self.control_socket.linger = 1000
        self.control_port = self._bind_socket(self.control_socket, self.control_port)
        self.log.debug("control ROUTER Channel on port: %i" % self.control_port)

        self.debugpy_socket = context.socket(zmq.STREAM)
        self.debugpy_socket.linger = 1000

        self.debug_shell_socket = context.socket(zmq.DEALER)
        self.debug_shell_socket.linger = 1000
        if self.shell_socket.getsockopt(zmq.LAST_ENDPOINT):
            self.debug_shell_socket.connect(self.shell_socket.getsockopt(zmq.LAST_ENDPOINT))

        if hasattr(zmq, "ROUTER_HANDOVER"):
            # set router-handover to workaround zeromq reconnect problems
            # in certain rare circumstances
            # see ipython/ipykernel#270 and zeromq/libzmq#2892
            self.control_socket.router_handover = 1

        self.control_thread = ControlThread(daemon=True)

    def init_iopub(self, context):
        self.iopub_socket = context.socket(zmq.PUB)
        self.iopub_socket.linger = 1000
        self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port)
        self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port)
        self.configure_tornado_logger()
        self.iopub_thread = IOPubThread(self.iopub_socket, pipe=True)
        self.iopub_thread.start()
        # backward-compat: wrap iopub socket API in background thread
        self.iopub_socket = self.iopub_thread.background_socket

    def init_heartbeat(self):
        """start the heart beating"""
        # heartbeat doesn't share context, because it mustn't be blocked
        # by the GIL, which is accessed by libzmq when freeing zero-copy messages
        hb_ctx = zmq.Context()
        self.heartbeat = Heartbeat(hb_ctx, (self.transport, self.ip, self.hb_port))
        self.hb_port = self.heartbeat.port
        self.log.debug("Heartbeat REP Channel on port: %i" % self.hb_port)
        self.heartbeat.start()

    def close(self):
        """Close zmq sockets in an orderly fashion"""
        # un-capture IO before we start closing channels
        self.reset_io()
        self.log.info("Cleaning up sockets")
        if self.heartbeat:
            self.log.debug("Closing heartbeat channel")
            self.heartbeat.context.term()
        if self.iopub_thread:
            self.log.debug("Closing iopub channel")
            self.iopub_thread.stop()
            self.iopub_thread.close()
        if self.control_thread and self.control_thread.is_alive():
            self.log.debug("Closing control thread")
            self.control_thread.stop()
            self.control_thread.join()

        if self.debugpy_socket and not self.debugpy_socket.closed:
            self.debugpy_socket.close()
        if self.debug_shell_socket and not self.debug_shell_socket.closed:
            self.debug_shell_socket.close()

        for channel in ("shell", "control", "stdin"):
            self.log.debug("Closing %s channel", channel)
            socket = getattr(self, channel + "_socket", None)
            if socket and not socket.closed:
                socket.close()
        self.log.debug("Terminating zmq context")
        self.context.term()
        self.log.debug("Terminated zmq context")

    def log_connection_info(self):
        """display connection info, and store ports"""
        basename = os.path.basename(self.connection_file)
        if (
            basename == self.connection_file
            or os.path.dirname(self.connection_file) == self.connection_dir
        ):
            # use shortname
            tail = basename
        else:
            tail = self.connection_file
        lines = [
            "To connect another client to this kernel, use:",
            "    --existing %s" % tail,
        ]
        # log connection info
        # info-level, so often not shown.
        # frontends should use the %connect_info magic
        # to see the connection info
        for line in lines:
            self.log.info(line)
        # also raw print to the terminal if no parent_handle (`ipython kernel`)
        # unless log-level is CRITICAL (--quiet)
        if not self.parent_handle and self.log_level < logging.CRITICAL:
            print(_ctrl_c_message, file=sys.__stdout__)
            for line in lines:
                print(line, file=sys.__stdout__)

        self._ports = dict(
            shell=self.shell_port,
            iopub=self.iopub_port,
            stdin=self.stdin_port,
            hb=self.hb_port,
            control=self.control_port,
        )

    def init_blackhole(self):
        """redirects stdout/stderr to devnull if necessary"""
        if self.no_stdout or self.no_stderr:
            blackhole = open(os.devnull, "w")
            if self.no_stdout:
                sys.stdout = sys.__stdout__ = blackhole
            if self.no_stderr:
                sys.stderr = sys.__stderr__ = blackhole

    def init_io(self):
        """Redirect input streams and set a display hook."""
        if self.outstream_class:
            outstream_factory = import_item(str(self.outstream_class))
            if sys.stdout is not None:
                sys.stdout.flush()

            e_stdout = None if self.quiet else sys.__stdout__
            e_stderr = None if self.quiet else sys.__stderr__

            if not self.capture_fd_output:
                outstream_factory = partial(outstream_factory, watchfd=False)

            sys.stdout = outstream_factory(self.session, self.iopub_thread, "stdout", echo=e_stdout)
            if sys.stderr is not None:
                sys.stderr.flush()
            sys.stderr = outstream_factory(self.session, self.iopub_thread, "stderr", echo=e_stderr)
            if hasattr(sys.stderr, "_original_stdstream_copy"):

                for handler in self.log.handlers:
                    if isinstance(handler, StreamHandler) and (handler.stream.buffer.fileno() == 2):
                        self.log.debug("Seeing logger to stderr, rerouting to raw filedescriptor.")

                        handler.stream = TextIOWrapper(
                            FileIO(
                                sys.stderr._original_stdstream_copy,  # type:ignore[attr-defined]
                                "w",
                            )
                        )
        if self.displayhook_class:
            displayhook_factory = import_item(str(self.displayhook_class))
            self.displayhook = displayhook_factory(self.session, self.iopub_socket)
            sys.displayhook = self.displayhook

        self.patch_io()

    def reset_io(self):
        """restore original io

        restores state after init_io
        """
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
        sys.displayhook = sys.__displayhook__

    def patch_io(self):
        """Patch important libraries that can't handle sys.stdout forwarding"""
        try:
            import faulthandler
        except ImportError:
            pass
        else:
            # Warning: this is a monkeypatch of `faulthandler.enable`, watch for possible
            # updates to the upstream API and update accordingly (up-to-date as of Python 3.5):
            # https://docs.python.org/3/library/faulthandler.html#faulthandler.enable

            # change default file to __stderr__ from forwarded stderr
            faulthandler_enable = faulthandler.enable

            def enable(file=sys.__stderr__, all_threads=True, **kwargs):
                return faulthandler_enable(file=file, all_threads=all_threads, **kwargs)

            faulthandler.enable = enable

            if hasattr(faulthandler, "register"):
                faulthandler_register = faulthandler.register

                def register(signum, file=sys.__stderr__, all_threads=True, chain=False, **kwargs):
                    return faulthandler_register(
                        signum, file=file, all_threads=all_threads, chain=chain, **kwargs
                    )

                faulthandler.register = register

    def init_signal(self):
        signal.signal(signal.SIGINT, signal.SIG_IGN)

    def init_kernel(self):
        """Create the Kernel object itself"""
        shell_stream = ZMQStream(self.shell_socket)
        control_stream = ZMQStream(self.control_socket, self.control_thread.io_loop)
        debugpy_stream = ZMQStream(self.debugpy_socket, self.control_thread.io_loop)
        self.control_thread.start()
        kernel_factory = self.kernel_class.instance

        kernel = kernel_factory(
            parent=self,
            session=self.session,
            control_stream=control_stream,
            debugpy_stream=debugpy_stream,
            debug_shell_socket=self.debug_shell_socket,
            shell_stream=shell_stream,
            control_thread=self.control_thread,
            iopub_thread=self.iopub_thread,
            iopub_socket=self.iopub_socket,
            stdin_socket=self.stdin_socket,
            log=self.log,
            profile_dir=self.profile_dir,
            user_ns=self.user_ns,
        )
        kernel.record_ports({name + "_port": port for name, port in self._ports.items()})
        self.kernel = kernel

        # Allow the displayhook to get the execution count
        self.displayhook.get_execution_count = lambda: kernel.execution_count

    def init_gui_pylab(self):
        """Enable GUI event loop integration, taking pylab into account."""

        # Register inline backend as default
        # this is higher priority than matplotlibrc,
        # but lower priority than anything else (mpl.use() for instance).
        # This only affects matplotlib >= 1.5
        if not os.environ.get("MPLBACKEND"):
            os.environ["MPLBACKEND"] = "module://matplotlib_inline.backend_inline"

        # Provide a wrapper for :meth:`InteractiveShellApp.init_gui_pylab`
        # to ensure that any exception is printed straight to stderr.
        # Normally _showtraceback associates the reply with an execution,
        # which means frontends will never draw it, as this exception
        # is not associated with any execute request.

        shell = self.shell
        assert shell is not None
        _showtraceback = shell._showtraceback
        try:
            # replace error-sending traceback with stderr
            def print_tb(etype, evalue, stb):
                print("GUI event loop or pylab initialization failed", file=sys.stderr)
                assert shell is not None
                print(shell.InteractiveTB.stb2text(stb), file=sys.stderr)

            shell._showtraceback = print_tb
            InteractiveShellApp.init_gui_pylab(self)
        finally:
            shell._showtraceback = _showtraceback

    def init_shell(self):
        self.shell = getattr(self.kernel, "shell", None)
        if self.shell:
            self.shell.configurables.append(self)

    def configure_tornado_logger(self):
        """Configure the tornado logging.Logger.

        Must set up the tornado logger or else tornado will call
        basicConfig for the root logger which makes the root logger
        go to the real sys.stderr instead of the capture streams.
        This function mimics the setup of logging.basicConfig.
        """
        logger = logging.getLogger("tornado")
        handler = logging.StreamHandler()
        formatter = logging.Formatter(logging.BASIC_FORMAT)
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    def _init_asyncio_patch(self):
        """set default asyncio policy to be compatible with tornado

        Tornado 6 (at least) is not compatible with the default
        asyncio implementation on Windows

        Pick the older SelectorEventLoopPolicy on Windows
        if the known-incompatible default policy is in use.

        Support for Proactor via a background thread is available in tornado 6.1,
        but it is still preferable to run the Selector in the main thread
        instead of the background.

        do this as early as possible to make it a low priority and overrideable

        ref: https://github.com/tornadoweb/tornado/issues/2608

        FIXME: if/when tornado supports the defaults in asyncio without threads,
               remove and bump tornado requirement for py38.
               Most likely, this will mean a new Python version
               where asyncio.ProactorEventLoop supports add_reader and friends.

        """
        if sys.platform.startswith("win") and sys.version_info >= (3, 8):
            import asyncio

            try:
                from asyncio import (
                    WindowsProactorEventLoopPolicy,
                    WindowsSelectorEventLoopPolicy,
                )
            except ImportError:
                pass
                # not affected
            else:
                if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
                    # WindowsProactorEventLoopPolicy is not compatible with tornado 6
                    # fallback to the pre-3.8 default of Selector
                    asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())

    def init_pdb(self):
        """Replace pdb with IPython's version that is interruptible.

        With the non-interruptible version, stopping pdb() locks up the kernel in a
        non-recoverable state.
        """
        import pdb

        from IPython.core import debugger

        if hasattr(debugger, "InterruptiblePdb"):
            # Only available in newer IPython releases:
            debugger.Pdb = debugger.InterruptiblePdb
            pdb.Pdb = debugger.Pdb  # type:ignore[misc]
            pdb.set_trace = debugger.set_trace

    @catch_config_error
    def initialize(self, argv=None):
        self._init_asyncio_patch()
        super().initialize(argv)
        if self.subapp is not None:
            return

        self.init_pdb()
        self.init_blackhole()
        self.init_connection_file()
        self.init_poller()
        self.init_sockets()
        self.init_heartbeat()
        # writing/displaying connection info must be *after* init_sockets/heartbeat
        self.write_connection_file()
        # Log connection info after writing connection file, so that the connection
        # file is definitely available at the time someone reads the log.
        self.log_connection_info()
        self.init_io()
        try:
            self.init_signal()
        except Exception:
            # Catch exception when initializing signal fails, eg when running the
            # kernel on a separate thread
            if self.log_level < logging.CRITICAL:
                self.log.error("Unable to initialize signal:", exc_info=True)
        self.init_kernel()
        # shell init steps
        self.init_path()
        self.init_shell()
        if self.shell:
            self.init_gui_pylab()
            self.init_extensions()
            self.init_code()
        # flush stdout/stderr, so that anything written to these streams during
        # initialization do not get associated with the first execution request
        sys.stdout.flush()
        sys.stderr.flush()

    def start(self):
        if self.subapp is not None:
            return self.subapp.start()
        if self.poller is not None:
            self.poller.start()
        self.kernel.start()
        self.io_loop = ioloop.IOLoop.current()
        if self.trio_loop:
            from ipykernel.trio_runner import TrioRunner

            tr = TrioRunner()
            tr.initialize(self.kernel, self.io_loop)
            try:
                tr.run()
            except KeyboardInterrupt:
                pass
        else:
            try:
                self.io_loop.start()
            except KeyboardInterrupt:
                pass
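
# Typical entry point: launch_instance() is inherited from traitlets'
# Application base class; this mirrors how `python -m ipykernel_launcher`
# starts the kernel.
if __name__ == "__main__":
    IPKernelApp.launch_instance()
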
Example 6
class BaseImageManager(Configurable):
    """
    Manage the image plots for one FigureManager.
    """
    imshow_options = Dict({}, config=True)
    image_class = Type()

    @default('image_class')
    def default_image_class(self):
        # By defining the default value of image_class dynamically here, we
        # avoid importing matplotlib if some non-matplotlib image_class is
        # specified by configuration.
        from ..artists.mpl.image import Image
        return Image

    def __init__(self, fig_manager, dimensions):
        self.update_config(load_config())
        self.fig_manager = fig_manager
        self.start_doc = None
        # We do not actually do anything with self.dimensions, just stashing it
        # here in case we need it later.
        self.dimensions = dimensions

    def __call__(self, name, start_doc):
        # We do not actually do anything with self.start_doc, just stashing it
        # here in case we need it later.
        self.start_doc = start_doc
        return [], [self.subfactory]

    def subfactory(self, name, descriptor_doc):
        image_keys = {}
        for key, data_key in descriptor_doc['data_keys'].items():
            ndim = len(data_key['shape'] or [])
            # We want to record a shape that will match the arr.shape
            # of the arrays we will see later. Ophyd has been writing
            # incorrect info into descriptors. We try to detect and correct
            # that here.
            if ndim == 2:
                shape = data_key['shape']
                image_keys[key] = shape
            elif ndim == 3:
                # ophyd <1.4.0 gives (x, y, z) where z is 0
                # Maybe the better way to detect this is start['version']['ophyd'].
                if data_key['shape'][-1] == 0:
                    object_keys = descriptor_doc.get('object_keys', {})
                    for object_name, data_keys in object_keys.items():
                        if key in data_keys:
                            object_name = object_name  # used below
                            break
                    else:
                        log.debug("Couldn't find %s in object_keys %r", key, object_keys)
                        # Unable to handle this. Skip it.
                        continue
                    num_images = descriptor_doc['configuration'][object_name]['data'].get('num_images', -1)
                    x, y, _ = data_key['shape']
                    shape = (num_images, y, x)
                    image_keys[key] = shape[1:]  # Stash (y, x) shape alone.
                    log.debug("Patching the shape in the data key for %s"
                              "from %r to %r", key, data_key['shape'], shape)
                else:
                    # Assume we are getting correct metadata.
                    shape = data_key['shape'][1:]  # Stash (y, x) shape alone.
                    image_keys[key] = shape
            else:
                continue
            log.debug('%s has %d-dimensional image of shape %r',
                      key, ndim, shape)

        callbacks = []

        for image_key, shape in image_keys.items():
            caption_desc = f'{" ".join(self.func.__name__.split("_")).capitalize()}'
            figure_label = f'{caption_desc} of {image_key}'
            fig = self.fig_manager.get_figure(
                ('image', image_key), figure_label, 1)

            # If we are reusing an existing figure, it will have a second axis
            # for the colorbar, which we should ignore.
            # This is likely a bit brittle.
            ax, *_possible_colorbar = fig.axes

            log.debug('plot image %s', image_key)

            func = functools.partial(self.func, image_key=image_key)

            image = self.image_class(func, shape=shape, ax=ax, **self.imshow_options)
            callbacks.append(image)

        for callback in callbacks:
            callback('start', self.start_doc)
            callback('descriptor', descriptor_doc)
        return callbacks
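
# Worked example of the ophyd <1.4.0 shape patch above: an (x, y, 0) shape
# from an old descriptor becomes (num_images, y, x), and only (y, x) is kept.
x, y, _ = (1024, 768, 0)      # shape as recorded by old ophyd
num_images = 1                # read from configuration; -1 if missing
shape = (num_images, y, x)    # corrected full shape
image_shape = shape[1:]       # (768, 1024), kept for the Image artist
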
class HydraKernelClient(AsyncKernelClient):
    shell_channel_class = Type(HydraChannel)
    iopub_channel_class = Type(HydraChannel)
    hb_channel_class = Type(HydraHBChannel)