Example 1
 def _setup_stream(self, recv_callback):
     self.iostream = zmqstream.ZMQStream(self.socket)
     self.iostream.on_recv(recv_callback)
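A hedged sketch of a callback that could be passed to _setup_stream(); pyzmq's on_recv() invokes it once per multipart message with the list of frames (the callback name and body here are illustrative only):

def recv_callback(frames):
    # Each element of `frames` is one bytes frame of the received multipart message.
    for frame in frames:
        print(frame)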
Example 2
        if msg[1] == current:
            delta = time.time() - self.tic
            # self.log.debug("heartbeat::heart %r took %.2f ms to respond"%(msg[0], 1000*delta))
            self.responses.add(msg[0])
        elif msg[1] == last:
            delta = time.time() - self.tic + (self.lifetime - self.last_ping)
            self.log.warn(
                "heartbeat::heart %r missed a beat, and took %.2f ms to respond"
                % (msg[0], 1000 * delta))
            self.responses.add(msg[0])
        else:
            self.log.warn(
                "heartbeat::got bad heartbeat (possibly old?): %s (current=%.3f)"
                % (msg[1], self.lifetime))


if __name__ == '__main__':
    loop = ioloop.IOLoop.instance()
    context = zmq.Context()
    pub = context.socket(zmq.PUB)
    pub.bind('tcp://127.0.0.1:5555')
    xrep = context.socket(zmq.ROUTER)
    xrep.bind('tcp://127.0.0.1:5556')

    outstream = zmqstream.ZMQStream(pub, loop)
    instream = zmqstream.ZMQStream(xrep, loop)

    hb = HeartMonitor(loop, outstream, instream)

    loop.start()
Example 3
    def run(self):
        """
        Main method.
        """

        in_stream = zmqstream.ZMQStream(self.incoming, self.ioloop)
        in_stream.on_recv(self.handle_operator)

        # Periodic tasks
        # Heartbeating
        counter = [0]  # read/write closure workaround

        def register():
            """
            Runs periodically and broadcasts a ping message in order to
            discover subscribers.
            """

            self.broadcast('register', 'ping', counter[0],
                           int(time.time() * 1000.0))
            counter[0] += 1

        def heartbeat():
            """
            Runs periodically and pings operators to determine liveness.
            """
            for (oper_id, operator) in self.operators.items():
                if operator.pong_acknowledged:
                    operator.pong_acknowledged = False
                    self.broadcast(oper_id, 'ping', counter[0],
                                   int(time.time() * 1000.0))
                    counter[0] += 1

        # Handle network issues with operators
        def is_alive():
            """
            Runs periodically and updates Operator state given last_seen values.

            Currently, the transition is: ACTIVE -> DISCONNECTED
            We do DISCONNECTED -> ACTIVE inside the pong handler
            since we want no delay on this.
            """
            for operator in self.operators.values():
                if time.time() - operator.last_seen > ALIVE_THRESHOLD:
                    if operator.state == Operator.ACTIVE:
                        operator.state = Operator.DISCONNECTED
                        operator.logger.warn(
                            'Operator %s not responding, updating state.' %
                            operator.name)

        ioloop.PeriodicCallback(register, 1000, self.ioloop).start()
        ioloop.PeriodicCallback(heartbeat, 3000, self.ioloop).start()
        ioloop.PeriodicCallback(is_alive, 5000, self.ioloop).start()

        # Exception handlers
        def handle_interrupt(_):
            self.commander_logger.info('Caught CTRL-C')

        def handle_exception(_):
            self.commander_logger.exception('Unhandled exception @ run():')

        self.add_exception_handler(KeyboardInterrupt, handle_interrupt)
        self.add_exception_handler(Exception, handle_exception)

        # Shutdown handler
        def on_shutdown():
            # Shutdown
            self.commander_logger.info('Shutting down')
            self.commander_logger.disabled = True
            self.operator_logger.disabled = True
            self.incoming.close()
            in_stream.close()
            self.outgoing.close()
            self.context.term()
            self.event_writer.close()

        self.add_shutdown_handler(on_shutdown)
        self.commander_logger.info('Entered message loop')
        self.commander_logger.info('Binding to %s' % self.pub_addr)
        self.commander_logger.info('Binding to %s' % self.pull_addr)
        self.start()
Example 4
def log_queue():
    socket = context.socket(zmq.SUB)
    socket.connect(INTERNAL_SOCKET)
    socket.setsockopt(zmq.SUBSCRIBE, b'')
    loop = zmqstream.ZMQStream(socket, io_loop=io_loop)
    loop.on_recv(logger)
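The `logger` callback referenced above is not shown; a purely illustrative stand-in that forwards each received frame to the standard logging module might look like this:

import logging

def logger(frames):
    # on_recv() delivers each multipart message as a list of bytes frames.
    for frame in frames:
        logging.info(frame.decode('utf-8', errors='replace'))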
Example 5
    def complete_registration(self, msg, connect, maybe_tunnel):
        # print msg
        self._abort_dc.stop()
        ctx = self.context
        loop = self.loop
        identity = self.bident
        idents, msg = self.session.feed_identities(msg)
        msg = self.session.unserialize(msg)
        content = msg['content']
        info = self.connection_info

        def url(key):
            """get zmq url for given channel"""
            return str(info["interface"] + ":%i" % info[key])

        if content['status'] == 'ok':
            self.id = int(content['id'])

            # launch heartbeat
            # possibly forward hb ports with tunnels
            hb_ping = maybe_tunnel(url('hb_ping'))
            hb_pong = maybe_tunnel(url('hb_pong'))

            heart = Heart(hb_ping, hb_pong, heart_id=identity)
            heart.start()

            # create Shell Connections (MUX, Task, etc.):
            shell_addrs = url('mux'), url('task')

            # Use only one shell stream for mux and tasks
            stream = zmqstream.ZMQStream(ctx.socket(zmq.ROUTER), loop)
            stream.setsockopt(zmq.IDENTITY, identity)
            shell_streams = [stream]
            for addr in shell_addrs:
                connect(stream, addr)

            # control stream:
            control_addr = url('control')
            control_stream = zmqstream.ZMQStream(ctx.socket(zmq.ROUTER), loop)
            control_stream.setsockopt(zmq.IDENTITY, identity)
            connect(control_stream, control_addr)

            # create iopub stream:
            iopub_addr = url('iopub')
            iopub_socket = ctx.socket(zmq.PUB)
            iopub_socket.setsockopt(zmq.IDENTITY, identity)
            connect(iopub_socket, iopub_addr)

            # disable history:
            self.config.HistoryManager.hist_file = ':memory:'

            # Redirect input streams and set a display hook.
            if self.out_stream_factory:
                sys.stdout = self.out_stream_factory(self.session,
                                                     iopub_socket, u'stdout')
                sys.stdout.topic = cast_bytes('engine.%i.stdout' % self.id)
                sys.stderr = self.out_stream_factory(self.session,
                                                     iopub_socket, u'stderr')
                sys.stderr.topic = cast_bytes('engine.%i.stderr' % self.id)
            if self.display_hook_factory:
                sys.displayhook = self.display_hook_factory(
                    self.session, iopub_socket)
                sys.displayhook.topic = cast_bytes('engine.%i.pyout' % self.id)

            self.kernel = Kernel(config=self.config,
                                 int_id=self.id,
                                 ident=self.ident,
                                 session=self.session,
                                 control_stream=control_stream,
                                 shell_streams=shell_streams,
                                 iopub_socket=iopub_socket,
                                 loop=loop,
                                 user_ns=self.user_ns,
                                 log=self.log)

            self.kernel.shell.display_pub.topic = cast_bytes(
                'engine.%i.displaypub' % self.id)

            # FIXME: This is a hack until IPKernelApp and IPEngineApp can be fully merged
            app = IPKernelApp(config=self.config,
                              shell=self.kernel.shell,
                              kernel=self.kernel,
                              log=self.log)
            app.init_profile_dir()
            app.init_code()

            self.kernel.start()
        else:
            self.log.fatal("Registration Failed: %s" % msg)
            raise Exception("Registration Failed: %s" % msg)

        self.log.info("Completed registration with id %i" % self.id)
Example 6
    def run(self):
        """
        Starts the Tornado web server and ZeroMQ server.
        """

        settings = {
            "debug": True,
            "cookie_secret": base64.b64encode(uuid.uuid4().bytes +
                                              uuid.uuid4().bytes),
            "login_url": "/login",
        }

        ssl_options = {}

        try:
            cloud_config = Config.instance().get_section_config("CLOUD_SERVER")

            cloud_settings = {
                "required_user": cloud_config['WEB_USERNAME'],
                "required_pass": cloud_config['WEB_PASSWORD'],
            }

            settings.update(cloud_settings)

            if cloud_config["SSL_ENABLED"] == "yes":
                ssl_options = {
                    "certfile": cloud_config["SSL_CRT"],
                    "keyfile": cloud_config["SSL_KEY"],
                }

                log.info("Certs found - starting in SSL mode")
        except KeyError:
            log.info("Missing cloud.conf - disabling HTTP auth and SSL")

        router = self._create_zmq_router()
        # Add our JSON-RPC Websocket handler to Tornado
        self.handlers.extend([(r"/", JSONRPCWebSocket, dict(zmq_router=router))
                              ])
        if hasattr(sys, "frozen"):
            templates_dir = "templates"
        else:
            templates_dir = pkg_resources.resource_filename(
                "gns3server", "templates")
        tornado_app = tornado.web.Application(self.handlers,
                                              template_path=templates_dir,
                                              **settings)  # FIXME: debug mode!

        try:
            print("Starting server on {}:{} (Tornado v{}, PyZMQ v{}, ZMQ v{})".
                  format(self._host, self._port, tornado.version,
                         zmq.__version__, zmq.zmq_version()))
            kwargs = {"address": self._host}

            if ssl_options:
                kwargs["ssl_options"] = ssl_options

            if parse_version(tornado.version) >= parse_version("3.1"):
                kwargs["max_buffer_size"] = 524288000  # 500 MB file upload limit
            tornado_app.listen(self._port, **kwargs)
        except OSError as e:
            if e.errno == errno.EADDRINUSE:  # socket already in use
                logging.critical("socket in use for {}:{}".format(
                    self._host, self._port))
                self._cleanup(graceful=False)

        ioloop = tornado.ioloop.IOLoop.instance()
        self._stream = zmqstream.ZMQStream(router, ioloop)
        self._stream.on_recv_stream(JSONRPCWebSocket.dispatch_message)
        tornado.autoreload.add_reload_hook(self._reload_callback)

        def signal_handler(signum=None, frame=None):
            try:
                log.warning("Server got signal {}, exiting...".format(signum))
                self._cleanup(signum)
            except RuntimeError:
                # to ignore logging exception: RuntimeError: reentrant call inside <_io.BufferedWriter name='<stderr>'>
                pass

        signals = [signal.SIGTERM, signal.SIGINT]
        if not sys.platform.startswith("win"):
            signals.extend([signal.SIGHUP, signal.SIGQUIT])
        else:
            signals.extend([signal.SIGBREAK])
        for sig in signals:
            signal.signal(sig, signal_handler)

        try:
            ioloop.start()
        except (KeyboardInterrupt, SystemExit):
            print("\nExiting...")
            self._cleanup()
Example 7
 def publish(self, addr):
     socket_pub = self._ctx.socket(zmq.PUB)
     socket_pub.bind(addr)
     return zmqstream.ZMQStream(socket_pub)
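A hypothetical use of the returned stream, with `node` standing in for an instance of the class that defines publish(); the address and payload are assumptions:

pub_stream = node.publish('tcp://127.0.0.1:5560')
pub_stream.send_multipart([b'topic', b'payload'])  # ZMQStream schedules the send on its IO loop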
Example 8
 def setup_stream():
     self.stream = zmqstream.ZMQStream(self.socket, self.ioloop)
     self.stream.on_recv(self._handle_recv)
     evt.set()
Example 9
    def complete_registration(self, msg, connect, maybe_tunnel):
        # print msg
        self._abort_dc.stop()
        ctx = self.context
        loop = self.loop
        identity = self.bident
        idents, msg = self.session.feed_identities(msg)
        msg = Message(self.session.unserialize(msg))

        if msg.content.status == 'ok':
            self.id = int(msg.content.id)

            # launch heartbeat
            hb_addrs = msg.content.heartbeat

            # possibly forward hb ports with tunnels
            hb_addrs = [maybe_tunnel(addr) for addr in hb_addrs]
            heart = Heart(*list(map(str, hb_addrs)), heart_id=identity)
            heart.start()

            # create Shell Streams (MUX, Task, etc.):
            queue_addr = msg.content.mux
            shell_addrs = [str(queue_addr)]
            task_addr = msg.content.task
            if task_addr:
                shell_addrs.append(str(task_addr))

            # Uncomment this to go back to two-socket model
            # shell_streams = []
            # for addr in shell_addrs:
            #     stream = zmqstream.ZMQStream(ctx.socket(zmq.ROUTER), loop)
            #     stream.setsockopt(zmq.IDENTITY, identity)
            #     stream.connect(disambiguate_url(addr, self.location))
            #     shell_streams.append(stream)

            # Now use only one shell stream for mux and tasks
            stream = zmqstream.ZMQStream(ctx.socket(zmq.ROUTER), loop)
            stream.setsockopt(zmq.IDENTITY, identity)
            shell_streams = [stream]
            for addr in shell_addrs:
                connect(stream, addr)
            # end single stream-socket

            # control stream:
            control_addr = str(msg.content.control)
            control_stream = zmqstream.ZMQStream(ctx.socket(zmq.ROUTER), loop)
            control_stream.setsockopt(zmq.IDENTITY, identity)
            connect(control_stream, control_addr)

            # create iopub stream:
            iopub_addr = msg.content.iopub
            iopub_socket = ctx.socket(zmq.PUB)
            iopub_socket.setsockopt(zmq.IDENTITY, identity)
            connect(iopub_socket, iopub_addr)

            # disable history:
            self.config.HistoryManager.hist_file = ':memory:'

            # Redirect input streams and set a display hook.
            if self.out_stream_factory:
                sys.stdout = self.out_stream_factory(self.session,
                                                     iopub_socket, 'stdout')
                sys.stdout.topic = cast_bytes('engine.%i.stdout' % self.id)
                sys.stderr = self.out_stream_factory(self.session,
                                                     iopub_socket, 'stderr')
                sys.stderr.topic = cast_bytes('engine.%i.stderr' % self.id)
            if self.display_hook_factory:
                sys.displayhook = self.display_hook_factory(
                    self.session, iopub_socket)
                sys.displayhook.topic = cast_bytes('engine.%i.pyout' % self.id)

            self.kernel = Kernel(config=self.config,
                                 int_id=self.id,
                                 ident=self.ident,
                                 session=self.session,
                                 control_stream=control_stream,
                                 shell_streams=shell_streams,
                                 iopub_socket=iopub_socket,
                                 loop=loop,
                                 user_ns=self.user_ns,
                                 log=self.log)

            self.kernel.shell.display_pub.topic = cast_bytes(
                'engine.%i.displaypub' % self.id)

            # FIXME: This is a hack until IPKernelApp and IPEngineApp can be fully merged
            app = IPKernelApp(config=self.config,
                              shell=self.kernel.shell,
                              kernel=self.kernel,
                              log=self.log)
            app.init_profile_dir()
            app.init_code()

            self.kernel.start()
        else:
            self.log.fatal("Registration Failed: %s" % msg)
            raise Exception("Registration Failed: %s" % msg)

        self.log.info("Completed registration with id %i" % self.id)
Example 10
    secure_key,
    digestmod=signature_schemes[config["signature_scheme"]])
execution_count = 1

##########################################
# Heartbeat:
ctx = zmq.Context()
heartbeat_socket = ctx.socket(zmq.REP)
config["hb_port"] = bind(heartbeat_socket, connection, config["hb_port"])

##########################################
# IOPub/Sub:
# also called SubSocketChannel in IPython sources
iopub_socket = ctx.socket(zmq.PUB)
config["iopub_port"] = bind(iopub_socket, connection, config["iopub_port"])
iopub_stream = zmqstream.ZMQStream(iopub_socket)
# iopub_stream.on_recv(iopub_handler)

##########################################
# Control:
control_socket = ctx.socket(zmq.ROUTER)
config["control_port"] = bind(control_socket, connection, config["control_port"])
control_stream = zmqstream.ZMQStream(control_socket)
control_stream.on_recv(control_handler)

##########################################
# Stdin:
stdin_socket = ctx.socket(zmq.ROUTER)
config["stdin_port"] = bind(stdin_socket, connection, config["stdin_port"])
stdin_stream = zmqstream.ZMQStream(stdin_socket)
# stdin_stream.on_recv(stdin_handler)
Example 11
 def _subscribe_setup(self):
     self.subscribe = self.context.socket(SUB)
     self.sub_stream = zmqstream.ZMQStream(self.subscribe)
     self.sub_stream.on_recv(self._on_subscribe)
     self.subscriptions = set([])
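A hypothetical companion method (not part of the original class) that records a new subscription in the `subscriptions` set kept above; it assumes SUBSCRIBE is imported from zmq alongside SUB:

 def subscribe_topic(self, topic):
     """Register interest in a topic (bytes) on the SUB socket."""
     self.subscribe.setsockopt(SUBSCRIBE, topic)
     self.subscriptions.add(topic)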
Example 12
    def stream(self,
               sock_type,
               addr,
               bind,
               callback=None,
               subscribe=b'',
               identity=None):
        """
        Creates a :class:`~zmq.eventloop.zmqstream.ZMQStream`.

        :param sock_type: The ØMQ socket type (e.g. ``zmq.REQ``)
        :param addr: Address to bind or connect to formatted as *host:port*,
                *(host, port)* or *host* (bind to random port).
                If *bind* is ``True``, *host* may be:

                - the wild-card ``*``, meaning all available interfaces,
                - the primary IPv4 address assigned to the interface, in its
                  numeric representation or
                - the interface name as defined by the operating system.

                If *bind* is ``False``, *host* may be:

                - the DNS name of the peer or
                - the IPv4 address of the peer, in its numeric representation.

                If *addr* is just a host name without a port and *bind* is
                ``True``, the socket will be bound to a random port.
        :param bind: Binds to *addr* if ``True`` or tries to connect to it
                otherwise.
        :param callback: A callback for
                :meth:`~zmq.eventloop.zmqstream.ZMQStream.on_recv`, optional
        :param subscribe: Subscription pattern for *SUB* sockets, optional,
                defaults to ``b''``.
        :returns: A tuple containing the stream and the port number.

        """
        sock = self.context.socket(sock_type)

        sock.identity = (u"%s" % identity).encode('ascii')

        # addr may be 'host:port' or ('host', port)
        if isinstance(addr, str):
            addr = addr.split(':')
        host, port = addr if len(addr) == 2 else (addr[0], None)

        # Bind/connect the socket
        if bind:
            if port:
                sock.bind('tcp://%s:%s' % (host, port))
            else:
                port = sock.bind_to_random_port('tcp://%s' % host)
        else:
            print("Debug:{}:{}".format(host, port))
            sock.connect('tcp://%s:%s' % (host, port))

        # Add a default subscription for SUB sockets
        if sock_type == zmq.SUB:
            print("Subscribed to: {}".format(subscribe))
            sock.setsockopt(zmq.SUBSCRIBE, subscribe)

        # Create the stream and add the callback
        stream = zmqstream.ZMQStream(sock, self.loop)
        if callback:
            stream.on_recv(callback)

        return stream, int(port)
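A hedged usage sketch of this stream() helper, assuming `node` is an instance of the class that defines it; the addresses, identity, and callback names are illustrative only:

import zmq

# Bind a ROUTER socket to a random port on all interfaces; the chosen port is returned.
router_stream, router_port = node.stream(zmq.ROUTER, '*', bind=True,
                                         callback=node.handle_request,
                                         identity='node-1')

# Connect a SUB socket to a known publisher; the default subscribe=b'' receives every topic.
sub_stream, sub_port = node.stream(zmq.SUB, 'broker.example.org:5556',
                                   bind=False, callback=node.handle_update)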
Example 13
def cmd_line():
    """
    Entry point for the 'spaghetti' command line script.
    """
    from tornado.options import define, options
    from zmq.eventloop import ioloop, zmqstream
    import os
    ioloop.install()

    # define spaghetti options
    define("zmq_socks",
           default="tcp://127.0.0.1:8766",
           multiple=True,
           help="Comma separated list of zmq sockets")
    define("http_host", default="127.0.0.1")
    define("http_port", default=8765)
    template_path = os.path.join(os.path.dirname(__file__), "templates")
    define("template_path", default=template_path)
    static_path = os.path.join(os.path.dirname(__file__), "static")
    define("static_path", default=static_path)
    define("debug", default=False)

    # parse options from config files and command line
    if os.path.exists(MAIN_CONF):
        tornado.options.parse_config_file(MAIN_CONF, final=False)
    if os.path.exists(os.path.expanduser(USER_CONF)):
        tornado.options.parse_config_file(os.path.expanduser(USER_CONF),
                                          final=False)
    tornado.options.parse_command_line()
    if not isinstance(options.zmq_socks, list):
        zsocks = [options.zmq_socks]
    else:
        zsocks = options.zmq_socks
    if options.debug:
        logger.info("running in debug mode")
    logger.debug("Template path: %s" % (options.template_path, ))
    logger.debug("Static path: %s" % (options.static_path, ))

    # Create the app
    app = SpaghettiApplication(debug=options.debug,
                               static_path=options.static_path,
                               template_path=options.template_path)
    # HTTP stuff
    app.listen(options.http_port, address=options.http_host)
    app.http_host = options.http_host
    app.http_port = options.http_port

    # ZMQ stuff
    context = zmq.Context()
    socket = context.socket(zmq.PULL)
    for _zsock in zsocks:
        logger.info("binding %s" % (_zsock, ))
        socket.bind(_zsock)
    zstream = zmqstream.ZMQStream(socket)
    zstream.on_recv(app.update_channel)

    # start the event loop
    logger.info("starting io loop")
    logger.info("http host: " + str(options.http_host))
    logger.info("http port: " + str(options.http_port))
    ioloop.IOLoop.instance().start()
Example 14
def create_msg_channel(channel, topic):  # pragma: no cover
    context = zmq.Context()
    socket = context.socket(zmq.SUB)
    socket.connect(channel)
    socket.setsockopt(zmq.SUBSCRIBE, topic)
    return zmqstream.ZMQStream(socket)
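Possible usage of create_msg_channel(); the endpoint, topic, and handler are assumptions, and the loop is the usual tornado/pyzmq one:

from zmq.eventloop import ioloop

stream = create_msg_channel('tcp://127.0.0.1:5556', b'metrics')
stream.on_recv(lambda frames: print(frames))  # frames is a list of bytes
ioloop.IOLoop.instance().start()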
Example 15
 def start(self):
     """Start ZAP authentication"""
     super(IOLoopAuthenticator, self).start()
     self.zap_stream = zmqstream.ZMQStream(self.zap_socket, self.io_loop)
     self.zap_stream.on_recv(self.handle_zap_message)
Example 16
import random
import time

import zmq
from zmq.eventloop import ioloop, zmqstream

ioloop.install()

context   = zmq.Context(1)
socket    = context.socket(zmq.PUB)
# Avoid killing the server with requests

if "RCVHWM" in dir(zmq):
  socket.setsockopt(zmq.RCVHWM, 2000)
if "SNDHWM" in dir(zmq):
  socket.setsockopt(zmq.SNDHWM, 2000)
if "HWM" in dir(zmq):
  socket.setsockopt(zmq.HWM, 2000)

publisher = zmqstream.ZMQStream(socket)
socket.bind("tcp://127.0.0.1:22620")

counter = 0 

def publish():
  # print("D:")
  global counter
  counter += 1
  ip_to_ban = "10.%d.%d.%d" % (int(random.random() * 255),
                               int(random.random() * 255),
                               int(random.random() * 255))
  publisher.send_multipart((b"swabber_bans", ip_to_ban.encode()))
  print(ip_to_ban)
  time.sleep(0.001)
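The snippet above defines publish() but never schedules it; a minimal way to drive it from the tornado loop (the 10 ms interval is an assumption) would be:

if __name__ == "__main__":
  ioloop.PeriodicCallback(publish, 10).start()  # call publish() every 10 ms
  ioloop.IOLoop.instance().start()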
Example 17
    def __init__(self, overlayList, displayCtx, frame):
        """Set up the kernel and ``zmq`` ports. A jupyter connection file
        containing the information needed to connect to the kernel is saved
        to a temporary file - its path is accessed as an attribute
        called :attr:`connfile`.

        :arg overlayList: The :class:`.OverlayList`.
        :arg displayCtx:  The master :class:`.DisplayContext`.
        :arg frame:       The :class:`.FSLeyesFrame`.
        """

        ip = '127.0.0.1'
        transport = 'tcp'
        addr = '{}://{}'.format(transport, ip)
        self.__connfile = None
        self.__kernel = None
        self.__error = None
        self.__lastIter = 0
        self.__overlayList = overlayList
        self.__displayCtx = displayCtx
        self.__frame = frame
        self.__env = runscript.fsleyesScriptEnvironment(
            frame, overlayList, displayCtx)[1]

        self.__env['screenshot'] = self.__screenshot

        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=DeprecationWarning)

            # Use an empty key to disable message signing
            session = jcsession.Session(key=b'')
            context = zmq.Context.instance()

            # create sockets for kernel communication
            shellsock = context.socket(zmq.ROUTER)
            stdinsock = context.socket(zmq.ROUTER)
            controlsock = context.socket(zmq.ROUTER)
            iopubsock = context.socket(zmq.PUB)

            shellstrm = zmqstream.ZMQStream(shellsock)
            controlstrm = zmqstream.ZMQStream(controlsock)

            # I/O and heartbeat communication
            # are managed by separate threads.
            self.__iopub = iostream.IOPubThread(iopubsock)
            self.__heartbeat = heartbeat.Heartbeat(zmq.Context(),
                                                   (transport, ip, 0))
            iopubsock = self.__iopub.background_socket

            self.__heartbeat.start()
            self.__iopub.start()

            # Streams which redirect stdout/
            # stderr to the iopub socket
            stdout = iostream.OutStream(session, self.__iopub, u'stdout')
            stderr = iostream.OutStream(session, self.__iopub, u'stderr')

            # TCP ports for all sockets
            shellport = shellsock.bind_to_random_port(addr)
            stdinport = stdinsock.bind_to_random_port(addr)
            controlport = controlsock.bind_to_random_port(addr)
            iopubport = iopubsock.bind_to_random_port(addr)
            hbport = self.__heartbeat.port

            # Create the kernel
            self.__kernel = FSLeyesIPythonKernel.instance(
                stdout,
                stderr,
                shell_class=FSLeyesIPythonShell,
                session=session,
                shell_streams=[shellstrm, controlstrm],
                iopub_socket=iopubsock,
                stdin_socket=stdinsock,
                user_ns=self.__env,
                log=logging.getLogger('ipykernel.kernelbase'))

        # write connection file to a temp dir
        hd, fname = tempfile.mkstemp(prefix='fsleyes-kernel-{}.json'.format(
            os.getpid()),
                                     suffix='.json')
        os.close(hd)

        self.__connfile = fname

        log.debug('IPython kernel connection file: %s', fname)

        jc.write_connection_file(fname,
                                 shell_port=shellport,
                                 stdin_port=stdinport,
                                 iopub_port=iopubport,
                                 control_port=controlport,
                                 hb_port=hbport,
                                 ip=ip)

        atexit.register(os.remove, self.__connfile)
Example 18
 def complete_registration(self, msg):
     # print msg
     self._abort_dc.stop()
     ctx = self.context
     loop = self.loop
     identity = self.ident
     
     idents,msg = self.session.feed_identities(msg)
     msg = Message(self.session.unpack_message(msg))
     
     if msg.content.status == 'ok':
         self.id = int(msg.content.id)
         
         # create Shell Streams (MUX, Task, etc.):
         queue_addr = msg.content.mux
         shell_addrs = [ str(queue_addr) ]
         task_addr = msg.content.task
         if task_addr:
             shell_addrs.append(str(task_addr))
         
         # Uncomment this to go back to two-socket model
         # shell_streams = []
         # for addr in shell_addrs:
         #     stream = zmqstream.ZMQStream(ctx.socket(zmq.XREP), loop)
         #     stream.setsockopt(zmq.IDENTITY, identity)
         #     stream.connect(disambiguate_url(addr, self.location))
         #     shell_streams.append(stream)
         
         # Now use only one shell stream for mux and tasks
         stream = zmqstream.ZMQStream(ctx.socket(zmq.XREP), loop)
         stream.setsockopt(zmq.IDENTITY, identity)
         shell_streams = [stream]
         for addr in shell_addrs:
             stream.connect(disambiguate_url(addr, self.location))
         # end single stream-socket
         
         # control stream:
         control_addr = str(msg.content.control)
         control_stream = zmqstream.ZMQStream(ctx.socket(zmq.XREP), loop)
         control_stream.setsockopt(zmq.IDENTITY, identity)
         control_stream.connect(disambiguate_url(control_addr, self.location))
         
         # create iopub stream:
         iopub_addr = msg.content.iopub
         iopub_stream = zmqstream.ZMQStream(ctx.socket(zmq.PUB), loop)
         iopub_stream.setsockopt(zmq.IDENTITY, identity)
         iopub_stream.connect(disambiguate_url(iopub_addr, self.location))
         
         # launch heartbeat
         hb_addrs = msg.content.heartbeat
         # print (hb_addrs)
         
         # # Redirect input streams and set a display hook.
         if self.out_stream_factory:
             sys.stdout = self.out_stream_factory(self.session, iopub_stream, u'stdout')
             sys.stdout.topic = 'engine.%i.stdout'%self.id
             sys.stderr = self.out_stream_factory(self.session, iopub_stream, u'stderr')
             sys.stderr.topic = 'engine.%i.stderr'%self.id
         if self.display_hook_factory:
             sys.displayhook = self.display_hook_factory(self.session, iopub_stream)
             sys.displayhook.topic = 'engine.%i.pyout'%self.id
         
         self.kernel = Kernel(config=self.config, int_id=self.id, ident=self.ident, session=self.session, 
                 control_stream=control_stream, shell_streams=shell_streams, iopub_stream=iopub_stream, 
                 loop=loop, user_ns = self.user_ns, logname=self.log.name)
         self.kernel.start()
         hb_addrs = [ disambiguate_url(addr, self.location) for addr in hb_addrs ]
         heart = Heart(*map(str, hb_addrs), heart_id=identity)
         # ioloop.DelayedCallback(heart.start, 1000, self.loop).start()
         heart.start()
         
         
     else:
         self.log.fatal("Registration Failed: %s"%msg)
         raise Exception("Registration Failed: %s"%msg)
     
     self.log.info("Completed registration with id %i"%self.id)
Example 19
    LOCAL_OUTPUT = 'ipc:///tmp/message_flow_out'

    import zmq

    # https://zeromq.github.io/pyzmq/eventloop.html
    from zmq.eventloop import ioloop, zmqstream

    ioloop.install()

    sub = ctx.socket(zmq.SUB)
    sub.connect(LOCAL_OUTPUT)
    sub.setsockopt(zmq.SUBSCRIBE, b'')

    print('[websocket_server] Broadcasting {} to all websockets'.format(
        LOCAL_OUTPUT))
    stream = zmqstream.ZMQStream(sub)
    stream.on_recv(WebSocket.broadcast)

    server = web.Application([
        (r'/websocket', WebSocket),
    ])
    server.listen(PORT)

    # We send a heartbeat every 45 seconds to make sure that nginx
    # proxy does not time out and close the connection
    ioloop.PeriodicCallback(WebSocket.heartbeat, 45000).start()

    print(
        '[websocket_server] Listening for incoming websocket connections on port {}'
        .format(PORT))
    ioloop.IOLoop.instance().start()
Example 20
 def setUp(self):
     self.context = zmq.Context()
     self.socket = self.context.socket(zmq.REP)
     self.loop = ioloop.IOLoop.instance()
     self.stream = zmqstream.ZMQStream(self.socket)
Example 21
 def _init_stream(self):
     self.stream = zmqstream.ZMQStream(self.ctrl_socket, self.loop)
     self.stream.on_recv(self.handle_message)
Example 22
    def handle_pong(self, msg):
        "a heart just beat"
        current = str_to_bytes(str(self.lifetime))
        last = str_to_bytes(str(self.last_ping))
        if msg[1] == current:
            delta = time.time()-self.tic
            # self.log.debug("heartbeat::heart %r took %.2f ms to respond"%(msg[0], 1000*delta))
            self.responses.add(msg[0])
        elif msg[1] == last:
            delta = time.time()-self.tic + (self.lifetime-self.last_ping)
            self.log.warn("heartbeat::heart %r missed a beat, and took %.2f ms to respond", msg[0], 1000*delta)
            self.responses.add(msg[0])
        else:
            self.log.warn("heartbeat::got bad heartbeat (possibly old?): %s (current=%.3f)", msg[1], self.lifetime)


if __name__ == '__main__':
    loop = ioloop.IOLoop.instance()
    context = zmq.Context()
    pub = context.socket(zmq.PUB)
    pub.bind('tcp://127.0.0.1:5555')
    router = context.socket(zmq.ROUTER)
    router.bind('tcp://127.0.0.1:5556')

    outstream = zmqstream.ZMQStream(pub, loop)
    instream = zmqstream.ZMQStream(router, loop)

    hb = HeartMonitor(loop, outstream, instream)

    loop.start()
Example 23
#!/usr/bin/env python
"""Adapted echo.py to put the send in the event loop using a ZMQStream.

Authors
-------
* MinRK
"""

import zmq
from zmq.eventloop import ioloop, zmqstream

loop = ioloop.IOLoop.instance()

ctx = zmq.Context()
s = ctx.socket(zmq.REP)
s.bind('tcp://127.0.0.1:5555')
stream = zmqstream.ZMQStream(s, loop)


def echo(msg):
    print " ".join(msg)
    stream.send_multipart(msg)


stream.on_recv(echo)

loop.start()
Example 24
 def read_forever(self, socket, callback, copy=False):
     stream = zmqstream.ZMQStream(socket, io_loop=self.io_loop)
     stream.on_recv(callback, copy=copy)
     stream.on_send(functools.partial(self.send_callback, Counter()))
     return stream
Example 25
    def complete_registration(self, msg, connect, maybe_tunnel):
        # print msg
        self.loop.remove_timeout(self._abort_timeout)
        ctx = self.context
        loop = self.loop
        identity = self.bident
        idents,msg = self.session.feed_identities(msg)
        msg = self.session.deserialize(msg)
        content = msg['content']
        info = self.connection_info
        
        def url(key):
            """get zmq url for given channel"""
            return str(info["interface"] + ":%i" % info[key])
        
        if content['status'] == 'ok':
            self.id = int(content['id'])

            # launch heartbeat
            # possibly forward hb ports with tunnels
            hb_ping = maybe_tunnel(url('hb_ping'))
            hb_pong = maybe_tunnel(url('hb_pong'))
            
            hb_monitor = None
            if self.max_heartbeat_misses > 0:
                # Add a monitor socket which will record the last time a ping was seen
                mon = self.context.socket(zmq.SUB)
                mport = mon.bind_to_random_port('tcp://%s' % localhost())
                mon.setsockopt(zmq.SUBSCRIBE, b"")
                self._hb_listener = zmqstream.ZMQStream(mon, self.loop)
                self._hb_listener.on_recv(self._report_ping)
            
            
                hb_monitor = "tcp://%s:%i" % (localhost(), mport)

            heart = Heart(hb_ping, hb_pong, hb_monitor, heart_id=identity)
            heart.start()

            # create Shell Connections (MUX, Task, etc.):
            shell_addrs = url('mux'), url('task')

            # Use only one shell stream for mux and tasks
            stream = zmqstream.ZMQStream(ctx.socket(zmq.ROUTER), loop)
            stream.setsockopt(zmq.IDENTITY, identity)
            shell_streams = [stream]
            for addr in shell_addrs:
                connect(stream, addr)

            # control stream:
            control_addr = url('control')
            control_stream = zmqstream.ZMQStream(ctx.socket(zmq.ROUTER), loop)
            control_stream.setsockopt(zmq.IDENTITY, identity)
            connect(control_stream, control_addr)

            # create iopub stream:
            iopub_addr = url('iopub')
            iopub_socket = ctx.socket(zmq.PUB)
            iopub_socket.setsockopt(zmq.IDENTITY, identity)
            connect(iopub_socket, iopub_addr)

            # disable history:
            self.config.HistoryManager.hist_file = ':memory:'
            
            # Redirect input streams and set a display hook.
            if self.out_stream_factory:
                sys.stdout = self.out_stream_factory(self.session, iopub_socket, u'stdout')
                sys.stdout.topic = cast_bytes('engine.%i.stdout' % self.id)
                sys.stderr = self.out_stream_factory(self.session, iopub_socket, u'stderr')
                sys.stderr.topic = cast_bytes('engine.%i.stderr' % self.id)
            if self.display_hook_factory:
                sys.displayhook = self.display_hook_factory(self.session, iopub_socket)
                sys.displayhook.topic = cast_bytes('engine.%i.execute_result' % self.id)

            self.kernel = Kernel(parent=self, int_id=self.id, ident=self.ident, session=self.session,
                    control_stream=control_stream, shell_streams=shell_streams, iopub_socket=iopub_socket,
                    loop=loop, user_ns=self.user_ns, log=self.log)
            
            self.kernel.shell.display_pub.topic = cast_bytes('engine.%i.displaypub' % self.id)
            
                
            # periodically check the heartbeat pings of the controller
            # Should be started here and not in "start()" so that the right period can be taken 
            # from the hubs HeartBeatMonitor.period
            if self.max_heartbeat_misses > 0:
                # Use a slightly bigger check period than the hub signal period so as not to warn unnecessarily
                self.hb_check_period = int(content['hb_period'])+10
                self.log.info("Starting to monitor the heartbeat signal from the hub every %i ms." , self.hb_check_period)
                self._hb_reporter = ioloop.PeriodicCallback(self._hb_monitor, self.hb_check_period, self.loop)
                self._hb_reporter.start()
            else:
                self.log.info("Monitoring of the heartbeat signal from the hub is not enabled.")

            
            # FIXME: This is a hack until IPKernelApp and IPEngineApp can be fully merged
            app = IPKernelApp(parent=self, shell=self.kernel.shell, kernel=self.kernel, log=self.log)
            app.init_profile_dir()
            app.init_code()
            
            self.kernel.start()
        else:
            self.log.fatal("Registration Failed: %s"%msg)
            raise Exception("Registration Failed: %s"%msg)

        self.log.info("Completed registration with id %i"%self.id)
Example 26
    def __init__(self,
                 broker=DEFAULT_FRONTEND,
                 ping_delay=10.,
                 ping_retries=3,
                 params=None,
                 timeout=DEFAULT_TIMEOUT_MOVF,
                 max_age=DEFAULT_MAX_AGE,
                 max_age_delta=DEFAULT_MAX_AGE_DELTA):
        logger.debug('Initializing the agent.')
        self.debug = logger.isEnabledFor(logging.DEBUG)
        self.params = params
        self.pid = os.getpid()
        self.timeout = timeout
        self.max_age = max_age
        self.max_age_delta = max_age_delta
        self.env = os.environ.copy()
        self.running = False
        self._workers = {}
        self._max_id = defaultdict(int)

        self.loop = ioloop.IOLoop()
        self.ctx = zmq.Context()

        # Setup the zmq sockets

        # Let's ask the broker its options
        self.broker = broker
        client = Client(self.broker)
        result = client.ping()
        self.endpoints = result['endpoints']

        # backend socket - used to receive work from the broker
        self._backend = self.ctx.socket(zmq.REP)
        self._backend.identity = str(self.pid)
        self._backend.connect(self.endpoints['backend'])

        # register socket - used to register into the broker
        self._reg = self.ctx.socket(zmq.PUSH)
        self._reg.connect(self.endpoints['register'])

        # heartbeat socket - used to check if the broker is alive
        heartbeat = self.endpoints.get('heartbeat')

        if heartbeat is not None:
            self.ping = Stethoscope(heartbeat,
                                    onbeatlost=self.lost,
                                    delay=ping_delay,
                                    retries=ping_retries,
                                    ctx=self.ctx,
                                    io_loop=self.loop,
                                    onregister=self.register)
        else:
            self.ping = None

        # Setup the zmq streams.
        self._backstream = zmqstream.ZMQStream(self._backend, self.loop)
        self._backstream.on_recv(self._handle_recv_back)

        self._check = ioloop.PeriodicCallback(self._check_proc,
                                              ping_delay * 1000,
                                              io_loop=self.loop)
Example 27
#!/usr/bin/env python
import argparse
import zmq
from zmq.eventloop import ioloop, zmqstream
import json

io_loop = ioloop.IOLoop()
context = zmq.Context()
socket = context.socket(zmq.ROUTER)

stream = zmqstream.ZMQStream(socket, io_loop=io_loop)

parser = argparse.ArgumentParser()
parser.add_argument('-b', '--bind-address', default='tcp://0.0.0.0:7777')
parser.add_argument('-c', '--city', default='Berlin')

args = parser.parse_args()


def guess(stream, message):
    addr, text = message
    print(text)
    stream.send_multipart(
        (addr, b'CORRECT' if text.decode() == args.city else b'INCORRECT'))


stream.on_recv_stream(guess)

socket.bind(args.bind_address)
io_loop.start()
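A hypothetical client for this guessing server, run as a separate process while the server's loop is running; a DEALER socket is used so the ROUTER side sees exactly the two frames (identity, text) that guess() unpacks:

import zmq

ctx = zmq.Context()
dealer = ctx.socket(zmq.DEALER)
dealer.connect('tcp://127.0.0.1:7777')
dealer.send(b'Berlin')    # single frame; the ROUTER prepends the dealer's identity
print(dealer.recv())      # b'CORRECT' or b'INCORRECT'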
Example 28
    def __init__(self, node_name, pub_endpoint, router_endpoint, peer_names,
                 debug):
        self.context = zmq.Context()

        self.node_name = node_name

        self.debug = debug
        self.connected = False

        chord_node_addr = get_chord_id_from_name(node_name)
        chord_node_peers = set(
            get_chord_id_from_name(name)
            for name in peer_names).difference(set([chord_node_addr]))
        # SUB socket for receiving messages from the broker
        # chord_node_addr = int(node_name)
        self.chord_node_addr = chord_node_addr
        self.sub_sock_chord = self.context.socket(zmq.SUB)
        self.sub_sock_chord.connect(pub_endpoint)
        # Make sure we get messages meant for us!
        self.sub_sock_chord.setsockopt_string(zmq.SUBSCRIBE,
                                              str(chord_node_addr))
        # Create handler for SUB socket
        self.sub_chord = zmqstream.ZMQStream(self.sub_sock_chord)
        self.sub_chord.on_recv(self.handle)

        raft_nodes = get_raft_ids_from_name(node_name)
        # Raft Ids are strings
        self.raft_id = raft_nodes[0]
        self.raft_peers = raft_nodes[1:]
        self.sub_sock_raft = self.context.socket(zmq.SUB)
        self.sub_sock_raft.connect(pub_endpoint)
        # Make sure we get messages meant for us!
        self.sub_sock_raft.setsockopt_string(zmq.SUBSCRIBE, self.raft_id)
        # Create handler for SUB socket
        self.sub_raft = zmqstream.ZMQStream(self.sub_sock_raft)
        self.sub_raft.on_recv(self.handle)

        # I'm not sure if we're going to need this.
        # SUB socket for receiving broadcast messages from the broker
        # self.broadcast_sock = self.context.socket(zmq.SUB)
        # self.broadcast_sock.connect(pub_endpoint)
        # self.broadcast_sock.setsockopt_string(zmq.SUBSCRIBE, "BROADCAST")
        # self.broadcast = zmqstream.ZMQStream(self.broadcast_sock, self.loop)
        # self.sub.on_recv(self.handle)

        # REQ socket for sending messages to the broker
        self.req_sock = self.context.socket(zmq.REQ)
        self.req_sock.connect(router_endpoint)
        self.req_sock.setsockopt_string(zmq.IDENTITY, node_name)
        # We don't strictly need a message handler for the REQ socket,
        # but we define one in case we ever receive any errors through it.
        self.req = zmqstream.ZMQStream(self.req_sock)
        self.req.on_recv(self.handle_broker_message)

        # self.name = node_name
        self.chord_node = None
        # self.peer_ids = [int(peer) for peer in peer_names]

        # Initialize the services.

        debug_log("NetworkNode.__init__ with Raft Id", self.raft_id,
                  " and Chord Id ", self.chord_node_addr)
        self.async_scheduler = AsyncScheduler()
        self.chord_message_sender = MessageSender(self.send_to_broker,
                                                  chord_node_addr)
        self.raft_message_sender = MessageSender(self.send_to_broker,
                                                 self.raft_id)

        def became_leader():
            """
            Start up the chord node! We're the leader!
            """
            print(colorama.Fore.GREEN + "Node " + self.node_name +
                  " Became leader " + colorama.Style.RESET_ALL)
            debug_test(
                f"Node {self.raft_id}: became leader... Starting up Chord Node for addr",
                self.chord_node_addr)
            debug_test(
                f"Chord {self.chord_node_addr}: starting up because {self.raft_id} leader "
            )
            self.chord_node = ChordNode(chord_node_addr, self.another_node,
                                        self.persistent_storage,
                                        self.async_scheduler,
                                        self.chord_message_sender,
                                        self.key_transferer)
            asyncio.create_task(self.chord_node.startup())

        def no_longer_leader():
            """
            Oh no. Shut down the chord node. We're not the leader. :(
            """
            print(colorama.Fore.GREEN + "Node " + self.node_name +
                  " is no longer leader " + colorama.Style.RESET_ALL)
            debug_test(
                f"Node {self.raft_id}: no longer leader... Shutting down Chord Node for addr",
                self.chord_node_addr)
            debug_test(
                f"Chord {self.chord_node_addr}: shutting down because {self.raft_id} not longer leader"
            )
            self.chord_node.hard_shutdown()

        self.persistent_storage = RaftPersistentKeyValueStore(
            self.raft_id, self.raft_peers, self.async_scheduler,
            self.raft_message_sender, became_leader, no_longer_leader)

        self.key_transferer = KeyTransferer(chord_node_addr,
                                            self.persistent_storage,
                                            self.async_scheduler,
                                            self.chord_message_sender)

        # We assume that the minimum chord_node addr will be started first. There is no relay node for them.
        if min(chord_node_peers) > chord_node_addr:
            self.another_node = None
        else:
            # Otherwise, the relay node is the node that was first started.
            self.another_node = min(chord_node_peers)
        print(self.another_node)

        # Capture signals to ensure an orderly shutdown
        for sig in [
                signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT
        ]:
            signal.signal(sig, self.shutdown)
Example 29
def launch_scheduler(in_addr,
                     out_addr,
                     mon_addr,
                     not_addr,
                     reg_addr,
                     config=None,
                     logname='root',
                     log_url=None,
                     loglevel=logging.DEBUG,
                     identity=b'task',
                     in_thread=False):

    ZMQStream = zmqstream.ZMQStream

    if config:
        # unwrap dict back into Config
        config = Config(config)

    if in_thread:
        # use instance() to get the same Context/Loop as our parent
        ctx = zmq.Context.instance()
        loop = ioloop.IOLoop.instance()
    else:
        # in a process, don't use instance()
        # for safety with multiprocessing
        ctx = zmq.Context()
        loop = ioloop.IOLoop()
    ins = ZMQStream(ctx.socket(zmq.ROUTER), loop)
    util.set_hwm(ins, 0)
    ins.setsockopt(zmq.IDENTITY, identity + b'_in')
    ins.bind(in_addr)

    outs = ZMQStream(ctx.socket(zmq.ROUTER), loop)
    util.set_hwm(outs, 0)
    outs.setsockopt(zmq.IDENTITY, identity + b'_out')
    outs.bind(out_addr)
    mons = zmqstream.ZMQStream(ctx.socket(zmq.PUB), loop)
    util.set_hwm(mons, 0)
    mons.connect(mon_addr)
    nots = zmqstream.ZMQStream(ctx.socket(zmq.SUB), loop)
    nots.setsockopt(zmq.SUBSCRIBE, b'')
    nots.connect(not_addr)

    querys = ZMQStream(ctx.socket(zmq.DEALER), loop)
    querys.connect(reg_addr)

    # setup logging.
    if in_thread:
        log = Application.instance().log
    else:
        if log_url:
            log = connect_logger(logname,
                                 ctx,
                                 log_url,
                                 root="scheduler",
                                 loglevel=loglevel)
        else:
            log = local_logger(logname, loglevel)

    scheduler = TaskScheduler(client_stream=ins,
                              engine_stream=outs,
                              mon_stream=mons,
                              notifier_stream=nots,
                              query_stream=querys,
                              loop=loop,
                              log=log,
                              config=config)
    scheduler.start()
    if not in_thread:
        try:
            loop.start()
        except KeyboardInterrupt:
            scheduler.log.critical("Interrupted, exiting...")
Example 30
tornado.ioloop = ioloop
"""
this application can be used with echostream.py, start echostream.py,
start web.py, then every time you hit http://localhost:8888/,
echostream.py will print out 'hello'
"""


def printer(msg):
    print(msg)


ctx = zmq.Context()
s = ctx.socket(zmq.REQ)
s.connect('tcp://127.0.0.1:5555')
stream = zmqstream.ZMQStream(s, tornado.ioloop.IOLoop.instance())
stream.on_recv(printer)


class TestHandler(tornado.web.RequestHandler):
    def get(self):
        print("sending hello")
        stream.send("hello")
        self.write("hello")


application = tornado.web.Application([(r"/", TestHandler)])

if __name__ == "__main__":
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()