Example #1
async def _setup_heartmonitor(
    ctx,
    ping_url,
    pong_url,
    monitor_url,
    log_level=logging.INFO,
    curve_publickey=None,
    curve_secretkey=None,
    **heart_monitor_kwargs,
):
    """Set up heart monitor

    For use in a background process,
    via Process(target=start_heartmonitor)
    """
    ping_socket = ctx.socket(zmq.PUB)
    bind(
        ping_socket,
        ping_url,
        curve_publickey=curve_publickey,
        curve_secretkey=curve_secretkey,
    )
    ping_stream = ZMQStream(ping_socket)

    pong_socket = ctx.socket(zmq.ROUTER)
    set_hwm(pong_socket, 0)
    bind(
        pong_socket,
        pong_url,
        curve_publickey=curve_publickey,
        curve_secretkey=curve_secretkey,
    )
    pong_stream = ZMQStream(pong_socket)

    monitor_socket = ctx.socket(zmq.XPUB)
    connect(
        monitor_socket,
        monitor_url,
        curve_publickey=curve_publickey,
        curve_secretkey=curve_secretkey,
        curve_serverkey=curve_publickey,
    )
    monitor_stream = ZMQStream(monitor_socket)

    # reinitialize logging after fork
    from .app import IPController

    app = IPController(log_level=log_level)
    heart_monitor_kwargs['log'] = app.log

    heart_monitor = HeartMonitor(
        ping_stream=ping_stream,
        pong_stream=pong_stream,
        monitor_stream=monitor_stream,
        **heart_monitor_kwargs,
    )
    heart_monitor.start()
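The curve_publickey / curve_secretkey arguments above are raw CurveZMQ key bytes. A minimal sketch of producing such a pair with pyzmq, for illustration only (in ipyparallel the keys come from the controller's CURVE configuration):

import zmq

# zmq.curve_keypair() returns a (public, secret) pair of Z85-encoded byte strings
curve_publickey, curve_secretkey = zmq.curve_keypair()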
Example #2
def launch_scheduler(in_addr, out_addr, mon_addr, not_addr, reg_addr, config=None,
                        logname='root', log_url=None, loglevel=logging.DEBUG,
                        identity=b'task', in_thread=False):

    ZMQStream = zmqstream.ZMQStream

    if config:
        # unwrap dict back into Config
        config = Config(config)

    if in_thread:
        # use instance() to get the same Context/Loop as our parent
        ctx = zmq.Context.instance()
        loop = ioloop.IOLoop.current()
    else:
        # in a process, don't use instance()
        # for safety with multiprocessing
        ctx = zmq.Context()
        loop = ioloop.IOLoop()
    ins = ZMQStream(ctx.socket(zmq.ROUTER), loop)
    util.set_hwm(ins, 0)
    ins.setsockopt(zmq.IDENTITY, identity + b'_in')
    ins.bind(in_addr)

    outs = ZMQStream(ctx.socket(zmq.ROUTER), loop)
    util.set_hwm(outs, 0)
    outs.setsockopt(zmq.IDENTITY, identity + b'_out')
    outs.bind(out_addr)
    mons = zmqstream.ZMQStream(ctx.socket(zmq.PUB), loop)
    util.set_hwm(mons, 0)
    mons.connect(mon_addr)
    nots = zmqstream.ZMQStream(ctx.socket(zmq.SUB), loop)
    nots.setsockopt(zmq.SUBSCRIBE, b'')
    nots.connect(not_addr)

    querys = ZMQStream(ctx.socket(zmq.DEALER), loop)
    querys.connect(reg_addr)

    # setup logging.
    if in_thread:
        log = Application.instance().log
    else:
        if log_url:
            log = connect_logger(logname, ctx, log_url, root="scheduler", loglevel=loglevel)
        else:
            log = local_logger(logname, loglevel)

    scheduler = TaskScheduler(client_stream=ins, engine_stream=outs,
                            mon_stream=mons, notifier_stream=nots,
                            query_stream=querys,
                            loop=loop, log=log,
                            config=config)
    scheduler.start()
    if not in_thread:
        try:
            loop.start()
        except KeyboardInterrupt:
            scheduler.log.critical("Interrupted, exiting...")
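Per the comments in the body above, in_thread=False is intended for running the scheduler in its own process with a private Context and IOLoop. A minimal usage sketch under that assumption; the addresses below are placeholders, not values from the original code:

from multiprocessing import Process

p = Process(
    target=launch_scheduler,
    kwargs=dict(
        in_addr='tcp://127.0.0.1:5001',   # client-facing ROUTER (bound)
        out_addr='tcp://127.0.0.1:5002',  # engine-facing ROUTER (bound)
        mon_addr='tcp://127.0.0.1:5003',  # PUB stream connected to the monitor
        not_addr='tcp://127.0.0.1:5004',  # SUB stream for registration notifications
        reg_addr='tcp://127.0.0.1:5005',  # DEALER stream for hub queries
        identity=b'task',
    ),
    daemon=True,
)
p.start()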
Example #3
def launch_scheduler(in_addr, out_addr, mon_addr, not_addr, reg_addr, config=None,
                        logname='root', log_url=None, loglevel=logging.DEBUG,
                        identity=b'task', in_thread=False):

    ZMQStream = zmqstream.ZMQStream

    if config:
        # unwrap dict back into Config
        config = Config(config)

    if in_thread:
        # use instance() to get the same Context/Loop as our parent
        ctx = zmq.Context.instance()
        loop = ioloop.IOLoop.instance()
    else:
        # in a process, don't use instance()
        # for safety with multiprocessing
        ctx = zmq.Context()
        loop = ioloop.IOLoop()
    ins = ZMQStream(ctx.socket(zmq.ROUTER), loop)
    util.set_hwm(ins, 0)
    ins.setsockopt(zmq.IDENTITY, identity + b'_in')
    ins.bind(in_addr)

    outs = ZMQStream(ctx.socket(zmq.ROUTER), loop)
    util.set_hwm(outs, 0)
    outs.setsockopt(zmq.IDENTITY, identity + b'_out')
    outs.bind(out_addr)
    mons = zmqstream.ZMQStream(ctx.socket(zmq.PUB), loop)
    util.set_hwm(mons, 0)
    mons.connect(mon_addr)
    nots = zmqstream.ZMQStream(ctx.socket(zmq.SUB), loop)
    nots.setsockopt(zmq.SUBSCRIBE, b'')
    nots.connect(not_addr)

    querys = ZMQStream(ctx.socket(zmq.DEALER), loop)
    querys.connect(reg_addr)

    # setup logging.
    if in_thread:
        log = Application.instance().log
    else:
        if log_url:
            log = connect_logger(logname, ctx, log_url, root="scheduler", loglevel=loglevel)
        else:
            log = local_logger(logname, loglevel)

    scheduler = TaskScheduler(client_stream=ins, engine_stream=outs,
                            mon_stream=mons, notifier_stream=nots,
                            query_stream=querys,
                            loop=loop, log=log,
                            config=config)
    scheduler.start()
    if not in_thread:
        try:
            loop.start()
        except KeyboardInterrupt:
            scheduler.log.critical("Interrupted, exiting...")
Example #4
def launch_scheduler(
    scheduler_class,
    in_addr,
    out_addr,
    mon_addr,
    not_addr,
    reg_addr,
    config=None,
    logname='root',
    log_url=None,
    loglevel=logging.DEBUG,
    identity=None,
    in_thread=False,
):
    config, ctx, loop, mons, nots, querys, log = get_common_scheduler_streams(
        mon_addr, not_addr, reg_addr, config, logname, log_url, loglevel,
        in_thread)

    util.set_hwm(mons, 0)
    ins = ZMQStream(ctx.socket(zmq.ROUTER), loop)
    util.set_hwm(ins, 0)
    if identity:
        ins.setsockopt(zmq.IDENTITY, identity + b'_in')

    ins.bind(in_addr)

    outs = ZMQStream(ctx.socket(zmq.ROUTER), loop)
    util.set_hwm(outs, 0)

    if identity:
        outs.setsockopt(zmq.IDENTITY, identity + b'_out')
    outs.bind(out_addr)

    scheduler = scheduler_class(
        client_stream=ins,
        engine_stream=outs,
        mon_stream=mons,
        notifier_stream=nots,
        query_stream=querys,
        loop=loop,
        log=log,
        config=config,
    )

    scheduler.start()
    if not in_thread:
        try:
            loop.start()
        except KeyboardInterrupt:
            scheduler.log.critical("Interrupted, exiting...")
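This variant differs from the previous two mainly in taking the scheduler class as an argument and making the socket identity optional, with the shared monitor/notification/query streams built by get_common_scheduler_streams. A hypothetical direct call, reusing the placeholder addresses from the sketch above and assuming TaskScheduler is importable (Example #7 imports it from .task_scheduler within the controller package):

launch_scheduler(
    TaskScheduler,  # scheduler_class
    in_addr='tcp://127.0.0.1:5001',
    out_addr='tcp://127.0.0.1:5002',
    mon_addr='tcp://127.0.0.1:5003',
    not_addr='tcp://127.0.0.1:5004',
    reg_addr='tcp://127.0.0.1:5005',
    identity=b'task',
)  # blocks in loop.start() because in_thread is False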
Example #5
def start_heartmonitor(
    ping_url,
    pong_url,
    monitor_url,
    log_level=logging.INFO,
    curve_publickey=None,
    curve_secretkey=None,
    **kwargs,
):
    """Start a heart monitor.

    For use in a background process,
    via Process(target=start_heartmonitor)
    """
    loop = ioloop.IOLoop()
    loop.make_current()
    ctx = zmq.Context()

    ping_socket = ctx.socket(zmq.PUB)
    bind(
        ping_socket,
        ping_url,
        curve_publickey=curve_publickey,
        curve_secretkey=curve_secretkey,
    )
    ping_stream = ZMQStream(ping_socket)

    pong_socket = ctx.socket(zmq.ROUTER)
    set_hwm(pong_socket, 0)
    bind(
        pong_socket,
        pong_url,
        curve_publickey=curve_publickey,
        curve_secretkey=curve_secretkey,
    )
    pong_stream = ZMQStream(pong_socket)

    monitor_socket = ctx.socket(zmq.XPUB)
    connect(
        monitor_socket,
        monitor_url,
        curve_publickey=curve_publickey,
        curve_secretkey=curve_secretkey,
        curve_serverkey=curve_publickey,
    )
    monitor_stream = ZMQStream(monitor_socket)

    # reinitialize logging after fork
    from .app import IPController

    app = IPController(log_level=log_level)
    kwargs['log'] = app.log

    heart_monitor = HeartMonitor(
        ping_stream=ping_stream,
        pong_stream=pong_stream,
        monitor_stream=monitor_stream,
        **kwargs,
    )
    heart_monitor.start()

    try:
        loop.start()
    finally:
        loop.close(all_fds=True)
    ctx.destroy()
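As the docstring says, this function is written to be used as a Process target; Example #7 below launches it exactly that way. A condensed sketch of that usage with placeholder URLs:

from multiprocessing import Process

hb = Process(
    target=start_heartmonitor,
    kwargs=dict(
        ping_url='tcp://127.0.0.1:5010',
        pong_url='tcp://127.0.0.1:5011',
        monitor_url='tcp://127.0.0.1:5012',
        log_level=logging.INFO,
    ),
    daemon=True,
)
hb.start()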
Example #6
def launch_broadcast_scheduler(
    in_addr,
    out_addrs,
    mon_addr,
    not_addr,
    reg_addr,
    identity,
    config=None,
    loglevel=logging.DEBUG,
    log_url=None,
    is_leaf=False,
    in_thread=False,
    outgoing_ids=None,
    curve_publickey=None,
    curve_secretkey=None,
    depth=0,
    max_depth=0,
    scheduler_class=BroadcastScheduler,
    logname='broadcast',
):
    config, ctx, loop, mons, nots, querys, log = get_common_scheduler_streams(
        mon_addr,
        not_addr,
        reg_addr,
        config,
        logname,
        log_url,
        loglevel,
        in_thread,
        curve_serverkey=curve_publickey,
        curve_publickey=curve_publickey,
        curve_secretkey=curve_secretkey,
    )

    is_root = depth == 0
    sub_scheduler_id = get_id_with_prefix(identity)

    incoming_stream = ZMQStream(ctx.socket(zmq.ROUTER), loop)
    util.set_hwm(incoming_stream, 0)
    incoming_stream.setsockopt(zmq.IDENTITY, sub_scheduler_id)

    if is_root:
        util.bind(incoming_stream, in_addr, curve_secretkey=curve_secretkey)
    else:
        util.connect(
            incoming_stream,
            in_addr,
            curve_serverkey=curve_publickey,
            curve_publickey=curve_publickey,
            curve_secretkey=curve_secretkey,
        )

    outgoing_streams = []
    for out_addr in out_addrs:
        out = ZMQStream(ctx.socket(zmq.ROUTER), loop)
        util.set_hwm(out, 0)
        out.setsockopt(zmq.IDENTITY, sub_scheduler_id)
        util.bind(out, out_addr, curve_secretkey=curve_secretkey)
        outgoing_streams.append(out)

    scheduler_args = dict(
        client_stream=incoming_stream,
        mon_stream=mons,
        notifier_stream=nots,
        query_stream=querys,
        loop=loop,
        log=log,
        config=config,
        depth=depth,
        max_depth=max_depth,
        name=identity,
    )
    if is_leaf:
        scheduler_args.update(engine_stream=outgoing_streams[0], is_leaf=True)
    else:
        scheduler_args.update(
            connected_sub_scheduler_ids=[
                get_id_with_prefix(identity) for identity in outgoing_ids
            ],
            outgoing_streams=outgoing_streams,
        )

    scheduler = scheduler_class(**scheduler_args)

    scheduler.start()
    if not in_thread:
        try:
            loop.start()
        except KeyboardInterrupt:
            scheduler.log.critical("Interrupted, exiting...")
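A leaf scheduler receives exactly one engine-facing stream (engine_stream=outgoing_streams[0] above), while interior nodes fan out over all of outgoing_streams. A hypothetical leaf launch; the addresses, identity, and depths are placeholders, since the real values are generated by the controller as in Example #7:

from multiprocessing import Process

leaf = Process(
    target=launch_broadcast_scheduler,
    kwargs=dict(
        in_addr='tcp://127.0.0.1:5020',      # connects to the parent scheduler (depth > 0, so not root)
        out_addrs=['tcp://127.0.0.1:5021'],  # a single engine-facing bind for a leaf
        mon_addr='tcp://127.0.0.1:5003',
        not_addr='tcp://127.0.0.1:5004',
        reg_addr='tcp://127.0.0.1:5005',
        identity='01',  # placeholder position in the scheduler tree
        is_leaf=True,
        depth=1,
        max_depth=1,
    ),
    daemon=True,
)
leaf.start()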
Example #7
File: app.py  Project: ipython/ipyparallel
    def init_hub(self):
        if self.enable_curve:
            self.log.info(
                "Using CURVE security. Ignore warnings about disabled message signing."
            )

        c = self.config

        ctx = self.context
        loop = self.loop
        if 'TaskScheduler.scheme_name' in self.config:
            scheme = self.config.TaskScheduler.scheme_name
        else:
            from .task_scheduler import TaskScheduler

            scheme = TaskScheduler.scheme_name.default_value

        if self.engine_info:
            registration_port = self.engine_info['registration']
        else:
            registration_port = self.next_port('engine')

        # build connection dicts
        if not self.engine_info:
            self.engine_info = {
                'interface': f"{self.engine_transport}://{self.engine_ip}",
                'registration': registration_port,
                'control': self.next_port('engine'),
                'mux': self.next_port('engine'),
                'task': self.next_port('engine'),
                'iopub': self.next_port('engine'),
                'hb_ping': self.next_port('engine'),
                'hb_pong': self.next_port('engine'),
                BroadcastScheduler.port_name: [
                    self.next_port('engine')
                    for i in range(self.number_of_leaf_schedulers)
                ],
            }

        if not self.client_info:
            self.client_info = {
                'interface': f"{self.client_transport}://{self.client_ip}",
                'registration': registration_port,
                'control': self.next_port('client'),
                'mux': self.next_port('client'),
                'task': self.next_port('client'),
                'task_scheme': scheme,
                'iopub': self.next_port('client'),
                'notification': self.next_port('client'),
                BroadcastScheduler.port_name: self.next_port('client'),
            }
        if self.engine_transport == 'tcp':
            internal_interface = "tcp://127.0.0.1"
        else:
            internal_interface = self.engine_info['interface']

        broadcast_ids = []  # '0', '00', '01', '001', etc.
        # always a leading 0 for the root node
        for d in range(1, self.broadcast_scheduler_depth + 1):
            for i in range(2**d):
                broadcast_ids.append(format(i, f"0{d + 1}b"))
        self.internal_info = {
            'interface': internal_interface,
            BroadcastScheduler.port_name: {
                broadcast_id: self.next_port() for broadcast_id in broadcast_ids
            },
        }
        mon_port = self.next_port()
        self.monitor_url = f"{self.monitor_transport}://{self.monitor_ip}:{mon_port}"

        # debug port pool consumption
        if self.engine_ports:
            self.log.debug(
                f"Used {self.engine_port_index} / {len(self.engine_ports)} engine ports"
            )
        if self.client_ports:
            self.log.debug(
                f"Used {self.client_port_index} / {len(self.client_ports)} client ports"
            )
        if self.ports:
            self.log.debug(
                f"Used {self.port_index} / {len(self.ports)} common ports")
        if self._random_port_count:
            self.log.debug(f"Used {self._random_port_count} random ports")

        self.log.debug("Hub engine addrs: %s", self.engine_info)
        self.log.debug("Hub client addrs: %s", self.client_info)
        self.log.debug("Hub internal addrs: %s", self.internal_info)

        # Registrar socket
        query = ZMQStream(ctx.socket(zmq.ROUTER), loop)
        util.set_hwm(query, 0)
        self.bind(query, self.client_url('registration'))
        self.log.info("Hub listening on %s for registration.",
                      self.client_url('registration'))
        if self.client_ip != self.engine_ip:
            self.bind(query, self.engine_url('registration'))
            self.log.info("Hub listening on %s for registration.",
                          self.engine_url('registration'))

        ### Engine connections ###

        # heartbeat
        hm_config = Config()
        for key in ("Session", "HeartMonitor"):
            if key in self.config:
                hm_config[key] = self.config[key]
            hm_config.Session.key = self.session.key

        self.heartmonitor_process = Process(
            target=start_heartmonitor,
            kwargs=dict(
                ping_url=self.engine_url('hb_ping'),
                pong_url=self.engine_url('hb_pong'),
                monitor_url=disambiguate_url(self.monitor_url),
                config=hm_config,
                log_level=self.log.getEffectiveLevel(),
                curve_publickey=self.curve_publickey,
                curve_secretkey=self.curve_secretkey,
            ),
            daemon=True,
        )

        ### Client connections ###

        # Notifier socket
        notifier = ZMQStream(ctx.socket(zmq.PUB), loop)
        notifier.socket.SNDHWM = 0
        self.bind(notifier, self.client_url('notification'))

        ### build and launch the queues ###

        # monitor socket
        sub = ctx.socket(zmq.SUB)
        sub.RCVHWM = 0
        sub.setsockopt(zmq.SUBSCRIBE, b"")
        self.bind(sub, self.monitor_url)
        # self.bind(sub, 'inproc://monitor')
        sub = ZMQStream(sub, loop)

        # connect the db
        db_class = self.db_class
        self.log.info(f'Hub using DB backend: {self.db_class.__name__}')
        self.db = self.db_class(session=self.session.session,
                                parent=self,
                                log=self.log)
        time.sleep(0.25)

        # resubmit stream
        resubmit = ZMQStream(ctx.socket(zmq.DEALER), loop)
        url = util.disambiguate_url(self.client_url('task'))
        self.connect(resubmit, url)

        self.hub = Hub(
            loop=loop,
            session=self.session,
            monitor=sub,
            query=query,
            notifier=notifier,
            resubmit=resubmit,
            db=self.db,
            heartmonitor_period=HeartMonitor(parent=self).period,
            engine_info=self.engine_info,
            client_info=self.client_info,
            log=self.log,
            registration_timeout=self.registration_timeout,
            parent=self,
        )

        if self.write_connection_files:
            # save to new json config files
            base = {
                'key': self.session.key.decode('ascii'),
                'curve_serverkey': (
                    self.curve_publickey.decode("ascii") if self.enable_curve else None
                ),
                'location': self.location,
                'pack': self.session.packer,
                'unpack': self.session.unpacker,
                'signature_scheme': self.session.signature_scheme,
            }

            cdict = {'ssh': self.ssh_server}
            cdict.update(self.client_info)
            cdict.update(base)
            self.save_connection_dict(self.client_json_file, cdict)

            edict = {'ssh': self.engine_ssh_server}
            edict.update(self.engine_info)
            edict.update(base)
            self.save_connection_dict(self.engine_json_file, edict)

        fname = "engines%s.json" % self.cluster_id
        self.hub.engine_state_file = os.path.join(self.profile_dir.log_dir,
                                                  fname)
        if self.restore_engines:
            self.hub._load_engine_state()
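For reference, the client connection file written by save_connection_dict above merges base, client_info, and the ssh entry. A sketch of its shape; the keys come from the code above, the values here are purely illustrative:

client_connection = {
    'ssh': '',
    'interface': 'tcp://127.0.0.1',
    'registration': 5000,
    'control': 5001,
    'mux': 5002,
    'task': 5003,
    'task_scheme': 'leastload',
    'iopub': 5004,
    'notification': 5005,
    # plus one entry under BroadcastScheduler.port_name
    'key': '<session key>',
    'curve_serverkey': None,  # set to the controller's public key when enable_curve is True
    'location': '<controller hostname or IP>',
    'pack': 'json',
    'unpack': 'json',
    'signature_scheme': 'hmac-sha256',
}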