Example #1
0
 def connect(self, south_peer=None, west_peer=None):
     """Connect to neighboring peers.

     Each peer, if given, is a 3-tuple ``(location, north_addr, east_addr)``.
     The south peer is dialed on its north address, the west peer on its
     east address.
     """
     # (peer tuple, socket to connect, index of the relevant address)
     for peer, sock, addr_index in (
         (south_peer, self.south, 1),
         (west_peer, self.west, 2),
     ):
         if peer is None:
             continue
         location = peer[0]
         sock.connect(disambiguate_url(peer[addr_index], location))
Example #2
0
 def connect(self, south_peer=None, west_peer=None):
     """Connect to peers.

     Each peer is a 3-tuple of the form ``(location, north_addr, east_addr)``.
     """
     if south_peer is not None:
         # dial the south peer's north-facing address
         loc = south_peer[0]
         self.south.connect(disambiguate_url(south_peer[1], loc))
     if west_peer is not None:
         # dial the west peer's east-facing address
         loc = west_peer[0]
         self.west.connect(disambiguate_url(west_peer[2], loc))
Example #3
0
 def connect(self, peers):
     """Connect to peers.

     `peers` is a dict of 4-tuples, keyed by name:
     ``{peer: (ident, addr, pub_addr, location)}``
     where ident is the XREP identity.
     """
     my_ident = self.identity
     for name, (ident, url, pub_url, location) in peers.items():
         self.peers[name] = ident
         # subscribe to every peer except ourselves
         if ident != my_ident:
             self.sub.connect(disambiguate_url(pub_url, location))
         # only dial peers with a higher identity, so each pair is
         # connected exactly once -- a doubly-connected pair will crash
         if ident > my_ident:
             self.socket.connect(disambiguate_url(url, location))
Example #4
0
 def connect(self, peers):
     """Connect to peers given as ``{name: (ident, addr, pub_addr, location)}``."""
     for name, info in peers.items():
         ident, url, pub_url, location = info
         self.peers[name] = ident
         if ident == self.identity:
             # never connect to our own sockets
             continue
         self.sub.connect(disambiguate_url(pub_url, location))
         if ident > self.identity:
             # prevent duplicate xrep: only the lower-identity side dials,
             # since a doubly-connected pair will crash
             self.socket.connect(disambiguate_url(url, location))
Example #5
0
 def maybe_tunnel(url):
     """Like connect, but don't complete the connection (for use by heartbeat)."""
     url = disambiguate_url(url, self.location)
     if not self.using_ssh:
         return str(url)
     self.log.debug("Tunneling connection to %s via %s", url, self.sshserver)
     # open the tunnel only; no socket is connected through it here
     url, tunnelobj = self.tunnel_mod.open_tunnel(
         url,
         self.sshserver,
         keyfile=self.sshkey,
         paramiko=self.paramiko,
         password=password,
     )
     return str(url)
Example #6
0
 def connect(s, url):
     """Connect socket `s` to `url`, tunneling over SSH when enabled."""
     url = disambiguate_url(url, self.location)
     if not self.using_ssh:
         return s.connect(url)
     self.log.debug("Tunneling connection to %s via %s", url, self.sshserver)
     return self.tunnel_mod.tunnel_connection(
         s,
         url,
         self.sshserver,
         keyfile=self.sshkey,
         paramiko=self.paramiko,
         password=password,
     )
 def maybe_tunnel(url):
     """Like connect, but don't complete the connection (for use by heartbeat)."""
     disambiguated = disambiguate_url(url, self.location)
     if self.using_ssh:
         self.log.debug(
             "Tunneling connection to %s via %s", disambiguated, self.sshserver
         )
         # the returned tunnel object is not used here, matching the
         # original behavior of discarding it after the url is rewritten
         disambiguated, _tunnel = self.tunnel_mod.open_tunnel(
             disambiguated,
             self.sshserver,
             keyfile=self.sshkey,
             paramiko=self.paramiko,
             password=password,
         )
     return str(disambiguated)
 def connect(s, url):
     """Connect `s` to `url` directly, or through an SSH tunnel if configured."""
     target = disambiguate_url(url, self.location)
     if self.using_ssh:
         self.log.debug("Tunneling connection to %s via %s", target, self.sshserver)
         return self.tunnel_mod.tunnel_connection(
             s,
             target,
             self.sshserver,
             keyfile=self.sshkey,
             paramiko=self.paramiko,
             password=password,
         )
     return s.connect(target)
Example #9
0
 def get_python_scheduler_args(
     self,
     scheduler_name,
     scheduler_class,
     monitor_url,
     identity=None,
     in_addr=None,
     out_addr=None,
 ):
     """Build the kwargs dict used to launch a Python scheduler.

     `in_addr`/`out_addr` default to this object's client/engine urls for
     `scheduler_name`; `identity` defaults to the scheduler name as bytes.
     """
     logname = scheduler_name if identity is None else f"{scheduler_name}-{identity}"
     return {
         'scheduler_class': scheduler_class,
         'in_addr': in_addr or self.client_url(scheduler_name),
         'out_addr': out_addr or self.engine_url(scheduler_name),
         'mon_addr': monitor_url,
         'not_addr': disambiguate_url(self.client_url('notification')),
         'reg_addr': disambiguate_url(self.client_url('registration')),
         'identity': identity if identity is not None else bytes(scheduler_name, 'utf8'),
         'logname': logname,
         'loglevel': self.log_level,
         'log_url': self.log_url,
         'config': dict(self.config),
         # CURVE keys are only forwarded when CURVE security is enabled
         'curve_secretkey': self.curve_secretkey if self.enable_curve else None,
         'curve_publickey': self.curve_publickey if self.enable_curve else None,
     }
Example #10
0
 def get_python_scheduler_args(self,
                               scheduler_name,
                               factory,
                               scheduler_class,
                               monitor_url,
                               identity=None):
     """Build the kwargs dict for launching a Python scheduler.

     Addresses come from `factory`; `identity` falls back to the
     scheduler name encoded as utf-8 bytes when not provided (or falsy).
     """
     args = {
         'scheduler_class': scheduler_class,
         'in_addr': factory.client_url(scheduler_name),
         'out_addr': factory.engine_url(scheduler_name),
         'mon_addr': monitor_url,
         'not_addr': disambiguate_url(factory.client_url('notification')),
         'reg_addr': disambiguate_url(factory.client_url('registration')),
         # note: truthiness check, so an empty identity also falls back
         'identity': identity if identity else bytes(scheduler_name, 'utf8'),
         'logname': 'scheduler',
         'loglevel': self.log_level,
         'log_url': self.log_url,
         'config': dict(self.config),
     }
     return args
Example #11
0
        def recursively_start_schedulers(identity, depth):
            """Launch the broadcast scheduler for `identity`, then recurse
            into its two children until the configured tree depth is reached.
            """
            # children in the implicit binary tree of schedulers
            outgoing_id1 = identity * 2 + 1
            outgoing_id2 = outgoing_id1 + 1
            is_leaf = depth == self.factory.broadcast_scheduler_depth

            scheduler_args = dict(
                in_addr=factory.client_url(BroadcastScheduler.port_name, identity),
                mon_addr=monitor_url,
                not_addr=disambiguate_url(factory.client_url('notification')),
                reg_addr=disambiguate_url(factory.client_url('registration')),
                identity=identity,
                config=dict(self.config),
                loglevel=self.log_level,
                log_url=self.log_url,
                outgoing_ids=[outgoing_id1, outgoing_id2],
                depth=depth,
                is_leaf=is_leaf,
            )
            if is_leaf:
                # leaves deliver directly to engines
                out_addrs = [
                    factory.engine_url(
                        BroadcastScheduler.port_name,
                        identity - factory.number_of_non_leaf_schedulers,
                    )
                ]
            else:
                # interior nodes fan out to their two children
                out_addrs = [
                    factory.client_url(BroadcastScheduler.port_name, outgoing_id1),
                    factory.client_url(BroadcastScheduler.port_name, outgoing_id2),
                ]
            scheduler_args['out_addrs'] = out_addrs
            launch_in_thread_or_process(scheduler_args)
            if not is_leaf:
                recursively_start_schedulers(outgoing_id1, depth + 1)
                recursively_start_schedulers(outgoing_id2, depth + 1)
Example #12
0
        def connect(s, url, curve_serverkey=None):
            """Connect socket `s` to `url`, applying CURVE credentials when
            configured and tunneling over SSH when enabled.
            """
            url = disambiguate_url(url, self.location)
            if curve_serverkey is None:
                # fall back to the default server key
                curve_serverkey = self.curve_serverkey
            if curve_serverkey:
                # CURVE options must be set before connecting
                s.setsockopt(zmq.CURVE_SERVERKEY, curve_serverkey)
                s.setsockopt(zmq.CURVE_SECRETKEY, self.curve_secretkey)
                s.setsockopt(zmq.CURVE_PUBLICKEY, self.curve_publickey)

            if not self.using_ssh:
                return s.connect(url)
            self.log.debug("Tunneling connection to %s via %s", url, self.sshserver)
            return self.tunnel_mod.tunnel_connection(
                s,
                url,
                self.sshserver,
                keyfile=self.sshkey,
                paramiko=self.paramiko,
                password=password,
            )
Example #13
0
    def init_hub(self):
        """Create the Hub.

        Allocates ports, builds the engine/client/internal connection-info
        dicts, binds the registration, notification, and monitor sockets,
        prepares the heartbeat monitor process, connects the DB backend,
        and optionally writes the JSON connection files.
        """
        if self.enable_curve:
            self.log.info(
                "Using CURVE security. Ignore warnings about disabled message signing."
            )

        # NOTE(review): `c` appears unused in this method — confirm before removing
        c = self.config

        ctx = self.context
        loop = self.loop
        # task scheme: explicit config wins, else the trait's default value
        if 'TaskScheduler.scheme_name' in self.config:
            scheme = self.config.TaskScheduler.scheme_name
        else:
            from .task_scheduler import TaskScheduler

            scheme = TaskScheduler.scheme_name.default_value

        # reuse the registration port if engine_info was already loaded
        if self.engine_info:
            registration_port = self.engine_info['registration']
        else:
            registration_port = self.next_port('engine')

        # build connection dicts
        # NOTE: next_port() call order below determines port assignment;
        # do not reorder these entries
        if not self.engine_info:
            self.engine_info = {
                'interface':
                f"{self.engine_transport}://{self.engine_ip}",
                'registration':
                registration_port,
                'control':
                self.next_port('engine'),
                'mux':
                self.next_port('engine'),
                'task':
                self.next_port('engine'),
                'iopub':
                self.next_port('engine'),
                'hb_ping':
                self.next_port('engine'),
                'hb_pong':
                self.next_port('engine'),
                BroadcastScheduler.port_name: [
                    self.next_port('engine')
                    for i in range(self.number_of_leaf_schedulers)
                ],
            }

        if not self.client_info:
            self.client_info = {
                'interface': f"{self.client_transport}://{self.client_ip}",
                'registration': registration_port,
                'control': self.next_port('client'),
                'mux': self.next_port('client'),
                'task': self.next_port('client'),
                'task_scheme': scheme,
                'iopub': self.next_port('client'),
                'notification': self.next_port('client'),
                BroadcastScheduler.port_name: self.next_port('client'),
            }
        # internal (scheduler-to-scheduler) traffic stays on loopback for tcp
        if self.engine_transport == 'tcp':
            internal_interface = "tcp://127.0.0.1"
        else:
            internal_interface = self.engine_info['interface']

        broadcast_ids = []  # '0', '00', '01', '001', etc.
        # always a leading 0 for the root node
        for d in range(1, self.broadcast_scheduler_depth + 1):
            for i in range(2**d):
                broadcast_ids.append(format(i, f"0{d + 1}b"))
        self.internal_info = {
            'interface': internal_interface,
            BroadcastScheduler.port_name:
            {broadcast_id: self.next_port()
             for broadcast_id in broadcast_ids},
        }
        mon_port = self.next_port()
        self.monitor_url = f"{self.monitor_transport}://{self.monitor_ip}:{mon_port}"

        # debug port pool consumption
        if self.engine_ports:
            self.log.debug(
                f"Used {self.engine_port_index} / {len(self.engine_ports)} engine ports"
            )
        if self.client_ports:
            self.log.debug(
                f"Used {self.client_port_index} / {len(self.client_ports)} client ports"
            )
        if self.ports:
            self.log.debug(
                f"Used {self.port_index} / {len(self.ports)} common ports")
        if self._random_port_count:
            self.log.debug(f"Used {self._random_port_count} random ports")

        self.log.debug("Hub engine addrs: %s", self.engine_info)
        self.log.debug("Hub client addrs: %s", self.client_info)
        self.log.debug("Hub internal addrs: %s", self.internal_info)

        # Registrar socket
        query = ZMQStream(ctx.socket(zmq.ROUTER), loop)
        util.set_hwm(query, 0)
        self.bind(query, self.client_url('registration'))
        self.log.info("Hub listening on %s for registration.",
                      self.client_url('registration'))
        # bind a second registration endpoint when engines use a different ip
        if self.client_ip != self.engine_ip:
            self.bind(query, self.engine_url('registration'))
            self.log.info("Hub listening on %s for registration.",
                          self.engine_url('registration'))

        ### Engine connections ###

        # heartbeat
        # copy relevant config sections into the heartmonitor's config
        hm_config = Config()
        for key in ("Session", "HeartMonitor"):
            if key in self.config:
                hm_config[key] = self.config[key]
            # NOTE(review): this assignment is inside the loop and runs once
            # per key; it looks like it was meant to run once — confirm
            hm_config.Session.key = self.session.key

        # heartmonitor runs in its own daemon process; presumably started
        # later by the caller — confirm
        self.heartmonitor_process = Process(
            target=start_heartmonitor,
            kwargs=dict(
                ping_url=self.engine_url('hb_ping'),
                pong_url=self.engine_url('hb_pong'),
                monitor_url=disambiguate_url(self.monitor_url),
                config=hm_config,
                log_level=self.log.getEffectiveLevel(),
                curve_publickey=self.curve_publickey,
                curve_secretkey=self.curve_secretkey,
            ),
            daemon=True,
        )

        ### Client connections ###

        # Notifier socket
        notifier = ZMQStream(ctx.socket(zmq.PUB), loop)
        notifier.socket.SNDHWM = 0
        self.bind(notifier, self.client_url('notification'))

        ### build and launch the queues ###

        # monitor socket: subscribe to everything
        sub = ctx.socket(zmq.SUB)
        sub.RCVHWM = 0
        sub.setsockopt(zmq.SUBSCRIBE, b"")
        self.bind(sub, self.monitor_url)
        # self.bind(sub, 'inproc://monitor')
        sub = ZMQStream(sub, loop)

        # connect the db
        # NOTE(review): `db_class` is assigned but unused below
        # (self.db_class is referenced directly) — confirm before removing
        db_class = self.db_class
        self.log.info(f'Hub using DB backend: {self.db_class.__name__}')
        self.db = self.db_class(session=self.session.session,
                                parent=self,
                                log=self.log)
        # brief pause after constructing the DB backend, presumably to let
        # it finish connecting — confirm necessity
        time.sleep(0.25)

        # resubmit stream
        resubmit = ZMQStream(ctx.socket(zmq.DEALER), loop)
        url = util.disambiguate_url(self.client_url('task'))
        self.connect(resubmit, url)

        self.hub = Hub(
            loop=loop,
            session=self.session,
            monitor=sub,
            query=query,
            notifier=notifier,
            resubmit=resubmit,
            db=self.db,
            heartmonitor_period=HeartMonitor(parent=self).period,
            engine_info=self.engine_info,
            client_info=self.client_info,
            log=self.log,
            registration_timeout=self.registration_timeout,
            parent=self,
        )

        if self.write_connection_files:
            # save to new json config files
            # `base` holds the fields shared by client and engine files
            base = {
                'key':
                self.session.key.decode('ascii'),
                'curve_serverkey':
                self.curve_publickey.decode("ascii")
                if self.enable_curve else None,
                'location':
                self.location,
                'pack':
                self.session.packer,
                'unpack':
                self.session.unpacker,
                'signature_scheme':
                self.session.signature_scheme,
            }

            cdict = {'ssh': self.ssh_server}
            cdict.update(self.client_info)
            cdict.update(base)
            self.save_connection_dict(self.client_json_file, cdict)

            edict = {'ssh': self.engine_ssh_server}
            edict.update(self.engine_info)
            edict.update(base)
            self.save_connection_dict(self.engine_json_file, edict)

        # file used to persist engine state across controller restarts
        fname = "engines%s.json" % self.cluster_id
        self.hub.engine_state_file = os.path.join(self.profile_dir.log_dir,
                                                  fname)
        if self.restore_engines:
            self.hub._load_engine_state()
Example #14
0
    def init_schedulers(self):
        """Build and register the relay queues and schedulers.

        Creates the IOPub, Direct (mux), and Control monitored queues,
        launches the task scheduler according to the configured scheme,
        launches the broadcast schedulers, and unbounds the HWM on all
        relay devices.  Each queue is appended to ``self.children``.
        """
        children = self.children
        mq = import_item(str(self.mq_class))
        # ensure session key is shared across sessions
        self.config.Session.key = self.session.key
        ident = self.session.bsession

        def add_auth(q):
            """Add CURVE auth to a monitored queue"""
            if not self.enable_curve:
                return False
            q.setsockopt_in(zmq.CURVE_SERVER, 1)
            q.setsockopt_in(zmq.CURVE_SECRETKEY, self.curve_secretkey)
            q.setsockopt_out(zmq.CURVE_SERVER, 1)
            q.setsockopt_out(zmq.CURVE_SECRETKEY, self.curve_secretkey)
            # monitor is a client
            # generate a throwaway keypair for the monitor connection
            pub, secret = zmq.curve_keypair()
            q.setsockopt_mon(zmq.CURVE_SERVERKEY, self.curve_publickey)
            q.setsockopt_mon(zmq.CURVE_SECRETKEY, secret)
            q.setsockopt_mon(zmq.CURVE_PUBLICKEY, pub)

        # disambiguate url, in case of *
        monitor_url = disambiguate_url(self.monitor_url)
        # maybe_inproc = 'inproc://monitor' if self.use_threads else monitor_url
        # IOPub relay (in a Process)
        q = mq(zmq.SUB, zmq.PUB, zmq.PUB, b'iopub', b'N/A')
        add_auth(q)
        q.name = "IOPubScheduler"

        q.bind_in(self.engine_url('iopub'))
        q.setsockopt_in(zmq.SUBSCRIBE, b'')
        q.bind_out(self.client_url('iopub'))
        q.setsockopt_out(zmq.IDENTITY, ident + b"_iopub")
        q.connect_mon(monitor_url)
        q.daemon = True
        children.append(q)

        # Multiplexer Queue (in a Process)
        q = mq(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'in', b'out')
        add_auth(q)
        q.name = "DirectScheduler"

        q.bind_in(self.client_url('mux'))
        q.setsockopt_in(zmq.IDENTITY, b'mux_in')
        q.bind_out(self.engine_url('mux'))
        q.setsockopt_out(zmq.IDENTITY, b'mux_out')
        q.connect_mon(monitor_url)
        q.daemon = True
        children.append(q)

        # Control Queue (in a Process)
        q = mq(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'incontrol', b'outcontrol')
        add_auth(q)
        q.name = "ControlScheduler"
        q.bind_in(self.client_url('control'))
        q.setsockopt_in(zmq.IDENTITY, b'control_in')
        q.bind_out(self.engine_url('control'))
        q.setsockopt_out(zmq.IDENTITY, b'control_out')
        q.connect_mon(monitor_url)
        q.daemon = True
        children.append(q)
        # task scheme: explicit config wins, else the trait default
        if 'TaskScheduler.scheme_name' in self.config:
            scheme = self.config.TaskScheduler.scheme_name
        else:
            scheme = TaskScheduler.scheme_name.default_value
        # Task Queue (in a Process)
        if scheme == 'pure':
            self.log.warning("task::using pure DEALER Task scheduler")
            q = mq(zmq.ROUTER, zmq.DEALER, zmq.PUB, b'intask', b'outtask')
            add_auth(q)
            q.name = "TaskScheduler(pure)"
            # q.setsockopt_out(zmq.HWM, hub.hwm)
            q.bind_in(self.client_url('task'))
            q.setsockopt_in(zmq.IDENTITY, b'task_in')
            q.bind_out(self.engine_url('task'))
            q.setsockopt_out(zmq.IDENTITY, b'task_out')
            q.connect_mon(monitor_url)
            q.daemon = True
            children.append(q)
        elif scheme == 'none':
            self.log.warning("task::using no Task scheduler")

        else:
            # any other scheme is handled by the Python task scheduler
            self.log.info("task::using Python %s Task scheduler" % scheme)
            self.launch_python_scheduler(
                'TaskScheduler',
                self.get_python_scheduler_args('task', TaskScheduler,
                                               monitor_url),
                children,
            )

        self.launch_broadcast_schedulers(monitor_url, children)

        # set unlimited HWM for all relay devices
        if hasattr(zmq, 'SNDHWM'):
            # first child is the IOPub relay, which has no monitor HWM option
            q = children[0]
            q.setsockopt_in(zmq.RCVHWM, 0)
            q.setsockopt_out(zmq.SNDHWM, 0)

            for q in children[1:]:
                if not hasattr(q, 'setsockopt_in'):
                    continue
                q.setsockopt_in(zmq.SNDHWM, 0)
                q.setsockopt_in(zmq.RCVHWM, 0)
                q.setsockopt_out(zmq.SNDHWM, 0)
                q.setsockopt_out(zmq.RCVHWM, 0)
                q.setsockopt_mon(zmq.SNDHWM, 0)
Example #15
0
    def init_schedulers(self):
        """Build and register the relay queues and schedulers (factory variant).

        Creates the IOPub, Multiplexer, and Control monitored queues using
        urls from ``self.factory``, launches the task scheduler according
        to the configured scheme, launches the broadcast schedulers, and
        unbounds the HWM on all relay devices.
        """
        children = self.children
        mq = import_item(str(self.mq_class))

        f = self.factory
        ident = f.session.bsession
        # disambiguate url, in case of *
        monitor_url = disambiguate_url(f.monitor_url)
        # maybe_inproc = 'inproc://monitor' if self.use_threads else monitor_url
        # IOPub relay (in a Process)
        q = mq(zmq.PUB, zmq.SUB, zmq.PUB, b'N/A', b'iopub')
        q.bind_in(f.client_url('iopub'))
        q.setsockopt_in(zmq.IDENTITY, ident + b"_iopub")
        q.bind_out(f.engine_url('iopub'))
        q.setsockopt_out(zmq.SUBSCRIBE, b'')
        q.connect_mon(monitor_url)
        q.daemon = True
        children.append(q)

        # Multiplexer Queue (in a Process)
        q = mq(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'in', b'out')

        q.bind_in(f.client_url('mux'))
        q.setsockopt_in(zmq.IDENTITY, b'mux_in')
        q.bind_out(f.engine_url('mux'))
        q.setsockopt_out(zmq.IDENTITY, b'mux_out')
        q.connect_mon(monitor_url)
        q.daemon = True
        children.append(q)

        # Control Queue (in a Process)
        q = mq(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'incontrol', b'outcontrol')
        q.bind_in(f.client_url('control'))
        q.setsockopt_in(zmq.IDENTITY, b'control_in')
        q.bind_out(f.engine_url('control'))
        q.setsockopt_out(zmq.IDENTITY, b'control_out')
        q.connect_mon(monitor_url)
        q.daemon = True
        children.append(q)
        # task scheme: explicit config wins, else the trait default
        if 'TaskScheduler.scheme_name' in self.config:
            scheme = self.config.TaskScheduler.scheme_name
        else:
            scheme = TaskScheduler.scheme_name.default_value
        # Task Queue (in a Process)
        if scheme == 'pure':
            # NOTE(review): log.warn is deprecated in favor of log.warning
            self.log.warn("task::using pure DEALER Task scheduler")
            q = mq(zmq.ROUTER, zmq.DEALER, zmq.PUB, b'intask', b'outtask')
            # q.setsockopt_out(zmq.HWM, hub.hwm)
            q.bind_in(f.client_url('task'))
            q.setsockopt_in(zmq.IDENTITY, b'task_in')
            q.bind_out(f.engine_url('task'))
            q.setsockopt_out(zmq.IDENTITY, b'task_out')
            q.connect_mon(monitor_url)
            q.daemon = True
            children.append(q)
        elif scheme == 'none':
            self.log.warn("task::using no Task scheduler")

        else:
            # any other scheme is handled by the Python task scheduler
            self.log.info("task::using Python %s Task scheduler" % scheme)
            self.launch_python_scheduler(
                self.get_python_scheduler_args('task', f, TaskScheduler,
                                               monitor_url),
                children,
            )

        self.launch_broadcast_schedulers(f, monitor_url, children)

        # set unlimited HWM for all relay devices
        if hasattr(zmq, 'SNDHWM'):
            # first child is the IOPub relay, which gets no monitor HWM option
            q = children[0]
            q.setsockopt_in(zmq.RCVHWM, 0)
            q.setsockopt_out(zmq.SNDHWM, 0)

            for q in children[1:]:
                if not hasattr(q, 'setsockopt_in'):
                    continue
                q.setsockopt_in(zmq.SNDHWM, 0)
                q.setsockopt_in(zmq.RCVHWM, 0)
                q.setsockopt_out(zmq.SNDHWM, 0)
                q.setsockopt_out(zmq.RCVHWM, 0)
                q.setsockopt_mon(zmq.SNDHWM, 0)
Example #16
0
    def init_schedulers(self):
        """Build and register the relay queues and the task scheduler.

        Creates the IOPub, Multiplexer, and Control monitored queues using
        urls from ``self.factory``, then launches the task scheduler either
        as a pure DEALER queue, not at all, or as a Python scheduler in a
        Process or the current thread depending on ``self.mq_class``.
        """
        children = self.children
        mq = import_item(str(self.mq_class))

        f = self.factory
        ident = f.session.bsession
        # disambiguate url, in case of *
        monitor_url = disambiguate_url(f.monitor_url)
        # maybe_inproc = 'inproc://monitor' if self.use_threads else monitor_url
        # IOPub relay (in a Process)
        q = mq(zmq.PUB, zmq.SUB, zmq.PUB, b'N/A', b'iopub')
        q.bind_in(f.client_url('iopub'))
        q.setsockopt_in(zmq.IDENTITY, ident + b"_iopub")
        q.bind_out(f.engine_url('iopub'))
        q.setsockopt_out(zmq.SUBSCRIBE, b'')
        q.connect_mon(monitor_url)
        q.daemon = True
        children.append(q)

        # Multiplexer Queue (in a Process)
        q = mq(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'in', b'out')

        q.bind_in(f.client_url('mux'))
        q.setsockopt_in(zmq.IDENTITY, b'mux_in')
        q.bind_out(f.engine_url('mux'))
        q.setsockopt_out(zmq.IDENTITY, b'mux_out')
        q.connect_mon(monitor_url)
        q.daemon = True
        children.append(q)

        # Control Queue (in a Process)
        q = mq(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'incontrol', b'outcontrol')
        q.bind_in(f.client_url('control'))
        q.setsockopt_in(zmq.IDENTITY, b'control_in')
        q.bind_out(f.engine_url('control'))
        q.setsockopt_out(zmq.IDENTITY, b'control_out')
        q.connect_mon(monitor_url)
        q.daemon = True
        children.append(q)
        # task scheme: explicit config wins, else the trait default
        if 'TaskScheduler.scheme_name' in self.config:
            scheme = self.config.TaskScheduler.scheme_name
        else:
            scheme = TaskScheduler.scheme_name.default_value
        # Task Queue (in a Process)
        if scheme == 'pure':
            # NOTE(review): log.warn is deprecated in favor of log.warning
            self.log.warn("task::using pure DEALER Task scheduler")
            q = mq(zmq.ROUTER, zmq.DEALER, zmq.PUB, b'intask', b'outtask')
            # q.setsockopt_out(zmq.HWM, hub.hwm)
            q.bind_in(f.client_url('task'))
            q.setsockopt_in(zmq.IDENTITY, b'task_in')
            q.bind_out(f.engine_url('task'))
            q.setsockopt_out(zmq.IDENTITY, b'task_out')
            q.connect_mon(monitor_url)
            q.daemon = True
            children.append(q)
        elif scheme == 'none':
            self.log.warn("task::using no Task scheduler")

        else:
            self.log.info("task::using Python %s Task scheduler" % scheme)
            # positional args for launch_scheduler: in, out, monitor,
            # notification, registration urls
            sargs = (
                f.client_url('task'),
                f.engine_url('task'),
                monitor_url,
                disambiguate_url(f.client_url('notification')),
                disambiguate_url(f.client_url('registration')),
            )
            kwargs = dict(logname='scheduler',
                          loglevel=self.log_level,
                          log_url=self.log_url,
                          config=dict(self.config))
            if 'Process' in self.mq_class:
                # run the Python scheduler in a Process
                q = Process(target=launch_scheduler, args=sargs, kwargs=kwargs)
                q.daemon = True
                children.append(q)
            else:
                # single-threaded Controller
                kwargs['in_thread'] = True
                launch_scheduler(*sargs, **kwargs)

        # set unlimited HWM for all relay devices
        if hasattr(zmq, 'SNDHWM'):
            # first child is the IOPub relay, which gets no monitor HWM option
            q = children[0]
            q.setsockopt_in(zmq.RCVHWM, 0)
            q.setsockopt_out(zmq.SNDHWM, 0)

            for q in children[1:]:
                if not hasattr(q, 'setsockopt_in'):
                    continue
                q.setsockopt_in(zmq.SNDHWM, 0)
                q.setsockopt_in(zmq.RCVHWM, 0)
                q.setsockopt_out(zmq.SNDHWM, 0)
                q.setsockopt_out(zmq.RCVHWM, 0)
                q.setsockopt_mon(zmq.SNDHWM, 0)
Example #17
0
def disambiguate_dns_url(url, location):
    """Accept either an IP address or a DNS name for `location` and
    disambiguate `url` against the resolved IP.
    """
    # resolve DNS names to an IP; addresses matching ip_pat pass through
    resolved = location if ip_pat.match(location) else socket.gethostbyname(location)
    return disambiguate_url(url, resolved)
Example #18
0
    def init_schedulers(self):
        """Build and register the relay queues and the task scheduler.

        Oldest variant: same structure as the other ``init_schedulers``
        implementations — IOPub, Multiplexer, and Control monitored queues,
        plus a task scheduler selected by the configured scheme.
        """
        children = self.children
        mq = import_item(str(self.mq_class))

        f = self.factory
        ident = f.session.bsession
        # disambiguate url, in case of *
        monitor_url = disambiguate_url(f.monitor_url)
        # maybe_inproc = 'inproc://monitor' if self.use_threads else monitor_url
        # IOPub relay (in a Process)
        q = mq(zmq.PUB, zmq.SUB, zmq.PUB, b'N/A',b'iopub')
        q.bind_in(f.client_url('iopub'))
        q.setsockopt_in(zmq.IDENTITY, ident + b"_iopub")
        q.bind_out(f.engine_url('iopub'))
        q.setsockopt_out(zmq.SUBSCRIBE, b'')
        q.connect_mon(monitor_url)
        q.daemon=True
        children.append(q)

        # Multiplexer Queue (in a Process)
        q = mq(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'in', b'out')

        q.bind_in(f.client_url('mux'))
        q.setsockopt_in(zmq.IDENTITY, b'mux_in')
        q.bind_out(f.engine_url('mux'))
        q.setsockopt_out(zmq.IDENTITY, b'mux_out')
        q.connect_mon(monitor_url)
        q.daemon=True
        children.append(q)

        # Control Queue (in a Process)
        q = mq(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'incontrol', b'outcontrol')
        q.bind_in(f.client_url('control'))
        q.setsockopt_in(zmq.IDENTITY, b'control_in')
        q.bind_out(f.engine_url('control'))
        q.setsockopt_out(zmq.IDENTITY, b'control_out')
        q.connect_mon(monitor_url)
        q.daemon=True
        children.append(q)
        # task scheme: explicit config wins, else the trait default
        if 'TaskScheduler.scheme_name' in self.config:
            scheme = self.config.TaskScheduler.scheme_name
        else:
            scheme = TaskScheduler.scheme_name.default_value
        # Task Queue (in a Process)
        if scheme == 'pure':
            # NOTE(review): log.warn is deprecated in favor of log.warning
            self.log.warn("task::using pure DEALER Task scheduler")
            q = mq(zmq.ROUTER, zmq.DEALER, zmq.PUB, b'intask', b'outtask')
            # q.setsockopt_out(zmq.HWM, hub.hwm)
            q.bind_in(f.client_url('task'))
            q.setsockopt_in(zmq.IDENTITY, b'task_in')
            q.bind_out(f.engine_url('task'))
            q.setsockopt_out(zmq.IDENTITY, b'task_out')
            q.connect_mon(monitor_url)
            q.daemon=True
            children.append(q)
        elif scheme == 'none':
            self.log.warn("task::using no Task scheduler")

        else:
            self.log.info("task::using Python %s Task scheduler"%scheme)
            # positional args for launch_scheduler: in, out, monitor,
            # notification, registration urls
            sargs = (f.client_url('task'), f.engine_url('task'),
                    monitor_url, disambiguate_url(f.client_url('notification')),
                    disambiguate_url(f.client_url('registration')),
            )
            kwargs = dict(logname='scheduler', loglevel=self.log_level,
                            log_url = self.log_url, config=dict(self.config))
            if 'Process' in self.mq_class:
                # run the Python scheduler in a Process
                q = Process(target=launch_scheduler, args=sargs, kwargs=kwargs)
                q.daemon=True
                children.append(q)
            else:
                # single-threaded Controller
                kwargs['in_thread'] = True
                launch_scheduler(*sargs, **kwargs)

        # set unlimited HWM for all relay devices
        if hasattr(zmq, 'SNDHWM'):
            # first child is the IOPub relay, which gets no monitor HWM option
            q = children[0]
            q.setsockopt_in(zmq.RCVHWM, 0)
            q.setsockopt_out(zmq.SNDHWM, 0)

            for q in children[1:]:
                if not hasattr(q, 'setsockopt_in'):
                    continue
                q.setsockopt_in(zmq.SNDHWM, 0)
                q.setsockopt_in(zmq.RCVHWM, 0)
                q.setsockopt_out(zmq.SNDHWM, 0)
                q.setsockopt_out(zmq.RCVHWM, 0)
                q.setsockopt_mon(zmq.SNDHWM, 0)