class BaseWorker(BrokerMessageReceiver):

    def _init(self):
        """ Initializes the instance, sets up the broker client.
        """
        self._setup_broker_client()

    def _setup_broker_client(self):
        """ Connects to the broker and sets up all the sockets.
        """
        self.broker_client = BrokerClient()
        self.broker_client.name = self.worker_data.broker_config.name

        # TODO: Rename self.worker_data to self.worker_config
        self.broker_client.token = self.worker_data.broker_config.broker_token
        self.broker_client.zmq_context = self.worker_data.broker_config.zmq_context
        self.broker_client.broker_push_client_pull = self.worker_data.broker_config.broker_push_client_pull
        self.broker_client.client_push_broker_pull = self.worker_data.broker_config.client_push_broker_pull
        self.broker_client.broker_pub_client_sub = self.worker_data.broker_config.broker_pub_client_sub
        self.broker_client.on_pull_handler = self.on_broker_msg
        self.broker_client.on_sub_handler = self.on_broker_msg
        self.broker_client.init()
        self.broker_client.start()
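
# A minimal usage sketch, not part of the server proper. ExampleWorker and the
# exact shape of worker_data are assumptions made for illustration only; what
# it shows is the expected wiring - a subclass supplies worker_data carrying a
# .broker_config Bunch, calls _init(), and then receives broker messages via
# handlers named after the on_broker_pull_msg_* convention that
# BrokerMessageReceiver dispatches on (see SingletonServer below for real ones).
class ExampleWorker(BaseWorker):
    def __init__(self, worker_data):
        self.worker_data = worker_data # A Bunch with a .broker_config attribute
        self._init() # Connects self.broker_client to the broker

    def on_broker_pull_msg_EXAMPLE_ACTION(self, msg, *ignored_args):
        # Invoked when a message with a matching action arrives on the PULL
        # socket. EXAMPLE_ACTION is a hypothetical action name.
        pass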
class ParallelServer(BrokerMessageReceiver):
    def __init__(self, host=None, port=None, zmq_context=None, crypto_manager=None,
                 odb=None, singleton_server=None, worker_config=None,
                 repo_location=None, ftp=None):
        self.host = host
        self.port = port
        self.zmq_context = zmq_context or zmq.Context()
        self.crypto_manager = crypto_manager
        self.odb = odb
        self.singleton_server = singleton_server
        self.worker_config = worker_config
        self.repo_location = repo_location
        self.ftp = ftp

    def _after_init_common(self, server):
        """ Initializes parts of the server that don't depend on whether the
        server's been allowed to join the cluster or not.
        """
        self.broker_token = server.cluster.broker_token
        self.broker_push_worker_pull = 'tcp://{0}:{1}'.format(server.cluster.broker_host,
            server.cluster.broker_start_port + PORTS.BROKER_PUSH_WORKER_THREAD_PULL)
        self.worker_push_broker_pull = self.parallel_push_broker_pull = \
            'tcp://{0}:{1}'.format(server.cluster.broker_host,
            server.cluster.broker_start_port + PORTS.WORKER_THREAD_PUSH_BROKER_PULL)
        self.broker_pub_worker_sub = 'tcp://{0}:{1}'.format(server.cluster.broker_host,
            server.cluster.broker_start_port + PORTS.BROKER_PUB_WORKER_THREAD_SUB)

        if self.singleton_server:
            self.service_store.read_internal_services()
            kwargs = {'zmq_context': self.zmq_context,
                'broker_host': server.cluster.broker_host,
                'broker_push_singleton_pull_port': server.cluster.broker_start_port + PORTS.BROKER_PUSH_SINGLETON_PULL,
                'singleton_push_broker_pull_port': server.cluster.broker_start_port + PORTS.SINGLETON_PUSH_BROKER_PULL,
                'broker_token': self.broker_token,
            }
            Thread(target=self.singleton_server.run, kwargs=kwargs).start()

            # Let the scheduler fully initialize
            time.sleep(0.2)

    def _after_init_accepted(self, server):
        if self.singleton_server:
            for (_, name, is_active, job_type, start_date, extra, service,
                 _, weeks, days, hours, minutes, seconds, repeats,
                 cron_definition) in self.odb.get_job_list(server.cluster.id):
                if is_active:
                    job_data = Bunch({'name':name, 'is_active':is_active,
                        'job_type':job_type, 'start_date':start_date,
                        'extra':extra, 'service':service, 'weeks':weeks,
                        'days':days, 'hours':hours, 'minutes':minutes,
                        'seconds':seconds, 'repeats':repeats,
                        'cron_definition':cron_definition})
                    self.singleton_server.scheduler.create_edit('create', job_data)

            # Start the connectors only once throughout the whole cluster
            self._init_connectors(server)

        # Mapping between SOAP actions and internal services.
        #for soap_action, service_name in self.odb.get_internal_channel_list(server.cluster.id):
        #    self.request_handler.soap_handler.soap_config[soap_action] = service_name

        # FTP
        ftp_conn_params = Bunch()
        for item in self.odb.get_out_ftp_list(server.cluster.id):
            ftp_conn_params[item.name] = Bunch()
            ftp_conn_params[item.name].is_active = item.is_active
            ftp_conn_params[item.name].name = item.name
            ftp_conn_params[item.name].host = item.host
            ftp_conn_params[item.name].user = item.user
            ftp_conn_params[item.name].password = item.password
            ftp_conn_params[item.name].acct = item.acct
            ftp_conn_params[item.name].timeout = item.timeout
            ftp_conn_params[item.name].port = item.port
            ftp_conn_params[item.name].dircache = item.dircache

        self.ftp = FTPFacade(ftp_conn_params)

        self.worker_config = Bunch()

        # Repo location so that AMQP subprocesses know where to read
        # the server's configuration from.
        self.worker_config.repo_location = self.repo_location

        # The broker client for each of the worker threads.
        self.worker_config.broker_config = Bunch()
        self.worker_config.broker_config.name = 'worker-thread'
        self.worker_config.broker_config.broker_token = self.broker_token
        self.worker_config.broker_config.zmq_context = self.zmq_context
        self.worker_config.broker_config.broker_push_client_pull = self.broker_push_worker_pull
        self.worker_config.broker_config.client_push_broker_pull = self.worker_push_broker_pull
        self.worker_config.broker_config.broker_pub_client_sub = self.broker_pub_worker_sub

        # HTTP Basic Auth
        ba_config = Bunch()
        for item in self.odb.get_basic_auth_list(server.cluster.id):
            ba_config[item.name] = Bunch()
            ba_config[item.name].is_active = item.is_active
            ba_config[item.name].username = item.username
            ba_config[item.name].domain = item.domain
            ba_config[item.name].password = item.password

        # Technical accounts
        ta_config = Bunch()
        for item in self.odb.get_tech_acc_list(server.cluster.id):
            ta_config[item.name] = Bunch()
            ta_config[item.name].is_active = item.is_active
            ta_config[item.name].name = item.name
            ta_config[item.name].password = item.password
            ta_config[item.name].salt = item.salt

        # WS-Security definitions
        wss_config = Bunch()
        for item in self.odb.get_wss_list(server.cluster.id):
            wss_config[item.name] = Bunch()
            wss_config[item.name].is_active = item.is_active
            wss_config[item.name].username = item.username
            wss_config[item.name].password = item.password
            wss_config[item.name].password_type = item.password_type
            wss_config[item.name].reject_empty_nonce_ts = item.reject_empty_nonce_ts
            wss_config[item.name].reject_stale_username = item.reject_stale_username
            wss_config[item.name].expiry_limit = item.expiry_limit
            wss_config[item.name].nonce_freshness = item.nonce_freshness

        # Security configuration of HTTP URLs.
        url_sec = self.odb.get_url_security(server)

        # All the HTTP/SOAP channels.
        http_soap = MultiDict()
        for item in self.odb.get_http_soap_list(server.cluster.id, 'channel'):
            _info = Bunch()
            _info[item.soap_action] = Bunch()
            _info[item.soap_action].id = item.id
            _info[item.soap_action].name = item.name
            _info[item.soap_action].is_internal = item.is_internal
            _info[item.soap_action].url_path = item.url_path
            _info[item.soap_action].method = item.method
            _info[item.soap_action].soap_version = item.soap_version
            _info[item.soap_action].service_id = item.service_id
            _info[item.soap_action].service_name = item.service_name
            _info[item.soap_action].impl_name = item.impl_name
            http_soap.add(item.url_path, _info)

        self.worker_config.basic_auth = ba_config
        self.worker_config.tech_acc = ta_config
        self.worker_config.wss = wss_config
        self.worker_config.url_sec = url_sec
        self.worker_config.http_soap = http_soap

        # The parallel server's broker client. The client's used to notify
        # all the server's AMQP subprocesses that they need to shut down.
        self.broker_client = BrokerClient()
        self.broker_client.name = 'parallel'
        self.broker_client.token = server.cluster.broker_token
        self.broker_client.zmq_context = self.zmq_context
        self.broker_client.client_push_broker_pull = self.parallel_push_broker_pull
        self.broker_client.init()
        self.broker_client.start()

    def _init_connectors(self, server):
        """ Starts all the connector subprocesses.
""" # AMQP - channels for item in self.odb.get_channel_amqp_list(server.cluster.id): amqp_channel_start_connector(self.repo_location, item.id, item.def_id) # AMQP - outgoing for item in self.odb.get_out_amqp_list(server.cluster.id): amqp_out_start_connector(self.repo_location, item.id, item.def_id) # JMS WMQ - channels for item in self.odb.get_channel_jms_wmq_list(server.cluster.id): jms_wmq_channel_start_connector(self.repo_location, item.id, item.def_id) # JMS WMQ - outgoing for item in self.odb.get_out_jms_wmq_list(server.cluster.id): jms_wmq_out_start_connector(self.repo_location, item.id, item.def_id) # ZMQ - channels for item in self.odb.get_channel_zmq_list(server.cluster.id): zmq_channel_start_connector(self.repo_location, item.id) # ZMQ - outgoimg for item in self.odb.get_out_zmq_list(server.cluster.id): zmq_outgoing_start_connector(self.repo_location, item.id) def _after_init_non_accepted(self, server): pass def after_init(self): # First try grabbing the basic server's data from the ODB. No point # in doing anything else if we can't get past this point. server = self.odb.fetch_server() if not server: raise Exception('Server does not exist in the ODB') self._after_init_common(server) # A server which hasn't been approved in the cluster still needs to fetch # all the config data but it won't start any MQ/AMQP/ZMQ/etc. listeners # except for a ZMQ config subscriber that will listen for an incoming approval. if server.last_join_status == ZATO_JOIN_REQUEST_ACCEPTED: self._after_init_accepted(server) else: msg = 'Server has not been accepted, last_join_status=[{0}]' logger.warn(msg.format(server.last_join_status)) self._after_init_non_accepted(server) def run_forever(self): task_dispatcher = _TaskDispatcher(self, self.worker_config, self.on_broker_msg, self.zmq_context) task_dispatcher.setThreadCount(10) logger.debug('host=[{0}], port=[{1}]'.format(self.host, self.port)) ZatoHTTPListener(self, task_dispatcher) try: while True: asyncore.poll(5) except KeyboardInterrupt: logger.info('Shutting down') # Close all the connector subprocesses this server has started pairs = ((AMQP_CONNECTOR.CLOSE, MESSAGE_TYPE.TO_AMQP_CONNECTOR_SUB), (JMS_WMQ_CONNECTOR.CLOSE, MESSAGE_TYPE.TO_JMS_WMQ_CONNECTOR_SUB), (ZMQ_CONNECTOR.CLOSE, MESSAGE_TYPE.TO_ZMQ_CONNECTOR_SUB), ) for action, msg_type in pairs: msg = {} msg['action'] = action msg['odb_token'] = self.odb.odb_data['token'] self.broker_client.send_json(msg, msg_type=msg_type) time.sleep(0.2) self.broker_client.close() if self.singleton_server: if getattr(self.singleton_server, 'broker_client', None): self.singleton_server.broker_client.close() self.zmq_context.term() self.odb.close() task_dispatcher.shutdown()
class SingletonServer(BrokerMessageReceiver):
    """ A server of which only one instance may be running in a Zato container.
    Holds and processes data which can't be made parallel, such as the scheduler,
    hot-deployment or on-disk configuration management.
    """
    def __init__(self, parallel_server=None, scheduler=None, broker_token=None,
                 zmq_context=None, broker_host=None,
                 broker_push_singleton_pull_port=None,
                 singleton_push_broker_pull_port=None, initial_sleep_time=None):
        self.parallel_server = parallel_server
        self.scheduler = scheduler
        self.broker_token = broker_token
        self.broker_host = broker_host
        self.broker_push_singleton_pull_port = broker_push_singleton_pull_port
        self.singleton_push_broker_pull_port = singleton_push_broker_pull_port
        self.zmq_context = zmq_context
        self.initial_sleep_time = initial_sleep_time

    def run(self, *ignored_args, **kwargs):
        self.logger = logging.getLogger(self.__class__.__name__)

        # So that other moving parts - like connector subprocesses - have time
        # to initialize before the singleton server starts the scheduler.
        self.logger.debug('Sleeping for {0} s'.format(self.initial_sleep_time))
        sleep(self.initial_sleep_time)

        for name in ('broker_token', 'zmq_context', 'broker_host',
                     'broker_push_singleton_pull_port',
                     'singleton_push_broker_pull_port'):
            if name in kwargs:
                setattr(self, name, kwargs[name])

        self.broker_push_client_pull = 'tcp://{0}:{1}'.format(self.broker_host,
            self.broker_push_singleton_pull_port)
        self.client_push_broker_pull = 'tcp://{0}:{1}'.format(self.broker_host,
            self.singleton_push_broker_pull_port)

        # Initialize the scheduler.
        self.scheduler.singleton = self
        self.scheduler._init()

        self.broker_client = BrokerClient()
        self.broker_client.name = 'singleton'
        self.broker_client.token = self.broker_token
        self.broker_client.zmq_context = self.zmq_context
        self.broker_client.broker_push_client_pull = self.broker_push_client_pull
        self.broker_client.client_push_broker_pull = self.client_push_broker_pull
        self.broker_client.on_pull_handler = self.on_broker_msg
        self.broker_client.init()
        self.broker_client.start()

        '''
        # Start the pickup monitor.
        self.logger.debug("Pickup notifier starting.")
        self.pickup.watch()
        '''

################################################################################

    def on_broker_pull_msg_SCHEDULER_CREATE(self, msg, *ignored_args):
        self.scheduler.create_edit('create', msg)

    def on_broker_pull_msg_SCHEDULER_EDIT(self, msg, *ignored_args):
        self.scheduler.create_edit('edit', msg)

    def on_broker_pull_msg_SCHEDULER_DELETE(self, msg, *ignored_args):
        self.scheduler.delete(msg)

    def on_broker_pull_msg_SCHEDULER_EXECUTE(self, msg, *ignored_args):
        self.scheduler.execute(msg)

################################################################################

    def load_egg_services(self, egg_path):
        """ Tells each of the parallel servers to load Zato services off an .egg
        distribution. The .egg is guaranteed to contain at least one service to load.
        """
        # XXX: This loop could be refactored out to some common place.
        for q in self.partner_request_queues.values():
            req = self._create_ipc_config_request('LOAD_EGG_SERVICES', egg_path)
            code, reason = self._send_config_request(req, q, timeout=1.0)

            if code != ZATO_OK:
                # XXX: Add the parallel server's PID/name here.
                msg = ('Could not update a parallel server, which may have been '
                       'left in an unstable state, reason=[{0}]').format(reason)
                self.logger.error(msg)
                raise ZatoException(msg)

        return ZATO_OK, ''
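
# A hedged sketch of running a SingletonServer on its own, mirroring what
# ParallelServer._after_init_common does when it spawns the singleton in a
# thread. The broker host and port numbers below are made-up example values
# and the scheduler is assumed to have been created elsewhere.
def example_run_singleton(scheduler, zmq_context, broker_token):
    singleton = SingletonServer(scheduler=scheduler, initial_sleep_time=0.5)
    kwargs = {'zmq_context': zmq_context,
        'broker_host': 'localhost',
        'broker_push_singleton_pull_port': 5100,
        'singleton_push_broker_pull_port': 5101,
        'broker_token': broker_token,
    }
    # run() picks the connection parameters up from kwargs and blocks,
    # hence the separate thread.
    Thread(target=singleton.run, kwargs=kwargs).start()
    return singleton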