def run(self):
    if self.is_forwarder:
        spawn_greenlet(Forwarder, self.name, self.pid)
    else:
        self.publisher = Publisher(self.name, self.pid)
        self.subscriber = Subscriber(self.on_message_callback, self.name, self.pid)
        spawn_greenlet(self.subscriber.serve_forever)

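# All of the snippets in this collection call a spawn_greenlet helper. The following is only a
# minimal, hypothetical sketch of what such a wrapper might look like on top of gevent.spawn -
# it is an assumption for illustration, not the project's actual implementation. The idea it
# shows is: start a callable in a new greenlet and, for a short window, re-raise any exception
# it raises so that start-up errors are not silently lost.

import gevent

def spawn_greenlet(callable_, *args, **kwargs):
    """ Spawns `callable_` in a new greenlet; if it fails within `timeout` seconds,
    the exception is re-raised in the caller (sketch only, names are assumptions).
    """
    timeout = kwargs.pop('timeout', 0.5)
    greenlet = gevent.spawn(callable_, *args, **kwargs)

    # Give the new greenlet a chance to run and fail fast ..
    gevent.sleep(0)
    greenlet.join(timeout)

    # .. if it already raised, surface that exception to the caller ..
    if greenlet.exception:
        raise greenlet.exception

    # .. otherwise, hand the still-running greenlet back.
    return greenlet
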
def handle(self):

    # Maps notification type to a service handling it
    notif_type_service = {
        'openstack_swift': 'zato.notif.cloud.openstack.swift.run-notifier',
        'sql': 'zato.notif.sql.run-notifier',
    }

    spawn_greenlet(
        self.invoke,
        notif_type_service[self.request.payload['config']['notif_type']],
        self.request.payload['config'])

def __init__(self, sub_key, delivery_lock, delivery_list, deliver_pubsub_msg_cb, confirm_pubsub_msg_delivered_cb):
    self.keep_running = True
    self.sub_key = sub_key
    self.delivery_lock = delivery_lock
    self.delivery_list = delivery_list
    self.deliver_pubsub_msg_cb = deliver_pubsub_msg_cb
    self.confirm_pubsub_msg_delivered_cb = confirm_pubsub_msg_delivered_cb

    spawn_greenlet(self.run)

def __init__(self, pubsub):
    self.pubsub = pubsub     # type: PubSub
    self.sub_key_to_msg_id = {}  # Sub key -> Msg ID set --- What messages are available for a given subscriber
    self.msg_id_to_sub_key = {}  # Msg ID -> Sub key set --- What subscribers are interested in a given message
    self.msg_id_to_msg = {}      # Msg ID -> Message data --- What the actual contents of each message is
    self.topic_msg_id = {}       # Topic ID -> Msg ID set --- What messages are available for each topic (no matter sub_key)
    self.lock = RLock()

    # Start in background a cleanup task that deletes all expired and removed messages
    spawn_greenlet(self.run_cleanup_task)

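# A standalone, hypothetical illustration of how the four in-RAM dictionaries above relate to
# each other when a single published message is registered. The register_message function and
# its parameters are made up for this sketch and are not part of the actual class.

from gevent.lock import RLock

sub_key_to_msg_id = {}   # Sub key  -> set of Msg IDs
msg_id_to_sub_key = {}   # Msg ID   -> set of sub keys
msg_id_to_msg = {}       # Msg ID   -> message data
topic_msg_id = {}        # Topic ID -> set of Msg IDs
lock = RLock()

def register_message(topic_id, msg_id, msg, sub_keys):
    """ Adds one message to all the lookup structures (sketch only). """
    with lock:
        msg_id_to_msg[msg_id] = msg
        topic_msg_id.setdefault(topic_id, set()).add(msg_id)
        for sub_key in sub_keys:
            sub_key_to_msg_id.setdefault(sub_key, set()).add(msg_id)
            msg_id_to_sub_key.setdefault(msg_id, set()).add(sub_key)

# Example usage with made-up identifiers
register_message('topic.1', 'msg.1', {'data': 'example'}, ['sub.key.1', 'sub.key.2'])
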
def run(self, max_wait=20):
    spawn_greenlet(self._run, timeout=10)

    now = datetime.utcnow()
    until = now + timedelta(seconds=max_wait)

    while not self.is_authenticated:
        sleep(0.01)
        now = datetime.utcnow()
        if now >= until:
            return

def start(self, needs_log=True):
    with self._start_stop_logger('Starting', ' Started'):
        self.keep_running = True
        try:
            if self.start_in_greenlet:
                spawn_greenlet(self._spawn_start)
            else:
                self._start()
        except Exception, e:
            logger.warn(format_exc(e))

def __init__(self, config):
    # type: (Bunch) -> None
    self.config = config

    # By default, we are not connected anywhere
    self.is_connected = False

    # Initialize in a separate greenlet so as not to block the main one
    # if the remote server is slow to respond.
    spawn_greenlet(self._init, timeout=2)

def serve_forever(self):
    try:
        try:
            spawn_greenlet(self.sched.run)
        except Exception:
            logger.warn(format_exc())

        while not self.sched.ready:
            sleep(0.1)
    except Exception:
        logger.warn(format_exc())

def start(self, needs_log=True):
    with self._start_stop_logger('Starting', ' Started', self._wait_until_connected):
        self.keep_running = True
        self.keep_connecting = True
        try:
            if self.start_in_greenlet:
                spawn_greenlet(self._spawn_start)
            else:
                self._start_loop()
        except Exception:
            logger.warn(format_exc())

def __init__(self, pubsub_tool, pubsub, sub_key, delivery_lock, delivery_list, deliver_pubsub_msg,
        confirm_pubsub_msg_delivered_cb, sub_config):
    self.keep_running = True
    self.pubsub_tool = pubsub_tool
    self.pubsub = pubsub
    self.sub_key = sub_key
    self.delivery_lock = delivery_lock
    self.delivery_list = delivery_list
    self.deliver_pubsub_msg = deliver_pubsub_msg
    self.confirm_pubsub_msg_delivered_cb = confirm_pubsub_msg_delivered_cb
    self.sub_config = sub_config
    self.topic_name = sub_config.topic_name
    self.wait_sock_err = float(self.sub_config.wait_sock_err)
    self.wait_non_sock_err = float(self.sub_config.wait_non_sock_err)
    self.last_run = utcnow_as_ms()
    self.delivery_interval = self.sub_config.task_delivery_interval / 1000.0
    self.delivery_max_retry = self.sub_config.delivery_max_retry
    self.previous_delivery_method = self.sub_config.delivery_method

    # This is a total of messages processed so far
    self.delivery_counter = 0

    # A list of messages that were requested to be deleted while a delivery was in progress,
    # checked before each delivery.
    self.delete_requested = []

    # This is a lock used for micro-operations such as changing or consulting the contents of self.delete_requested.
    self.interrupt_lock = RLock()

    # If self.wrap_in_list is True, messages will be always wrapped in a list,
    # even if there is only one message to send. Note that self.wrap_in_list will be False
    # only if both batch_size is 1 and wrap_one_msg_in_list is True.
    if self.sub_config.delivery_batch_size == 1:
        if self.sub_config.wrap_one_msg_in_list:
            self.wrap_in_list = True
        else:
            self.wrap_in_list = False

    # With batch_size > 1, we always send a list, no matter what.
    else:
        self.wrap_in_list = True

    spawn_greenlet(self.run)

def invoke_async(self, target_name, payload, channel, cid):
    invoked_service = FakeService(self.cache, self.lock, payload)
    invoked_service.name = target_name
    invoked_service.cid = cid

    # If we are invoked via patterns, let the callbacks run ..
    if channel in _pattern_call_channels:

        # .. find the correct callback function first ..
        if channel == _fanout_call:
            func = self.patterns.fanout.on_call_finished
        else:
            func = self.patterns.parallel.on_call_finished

        # .. and run the function in a new greenlet.
        spawn_greenlet(func, invoked_service, self.response_payload, self.response_exception)

def start(self, needs_log=True):
    if self.is_inactive:
        logger.warn('Skipped creation of an inactive connector `%s` (%s)', self.name, self.type)
        return

    with self._start_stop_logger('Starting', ' Started', self._wait_until_connected):
        self.keep_running = True
        self.keep_connecting = True
        try:
            if self.start_in_greenlet:
                spawn_greenlet(self._spawn_start, timeout=1)
            else:
                self._start_loop()
        except Exception:
            logger.warn(format_exc())

def on_created(self, wd_event):
    # type: (FileSystemEvent) -> None
    try:
        file_name = os.path.basename(wd_event.src_path)  # type: str

        if not self.manager.should_pick_up(file_name, self.config.patterns):
            return

        pe = PickupEvent()
        pe.full_path = wd_event.src_path
        pe.base_dir = os.path.dirname(wd_event.src_path)
        pe.file_name = file_name
        pe.stanza = self.stanza

        # If we are deploying services, the path is handled separately
        if self.config.is_service_hot_deploy:
            spawn_greenlet(hot_deploy, self.manager.server, pe.file_name, pe.full_path, self.config.delete_after_pickup)
            return

        if self.config.read_on_pickup:

            f = open(pe.full_path, 'rb')
            pe.raw_data = f.read()
            pe.has_raw_data = True
            f.close()

            if self.config.parse_on_pickup:
                try:
                    pe.data = self.manager.get_parser(self.config.parse_with)(pe.raw_data)
                    pe.has_data = True
                except Exception as e:
                    pe.parse_error = e

        spawn_greenlet(self.manager.invoke_callbacks, pe, self.config.services, self.config.topics)
        self.manager.post_handle(pe.full_path, self.config)

    except Exception:
        logger.warn('Exception in pickup event handler `%s`', format_exc())

def invoke(self, targets, on_final, on_target=None, cid=None, _utcnow=datetime.utcnow):
    """ Invokes targets, collecting their responses either as a whole or individually, and executes callback(s).
    """
    # type: (dict, list, list, str, object) -> None

    # Establish what our CID is ..
    cid = cid or self.cid

    # .. set up targets to invoke ..
    target_list = []
    for target_name, payload in targets.items():
        target = Target()
        target.name = target_name
        target.payload = payload
        target_list.append(target)

    # .. create an execution context ..
    ctx = ParallelCtx()
    ctx.cid = cid
    ctx.req_ts_utc = _utcnow()
    ctx.source_name = self.source.name
    ctx.target_list = target_list

    # .. on-final is always available ..
    ctx.on_final_list = [on_final] if isinstance(on_final, str) else on_final

    # .. but on-target may be None ..
    if on_target:
        ctx.on_target_list = [on_target] if isinstance(on_target, str) else on_target

    # .. invoke our implementation in background ..
    spawn_greenlet(self._invoke, ctx)

    # .. and return the CID to the caller.
    return cid

def invoke_callbacks(self, pickup_event, recipients):

    request = {
        'base_dir': pickup_event.base_dir,
        'file_name': pickup_event.file_name,
        'full_path': pickup_event.full_path,
        'stanza': pickup_event.stanza,
        'ts_utc': datetime.utcnow().isoformat(),
        'raw_data': pickup_event.raw_data,
        'data': pickup_event.data if pickup_event.data is not _singleton else None,
        'has_raw_data': pickup_event.has_raw_data,
        'has_data': pickup_event.has_data,
        'parse_error': pickup_event.parse_error,
    }

    try:
        for recipient in recipients:
            spawn_greenlet(self.server.invoke, recipient, request)
    except Exception, e:
        logger.warn(format_exc(e))

def run(self):
    try:

        # Add the statistics-related scheduler jobs to the ODB
        if self._add_startup_jobs:
            spawn_greenlet(self.add_startup_jobs)

        # All other jobs
        if self._add_scheduler_jobs:
            add_scheduler_jobs(self.api, self.odb, self.config.main.cluster.id, spawn=False)

        _sleep = self.sleep
        _sleep_time = self.sleep_time

        with self.lock:
            for job in sorted(self.jobs):
                if job.max_repeats_reached:
                    logger.info('Job `%s` already reached max runs count (%s UTC)', job.name, job.max_repeats_reached_at)
                else:
                    self.spawn_job(job)

        # Ok, we're good now.
        self.ready = True

        logger.info('Scheduler started')

        while self.keep_running:
            _sleep(_sleep_time)

            if self.iter_cb:
                self.iter_cb(*self.iter_cb_args)

    except Exception, e:
        logger.warn(format_exc(e))

def on_message(self, msg):
    if has_debug:
        logger.debug('Got broker message `%s`', msg)

    if msg.type == 'message':

        # Replace payload with stuff read off the KVDB in case this is where the actual message happens to reside.
        if msg.channel in NEEDS_TMP_KEY:
            tmp_key = '{}.tmp'.format(msg.data)

            if self.lua_container.run_lua('zato.rename_if_exists', [msg.data, tmp_key]) == CODE_NO_SUCH_FROM_KEY:
                payload = None
            else:
                payload = self.kvdb.conn.get(tmp_key)
                self.kvdb.conn.delete(tmp_key)  # Note that it would've expired anyway

                if not payload:
                    logger.warning('No KVDB payload for key `%s` (already expired?)', tmp_key)
                else:
                    payload = loads(payload)
        else:
            payload = loads(msg.data)

        if payload:
            payload = Bunch(payload)
            if has_debug:
                logger.debug('Got broker message payload `%s`', payload)

            callback = self.topic_callbacks[msg.channel]
            spawn_greenlet(callback, payload)

        else:
            if has_debug:
                logger.debug('No payload in msg: `%s`', msg)

def amqp_invoke_async(self, *args, **kwargs):
    spawn_greenlet(self._amqp_invoke_async, *args, **kwargs)

def set_up_pickup(self):

    empty = []

    # Fix up booleans and paths
    for stanza, stanza_config in self.pickup_config.items():

        # user_config_items is empty by default
        if not stanza_config:
            empty.append(stanza)
            continue

        stanza_config.read_on_pickup = asbool(stanza_config.get('read_on_pickup', True))
        stanza_config.parse_on_pickup = asbool(stanza_config.get('parse_on_pickup', True))
        stanza_config.delete_after_pick_up = asbool(stanza_config.get('delete_after_pick_up', True))
        stanza_config.case_insensitive = asbool(stanza_config.get('case_insensitive', True))
        stanza_config.pickup_from = absolutize(stanza_config.pickup_from, self.base_dir)
        stanza_config.is_service_hot_deploy = False

        mpt = stanza_config.get('move_processed_to')
        stanza_config.move_processed_to = absolutize(mpt, self.base_dir) if mpt else None

        services = stanza_config.get('services') or []
        stanza_config.services = [services] if not isinstance(services, list) else services

        topics = stanza_config.get('topics') or []
        stanza_config.topics = [topics] if not isinstance(topics, list) else topics

        flags = globre.EXACT

        if stanza_config.case_insensitive:
            flags |= IGNORECASE

        patterns = stanza_config.patterns
        stanza_config.patterns = [patterns] if not isinstance(patterns, list) else patterns
        stanza_config.patterns = [globre.compile(elem, flags) for elem in stanza_config.patterns]

        if not os.path.exists(stanza_config.pickup_from):
            logger.warn('Pickup dir `%s` does not exist (%s)', stanza_config.pickup_from, stanza)

    for item in empty:
        del self.pickup_config[item]

    # Ok, now that we have configured everything that pickup.conf had
    # we still need to make it aware of services and how to pick them up from FS.
    stanza = 'zato_internal_service_hot_deploy'
    stanza_config = Bunch({
        'pickup_from': absolutize(self.fs_server_config.hot_deploy.pickup_dir, self.repo_location),
        'patterns': [globre.compile('*.py', globre.EXACT | IGNORECASE)],
        'read_on_pickup': False,
        'parse_on_pickup': False,
        'delete_after_pick_up': self.hot_deploy_config.delete_after_pick_up,
        'is_service_hot_deploy': True,
    })

    self.pickup_config[stanza] = stanza_config
    self.pickup = PickupManager(self, self.pickup_config)

    spawn_greenlet(self.pickup.run)

def start_server(parallel_server, zato_deployment_key=None):

    # Easier to type
    self = parallel_server

    # This cannot be done in __init__ because each sub-process obviously has its own PID
    self.pid = os.getpid()

    # This also cannot be done in __init__ which doesn't have this variable yet
    self.is_first_worker = int(os.environ['ZATO_SERVER_WORKER_IDX']) == 0

    # Used later on
    use_tls = asbool(self.fs_server_config.crypto.use_tls)

    # Will be None if we are not running in background.
    if not zato_deployment_key:
        zato_deployment_key = '{}.{}'.format(datetime.utcnow().isoformat(), uuid4().hex)

    self.deployment_key = zato_deployment_key

    register_diag_handlers()

    # Create all POSIX IPC objects now that we have the deployment key
    self.shmem_size = int(float(self.fs_server_config.shmem.size) * 10**6)  # Convert to megabytes as integer
    self.server_startup_ipc.create(self.deployment_key, self.shmem_size)

    # Store the ODB configuration, create an ODB connection pool and have self.odb use it
    self.config.odb_data = self.get_config_odb_data(self)
    self.set_up_odb()

    # Now try grabbing the basic server's data from the ODB. No point
    # in doing anything else if we can't get past this point.
    server = self.odb.fetch_server(self.config.odb_data)

    if not server:
        raise Exception('Server does not exist in the ODB')

    # Set up the server-wide default lock manager
    odb_data = self.config.odb_data
    backend_type = 'fcntl' if odb_data.engine == 'sqlite' else odb_data.engine
    self.zato_lock_manager = LockManager(backend_type, 'zato', self.odb.session)

    # Just to make sure distributed locking is configured correctly
    with self.zato_lock_manager(uuid4().hex):
        pass

    # Basic metadata
    self.id = server.id
    self.name = server.name
    self.cluster_id = server.cluster_id
    self.cluster = self.odb.cluster
    self.worker_id = '{}.{}.{}.{}'.format(self.cluster_id, self.id, self.worker_pid, new_cid())

    # Looked up upfront here and assigned to services in their store
    self.enforce_service_invokes = asbool(self.fs_server_config.misc.enforce_service_invokes)

    # For server-to-server communication
    self.servers = Servers(self.odb, self.cluster.name, self.decrypt)

    logger.info('Preferred address of `%s@%s` (pid: %s) is `http%s://%s:%s`', self.name, self.cluster.name, self.pid,
        's' if use_tls else '', self.preferred_address, self.port)

    # Reads in all configuration from ODB
    self.worker_store = WorkerStore(self.config, self)
    self.worker_store.invoke_matcher.read_config(self.fs_server_config.invoke_patterns_allowed)
    self.worker_store.target_matcher.read_config(self.fs_server_config.invoke_target_patterns_allowed)
    self.set_up_config(server)

    # Deploys services
    is_first, locally_deployed = self._after_init_common(server)

    # Initializes worker store, including connectors
    self.worker_store.init()
    self.request_dispatcher_dispatch = self.worker_store.request_dispatcher.dispatch

    # Normalize hot-deploy configuration
    self.hot_deploy_config = Bunch()

    self.hot_deploy_config.work_dir = os.path.normpath(os.path.join(
        self.repo_location, self.fs_server_config.hot_deploy.work_dir))

    self.hot_deploy_config.backup_history = int(self.fs_server_config.hot_deploy.backup_history)
    self.hot_deploy_config.backup_format = self.fs_server_config.hot_deploy.backup_format

    # Configure remaining parts of SSO
    self.configure_sso()

    # Cannot be done in __init__ because self.sso_config is not available there yet
    salt_size = self.sso_config.hash_secret.salt_size
    self.crypto_manager.add_hash_scheme('zato.default', self.sso_config.hash_secret.rounds, salt_size)

    for name in ('current_work_dir', 'backup_work_dir', 'last_backup_work_dir', 'delete_after_pick_up'):

        # New in 2.0
        if name == 'delete_after_pick_up':
            value = asbool(self.fs_server_config.hot_deploy.get(name, True))
            self.hot_deploy_config[name] = value
        else:
            self.hot_deploy_config[name] = os.path.normpath(os.path.join(
                self.hot_deploy_config.work_dir, self.fs_server_config.hot_deploy[name]))

    broker_callbacks = {
        TOPICS[MESSAGE_TYPE.TO_PARALLEL_ANY]: self.worker_store.on_broker_msg,
        TOPICS[MESSAGE_TYPE.TO_PARALLEL_ALL]: self.worker_store.on_broker_msg,
    }

    self.broker_client = BrokerClient(self.kvdb, 'parallel', broker_callbacks, self.get_lua_programs())
    self.worker_store.set_broker_client(self.broker_client)

    self._after_init_accepted(locally_deployed)

    self.odb.server_up_down(
        server.token, SERVER_UP_STATUS.RUNNING, True, self.host, self.port, self.preferred_address, use_tls)

    if is_first:

        logger.info('First worker of `%s` is %s', self.name, self.pid)

        self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IN_PROCESS_FIRST, kwargs={
            'parallel_server': self,
        })

        # Startup services
        self.invoke_startup_services(is_first)
        spawn_greenlet(self.set_up_pickup)

        # IPC
        ipc_forwarder_name = '{}-{}'.format(self.cluster.name, self.name)
        ipc_forwarder_name = fs_safe_name(ipc_forwarder_name)
        self.ipc_forwarder.name = ipc_forwarder_name
        self.ipc_forwarder.pid = self.pid
        spawn_greenlet(self.ipc_forwarder.run)

        # Set up IBM MQ connections if that component is enabled
        if self.fs_server_config.component_enabled.ibm_mq:

            # Will block for a few seconds at most, until is_ok is returned
            # which indicates that a connector started or not.
            is_ok = self.start_ibm_mq_connector(int(self.fs_server_config.ibm_mq.ipc_tcp_start_port))

            if is_ok:
                self.create_initial_wmq_definitions(self.worker_store.worker_config.definition_wmq)
                self.create_initial_wmq_outconns(self.worker_store.worker_config.out_wmq)
                self.create_initial_wmq_channels(self.worker_store.worker_config.channel_wmq)

    else:
        self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IN_PROCESS_OTHER, kwargs={
            'parallel_server': self,
        })

    # IPC
    self.ipc_api.name = self.name
    self.ipc_api.pid = self.pid
    self.ipc_api.on_message_callback = self.worker_store.on_ipc_message
    spawn_greenlet(self.ipc_api.run)

    self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.AFTER_STARTED, kwargs={
        'parallel_server': self,
    })

    logger.info('Started `%s@%s` (pid: %s)', server.name, server.cluster.name, self.pid)

def run(self):
    try:
        for path in self.callback_config:
            if not os.path.exists(path):
                raise Exception('Path does not exist `{}`'.format(path))

            self.wd_to_path[infx.add_watch(self.infx_fd, path, infx.IN_CLOSE_WRITE | infx.IN_MOVE)] = path

        while self.keep_running:
            try:
                events = infx.get_events(self.infx_fd, 1.0)

                for event in events:
                    pe = PickupEvent()

                    try:
                        pe.base_dir = self.wd_to_path[event.wd]
                        config = self.callback_config[pe.base_dir]

                        if not self.should_pick_up(event.name, config.patterns):
                            continue

                        pe.file_name = event.name
                        pe.stanza = config.stanza
                        pe.full_path = os.path.join(pe.base_dir, event.name)

                        # If we are deploying services, the path is different than for other resources
                        if config.is_service_hot_deploy:
                            spawn_greenlet(hot_deploy, self.server, pe.file_name, pe.full_path, config.delete_after_pick_up)
                            continue

                        if config.read_on_pickup:

                            f = open(pe.full_path, 'rb')
                            pe.raw_data = f.read()
                            pe.has_raw_data = True
                            f.close()

                            if config.parse_on_pickup:
                                try:
                                    pe.data = self.get_parser(config.parse_with)(pe.raw_data)
                                    pe.has_data = True
                                except Exception, e:
                                    pe.parse_error = e
                            else:
                                pe.data = pe.raw_data

                        spawn_greenlet(self.invoke_callbacks, pe, config.services, config.topics)
                        self.post_handle(pe.full_path, config)

                    except Exception, e:
                        logger.warn(format_exc(e))

            except KeyboardInterrupt:
                self.keep_running = False

def start_server(parallel_server, zato_deployment_key=None):

    # Easier to type
    self = parallel_server  # type: ParallelServer

    # This cannot be done in __init__ because each sub-process obviously has its own PID
    self.pid = os.getpid()

    # This also cannot be done in __init__ which doesn't have this variable yet
    self.is_first_worker = int(os.environ['ZATO_SERVER_WORKER_IDX']) == 0

    # Used later on
    use_tls = asbool(self.fs_server_config.crypto.use_tls)

    # Will be None if we are not running in background.
    if not zato_deployment_key:
        zato_deployment_key = '{}.{}'.format(datetime.utcnow().isoformat(), uuid4().hex)

    self.deployment_key = zato_deployment_key

    register_diag_handlers()

    # Create all POSIX IPC objects now that we have the deployment key
    self.shmem_size = int(float(self.fs_server_config.shmem.size) * 10**6)  # Convert to megabytes as integer
    self.server_startup_ipc.create(self.deployment_key, self.shmem_size)
    self.connector_config_ipc.create(self.deployment_key, self.shmem_size)

    # Store the ODB configuration, create an ODB connection pool and have self.odb use it
    self.config.odb_data = self.get_config_odb_data(self)
    self.set_up_odb()

    # Now try grabbing the basic server's data from the ODB. No point
    # in doing anything else if we can't get past this point.
    server = self.odb.fetch_server(self.config.odb_data)

    if not server:
        raise Exception('Server does not exist in the ODB')

    # Set up the server-wide default lock manager
    odb_data = self.config.odb_data
    backend_type = 'fcntl' if odb_data.engine == 'sqlite' else odb_data.engine
    self.zato_lock_manager = LockManager(backend_type, 'zato', self.odb.session)

    # Just to make sure distributed locking is configured correctly
    with self.zato_lock_manager(uuid4().hex):
        pass

    # Basic metadata
    self.id = server.id
    self.name = server.name
    self.cluster_id = server.cluster_id
    self.cluster = self.odb.cluster
    self.worker_id = '{}.{}.{}.{}'.format(self.cluster_id, self.id, self.worker_pid, new_cid())

    # Looked up upfront here and assigned to services in their store
    self.enforce_service_invokes = asbool(self.fs_server_config.misc.enforce_service_invokes)

    # For server-to-server communication
    self.servers = Servers(self.odb, self.cluster.name, self.decrypt)

    logger.info('Preferred address of `%s@%s` (pid: %s) is `http%s://%s:%s`', self.name, self.cluster.name, self.pid,
        's' if use_tls else '', self.preferred_address, self.port)

    # Configure which HTTP methods can be invoked via REST or SOAP channels
    methods_allowed = self.fs_server_config.http.methods_allowed
    methods_allowed = methods_allowed if isinstance(methods_allowed, list) else [methods_allowed]
    self.http_methods_allowed.extend(methods_allowed)

    # As above, as a regular expression to be used in pattern matching
    http_methods_allowed_re = '|'.join(self.http_methods_allowed)
    self.http_methods_allowed_re = '({})'.format(http_methods_allowed_re)

    # Reads in all configuration from ODB
    self.worker_store = WorkerStore(self.config, self)
    self.worker_store.invoke_matcher.read_config(self.fs_server_config.invoke_patterns_allowed)
    self.worker_store.target_matcher.read_config(self.fs_server_config.invoke_target_patterns_allowed)
    self.set_up_config(server)

    # Normalize hot-deploy configuration
    self.hot_deploy_config = Bunch()
    self.hot_deploy_config.pickup_dir = absolutize(self.fs_server_config.hot_deploy.pickup_dir, self.repo_location)
    self.hot_deploy_config.work_dir = os.path.normpath(os.path.join(
        self.repo_location, self.fs_server_config.hot_deploy.work_dir))
    self.hot_deploy_config.backup_history = int(self.fs_server_config.hot_deploy.backup_history)
    self.hot_deploy_config.backup_format = self.fs_server_config.hot_deploy.backup_format

    # Added in 3.1, hence optional
    max_batch_size = int(self.fs_server_config.hot_deploy.get('max_batch_size', 1000))

    # Turn it into megabytes
    max_batch_size = max_batch_size * 1000

    # Finally, assign it to ServiceStore
    self.service_store.max_batch_size = max_batch_size

    # Deploys services
    is_first, locally_deployed = self._after_init_common(server)

    # Initializes worker store, including connectors
    self.worker_store.init()
    self.request_dispatcher_dispatch = self.worker_store.request_dispatcher.dispatch

    # Configure remaining parts of SSO
    self.configure_sso()

    # Cannot be done in __init__ because self.sso_config is not available there yet
    salt_size = self.sso_config.hash_secret.salt_size
    self.crypto_manager.add_hash_scheme('zato.default', self.sso_config.hash_secret.rounds, salt_size)

    for name in ('current_work_dir', 'backup_work_dir', 'last_backup_work_dir', 'delete_after_pickup'):

        # New in 2.0
        if name == 'delete_after_pickup':

            # For backward compatibility, we need to support both names
            old_name = 'delete_after_pick_up'

            if old_name in self.fs_server_config.hot_deploy:
                _name = old_name
            else:
                _name = name

            value = asbool(self.fs_server_config.hot_deploy.get(_name, True))
            self.hot_deploy_config[name] = value
        else:
            self.hot_deploy_config[name] = os.path.normpath(os.path.join(
                self.hot_deploy_config.work_dir, self.fs_server_config.hot_deploy[name]))

    broker_callbacks = {
        TOPICS[MESSAGE_TYPE.TO_PARALLEL_ANY]: self.worker_store.on_broker_msg,
        TOPICS[MESSAGE_TYPE.TO_PARALLEL_ALL]: self.worker_store.on_broker_msg,
    }

    self.broker_client = BrokerClient(self.kvdb, 'parallel', broker_callbacks, self.get_lua_programs())
    self.worker_store.set_broker_client(self.broker_client)

    # Make sure that the broker client's connection is ready before continuing,
    # to rule out edge cases where, for instance, hot deployment would
    # try to publish a locally found package (one of extra packages found)
    # before the client's thread connected to KVDB.
    if not self.broker_client.ready:
        start = now = datetime.utcnow()
        max_seconds = 120
        until = now + timedelta(seconds=max_seconds)

        while not self.broker_client.ready:
            now = datetime.utcnow()
            delta = (now - start).total_seconds()
            if now < until:
                # Do not log too early so as not to clutter logs
                if delta > 2:
                    logger.info('Waiting for broker client to become ready (%s, max:%s)', delta, max_seconds)
                gevent.sleep(0.5)
            else:
                raise Exception('Broker client did not become ready within {} seconds'.format(max_seconds))

    self._after_init_accepted(locally_deployed)

    self.odb.server_up_down(
        server.token, SERVER_UP_STATUS.RUNNING, True, self.host, self.port, self.preferred_address, use_tls)

    if is_first:

        logger.info('First worker of `%s` is %s', self.name, self.pid)

        self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IN_PROCESS_FIRST, kwargs={
            'parallel_server': self,
        })

        # Clean up any old WSX connections possibly registered for this server
        # which may still be lingering around, for instance, if the server was previously
        # shut down forcibly and did not have an opportunity to run self.cleanup_on_stop
        self.cleanup_wsx()

        # Startup services
        self.invoke_startup_services(is_first)
        spawn_greenlet(self.set_up_pickup)

        # Set up subprocess-based IBM MQ connections if that component is enabled
        if self.fs_server_config.component_enabled.ibm_mq:

            # Will block for a few seconds at most, until is_ok is returned
            # which indicates that a connector started or not.
            is_ok = self.connector_ibm_mq.start_ibm_mq_connector(int(self.fs_server_config.ibm_mq.ipc_tcp_start_port))

            try:
                if is_ok:
                    self.connector_ibm_mq.create_initial_wmq_definitions(self.worker_store.worker_config.definition_wmq)
                    self.connector_ibm_mq.create_initial_wmq_outconns(self.worker_store.worker_config.out_wmq)
                    self.connector_ibm_mq.create_initial_wmq_channels(self.worker_store.worker_config.channel_wmq)
            except Exception as e:
                logger.warn('Could not create initial IBM MQ objects, e:`%s`', e)

        # Set up subprocess-based SFTP connections
        is_ok = self.connector_sftp.start_sftp_connector(int(self.fs_server_config.ibm_mq.ipc_tcp_start_port))
        if is_ok:
            self.connector_sftp.create_initial_sftp_outconns(self.worker_store.worker_config.out_sftp)

    else:
        self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IN_PROCESS_OTHER, kwargs={
            'parallel_server': self,
        })

    # IPC
    self.ipc_api.name = self.ipc_api.get_endpoint_name(self.cluster.name, self.name, self.pid)
    self.ipc_api.pid = self.pid
    self.ipc_api.on_message_callback = self.worker_store.on_ipc_message
    spawn_greenlet(self.ipc_api.run)

    self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.AFTER_STARTED, kwargs={
        'parallel_server': self,
    })

    logger.info('Started `%s@%s` (pid: %s)', server.name, server.cluster.name, self.pid)

def start(self):
    spawn_greenlet(self._start)

def __init__(self, config):
    self.config = config
    self.is_connected = False

    spawn_greenlet(self._init)

def start_server(parallel_server, zato_deployment_key=None):

    # Easier to type
    self = parallel_server

    # This cannot be done in __init__ because each sub-process obviously has its own PID
    self.pid = os.getpid()

    # Used later on
    use_tls = asbool(self.fs_server_config.crypto.use_tls)

    # Will be None if we are not running in background.
    if not zato_deployment_key:
        zato_deployment_key = '{}.{}'.format(datetime.utcnow().isoformat(), uuid4().hex)

    self.deployment_key = zato_deployment_key

    register_diag_handlers()

    # Store the ODB configuration, create an ODB connection pool and have self.odb use it
    self.config.odb_data = self.get_config_odb_data(self)
    self.set_odb_pool()

    # Now try grabbing the basic server's data from the ODB. No point
    # in doing anything else if we can't get past this point.
    server = self.odb.fetch_server(self.config.odb_data)

    if not server:
        raise Exception('Server does not exist in the ODB')

    # Set up the server-wide default lock manager
    odb_data = self.config.odb_data
    backend_type = 'fcntl' if odb_data.engine == 'sqlite' else odb_data.engine
    self.zato_lock_manager = LockManager(backend_type, 'zato', self.odb.session)

    # Just to make sure distributed locking is configured correctly
    with self.zato_lock_manager(uuid4().hex):
        pass

    # Basic metadata
    self.id = server.id
    self.name = server.name
    self.cluster_id = server.cluster_id
    self.cluster = self.odb.cluster

    # Looked up upfront here and assigned to services in their store
    self.enforce_service_invokes = asbool(self.fs_server_config.misc.enforce_service_invokes)

    # For server-to-server communication
    self.servers = Servers(self.odb, self.cluster.name)

    logger.info('Preferred address of `%s@%s` (pid: %s) is `http%s://%s:%s`', self.name, self.cluster.name, self.pid,
        's' if use_tls else '', self.preferred_address, self.port)

    # Reads in all configuration from ODB
    self.worker_store = WorkerStore(self.config, self)
    self.worker_store.invoke_matcher.read_config(self.fs_server_config.invoke_patterns_allowed)
    self.worker_store.target_matcher.read_config(self.fs_server_config.invoke_target_patterns_allowed)
    self.set_up_config(server)

    # Deploys services
    is_first, locally_deployed = self._after_init_common(server)

    # Initializes worker store, including connectors
    self.worker_store.init()
    self.request_dispatcher_dispatch = self.worker_store.request_dispatcher.dispatch

    # Normalize hot-deploy configuration
    self.hot_deploy_config = Bunch()

    self.hot_deploy_config.work_dir = os.path.normpath(os.path.join(
        self.repo_location, self.fs_server_config.hot_deploy.work_dir))

    self.hot_deploy_config.backup_history = int(self.fs_server_config.hot_deploy.backup_history)
    self.hot_deploy_config.backup_format = self.fs_server_config.hot_deploy.backup_format

    for name in ('current_work_dir', 'backup_work_dir', 'last_backup_work_dir', 'delete_after_pick_up'):

        # New in 2.0
        if name == 'delete_after_pick_up':
            value = asbool(self.fs_server_config.hot_deploy.get(name, True))
            self.hot_deploy_config[name] = value
        else:
            self.hot_deploy_config[name] = os.path.normpath(os.path.join(
                self.hot_deploy_config.work_dir, self.fs_server_config.hot_deploy[name]))

    self._after_init_accepted(locally_deployed)

    broker_callbacks = {
        TOPICS[MESSAGE_TYPE.TO_PARALLEL_ANY]: self.worker_store.on_broker_msg,
        TOPICS[MESSAGE_TYPE.TO_PARALLEL_ALL]: self.worker_store.on_broker_msg,
    }

    self.broker_client = BrokerClient(self.kvdb, 'parallel', broker_callbacks, self.get_lua_programs())
    self.worker_store.set_broker_client(self.broker_client)

    self.odb.server_up_down(
        server.token, SERVER_UP_STATUS.RUNNING, True, self.host, self.port, self.preferred_address, use_tls)

    # Startup services
    if is_first:
        self.invoke_startup_services(is_first)
        spawn_greenlet(self.set_up_pickup)

    # IPC
    if is_first:
        self.ipc_forwarder.name = self.name
        self.ipc_forwarder.pid = self.pid
        spawn_greenlet(self.ipc_forwarder.run)

    # IPC
    self.ipc_api.name = self.name
    self.ipc_api.pid = self.pid
    self.ipc_api.on_message_callback = self.worker_store.on_ipc_message
    spawn_greenlet(self.ipc_api.run)

    logger.info('Started `%s@%s` (pid: %s)', server.name, server.cluster.name, self.pid)

def _spawn(self, *args, **kwargs):
    """ As in the Job class, this is a thin wrapper so that it is easier
    to mock this method out in unit-tests.
    """
    return spawn_greenlet(*args, **kwargs)

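# The docstring above mentions that _spawn exists so it can be mocked out in unit-tests.
# Below is a minimal, self-contained sketch of what such a test could look like, using the
# standard library's unittest.mock. The _FakeScheduler class is a made-up stand-in used only
# to keep the sketch runnable; it is not part of the actual codebase.

from unittest import TestCase, main
from unittest.mock import patch

class _FakeScheduler:
    """ A stand-in object with the same _spawn-calling shape as the real class (sketch only). """

    def _spawn(self, *args, **kwargs):
        raise AssertionError('Should have been mocked out')

    def start(self):
        return self._spawn(lambda: None)

class SpawnMockTestCase(TestCase):
    def test_spawn_is_mocked(self):
        instance = _FakeScheduler()
        with patch.object(instance, '_spawn') as mocked_spawn:
            instance.start()
            mocked_spawn.assert_called_once()

if __name__ == '__main__':
    main()
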
def __exit__(self, *args, **kwargs):
    spawn_greenlet(self.exit_func, self.exit_verb, self.predicate_func)

def run(self):
    self.subscriber = Subscriber(self.on_message_callback, self.name, self.pid)
    spawn_greenlet(self.subscriber.serve_forever)