def run(base_dir, start_gunicorn_app=True):

    # Filter out warnings we are not interested in
    warnings.filterwarnings('ignore', 'Mean of empty slice.')
    warnings.filterwarnings('ignore', 'invalid value encountered in double_scalars')

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocesses
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName(TRACE1, 'TRACE1')
    with open(os.path.join(repo_location, 'logging.conf')) as f:
        dictConfig(yaml.load(f))

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    config = get_config(repo_location, 'server.conf')

    # New in 2.0 - Start monitoring as soon as possible
    if config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(
            config.newrelic.config, config.newrelic.environment or None, config.newrelic.ignore_errors or None,
            config.newrelic.log_file or None, config.newrelic.log_level or None)

    # New in 2.0 - override gunicorn-set Server HTTP header
    gunicorn.SERVER_SOFTWARE = config.misc.get('http_server_header', 'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(config.kvdb)
    kvdb_logger.info('Master process config `%s`', kvdb_config)

    # New in 2.0, hence optional
    user_locale = config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
            locale.currency(value, grouping=True).decode('utf-8'))

    # Spring Python
    app_context = get_app_context(config)

    # Makes queries against Postgres asynchronous
    if asbool(config.odb.use_async_driver) and config.odb.engine == 'postgresql':
        make_psycopg_green()

    # New in 2.0 - Put HTTP_PROXY in os.environ.
    http_proxy = config.misc.get('http_proxy', False)
    if http_proxy:
        os.environ['http_proxy'] = http_proxy

    crypto_manager = get_crypto_manager(repo_location, app_context, config)

    parallel_server = app_context.get_object('parallel_server')

    zato_gunicorn_app = ZatoGunicornApplication(parallel_server, repo_location, config.main, config.crypto)

    parallel_server.crypto_manager = crypto_manager
    parallel_server.odb_data = config.odb
    parallel_server.host = zato_gunicorn_app.zato_host
    parallel_server.port = zato_gunicorn_app.zato_port
    parallel_server.repo_location = repo_location
    parallel_server.base_dir = base_dir
    parallel_server.tls_dir = os.path.join(parallel_server.base_dir, 'config', 'repo', 'tls')
    parallel_server.fs_server_config = config
    parallel_server.user_config.update(config.user_config_items)
    parallel_server.startup_jobs = app_context.get_object('startup_jobs')
    parallel_server.app_context = app_context

    # Remove all locks possibly left over by previous server instances
    kvdb = app_context.get_object('kvdb')
    kvdb.component = 'master-proc'
    clear_locks(kvdb, config.main.token, config.kvdb, crypto_manager.decrypt)

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = config.get('sentry')

    dsn = sentry_config.pop('dsn', None)
    if dsn:
        from raven import Client
        from raven.conf import setup_logging
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)
        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        logger = logging.getLogger('')
        logger.addHandler(handler)

        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(os.path.join(base_dir, config.profiler.profiler_dir))
        parallel_server.on_wsgi_request = ProfileMiddleware(
            parallel_server.on_wsgi_request,
            log_filename=os.path.join(profiler_dir, config.profiler.log_filename),
            cachegrind_filename=os.path.join(profiler_dir, config.profiler.cachegrind_filename),
            discard_first_request=config.profiler.discard_first_request,
            flush_at_shutdown=config.profiler.flush_at_shutdown,
            path=config.profiler.url_path,
            unwind=config.profiler.unwind)

    # Run the app at last, provided we were invoked from the command line
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
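# A minimal sketch of how an entrypoint such as run() above might be invoked
# from a command-line wrapper. The module layout and the --wsgi-app flag are
# illustrative assumptions, not part of the actual Zato startup scripts.
import sys

def main():
    # base_dir is the server's top-level directory, the one containing config/repo
    base_dir = sys.argv[1]

    # Passing start_gunicorn_app=False returns the WSGI application instead of
    # blocking inside gunicorn, which is handy in tests
    wants_wsgi_app = '--wsgi-app' in sys.argv
    result = run(base_dir, start_gunicorn_app=not wants_wsgi_app)

    if wants_wsgi_app:
        print('WSGI application: {!r}'.format(result))

if __name__ == '__main__':
    main()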
def run(base_dir, start_gunicorn_app=True):

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocesses
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName(TRACE1, 'TRACE1')
    logging.config.fileConfig(os.path.join(repo_location, 'logging.conf'))

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    config = get_config(repo_location, 'server.conf')

    # New in 2.0 - Start monitoring as soon as possible
    if config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(
            config.newrelic.config, config.newrelic.environment or None, config.newrelic.ignore_errors or None,
            config.newrelic.log_file or None, config.newrelic.log_level or None)

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(config.kvdb)
    kvdb_logger.info('Master process config `%s`', kvdb_config)

    # New in 2.0, hence optional
    user_locale = config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
            locale.currency(value, grouping=True).decode('utf-8'))

    # Spring Python
    app_context = get_app_context(config)

    # Makes queries against Postgres asynchronous
    if asbool(config.odb.use_async_driver) and config.odb.engine == 'postgresql':
        make_psycopg_green()

    # New in 2.0 - Put HTTP_PROXY in os.environ.
    http_proxy = config.misc.get('http_proxy', False)
    if http_proxy:
        os.environ['http_proxy'] = http_proxy

    crypto_manager = get_crypto_manager(repo_location, app_context, config)

    parallel_server = app_context.get_object('parallel_server')

    zato_gunicorn_app = ZatoGunicornApplication(parallel_server, repo_location, config.main, config.crypto)

    parallel_server.crypto_manager = crypto_manager
    parallel_server.odb_data = config.odb
    parallel_server.host = zato_gunicorn_app.zato_host
    parallel_server.port = zato_gunicorn_app.zato_port
    parallel_server.repo_location = repo_location
    parallel_server.base_dir = base_dir
    parallel_server.fs_server_config = config
    parallel_server.user_config.update(config.user_config_items)
    parallel_server.startup_jobs = app_context.get_object('startup_jobs')
    parallel_server.app_context = app_context

    # Remove all locks possibly left over by previous server instances
    kvdb = app_context.get_object('kvdb')
    kvdb.component = 'master-proc'
    clear_locks(kvdb, config.main.token, config.kvdb, crypto_manager.decrypt)

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = config.get('profiler', {}).get('enabled', False)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(os.path.join(base_dir, config.profiler.profiler_dir))
        parallel_server.on_wsgi_request = ProfileMiddleware(
            parallel_server.on_wsgi_request,
            log_filename=os.path.join(profiler_dir, config.profiler.log_filename),
            cachegrind_filename=os.path.join(profiler_dir, config.profiler.cachegrind_filename),
            discard_first_request=config.profiler.discard_first_request,
            flush_at_shutdown=config.profiler.flush_at_shutdown,
            path=config.profiler.url_path,
            unwind=config.profiler.unwind)

    # Run the app at last, provided we were invoked from the command line
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
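# asbool, used throughout these functions, converts common string spellings of
# booleans as found in .conf files. A sketch of its behaviour, modeled on the
# paste.deploy converter that this code relies on - an approximation for
# illustration, not the exact library source.
def asbool_sketch(value):
    if isinstance(value, str):
        value = value.strip().lower()
        # Strings such as those written by hand in server.conf
        if value in ('true', 'yes', 'on', 'y', 't', '1'):
            return True
        if value in ('false', 'no', 'off', 'n', 'f', '0'):
            return False
        raise ValueError('String is not true/false: {!r}'.format(value))
    return bool(value)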
def start_server(parallel_server, zato_deployment_key=None):

    # Easier to type
    self = parallel_server # type: ParallelServer

    # This cannot be done in __init__ because each sub-process obviously has its own PID
    self.pid = os.getpid()

    # This also cannot be done in __init__ which doesn't have this variable yet
    self.is_first_worker = int(os.environ['ZATO_SERVER_WORKER_IDX']) == 0

    # Used later on
    use_tls = asbool(self.fs_server_config.crypto.use_tls)

    # Will be None if we are not running in the background.
    if not zato_deployment_key:
        zato_deployment_key = '{}.{}'.format(datetime.utcnow().isoformat(), uuid4().hex)

    self.deployment_key = zato_deployment_key

    register_diag_handlers()

    # Create all POSIX IPC objects now that we have the deployment key
    self.shmem_size = int(float(self.fs_server_config.shmem.size) * 10**6) # The size is configured in MB, convert it to bytes

    self.server_startup_ipc.create(self.deployment_key, self.shmem_size)
    self.connector_config_ipc.create(self.deployment_key, self.shmem_size)

    # Store the ODB configuration, create an ODB connection pool and have self.odb use it
    self.config.odb_data = self.get_config_odb_data(self)
    self.set_up_odb()

    # Now try grabbing the basic server's data from the ODB. No point
    # in doing anything else if we can't get past this point.
    server = self.odb.fetch_server(self.config.odb_data)

    if not server:
        raise Exception('Server does not exist in the ODB')

    # Set up the server-wide default lock manager
    odb_data = self.config.odb_data
    backend_type = 'fcntl' if odb_data.engine == 'sqlite' else odb_data.engine
    self.zato_lock_manager = LockManager(backend_type, 'zato', self.odb.session)

    # Just to make sure distributed locking is configured correctly
    with self.zato_lock_manager(uuid4().hex):
        pass

    # Basic metadata
    self.id = server.id
    self.name = server.name
    self.cluster_id = server.cluster_id
    self.cluster = self.odb.cluster
    self.worker_id = '{}.{}.{}.{}'.format(self.cluster_id, self.id, self.worker_pid, new_cid())

    # Looked up upfront here and assigned to services in their store
    self.enforce_service_invokes = asbool(self.fs_server_config.misc.enforce_service_invokes)

    # For server-to-server communication
    self.servers = Servers(self.odb, self.cluster.name, self.decrypt)
    logger.info('Preferred address of `%s@%s` (pid: %s) is `http%s://%s:%s`', self.name, self.cluster.name,
        self.pid, 's' if use_tls else '', self.preferred_address, self.port)

    # Configure which HTTP methods can be invoked via REST or SOAP channels
    methods_allowed = self.fs_server_config.http.methods_allowed
    methods_allowed = methods_allowed if isinstance(methods_allowed, list) else [methods_allowed]
    self.http_methods_allowed.extend(methods_allowed)

    # As above, as a regular expression to be used in pattern matching
    http_methods_allowed_re = '|'.join(self.http_methods_allowed)
    self.http_methods_allowed_re = '({})'.format(http_methods_allowed_re)

    # Reads in all configuration from ODB
    self.worker_store = WorkerStore(self.config, self)
    self.worker_store.invoke_matcher.read_config(self.fs_server_config.invoke_patterns_allowed)
    self.worker_store.target_matcher.read_config(self.fs_server_config.invoke_target_patterns_allowed)
    self.set_up_config(server)

    # Normalize hot-deploy configuration
    self.hot_deploy_config = Bunch()

    self.hot_deploy_config.pickup_dir = absolutize(self.fs_server_config.hot_deploy.pickup_dir, self.repo_location)

    self.hot_deploy_config.work_dir = os.path.normpath(os.path.join(
        self.repo_location, self.fs_server_config.hot_deploy.work_dir))

    self.hot_deploy_config.backup_history = int(self.fs_server_config.hot_deploy.backup_history)
    self.hot_deploy_config.backup_format = self.fs_server_config.hot_deploy.backup_format

    # Added in 3.1, hence optional
    max_batch_size = int(self.fs_server_config.hot_deploy.get('max_batch_size', 1000))

    # The value is configured in kilobytes, turn it into bytes
    max_batch_size = max_batch_size * 1000

    # Finally, assign it to ServiceStore
    self.service_store.max_batch_size = max_batch_size

    # Deploys services
    is_first, locally_deployed = self._after_init_common(server)

    # Initializes worker store, including connectors
    self.worker_store.init()
    self.request_dispatcher_dispatch = self.worker_store.request_dispatcher.dispatch

    # Configure remaining parts of SSO
    self.configure_sso()

    # Cannot be done in __init__ because self.sso_config is not available there yet
    salt_size = self.sso_config.hash_secret.salt_size
    self.crypto_manager.add_hash_scheme('zato.default', self.sso_config.hash_secret.rounds, salt_size)

    for name in ('current_work_dir', 'backup_work_dir', 'last_backup_work_dir', 'delete_after_pickup'):

        # New in 2.0
        if name == 'delete_after_pickup':

            # For backward compatibility, we need to support both names
            old_name = 'delete_after_pick_up'

            if old_name in self.fs_server_config.hot_deploy:
                _name = old_name
            else:
                _name = name

            value = asbool(self.fs_server_config.hot_deploy.get(_name, True))
            self.hot_deploy_config[name] = value
        else:
            self.hot_deploy_config[name] = os.path.normpath(os.path.join(
                self.hot_deploy_config.work_dir, self.fs_server_config.hot_deploy[name]))

    broker_callbacks = {
        TOPICS[MESSAGE_TYPE.TO_PARALLEL_ANY]: self.worker_store.on_broker_msg,
        TOPICS[MESSAGE_TYPE.TO_PARALLEL_ALL]: self.worker_store.on_broker_msg,
    }

    self.broker_client = BrokerClient(self.kvdb, 'parallel', broker_callbacks, self.get_lua_programs())
    self.worker_store.set_broker_client(self.broker_client)

    # Make sure that the broker client's connection is ready before continuing,
    # to rule out edge cases where, for instance, hot deployment would
    # try to publish a locally found package (one of the extra packages found)
    # before the client's thread connected to KVDB.
    if not self.broker_client.ready:
        start = now = datetime.utcnow()
        max_seconds = 120
        until = now + timedelta(seconds=max_seconds)

        while not self.broker_client.ready:
            now = datetime.utcnow()
            delta = (now - start).total_seconds()
            if now < until:
                # Do not log too early so as not to clutter logs
                if delta > 2:
                    logger.info('Waiting for broker client to become ready (%s, max:%s)', delta, max_seconds)
                gevent.sleep(0.5)
            else:
                raise Exception('Broker client did not become ready within {} seconds'.format(max_seconds))

    self._after_init_accepted(locally_deployed)

    self.odb.server_up_down(
        server.token, SERVER_UP_STATUS.RUNNING, True, self.host, self.port, self.preferred_address, use_tls)

    if is_first:

        logger.info('First worker of `%s` is %s', self.name, self.pid)

        self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IN_PROCESS_FIRST, kwargs={
            'parallel_server': self,
        })

        # Clean up any old WSX connections possibly registered for this server
        # which may still linger around, for instance, if the server was previously
        # shut down forcibly and did not have an opportunity to run self.cleanup_on_stop
        self.cleanup_wsx()

        # Startup services
        self.invoke_startup_services(is_first)
        spawn_greenlet(self.set_up_pickup)

        # Set up subprocess-based IBM MQ connections if that component is enabled
        if self.fs_server_config.component_enabled.ibm_mq:

            # Will block for a few seconds at most, until is_ok is returned,
            # indicating whether the connector started or not.
            is_ok = self.connector_ibm_mq.start_ibm_mq_connector(int(self.fs_server_config.ibm_mq.ipc_tcp_start_port))

            try:
                if is_ok:
                    self.connector_ibm_mq.create_initial_wmq_definitions(self.worker_store.worker_config.definition_wmq)
                    self.connector_ibm_mq.create_initial_wmq_outconns(self.worker_store.worker_config.out_wmq)
                    self.connector_ibm_mq.create_initial_wmq_channels(self.worker_store.worker_config.channel_wmq)
            except Exception as e:
                logger.warn('Could not create initial IBM MQ objects, e:`%s`', e)

        # Set up subprocess-based SFTP connections
        is_ok = self.connector_sftp.start_sftp_connector(int(self.fs_server_config.ibm_mq.ipc_tcp_start_port))
        if is_ok:
            self.connector_sftp.create_initial_sftp_outconns(self.worker_store.worker_config.out_sftp)

    else:
        self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IN_PROCESS_OTHER, kwargs={
            'parallel_server': self,
        })

    # IPC
    self.ipc_api.name = self.ipc_api.get_endpoint_name(self.cluster.name, self.name, self.pid)
    self.ipc_api.pid = self.pid
    self.ipc_api.on_message_callback = self.worker_store.on_ipc_message
    spawn_greenlet(self.ipc_api.run)

    self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.AFTER_STARTED, kwargs={
        'parallel_server': self,
    })

    logger.info('Started `%s@%s` (pid: %s)', server.name, server.cluster.name, self.pid)
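# A reusable sketch of the readiness-wait pattern used above for the broker
# client: poll a flag, log only after an initial grace period so as not to
# clutter logs, and fail hard on timeout. The helper name and defaults are
# illustrative assumptions, not Zato API.
import logging
from datetime import datetime, timedelta

import gevent

logger = logging.getLogger(__name__)

def wait_until_ready(is_ready, name, max_seconds=120, poll_interval=0.5, quiet_seconds=2):
    start = datetime.utcnow()
    until = start + timedelta(seconds=max_seconds)

    while not is_ready():
        now = datetime.utcnow()
        if now >= until:
            raise Exception('{} did not become ready within {} seconds'.format(name, max_seconds))

        delta = (now - start).total_seconds()

        # Do not log too early so as not to clutter logs
        if delta > quiet_seconds:
            logger.info('Waiting for %s to become ready (%s, max:%s)', name, delta, max_seconds)

        gevent.sleep(poll_interval)

# Usage, mirroring the broker-client wait above:
# wait_until_ready(lambda: broker_client.ready, 'broker client')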
def start_server(parallel_server, zato_deployment_key=None):

    # Easier to type
    self = parallel_server

    # This cannot be done in __init__ because each sub-process obviously has its own PID
    self.pid = os.getpid()

    # This also cannot be done in __init__ which doesn't have this variable yet
    self.is_first_worker = int(os.environ['ZATO_SERVER_WORKER_IDX']) == 0

    # Used later on
    use_tls = asbool(self.fs_server_config.crypto.use_tls)

    # Will be None if we are not running in the background.
    if not zato_deployment_key:
        zato_deployment_key = '{}.{}'.format(datetime.utcnow().isoformat(), uuid4().hex)

    self.deployment_key = zato_deployment_key

    register_diag_handlers()

    # Create all POSIX IPC objects now that we have the deployment key
    self.shmem_size = int(float(self.fs_server_config.shmem.size) * 10**6) # The size is configured in MB, convert it to bytes
    self.server_startup_ipc.create(self.deployment_key, self.shmem_size)

    # Store the ODB configuration, create an ODB connection pool and have self.odb use it
    self.config.odb_data = self.get_config_odb_data(self)
    self.set_up_odb()

    # Now try grabbing the basic server's data from the ODB. No point
    # in doing anything else if we can't get past this point.
    server = self.odb.fetch_server(self.config.odb_data)

    if not server:
        raise Exception('Server does not exist in the ODB')

    # Set up the server-wide default lock manager
    odb_data = self.config.odb_data
    backend_type = 'fcntl' if odb_data.engine == 'sqlite' else odb_data.engine
    self.zato_lock_manager = LockManager(backend_type, 'zato', self.odb.session)

    # Just to make sure distributed locking is configured correctly
    with self.zato_lock_manager(uuid4().hex):
        pass

    # Basic metadata
    self.id = server.id
    self.name = server.name
    self.cluster_id = server.cluster_id
    self.cluster = self.odb.cluster
    self.worker_id = '{}.{}.{}.{}'.format(self.cluster_id, self.id, self.worker_pid, new_cid())

    # Looked up upfront here and assigned to services in their store
    self.enforce_service_invokes = asbool(self.fs_server_config.misc.enforce_service_invokes)

    # For server-to-server communication
    self.servers = Servers(self.odb, self.cluster.name, self.decrypt)

    logger.info('Preferred address of `%s@%s` (pid: %s) is `http%s://%s:%s`', self.name, self.cluster.name,
        self.pid, 's' if use_tls else '', self.preferred_address, self.port)

    # Reads in all configuration from ODB
    self.worker_store = WorkerStore(self.config, self)
    self.worker_store.invoke_matcher.read_config(self.fs_server_config.invoke_patterns_allowed)
    self.worker_store.target_matcher.read_config(self.fs_server_config.invoke_target_patterns_allowed)
    self.set_up_config(server)

    # Deploys services
    is_first, locally_deployed = self._after_init_common(server)

    # Initializes worker store, including connectors
    self.worker_store.init()
    self.request_dispatcher_dispatch = self.worker_store.request_dispatcher.dispatch

    # Normalize hot-deploy configuration
    self.hot_deploy_config = Bunch()
    self.hot_deploy_config.work_dir = os.path.normpath(os.path.join(
        self.repo_location, self.fs_server_config.hot_deploy.work_dir))
    self.hot_deploy_config.backup_history = int(self.fs_server_config.hot_deploy.backup_history)
    self.hot_deploy_config.backup_format = self.fs_server_config.hot_deploy.backup_format

    # Configure remaining parts of SSO
    self.configure_sso()

    # Cannot be done in __init__ because self.sso_config is not available there yet
    salt_size = self.sso_config.hash_secret.salt_size
    self.crypto_manager.add_hash_scheme('zato.default', self.sso_config.hash_secret.rounds, salt_size)

    for name in ('current_work_dir', 'backup_work_dir', 'last_backup_work_dir', 'delete_after_pick_up'):

        # New in 2.0
        if name == 'delete_after_pick_up':
            value = asbool(self.fs_server_config.hot_deploy.get(name, True))
            self.hot_deploy_config[name] = value
        else:
            self.hot_deploy_config[name] = os.path.normpath(os.path.join(
                self.hot_deploy_config.work_dir, self.fs_server_config.hot_deploy[name]))

    broker_callbacks = {
        TOPICS[MESSAGE_TYPE.TO_PARALLEL_ANY]: self.worker_store.on_broker_msg,
        TOPICS[MESSAGE_TYPE.TO_PARALLEL_ALL]: self.worker_store.on_broker_msg,
    }

    self.broker_client = BrokerClient(self.kvdb, 'parallel', broker_callbacks, self.get_lua_programs())
    self.worker_store.set_broker_client(self.broker_client)

    self._after_init_accepted(locally_deployed)

    self.odb.server_up_down(
        server.token, SERVER_UP_STATUS.RUNNING, True, self.host, self.port, self.preferred_address, use_tls)

    if is_first:

        logger.info('First worker of `%s` is %s', self.name, self.pid)

        self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IN_PROCESS_FIRST, kwargs={
            'parallel_server': self,
        })

        # Startup services
        self.invoke_startup_services(is_first)
        spawn_greenlet(self.set_up_pickup)

        # IPC
        ipc_forwarder_name = '{}-{}'.format(self.cluster.name, self.name)
        ipc_forwarder_name = fs_safe_name(ipc_forwarder_name)
        self.ipc_forwarder.name = ipc_forwarder_name
        self.ipc_forwarder.pid = self.pid
        spawn_greenlet(self.ipc_forwarder.run)

        # Set up IBM MQ connections if that component is enabled
        if self.fs_server_config.component_enabled.ibm_mq:

            # Will block for a few seconds at most, until is_ok is returned,
            # indicating whether the connector started or not.
            is_ok = self.start_ibm_mq_connector(int(self.fs_server_config.ibm_mq.ipc_tcp_start_port))

            if is_ok:
                self.create_initial_wmq_definitions(self.worker_store.worker_config.definition_wmq)
                self.create_initial_wmq_outconns(self.worker_store.worker_config.out_wmq)
                self.create_initial_wmq_channels(self.worker_store.worker_config.channel_wmq)

    else:
        self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IN_PROCESS_OTHER, kwargs={
            'parallel_server': self,
        })

    # IPC
    self.ipc_api.name = self.name
    self.ipc_api.pid = self.pid
    self.ipc_api.on_message_callback = self.worker_store.on_ipc_message
    spawn_greenlet(self.ipc_api.run)

    self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.AFTER_STARTED, kwargs={
        'parallel_server': self,
    })

    logger.info('Started `%s@%s` (pid: %s)', server.name, server.cluster.name, self.pid)
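# A sketch of what a helper like fs_safe_name, used above to name the IPC
# forwarder, might do - replacing characters that are unsafe in file-system
# paths. This is an illustrative assumption; the actual Zato implementation
# may differ in details.
import re

def fs_safe_name_sketch(value):
    # Collapse every run of characters that is not alphanumeric,
    # a dot or an underscore into a single hyphen
    return re.sub(r'[^a-zA-Z0-9._]+', '-', value)

# e.g. fs_safe_name_sketch('cluster1/server 1') -> 'cluster1-server-1'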
def run(base_dir, start_gunicorn_app=True, options=None):
    options = options or {}

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Capture warnings to log files
    logging.captureWarnings(True)

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocesses
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    # We don't need these warnings because users may explicitly configure no certificate validation
    # and we don't want urllib3 to warn us about it.
    import requests as _r
    _r.packages.urllib3.disable_warnings()

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName(TRACE1, 'TRACE1')
    logging_conf_path = os.path.join(repo_location, 'logging.conf')

    with open(logging_conf_path) as f:
        logging_config = yaml.load(f)
        dictConfig(logging_config)

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    crypto_manager = ServerCryptoManager(repo_location, secret_key=options['secret_key'], stdin_data=read_stdin_data())
    secrets_config = ConfigObj(os.path.join(repo_location, 'secrets.conf'), use_zato=False)
    server_config = get_config(repo_location, 'server.conf', crypto_manager=crypto_manager, secrets_conf=secrets_config)
    pickup_config = get_config(repo_location, 'pickup.conf')
    sio_config = get_config(repo_location, 'simple-io.conf', needs_user_config=False)

    sso_config = get_config(repo_location, 'sso.conf', needs_user_config=False)
    normalize_sso_config(sso_config)

    # Now that we have access to server.conf, greenify any libraries that need to be made
    # greenlet-friendly, assuming that there are any - otherwise do not do anything.
    to_greenify = []
    for key, value in server_config.get('greenify', {}).items():
        if asbool(value):
            if not os.path.exists(key):
                raise ValueError('No such path `{}`'.format(key))
            else:
                to_greenify.append(key)

    # Go ahead only if we actually have anything to greenify
    if to_greenify:
        import greenify
        greenify.greenify()
        for name in to_greenify:
            result = greenify.patch_lib(name)
            if not result:
                raise ValueError('Library `{}` could not be greenified'.format(name))
            else:
                logger.info('Greenified library `%s`', name)

    server_config.main.token = server_config.main.token.encode('utf8')

    # Do not proceed unless we can be certain our own preferred address or IP can be obtained.
    preferred_address = server_config.preferred_address.get('address')

    if not preferred_address:
        preferred_address = get_preferred_ip(server_config.main.gunicorn_bind, server_config.preferred_address)

    if not preferred_address and not server_config.server_to_server.boot_if_preferred_not_found:
        msg = 'Unable to start the server. Could not obtain a preferred address, please configure [bind_options] in server.conf'
        logger.warn(msg)
        raise Exception(msg)

    # Create the startup callable tool as soon as practical
    startup_callable_tool = StartupCallableTool(server_config)

    # Run the hook before there is any server object created
    startup_callable_tool.invoke(SERVER_STARTUP.PHASE.FS_CONFIG_ONLY, kwargs={
        'server_config': server_config,
        'pickup_config': pickup_config,
        'sio_config': sio_config,
        'sso_config': sso_config,
    })

    # New in 2.0 - Start monitoring as soon as possible
    if server_config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(
            server_config.newrelic.config, server_config.newrelic.environment or None,
            server_config.newrelic.ignore_errors or None, server_config.newrelic.log_file or None,
            server_config.newrelic.log_level or None)

    zunicorn.SERVER_SOFTWARE = server_config.misc.get('http_server_header', 'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(server_config.kvdb)
    kvdb_logger.info('Main process config `%s`', kvdb_config)

    # New in 2.0, hence optional
    user_locale = server_config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
            locale.currency(value, grouping=True).decode('utf-8'))

    # Makes queries against Postgres asynchronous
    if asbool(server_config.odb.use_async_driver) and server_config.odb.engine == 'postgresql':
        make_psycopg_green()

    if server_config.misc.http_proxy:
        os.environ['http_proxy'] = server_config.misc.http_proxy

    # Basic components needed for the server to boot up
    kvdb = KVDB()
    odb_manager = ODBManager(well_known_data=ZATO_CRYPTO_WELL_KNOWN_DATA)
    sql_pool_store = PoolStore()

    service_store = ServiceStore()
    service_store.odb = odb_manager
    service_store.services = {}

    server = ParallelServer()
    server.odb = odb_manager
    server.service_store = service_store
    server.service_store.server = server
    server.sql_pool_store = sql_pool_store
    server.service_modules = []
    server.kvdb = kvdb

    # Assigned here because it is a circular dependency
    odb_manager.parallel_server = server

    zato_gunicorn_app = ZatoGunicornApplication(server, repo_location, server_config.main, server_config.crypto)

    server.has_fg = options.get('fg')
    server.crypto_manager = crypto_manager
    server.odb_data = server_config.odb
    server.host = zato_gunicorn_app.zato_host
    server.port = zato_gunicorn_app.zato_port
    server.repo_location = repo_location
    server.user_conf_location = os.path.join(server.repo_location, 'user-conf')
    server.base_dir = base_dir
    server.logs_dir = os.path.join(server.base_dir, 'logs')
    server.tls_dir = os.path.join(server.base_dir, 'config', 'repo', 'tls')
    server.static_dir = os.path.join(server.base_dir, 'config', 'repo', 'static')
    server.json_schema_dir = os.path.join(server.base_dir, 'config', 'repo', 'schema', 'json')
    server.fs_server_config = server_config
    server.fs_sql_config = get_config(repo_location, 'sql.conf', needs_user_config=False)
    server.pickup_config = pickup_config
    server.logging_config = logging_config
    server.logging_conf_path = logging_conf_path
    server.sio_config = sio_config
    server.sso_config = sso_config
    server.user_config.update(server_config.user_config_items)
    server.preferred_address = preferred_address
    server.sync_internal = options['sync_internal']
    server.jwt_secret = server.fs_server_config.misc.jwt_secret.encode('utf8')
    server.startup_callable_tool = startup_callable_tool
    server.is_sso_enabled = server.fs_server_config.component_enabled.sso

    if server.is_sso_enabled:
        server.sso_api = SSOAPI(server, sso_config, None, crypto_manager.encrypt, crypto_manager.decrypt,
            crypto_manager.hash_secret, crypto_manager.verify_hash, new_user_id)

    # Remove all locks possibly left over by previous server instances
    kvdb.component = 'master-proc'
    clear_locks(kvdb, server_config.main.token, server_config.kvdb, crypto_manager.decrypt)

    # New in 2.0.8
    server.return_tracebacks = asbool(server_config.misc.get('return_tracebacks', True))
    server.default_error_message = server_config.misc.get('default_error_message', 'An error has occurred')

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = server_config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = server_config.get('sentry')

    dsn = sentry_config.pop('dsn', None)
    if dsn:
        from raven import Client
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)
        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        logger = logging.getLogger('')
        logger.addHandler(handler)

        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(os.path.join(base_dir, server_config.profiler.profiler_dir))
        server.on_wsgi_request = ProfileMiddleware(
            server.on_wsgi_request,
            log_filename=os.path.join(profiler_dir, server_config.profiler.log_filename),
            cachegrind_filename=os.path.join(profiler_dir, server_config.profiler.cachegrind_filename),
            discard_first_request=server_config.profiler.discard_first_request,
            flush_at_shutdown=server_config.profiler.flush_at_shutdown,
            path=server_config.profiler.url_path,
            unwind=server_config.profiler.unwind)

    # New in 2.0 - set environment variables for servers to inherit
    os_environ = server_config.get('os_environ', {})
    for key, value in os_environ.items():
        os.environ[key] = value

    # Run the hook right before the gunicorn-level server actually starts
    startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IMPL_BEFORE_RUN, kwargs={
        'zato_gunicorn_app': zato_gunicorn_app,
    })

    # Run the app at last
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
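# A sketch of what masking a KVDB password for logging might look like, in the
# spirit of get_kvdb_config_for_log used above ("possibly replacing its
# password if told to"). The helper below is an illustrative assumption,
# not the actual Zato function.
from copy import deepcopy

def kvdb_config_for_log_sketch(kvdb_config, replace_with='*****'):
    # Work on a copy so the live configuration keeps the real password
    config = deepcopy(kvdb_config)
    if config.get('password'):
        config['password'] = replace_with
    return config

# Usage, mirroring the logging call above:
# kvdb_logger.info('Main process config `%s`', kvdb_config_for_log_sketch(server_config.kvdb))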
def run(base_dir, start_gunicorn_app=True, options=None):
    options = options or {}

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Capture warnings to log files
    logging.captureWarnings(True)

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocesses
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    # We don't need these warnings because users may explicitly configure no certificate validation
    # and we don't want urllib3 to warn us about it.
    import requests as _r
    _r.packages.urllib3.disable_warnings()

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName(TRACE1, 'TRACE1')
    with open(os.path.join(repo_location, 'logging.conf')) as f:
        dictConfig(yaml.load(f))

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    config = get_config(repo_location, 'server.conf')
    pickup_config = get_config(repo_location, 'pickup.conf')

    # Do not proceed unless we can be certain our own preferred address or IP can be obtained.
    preferred_address = config.preferred_address.get('address')

    if not preferred_address:
        preferred_address = get_preferred_ip(config.main.gunicorn_bind, config.preferred_address)

    if not preferred_address and not config.server_to_server.boot_if_preferred_not_found:
        msg = 'Unable to start the server. Could not obtain a preferred address, please configure [bind_options] in server.conf'
        logger.warn(msg)
        raise Exception(msg)

    # New in 2.0 - Start monitoring as soon as possible
    if config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(
            config.newrelic.config, config.newrelic.environment or None, config.newrelic.ignore_errors or None,
            config.newrelic.log_file or None, config.newrelic.log_level or None)

    # New in 2.0 - override gunicorn-set Server HTTP header
    gunicorn.SERVER_SOFTWARE = config.misc.get('http_server_header', 'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(config.kvdb)
    kvdb_logger.info('Master process config `%s`', kvdb_config)

    # New in 2.0, hence optional
    user_locale = config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
            locale.currency(value, grouping=True).decode('utf-8'))

    # Spring Python
    app_context = get_app_context(config)

    # Makes queries against Postgres asynchronous
    if asbool(config.odb.use_async_driver) and config.odb.engine == 'postgresql':
        make_psycopg_green()

    # New in 2.0 - Put HTTP_PROXY in os.environ.
    http_proxy = config.misc.get('http_proxy', False)
    if http_proxy:
        os.environ['http_proxy'] = http_proxy

    crypto_manager = get_crypto_manager(repo_location, app_context, config)

    server = app_context.get_object('server')

    zato_gunicorn_app = ZatoGunicornApplication(server, repo_location, config.main, config.crypto)

    server.crypto_manager = crypto_manager
    server.odb_data = config.odb
    server.host = zato_gunicorn_app.zato_host
    server.port = zato_gunicorn_app.zato_port
    server.repo_location = repo_location
    server.user_conf_location = os.path.join(server.repo_location, 'user-conf')
    server.base_dir = base_dir
    server.tls_dir = os.path.join(server.base_dir, 'config', 'repo', 'tls')
    server.fs_server_config = config
    server.pickup_config = pickup_config
    server.user_config.update(config.user_config_items)
    server.app_context = app_context
    server.preferred_address = preferred_address
    server.sync_internal = options['sync_internal']
    server.jwt_secret = server.fs_server_config.misc.jwt_secret.encode('utf8')

    # Remove all locks possibly left over by previous server instances
    kvdb = app_context.get_object('kvdb')
    kvdb.component = 'master-proc'
    clear_locks(kvdb, config.main.token, config.kvdb, crypto_manager.decrypt)

    # New in 2.0.8
    server.return_tracebacks = asbool(config.misc.get('return_tracebacks', True))
    server.default_error_message = config.misc.get('default_error_message', 'An error has occurred')

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = config.get('sentry')

    dsn = sentry_config.pop('dsn', None)
    if dsn:
        from raven import Client
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)
        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        logger = logging.getLogger('')
        logger.addHandler(handler)

        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(os.path.join(base_dir, config.profiler.profiler_dir))
        server.on_wsgi_request = ProfileMiddleware(
            server.on_wsgi_request,
            log_filename=os.path.join(profiler_dir, config.profiler.log_filename),
            cachegrind_filename=os.path.join(profiler_dir, config.profiler.cachegrind_filename),
            discard_first_request=config.profiler.discard_first_request,
            flush_at_shutdown=config.profiler.flush_at_shutdown,
            path=config.profiler.url_path,
            unwind=config.profiler.unwind)

    # New in 2.0 - set environment variables for servers to inherit
    os_environ = config.get('os_environ', {})
    for key, value in os_environ.items():
        os.environ[key] = value

    # Run the app at last
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
def run(base_dir):

    register_diag_handlers()

    os.chdir(base_dir)

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocesses
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName(TRACE1, 'TRACE1')
    logging.config.fileConfig(os.path.join(repo_location, 'logging.conf'))
    logger = logging.getLogger(__name__)

    config = get_config(repo_location, 'server.conf')
    app_context = get_app_context(config)

    # Makes queries against Postgres asynchronous
    if asbool(config.odb.use_async_driver) and config.odb.engine == 'postgresql':
        make_psycopg_green()

    crypto_manager = get_crypto_manager(repo_location, app_context, config)

    parallel_server = app_context.get_object('parallel_server')

    zato_gunicorn_app = ZatoGunicornApplication(parallel_server, repo_location, config.main, config.crypto)

    parallel_server.crypto_manager = crypto_manager
    parallel_server.odb_data = config.odb
    parallel_server.host = zato_gunicorn_app.zato_host
    parallel_server.port = zato_gunicorn_app.zato_port
    parallel_server.repo_location = repo_location
    parallel_server.base_dir = base_dir
    parallel_server.fs_server_config = config
    parallel_server.startup_jobs = app_context.get_object('startup_jobs')
    parallel_server.app_context = app_context

    # Remove all locks possibly left over by previous server instances
    clear_locks(app_context.get_object('kvdb'), config.main.token, config.kvdb, crypto_manager.decrypt)

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # This is new in 1.2, so it's optional
    profiler_enabled = config.get('profiler', {}).get('enabled', False)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(os.path.join(base_dir, config.profiler.profiler_dir))
        parallel_server.on_wsgi_request = ProfileMiddleware(
            parallel_server.on_wsgi_request,
            log_filename=os.path.join(profiler_dir, config.profiler.log_filename),
            cachegrind_filename=os.path.join(profiler_dir, config.profiler.cachegrind_filename),
            discard_first_request=config.profiler.discard_first_request,
            flush_at_shutdown=config.profiler.flush_at_shutdown,
            path=config.profiler.url_path,
            unwind=config.profiler.unwind)

    # Run the app at last
    zato_gunicorn_app.run()
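# A minimal sketch of the wrapping idea used with ProfileMiddleware above: any
# WSGI-style callable can be replaced by a wrapper that observes every request.
# The names below are illustrative, not part of Zato or repoze.profile.
import time

def make_timing_middleware(wsgi_app, logger):
    def wrapper(environ, start_response):
        start = time.time()
        try:
            return wsgi_app(environ, start_response)
        finally:
            # Log how long each request took, whether it succeeded or not
            logger.info('Request to `%s` took %.4f s', environ.get('PATH_INFO'), time.time() - start)
    return wrapper

# Usage, mirroring how on_wsgi_request is replaced above:
# parallel_server.on_wsgi_request = make_timing_middleware(parallel_server.on_wsgi_request, logger)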
def start_server(parallel_server, zato_deployment_key=None):

    # Easier to type
    self = parallel_server

    # This cannot be done in __init__ because each sub-process obviously has its own PID
    self.pid = os.getpid()

    # Used later on
    use_tls = asbool(self.fs_server_config.crypto.use_tls)

    # Will be None if we are not running in the background.
    if not zato_deployment_key:
        zato_deployment_key = '{}.{}'.format(datetime.utcnow().isoformat(), uuid4().hex)

    self.deployment_key = zato_deployment_key

    register_diag_handlers()

    # Store the ODB configuration, create an ODB connection pool and have self.odb use it
    self.config.odb_data = self.get_config_odb_data(self)
    self.set_odb_pool()

    # Now try grabbing the basic server's data from the ODB. No point
    # in doing anything else if we can't get past this point.
    server = self.odb.fetch_server(self.config.odb_data)

    if not server:
        raise Exception('Server does not exist in the ODB')

    # Set up the server-wide default lock manager
    odb_data = self.config.odb_data
    backend_type = 'fcntl' if odb_data.engine == 'sqlite' else odb_data.engine
    self.zato_lock_manager = LockManager(backend_type, 'zato', self.odb.session)

    # Just to make sure distributed locking is configured correctly
    with self.zato_lock_manager(uuid4().hex):
        pass

    # Basic metadata
    self.id = server.id
    self.name = server.name
    self.cluster_id = server.cluster_id
    self.cluster = self.odb.cluster

    # Looked up upfront here and assigned to services in their store
    self.enforce_service_invokes = asbool(self.fs_server_config.misc.enforce_service_invokes)

    # For server-to-server communication
    self.servers = Servers(self.odb, self.cluster.name)

    logger.info('Preferred address of `%s@%s` (pid: %s) is `http%s://%s:%s`', self.name, self.cluster.name,
        self.pid, 's' if use_tls else '', self.preferred_address, self.port)

    # Reads in all configuration from ODB
    self.worker_store = WorkerStore(self.config, self)
    self.worker_store.invoke_matcher.read_config(self.fs_server_config.invoke_patterns_allowed)
    self.worker_store.target_matcher.read_config(self.fs_server_config.invoke_target_patterns_allowed)
    self.set_up_config(server)

    # Deploys services
    is_first, locally_deployed = self._after_init_common(server)

    # Initializes worker store, including connectors
    self.worker_store.init()
    self.request_dispatcher_dispatch = self.worker_store.request_dispatcher.dispatch

    # Normalize hot-deploy configuration
    self.hot_deploy_config = Bunch()
    self.hot_deploy_config.work_dir = os.path.normpath(os.path.join(
        self.repo_location, self.fs_server_config.hot_deploy.work_dir))
    self.hot_deploy_config.backup_history = int(self.fs_server_config.hot_deploy.backup_history)
    self.hot_deploy_config.backup_format = self.fs_server_config.hot_deploy.backup_format

    for name in ('current_work_dir', 'backup_work_dir', 'last_backup_work_dir', 'delete_after_pick_up'):

        # New in 2.0
        if name == 'delete_after_pick_up':
            value = asbool(self.fs_server_config.hot_deploy.get(name, True))
            self.hot_deploy_config[name] = value
        else:
            self.hot_deploy_config[name] = os.path.normpath(os.path.join(
                self.hot_deploy_config.work_dir, self.fs_server_config.hot_deploy[name]))

    self._after_init_accepted(locally_deployed)

    broker_callbacks = {
        TOPICS[MESSAGE_TYPE.TO_PARALLEL_ANY]: self.worker_store.on_broker_msg,
        TOPICS[MESSAGE_TYPE.TO_PARALLEL_ALL]: self.worker_store.on_broker_msg,
    }

    self.broker_client = BrokerClient(self.kvdb, 'parallel', broker_callbacks, self.get_lua_programs())
    self.worker_store.set_broker_client(self.broker_client)

    self.odb.server_up_down(
        server.token, SERVER_UP_STATUS.RUNNING, True, self.host, self.port, self.preferred_address, use_tls)

    # Startup services
    if is_first:
        self.invoke_startup_services(is_first)
        spawn_greenlet(self.set_up_pickup)

    # IPC
    if is_first:
        self.ipc_forwarder.name = self.name
        self.ipc_forwarder.pid = self.pid
        spawn_greenlet(self.ipc_forwarder.run)

    # IPC
    self.ipc_api.name = self.name
    self.ipc_api.pid = self.pid
    self.ipc_api.on_message_callback = self.worker_store.on_ipc_message
    spawn_greenlet(self.ipc_api.run)

    logger.info('Started `%s@%s` (pid: %s)', server.name, server.cluster.name, self.pid)
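# spawn_greenlet, used above to run the IPC components and pickup handlers,
# wraps gevent's spawn. A hedged approximation of the idea - start a greenlet
# and surface immediate startup errors in the caller - the real Zato utility
# differs in details such as the timeout value.
import gevent

def spawn_greenlet_sketch(target, *args, **kwargs):
    green = gevent.spawn(target, *args, **kwargs)

    # Wait briefly so that exceptions raised while the greenlet starts
    # are re-raised here instead of being lost in the hub
    green.join(timeout=0.2)
    if green.exception:
        raise green.exception

    return green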
def run(base_dir, start_gunicorn_app=True):

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Capture warnings to log files
    logging.captureWarnings(True)

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocesses
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    # We don't need these warnings because users may explicitly configure no certificate validation
    # and we don't want urllib3 to warn us about it.
    import requests as _r
    _r.packages.urllib3.disable_warnings()

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName(TRACE1, 'TRACE1')
    with open(os.path.join(repo_location, 'logging.conf')) as f:
        dictConfig(yaml.load(f))

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    config = get_config(repo_location, 'server.conf')

    # New in 2.0 - Start monitoring as soon as possible
    if config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(
            config.newrelic.config, config.newrelic.environment or None, config.newrelic.ignore_errors or None,
            config.newrelic.log_file or None, config.newrelic.log_level or None)

    # New in 2.0 - override gunicorn-set Server HTTP header
    gunicorn.SERVER_SOFTWARE = config.misc.get('http_server_header', 'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(config.kvdb)
    kvdb_logger.info('Master process config `%s`', kvdb_config)

    # New in 2.0, hence optional
    user_locale = config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
            locale.currency(value, grouping=True).decode('utf-8'))

    # Spring Python
    app_context = get_app_context(config)

    # Makes queries against Postgres asynchronous
    if asbool(config.odb.use_async_driver) and config.odb.engine == 'postgresql':
        make_psycopg_green()

    # New in 2.0 - Put HTTP_PROXY in os.environ.
    http_proxy = config.misc.get('http_proxy', False)
    if http_proxy:
        os.environ['http_proxy'] = http_proxy

    crypto_manager = get_crypto_manager(repo_location, app_context, config)

    parallel_server = app_context.get_object('parallel_server')

    zato_gunicorn_app = ZatoGunicornApplication(parallel_server, repo_location, config.main, config.crypto)

    parallel_server.crypto_manager = crypto_manager
    parallel_server.odb_data = config.odb
    parallel_server.host = zato_gunicorn_app.zato_host
    parallel_server.port = zato_gunicorn_app.zato_port
    parallel_server.repo_location = repo_location
    parallel_server.base_dir = base_dir
    parallel_server.tls_dir = os.path.join(parallel_server.base_dir, 'config', 'repo', 'tls')
    parallel_server.fs_server_config = config
    parallel_server.user_config.update(config.user_config_items)
    parallel_server.startup_jobs = app_context.get_object('startup_jobs')
    parallel_server.app_context = app_context

    # Remove all locks possibly left over by previous server instances
    kvdb = app_context.get_object('kvdb')
    kvdb.component = 'master-proc'
    clear_locks(kvdb, config.main.token, config.kvdb, crypto_manager.decrypt)

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = config.get('sentry')

    dsn = sentry_config.pop('dsn', None)
    if dsn:
        from raven import Client
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)
        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        logger = logging.getLogger('')
        logger.addHandler(handler)

        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(os.path.join(base_dir, config.profiler.profiler_dir))
        parallel_server.on_wsgi_request = ProfileMiddleware(
            parallel_server.on_wsgi_request,
            log_filename=os.path.join(profiler_dir, config.profiler.log_filename),
            cachegrind_filename=os.path.join(profiler_dir, config.profiler.cachegrind_filename),
            discard_first_request=config.profiler.discard_first_request,
            flush_at_shutdown=config.profiler.flush_at_shutdown,
            path=config.profiler.url_path,
            unwind=config.profiler.unwind)

    # New in 2.0 - set environment variables for servers to inherit
    os_environ = config.get('os_environ', {})
    for key, value in os_environ.items():
        os.environ[key] = value

    # Run the app at last
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
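# A standalone sketch of the logging bootstrap shared by the functions above:
# read a YAML file and hand the resulting dict to logging.config.dictConfig.
# The file layout is an assumption for illustration; note that the code above
# used the bare yaml.load, which was the common idiom at the time, while
# yaml.safe_load is the safer choice for configuration files.
import logging
from logging.config import dictConfig

import yaml

def configure_logging_sketch(path):
    with open(path) as f:
        config = yaml.safe_load(f)
    dictConfig(config)
    return logging.getLogger(__name__)

# Usage:
# logger = configure_logging_sketch(os.path.join(repo_location, 'logging.conf'))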