Example #1
    def start_server(parallel_server, zato_deployment_key=None):

        # Easier to type
        self = parallel_server # type: ParallelServer

        # This cannot be done in __init__ because each sub-process obviously has its own PID
        self.pid = os.getpid()

        # This cannot be done in __init__ either because this variable is not available there yet
        self.is_first_worker = int(os.environ['ZATO_SERVER_WORKER_IDX']) == 0

        # Used later on
        use_tls = asbool(self.fs_server_config.crypto.use_tls)

        # Will be None if we are not running in the background.
        if not zato_deployment_key:
            zato_deployment_key = '{}.{}'.format(datetime.utcnow().isoformat(), uuid4().hex)

        self.deployment_key = zato_deployment_key

        register_diag_handlers()

        # Create all POSIX IPC objects now that we have the deployment key
        self.shmem_size = int(float(self.fs_server_config.shmem.size) * 10**6) # The size is given in MB; convert to bytes as an integer

        self.server_startup_ipc.create(self.deployment_key, self.shmem_size)
        self.connector_config_ipc.create(self.deployment_key, self.shmem_size)

        # Store the ODB configuration, create an ODB connection pool and have self.odb use it
        self.config.odb_data = self.get_config_odb_data(self)
        self.set_up_odb()

        # Now try grabbing the basic server's data from the ODB. No point
        # in doing anything else if we can't get past this point.
        server = self.odb.fetch_server(self.config.odb_data)

        if not server:
            raise Exception('Server does not exist in the ODB')

        # Set up the server-wide default lock manager
        odb_data = self.config.odb_data
        backend_type = 'fcntl' if odb_data.engine == 'sqlite' else odb_data.engine
        self.zato_lock_manager = LockManager(backend_type, 'zato', self.odb.session)

        # Just to make sure distributed locking is configured correctly
        with self.zato_lock_manager(uuid4().hex):
            pass

        # Basic metadata
        self.id = server.id
        self.name = server.name
        self.cluster_id = server.cluster_id
        self.cluster = self.odb.cluster
        self.worker_id = '{}.{}.{}.{}'.format(self.cluster_id, self.id, self.worker_pid, new_cid())

        # Looked up upfront here and assigned to services in their store
        self.enforce_service_invokes = asbool(self.fs_server_config.misc.enforce_service_invokes)

        # For server-to-server communication
        self.servers = Servers(self.odb, self.cluster.name, self.decrypt)
        logger.info('Preferred address of `%s@%s` (pid: %s) is `http%s://%s:%s`', self.name,
            self.cluster.name, self.pid, 's' if use_tls else '', self.preferred_address, self.port)

        # Reads in all configuration from ODB
        self.worker_store = WorkerStore(self.config, self)
        self.worker_store.invoke_matcher.read_config(self.fs_server_config.invoke_patterns_allowed)
        self.worker_store.target_matcher.read_config(self.fs_server_config.invoke_target_patterns_allowed)
        self.set_up_config(server)

        # Normalize hot-deploy configuration
        self.hot_deploy_config = Bunch()

        self.hot_deploy_config.work_dir = os.path.normpath(os.path.join(
            self.repo_location, self.fs_server_config.hot_deploy.work_dir))

        self.hot_deploy_config.backup_history = int(self.fs_server_config.hot_deploy.backup_history)
        self.hot_deploy_config.backup_format = self.fs_server_config.hot_deploy.backup_format

        # Added in 3.1, hence optional
        max_batch_size = int(self.fs_server_config.hot_deploy.get('max_batch_size', 1000))

        # The config value is expressed in kilobytes; turn it into bytes (the default of 1000 kB equals one megabyte)
        max_batch_size = max_batch_size * 1000

        # Finally, assign it to ServiceStore
        self.service_store.max_batch_size = max_batch_size

        # Deploys services
        is_first, locally_deployed = self._after_init_common(server)

        # Initializes worker store, including connectors
        self.worker_store.init()
        self.request_dispatcher_dispatch = self.worker_store.request_dispatcher.dispatch

        # Configure remaining parts of SSO
        self.configure_sso()

        # Cannot be done in __init__ because self.sso_config is not available there yet
        salt_size = self.sso_config.hash_secret.salt_size
        self.crypto_manager.add_hash_scheme('zato.default', self.sso_config.hash_secret.rounds, salt_size)

        for name in ('current_work_dir', 'backup_work_dir', 'last_backup_work_dir', 'delete_after_pickup'):

            # New in 2.0
            if name == 'delete_after_pickup':

                # For backward compatibility, we need to support both names
                old_name = 'delete_after_pick_up'

                if old_name in self.fs_server_config.hot_deploy:
                    _name = old_name
                else:
                    _name = name

                value = asbool(self.fs_server_config.hot_deploy.get(_name, True))
                self.hot_deploy_config[name] = value
            else:
                self.hot_deploy_config[name] = os.path.normpath(os.path.join(
                    self.hot_deploy_config.work_dir, self.fs_server_config.hot_deploy[name]))

        broker_callbacks = {
            TOPICS[MESSAGE_TYPE.TO_PARALLEL_ANY]: self.worker_store.on_broker_msg,
            TOPICS[MESSAGE_TYPE.TO_PARALLEL_ALL]: self.worker_store.on_broker_msg,
        }

        self.broker_client = BrokerClient(self.kvdb, 'parallel', broker_callbacks, self.get_lua_programs())
        self.worker_store.set_broker_client(self.broker_client)

        # Make sure that broker client's connection is ready before continuing
        # to rule out edge cases where, for instance, hot deployment would
        # try to publish a locally found package (one of the extra packages found)
        # before the client's thread has connected to the KVDB.
        if not self.broker_client.ready:
            start = now = datetime.utcnow()
            max_seconds = 120
            until = now + timedelta(seconds=max_seconds)

            while not self.broker_client.ready:
                now = datetime.utcnow()
                delta = (now - start).total_seconds()
                if now < until:
                    # Do not log too early so as not to clutter logs
                    if delta > 2:
                        logger.info('Waiting for broker client to become ready (%s, max:%s)', delta, max_seconds)
                    gevent.sleep(0.5)
                else:
                    raise Exception('Broker client did not become ready within {} seconds'.format(max_seconds))

        self._after_init_accepted(locally_deployed)
        self.odb.server_up_down(
            server.token, SERVER_UP_STATUS.RUNNING, True, self.host, self.port, self.preferred_address, use_tls)

        if is_first:

            logger.info('First worker of `%s` is %s', self.name, self.pid)

            self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IN_PROCESS_FIRST, kwargs={
                'parallel_server': self,
            })

            # Clean up any old WSX connections possibly registered for this server
            # which may still be lingering around, for instance, if the server was previously
            # shut down forcibly and did not have an opportunity to run self.cleanup_on_stop
            self.cleanup_wsx()

            # Startup services
            self.invoke_startup_services(is_first)
            spawn_greenlet(self.set_up_pickup)

            # Set up subprocess-based IBM MQ connections if that component is enabled
            if self.fs_server_config.component_enabled.ibm_mq:

                # Will block for a few seconds at most, until is_ok is returned,
                # which indicates whether the connector started or not.
                is_ok = self.connector_ibm_mq.start_ibm_mq_connector(int(self.fs_server_config.ibm_mq.ipc_tcp_start_port))

                try:
                    if is_ok:
                        self.connector_ibm_mq.create_initial_wmq_definitions(self.worker_store.worker_config.definition_wmq)
                        self.connector_ibm_mq.create_initial_wmq_outconns(self.worker_store.worker_config.out_wmq)
                        self.connector_ibm_mq.create_initial_wmq_channels(self.worker_store.worker_config.channel_wmq)
                except Exception as e:
                    logger.warn('Could not create initial IBM MQ objects, e:`%s`', e)

            # Set up subprocess-based SFTP connections
            is_ok = self.connector_sftp.start_sftp_connector(int(self.fs_server_config.ibm_mq.ipc_tcp_start_port))
            if is_ok:
                self.connector_sftp.create_initial_sftp_outconns(self.worker_store.worker_config.out_sftp)

        else:
            self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IN_PROCESS_OTHER, kwargs={
                'parallel_server': self,
            })

        # IPC
        self.ipc_api.name = self.ipc_api.get_endpoint_name(self.cluster.name, self.name, self.pid)
        self.ipc_api.pid = self.pid
        self.ipc_api.on_message_callback = self.worker_store.on_ipc_message
        spawn_greenlet(self.ipc_api.run)

        self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.AFTER_STARTED, kwargs={
            'parallel_server': self,
        })

        logger.info('Started `%s@%s` (pid: %s)', server.name, server.cluster.name, self.pid)
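
The broker-client section of Example #1 uses a generic poll-with-deadline idiom. Below is a minimal, self-contained sketch of the same pattern, assuming only a hypothetical client object with a boolean ready attribute; time.sleep stands in for the gevent.sleep call the original relies on.

    import time
    from datetime import datetime, timedelta

    def wait_until_ready(client, max_seconds=120, poll_interval=0.5):

        start = datetime.utcnow()
        until = start + timedelta(seconds=max_seconds)

        # Poll until the client reports readiness or the deadline passes
        while not client.ready:
            now = datetime.utcnow()
            delta = (now - start).total_seconds()

            if now >= until:
                raise Exception('Client did not become ready within {} seconds'.format(max_seconds))

            # Do not log too early so as not to clutter logs
            if delta > 2:
                print('Waiting for client to become ready ({}, max:{})'.format(delta, max_seconds))

            time.sleep(poll_interval)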
Example #2
    def __init__(self):
        self.host = None
        self.port = None
        self.crypto_manager = None
        self.odb = None
        self.odb_data = None
        self.config = None
        self.repo_location = None
        self.user_conf_location = None
        self.sql_pool_store = None
        self.soap11_content_type = None
        self.soap12_content_type = None
        self.plain_xml_content_type = None
        self.json_content_type = None
        self.internal_service_modules = None  # Zato's own internal services
        self.service_modules = None  # Set programmatically in Spring
        self.service_sources = None  # Set in a config file
        self.base_dir = None
        self.tls_dir = None
        self.static_dir = None
        self.hot_deploy_config = None
        self.pickup = None
        self.fs_server_config = None
        self.fs_sql_config = None
        self.pickup_config = None
        self.logging_config = None
        self.logging_conf_path = None
        self.sio_config = None
        self.sso_config = None
        self.connector_server_grace_time = None
        self.id = None
        self.name = None
        self.worker_id = None
        self.worker_pid = None
        self.cluster = None
        self.cluster_id = None
        self.kvdb = None
        self.startup_jobs = None
        self.worker_store = None
        self.request_dispatcher_dispatch = None
        self.deployment_lock_expires = None
        self.deployment_lock_timeout = None
        self.deployment_key = ''
        self.app_context = None
        self.has_gevent = None
        self.delivery_store = None
        self.static_config = None
        self.component_enabled = Bunch()
        self.client_address_headers = [
            'HTTP_X_ZATO_FORWARDED_FOR', 'HTTP_X_FORWARDED_FOR', 'REMOTE_ADDR'
        ]
        self.broker_client = None
        self.return_tracebacks = None
        self.default_error_message = None
        self.time_util = None
        self.preferred_address = None
        self.crypto_use_tls = None
        self.servers = None
        self.zato_lock_manager = None
        self.pid = None
        self.sync_internal = None
        self.ipc_api = IPCAPI(False)
        self.ipc_forwarder = IPCAPI(True)
        self.wmq_ipc_tcp_port = None
        self.fifo_response_buffer_size = None  # Will be in megabytes
        self.is_first_worker = None
        self.shmem_size = -1.0
        self.server_startup_ipc = ServerStartupIPC()
        self.keyutils = KeyUtils()
        self.sso_api = None
        self.is_sso_enabled = False
        self.audit_pii = audit_pii
        self.startup_callable_tool = None
        self.default_internal_pubsub_endpoint_id = None
        self._hash_secret_method = None
        self._hash_secret_rounds = None
        self._hash_secret_salt_size = None

        # Allows users to store arbitrary data across service invocations
        self.user_ctx = Bunch()
        self.user_ctx_lock = gevent.lock.RLock()

        self.access_logger = logging.getLogger('zato_access_log')
        self.access_logger_log = self.access_logger._log
        self.needs_access_log = self.access_logger.isEnabledFor(INFO)
        self.has_pubsub_audit_log = logging.getLogger('zato_pubsub_audit').isEnabledFor(INFO)
        self.is_enabled_for_warn = logging.getLogger('zato').isEnabledFor(WARN)

        # The main config store
        self.config = ConfigStore()

        gevent.signal(signal.SIGINT, self.destroy)
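
The user_ctx attribute at the end of Example #2 pairs a plain container with a re-entrant lock so services can share arbitrary data safely. A minimal sketch of that pattern follows; threading.RLock stands in for gevent.lock.RLock and the helper functions are illustrative only.

    import threading

    # Arbitrary data shared across service invocations, guarded by an RLock
    user_ctx = {}
    user_ctx_lock = threading.RLock()

    def set_user_value(key, value):
        with user_ctx_lock:
            user_ctx[key] = value

    def get_user_value(key, default=None):
        with user_ctx_lock:
            return user_ctx.get(key, default)

    set_user_value('last_run', '2019-01-01')
    print(get_user_value('last_run'))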
Example #3
    def set_up_config(self, server):

        # Which components are enabled
        self.component_enabled.stats = asbool(self.fs_server_config.component_enabled.stats)
        self.component_enabled.slow_response = asbool(self.fs_server_config.component_enabled.slow_response)

        #
        # Cassandra - start
        #

        query = self.odb.get_cassandra_conn_list(server.cluster.id, True)
        self.config.cassandra_conn = ConfigDict.from_query('cassandra_conn', query, decrypt_func=self.decrypt)

        query = self.odb.get_cassandra_query_list(server.cluster.id, True)
        self.config.cassandra_query = ConfigDict.from_query('cassandra_query', query, decrypt_func=self.decrypt)

        #
        # Cassandra - end
        #

        #
        # Search - start
        #

        query = self.odb.get_search_es_list(server.cluster.id, True)
        self.config.search_es = ConfigDict.from_query('search_es', query, decrypt_func=self.decrypt)

        query = self.odb.get_search_solr_list(server.cluster.id, True)
        self.config.search_solr = ConfigDict.from_query('search_solr', query, decrypt_func=self.decrypt)

        #
        # Search - end
        #

        #
        # SMS - start
        #

        query = self.odb.get_sms_twilio_list(server.cluster.id, True)
        self.config.sms_twilio = ConfigDict.from_query('sms_twilio', query, decrypt_func=self.decrypt)

        #
        # SMS - end
        #

        #
        # Cloud - start
        #

        # OpenStack - Swift

        query = self.odb.get_cloud_openstack_swift_list(server.cluster.id, True)
        self.config.cloud_openstack_swift = ConfigDict.from_query('cloud_openstack_swift', query, decrypt_func=self.decrypt)

        query = self.odb.get_cloud_aws_s3_list(server.cluster.id, True)
        self.config.cloud_aws_s3 = ConfigDict.from_query('cloud_aws_s3', query, decrypt_func=self.decrypt)

        #
        # Cloud - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # Services
        query = self.odb.get_service_list(server.cluster.id, True)
        self.config.service = ConfigDict.from_query('service_list', query, decrypt_func=self.decrypt)

        #
        # Definitions - start
        #

        # AMQP
        query = self.odb.get_definition_amqp_list(server.cluster.id, True)
        self.config.definition_amqp = ConfigDict.from_query('definition_amqp', query, decrypt_func=self.decrypt)

        # IBM MQ
        query = self.odb.get_definition_wmq_list(server.cluster.id, True)
        self.config.definition_wmq = ConfigDict.from_query('definition_wmq', query, decrypt_func=self.decrypt)

        #
        # Definitions - end
        #

        #
        # Channels - start
        #

        # AMQP
        query = self.odb.get_channel_amqp_list(server.cluster.id, True)
        self.config.channel_amqp = ConfigDict.from_query('channel_amqp', query, decrypt_func=self.decrypt)

        # STOMP
        query = self.odb.get_channel_stomp_list(server.cluster.id, True)
        self.config.channel_stomp = ConfigDict.from_query('channel_stomp', query, decrypt_func=self.decrypt)

        # IBM MQ
        query = self.odb.get_channel_wmq_list(server.cluster.id, True)
        self.config.channel_wmq = ConfigDict.from_query('channel_wmq', query, decrypt_func=self.decrypt)

        #
        # Channels - end
        #

        #
        # Outgoing connections - start
        #

        # AMQP
        query = self.odb.get_out_amqp_list(server.cluster.id, True)
        self.config.out_amqp = ConfigDict.from_query('out_amqp', query, decrypt_func=self.decrypt)

        # Caches
        query = self.odb.get_cache_builtin_list(server.cluster.id, True)
        self.config.cache_builtin = ConfigDict.from_query('cache_builtin', query, decrypt_func=self.decrypt)

        query = self.odb.get_cache_memcached_list(server.cluster.id, True)
        self.config.cache_memcached = ConfigDict.from_query('cache_memcached', query, decrypt_func=self.decrypt)

        # FTP
        query = self.odb.get_out_ftp_list(server.cluster.id, True)
        self.config.out_ftp = ConfigDict.from_query('out_ftp', query, decrypt_func=self.decrypt)

        # IBM MQ
        query = self.odb.get_out_wmq_list(server.cluster.id, True)
        self.config.out_wmq = ConfigDict.from_query('out_wmq', query, decrypt_func=self.decrypt)

        # Odoo
        query = self.odb.get_out_odoo_list(server.cluster.id, True)
        self.config.out_odoo = ConfigDict.from_query('out_odoo', query, decrypt_func=self.decrypt)

        # SAP RFC
        query = self.odb.get_out_sap_list(server.cluster.id, True)
        self.config.out_sap = ConfigDict.from_query('out_sap', query, decrypt_func=self.decrypt)

        # REST
        query = self.odb.get_http_soap_list(server.cluster.id, 'outgoing', 'plain_http', True)
        self.config.out_plain_http = ConfigDict.from_query('out_plain_http', query, decrypt_func=self.decrypt)

        # SFTP
        query = self.odb.get_out_sftp_list(server.cluster.id, True)
        self.config.out_sftp = ConfigDict.from_query('out_sftp', query, decrypt_func=self.decrypt, drop_opaque=True)

        # SOAP
        query = self.odb.get_http_soap_list(server.cluster.id, 'outgoing', 'soap', True)
        self.config.out_soap = ConfigDict.from_query('out_soap', query, decrypt_func=self.decrypt)

        # SQL
        query = self.odb.get_out_sql_list(server.cluster.id, True)
        self.config.out_sql = ConfigDict.from_query('out_sql', query, decrypt_func=self.decrypt)

        # STOMP
        query = self.odb.get_out_stomp_list(server.cluster.id, True)
        self.config.out_stomp = ConfigDict.from_query('out_stomp', query, decrypt_func=self.decrypt)

        # ZMQ channels
        query = self.odb.get_channel_zmq_list(server.cluster.id, True)
        self.config.channel_zmq = ConfigDict.from_query('channel_zmq', query, decrypt_func=self.decrypt)

        # ZMQ outgoing
        query = self.odb.get_out_zmq_list(server.cluster.id, True)
        self.config.out_zmq = ConfigDict.from_query('out_zmq', query, decrypt_func=self.decrypt)

        # WebSocket channels
        query = self.odb.get_channel_web_socket_list(server.cluster.id, True)
        self.config.channel_web_socket = ConfigDict.from_query('channel_web_socket', query, decrypt_func=self.decrypt)

        #
        # Outgoing connections - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        #
        # Generic - start
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # Connections
        query = self.odb.get_generic_connection_list(server.cluster.id, True)
        self.config.generic_connection = ConfigDict.from_query('generic_connection', query, decrypt_func=self.decrypt)

        #
        # Generic - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        #
        # Notifications - start
        #

        # OpenStack Swift
        query = self.odb.get_notif_cloud_openstack_swift_list(server.cluster.id, True)
        self.config.notif_cloud_openstack_swift = ConfigDict.from_query('notif_cloud_openstack_swift',
            query, decrypt_func=self.decrypt)

        # SQL
        query = self.odb.get_notif_sql_list(server.cluster.id, True)
        self.config.notif_sql = ConfigDict.from_query('notif_sql', query, decrypt_func=self.decrypt)

        #
        # Notifications - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        #
        # Security - start
        #

        # API keys
        query = self.odb.get_apikey_security_list(server.cluster.id, True)
        self.config.apikey = ConfigDict.from_query('apikey', query, decrypt_func=self.decrypt)

        # AWS
        query = self.odb.get_aws_security_list(server.cluster.id, True)
        self.config.aws = ConfigDict.from_query('aws', query, decrypt_func=self.decrypt)

        # HTTP Basic Auth
        query = self.odb.get_basic_auth_list(server.cluster.id, None, True)
        self.config.basic_auth = ConfigDict.from_query('basic_auth', query, decrypt_func=self.decrypt)

        # JWT
        query = self.odb.get_jwt_list(server.cluster.id, None, True)
        self.config.jwt = ConfigDict.from_query('jwt', query, decrypt_func=self.decrypt)

        # NTLM
        query = self.odb.get_ntlm_list(server.cluster.id, True)
        self.config.ntlm = ConfigDict.from_query('ntlm', query, decrypt_func=self.decrypt)

        # OAuth
        query = self.odb.get_oauth_list(server.cluster.id, True)
        self.config.oauth = ConfigDict.from_query('oauth', query, decrypt_func=self.decrypt)

        # OpenStack
        query = self.odb.get_openstack_security_list(server.cluster.id, True)
        self.config.openstack_security = ConfigDict.from_query('openstack_security', query, decrypt_func=self.decrypt)

        # RBAC - permissions
        query = self.odb.get_rbac_permission_list(server.cluster.id, True)
        self.config.rbac_permission = ConfigDict.from_query('rbac_permission', query, decrypt_func=self.decrypt)

        # RBAC - roles
        query = self.odb.get_rbac_role_list(server.cluster.id, True)
        self.config.rbac_role = ConfigDict.from_query('rbac_role', query, decrypt_func=self.decrypt)

        # RBAC - client roles
        query = self.odb.get_rbac_client_role_list(server.cluster.id, True)
        self.config.rbac_client_role = ConfigDict.from_query('rbac_client_role', query, decrypt_func=self.decrypt)

        # RBAC - role permission
        query = self.odb.get_rbac_role_permission_list(server.cluster.id, True)
        self.config.rbac_role_permission = ConfigDict.from_query('rbac_role_permission', query, decrypt_func=self.decrypt)

        # TLS CA certs
        query = self.odb.get_tls_ca_cert_list(server.cluster.id, True)
        self.config.tls_ca_cert = ConfigDict.from_query('tls_ca_cert', query, decrypt_func=self.decrypt)

        # TLS channel security
        query = self.odb.get_tls_channel_sec_list(server.cluster.id, True)
        self.config.tls_channel_sec = ConfigDict.from_query('tls_channel_sec', query, decrypt_func=self.decrypt)

        # TLS key/cert pairs
        query = self.odb.get_tls_key_cert_list(server.cluster.id, True)
        self.config.tls_key_cert = ConfigDict.from_query('tls_key_cert', query, decrypt_func=self.decrypt)

        # WS-Security
        query = self.odb.get_wss_list(server.cluster.id, True)
        self.config.wss = ConfigDict.from_query('wss', query, decrypt_func=self.decrypt)

        # Vault connections
        query = self.odb.get_vault_connection_list(server.cluster.id, True)
        self.config.vault_conn_sec = ConfigDict.from_query('vault_conn_sec', query, decrypt_func=self.decrypt)

        # XPath
        query = self.odb.get_xpath_sec_list(server.cluster.id, True)
        self.config.xpath_sec = ConfigDict.from_query('xpath_sec', query, decrypt_func=self.decrypt)

        # New in 3.0 - encrypt all old secrets
        self._migrate_30_encrypt_secrets()

        #
        # Security - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # All the HTTP/SOAP channels.
        http_soap = []

        for item in elems_with_opaque(self.odb.get_http_soap_list(server.cluster.id, 'channel')):

            hs_item = {}
            for key in item.keys():
                hs_item[key] = getattr(item, key)

            hs_item['match_target'] = '{}{}{}'.format(hs_item['soap_action'], MISC.SEPARATOR, hs_item['url_path'])
            hs_item['match_target_compiled'] = Matcher(hs_item['match_target'], hs_item.get('match_slash', ''))

            http_soap.append(hs_item)

        self.config.http_soap = http_soap

        # Namespaces
        query = self.odb.get_namespace_list(server.cluster.id, True)
        self.config.msg_ns = ConfigDict.from_query('msg_ns', query, decrypt_func=self.decrypt)

        # XPath
        query = self.odb.get_xpath_list(server.cluster.id, True)
        self.config.xpath = ConfigDict.from_query('msg_xpath', query, decrypt_func=self.decrypt)

        # JSON Pointer
        query = self.odb.get_json_pointer_list(server.cluster.id, True)
        self.config.json_pointer = ConfigDict.from_query('json_pointer', query, decrypt_func=self.decrypt)

        # SimpleIO
        # In preparation for a SIO rewrite, we load the SIO config from a file
        # but the actual code paths require the pre-3.0 format, so we prepare it here.
        self.config.simple_io = ConfigDict('simple_io', Bunch())

        int_exact = self.sio_config.int.exact
        int_suffix = self.sio_config.int.suffix
        bool_prefix = self.sio_config.bool.prefix

        self.config.simple_io['int_parameters'] = int_exact if isinstance(int_exact, list) else [int_exact]
        self.config.simple_io['int_parameter_suffixes'] = int_suffix if isinstance(int_suffix, list) else [int_suffix]
        self.config.simple_io['bool_parameter_prefixes'] = bool_prefix if isinstance(bool_prefix, list) else [bool_prefix]

        # Maintain backward-compatibility with pre-3.1 versions that did not specify any particular encoding
        bytes_to_str = self.sio_config.get('bytes_to_str')
        if not bytes_to_str:
            bytes_to_str = {'encoding': None}

        self.config.simple_io['bytes_to_str'] = bytes_to_str

        # Pub/sub
        self.config.pubsub = Bunch()

        # Pub/sub - endpoints
        query = self.odb.get_pubsub_endpoint_list(server.cluster.id, True)
        self.config.pubsub_endpoint = ConfigDict.from_query('pubsub_endpoint', query, decrypt_func=self.decrypt)

        # Pub/sub - topics
        query = self.odb.get_pubsub_topic_list(server.cluster.id, True)
        self.config.pubsub_topic = ConfigDict.from_query('pubsub_topic', query, decrypt_func=self.decrypt)

        # Pub/sub - subscriptions
        query = self.odb.get_pubsub_subscription_list(server.cluster.id, True)
        self.config.pubsub_subscription = ConfigDict.from_query('pubsub_subscription', query, decrypt_func=self.decrypt)

        # E-mail - SMTP
        query = self.odb.get_email_smtp_list(server.cluster.id, True)
        self.config.email_smtp = ConfigDict.from_query('email_smtp', query, decrypt_func=self.decrypt)

        # E-mail - IMAP
        query = self.odb.get_email_imap_list(server.cluster.id, True)
        self.config.email_imap = ConfigDict.from_query('email_imap', query, decrypt_func=self.decrypt)

        # Message paths
        self.config.msg_ns_store = NamespaceStore()
        self.config.json_pointer_store = JSONPointerStore()
        self.config.xpath_store = XPathStore()

        # Assign config to worker
        self.worker_store.worker_config = self.config
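
Example #3 repeats the same two-line query-then-ConfigDict pattern for dozens of configuration kinds. A hypothetical, table-driven variant of that pattern is sketched below; FakeODB and the plain dict stand in for self.odb and ConfigDict.from_query, so only the dispatch idea carries over.

    # Stub ODB whose getters mirror the (cluster_id, needs_columns) signature above
    class FakeODB:
        def get_search_es_list(self, cluster_id, needs_columns):
            return [{'name': 'es1', 'hosts': '127.0.0.1:9200'}]

        def get_out_ftp_list(self, cluster_id, needs_columns):
            return [{'name': 'ftp1', 'host': 'example.com'}]

    # Config name -> name of the ODB method that produces its query
    CONFIG_QUERIES = {
        'search_es': 'get_search_es_list',
        'out_ftp': 'get_out_ftp_list',
    }

    def load_config(odb, cluster_id):
        config = {}
        for name, query_name in CONFIG_QUERIES.items():
            query = getattr(odb, query_name)(cluster_id, True)
            config[name] = list(query) # ConfigDict.from_query would be applied here
        return config

    print(load_config(FakeODB(), 1))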
Example #4
    def start_server(parallel_server, zato_deployment_key=None):

        # Easier to type
        self = parallel_server

        # This cannot be done in __init__ because each sub-process obviously has its own PID
        self.pid = os.getpid()

        # This cannot be done in __init__ either because this variable is not available there yet
        self.is_first_worker = int(os.environ['ZATO_SERVER_WORKER_IDX']) == 0

        # Used later on
        use_tls = asbool(self.fs_server_config.crypto.use_tls)

        # Will be None if we are not running in the background.
        if not zato_deployment_key:
            zato_deployment_key = '{}.{}'.format(datetime.utcnow().isoformat(),
                                                 uuid4().hex)

        self.deployment_key = zato_deployment_key

        register_diag_handlers()

        # Create all POSIX IPC objects now that we have the deployment key
        self.shmem_size = int(float(self.fs_server_config.shmem.size) *
                              10**6)  # The size is given in MB; convert to bytes as an integer
        self.server_startup_ipc.create(self.deployment_key, self.shmem_size)

        # Store the ODB configuration, create an ODB connection pool and have self.odb use it
        self.config.odb_data = self.get_config_odb_data(self)
        self.set_up_odb()

        # Now try grabbing the basic server's data from the ODB. No point
        # in doing anything else if we can't get past this point.
        server = self.odb.fetch_server(self.config.odb_data)

        if not server:
            raise Exception('Server does not exist in the ODB')

        # Set up the server-wide default lock manager
        odb_data = self.config.odb_data
        backend_type = 'fcntl' if odb_data.engine == 'sqlite' else odb_data.engine
        self.zato_lock_manager = LockManager(backend_type, 'zato',
                                             self.odb.session)

        # Just to make sure distributed locking is configured correctly
        with self.zato_lock_manager(uuid4().hex):
            pass

        # Basic metadata
        self.id = server.id
        self.name = server.name
        self.cluster_id = server.cluster_id
        self.cluster = self.odb.cluster
        self.worker_id = '{}.{}.{}.{}'.format(self.cluster_id, self.id,
                                              self.worker_pid, new_cid())

        # Looked up upfront here and assigned to services in their store
        self.enforce_service_invokes = asbool(
            self.fs_server_config.misc.enforce_service_invokes)

        # For server-to-server communication
        self.servers = Servers(self.odb, self.cluster.name, self.decrypt)
        logger.info(
            'Preferred address of `%s@%s` (pid: %s) is `http%s://%s:%s`',
            self.name, self.cluster.name, self.pid, 's' if use_tls else '',
            self.preferred_address, self.port)

        # Reads in all configuration from ODB
        self.worker_store = WorkerStore(self.config, self)
        self.worker_store.invoke_matcher.read_config(
            self.fs_server_config.invoke_patterns_allowed)
        self.worker_store.target_matcher.read_config(
            self.fs_server_config.invoke_target_patterns_allowed)
        self.set_up_config(server)

        # Deploys services
        is_first, locally_deployed = self._after_init_common(server)

        # Initializes worker store, including connectors
        self.worker_store.init()
        self.request_dispatcher_dispatch = self.worker_store.request_dispatcher.dispatch

        # Normalize hot-deploy configuration
        self.hot_deploy_config = Bunch()

        self.hot_deploy_config.work_dir = os.path.normpath(
            os.path.join(self.repo_location,
                         self.fs_server_config.hot_deploy.work_dir))

        self.hot_deploy_config.backup_history = int(
            self.fs_server_config.hot_deploy.backup_history)
        self.hot_deploy_config.backup_format = self.fs_server_config.hot_deploy.backup_format

        # Configure remaining parts of SSO
        self.configure_sso()

        # Cannot be done in __init__ because self.sso_config is not available there yet
        salt_size = self.sso_config.hash_secret.salt_size
        self.crypto_manager.add_hash_scheme('zato.default',
                                            self.sso_config.hash_secret.rounds,
                                            salt_size)

        for name in ('current_work_dir', 'backup_work_dir',
                     'last_backup_work_dir', 'delete_after_pick_up'):

            # New in 2.0
            if name == 'delete_after_pick_up':
                value = asbool(self.fs_server_config.hot_deploy.get(
                    name, True))
                self.hot_deploy_config[name] = value
            else:
                self.hot_deploy_config[name] = os.path.normpath(
                    os.path.join(self.hot_deploy_config.work_dir,
                                 self.fs_server_config.hot_deploy[name]))

        broker_callbacks = {
            TOPICS[MESSAGE_TYPE.TO_PARALLEL_ANY]: self.worker_store.on_broker_msg,
            TOPICS[MESSAGE_TYPE.TO_PARALLEL_ALL]: self.worker_store.on_broker_msg,
        }

        self.broker_client = BrokerClient(self.kvdb, 'parallel',
                                          broker_callbacks,
                                          self.get_lua_programs())
        self.worker_store.set_broker_client(self.broker_client)

        self._after_init_accepted(locally_deployed)

        self.odb.server_up_down(server.token, SERVER_UP_STATUS.RUNNING, True,
                                self.host, self.port, self.preferred_address,
                                use_tls)

        if is_first:

            logger.info('First worker of `%s` is %s', self.name, self.pid)

            self.startup_callable_tool.invoke(
                SERVER_STARTUP.PHASE.IN_PROCESS_FIRST,
                kwargs={
                    'parallel_server': self,
                })

            # Startup services
            self.invoke_startup_services(is_first)
            spawn_greenlet(self.set_up_pickup)

            # IPC
            self.ipc_forwarder.name = self.name
            self.ipc_forwarder.pid = self.pid
            spawn_greenlet(self.ipc_forwarder.run)

            # Set up IBM MQ connections if that component is enabled
            if self.fs_server_config.component_enabled.ibm_mq:

                # Will block for a few seconds at most, until is_ok is returned,
                # which indicates whether the connector started or not.
                is_ok = self.start_ibm_mq_connector(
                    int(self.fs_server_config.ibm_mq.ipc_tcp_start_port))
                if is_ok:
                    self.create_initial_wmq_definitions(
                        self.worker_store.worker_config.definition_wmq)
                    self.create_initial_wmq_outconns(
                        self.worker_store.worker_config.out_wmq)
                    self.create_initial_wmq_channels(
                        self.worker_store.worker_config.channel_wmq)

        else:
            self.startup_callable_tool.invoke(
                SERVER_STARTUP.PHASE.IN_PROCESS_OTHER,
                kwargs={
                    'parallel_server': self,
                })

        # IPC
        self.ipc_api.name = self.name
        self.ipc_api.pid = self.pid
        self.ipc_api.on_message_callback = self.worker_store.on_ipc_message
        spawn_greenlet(self.ipc_api.run)

        self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.AFTER_STARTED,
                                          kwargs={
                                              'parallel_server': self,
                                          })

        logger.info('Started `%s@%s` (pid: %s)', server.name,
                    server.cluster.name, self.pid)
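
The broker_callbacks dictionary in Examples #1 and #4 routes broker messages by topic constant. The self-contained sketch below shows only that routing idea; all names are illustrative stand-ins for Zato's TOPICS and MESSAGE_TYPE constants.

    # Stand-ins for TOPICS[MESSAGE_TYPE.TO_PARALLEL_*] in the examples above
    TO_PARALLEL_ANY = 'to-parallel-any'
    TO_PARALLEL_ALL = 'to-parallel-all'

    def on_broker_msg(msg):
        print('Handling broker message:', msg)

    # Topic -> handler; both topics reuse the same callback, as above
    broker_callbacks = {
        TO_PARALLEL_ANY: on_broker_msg,
        TO_PARALLEL_ALL: on_broker_msg,
    }

    def dispatch(topic, msg):
        callback = broker_callbacks.get(topic)
        if callback:
            callback(msg)

    dispatch(TO_PARALLEL_ALL, {'action': 'example'})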
Example #5
    def set_up_pickup(self):

        empty = []

        # Fix up booleans and paths
        for stanza, stanza_config in self.pickup_config.items():

            # user_config_items is empty by default
            if not stanza_config:
                empty.append(stanza)
                continue

            stanza_config.read_on_pickup = asbool(
                stanza_config.get('read_on_pickup', True))
            stanza_config.parse_on_pickup = asbool(
                stanza_config.get('parse_on_pickup', True))
            stanza_config.delete_after_pick_up = asbool(
                stanza_config.get('delete_after_pick_up', True))
            stanza_config.case_insensitive = asbool(
                stanza_config.get('case_insensitive', True))
            stanza_config.pickup_from = absolutize(stanza_config.pickup_from,
                                                   self.base_dir)
            stanza_config.is_service_hot_deploy = False

            mpt = stanza_config.get('move_processed_to')
            stanza_config.move_processed_to = absolutize(
                mpt, self.base_dir) if mpt else None

            services = stanza_config.get('services') or []
            stanza_config.services = services if isinstance(services, list) else [services]

            topics = stanza_config.get('topics') or []
            stanza_config.topics = topics if isinstance(topics, list) else [topics]

            flags = globre.EXACT

            if stanza_config.case_insensitive:
                flags |= IGNORECASE

            patterns = stanza_config.patterns
            patterns = patterns if isinstance(patterns, list) else [patterns]
            stanza_config.patterns = [globre.compile(elem, flags) for elem in patterns]

            if not os.path.exists(stanza_config.pickup_from):
                logger.warn('Pickup dir `%s` does not exist (%s)',
                            stanza_config.pickup_from, stanza)

        for item in empty:
            del self.pickup_config[item]

        # OK, now that we have configured everything that pickup.conf contained,
        # we still need to make the server aware of services and of how to pick them up from the file system.

        stanza = 'zato_internal_service_hot_deploy'
        stanza_config = Bunch({
            'pickup_from': absolutize(self.fs_server_config.hot_deploy.pickup_dir, self.repo_location),
            'patterns': [globre.compile('*.py', globre.EXACT | IGNORECASE)],
            'read_on_pickup': False,
            'parse_on_pickup': False,
            'delete_after_pick_up': self.hot_deploy_config.delete_after_pick_up,
            'is_service_hot_deploy': True,
        })

        self.pickup_config[stanza] = stanza_config
        self.pickup = PickupManager(self, self.pickup_config)

        spawn_greenlet(self.pickup.run)
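
Example #5 leans on two idioms: normalizing a scalar-or-list config value into a list, and compiling file-name patterns with optional case-insensitivity. The sketch below reproduces both with the standard library only; fnmatch and re stand in for the globre package the original uses.

    import re
    from fnmatch import translate

    def as_list(value):
        # A scalar becomes a one-element list; a list passes through unchanged
        value = value or []
        return value if isinstance(value, list) else [value]

    def compile_patterns(patterns, case_insensitive=True):
        flags = re.IGNORECASE if case_insensitive else 0
        return [re.compile(translate(elem), flags) for elem in as_list(patterns)]

    patterns = compile_patterns('*.py')
    print(bool(patterns[0].match('Service.PY'))) # True - matched case-insensitively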
Example #6
    def deploy_missing_services(self, locally_deployed):
        """ Deploys services that exist on other servers but not on ours.
        """
        # The locally_deployed list contains all the services that we could import based on our current
        # understanding of the contents of the cluster. However, it is possible that we have
        # been shut down for a long time and during that time other servers deployed services
        # we do not know anything about. They are not stored locally because we were down.
        # Hence we need to check if there are any other servers in the cluster and, if so,
        # grab their list of services, compare it with what we have deployed and deploy
        # any that are missing.

        # Continue only if there is more than one running server in the cluster.
        other_servers = self.odb.get_servers()

        if other_servers:
            other_server = other_servers[0] # Index 0 is as random as any other because the list is not sorted
            missing = self.odb.get_missing_services(other_server, locally_deployed)

            if missing:

                logger.info('Found extra services to deploy: %s',
                            ', '.join(sorted(item.name for item in missing)))

                # (file_name, source_path) -> a list of services it contains
                modules = {}

                # Coalesce all service modules - it is possible that each one has multiple services
                # so we do not want to deploy the same module over and over for each service found.
                for service_id, name, source_path, source in missing:
                    file_name = os.path.basename(source_path)
                    tmp_fd, tmp_full_path = mkstemp(suffix='-' + file_name)

                    # Close the low-level descriptor right away - the path is reopened below
                    os.close(tmp_fd)

                    # Module names are unique so they can serve as keys
                    key = file_name

                    if key not in modules:
                        modules[key] = {
                            'tmp_full_path': tmp_full_path,
                            'services': [name] # We can append the initial name already in this 'if' branch
                        }

                        # Save the source code only once here
                        f = open(tmp_full_path, 'wb')
                        f.write(source)
                        f.close()

                    else:
                        modules[key]['services'].append(name)

                # Create a deployment package in ODB out of which all the services will be picked up ..
                for file_name, values in modules.items():
                    msg = Bunch()
                    msg.action = HOT_DEPLOY.CREATE_SERVICE.value
                    msg.msg_type = MESSAGE_TYPE.TO_PARALLEL_ALL
                    msg.package_id = hot_deploy(self, file_name, values['tmp_full_path'], notify=False)

                    # .. and tell the worker to actually deploy all the services the package contains.
                    #gevent.spawn(self.worker_store.on_broker_msg_HOT_DEPLOY_CREATE_SERVICE, msg)
                    self.worker_store.on_broker_msg_HOT_DEPLOY_CREATE_SERVICE(msg)

                    logger.info('Deployed extra services found: %s', sorted(values['services']))
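
The coalescing step in Example #6 groups services by the module file that contains them, so each module is written out and deployed once. A self-contained sketch of that grouping follows; the sample data is made up, and unlike the loop above it creates a temporary file only when a module is first seen, reusing the descriptor mkstemp returns.

    import os
    from tempfile import mkstemp

    # (service_id, name, source_path, source) rows, mimicking `missing` above
    missing = [
        (1, 'my.service.One', '/remote/mod_a.py', b'# source of mod_a'),
        (2, 'my.service.Two', '/remote/mod_a.py', b'# source of mod_a'),
        (3, 'my.service.Three', '/remote/mod_b.py', b'# source of mod_b'),
    ]

    modules = {}

    for service_id, name, source_path, source in missing:
        file_name = os.path.basename(source_path)
        if file_name not in modules:
            fd, tmp_full_path = mkstemp(suffix='-' + file_name)
            with os.fdopen(fd, 'wb') as f: # Save the source code only once
                f.write(source)
            modules[file_name] = {'tmp_full_path': tmp_full_path, 'services': [name]}
        else:
            modules[file_name]['services'].append(name)

    for file_name, values in modules.items():
        print(file_name, sorted(values['services']))
        os.remove(values['tmp_full_path']) # Clean up the temporary file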
Example #7
    def _after_init_accepted(self, server, locally_deployed):

        # Which components are enabled
        self.component_enabled.stats = asbool(
            self.fs_server_config.component_enabled.stats)
        self.component_enabled.slow_response = asbool(
            self.fs_server_config.component_enabled.slow_response)

        # Pub/sub
        self.pubsub = PubSubAPI(RedisPubSub(self.kvdb.conn))

        # Repo location so that AMQP subprocesses know where to read
        # the server's configuration from.
        self.config.repo_location = self.repo_location

        #
        # Cassandra - start
        #

        query = self.odb.get_cassandra_conn_list(server.cluster.id, True)
        self.config.cassandra_conn = ConfigDict.from_query(
            'cassandra_conn', query)

        query = self.odb.get_cassandra_query_list(server.cluster.id, True)
        self.config.cassandra_query = ConfigDict.from_query(
            'cassandra_query', query)

        #
        # Cassandra - end
        #

        #
        # Search - start
        #

        query = self.odb.get_search_es_list(server.cluster.id, True)
        self.config.search_es = ConfigDict.from_query('search_es', query)

        query = self.odb.get_search_solr_list(server.cluster.id, True)
        self.config.search_solr = ConfigDict.from_query('search_solr', query)

        #
        # Search - end
        #

        #
        # Cloud - start
        #

        # OpenStack - Swift

        query = self.odb.get_cloud_openstack_swift_list(
            server.cluster.id, True)
        self.config.cloud_openstack_swift = ConfigDict.from_query(
            'cloud_openstack_swift', query)

        query = self.odb.get_cloud_aws_s3_list(server.cluster.id, True)
        self.config.cloud_aws_s3 = ConfigDict.from_query('cloud_aws_s3', query)

        #
        # Cloud - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # Services
        query = self.odb.get_service_list(server.cluster.id, True)
        self.config.service = ConfigDict.from_query('service_list', query)

        #
        # Channels - start
        #

        # STOMP
        query = self.odb.get_channel_stomp_list(server.cluster.id, True)
        self.config.channel_stomp = ConfigDict.from_query(
            'channel_stomp', query)

        #
        # Channels - end
        #

        #
        # Outgoing connections - start
        #

        # AMQP
        query = self.odb.get_out_amqp_list(server.cluster.id, True)
        self.config.out_amqp = ConfigDict.from_query('out_amqp', query)

        # FTP
        query = self.odb.get_out_ftp_list(server.cluster.id, True)
        self.config.out_ftp = ConfigDict.from_query('out_ftp', query)

        # JMS WMQ
        query = self.odb.get_out_jms_wmq_list(server.cluster.id, True)
        self.config.out_jms_wmq = ConfigDict.from_query('out_jms_wmq', query)

        # Odoo
        query = self.odb.get_out_odoo_list(server.cluster.id, True)
        self.config.out_odoo = ConfigDict.from_query('out_odoo', query)

        # Plain HTTP
        query = self.odb.get_http_soap_list(server.cluster.id, 'outgoing',
                                            'plain_http', True)
        self.config.out_plain_http = ConfigDict.from_query(
            'out_plain_http', query)

        # SOAP
        query = self.odb.get_http_soap_list(server.cluster.id, 'outgoing',
                                            'soap', True)
        self.config.out_soap = ConfigDict.from_query('out_soap', query)

        # SQL
        query = self.odb.get_out_sql_list(server.cluster.id, True)
        self.config.out_sql = ConfigDict.from_query('out_sql', query)

        # STOMP
        query = self.odb.get_out_stomp_list(server.cluster.id, True)
        self.config.out_stomp = ConfigDict.from_query('out_stomp', query)

        # ZMQ channels
        query = self.odb.get_channel_zmq_list(server.cluster.id, True)
        self.config.channel_zmq = ConfigDict.from_query('channel_zmq', query)

        # ZMQ outgoing
        query = self.odb.get_out_zmq_list(server.cluster.id, True)
        self.config.out_zmq = ConfigDict.from_query('out_zmq', query)

        # WebSocket channels
        query = self.odb.get_channel_web_socket_list(server.cluster.id, True)
        self.config.channel_web_socket = ConfigDict.from_query(
            'channel_web_socket', query)

        #
        # Outgoing connections - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        #
        # Notifications - start
        #

        # OpenStack Swift
        query = self.odb.get_notif_cloud_openstack_swift_list(
            server.cluster.id, True)
        self.config.notif_cloud_openstack_swift = ConfigDict.from_query(
            'notif_cloud_openstack_swift', query)

        # SQL
        query = self.odb.get_notif_sql_list(server.cluster.id, True)
        self.config.notif_sql = ConfigDict.from_query('notif_sql', query)

        #
        # Notifications - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        #
        # Security - start
        #

        # API keys
        query = self.odb.get_apikey_security_list(server.cluster.id, True)
        self.config.apikey = ConfigDict.from_query('apikey', query)

        # AWS
        query = self.odb.get_aws_security_list(server.cluster.id, True)
        self.config.aws = ConfigDict.from_query('aws', query)

        # HTTP Basic Auth
        query = self.odb.get_basic_auth_list(server.cluster.id, None, True)
        self.config.basic_auth = ConfigDict.from_query('basic_auth', query)

        # JWT
        query = self.odb.get_jwt_list(server.cluster.id, None, True)
        self.config.jwt = ConfigDict.from_query('jwt', query)

        # NTLM
        query = self.odb.get_ntlm_list(server.cluster.id, True)
        self.config.ntlm = ConfigDict.from_query('ntlm', query)

        # OAuth
        query = self.odb.get_oauth_list(server.cluster.id, True)
        self.config.oauth = ConfigDict.from_query('oauth', query)

        # OpenStack
        query = self.odb.get_openstack_security_list(server.cluster.id, True)
        self.config.openstack_security = ConfigDict.from_query(
            'openstack_security', query)

        # RBAC - permissions
        query = self.odb.get_rbac_permission_list(server.cluster.id, True)
        self.config.rbac_permission = ConfigDict.from_query(
            'rbac_permission', query)

        # RBAC - roles
        query = self.odb.get_rbac_role_list(server.cluster.id, True)
        self.config.rbac_role = ConfigDict.from_query('rbac_role', query)

        # RBAC - client roles
        query = self.odb.get_rbac_client_role_list(server.cluster.id, True)
        self.config.rbac_client_role = ConfigDict.from_query(
            'rbac_client_role', query)

        # RBAC - role permission
        query = self.odb.get_rbac_role_permission_list(server.cluster.id, True)
        self.config.rbac_role_permission = ConfigDict.from_query(
            'rbac_role_permission', query)

        # Technical accounts
        query = self.odb.get_tech_acc_list(server.cluster.id, True)
        self.config.tech_acc = ConfigDict.from_query('tech_acc', query)

        # TLS CA certs
        query = self.odb.get_tls_ca_cert_list(server.cluster.id, True)
        self.config.tls_ca_cert = ConfigDict.from_query('tls_ca_cert', query)

        # TLS channel security
        query = self.odb.get_tls_channel_sec_list(server.cluster.id, True)
        self.config.tls_channel_sec = ConfigDict.from_query(
            'tls_channel_sec', query)

        # TLS key/cert pairs
        query = self.odb.get_tls_key_cert_list(server.cluster.id, True)
        self.config.tls_key_cert = ConfigDict.from_query('tls_key_cert', query)

        # WS-Security
        query = self.odb.get_wss_list(server.cluster.id, True)
        self.config.wss = ConfigDict.from_query('wss', query)

        # XPath
        query = self.odb.get_xpath_sec_list(server.cluster.id, True)
        self.config.xpath_sec = ConfigDict.from_query('xpath_sec', query)

        #
        # Security - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # All the HTTP/SOAP channels.
        http_soap = []
        for item in self.odb.get_http_soap_list(server.cluster.id, 'channel'):

            hs_item = Bunch()
            for key in item.keys():
                hs_item[key] = getattr(item, key)

            hs_item.replace_patterns_json_pointer = item.replace_patterns_json_pointer
            hs_item.replace_patterns_xpath = item.replace_patterns_xpath

            hs_item.match_target = '{}{}{}'.format(hs_item.soap_action,
                                                   MISC.SEPARATOR,
                                                   hs_item.url_path)
            hs_item.match_target_compiled = Matcher(hs_item.match_target)

            http_soap.append(hs_item)

        self.config.http_soap = http_soap

        # Namespaces
        query = self.odb.get_namespace_list(server.cluster.id, True)
        self.config.msg_ns = ConfigDict.from_query('msg_ns', query)

        # XPath
        query = self.odb.get_xpath_list(server.cluster.id, True)
        self.config.xpath = ConfigDict.from_query('msg_xpath', query)

        # JSON Pointer
        query = self.odb.get_json_pointer_list(server.cluster.id, True)
        self.config.json_pointer = ConfigDict.from_query('json_pointer', query)

        # SimpleIO
        self.config.simple_io = ConfigDict('simple_io', Bunch())
        self.config.simple_io['int_parameters'] = self.int_parameters
        self.config.simple_io['int_parameter_suffixes'] = self.int_parameter_suffixes
        self.config.simple_io['bool_parameter_prefixes'] = self.bool_parameter_prefixes

        # Pub/sub config
        self.config.pubsub = Bunch()
        self.config.pubsub.default_consumer = Bunch()
        self.config.pubsub.default_producer = Bunch()

        query = self.odb.get_pubsub_topic_list(server.cluster.id, True)
        self.config.pubsub.topics = ConfigDict.from_query(
            'pubsub_topics', query)

        id, name = self.odb.get_pubsub_default_client(
            server.cluster.id, 'zato.pubsub.default-consumer')
        self.config.pubsub.default_consumer.id, self.config.pubsub.default_consumer.name = id, name

        id, name = self.odb.get_pubsub_default_client(
            server.cluster.id, 'zato.pubsub.default-producer')
        self.config.pubsub.default_producer.id, self.config.pubsub.default_producer.name = id, name

        query = self.odb.get_pubsub_producer_list(server.cluster.id, True)
        self.config.pubsub.producers = ConfigDict.from_query(
            'pubsub_producers', query, list_config=True)

        query = self.odb.get_pubsub_consumer_list(server.cluster.id, True)
        self.config.pubsub.consumers = ConfigDict.from_query(
            'pubsub_consumers', query, list_config=True)

        # E-mail - SMTP
        query = self.odb.get_email_smtp_list(server.cluster.id, True)
        self.config.email_smtp = ConfigDict.from_query('email_smtp', query)

        # E-mail - IMAP
        query = self.odb.get_email_imap_list(server.cluster.id, True)
        self.config.email_imap = ConfigDict.from_query('email_imap', query)

        # Assign config to worker
        self.worker_store.worker_config = self.config
        self.worker_store.pubsub = self.pubsub
        self.worker_store.init()

        # Deploy missing services found on other servers
        if locally_deployed:
            self.deploy_missing_services(locally_deployed)

        # Signal to ODB that we are done with deploying everything
        self.odb.on_deployment_finished()

        # Default content type
        self.json_content_type = self.fs_server_config.content_type.json
        self.plain_xml_content_type = self.fs_server_config.content_type.plain_xml
        self.soap11_content_type = self.fs_server_config.content_type.soap11
        self.soap12_content_type = self.fs_server_config.content_type.soap12
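
Both Example #3 and Example #7 build a match_target by joining a SOAP action and a URL path with a separator, producing a single routing key that a matcher can compile once and reuse. A tiny sketch of that idea is below; the separator value is illustrative and may differ from MISC.SEPARATOR.

    SEPARATOR = ':::' # Illustrative; stands in for MISC.SEPARATOR

    def build_match_target(soap_action, url_path):
        # An empty SOAP action still yields a well-formed key for plain HTTP channels
        return '{}{}{}'.format(soap_action or '', SEPARATOR, url_path)

    print(build_match_target('urn:example:action', '/my/service'))
    print(build_match_target(None, '/my/service'))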
Example #8
    def __init__(self):
        self.host = None
        self.port = None
        self.crypto_manager = None
        self.odb = None # type: ODBManager
        self.odb_data = None
        self.config = None # type: ConfigStore
        self.repo_location = None
        self.user_conf_location = None
        self.sql_pool_store = None
        self.soap11_content_type = None
        self.soap12_content_type = None
        self.plain_xml_content_type = None
        self.json_content_type = None
        self.service_modules = None # Set programmatically in Spring
        self.service_sources = None # Set in a config file
        self.base_dir = None        # type: unicode
        self.tls_dir = None         # type: unicode
        self.static_dir = None      # type: unicode
        self.json_schema_dir = None # type: unicode
        self.hot_deploy_config = None
        self.pickup = None
        self.fs_server_config = None
        self.fs_sql_config = None
        self.pickup_config = None
        self.logging_config = None
        self.logging_conf_path = None
        self.sio_config = None
        self.sso_config = None
        self.connector_server_grace_time = None
        self.id = None # type: int
        self.name = None # type: unicode
        self.worker_id = None # type: int
        self.worker_pid = None # type: int
        self.cluster = None
        self.cluster_id = None # type: int
        self.kvdb = None
        self.startup_jobs = None
        self.worker_store = None # type: WorkerStore
        self.service_store = None # type: ServiceStore
        self.request_dispatcher_dispatch = None
        self.deployment_lock_expires = None
        self.deployment_lock_timeout = None
        self.deployment_key = ''
        self.has_gevent = None
        self.delivery_store = None
        self.static_config = None
        self.component_enabled = Bunch()
        self.client_address_headers = ['HTTP_X_ZATO_FORWARDED_FOR', 'HTTP_X_FORWARDED_FOR', 'REMOTE_ADDR']
        self.broker_client = None # type: BrokerClient
        self.return_tracebacks = None # type: bool
        self.default_error_message = None # type: unicode
        self.time_util = None # type: TimeUtil
        self.preferred_address = None # type: unicode
        self.crypto_use_tls = None # type: bool
        self.servers = None # type: Servers
        self.zato_lock_manager = None # type: LockManager
        self.pid = None # type: int
        self.sync_internal = None # type: bool
        self.ipc_api = IPCAPI()
        self.fifo_response_buffer_size = None # type: int # Will be in megabytes
        self.is_first_worker = None # type: bool
        self.shmem_size = -1.0 # Sentinel value; the real size is computed from server config in start_server
        self.server_startup_ipc = ServerStartupIPC()
        self.connector_config_ipc = ConnectorConfigIPC()
        self.keyutils = KeyUtils()
        self.sso_api = None # type: SSOAPI
        self.is_sso_enabled = False
        self.audit_pii = audit_pii
        self.has_fg = False
        self.startup_callable_tool = None
        self.default_internal_pubsub_endpoint_id = None
        self.rate_limiting = None # type: RateLimiting
        self._hash_secret_method = None
        self._hash_secret_rounds = None
        self._hash_secret_salt_size = None

        # Our arbiter may potentially call the cleanup procedure multiple times
        # and this will be set to True the first time around.
        self._is_process_closing = False

        # Allows users to store arbitrary data across service invocations
        self.user_ctx = Bunch()
        self.user_ctx_lock = gevent.lock.RLock()

        # Connectors
        self.connector_ibm_mq = IBMMQIPC(self)
        self.connector_sftp   = SFTPIPC(self)

        # HTTP methods allowed as a Python list
        self.http_methods_allowed = []

        # As above, but as a regular expression pattern
        self.http_methods_allowed_re = ''

        self.access_logger = logging.getLogger('zato_access_log')
        self.access_logger_log = self.access_logger._log # Cache the low-level _log method to avoid repeated attribute lookups
        self.needs_access_log = self.access_logger.isEnabledFor(INFO)
        self.has_pubsub_audit_log = logging.getLogger('zato_pubsub_audit').isEnabledFor(INFO)
        self.is_enabled_for_warn = logging.getLogger('zato').isEnabledFor(WARN)

        # The main config store
        self.config = ConfigStore()
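
Note how the last few logging assignments evaluate `isEnabledFor` once, at startup, and cache the result (and even the logger's private `_log` method) instead of re-checking on every request. A minimal sketch of the same pattern, with a hypothetical `handle_request` standing in for the hot path:

    import logging
    from logging import INFO

    access_logger = logging.getLogger('zato_access_log')

    # Check the level once, up front, rather than on each call
    needs_access_log = access_logger.isEnabledFor(INFO)

    def handle_request(path): # hypothetical hot-path function
        if needs_access_log:
            access_logger.info('Handling `%s`', path)

The trade-off is that a log level changed at runtime will not be noticed until the cached flag is recomputed.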
Example #9
    def __init__(self):
        self.host = None
        self.port = None
        self.crypto_manager = None
        self.odb = None
        self.odb_data = None
        self.config = None
        self.repo_location = None
        self.user_conf_location = None
        self.sql_pool_store = None
        self.int_parameters = None
        self.int_parameter_suffixes = None
        self.bool_parameter_prefixes = None
        self.soap11_content_type = None
        self.soap12_content_type = None
        self.plain_xml_content_type = None
        self.json_content_type = None
        self.internal_service_modules = None # Zato's own internal services
        self.service_modules = None # Set programmatically in Spring
        self.service_sources = None # Set in a config file
        self.base_dir = None
        self.tls_dir = None
        self.hot_deploy_config = None
        self.pickup = None
        self.fs_server_config = None
        self.pickup_config = None
        self.connector_server_grace_time = None
        self.id = None
        self.name = None
        self.cluster = None
        self.cluster_id = None
        self.kvdb = None
        self.startup_jobs = None
        self.worker_store = None
        self.deployment_lock_expires = None
        self.deployment_lock_timeout = None
        self.deployment_key = ''
        self.app_context = None
        self.has_gevent = None
        self.delivery_store = None
        self.static_config = None
        self.component_enabled = Bunch()
        self.client_address_headers = ['HTTP_X_ZATO_FORWARDED_FOR', 'HTTP_X_FORWARDED_FOR', 'REMOTE_ADDR']
        self.broker_client = None
        self.time_util = None
        self.preferred_address = None
        self.crypto_use_tls = None
        self.servers = None
        self.zato_lock_manager = None
        self.pid = None
        self.sync_internal = None
        # The boolean flag appears to mark whether a given instance acts as a forwarder
        self.ipc_api = IPCAPI(False)
        self.ipc_forwarder = IPCAPI(True)
        self.fifo_response_buffer_size = 0.1 # In megabytes

        # Allows users to store arbitrary data across service invocations
        self.user_ctx = Bunch()
        self.user_ctx_lock = gevent.lock.RLock()

        self.access_logger = logging.getLogger('zato_access_log')
        self.needs_access_log = self.access_logger.isEnabledFor(INFO)

        # The main config store
        self.config = ConfigStore()

        gevent.signal(signal.SIGINT, self.destroy)
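
The final line registers `self.destroy` as a SIGINT handler so that Ctrl-C triggers an orderly shutdown inside the gevent event loop. Newer gevent releases deprecated the callable form of `gevent.signal` in favour of `gevent.signal_handler`; a minimal sketch using the newer spelling, with a hypothetical `destroy` function:

    import signal
    import gevent

    def destroy(): # hypothetical stand-in for the server's cleanup method
        print('Shutting down')

    # gevent >= 1.5 prefers signal_handler; older releases accepted gevent.signal(...)
    gevent.signal_handler(signal.SIGINT, destroy)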
Example #10
    def start_server(parallel_server, zato_deployment_key=None):

        # Easier to type
        self = parallel_server

        # This cannot be done in __init__ because each sub-process obviously has its own PID
        self.pid = os.getpid()

        # Used later on
        use_tls = asbool(self.fs_server_config.crypto.use_tls)

        # Will be None if we are not running in background.
        if not zato_deployment_key:
            zato_deployment_key = '{}.{}'.format(datetime.utcnow().isoformat(), uuid4().hex)

        self.deployment_key = zato_deployment_key

        register_diag_handlers()

        # Store the ODB configuration, create an ODB connection pool and have self.odb use it
        self.config.odb_data = self.get_config_odb_data(self)
        self.set_odb_pool()

        # Now try grabbing the basic server's data from the ODB. No point
        # in doing anything else if we can't get past this point.
        server = self.odb.fetch_server(self.config.odb_data)

        if not server:
            raise Exception('Server does not exist in the ODB')

        # Set up the server-wide default lock manager
        odb_data = self.config.odb_data
        backend_type = 'fcntl' if odb_data.engine == 'sqlite' else odb_data.engine
        self.zato_lock_manager = LockManager(backend_type, 'zato', self.odb.session)

        # Just to make sure distributed locking is configured correctly
        with self.zato_lock_manager(uuid4().hex):
            pass

        # Basic metadata
        self.id = server.id
        self.name = server.name
        self.cluster_id = server.cluster_id
        self.cluster = self.odb.cluster

        # For server-to-server communication
        logger.info('Preferred address of `%s@%s` (pid: %s) is `http%s://%s:%s`', self.name,
            self.cluster.name, self.pid, 's' if use_tls else '', self.preferred_address,
            self.port)

        self.servers = Servers(self.odb, self.cluster.name)

        is_first, locally_deployed = self._after_init_common(server)

        self._after_init_accepted(server, locally_deployed)

        # Messages addressed to any one server (ANY) or to all of them (ALL)
        # are handled by the same worker-store callback
        broker_callbacks = {
            TOPICS[MESSAGE_TYPE.TO_PARALLEL_ANY]: self.worker_store.on_broker_msg,
            TOPICS[MESSAGE_TYPE.TO_PARALLEL_ALL]: self.worker_store.on_broker_msg,
        }

        self.broker_client = BrokerClient(self.kvdb, 'parallel', broker_callbacks, self.get_lua_programs())
        self.worker_store.set_broker_client(self.broker_client)

        self.odb.server_up_down(server.token, SERVER_UP_STATUS.RUNNING, True, self.host,
            self.port, self.preferred_address, use_tls)

        # Normalize hot-deploy configuration
        self.hot_deploy_config = Bunch()

        self.hot_deploy_config.work_dir = os.path.normpath(os.path.join(
            self.repo_location, self.fs_server_config.hot_deploy.work_dir))

        self.hot_deploy_config.backup_history = int(self.fs_server_config.hot_deploy.backup_history)
        self.hot_deploy_config.backup_format = self.fs_server_config.hot_deploy.backup_format

        for name in ('current_work_dir', 'backup_work_dir', 'last_backup_work_dir', 'delete_after_pick_up'):

            # New in 2.0
            if name == 'delete_after_pick_up':
                value = asbool(self.fs_server_config.hot_deploy.get(name, True))
                self.hot_deploy_config[name] = value
            else:
                self.hot_deploy_config[name] = os.path.normpath(os.path.join(
                    self.hot_deploy_config.work_dir, self.fs_server_config.hot_deploy[name]))

        # Startup services
        if is_first:
            self.invoke_startup_services(is_first)
            spawn_greenlet(self.set_up_pickup)

        # IPC - the forwarder runs in the first worker only
        if is_first:
            self.ipc_forwarder.name = self.name
            self.ipc_forwarder.pid = self.pid
            spawn_greenlet(self.ipc_forwarder.run)

        # IPC - each worker runs its own API
        self.ipc_api.name = self.name
        self.ipc_api.pid = self.pid
        self.ipc_api.on_message_callback = self.worker_store.on_ipc_message
        spawn_greenlet(self.ipc_api.run)

        logger.info('Started `%s@%s` (pid: %s)', server.name, server.cluster.name, self.pid)
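
The hot-deploy loop above mixes two kinds of entries: directory names that must be resolved relative to `work_dir`, and a boolean flag (`delete_after_pick_up`, new in 2.0) that defaults to True when absent. A minimal sketch of the same normalization, using hypothetical sample values in place of `fs_server_config.hot_deploy`:

    import os

    def asbool(value): # simplified stand-in for the asbool used above
        return str(value).lower() in ('true', '1', 'yes', 'on')

    # Hypothetical configuration values
    repo_location = '/opt/zato/server1/config/repo'
    hot_deploy = {
        'work_dir': '../hot-deploy',
        'current_work_dir': './current',
        'backup_work_dir': './backup',
        'last_backup_work_dir': './backup/last',
        # 'delete_after_pick_up' is deliberately absent -- it defaults to True
    }

    work_dir = os.path.normpath(os.path.join(repo_location, hot_deploy['work_dir']))
    config = {'work_dir': work_dir}

    for name in ('current_work_dir', 'backup_work_dir', 'last_backup_work_dir', 'delete_after_pick_up'):
        if name == 'delete_after_pick_up':
            config[name] = asbool(hot_deploy.get(name, True))
        else:
            config[name] = os.path.normpath(os.path.join(work_dir, hot_deploy[name]))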