Example 1
    def test_acquire_already_taken_lock_timeout(self):

        if not self.is_set_up:
            return

        name = rand_string()
        default_ns = rand_string()

        lock_manager = LockManager(self.backend_type, default_ns)

        lock1 = lock_manager.acquire(name, ttl=2)
        self.assertEquals(lock1.acquired, True)

        # This raises LockTimeout because we wait for only 1 second while the lock above has a TTL of 2 seconds.
        try:
            lock_manager.acquire(name, block=1)
        except LockTimeout as e:
            expected = 'Could not obtain lock for `{}` `{}` within 1s'.format(default_ns, name)
            self.assertEquals(e.args[0], expected)
        else:
            self.fail('Expected LockTimeout to be raised')
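
As an aside, the same expectation can be written with unittest's assertRaises context manager (available since Python 2.7), which fails the test on its own if LockTimeout is never raised; a sketch of the tail of the test above:

        # Equivalent check with assertRaises; no else/fail bookkeeping needed
        with self.assertRaises(LockTimeout) as ctx:
            lock_manager.acquire(name, block=1)

        expected = 'Could not obtain lock for `{}` `{}` within 1s'.format(default_ns, name)
        self.assertEqual(ctx.exception.args[0], expected)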
Example 2
    def test_acquire_already_taken_auto_release(self):

        if not self.is_set_up:
            return

        name = rand_string()
        default_ns = rand_string()

        lock_manager = LockManager(self.backend_type, default_ns)

        lock1 = lock_manager.acquire(name, ttl=1)
        self.assertEquals(lock1.acquired, True)

        # This will not obtain the lock because it's just been taken above ..
        lock2 = lock_manager.acquire(name, block=False)
        self.assertEquals(lock2.acquired, False)

        # .. however, if we wait a moment we will get it because the original will have expired.
        sleep(2)
        lock3 = lock_manager.acquire(name)
        self.assertEquals(lock3.acquired, True)
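
If polling is preferred over an unconditional sleep, the block parameter from Example 1 can presumably serve the same purpose here, since acquire waits up to block seconds before raising LockTimeout; a sketch of the tail of the test under that assumption:

        # Instead of sleep(2): wait up to 3 seconds for the original
        # lock's 1-second TTL to expire, polling at the default interval.
        lock3 = lock_manager.acquire(name, block=3)
        self.assertEquals(lock3.acquired, True)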
Example 3
    def test_acquire_already_taken_manual_release(self):

        if not self.is_set_up:
            return

        name = rand_string()
        default_ns = rand_string()

        lock_manager = LockManager(self.backend_type, default_ns)

        lock1 = lock_manager.acquire(name, ttl=10)
        self.assertEquals(lock1.acquired, True)

        # This will not obtain the lock because it's just been taken above ..
        lock2 = lock_manager.acquire(name, block=False)
        self.assertEquals(lock2.acquired, False)

        # .. however, if we release the lock manually it will become available straightaway.
        lock1.release()

        lock3 = lock_manager.acquire(name)
        self.assertEquals(lock3.acquired, True)
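
Outside a test, a manually released lock is usually wrapped in try/finally so it is freed even when the guarded code raises; a minimal sketch against the same API:

        lock = lock_manager.acquire(name, ttl=10)
        try:
            pass  # ... work that must hold the lock ...
        finally:
            # Release even if the guarded block raised
            lock.release()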
Example 4
    def test_lock_info_name_no_namespace(self):

        if not self.is_set_up:
            return

        name = rand_string()
        default_ns = rand_string()
        lock_manager = LockManager(self.backend_type, default_ns)

        with lock_manager(name) as lock_info:
            self.assertEquals(lock_info.namespace, default_ns)
            self.assertEquals(lock_info.name, name)
            self.assertEquals(lock_info.ttl, DEFAULT.TTL)
            self.assertEquals(lock_info.acquired, True)
            self.assertEquals(lock_info.lock_type, LOCK_TYPE.PERMANENT)
            self.assertEquals(lock_info.block, DEFAULT.BLOCK)
            self.assertEquals(lock_info.block_interval, DEFAULT.BLOCK_INTERVAL)
Example 5
    def test_lock_info_name_with_attrs(self):

        if not self.is_set_up:
            return

        name = rand_string()
        default_ns = rand_string()
        ns = rand_string(7)
        ttl = rand_int()
        block = rand_int()
        block_interval = rand_int()
        lock_manager = LockManager(self.backend_type, default_ns)

        with lock_manager(name, ns, ttl, block, block_interval) as lock_info:
            self.assertEquals(lock_info.namespace, ns)
            self.assertEquals(lock_info.name, name)
            self.assertEquals(lock_info.ttl, ttl)
            self.assertEquals(lock_info.acquired, True)
            self.assertEquals(lock_info.lock_type, LOCK_TYPE.PERMANENT)
            self.assertEquals(lock_info.block, block)
            self.assertEquals(lock_info.block_interval, block_interval)
Example 6
    def start_server(parallel_server, zato_deployment_key=None):

        # Easier to type
        self = parallel_server

        # This cannot be done in __init__ because each sub-process obviously has its own PID
        self.pid = os.getpid()

        # This also cannot be done in __init__, which doesn't have this variable yet
        self.is_first_worker = int(os.environ['ZATO_SERVER_WORKER_IDX']) == 0

        # Used later on
        use_tls = asbool(self.fs_server_config.crypto.use_tls)

        # Will be None if we are not running in the background.
        if not zato_deployment_key:
            zato_deployment_key = '{}.{}'.format(datetime.utcnow().isoformat(),
                                                 uuid4().hex)

        self.deployment_key = zato_deployment_key

        register_diag_handlers()

        # Create all POSIX IPC objects now that we have the deployment key
        self.shmem_size = int(float(self.fs_server_config.shmem.size) *
                              10**6)  # Convert megabytes to bytes, as an integer
        self.server_startup_ipc.create(self.deployment_key, self.shmem_size)

        # Store the ODB configuration, create an ODB connection pool and have self.odb use it
        self.config.odb_data = self.get_config_odb_data(self)
        self.set_up_odb()

        # Now try grabbing the basic server's data from the ODB. No point
        # in doing anything else if we can't get past this point.
        server = self.odb.fetch_server(self.config.odb_data)

        if not server:
            raise Exception('Server does not exist in the ODB')

        # Set up the server-wide default lock manager
        odb_data = self.config.odb_data
        backend_type = 'fcntl' if odb_data.engine == 'sqlite' else odb_data.engine
        self.zato_lock_manager = LockManager(backend_type, 'zato',
                                             self.odb.session)

        # Just to make sure distributed locking is configured correctly
        with self.zato_lock_manager(uuid4().hex):
            pass

        # Basic metadata
        self.id = server.id
        self.name = server.name
        self.cluster_id = server.cluster_id
        self.cluster = self.odb.cluster
        self.worker_id = '{}.{}.{}.{}'.format(self.cluster_id, self.id,
                                              self.worker_pid, new_cid())

        # Looked up upfront here and assigned to services in their store
        self.enforce_service_invokes = asbool(
            self.fs_server_config.misc.enforce_service_invokes)

        # For server-to-server communication
        self.servers = Servers(self.odb, self.cluster.name, self.decrypt)
        logger.info(
            'Preferred address of `%s@%s` (pid: %s) is `http%s://%s:%s`',
            self.name, self.cluster.name, self.pid, 's' if use_tls else '',
            self.preferred_address, self.port)

        # Reads in all configuration from ODB
        self.worker_store = WorkerStore(self.config, self)
        self.worker_store.invoke_matcher.read_config(
            self.fs_server_config.invoke_patterns_allowed)
        self.worker_store.target_matcher.read_config(
            self.fs_server_config.invoke_target_patterns_allowed)
        self.set_up_config(server)

        # Deploys services
        is_first, locally_deployed = self._after_init_common(server)

        # Initializes worker store, including connectors
        self.worker_store.init()
        self.request_dispatcher_dispatch = self.worker_store.request_dispatcher.dispatch

        # Normalize hot-deploy configuration
        self.hot_deploy_config = Bunch()

        self.hot_deploy_config.work_dir = os.path.normpath(
            os.path.join(self.repo_location,
                         self.fs_server_config.hot_deploy.work_dir))

        self.hot_deploy_config.backup_history = int(
            self.fs_server_config.hot_deploy.backup_history)
        self.hot_deploy_config.backup_format = self.fs_server_config.hot_deploy.backup_format

        # Configure remaining parts of SSO
        self.configure_sso()

        # Cannot be done in __init__ because self.sso_config is not available there yet
        salt_size = self.sso_config.hash_secret.salt_size
        self.crypto_manager.add_hash_scheme('zato.default',
                                            self.sso_config.hash_secret.rounds,
                                            salt_size)

        for name in ('current_work_dir', 'backup_work_dir',
                     'last_backup_work_dir', 'delete_after_pick_up'):

            # New in 2.0
            if name == 'delete_after_pick_up':
                value = asbool(self.fs_server_config.hot_deploy.get(
                    name, True))
                self.hot_deploy_config[name] = value
            else:
                self.hot_deploy_config[name] = os.path.normpath(
                    os.path.join(self.hot_deploy_config.work_dir,
                                 self.fs_server_config.hot_deploy[name]))

        broker_callbacks = {
            TOPICS[MESSAGE_TYPE.TO_PARALLEL_ANY]: self.worker_store.on_broker_msg,
            TOPICS[MESSAGE_TYPE.TO_PARALLEL_ALL]: self.worker_store.on_broker_msg,
        }

        self.broker_client = BrokerClient(self.kvdb, 'parallel',
                                          broker_callbacks,
                                          self.get_lua_programs())
        self.worker_store.set_broker_client(self.broker_client)

        self._after_init_accepted(locally_deployed)

        self.odb.server_up_down(server.token, SERVER_UP_STATUS.RUNNING, True,
                                self.host, self.port, self.preferred_address,
                                use_tls)

        if is_first:

            logger.info('First worker of `%s` is %s', self.name, self.pid)

            self.startup_callable_tool.invoke(
                SERVER_STARTUP.PHASE.IN_PROCESS_FIRST,
                kwargs={
                    'parallel_server': self,
                })

            # Startup services
            self.invoke_startup_services(is_first)
            spawn_greenlet(self.set_up_pickup)

            # IPC
            ipc_forwarder_name = '{}-{}'.format(self.cluster.name, self.name)
            ipc_forwarder_name = fs_safe_name(ipc_forwarder_name)

            self.ipc_forwarder.name = ipc_forwarder_name
            self.ipc_forwarder.pid = self.pid
            spawn_greenlet(self.ipc_forwarder.run)

            # Set up IBM MQ connections if that component is enabled
            if self.fs_server_config.component_enabled.ibm_mq:

                # Will block for a few seconds at most, until is_ok is returned,
                # which indicates whether the connector started.
                is_ok = self.start_ibm_mq_connector(
                    int(self.fs_server_config.ibm_mq.ipc_tcp_start_port))
                if is_ok:
                    self.create_initial_wmq_definitions(
                        self.worker_store.worker_config.definition_wmq)
                    self.create_initial_wmq_outconns(
                        self.worker_store.worker_config.out_wmq)
                    self.create_initial_wmq_channels(
                        self.worker_store.worker_config.channel_wmq)

        else:
            self.startup_callable_tool.invoke(
                SERVER_STARTUP.PHASE.IN_PROCESS_OTHER,
                kwargs={
                    'parallel_server': self,
                })

        # IPC
        self.ipc_api.name = self.name
        self.ipc_api.pid = self.pid
        self.ipc_api.on_message_callback = self.worker_store.on_ipc_message
        spawn_greenlet(self.ipc_api.run)

        self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.AFTER_STARTED,
                                          kwargs={
                                              'parallel_server': self,
                                          })

        logger.info('Started `%s@%s` (pid: %s)', server.name,
                    server.cluster.name, self.pid)
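
One detail worth isolating from the code above is the lock-backend choice: SQLite has no server process that could coordinate locks between Zato workers, so the code falls back to fcntl-based file locks, while any other engine name is used as the backend directly. A standalone sketch of that one-liner (the function name is illustrative):

    def choose_lock_backend(engine):
        # SQLite cannot coordinate locks across processes, so use
        # fcntl file locks; other engines lock through the database itself.
        return 'fcntl' if engine == 'sqlite' else engine

    assert choose_lock_backend('sqlite') == 'fcntl'
    assert choose_lock_backend('postgresql') == 'postgresql'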
Example 7
    def start_server(parallel_server, zato_deployment_key=None):

        # Easier to type
        self = parallel_server # type: ParallelServer

        # This cannot be done in __init__ because each sub-process obviously has its own PID
        self.pid = os.getpid()

        # This also cannot be done in __init__, which doesn't have this variable yet
        self.is_first_worker = int(os.environ['ZATO_SERVER_WORKER_IDX']) == 0

        # Used later on
        use_tls = asbool(self.fs_server_config.crypto.use_tls)

        # Will be None if we are not running in the background.
        if not zato_deployment_key:
            zato_deployment_key = '{}.{}'.format(datetime.utcnow().isoformat(), uuid4().hex)

        self.deployment_key = zato_deployment_key

        register_diag_handlers()

        # Create all POSIX IPC objects now that we have the deployment key
        self.shmem_size = int(float(self.fs_server_config.shmem.size) * 10**6) # Convert megabytes to bytes, as an integer

        self.server_startup_ipc.create(self.deployment_key, self.shmem_size)
        self.connector_config_ipc.create(self.deployment_key, self.shmem_size)

        # Store the ODB configuration, create an ODB connection pool and have self.odb use it
        self.config.odb_data = self.get_config_odb_data(self)
        self.set_up_odb()

        # Now try grabbing the basic server's data from the ODB. No point
        # in doing anything else if we can't get past this point.
        server = self.odb.fetch_server(self.config.odb_data)

        if not server:
            raise Exception('Server does not exist in the ODB')

        # Set up the server-wide default lock manager
        odb_data = self.config.odb_data
        backend_type = 'fcntl' if odb_data.engine == 'sqlite' else odb_data.engine
        self.zato_lock_manager = LockManager(backend_type, 'zato', self.odb.session)

        # Just to make sure distributed locking is configured correctly
        with self.zato_lock_manager(uuid4().hex):
            pass

        # Basic metadata
        self.id = server.id
        self.name = server.name
        self.cluster_id = server.cluster_id
        self.cluster = self.odb.cluster
        self.worker_id = '{}.{}.{}.{}'.format(self.cluster_id, self.id, self.worker_pid, new_cid())

        # Looked up upfront here and assigned to services in their store
        self.enforce_service_invokes = asbool(self.fs_server_config.misc.enforce_service_invokes)

        # For server-to-server communication
        self.servers = Servers(self.odb, self.cluster.name, self.decrypt)
        logger.info('Preferred address of `%s@%s` (pid: %s) is `http%s://%s:%s`', self.name,
                    self.cluster.name, self.pid, 's' if use_tls else '', self.preferred_address,
                    self.port)

        # Configure which HTTP methods can be invoked via REST or SOAP channels
        methods_allowed = self.fs_server_config.http.methods_allowed
        methods_allowed = methods_allowed if isinstance(methods_allowed, list) else [methods_allowed]
        self.http_methods_allowed.extend(methods_allowed)

        # As above, as a regular expression to be used in pattern matching
        http_methods_allowed_re = '|'.join(self.http_methods_allowed)
        self.http_methods_allowed_re = '({})'.format(http_methods_allowed_re)

        # Reads in all configuration from ODB
        self.worker_store = WorkerStore(self.config, self)
        self.worker_store.invoke_matcher.read_config(self.fs_server_config.invoke_patterns_allowed)
        self.worker_store.target_matcher.read_config(self.fs_server_config.invoke_target_patterns_allowed)
        self.set_up_config(server)

        # Normalize hot-deploy configuration
        self.hot_deploy_config = Bunch()

        self.hot_deploy_config.pickup_dir = absolutize(self.fs_server_config.hot_deploy.pickup_dir, self.repo_location)

        self.hot_deploy_config.work_dir = os.path.normpath(os.path.join(
            self.repo_location, self.fs_server_config.hot_deploy.work_dir))

        self.hot_deploy_config.backup_history = int(self.fs_server_config.hot_deploy.backup_history)
        self.hot_deploy_config.backup_format = self.fs_server_config.hot_deploy.backup_format

        # Added in 3.1, hence optional
        max_batch_size = int(self.fs_server_config.hot_deploy.get('max_batch_size', 1000))

        # The configured value is in kilobytes; convert it to bytes
        max_batch_size = max_batch_size * 1000

        # Finally, assign it to ServiceStore
        self.service_store.max_batch_size = max_batch_size

        # Deploys services
        is_first, locally_deployed = self._after_init_common(server)

        # Initializes worker store, including connectors
        self.worker_store.init()
        self.request_dispatcher_dispatch = self.worker_store.request_dispatcher.dispatch

        # Configure remaining parts of SSO
        self.configure_sso()

        # Cannot be done in __init__ because self.sso_config is not available there yet
        salt_size = self.sso_config.hash_secret.salt_size
        self.crypto_manager.add_hash_scheme('zato.default', self.sso_config.hash_secret.rounds, salt_size)

        for name in ('current_work_dir', 'backup_work_dir', 'last_backup_work_dir', 'delete_after_pickup'):

            # New in 2.0
            if name == 'delete_after_pickup':

                # For backward compatibility, we need to support both names
                old_name = 'delete_after_pick_up'

                if old_name in self.fs_server_config.hot_deploy:
                    _name = old_name
                else:
                    _name = name

                value = asbool(self.fs_server_config.hot_deploy.get(_name, True))
                self.hot_deploy_config[name] = value
            else:
                self.hot_deploy_config[name] = os.path.normpath(os.path.join(
                    self.hot_deploy_config.work_dir, self.fs_server_config.hot_deploy[name]))

        broker_callbacks = {
            TOPICS[MESSAGE_TYPE.TO_PARALLEL_ANY]: self.worker_store.on_broker_msg,
            TOPICS[MESSAGE_TYPE.TO_PARALLEL_ALL]: self.worker_store.on_broker_msg,
        }

        self.broker_client = BrokerClient(self.kvdb, 'parallel', broker_callbacks, self.get_lua_programs())
        self.worker_store.set_broker_client(self.broker_client)

        # Make sure that the broker client's connection is ready before continuing,
        # to rule out edge cases where, for instance, hot deployment would
        # try to publish a locally found package (one of the extra packages found)
        # before the client's thread has connected to the KVDB.
        if not self.broker_client.ready:
            start = now = datetime.utcnow()
            max_seconds = 120
            until = now + timedelta(seconds=max_seconds)

            while not self.broker_client.ready:
                now = datetime.utcnow()
                delta = (now - start).total_seconds()
                if now < until:
                    # Do not log too early so as not to clutter logs
                    if delta > 2:
                        logger.info('Waiting for broker client to become ready (%s, max:%s)', delta, max_seconds)
                    gevent.sleep(0.5)
                else:
                    raise Exception('Broker client did not become ready within {} seconds'.format(max_seconds))

        self._after_init_accepted(locally_deployed)
        self.odb.server_up_down(
            server.token, SERVER_UP_STATUS.RUNNING, True, self.host, self.port, self.preferred_address, use_tls)

        if is_first:

            logger.info('First worker of `%s` is %s', self.name, self.pid)

            self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IN_PROCESS_FIRST, kwargs={
                'parallel_server': self,
            })

            # Clean up any old WSX connections possibly registered for this server
            # which may still be lingering around, for instance if the server was
            # previously shut down forcibly and did not have an opportunity to run self.cleanup_on_stop
            self.cleanup_wsx()

            # Startup services
            self.invoke_startup_services(is_first)
            spawn_greenlet(self.set_up_pickup)

            # Set up subprocess-based IBM MQ connections if that component is enabled
            if self.fs_server_config.component_enabled.ibm_mq:

                # Will block for a few seconds at most, until is_ok is returned,
                # which indicates whether the connector started.
                is_ok = self.connector_ibm_mq.start_ibm_mq_connector(int(self.fs_server_config.ibm_mq.ipc_tcp_start_port))

                try:
                    if is_ok:
                        self.connector_ibm_mq.create_initial_wmq_definitions(self.worker_store.worker_config.definition_wmq)
                        self.connector_ibm_mq.create_initial_wmq_outconns(self.worker_store.worker_config.out_wmq)
                        self.connector_ibm_mq.create_initial_wmq_channels(self.worker_store.worker_config.channel_wmq)
                except Exception as e:
                    logger.warn('Could not create initial IBM MQ objects, e:`%s`', e)

            # Set up subprocess-based SFTP connections
            is_ok = self.connector_sftp.start_sftp_connector(int(self.fs_server_config.ibm_mq.ipc_tcp_start_port))
            if is_ok:
                self.connector_sftp.create_initial_sftp_outconns(self.worker_store.worker_config.out_sftp)

        else:
            self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IN_PROCESS_OTHER, kwargs={
                'parallel_server': self,
            })

        # IPC
        self.ipc_api.name = self.ipc_api.get_endpoint_name(self.cluster.name, self.name, self.pid)
        self.ipc_api.pid = self.pid
        self.ipc_api.on_message_callback = self.worker_store.on_ipc_message
        spawn_greenlet(self.ipc_api.run)

        self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.AFTER_STARTED, kwargs={
            'parallel_server': self,
        })

        logger.info('Started `%s@%s` (pid: %s)', server.name, server.cluster.name, self.pid)
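
The readiness loop in this variant is a reusable bounded-wait pattern: poll a flag, stay quiet for a grace period before logging, and fail hard once a deadline passes. A self-contained sketch with illustrative names, using time.sleep and print where the original uses gevent.sleep and logger.info:

    from datetime import datetime, timedelta
    from time import sleep

    def wait_until_ready(is_ready, max_seconds=120, interval=0.5, quiet_for=2):

        # Two timestamps are needed: `until` is the hard deadline,
        # while `start` lets us report how long we have been waiting.
        start = datetime.utcnow()
        until = start + timedelta(seconds=max_seconds)

        while not is_ready():
            now = datetime.utcnow()
            delta = (now - start).total_seconds()
            if now >= until:
                raise Exception('Not ready within {} seconds'.format(max_seconds))

            # Do not log too early so as not to clutter logs
            if delta > quiet_for:
                print('Waiting to become ready ({:.1f}s, max:{}s)'.format(delta, max_seconds))
            sleep(interval)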
Example 8
    def start_server(parallel_server, zato_deployment_key=None):

        # Easier to type
        self = parallel_server

        # This cannot be done in __init__ because each sub-process obviously has its own PID
        self.pid = os.getpid()

        # Used later on
        use_tls = asbool(self.fs_server_config.crypto.use_tls)

        # Will be None if we are not running in the background.
        if not zato_deployment_key:
            zato_deployment_key = '{}.{}'.format(datetime.utcnow().isoformat(),
                                                 uuid4().hex)

        self.deployment_key = zato_deployment_key

        register_diag_handlers()

        # Store the ODB configuration, create an ODB connection pool and have self.odb use it
        self.config.odb_data = self.get_config_odb_data(self)
        self.set_odb_pool()

        # Now try grabbing the basic server's data from the ODB. No point
        # in doing anything else if we can't get past this point.
        server = self.odb.fetch_server(self.config.odb_data)

        if not server:
            raise Exception('Server does not exist in the ODB')

        # Set up the server-wide default lock manager
        odb_data = self.config.odb_data
        backend_type = 'fcntl' if odb_data.engine == 'sqlite' else odb_data.engine
        self.zato_lock_manager = LockManager(backend_type, 'zato',
                                             self.odb.session)

        # Just to make sure distributed locking is configured correctly
        with self.zato_lock_manager(uuid4().hex):
            pass

        # Basic metadata
        self.id = server.id
        self.name = server.name
        self.cluster_id = server.cluster_id
        self.cluster = self.odb.cluster

        # Looked up upfront here and assigned to services in their store
        self.enforce_service_invokes = asbool(
            self.fs_server_config.misc.enforce_service_invokes)

        # For server-to-server communication
        self.servers = Servers(self.odb, self.cluster.name)
        logger.info(
            'Preferred address of `%s@%s` (pid: %s) is `http%s://%s:%s`',
            self.name, self.cluster.name, self.pid, 's' if use_tls else '',
            self.preferred_address, self.port)

        # Reads in all configuration from ODB
        self.worker_store = WorkerStore(self.config, self)
        self.worker_store.invoke_matcher.read_config(
            self.fs_server_config.invoke_patterns_allowed)
        self.worker_store.target_matcher.read_config(
            self.fs_server_config.invoke_target_patterns_allowed)
        self.set_up_config(server)

        # Deploys services
        is_first, locally_deployed = self._after_init_common(server)

        # Initializes worker store, including connectors
        self.worker_store.init()
        self.request_dispatcher_dispatch = self.worker_store.request_dispatcher.dispatch

        # Normalize hot-deploy configuration
        self.hot_deploy_config = Bunch()

        self.hot_deploy_config.work_dir = os.path.normpath(
            os.path.join(self.repo_location,
                         self.fs_server_config.hot_deploy.work_dir))

        self.hot_deploy_config.backup_history = int(
            self.fs_server_config.hot_deploy.backup_history)
        self.hot_deploy_config.backup_format = self.fs_server_config.hot_deploy.backup_format

        for name in ('current_work_dir', 'backup_work_dir',
                     'last_backup_work_dir', 'delete_after_pick_up'):

            # New in 2.0
            if name == 'delete_after_pick_up':
                value = asbool(self.fs_server_config.hot_deploy.get(
                    name, True))
                self.hot_deploy_config[name] = value
            else:
                self.hot_deploy_config[name] = os.path.normpath(
                    os.path.join(self.hot_deploy_config.work_dir,
                                 self.fs_server_config.hot_deploy[name]))

        self._after_init_accepted(locally_deployed)

        broker_callbacks = {
            TOPICS[MESSAGE_TYPE.TO_PARALLEL_ANY]: self.worker_store.on_broker_msg,
            TOPICS[MESSAGE_TYPE.TO_PARALLEL_ALL]: self.worker_store.on_broker_msg,
        }

        self.broker_client = BrokerClient(self.kvdb, 'parallel',
                                          broker_callbacks,
                                          self.get_lua_programs())
        self.worker_store.set_broker_client(self.broker_client)

        self.odb.server_up_down(server.token, SERVER_UP_STATUS.RUNNING, True,
                                self.host, self.port, self.preferred_address,
                                use_tls)

        # Startup services
        if is_first:
            self.invoke_startup_services(is_first)
            spawn_greenlet(self.set_up_pickup)

        # IPC
        if is_first:
            self.ipc_forwarder.name = self.name
            self.ipc_forwarder.pid = self.pid
            spawn_greenlet(self.ipc_forwarder.run)

        # IPC
        self.ipc_api.name = self.name
        self.ipc_api.pid = self.pid
        self.ipc_api.on_message_callback = self.worker_store.on_ipc_message
        spawn_greenlet(self.ipc_api.run)

        logger.info('Started `%s@%s` (pid: %s)', server.name,
                    server.cluster.name, self.pid)
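
Finally, a note common to all three variants: each hot-deploy sub-directory is resolved with os.path.join followed by os.path.normpath, so relative segments such as '..' coming from the config file collapse into a clean path. A quick illustration with made-up paths:

    import os

    work_dir = '/opt/zato/server1/config/repo/../hot-deploy/work'
    current = os.path.normpath(os.path.join(work_dir, 'backup/current'))
    # current == '/opt/zato/server1/config/hot-deploy/work/backup/current'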