Example #1
    def deploy_missing_services(self, locally_deployed):
        """ Deploys services that exist on other servers but not on ours.
        """
        # The locally_deployed list contains all the services that we could import based on our current
        # understanding of the contents of the cluster. However, it's possible that we have
        # been shut down for a long time and that during that time other servers deployed services
        # we don't know anything about. They are not stored locally because we were down.
        # Hence we need to check whether there are any other servers in the cluster and, if so,
        # grab their list of services, compare it with what we have deployed and deploy
        # any that are missing.

        # Continue only if there is more than one running server in the cluster.
        other_servers = self.odb.get_servers()

        if other_servers:
            other_server = other_servers[0] # Index 0 is as random as any other because the list is not sorted.
            missing = self.odb.get_missing_services(other_server, locally_deployed)

            if missing:

                logger.info('Found extra services to deploy: %s', ', '.join(sorted(item.name for item in missing)))

                # (file_name, source_path) -> a list of services it contains
                modules = {}

                # Coalesce all service modules - it is possible that each one contains multiple services
                # so we do not want to deploy the same module over and over, once for each service found.
                for service_id, name, source_path, source in missing:
                    file_name = os.path.basename(source_path)
                    _, tmp_full_path = mkstemp(suffix='-'+ file_name)

                    # Module names are unique so they can serve as keys
                    key = file_name

                    if key not in modules:
                        modules[key] = {
                            'tmp_full_path': tmp_full_path,
                            'services': [name] # We can already append the initial service name in this 'if' branch
                        }

                        # Save the source code only once here
                        f = open(tmp_full_path, 'wb')
                        f.write(source)
                        f.close()

                    else:
                        modules[key]['services'].append(name)

                # Create a deployment package in ODB out of which all the services will be picked up ..
                for file_name, values in modules.items():
                    msg = Bunch()
                    msg.action = HOT_DEPLOY.CREATE_SERVICE.value
                    msg.msg_type = MESSAGE_TYPE.TO_PARALLEL_ALL
                    msg.package_id = hot_deploy(self, file_name, values['tmp_full_path'], notify=False)

                    # .. and tell the worker to actually deploy all the services the package contains.
                    #gevent.spawn(self.worker_store.on_broker_msg_HOT_DEPLOY_CREATE_SERVICE, msg)
                    self.worker_store.on_broker_msg_HOT_DEPLOY_CREATE_SERVICE(msg)

                    logger.info('Deployed extra services found: %s', sorted(values['services']))
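The coalescing step above matters because several rows from get_missing_services may point to the same module file, yet each module must be written out and hot-deployed only once. A minimal, self-contained sketch of that grouping pattern, with made-up service names and paths:

import os

# Hypothetical (service_name, source_path) rows, shaped like the ones iterated over above
rows = [
    ('billing.create-invoice', '/tmp/src/billing.py'),
    ('billing.cancel-invoice', '/tmp/src/billing.py'),
    ('crm.get-customer',       '/tmp/src/crm.py'),
]

modules = {}

for name, source_path in rows:
    key = os.path.basename(source_path) # Module file names are unique so they can serve as keys
    modules.setdefault(key, []).append(name)

# {'billing.py': ['billing.create-invoice', 'billing.cancel-invoice'], 'crm.py': ['crm.get-customer']}
print(modules)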
Example #2
    def test_top_level_skip_empty_input_single(self):
        class MyService(Service):
            class SimpleIO:
                input = 'aaa', 'bbb', '-ccc', '-ddd', '-eee', '-fff'
                default_value = NotGiven

                class SkipEmpty:
                    input = 'ccc'

        CySimpleIO.attach_sio(self.get_server_config(), MyService)

        data = Bunch()
        data.aaa = 'aaa'
        data.bbb = 'bbb'

        input = MyService._sio.parse_input(data, DATA_FORMAT.JSON)

        self.assertIsInstance(input, Bunch)
        self.assertDictEqual(
            input, {
                'aaa': 'aaa',
                'bbb': 'bbb',
                'ddd': NotGiven,
                'eee': NotGiven,
                'fff': NotGiven,
            })
Example #3
    def deploy_missing_services(self, locally_deployed):
        """ Deploys services that exist on other servers but not on ours.
        """

        # The locally_deployed list contains all the services that we could import based on our current
        # understanding of the contents of the cluster. However, it's possible that we have
        # been shut down for a long time and that during that time other servers deployed services
        # we don't know anything about. They are not stored locally because we were down.
        # Hence we need to check whether there are any other servers in the cluster and, if so,
        # grab their list of services, compare it with what we have deployed and deploy
        # any that are missing.

        # Continue only if there is more than one running server in the cluster.
        other_servers = self.odb.get_servers()

        if other_servers:
            other_server = other_servers[0] # Index 0 is as random as any other because the list is not sorted.
            missing = self.odb.get_missing_services(other_server, locally_deployed)

            if missing:
                logger.info('Found extra services to deploy: %s',
                            ', '.join(sorted(item.name for item in missing)))

                for service_id, name, source_path, source in missing:
                    file_name = os.path.basename(source_path)
                    _, full_path = mkstemp(suffix='-' + file_name)

                    f = open(full_path, 'wb')
                    f.write(source)
                    f.close()

                    # Create a deployment package in ODB out of which all the services will be picked up ..
                    msg = Bunch()
                    msg.action = HOT_DEPLOY.CREATE_SERVICE.value
                    msg.msg_type = MESSAGE_TYPE.TO_PARALLEL_ALL
                    msg.package_id = hot_deploy(self,
                                                file_name,
                                                full_path,
                                                notify=False)

                    # .. and tell the worker to actually deploy all the services the package contains.
                    gevent.spawn(self.worker_store.on_broker_msg_HOT_DEPLOY_CREATE_SERVICE, msg)

                    logger.info('Deployed an extra service found: %s (%s)',
                                name, service_id)
Example #4
 def __init__(self, *ignored_args, **ignored_kwargs):
     self.logger = logging.getLogger(self.get_name())
     self.server = None
     self.broker_client = None
     self.pubsub = None
     self.channel = None
     self.cid = None
     self.in_reply_to = None
     self.outgoing = None
     self.cloud = None
     self.worker_store = None
     self.odb = None
     self.data_format = None
     self.transport = None
     self.wsgi_environ = None
     self.job_type = None
     self.environ = Bunch()
     self.request = Request(self.logger)
     self.response = Response(self.logger)
     self.invocation_time = None # When was the service invoked
     self.handle_return_time = None # When did its 'handle' method finish processing the request
     self.processing_time_raw = None # A timedelta object with the processing time up to microseconds
     self.processing_time = None # Processing time in milliseconds
     self.usage = 0 # How many times the service has been invoked
     self.slow_threshold = maxint # After how many ms a response is considered to have come too late
     self.name = self.__class__.get_name()
     self.impl_name = self.__class__.get_impl_name()
     self.time = None
     self.patterns = None
     self.user_config = None
     self.dictnav = DictNav
     self.listnav = ListNav
     self.has_validate_input = False
     self.has_validate_output = False
Example #5
    def set_up_pickup(self):

        empty = []

        # Fix up booleans and paths
        for stanza, stanza_config in self.pickup_config.items():

            # user_config_items is empty by default
            if not stanza_config:
                empty.append(stanza)
                continue

            stanza_config.read_on_pickup = asbool(stanza_config.get('read_on_pickup', True))
            stanza_config.parse_on_pickup = asbool(stanza_config.get('parse_on_pickup', True))
            stanza_config.delete_after_pickup = asbool(stanza_config.get('delete_after_pickup', True))
            stanza_config.case_insensitive = asbool(stanza_config.get('case_insensitive', True))
            stanza_config.pickup_from = absolutize(stanza_config.pickup_from, self.base_dir)
            stanza_config.is_service_hot_deploy = False

            mpt = stanza_config.get('move_processed_to')
            stanza_config.move_processed_to = absolutize(mpt, self.base_dir) if mpt else None

            services = stanza_config.get('services') or []
            stanza_config.services = [services] if not isinstance(services, list) else services

            topics = stanza_config.get('topics') or []
            stanza_config.topics = [topics] if not isinstance(topics, list) else topics

            flags = globre.EXACT

            if stanza_config.case_insensitive:
                flags |= IGNORECASE

            patterns = stanza_config.patterns
            stanza_config.patterns = [patterns] if not isinstance(patterns, list) else patterns
            stanza_config.patterns = [globre.compile(elem, flags) for elem in stanza_config.patterns]

            if not os.path.exists(stanza_config.pickup_from):
                logger.warn('Pickup dir `%s` does not exist (%s)', stanza_config.pickup_from, stanza)

        for item in empty:
            del self.pickup_config[item]

        # OK, now that we have configured everything that pickup.conf contained,
        # we still need to make it aware of services and how to pick them up from the file system.

        stanza = 'zato_internal_service_hot_deploy'
        stanza_config = Bunch({
            'pickup_from': self.hot_deploy_config.pickup_dir,
            'patterns': [globre.compile('*.py', globre.EXACT | IGNORECASE)],
            'read_on_pickup': False,
            'parse_on_pickup': False,
            'delete_after_pickup': self.hot_deploy_config.delete_after_pickup,
            'is_service_hot_deploy': True,
        })

        self.pickup_config[stanza] = stanza_config
        self.pickup = PickupManager(self, self.pickup_config)

        spawn_greenlet(self.pickup.run)
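The method keeps coercing values that may arrive either as a scalar or as a list (services, topics, patterns) into lists. A tiny helper capturing that idiom; the helper name is ours, not Zato's:

def as_list(value):
    """ Returns value unchanged if it is already a list, else wraps it in one;
    falsy values such as None become an empty list. """
    if not value:
        return []
    return value if isinstance(value, list) else [value]

assert as_list(None) == []
assert as_list('my.service') == ['my.service']
assert as_list(['a', 'b']) == ['a', 'b']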
Example #6
    def from_query(name, query_data, impl_class=Bunch, item_class=Bunch, list_config=False):
        """ Return a new ConfigDict with items taken from an SQL query.
        """
        config_dict = ConfigDict(name)
        config_dict._impl = impl_class()

        if query_data:
            query, attrs = query_data

            for item in query:

                if hasattr(item, 'name'):
                    item_name = item.name
                else:
                    item_name = item.get_name()

                if list_config:
                    list_dict = Bunch()
                    if item_name not in config_dict._impl:
                        config_dict._impl[item_name] = []
                    config_dict._impl[item_name].append(list_dict)
                else:
                    config_dict._impl[item_name] = item_class()

                if list_config:
                    for attr_name in attrs.keys():
                        list_dict[attr_name] = getattr(item, attr_name)

                else:
                    config_dict._impl[item_name].config = item_class()
                    for attr_name in attrs.keys():
                        config_dict._impl[item_name]['config'][attr_name] = getattr(item, attr_name)

        return config_dict
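For orientation, this is roughly how from_query could be invoked, assuming ConfigDict is imported from Zato and that query rows are plain objects exposing a name attribute plus one attribute per key of attrs; the row class and values below are invented:

from collections import namedtuple

# from zato.server.config import ConfigDict # Assumed import path

ServerRow = namedtuple('ServerRow', 'name host port')

query = [ServerRow('server1', '10.152.0.4', 17010), ServerRow('server2', '10.152.0.5', 17010)]
attrs = {'name': None, 'host': None, 'port': None} # Only the keys are read by from_query

config_dict = ConfigDict.from_query('servers', (query, attrs))

# Each row is now keyed by its name, with its attributes stored under 'config', e.g.
# config_dict._impl['server1']['config']['host'] == '10.152.0.4'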
Example #7
    def from_query(name,
                   query_data,
                   impl_class=Bunch,
                   item_class=Bunch,
                   list_config=False,
                   decrypt_func=None,
                   drop_opaque=False):
        """ Return a new ConfigDict with items taken from an SQL query.
        """
        config_dict = ConfigDict(name)
        config_dict._impl = impl_class()

        if query_data:
            query, attrs = query_data

            for item in query:

                if hasattr(item, 'name'):
                    item_name = item.name
                else:
                    item_name = item.get_name()

                if list_config:
                    list_dict = Bunch()
                    if item_name not in config_dict._impl:
                        config_dict._impl[item_name] = []
                    config_dict._impl[item_name].append(list_dict)
                else:
                    config_dict._impl[item_name] = item_class()

                if list_config:
                    for attr_name in attrs.keys():
                        list_dict[attr_name] = getattr(item, attr_name)

                else:
                    config_dict._impl[item_name].config = item_class()
                    for attr_name in attrs.keys():
                        config = config_dict._impl[item_name]['config']
                        original = value = getattr(item, attr_name)
                        value = resolve_value(attr_name, value, decrypt_func)
                        config[attr_name] = value

                        # Temporarily, add a flag to indicate whether the password in ODB was encrypted or not.
                        if attr_name in SECRETS.PARAMS:
                            if original is None:
                                original = ''
                            config['_encryption_needed'] = True
                            if original.startswith(SECRETS.PREFIX):
                                config['_encrypted_in_odb'] = True
                            else:
                                config['_encrypted_in_odb'] = False

        # Post-process data before it is returned to resolve any opaque attributes
        for value in config_dict.values():
            value_config = value['config']
            if ElemsWithOpaqueMaker.has_opaque_data(value_config):
                ElemsWithOpaqueMaker.process_config_dict(value_config, drop_opaque)

        return config_dict
Example #8
    def test_parse_nested_dict_customer_no_defaults(self):

        locality = Dict('locality', 'type', 'name')
        address = Dict('address', locality, 'street')
        email = Dict('email', 'personal', 'business')
        customer = Dict('customer', 'name', email, address)

        class MyService(Service):
            class SimpleIO:
                input = customer

        CySimpleIO.attach_sio(self.get_server_config(), MyService)

        data = Bunch()
        data.customer = Bunch()
        data.customer.name = 'my-name'
        data.customer.email = Bunch()
        data.customer.email.personal = 'my-personal-email'
        data.customer.email.business = 'my-business-email'
        data.customer.address = Bunch()
        data.customer.address.street = 'my-street'
        data.customer.address.locality = Bunch()
        data.customer.address.locality.type = 'my-type'
        data.customer.address.locality.name = 'my-name'

        input = MyService._sio.parse_input(data, DATA_FORMAT.JSON)
        self.assertIsInstance(input, Bunch)

        self.assertEquals(input.customer.name, data.customer.name)
        self.assertEquals(input.customer.email.personal, data.customer.email.personal)
        self.assertEquals(input.customer.email.business, data.customer.email.business)
        self.assertEquals(input.customer.address.street, data.customer.address.street)
        self.assertEquals(input.customer.address.locality.type, data.customer.address.locality.type)
        self.assertEquals(input.customer.address.locality.name, data.customer.address.locality.name)
Example #9
    def copy(self):
        """ Returns a new instance of ConfigDict with items copied over from self.
        """
        with self.lock:
            config_dict = ConfigDict(self.name)
            config_dict._impl = Bunch()
            config_dict._impl.update(deepcopy(self._impl))

            return config_dict
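The copy is taken while holding self.lock, so no concurrent writer can produce a half-updated snapshot, and deepcopy guarantees the clone shares no mutable state with the original. The same pattern in a generic, dependency-free form; the class and names are illustrative:

from copy import deepcopy
from threading import RLock

class Snapshottable:
    def __init__(self):
        self.lock = RLock()
        self._impl = {}

    def copy(self):
        """ Returns an independent deep copy of self, taken under the lock. """
        with self.lock:
            clone = Snapshottable()
            clone._impl.update(deepcopy(self._impl))
            return clone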
Example #10
    def test_top_level_skip_empty_input_true_no_force_empty_with_attribute(self):
        class MyService(Service):
            class SimpleIO:
                input = 'aaa', 'bbb', '-ccc', '-ddd'
                skip_empty_keys = True

        CySimpleIO.attach_sio(self.get_server_config(), MyService)

        data = Bunch()
        data.aaa = 'aaa'
        data.bbb = 'bbb'

        input = MyService._sio.parse_input(data, DATA_FORMAT.JSON)
        self.assertIsInstance(input, Bunch)
        self.assertDictEqual(input, {
            'aaa': 'aaa',
            'bbb': 'bbb',
        })
Example #11
    def server(self):

        server = ParallelServer()
        server.odb = self.odb_manager()
        server.service_store = self.service_store()
        server.service_store.server = server
        server.sql_pool_store = self.sql_pool_store()
        server.service_modules = self.service_modules()
        server.kvdb = self.kvdb()
        server.user_config = Bunch()

        return server
Example #12
    def test_top_level_skip_empty_input_true_with_force_empty_single(self):
        class MyService(Service):
            class SimpleIO:
                input = 'aaa', 'bbb', '-ccc', '-ddd', '-eee', '-fff'

                class SkipEmpty:
                    input = True
                    force_empty_input = 'eee'

        CySimpleIO.attach_sio(self.get_server_config(), MyService)

        data = Bunch()
        data.aaa = 'aaa'
        data.bbb = 'bbb'

        input = MyService._sio.parse_input(data, DATA_FORMAT.JSON)
        self.assertIsInstance(input, Bunch)
        self.assertDictEqual(input, {
            'aaa': 'aaa',
            'bbb': 'bbb',
            'eee': backward_compat_default_value,
        })
Example #13
    def from_query(name,
                   query_data,
                   impl_class=Bunch,
                   item_class=Bunch,
                   list_config=False,
                   decrypt_func=None):
        """ Return a new ConfigDict with items taken from an SQL query.
        """
        config_dict = ConfigDict(name)
        config_dict._impl = impl_class()

        if query_data:
            query, attrs = query_data

            for item in query:

                if hasattr(item, 'name'):
                    item_name = item.name
                else:
                    item_name = item.get_name()

                if list_config:
                    list_dict = Bunch()
                    if item_name not in config_dict._impl:
                        config_dict._impl[item_name] = []
                    config_dict._impl[item_name].append(list_dict)
                else:
                    config_dict._impl[item_name] = item_class()

                if list_config:
                    for attr_name in attrs.keys():
                        list_dict[attr_name] = getattr(item, attr_name)

                else:
                    config_dict._impl[item_name].config = item_class()
                    for attr_name in attrs.keys():
                        config = config_dict._impl[item_name]['config']
                        original = value = getattr(item, attr_name)
                        value = resolve_value(attr_name, value, decrypt_func)
                        config[attr_name] = value

                        # Temporarily, add a flag to indicate whether the password in ODB was encrypted or not.
                        if attr_name in SECRETS.PARAMS:
                            if original is None:
                                original = ''
                            config['_encryption_needed'] = True
                            if original.startswith(SECRETS.PREFIX):
                                config['_encrypted_in_odb'] = True
                            else:
                                config['_encrypted_in_odb'] = False

        return config_dict
Example #14
    def test_parse_nested_dict_all_sio_elems(self):

        locality = Dict('locality', Int('type'), Text('name'), AsIs('coords'), Decimal('geo_skip'), Float('geo_diff'))
        address = Dict('address', locality, UUID('street_id'), CSV('prefs'), DateTime('since'), List('types'), Opaque('opaque1'))
        email = Dict('email', Text('value'), Bool('is_business'), Date('join_date'), DictList('preferred_order', 'name', 'pos'))
        customer = Dict('customer', 'name', email, address)

        class MyService(Service):
            class SimpleIO:
                input = customer

        CySimpleIO.attach_sio(self.get_server_config(), MyService)

        data = Bunch()
        data.customer = Bunch()
        data.customer.name = 'my-name'
        data.customer.email = Bunch()
        data.customer.email.value = 'my-email'
        data.customer.email.is_business = True
        data.customer.email.join_date = '1999-12-31'
        data.customer.email.preferred_order = [{'name':'address2', 'pos':'2'}, {'name':'address1', 'pos':'1'}]
        data.customer.address = Bunch()
        data.customer.address.locality = Bunch()
        data.customer.address.locality.type = '111'
        data.customer.address.locality.name = 'my-locality'
        data.customer.address.locality.coords = object()
        data.customer.address.locality.geo_skip = '123.456'
        data.customer.address.locality.geo_diff = '999.777'
        data.customer.address.street_id = uuid4().hex
        data.customer.address.prefs = '1,2,3,4'
        data.customer.address.since = '27-11-1988T11:22:33'
        data.customer.address.types = ['a', 'b', 'c', 'd']
        data.customer.address.opaque1 = object()

        input = MyService._sio.parse_input(data, DATA_FORMAT.JSON)
        self.assertIsInstance(input, Bunch)

        self.assertEquals(input.customer.name, data.customer.name)
        self.assertEquals(input.customer.email.value, data.customer.email.value)
        self.assertEquals(input.customer.email.is_business, data.customer.email.is_business)
        self.assertEquals(input.customer.email.join_date, dt_parse(data.customer.email.join_date))
        self.assertListEqual(input.customer.email.preferred_order, data.customer.email.preferred_order)
        self.assertEquals(input.customer.address.locality.type, int(data.customer.address.locality.type))
        self.assertEquals(input.customer.address.locality.name, data.customer.address.locality.name)
        self.assertIs(input.customer.address.locality.coords, data.customer.address.locality.coords)
        self.assertEquals(input.customer.address.locality.geo_skip, decimal_Decimal(data.customer.address.locality.geo_skip))
        self.assertEquals(input.customer.address.locality.geo_diff, float(data.customer.address.locality.geo_diff))
        self.assertEquals(input.customer.address.street_id, uuid_UUID(data.customer.address.street_id))
        self.assertEquals(input.customer.address.prefs, data.customer.address.prefs.split(','))
        self.assertEquals(input.customer.address.since, dt_parse(data.customer.address.since))
        self.assertEquals(input.customer.address.types, data.customer.address.types)
        self.assertIs(input.customer.address.opaque1, data.customer.address.opaque1)
Example #15
    def parallel_server(self):

        server = ParallelServer()
        server.odb = self.odb_manager()
        server.service_store = self.service_store()
        server.sql_pool_store = self.sql_pool_store()
        server.int_parameters = self.int_parameters()
        server.int_parameter_suffixes = self.int_parameter_suffixes()
        server.bool_parameter_prefixes = self.bool_parameter_prefixes()
        server.internal_service_modules = self.internal_service_modules()
        server.service_modules = self.service_modules()
        server.kvdb = self.kvdb()
        server.user_config = Bunch()

        return server
Example #16
    def test_parse_nested_dict_customer_deep_defaults_elem_level(self):

        locality_default = object()

        locality = Dict('locality', '-type', '-name', default=locality_default)
        address = Dict('address', locality, '-street')
        email = Dict('email', 'personal', 'business')
        customer = Dict('customer', 'name', email, address)

        _default_input_value = 'default-input-value'

        class MyService(Service):
            class SimpleIO:
                input = customer
                default_input_value = 'default-input-value'

        CySimpleIO.attach_sio(self.get_server_config(), MyService)

        # Note that this locality has neither type nor name, so we expect that Dict's default value to be used;
        # also, address has no street but, since that Dict has no default value of its own, the SimpleIO-level one will be used.
        data = Bunch()
        data.customer = Bunch()
        data.customer.name = 'my-name'
        data.customer.email = Bunch()
        data.customer.email.personal = 'my-personal-email'
        data.customer.email.business = 'my-business-email'
        data.customer.address = Bunch()
        data.customer.address.locality = Bunch()

        input = MyService._sio.parse_input(data, DATA_FORMAT.JSON)
        self.assertIsInstance(input, Bunch)

        self.assertEquals(input.customer.name, data.customer.name)
        self.assertEquals(input.customer.email.personal, data.customer.email.personal)
        self.assertEquals(input.customer.email.business, data.customer.email.business)
        self.assertEquals(input.customer.address.street, _default_input_value)
        self.assertEquals(input.customer.address.locality.type, locality_default)
        self.assertEquals(input.customer.address.locality.name, locality_default)
Example #17
    def test_parse_nested_dict_customer_deep_defaults_sio_level(self):

        locality = Dict('locality', '-type', '-name')
        address = Dict('address', locality, 'street')
        email = Dict('email', 'personal', 'business')
        customer = Dict('customer', 'name', email, address)

        _default_input_value = 'default-input-value'

        class MyService(Service):
            class SimpleIO:
                input = customer
                default_input_value = 'default-input-value'

        CySimpleIO.attach_sio(self.get_server_config(), MyService)

        # Note that locality has neither type nor name and we expect the SimpleIO-level default value to be used
        data = Bunch()
        data.customer = Bunch()
        data.customer.name = 'my-name'
        data.customer.email = Bunch()
        data.customer.email.personal = 'my-personal-email'
        data.customer.email.business = 'my-business-email'
        data.customer.address = Bunch()
        data.customer.address.street = 'my-street'
        data.customer.address.locality = Bunch()

        input = MyService._sio.parse_input(data, DATA_FORMAT.JSON)
        self.assertIsInstance(input, Bunch)

        self.assertEquals(input.customer.name, data.customer.name)
        self.assertEquals(input.customer.email.personal, data.customer.email.personal)
        self.assertEquals(input.customer.email.business, data.customer.email.business)
        self.assertEquals(input.customer.address.street, data.customer.address.street)
        self.assertEquals(input.customer.address.locality.type, _default_input_value)
        self.assertEquals(input.customer.address.locality.name, _default_input_value)
Example #18
    def __init__(self):
        self.host = None
        self.port = None
        self.crypto_manager = None
        self.odb = None
        self.odb_data = None
        self.config = None
        self.repo_location = None
        self.user_conf_location = None
        self.sql_pool_store = None
        self.soap11_content_type = None
        self.soap12_content_type = None
        self.plain_xml_content_type = None
        self.json_content_type = None
        self.internal_service_modules = None  # Zato's own internal services
        self.service_modules = None  # Set programmatically in Spring
        self.service_sources = None  # Set in a config file
        self.base_dir = None
        self.tls_dir = None
        self.static_dir = None
        self.hot_deploy_config = None
        self.pickup = None
        self.fs_server_config = None
        self.fs_sql_config = None
        self.pickup_config = None
        self.logging_config = None
        self.logging_conf_path = None
        self.sio_config = None
        self.sso_config = None
        self.connector_server_grace_time = None
        self.id = None
        self.name = None
        self.worker_id = None
        self.worker_pid = None
        self.cluster = None
        self.cluster_id = None
        self.kvdb = None
        self.startup_jobs = None
        self.worker_store = None
        self.request_dispatcher_dispatch = None
        self.deployment_lock_expires = None
        self.deployment_lock_timeout = None
        self.deployment_key = ''
        self.app_context = None
        self.has_gevent = None
        self.delivery_store = None
        self.static_config = None
        self.component_enabled = Bunch()
        self.client_address_headers = [
            'HTTP_X_ZATO_FORWARDED_FOR', 'HTTP_X_FORWARDED_FOR', 'REMOTE_ADDR'
        ]
        self.broker_client = None
        self.return_tracebacks = None
        self.default_error_message = None
        self.time_util = None
        self.preferred_address = None
        self.crypto_use_tls = None
        self.servers = None
        self.zato_lock_manager = None
        self.pid = None
        self.sync_internal = None
        self.ipc_api = IPCAPI(False)
        self.ipc_forwarder = IPCAPI(True)
        self.wmq_ipc_tcp_port = None
        self.fifo_response_buffer_size = None  # Will be in megabytes
        self.is_first_worker = None
        self.shmem_size = -1.0
        self.server_startup_ipc = ServerStartupIPC()
        self.keyutils = KeyUtils()
        self.sso_api = None
        self.is_sso_enabled = False
        self.audit_pii = audit_pii
        self.startup_callable_tool = None
        self.default_internal_pubsub_endpoint_id = None
        self._hash_secret_method = None
        self._hash_secret_rounds = None
        self._hash_secret_salt_size = None

        # Allows users to store arbitrary data across service invocations
        self.user_ctx = Bunch()
        self.user_ctx_lock = gevent.lock.RLock()

        self.access_logger = logging.getLogger('zato_access_log')
        self.access_logger_log = self.access_logger._log
        self.needs_access_log = self.access_logger.isEnabledFor(INFO)
        self.has_pubsub_audit_log = logging.getLogger('zato_pubsub_audit').isEnabledFor(INFO)
        self.is_enabled_for_warn = logging.getLogger('zato').isEnabledFor(WARN)

        # The main config store
        self.config = ConfigStore()

        gevent.signal(signal.SIGINT, self.destroy)
Example #19
    def start_server(parallel_server, zato_deployment_key=None):

        # Easier to type
        self = parallel_server

        # This cannot be done in __init__ because each sub-process obviously has its own PID
        self.pid = os.getpid()

        # This also cannot be done in __init__ which doesn't have this variable yet
        self.is_first_worker = int(os.environ['ZATO_SERVER_WORKER_IDX']) == 0

        # Used later on
        use_tls = asbool(self.fs_server_config.crypto.use_tls)

        # Will be None if we are not running in background.
        if not zato_deployment_key:
            zato_deployment_key = '{}.{}'.format(datetime.utcnow().isoformat(),
                                                 uuid4().hex)

        self.deployment_key = zato_deployment_key

        register_diag_handlers()

        # Create all POSIX IPC objects now that we have the deployment key
        self.shmem_size = int(float(self.fs_server_config.shmem.size) * 10**6) # Convert from megabytes to bytes, as an integer
        self.server_startup_ipc.create(self.deployment_key, self.shmem_size)

        # Store the ODB configuration, create an ODB connection pool and have self.odb use it
        self.config.odb_data = self.get_config_odb_data(self)
        self.set_up_odb()

        # Now try grabbing the basic server's data from the ODB. No point
        # in doing anything else if we can't get past this point.
        server = self.odb.fetch_server(self.config.odb_data)

        if not server:
            raise Exception('Server does not exist in the ODB')

        # Set up the server-wide default lock manager
        odb_data = self.config.odb_data
        backend_type = 'fcntl' if odb_data.engine == 'sqlite' else odb_data.engine
        self.zato_lock_manager = LockManager(backend_type, 'zato',
                                             self.odb.session)

        # Just to make sure distributed locking is configured correctly
        with self.zato_lock_manager(uuid4().hex):
            pass

        # Basic metadata
        self.id = server.id
        self.name = server.name
        self.cluster_id = server.cluster_id
        self.cluster = self.odb.cluster
        self.worker_id = '{}.{}.{}.{}'.format(self.cluster_id, self.id,
                                              self.worker_pid, new_cid())

        # Looked up upfront here and assigned to services in their store
        self.enforce_service_invokes = asbool(
            self.fs_server_config.misc.enforce_service_invokes)

        # For server-to-server communication
        self.servers = Servers(self.odb, self.cluster.name, self.decrypt)
        logger.info(
            'Preferred address of `%s@%s` (pid: %s) is `http%s://%s:%s`',
            self.name, self.cluster.name, self.pid, 's' if use_tls else '',
            self.preferred_address, self.port)

        # Reads in all configuration from ODB
        self.worker_store = WorkerStore(self.config, self)
        self.worker_store.invoke_matcher.read_config(
            self.fs_server_config.invoke_patterns_allowed)
        self.worker_store.target_matcher.read_config(
            self.fs_server_config.invoke_target_patterns_allowed)
        self.set_up_config(server)

        # Deploys services
        is_first, locally_deployed = self._after_init_common(server)

        # Initializes worker store, including connectors
        self.worker_store.init()
        self.request_dispatcher_dispatch = self.worker_store.request_dispatcher.dispatch

        # Normalize hot-deploy configuration
        self.hot_deploy_config = Bunch()

        self.hot_deploy_config.work_dir = os.path.normpath(
            os.path.join(self.repo_location,
                         self.fs_server_config.hot_deploy.work_dir))

        self.hot_deploy_config.backup_history = int(
            self.fs_server_config.hot_deploy.backup_history)
        self.hot_deploy_config.backup_format = self.fs_server_config.hot_deploy.backup_format

        # Configure remaining parts of SSO
        self.configure_sso()

        # Cannot be done in __init__ because self.sso_config is not available there yet
        salt_size = self.sso_config.hash_secret.salt_size
        self.crypto_manager.add_hash_scheme('zato.default',
                                            self.sso_config.hash_secret.rounds,
                                            salt_size)

        for name in ('current_work_dir', 'backup_work_dir',
                     'last_backup_work_dir', 'delete_after_pick_up'):

            # New in 2.0
            if name == 'delete_after_pick_up':
                value = asbool(self.fs_server_config.hot_deploy.get(
                    name, True))
                self.hot_deploy_config[name] = value
            else:
                self.hot_deploy_config[name] = os.path.normpath(
                    os.path.join(self.hot_deploy_config.work_dir,
                                 self.fs_server_config.hot_deploy[name]))

        broker_callbacks = {
            TOPICS[MESSAGE_TYPE.TO_PARALLEL_ANY]: self.worker_store.on_broker_msg,
            TOPICS[MESSAGE_TYPE.TO_PARALLEL_ALL]: self.worker_store.on_broker_msg,
        }

        self.broker_client = BrokerClient(self.kvdb, 'parallel',
                                          broker_callbacks,
                                          self.get_lua_programs())
        self.worker_store.set_broker_client(self.broker_client)

        self._after_init_accepted(locally_deployed)

        self.odb.server_up_down(server.token, SERVER_UP_STATUS.RUNNING, True,
                                self.host, self.port, self.preferred_address,
                                use_tls)

        if is_first:

            logger.info('First worker of `%s` is %s', self.name, self.pid)

            self.startup_callable_tool.invoke(
                SERVER_STARTUP.PHASE.IN_PROCESS_FIRST,
                kwargs={
                    'parallel_server': self,
                })

            # Startup services
            self.invoke_startup_services(is_first)
            spawn_greenlet(self.set_up_pickup)

            # IPC
            ipc_forwarder_name = '{}-{}'.format(self.cluster.name, self.name)
            ipc_forwarder_name = fs_safe_name(ipc_forwarder_name)

            self.ipc_forwarder.name = ipc_forwarder_name
            self.ipc_forwarder.pid = self.pid
            spawn_greenlet(self.ipc_forwarder.run)

            # Set up IBM MQ connections if that component is enabled
            if self.fs_server_config.component_enabled.ibm_mq:

                # Will block for a few seconds at most, until is_ok is returned,
                # which indicates whether or not the connector started.
                is_ok = self.start_ibm_mq_connector(
                    int(self.fs_server_config.ibm_mq.ipc_tcp_start_port))
                if is_ok:
                    self.create_initial_wmq_definitions(
                        self.worker_store.worker_config.definition_wmq)
                    self.create_initial_wmq_outconns(
                        self.worker_store.worker_config.out_wmq)
                    self.create_initial_wmq_channels(
                        self.worker_store.worker_config.channel_wmq)

        else:
            self.startup_callable_tool.invoke(
                SERVER_STARTUP.PHASE.IN_PROCESS_OTHER,
                kwargs={
                    'parallel_server': self,
                })

        # IPC
        self.ipc_api.name = self.name
        self.ipc_api.pid = self.pid
        self.ipc_api.on_message_callback = self.worker_store.on_ipc_message
        spawn_greenlet(self.ipc_api.run)

        self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.AFTER_STARTED,
                                          kwargs={
                                              'parallel_server': self,
                                          })

        logger.info('Started `%s@%s` (pid: %s)', server.name,
                    server.cluster.name, self.pid)
Example #20
    def __init__(self):
        self.host = None
        self.port = None
        self.crypto_manager = None
        self.odb = None
        self.odb_data = None
        self.config = None
        self.repo_location = None
        self.user_conf_location = None
        self.sql_pool_store = None
        self.soap11_content_type = None
        self.soap12_content_type = None
        self.plain_xml_content_type = None
        self.json_content_type = None
        self.service_modules = None # Set programmatically in Spring
        self.service_sources = None # Set in a config file
        self.base_dir = None        # type: unicode
        self.tls_dir = None         # type: unicode
        self.static_dir = None      # type: unicode
        self.json_schema_dir = None # type: unicode
        self.hot_deploy_config = None
        self.pickup = None
        self.fs_server_config = None
        self.fs_sql_config = None
        self.pickup_config = None
        self.logging_config = None
        self.logging_conf_path = None
        self.sio_config = None
        self.sso_config = None
        self.connector_server_grace_time = None
        self.id = None
        self.name = None
        self.worker_id = None
        self.worker_pid = None
        self.cluster = None
        self.cluster_id = None
        self.kvdb = None
        self.startup_jobs = None
        self.worker_store = None # type: WorkerStore
        self.service_store = None # type: ServiceStore
        self.request_dispatcher_dispatch = None
        self.deployment_lock_expires = None
        self.deployment_lock_timeout = None
        self.deployment_key = ''
        self.has_gevent = None
        self.delivery_store = None
        self.static_config = None
        self.component_enabled = Bunch()
        self.client_address_headers = ['HTTP_X_ZATO_FORWARDED_FOR', 'HTTP_X_FORWARDED_FOR', 'REMOTE_ADDR']
        self.broker_client = None
        self.return_tracebacks = None
        self.default_error_message = None
        self.time_util = None
        self.preferred_address = None
        self.crypto_use_tls = None
        self.servers = None
        self.zato_lock_manager = None
        self.pid = None
        self.sync_internal = None
        self.ipc_api = IPCAPI()
        self.fifo_response_buffer_size = None # Will be in megabytes
        self.is_first_worker = None
        self.shmem_size = -1.0
        self.server_startup_ipc = ServerStartupIPC()
        self.connector_config_ipc = ConnectorConfigIPC()
        self.keyutils = KeyUtils()
        self.sso_api = None
        self.is_sso_enabled = False
        self.audit_pii = audit_pii
        self.has_fg = False
        self.startup_callable_tool = None
        self.default_internal_pubsub_endpoint_id = None
        self._hash_secret_method = None
        self._hash_secret_rounds = None
        self._hash_secret_salt_size = None

        # Our arbiter may potentially call the cleanup procedure multiple times
        # and this will be set to True the first time around.
        self._is_process_closing = False

        # Allows users to store arbitrary data across service invocations
        self.user_ctx = Bunch()
        self.user_ctx_lock = gevent.lock.RLock()

        # Connectors
        self.connector_ibm_mq = IBMMQIPC(self)
        self.connector_sftp   = SFTPIPC(self)

        # HTTP methods allowed as a Python list
        self.http_methods_allowed = []

        # As above, but as a regular expression pattern
        self.http_methods_allowed_re = ''

        self.access_logger = logging.getLogger('zato_access_log')
        self.access_logger_log = self.access_logger._log
        self.needs_access_log = self.access_logger.isEnabledFor(INFO)
        self.has_pubsub_audit_log = logging.getLogger('zato_pubsub_audit').isEnabledFor(INFO)
        self.is_enabled_for_warn = logging.getLogger('zato').isEnabledFor(WARN)

        # The main config store
        self.config = ConfigStore()
Example #21
    def copy(self):
        """ Creates a copy of this ConfigStore. All configuration data is copied
        over except for SQL connections.
        """
        config_store = ConfigStore()

        # Grab all ConfigDicts - even if they're actually ZATO_NONE - and make their copies
        for attr_name in dir(self):
            attr = getattr(self, attr_name)
            if isinstance(attr, ConfigDict):
                copy_func = getattr(attr, 'copy')
                setattr(config_store, attr_name, copy_func())
            elif attr is ZATO_NONE:
                setattr(config_store, attr_name, ZATO_NONE)

        http_soap = MultiDict()
        dict_of_lists = self.http_soap.dict_of_lists()
        for url_path, lists in dict_of_lists.items():
            _info = Bunch()
            for elem in lists:
                for soap_action, item in elem.items():
                    _info[soap_action] = Bunch()
                    _info[soap_action].id = item.id
                    _info[soap_action].name = item.name
                    _info[soap_action].is_active = item.is_active
                    _info[soap_action].is_internal = item.is_internal
                    _info[soap_action].url_path = item.url_path
                    _info[soap_action].method = item.method
                    _info[soap_action].soap_version = item.soap_version
                    _info[soap_action].service_id = item.service_id
                    _info[soap_action].service_name = item.service_name
                    _info[soap_action].impl_name = item.impl_name
                    _info[soap_action].transport = item.transport
                    _info[soap_action].connection = item.connection
            http_soap.add(url_path, _info)

        config_store.http_soap = http_soap
        config_store.url_sec = self.url_sec
        config_store.broker_config = self.broker_config
        config_store.odb_data = deepcopy(self.odb_data)

        return config_store
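Note how copy discovers ConfigDict attributes through introspection with dir() instead of maintaining a hand-written list, which keeps it correct as new ConfigDicts are added to the store. The reflective part in a generic form; the names are illustrative:

def copy_attrs_of_type(src, dst, cls_to_copy):
    """ Copies onto dst every attribute of src that is an instance of cls_to_copy,
    using each attribute's own .copy() method. """
    for attr_name in dir(src):
        attr = getattr(src, attr_name)
        if isinstance(attr, cls_to_copy):
            setattr(dst, attr_name, attr.copy())
    return dst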
Example #22
    def start_server(parallel_server, zato_deployment_key=None):

        # Easier to type
        self = parallel_server # type: ParallelServer

        # This cannot be done in __init__ because each sub-process obviously has its own PID
        self.pid = os.getpid()

        # This also cannot be done in __init__ which doesn't have this variable yet
        self.is_first_worker = int(os.environ['ZATO_SERVER_WORKER_IDX']) == 0

        # Used later on
        use_tls = asbool(self.fs_server_config.crypto.use_tls)

        # Will be None if we are not running in background.
        if not zato_deployment_key:
            zato_deployment_key = '{}.{}'.format(datetime.utcnow().isoformat(), uuid4().hex)

        self.deployment_key = zato_deployment_key

        register_diag_handlers()

        # Create all POSIX IPC objects now that we have the deployment key
        self.shmem_size = int(float(self.fs_server_config.shmem.size) * 10**6) # Convert from megabytes to bytes, as an integer

        self.server_startup_ipc.create(self.deployment_key, self.shmem_size)
        self.connector_config_ipc.create(self.deployment_key, self.shmem_size)

        # Store the ODB configuration, create an ODB connection pool and have self.odb use it
        self.config.odb_data = self.get_config_odb_data(self)
        self.set_up_odb()

        # Now try grabbing the basic server's data from the ODB. No point
        # in doing anything else if we can't get past this point.
        server = self.odb.fetch_server(self.config.odb_data)

        if not server:
            raise Exception('Server does not exist in the ODB')

        # Set up the server-wide default lock manager
        odb_data = self.config.odb_data
        backend_type = 'fcntl' if odb_data.engine == 'sqlite' else odb_data.engine
        self.zato_lock_manager = LockManager(backend_type, 'zato', self.odb.session)

        # Just to make sure distributed locking is configured correctly
        with self.zato_lock_manager(uuid4().hex):
            pass

        # Basic metadata
        self.id = server.id
        self.name = server.name
        self.cluster_id = server.cluster_id
        self.cluster = self.odb.cluster
        self.worker_id = '{}.{}.{}.{}'.format(self.cluster_id, self.id, self.worker_pid, new_cid())

        # Looked up upfront here and assigned to services in their store
        self.enforce_service_invokes = asbool(self.fs_server_config.misc.enforce_service_invokes)

        # For server-to-server communication
        self.servers = Servers(self.odb, self.cluster.name, self.decrypt)
        logger.info('Preferred address of `%s@%s` (pid: %s) is `http%s://%s:%s`', self.name,
            self.cluster.name, self.pid, 's' if use_tls else '', self.preferred_address, self.port)

        # Configure which HTTP methods can be invoked via REST or SOAP channels
        methods_allowed = self.fs_server_config.http.methods_allowed
        methods_allowed = methods_allowed if isinstance(methods_allowed, list) else [methods_allowed]
        self.http_methods_allowed.extend(methods_allowed)

        # As above, as a regular expression to be used in pattern matching
        http_methods_allowed_re = '|'.join(self.http_methods_allowed)
        self.http_methods_allowed_re = '({})'.format(http_methods_allowed_re)

        # Reads in all configuration from ODB
        self.worker_store = WorkerStore(self.config, self)
        self.worker_store.invoke_matcher.read_config(self.fs_server_config.invoke_patterns_allowed)
        self.worker_store.target_matcher.read_config(self.fs_server_config.invoke_target_patterns_allowed)
        self.set_up_config(server)

        # Normalize hot-deploy configuration
        self.hot_deploy_config = Bunch()

        self.hot_deploy_config.pickup_dir = absolutize(self.fs_server_config.hot_deploy.pickup_dir, self.repo_location)

        self.hot_deploy_config.work_dir = os.path.normpath(os.path.join(
            self.repo_location, self.fs_server_config.hot_deploy.work_dir))

        self.hot_deploy_config.backup_history = int(self.fs_server_config.hot_deploy.backup_history)
        self.hot_deploy_config.backup_format = self.fs_server_config.hot_deploy.backup_format

        # Added in 3.1, hence optional
        max_batch_size = int(self.fs_server_config.hot_deploy.get('max_batch_size', 1000))

        # The value is expressed in kilobytes, turn it into bytes
        max_batch_size = max_batch_size * 1000

        # Finally, assign it to ServiceStore
        self.service_store.max_batch_size = max_batch_size

        # Deploys services
        is_first, locally_deployed = self._after_init_common(server)

        # Initializes worker store, including connectors
        self.worker_store.init()
        self.request_dispatcher_dispatch = self.worker_store.request_dispatcher.dispatch

        # Configure remaining parts of SSO
        self.configure_sso()

        # Cannot be done in __init__ because self.sso_config is not available there yet
        salt_size = self.sso_config.hash_secret.salt_size
        self.crypto_manager.add_hash_scheme('zato.default', self.sso_config.hash_secret.rounds, salt_size)

        for name in ('current_work_dir', 'backup_work_dir', 'last_backup_work_dir', 'delete_after_pickup'):

            # New in 2.0
            if name == 'delete_after_pickup':

                # For backward compatibility, we need to support both names
                old_name = 'delete_after_pick_up'

                if old_name in self.fs_server_config.hot_deploy:
                    _name = old_name
                else:
                    _name = name

                value = asbool(self.fs_server_config.hot_deploy.get(_name, True))
                self.hot_deploy_config[name] = value
            else:
                self.hot_deploy_config[name] = os.path.normpath(os.path.join(
                    self.hot_deploy_config.work_dir, self.fs_server_config.hot_deploy[name]))

        broker_callbacks = {
            TOPICS[MESSAGE_TYPE.TO_PARALLEL_ANY]: self.worker_store.on_broker_msg,
            TOPICS[MESSAGE_TYPE.TO_PARALLEL_ALL]: self.worker_store.on_broker_msg,
        }

        self.broker_client = BrokerClient(self.kvdb, 'parallel', broker_callbacks, self.get_lua_programs())
        self.worker_store.set_broker_client(self.broker_client)

        # Make sure that broker client's connection is ready before continuing
        # to rule out edge cases where, for instance, hot deployment would
        # try to publish a locally found package (one of extra packages found)
        # before the client's thread connected to KVDB.
        if not self.broker_client.ready:
            start = now = datetime.utcnow()
            max_seconds = 120
            until = now + timedelta(seconds=max_seconds)

            while not self.broker_client.ready:
                now = datetime.utcnow()
                delta = (now - start).total_seconds()
                if now < until:
                    # Do not log too early so as not to clutter logs
                    if delta > 2:
                        logger.info('Waiting for broker client to become ready (%s, max:%s)', delta, max_seconds)
                    gevent.sleep(0.5)
                else:
                    raise Exception('Broker client did not become ready within {} seconds'.format(max_seconds))

        self._after_init_accepted(locally_deployed)
        self.odb.server_up_down(
            server.token, SERVER_UP_STATUS.RUNNING, True, self.host, self.port, self.preferred_address, use_tls)

        if is_first:

            logger.info('First worker of `%s` is %s', self.name, self.pid)

            self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IN_PROCESS_FIRST, kwargs={
                'parallel_server': self,
            })

            # Clean up any old WSX connections possibly registered for this server
            # which may still be lingering around, for instance, if the server was previously
            # shut down forcibly and did not have an opportunity to run self.cleanup_on_stop
            self.cleanup_wsx()

            # Startup services
            self.invoke_startup_services(is_first)
            spawn_greenlet(self.set_up_pickup)

            # Set up subprocess-based IBM MQ connections if that component is enabled
            if self.fs_server_config.component_enabled.ibm_mq:

                # Will block for a few seconds at most, until is_ok is returned,
                # which indicates whether or not the connector started.
                is_ok = self.connector_ibm_mq.start_ibm_mq_connector(int(self.fs_server_config.ibm_mq.ipc_tcp_start_port))

                try:
                    if is_ok:
                        self.connector_ibm_mq.create_initial_wmq_definitions(self.worker_store.worker_config.definition_wmq)
                        self.connector_ibm_mq.create_initial_wmq_outconns(self.worker_store.worker_config.out_wmq)
                        self.connector_ibm_mq.create_initial_wmq_channels(self.worker_store.worker_config.channel_wmq)
                except Exception as e:
                    logger.warn('Could not create initial IBM MQ objects, e:`%s`', e)

            # Set up subprocess-based SFTP connections
            is_ok = self.connector_sftp.start_sftp_connector(int(self.fs_server_config.ibm_mq.ipc_tcp_start_port))
            if is_ok:
                self.connector_sftp.create_initial_sftp_outconns(self.worker_store.worker_config.out_sftp)

        else:
            self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IN_PROCESS_OTHER, kwargs={
                'parallel_server': self,
            })

        # IPC
        self.ipc_api.name = self.ipc_api.get_endpoint_name(self.cluster.name, self.name, self.pid)
        self.ipc_api.pid = self.pid
        self.ipc_api.on_message_callback = self.worker_store.on_ipc_message
        spawn_greenlet(self.ipc_api.run)

        self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.AFTER_STARTED, kwargs={
            'parallel_server': self,
        })

        logger.info('Started `%s@%s` (pid: %s)', server.name, server.cluster.name, self.pid)
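The broker-client readiness loop in the middle of start_server is a reusable pattern: poll a flag, stay silent for the first couple of seconds, then log progress, and give up with an exception once a hard timeout passes. A standalone sketch of that pattern; the function name and defaults are ours, and plain time.sleep stands in for gevent.sleep:

from datetime import datetime, timedelta
import time

def wait_until_ready(is_ready, max_seconds=120, interval=0.5, quiet_seconds=2):
    """ Polls is_ready() until it returns True, raising if max_seconds elapse. """
    start = datetime.utcnow()
    until = start + timedelta(seconds=max_seconds)

    while not is_ready():
        now = datetime.utcnow()
        if now >= until:
            raise Exception('Not ready within {} seconds'.format(max_seconds))

        # Do not log too early so as not to clutter logs
        delta = (now - start).total_seconds()
        if delta > quiet_seconds:
            print('Waiting to become ready ({:.1f}s elapsed, max {}s)'.format(delta, max_seconds))

        time.sleep(interval)

# Usage, e.g.: wait_until_ready(lambda: broker_client.ready)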
Example #23
    def test_parse_nested_dict_all_sio_elems_some_missing(self):

        _default_input_value = 'default-input-value'
        default_locality = 'default-locality'
        default_address = 'default-address'

        locality = Dict('locality',
                        Int('type'),
                        Text('name'),
                        AsIs('-coords'),
                        Decimal('geo_skip'),
                        Float('geo_diff'),
                        default=default_locality)

        address = Dict('address',
                       locality,
                       UUID('-street_id'),
                       CSV('prefs'),
                       DateTime('since'),
                       List('types'),
                       Opaque('opaque1'),
                       default=default_address)

        email = Dict('email', Text('value'), Bool('is_business'),
                     Date('-join_date'),
                     DictList('preferred_order', 'name', '-pos'))
        customer = Dict('customer', 'name', email, address)

        class MyService(Service):
            class SimpleIO:
                input = customer
                default_input_value = 'default-input-value'

        CySimpleIO.attach_sio(self.get_server_config(), MyService)

        # Note that 'join_date', 'street_id', 'coords' and one of the 'pos' keys are missing
        # from the input below; the test ensures that default values are used in their place.

        data = Bunch()
        data.customer = Bunch()
        data.customer.name = 'my-name'
        data.customer.email = Bunch()
        data.customer.email.value = 'my-email'
        data.customer.email.is_business = True
        data.customer.email.preferred_order = [{
            'name': 'address2',
            'pos': '2'
        }, {
            'name': 'address1'
        }]
        data.customer.address = Bunch()
        data.customer.address.locality = Bunch()
        data.customer.address.locality.type = '111'
        data.customer.address.locality.name = 'my-locality'
        data.customer.address.locality.geo_skip = '123.456'
        data.customer.address.locality.geo_diff = '999.777'
        data.customer.address.prefs = '1,2,3,4'
        data.customer.address.since = '27-11-1988T11:22:33'
        data.customer.address.types = ['a', 'b', 'c', 'd']
        data.customer.address.opaque1 = object()

        input = MyService._sio.parse_input(data, DATA_FORMAT.JSON)
        self.assertIsInstance(input, Bunch)

        self.assertEqual(input.customer.name, data.customer.name)
        self.assertEqual(input.customer.email.value,
                         data.customer.email.value)
        self.assertEqual(input.customer.email.is_business,
                         data.customer.email.is_business)
        self.assertEqual(input.customer.email.join_date, _default_input_value)

        self.assertDictEqual(input.customer.email.preferred_order[0],
                             data.customer.email.preferred_order[0])
        self.assertEqual(input.customer.email.preferred_order[1].name,
                         data.customer.email.preferred_order[1]['name'])
        self.assertEqual(input.customer.email.preferred_order[1].pos,
                         _default_input_value)

        self.assertEqual(input.customer.address.locality.type,
                         int(data.customer.address.locality.type))
        self.assertEqual(input.customer.address.locality.name,
                         data.customer.address.locality.name)
        self.assertEqual(input.customer.address.locality.coords,
                         default_locality)
        self.assertEqual(
            input.customer.address.locality.geo_skip,
            decimal_Decimal(data.customer.address.locality.geo_skip))
        self.assertEqual(input.customer.address.locality.geo_diff,
                         float(data.customer.address.locality.geo_diff))
        self.assertEqual(input.customer.address.street_id, default_address)
        self.assertEqual(input.customer.address.prefs,
                         data.customer.address.prefs.split(','))
        self.assertEqual(input.customer.address.since,
                         dt_parse(data.customer.address.since))
        self.assertEqual(input.customer.address.types,
                         data.customer.address.types)
        self.assertIs(input.customer.address.opaque1,
                      data.customer.address.opaque1)
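
The assertions above all exercise one rule: when a declared element is missing from the input, the enclosing Dict's default (or default_input_value) is substituted. A minimal pure-Python sketch of that substitution rule, purely for illustration - the real CySimpleIO parser also performs the per-type conversions seen above:

    def fill_defaults(declared, data, default):
        """ Returns a copy of `data` with every declared-but-missing key set
        to `default`. Optional SIO elements are declared with a '-' prefix.
        """
        out = dict(data)
        for name in declared:
            key = name.lstrip('-')
            if key not in out:
                out[key] = default
        return out

    # 'coords' is declared but absent from the input, so it receives the default,
    # mirroring what the test asserts for input.customer.address.locality.coords.
    locality = fill_defaults(['type', 'name', '-coords'], {'type': 111, 'name': 'my-locality'}, 'default-locality')
    assert locality['coords'] == 'default-locality'
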
Example #24
    def set_up_config(self, server):

        # Which components are enabled
        self.component_enabled.stats = asbool(self.fs_server_config.component_enabled.stats)
        self.component_enabled.slow_response = asbool(self.fs_server_config.component_enabled.slow_response)
        self.component_enabled.live_msg_browser = asbool(self.fs_server_config.component_enabled.live_msg_browser)

        # Details of what is enabled in live message browser
        self.live_msg_browser = self.fs_server_config.live_msg_browser
        self.live_msg_browser.include_internal = asbool(self.live_msg_browser.include_internal)

        # Pub/sub
        self.pubsub = PubSubAPI(RedisPubSub(self.kvdb.conn))

        #
        # Cassandra - start
        #

        query = self.odb.get_cassandra_conn_list(server.cluster.id, True)
        self.config.cassandra_conn = ConfigDict.from_query('cassandra_conn', query)

        query = self.odb.get_cassandra_query_list(server.cluster.id, True)
        self.config.cassandra_query = ConfigDict.from_query('cassandra_query', query)

        #
        # Cassandra - end
        #

        #
        # Search - start
        #

        query = self.odb.get_search_es_list(server.cluster.id, True)
        self.config.search_es = ConfigDict.from_query('search_es', query)

        query = self.odb.get_search_solr_list(server.cluster.id, True)
        self.config.search_solr = ConfigDict.from_query('search_solr', query)

        #
        # Search - end
        #

        #
        # Cloud - start
        #

        # OpenStack - Swift

        query = self.odb.get_cloud_openstack_swift_list(server.cluster.id, True)
        self.config.cloud_openstack_swift = ConfigDict.from_query('cloud_openstack_swift', query)

        query = self.odb.get_cloud_aws_s3_list(server.cluster.id, True)
        self.config.cloud_aws_s3 = ConfigDict.from_query('cloud_aws_s3', query)

        #
        # Cloud - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # Services
        query = self.odb.get_service_list(server.cluster.id, True)
        self.config.service = ConfigDict.from_query('service_list', query)

        #
        # Definitions - start
        #

        # AMQP
        query = self.odb.get_definition_amqp_list(server.cluster.id, True)
        self.config.definition_amqp = ConfigDict.from_query('definition_amqp', query)

        #
        # Definitions - end
        #

        #
        # Channels - start
        #

        # AMQP
        query = self.odb.get_channel_amqp_list(server.cluster.id, True)
        self.config.channel_amqp = ConfigDict.from_query('channel_amqp', query)

        # STOMP
        query = self.odb.get_channel_stomp_list(server.cluster.id, True)
        self.config.channel_stomp = ConfigDict.from_query('channel_stomp', query)

        #
        # Channels - end
        #

        #
        # Outgoing connections - start
        #

        # AMQP
        query = self.odb.get_out_amqp_list(server.cluster.id, True)
        self.config.out_amqp = ConfigDict.from_query('out_amqp', query)

        # FTP
        query = self.odb.get_out_ftp_list(server.cluster.id, True)
        self.config.out_ftp = ConfigDict.from_query('out_ftp', query)

        # JMS WMQ
        query = self.odb.get_out_jms_wmq_list(server.cluster.id, True)
        self.config.out_jms_wmq = ConfigDict.from_query('out_jms_wmq', query)

        # Odoo
        query = self.odb.get_out_odoo_list(server.cluster.id, True)
        self.config.out_odoo = ConfigDict.from_query('out_odoo', query)

        # Plain HTTP
        query = self.odb.get_http_soap_list(server.cluster.id, 'outgoing', 'plain_http', True)
        self.config.out_plain_http = ConfigDict.from_query('out_plain_http', query)

        # SOAP
        query = self.odb.get_http_soap_list(server.cluster.id, 'outgoing', 'soap', True)
        self.config.out_soap = ConfigDict.from_query('out_soap', query)

        # SQL
        query = self.odb.get_out_sql_list(server.cluster.id, True)
        self.config.out_sql = ConfigDict.from_query('out_sql', query)

        # STOMP
        query = self.odb.get_out_stomp_list(server.cluster.id, True)
        self.config.out_stomp = ConfigDict.from_query('out_stomp', query)

        # ZMQ channels
        query = self.odb.get_channel_zmq_list(server.cluster.id, True)
        self.config.channel_zmq = ConfigDict.from_query('channel_zmq', query)

        # ZMQ outgoing
        query = self.odb.get_out_zmq_list(server.cluster.id, True)
        self.config.out_zmq = ConfigDict.from_query('out_zmq', query)

        # WebSocket channels
        query = self.odb.get_channel_web_socket_list(server.cluster.id, True)
        self.config.channel_web_socket = ConfigDict.from_query('channel_web_socket', query)

        #
        # Outgoing connections - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        #
        # Notifications - start
        #

        # OpenStack Swift
        query = self.odb.get_notif_cloud_openstack_swift_list(server.cluster.id, True)
        self.config.notif_cloud_openstack_swift = ConfigDict.from_query('notif_cloud_openstack_swift', query)

        # SQL
        query = self.odb.get_notif_sql_list(server.cluster.id, True)
        self.config.notif_sql = ConfigDict.from_query('notif_sql', query)

        #
        # Notifications - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        #
        # Security - start
        #

        # API keys
        query = self.odb.get_apikey_security_list(server.cluster.id, True)
        self.config.apikey = ConfigDict.from_query('apikey', query)

        # AWS
        query = self.odb.get_aws_security_list(server.cluster.id, True)
        self.config.aws = ConfigDict.from_query('aws', query)

        # HTTP Basic Auth
        query = self.odb.get_basic_auth_list(server.cluster.id, None, True)
        self.config.basic_auth = ConfigDict.from_query('basic_auth', query)

        # JWT
        query = self.odb.get_jwt_list(server.cluster.id, None, True)
        self.config.jwt = ConfigDict.from_query('jwt', query)

        # NTLM
        query = self.odb.get_ntlm_list(server.cluster.id, True)
        self.config.ntlm = ConfigDict.from_query('ntlm', query)

        # OAuth
        query = self.odb.get_oauth_list(server.cluster.id, True)
        self.config.oauth = ConfigDict.from_query('oauth', query)

        # OpenStack
        query = self.odb.get_openstack_security_list(server.cluster.id, True)
        self.config.openstack_security = ConfigDict.from_query('openstack_security', query)

        # RBAC - permissions
        query = self.odb.get_rbac_permission_list(server.cluster.id, True)
        self.config.rbac_permission = ConfigDict.from_query('rbac_permission', query)

        # RBAC - roles
        query = self.odb.get_rbac_role_list(server.cluster.id, True)
        self.config.rbac_role = ConfigDict.from_query('rbac_role', query)

        # RBAC - client roles
        query = self.odb.get_rbac_client_role_list(server.cluster.id, True)
        self.config.rbac_client_role = ConfigDict.from_query('rbac_client_role', query)

        # RBAC - role permission
        query = self.odb.get_rbac_role_permission_list(server.cluster.id, True)
        self.config.rbac_role_permission = ConfigDict.from_query('rbac_role_permission', query)

        # Technical accounts
        query = self.odb.get_tech_acc_list(server.cluster.id, True)
        self.config.tech_acc = ConfigDict.from_query('tech_acc', query)

        # TLS CA certs
        query = self.odb.get_tls_ca_cert_list(server.cluster.id, True)
        self.config.tls_ca_cert = ConfigDict.from_query('tls_ca_cert', query)

        # TLS channel security
        query = self.odb.get_tls_channel_sec_list(server.cluster.id, True)
        self.config.tls_channel_sec = ConfigDict.from_query('tls_channel_sec', query)

        # TLS key/cert pairs
        query = self.odb.get_tls_key_cert_list(server.cluster.id, True)
        self.config.tls_key_cert = ConfigDict.from_query('tls_key_cert', query)

        # WS-Security
        query = self.odb.get_wss_list(server.cluster.id, True)
        self.config.wss = ConfigDict.from_query('wss', query)

        # Vault connections
        query = self.odb.get_vault_connection_list(server.cluster.id, True)
        self.config.vault_conn_sec = ConfigDict.from_query('vault_conn_sec', query)

        # XPath
        query = self.odb.get_xpath_sec_list(server.cluster.id, True)
        self.config.xpath_sec = ConfigDict.from_query('xpath_sec', query)

        #
        # Security - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # All the HTTP/SOAP channels.
        http_soap = []
        for item in self.odb.get_http_soap_list(server.cluster.id, 'channel'):

            hs_item = {}
            for key in item.keys():
                hs_item[key] = getattr(item, key)

            hs_item['replace_patterns_json_pointer'] = item.replace_patterns_json_pointer
            hs_item['replace_patterns_xpath'] = item.replace_patterns_xpath

            hs_item['match_target'] = '{}{}{}'.format(hs_item['soap_action'], MISC.SEPARATOR, hs_item['url_path'])
            hs_item['match_target_compiled'] = Matcher(hs_item['match_target'])

            http_soap.append(hs_item)

        self.config.http_soap = http_soap

        # Namespaces
        query = self.odb.get_namespace_list(server.cluster.id, True)
        self.config.msg_ns = ConfigDict.from_query('msg_ns', query)

        # XPath
        query = self.odb.get_xpath_list(server.cluster.id, True)
        self.config.xpath = ConfigDict.from_query('msg_xpath', query)

        # JSON Pointer
        query = self.odb.get_json_pointer_list(server.cluster.id, True)
        self.config.json_pointer = ConfigDict.from_query('json_pointer', query)

        # SimpleIO
        self.config.simple_io = ConfigDict('simple_io', Bunch())
        self.config.simple_io['int_parameters'] = self.int_parameters
        self.config.simple_io['int_parameter_suffixes'] = self.int_parameter_suffixes
        self.config.simple_io['bool_parameter_prefixes'] = self.bool_parameter_prefixes

        # Pub/sub config
        self.config.pubsub = Bunch()
        self.config.pubsub.default_consumer = Bunch()
        self.config.pubsub.default_producer = Bunch()

        query = self.odb.get_pubsub_topic_list(server.cluster.id, True)
        self.config.pubsub.topics = ConfigDict.from_query('pubsub_topics', query)

        id, name = self.odb.get_pubsub_default_client(server.cluster.id, 'zato.pubsub.default-consumer')
        self.config.pubsub.default_consumer.id, self.config.pubsub.default_consumer.name = id, name

        id, name = self.odb.get_pubsub_default_client(server.cluster.id, 'zato.pubsub.default-producer')
        self.config.pubsub.default_producer.id, self.config.pubsub.default_producer.name = id, name

        query = self.odb.get_pubsub_producer_list(server.cluster.id, True)
        self.config.pubsub.producers = ConfigDict.from_query('pubsub_producers', query, list_config=True)

        query = self.odb.get_pubsub_consumer_list(server.cluster.id, True)
        self.config.pubsub.consumers = ConfigDict.from_query('pubsub_consumers', query, list_config=True)

        # E-mail - SMTP
        query = self.odb.get_email_smtp_list(server.cluster.id, True)
        self.config.email_smtp = ConfigDict.from_query('email_smtp', query)

        # E-mail - IMAP
        query = self.odb.get_email_imap_list(server.cluster.id, True)
        self.config.email_imap = ConfigDict.from_query('email_imap', query)

        # Message paths
        self.config.msg_ns_store = NamespaceStore()
        self.config.json_pointer_store = JSONPointerStore()
        self.config.xpath_store = XPathStore()

        # Assign config to worker
        self.worker_store.worker_config = self.config
        self.worker_store.pubsub = self.pubsub
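
Nearly every block in set_up_config follows one pattern: run an ODB query, then wrap its rows in a ConfigDict keyed by each item's name. A rough sketch of what such a from_query constructor might look like, assuming SQLAlchemy-style rows that expose _asdict() and an optional password field (the real class also accepts list_config and drop_opaque, as other examples here show):

    from bunch import Bunch

    class ConfigDict(dict):
        """ Configuration items keyed by each item's name. """
        def __init__(self, name, initial=None):
            super(ConfigDict, self).__init__(initial or {})
            self.name = name

        @classmethod
        def from_query(cls, name, query, decrypt_func=None):
            config = cls(name)
            for row in query:
                item = Bunch(row._asdict())  # Assumes SQLAlchemy-style rows
                if decrypt_func and item.get('password'):
                    item.password = decrypt_func(item.password)
                config[item.name] = item
            return config
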
Example #25
    def set_up_config(self, server):

        # Which components are enabled
        self.component_enabled.stats = asbool(self.fs_server_config.component_enabled.stats)
        self.component_enabled.slow_response = asbool(self.fs_server_config.component_enabled.slow_response)

        #
        # Cassandra - start
        #

        query = self.odb.get_cassandra_conn_list(server.cluster.id, True)
        self.config.cassandra_conn = ConfigDict.from_query('cassandra_conn', query, decrypt_func=self.decrypt)

        query = self.odb.get_cassandra_query_list(server.cluster.id, True)
        self.config.cassandra_query = ConfigDict.from_query('cassandra_query', query, decrypt_func=self.decrypt)

        #
        # Cassandra - end
        #

        #
        # Search - start
        #

        query = self.odb.get_search_es_list(server.cluster.id, True)
        self.config.search_es = ConfigDict.from_query('search_es', query, decrypt_func=self.decrypt)

        query = self.odb.get_search_solr_list(server.cluster.id, True)
        self.config.search_solr = ConfigDict.from_query('search_solr', query, decrypt_func=self.decrypt)

        #
        # Search - end
        #

        #
        # SMS - start
        #

        query = self.odb.get_sms_twilio_list(server.cluster.id, True)
        self.config.sms_twilio = ConfigDict.from_query('sms_twilio', query, decrypt_func=self.decrypt)

        #
        # SMS - end
        #

        #
        # Cloud - start
        #

        # OpenStack - Swift

        query = self.odb.get_cloud_openstack_swift_list(server.cluster.id, True)
        self.config.cloud_openstack_swift = ConfigDict.from_query('cloud_openstack_swift', query, decrypt_func=self.decrypt)

        query = self.odb.get_cloud_aws_s3_list(server.cluster.id, True)
        self.config.cloud_aws_s3 = ConfigDict.from_query('cloud_aws_s3', query, decrypt_func=self.decrypt)

        #
        # Cloud - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # Services
        query = self.odb.get_service_list(server.cluster.id, True)
        self.config.service = ConfigDict.from_query('service_list', query, decrypt_func=self.decrypt)

        #
        # Definitions - start
        #

        # AMQP
        query = self.odb.get_definition_amqp_list(server.cluster.id, True)
        self.config.definition_amqp = ConfigDict.from_query('definition_amqp', query, decrypt_func=self.decrypt)

        # IBM MQ
        query = self.odb.get_definition_wmq_list(server.cluster.id, True)
        self.config.definition_wmq = ConfigDict.from_query('definition_wmq', query, decrypt_func=self.decrypt)

        #
        # Definitions - end
        #

        #
        # Channels - start
        #

        # AMQP
        query = self.odb.get_channel_amqp_list(server.cluster.id, True)
        self.config.channel_amqp = ConfigDict.from_query('channel_amqp', query, decrypt_func=self.decrypt)

        # STOMP
        query = self.odb.get_channel_stomp_list(server.cluster.id, True)
        self.config.channel_stomp = ConfigDict.from_query('channel_stomp', query, decrypt_func=self.decrypt)

        # IBM MQ
        query = self.odb.get_channel_wmq_list(server.cluster.id, True)
        self.config.channel_wmq = ConfigDict.from_query('channel_wmq', query, decrypt_func=self.decrypt)

        #
        # Channels - end
        #

        #
        # Outgoing connections - start
        #

        # AMQP
        query = self.odb.get_out_amqp_list(server.cluster.id, True)
        self.config.out_amqp = ConfigDict.from_query('out_amqp', query, decrypt_func=self.decrypt)

        # Caches
        query = self.odb.get_cache_builtin_list(server.cluster.id, True)
        self.config.cache_builtin = ConfigDict.from_query('cache_builtin', query, decrypt_func=self.decrypt)

        query = self.odb.get_cache_memcached_list(server.cluster.id, True)
        self.config.cache_memcached = ConfigDict.from_query('cache_memcached', query, decrypt_func=self.decrypt)

        # FTP
        query = self.odb.get_out_ftp_list(server.cluster.id, True)
        self.config.out_ftp = ConfigDict.from_query('out_ftp', query, decrypt_func=self.decrypt)

        # IBM MQ
        query = self.odb.get_out_wmq_list(server.cluster.id, True)
        self.config.out_wmq = ConfigDict.from_query('out_wmq', query, decrypt_func=self.decrypt)

        # Odoo
        query = self.odb.get_out_odoo_list(server.cluster.id, True)
        self.config.out_odoo = ConfigDict.from_query('out_odoo', query, decrypt_func=self.decrypt)

        # SAP RFC
        query = self.odb.get_out_sap_list(server.cluster.id, True)
        self.config.out_sap = ConfigDict.from_query('out_sap', query, decrypt_func=self.decrypt)

        # REST
        query = self.odb.get_http_soap_list(server.cluster.id, 'outgoing', 'plain_http', True)
        self.config.out_plain_http = ConfigDict.from_query('out_plain_http', query, decrypt_func=self.decrypt)

        # SFTP
        query = self.odb.get_out_sftp_list(server.cluster.id, True)
        self.config.out_sftp = ConfigDict.from_query('out_sftp', query, decrypt_func=self.decrypt, drop_opaque=True)

        # SOAP
        query = self.odb.get_http_soap_list(server.cluster.id, 'outgoing', 'soap', True)
        self.config.out_soap = ConfigDict.from_query('out_soap', query, decrypt_func=self.decrypt)

        # SQL
        query = self.odb.get_out_sql_list(server.cluster.id, True)
        self.config.out_sql = ConfigDict.from_query('out_sql', query, decrypt_func=self.decrypt)

        # STOMP
        query = self.odb.get_out_stomp_list(server.cluster.id, True)
        self.config.out_stomp = ConfigDict.from_query('out_stomp', query, decrypt_func=self.decrypt)

        # ZMQ channels
        query = self.odb.get_channel_zmq_list(server.cluster.id, True)
        self.config.channel_zmq = ConfigDict.from_query('channel_zmq', query, decrypt_func=self.decrypt)

        # ZMQ outgoing
        query = self.odb.get_out_zmq_list(server.cluster.id, True)
        self.config.out_zmq = ConfigDict.from_query('out_zmq', query, decrypt_func=self.decrypt)

        # WebSocket channels
        query = self.odb.get_channel_web_socket_list(server.cluster.id, True)
        self.config.channel_web_socket = ConfigDict.from_query('channel_web_socket', query, decrypt_func=self.decrypt)

        #
        # Outgoing connections - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        #
        # Generic - start
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # Connections
        query = self.odb.get_generic_connection_list(server.cluster.id, True)
        self.config.generic_connection = ConfigDict.from_query('generic_connection', query, decrypt_func=self.decrypt)

        #
        # Generic - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        #
        # Notifications - start
        #

        # OpenStack Swift
        query = self.odb.get_notif_cloud_openstack_swift_list(server.cluster.id, True)
        self.config.notif_cloud_openstack_swift = ConfigDict.from_query('notif_cloud_openstack_swift',
            query, decrypt_func=self.decrypt)

        # SQL
        query = self.odb.get_notif_sql_list(server.cluster.id, True)
        self.config.notif_sql = ConfigDict.from_query('notif_sql', query, decrypt_func=self.decrypt)

        #
        # Notifications - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        #
        # Security - start
        #

        # API keys
        query = self.odb.get_apikey_security_list(server.cluster.id, True)
        self.config.apikey = ConfigDict.from_query('apikey', query, decrypt_func=self.decrypt)

        # AWS
        query = self.odb.get_aws_security_list(server.cluster.id, True)
        self.config.aws = ConfigDict.from_query('aws', query, decrypt_func=self.decrypt)

        # HTTP Basic Auth
        query = self.odb.get_basic_auth_list(server.cluster.id, None, True)
        self.config.basic_auth = ConfigDict.from_query('basic_auth', query, decrypt_func=self.decrypt)

        # JWT
        query = self.odb.get_jwt_list(server.cluster.id, None, True)
        self.config.jwt = ConfigDict.from_query('jwt', query, decrypt_func=self.decrypt)

        # NTLM
        query = self.odb.get_ntlm_list(server.cluster.id, True)
        self.config.ntlm = ConfigDict.from_query('ntlm', query, decrypt_func=self.decrypt)

        # OAuth
        query = self.odb.get_oauth_list(server.cluster.id, True)
        self.config.oauth = ConfigDict.from_query('oauth', query, decrypt_func=self.decrypt)

        # OpenStack
        query = self.odb.get_openstack_security_list(server.cluster.id, True)
        self.config.openstack_security = ConfigDict.from_query('openstack_security', query, decrypt_func=self.decrypt)

        # RBAC - permissions
        query = self.odb.get_rbac_permission_list(server.cluster.id, True)
        self.config.rbac_permission = ConfigDict.from_query('rbac_permission', query, decrypt_func=self.decrypt)

        # RBAC - roles
        query = self.odb.get_rbac_role_list(server.cluster.id, True)
        self.config.rbac_role = ConfigDict.from_query('rbac_role', query, decrypt_func=self.decrypt)

        # RBAC - client roles
        query = self.odb.get_rbac_client_role_list(server.cluster.id, True)
        self.config.rbac_client_role = ConfigDict.from_query('rbac_client_role', query, decrypt_func=self.decrypt)

        # RBAC - role permission
        query = self.odb.get_rbac_role_permission_list(server.cluster.id, True)
        self.config.rbac_role_permission = ConfigDict.from_query('rbac_role_permission', query, decrypt_func=self.decrypt)

        # TLS CA certs
        query = self.odb.get_tls_ca_cert_list(server.cluster.id, True)
        self.config.tls_ca_cert = ConfigDict.from_query('tls_ca_cert', query, decrypt_func=self.decrypt)

        # TLS channel security
        query = self.odb.get_tls_channel_sec_list(server.cluster.id, True)
        self.config.tls_channel_sec = ConfigDict.from_query('tls_channel_sec', query, decrypt_func=self.decrypt)

        # TLS key/cert pairs
        query = self.odb.get_tls_key_cert_list(server.cluster.id, True)
        self.config.tls_key_cert = ConfigDict.from_query('tls_key_cert', query, decrypt_func=self.decrypt)

        # WS-Security
        query = self.odb.get_wss_list(server.cluster.id, True)
        self.config.wss = ConfigDict.from_query('wss', query, decrypt_func=self.decrypt)

        # Vault connections
        query = self.odb.get_vault_connection_list(server.cluster.id, True)
        self.config.vault_conn_sec = ConfigDict.from_query('vault_conn_sec', query, decrypt_func=self.decrypt)

        # XPath
        query = self.odb.get_xpath_sec_list(server.cluster.id, True)
        self.config.xpath_sec = ConfigDict.from_query('xpath_sec', query, decrypt_func=self.decrypt)

        # Encrypt all secrets
        self._encrypt_secrets()

        #
        # Security - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # All the HTTP/SOAP channels.
        http_soap = []

        for item in elems_with_opaque(self.odb.get_http_soap_list(server.cluster.id, 'channel')):

            hs_item = {}
            for key in item.keys():
                hs_item[key] = getattr(item, key)

            hs_item['match_target'] = get_match_target(hs_item, http_methods_allowed_re=self.http_methods_allowed_re)
            hs_item['match_target_compiled'] = Matcher(hs_item['match_target'], hs_item.get('match_slash', ''))

            http_soap.append(hs_item)

        self.config.http_soap = http_soap

        # Namespaces
        query = self.odb.get_namespace_list(server.cluster.id, True)
        self.config.msg_ns = ConfigDict.from_query('msg_ns', query, decrypt_func=self.decrypt)

        # XPath
        query = self.odb.get_xpath_list(server.cluster.id, True)
        self.config.xpath = ConfigDict.from_query('msg_xpath', query, decrypt_func=self.decrypt)

        # JSON Pointer
        query = self.odb.get_json_pointer_list(server.cluster.id, True)
        self.config.json_pointer = ConfigDict.from_query('json_pointer', query, decrypt_func=self.decrypt)

        # SimpleIO
        # In preparation for a SIO rewrite, SIO config is loaded from a file,
        # but the actual code paths still require the pre-3.0 format, so it is prepared here.
        self.config.simple_io = ConfigDict('simple_io', Bunch())

        int_exact = self.sio_config.int.exact
        int_suffix = self.sio_config.int.suffix
        bool_prefix = self.sio_config.bool.prefix

        self.config.simple_io['int_parameters'] = int_exact if isinstance(int_exact, list) else [int_exact]
        self.config.simple_io['int_parameter_suffixes'] = int_suffix if isinstance(int_suffix, list) else [int_suffix]
        self.config.simple_io['bool_parameter_prefixes'] = bool_prefix if isinstance(bool_prefix, list) else [bool_prefix]

        # Maintain backward-compatibility with pre-3.1 versions that did not specify any particular encoding
        bytes_to_str = self.sio_config.get('bytes_to_str')
        if not bytes_to_str:
            bytes_to_str = {'encoding': None}

        self.config.simple_io['bytes_to_str'] = bytes_to_str

        # Pub/sub
        self.config.pubsub = Bunch()

        # Pub/sub - endpoints
        query = self.odb.get_pubsub_endpoint_list(server.cluster.id, True)
        self.config.pubsub_endpoint = ConfigDict.from_query('pubsub_endpoint', query, decrypt_func=self.decrypt)

        # Pub/sub - topics
        query = self.odb.get_pubsub_topic_list(server.cluster.id, True)
        self.config.pubsub_topic = ConfigDict.from_query('pubsub_topic', query, decrypt_func=self.decrypt)

        # Pub/sub - subscriptions
        query = self.odb.get_pubsub_subscription_list(server.cluster.id, True)
        self.config.pubsub_subscription = ConfigDict.from_query('pubsub_subscription', query, decrypt_func=self.decrypt)

        # E-mail - SMTP
        query = self.odb.get_email_smtp_list(server.cluster.id, True)
        self.config.email_smtp = ConfigDict.from_query('email_smtp', query, decrypt_func=self.decrypt)

        # E-mail - IMAP
        query = self.odb.get_email_imap_list(server.cluster.id, True)
        self.config.email_imap = ConfigDict.from_query('email_imap', query, decrypt_func=self.decrypt)

        # Message paths
        self.config.msg_ns_store = NamespaceStore()
        self.config.json_pointer_store = JSONPointerStore()
        self.config.xpath_store = XPathStore()

        # HTTP access log should optionally ignore certain requests
        access_log_ignore = self.fs_server_config.get('logging', {}).get('http_access_log_ignore')
        if access_log_ignore:
            access_log_ignore = access_log_ignore if isinstance(access_log_ignore, list) else [access_log_ignore]
            self.needs_all_access_log = False
            self.access_log_ignore.update(access_log_ignore)

        # Assign config to worker
        self.worker_store.worker_config = self.config
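
A recurring micro-pattern above is coercing a config value that may arrive as either a scalar or a list into a list, e.g. for int_parameters and http_access_log_ignore. A one-line helper capturing it:

    def as_list(value):
        """ Returns `value` unchanged if it is already a list, otherwise wraps it in one. """
        return value if isinstance(value, list) else [value]

    print(as_list('id'))               # ['id']
    print(as_list(['_count', '_id']))  # unchanged
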
Example #26
    def set_up_config(self, server):

        # Which components are enabled
        self.component_enabled.stats = asbool(self.fs_server_config.component_enabled.stats)
        self.component_enabled.slow_response = asbool(self.fs_server_config.component_enabled.slow_response)
        self.component_enabled.live_msg_browser = asbool(self.fs_server_config.component_enabled.live_msg_browser)

        # Details of what is enabled in live message browser
        self.live_msg_browser = self.fs_server_config.live_msg_browser
        self.live_msg_browser.include_internal = asbool(self.live_msg_browser.include_internal)

        #
        # Cassandra - start
        #

        query = self.odb.get_cassandra_conn_list(server.cluster.id, True)
        self.config.cassandra_conn = ConfigDict.from_query('cassandra_conn', query, decrypt_func=self.decrypt)

        query = self.odb.get_cassandra_query_list(server.cluster.id, True)
        self.config.cassandra_query = ConfigDict.from_query('cassandra_query', query, decrypt_func=self.decrypt)

        #
        # Cassandra - end
        #

        #
        # Search - start
        #

        query = self.odb.get_search_es_list(server.cluster.id, True)
        self.config.search_es = ConfigDict.from_query('search_es', query, decrypt_func=self.decrypt)

        query = self.odb.get_search_solr_list(server.cluster.id, True)
        self.config.search_solr = ConfigDict.from_query('search_solr', query, decrypt_func=self.decrypt)

        #
        # Search - end
        #

        #
        # SMS - start
        #

        query = self.odb.get_sms_twilio_list(server.cluster.id, True)
        self.config.sms_twilio = ConfigDict.from_query('sms_twilio', query, decrypt_func=self.decrypt)

        #
        # SMS - end
        #

        #
        # Cloud - start
        #

        # OpenStack - Swift

        query = self.odb.get_cloud_openstack_swift_list(server.cluster.id, True)
        self.config.cloud_openstack_swift = ConfigDict.from_query('cloud_openstack_swift', query, decrypt_func=self.decrypt)

        query = self.odb.get_cloud_aws_s3_list(server.cluster.id, True)
        self.config.cloud_aws_s3 = ConfigDict.from_query('cloud_aws_s3', query, decrypt_func=self.decrypt)

        #
        # Cloud - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # Services
        query = self.odb.get_service_list(server.cluster.id, True)
        self.config.service = ConfigDict.from_query('service_list', query, decrypt_func=self.decrypt)

        #
        # Definitions - start
        #

        # AMQP
        query = self.odb.get_definition_amqp_list(server.cluster.id, True)
        self.config.definition_amqp = ConfigDict.from_query('definition_amqp', query, decrypt_func=self.decrypt)

        query = self.odb.get_definition_wmq_list(server.cluster.id, True)
        self.config.definition_wmq = ConfigDict.from_query('definition_wmq', query, decrypt_func=self.decrypt)

        #
        # Definitions - end
        #

        #
        # Channels - start
        #

        # AMQP
        query = self.odb.get_channel_amqp_list(server.cluster.id, True)
        self.config.channel_amqp = ConfigDict.from_query('channel_amqp', query, decrypt_func=self.decrypt)

        # STOMP
        query = self.odb.get_channel_stomp_list(server.cluster.id, True)
        self.config.channel_stomp = ConfigDict.from_query('channel_stomp', query, decrypt_func=self.decrypt)

        # IBM MQ
        query = self.odb.get_channel_wmq_list(server.cluster.id, True)
        self.config.channel_wmq = ConfigDict.from_query('channel_wmq', query, decrypt_func=self.decrypt)

        #
        # Channels - end
        #

        #
        # Outgoing connections - start
        #

        # AMQP
        query = self.odb.get_out_amqp_list(server.cluster.id, True)
        self.config.out_amqp = ConfigDict.from_query('out_amqp', query, decrypt_func=self.decrypt)

        # Caches
        query = self.odb.get_cache_builtin_list(server.cluster.id, True)
        self.config.cache_builtin = ConfigDict.from_query('cache_builtin', query, decrypt_func=self.decrypt)

        query = self.odb.get_cache_memcached_list(server.cluster.id, True)
        self.config.cache_memcached = ConfigDict.from_query('cache_memcached', query, decrypt_func=self.decrypt)

        # FTP
        query = self.odb.get_out_ftp_list(server.cluster.id, True)
        self.config.out_ftp = ConfigDict.from_query('out_ftp', query, decrypt_func=self.decrypt)

        # IBM MQ
        query = self.odb.get_out_wmq_list(server.cluster.id, True)
        self.config.out_wmq = ConfigDict.from_query('out_wmq', query, decrypt_func=self.decrypt)

        # Odoo
        query = self.odb.get_out_odoo_list(server.cluster.id, True)
        self.config.out_odoo = ConfigDict.from_query('out_odoo', query, decrypt_func=self.decrypt)

        # Plain HTTP
        query = self.odb.get_http_soap_list(server.cluster.id, 'outgoing', 'plain_http', True)
        self.config.out_plain_http = ConfigDict.from_query('out_plain_http', query, decrypt_func=self.decrypt)

        # SOAP
        query = self.odb.get_http_soap_list(server.cluster.id, 'outgoing', 'soap', True)
        self.config.out_soap = ConfigDict.from_query('out_soap', query, decrypt_func=self.decrypt)

        # SQL
        query = self.odb.get_out_sql_list(server.cluster.id, True)
        self.config.out_sql = ConfigDict.from_query('out_sql', query, decrypt_func=self.decrypt)

        # STOMP
        query = self.odb.get_out_stomp_list(server.cluster.id, True)
        self.config.out_stomp = ConfigDict.from_query('out_stomp', query, decrypt_func=self.decrypt)

        # ZMQ channels
        query = self.odb.get_channel_zmq_list(server.cluster.id, True)
        self.config.channel_zmq = ConfigDict.from_query('channel_zmq', query, decrypt_func=self.decrypt)

        # ZMQ outgoing
        query = self.odb.get_out_zmq_list(server.cluster.id, True)
        self.config.out_zmq = ConfigDict.from_query('out_zmq', query, decrypt_func=self.decrypt)

        # WebSocket channels
        query = self.odb.get_channel_web_socket_list(server.cluster.id, True)
        self.config.channel_web_socket = ConfigDict.from_query('channel_web_socket', query, decrypt_func=self.decrypt)

        #
        # Outgoing connections - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        #
        # Notifications - start
        #

        # OpenStack Swift
        query = self.odb.get_notif_cloud_openstack_swift_list(server.cluster.id, True)
        self.config.notif_cloud_openstack_swift = ConfigDict.from_query('notif_cloud_openstack_swift', query, decrypt_func=self.decrypt)

        # SQL
        query = self.odb.get_notif_sql_list(server.cluster.id, True)
        self.config.notif_sql = ConfigDict.from_query('notif_sql', query, decrypt_func=self.decrypt)

        #
        # Notifications - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        #
        # Security - start
        #

        # API keys
        query = self.odb.get_apikey_security_list(server.cluster.id, True)
        self.config.apikey = ConfigDict.from_query('apikey', query, decrypt_func=self.decrypt)

        # AWS
        query = self.odb.get_aws_security_list(server.cluster.id, True)
        self.config.aws = ConfigDict.from_query('aws', query, decrypt_func=self.decrypt)

        # HTTP Basic Auth
        query = self.odb.get_basic_auth_list(server.cluster.id, None, True)
        self.config.basic_auth = ConfigDict.from_query('basic_auth', query, decrypt_func=self.decrypt)

        # JWT
        query = self.odb.get_jwt_list(server.cluster.id, None, True)
        self.config.jwt = ConfigDict.from_query('jwt', query, decrypt_func=self.decrypt)

        # NTLM
        query = self.odb.get_ntlm_list(server.cluster.id, True)
        self.config.ntlm = ConfigDict.from_query('ntlm', query, decrypt_func=self.decrypt)

        # OAuth
        query = self.odb.get_oauth_list(server.cluster.id, True)
        self.config.oauth = ConfigDict.from_query('oauth', query, decrypt_func=self.decrypt)

        # OpenStack
        query = self.odb.get_openstack_security_list(server.cluster.id, True)
        self.config.openstack_security = ConfigDict.from_query('openstack_security', query, decrypt_func=self.decrypt)

        # RBAC - permissions
        query = self.odb.get_rbac_permission_list(server.cluster.id, True)
        self.config.rbac_permission = ConfigDict.from_query('rbac_permission', query, decrypt_func=self.decrypt)

        # RBAC - roles
        query = self.odb.get_rbac_role_list(server.cluster.id, True)
        self.config.rbac_role = ConfigDict.from_query('rbac_role', query, decrypt_func=self.decrypt)

        # RBAC - client roles
        query = self.odb.get_rbac_client_role_list(server.cluster.id, True)
        self.config.rbac_client_role = ConfigDict.from_query('rbac_client_role', query, decrypt_func=self.decrypt)

        # RBAC - role permission
        query = self.odb.get_rbac_role_permission_list(server.cluster.id, True)
        self.config.rbac_role_permission = ConfigDict.from_query('rbac_role_permission', query, decrypt_func=self.decrypt)

        # TLS CA certs
        query = self.odb.get_tls_ca_cert_list(server.cluster.id, True)
        self.config.tls_ca_cert = ConfigDict.from_query('tls_ca_cert', query, decrypt_func=self.decrypt)

        # TLS channel security
        query = self.odb.get_tls_channel_sec_list(server.cluster.id, True)
        self.config.tls_channel_sec = ConfigDict.from_query('tls_channel_sec', query, decrypt_func=self.decrypt)

        # TLS key/cert pairs
        query = self.odb.get_tls_key_cert_list(server.cluster.id, True)
        self.config.tls_key_cert = ConfigDict.from_query('tls_key_cert', query, decrypt_func=self.decrypt)

        # WS-Security
        query = self.odb.get_wss_list(server.cluster.id, True)
        self.config.wss = ConfigDict.from_query('wss', query, decrypt_func=self.decrypt)

        # Vault connections
        query = self.odb.get_vault_connection_list(server.cluster.id, True)
        self.config.vault_conn_sec = ConfigDict.from_query('vault_conn_sec', query, decrypt_func=self.decrypt)

        # XPath
        query = self.odb.get_xpath_sec_list(server.cluster.id, True)
        self.config.xpath_sec = ConfigDict.from_query('xpath_sec', query, decrypt_func=self.decrypt)

        # New in 3.0 - encrypt all old secrets
        self._migrate_30_encrypt_secrets()

        #
        # Security - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # All the HTTP/SOAP channels.
        http_soap = []
        for item in self.odb.get_http_soap_list(server.cluster.id, 'channel'):

            hs_item = {}
            for key in item.keys():
                hs_item[key] = getattr(item, key)

            hs_item['replace_patterns_json_pointer'] = item.replace_patterns_json_pointer
            hs_item['replace_patterns_xpath'] = item.replace_patterns_xpath

            hs_item['match_target'] = '{}{}{}'.format(hs_item['soap_action'], MISC.SEPARATOR, hs_item['url_path'])
            hs_item['match_target_compiled'] = Matcher(hs_item['match_target'])

            http_soap.append(hs_item)

        self.config.http_soap = http_soap

        # Namespaces
        query = self.odb.get_namespace_list(server.cluster.id, True)
        self.config.msg_ns = ConfigDict.from_query('msg_ns', query, decrypt_func=self.decrypt)

        # XPath
        query = self.odb.get_xpath_list(server.cluster.id, True)
        self.config.xpath = ConfigDict.from_query('msg_xpath', query, decrypt_func=self.decrypt)

        # JSON Pointer
        query = self.odb.get_json_pointer_list(server.cluster.id, True)
        self.config.json_pointer = ConfigDict.from_query('json_pointer', query, decrypt_func=self.decrypt)

        # SimpleIO
        # In preparation for a SIO rewrite, SIO config is loaded from a file,
        # but the actual code paths still require the pre-3.0 format, so it is prepared here.
        self.config.simple_io = ConfigDict('simple_io', Bunch())
        self.config.simple_io['int_parameters'] = self.sio_config.int.exact
        self.config.simple_io['int_parameter_suffixes'] = self.sio_config.int.suffix
        self.config.simple_io['bool_parameter_prefixes'] = self.sio_config.bool.prefix

        # Pub/sub
        self.config.pubsub = Bunch()

        # Pub/sub - endpoints
        query = self.odb.get_pubsub_endpoint_list(server.cluster.id, True)
        self.config.pubsub_endpoint = ConfigDict.from_query('pubsub_endpoint', query, decrypt_func=self.decrypt)

        # Pub/sub - topics
        query = self.odb.get_pubsub_topic_list(server.cluster.id, True)
        self.config.pubsub_topic = ConfigDict.from_query('pubsub_topic', query, decrypt_func=self.decrypt)

        # Pub/sub - subscriptions
        query = self.odb.get_pubsub_subscription_list(server.cluster.id, True)
        self.config.pubsub_subscription = ConfigDict.from_query('pubsub_subscription', query, decrypt_func=self.decrypt)

        # E-mail - SMTP
        query = self.odb.get_email_smtp_list(server.cluster.id, True)
        self.config.email_smtp = ConfigDict.from_query('email_smtp', query, decrypt_func=self.decrypt)

        # E-mail - IMAP
        query = self.odb.get_email_imap_list(server.cluster.id, True)
        self.config.email_imap = ConfigDict.from_query('email_imap', query, decrypt_func=self.decrypt)

        # Message paths
        self.config.msg_ns_store = NamespaceStore()
        self.config.json_pointer_store = JSONPointerStore()
        self.config.xpath_store = XPathStore()

        # Assign config to worker
        self.worker_store.worker_config = self.config
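
The channel loop above joins each SOAP action and URL path into a single match target and compiles it with Matcher. A simplified sketch of such a matcher, assuming MISC.SEPARATOR is the ':::' marker and {name} placeholders in URL paths become named regex groups (the real Matcher also honours HTTP methods and the match_slash flag, as Example #25 shows):

    import re

    class Matcher(object):
        """ Compiles a match target such as ':::/customer/{cid}' into a regex,
        turning each {placeholder} into a named group.
        """
        def __init__(self, pattern):
            self.pattern = pattern
            regex = re.sub(r'\{(\w+)\}', r'(?P<\g<1>>[^/]+)', pattern)
            self.compiled = re.compile('^{}$'.format(regex))

        def match(self, value):
            m = self.compiled.match(value)
            return m.groupdict() if m else None

    # An empty SOAP action followed by the separator and URL path
    matcher = Matcher(':::/customer/{cid}')
    print(matcher.match(':::/customer/123'))  # {'cid': '123'}
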
Example #27
    def copy(self):
        """ Creates a copy of this ConfigStore. All configuration data is copied
        over except for SQL connections.
        """
        config_store = ConfigStore()

        # Grab all ConfigDicts - even if they're actually ZATO_NONE - and make their copies
        for attr_name in dir(self):
            attr = getattr(self, attr_name)
            if isinstance(attr, ConfigDict):
                copy_func = getattr(attr, 'copy')
                setattr(config_store, attr_name, copy_func())
            elif attr is ZATO_NONE:
                setattr(config_store, attr_name, ZATO_NONE)

        http_soap = MultiDict()
        dict_of_lists = self.http_soap.dict_of_lists()
        for url_path, lists in dict_of_lists.items():
            _info = Bunch()
            for elem in lists:
                for soap_action, item in elem.items():
                    _info[soap_action] = Bunch()
                    _info[soap_action].id = item.id
                    _info[soap_action].name = item.name
                    _info[soap_action].is_active = item.is_active
                    _info[soap_action].is_internal = item.is_internal
                    _info[soap_action].url_path = item.url_path
                    _info[soap_action].method = item.method
                    _info[soap_action].soap_version = item.soap_version
                    _info[soap_action].service_id = item.service_id
                    _info[soap_action].service_name = item.service_name
                    _info[soap_action].impl_name = item.impl_name
                    _info[soap_action].transport = item.transport
                    _info[soap_action].connection = item.connection
            http_soap.add(url_path, _info)

        config_store.http_soap = http_soap
        config_store.url_sec = self.url_sec
        config_store.broker_config = self.broker_config
        config_store.odb_data = deepcopy(self.odb_data)

        return config_store
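
copy() above rebuilds http_soap as a MultiDict so that one url_path can carry several entries distinguished by SOAP action. A small demonstration of the dict_of_lists() round-trip, using webob's MultiDict as a stand-in for whichever MultiDict implementation the server actually imports:

    from webob.multidict import MultiDict

    md = MultiDict()
    md.add('/customer', {'urn:get': 'channel-1'})
    md.add('/customer', {'urn:set': 'channel-2'})

    # One url_path maps to a list of per-SOAP-action entries
    print(md.dict_of_lists())
    # {'/customer': [{'urn:get': 'channel-1'}, {'urn:set': 'channel-2'}]}
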
Example #28
    def start_server(parallel_server, zato_deployment_key=None):

        # Easier to type
        self = parallel_server

        # This cannot be done in __init__ because each sub-process obviously has its own PID
        self.pid = os.getpid()

        # Used later on
        use_tls = asbool(self.fs_server_config.crypto.use_tls)

        # Will be None if we are not running in the background.
        if not zato_deployment_key:
            zato_deployment_key = '{}.{}'.format(datetime.utcnow().isoformat(),
                                                 uuid4().hex)

        self.deployment_key = zato_deployment_key

        register_diag_handlers()

        # Store the ODB configuration, create an ODB connection pool and have self.odb use it
        self.config.odb_data = self.get_config_odb_data(self)
        self.set_odb_pool()

        # Now try grabbing the basic server's data from the ODB. No point
        # in doing anything else if we can't get past this point.
        server = self.odb.fetch_server(self.config.odb_data)

        if not server:
            raise Exception('Server does not exist in the ODB')

        # Set up the server-wide default lock manager
        odb_data = self.config.odb_data
        backend_type = 'fcntl' if odb_data.engine == 'sqlite' else odb_data.engine
        self.zato_lock_manager = LockManager(backend_type, 'zato',
                                             self.odb.session)

        # Just to make sure distributed locking is configured correctly
        with self.zato_lock_manager(uuid4().hex):
            pass

        # Basic metadata
        self.id = server.id
        self.name = server.name
        self.cluster_id = server.cluster_id
        self.cluster = self.odb.cluster

        # Looked up upfront here and assigned to services in their store
        self.enforce_service_invokes = asbool(
            self.fs_server_config.misc.enforce_service_invokes)

        # For server-to-server communication
        self.servers = Servers(self.odb, self.cluster.name)
        logger.info(
            'Preferred address of `%s@%s` (pid: %s) is `http%s://%s:%s`',
            self.name, self.cluster.name, self.pid, 's' if use_tls else '',
            self.preferred_address, self.port)

        # Reads in all configuration from ODB
        self.worker_store = WorkerStore(self.config, self)
        self.worker_store.invoke_matcher.read_config(
            self.fs_server_config.invoke_patterns_allowed)
        self.worker_store.target_matcher.read_config(
            self.fs_server_config.invoke_target_patterns_allowed)
        self.set_up_config(server)

        # Deploys services
        is_first, locally_deployed = self._after_init_common(server)

        # Initializes worker store, including connectors
        self.worker_store.init()
        self.request_dispatcher_dispatch = self.worker_store.request_dispatcher.dispatch

        # Normalize hot-deploy configuration
        self.hot_deploy_config = Bunch()

        self.hot_deploy_config.work_dir = os.path.normpath(
            os.path.join(self.repo_location,
                         self.fs_server_config.hot_deploy.work_dir))

        self.hot_deploy_config.backup_history = int(
            self.fs_server_config.hot_deploy.backup_history)
        self.hot_deploy_config.backup_format = self.fs_server_config.hot_deploy.backup_format

        for name in ('current_work_dir', 'backup_work_dir',
                     'last_backup_work_dir', 'delete_after_pick_up'):

            # New in 2.0
            if name == 'delete_after_pick_up':
                value = asbool(self.fs_server_config.hot_deploy.get(
                    name, True))
                self.hot_deploy_config[name] = value
            else:
                self.hot_deploy_config[name] = os.path.normpath(
                    os.path.join(self.hot_deploy_config.work_dir,
                                 self.fs_server_config.hot_deploy[name]))

        self._after_init_accepted(locally_deployed)

        broker_callbacks = {
            TOPICS[MESSAGE_TYPE.TO_PARALLEL_ANY]:
            self.worker_store.on_broker_msg,
            TOPICS[MESSAGE_TYPE.TO_PARALLEL_ALL]:
            self.worker_store.on_broker_msg,
        }

        self.broker_client = BrokerClient(self.kvdb, 'parallel',
                                          broker_callbacks,
                                          self.get_lua_programs())
        self.worker_store.set_broker_client(self.broker_client)

        self.odb.server_up_down(server.token, SERVER_UP_STATUS.RUNNING, True,
                                self.host, self.port, self.preferred_address,
                                use_tls)

        # Startup services
        if is_first:
            self.invoke_startup_services(is_first)
            spawn_greenlet(self.set_up_pickup)

        # IPC
        if is_first:
            self.ipc_forwarder.name = self.name
            self.ipc_forwarder.pid = self.pid
            spawn_greenlet(self.ipc_forwarder.run)

        # IPC
        self.ipc_api.name = self.name
        self.ipc_api.pid = self.pid
        self.ipc_api.on_message_callback = self.worker_store.on_ipc_message
        spawn_greenlet(self.ipc_api.run)

        logger.info('Started `%s@%s` (pid: %s)', server.name,
                    server.cluster.name, self.pid)
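
The with-block above is only a smoke test - it acquires and releases a throwaway lock to confirm that distributed locking is configured correctly. As an illustration of the 'fcntl' backend idea chosen for SQLite (not Zato's actual LockManager), a file-based lock manager could look like this:

    import fcntl
    import os
    from contextlib import contextmanager
    from uuid import uuid4

    class FcntlLockManager(object):
        """ File-based exclusive locks - adequate on a single host, which is
        why the server selects this backend only when the ODB is SQLite.
        """
        def __init__(self, namespace, lock_dir='/tmp'):
            self.namespace = namespace
            self.lock_dir = lock_dir

        @contextmanager
        def __call__(self, name):
            path = os.path.join(self.lock_dir, '{}-{}.lock'.format(self.namespace, name))
            f = open(path, 'w')
            try:
                fcntl.flock(f, fcntl.LOCK_EX)
                yield
            finally:
                fcntl.flock(f, fcntl.LOCK_UN)
                f.close()

    # The same kind of smoke test as in start_server above
    lock_manager = FcntlLockManager('zato')
    with lock_manager(uuid4().hex):
        pass
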
Example #29
    def __init__(self):
        self.host = None
        self.port = None
        self.crypto_manager = None
        self.odb = None
        self.odb_data = None
        self.config = None
        self.repo_location = None
        self.user_conf_location = None
        self.sql_pool_store = None
        self.int_parameters = None
        self.int_parameter_suffixes = None
        self.bool_parameter_prefixes = None
        self.soap11_content_type = None
        self.soap12_content_type = None
        self.plain_xml_content_type = None
        self.json_content_type = None
        self.internal_service_modules = None  # Zato's own internal services
        self.service_modules = None  # Set programmatically in Spring
        self.service_sources = None  # Set in a config file
        self.base_dir = None
        self.tls_dir = None
        self.hot_deploy_config = None
        self.pickup = None
        self.fs_server_config = None
        self.pickup_config = None
        self.connector_server_grace_time = None
        self.id = None
        self.name = None
        self.cluster = None
        self.cluster_id = None
        self.kvdb = None
        self.startup_jobs = None
        self.worker_store = None
        self.request_dispatcher_dispatch = None
        self.deployment_lock_expires = None
        self.deployment_lock_timeout = None
        self.deployment_key = ''
        self.app_context = None
        self.has_gevent = None
        self.delivery_store = None
        self.static_config = None
        self.component_enabled = Bunch()
        self.client_address_headers = [
            'HTTP_X_ZATO_FORWARDED_FOR', 'HTTP_X_FORWARDED_FOR', 'REMOTE_ADDR'
        ]
        self.broker_client = None
        self.return_tracebacks = None
        self.default_error_message = None
        self.time_util = None
        self.preferred_address = None
        self.crypto_use_tls = None
        self.servers = None
        self.zato_lock_manager = None
        self.pid = None
        self.sync_internal = None
        self.ipc_api = IPCAPI(False)
        self.ipc_forwarder = IPCAPI(True)
        self.fifo_response_buffer_size = 0.1  # In megabytes
        self.live_msg_browser = None

        # Allows users to store arbitrary data across service invocations
        self.user_ctx = Bunch()
        self.user_ctx_lock = gevent.lock.RLock()

        self.access_logger = logging.getLogger('zato_access_log')
        self.access_logger_log = self.access_logger._log
        self.needs_access_log = self.access_logger.isEnabledFor(INFO)

        # The main config store
        self.config = ConfigStore()

        gevent.signal(signal.SIGINT, self.destroy)
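
user_ctx above pairs a Bunch with a gevent RLock so that services can share arbitrary state across invocations without racing. A self-contained usage sketch of the same pairing:

    from bunch import Bunch
    import gevent.lock

    user_ctx = Bunch()
    user_ctx_lock = gevent.lock.RLock()

    # Read-modify-write under the lock so concurrent greenlets do not race
    with user_ctx_lock:
        user_ctx.counter = user_ctx.get('counter', 0) + 1
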
Example #30
    def get_config_odb_data(self, parallel_server):
        """ Returns configuration with regards to ODB data.
        """
        odb_data = Bunch()
        odb_data.db_name = parallel_server.odb_data['db_name']
        odb_data.extra = parallel_server.odb_data['extra']
        odb_data.engine = parallel_server.odb_data['engine']
        odb_data.token = parallel_server.fs_server_config.main.token

        odb_data.is_odb = True

        if odb_data.engine != 'sqlite':
            odb_data.password = parallel_server.odb_data['password']
            odb_data.host = parallel_server.odb_data['host']
            odb_data.port = parallel_server.odb_data['port']
            odb_data.engine = parallel_server.odb_data['engine']
            odb_data.pool_size = parallel_server.odb_data['pool_size']
            odb_data.username = parallel_server.odb_data['username']

        # Note that is_active is not read from anywhere - the ODB must always
        # be active and it is not a regular connection pool anyway.
        odb_data.is_active = True

        return odb_data
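
The odb_data returned above carries just enough to build an SQLAlchemy connection URL. A hedged sketch of such a conversion - the engine names and the SQLite special case are assumptions for illustration:

    def get_sqlalchemy_url(odb_data):
        """ Builds an SQLAlchemy connection URL out of ODB config. """
        if odb_data['engine'] == 'sqlite':
            return 'sqlite:///{}'.format(odb_data['db_name'])
        return '{engine}://{username}:{password}@{host}:{port}/{db_name}'.format(**odb_data)

    url = get_sqlalchemy_url({'engine': 'postgresql', 'username': 'zato', 'password': 'secret',
                              'host': 'localhost', 'port': 5432, 'db_name': 'zato_odb'})
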
Example #31
    def become_cluster_wide(self, connector_server_keep_alive_job_time, connector_server_grace_time,
            server_id, cluster_id, starting_up):
        """ Attempts to become a connector server, the one to start the connector
        processes.
        """
        base_job_data = Bunch({
            'id': 0, # Dummy ID
            'weeks': None, 'days': None,
            'hours': None, 'minutes': None,
            'seconds': connector_server_keep_alive_job_time,
            'repeats': None,
            'extra': 'server_id:{};cluster_id:{}'.format(server_id, cluster_id),
        })
        job_data = None

        if self.parallel_server.odb.become_cluster_wide(connector_server_grace_time):
            self.is_cluster_wide = True

            # Schedule a job for letting the other servers know we're still alive
            job_data = Bunch(base_job_data.copy())
            job_data.start_date = datetime.utcnow()
            job_data.name = 'zato.server.cluster-wide-singleton-keep-alive'
            job_data.service = 'zato.server.cluster-wide-singleton-keep-alive'

        else:
            # All other singleton servers that are just starting up get this job
            # for checking whether the connector server is alive or not
            if starting_up:
                job_data = Bunch(base_job_data.copy())
                job_data.start_date = datetime.utcnow() + timedelta(seconds=10) # Let's give the other server some time to warm up
                job_data.name = ENSURE_SINGLETON_JOB
                job_data.service = 'zato.server.ensure-cluster-wide-singleton'

        if job_data:
            self.scheduler.create_interval_based(job_data, MESSAGE_TYPE.TO_AMQP_PUBLISHING_CONNECTOR_ALL)

        return self.is_cluster_wide
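
The keep-alive job above smuggles its owner's identity through the free-form extra field. A small hypothetical helper for parsing it back on the receiving side (the actual service does its own parsing):

    def parse_extra(extra):
        """ Turns 'server_id:1;cluster_id:2' back into a dict of ints. """
        out = {}
        for pair in extra.split(';'):
            key, _, value = pair.partition(':')
            out[key] = int(value)
        return out

    assert parse_extra('server_id:1;cluster_id:2') == {'server_id': 1, 'cluster_id': 2}
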