def get_service_table_line(self, idx, name, docs, sio):
    name_fs_safe = 'service_{}'.format(fs_safe_name(name))
    file_name = '{}.rst'.format(name_fs_safe)

    summary = docs.summary
    if summary:
        summary = self._make_sphinx_safe(summary)

    return bunchify({
        'ns': str(idx),
        'orig_name': name,
        'sphinx_name': name.replace('_', '\_'), # Needed for Sphinx to ignore underscores
        'name': name_fs_safe,
        'name_link': """:doc:`{} <./{}>`""".format(name, name_fs_safe),
        'file_name': file_name,
        'description': summary or no_value,
        'docs': docs,
        'sio': sio
    })
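# The functions in this section repeatedly call fs_safe_name() to turn arbitrary
# names into identifiers that are safe to use in file names, dictionary keys and
# URL paths. A minimal sketch of such a helper is shown below for reference only -
# the exact character set and replacement rules of the real function are an
# assumption, not its actual implementation.
import re
import string

def fs_safe_name_sketch(value):
    # Replace punctuation and whitespace with underscores so the result can be
    # used safely as a file name or as part of a URL.
    return re.sub('[{}]'.format(re.escape(string.punctuation + string.whitespace)), '_', value)

# Example (under the assumptions above): fs_safe_name_sketch('my.service') -> 'my_service'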
def handle_return_data(self, return_data):
    for item in return_data['items']:
        item.id = fs_safe_name('{}-{}'.format(item.name, item.pid))

        if item.last_gd_run:
            item.last_gd_run_utc = item.last_gd_run
            item.last_gd_run = from_utc_to_user(item.last_gd_run_utc + '+00:00', self.req.zato.user_profile)

        if item.last_task_run:
            item.last_task_run_utc = item.last_task_run
            item.last_task_run = from_utc_to_user(item.last_task_run_utc + '+00:00', self.req.zato.user_profile)

    return return_data
def handle_return_data(self, return_data):
    for item in return_data['items']:
        item.id = fs_safe_name('{}-{}'.format(item.thread_id, item.object_id))

        if item.last_gd_run:
            item.last_gd_run_utc = item.last_gd_run
            item.last_gd_run = from_utc_to_user(item.last_gd_run_utc + '+00:00', self.req.zato.user_profile)

        if item.last_delivery:
            item.last_delivery_utc = item.last_delivery
            item.last_delivery = from_utc_to_user(item.last_delivery_utc + '+00:00', self.req.zato.user_profile)

    return return_data
def handle_return_data(self, return_data):
    for item in return_data['items']:
        item.id = fs_safe_name(item.py_object)

        if item.last_sync:
            item.last_sync_utc = item.last_sync
            item.last_sync = from_utc_to_user(item.last_sync_utc + '+00:00', self.req.zato.user_profile)

        if item.last_sync_sk:
            item.last_sync_sk_utc = item.last_sync_sk
            item.last_sync_sk = from_utc_to_user(item.last_sync_sk_utc + '+00:00', self.req.zato.user_profile)

        if item.last_iter_run:
            item.last_iter_run_utc = item.last_iter_run
            item.last_iter_run = from_utc_to_user(item.last_iter_run_utc + '+00:00', self.req.zato.user_profile)

    return return_data
def start_server(parallel_server, zato_deployment_key=None):

    # Easier to type
    self = parallel_server

    # This cannot be done in __init__ because each sub-process obviously has its own PID
    self.pid = os.getpid()

    # This also cannot be done in __init__ which doesn't have this variable yet
    self.is_first_worker = int(os.environ['ZATO_SERVER_WORKER_IDX']) == 0

    # Used later on
    use_tls = asbool(self.fs_server_config.crypto.use_tls)

    # Will be None if we are not running in background.
    if not zato_deployment_key:
        zato_deployment_key = '{}.{}'.format(datetime.utcnow().isoformat(), uuid4().hex)

    self.deployment_key = zato_deployment_key

    register_diag_handlers()

    # Create all POSIX IPC objects now that we have the deployment key
    self.shmem_size = int(float(self.fs_server_config.shmem.size) * 10**6) # Convert from MB to bytes, as an integer
    self.server_startup_ipc.create(self.deployment_key, self.shmem_size)

    # Store the ODB configuration, create an ODB connection pool and have self.odb use it
    self.config.odb_data = self.get_config_odb_data(self)
    self.set_up_odb()

    # Now try grabbing the basic server's data from the ODB. No point
    # in doing anything else if we can't get past this point.
    server = self.odb.fetch_server(self.config.odb_data)

    if not server:
        raise Exception('Server does not exist in the ODB')

    # Set up the server-wide default lock manager
    odb_data = self.config.odb_data
    backend_type = 'fcntl' if odb_data.engine == 'sqlite' else odb_data.engine
    self.zato_lock_manager = LockManager(backend_type, 'zato', self.odb.session)

    # Just to make sure distributed locking is configured correctly
    with self.zato_lock_manager(uuid4().hex):
        pass

    # Basic metadata
    self.id = server.id
    self.name = server.name
    self.cluster_id = server.cluster_id
    self.cluster = self.odb.cluster
    self.worker_id = '{}.{}.{}.{}'.format(self.cluster_id, self.id, self.worker_pid, new_cid())

    # Looked up upfront here and assigned to services in their store
    self.enforce_service_invokes = asbool(self.fs_server_config.misc.enforce_service_invokes)

    # For server-to-server communication
    self.servers = Servers(self.odb, self.cluster.name, self.decrypt)

    logger.info('Preferred address of `%s@%s` (pid: %s) is `http%s://%s:%s`',
        self.name, self.cluster.name, self.pid, 's' if use_tls else '', self.preferred_address, self.port)

    # Reads in all configuration from ODB
    self.worker_store = WorkerStore(self.config, self)
    self.worker_store.invoke_matcher.read_config(self.fs_server_config.invoke_patterns_allowed)
    self.worker_store.target_matcher.read_config(self.fs_server_config.invoke_target_patterns_allowed)
    self.set_up_config(server)

    # Deploys services
    is_first, locally_deployed = self._after_init_common(server)

    # Initializes worker store, including connectors
    self.worker_store.init()
    self.request_dispatcher_dispatch = self.worker_store.request_dispatcher.dispatch

    # Normalize hot-deploy configuration
    self.hot_deploy_config = Bunch()
    self.hot_deploy_config.work_dir = os.path.normpath(os.path.join(
        self.repo_location, self.fs_server_config.hot_deploy.work_dir))
    self.hot_deploy_config.backup_history = int(self.fs_server_config.hot_deploy.backup_history)
    self.hot_deploy_config.backup_format = self.fs_server_config.hot_deploy.backup_format

    # Configure remaining parts of SSO
    self.configure_sso()

    # Cannot be done in __init__ because self.sso_config is not available there yet
    salt_size = self.sso_config.hash_secret.salt_size
    self.crypto_manager.add_hash_scheme('zato.default', self.sso_config.hash_secret.rounds, salt_size)

    for name in ('current_work_dir', 'backup_work_dir', 'last_backup_work_dir', 'delete_after_pick_up'):

        # New in 2.0
        if name == 'delete_after_pick_up':
            value = asbool(self.fs_server_config.hot_deploy.get(name, True))
            self.hot_deploy_config[name] = value
        else:
            self.hot_deploy_config[name] = os.path.normpath(os.path.join(
                self.hot_deploy_config.work_dir, self.fs_server_config.hot_deploy[name]))

    broker_callbacks = {
        TOPICS[MESSAGE_TYPE.TO_PARALLEL_ANY]: self.worker_store.on_broker_msg,
        TOPICS[MESSAGE_TYPE.TO_PARALLEL_ALL]: self.worker_store.on_broker_msg,
    }

    self.broker_client = BrokerClient(self.kvdb, 'parallel', broker_callbacks, self.get_lua_programs())
    self.worker_store.set_broker_client(self.broker_client)

    self._after_init_accepted(locally_deployed)

    self.odb.server_up_down(
        server.token, SERVER_UP_STATUS.RUNNING, True, self.host, self.port, self.preferred_address, use_tls)

    if is_first:

        logger.info('First worker of `%s` is %s', self.name, self.pid)

        self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IN_PROCESS_FIRST, kwargs={
            'parallel_server': self,
        })

        # Startup services
        self.invoke_startup_services(is_first)
        spawn_greenlet(self.set_up_pickup)

        # IPC
        ipc_forwarder_name = '{}-{}'.format(self.cluster.name, self.name)
        ipc_forwarder_name = fs_safe_name(ipc_forwarder_name)
        self.ipc_forwarder.name = ipc_forwarder_name
        self.ipc_forwarder.pid = self.pid
        spawn_greenlet(self.ipc_forwarder.run)

        # Set up IBM MQ connections if that component is enabled
        if self.fs_server_config.component_enabled.ibm_mq:

            # Will block for a few seconds at most, until is_ok is returned,
            # which indicates whether the connector started or not.
            is_ok = self.start_ibm_mq_connector(int(self.fs_server_config.ibm_mq.ipc_tcp_start_port))

            if is_ok:
                self.create_initial_wmq_definitions(self.worker_store.worker_config.definition_wmq)
                self.create_initial_wmq_outconns(self.worker_store.worker_config.out_wmq)
                self.create_initial_wmq_channels(self.worker_store.worker_config.channel_wmq)

    else:
        self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IN_PROCESS_OTHER, kwargs={
            'parallel_server': self,
        })

    # IPC
    self.ipc_api.name = self.name
    self.ipc_api.pid = self.pid
    self.ipc_api.on_message_callback = self.worker_store.on_ipc_message
    spawn_greenlet(self.ipc_api.run)

    self.startup_callable_tool.invoke(SERVER_STARTUP.PHASE.AFTER_STARTED, kwargs={
        'parallel_server': self,
    })

    logger.info('Started `%s@%s` (pid: %s)', server.name, server.cluster.name, self.pid)
def _get_response_name(self, service_name):
    # Note: a plain (unicode) string is returned here, matching _get_request_name -
    # the original bytes literal had no .format method under Python 3.
    return 'response_{}'.format(fs_safe_name(service_name))
def get_endpoint_name(cluster_name, server_name, target_pid):
    return fs_safe_name('{}-{}-{}'.format(cluster_name, server_name, target_pid))
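# A hypothetical usage example of get_endpoint_name() - the input values below are
# made up for illustration. The function joins its arguments with dashes and passes
# the result through fs_safe_name().
endpoint_name = get_endpoint_name('cluster1', 'server1', 4017)
# With an fs_safe_name() along the lines of the sketch earlier in this section,
# the result would be 'cluster1_server1_4017'.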
def test_generate_open_api(self):

    service_store_services = {
        'my.impl.name': {
            'name': service_name,
            'service_class': MyService,
        }
    }
    include = ['*']
    exclude = []
    query = ''
    tags = 'public'

    generator = Generator(service_store_services, sio_config, include, exclude, query, tags, needs_sio_desc=False)

    info = generator.get_info()
    info = bunchify(info)

    channel_data = [{
        'service_name': service_name,
        'transport': URL_TYPE.PLAIN_HTTP,
        'url_path': '/test/{phone_number}',
        'match_target_compiled': _MatchTestCompiled()
    }]
    needs_api_invoke = True
    needs_rest_channels = True
    api_invoke_path = APISPEC.GENERIC_INVOKE_PATH

    open_api_generator = OpenAPIGenerator(info, channel_data, needs_api_invoke, needs_rest_channels, api_invoke_path)

    result = open_api_generator.generate()
    result = yaml_load(result)
    result = bunchify(result)

    result_components = result.components # type: Bunch
    result_info = result.info # type: Bunch
    result_openapi = result.openapi # type: Bunch
    result_paths = result.paths # type: Bunch
    result_servers = result.servers # type: Bunch

    localhost = result_servers[0]
    self.assertEqual(localhost.url, 'http://localhost:11223')

    self.assertEqual(result_info.title, 'API spec')
    self.assertEqual(result_info.version, '1.0')
    self.assertEqual(result_openapi, '3.0.2')

    self.assertEqual(len(result_components.schemas), 2)

    request_my_service_properties = result_components.schemas.request_my_service.properties
    request_my_service_required = result_components.schemas.request_my_service.required
    request_my_service_title = result_components.schemas.request_my_service.title
    request_my_service_type = result_components.schemas.request_my_service.type

    response_my_service_required = result_components.schemas.response_my_service.required
    response_my_service_title = result_components.schemas.response_my_service.title
    response_my_service_type = result_components.schemas.response_my_service.type

    self.assertEqual(request_my_service_title, 'Request object for my.service')
    self.assertEqual(response_my_service_title, 'Response object for my.service')

    self.assertEqual(request_my_service_type, 'object')
    self.assertEqual(response_my_service_type, 'object')

    self.assertListEqual(sorted(request_my_service_required), ['input_req_customer_id', 'input_req_user_id'])
    self.assertListEqual(sorted(response_my_service_required), ['output_req_address_id', 'output_req_address_name'])

    self.assertEqual(request_my_service_properties.input_req_user_id.type, 'integer')
    self.assertEqual(request_my_service_properties.input_req_user_id.format, 'int32')
    self.assertEqual(request_my_service_properties.input_req_user_id.description,
        'This is the first line.\nHere is another.\nAnd here are some more lines.')

    self.assertEqual(request_my_service_properties.input_req_customer_id.type, 'integer')
    self.assertEqual(request_my_service_properties.input_req_customer_id.format, 'int32')
    self.assertEqual(request_my_service_properties.input_req_customer_id.description, '')

    self.assertEqual(request_my_service_properties.input_opt_user_name.type, 'string')
    self.assertEqual(request_my_service_properties.input_opt_user_name.format, 'string')
    self.assertEqual(request_my_service_properties.input_opt_user_name.description, 'b111')

    self.assertEqual(request_my_service_properties.input_opt_customer_name.type, 'string')
    self.assertEqual(request_my_service_properties.input_opt_customer_name.format, 'string')
    self.assertEqual(request_my_service_properties.input_opt_customer_name.description, '')

    self.assertEqual(len(result_paths), 2)

    for url_path in ['/test/{phone_number}', '/zato/api/invoke/my.service']:

        my_service_path = result_paths[url_path] # type: Bunch
        post = my_service_path.post

        self.assertListEqual(post.consumes, ['application/json'])
        self.assertEqual(post.operationId, 'post_{}'.format(fs_safe_name(url_path)))
        self.assertTrue(post.requestBody.required)

        self.assertEqual(
            post.requestBody.content['application/json'].schema['$ref'],
            '#/components/schemas/request_my_service')
        self.assertEqual(
            post.responses['200'].content['application/json'].schema['$ref'],
            '#/components/schemas/response_my_service')
def _get_request_name(self, service_name):
    return 'request_{}'.format(fs_safe_name(service_name))
def generate(self):

    # Basic information, always available
    out = Bunch()
    out.openapi = '3.0.2'
    out.info = {
        'title': 'API spec',
        'version': '1.0',
    }
    out.servers = [{'url': 'http://localhost:11223'}]

    # Responses to refer to in paths
    out.components = Bunch()
    out.components.schemas = Bunch()

    # REST paths
    out.paths = Bunch()

    # Schemas for all services - it is possible that not all of them will be output,
    # for instance, if a service is not exposed through any REST channel.
    request_schemas = self._get_message_schemas(self.data, True)
    response_schemas = self._get_message_schemas(self.data, False)

    schemas = {}
    schemas.update(request_schemas)
    schemas.update(response_schemas)

    out.components.schemas.update(schemas)

    for item in self.data.services:

        # Container for all the URL paths found for this item (service)
        url_paths = []

        # Parameters carried in URL paths, e.g. /user/{username}/{lang_code},
        # all of them will be treated as required and all of them will be string ones.
        channel_params = []

        # Now, collect all the paths that the spec will contain ..

        # .. generic API invoker, e.g. /zato/api/invoke/{service_name} ..
        if self.needs_api_invoke and self.api_invoke_path:
            for path in self.api_invoke_path:
                url_paths.append(path.format(service_name=item.name))

        # .. per-service specific REST channels.
        if self.needs_rest_channels:
            rest_channel = self.get_rest_channel(item.name)

            if rest_channel:

                # This is always needed, whether path parameters exist or not
                url_paths.append(rest_channel.url_path)

                # Path parameters
                group_names = rest_channel.match_target_compiled.group_names
                if group_names:

                    # Populate details of path parameters
                    for channel_param_name in sorted(group_names): # type: str
                        channel_params.append({
                            'name': channel_param_name,
                            'description': '',
                            'in': 'path',
                            'required': True,
                            'schema': {
                                'type': 'string',
                                'format': 'string',
                            }
                        })

        # Translate the service name into a normalised form
        service_name_fs = fs_safe_name(item.name)

        for url_path in url_paths:

            out_path = out.paths.setdefault(url_path, Bunch()) # type: Bunch
            post = out_path.setdefault('post', Bunch()) # type: Bunch

            operation_id = 'post_{}'.format(fs_safe_name(url_path))
            consumes = ['application/json']

            request_ref = '#/components/schemas/{}'.format(self._get_request_name(service_name_fs))
            response_ref = '#/components/schemas/{}'.format(self._get_response_name(service_name_fs))

            request_body = Bunch()
            request_body.required = True
            request_body.content = Bunch()
            request_body.content['application/json'] = Bunch()
            request_body.content['application/json'].schema = Bunch()
            request_body.content['application/json'].schema['$ref'] = request_ref

            responses = Bunch()
            responses['200'] = Bunch()
            responses['200'].content = Bunch()
            responses['200'].content['application/json'] = Bunch()
            responses['200'].content['application/json'].schema = Bunch()
            responses['200'].content['application/json'].schema['$ref'] = response_ref

            post['operationId'] = operation_id
            post['consumes'] = consumes
            post['requestBody'] = request_body
            post['responses'] = responses

            if channel_params:
                post['parameters'] = channel_params

    return yaml_dump(out.toDict(), Dumper=YAMLDumper, default_flow_style=False)