def _test():
    backend = _FakeBackend()

    server_transport = RedisServerTransport(
        'threaded',
        NoOpMetricsRecorder(),
        backend_type=REDIS_BACKEND_TYPE_STANDARD,
    )
    server_transport.core._backend_layer = backend

    client_transport = RedisClientTransport(
        'threaded',
        NoOpMetricsRecorder(),
        backend_type=REDIS_BACKEND_TYPE_STANDARD,
    )
    client_transport.core._backend_layer = backend

    server = _FakeEchoingServer(server_transport)
    client1 = _FakeClient('client-1', client_transport, {'key1': 'value1'}, 1.0)
    client2 = _FakeClient('client-2', client_transport, {'key2': 'value2'}, 0.25)

    server.start()
    client1.start()
    client2.start()

    client1.join(timeout=2)
    client2.join(timeout=2)

    server.shutdown()
    server.join(timeout=2)

    return server, client1, client2
def __init__(self, service_name='test', metrics=None, action_map=None):
    """
    Configure a StubServer to handle requests. Creates a new subclass of StubServer using the service name and
    action mapping provided.

    Args:
        service_name: string
        metrics: an optional MetricsRecorder (defaults to NoOpMetricsRecorder)
        action_map: dict of {action_name: {'body': action_body, 'errors': action_errors}}, where action_body
            is a dict and action_errors is a list
    """
    action_map = action_map or {}

    # Build the action_class_map property for the new Server class
    action_class_map = {
        name: _make_stub_action(name, a.get('body', {}), a.get('errors', []))
        for name, a in action_map.items()
    }

    # Create the new Server subclass
    server_class_name = ''.join([
        part.capitalize() for part in re.split(r'[^a-zA-Z0-9]+', service_name)
    ]) + 'Server'
    server_class = type(
        str(server_class_name),
        (StubServer,),
        dict(service_name=service_name, action_class_map=action_class_map),
    )

    super(StubClientTransport, self).__init__(service_name, metrics or NoOpMetricsRecorder(), server_class, {})
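# Usage sketch (a hedged example, not part of the library): building a stub client transport whose generated
# StubServer subclass answers one action with a canned body. The action name and body below are illustrative only.
def _example_stub_client_transport():
    transport = StubClientTransport(
        service_name='example service',
        action_map={'get_user': {'body': {'user_id': 1}, 'errors': []}},
    )
    # re.split(r'[^a-zA-Z0-9]+', 'example service') yields ['example', 'service'], so the dynamically created
    # server class is named 'ExampleServiceServer'.
    return transport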
def __init__(self, service_name, metrics=NoOpMetricsRecorder()):
    """
    :param service_name: The name of the service for which this transport will receive requests and send
        responses
    :type service_name: union[str, unicode]
    :param metrics: The optional metrics recorder
    :type metrics: MetricsRecorder
    """
    self.service_name = service_name
    self.metrics = metrics
@attr.s
class Http2ServerTransportCore(object):
    backend_type = attr.ib(validator=valid_backend_type)

    backend_layer_kwargs = attr.ib(
        # Keyword args for the backend layer (Twisted and hyper-h2 modes)
        default={},
        validator=attr.validators.instance_of(dict),
    )

    log_messages_larger_than_bytes = attr.ib(
        default=DEFAULT_MAXIMUM_MESSAGE_BYTES_CLIENT,
        converter=int,
    )

    message_expiry_in_seconds = attr.ib(
        # How long after a message is sent before it's considered "expired" and not received by default, unless
        # overridden in the send_message argument `message_expiry_in_seconds`
        default=60,
        converter=int,
    )

    metrics = attr.ib(
        default=NoOpMetricsRecorder(),
        validator=attr.validators.instance_of(MetricsRecorder),
    )

    metrics_prefix = attr.ib(
        default='',
        validator=attr.validators.instance_of(six.text_type),
    )

    queue_capacity = attr.ib(
        # The capacity for queues to which messages are sent
        default=10000,
        converter=int,
    )

    queue_full_retries = attr.ib(
        # Number of times to retry when the send queue is full
        default=10,
        converter=int,
    )

    receive_timeout_in_seconds = attr.ib(
        # How long to block when waiting to receive a message by default, unless overridden in the
        # receive_message argument `receive_timeout_in_seconds`
        default=5,
        converter=int,
    )

    default_serializer_config = attr.ib(
        # Configuration for which serializer should be used by this transport
        default={'object': MsgpackSerializer, 'kwargs': {}},
        converter=dict,
    )

    service_name = attr.ib(
        # Service name used for error messages
        default='',
        validator=attr.validators.instance_of(six.text_type),
    )

    EXPONENTIAL_BACK_OFF_FACTOR = 4.0

    def __attrs_post_init__(self):
        self.requests_queue = mp.Queue(maxsize=self.queue_capacity)
        self.responses_queue = mp.Queue(maxsize=self.queue_capacity)

        # Run the backend layer thread
        self.backend_layer.start()

        self._default_serializer = None

    @property
    def backend_layer(self):
        kwargs = {
            'requests_queue': self.requests_queue,
            'responses_queue': self.responses_queue,
            'backend_layer_config': self.backend_layer_kwargs,
        }
        if self.backend_type == HTTP2_BACKEND_TYPE_TWISTED:
            return TwistedHTTP2BackendThread(**kwargs)
        else:
            return HyperH2BackendThread(**kwargs)

    # noinspection PyAttributeOutsideInit
    @property
    def default_serializer(self):
        if self._default_serializer is None:
            self._default_serializer = self.default_serializer_config['object'](
                **self.default_serializer_config.get('kwargs', {})
            )
        return self._default_serializer

    def send_message(self, request_id, meta, body, message_expiry_in_seconds=None):
        protocol_key = meta.get('protocol_key')
        stream_id = meta.get('stream_id')

        if request_id is None:
            raise InvalidMessageError('No request ID')

        if message_expiry_in_seconds:
            message_expiry = time.time() + message_expiry_in_seconds
        else:
            message_expiry = time.time() + self.message_expiry_in_seconds

        meta['__expiry__'] = message_expiry

        message = {'request_id': request_id, 'meta': meta, 'body': body}

        with self._get_timer('send.serialize'):
            serializer = self.default_serializer
            if 'serializer' in meta:
                # TODO: Breaking change: Assume a MIME type is always specified. This should not be done until all
                # TODO servers and clients have Step 2 code. This will be a Step 3 breaking change.
                serializer = meta.pop('serializer')

            serialized_message = (
                'content-type:{};'.format(serializer.mime_type).encode('utf-8') + serializer.dict_to_blob(message)
            )

        message_size_in_bytes = len(serialized_message)

        response_headers = [
            (':status', '200'),
            ('content-type', 'application/json'),
            ('content-length', str(message_size_in_bytes)),
            ('server', 'pysoa-h2'),
        ]

        if self.log_messages_larger_than_bytes and message_size_in_bytes > self.log_messages_larger_than_bytes:
            _oversized_message_logger.warning(
                'Oversized message sent for PySOA service {}'.format(self.service_name),
                extra={'data': {
                    'message': RecursivelyCensoredDictWrapper(message),
                    'serialized_length_in_bytes': message_size_in_bytes,
                    'threshold': self.log_messages_larger_than_bytes,
                }},
            )

        # Try at least once, then up to queue_full_retries times with exponential back-off, then error
        for i in range(-1, self.queue_full_retries):
            if i >= 0:
                time.sleep((2 ** i + random.random()) / self.EXPONENTIAL_BACK_OFF_FACTOR)
                self._get_counter('send.responses_queue_full_retry').increment()
                self._get_counter('send.responses_queue_full_retry.retry_{}'.format(i + 1)).increment()
            try:
                with self._get_timer('send.send_message_response_http2_queue'):
                    self.responses_queue.put(
                        (protocol_key, stream_id, request_id, serialized_message, response_headers),
                        timeout=0,
                    )
                return
            except six.moves.queue.Full:
                continue
            except Exception as e:
                self._get_counter('send.error.unknown').increment()
                raise MessageSendError(
                    'Unknown error sending message for service {}'.format(self.service_name),
                    six.text_type(type(e).__name__),
                    *e.args
                )

        self._get_counter('send.error.responses_queue_full').increment()
        raise MessageSendError(
            'Http2 responses queue was full after {retries} retries'.format(
                retries=self.queue_full_retries,
            )
        )

    def receive_message(self, receive_timeout_in_seconds=None):
        try:
            with self._get_timer('receive.get_from_requests_queue'):
                stream_id, protocol_key, serialized_message = self.requests_queue.get(
                    timeout=receive_timeout_in_seconds or self.receive_timeout_in_seconds,
                )
        except six.moves.queue.Empty:
            raise MessageReceiveTimeout('No message received for service {}'.format(self.service_name))
        except Exception as e:
            self._get_counter('receive.error.unknown').increment()
            raise MessageReceiveError(
                'Unknown error receiving message for service {}'.format(self.service_name),
                six.text_type(type(e).__name__),
                *e.args
            )

        with self._get_timer('receive.deserialize'):
            serializer = self.default_serializer
            if serialized_message.startswith(b'content-type'):
                # TODO: Breaking change: Assume all messages start with a content type. This should not be done
                # TODO until all servers and clients have Step 2 code. This will be a Step 3 breaking change.
                header, serialized_message = serialized_message.split(b';', 1)
                mime_type = header.split(b':', 1)[1].decode('utf-8').strip()
                if mime_type in Serializer.all_supported_mime_types:
                    serializer = Serializer.resolve_serializer(mime_type)

            message = serializer.blob_to_dict(serialized_message)
            message.setdefault('meta', {})['serializer'] = serializer

        meta = message.get('meta')
        meta['stream_id'] = stream_id
        meta['protocol_key'] = protocol_key

        if self._is_message_expired(message):
            self._get_counter('receive.error.message_expired').increment()
            raise MessageReceiveTimeout('Message expired for service {}'.format(self.service_name))

        request_id = message.get('request_id')
        if request_id is None:
            self._get_counter('receive.error.no_request_id').increment()
            raise InvalidMessageError('No request ID for service {}'.format(self.service_name))

        return request_id, message.get('meta', {}), message.get('body')

    @staticmethod
    def _is_message_expired(message):
        return message.get('meta', {}).get('__expiry__') and message['meta']['__expiry__'] < time.time()

    def _get_metric_name(self, name):
        if self.metrics_prefix:
            return '{prefix}.transport.http2_gateway.{name}'.format(prefix=self.metrics_prefix, name=name)
        else:
            return 'transport.http2_gateway.{}'.format(name)

    def _get_counter(self, name):
        return self.metrics.counter(self._get_metric_name(name))

    def _get_timer(self, name):
        return self.metrics.timer(self._get_metric_name(name), resolution=TimerResolution.MICROSECONDS)
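# Usage sketch (hedged; assumes a running backend layer thread is already feeding requests_queue): how a
# server-side caller would drive this core. Retry sleeps in send_message grow as
# (2 ** i + random.random()) / 4.0, i.e. roughly 0.25 s, 0.5 s, 1 s, and so on.
def _example_http2_server_core_roundtrip(core):
    # Block up to the default receive_timeout_in_seconds waiting for a request from the backend thread
    request_id, meta, body = core.receive_message()
    # Echo the body back; meta still carries the stream_id and protocol_key needed to route the response
    core.send_message(request_id, meta, {'echo': body})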
@attr.s
class Http2ClientTransportCore(object):
    backend_type = attr.ib(validator=valid_backend_type)

    backend_layer_kwargs = attr.ib(
        # Keyword args for the backend layer (Twisted and hyper-h2 modes)
        default={},
        validator=attr.validators.instance_of(dict),
    )

    message_expiry_in_seconds = attr.ib(
        # How long after a message is sent before it's considered "expired" and not received by default, unless
        # overridden in the send_message argument `message_expiry_in_seconds`
        default=60,
        converter=int,
    )

    metrics = attr.ib(
        default=NoOpMetricsRecorder(),
        validator=attr.validators.instance_of(MetricsRecorder),
    )

    metrics_prefix = attr.ib(
        default='',
        validator=attr.validators.instance_of(six.text_type),
    )

    receive_timeout_in_seconds = attr.ib(
        # How long to block when waiting to receive a message by default, unless overridden in the
        # receive_message argument `receive_timeout_in_seconds`
        default=5,
        converter=int,
    )

    default_serializer_config = attr.ib(
        # Configuration for which serializer should be used by this transport
        default={'object': MsgpackSerializer, 'kwargs': {}},
        converter=dict,
    )

    service_name = attr.ib(
        # Service name used for error messages
        default='',
        validator=attr.validators.instance_of(six.text_type),
    )

    def __attrs_post_init__(self):
        self._default_serializer = None
        self.requests = collections.deque()

        self.http_host = self.backend_layer_kwargs.get('http_host', '127.0.0.1')
        self.http_port = self.backend_layer_kwargs.get('http_port', '60061')

        self.ssl_context = None
        self.secure = False

        # Twisted only supports TLS connections for HTTP/2
        if self.backend_type == HTTP2_BACKEND_TYPE_TWISTED:
            self.secure = True
            self.ssl_context = pssl.create_default_context(pssl.Purpose.CLIENT_AUTH)
            self.ssl_context.options |= (
                pssl.OP_NO_TLSv1 | pssl.OP_NO_TLSv1_1 | pssl.OP_NO_COMPRESSION
            )
            # self.ssl_context.load_cert_chain(certfile="host.cert", keyfile="host.key")
            self.ssl_context.set_alpn_protocols(["h2"])

    # noinspection PyAttributeOutsideInit
    @property
    def default_serializer(self):
        if self._default_serializer is None:
            self._default_serializer = self.default_serializer_config['object'](
                **self.default_serializer_config.get('kwargs', {})
            )
        return self._default_serializer

    def send_request_message(self, request_id, meta, body, message_expiry_in_seconds=None):
        connection = HTTP20Connection(
            host=self.http_host,
            port=self.http_port,
            secure=self.secure,
            ssl_context=self.ssl_context,
        )

        message = {'request_id': request_id, 'meta': meta, 'body': body}

        serializer = self.default_serializer
        non_default_serializer = False
        if 'serializer' in meta:
            # TODO: Breaking change: Assume a MIME type is always specified. This should not be done until all
            # TODO servers and clients have Step 2 code. This will be a Step 3 breaking change.
            serializer = meta.pop('serializer')
            non_default_serializer = True
        serialized_message = serializer.dict_to_blob(message)

        if non_default_serializer:
            # TODO: Breaking change: Make this happen always, not just when a specific MIME type was requested.
            # TODO This should not be done until all servers and clients have this Step 1 code. This will be a
            # TODO Step 2 breaking change.
            serialized_message = (
                'content-type:{};'.format(serializer.mime_type).encode('utf-8') + serialized_message
            )

        request = connection.request('POST', '/', body=serialized_message)
        self.requests.append((connection, request))

    def receive_message(self, receive_timeout_in_seconds):
        connection, request = self.requests.popleft()
        response = connection.get_response(request)
        body = response.read()
        # headers = response.headers

        serializer = self.default_serializer
        if body.startswith(b'content-type'):
            # TODO: Breaking change: Assume all messages start with a content type. This should not be done until
            # TODO all servers and clients have Step 2 code. This will be a Step 3 breaking change.
            header, body = body.split(b';', 1)
            mime_type = header.split(b':', 1)[1].decode('utf-8').strip()
            if mime_type in Serializer.all_supported_mime_types:
                serializer = Serializer.resolve_serializer(mime_type)

        message = serializer.blob_to_dict(body)

        return message.get('request_id'), message.get('meta', {}), message.get('body')
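# Round-trip sketch (hedged; the request body below is invented for illustration): the client core opens one
# HTTP20Connection per request, so each send must be paired with a later receive that pops the same deque entry.
def _example_http2_client_roundtrip(core):
    core.send_request_message(request_id=1, meta={}, body={'action': 'echo'})
    # FIFO: receive_message pops the oldest outstanding (connection, request) pair and blocks on its response
    request_id, meta, body = core.receive_message(receive_timeout_in_seconds=5)
    return request_id, body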
def _get_transport(service='my_service', **kwargs):
    return RedisClientTransport(service, NoOpMetricsRecorder(), **kwargs)
def __init__(self, service_name, metrics=NoOpMetricsRecorder()):
    self.service_name = service_name
    self.metrics = metrics
@attr.s
class RedisTransportCore(object):
    """Handles communication with Redis."""

    # The backend layer holds Redis connections and should be reused as much as possible to reduce the number of
    # Redis connections. Given identical input settings, two backend layer instances will operate identically, so
    # we cache instances using the input settings as a key. This applies even across services: backend layers have
    # no service-specific code, so a single backend can be used for multiple services if those services' backend
    # settings are the same.
    _backend_layer_cache = {}

    backend_type = attr.ib(validator=valid_backend_type)

    backend_layer_kwargs = attr.ib(
        # Keyword args for the backend layer (Standard Redis and Sentinel Redis modes)
        default={},
        validator=attr.validators.instance_of(dict),
    )

    log_messages_larger_than_bytes = attr.ib(
        default=DEFAULT_MAXIMUM_MESSAGE_BYTES_CLIENT,
        converter=int,
    )

    maximum_message_size_in_bytes = attr.ib(
        default=DEFAULT_MAXIMUM_MESSAGE_BYTES_CLIENT,
        converter=int,
    )

    message_expiry_in_seconds = attr.ib(
        # How long after a message is sent before it's considered "expired" and not received by default, unless
        # overridden in the send_message argument `message_expiry_in_seconds`
        default=60,
        converter=int,
    )

    metrics = attr.ib(
        default=NoOpMetricsRecorder(),
        validator=attr.validators.instance_of(MetricsRecorder),
    )

    metrics_prefix = attr.ib(
        default='',
        validator=attr.validators.instance_of(six.text_type),
    )

    queue_capacity = attr.ib(
        # The capacity for queues to which messages are sent
        default=10000,
        converter=int,
    )

    queue_full_retries = attr.ib(
        # Number of times to retry when the send queue is full
        default=10,
        converter=int,
    )

    receive_timeout_in_seconds = attr.ib(
        # How long to block when waiting to receive a message by default, unless overridden in the
        # receive_message argument `receive_timeout_in_seconds`
        default=5,
        converter=int,
    )

    serializer_config = attr.ib(
        # Configuration for which serializer should be used by this transport
        default={'object': MsgpackSerializer, 'kwargs': {}},
        converter=dict,
    )

    service_name = attr.ib(
        # Service name used for error messages
        default='',
        validator=attr.validators.instance_of(six.text_type),
    )

    EXPONENTIAL_BACK_OFF_FACTOR = 4.0
    QUEUE_NAME_PREFIX = 'pysoa:'
    GLOBAL_QUEUE_SPECIFIER = '!'

    def __attrs_post_init__(self):
        # Set the hosts property after all attrs are validated
        if self.backend_layer_kwargs.get('hosts'):
            final_hosts = []
            for host in self.backend_layer_kwargs['hosts']:
                if isinstance(host, tuple) and len(host) == 2:
                    final_hosts.append(host)
                elif isinstance(host, six.string_types):
                    final_hosts.append((host, self.backend_layer_kwargs.get('redis_port', 6379)))
                else:
                    raise Exception(
                        "connection_kwargs['hosts'] must be a list of tuples of (host, port), or strings"
                    )
            self.backend_layer_kwargs['hosts'] = final_hosts

        if self.backend_layer_kwargs.get('redis_db') is not None:
            self.backend_layer_kwargs.setdefault('connection_kwargs', {})['db'] = \
                self.backend_layer_kwargs['redis_db']

        self.backend_layer_kwargs.pop('redis_db', None)
        self.backend_layer_kwargs.pop('redis_port', None)

        self._backend_layer = None
        self._serializer = None

    # noinspection PyAttributeOutsideInit
    @property
    def backend_layer(self):
        if self._backend_layer is None:
            cache_key = (self.backend_type, dict_to_hashable(self.backend_layer_kwargs))
            if cache_key not in self._backend_layer_cache:
                with self._get_timer('backend.initialize'):
                    backend_layer_kwargs = deepcopy(self.backend_layer_kwargs)
                    if self.backend_type == REDIS_BACKEND_TYPE_SENTINEL:
                        self._backend_layer_cache[cache_key] = SentinelRedisClient(**backend_layer_kwargs)
                    else:
                        self._backend_layer_cache[cache_key] = StandardRedisClient(**backend_layer_kwargs)

            self._backend_layer = self._backend_layer_cache[cache_key]

        # Each time the backend layer is accessed, use _this_ transport's metrics recorder for the backend layer
        self._backend_layer.metrics_counter_getter = lambda name: self._get_counter(name)
        return self._backend_layer

    # noinspection PyAttributeOutsideInit
    @property
    def serializer(self):
        if self._serializer is None:
            self._serializer = self.serializer_config['object'](**self.serializer_config.get('kwargs', {}))
        return self._serializer

    def send_message(self, queue_name, request_id, meta, body, message_expiry_in_seconds=None):
        """
        Send a message to the specified queue in Redis.

        :param queue_name: The name of the queue to which to send the message
        :type queue_name: union(str, unicode)
        :param request_id: The message's request ID
        :type request_id: int
        :param meta: The message meta information, if any (should be an empty dict if no metadata)
        :type meta: dict
        :param body: The message body (should be a dict)
        :type body: dict
        :param message_expiry_in_seconds: The optional message expiry, which defaults to the setting with the
            same name
        :type message_expiry_in_seconds: int

        :raise: InvalidMessageError, MessageTooLarge, MessageSendError
        """
        if request_id is None:
            raise InvalidMessageError('No request ID')

        if message_expiry_in_seconds:
            message_expiry = time.time() + message_expiry_in_seconds
            redis_expiry = message_expiry_in_seconds + 10
        else:
            message_expiry = time.time() + self.message_expiry_in_seconds
            redis_expiry = self.message_expiry_in_seconds

        meta['__expiry__'] = message_expiry

        message = {'request_id': request_id, 'meta': meta, 'body': body}

        with self._get_timer('send.serialize'):
            serialized_message = self.serializer.dict_to_blob(message)

        message_size_in_bytes = len(serialized_message)
        if message_size_in_bytes > self.maximum_message_size_in_bytes:
            self._get_counter('send.error.message_too_large').increment()
            raise MessageTooLarge(message_size_in_bytes)
        elif self.log_messages_larger_than_bytes and message_size_in_bytes > self.log_messages_larger_than_bytes:
            _oversized_message_logger.warning(
                'Oversized message sent for PySOA service {}'.format(self.service_name),
                extra={'data': {
                    'message': RecursivelyCensoredDictWrapper(message),
                    'serialized_length_in_bytes': message_size_in_bytes,
                    'threshold': self.log_messages_larger_than_bytes,
                }},
            )

        queue_key = self.QUEUE_NAME_PREFIX + queue_name

        # Try at least once, up to queue_full_retries times, then error
        for i in range(-1, self.queue_full_retries):
            if i >= 0:
                time.sleep((2 ** i + random.random()) / self.EXPONENTIAL_BACK_OFF_FACTOR)
                self._get_counter('send.queue_full_retry').increment()
                self._get_counter('send.queue_full_retry.retry_{}'.format(i + 1)).increment()
            try:
                with self._get_timer('send.get_redis_connection'):
                    connection = self.backend_layer.get_connection(queue_key)

                with self._get_timer('send.send_message_to_redis_queue'):
                    self.backend_layer.send_message_to_queue(
                        queue_key=queue_key,
                        message=serialized_message,
                        expiry=redis_expiry,
                        capacity=self.queue_capacity,
                        connection=connection,
                    )
                return
            except redis.exceptions.ResponseError as e:
                # The Lua script handles capacity checking and sends the "queue full" error back
                if e.args[0] == 'queue full':
                    continue
                self._get_counter('send.error.response').increment()
                raise MessageSendError(
                    'Redis error sending message for service {}'.format(self.service_name),
                    *e.args
                )
            except CannotGetConnectionError as e:
                self._get_counter('send.error.connection').increment()
                raise MessageSendError('Cannot get connection: {}'.format(e.args[0]))
            except Exception as e:
                self._get_counter('send.error.unknown').increment()
                raise MessageSendError(
                    'Unknown error sending message for service {}'.format(self.service_name),
                    six.text_type(type(e).__name__),
                    *e.args
                )

        self._get_counter('send.error.redis_queue_full').increment()
        raise MessageSendError(
            'Redis queue {queue_name} was full after {retries} retries'.format(
                queue_name=queue_name,
                retries=self.queue_full_retries,
            )
        )

    def receive_message(self, queue_name, receive_timeout_in_seconds=None):
        """
        Receive a message from the specified queue in Redis.

        :param queue_name: The name of the queue from which to receive the message
        :type queue_name: union(str, unicode)
        :param receive_timeout_in_seconds: The optional timeout, which defaults to the setting with the same name
        :type receive_timeout_in_seconds: int

        :return: A tuple of request ID, message meta-information dict, and message body dict
        :rtype: tuple(int, dict, dict)

        :raise: MessageReceiveError, MessageReceiveTimeout, InvalidMessageError
        """
        queue_key = self.QUEUE_NAME_PREFIX + queue_name

        try:
            with self._get_timer('receive.get_redis_connection'):
                connection = self.backend_layer.get_connection(queue_key)
            # Returns the message, or None if no new message arrives within the timeout
            with self._get_timer('receive.pop_from_redis_queue'):
                result = connection.blpop(
                    [queue_key],
                    timeout=receive_timeout_in_seconds or self.receive_timeout_in_seconds,
                )
            serialized_message = None
            if result:
                serialized_message = result[1]
        except CannotGetConnectionError as e:
            self._get_counter('receive.error.connection').increment()
            raise MessageReceiveError('Cannot get connection: {}'.format(e.args[0]))
        except Exception as e:
            self._get_counter('receive.error.unknown').increment()
            raise MessageReceiveError(
                'Unknown error receiving message for service {}'.format(self.service_name),
                six.text_type(type(e).__name__),
                *e.args
            )

        if serialized_message is None:
            raise MessageReceiveTimeout('No message received for service {}'.format(self.service_name))

        with self._get_timer('receive.deserialize'):
            message = self.serializer.blob_to_dict(serialized_message)

        if self._is_message_expired(message):
            self._get_counter('receive.error.message_expired').increment()
            raise MessageReceiveTimeout('Message expired for service {}'.format(self.service_name))

        request_id = message.get('request_id')
        if request_id is None:
            self._get_counter('receive.error.no_request_id').increment()
            raise InvalidMessageError('No request ID for service {}'.format(self.service_name))

        return request_id, message.get('meta', {}), message.get('body')

    @staticmethod
    def _is_message_expired(message):
        return message.get('meta', {}).get('__expiry__') and message['meta']['__expiry__'] < time.time()

    def _get_metric_name(self, name):
        if self.metrics_prefix:
            return '{prefix}.transport.redis_gateway.{name}'.format(prefix=self.metrics_prefix, name=name)
        else:
            return 'transport.redis_gateway.{}'.format(name)

    def _get_counter(self, name):
        return self.metrics.counter(self._get_metric_name(name))

    def _get_timer(self, name):
        return self.metrics.timer(self._get_metric_name(name), resolution=TimerResolution.MICROSECONDS)
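# Usage sketch (a hedged example against a hypothetical Redis running on localhost:6379): one core instance can
# both send and receive. The queue name and message body are illustrative only.
def _example_redis_core_roundtrip():
    core = RedisTransportCore(
        backend_type=REDIS_BACKEND_TYPE_STANDARD,
        backend_layer_kwargs={'hosts': ['localhost']},  # normalized to [('localhost', 6379)] in __attrs_post_init__
        service_name='example',
    )
    core.send_message('service.example', request_id=1, meta={}, body={'ping': True})
    # The message carries meta['__expiry__'], so reading it too late raises MessageReceiveTimeout instead
    request_id, meta, body = core.receive_message('service.example')
    return request_id, body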
def _get_transport(service='my_service', **kwargs):
    return Http2ServerTransport(service, NoOpMetricsRecorder(), **kwargs)