def _handle_response(self, msg_type, resp_proto, req):
    """Send *req* to the validator and return the parsed response as a dict.

    :param msg_type: validator ``Message`` type identifier for the request.
    :param resp_proto: protobuf class used to deserialize the response.
    :param req: protobuf request message (must support ``SerializeToString``).
    :raises KeyNotFound: when the response status is ``NO_RESOURCE``.
    :raises ClientException: on parse failure, ZMQ errors, connection
        timeout, or any other non-OK status.
    """
    self._stream.wait_for_ready()
    future = self._stream.send(message_type=msg_type,
                               content=req.SerializeToString())
    resp = resp_proto()
    try:
        # Bound the wait with the shared timeout so a dead validator
        # cannot hang the caller (consistent with the sibling handlers
        # in this module, which all pass ZMQ_CONNECTION_TIMEOUT).
        resp.ParseFromString(future.result(ZMQ_CONNECTION_TIMEOUT).content)
    except (DecodeError, AttributeError):
        raise ClientException(
            'Failed to parse "content" string from validator')
    except ValidatorConnectionError as vce:
        LOGGER.error('Error: %s' % vce)
        raise ClientException(
            'Failed with ZMQ interaction: {0}'.format(vce))
    except (asyncio.TimeoutError, FutureTimeoutError):
        raise ClientException('Validator connection timeout')
    data = message_to_dict(resp)
    # NOTE: Not all protos have this status; if `status`/`NO_RESOURCE`
    # is absent the AttributeError is suppressed and we fall through to
    # the plain return below.
    with suppress(AttributeError):
        if resp.status == resp_proto.NO_RESOURCE:
            raise KeyNotFound("404")
        if resp.status != resp_proto.OK:
            raise ClientException("Error: %s" % data)
    return data
def _handle_response(self, msg_type, resp_proto, req):
    """Send *req* to the validator and return the parsed response as a dict.

    :param msg_type: validator ``Message`` type identifier for the request.
    :param resp_proto: protobuf class used to deserialize the response.
    :param req: protobuf request message (must support ``SerializeToString``).
    :raises KeyNotFound: when the response status is ``NO_RESOURCE``.
    :raises ValidatorNotReadyException: when the status is ``NOT_READY``.
    :raises ClientException: on parse failure, ZMQ errors, timeouts, any
        other non-OK status, or an unexpected error.
    """
    self._stream.wait_for_ready()
    future = self._stream.send(message_type=msg_type,
                               content=req.SerializeToString())
    resp = resp_proto()
    try:
        resp.ParseFromString(future.result(ZMQ_CONNECTION_TIMEOUT).content)
    except (DecodeError, AttributeError):
        raise ClientException(
            'Failed to parse "content" string from validator')
    except ValidatorConnectionError as vce:
        raise ClientException(
            'Failed with ZMQ interaction: {0}'.format(vce))
    except (asyncio.TimeoutError, FutureTimeoutError):
        raise ClientException('Validator connection timeout')
    except Exception as e:
        # Top-level boundary: log the unexpected error with traceback,
        # then surface a uniform client-facing exception.
        LOGGER.exception(e)
        raise ClientException('Unexpected validator error')
    data = message_to_dict(resp)
    # NOTE: not every response proto defines `status`; suppressing
    # AttributeError lets such responses skip the checks and return.
    with suppress(AttributeError):
        LOGGER.debug(f'The response parsed data: {data}')
        if resp.status == resp_proto.NO_RESOURCE:
            raise KeyNotFound('Resource not found')
        elif resp.status == resp_proto.NOT_READY:
            raise ValidatorNotReadyException('Validator is not ready yet')
        elif resp.status != resp_proto.OK:
            # Fixed typo in the user-facing message: "occured" -> "occurred".
            raise ClientException('Error occurred')
    return data
def get_batch(self, batch_id):
    """Fetch the status of the batch *batch_id* from the validator.

    :param batch_id: hex-encoded batch identifier to query.
    :returns: tuple of (``{'batch_statuses': <status dict>}``, sha256
        hexdigest of the serialized status response).
    :raises Exception: on ZMQ failure, timeout, missing batch, or a
        batch-id mismatch in the validator's reply.
    """
    self._stream.wait_for_ready()
    future = self._stream.send(
        message_type=Message.CLIENT_BATCH_STATUS_REQUEST,
        content=client_batch_submit_pb2.ClientBatchStatusRequest(
            batch_ids=[batch_id],
        ).SerializeToString())
    try:
        resp = future.result(ZMQ_CONNECTION_TIMEOUT).content
    except ValidatorConnectionError as vce:
        LOGGER.error('ZMQ error: %s' % vce)
        raise Exception('Failed with ZMQ interaction: {0}'.format(vce))
    except (asyncio.TimeoutError, FutureTimeoutError):
        LOGGER.error(f'Task with batch_id {batch_id} timeouted')
        raise Exception('Timeout')
    batch_resp = client_batch_submit_pb2.ClientBatchStatusResponse()
    batch_resp.ParseFromString(resp)
    LOGGER.debug(f'Batch: {resp}')
    # Collapse the multi-line proto repr onto one line for log readability.
    batch_resp_str = repr(batch_resp).replace("\n", "")
    LOGGER.debug(f'Batch parsed: {batch_resp_str}')
    hash_sum = hashlib.sha256(batch_resp.SerializeToString()).hexdigest()
    LOGGER.debug(f'got hashsum: {hash_sum}')
    data = message_to_dict(batch_resp)
    LOGGER.debug(f'data: {data}')
    try:
        # KeyError guarded too: an empty dict-ified response may lack
        # the 'batch_statuses' key entirely.
        batch_data = data['batch_statuses'][0]
    except (IndexError, KeyError):
        raise Exception(f'Batch with id "{batch_id}" not found')
    # Explicit raise instead of `assert`, which is stripped under -O.
    if batch_id != batch_data['batch_id']:
        raise Exception(f'Batches not matched (req: {batch_id}, '
                        f'got: {batch_data["batch_id"]})')
    prep_resp = {'batch_statuses': batch_data}
    return prep_resp, hash_sum
def get_root_block(self):
    """Return the latest block's id and state root hash.

    Requests a single-block page from the validator and decodes its
    base64 protobuf header.

    :returns: tuple of (``header_signature``, ``state_root_hash``).
    :raises ClientException: when no blocks are returned or the block
        header is missing/invalid.
    :raises KeyNotFound: propagated from :meth:`_handle_response`.
    """
    resp = self._handle_response(
        Message.CLIENT_BLOCK_LIST_REQUEST,
        ClientBlockListResponse,
        ClientBlockListRequest(paging=ClientPagingControls(limit=1)))
    try:
        block = resp['blocks'][0]
    except (KeyError, IndexError):
        # Previously leaked a bare IndexError/KeyError; surface the
        # client-level exception type callers already handle.
        raise ClientException('Validator returned no blocks')
    header = BlockHeader()
    try:
        header_bytes = base64.b64decode(block['header'])
        header.ParseFromString(header_bytes)
    except (KeyError, TypeError, ValueError, DecodeError):
        header = block.get('header', None)
        LOGGER.error(
            'The validator sent a resource with %s %s',
            'a missing header' if header is None else 'an invalid header:',
            header or '')
        raise ClientException()
    block['header'] = message_to_dict(header)
    return (
        block['header_signature'],
        block['header']['state_root_hash'],
    )
def get_batch(self, batch_id):
    """Fetch the status of the batch *batch_id* from the validator.

    :param batch_id: hex-encoded batch identifier to query.
    :returns: tuple of (``{'batch_statuses': <status dict>}``, sha256
        hexdigest of the serialized status response).
    :raises Exception: on ZMQ failure, timeout, missing batch, or a
        batch-id mismatch in the validator's reply.
    """
    self._stream.wait_for_ready()
    future = self._stream.send(
        message_type=Message.CLIENT_BATCH_STATUS_REQUEST,
        content=client_batch_submit_pb2.ClientBatchStatusRequest(
            batch_ids=[batch_id],
        ).SerializeToString())
    try:
        # NOTE(review): hard-coded 10 s timeout — a sibling variant uses
        # the shared ZMQ_CONNECTION_TIMEOUT constant; confirm and unify.
        resp = future.result(10).content
    except ValidatorConnectionError as vce:
        LOGGER.error('ZMQ error: %s' % vce)
        raise Exception('Failed with ZMQ interaction: {0}'.format(vce))
    except asyncio.TimeoutError:
        LOGGER.error(f'Task with batch_id {batch_id} timeouted')
        raise Exception('Timeout')
    batch_resp = client_batch_submit_pb2.ClientBatchStatusResponse()
    batch_resp.ParseFromString(resp)
    LOGGER.debug('Batch: %s', resp)
    LOGGER.info('Batch parsed: %s', batch_resp)
    hash_sum = hashlib.sha256(batch_resp.SerializeToString()).hexdigest()
    data = message_to_dict(batch_resp)
    LOGGER.debug('data: %s', data)
    try:
        # KeyError guarded too: an empty dict-ified response may lack
        # the 'batch_statuses' key entirely.
        batch_data = data['batch_statuses'][0]
    except (IndexError, KeyError):
        raise Exception(f'Batch with id "{batch_id}" not found')
    # Explicit raise instead of `assert`, which is stripped under -O.
    if batch_id != batch_data['batch_id']:
        raise Exception('Batches not matched (req: {0}, got: {1})'.format(
            batch_id, batch_data['batch_id']))
    prep_resp = {'batch_statuses': batch_data}
    return prep_resp, hash_sum
async def _process_msg(request, msg):
    """Dispatch a validator event message to subscribed websocket clients.

    Parses the ``EventList`` out of *msg*, maps each sawtooth event type
    onto its configured REMME event names, builds a response per event
    handler, and pushes it to the client's websocket — deduplicating by
    event hash so a connection never receives the same notification twice.

    Non-``CLIENT_EVENTS`` messages are ignored.

    :param request: ws request wrapper exposing ``ws`` and the ``rpc``
        object holding subscription state (``_subsevt``, ``_evthashes``).
    :param msg: validator message whose ``content`` is a serialized
        ``EventList``.
    """
    LOGGER.debug(f'Message type: {msg.message_type}')
    # Only validator event pushes are processed here.
    if msg.message_type != Message.CLIENT_EVENTS:
        LOGGER.debug(f'Skip unexpected msg type {msg.message_type}')
        return
    evt_resp = EventList()
    evt_resp.ParseFromString(msg.content)
    ws = request.ws
    # Per-connection subscription map; presumably keyed by event name
    # with 'validated_data'/'msg_id' entries — see subscription setup.
    subsevt = request.rpc._subsevt.get(ws, {})
    for proto_data in evt_resp.events:
        evt = message_to_dict(proto_data)
        LOGGER.debug(f'Dicted response evt: {evt}')
        event_type = evt['event_type']
        # One sawtooth event type may fan out to several REMME events.
        evt_names = SAWTOOTH_TO_REMME_EVENT.get(event_type)
        if not evt_names:
            LOGGER.debug(f'Evt names for type "{event_type}" '
                         'not configured')
            continue
        for evt_name in evt_names:
            evt_tr = EVENT_HANDLERS[evt_name]
            # Check if evt_name is subscribed
            if evt_name not in subsevt:
                LOGGER.debug('No active ws connection '
                             f'for evt "{evt_name}"')
                continue
            # Check if ws has stored hashes
            if ws not in request.rpc._evthashes:
                LOGGER.warning(f'Connection {ws} not found')
                # Drop the stale subscription for this dead connection.
                del subsevt[evt_name]
                continue
            # get response of updated state
            LOGGER.debug(f'Got evt data: {evt}')
            updated_state = evt_tr.parse_evt(evt)
            if not updated_state:
                LOGGER.debug('Skiping evt with no state update')
                continue
            validated_data = subsevt[evt_name]['validated_data']
            LOGGER.debug(f'Loaded validated data: {validated_data}')
            msg_id = subsevt[evt_name]['msg_id']
            response = evt_tr.prepare_response(updated_state, validated_data)
            # Handlers may be sync or async; await only coroutines.
            if asyncio.iscoroutine(response):
                response = await response
            if not response:
                LOGGER.debug('Skiping evt with empty response')
                continue
            LOGGER.debug(f'Got response: {response}')
            evthash = evt_tr.prepare_evt_hash(response)
            LOGGER.debug(f'Evt hash calculated: {evthash}')
            # Check if we already have sent update
            if evthash in request.rpc._evthashes[ws]:
                LOGGER.debug(f'Connection {ws} already '
                             'received this notification')
                continue
            result = encode_result(msg_id, {
                'event_type': evt_name,
                'attributes': response
            })
            await request.rpc._ws_send_str(request, result)
            # Record the hash only after a successful send, so a failed
            # send can be retried on the next matching event.
            request.rpc._evthashes[ws].add(evthash)
def load_proto(cls, pb):
    """Build a form instance from a protobuf message.

    Converts *pb* into a plain dict, feeds it through ``cls.load_data``,
    and records the originating protobuf class on the result so it can
    be serialized back later.
    """
    loaded = cls.load_data(message_to_dict(pb))
    loaded._pb_class = type(pb)
    return loaded