def inner_processor(message_buffer):
    """Feed the buffered request bytes through `processor` and return
    the handled method together with the serialized reply bytes."""
    request_trans = TMemoryBuffer(message_buffer.getvalue())
    reply_trans = TMemoryBuffer()
    iprot = proto_factory.getProtocol(request_trans)
    oprot = proto_factory.getProtocol(reply_trans)
    handled_method = processor.process(iprot, oprot)
    return (handled_method, reply_trans.getvalue())
def test_with_headers(self):
    """Trace headers set by the client must be propagated onto the root span."""
    client_memory_trans = TMemoryBuffer()
    client_prot = THeaderProtocol(client_memory_trans)
    client_header_trans = client_prot.trans
    client_header_trans.set_header("Trace", "1234")
    client_header_trans.set_header("Parent", "2345")
    client_header_trans.set_header("Span", "3456")
    client = BaseplateService.Client(client_prot)
    try:
        client.is_healthy()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; only the read of the (missing) response is
        # expected to fail here.
        pass  # we don't have a test response for the client
    # Replay the captured request bytes into the server-side transport.
    self.itrans._readBuffer = StringIO(client_memory_trans.getvalue())
    self.processor.process(self.iprot, self.oprot, self.server_context)
    self.assertEqual(self.observer.on_root_span_created.call_count, 1)
    context, root_span = self.observer.on_root_span_created.call_args[0]
    self.assertEqual(root_span.trace_id, "1234")
    self.assertEqual(root_span.parent_id, "2345")
    self.assertEqual(root_span.id, "3456")
    self.assertTrue(self.root_observer.on_start.called)
    self.assertTrue(self.root_observer.on_stop.called)
def test_with_headers(self):
    """Trace headers sent by the client must be parsed onto the server span."""
    # Build a real client request into a memory transport so the bytes are
    # correctly framed by the header protocol.
    client_memory_trans = TMemoryBuffer()
    client_prot = THeaderProtocol(client_memory_trans)
    client_header_trans = client_prot.trans
    client_header_trans.set_header("Trace", "1234")
    client_header_trans.set_header("Parent", "2345")
    client_header_trans.set_header("Span", "3456")
    client_header_trans.set_header("Sampled", "1")
    client_header_trans.set_header("Flags", "1")
    client = TestService.Client(client_prot)
    try:
        client.example_simple()
    except TTransportException:
        pass  # we don't have a test response for the client
    # Replay the captured request bytes into the server-side transport.
    self.itrans._readBuffer = StringIO(client_memory_trans.getvalue())
    self.processor.process(self.iprot, self.oprot, self.server_context)
    self.assertEqual(self.observer.on_server_span_created.call_count, 1)
    context, server_span = self.observer.on_server_span_created.call_args[0]
    # The string header values are parsed into integers on the server side.
    self.assertEqual(server_span.trace_id, 1234)
    self.assertEqual(server_span.parent_id, 2345)
    self.assertEqual(server_span.id, 3456)
    self.assertTrue(server_span.sampled)
    self.assertEqual(server_span.flags, 1)
    self.assertEqual(self.server_observer.on_start.call_count, 1)
    self.assertEqual(self.server_observer.on_finish.call_count, 1)
    # on_finish is called with exc_info=None (no error).
    self.assertEqual(self.server_observer.on_finish.call_args[0], (None,))
def handle_request(self, event):
    """Decode a zeromq message, run it through the thrift processor, and
    emit the serialized reply back over zeromq.

    When profiling is enabled, a timestamp is appended to the reply's
    routing envelope before emitting.
    """
    # t = time.time() # 0.2ms
    # 1. Wrap the raw zeromq message bytes as thrift protocols.
    trans_input = TMemoryBuffer(event.msg)
    trans_output = TMemoryBuffer()
    proto_input = self.proto_factory_input.getProtocol(trans_input)
    proto_output = self.proto_factory_output.getProtocol(trans_output)
    # 2. Hand off to the processor.
    try:
        self.processor.process(proto_input, proto_output)
        # 3. Convert the thrift result back into a zeromq-style reply.
        msg = trans_output.getvalue()
        # print "Return Msg: ", msg, event.id
        if self.profile:
            event.id.extend(["", "%.4f" % time.time()])
        # Single emit for both the profiling and non-profiling path
        # (the original duplicated this call in each branch).
        self.events.emit(msg, event.id)
    except Exception as e:
        # The server must not crash on a bad request; log and continue.
        logging.exception("Exception: %s", e)
def __auth_headers(self, headers, body, support_account_key):
    """Build authentication headers for a POST to ``self.uri``.

    For signature-capable credential types, signs host/timestamp/date and
    the body's MD5 together with the caller-provided headers.  Otherwise
    serializes an HttpAuthorizationHeader as JSON-thrift into the
    Authorization header.
    """
    auth_headers = dict()
    if self.credential and self.credential.type and self.credential.secretKeyId:
        if self.credential.type in SIGNATURE_SUPPORT:
            auth_headers[HOST] = self.host
            # timestamp, corrected by the locally tracked clock offset
            auth_headers[TIMESTAMP] = str(int(time.time() + self.__clock_offset))
            auth_headers[MI_DATE] = formatdate(usegmt=True)
            # content md5
            auth_headers[CONTENT_MD5] = hashlib.md5(body).hexdigest()
            # defaultdict(list) replaces the equivalent but noisier
            # defaultdict(lambda: []).
            headers_to_sign = defaultdict(list)
            for k, v in headers.iteritems():
                headers_to_sign[str(k).lower()].append(v)
            for k, v in auth_headers.iteritems():
                headers_to_sign[str(k).lower()].append(v)
            signature = base64.b64encode(self.sign(self.__form_sign_content("POST", self.uri, headers_to_sign))).strip()
            auth_string = "Galaxy-V2 %s:%s" % (self.credential.secretKeyId, signature)
            auth_headers[AUTHORIZATION] = auth_string
        else:
            auth_header = HttpAuthorizationHeader()
            auth_header.secretKeyId = self.credential.secretKeyId
            auth_header.userType = self.credential.type
            auth_header.secretKey = self.credential.secretKey
            auth_header.supportAccountKey = support_account_key
            mb = TMemoryBuffer()
            protocol = TJSONProtocol(mb)
            auth_header.write(protocol)
            auth_headers[AUTHORIZATION] = str(mb.getvalue())
    return auth_headers
def call_processor(self, input, client_type, protocol_type, client_principal):
    """Bridge a header-stripped request from C++ into the python processor.

    Re-adds the header framing, runs the processor, then strips the
    header off the response again because the C++ side re-adds it.
    Returns the stripped response bytes, or None if processing raised.
    """
    try:
        # The input string has already had the header removed, but
        # the python processor will expect it to be there. In
        # order to reconstitute the message with headers, we use
        # the THeaderProtocol object to write into a memory
        # buffer, then pass that buffer to the python processor.
        write_buf = TMemoryBuffer()
        trans = THeaderTransport(write_buf, client_types=[client_type])
        trans.set_protocol_id(protocol_type)
        trans.write(input)
        trans.flush()
        prot_buf = TMemoryBuffer(write_buf.getvalue())
        prot = THeaderProtocol(prot_buf)
        ctx = TCppConnectionContext(client_principal)
        self.processor.process(prot, prot, ctx)
        # And on the way out, we need to strip off the header,
        # because the C++ code will expect to add it.
        read_buf = TMemoryBuffer(prot_buf.getvalue())
        trans = THeaderTransport(read_buf, client_types=[client_type])
        trans.readFrame(0)
        return trans.cstringio_buf.read()
    except:
        # Don't let exceptions escape back into C++
        traceback.print_exc()
def write_communication_to_buffer(comm):
    '''
    Serialize communication to buffer (binary string) and return buffer.
    '''
    mem = TMemoryBuffer()
    comm.write(factory.createProtocol(mem))
    return mem.getvalue()
def encode_and_decode(self, obj):
    """Round-trip `obj` through the protocol selected by self.PROTO
    (0 = binary, otherwise compact) into a fresh instance."""
    write_trans = TMemoryBuffer()
    if self.PROTO == 0:
        write_proto = TBinaryProtocol.TBinaryProtocol(write_trans)
    else:
        write_proto = TCompactProtocol.TCompactProtocol(write_trans)
    obj.write(write_proto)
    decoded = obj.__class__()
    read_trans = TMemoryBuffer(write_trans.getvalue())
    read_proto = write_proto.__class__(read_trans)
    decoded.read(read_proto)
def test_expected_exception_not_passed_to_server_span_finish(self):
    """An exception declared in the service IDL must not be reported to
    the server span observer's on_finish."""
    memory_trans = TMemoryBuffer()
    header_prot = THeaderProtocol(memory_trans)
    client = TestService.Client(header_prot)
    try:
        client.example_throws(crash=False)
    except TTransportException:
        # no response is available for the client to read
        pass
    self.itrans._readBuffer = StringIO(memory_trans.getvalue())
    self.processor.process(self.iprot, self.oprot, self.server_context)
    self.assertEqual(self.server_observer.on_start.call_count, 1)
    self.assertEqual(self.server_observer.on_finish.call_count, 1)
    self.assertEqual(self.server_observer.on_finish.call_args[0], (None,))
def write_communication_to_buffer(comm):
    '''
    Serialize communication to buffer (binary string) and return buffer.

    Args:
        comm (Communication): communication to serialize

    Returns:
        str: the serialized communication as a binary string
        (the original docstring wrongly claimed a Communication was
        returned -- this function writes, it does not read)
    '''
    transport = TMemoryBuffer()
    protocol = factory.createProtocol(transport)
    comm.write(protocol)
    return transport.getvalue()
def message_received(self, frame):
    """Process one framed request and write back the reply (coroutine).

    On any processing error the transport is closed so the peer does not
    hang waiting for a reply.
    """
    tmi = TMemoryBuffer(frame)
    tmo = TMemoryBuffer()
    iprot = THeaderProtocol(tmi)
    oprot = THeaderProtocol(tmo)
    try:
        yield from self.processor.process(iprot, oprot, self.server_context)
        msg = tmo.getvalue()
        # A zero-length reply means nothing to send (presumably a oneway
        # call -- TODO confirm against the processor's contract).
        if len(msg) > 0:
            self.transport.write(msg)
    except Exception:
        logging.exception("Exception while processing request")
        self.transport.close()
def http_handler(request):
    """Django view bridging HTTP POST bodies into the thrift handler.

    Returns the thrift binary response with the x-thrift content type;
    non-POST requests get a 405 with the correct Allow header.
    """
    if request.method != 'POST':
        # BUG FIX: the Allow header must advertise the method we actually
        # accept (POST), not GET/PUT.
        return HttpResponseNotAllowed(['POST'])
    server_info.client_ip = request.META['REMOTE_ADDR']
    # Strip the IPv4-mapped-IPv6 prefix so downstream code sees plain IPv4.
    if server_info.client_ip[0:7] == '::ffff:':
        server_info.client_ip = server_info.client_ip[7:]
    server_info.client_port = None
    itrans = TMemoryBuffer(request.body)
    otrans = TMemoryBuffer()
    iproto = TBinaryProtocol(itrans)
    oproto = TBinaryProtocol(otrans)
    thrift_handler(iproto, oproto)
    return HttpResponse(otrans.getvalue(), content_type="application/x-thrift")
def call(self, procedure, args):
    """Invoke `procedure` (a name or a procedure object) over HTTP.

    Serializes the CALL message, POSTs it, and deserializes the reply:
    returns the result value, raises a declared exception field if set,
    or raises TApplicationException on protocol-level errors.
    """
    if isinstance(procedure, str):
        try:
            procedure = processor._procedures[procedure]
        except KeyError:
            # BUG FIX: the original formatted an undefined variable `name`,
            # which raised NameError instead of the intended exception.
            raise TApplicationException(TApplicationException.UNKNOWN_METHOD,
                                        "Unknown method '{0}'".format(procedure))
    otrans = TMemoryBuffer()
    oproto = TBinaryProtocol(otrans)
    # perf.begin('send')
    oproto.writeMessageBegin(procedure.name, TMessageType.CALL, self._processor.seqid)
    self._processor.seqid = self._processor.seqid + 1
    self._processor.send_struct(Namespace(args), procedure.parameters_struct, oproto)
    oproto.writeMessageEnd()
    # perf.end('send')
    self._http.request('POST', self._url, otrans.getvalue(), {})
    resp = self._http.getresponse()
    data = resp.read()
    iproto = TBinaryProtocol(TMemoryBuffer(data))
    # perf.begin('wait')
    (fname, mtype, rseqid) = iproto.readMessageBegin()
    # perf.end('wait')
    # perf.begin('recv')
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(iproto)
        iproto.readMessageEnd()
        x.args = (x.message,)
        raise x
    result = self._processor.recv_struct(procedure.results_struct, iproto)
    iproto.readMessageEnd()
    # perf.end('recv')
    # perf.end('call')
    if result.result is not None:
        return result.result
    # No plain result: a declared exception field may be set instead.
    for field in procedure.results_struct.fields:
        if getattr(result, field.name) is not None:
            raise getattr(result, field.name)
    return None
def round_robin(self, compress=None):
    """Round-trip a payload large enough to force big-frame encoding.

    Writes MAX_FRAME_SIZE bytes through THeaderTransport (optionally with
    a transform), checks the big-frame envelope when uncompressed, then
    reads the frame back and verifies the payload survived intact.
    """
    original = b'A' * MAX_FRAME_SIZE
    mb = TMemoryBuffer()
    trans = THeaderTransport(mb, client_type=CLIENT_TYPE.HEADER)
    trans.set_max_frame_size(MAX_FRAME_SIZE + MIN_HEADER_SIZE)
    if compress:
        trans.add_transform(compress)
    trans.write(original)
    trans.flush()
    frame = mb.getvalue()
    # Cleanup the memory buffer
    mb.close()
    del mb
    if compress is None:
        # Partial Decode the frame and see if its correct size wise:
        # first 4 bytes must be the big-frame magic, the following 8 bytes
        # the 64-bit frame length.
        sz = struct.unpack('!I', frame[:4])[0]
        self.assertEqual(sz, BIG_FRAME_MAGIC)
        sz = struct.unpack('!Q', frame[4:12])[0]
        self.assertEqual(len(frame), sz + 12)
    # Read it back
    mb = TMemoryBuffer(frame)
    trans = THeaderTransport(mb, client_type=CLIENT_TYPE.HEADER)
    trans.set_max_frame_size(len(frame))
    trans.readFrame(0)
    result = trans.read(MAX_FRAME_SIZE)
    mb.close()
    del mb
    self.assertEqual(result, original, 'round-robin different from original')
def DeserializeThriftCall(self, buf):
    """Deserialize a reply stream into a MethodReturnMessage.

    Args:
      buf - The buffer to read from.

    Returns:
      A MethodReturnMessage carrying the return value, a declared
      exception, or a TApplicationException.
    """
    thrift_buffer = TMemoryBuffer()
    # NOTE(review): assigns the transport's private _buffer directly to
    # avoid copying; this relies on TMemoryBuffer internals.
    thrift_buffer._buffer = buf
    protocol = self._protocol_factory.getProtocol(thrift_buffer)
    (fn_name, msg_type, seq_id) = protocol.readMessageBegin()
    if msg_type == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(protocol)
        protocol.readMessageEnd()
        return MethodReturnMessage(error=x)
    # Look up the generated "<method>_result" struct for this call.
    result_cls = self._FindClass('%s_result' % fn_name)
    if result_cls:
        result = result_cls()
        result.read(protocol)
    else:
        result = None
    protocol.readMessageEnd()
    if not result:
        # No result struct (e.g. oneway): nothing further to extract.
        return MethodReturnMessage()
    if getattr(result, 'success', None) is not None:
        return MethodReturnMessage(return_value=result.success)
    # No success value: scan the declared exception fields for one that
    # was set by the server.
    result_spec = getattr(result_cls, 'thrift_spec', None)
    if result_spec:
        exceptions = result_spec[1:]
        for e in exceptions:
            attr_val = getattr(result, e[2], None)
            if attr_val is not None:
                return MethodReturnMessage(error=attr_val)
    return MethodReturnMessage(TApplicationException(
        TApplicationException.MISSING_RESULT, "%s failed: unknown result" % fn_name))
def decode_helper(self, obj, split=1.0):
    """Encode `obj` with the slow protocol, then fastproto-decode it back
    through a read buffer that is split at `split` of the payload, forcing
    one refill when split < 1.0."""
    trans = TMemoryBuffer()
    if self.PROTO == 0:
        proto = TBinaryProtocol.TBinaryProtocol(trans)
    else:
        proto = TCompactProtocol.TCompactProtocol(trans)
    obj.write(proto)
    # The first `index` bytes are available immediately; the remainder
    # arrives via the buffer's refill mechanism.
    index = int(split * len(trans.getvalue()))
    trans = ReadOnlyBufferWithRefill(index, trans.getvalue())
    obj_new = obj.__class__()
    fastproto.decode(obj_new, trans, [obj.__class__, obj.thrift_spec, obj.isUnion()], utf8strings=0, protoid=self.PROTO)
    self.assertEqual(obj, obj_new)
    # Verify the entire buffer is read
    self.assertEqual(len(trans._readBuffer.read()), 0)
    if split != 1.0:
        self.assertEqual(1, trans.refill_called)
def call_processor(self, input, headers, client_type, protocol_type, context_data):
    """Bridge a header-stripped request from C++ into the python processor
    and return the header-stripped response bytes (None if it raised)."""
    try:
        # The input string has already had the header removed, but
        # the python processor will expect it to be there. In
        # order to reconstitute the message with headers, we use
        # the THeaderProtocol object to write into a memory
        # buffer, then pass that buffer to the python processor.
        write_buf = TMemoryBuffer()
        trans = THeaderTransport(write_buf)
        # NOTE(review): pokes THeaderTransport's name-mangled private
        # attributes to force the client type and headers.
        trans._THeaderTransport__client_type = client_type
        trans._THeaderTransport__write_headers = headers
        trans.set_protocol_id(protocol_type)
        trans.write(input)
        trans.flush()
        prot_buf = TMemoryBuffer(write_buf.getvalue())
        prot = THeaderProtocol(prot_buf, client_types=[client_type])
        ctx = TCppConnectionContext(context_data)
        self.processor.process(prot, prot, ctx)
        # Check for empty result. If so, return an empty string
        # here. This is probably a oneway request, but we can't
        # reliably tell. The C++ code does basically the same
        # thing.
        response = prot_buf.getvalue()
        if len(response) == 0:
            return response
        # And on the way out, we need to strip off the header,
        # because the C++ code will expect to add it.
        read_buf = TMemoryBuffer(response)
        trans = THeaderTransport(read_buf, client_types=[client_type])
        trans.readFrame(len(response))
        return trans.cstringio_buf.read()
    except:
        # Don't let exceptions escape back into C++
        traceback.print_exc()
def setUp(self):
    """Create two buffers, transports, and protocols.

    self._h_trans uses THeaderTransport
    self._f_trans uses TFuzzyHeaderTransport
    """
    # fuzz_fields is declared on the concrete test subclass.
    cls = self.__class__
    # THeaderTransport attributes
    self._h_buf = TMemoryBuffer()
    self._h_trans = THeaderTransport(self._h_buf)
    self._h_prot = THeaderProtocol(self._h_trans)
    # TFuzzyHeaderTransport attributes
    self._f_buf = TMemoryBuffer()
    self._f_trans = TFuzzyHeaderTransport(
        self._f_buf, fuzz_fields=cls.fuzz_fields,
        fuzz_all_if_empty=False, verbose=False)
    self._f_prot = THeaderProtocol(self._f_trans)
def schedule_timeout(self, fname, seqid):
    """Queue a delayed TIMEOUT exception reply for a pending call.

    No-op when no timeout is configured for `fname`; otherwise a task is
    created that delivers a TApplicationException frame after the
    configured delay unless the real reply cancels it first.
    """
    timeout = self.timeouts[fname]
    if not timeout:
        return
    reply_buf = TMemoryBuffer()
    header_trans = THeaderTransport(reply_buf)
    oprot = THeaderProtocol(header_trans)
    timeout_exc = TApplicationException(
        TApplicationException.TIMEOUT,
        "Call to {} timed out".format(fname),
    )
    oprot.writeMessageBegin(fname, TMessageType.EXCEPTION, seqid)
    timeout_exc.write(oprot)
    oprot.writeMessageEnd()
    header_trans.flush()
    timeout_task = self.loop.create_task(
        self.message_received(reply_buf.getvalue(), delay=timeout))
    self.update_pending_tasks(seqid, timeout_task)
def handle_request(self, event):
    """Decode a zeromq message, run it through the thrift processor, and
    emit the serialized reply back over zeromq."""
    import logging  # local import: this variant's module may not import it
    # 1. Wrap the raw zeromq message bytes as thrift protocols.
    trans_input = TMemoryBuffer(event.msg)
    trans_output = TMemoryBuffer()
    proto_input = self.proto_factory_input.getProtocol(trans_input)
    proto_output = self.proto_factory_output.getProtocol(trans_output)
    # 2. Hand off to the processor.
    try:
        self.processor.process(proto_input, proto_output)
        # 3. Convert the thrift result back into a zeromq-style reply.
        msg = trans_output.getvalue()
        # print "Return Msg: ", msg, event.id
        self.events.emit(msg, event.id)
    except Exception as e:
        # The server must not crash on a bad request.  Use logging (with
        # the stack trace) instead of the py2-only print statement, for
        # consistency with the profiling variant of this handler.
        logging.exception("Exception: %s", e)
def message_received(self, frame):
    """Process one framed request and write back the reply (coroutine).

    Accepts both header-framed and plain framed requests; closes the
    transport on any processing error.
    """
    # We support the deprecated FRAMED transport for old fb303
    # clients that were otherwise failing miserably.
    client_types = {
        THeaderTransport.HEADERS_CLIENT_TYPE,
        THeaderTransport.FRAMED_DEPRECATED,
    }
    tm = TMemoryBuffer(frame)
    # The same protocol serves as both input and output; the reply bytes
    # accumulate in `tm` and are retrieved with getvalue() below.
    prot = THeaderProtocol(tm, client_types=client_types)
    try:
        yield from self.processor.process(
            prot, prot, self.server_context,
        )
        msg = tm.getvalue()
        # Nothing buffered means no reply is expected.
        if len(msg) > 0:
            self.transport.write(msg)
    except Exception:
        logger.exception("Exception while processing request")
        self.transport.close()
def test_forward_compatibility_nested(self):
    """Integer-valued nested features encoded with the old struct must
    decode as floats into the new struct when forward_compatibility is
    enabled, and then round-trip cleanly without the flag."""
    obj = OldStructureNested()
    obj.features = [{}]
    obj.features[0][1] = 314
    obj.features[0][2] = 271
    trans = TMemoryBuffer()
    proto = self.createProto(trans)
    obj.write(proto)
    # Decode the old bytes into the new structure with compat conversion.
    obj_new = NewStructureNested()
    trans = TMemoryBuffer(trans.getvalue())
    proto = proto.__class__(trans)
    fastproto.decode(obj_new, trans, [obj_new.__class__, obj_new.thrift_spec, obj_new.isUnion()], utf8strings=0, protoid=self.PROTO, forward_compatibility=True)
    self.assertAlmostEqual(obj_new.features[0][1], 314.0)
    self.assertAlmostEqual(obj_new.features[0][2], 271.0)
    # Re-encode the converted struct and decode again WITHOUT the compat
    # flag: the values must already be in the new representation.
    trans2 = TMemoryBuffer()
    proto2 = self.createProto(trans2)
    obj_new.write(proto2)
    obj_new2 = NewStructureNested()
    trans2 = TMemoryBuffer(trans2.getvalue())
    proto2 = proto2.__class__(trans2)
    fastproto.decode(obj_new2, trans2, [obj_new2.__class__, obj_new2.thrift_spec, obj_new2.isUnion()], utf8strings=0, protoid=self.PROTO)
    self.assertAlmostEqual(obj_new2.features[0][1], 314.0)
    self.assertAlmostEqual(obj_new2.features[0][2], 271.0)
def test_no_headers(self):
    """Without trace headers the server must fall back to placeholder ids."""
    client_memory_trans = TMemoryBuffer()
    client_prot = THeaderProtocol(client_memory_trans)
    client = BaseplateService.Client(client_prot)
    try:
        client.is_healthy()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed.
        pass  # we don't have a test response for the client
    self.itrans._readBuffer = StringIO(client_memory_trans.getvalue())
    self.processor.process(self.iprot, self.oprot, self.server_context)
    self.assertEqual(self.observer.on_root_span_created.call_count, 1)
    context, root_span = self.observer.on_root_span_created.call_args[0]
    self.assertEqual(root_span.trace_id, "no-trace")
    self.assertEqual(root_span.parent_id, "no-parent")
    self.assertEqual(root_span.id, "no-span")
    mock_root_observer = self.observer.on_root_span_created.return_value
    self.assertTrue(mock_root_observer.on_start.called)
    self.assertTrue(mock_root_observer.on_stop.called)
def call_processor(self, input, headers, client_type, protocol_type, context_data, callback):
    """Async variant of the C++-to-python processor bridge.

    The response is delivered through `callback` once processing (which
    may return a Future) completes.
    """
    try:
        # The input string has already had the header removed, but
        # the python processor will expect it to be there. In
        # order to reconstitute the message with headers, we use
        # the THeaderProtocol object to write into a memory
        # buffer, then pass that buffer to the python processor.
        write_buf = TMemoryBuffer()
        trans = THeaderTransport(write_buf)
        # NOTE(review): pokes THeaderTransport's name-mangled private
        # attributes to force the client type and headers.
        trans._THeaderTransport__client_type = client_type
        trans._THeaderTransport__write_headers = headers
        trans.set_protocol_id(protocol_type)
        trans.write(input)
        trans.flush()
        prot_buf = TMemoryBuffer(write_buf.getvalue())
        prot = THeaderProtocol(prot_buf, client_types=[client_type])
        ctx = TCppConnectionContext(context_data)
        ret = self.processor.process(prot, prot, ctx)
        done_callback = partial(_ProcessorAdapter.done, prot_buf=prot_buf, client_type=client_type, callback=callback)
        # This future is created by and returned from the processor's
        # ThreadPoolExecutor, which keeps a reference to it. So it is
        # fine for this future to end its lifecycle here.
        if isinstance(ret, Future):
            ret.add_done_callback(lambda x, d=done_callback: d())
        else:
            done_callback()
    except:
        # Don't let exceptions escape back into C++
        traceback.print_exc()
def SerializeThriftCall(self, msg, buf):
    """Serialize a MethodCallMessage to a stream.

    Args:
      msg - The MethodCallMessage to serialize.
      buf - The buffer to serialize into.
    """
    thrift_buffer = TMemoryBuffer()
    # NOTE(review): writes through the transport's private _buffer so the
    # output lands directly in `buf`; relies on TMemoryBuffer internals.
    thrift_buffer._buffer = buf
    protocol = self._protocol_factory.getProtocol(thrift_buffer)
    method, args, kwargs = msg.method, msg.args, msg.kwargs
    # A method with no generated "<name>_result" class is oneway.
    is_one_way = self._FindClass('%s_result' % method) is None
    args_cls = self._FindClass('%s_args' % method)
    if not args_cls:
        raise AttributeError('Unable to find args class for method %s' % method)
    protocol.writeMessageBegin(
        msg.method, TMessageType.ONEWAY if is_one_way else TMessageType.CALL, self._seq_id)
    thrift_args = args_cls(*args, **kwargs)
    thrift_args.write(protocol)
    protocol.writeMessageEnd()
def test_no_trace_headers(self, getrandbits):
    """Without trace headers the server should mint a fresh trace id
    (stubbed here via the patched getrandbits)."""
    getrandbits.return_value = 1234
    client_memory_trans = TMemoryBuffer()
    client_prot = THeaderProtocol(client_memory_trans)
    client = BaseplateService.Client(client_prot)
    try:
        client.is_healthy()
    except Exception:
        # Narrowed from a bare `except:`; only the read of the missing
        # response is expected to fail here.
        pass  # we don't have a test response for the client
    self.itrans._readBuffer = StringIO(client_memory_trans.getvalue())
    self.processor.process(self.iprot, self.oprot, self.server_context)
    self.assertEqual(self.observer.on_server_span_created.call_count, 1)
    context, server_span = self.observer.on_server_span_created.call_args[0]
    # A brand-new trace: span id equals trace id and there is no parent.
    self.assertEqual(server_span.trace_id, 1234)
    self.assertEqual(server_span.parent_id, None)
    self.assertEqual(server_span.id, 1234)
    self.assertTrue(self.server_observer.on_start.called)
    self.assertTrue(self.server_observer.on_finish.called)
def handle_stream(self, stream, address):
    """Serve thrift requests for one tornado client connection until the
    stream closes or processing raises."""
    host, port = address
    trans = TTornadoStreamTransport(host=host, port=port, stream=stream,
                                    io_loop=self.io_loop)
    oprot = self._oprot_factory.getProtocol(trans)
    try:
        while not trans.stream.closed():
            frame = yield trans.readFrame()
            iprot = self._iprot_factory.getProtocol(TMemoryBuffer(frame))
            yield self._processor.process(iprot, oprot)
    except Exception:
        logging.exception('thrift exception in handle_stream')
        trans.close()
    logging.info('client disconnected %s:%d', host, port)
def encode_helper(self, obj):
    """fastproto-encode `obj`, decode it with the slow protocol selected
    by self.PROTO, and assert the round trip is lossless."""
    encoded = fastproto.encode(
        obj,
        [obj.__class__, obj.thrift_spec, obj.isUnion()],
        utf8strings=0,
        protoid=self.PROTO,
    )
    read_trans = TMemoryBuffer(encoded)
    proto_cls = (TBinaryProtocol.TBinaryProtocol
                 if self.PROTO == 0
                 else TCompactProtocol.TCompactProtocol)
    roundtripped = obj.__class__()
    roundtripped.read(proto_cls(read_trans))
    self.assertEqual(obj, roundtripped)
def done(prot_buf, client_type, callback):
    """Deliver the processor's buffered response through `callback`,
    stripping the header framing that the C++ side will re-add."""
    try:
        response = prot_buf.getvalue()
        if len(response) == 0:
            # Empty response: pass it through untouched.
            callback.call(response)
        else:
            # And on the way out, we need to strip off the header,
            # because the C++ code will expect to add it.
            read_buf = TMemoryBuffer(response)
            trans = THeaderTransport(read_buf, client_types=[client_type])
            trans.set_max_frame_size(MAX_BIG_FRAME_SIZE)
            trans.readFrame(len(response))
            callback.call(trans.cstringio_buf.read())
    except:  # noqa
        # Don't let exceptions escape back into C++
        traceback.print_exc()
def __init__(self, source):
    """Parse a thrift-compact-encoded SELECT result from `source`.

    Reads the variable tuple first, then converts the remaining bindings
    via _read_and_convert.
    """
    super(ThriftResult, self).__init__('SELECT')
    transport = TMemoryBuffer(source.read())
    protocol = TCompactProtocolAccelerated(transport)
    var_tuple = RDF_VarTuple()
    var_tuple.read(protocol)
    self.vars = [Variable(v.name) for v in var_tuple.vars]
    self.bindings = self._read_and_convert(protocol)
def parse_message_by_byte(byte, offset):
    """
    Parse a WxMessage out of raw bytes.

    :param byte: raw byte buffer
    :param offset: offset at which to start parsing
    :return: (WxMessage, total length) on success, or (None, 0) when the
             buffer does not yet contain a complete message or on error
    """
    try:
        int_len = 4
        if not byte or len(byte) < int_len:
            return None, 0
        msg = WxMessage()
        total_len = struct.unpack_from('i', byte, offset)[0]
        # Wait until the whole message has arrived before decoding.
        if len(byte) < total_len + int_len:
            return None, 0
        offset += int_len
        head_len = struct.unpack_from('i', byte, offset)[0]
        offset += int_len
        msg.headBuf = struct.unpack_from('<%ds' % (head_len, ), byte, offset)[0]
        # The head segment is a thrift-binary-encoded WxHead.
        head = WxHead()
        t_memory_o = TMemoryBuffer(msg.headBuf)
        t_binary_protocol_o = TBinaryProtocol(t_memory_o)
        head.read(t_binary_protocol_o)
        if head:
            msg.head = head
        offset += head_len
        body_len = struct.unpack_from('i', byte, offset)[0]
        offset += int_len
        msg.bodyBuf = struct.unpack_from('<%ds' % (body_len, ), byte, offset)[0]
        return msg, total_len
    except BufferError as e:
        log.err()
        return None, 0
def request(self, context, payload):
    """
    Write the current buffer and return the response.

    HTTP error codes are translated into typed TTransportExceptions; a
    4-byte (length-only) response means a one-way call with no reply.
    """
    self._preflight_request_check(payload)
    encoded = base64.b64encode(payload)
    request = HTTPRequest(self._url, method='POST', body=encoded,
                          headers=self._headers,
                          request_timeout=context.timeout / 1000.0)
    try:
        response = yield self._http.fetch(request)
    except HTTPError as e:
        if e.code == httplib.REQUEST_ENTITY_TOO_LARGE:
            raise TTransportException(
                type=TTransportExceptionType.RESPONSE_TOO_LARGE,
                message='response was too large')
        # Tornado HttpClient uses 599 as the HTTP code to indicate a
        # request timeout
        if e.code == 599:
            raise TTransportException(
                type=TTransportExceptionType.TIMED_OUT,
                message='request timed out')
        message = 'response errored with code {0} and body {1}'.format(
            e.code, e.message)
        raise TTransportException(type=TTransportExceptionType.UNKNOWN,
                                  message=message)
    decoded = base64.b64decode(response.body)
    # A well-formed response always carries at least a 4-byte prefix.
    if len(decoded) < 4:
        raise TTransportException(type=TTransportExceptionType.UNKNOWN,
                                  message='invalid frame size')
    if len(decoded) == 4:
        # One-way method, drop response
        return
    # Strip the 4-byte prefix; tornado coroutines return via gen.Return.
    raise gen.Return(TMemoryBuffer(decoded[4:]))
def DealRecv(self):
    """Drain one packet from the dbc client and pretty-print its decoded
    header and body; dispatches ver_resp messages to deal_ver_resp.

    Returns True when idle or fully handled, False on an empty read, and
    None (implicitly) when header/body decoding fails.
    """
    if self.dbc_client.IsHaveData() == 0:
        return True
    recvdata = self.dbc_client.GetData()
    lenth = len(recvdata)
    if lenth <= 0:
        return False
    m = TMemoryBuffer(recvdata)
    p = TBinaryProtocol(m)
    packet_header_len, protocol_type = decode_packet_header(p)
    try:
        h = msg_header()
        h.read(p)
    except EOFError:
        print("Error: msg header decode failure")
        return
    print("package_len:")
    print(packet_header_len)
    # FIX: parenthesized form for consistency with the other prints in
    # this method (the bare statement form only parses on Python 2;
    # with a single argument both forms print identically there).
    print("msg header: ")
    pprint(vars(h), indent=4)
    msg_name = h.msg_name
    try:
        # Look up the generated "<msg_name>_body" struct; fall back to
        # the `empty` struct when the message carries no typed body.
        s = h.msg_name + "_body"
        if s in globals():
            t = globals()[s]
        else:
            t = empty
        body = t()
        body.read(p)
    except EOFError:
        print("Error: msg body decode failure")
        return
    print("body: ")
    pprint(vars(body), indent=4, width=24)
    if msg_name == "ver_resp":
        self.deal_ver_resp()
    return True
def test_process_missing_function(self):
    """A request naming an unregistered method must produce a
    TApplicationException frame reporting the unknown function."""
    processor = FBaseProcessor()
    # Frame layout: 4-byte size prefix, frugal headers (_cid/_opid/
    # _timeout), then a TBinaryProtocol CALL envelope for "basePing".
    frame = bytearray(
        b'\x00\x00\x00\x004\x00\x00\x00\x04_cid\x00\x00\x00\x06someid'
        b'\x00\x00\x00\x05_opid\x00\x00\x00\x011\x00\x00\x00\x08_timeout'
        b'\x00\x00\x00\x045000'  # End of context
        b'\x80\x01\x00\x02\x00\x00\x00\x08basePing\x00\x00\x00\x00\x00')
    itrans = TMemoryBuffer(value=frame)
    iprot = FProtocolFactory(TBinaryProtocolFactory()).get_protocol(itrans)
    otrans = TMemoryOutputBuffer(1000)
    oprot = FProtocolFactory(TBinaryProtocolFactory()).get_protocol(otrans)
    yield processor.process(iprot, oprot)
    expected_response = bytearray(
        b'\x80\x01\x00\x03\x00\x00\x00\x08basePing\x00\x00'
        b'\x00\x00\x0b\x00\x01\x00\x00\x00\x1aUnknown function: basePing'
        b'\x08\x00\x02\x00\x00\x00\x01\x00')
    # The first 41 bytes (framing/headers) are not compared; only the
    # exception payload is checked.
    self.assertEqual(otrans.getvalue()[41:], expected_response)
async def msg_handler(frame, _):
    """Stomp frame callback: strip the 4-byte length prefix and hand the
    payload to `callback`, awaiting the result if it is a coroutine."""
    _logger.debug('received stomp message on topic \'{}\''.format(
        self._destination))
    try:
        ret = callback(TMemoryBuffer(frame.body[4:]))
        if inspect.iscoroutine(ret):
            await ret
        _logger.debug(
            'finished processing stomp message from topic \'{}\''.
            format(self._destination))
        # aiostomp acks message automatically in client-individual mode
        # as long as handler function returns non-falsy value
        return True
    except Exception:
        # catch exceptions so the stomp library doesn't barf and keeps
        # processing messages
        _logger.exception(
            'unable to process stomp message from topic \'{}\''.format(
                self._destination))
def test_process(self):
    """process() must route the basePing frame to the registered handler
    with the op id parsed from the frame headers."""
    processor = FBaseProcessor()
    proc = Mock()
    future = Future()
    future.set_result(None)
    proc.process.return_value = future
    processor.add_to_processor_map("basePing", proc)
    # Frame: size prefix + `_opid` header + binary-protocol basePing CALL.
    frame = bytearray(
        b'\x00\x00\x00\x00\x0e\x00\x00\x00\x05_opid\x00\x00\x00\x011'
        b'\x80\x01\x00\x02\x00\x00\x00\x08basePing\x00\x00\x00\x00\x00')
    itrans = TMemoryBuffer(value=frame)
    iprot = FProtocolFactory(TBinaryProtocolFactory()).get_protocol(itrans)
    oprot = Mock()
    yield processor.process(iprot, oprot)
    assert (proc.process.call_args)
    args, _ = proc.process.call_args
    # The context forwarded to the handler carries op id 1 from the headers.
    assert (args[0]._get_op_id() == 1)
    assert (args[1] == iprot)
    assert (args[2] == oprot)
async def test_process(self):
    """Async variant: process() must route the basePing frame to the
    registered handler and set the op id response header."""
    processor = FBaseProcessor()
    proc = Mock()
    future = Future()
    future.set_result(None)
    proc.process.return_value = future
    processor.add_to_processor_map("basePing", proc)
    # Frame: size prefix + `_opid` header + binary-protocol basePing CALL.
    frame = bytearray(
        b'\x00\x00\x00\x00\x0e\x00\x00\x00\x05_opid\x00\x00\x00\x011'
        b'\x80\x01\x00\x02\x00\x00\x00\x08basePing\x00\x00\x00\x00\x00')
    itrans = TMemoryBuffer(value=frame)
    iprot = FProtocolFactory(TBinaryProtocolFactory()).get_protocol(itrans)
    oprot = Mock()
    await processor.process(iprot, oprot)
    assert (proc.process.call_args)
    args, _ = proc.process.call_args
    # The op id from the request headers is echoed as a response header.
    self.assertEqual(args[0].get_response_header(_OPID_HEADER), '1')
    assert (args[1] == iprot)
    assert (args[2] == oprot)
async def request(self, context: FContext, payload):
    """Send `payload` and await the matching response, keyed by op id.

    Raises TTransportException when a request with the same op id is
    already in flight, or when the context timeout elapses first.
    """
    self._preflight_request_check(payload)
    op_id = str(context._get_op_id())
    future = asyncio.Future()
    async with self._futures_lock:
        if op_id in self._futures:
            raise TTransportException(
                type=TTransportExceptionType.UNKNOWN,
                message="request already in flight for context")
        self._futures[op_id] = future
    try:
        # timeout is in milliseconds; async_timeout wants seconds.
        with async_timeout.timeout(context.timeout / 1000):
            await self.flush(payload)
            return TMemoryBuffer(await future)
    except asyncio.TimeoutError:
        raise TTransportException(type=TTransportExceptionType.TIMED_OUT,
                                  message='request timed out') from None
    finally:
        # Registration happened before the try block, so the future is
        # guaranteed to be ours to remove here.
        async with self._futures_lock:
            del self._futures[op_id]
def read_communication_from_buffer(buf, add_references=True):
    '''
    Deserialize buf (a binary string) and return resulting communication.
    Add references if requested.

    Args:
        buf (str): String representing communication encoded from thrift
        add_references (bool): If True, calls
            :func:`concrete.util.references.add_references_to_communication`
            on the :class:`.Communication` read from buffer

    Returns:
        Communication: Communication read from buffer
    '''
    comm = Communication()
    comm.read(factory.createProtocol(TMemoryBuffer(buf)))
    if add_references:
        add_references_to_communication(comm)
    return comm
def setUp(self):
    """Build a baseplate-instrumented TestService processor with mocked
    observers, a mocked logger, and a secrets store backed by a mocked
    file watcher."""
    self.otrans = TMemoryBuffer()
    self.oprot = THeaderProtocolFactory().getProtocol(self.otrans)
    self.observer = mock.Mock(spec=BaseplateObserver)
    self.server_observer = mock.Mock(spec=ServerSpanObserver)
    # Attach the mocked span observer to whatever server span gets created.
    def _register_mock(context, server_span):
        server_span.register(self.server_observer)
    self.observer.on_server_span_created.side_effect = _register_mock
    self.logger = mock.Mock(spec=logging.Logger)
    # Fake secrets data so EdgeRequestContextFactory can validate tokens.
    mock_filewatcher = mock.Mock(spec=FileWatcher)
    mock_filewatcher.get_data.return_value = {
        "secrets": {
            "secret/authentication/public-key": {
                "type": "versioned",
                "current": AUTH_TOKEN_PUBLIC_KEY,
            },
        },
        "vault": {
            "token": "test",
            "url": "http://vault.example.com:8200/",
        }
    }
    self.secrets = store.SecretsStore("/secrets")
    self.secrets._filewatcher = mock_filewatcher
    baseplate = Baseplate()
    baseplate.register(self.observer)
    self.edge_context_factory = EdgeRequestContextFactory(self.secrets)
    handler = TestHandler()
    processor = TestService.Processor(handler)
    self.processor = baseplateify_processor(processor, self.logger, baseplate, self.edge_context_factory)
async def loginWithQrcode(self, path=None):
    """Log in via QR code.

    Prints a QR login URL, waits for phone confirmation, decrypts the
    E2EE keychain, performs loginZ, stores the auth token (optionally
    writing it to `path`), then runs post-login setup.
    """
    self.url(config.MAIN_PATH)
    qr = await self.call('getAuthQrcode', True, "AsyncLine", "")
    p_key = generate_asymmetric_keypair()
    secret_query = create_secret_query(p_key.public_key)
    print(f"line://au/q/{qr.verifier}?secret={secret_query}&e2eeVersion=1")
    # Blocks until the user confirms the login on their phone.
    r = self.waitForPhoneConfirm(qr.verifier)
    vr = r.json()
    key_chain = vr['result']['metadata']['encryptedKeyChain']
    public_key = vr['result']['metadata']['publicKey']
    data_key = decrypt_keychain(p_key, key_chain, public_key)
    # The decrypted blob is a thrift-compact-encoded E2EEKeyChain.
    keychain = E2EEKeyChain()
    tbuffer = TMemoryBuffer(data_key)
    protocol = TCompactProtocol(tbuffer)
    keychain.read(protocol)
    self.url(config.AUTH_PATH)
    rq = LoginRequest(
        type=LoginType.QRCODE,
        identityProvider=IdentityProvider.LINE,
        keepLoggedIn=True,
        accessLocation=config.LOGIN_LOCATION,
        systemName="AsyncLine",
        verifier=vr["result"]["verifier"],
        secret=p_key.public_key,
        e2eeVersion=2
    )
    lr = await self.call('loginZ', rq)
    self.updateHeaders({
        'X-Line-Access': lr.authToken
    })
    self.authToken = lr.authToken
    self.cert = lr.certificate
    # Persist the token so later runs can skip the QR flow.
    if path:
        with open(path, "w") as fp:
            fp.write(lr.authToken)
    await self.afterLogin()
def message_received(self, frame, delay=0):
    """Dispatch one reply frame to the matching recv_* client method.

    When `delay` is non-zero this frame is a scheduled timeout reply:
    sleep first.  Otherwise cancel any pending timeout task registered
    for this sequence id before dispatching.
    """
    tmi = TMemoryBuffer(frame)
    iprot = THeaderProtocol(tmi)
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if delay:
        yield from asyncio.sleep(delay)
    else:
        try:
            timeout_task = self.pending_tasks.pop(rseqid)
        except KeyError:
            # Task doesn't have a timeout or has already been cancelled
            # and pruned from `pending_tasks`.
            pass
        else:
            timeout_task.cancel()
    method_name = "recv_" + fname.decode()
    method = getattr(self.client, method_name, None)
    if method is None:
        # BUG FIX: log the missing method's *name*; on this branch
        # `method` is always None, so the old message was useless.
        logger.error("Method %r is not supported", method_name)
        self.transport.abort()
    else:
        method(iprot, mtype, rseqid)
async def request(self, context: FContext, payload) -> TTransportBase:
    """
    Write the current buffer payload over the network and return the
    response.

    HTTP 413 and other error statuses are translated into typed
    TTransportExceptions; a 4-byte all-zero body means a one-way call.
    """
    self._preflight_request_check(payload)
    encoded = base64.b64encode(payload)
    status, text = await self._make_request(context, encoded)
    if status == 413:
        raise TTransportException(
            type=TTransportExceptionType.RESPONSE_TOO_LARGE,
            message='response was too large for the transport'
        )
    if status >= 300:
        raise TTransportException(
            type=TTransportExceptionType.UNKNOWN,
            message='request errored with code {0} and message {1}'.format(
                status, str(text)
            )
        )
    decoded = base64.b64decode(text)
    # A well-formed frame always carries at least a 4-byte length prefix.
    if len(decoded) < 4:
        raise TTransportException(type=TTransportExceptionType.UNKNOWN,
                                  message='invalid frame size')
    if len(decoded) == 4:
        # A length-only frame must be all zeros; anything else signals
        # truncated data.
        if any(decoded):
            raise TTransportException(type=TTransportExceptionType.UNKNOWN,
                                      message='missing data')
        # One-way method, drop response
        return
    # Strip the length prefix; hand the payload back as a readable buffer.
    return TMemoryBuffer(decoded[4:])
def post(self): self.set_header('content-type', 'application/x-frugal') # check for response size limit response_limit = 0 limit_header_name = "x-frugal-payload-limit" if self.request.headers.get(limit_header_name) is not None: response_limit = int(self.request.headers[limit_header_name]) # decode payload and process payload = base64.b64decode(self.request.body) iprot = self._protocol_factory.get_protocol(TMemoryBuffer(payload[4:])) # TODO could be better with this limit otrans = TMemoryOutputBuffer(0) oprot = self._protocol_factory.get_protocol(otrans) try: yield gen.maybe_future(self._processor.process(iprot, oprot)) except TApplicationException: # Continue so the exception is sent to the client pass except Exception: self.send_error(status_code=400) return # write back response output_data = otrans.getvalue() if len(output_data) > response_limit > 0: self.send_error(status_code=413) return output_payload = base64.b64encode(output_data) self.set_header('content-transfer-encoding', 'base64') self.write(output_payload)
def testNoInitialValue(self):
    """An empty TMemoryBuffer reads as empty and accepts writes."""
    buf = TMemoryBuffer()
    data = buf.read(5)
    buf.write(b"world")
    # assertEquals is a deprecated alias, removed in Python 3.12;
    # use assertEqual instead.
    self.assertEqual(data, b"")
    self.assertEqual(buf.getvalue(), b"world")
def testReadWrite(self):
    """Reads consume the initial value; writes accumulate in getvalue()."""
    buf = TMemoryBuffer(b"hello")
    data = buf.read(5)
    buf.write(b"world")
    # assertEquals is a deprecated alias, removed in Python 3.12;
    # use assertEqual instead.
    self.assertEqual(data, b"hello")
    self.assertEqual(buf.getvalue(), b"world")
def testClose(self):
    """Reading or writing a closed TMemoryBuffer raises RuntimeError."""
    stream = TMemoryBuffer(b"hello")
    stream.close()
    with self.assertRaises(RuntimeError):
        stream.read(5)
    with self.assertRaises(RuntimeError):
        stream.write(b"world")
if __name__ == '__main__': # 测试数据 #test_entity_extractor_info = test_entity_data.data_foo ifile = open("entity_extractor.pkb") test_entity_extractor_info = pickle.load(ifile) aa = json.loads(test_entity_extractor_info.entity_data) aa["registered_capital"] = "1234545" test_entity_extractor_info.entity_data = json.dumps(aa) print test_entity_extractor_info # test_entity_extractor_info = test_entity_data.data_ssgg #test_entity_extractor_info = test_entity_data.data_ktgg # 将EntityExtractorInfo序列化 tMemory_b = TMemoryBuffer() tBinaryProtocol_b = TBinaryProtocol(tMemory_b) test_entity_extractor_info.write(tBinaryProtocol_b) serialized_data = tMemory_b.getvalue() #logging.debug('serialized_data: %s' % repr(serialized_data)) # 使用beanstalk发出数据 # conn = beanstalkc.Connection('127.0.0.1', 11300) # conn.use('entity_info') # conn.put(serialized_data) # # logging.debug('data sent!') # # import time # time.sleep(0.1)
def to_bytes(obj):
    """Creates the standard binary representation of a thrift object."""
    transport = TMemoryBuffer()
    obj.write(TBinaryProtocol(transport))
    return transport.getvalue()
async def nats_callback(message):
    # Strip the 4-byte frame-length prefix before handing the payload to
    # the (possibly async) callback from the enclosing scope.
    result = callback(TMemoryBuffer(message.data[4:]))
    if inspect.iscoroutine(result):
        result = await result
    return result
def make_start_training_req(task_path):
    """Build a signed AI_TRAINING_NOTIFICATION_REQ thrift message.

    Reads ``key=value`` lines from the config file at *task_path*, fills a
    start_training_req_body, signs ``task_id + code_dir + nonce`` with
    ecdsa, serializes header + body with TBinaryProtocol, and returns the
    packed frame from ``pack_head``. Returns None on EOFError/IOError
    (Python 2 code — note the print statements).
    """
    try:
        ifd = open(task_path, 'r')
        result = {}
        # print("open file", task_path, "fd is", ifd)
        # Parse simple "key=value" lines, stripping trailing CR/LF.
        for strline in ifd.readlines():
            # print(strline)
            if not len(strline):
                continue
            temp_store = strline.split('=')
            result[temp_store[0]] = temp_store[1].strip("\n").strip("\r")
        ifd.close()
        m = TMemoryBuffer()
        p = TBinaryProtocol(m)
        msg_name = AI_TRAINING_NOTIFICATION_REQ
        nonce = get_random_id()
        head = msg_header(get_magic(), msg_name, nonce)
        # head.write(p)
        task_id = get_random_id()
        print("task_id: %s, nonce:%s" % (task_id, nonce))
        # select_mode=bytes(result["select_mode"])[0]
        select_mode = 0x00
        master = ""
        pns = result["peer_nodes_list"]
        peer_nodes_list = pns.split(",")
        # peer_node=gen_node_id()
        #peer_node_list=[]
        #peer_node_list.append(peer_node)
        server_specification = ""
        server_count = 0
        #training_engine = result["training_engine"]
        training_engine = result["training_engine"]
        code_dir = result["code_dir"]
        entry_file = result["entry_file"]
        data_dir = ""
        checkpoint_dir = ""
        hyper_parameters = ""
        req = start_training_req_body(task_id, select_mode, master,
                                      peer_nodes_list, server_specification,
                                      server_count, training_engine, code_dir,
                                      entry_file, data_dir, checkpoint_dir,
                                      hyper_parameters)
        # The signature covers task id, code dir and nonce concatenated.
        message = task_id + code_dir + nonce
        print("message:", message)
        sign_algo = "ecdsa"
        origin = get_node_id()
        # print("sign_origin:", origin)
        exten_info = {}
        exten_info["origin_id"] = origin
        exten_info["sign_algo"] = sign_algo
        exten_info["sign"] = dbc_sign(message)
        print("sign:", exten_info["sign"])
        head.exten_info = exten_info
        # Header first, then body, matching the wire format pack_head expects.
        head.write(p)
        req.write(p)
        p.writeMessageEnd()
        m.flush()
        return pack_head(m)
    except EOFError:
        print "Error: msg body decode failure"
        return
    except IOError:
        print "Error: IO Error"
        return
def writeToJSON(obj):
    """Serialize a thrift object with NomadJSONProtocol and return the bytes."""
    buf = TMemoryBuffer()
    obj.write(NomadJSONProtocol(buf))
    return buf.getvalue()
def call_processor(self, input, headers, client_type, protocol_type,
                   context_data, callback):
    """Bridge a request from the C++ server into the Python processor.

    Reconstitutes the header-stripped *input* into a full THeader frame,
    runs it through ``self.processor``, and reports the result via
    *callback* (through ``_ProcessorAdapter.done``). Records begin/end
    timestamps for ``self.observer`` when sampling says so.
    """
    try:
        # TCppServer threads are not created by Python so they are
        # missing settrace() hooks. We need to manually set the
        # hook here for things to work (e.g. coverage and pdb).
        if sys.gettrace() is None and threading._trace_hook is not None:
            sys.settrace(threading._trace_hook)

        # The input string has already had the header removed, but
        # the python processor will expect it to be there. In
        # order to reconstitute the message with headers, we use
        # the THeaderProtocol object to write into a memory
        # buffer, then pass that buffer to the python processor.
        should_sample = self._shouldSample()

        timestamps = CallTimestamps()
        timestamps.processBegin = 0
        timestamps.processEnd = 0
        if self.observer and should_sample:
            # Microsecond timestamp; only non-zero when sampling.
            timestamps.processBegin = int(time.time() * 10**6)

        write_buf = TMemoryBuffer()
        trans = THeaderTransport(write_buf)
        trans.set_max_frame_size(MAX_BIG_FRAME_SIZE)
        # NOTE: pokes THeaderTransport's name-mangled private state to
        # force the client type and headers onto the outgoing frame.
        trans._THeaderTransport__client_type = client_type
        trans._THeaderTransport__write_headers = headers
        trans.set_protocol_id(protocol_type)
        trans.write(input)
        trans.flush()

        prot_buf = TMemoryBuffer(write_buf.getvalue())
        prot = THeaderProtocol(prot_buf, client_types=[client_type])
        prot.trans.set_max_frame_size(MAX_BIG_FRAME_SIZE)

        ctx = TCppConnectionContext(context_data)

        # Same protocol serves as both input and output.
        ret = self.processor.process(prot, prot, ctx)

        done_callback = partial(_ProcessorAdapter.done,
                                prot_buf=prot_buf,
                                client_type=client_type,
                                callback=callback)
        if self.observer:
            if should_sample:
                timestamps.processEnd = int(time.time() * 10**6)

            # This only bumps counters if `processBegin != 0` and
            # `processEnd != 0` and these will only be non-zero if
            # we are sampling this request.
            self.observer.callCompleted(timestamps)

        # This future is created by and returned from the processor's
        # ThreadPoolExecutor, which keeps a reference to it. So it is
        # fine for this future to end its lifecycle here.
        if isinstance(ret, Future):
            ret.add_done_callback(lambda x, d=done_callback: d())
        else:
            done_callback()
    except:  # noqa
        # Don't let exceptions escape back into C++
        traceback.print_exc()
def serialize_one(feature_parameters):
    """Serialize *feature_parameters* to a single-line JSON string."""
    buf = TMemoryBuffer()
    feature_parameters.write(TSimpleJSONProtocol(buf))
    # Flatten to one line by dropping embedded newlines.
    text = buf.getvalue().decode("utf-8")
    return text.replace("\n", "")
class FuzzyTransportTest(object):
    """Test class that sets up a THeaderTransport and a TFuzzyHeaderTransport.

    Used for writing and comparing messages using both transports.
    Subclasses set ``fuzz_fields`` to choose which header fields get fuzzed.
    (Python 2 code: uses ``itertools.izip`` and ``sm.xrange``.)
    """

    # Header fields to fuzz; empty here, overridden by subclasses.
    fuzz_fields = []

    # Sample TestService method arguments covering lists of bools, ints,
    # floats, strings, nested lists, maps, and sets.
    sampleListStruct = ttypes.ListStruct(
        a=[True, False],
        b=[1, 2, 3],
        c=[1.2, 3.4],
        d=["ab", "cd"],
        e=[list(sm.xrange(n)) for n in sm.xrange(20)],
        f=[{
            1: 2
        }, {
            3: 4,
            5: 6
        }],
        g=[{"a", "b"}, {"c"}, set()])

    def setUp(self):
        """Create two buffers, transports, and protocols.

        self._h_trans uses THeaderTransport
        self._f_trans uses TFuzzyHeaderTransport
        """
        cls = self.__class__
        # THeaderTransport attributes (the reference, unfuzzed pipeline)
        self._h_buf = TMemoryBuffer()
        self._h_trans = THeaderTransport(self._h_buf)
        self._h_prot = THeaderProtocol(self._h_trans)
        # TFuzzyHeaderTransport attributes (the pipeline under test)
        self._f_buf = TMemoryBuffer()
        self._f_trans = TFuzzyHeaderTransport(self._f_buf,
                                              fuzz_fields=cls.fuzz_fields,
                                              fuzz_all_if_empty=False,
                                              verbose=False)
        self._f_prot = THeaderProtocol(self._f_trans)

    def writeObject(self, obj=sampleListStruct):
        """Write an object to the test and reference protocols.

        Return the contents of both buffers.
        """
        obj.write(self._h_prot)
        obj.write(self._f_prot)
        self._h_trans.flush()
        self._f_trans.flush()
        h_buf = self._h_buf.getvalue()
        f_buf = self._f_buf.getvalue()
        return h_buf, f_buf

    def differentIndices(self, header, fuzzy):
        """Return a list of byte positions at which two messages' bytes differ.

        Header should be the contents of self._h_buf
        Fuzzy should be the contents of self._f_buf
        """
        indices = []
        for i, (h, f) in enumerate(itertools.izip(header, fuzzy)):
            if h != f:
                indices.append(i)
        return indices

    def assertEqualsExceptIndices(self, header, fuzzy, indices):
        """Assert that the buffers `header` and `fuzzy` are equal,
        except possibly at the byte positions included in `indices`.

        This ensures that the message produced by TFuzzyHeaderProtocol
        (fuzzy) is equal to the message produced by THeaderProtocol (header),
        except at the byte positions that are expected to be fuzzed."""
        self.assertEquals(len(header), len(fuzzy))
        for diff in self.differentIndices(header, fuzzy):
            self.assertIn(diff, indices)