def question4(out64, out32and32):
    """XOR two hex-encoded strings byte-by-byte and return the hex-encoded result.

    If the inputs decode to different lengths, the result is truncated to the
    shorter one (zip semantics).
    """
    left = binascii.a2b_hex(out64)
    right = binascii.a2b_hex(out32and32)
    xored = bytearray(a ^ b for a, b in zip(left, right))
    return binascii.b2a_hex(xored)
def create_predict_response(ret, req_id_map, message, code, context=None):
    """
    Create inference response.

    :param ret: per-request prediction values keyed like req_id_map (None
        signals an error response; "error" is emitted per request)
    :param req_id_map: mapping of index -> request id string
    :param message: human-readable status message
    :param code: numeric status code
    :param context: optional context providing per-request content types
    :return: encoded response as a bytearray
    """
    def _i32(value):
        return struct.pack('!i', value)

    def _blob(payload):
        # Length-prefixed byte string.
        return _i32(len(payload)) + payload

    msg = bytearray()
    msg += _i32(code)
    msg += _blob(message.encode("utf-8"))

    for idx in req_id_map:
        msg += _blob(req_id_map[idx].encode('utf-8'))

        # Content-Type: a zero length means "none".
        content_type = None
        if context is not None:
            content_type = context.get_response_content_type(req_id_map[idx])
        if content_type:
            msg += _blob(content_type.encode('utf-8'))
        else:
            msg += _i32(0)  # content_type

        if ret is None:
            msg += _blob(b"error")
            continue

        val = ret[idx]
        if isinstance(val, str):
            msg += _blob(val.encode("utf-8"))
        elif isinstance(val, (bytes, bytearray)):
            msg += _blob(val)
        else:
            try:
                msg += _blob(json.dumps(val, indent=2).encode("utf-8"))
            except TypeError:
                logging.warning("Unable to serialize model output.", exc_info=True)
                return create_predict_response(
                    None, req_id_map, "Unsupported model output data type.", 503)

    msg += _i32(-1)  # End of list
    return msg
def encode_response_headers(resp_hdr_map):
    """Serialize a header map: entry count, then length-prefixed UTF-8 key/value pairs."""
    out = bytearray()
    out += struct.pack('!i', len(resp_hdr_map))
    for key, value in resp_hdr_map.items():
        for part in (key.encode('utf-8'), value.encode('utf-8')):
            out += struct.pack('!i', len(part))
            out += part
    return out
def buildAuthXmlFederated(username, password, stsurl):
    """
    Build the federated STS auth XML with a 60-second validity window.

    :param username: account user name inserted into the template
    :param password: account password inserted into the template
    :param stsurl: STS endpoint URL inserted into the template
    :return: the populated ``authXmlFederated`` template string
    """
    import secrets  # local import keeps the security fix self-contained

    timestamp = time.time()
    ctime = time.gmtime(timestamp)
    etime = time.gmtime(timestamp + 60)  # token valid for one minute
    ctime_str = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", ctime)
    etime_str = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", etime)
    # SECURITY FIX: use a CSPRNG for the nonce. `random.getrandbits` is a
    # predictable Mersenne Twister and must not produce security tokens.
    nonce = base64.standard_b64encode(secrets.token_bytes(32))
    return authXmlFederated.format(stsurl, ctime_str, etime_str, username,
                                   password, nonce.decode('utf-8'))
def _retrieve_buffer(conn, length): data = bytearray() while length > 0: pkt = conn.recv(length) if len(pkt) == 0: logging.info("Frontend disconnected.") sys.exit(0) data += pkt length -= len(pkt) return data
def _retrieve_buffer(conn, length):
    """
    Read exactly *length* bytes from *conn*.

    :param conn: socket-like object exposing ``recv()``
    :param length: number of bytes to read; must not exceed MAX_BUFFER_SIZE
    :return: bytearray of exactly *length* bytes
    :raises ValueError: if *length* exceeds MAX_BUFFER_SIZE
    """
    if length > MAX_BUFFER_SIZE:
        raise ValueError("Exceed max buffer size: {}".format(length))
    data = bytearray()
    while length > 0:
        pkt = conn.recv(length)
        if len(pkt) == 0:
            # Peer closed the connection; terminate the worker cleanly.
            logging.info("Frontend disconnected.")
            # BUG FIX: the bare `exit()` is a site.py convenience that may be
            # absent (python -S, frozen apps); use sys.exit() instead — this
            # also matches the sibling implementation of this function.
            sys.exit(0)
        data += pkt
        length -= len(pkt)
    return data
def callback(data):
    """Pack the first two joystick axes as floats, send them over the serial
    port, and read back a 4-byte reply (with debug prints along the way)."""
    axes = [data.axes[0], data.axes[1]]
    rospy.loginfo(data.axes)
    print(axes)
    packed = bytearray()
    packed += struct.pack('f', axes[0])
    packed += struct.pack('f', axes[1])
    print(packed)
    serialData.write(packed)
    reply = [serialData.read(4)]
    print(*reply)
def create_load_model_response(code, message):
    """
    Create load model response.

    :param code: numeric status code
    :param message: status message, encoded as length-prefixed UTF-8
    :return: encoded response as a bytearray
    """
    encoded = message.encode("utf-8")
    resp = bytearray()
    resp += struct.pack('!i', code)
    resp += struct.pack('!i', len(encoded))
    resp += encoded
    resp += struct.pack('!i', -1)  # no predictions
    return resp
def create_predict_response(ret, req_id_map, message, code, context=None):
    """
    Create inference response.

    :param context: optional request context providing per-request content
        type, HTTP status, and response headers
    :param ret: per-request prediction values keyed like req_id_map (None
        signals an error response; "error" is emitted per request)
    :param req_id_map: mapping of index -> request id string
    :param message: human-readable status message
    :param code: numeric status code
    :return: encoded response as a bytearray
    """
    def _i32(value):
        return struct.pack('!i', value)

    def _blob(payload):
        # Length-prefixed byte string.
        return _i32(len(payload)) + payload

    msg = bytearray()
    msg += _i32(code)
    msg += _blob(message.encode("utf-8"))

    for idx in req_id_map:
        msg += _blob(req_id_map.get(idx).encode('utf-8'))

        # Encoding Content-Type (zero length means "none")
        content_type = None if context is None else context.get_response_content_type(idx)
        if content_type:
            msg += _blob(content_type.encode('utf-8'))
        else:
            msg += _i32(0)  # content_type

        # Encoding the per prediction HTTP response code
        if context is None:
            # status code and reason phrase set to none
            msg += _i32(code)
            msg += _i32(0)  # No code phrase is returned
            msg += _i32(0)  # Response headers none
        else:
            sc, phrase = context.get_response_status(idx)
            msg += _i32(sc if sc is not None else 200)
            msg += _blob((phrase if phrase is not None else "").encode("utf-8"))
            # Response headers
            msg += encode_response_headers(context.get_response_headers(idx))

        if ret is None:
            msg += _blob(b"error")
            continue

        val = ret[idx]
        # NOTE: Process bytes/bytearray case before processing the string case.
        if isinstance(val, (bytes, bytearray)):
            msg += _blob(val)
        elif isinstance(val, str):
            msg += _blob(val.encode("utf-8"))
        else:
            try:
                msg += _blob(json.dumps(val, indent=2).encode("utf-8"))
            except TypeError:
                logging.warning("Unable to serialize model output.", exc_info=True)
                return create_predict_response(
                    None, req_id_map, "Unsupported model output data type.", 503)

    msg += _i32(-1)  # End of list
    return msg
lambda *args, **kwargs: builtins.bin(*args, **kwargs), builtins.bin) bin._ = functools.update_wrapper( lambda *args, **kwargs: wrap(builtins.bin)(*args, **kwargs), builtins.bin) bool = functools.update_wrapper( lambda *args, **kwargs: builtins.bool(*args, **kwargs), builtins.bool) bool._ = functools.update_wrapper( lambda *args, **kwargs: wrap(builtins.bool)(*args, **kwargs), builtins.bool) breakpoint = functools.update_wrapper( lambda *args, **kwargs: builtins.breakpoint(*args, **kwargs), builtins.breakpoint) breakpoint._ = functools.update_wrapper( lambda *args, **kwargs: wrap(builtins.breakpoint)(*args, **kwargs), builtins.breakpoint) bytearray = functools.update_wrapper( lambda *args, **kwargs: builtins.bytearray(*args, **kwargs), builtins.bytearray) bytearray._ = functools.update_wrapper( lambda *args, **kwargs: wrap(builtins.bytearray)(*args, **kwargs), builtins.bytearray) bytes = functools.update_wrapper( lambda *args, **kwargs: builtins.bytes(*args, **kwargs), builtins.bytes) bytes._ = functools.update_wrapper( lambda *args, **kwargs: wrap(builtins.bytes)(*args, **kwargs), builtins.bytes) chr = functools.update_wrapper( lambda *args, **kwargs: builtins.chr(*args, **kwargs), builtins.chr) chr._ = functools.update_wrapper( lambda *args, **kwargs: wrap(builtins.chr)(*args, **kwargs), builtins.chr) compile = functools.update_wrapper( lambda *args, **kwargs: builtins.compile(*args, **kwargs),
def __init__(self, num_bits: int):
    """Allocate a zeroed byte buffer sized to hold *num_bits* bits."""
    self._num_bits = num_bits
    # One byte holds 8 bits; e.g. 4 bytes hold 32 bits.
    # NOTE(review): `num_bits // 8 + 1` allocates one extra byte whenever
    # num_bits is an exact multiple of 8 (the exact ceiling is
    # (num_bits + 7) // 8) — presumably deliberate slack; confirm intent.
    self._bytes = bytearray(num_bits // 8 + 1)