def encode(self, in_stream, out_stream):
    """Stream bytes from in_stream into a bit buffer and emit fixed-width
    indices (self.int_type reads) via self.print_index; any leftover bits
    at EOF are emitted as one final shorter index.  (Python 2 syntax.)
    """
    extra_bits = self.num_bits
    bs = BitStream()
    try:
        while True:
            chunk = in_stream.read(self.byte_buffer)
            #print >> sys.stderr, 'chunk:', chunk
            if (chunk):
                bs.append(BitStream(bytes=chunk))
            else:
                # Input exhausted: drain the bit buffer.  This loop runs until
                # bs.read raises ReadError, which is caught by the OUTER
                # handler below.
                while True:
                    self.print_index(bs.read(self.int_type), out_stream)
                # NOTE(review): this inner try is unreachable — the loop above
                # only exits by raising ReadError into the outer except.
                try:
                    while True:
                        self.print_index(bs.read(self.int_type), out_stream)
                except ReadError, e:
                    #print >> sys.stderr, 'inner:', e
                    pass
    except ReadError, e:
        #print >> sys.stderr, 'outer:', e
        # Fewer than self.int_type bits remain; flush them as one short index.
        extra_bits = bs.len - bs.bitpos
        if extra_bits > 0:
            #print >> sys.stderr, 'extra_bits:', extra_bits
            self.print_index(bs.read('uint:' + str(extra_bits)), out_stream)
        else:
            extra_bits = self.num_bits
def concatenated_sms(data: str, length_bits: int = 8) -> Dict[str, Any]:
    """Parse a concatenated-SMS information element from a hex string.

    Reads the message reference (``length_bits`` wide), then the total
    number of parts and this part's number, all big-endian.
    """
    stream = BitStream(hex=data)
    reference = stream.read(f'uintbe:{length_bits}')
    parts_count = stream.read('uintbe:8')
    part_number = stream.read('uintbe:8')
    return {
        'reference': reference,
        'parts_count': parts_count,
        'part_number': part_number,
    }
def parse(stream: BitStream) -> "Packet":
    """Decode one packet: 3-bit version, 3-bit type id, then the payload."""
    version = stream.read(3).uint
    type_id = stream.read(3).uint
    # Type id 4 marks a literal value; every other id is an operator packet.
    if type_id == 4:
        return Value.parse(version, stream)
    return Operator.parse(version, type_id, stream)
def encode(self, in_stream, out_stream):
    """Buffer in_stream's bytes as bits and emit fixed-width indices via
    self.print_index; trailing bits at EOF become one final shorter index.
    (Python 2 syntax.)
    """
    extra_bits = self.num_bits
    bs = BitStream()
    try:
        while True:
            chunk = in_stream.read(self.byte_buffer)
            #print >> sys.stderr, 'chunk:', chunk
            if(chunk):
                bs.append(BitStream(bytes=chunk))
            else:
                # EOF: drain the buffer until bs.read raises ReadError, which
                # lands in the OUTER except below.
                while True:
                    self.print_index(bs.read(self.int_type), out_stream)
                # NOTE(review): dead code — the loop above never falls
                # through; it always exits by raising into the outer handler.
                try:
                    while True:
                        self.print_index(bs.read(self.int_type), out_stream)
                except ReadError, e:
                    #print >> sys.stderr, 'inner:', e
                    pass
    except ReadError, e:
        #print >> sys.stderr, 'outer:', e
        # Flush whatever is left as a single narrower index.
        extra_bits = bs.len - bs.bitpos
        if extra_bits > 0:
            #print >> sys.stderr, 'extra_bits:', extra_bits
            self.print_index(bs.read('uint:' + str(extra_bits)), out_stream)
        else:
            extra_bits = self.num_bits
def main(): with open("eeprom.bin", "rb") as f: bytes = bytearray(f.read()) bytes = bytearray([b for b in reversed(bytes)]) bs = BitStream(bytes) size = len(bytes) / 4 curr = np.zeros([size], dtype=np.float64) volt = np.zeros([size], dtype=np.float64) temp = np.zeros([size], dtype=np.float64) for i in xrange(size): curr[i] = bs.read(12).uint * (3.35693e-3 * 4) volt[i] = 1.1 * 1023.0 / bs.read(10).uint temp[i] = bs.read(10).uint print curr, volt, temp plt.figure() plt.plot(curr) plt.figure() plt.plot(volt) plt.figure() plt.plot(temp) plt.show()
def uncompress_golomb_coding(coded_bytes, hash_length, M):
    """Expand a Golomb-coded byte string back into a list of hash bytes."""
    out = []
    stream = BitStream(bytes=coded_bytes, length=len(coded_bytes) * 8)
    hash_len_bits = hash_length * 8
    m_bits = int(math.log(M, 2))
    # The stream opens with one complete hash value; later entries encode
    # only the delta from the previous hash.
    prev = stream.read("bits:%d" % hash_len_bits)
    out.append(prev.tobytes())
    while stream.bitpos + m_bits <= stream.length:
        # Unary quotient: count 1-bits until the terminating 0.
        quotient = 0
        while stream.read("uint:1") == 1:
            quotient += 1
        # Fixed-width remainder of m_bits bits.
        remainder = stream.read("uint:%d" % m_bits)
        prev = Bits(uint=prev.uint + quotient * M + remainder,
                    length=hash_len_bits)
        out.append(prev.tobytes())
    return out
def run(self):
    """Thread body: spawn ntripclient, scan its stdout for RTCMv3 frames,
    re-serialise each frame (preamble + header + payload + parity) and hand
    it to the rtcm_callback.  (Python 2 syntax.)
    """
    import subprocess
    # Unpack the args given to threading.Thread (name-mangled attribute).
    server, port, username, password, mountpoint, rtcm_callback = self._Thread__args
    nt = subprocess.Popen([
        "./ntripclient", "--server", server, "--password", password,
        "--user", username, "--mountpoint", mountpoint
    ], stdout=subprocess.PIPE)
    """nt = subprocess.Popen(["./ntrip.py", server, str(port), username, password, mountpoint], stdout=subprocess.PIPE)"""
    if nt is None or nt.stdout is None:
        indev = sys.stdin
    else:
        indev = nt.stdout
    sio = indev
    while not self._stopevent.isSet():
        # Hunt for the one-byte RTCMv3 preamble.
        d = ord(sio.read(1))
        if d != RTCMv3_PREAMBLE:
            continue
        pack_stream = BitStream()
        # Two header bytes: 6 reserved bits + 10-bit payload length.
        l1 = ord(sio.read(1))
        l2 = ord(sio.read(1))
        pack_stream.append(bs.pack('2*uint:8', l1, l2))
        pack_stream.read(6)
        pkt_len = pack_stream.read(10).uint
        pkt = sio.read(pkt_len)
        parity = sio.read(3)
        if len(pkt) != pkt_len:
            print "Length error {} {}".format(len(pkt), pkt_len)
            continue
        if True:  #TODO check parity
            for d in pkt:
                pack_stream.append(bs.pack('uint:8', ord(d)))
            msg = parse_rtcmv3(pack_stream)
            # MARCO forward unchanged
            pack_stream.append(
                bs.pack('3*uint:8', ord(parity[0]), ord(parity[1]), ord(parity[2])))
            # Rebuild the raw frame: preamble byte + header/payload/parity bits.
            msg = struct.pack('<B', RTCMv3_PREAMBLE) + pack_stream.tobytes()
            if msg is not None and rtcm_callback is not None:
                rtcm_callback(msg)
    # Stop requested: shut the child process down.
    if nt is not None:
        nt.terminate()
        nt.wait()
        nt = None
def uncompress_golomb_coding(coded_bytes, hash_length, M):
    """Given a bytstream produced using golomb_coded_bytes, uncompress it."""
    hash_len_bits = hash_length * 8
    m_bits = int(math.log(M, 2))
    stream = BitStream(bytes=coded_bytes, length=8 * len(coded_bytes))
    # First entry is stored verbatim; the rest are deltas from it.
    prev = stream.read("bits:%d" % hash_len_bits)
    decoded = [prev.tobytes()]
    while (stream.bitpos + m_bits) <= stream.length:
        # Unary-coded quotient terminated by a zero bit.
        prefix = 0
        bit = stream.read("uint:1")
        while bit:
            prefix += 1
            bit = stream.read("uint:1")
        # Remainder stored in exactly m_bits bits.
        r = stream.read("uint:%d" % m_bits)
        value_int = prev.uint + prefix * M + r
        prev = Bits(uint=value_int, length=hash_len_bits)
        decoded.append(prev.tobytes())
    return decoded
def parsePacket():
    """Read one framed packet from the serial port and verify its checksum.

    Frame layout: a header byte (4-bit length + 4-bit type), a body of
    `length` bytes, and a one-byte checksum footer.
    """
    #read header
    serialRaw = [c for c in ser.read()]  # pyserial read() defaults to one byte
    header = BitStream(uint=serialRaw[0], length=8)
    #parse header
    length = header.read('uint:4')
    packetType = header.read('uint:4')
    #read and parse body
    serialRaw = [c for c in ser.read(length)]
    # NOTE(review): only the first body byte is parsed even when length > 1 —
    # confirm whether multi-byte bodies are expected here.
    body = BitStream(uint=serialRaw[0], length=8)
    data = body.read('uint:8')
    #read and parse footer
    serialRaw = [c for c in ser.read(1)]
    footer = BitStream(uint=serialRaw[0], length=8)
    checkSum = footer.read('uint:8')
    #recompute checksum and mark packet as error if both checksums don't match
    if (calculateCheckSum(length, 4) + calculateCheckSum(packetType, 4) +
            calculateCheckSum(data, 8)) != checkSum:
        print("bad checksum")
        return {"type": "error"}
    return {"type": packetType, "data": data}
class Section:
    """One decoded section: an (x, y) position plus a 6-bit flag field."""

    def __init__(self, data):
        # A None payload leaves _data unset; the reads below would then fail,
        # exactly as before.
        if data is not None:  # was `data != None`; identity test is idiomatic
            self._data = BitStream(hex=data.hex())
        else:
            self._data = None
        self.x = self._read_x()
        self.y = self._read_y()
        self.flag = self._read_flag()
        self.vector = [self.x, self.y]

    def _read_x(self):
        # 13-bit signed x coordinate starting at bit 6.
        # (Removed the unreachable `return None` that followed the return.)
        self._data.pos = 6
        return self._data.read('int:13')

    def _read_y(self):
        # 13-bit signed y coordinate starting at bit 19.
        self._data.pos = 19
        return self._data.read('int:13')

    def _read_flag(self):
        # 6-bit unsigned flag field at the head of the stream.
        self._data.pos = 0
        return self._data.read('uint:6')
def decode(cls, data: str) -> Dict[str, str]:
    """
    Decodes the Type Of Address octet.

    Returns a dictionary.

    Example:

    >>> TypeOfAddress.decode('91')
    {'ton': 'international', 'npi': 'isdn'}
    """
    io_data = BitStream(hex=data)
    first_bit = io_data.read('bool')
    if not first_bit:
        raise ValueError("Invalid first bit of the Type Of Address octet")
    # Type Of Number
    ton = cls.TON.get(io_data.read('bits:3').uint)
    if ton is None:
        # cls.TON should cover all 8 possible 3-bit values; raise instead of
        # asserting so the check survives `python -O`.
        raise ValueError("Invalid Type Of Number bits")
    # Numbering Plan Identification
    npi = cls.NPI.get(io_data.read('bits:4').uint)
    if npi is None:
        raise ValueError("Invalid Numbering Plan Identification bits")
    return {
        'ton': ton,
        'npi': npi,
    }
def from_bytes(cls, input_bytes: bitstring.BitStream):
    """Build an Avail Descriptor from its wire form.

    Validates the 32-bit CUEI identifier, then reads the 32-bit provider
    avail id.
    """
    identifier = input_bytes.read('uint:32')
    if identifier != CUEI_IDENTIFIER:
        raise Exception("Avail Descriptor identifier is not CUEI as required")
    return cls(input_bytes.read('uint:32'))
class bit_file:
    """File wrapper that reads/writes fixed-width unsigned integers
    (bits_per_op bits each) through an in-memory BitStream buffer."""

    def __init__(self, f, bits_per_op, mode='read', keep_open=False, read_size=MAX_ENCODING, read_count=1):
        if mode not in ['read', 'write']:
            raise Exception('bad bit_file mode. Try "read" or "write"')
        self.mode = mode
        # Accept either a path (opened/closed by us) or an open file object.
        if isinstance(f, str):
            fmode = 'wb' if mode == 'write' else 'rb'
            self.f = open(f, fmode)
            self.keep_open = False
        else:
            self.f = f
            self.keep_open = keep_open  # determines whether __exit__ is a flush()+close(), or just a flush()
        self.bits_per_op = bits_per_op
        self.stream = BitStream()
        self.read_size = read_size      # bytes pulled from the file per refill
        self.read_count = read_count    # number of refills still allowed

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Flush pending bits before (optionally) closing the file.
        if self.mode == 'write' and not self.f.closed:
            self.save()
        if not self.keep_open and not self.f.closed:
            with self.f:  # close file
                pass

    def write(self, bits):
        """Append bits (int, Bits, or bit_write_buffer) to the buffer."""
        if isinstance(bits, bit_write_buffer):
            bits = bits.stream
        if not isinstance(bits, Bits):
            bits = Bits(uint=bits, length=self.bits_per_op)
        self.stream.append(bits)

    def read(self):
        """Return the next bits_per_op-bit uint; shorter tail reads fall back
        to whatever bits remain, and an empty buffer yields 0."""
        # Refill the buffer only when it is fully consumed and refills remain.
        if self.read_count and self.stream.bitpos == self.stream.length:
            self.stream.clear()
            self.stream.append(Bits(bytes=self.f.read(self.read_size)))
            self.read_count -= 1
        try:
            bits = self.stream.read(f'uint:{self.bits_per_op}')
        except bitstring.ReadError:
            # Fewer than bits_per_op bits left: read the remainder as a uint.
            try:
                bits = self.stream.read('uint')
            except bitstring.InterpretError:
                bits = 0
        return bits

    def save(self):
        self.f.write(self.stream.tobytes())
def from_bytes(cls, input_bytes: bitstring.BitStream):
    """Parse a splice time: either an immediate splice or a 33-bit PTS."""
    if not input_bytes.read('bool'):
        # time_specified_flag clear -> splice immediately, no PTS present.
        return cls(immediate=True, pts_time=None)
    # Six reserved bits must all be set before the 33-bit PTS value.
    if input_bytes.read('uint:6') != 0x3F:
        raise ReservedBitsException()
    return cls(immediate=False, pts_time=input_bytes.read('uint:33'))
def RTCM_converter_thread(server, port, username, password, mountpoint, rtcm_callback=None):
    """Spawn ntripclient, scan its stdout for RTCMv3 frames, parse each frame
    and pass the parsed message to rtcm_callback."""
    import subprocess
    nt = subprocess.Popen([
        "./ntripclient", "--server", server, "--password", password,
        "--user", username, "--mountpoint", mountpoint
    ], stdout=subprocess.PIPE)
    """nt = subprocess.Popen(["./ntrip.py", server, str(port), username, password, mountpoint], stdout=subprocess.PIPE)"""
    if nt is None or nt.stdout is None:
        indev = sys.stdin
    else:
        indev = nt.stdout
    print("RTCM using input {}".format(indev))
    while True:
        sio = indev
        # Hunt byte-by-byte for the RTCMv3 preamble.
        d = ord(sio.read(1))
        if d != RTCMv3_PREAMBLE:
            continue
        pack_stream = BitStream()
        # Two header bytes: 6 reserved bits + 10-bit payload length.
        l1 = ord(sio.read(1))
        l2 = ord(sio.read(1))
        pack_stream.append(bs.pack('2*uint:8', l1, l2))
        pack_stream.read(6)
        pkt_len = pack_stream.read(10).uint
        pkt = sio.read(pkt_len)
        parity = sio.read(3)
        if len(pkt) != pkt_len:
            print("Length error {} {}".format(len(pkt), pkt_len))
            continue
        if True:  #TODO check parity
            for d in pkt:
                pack_stream.append(bs.pack('uint:8', ord(d)))
            msg = parse_rtcmv3(pack_stream)
            if msg is not None and rtcm_callback is not None:
                rtcm_callback(msg)
def __init__(self, chunk_id, size, data):
    """Parse a MIDI header chunk body: format type, track count, division."""
    super(HeaderChunk, self).__init__(chunk_id, size)
    data_stream = BitStream(data)
    # First two bytes are the format type
    # NOTE(review): '.int' interprets these 16-bit fields as signed; the
    # header fields are non-negative counts — confirm '.uint' was not intended.
    self.format_type = data_stream.read('bits:16').int
    # Second two bytes are the number of tracks
    self.num_of_tracks = data_stream.read('bits:16').int
    # Third two bytes are the time division
    self.time_division = Bits(data_stream.read('bits:16'))
def from_bytes(cls, input_bytes: bitstring.BitStream):
    """Decode a DTMF Descriptor: preroll, DTMF count, then the DTMF chars."""
    identifier = input_bytes.read('uint:32')
    if identifier != CUEI_IDENTIFIER:
        raise Exception("DTMF Descriptor identifier is not CUEI as required")
    preroll = input_bytes.read('uint:8')
    dtmf_count = input_bytes.read('uint:3')
    # Five reserved bits must all be set.
    if input_bytes.read('uint:5') != 0x1F:
        raise ReservedBitsException("DTMF Descriptor")
    return cls(preroll, input_bytes.read('bytes:%s' % dtmf_count))
def RTCM_converter_thread(server, port, username, password, mountpoint, rtcm_callback = None):
    """Spawn ntripclient and forward each parsed RTCMv3 message from its
    stdout to rtcm_callback.  (Python 2 syntax.)"""
    import subprocess
    nt = subprocess.Popen(["./ntripclient", "--server", server, "--password", password,
                           "--user", username, "--mountpoint", mountpoint
                           ], stdout=subprocess.PIPE)
    """nt = subprocess.Popen(["./ntrip.py", server, str(port), username, password, mountpoint], stdout=subprocess.PIPE)"""
    if nt is None or nt.stdout is None:
        indev = sys.stdin
    else:
        indev = nt.stdout
    print("RTCM using input {}".format(indev))
    while True:
        sio = indev
        # Resynchronise on the one-byte RTCMv3 preamble.
        d = ord(sio.read(1))
        if d != RTCMv3_PREAMBLE:
            continue
        pack_stream = BitStream()
        # Two header bytes hold 6 reserved bits + a 10-bit payload length.
        l1 = ord(sio.read(1))
        l2 = ord(sio.read(1))
        pack_stream.append(bs.pack('2*uint:8', l1, l2))
        pack_stream.read(6)
        pkt_len = pack_stream.read(10).uint
        pkt = sio.read(pkt_len)
        parity = sio.read(3)
        if len(pkt) != pkt_len:
            print "Length error {} {}".format(len(pkt), pkt_len)
            continue
        if True:  #TODO check parity
            for d in pkt:
                pack_stream.append(bs.pack('uint:8',ord(d)))
            msg = parse_rtcmv3(pack_stream)
            if msg is not None and rtcm_callback is not None:
                rtcm_callback(msg)
def parse(version: int, type_: int, stream: BitStream) -> "Operator":
    """Parse an operator packet's length header and its sub-packets."""
    length_type_id = stream.read(1)
    sub_packets = []
    # NOTE(review): stream.read(1) yields a 1-bit Bits object; this branch
    # relies on its truthiness reflecting the bit's VALUE — confirm against
    # the bitstring version in use.
    if length_type_id:
        # Length type 1: next 11 bits give the number of sub-packets.
        nb_sub_packets = stream.read(11).uint
        for _ in range(nb_sub_packets):
            sub_packets.append(Packet.parse(stream))
    else:
        # Length type 0: next 15 bits give the sub-packets' total bit length.
        bit_length = stream.read(15).uint
        sub_packet_data = stream.read(bit_length)
        while sub_packet_data.pos < bit_length:
            sub_packets.append(Packet.parse(sub_packet_data))
    return Operator(version=version, type_=type_, sub_packets=sub_packets)
def decompress(data):
    """Rebuild text from LZ-style (distance, length-or-literal) pairs."""
    stream = BitStream(bytes=data)
    total_bits = len(stream.bin)
    pairs = []
    # Stop once fewer bits remain than one full (distance, payload) pair.
    while total_bits - stream.pos >= (WINDOW_BITS + LENGTH_BITS):
        distance = stream.read('uint:%d' % WINDOW_BITS)
        payload = stream.read('uint:%d' % LENGTH_BITS)
        # A zero distance marks a literal character, not a back-reference.
        if distance == 0:
            payload = chr(payload)
        pairs.append([distance, payload])
    return feed2text(pairs)
def unpack_data(data):
    """Base64-decode a blob and unpack its fixed-size records.

    Layout: one byte record size, one byte record count, then the records.
    """
    stream = BitStream(bytes=base64.b64decode(data))
    record_size = stream.read(8).intle
    record_count = stream.read(8).intle
    results = [unpack_record(stream.read(record_size * 8))
               for _ in range(record_count)]
    return {
        "record_size": record_size,
        "record_count": record_count,
        "results": results,
    }
def uncompress_delta_diff(compressed_input, hash_length):
    """Invert delta-diff compression back into full hash values.

    The stream starts with one complete hash; each later entry stores a
    6-bit (length-1) field followed by that many suffix bits that replace
    the tail of the previous hash.
    """
    ret_list = []
    instream = BitStream(bytes=compressed_input, length=len(compressed_input) * 8)
    hash_len_bits = hash_length * 8
    prev = instream.read("bits:%d" % hash_len_bits)
    ret_list.append(prev.tobytes())
    # Must always have at least 6 bits to read.
    # NOTE(review): the strict '<' stops when exactly 6 bits remain; such a
    # tail could not carry any suffix bits anyway, but confirm this is meant
    # to skip trailing padding.
    while (instream.bitpos + 6) < instream.length:
        curr_diff_len = instream.read("uint:6") + 1
        curr_diff = instream.read("bits:%d" % curr_diff_len)
        # Keep the unchanged prefix of the previous hash, replace the suffix.
        curr_item = prev[:hash_len_bits - curr_diff_len] + curr_diff
        assert curr_item.length == hash_len_bits
        ret_list.append(curr_item.tobytes())
        prev = curr_item
    return ret_list
def adjust_tiles(tiles, size, isd_text):
    """Scan an island's <ChunkMap> XML, decode each chunk's HeightMap CDATA
    and mark 'coast' tiles (sentinel height value) with 255 in `tiles`.
    """
    ChunkMap = re.split(r"<ChunkMap>", isd_text)
    if len(ChunkMap) != 2:
        e = "Previous split should have resulted in 2 strings. {} found".format(len(ChunkMap))
        raise NotImplementedError(e)
    ChunkMap = ChunkMap[1]
    # first 2 characters after <Width> tag in <ChunkMap> => up to 99 chunks (= 1584 x 1584 tiles per island)
    (width_tiles, height_tiles) = size  #@UnusedVariable
    width_chunks = int( re.split(r"<Width>", ChunkMap[:100])[1][:2].strip("<") )
    height_chunks = int( re.split(r"<Height>", ChunkMap[:100])[1][:2].strip("<") )  #@UnusedVariable
    chunks = re.split(r"<Element>", ChunkMap)[1:]
    for i in range(len(chunks)):
        VertexResolution = re.split(r"<VertexResolution>", chunks[i])[1][0]
        # -1 => empty chunk (and resolution 5... skipped too)
        if VertexResolution in ("-", "5"):
            continue
        VertexResolution = int(VertexResolution)
        HeightMap = re.split(r"HeightMap[^C]*CDATA\[", chunks[i])[1:]
        # Chunk grid position -> tile-space origin of this chunk.
        start_x = i%width_chunks
        start_z = i//width_chunks
        # NOTE(review): per-resolution [header_bytes, bytes_per_tile] —
        # values look empirically derived; confirm against the file format.
        resolution = {4: [ 4, 4], 5: [ 25,15], 6: [142,58]}[VertexResolution]
        useful_bytes = 17*17*resolution[1]
        load_bytes = resolution[0] + useful_bytes
        bits_per_tile = resolution[1] * 8
        data = BitStream( bytes=HeightMap[0][:load_bytes][-useful_bytes:] )
        read_string = "uint:{}".format(bits_per_tile)
        for z in range(16):
            for x in range(17):
                position = start_z*16*width_tiles + z*240 + start_x*16 + x
                d = int( data.read(read_string))
                if x != 16 and d == 858993471:
                    #trial and error, 0x3333333f=858993471, 0x33=51, 0x3f=63, 0x3333=13107, 0x333f=13119
                    tiles[position] = 255
    return None
class Mem(object):
    """Memory backed by a BitStream of 600 16-bit words, byte-addressed."""

    def __init__(self):
        self.real = BitStream(600*16)  # 600 words of 16 bits, zero-filled
        self.jumps = 0                 # count of jump() calls

    def load(self, file):
        # Replace the backing store with the file's contents.
        self.real = BitStream(filename=file)

    def save(self, file):
        self.real.tofile(file)

    def jump(self, pos):
        """Move the byte cursor to `pos`, counting the jump."""
        self.jumps += 1
        self.real.bytepos = pos

    def read(self, size=16):
        """Read `size` bits from the cursor.

        Fix: the `size` parameter was previously ignored (always read 16).
        """
        return self.real.read(size)

    def get(self, pos, size=16):
        """Return `size` bits at byte address `pos` without moving the cursor."""
        realpos = pos * 8
        return self.real[realpos:realpos+size]

    def set(self, pos, bits):
        """Overwrite bits at byte address `pos` without moving the cursor."""
        realpos = pos * 8
        self.real[realpos:realpos+len(bits)] = bits

    @property
    def pos(self):
        # Current cursor position in bytes.
        return self.real.bytepos
def Pid(self):
    """Read the device PID register over SPI; return it as a binary string.

    The transfer is issued twice — SPI replies typically lag one frame, so
    the first exchange queues the request and the second returns the data.
    NOTE(review): confirm the double-transfer against the device datasheet.
    """
    self.spi1.spi_xfer(self.h,self.Read_Pid)
    (count,st)=self.spi1.spi_xfer(self.h,self.Read_Pid)
    f2 = BitStream(bytes = st)
    # Skip the 11-bit response preamble, then take the 16-bit PID field.
    f2.pos= 11
    a=f2.read(16).bin
    return a
def get_frame_type(self, slice_):
    """Classify an H.264 slice header as a P, B or I frame (None if unknown)."""
    bs = BitStream(slice_)
    bs.read('ue')  # first_mb_in_slice — not needed for classification
    slice_type = bs.read('ue')
    # Slice types repeat modulo 5; SP maps to P and SI maps to I.
    type_map = {
        0: FRAME_P, 5: FRAME_P,
        1: FRAME_B, 6: FRAME_B,
        2: FRAME_I, 7: FRAME_I,
        3: FRAME_P, 8: FRAME_P,
        4: FRAME_I, 9: FRAME_I,
    }
    return type_map.get(slice_type)
def Sensor_Data(self):
    """Return one signed 16-bit sensor sample read over SPI.

    Two transfers are issued — the SPI response typically lags one frame
    behind the request.  NOTE(review): confirm against the device datasheet.
    """
    self.spi1.spi_xfer(self.h,self.Sensor_data_CHK_de_asrRead_Rate)
    (count,st)=self.spi1.spi_xfer(self.h,self.Sensor_data_CHK_de_asrRead_Rate)
    f2 = BitStream(bytes = st )
    # Skip the 6-bit preamble, then read the 16-bit signed sample.
    f2.pos= 6
    a=f2.read(16).int
    return a
class Mem(object):
    """BitStream-backed memory of 600 16-bit words with byte addressing."""

    def __init__(self):
        self.real = BitStream(600 * 16)  # zero-initialised storage
        self.jumps = 0                   # jump() call counter

    def load(self, file):
        """Replace the backing store with a file's contents."""
        self.real = BitStream(filename=file)

    def save(self, file):
        self.real.tofile(file)

    def jump(self, pos):
        """Set the byte cursor to `pos` and record the jump."""
        self.jumps += 1
        self.real.bytepos = pos

    def read(self, size=16):
        """Read `size` bits from the cursor.

        Fix: `size` was previously ignored — the method always read 16 bits.
        """
        return self.real.read(size)

    def get(self, pos, size=16):
        """Return `size` bits at byte address `pos`, cursor unchanged."""
        realpos = pos * 8
        return self.real[realpos:realpos + size]

    def set(self, pos, bits):
        """Overwrite bits at byte address `pos`, cursor unchanged."""
        realpos = pos * 8
        self.real[realpos:realpos + len(bits)] = bits

    @property
    def pos(self):
        # Cursor position in bytes.
        return self.real.bytepos
class CIMap(vector_quantization.CIVQ):
    """Color-index map codec: vector quantization where each pixel's color
    triple is a 'patch', with a 13-bit header (dims diff + codebook flag)."""

    def __init__(self, image_file_path=None, binary_file_path=None, rec_image_file_path=None, N=None, M=None):
        ##### Call parent constructor without 'N' parameter.
        super().__init__(image_file_path, binary_file_path, rec_image_file_path, None, M)
        return

    def _separate_blocks(self):
        ##### Read Image and use colors as blocks. Therefore, pixels are the patches here
        pil_image = Image.open(self.image_file_path)
        self.image = np.asarray(pil_image)
        self.patches = self.image
        ##### Get dimensions difference
        # Stored so the decoder can reconstruct the original height/width.
        self.patch_dims_diff = -np.subtract(self.patches.shape[0], self.patches.shape[1])
        ##### Reshape patches: flatten the patches (pixels inside patches are already flattened).
        self.patches = np.reshape( self.patches, (int(np.prod(self.patches.shape[:2])), self.patches.shape[-1]))
        return

    def _instantiate_bitstring(self):
        ##### Write information about codebook length.
        # NOTE: The available codebook sizes are 16, 32, 64, 128 and 256.
        # Flag encodes log2(M) - 4, i.e. 0..4 in three bits.
        codebook_size_flag = int(math.log2(self.M) - 4)
        self.bitstring = BitStream( f'int:10={self.patch_dims_diff}, uint:3={codebook_size_flag}')
        return

    def _read_bitstring_header(self):
        ##### Read info about image dimensions and codebook lenght.
        self.patch_dims_diff = self.bitstring.read('int:10')
        codebook_size_flag = self.bitstring.read('uint:3')
        self.M = 2**(codebook_size_flag + 4)
        self.N = 1              # one pixel per patch
        self.block_length = 3   # RGB triple
        self.bits_in_header = 10 + 3
        return

    def _build_image_from_patches(self):
        # Pixels are already in image order; just cast back to bytes.
        self.quantized_image = self.flattened_patches.astype(np.uint8)
        return
def parse(version: int, stream: BitStream) -> "Value":
    """Accumulate a literal value from 5-bit groups (1 flag bit + 4 data bits)."""
    value = BitStream()
    while True:
        group = stream.read(5)
        # First bit of the group is 1 while more groups follow, 0 on the last.
        # NOTE(review): group.read(1) yields a 1-bit Bits object; `not` relies
        # on its truthiness tracking the bit VALUE — confirm for the bitstring
        # version in use.
        last_group = not group.read(1)
        value += group.read(4)
        if last_group:
            return Value(version=version, value=value.uint)
def main():
    """Dump each sample payload in hex/binary and decode its slice header."""
    for datum in DATA:
        # Raw views of the payload, one byte per field.
        print(":".join("{:02x}".format(b) for b in datum))
        print(":".join("{:08b}".format(b) for b in datum))
        stream = BitStream(datum)
        # Exp-Golomb ('ue') coded header fields, then a 9-bit frame number.
        first_mb_in_slice = stream.read('ue')
        slice_type = stream.read('ue')
        pic_parameter_set_id = stream.read('ue')
        frame_num = stream.read(9)
        print("first-mb-in-slice: {}".format(first_mb_in_slice))
        print("slice-type: {}".format(slice_type))
        print("pic-parameter-set-id: {}".format(pic_parameter_set_id))
        print("frame-num: {}".format(frame_num.int))
def instruction(self, opcode, address):
    ''' 0b will be put automatically '''
    # Wrap the binary-digit strings as bit streams, then dispatch the opcode
    # handler with the address decoded as a signed integer.
    opcode_bits = BitStream('0b' + opcode)
    address_bits = BitStream('0b' + address)
    handler = self.ops[opcode_bits.read('bin:8')]
    handler(address_bits.int)
def gen():
    """Build a pronounceable token from OS-level randomness.

    Pattern: a consonant, then five vowel+consonant pairs... ending with one
    extra consonant (C V C V C V C V C V C C).
    """
    consonants = "bcdfghjklmnpqrstvwxyz"
    vowels = "aeiou"
    randdata = subprocess.check_output(["openssl", "rand", "9"])
    assert len(randdata) == 9
    bs = BitStream(randdata)
    # 5 bits select a consonant, 3 bits a vowel (signed reads; Python's
    # modulo keeps the index non-negative).
    pieces = [consonants[bs.read('int:5') % len(consonants)]]
    for _ in range(5):
        pieces.append(vowels[bs.read('int:3') % len(vowels)])
        pieces.append(consonants[bs.read('int:5') % len(consonants)])
    pieces.append(consonants[bs.read('int:5') % len(consonants)])
    return "".join(pieces)
def Rate(self):
    """Read the rate register over SPI and scale it to output units.

    Two transfers are issued — the SPI response typically lags one frame
    behind the request.  NOTE(review): confirm for this device.
    """
    self.spi1.spi_xfer(self.h,self.Read_Rate)
    (count,st)=self.spi1.spi_xfer(self.h,self.Read_Rate)
    f2 = BitStream(bytes = st )
    # Skip the 11-bit preamble, then read the 16-bit signed rate field.
    f2.pos= 11
    a=f2.read(16).int
    # 80 LSB per unit.  NOTE(review): under Python 2 this is integer
    # division — confirm truncation is intended.
    a=a/80
    return a
def decode_fat(self, stream: bitstring.BitStream):
    """Record all non-empty FAT entries, then skip past the FAT sectors."""
    total_entries = self.bpb.sec_per_fat * self.bpb.bytes_per_sector // 4
    for index in range(total_entries):
        value = stream.read('uintle:32')
        # Zero marks a free cluster; only occupied entries are kept.
        if value:
            self.fat_clusters[index] = value
    self.skip_sectors(stream, self.bpb.sec_per_fat)
def unformat_message(text: BitStream) -> bytes:
    """Extract and verify a SHA-512-framed message from *text*.

    Frame layout: a 64-byte SHA-512 digest, a ue-coded byte length, then
    the message bytes.  Raises ValueError if the message has been tampered
    with (or cannot be read at all).
    """
    try:
        # Renamed local from `len` — it shadowed the builtin.
        sha, msg_len = text.readlist("bytes:64, ue")
        message = text.read(f"bytes:{msg_len}")
    except ReadError as e:
        raise ValueError("No valid message found — read error.") from e
    if hashlib.sha512(message).digest() != sha:
        raise ValueError("No valid message found — SHA mismatch.")
    return message
def decompress(self, data):
    """Huffman-decompress *data*.

    Layout: one pad-count byte, a 16-bit tree length, the serialized code
    tree, then the coded bit stream (with `pad` trailing filler bits).
    """
    bitstream = BitStream(data)
    pad = bitstream.read(8).int  # remove pad bits
    if pad > 0:
        # Slicing resets the read position, hence the dummy read below.
        bitstream = bitstream[:-pad]
    bitstream.read(8)  # false read 1 B to move read pointer
    tree_len = bitstream.read(16).int
    tree_serial = bitstream.read(tree_len)
    tree = HuffmanNode()
    tree.deserialize(tree_serial)
    dictionary = tree.assign_codes()
    dictionary = {v: k for k, v in dictionary.items()}  # reverse dict
    result = bytearray()
    sequence = ""
    # Walk the stream bit by bit, emitting a byte whenever the collected
    # pattern matches a code word.
    while True:
        try:
            bit = bitstream.read(1)
        except ReadError:
            break
        if bit:
            sequence += '1'
        else:
            sequence += '0'
        if sequence in dictionary:
            result.append(dictionary[sequence])
            sequence = ""
    return result
def decode(cls, pdu_data: StringIO) -> Dict[str, Any]:
    """
    Decodes an incoming PDU header octet.

    >>> PDUHeader.decode(StringIO('44'))
    {'rp': False, 'udhi': True, 'sri': False, 'lp': False, 'mms': True, 'mti': 'deliver'}
    """
    bits = BitStream(hex=pdu_data.read(2))
    rp = bits.read('bool')    # Reply Path
    udhi = bits.read('bool')  # User Data Header Indicator
    sri = bits.read('bool')   # Status Report Indication
    bits.pos += 1             # unused bit
    lp = bits.read('bool')    # Loop Prevention
    mms = bits.read('bool')   # More Messages to Send
    mti = cls.MTI.get(bits.read('bits:2').uint)  # Message Type Indicator
    if mti is None:
        raise ValueError("Invalid Message Type Indicator")
    return {
        'rp': rp,
        'udhi': udhi,
        'sri': sri,
        'lp': lp,
        'mms': mms,
        'mti': mti,
    }
def parse_raw(cls, data):
    """Parse a raw RRSIG RDATA buffer into a new instance.

    Reads the fixed-width header fields, then treats the remainder of the
    buffer as the signature.
    """
    instance = cls(None)
    stream = BitStream(bytes=data)
    # Constant format strings — dropped the pointless f-prefixes.
    instance._type_covered = stream.read('uint:16')
    instance._algorithm = stream.read('uint:8')
    instance.labels = stream.read('uint:8')
    instance.original_ttl = stream.read('uint:32')
    instance._signature_expiration = stream.read('uint:32')
    instance._signature_inception = stream.read('uint:32')
    instance.key_tag = stream.read('uint:16')
    # NOTE(review): Domain.decode(None) does not consume the signer's name
    # from the stream — confirm the signature below is really meant to start
    # immediately after key_tag.
    instance.signers_name = Domain.decode(None)
    end = stream.pos
    # Remaining byte count: total bytes minus bytes consumed (end is in bits).
    instance._signature = stream.read(f'bytes:{int(len(data) - end / 8)}')
    return instance
def uncompress_golomb_coding(coded_bytes, hash_length, M):
    """Uncompress a Golomb-coded byte string into a list of hash values.

    The stream starts with one full hash; each later entry is a unary-coded
    quotient, a stop bit, and an m_bits-wide remainder giving the delta
    from the previous hash.
    """
    ret_list = []
    # Fix: the stream's bit length is 8 bits per byte; it was previously
    # (incorrectly) scaled by hash_length.
    instream = BitStream(bytes=coded_bytes, length=len(coded_bytes) * 8)
    hash_len_bits = hash_length * 8
    m_bits = int(math.log(M, 2))
    prev = instream.read("bits:%d" % hash_len_bits)
    ret_list.append(prev.tobytes())
    # Fix: require at least m_bits remaining, so trailing padding cannot
    # trigger a ReadError mid-entry (matches the sibling implementations).
    while (instream.bitpos + m_bits) <= instream.length:
        read_prefix = 0
        curr_bit = instream.read("uint:1")
        while curr_bit == 1:
            read_prefix += 1
            curr_bit = instream.read("uint:1")
        assert curr_bit == 0
        r = instream.read("uint:%d" % m_bits)
        curr_diff = read_prefix * M + r
        curr_value_int = prev.uint + curr_diff
        curr_value = Bits(uint=curr_value_int, length=hash_len_bits)
        ret_list.append(curr_value.tobytes())
        prev = curr_value
    return ret_list
def dataEntryAppend(self, eventLogEntryBitStream: BitStream):
    """
    since the latest entry is at the end of the whole ECDA dump, collect all
    entries and store them in reverse order
    :param eventLogEntryBitStream: stream positioned at the start of one entry
    :return: None
    """
    # Entry layout: 32-bit timestamp, 16-bit event id, 16-bit extra info.
    # Constant format strings — replaced the needless '{}'.format(...) calls.
    timestampHex = eventLogEntryBitStream.read('hex:32')
    eventIdHex = eventLogEntryBitStream.read('hex:16')
    eventId = int(eventIdHex, 16)
    extraInfo = eventLogEntryBitStream.read('hex:16')
    self._dataDict['timestampHex'].append(timestampHex)
    self._dataDict['eventId'].append(eventId)
    self._dataDict['eventIdHex'].append(eventIdHex)
    self._dataDict['extraInfo'].append(extraInfo)
def decrypt(ciphertext, key):
    """ElGamal-decrypt *ciphertext* and return the recovered plaintext bits.

    ciphertext[0] holds the total plaintext bit length; each later entry is
    an (a, b) pair.  key is ((p, g, y), u) where u is the private exponent.
    """
    bstream = BitStream()
    p, g, y = key[0]
    u = key[1]
    # Hoisted out of the loop: the block size depends only on p.
    block_size = int(math.floor(math.log(p, 2)))
    for block in ciphertext[1:]:
        # Recover m = b * (a^u)^-1 mod p.
        a_pow_u = mod_exp(block[0], u, p)
        inv_a_pow_u = modinv(a_pow_u, p)
        x = (block[1] * inv_a_pow_u) % p
        # Left-pad each recovered block to the fixed block width.
        bstream.append('0b' + bin(x)[2:].zfill(block_size))
    return bstream.read(ciphertext[0])
class MicroKorgPGM(MicroKorgAbstractData):
    """Decoder for one microKORG program dump.

    Walks the 254-byte program image field by field (bytes annotated below),
    wrapping each value in its domain object and printing it as it goes.
    Bit-packed ('!!!BITMAP') bytes are unpacked by the get_* helpers, which
    left-pad the extracted bit slices back to a full byte.
    (Python 2 syntax.)
    """

    def __init__(self, bitstream):
        self.program_bitstream = BitStream(bitstream)
        print 'GENERAL'
        #bytes 0~11
        self.program_name = self.read_bytes(12).bytes
        print 'Program name: %s' % self.program_name
        #bytes 12,13 (dummy bytes)
        self.read_bytes(2)
        print 'ARPEGGIO TRIGGER CTRL'
        ##ARPEGGIO_TRIGGER
        #byte 14 !!!BITMAP
        length_data = self.get_trigger_length_data()
        self.arp_trigger_length = arpeggio.TriggerLength(length_data)
        print self.arp_trigger_length
        #byte 15
        self.arp_trigger_pattern = arpeggio.TriggerPattern(
            self.read_bytes())
        print self.arp_trigger_pattern
        #byte 16 !!!BITMAP
        self.voice_mode = arpeggio.VoiceMode(self.get_voice_mode())
        print self.voice_mode
        #byte 17 !!!BITMAP
        scale_key, scale_type = self.get_scale_key_and_type()
        self.scale_key = arpeggio.ScaleKey(scale_key)
        self.scale_type = arpeggio.ScaleType(scale_type)
        print self.scale_key
        print self.scale_type
        #byte 18 (dummy)
        self.read_bytes(1)
        print 'DELAY FX'
        ##DELAY FX
        #byte 19 !!!BITMAP
        delay_sync, delay_time_base = self.get_delay_sync_and_time_base()
        self.delay_sync = delay_fx.Sync(delay_sync)
        self.delay_time_base = delay_fx.TimeBase(delay_time_base)
        print self.delay_sync
        print self.delay_time_base
        #byte 20
        self.delay_time = delay_fx.Time(self.read_bytes())
        print self.delay_time
        #byte 21
        self.delay_depth = delay_fx.Depth(self.read_bytes())
        print self.delay_depth
        #byte 22
        self.delay_type = delay_fx.Type(self.get_delay_type())
        print self.delay_type
        print 'MOD FX'
        ##MOD FX
        #byte 23
        self.mod_lfo_speed = mod_fx.LFOSpeed(self.read_bytes())
        print self.mod_lfo_speed
        #byte 24
        self.mod_depth = mod_fx.Depth(self.read_bytes())
        print self.mod_depth
        #byte 25
        self.mod_type = mod_fx.Type(self.get_mod_type())
        print self.mod_type
        print 'EQ'
        ##EQ
        #byte 26
        self.eq_hi_freq = eq.HiFreq(self.get_freq())
        print self.eq_hi_freq
        #byte 27
        self.eq_hi_gain = eq.HiGain(self.get_gain())
        print self.eq_hi_gain
        #byte 28
        self.eq_low_freq = eq.LoFreq(self.get_freq())
        print self.eq_low_freq
        #byte 29
        self.eq_low_gain = eq.LoGain(self.get_gain())
        print self.eq_low_gain
        print 'ARPEGGIO'
        ##ARPEGGIO
        #byte 30 & 31
        self.arp_tempo = arpeggio.Tempo(self.read_bytes(2))
        print self.arp_tempo
        #byte 32 !!!BITMAP
        arp_on_off, arp_latch, arp_target, arp_key_sync = self.get_arp_bmp_32()
        self.arp_on_off = arpeggio.OnOff(arp_on_off)
        self.arp_latch = arpeggio.Latch(arp_latch)
        self.arp_target = arpeggio.Target(arp_target)
        self.arp_key_sync = arpeggio.KeySync(arp_key_sync)
        print self.arp_on_off
        print self.arp_latch
        print self.arp_target
        print self.arp_key_sync
        #byte 33 !!!BITMAP
        arp_type, arp_range = self.get_arp_type_and_range()
        self.arp_type = arpeggio.Type(arp_type)
        self.arp_range = arpeggio.Range(arp_range)
        print self.arp_type
        print self.arp_range
        #byte 34
        self.arp_gate_time = arpeggio.GateTime(self.read_bytes())
        print self.arp_gate_time
        #byte 35
        self.arp_resolution = arpeggio.Resolution(self.get_arp_resolution())
        print self.arp_resolution
        #byte 36
        self.arp_swing = arpeggio.Swing(self.read_bytes())
        print self.arp_swing
        print 'KBD OCTAVE'
        ##KBD OCTAVE
        #byte 37
        self.kbd_octave = kbd_octave.KeyboardOctave(self.read_bytes())
        print self.kbd_octave
        ###EITHER
        # Voice mode selects the tail layout: timbre(s) or vocoder data.
        if self.voice_mode.value.intle in [0, 2]:
            ##TIMBRE1 DATA
            #bytes 38-145
            print 'TIMBRE1'
            self.timbre1 = MicroKorgTimbreData(
                bitstream=self.program_bitstream.read(107 * 8))
            if self.voice_mode.value.intle == 2:  #i think??
                ##TIMBRE2 DATA
                #bytes 146-253
                print 'TIMBRE2'
                self.timbre2 = MicroKorgTimbreData(
                    bitstream=self.program_bitstream.read(107 * 8))
        ###OR
        elif self.voice_mode.value.intle == 3:
            ##VOCODER DATA
            #bytes 38-141
            print 'VOCODER'
            self.vocoder = MicroKorgVocoderData(
                bitstream=self.program_bitstream.read(103 * 8))
            #bytes 142-253 (dummy if vocoder)
            self.program_bitstream.read(111 * 8)

    def get_trigger_length_data(self):
        # Byte 14: trigger length lives in the top 3 bits.
        b = self.read_bytes()
        length_data = b.bin[0:3]
        trigger_length = BitStream(bin='0b00000%s' % length_data)
        return trigger_length

    def get_voice_mode(self):
        # Byte 16: voice mode occupies bits 4-5.
        b = self.read_bytes()
        data = b.bin[4:6]
        voice_mode = BitStream(bin='0b000000%s' % data)
        return voice_mode

    def get_scale_key_and_type(self):
        # Byte 17: scale key in the high nibble, scale type in the low nibble.
        b = self.read_bytes()
        key_data = b.bin[0:4]
        type_data = b.bin[4:]
        scale_key = BitStream(bin='0b0000%s' % key_data)
        scale_type = BitStream(bin='0b0000%s' % type_data)
        return scale_key, scale_type

    def get_delay_sync_and_time_base(self):
        # Byte 19: sync flag is the last bit, time base the top 3 bits.
        b = self.read_bytes()
        sync_data = b.bin[7]
        time_base_data = b.bin[0:3]
        delay_sync = BitStream(bin='0b0000000%s' % sync_data)
        delay_time_base = BitStream(bin='0b00000%s' % time_base_data)
        return delay_sync, delay_time_base

    def get_arp_bmp_32(self):
        # Byte 32: on/off, latch, target and key-sync packed into one byte.
        b = self.read_bytes()
        on_off_data = b.bin[7]
        latch_data = b.bin[6]
        target_data = b.bin[4:6]  # this says 4&5?
        key_sync_data = b.bin[0]
        arp_on_off = BitStream(bin='0b0000000%s' % on_off_data)
        latch = BitStream(bin='0b0000000%s' % latch_data)
        target = BitStream(bin='0b000000%s' % target_data)
        key_sync = BitStream(bin='0b0000000%s' % key_sync_data)
        return arp_on_off, latch, target, key_sync

    def get_arp_type_and_range(self):
        # Byte 33: type in the top 3 bits, range in the bottom 4.
        b = self.read_bytes()
        type_data = b.bin[0:3]
        range_data = b.bin[4:]
        arp_type = BitStream(bin='0b00000%s' % type_data)
        arp_range = BitStream(bin='0b0000%s' % range_data)
        return arp_type, arp_range

    def get_delay_type(self):
        # Byte 22: delay type in the top 2 bits.
        b = self.read_bytes()
        type_data = b.bin[0:2]
        delay_type = BitStream(bin='0b000000%s' % type_data)
        return delay_type

    def get_mod_type(self):
        # Byte 25: mod type in the top 3 bits.
        b = self.read_bytes()
        type_data = b.bin[0:3]
        mod_type = BitStream(bin='0b00000%s' % type_data)
        return mod_type

    def get_freq(self):
        # EQ frequency: top 6 bits of the byte.
        b = self.read_bytes()
        freq_data = b.bin[0:6]
        freq = BitStream(bin='0b00%s' % freq_data)
        return freq

    def get_gain(self):
        # EQ gain: top 7 bits of the byte.
        b = self.read_bytes()
        gain_data = b.bin[0:7]
        gain = BitStream(bin='0b0%s' % gain_data)
        return gain

    def get_arp_resolution(self):
        # Byte 35: resolution in the top 4 bits.
        b = self.read_bytes()
        reso_data = b.bin[0:4]
        arp_reso = BitStream(bin='0b0000%s' % reso_data)
        return arp_reso
def parse(data, byte_offset):
    """Parse one MPEG transport-stream packet from raw bytes.

    :param data: the packet's bytes (wrapped in a BitStream below)
    :param byte_offset: byte position of this packet in the source file,
        kept for error reporting and stored on the result
    :return: a populated TSPacket
    :raises Exception: if the first byte is not the TS sync byte
    """
    ts = TSPacket(None)
    ts.byte_offset = byte_offset
    data = BitStream(data)
    sync_byte = data.read("uint:8")
    if sync_byte != TSPacket.SYNC_BYTE:
        raise Exception(
            "First byte of TS packet at offset {} is not a sync byte."
            .format(byte_offset))
    # 4-byte TS header
    ts.transport_error_indicator = data.read("bool")
    ts.payload_unit_start_indicator = data.read("bool")
    ts.transport_priority = data.read("bool")
    ts.pid = data.read("uint:13")
    ts.scrambling_control = data.read("uint:2")
    # adaptation_field_control
    has_adaptation_field = data.read("bool")
    has_payload = data.read("bool")
    ts.continuity_counter = data.read("uint:4")
    if has_adaptation_field:
        adaptation_field_length = data.read("uint:8")
        if adaptation_field_length:
            ts.discontinuity_indicator = data.read("bool")
            ts.random_access_indicator = data.read("bool")
            ts.elementary_stream_priority_indicator = data.read("bool")
            # optional-field presence flags
            pcr_flag = data.read("bool")
            opcr_flag = data.read("bool")
            splicing_point_flag = data.read("bool")
            transport_private_data_flag = data.read("bool")
            adaptation_field_extension_flag = data.read("bool")
            if pcr_flag:
                ts.program_clock_reference_base = data.read("uint:33")
                data.read(6)  # reserved
                ts.program_clock_reference_extension = data.read("uint:9")
            if opcr_flag:
                ts.original_program_clock_reference_base = data.read(
                    "uint:33")
                data.read(6)  # reserved
                ts.original_program_clock_reference_extension = data.read(
                    "uint:9")
            if splicing_point_flag:
                ts.splice_countdown = data.read("uint:8")
            if transport_private_data_flag:
                transport_private_data_length = data.read("uint:8")
                ts.private_data = data.read(
                    transport_private_data_length * 8).bytes
            if adaptation_field_extension_flag:
                # NOTE(review): adaptation_field_extension_length is read
                # but never used to bound the fields below.
                adaptation_field_extension_length = data.read("uint:8")
                ltw_flag = data.read("bool")
                piecewise_rate_flag = data.read("bool")
                seamless_splice_flag = data.read("bool")
                data.read(5)  # reserved
                if ltw_flag:
                    ts.ltw_valid_flag = data.read("bool")
                    ts.ltw_offset = data.read("uint:15")
                if piecewise_rate_flag:
                    data.read(2)  # reserved
                    ts.piecewise_rate = data.read("uint:22")
                if seamless_splice_flag:
                    ts.splice_type = data.read("uint:4")
                    ts.dts_next_au = read_timestamp("DTS_next_AU", data)
        # Skip the rest of the header and padding bytes
        data.bytepos = adaptation_field_length + 5
    if has_payload:
        # NOTE(review): read("bytes") with no length — presumably consumes
        # the remainder of the packet; confirm against the bitstring version.
        ts.payload = data.read("bytes")
    return ts
def __init__(self, data): data = BitStream(data) pointer_field = data.read("uint:8") if pointer_field: data.read(pointer_field) self.table_id = data.read("uint:8") if self.table_id != self.TABLE_ID: raise Exception( "table_id for PAT is {} but should be {}".format( self.table_id, self.TABLE_ID)) self.section_syntax_indicator = data.read("bool") self.private_indicator = data.read("bool") data.read(2) # reserved section_length = data.read("uint:12") self.transport_stream_id = data.read("uint:16") data.read(2) # reserved self.version_number = data.read("uint:5") self.current_next_indicator = data.read("bool") self.section_number = data.read("uint:8") self.last_section_number = data.read("uint:8") num_programs = (section_length - 9) // 4 self.programs = OrderedDict() for _ in range(num_programs): program_number = data.read("uint:16") data.read(3) # reserved pid = data.read("uint:13") self.programs[program_number] = pid data.read("uint:32") # crc calculated_crc = crc32(data.bytes[pointer_field + 1:data.bytepos]) if calculated_crc != 0: raise Exception( "CRC of entire PAT should be 0, but saw %s." \ % (calculated_crc)) while data.bytepos < len(data.bytes): padding_byte = data.read("uint:8") if padding_byte != 0xFF: raise Exception("Padding byte at end of PAT was 0x{:X} but " "should be 0xFF".format(padding_byte))
def __init__(self, data): data = BitStream(data) pointer_field = data.read("uint:8") if pointer_field: data.read(pointer_field) self.table_id = data.read("uint:8") if self.table_id != self.TABLE_ID: raise Exception( "table_id for PMT is {} but should be {}".format( self.table_id, self.TABLE_ID)) self.section_syntax_indicator = data.read("bool") self.private_indicator = data.read("bool") data.read(2) # reserved section_length = data.read("uint:12") self.program_number = data.read("uint:16") data.read(2) # reserved self.version_number = data.read("uint:5") self.current_next_indicator = data.read("bool") self.section_number = data.read("uint:8") self.last_section_number = data.read("uint:8") data.read(3) # reserved self.pcr_pid = data.read("uint:13") data.read(4) # reserved program_info_length = data.read("uint:12") self.descriptors = Descriptor.read_descriptors( data, program_info_length) self.streams = OrderedDict() while data.bytepos < section_length + 3 - 4: stream = Stream(data) if stream.elementary_pid in self.streams: raise Exception( "PMT contains the same elementary PID more than once.") self.streams[stream.elementary_pid] = stream data.read("uint:32") # crc calculated_crc = crc32(data.bytes[pointer_field + 1:data.bytepos]) if calculated_crc != 0: raise Exception( "CRC of entire PMT should be 0, but saw %s." \ % (calculated_crc)) while data.bytepos < len(data.bytes): padding_byte = data.read("uint:8") if padding_byte != 0xFF: raise Exception("Padding byte at end of PMT was 0x{:02X} but " "should be 0xFF".format(padding_byte))
class Serializer(object):
    """
    Class for serialize and de-serialize messages.
    """
    def __init__(self):
        """
        Initialize a Serializer.
        """
        self._reader = None
        self._writer = None

    def deserialize(self, raw, host, port):
        """
        De-serialize a stream of byte to a message.

        NOTE(review): return type is inconsistent — a Message on success,
        a (message, error-code) tuple on BAD_OPTION/BAD_REQUEST, and None
        on a parse error; callers must handle all three.

        :param raw: received bytes
        :param host: source host
        :param port: source port
        :return: the message
        """
        self._reader = BitStream(bytes=raw, length=(len(raw) * 8))
        # Fixed CoAP header fields.
        version = self._reader.read(defines.VERSION_BITS).uint
        message_type = self._reader.read(defines.TYPE_BITS).uint
        token_length = self._reader.read(defines.TOKEN_LENGTH_BITS).uint
        code = self._reader.read(defines.CODE_BITS).uint
        mid = self._reader.read(defines.MESSAGE_ID_BITS).uint
        # Pick the concrete message class from the code.
        if self.is_response(code):
            message = Response()
            message.code = code
        elif self.is_request(code):
            message = Request()
            message.code = code
        else:
            message = Message()
        message.source = (host, port)
        message.destination = None
        message.version = version
        message.type = message_type
        message.mid = mid
        if token_length > 0:
            message.token = self._reader.read(token_length * 8).bytes
        else:
            message.token = None

        # Options are delta-encoded: each option number is the previous
        # number plus the decoded delta nibble.
        current_option = 0
        try:
            while self._reader.pos < self._reader.len:
                next_byte = self._reader.peek(8).uint
                if next_byte != int(defines.PAYLOAD_MARKER):
                    # the first 4 bits of the byte represent the option delta
                    delta = self._reader.read(4).uint
                    # the second 4 bits represent the option length
                    length = self._reader.read(4).uint
                    current_option += self.read_option_value_from_nibble(delta)
                    option_length = self.read_option_value_from_nibble(length)
                    # read option
                    try:
                        option_name, option_type, option_repeatable, default = defines.options[current_option]
                    except KeyError:
                        log.err("unrecognized option")
                        return message, "BAD_OPTION"
                    if option_length == 0:
                        value = None
                    elif option_type == defines.INTEGER:
                        value = self._reader.read(option_length * 8).uint
                    else:
                        value = self._reader.read(option_length * 8).bytes
                    option = Option()
                    option.number = current_option
                    option.value = self.convert_to_raw(current_option, value, option_length)
                    message.add_option(option)
                else:
                    # Payload marker: everything after it is the payload.
                    self._reader.pos += 8  # skip payload marker
                    if self._reader.len <= self._reader.pos:
                        log.err("Payload Marker with no payload")
                        return message, "BAD_REQUEST"
                    to_end = self._reader.len - self._reader.pos
                    message.payload = self._reader.read(to_end).bytes
            return message
        except ReadError, e:
            log.err("Error parsing message: " + str(e))
            return None
def __init__(self, data, ts_packets): self.bytes = data first_ts = ts_packets[0] self.pid = first_ts.pid self.byte_offset = first_ts.byte_offset self.size = len(ts_packets) * TSPacket.SIZE self.random_access = first_ts.random_access_indicator self.ts_packets = ts_packets data = BitStream(data) start_code = data.read("uint:24") if start_code != 0x000001: raise Exception("packet_start_code_prefix is 0x{:06X} but should " "be 0x000001".format(start_code)) self.stream_id = data.read("uint:8") pes_packet_length = data.read("uint:16") if StreamID.has_pes_header(self.stream_id): bits = data.read("uint:2") if bits != 2: raise Exception("First 2 bits of a PES header should be 0x2 " "but saw 0x{:02X}'".format(bits)) self.pes_scrambling_control = data.read("uint:2") self.pes_priority = data.read("bool") self.data_alignment_indicator = data.read("bool") self.copyright = data.read("bool") self.original_or_copy = data.read("bool") pts_dts_flags = data.read("uint:2") escr_flag = data.read("bool") es_rate_flag = data.read("bool") dsm_trick_mode_flag = data.read("bool") additional_copy_info_flag = data.read("bool") pes_crc_flag = data.read("bool") pes_extension_flag = data.read("bool") pes_header_data_length = data.read("uint:8") if pts_dts_flags & 2: bits = data.read("uint:4") if bits != pts_dts_flags: raise Exception( "2 bits before PTS should be 0x{:02X} but saw 0x{" ":02X}".format(pts_dts_flags, bits)) self.pts = read_timestamp("PTS", data) if pts_dts_flags & 1: bits = data.read("uint:4") if bits != 0x1: raise Exception("2 bits before DTS should be 0x1 but saw " "0x{:02X}".format(bits)) self.dts = read_timestamp("DTS", data) # skip the rest of the header and stuffing bytes data.bytepos = pes_header_data_length + 9 if self.stream_id == StreamID.PADDING: self.payload = None else: self.payload = data.read("bytes")
def main(argv): if len(argv) < 4: print("Usage: " + argv[0] + " <header dump> <type (hex|bin|raw)> <definition>") return filename = argv[1] typ = argv[2] definition = [] for argc in range(3, len(argv)): definition.append(argv[argc]) print("File: " + filename) print("Definition: " + ", ".join(definition)) print("Type: " + typ) bits = [] for d in definition: bits = parseDefinition(d, bits) content = None field = None with open(filename) as f: if typ == "hex": content = f.read().strip() field = BitStream("0x" + content) if typ == "bin": content = f.read().strip() field = BitStream("0b" + content) if typ == "raw": field = BitStream(f) parsed = {} pos = 0 last_header = None for bit in bits: field.pos = pos bitlen = 0 lenexpr = bit.getBits() condexpr = bit.getCondition() for p in sorted(parsed.keys(), key=len, reverse=True): lenexpr = lenexpr.replace(p, str(parsed[p])) condexpr = condexpr.replace(p, str(parsed[p])) try: bitlen = eval(lenexpr) except: print("Error: " + str(bit.getBits()) + " is not a valid size!") return try: cond = eval(condexpr) except: print("Error: " + str(bit.getCondition()) + " is not a valid condition! (Parsed to " + condexpr + ")") return if cond: try: val = field.read(bitlen) except: print("Could not read field " + str(bit.getName()) + " with " + str(bitlen) + " bits!") continue parsed[bit.getName()] = val.uint pos += bitlen if last_header is not bit.getPrefix(): last_header = bit.getPrefix() print("") out = bit.getName() + ": " for t, endian in bit.getTypes(): if t == "hex": out += "0x" + val.hex + " " if t == "int": out += str(endianness(val.uint, endian, int(bitlen))) + " " if t == "bin": out += "0b" + val.bin + " " if t == "str": s = "" for si in range(0, len(str(val)[2:]), 2): out += str(chr(int(str(val)[si + 2:si + 4], 16))) print(out)
class NALUnit:
    """One HEVC NAL unit: extracts the RBSP payload and parses syntax
    elements out of it via rbsp_read()."""

    def __init__(self, filp, pos, utype, size, tid, nosetup=False):
        # filp: source BitStream positioned in the containing stream
        # pos: byte position of this NALU in the stream
        # utype/size/tid: nal_unit_type, byte length, temporal id
        self.filp = filp
        self.pos = pos
        self.num_bytes_in_nalu = size
        self.nal_unit_type = utype
        self.temporal_id = tid
        self.rbsp_byte = BitStream()
        self.setup_rbsp()
        self.print_bin()
        if not nosetup:
            self.setup()

    def rbsp_read(self, fmt):
        """Read one syntax element from the extracted RBSP."""
        return self.rbsp_byte.read(fmt)

    def next_bits(self, fmt, forward=False):
        """Peek (or consume, if forward) fmt from the source; 0 on EOF."""
        try:
            if not forward:
                ret = self.filp.peek(fmt)
            else:
                ret = self.filp.read(fmt)
        except ReadError:
            return 0
        return ret

    def setup_rbsp(self):
        """Copy the NALU payload into self.rbsp_byte, removing
        emulation_prevention_three_byte (0x03 after 0x0000)."""
        self.filp.bytepos += 2  # skip the 2-byte NALU header
        self.num_bytes_in_rbsp = 0
        i = 2
        while i < self.num_bytes_in_nalu:
            if i + 2 < self.num_bytes_in_nalu and self.next_bits('hex: 24') == '000003':
                self.rbsp_byte.append(self.filp.read(8))
                self.num_bytes_in_rbsp += 1
                self.rbsp_byte.append(self.filp.read(8))
                self.num_bytes_in_rbsp += 1
                # discard emulation_prevention_three_byte
                self.filp.read(8)
                i += 3
            else:
                self.rbsp_byte.append(self.filp.read(8))
                self.num_bytes_in_rbsp += 1
                i += 1
        self.filp.bytepos = self.pos  # restore source position

    def more_rbsp_data(self):
        """Return True if another '1' bit exists before end of RBSP.
        NOTE(review): consumes bits up to and including that '1'."""
        try:
            while self.rbsp_read('uint: 1') != 1:
                pass
            return True
        except ReadError:
            return False

    def rbsp_trailing_bits(self):
        """Validate rbsp_stop_one_bit + alignment zeros at end of RBSP.
        NOTE(review): '1 << bits' looks off by one — a stop bit followed
        by zeros in a field of `bits` bits equals 1 << (bits - 1)."""
        pos = self.rbsp_byte.pos
        bits = self.rbsp_byte.bytealign()
        self.rbsp_byte.pos = pos
        if bits == 0 or self.rbsp_read('uint: %d' % bits) != 1 << bits:
            print('Wrong rbsp_trailing_bits at NALU begined at bytes %d' % self.pos)
            exit(1)

    def profile_tier_level(self, ProfilePresentFlag, MaxNumSubLayersMinus1):
        """Parse the profile_tier_level() syntax structure."""
        if ProfilePresentFlag:
            self.general_profile_space = self.rbsp_read('uint: 2')
            self.general_tier_flag = self.rbsp_read('uint: 1')
            self.general_profile_idc = self.rbsp_read('uint: 5')
            self.general_profile_compatibility_flag = [0] * 32
            for i in range(0, 32):
                self.general_profile_compatibility_flag[i] = self.rbsp_read("uint: 1")
            self.general_reserved_zero_16bits = self.rbsp_read('uint: 16')
        self.general_level_idc = self.rbsp_read('uint: 8')
        self.sub_layer_profile_present_flag = [0] * MaxNumSubLayersMinus1
        self.sub_layer_level_present_flag = [0] * MaxNumSubLayersMinus1
        self.sub_layer_profile_space = [0] * MaxNumSubLayersMinus1
        self.sub_layer_tier_flag = [0] * MaxNumSubLayersMinus1
        self.sub_layer_profile_idc = [0] * MaxNumSubLayersMinus1
        self.sub_layer_reserved_zero_16bits = [0] * MaxNumSubLayersMinus1
        self.sub_layer_level_idc = [0] * MaxNumSubLayersMinus1
        # NOTE(review): [[0] * 32] * N aliases ONE inner list across all
        # sub-layers — writes to [i][j] hit every i. Likely a bug.
        self.sub_layer_profile_compatibility_flag = [[0] * 32] * MaxNumSubLayersMinus1
        for i in range(0, MaxNumSubLayersMinus1):
            self.sub_layer_profile_present_flag[i] = self.rbsp_read('uint: 1')
            self.sub_layer_level_present_flag[i] = self.rbsp_read('uint: 1')
            if ProfilePresentFlag and self.sub_layer_profile_present_flag[i]:
                self.sub_layer_profile_space[i] = self.rbsp_read('uint: 2')
                self.sub_layer_tier_flag[i] = self.rbsp_read('uint: 1')
                self.sub_layer_profile_idc[i] = self.rbsp_read('uint: 5')
                for j in range(0, 32):
                    self.sub_layer_profile_compatibility_flag[i][j] = self.rbsp_read('uint: 1')
                self.sub_layer_reserved_zero_16bits[i] = self.rbsp_read('uint: 16')
            if self.sub_layer_level_present_flag[i]:
                self.sub_layer_level_idc[i] = self.rbsp_read('uint: 8')

    def op_point(self, opIdx):
        """Parse one operation point.
        NOTE(review): the chained assignment below looks garbled — it was
        presumably `self.op_num_layer_id_values_minus1[opIdx] = ...`; as
        written it tries to unpack an int into [opIdx] and will raise."""
        self.op_num_layer_id_values_minus1 = [opIdx] = self.rbsp_read('ue')
        self.op_layer_id[opIdx] = [0] * self.op_num_layer_id_values_minus1
        for i in range(0, self.op_num_layer_id_values_minus1):
            self.op_layer_id[opIdx][i] = self.rbsp_read('uint: 6')

    def short_term_ref_pic_set(self, idxRps):
        """Parse short_term_ref_pic_set(idxRps)."""
        if idxRps != 0:
            self.inter_ref__pic_set_prediction_flag = self.rbsp_read('uint: 1')
        else:
            self.inter_ref__pic_set_prediction_flag = 0
        if self.inter_ref__pic_set_prediction_flag:
            # Predicted from a reference RPS.
            if idxRps == self.num_short_term_ref_pic_sets:
                self.delta_idx_minus1 = self.rbsp_read('ue')
            else:
                self.delta_idx_minus1 = 0
            RIdx = idxRps - self.delta_idx_minus1 - 1
            self.delta_rps_sign = self.rbsp_read('uint: 1')
            self.abs_delta_rps_minus1 = self.rbsp_read('ue')
            self.used_by_curr_pic_flag = [0] * (self.NumDeltaPocs[RIdx] + 1)
            self.use_delta_flag = [0] * (self.NumDeltaPocs[RIdx] + 1)
            for i in range(0, self.NumDeltaPocs[RIdx] + 1):
                self.used_by_curr_pic_flag[i] = self.rbsp_read('uint: 1')
                # NOTE(review): attribute typo — `used_by_currpic_flag`
                # (missing underscore) will raise AttributeError; should
                # presumably be `used_by_curr_pic_flag`.
                if not self.used_by_currpic_flag[i]:
                    self.use_delta_flag[i] = self.rbsp_read('uint: 1')
        else:
            # Explicitly coded negative/positive POC deltas.
            num_negative_pics = self.rbsp_read('ue')
            num_positive_pics = self.rbsp_read('ue')
            self.delta_poc_s0_minus1 = [0] * num_negative_pics
            self.used_by_curr_pic_s0_flag = [0] * num_negative_pics
            for i in range(0, num_negative_pics):
                self.delta_poc_s0_minus1[i] = self.rbsp_read('ue')
                self.used_by_curr_pic_s0_flag[i] = self.rbsp_read('uint: 1')
            self.delta_poc_s1_minus1 = [0] * num_positive_pics
            self.used_by_curr_pic_s1_flag = [0] * num_positive_pics
            for i in range(0, num_positive_pics):
                self.delta_poc_s1_minus1[i] = self.rbsp_read('ue')
                self.used_by_curr_pic_s1_flag[i] = self.rbsp_read('uint: 1')
            self.NumDeltaPocs[idxRps] = num_negative_pics + num_positive_pics

    def setup(self):
        # Overridden by subclasses that parse a specific NALU type.
        pass

    def __str__(self):
        return 'NALU: pos=%d, length=%d, type=%s, tid=%d' % (self.pos, self.num_bytes_in_nalu, NAL_UNIT_TYPE[self.nal_unit_type], self.temporal_id)

    def print_bin(self):
        """Hex-dump the RBSP bytes, 16 per line."""
        i = 1
        for abyte in self.rbsp_byte.tobytes():
            if i == 16:
                print('%3s' % hex(abyte)[2: ])
                i = 1
            else:
                print('%3s' % hex(abyte)[2: ], end=' ')
                i += 1
        print('\n')
if( usbEndpoint != INCOMING_ENDPOINT and usbEndpoint != OUTGOING_ENDPOINT ): continue usbBuffer = BitStream('0x%s' % ( packet.data.usb_capdata.replace( ':', '' ) ) ) usbHeader = usbBuffer.readlist( 'bytes:3, uint:8' ) # Validate the header if( usbEndpoint == OUTGOING_ENDPOINT and usbHeader[0].encode( 'hex' ) != '000000' ): print 'Unexpected USB Header. Expected "0x000000", got "0x%s".' % ( usbHeader[0].encode( 'hex' ) ) raise Exception if( usbEndpoint == INCOMING_ENDPOINT and usbHeader[0] != 'ABC' ): print 'Unexpected USB Header. Expected "0x414243", got "0x%s".' % ( usbHeader[0].encode( 'hex' ) ) raise Exception messageBuffer.append( usbBuffer.read( usbHeader[1] * 8 ) ) # Clear the messageBuffer if we have a full message # TODO - we need to be able to figure out if all 60 bytes are conusumed, but it's the end of the message if( usbHeader[1] < USB_PACKET_SIZE ): print >> sys.stderr, 'Message %s' % ( 'OUT' if usbEndpoint == OUTGOING_ENDPOINT else 'IN' ) print >> sys.stderr, 'Hex: %s' % ( messageBuffer.hex ) # TODO - make a bayerMessage to also handle standard command sequences and ASTM messages if( messageBuffer.bytes[0:2] != 'Q\x03' ): print >> sys.stderr, 'String: %s\n' % ( messageBuffer.bytes ) else: msg = BayerBinaryMessage.MessageFactory( messageBuffer, BayerBinaryMessage.OUT if usbEndpoint == OUTGOING_ENDPOINT else BayerBinaryMessage.IN, pumpSession ) if( msg is not None ): if( isinstance( msg, MtGetAttachedPumpMessage ) ): pumpSession.pumpSerial = msg.pumpSerial
def instruction(self, opcode, address): # you don't put the 0b, I do, you just punch the cards right bopcode = BitStream('0b'+opcode) baddress = BitStream('0b'+address) self.ops[bopcode.read('bin:8')](baddress.int)
class Bitstream:
    """ class for input bitstream """

    def __init__(self, file):
        self.srcfile = BitStream(file)      # whole source as a BitStream
        self.cur_num_bytes_in_nalu = 0      # size of the NALU being scanned
        self.cur_index = 0                  # cursor into nalu_list
        self.nalu_list = []                 # NALUs found by init()

    def get_next_nalu(self):
        """Return the NALU at the cursor and advance it."""
        self.cur_index += 1
        return self.nalu_list[self.cur_index-1]

    def set_next_nalu_pos(self, pos):
        """Move the NALU cursor."""
        self.cur_index = pos

    def get_all_nalu(self):
        return self.nalu_list

    def next_bits(self, bits, forward=False):
        """Peek (or consume, if forward) `bits` bits as a hex string;
        returns 0 (int, not str) on ReadError/EOF."""
        try:
            if not forward:
                ret = self.srcfile.peek('hex:%d' % bits)
            else:
                ret = self.srcfile.read('hex:%d' % bits)
        except ReadError:
            return 0
        return ret

    def calculate_nalu_size(self):
        """Scan forward to the next start code (or EOF) and return the
        byte size of the current NALU; source position is restored."""
        head_pos = self.srcfile.bytepos
        value = self.next_bits(24)
        # Advance until a start-code prefix or EOF (value == 0).
        while value != 0 \
                and value != '000000' \
                and value != '000001':
            self.srcfile.bytepos += 1
            value = self.next_bits(24)
        else:
            # NOTE(review): while/else with no break — this else always
            # runs; it only serves as a visual grouping here.
            if value == 0:
                # will be a bug here if last NALU is only 0 or 1 byte
                # but NALU header needs 2 bytes, so ignore that
                size = self.srcfile.bytepos - head_pos + 2
            else:
                size = self.srcfile.bytepos - head_pos
        if self.srcfile.bytealign() != 0:
            print('Byte Align Error when calculate NALU Size!')
            exit(1)
        self.srcfile.bytepos = head_pos
        return size

    def init(self):
        """ save all NALU positions in the list """
        while 1:
            # Skip leading_zero_8bits until a 3- or 4-byte start code.
            while self.next_bits(24) != '000001' \
                    and self.next_bits(32) != '00000001':
                value = self.next_bits(bits_of_leading_zero_8bits, forward=True)
                if value != leading_zero_8bits:
                    print('wrong leading_zero_8bits')
                    exit(1)
            # 4-byte start code: consume the extra zero_byte first.
            if self.next_bits(24) != '000001':
                value = self.next_bits(bits_of_zero_byte, forward=True)
                if value != zero_byte:
                    print('wrong zero_byte')
                    exit(1)
            value = self.next_bits(bits_of_start_code_prefix_one_3bytes, forward=True)
            if value != start_code_prefix_one_3bytes:
                print('wrong start_code_prefix_one_3bytes')
                exit(1)
            # Record the NALU and jump past it.
            self.cur_num_bytes_in_nalu = self.calculate_nalu_size()
            self.nalu_list.append(nalunit.get_nalu(self.srcfile, self.srcfile.bytepos, self.cur_num_bytes_in_nalu))
            self.srcfile.bytepos = self.srcfile.bytepos + self.cur_num_bytes_in_nalu
            # Skip trailing_zero_8bits up to the next start code / EOF.
            while self.next_bits(8) != 0 \
                    and self.next_bits(24) != '000001' \
                    and self.next_bits(32) != '00000001':
                value = self.next_bits(bits_of_trailing_zero_8bits, forward=True)
                if value != trailing_zero_8bits:
                    print('wrong trailing_zero_8bits')
                    exit(1)
            if self.next_bits(8) == 0:
                return  # EOF reached

    def get_nalu_nums(self):
        return len(self.nalu_list)

    def __str__(self):
        return 'bitstream: length=%d, numofNALU=%d' % (len(self.srcfile), self.get_nalu_nums())
from microkorg_pgm import MicroKorgPGM here = os.path.abspath(os.path.dirname(__file__)) f_sysex = open(os.path.join(here, 'sample_data/microkorg-6-30-13.syx'), 'rb') sysex = f_sysex.read() f_sysex.close() # print len(sysex) sysex_bitstream = BitStream(bytes=sysex) status = sysex_bitstream.read(8).hex if status != GOOD_STATUS: raise ValueError('Bad Status: %s Expected: 0xf0' % status) else: print 'Status OK!' mfgr_code = sysex_bitstream.read(8).hex if mfgr_code == '0x00': # Indicates extended MFGR bytes # discard the first byte and read the next two (e.g. see MS above) mfgr_code = sysex_bitstream.read(16).hex mfgr = MFGR_LOOKUPS[mfgr_code] print 'SysEx for "%s" device.' % mfgr family_id = sysex_bitstream.read(8).hex
def from_data(cls, databytes): values = dict.fromkeys(MdatStateMessage.fields) # accept either the full frame payload or just the data after the CCL type identifier if (len(databytes) > 31): databytes = databytes[1:] d = BitStream(bytes=databytes) values['mode'] = 'MDAT_STATE' lat_bits = BitArray(d.read('bits:24')) values['latitude'] = decode_latlon(lat_bits) lon_bits = BitArray(d.read('bits:24')) values['longitude'] = decode_latlon(lon_bits) values['fix_age'] = d.read('uint:8') * 4 values['time_date'] = decode_time_date(d.read('bits:24')) values['heading'] = d.read('uint:8') * (360.0 / 255.0) mission_mode_depth_bits = d.read('bits:16') (values['mission_mode'], values['depth']) = decode_mission_mode_and_depth(mission_mode_depth_bits) values['faults_bits'] = d.read('bits:40') values['mission_leg'] = d.read('uint:8') values['estimated_velocity'] = d.read('uint:8') / 25.0 values['objective_index'] = d.read('uint:8') values['power_watts'] = d.read('uint:8') * 4.0 goal_lat_bits = BitArray(d.read('bits:24')) values['goal_latitude'] = decode_latlon(goal_lat_bits) goal_lon_bits = BitArray(d.read('bits:24')) values['goal_longitude'] = decode_latlon(goal_lon_bits) values['battery_percent'] = d.read('uint:8') gfi_pitch_oil_encoded = BitArray(d.read('bits:16')) gfi_pitch_oil_encoded.byteswap() values['gfi_percent'] = gfi_pitch_oil_encoded[11:].uint * 100.0 / 31.0 values['oil'] = gfi_pitch_oil_encoded[6:11].uint * 100.0 / 31.0 values['pitch'] = gfi_pitch_oil_encoded[0:6].int * 180.0 / 63.0 # Make a message mdat_state = cls(values) return mdat_state
class _RawBinaryData(object):
    """ Hold a location reference description as a bit stream."""

    # Supported binary format versions (inclusive bounds).
    MIN_VERSION = BINARY_VERSION_2
    MAX_VERSION = LATEST_BINARY_VERSION

    def __init__(self, data, base64=False):
        """ Constructor.

        :param string data: Binary data
        :param bool base64: True if data is coded in base64
        """
        if base64:
            data = b64decode(data)
        #: raw data size
        self._sz = len(data)
        #: bit stream used to read data
        self._bs = BitStream(bytes=data)

    def getbits(self, *bits):
        """ Read the given numbers of bits.

        :param tuple bits: Tuple of number of bits to read
        :returns: Tuple of bit fields
        :rtype: tuple
        """
        return tuple(self._bs.read(v) for v in bits)

    def get_position(self):
        """ Returns position in the bit stream.

        :returns: Position in the bit stream
        :rtype: int
        """
        return self._bs.pos

    @property
    def num_bytes(self):
        """ Size of the decoded data.

        :returns: Size of the decoded data.
        :rtype: int
        """
        return self._sz

    @property
    def version(self):
        """ Return binary version of the data

        :returns: Binary version of the data.
        :rtype: int
        """
        return self.header.ver

    @lazyproperty
    def header(self):
        """ Parse header (once) location type

        :returns: Header data
        :rtype: _BinaryHeader
        """
        # Validate data size
        if self._sz < min(MIN_BYTES_LINE_LOCATION,
                          MIN_BYTES_POINT_LOCATION,
                          MIN_BYTES_CLOSED_LINE_LOCATION):
            raise InvalidDataSizeError("not enough bytes in data")
        # Header bit fields; area flag is split into two bits (arf1, arf0).
        _, arf1, pf, arf0, af, ver = self.getbits(*HEADER_BITS)
        arf = 2 * arf1 + arf0
        return _BinaryHeader(arf, af, pf, ver)

    @lazyproperty
    def location_type(self):
        """ Parse location type (once)

        Classifies the reference from the header flags and total byte
        size (point / line / area variants).

        :returns: Location type
        :rtype: LocationType
        """
        header = self.header
        # Check version
        if not self.MIN_VERSION <= header.ver <= self.MAX_VERSION:
            raise BinaryVersionError("Invalid binary version {}".format(header.ver))
        is_point = (header.pf == IS_POINT)
        has_attributes = (header.af == HAS_ATTRIBUTES)
        area_code = header.arf
        is_area = ((area_code == 0 and not is_point and not has_attributes)
                   or area_code > 0)
        total_bytes = self._sz
        loc_type = LocationType.UNKNOWN
        if not is_point and not is_area and has_attributes:
            loc_type = LocationType.LINE_LOCATION
        elif is_point and not is_area:
            if not has_attributes:
                if total_bytes == GEOCOORD_SIZE:
                    loc_type = LocationType.GEO_COORDINATES
                else:
                    raise InvalidDataSizeError("Invalid byte size")
            else:
                # Size discriminates the two point variants, each with an
                # optional trailing offset field.
                if total_bytes == POINT_ALONG_LINE_SIZE or total_bytes == (POINT_ALONG_LINE_SIZE + POINT_OFFSET_SIZE):
                    loc_type = LocationType.POINT_ALONG_LINE
                elif total_bytes == POINT_WITH_ACCESS_SIZE or total_bytes == (POINT_WITH_ACCESS_SIZE + POINT_OFFSET_SIZE):
                    loc_type = LocationType.POI_WITH_ACCESS_POINT
                else:
                    raise InvalidDataSizeError("Invalid byte size")
        elif is_area and not is_point and has_attributes:
            if total_bytes >= MIN_BYTES_CLOSED_LINE_LOCATION:
                loc_type = LocationType.CLOSED_LINE
            else:
                raise InvalidDataSizeError("Invalid byte size")
        else:
            if area_code == AREA_CODE_CIRCLE:
                loc_type = LocationType.CIRCLE
            elif area_code == AREA_CODE_RECTANGLE:
                # includes case AREA_CODE_GRID
                if total_bytes == RECTANGLE_SIZE or total_bytes == LARGE_RECTANGLE_SIZE:
                    loc_type = LocationType.RECTANGLE
                elif total_bytes == GRID_SIZE or total_bytes == LARGE_GRID_SIZE:
                    loc_type = LocationType.GRID
                else:
                    raise InvalidDataSizeError("Invalid byte size")
            elif area_code == AREA_CODE_POLYGON:
                if not has_attributes and total_bytes >= MIN_BYTES_POLYGON:
                    loc_type = LocationType.POLYGON
                else:
                    raise InvalidDataSizeError("Invalid byte size")
            else:
                raise BinaryParseError('Invalid header')
        return loc_type