def crossover(code1, code2, numpoints=1, mutrate=0.01):
    # Genetic-algorithm crossover: with probability 0.9 perform a
    # numpoints-point crossover between the two parent bit codes, otherwise
    # copy the parents unchanged. Both offspring are then bit-flip mutated
    # (in place, via bitflipmutation) and returned as [offspring1, offspring2].
    offspring1 = BitStream()
    offspring2 = BitStream()
    r = random.random()
    if r < 0.9:
        # Crossover points are drawn within the shorter parent so both slices
        # always exist. NOTE(review): any tail beyond minsize is dropped from
        # the offspring — confirm this truncation is intended.
        minsize = min([len(code1), len(code2)])
        points = [random.randint(0, minsize - 1) for p in range(numpoints)]
        points.append(minsize)  # final segment runs to the end of the shorter code
        points.sort()
        last = 0
        switch = False  # alternates which parent feeds which offspring per segment
        for p in points:
            if not switch:
                offspring1 += code1[last:p]
                offspring2 += code2[last:p]
            else:
                offspring1 += code2[last:p]
                offspring2 += code1[last:p]
            last = p
            switch = not switch
    else:
        offspring1 = BitStream(code1)
        offspring2 = BitStream(code2)
    bitflipmutation(offspring1, mutrate)
    bitflipmutation(offspring2, mutrate)
    return [offspring1, offspring2]
def AHash(k, p, bstream):
    # Hash a bitstream by folding it block-by-block through roundHash.
    # Each block is (k-1)*floor(log2(p)) bits; b0 is the chaining value
    # carried from round to round and returned at the end.
    block_size = int((k - 1) * math.floor(math.log(p, 2)))
    n_block = int(math.ceil(bstream.len / float(block_size)))
    message_size = block_size * n_block
    b0 = message_size  # initial chaining value encodes the padded message size
    #reset bitstream position
    bstream.pos = 0
    for i in range(1, n_block + 1):
        try:
            b0 = roundHash(k, p, BitStream(bstream.read(block_size)), b0)
        except:
            # NOTE(review): bare except — presumably catches the bitstring
            # ReadError raised on the final, short block; confirm and narrow.
            # Zero-pad the remaining bits up to a full block.
            padding_size = block_size - bstream.len % block_size
            b0 = roundHash(
                k, p,
                BitStream("0b" + bstream.read(bstream.len - bstream.pos).bin +
                          "0" * padding_size), b0)
        if (i != n_block):
            # Between rounds, rotate b0 right by half its bit width
            # (ceil(log2(p)) bits) for extra diffusion.
            tmp_b0 = BitArray("uint:" + str(int(math.ceil(math.log(p, 2)))) +
                              "=" + str(b0))
            tmp_b0.ror(int(math.ceil(math.log(p, 2)) // 2))
            b0 = tmp_b0.uint
    return b0
def __init__(self):
    # Opcode (8-bit binary string) -> handler-method dispatch table for an
    # IAS-style machine: 40-bit words, an accumulator (AC) and a
    # multiplier-quotient register (MQ). Comments translated from Portuguese.
    self.ops = {
        # DATA TRANSFER
        '00001010': self.loadToAC,    # LOAD MQ: transfer the contents of register MQ to the AC
        '00001001': self.loadToMQ,    # LOAD MQ,M(X): transfer the contents of memory location X to MQ
        '00100001': self.store,       # STOR M(X): transfer the contents of AC to memory location X
        '00000001': self.load,        # LOAD M(X): transfer M(X) to the AC
        '00000010': self.loadNeg,     # LOAD -M(X): transfer -M(X) to the AC
        '00000011': self.loadAbs,     # LOAD |M(X)|: transfer the absolute value of M(X) to the AC
        '00000100': self.loadNegAbs,  # LOAD -|M(X)|: transfer -|M(X)| to the accumulator

        # UNCONDITIONAL BRANCH
        '00001101': self.jumpL,       # JUMP M(X,0:19): take the next instruction from the left half of M(X)
        '00001110': self.jumpR,       # JUMP M(X,20:39): take the next instruction from the right half of M(X)

        # CONDITIONAL BRANCH
        '00001111': self.condJumpL,   # JUMP+M(X,0:19): if AC is non-negative, take the next instruction from the left half of M(X)
        '00010000': self.condJumpR,   # JUMP+M(X,20:39): if AC is non-negative, take the next instruction from the right half of M(X)

        # ARITHMETIC
        '00000101': self.add,         # ADD M(X): add M(X) to AC; result in AC
        '00000111': self.addAbs,      # ADD |M(X)|: add |M(X)| to AC; result in AC
        '00000110': self.sub,         # SUB M(X): subtract M(X) from AC; result in AC
        '00001000': self.subRem,      # SUB |M(X)|: subtract |M(X)| from AC; result in AC
        '00001011': self.mul,         # MUL M(X): multiply M(X) by MQ; most significant bits of the result in AC, least significant in MQ
        '00001100': self.div,         # DIV M(X): divide AC by M(X); quotient in MQ, remainder in AC
        '00010100': self.ls,          # LSH: multiply AC by 2, i.e. shift left one bit position
        '00010101': self.rs,          # RSH: divide AC by 2, i.e. shift right one bit position

        # ADDRESS MODIFICATION
        '00010010': self.storL,       # STOR M(X,8:19): replace the left address field of M(X) with the 12 rightmost bits of AC
        '00010011': self.storR,       # STOR M(X,28:39): replace the right address field of M(X) with the 12 rightmost bits of AC
    }
    # 40-bit AC and MQ registers, plus 1024 zeroed 40-bit memory words.
    self.AC = BitStream(int=0, length=40)
    self.MQ = BitStream(int=0, length=40)
    self.memoria = []
    for i in range(1024):
        self.memoria.append(BitStream(int=0, length=40))
def adjust_tiles(tiles, size, isd_text):
    # Patch the flat island tile list using per-chunk height data embedded in
    # the <ChunkMap> section of the ISD XML text.
    ChunkMap = re.split(r"<ChunkMap>", isd_text)
    if len(ChunkMap) != 2:
        e = "Previous split should have resulted in 2 strings. {} found".format(len(ChunkMap))
        raise NotImplementedError(e)
    ChunkMap = ChunkMap[1]
    # first 2 characters after <Width> tag in <ChunkMap> => up to 99 chunks (= 1584 x 1584 tiles per island)
    (width_tiles, height_tiles) = size  #@UnusedVariable
    width_chunks = int( re.split(r"<Width>", ChunkMap[:100])[1][:2].strip("<") )
    height_chunks = int( re.split(r"<Height>", ChunkMap[:100])[1][:2].strip("<") )  #@UnusedVariable
    chunks = re.split(r"<Element>", ChunkMap)[1:]
    for i in range(len(chunks)):
        VertexResolution = re.split(r"<VertexResolution>", chunks[i])[1][0]
        if VertexResolution in ("-", "5"):
            # -1 => empty chunk
            # NOTE(review): "5" is skipped here even though the resolution
            # table below has an entry for 5 — confirm intended.
            continue
        VertexResolution = int(VertexResolution)
        HeightMap = re.split(r"HeightMap[^C]*CDATA\[", chunks[i])[1:]
        # Chunk grid coordinates; chunks are laid out row-major.
        start_x = i%width_chunks
        start_z = i//width_chunks
        # resolution: presumably [header bytes to skip, bytes per tile value]
        # — TODO confirm against the file format.
        resolution = {4: [  4, 4],
                      5: [ 25,15],
                      6: [142,58]}[VertexResolution]
        useful_bytes = 17*17*resolution[1]
        load_bytes = resolution[0] + useful_bytes
        bits_per_tile = resolution[1] * 8
        data = BitStream( bytes=HeightMap [0][:load_bytes][-useful_bytes:] )
        read_string = "uint:{}".format(bits_per_tile)
        for z in range(16):
            for x in range(17):
                # NOTE(review): the stride 240 looks like a hard-coded island
                # row width in tiles — confirm it matches width_tiles.
                position = start_z*16*width_tiles + z*240 + start_x*16 + x
                d = int( data.read(read_string))
                if x != 16 and d == 858993471:
                    #trial and error, 0x3333333f=858993471, 0x33=51, 0x3f=63, 0x3333=13107, 0x333f=13119
                    tiles[position] = 255
    return None
def uncompress_golomb_coding(coded_bytes, hash_length, M):
    """Given a bytstream produced using golomb_coded_bytes, uncompress it."""
    ret_list = []
    instream = BitStream(
        bytes=coded_bytes, length=len(coded_bytes) * 8)
    hash_len_bits = hash_length * 8
    # M must be a power of two: each remainder is stored in exactly log2(M) bits.
    m_bits = int(math.log(M, 2))
    # First item is a full hash value.
    prev = instream.read("bits:%d" % hash_len_bits)
    ret_list.append(prev.tobytes())
    # Every further entry needs at least the unary terminator plus m_bits,
    # so stop before reading past the end of the stream.
    while (instream.bitpos + m_bits) <= instream.length:
        # Read Unary-encoded value (quotient q = number of leading 1-bits).
        read_prefix = 0
        curr_bit = instream.read("uint:1")
        while curr_bit == 1:
            read_prefix += 1
            curr_bit = instream.read("uint:1")
        assert curr_bit == 0
        # Read r, assuming M bits were used to represent it.
        r = instream.read("uint:%d" % m_bits)
        # Entries are deltas from the previous value: diff = q * M + r.
        curr_diff = read_prefix * M + r
        curr_value_int = prev.uint + curr_diff
        curr_value = Bits(uint=curr_value_int, length=hash_len_bits)
        ret_list.append(curr_value.tobytes())
        prev = curr_value
    return ret_list
def multiply(self, address):
    """ Multiply M(X) by MQ; put most significant bits of result in AC,
    put least significant bits in MQ """
    # The 80-bit product, stored big-endian: bits 0..39 are the high half.
    res = BitStream(int=self.__MQ.int * self.memory.data_memory[address].int,
                    length=80)
    # BUG FIX: the previous code put the LOW half (res[40:80]) into AC and
    # only 39 of the high bits (res[0:39]) into MQ — the reverse of the
    # documented IAS semantics, plus an off-by-one that dropped a bit.
    self.__AC = BitStream(int=res[0:40].int, length=40)
    self.__MQ = BitStream(int=res[40:80].int, length=40)
def send_data(self, tms_stream, tdi_stream):
    """
    Performs a general-purpose JTAG communication.

    tms_stream -- Values to drive on the Test Mode Select (TMS) line.
    tdi_stream -- Values to shift into the target device over TDI.
    """
    # Accumulates every bit the device returns on TDO.
    tdo_stream = BitStream()

    # Clock out each simultaneous TMS/TDI bit pair, capturing TDO as we go.
    for tms, tdi in zip(tms_stream, tdi_stream):
        tdo_stream += BitStream(bool=self.tick(tms, tdi))
        # Keep our model of the JTAG state machine in sync with TMS.
        self.track_tms(tms)

    return tdo_stream
def from_bytes(cls, input_bytes: bitstring.BitStream):
    """Build an Avail Descriptor from its binary representation."""
    # The first 32-bit field must spell out the mandatory CUEI identifier.
    identifier = input_bytes.read('uint:32')
    if identifier != CUEI_IDENTIFIER:
        raise Exception("Avail Descriptor identifier is not CUEI as required")
    # The next 32 bits carry the provider's avail id.
    return cls(input_bytes.read('uint:32'))
def parse(stream: BitStream) -> "Packet":
    """Parse one packet (3-bit version, 3-bit type, then payload)."""
    version = stream.read(3).uint
    type_ = stream.read(3).uint
    # Type 4 is a literal value packet; every other type is an operator.
    if type_ == 4:
        return Value.parse(version, stream)
    return Operator.parse(version, type_, stream)
class Mem(object):
    """Byte-addressable memory backed by a BitStream, with a jump counter."""

    def __init__(self):
        self.real = BitStream(600 * 16)  # 600 zeroed 16-bit words
        self.jumps = 0                   # number of jump() calls performed

    def load(self, file):
        # Replace the whole memory image with the file's contents.
        self.real = BitStream(filename=file)

    def save(self, file):
        # Dump the memory image back to disk.
        self.real.tofile(file)

    def jump(self, pos):
        # Move the read pointer to byte `pos`, counting the jump.
        self.jumps += 1
        self.real.bytepos = pos

    def read(self, size=16):
        # BUG FIX: honour the requested size — it was hard-coded to 16 bits,
        # silently ignoring the `size` argument.
        return self.real.read(size)

    def get(self, pos, size=16):
        # Random-access read of `size` bits at byte address `pos`.
        realpos = pos * 8
        return self.real[realpos:realpos + size]

    def set(self, pos, bits):
        # Overwrite memory at byte address `pos` with the given bits.
        realpos = pos * 8
        self.real[realpos:realpos + len(bits)] = bits

    @property
    def pos(self):
        # Current read pointer, in bytes.
        return self.real.bytepos
def parsePacket():
    # Read one framed packet from the module-level serial port `ser`:
    # 1-byte header (4-bit length + 4-bit type), body, 1-byte checksum footer.
    #read header
    serialRaw = [c for c in ser.read()]  # NOTE(review): ser.read() defaults to a single byte — confirm
    header = BitStream(uint=serialRaw[0], length=8)
    #parse header
    length = header.read('uint:4')
    packetType = header.read('uint:4')
    #read and parse body
    serialRaw = [c for c in ser.read(length)]
    # NOTE(review): only the first body byte is used even when length > 1 —
    # confirm this is intended.
    body = BitStream(uint=serialRaw[0], length=8)
    data = body.read('uint:8')
    #read and parse footer
    serialRaw = [c for c in ser.read(1)]
    footer = BitStream(uint=serialRaw[0], length=8)
    checkSum = footer.read('uint:8')
    #recompute checksum and mark packet as error if both checksums don't match
    if (calculateCheckSum(length, 4) + calculateCheckSum(packetType, 4) +
            calculateCheckSum(data, 8)) != checkSum:
        print("bad checksum")
        return {"type": "error"}
    return {"type": packetType, "data": data}
class Section:
    """Fixed-layout binary section: 6-bit flag, then two signed 13-bit coords."""

    def __init__(self, data):
        # Wrap the raw bytes in a BitStream for positional reads.
        if data is not None:
            self._data = BitStream(hex=data.hex())
        else:
            # NOTE(review): the readers below dereference self._data, so a
            # None payload will raise — preserved from the original behaviour.
            self._data = None
        self.x = self._read_x()
        self.y = self._read_y()
        self.flag = self._read_flag()
        self.vector = [self.x, self.y]

    def _read_x(self):
        # Bits 6..18 hold x as a signed 13-bit value.
        # (Removed an unreachable `return None` after the return.)
        self._data.pos = 6
        return self._data.read('int:13')

    def _read_y(self):
        # Bits 19..31 hold y as a signed 13-bit value.
        self._data.pos = 19
        return self._data.read('int:13')

    def _read_flag(self):
        # Bits 0..5 hold the unsigned flag field.
        self._data.pos = 0
        return self._data.read('uint:6')
def encode(block): """ Entropy of CAVLC Args: block: input macroblock, should be 4x4 intger matrix returns: A bitstream of CAVLC code """ #use Zigzag to scan zig = ZigZag.ZigzagMatrix() print("ZiaZag scan:") res = zig.matrix2zig(block) print(res) #Step1: get TotalCoeffs& T1s totalCoeffs = getTotalCoeffs(res) print("TotalCoeffs: ", totalCoeffs) if (totalCoeffs==0): NoFurther = BitStream('0b0') print("CAVLC: ", NoFurther.bin) return NoFurther t1s = getT1s(res) print("T1s: ", t1s) part1 = '0b' + vlc.coeff_token[0][totalCoeffs][t1s] print("coeff token:", part1) #step2: Encode the sign of each T1 part2, remains = encodeT1s(res, t1s) print("T1 sign:", part2) #step3: Encode the levels of the remaining non-zero coefficients part3 = encodeLevels(remains, totalCoeffs, t1s) print("encode Levels:", part3) #Step4: Encode the total number of zeros before the last coefficient totalZeros = getTotalZeros(res) print("TotalZeros: ", totalZeros) part4 = '' if ( totalCoeffs<block.size ): part4 = vlc.total_zeros[totalZeros][totalCoeffs] print("encode TotalZeros: ", part4) #step5: Encode each run of zeros part5 = encodeRunBefore(res, totalCoeffs, totalZeros) stream = BitStream() temp = part1 + part2 + part3 + part4 + part5 stream.append(temp) logging.debug("CAVLC: %s", stream.bin) # supplement zero at the end of stream, so we can print the hex code of the stream output_str = stream addon = 8 - len(stream.bin) % 8 for i in range(0, addon): output_str.append('0b0') logging.debug("CAVLC hex: %s", output_str.hex) return stream
def isFrameNeeded(self, frameNumber):
    '''This determines whether a given frame is needed for this stream or not.'''
    # Once the stream is fully assembled, no further frames are accepted.
    # (Guard clauses replace the original `== True`/`== None` comparisons
    # and deeply nested if/else; behaviour is unchanged.)
    if self.isAssembled:
        return False

    # Before the stream header's binary preamble is read, accept any frame we
    # haven't already processed; framesPriorToBinaryPreamble lists those.
    if not self.streamHeaderPreambleComplete:
        return frameNumber not in self.framesPriorToBinaryPreamble

    # Preamble loaded: consult the frame reference table, lazily loading it
    # from disk on first use.
    if self.frameReferenceTable is None:
        self.frameReferenceTable = BitStream(
            self._readFile('\\frameReferenceTable', toDecompress=True))

    # Bit (frameNumber - 1) records whether that frame was already loaded.
    self.frameReferenceTable.bitpos = frameNumber - 1
    isFrameLoaded = self.frameReferenceTable.read('bool')
    return not isFrameLoaded
def pack_awb(dir):
    # Build an AWB archive from every file in `dir` (sorted): magic header,
    # 16-bit file ids, a (file_count + 1)-entry end-offset table, then each
    # file's data padded to AWB_ALIGN-byte boundaries.
    file_list = sorted(os.listdir(dir))
    file_count = len(file_list)
    # Offset-table position: 0x10 header bytes + 2 bytes per 16-bit file id.
    file_end_offset = 0x10 + (2 * file_count)
    data = AWB_MAGIC + \
           AWB_UNK + \
           bitstring.pack("uintle:32, uintle:32", file_count, AWB_ALIGN) + \
           bitstring.pack(", ".join(["uintle:16=%d" % id for id in range(file_count)])) + \
           BitStream(uintle = 0, length = (file_count + 1) * 32) # Plus one for the header.
    for i, file in enumerate(file_list):
        # Where the previous file ends == where this one starts (pre-padding).
        # NOTE(review): data.len / 8 relies on integer division, so this code
        # presumably targets Python 2 — confirm before running under 3.
        file_end = data.len / 8
        data.overwrite(bitstring.pack("uintle:32", file_end), (file_end_offset + (i * 4)) * 8)
        # Pad up to the archive alignment before appending the file body.
        padding = 0
        if file_end % AWB_ALIGN > 0:
            padding = AWB_ALIGN - (file_end % AWB_ALIGN)
            data.append(BitStream(uintle=0, length=padding * 8))
        file_data = ConstBitStream(filename=os.path.join(dir, file))
        data.append(file_data)
    # One last file end.
    file_end = data.len / 8
    data.overwrite(bitstring.pack("uintle:32", file_end), (file_end_offset + (file_count * 4)) * 8)
    return data
def jtag_data(self, TDI_stream, tdo):
    # Shift TDI_stream out over JTAG via the USB adapter; when `tdo` is true,
    # also capture and return the bits read back on TDO.
    # Wire protocol: a little-endian (command byte, 32-bit bit count) header,
    # then the byte-packed payload.
    if (tdo):
        data = struct.pack("<BI", TDI_TDO_CMD, len(TDI_stream))
    else:
        data = struct.pack("<BI", TDI_CMD, len(TDI_stream))
    self.handle.bulkWrite(usb.ENDPOINT_OUT + 1, data)
    n_bits = TDI_stream.len
    # Pad to a whole byte, then reverse + byteswap into the adapter's
    # expected on-wire bit ordering.
    TDI_stream += bitstring.BitStream((8 - n_bits) % 8)
    TDI_stream.reverse()
    TDI_stream.byteswap()
    data = TDI_stream.tobytes()
    self.handle.bulkWrite(usb.ENDPOINT_OUT + 1, data, timeout=10000)
    if (tdo):
        r = self.handle.bulkRead(usb.ENDPOINT_IN + 1, len(data), timeout=10000)
        # Undo the wire ordering and trim the alignment padding.
        TDO_stream = bitstring.pack('bytes:{}'.format(len(r)), r)
        TDO_stream.byteswap()
        TDO_stream.reverse()
        TDO_stream = TDO_stream[0:n_bits]
    else:
        # No capture requested: return an all-zero stream of matching length.
        TDO_stream = BitStream(n_bits)
    # TDI_TDO_CMD and TDI_CMD always ends with TMS=1
    self.track_tms(True)
    # Go back to "Shift DR" or "Shift IR"
    self.jtag_general(BitStream('0b010'), BitStream('0b000'))
    return TDO_stream
class Mem(object):
    """Byte-addressable memory backed by a BitStream, with a jump counter."""

    def __init__(self):
        self.real = BitStream(600 * 16)  # 600 zeroed 16-bit words
        self.jumps = 0                   # number of jump() calls performed

    def load(self, file):
        # Replace the whole memory image with the file's contents.
        self.real = BitStream(filename=file)

    def save(self, file):
        # Dump the memory image back to disk.
        self.real.tofile(file)

    def jump(self, pos):
        # Move the read pointer to byte `pos`, counting the jump.
        self.jumps += 1
        self.real.bytepos = pos

    def read(self, size=16):
        # BUG FIX: honour the requested size — it was hard-coded to 16 bits,
        # silently ignoring the `size` argument.
        return self.real.read(size)

    def get(self, pos, size=16):
        # Random-access read of `size` bits at byte address `pos`.
        realpos = pos * 8
        return self.real[realpos:realpos + size]

    def set(self, pos, bits):
        # Overwrite memory at byte address `pos` with the given bits.
        realpos = pos * 8
        self.real[realpos:realpos + len(bits)] = bits

    @property
    def pos(self):
        # Current read pointer, in bytes.
        return self.real.bytepos
def decode(cls, data: str) -> Dict[str, str]:
    """
    Decodes the Type Of Address octet.

    Returns a dictionary.

    Example:
        >>> TypeOfAddress.decode('91')
        {'ton': 'international', 'npi': 'isdn'}
    """
    io_data = BitStream(hex=data)

    # The octet's most significant bit is mandatory and must be set.
    if not io_data.read('bool'):
        raise ValueError("Invalid first bit of the Type Of Address octet")

    # Type Of Number (next 3 bits).
    ton = cls.TON.get(io_data.read('bits:3').uint)
    if ton is None:
        # BUG FIX: an `assert False` here made this ValueError unreachable
        # (and asserts vanish entirely under `python -O`); raise directly.
        raise ValueError("Invalid Type Of Number bits")

    # Numbering Plan Identification (final 4 bits).
    npi = cls.NPI.get(io_data.read('bits:4').uint)
    if npi is None:
        raise ValueError("Invalid Numbering Plan Identification bits")

    return {
        'ton': ton,
        'npi': npi,
    }
def returnFrameHeader(self, paletteType): '''This method returns the bits carrying the frame header for the frame, as well as the "carry over" bits, which were the excess capacity within those blocks, assuming there was extra space. ''' # always 608 bits, plus whatever remainder bits that may be present in the final block. Protocol v1 only! if paletteType != 'streamPalette' and paletteType != 'headerPalette' and paletteType != 'primaryPalette': raise ValueError( "FrameHandler.returnFrameHeader: invalid paletteType argument." ) fullBlockData = self._blocksToBits( math.ceil(608 / self.paletteDict[paletteType].bitLength), f'{paletteType}') carryOverBits = BitStream() if fullBlockData.len > 608: fullBlockData.pos = 608 carryOverBits.append( fullBlockData.read(f'bits : {fullBlockData.len - 608}')) fullBlockData.pos = 0 config.statsHandler.dataRead += carryOverBits.len return fullBlockData.read('bits : 608'), carryOverBits
def test_get_frame_sort(self):
    # Bits 8..24 of each fixture frame carry the sort field; verify that
    # get_frame_sort classifies the I, S and H fixtures correctly.
    assert (Frame.get_frame_sort(BitStream(
        self.ifr.bitarr[8:24])) == Frame.Sort.I)
    assert (Frame.get_frame_sort(BitStream(
        self.sfr.bitarr[8:24])) == Frame.Sort.S)
    assert (Frame.get_frame_sort(BitStream(
        self.hfr.bitarr[8:24])) == Frame.Sort.H)
def test_crcNmea(self):
    # A full NMEA GGA sentence whose trailing "75" is the expected checksum.
    nmeaStr = (
        "$GPGGA,092751.000,5321.6802,N,00630.3371,W,1,8,1.03,61.7,M,55.3,M,,*75"
    )
    # The checksum covers everything between the leading '$' and the '*'.
    payload = BitStream(nmeaStr[1:-3].encode())
    expected = BitStream("0x" + nmeaStr[-2:])
    self.assertEqual(crcNmea(payload), BitStream(expected))
def _instantiate_bitstring(self):
    """Initialise the header bitstring: patch-dims delta plus codebook size."""
    # Codebook sizes are powers of two from 16 (2^4) to 256 (2^8), so the
    # 3-bit flag stores log2(M) - 4.
    codebook_size_flag = int(math.log2(self.M) - 4)
    self.bitstring = BitStream(
        f'int:10={self.patch_dims_diff}, uint:3={codebook_size_flag}')
def Fetch(self):
    # IAS fetch cycle: two 20-bit instructions are packed per 40-bit word
    # (left half bits 0..19, right half bits 20..39). IBR buffers the
    # not-yet-executed right-half instruction between fetches.
    # Checks if IBR is empty
    if (self.IAS.registers.IBR == BitStream(int=0, length=20)):
        # Fetch the word addressed by PC into MBR via MAR.
        self.IAS.registers.MAR = BitStream(int=self.IAS.registers.PC, length=12)
        self.IAS.registers.MBR = self.IAS.memory.loc[
            self.IAS.registers.MAR.int]
        # Checks if Left instruction is present
        if (self.IAS.registers.MBR[0:20] != BitStream(int=0, length=20)):
            # Execute the left instruction now; buffer the right half in IBR.
            # PC is not advanced yet — it advances when IBR is consumed.
            self.IAS.registers.IBR = self.IAS.registers.MBR[20:40]
            self.IAS.registers.IR = self.IAS.registers.MBR[0:8]
            self.IAS.registers.MAR = self.IAS.registers.MBR[8:20]
        # Left instruction is not present
        else:
            # Only the right half holds an instruction: opcode + address.
            self.IAS.registers.IR = self.IAS.registers.MBR[20:28]
            self.IAS.registers.MAR = self.IAS.registers.MBR[28:40]
            self.IAS.registers.PC += 1
    # IBR is filled already
    else:
        # Consume the buffered right-half instruction without a memory fetch.
        self.IAS.registers.IR = self.IAS.registers.IBR[0:8]
        self.IAS.registers.MAR = self.IAS.registers.IBR[8:20]
        self.IAS.registers.IBR = BitStream(int=0, length=20)  #Empty the IBR again
        self.IAS.registers.PC += 1
def main():
    # Decode and plot current/voltage/temperature records from a reversed
    # EEPROM dump. NOTE: Python 2 code (print statement, xrange, int division).
    with open("eeprom.bin", "rb") as f:
        bytes = bytearray(f.read())  # NOTE(review): shadows the builtin `bytes`
    # The dump is stored last-record-first, so reverse it before decoding.
    bytes = bytearray([b for b in reversed(bytes)])
    bs = BitStream(bytes)
    # Each record is 32 bits: 12-bit current, 10-bit voltage, 10-bit temperature.
    size = len(bytes) / 4
    curr = np.zeros([size], dtype=np.float64)
    volt = np.zeros([size], dtype=np.float64)
    temp = np.zeros([size], dtype=np.float64)
    for i in xrange(size):
        # ADC scale factors below are hardware-specific — TODO confirm.
        curr[i] = bs.read(12).uint * (3.35693e-3 * 4)
        # NOTE(review): raises ZeroDivisionError if a voltage sample is 0.
        volt[i] = 1.1 * 1023.0 / bs.read(10).uint
        temp[i] = bs.read(10).uint
    print curr, volt, temp
    plt.figure()
    plt.plot(curr)
    plt.figure()
    plt.plot(volt)
    plt.figure()
    plt.plot(temp)
    plt.show()
class DataBlock(Block):
    # A fixed-size binary block: a zeroed bit buffer plus a list of pointer
    # fixups that are resolved to absolute 64-bit offsets at serialise time.

    def __init__(self, length: int) -> None:
        super().__init__()
        # `length` is in bytes; the buffer is tracked in bits.
        self.buffer = BitStream(length=length * 8)
        # (offset within this block, target block or None) pairs.
        self.pointers: List[Tuple[int, Optional[Block]]] = []

    def _add_pointer(self, offset: int, block: Optional[Block]) -> None:
        # Register a pointer slot to be patched in prepare_bitstream().
        self.pointers.append((offset, block))

    def _at_offset(self, offset: int, *args: Any, **kwargs: Any) -> bitstream_offset:
        # Context manager that temporarily positions the buffer at `offset`.
        return bitstream_offset(self.buffer, offset, *args, **kwargs)

    def prepare_bitstream(self) -> BitStream:
        # Patch every registered pointer slot with the target block's final
        # offset as a little-endian 64-bit value (0 for null pointers).
        for offset, obj in self.pointers:
            with bitstream_offset(self.buffer, offset):
                if obj is not None:
                    self.buffer.overwrite(pack('uintle:64', obj.offset))
                else:
                    self.buffer.overwrite(pack('uintle:64', 0))
        return self.buffer

    def get_all_pointers(self) -> List[int]:
        # Absolute positions of all non-null pointer slots, sorted.
        return sorted(
            [x + self.offset for x, p in self.pointers if p is not None])

    @abc.abstractmethod
    def alignment(self) -> int:
        # Required byte alignment of this block; supplied by subclasses.
        pass

    def __len__(self) -> int:
        # Block length in bytes.
        return len(self.buffer) // 8
def concatenated_sms(data: str, length_bits: int = 8) -> Dict[str, Any]:
    """Decode a concatenated-SMS UDH element from its hex representation."""
    stream = BitStream(hex=data)
    # The reference field width varies (8 or 16 bits); the counters are 8-bit.
    reference = stream.read(f'uintbe:{length_bits}')
    parts_count = stream.read('uintbe:8')
    part_number = stream.read('uintbe:8')
    return {
        'reference': reference,
        'parts_count': parts_count,
        'part_number': part_number,
    }
def Pid(self):
    """Read the device PID over SPI and return it as a binary string."""
    # The first transfer primes the device; the second returns the reply.
    self.spi1.spi_xfer(self.h, self.Read_Pid)
    (count, st) = self.spi1.spi_xfer(self.h, self.Read_Pid)
    reply = BitStream(bytes=st)
    # The 16-bit PID field starts at bit offset 11 of the reply.
    reply.pos = 11
    return reply.read(16).bin
def Sensor_Data(self):
    """Read one signed 16-bit sensor sample over SPI."""
    # The first transfer primes the device; the second returns the reply.
    self.spi1.spi_xfer(self.h, self.Sensor_data_CHK_de_asrRead_Rate)
    (count, st) = self.spi1.spi_xfer(self.h, self.Sensor_data_CHK_de_asrRead_Rate)
    reply = BitStream(bytes=st)
    # The signed 16-bit sample starts at bit offset 6 of the reply.
    reply.pos = 6
    return reply.read(16).int
def save(self, filename, font_type = FONT_TYPES.font01, game = GAMES.dr):
    # Serialise the SPFT font file: magic + counts header, char->glyph
    # mapping table, then the glyph (font) table, padded to 16 bytes.
    data = BitStream(SPFT_MAGIC)
    data += BitStream(uintle = len(self.data), length = 32)
    # zero-indexed so +1 for the size.
    mapping_table_len = self.find_max_char() + 1
    mapping_table_start = 0x20
    # Each mapping entry is 2 bytes; the font table follows immediately after.
    font_table_start = mapping_table_len * 2 + mapping_table_start
    data += BitStream(uintle = font_table_start, length = 32)
    data += BitStream(uintle = mapping_table_len, length = 32)
    data += BitStream(uintle = mapping_table_start, length = 32)
    data += UNKNOWN1[game][font_type] + UNKNOWN2
    data += self.gen_mapping_table(mapping_table_len)
    data += self.gen_font_table()
    # Pad the file out to a 16-byte boundary.
    # NOTE(review): relies on integer division of data.len / 8 (Python 2);
    # when already aligned this appends a full extra 16 bytes — confirm.
    padding = BitStream(hex = '0x00') * (16 - ((data.len / 8) % 16))
    data += padding
    f = open(filename, "wb")
    data.tofile(f)
    f.close()
def bytestoBase91(data):
    """ this is the way a microcontroller would encode a signal. Data must be in 13-bit "bytes."
    They will be zero padded, so make sure you fill that last 13-bit byte enough so that the
    padded zeros don't become an extra pixel value! It just means maximize the number of pixels
    in the payload. Technically can be 13 or 14 bit bytes, but since our data isn't byte aligned
    to anything, we pick 13. this is lazy base91, and it works as long as we fill up as many
    payload bits as possible with data maybe implement real base91 just for fun? Or even,
    base9194 where it can be either 91 or 94 (or anything in between?) Doesn't really matter
    though
    data is a BitStream or bytes
    returns the encoded ASCII as bytes """
    base91data = ''
    # Consume 13 bits at a time; each group yields two printable chars
    # offset by 33 (chr 33..123).
    while data.len - data.pos >= 13:
        bits = data.read('uint:13')
        base91data = base91data + chr((bits // 91) + 33) + chr((bits % 91) + 33)
    # lazy base91, technically it's 6 or 7 depending
    if data.len - data.pos >= 7:
        # the number of bits left is greater or equal to 7, less eq to 12
        # NOTE(review): `data + BitStream(...)` builds a new stream whose read
        # position may reset to 0, so this read might not start at the old
        # position — verify against the bitstring version in use.
        data = data + BitStream(13 - (data.len - data.pos))  # zero pad
        bits = data.read('uint:13')
        base91data = base91data + chr((bits // 91) + 33) + chr((bits % 91) + 33)
    elif data.len - data.pos > 0:
        # number of bits left greater than 0 <= 6
        data = data + BitStream(6 - (data.len - data.pos))  # pad up to 6
        bits = data.read('uint:6')
        base91data = base91data + chr(bits + 33)
    return base91data.encode()
def __init__(self, f, verbose=False, use_bitstream=None):
    """
    Create a new extractor for a .264/h264 file in Annex B format.
    f: input file
    use_bitstream: blob to use as bitstream (for testing)
    verbose: whether to print out NAL structure and fields
    """
    if use_bitstream:
        # testing the parser in a bitstream
        self.file = None
        self.stream = BitStream(use_bitstream)
    else:
        # Only accept recognised H.264 Annex B file extensions.
        fn, ext = os.path.splitext(os.path.basename(f))
        valid_input_ext = ['.264', '.h264']
        # TODO: extend for H.265
        # valid_input_ext = ['.264', 'h264', '.265', '.h265']
        if not ext in valid_input_ext:
            raise RuntimeError("Valid input types: " + str(valid_input_ext))
        bitstream_file = f
        self.file = bitstream_file
        self.stream = BitStream(filename=bitstream_file)
    self.verbose = verbose
    # Registered callbacks — presumably keyed by NAL unit type; confirm
    # against the registration method elsewhere in this class.
    self.callbacks = {}
def inovonics_serial_read(self):
    """Read Inovonics packets from the serial port forever, queueing each one."""
    ser = serial.Serial(self.serial_port)
    self.logging.info("Opening " + str(ser.name))
    previous_packet = 0
    while True:
        header = ser.read(1)
        # Incoming Message
        if header == b'\x13':
            data_length = ser.read(1)
            packet = ser.read(self.serial_to_dec(data_length) - 1)
            packet_bitstream = BitStream(bytes=header + data_length + packet)
            # Filter out duplicate messages, don't filter check in messages, Eg: (\x10)
            stat0 = (packet_bitstream[96:104])
            packet_without_rssi = (packet_bitstream[:-24])
            # BUG FIX: the original used `stat0 is not b'\x10'`, which
            # compares object identity against a literal and is always true;
            # use value inequality so check-in messages are really exempted.
            if packet_without_rssi == previous_packet and stat0 != b'\x10':
                pass
            else:
                self.message_queue.put(packet_bitstream)
                previous_packet = packet_without_rssi
        # Receiver Message
        elif header == b'\x11':
            data_length = ser.read(1)
            packet = ser.read(self.serial_to_dec(data_length) - 1)
            packet_bitstream = BitStream(bytes=header + data_length + packet)
            self.message_queue.put(packet_bitstream)
        # Toss Everything Else
        else:
            # self.logging.debug("Unknown Byte -- Discarded")
            self.logging.debug(''.join('%02x' % c for c in header))
def parse(self):
    """Load the backing file, then read its header, app info and index table."""
    stream = BitStream(filename=self.file_path)
    # The header yields where the index and extension data sections begin.
    index_start, extension_data_start = parse_header(stream)
    self.app_info = parse_app_info(stream)
    # Jump to the index section and decode it.
    stream.bytepos = index_start
    self.indexes = parse_index(stream)
    self.parsed = True
def instruction(self, opcode, address):
    '''
    0b will be put automatically
    '''
    # Both arguments are plain binary strings; wrap them as bit streams.
    op_bits = BitStream('0b' + opcode)
    addr_bits = BitStream('0b' + address)
    # Dispatch on the 8-bit opcode string, passing the address as an int.
    handler = self.ops[op_bits.read('bin:8')]
    handler(addr_bits.int)
def mul(self, register):
    '''
    MUL M(X): Multiply M(X) by M(Q); put most significant bits of result
    in AC, put less significant bits in M(Q)
    '''
    # The 80-bit product, stored big-endian: bits 0..39 are the high half.
    res = BitStream(int=self.MQ.int * self.selectron[register].int, length=80)
    # BUG FIX: the previous code put the LOW half (res[40:80]) into AC and
    # only 39 of the high bits (res[0:39]) into MQ — the reverse of the
    # documented semantics, plus an off-by-one that dropped a bit.
    self.AC = BitStream(int=res[0:40].int, length=40)
    self.MQ = BitStream(int=res[40:80].int, length=40)
def save(self, filename):
    """Write the magic header, a little-endian 16-bit line count, then each line."""
    payload = BitStream(self.magic)
    payload += BitStream(uintle = len(self.lines), length = 16)
    for entry in self.lines:
        payload += entry.to_data()
    with open(filename, "wb") as out:
        payload.tofile(out)
def __init__(self, chunk_id, size, data):
    """Parse a MIDI header (MThd) chunk body: format, track count, division."""
    super(HeaderChunk, self).__init__(chunk_id, size)
    data_stream = BitStream(data)
    # All three header fields are 16-bit big-endian UNSIGNED words.
    # BUG FIX: the previous signed `.int` reads would return negative values
    # for any field with the top bit set (e.g. large track counts).
    self.format_type = data_stream.read('bits:16').uint
    self.num_of_tracks = data_stream.read('bits:16').uint
    # Kept as raw Bits: SMPTE time divisions use the top bit as a flag, so
    # interpretation is left to the caller.
    self.time_division = Bits(data_stream.read('bits:16'))
def decompress(data):
    # LZ77-style decode: read fixed-width (distance, length-or-char) pairs
    # until fewer than one whole pair of bits remains, then let feed2text
    # expand the references into the output text.
    feed = []
    pos = 0  # NOTE(review): unused variable
    binary = BitStream(bytes=data)
    binary_length = len(binary.bin)
    while binary_length - binary.pos >= (WINDOW_BITS + LENGTH_BITS):
        distance = binary.read('uint:%d' % WINDOW_BITS)
        c_or_length = binary.read('uint:%d' % LENGTH_BITS)
        # distance == 0 marks a literal: the second field is a character code.
        if distance == 0:
            c_or_length = chr(c_or_length)
        feed.append([distance, c_or_length])
    return feed2text(feed)
def decode(self, in_stream, out_stream):
    # Map words back to integers via word_dict and pack them as fixed-width
    # values; the final two queued items encode the trailing partial value
    # (value, then its bit length).
    bs = BitStream()
    dq = deque()
    at_least_three = False
    for word in self.words_from_file(in_stream):
        # Skip unknown or empty tokens.
        if not word or word not in self.word_dict:
            continue
        #print >> sys.stderr, 'word:"', word, '"'
        dq.append(self.word_dict[word])
        # Always keep the last two items buffered — they are the trailer.
        if at_least_three or len(dq) == 3:
            bs.append(pack(self.int_type, dq.popleft()))
            at_least_three = True
            # Flush whole buffer-sized slices to the output file.
            if bs.len > self.bit_buffer:
                cut = 0
                for byte in bs.cut(self.bit_buffer):
                    cut += 1
                    byte.tofile(out_stream)
                del bs[:cut * self.bit_buffer]
    # dq has to have exactly 2 elements here, the last is the bit length of the first, unless it's 0
    #print >> sys.stderr, 'dq:', dq
    extra_bits = dq.pop()
    bs.append(pack('uint:' + str(extra_bits), dq.popleft()))
    bs.tofile(out_stream)
def uncompress_delta_diff(compressed_input, hash_length):
    # Inverse of a delta compression scheme: the first item is a full hash;
    # each later entry stores how many trailing bits changed (6-bit count,
    # stored minus one) followed by the replacement bits themselves.
    ret_list = []
    instream = BitStream(bytes=compressed_input,
                         length=len(compressed_input) * 8)
    hash_len_bits = hash_length * 8
    prev = instream.read("bits:%d" % hash_len_bits)
    ret_list.append(prev.tobytes())
    # Must always have at least 6 bits to read.
    while (instream.bitpos + 6) < instream.length:
        # Stored count is length-1, so add 1 back (range 1..64).
        curr_diff_len = instream.read("uint:6") + 1
        curr_diff = instream.read("bits:%d" % curr_diff_len)
        # Keep the unchanged prefix of the previous hash; swap in the suffix.
        curr_item = prev[:hash_len_bits - curr_diff_len] + curr_diff
        assert curr_item.length == hash_len_bits
        ret_list.append(curr_item.tobytes())
        prev = curr_item
    return ret_list
def write_char_array(self, max_length, value):
    # Write a length-prefixed char array: the length field uses just enough
    # bits to represent max_length (via bit_count), then the raw bytes are
    # appended byte-aligned.
    self.write_int(bit_count(max_length), len(value))
    # Flush any pending partial byte first, left-padding it with zero bits,
    # so the payload starts on a byte boundary.
    if self._bits.len > 0:
        more = 8 - self._bits.len
        tail = (BitStream(int=0, length=more) + self._bits).tobytes()
        self._bits = BitStream()
        self._bytes += tail
    # NOTE(review): `self._bytes += value` mixes str and bytes unless this
    # runs on Python 2 — confirm the target interpreter.
    self._bytes += value
def gen():
    """Generate a pronounceable random token: a consonant, five vowel+consonant
    pairs, then a final consonant (12 characters, ~50 bits of entropy)."""
    consonants = "bcdfghjklmnpqrstvwxyz"
    vowels = "aeiou"
    generated = ""
    # Use the stdlib CSPRNG instead of shelling out to the external `openssl`
    # binary, which may be absent from PATH and costs a process spawn.
    import secrets
    randdata = secrets.token_bytes(9)
    assert len(randdata) == 9
    bs = BitStream(randdata)
    # Python's % always returns a non-negative result, so the signed reads
    # still index correctly.
    generated += consonants[bs.read('int:5') % len(consonants)]
    for i in range(5):
        generated += vowels[bs.read('int:3') % len(vowels)]
        generated += consonants[bs.read('int:5') % len(consonants)]
    generated += consonants[bs.read('int:5') % len(consonants)]
    return generated
def main():
    # Dump each test NAL payload in hex/binary and parse a slice header.
    for datum in DATA:
        as_hex = ":".join("{:02x}".format(h) for h in datum)
        as_bin = ":".join("{:08b}".format(h) for h in datum)
        print(as_hex)
        print(as_bin)
        a = BitStream(datum)
        # The leading slice-header fields are Exp-Golomb ('ue') coded.
        first_mb_in_slice = a.read('ue')
        slice_type = a.read('ue')
        pic_parameter_set_id = a.read('ue')
        # NOTE(review): frame_num's real width comes from the SPS
        # (log2_max_frame_num_minus4 + 4); 9 bits is hard-coded here —
        # confirm it matches the test data's SPS.
        frame_num = a.read(9)
        print("first-mb-in-slice: {}".format(first_mb_in_slice))
        print("slice-type: {}".format(slice_type))
        print("pic-parameter-set-id: {}".format(pic_parameter_set_id))
        print("frame-num: {}".format(frame_num.int))
def decompress(self, data):
    # Huffman-decode `data`. Layout: [8-bit pad count][16-bit tree length]
    # [serialized tree][code bits...], with `pad` filler bits at the very end.
    bitstream = BitStream(data)
    pad = bitstream.read(8).int
    # remove pad bits
    if pad > 0:
        # Slicing yields a new stream with its read position reset to 0,
        # hence the compensating dummy read below.
        bitstream = bitstream[:-pad]
        bitstream.read(8)  # false read 1 B to move read pointer
    tree_len = bitstream.read(16).int
    tree_serial = bitstream.read(tree_len)
    tree = HuffmanNode()
    tree.deserialize(tree_serial)
    # Invert symbol->code into code->symbol for decoding.
    dictionary = tree.assign_codes()
    dictionary = {v: k for k, v in dictionary.items()}  # reverse dict
    result = bytearray()
    sequence = ""
    # Walk the remaining bits, emitting a byte whenever a code matches.
    while True:
        try:
            bit = bitstream.read(1)
        except ReadError:
            # End of stream reached.
            break
        if bit:
            sequence += '1'
        else:
            sequence += '0'
        if sequence in dictionary:
            result.append(dictionary[sequence])
            sequence = ""
    return result
def decrypt(ciphertext, key):
    # ElGamal decryption. key = ((p, g, y), u); each ciphertext block is a
    # pair (a, b) with plaintext m = b * (a^u)^-1 mod p. ciphertext[0] holds
    # the total plaintext bit length, used to trim the final block's padding.
    bstream = BitStream()
    p, g, y = key[0]
    u = key[1]
    #trying to improve execution speed
    #a_pow_u = mod_exp(ciphertext[1][0], u, p)
    #inv_a_pow_u = modinv(a_pow_u, p)
    for block in ciphertext[1:]:
        #sys.stdout.write(".")
        #trying to improve execution speed
        a_pow_u = mod_exp(block[0], u, p)
        inv_a_pow_u = modinv(a_pow_u, p)
        x = (block[1] * inv_a_pow_u) % p
        # Each block carries floor(log2(p)) plaintext bits, zero-padded.
        block_size = math.floor(math.log(p,2))
        bstream.append('0b' + bin(x)[2:].zfill(int(block_size)))
    # Return only the original plaintext bits, discarding trailing padding.
    return bstream.read(ciphertext[0])
def RTCM_converter_thread(server, port, username, password, mountpoint, rtcm_callback = None):
    # Spawn an ntripclient subprocess and forward framed RTCMv3 packets from
    # its stdout (or stdin as fallback) to rtcm_callback after parsing.
    # NOTE: Python 2 code (print statement below).
    import subprocess
    nt = subprocess.Popen(["./ntripclient", "--server", server,
                           "--password", password, "--user", username,
                           "--mountpoint", mountpoint ],
                          stdout=subprocess.PIPE)
    """nt = subprocess.Popen(["./ntrip.py", server, str(port), username, password, mountpoint], stdout=subprocess.PIPE)"""
    if nt is None or nt.stdout is None:
        indev = sys.stdin
    else:
        indev = nt.stdout
    print("RTCM using input {}".format(indev))
    while True:
        sio = indev
        # Scan byte-by-byte for the RTCMv3 preamble.
        d = ord(sio.read(1))
        if d != RTCMv3_PREAMBLE:
            continue
        pack_stream = BitStream()
        # Two length bytes follow: 6 reserved bits, then a 10-bit payload length.
        l1 = ord(sio.read(1))
        l2 = ord(sio.read(1))
        pack_stream.append(bs.pack('2*uint:8', l1, l2))
        pack_stream.read(6)
        pkt_len = pack_stream.read(10).uint
        pkt = sio.read(pkt_len)
        parity = sio.read(3)
        if len(pkt) != pkt_len:
            print "Length error {} {}".format(len(pkt), pkt_len)
            continue
        if True:  #TODO check parity
            # Append the payload bytes and hand the whole frame to the parser.
            for d in pkt:
                pack_stream.append(bs.pack('uint:8',ord(d)))
            msg = parse_rtcmv3(pack_stream)
            if msg is not None and rtcm_callback is not None:
                rtcm_callback(msg)
def create_sample_sync_frame():
    """Build a 32-bit MPEG audio frame header for tests: sync word set,
    Layer III, protection bit set, 128 kbps bitrate index."""
    frame = BitStream(bin="0" * 32)
    frame.set(True, range(0, 12))  # frame sync: 12 leading one-bits
    frame.set(True, 14)            # Layer III
    frame.set(True, 15)            # protection bit
    frame.set(True, 17)            # bitrate, 128k
    return frame
def load_data(self, data, offset = 0):
    # Extract an embedded GMO model from `data`, starting at byte `offset`.
    # Bit indices are offset * 8 because the stream is addressed in bits.
    if not data[offset * 8 : offset * 8 + GMO_MAGIC.len] == GMO_MAGIC:
        _LOGGER.error("GMO header not found at 0x%04X." % offset)
        return
    # The stored size field excludes a fixed amount, hence GMO_SIZE_DIFF.
    data.bytepos = offset + GMO_SIZE_OFFSET
    gmo_size = data.read("uintle:32") + GMO_SIZE_DIFF
    # Copy out the whole GMO region and scan it for embedded GIM images.
    self.data = BitStream(data[offset * 8 : (offset + gmo_size) * 8])
    self.__find_gims()
def serialize(self):
    """Pre-order serialise this Huffman tree node.

    A leaf is a 1-bit followed by its symbol as one hex byte; an internal
    node is a 0-bit followed by its left then right subtree.
    """
    out = BitStream()
    if self.leaf():
        out.append('bin=1')
        # Symbol encoded as a two-digit hex byte, e.g. 0x41.
        out.append("{0:#0{1}x}".format(self.value, 4))
    else:
        out.append('bin=0')
        out += self.left.serialize() + self.right.serialize()
    return out
def pack_txt(filename):
    """Pack a script file's translated (or original) text into a UTF-16LE
    BitStream, with BOM and terminator, for writing back into the game data."""
    # Nonstop Debate scripts live in specific directories and need special
    # newline handling (see below).
    is_nonstop = os.path.basename(os.path.dirname(filename)) in SCRIPT_NONSTOP

    script = ScriptFile(filename)
    # Prefer the translation; fall back to the original language text.
    text = script[common.editor_config.lang_trans]
    if not text:
        text = script[common.editor_config.lang_orig]

    # Nonstop Debate lines need an extra newline at the end
    # so they show up in the backlog properly.
    # (endswith also copes with empty text, which `text[-1]` crashed on.)
    if is_nonstop and not text.endswith(u"\n"):
        text += u"\n"

    text = SCRIPT_BOM + text + SCRIPT_NULL
    return BitStream(bytes = text.encode("UTF-16LE"))
def __init__(self, filp, pos, utype, size, tid, nosetup=False):
    # One NAL unit: record where it lives in the file plus its header fields,
    # then extract the RBSP payload and (optionally) parse it.
    self.filp = filp                  # source file object
    self.pos = pos                    # position of the NAL unit within the file
    self.num_bytes_in_nalu = size
    self.nal_unit_type = utype
    self.temporal_id = tid
    # RBSP payload bits — presumably filled by setup_rbsp() with
    # emulation-prevention bytes removed; confirm in that method.
    self.rbsp_byte = BitStream()
    self.setup_rbsp()
    self.print_bin()
    if not nosetup:
        self.setup()
class BitWriter:
    """Packs integer fields and raw strings into a byte buffer.

    Completed bytes accumulate in ``self._bytes`` (a Python 2 str used as a
    byte string); sub-byte leftovers are buffered in ``self._bits``.

    NOTE(review): new bits are placed *before* the pending ones
    (``news[:left] + self._bits``), and alignment padding is likewise
    prepended — i.e. the partial byte fills from its low end. Confirm this
    matches the corresponding reader before changing anything here.
    """

    def __init__(self):
        self._bytes = ""           # byte-aligned output accumulated so far
        self._bits = BitStream()   # pending bits (always fewer than 8)

    def write_int(self, length, value):
        """Append `value` as a `length`-bit unsigned integer."""
        news = BitStream(uint=value, length=length)
        start = 0
        # Step 1: top up the pending partial byte, if any.
        if self._bits.len > 0:
            left = 8 - self._bits.len        # bits needed to complete the byte
            if news.len < left:
                left = news.len
            # New bits go in front of the pending ones (see class note).
            self._bits = news[:left] + self._bits
            start += left
            if self._bits.len == 8:
                self._bytes += self._bits.tobytes()
                self._bits = BitStream()
        # Step 2: flush whole bytes directly (Python 2 integer division).
        byte_len = (news.len - start) / 8
        if byte_len > 0:
            more = byte_len * 8
            self._bytes += news[start:start+more].tobytes()
            start += more
        # Step 3: whatever remains (< 8 bits) becomes the new pending buffer.
        if news.len > start:
            self._bits = news[start:]

    def write_int64(self, length, value):
        """Append `value` as a `length`-bit integer, splitting writes wider than 32 bits."""
        if length <= 32:
            self.write_int(length, value)
            return
        count = length - 32
        self.write_int(32, value >> count)                   # high 32 bits first
        self.write_int(count, value & ((1 << count) - 1))    # then the low bits

    def write_char_array(self, max_length, value):
        """Append a length prefix (bit_count(max_length) bits) then the raw string.

        The string itself is byte-aligned: pending bits are zero-padded
        (padding prepended) before `value` is appended verbatim.
        """
        self.write_int(bit_count(max_length), len(value))
        if self._bits.len > 0:
            more = 8 - self._bits.len
            tail = (BitStream(int=0, length=more) + self._bits).tobytes()
            self._bits = BitStream()
            self._bytes += tail
        self._bytes += value

    def get_bytes(self):
        """Return all written data, zero-padding any pending bits to a full byte.

        Does not mutate state: the pending-bit buffer is left as-is.
        """
        if self._bits.len > 0:
            more = 8 - self._bits.len
            tail = (BitStream(int=0, length=more) + self._bits).tobytes()
            return self._bytes + tail
        else:
            return self._bytes
def encode(self, in_stream, out_stream):
    """Re-chunk `in_stream` into `self.int_type`-sized indices written via print_index.

    Reads the input in `self.byte_buffer`-byte chunks into a BitStream; once
    the input is exhausted, drains the stream one index at a time. The final
    partial read raises bitstring's ReadError, which is where any leftover
    bits (fewer than a full index) are flushed.

    FIX: removed an unreachable duplicate of the drain loop. The original had
    a second `try: while True: print_index(...) except ReadError: pass` right
    after the bare drain loop — dead code, since the bare `while True` can
    only exit by raising ReadError up to the outer handler (and had the
    swallowing copy ever run, the outer loop would have spun forever on an
    empty stream).
    """
    extra_bits = self.num_bits
    bs = BitStream()
    try:
        while True:
            chunk = in_stream.read(self.byte_buffer)
            if chunk:
                bs.append(BitStream(bytes=chunk))
            else:
                # Input exhausted: emit full-width indices until the stream
                # runs dry; the short final read raises ReadError, handled below.
                while True:
                    self.print_index(bs.read(self.int_type), out_stream)
    except ReadError:
        # Fewer than int_type bits remain; flush them if there are any.
        extra_bits = bs.len - bs.bitpos
        if extra_bits > 0:
            self.print_index(bs.read('uint:' + str(extra_bits)), out_stream)
        else:
            extra_bits = self.num_bits
def uncompress_golomb_coding(coded_bytes, hash_length, M):
    """Decode a Golomb/Rice-compressed, sorted list of fixed-size hashes.

    :param coded_bytes: packed data — the first hash verbatim, then for each
        subsequent hash a unary quotient (1-bits terminated by a 0) and an
        M-bit-wide binary remainder encoding the delta from the previous hash.
    :param hash_length: size of each hash in bytes.
    :param M: Rice parameter; must be a power of two.
    :return: list of hash byte strings in ascending order.
    """
    ret_list = []
    # FIX: the stream length is the total number of *bits* available, i.e.
    # len(coded_bytes) * 8. The old code multiplied by hash_length, which is
    # only correct in the accidental case hash_length == 8 (bytes per hash,
    # not bits per byte).
    instream = BitStream(
        bytes=coded_bytes, length=len(coded_bytes) * 8)
    hash_len_bits = hash_length * 8
    m_bits = int(math.log(M, 2))
    # The first hash is stored uncompressed.
    prev = instream.read("bits:%d" % hash_len_bits)
    ret_list.append(prev.tobytes())
    while instream.bitpos < instream.length:
        # Unary quotient: count 1-bits up to the terminating 0.
        read_prefix = 0
        curr_bit = instream.read("uint:1")
        while curr_bit == 1:
            read_prefix += 1
            curr_bit = instream.read("uint:1")
        assert curr_bit == 0
        # Binary remainder, m_bits wide.
        r = instream.read("uint:%d" % m_bits)
        # Reconstruct the absolute value from the delta.
        curr_diff = read_prefix * M + r
        curr_value_int = prev.uint + curr_diff
        curr_value = Bits(uint=curr_value_int, length=hash_len_bits)
        ret_list.append(curr_value.tobytes())
        prev = curr_value
    return ret_list
def __init__(self, data, base64=False):
    """
    Constructor.

    :param string data: binary data
    :param bool base64: True when ``data`` is base64-encoded
    """
    if base64:
        data = b64decode(data)
    #: raw data size in bytes
    self._sz = len(data)
    #: bit stream used to read the data
    self._bs = BitStream(bytes=data)
def pack_pak(dir, file_list = None, align_toc = 16, align_files = 16, eof = False):
    """Pack the contents of `dir` into a .pak archive BitStream.

    Layout: uintle:32 file count, one uintle:32 byte offset per file
    (plus an end-of-file offset when `eof` is set), then each file's data,
    with the TOC and each file padded to the given alignments.

    :param dir: directory whose entries are packed (subdirs recurse via pack_dir).
    :param file_list: explicit entry order; defaults to sorted listdir.
    :param align_toc: TOC padding boundary in bytes.
    :param align_files: per-file padding boundary in bytes.
    :param eof: also write the archive's total size as a final TOC entry.
    """
    # FIX: identity comparison with None (was `file_list == None`).
    if file_list is None:
        file_list = sorted(os.listdir(dir))

    num_files = len(file_list)

    # Count word plus one offset per file.
    toc_length = (num_files + 1) * 4

    if eof:
        # NOTE(review): the EOF entry needs 4 more TOC bytes; the +1 here only
        # works because the alignment rounding below normally provides the
        # rest — confirm against archives packed with small align_toc values.
        toc_length += 1

    if toc_length % align_toc > 0:
        toc_length += align_toc - (toc_length % align_toc)

    # Zero-filled TOC region; offsets are overwritten as files are appended.
    archive_data = BitStream(uintle = 0, length = toc_length * 8)
    archive_data.overwrite(bitstring.pack("uintle:32", num_files), 0)

    for file_num, item in enumerate(file_list):
        full_path = os.path.join(dir, item)

        if os.path.isfile(full_path):
            data = pack_file(full_path)
        else:
            data = pack_dir(full_path, align_toc, align_files, eof)

        # Pad the entry out to the file alignment boundary.
        file_size = data.len / 8
        padding = 0

        if file_size % align_files > 0:
            padding = align_files - (file_size % align_files)
            data.append(BitStream(uintle = 0, length = padding * 8))

        # Record this entry's byte offset in the TOC, then append its data.
        file_pos = archive_data.len / 8
        archive_data.overwrite(bitstring.pack("uintle:32", file_pos), (file_num + 1) * 32)
        archive_data.append(data)

        del data

    if eof:
        # Final TOC slot holds the total archive size.
        archive_data.overwrite(bitstring.pack("uintle:32", archive_data.len / 8), (num_files + 1) * 32)

    return archive_data
def dataEntryAppend(self, eventLogEntryBitStream:BitStream):
    """Append one ECDA event-log entry to the internal data dict.

    The latest entry sits at the end of the whole ECDA dump, so feeding
    entries here collects them in reverse order.

    :param eventLogEntryBitStream: stream positioned at the entry to decode
    :return: None
    """
    # Field order matters: timestamp (32 hex digits), event id (16), extra (16).
    timestampHex = eventLogEntryBitStream.read('hex:{}'.format(32))
    eventIdHex = eventLogEntryBitStream.read('hex:{}'.format(16))
    extraInfo = eventLogEntryBitStream.read('hex:{}'.format(16))
    eventId = int(eventIdHex, 16)

    self._dataDict['timestampHex'].append(timestampHex)
    self._dataDict['eventId'].append(eventId)
    self._dataDict['eventIdHex'].append(eventIdHex)
    self._dataDict['extraInfo'].append(extraInfo)
def compress(self, data):
    """Huffman-compress `data` into a bytearray.

    Output layout:
      * 1 byte  - number of zero pad bits appended for byte alignment
      * 2 bytes - serialized Huffman tree length in bits
      * the serialized tree (not byte aligned)
      * the encoded payload, padded with 0-7 zero bits
    """
    # Build the Huffman tree by repeatedly merging the two lightest nodes.
    frequencies = Counter(data)
    heap = [HuffmanNode(value=sym, weight=frequencies[sym]) for sym in frequencies]
    heapify(heap)
    while len(heap) > 1:
        lo = heappop(heap)
        hi = heappop(heap)
        heappush(heap, HuffmanNode(lo, hi, weight=lo.weight + hi.weight))
    root = heappop(heap)

    dictionary = root.assign_codes()

    # The tree ships with the data so the decompressor can rebuild it.
    tree = root.serialize()
    if len(bin(len(tree))[2:]) > 16:
        raise ValueError("Huffman tree len is max 10*255-1 bit")

    body = BitStream()
    # Tree length as hex, zero-padded to two bytes ("0xNNNN").
    body.append("{0:#0{1}x}".format(len(tree), 6))
    body += tree
    for sym in data:
        body.append('bin=' + dictionary[sym])

    # Zero-pad the payload to a byte boundary.
    pad = (8 - len(body) % 8) % 8
    body.append('bin=' + '0' * pad)

    # Prepend the pad count as a single byte and emit raw bytes.
    return bytearray((BitStream("{0:#0{1}x}".format(pad, 4)) + body).bytes)
def to_bin(commands):
    """Serialize a list of (opcode, params) WRD commands into binary.

    Skips WRD_HEADER entries while counting WRD_SHOW_LINE ops, then prepends
    a header carrying that line count. Unknown opcodes are reported and
    skipped rather than aborting the whole pack.

    FIX: idiomatic `op not in` / `is None` comparisons (was `not op in` and
    `param_name == None`); behavior unchanged.
    """
    data = BitStream()
    lines = 0

    for op, params in commands:
        if op == WRD_HEADER:
            continue

        if op not in OP_PARAMS:
            # raise Exception("Unknown op: 0x%02X" % op)
            print("Unknown op: 0x%02X" % op)
            continue

        param_info = OP_PARAMS[op]

        # Ops with a custom parser have an equivalent packing function
        # registered by name in OP_FUNCTIONS.
        if isinstance(param_info, basestring):
            command = globals()[OP_FUNCTIONS[op]](**params)
            data.append(command)

        else:
            if op == WRD_SHOW_LINE:
                lines += 1

            data.append(bitstring.pack("uint:8, uint:8", CMD_MARKER, op))

            # Unnamed params are collected in a list under the None key.
            unnamed_param_id = 0

            for param_name, param_type in param_info:
                if param_name is None:
                    data.append(bitstring.pack(param_type, params[param_name][unnamed_param_id]))
                    unnamed_param_id += 1
                else:
                    data.append(bitstring.pack(param_type, params[param_name]))

    return bitstring.pack("uint:8, uint:8, uintle:16", CMD_MARKER, WRD_HEADER, lines) + data
def load_data(self, data): files = [entry_data for (entry_name, entry_data) in get_pak_files(data)] # There are always at least four files in a model pak. # The first three I don't know a lot about, and then # the GMO files come after that. if len(files) < 4: _LOGGER.error("Invalid model PAK. %d files found, but at least 4 needed." % len(files)) return # The name pak contains a list of null-terminated names for # each of the models, stored in our standard pak format. name_pak = files[0] names = [entry_data.bytes.strip('\0') for (entry_name, entry_data) in get_pak_files(name_pak)] # Most of the model paks in SDR2 have a fourth unknown file before the models # start, so we'll just take everything from the back end and call it a day. models = files[-len(names):] # Now, we don't get file positions from the unpacker, so let's find those # and start filling out our internal list of GMO files. file_starts, file_ends = parse_pak_toc(data) model_starts = file_starts[-len(names):] for i, model in enumerate(models): # First of all, not all of the "models" present are actually GMO files. # It's rare, but there is the occasional other unknown format. # So let's make sure we have a GMO file. if not model[:GMO_MAGIC.len] == GMO_MAGIC: # print i, "Not a GMO." continue name = names[i] gmo = GmoFile(data = model) size = model.len / 8 start = model_starts[i] self.__gmo_files.append({ _NAME: name, _START: start, _SIZE: size, _DATA: gmo, }) self.__data = BitStream(data)