def save(self, filename, font_type = FONT_TYPES.font01, game = GAMES.dr):
    data = BitStream(SPFT_MAGIC)
    data += BitStream(uintle = len(self.data), length = 32)

    mapping_table_len = self.find_max_char() + 1 # Zero-indexed, so +1 for the size.
    mapping_table_start = 0x20
    font_table_start = mapping_table_len * 2 + mapping_table_start

    data += BitStream(uintle = font_table_start, length = 32)
    data += BitStream(uintle = mapping_table_len, length = 32)
    data += BitStream(uintle = mapping_table_start, length = 32)
    data += UNKNOWN1[game][font_type] + UNKNOWN2

    data += self.gen_mapping_table(mapping_table_len)
    data += self.gen_font_table()

    # Pad the file out to a 16-byte boundary.
    padding = BitStream(hex = '0x00') * (16 - ((data.len / 8) % 16))
    data += padding

    with open(filename, "wb") as f:
        data.tofile(f)
def uncompress_golomb_coding(coded_bytes, hash_length, M):
    """Given a bytestream produced using golomb_coded_bytes, uncompress it."""
    ret_list = []
    instream = BitStream(bytes=coded_bytes, length=len(coded_bytes) * 8)
    hash_len_bits = hash_length * 8
    m_bits = int(math.log(M, 2))
    # First item is a full hash value.
    prev = instream.read("bits:%d" % hash_len_bits)
    ret_list.append(prev.tobytes())
    while (instream.bitpos + m_bits) <= instream.length:
        # Read the unary-encoded quotient prefix.
        read_prefix = 0
        curr_bit = instream.read("uint:1")
        while curr_bit == 1:
            read_prefix += 1
            curr_bit = instream.read("uint:1")
        assert curr_bit == 0
        # Read the remainder r, assuming m_bits bits were used to represent it.
        r = instream.read("uint:%d" % m_bits)
        curr_diff = read_prefix * M + r
        curr_value_int = prev.uint + curr_diff
        curr_value = Bits(uint=curr_value_int, length=hash_len_bits)
        ret_list.append(curr_value.tobytes())
        prev = curr_value
    return ret_list
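# A minimal, hand-built usage sketch for uncompress_golomb_coding (not
# from the original project): with hash_length=1 and M=4, each delta is
# a unary quotient, a 0 terminator, and a 2-bit remainder. 0x05 is the
# first full hash; 0x9A packs '1 0 01' (diff 4*1+1=5) and '1 0 10'
# (diff 4*1+2=6), yielding the values 5, 10 and 16.
assert uncompress_golomb_coding(b"\x05\x9a", 1, 4) == [b"\x05", b"\x0a", b"\x10"]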
class Mem(object):
    def __init__(self):
        self.real = BitStream(600 * 16)  # 600 16-bit words, zero-initialized
        self.jumps = 0

    def load(self, file):
        self.real = BitStream(filename=file)

    def save(self, file):
        self.real.tofile(file)

    def jump(self, pos):
        self.jumps += 1
        self.real.bytepos = pos

    def read(self, size=16):
        return self.real.read(size)

    def get(self, pos, size=16):
        realpos = pos * 8
        return self.real[realpos:realpos + size]

    def set(self, pos, bits):
        realpos = pos * 8
        self.real[realpos:realpos + len(bits)] = bits

    @property
    def pos(self):
        return self.real.bytepos
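# Hypothetical usage of Mem (positions are byte offsets; reads and
# writes default to 16 bits):
m = Mem()
m.set(4, BitStream(uint=0xBEEF, length=16))
assert m.get(4).uint == 0xBEEF
m.jump(4)
assert m.read().uint == 0xBEEF and m.pos == 6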
def adjust_tiles(tiles, size, isd_text):
    ChunkMap = re.split(r"<ChunkMap>", isd_text)
    if len(ChunkMap) != 2:
        e = "Previous split should have resulted in 2 strings. {} found".format(len(ChunkMap))
        raise NotImplementedError(e)
    ChunkMap = ChunkMap[1]

    # First 2 characters after the <Width> tag in <ChunkMap> => up to 99 chunks
    # (= 1584 x 1584 tiles per island).
    (width_tiles, height_tiles) = size  #@UnusedVariable
    width_chunks = int(re.split(r"<Width>", ChunkMap[:100])[1][:2].strip("<"))
    height_chunks = int(re.split(r"<Height>", ChunkMap[:100])[1][:2].strip("<"))  #@UnusedVariable

    chunks = re.split(r"<Element>", ChunkMap)[1:]
    for i in range(len(chunks)):
        VertexResolution = re.split(r"<VertexResolution>", chunks[i])[1][0]
        if VertexResolution in ("-", "5"):  # -1 => empty chunk
            continue
        VertexResolution = int(VertexResolution)

        HeightMap = re.split(r"HeightMap[^C]*CDATA\[", chunks[i])[1:]
        start_x = i % width_chunks
        start_z = i // width_chunks
        resolution = {4: [4, 4], 5: [25, 15], 6: [142, 58]}[VertexResolution]
        useful_bytes = 17 * 17 * resolution[1]
        load_bytes = resolution[0] + useful_bytes
        bits_per_tile = resolution[1] * 8

        data = BitStream(bytes=HeightMap[0][:load_bytes][-useful_bytes:])
        read_string = "uint:{}".format(bits_per_tile)
        for z in range(16):
            for x in range(17):
                position = start_z*16*width_tiles + z*240 + start_x*16 + x
                d = int(data.read(read_string))
                # Trial and error: 0x3333333f = 858993471, 0x33 = 51,
                # 0x3f = 63, 0x3333 = 13107, 0x333f = 13119
                if x != 16 and d == 858993471:
                    tiles[position] = 255
    return None
def parse(self):
    bs = BitStream(filename=self.file_path)
    [index_start, extension_data_start] = parse_header(bs)
    self.app_info = parse_app_info(bs)
    bs.bytepos = index_start
    self.indexes = parse_index(bs)
    self.parsed = True
def save(self, filename):
    data = BitStream(self.magic) + BitStream(uintle = len(self.lines), length = 16)

    for line in self.lines:
        data += line.to_data()

    with open(filename, "wb") as f:
        data.tofile(f)
def __init__(self, chunk_id, size, data):
    super(HeaderChunk, self).__init__(chunk_id, size)
    data_stream = BitStream(data)
    # First two bytes are the format type.
    self.format_type = data_stream.read('bits:16').int
    # Second two bytes are the number of tracks.
    self.num_of_tracks = data_stream.read('bits:16').int
    # Third two bytes are the time division.
    self.time_division = Bits(data_stream.read('bits:16'))
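# Sketch: feeding HeaderChunk the 6 data bytes of a standard MIDI
# header (format 1, 2 tracks, 480 ticks per quarter note); the
# chunk_id/size arguments here are illustrative guesses.
chunk = HeaderChunk(b"MThd", 6, b"\x00\x01\x00\x02\x01\xe0")
assert chunk.format_type == 1 and chunk.num_of_tracks == 2
assert chunk.time_division.uint == 480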
def decompress(data):
    feed = []
    binary = BitStream(bytes=data)
    binary_length = len(binary.bin)
    # Each token is a WINDOW_BITS-bit back-reference distance followed
    # by a LENGTH_BITS-bit length; distance 0 marks a literal character.
    while binary_length - binary.pos >= (WINDOW_BITS + LENGTH_BITS):
        distance = binary.read('uint:%d' % WINDOW_BITS)
        c_or_length = binary.read('uint:%d' % LENGTH_BITS)
        if distance == 0:
            c_or_length = chr(c_or_length)
        feed.append([distance, c_or_length])
    return feed2text(feed)
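# Sketch of the token layout decompress() expects, assuming the module
# constants are WINDOW_BITS = LENGTH_BITS = 8: each 16-bit token is a
# distance plus a length, and distance 0 marks a literal byte, so
# b"\x00a\x00b\x02\x02" becomes feed2text([[0, 'a'], [0, 'b'], [2, 2]]),
# i.e. the literals 'a', 'b' followed by a 2-byte copy from distance 2
# ("abab").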
def decode(self, in_stream, out_stream):
    bs = BitStream()
    dq = deque()
    at_least_three = False
    for word in self.words_from_file(in_stream):
        if not word or word not in self.word_dict:
            continue
        #print >> sys.stderr, 'word:"', word, '"'
        dq.append(self.word_dict[word])
        if at_least_three or len(dq) == 3:
            bs.append(pack(self.int_type, dq.popleft()))
            at_least_three = True
            if bs.len > self.bit_buffer:
                cut = 0
                for byte in bs.cut(self.bit_buffer):
                    cut += 1
                    byte.tofile(out_stream)
                del bs[:cut * self.bit_buffer]
    # dq has to have exactly 2 elements here; the last is the bit
    # length of the first, unless it's 0.
    #print >> sys.stderr, 'dq:', dq
    extra_bits = dq.pop()
    bs.append(pack('uint:' + str(extra_bits), dq.popleft()))
    bs.tofile(out_stream)
def uncompress_delta_diff(compressed_input, hash_length):
    ret_list = []
    instream = BitStream(bytes=compressed_input, length=len(compressed_input) * 8)
    hash_len_bits = hash_length * 8
    prev = instream.read("bits:%d" % hash_len_bits)
    ret_list.append(prev.tobytes())
    # Must always have at least 6 bits to read.
    while (instream.bitpos + 6) < instream.length:
        curr_diff_len = instream.read("uint:6") + 1
        curr_diff = instream.read("bits:%d" % curr_diff_len)
        curr_item = prev[:hash_len_bits - curr_diff_len] + curr_diff
        assert curr_item.length == hash_len_bits
        ret_list.append(curr_item.tobytes())
        prev = curr_item
    return ret_list
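# Minimal hand-built input for uncompress_delta_diff: 0xF0 is the full
# first hash; the next 6 bits (000001) encode "2-bit diff follows", and
# the diff bits '11' replace the low 2 bits of the previous hash.
assert uncompress_delta_diff(b"\xf0\x07", 1) == [b"\xf0", b"\xf3"]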
def write_char_array(self, max_length, value):
    self.write_int(bit_count(max_length), len(value))
    if self._bits.len > 0:
        more = 8 - self._bits.len
        tail = (BitStream(int=0, length=more) + self._bits).tobytes()
        self._bits = BitStream()
        self._bytes += tail
    self._bytes += value
def main():
    for datum in DATA:
        as_hex = ":".join("{:02x}".format(h) for h in datum)
        as_bin = ":".join("{:08b}".format(h) for h in datum)
        print(as_hex)
        print(as_bin)
        a = BitStream(datum)
        # The slice header fields are unsigned Exp-Golomb ('ue') coded.
        first_mb_in_slice = a.read('ue')
        slice_type = a.read('ue')
        pic_parameter_set_id = a.read('ue')
        frame_num = a.read(9)
        print("first-mb-in-slice: {}".format(first_mb_in_slice))
        print("slice-type: {}".format(slice_type))
        print("pic-parameter-set-id: {}".format(pic_parameter_set_id))
        print("frame-num: {}".format(frame_num.int))
def gen():
    consonants = "bcdfghjklmnpqrstvwxyz"
    vowels = "aeiou"
    generated = ""
    # 9 random bytes = 72 bits, enough for the 50 bits consumed below.
    randdata = subprocess.check_output(["openssl", "rand", "9"])
    assert len(randdata) == 9
    bs = BitStream(randdata)
    generated += consonants[bs.read('int:5') % len(consonants)]
    for i in range(5):
        generated += vowels[bs.read('int:3') % len(vowels)]
        generated += consonants[bs.read('int:5') % len(consonants)]
    generated += consonants[bs.read('int:5') % len(consonants)]
    return generated
def decompress(self, data):
    bitstream = BitStream(data)
    pad = bitstream.read(8).int
    # Remove the pad bits; slicing resets the read pointer, so do a
    # dummy read to skip past the pad byte again.
    if pad > 0:
        bitstream = bitstream[:-pad]
        bitstream.read(8)
    tree_len = bitstream.read(16).int
    tree_serial = bitstream.read(tree_len)
    tree = HuffmanNode()
    tree.deserialize(tree_serial)
    dictionary = tree.assign_codes()
    dictionary = {v: k for k, v in dictionary.items()}  # reverse dict
    result = bytearray()
    sequence = ""
    while True:
        try:
            bit = bitstream.read(1)
        except ReadError:
            break
        if bit:
            sequence += '1'
        else:
            sequence += '0'
        if sequence in dictionary:
            result.append(dictionary[sequence])
            sequence = ""
    return result
def decrypt(ciphertext, key):
    bstream = BitStream()
    p, g, y = key[0]
    u = key[1]
    # Trying to improve execution speed:
    #a_pow_u = mod_exp(ciphertext[1][0], u, p)
    #inv_a_pow_u = modinv(a_pow_u, p)
    for block in ciphertext[1:]:
        #sys.stdout.write(".")
        # Trying to improve execution speed:
        a_pow_u = mod_exp(block[0], u, p)
        inv_a_pow_u = modinv(a_pow_u, p)
        x = (block[1] * inv_a_pow_u) % p
        block_size = math.floor(math.log(p, 2))
        bstream.append('0b' + bin(x)[2:].zfill(int(block_size)))
    # ciphertext[0] holds the bit length of the original plaintext.
    return bstream.read(ciphertext[0])
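# Toy-sized sketch of the expected shapes (not a secure parameter set):
# key = ((p, g, y), u) with private exponent u, and ciphertext =
# [plaintext_bit_length, (a, b), ...]. Here p=23, g=5, u=6, y=5**6%23=8,
# and the block (10, 8) encrypts m=9 with ephemeral k=3.
key = ((23, 5, 8), 6)
ciphertext = [4, (10, 8)]
assert decrypt(ciphertext, key).uint == 9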
def RTCM_converter_thread(server, port, username, password, mountpoint, rtcm_callback = None):
    import subprocess
    nt = subprocess.Popen(["./ntripclient", "--server", server,
                           "--password", password, "--user", username,
                           "--mountpoint", mountpoint],
                          stdout=subprocess.PIPE)
    #nt = subprocess.Popen(["./ntrip.py", server, str(port), username, password, mountpoint],
    #                      stdout=subprocess.PIPE)

    if nt is None or nt.stdout is None:
        indev = sys.stdin
    else:
        indev = nt.stdout

    print("RTCM using input {}".format(indev))

    while True:
        sio = indev

        # Scan for the RTCMv3 preamble byte.
        d = ord(sio.read(1))
        if d != RTCMv3_PREAMBLE:
            continue

        pack_stream = BitStream()

        l1 = ord(sio.read(1))
        l2 = ord(sio.read(1))

        pack_stream.append(bs.pack('2*uint:8', l1, l2))
        pack_stream.read(6)  # 6 reserved bits
        pkt_len = pack_stream.read(10).uint

        pkt = sio.read(pkt_len)
        parity = sio.read(3)

        if len(pkt) != pkt_len:
            print("Length error {} {}".format(len(pkt), pkt_len))
            continue

        if True:  # TODO: check parity
            for d in pkt:
                pack_stream.append(bs.pack('uint:8', ord(d)))
            msg = parse_rtcmv3(pack_stream)
            if msg is not None and rtcm_callback is not None:
                rtcm_callback(msg)
def create_sample_sync_frame():
    stream = BitStream(bin="0" * 32)
    stream.set(True, range(0, 12))  # frame sync
    stream.set(True, 14)            # Layer III
    stream.set(True, 15)            # protection bit
    stream.set(True, 17)            # bitrate, 128k
    return stream
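# For reference, the frame built above serializes to 0xFFF34000
# (bits 0-11 for the sync word, plus bits 14, 15 and 17):
assert create_sample_sync_frame().bytes == b"\xff\xf3\x40\x00"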
def load_data(self, data, offset = 0):
    if not data[offset * 8 : offset * 8 + GMO_MAGIC.len] == GMO_MAGIC:
        _LOGGER.error("GMO header not found at 0x%04X." % offset)
        return

    data.bytepos = offset + GMO_SIZE_OFFSET
    gmo_size = data.read("uintle:32") + GMO_SIZE_DIFF

    self.data = BitStream(data[offset * 8 : (offset + gmo_size) * 8])
    self.__find_gims()
def __init__(self, filp, pos, utype, size, tid, nosetup=False):
    self.filp = filp
    self.pos = pos
    self.num_bytes_in_nalu = size
    self.nal_unit_type = utype
    self.temporal_id = tid
    self.rbsp_byte = BitStream()
    self.setup_rbsp()
    self.print_bin()
    if not nosetup:
        self.setup()
def serialize(self):
    bitstream = BitStream()
    if self.leaf():
        # Leaf marker bit, then the byte value as two hex digits.
        bitstream.append('bin=1')
        bitstream.append("{0:#0{1}x}".format(self.value, 4))
    else:
        # Internal node: marker bit, then both subtrees in preorder.
        bitstream.append('bin=0')
        bitstream += self.left.serialize() + self.right.serialize()
    return bitstream
def pack_txt(filename):
    if os.path.basename(os.path.dirname(filename)) in SCRIPT_NONSTOP:
        is_nonstop = True
    else:
        is_nonstop = False

    script = ScriptFile(filename)
    text = script[common.editor_config.lang_trans]
    if not text:
        text = script[common.editor_config.lang_orig]

    # Nonstop Debate lines need an extra newline at the end
    # so they show up in the backlog properly.
    if is_nonstop and not text[-1] == u"\n":
        text += u"\n"

    text = SCRIPT_BOM + text + SCRIPT_NULL
    text = BitStream(bytes = text.encode("UTF-16LE"))

    return text
class BitWriter:
    def __init__(self):
        self._bytes = ""
        self._bits = BitStream()

    def write_int(self, length, value):
        news = BitStream(uint=value, length=length)
        start = 0
        # Top up any partial byte first; new bits go in front of the
        # buffered ones.
        if self._bits.len > 0:
            left = 8 - self._bits.len
            if news.len < left:
                left = news.len
            self._bits = news[:left] + self._bits
            start += left
            if self._bits.len == 8:
                self._bytes += self._bits.tobytes()
                self._bits = BitStream()
        # Copy whole bytes directly.
        byte_len = (news.len - start) / 8
        if byte_len > 0:
            more = byte_len * 8
            self._bytes += news[start:start+more].tobytes()
            start += more
        # Buffer any remaining bits.
        if news.len > start:
            self._bits = news[start:]

    def write_int64(self, length, value):
        if length <= 32:
            self.write_int(length, value)
            return
        count = length - 32
        self.write_int(32, value >> count)
        self.write_int(count, value & ((1 << count) - 1))

    def write_char_array(self, max_length, value):
        self.write_int(bit_count(max_length), len(value))
        if self._bits.len > 0:
            more = 8 - self._bits.len
            tail = (BitStream(int=0, length=more) + self._bits).tobytes()
            self._bits = BitStream()
            self._bytes += tail
        self._bytes += value

    def get_bytes(self):
        if self._bits.len > 0:
            more = 8 - self._bits.len
            tail = (BitStream(int=0, length=more) + self._bits).tobytes()
            return self._bytes + tail
        else:
            return self._bytes
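# Sketch of how BitWriter packs sub-byte fields (Python 2, to match the
# str-based _bytes buffer above): each write_int prepends its bits to
# the partial byte, so two 4-bit writes of 0xA then 0x5 flush as 0x5A.
w = BitWriter()
w.write_int(4, 0xA)
w.write_int(4, 0x5)
assert w.get_bytes() == "\x5a"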
def encode(self, in_stream, out_stream):
    extra_bits = self.num_bits
    bs = BitStream()
    try:
        while True:
            chunk = in_stream.read(self.byte_buffer)
            #print >> sys.stderr, 'chunk:', chunk
            if chunk:
                bs.append(BitStream(bytes=chunk))
            else:
                # EOF: drain the buffer until a ReadError bubbles up
                # to the outer handler.
                while True:
                    self.print_index(bs.read(self.int_type), out_stream)
            try:
                while True:
                    self.print_index(bs.read(self.int_type), out_stream)
            except ReadError, e:
                #print >> sys.stderr, 'inner:', e
                pass
    except ReadError, e:
        #print >> sys.stderr, 'outer:', e
        extra_bits = bs.len - bs.bitpos
        if extra_bits > 0:
            #print >> sys.stderr, 'extra_bits:', extra_bits
            self.print_index(bs.read('uint:' + str(extra_bits)), out_stream)
        else:
            extra_bits = self.num_bits
def __init__(self, data, base64=False):
    """
    Constructor.

    :param string data: Binary data
    :param bool base64: True if data is coded in base64
    """
    if base64:
        data = b64decode(data)
    #: raw data size
    self._sz = len(data)
    #: bit stream used to read data
    self._bs = BitStream(bytes=data)
def uncompress_golomb_coding(coded_bytes, hash_length, M):
    ret_list = []
    instream = BitStream(bytes=coded_bytes, length=len(coded_bytes) * 8)
    hash_len_bits = hash_length * 8
    m_bits = int(math.log(M, 2))
    prev = instream.read("bits:%d" % hash_len_bits)
    ret_list.append(prev.tobytes())
    while instream.bitpos < instream.length:
        read_prefix = 0
        curr_bit = instream.read("uint:1")
        while curr_bit == 1:
            read_prefix += 1
            curr_bit = instream.read("uint:1")
        assert curr_bit == 0
        r = instream.read("uint:%d" % m_bits)
        curr_diff = read_prefix * M + r
        curr_value_int = prev.uint + curr_diff
        curr_value = Bits(uint=curr_value_int, length=hash_len_bits)
        ret_list.append(curr_value.tobytes())
        prev = curr_value
    return ret_list
def pack_pak(dir, file_list = None, align_toc = 16, align_files = 16, eof = False):
    if file_list is None:
        file_list = sorted(os.listdir(dir))

    num_files = len(file_list)
    toc_length = (num_files + 1) * 4

    if eof:
        toc_length += 1

    if toc_length % align_toc > 0:
        toc_length += align_toc - (toc_length % align_toc)

    archive_data = BitStream(uintle = 0, length = toc_length * 8)
    archive_data.overwrite(bitstring.pack("uintle:32", num_files), 0)

    for file_num, item in enumerate(file_list):
        full_path = os.path.join(dir, item)

        if os.path.isfile(full_path):
            data = pack_file(full_path)
        else:
            data = pack_dir(full_path, align_toc, align_files, eof)

        file_size = data.len / 8
        padding = 0

        if file_size % align_files > 0:
            padding = align_files - (file_size % align_files)
            data.append(BitStream(uintle = 0, length = padding * 8))

        file_pos = archive_data.len / 8
        archive_data.overwrite(bitstring.pack("uintle:32", file_pos), (file_num + 1) * 32)
        archive_data.append(data)
        del data

    if eof:
        archive_data.overwrite(bitstring.pack("uintle:32", archive_data.len / 8), (num_files + 1) * 32)

    return archive_data
def dataEntryAppend(self, eventLogEntryBitStream:BitStream):
    """
    Since the latest entry is at the end of the whole ECDA dump,
    collect all entries and store them in reverse order.

    :param eventLogEntryBitStream:
    :return:
    """
    timestampHex = eventLogEntryBitStream.read('hex:{}'.format(32))
    eventIdHex = eventLogEntryBitStream.read('hex:{}'.format(16))
    eventId = int(eventIdHex, 16)
    extraInfo = eventLogEntryBitStream.read('hex:{}'.format(16))
    self._dataDict['timestampHex'].append(timestampHex)
    ##self._dataDict['timestamp'].append(timestamp)
    ##self._dataDict['timeLast'].append(abs(timestamp-self._time))
    ##self._dataDict['timeLastInUs'].append(abs(timestamp - self._time)*self._timeGranunityUs)
    ##self._time = timestamp
    self._dataDict['eventId'].append(eventId)
    self._dataDict['eventIdHex'].append(eventIdHex)
    self._dataDict['extraInfo'].append(extraInfo)
def compress(self, data):
    weight = Counter(data)
    priority_queue = [HuffmanNode(value=byte, weight=weight[byte])
                      for byte in weight]
    heapify(priority_queue)
    while len(priority_queue) > 1:
        left = heappop(priority_queue)
        right = heappop(priority_queue)
        node = HuffmanNode(left, right, weight=left.weight + right.weight)
        heappush(priority_queue, node)
    root = heappop(priority_queue)
    dictionary = root.assign_codes()
    # We need to add the tree to the compressed data so that the
    # decompressor can rebuild it in order to do its work.
    tree = root.serialize()
    result = BitStream()
    tree_len_bits = len(bin(len(tree))[2:])
    if tree_len_bits > 16:
        raise ValueError("Huffman tree len is max 10*255-1 bit")
    # This converts len(tree) to hex, zero-padded at the front to two bytes.
    result.append("{0:#0{1}x}".format(len(tree), 6))
    result += tree
    for byte in data:
        result.append('bin=' + dictionary[byte])
    pad = 0
    if len(result) % 8 != 0:
        pad = 8 - len(result) % 8
        result.append('bin=' + '0' * pad)
    # The compressed data layout is as follows:
    #   * 1B - number of pad bits (for byte alignment)
    #   * 2B - Huffman tree length (which, btw, = 10*num_of_chars_in_the_tree - 1)
    #   * the Huffman tree itself (not byte aligned)
    #   * the encoded data, padded with 0-7 bits at the end
    result = BitStream("{0:#0{1}x}".format(pad, 4)) + result
    return bytearray(result.bytes)
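# Hypothetical round-trip through the compress()/decompress() pair
# above (the owning class name is assumed here):
codec = Huffman()
packed = codec.compress(b"abracadabra")
assert codec.decompress(packed) == bytearray(b"abracadabra")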
def to_bin(commands):
    data = BitStream()
    lines = 0

    for op, params in commands:
        if op == WRD_HEADER:
            continue

        if not op in OP_PARAMS:
            # raise Exception("Unknown op: 0x%02X" % op)
            print "Unknown op: 0x%02X" % op
            continue

        param_info = OP_PARAMS[op]

        # If it has a custom parsing function, use the equivalent packing function.
        if isinstance(param_info, basestring):
            command = globals()[OP_FUNCTIONS[op]](**params)
            data.append(command)
        else:
            if op == WRD_SHOW_LINE:
                lines += 1

            data.append(bitstring.pack("uint:8, uint:8", CMD_MARKER, op))

            unnamed_param_id = 0

            for param_name, param_type in param_info:
                if param_name == None:
                    data.append(bitstring.pack(param_type, params[param_name][unnamed_param_id]))
                    unnamed_param_id += 1
                else:
                    data.append(bitstring.pack(param_type, params[param_name]))

    return bitstring.pack("uint:8, uint:8, uintle:16", CMD_MARKER, WRD_HEADER, lines) + data
def load_data(self, data):
    files = [entry_data for (entry_name, entry_data) in get_pak_files(data)]

    # There are always at least four files in a model pak.
    # The first three I don't know a lot about, and then
    # the GMO files come after that.
    if len(files) < 4:
        _LOGGER.error("Invalid model PAK. %d files found, but at least 4 needed." % len(files))
        return

    # The name pak contains a list of null-terminated names for
    # each of the models, stored in our standard pak format.
    name_pak = files[0]
    names = [entry_data.bytes.strip('\0') for (entry_name, entry_data) in get_pak_files(name_pak)]

    # Most of the model paks in SDR2 have a fourth unknown file before the models
    # start, so we'll just take everything from the back end and call it a day.
    models = files[-len(names):]

    # Now, we don't get file positions from the unpacker, so let's find those
    # and start filling out our internal list of GMO files.
    file_starts, file_ends = parse_pak_toc(data)
    model_starts = file_starts[-len(names):]

    for i, model in enumerate(models):
        # First of all, not all of the "models" present are actually GMO files.
        # It's rare, but there is the occasional other unknown format.
        # So let's make sure we have a GMO file.
        if not model[:GMO_MAGIC.len] == GMO_MAGIC:
            # print i, "Not a GMO."
            continue

        name = names[i]
        gmo = GmoFile(data = model)
        size = model.len / 8
        start = model_starts[i]

        self.__gmo_files.append({
            _NAME: name,
            _START: start,
            _SIZE: size,
            _DATA: gmo,
        })

    self.__data = BitStream(data)