Example #1
    def parse(cls,
              bstr=None,
              filename=None,
              bytes_input=None,
              file_input=None,
              offset_bytes=0,
              headers_only=False,
              recursive=True):
        """
        Parse an MP4 file or bytes into boxes

        :param bstr: The bitstring to parse
        :type bstr: bitstring.ConstBitStream
        :param filename: Filename of an mp4 file
        :type filename: str
        :param bytes_input: Bytes of an mp4 file
        :type bytes_input: bytes
        :param file_input: Filename or file object
        :type file_input: str, file
        :param offset_bytes: Start parsing at offset.
        :type offset_bytes: int
        :param headers_only: Ignore data and return just headers. Useful when data is cut short
        :type headers_only: bool
        :param recursive: Recursively load sub-boxes
        :type recursive: bool
        :return: BMFF Boxes or Headers
        """

        if filename:
            bstr = bs.ConstBitStream(filename=filename,
                                     offset=offset_bytes * 8)
        elif bytes_input:
            bstr = bs.ConstBitStream(bytes=bytes_input,
                                     offset=offset_bytes * 8)
        elif file_input:
            bstr = bs.ConstBitStream(auto=file_input, offset=offset_bytes * 8)

        log.debug("Starting parse")
        log.debug("Size is %d bits", bstr.len)

        while bstr.pos < bstr.len:
            log.debug("Byte pos before header: %d relative to (%d)",
                      bstr.bytepos, offset_bytes)
            log.debug("Reading header")
            header = cls.parse_header(bstr)
            log.debug("Header type: %s", header.box_type)
            log.debug("Byte pos after header: %d relative to (%d)",
                      bstr.bytepos, offset_bytes)

            if headers_only:
                yield header

                # move pointer to next header if possible
                try:
                    bstr.bytepos = header.start_pos + header.box_size
                except ValueError:
                    log.warning("Premature end of data")
                    raise
            else:
                yield cls.parse_box(bstr, header, recursive=recursive)
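A minimal usage sketch (assumed, not from the source): the generator above is a classmethod, so iterating over just the headers through a hypothetical containing class named Box might look like this.

# Hypothetical usage; "Box" and the file name are illustrative assumptions.
for header in Box.parse(filename="movie.mp4", headers_only=True):
    print(header.box_type, header.box_size)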
Example #2
def _message_to_bitarray(message: bytes) -> bitstring.ConstBitStream:
    """Encodes a message as a bitarray with length multiple of 5."""
    barr = bitstring.ConstBitStream(message)
    padding_len = 5 - (len(barr) % 5)
    if padding_len < 5:
        # The bitarray length has to be multiple of 5. If not, it is right-padded with zeros.
        barr = bitstring.ConstBitStream(
            bin="{}{}".format(barr.bin, '0' * padding_len))
    return barr
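A quick sketch of the padding behaviour described in the docstring (input assumed for illustration): a 3-byte message is 24 bits, so one zero bit is appended to reach 25, the next multiple of 5.

barr = _message_to_bitarray(b"abc")  # 24 bits of input
print(len(barr))                     # 25: right-padded with a single zero bit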
Example #3
 def testCBS(self):
     a = bitstring.ConstBitStream('0b11000')
     b = bitstring.ConstBitStream('0b11000')
     self.assertFalse(a is b)
 #        self.assertTrue(a._datastore is b._datastore)
     
     
     
     
Example #4
    def consume_subsignature(self, region):
        """
            Consume subsignature packets
            If a string is passed in as region, it is converted to a bitstream for you
        """
        if isinstance(region, string_types):
            if PY3:
                region = region.encode()
            region = bitstring.ConstBitStream(bytes=region)
        elif isinstance(region, binary_type):
            region = bitstring.ConstBitStream(bytes=region)

        self.subsignature_consumer.consume(self, region)
Example #5
    def __init__(self, path):
        self._header_raw = None
        self.header = None

        if isinstance(path, bytes):
            self._replay = bitstring.ConstBitStream(path)
        elif isinstance(path, str):
            with open(path, 'rb') as f:
                self._replay = bitstring.ConstBitStream(f.read())

        assert hasattr(self, '_replay')

        self._parse_meta()
        self._parse_header()
Example #6
def unpack_bit_packed_values(data_bytes, value_nbits, dtype):
    total_bit_count = len(data_bytes) * 8
    number_of_samples = total_bit_count // value_nbits
    number_of_unused_bits = total_bit_count % value_nbits

    # Note: We need to reverse the order of the bytes first, because bitstring reads from the MSB of the first byte.
    # Due to the packing format we therefore read in reverse, first skipping the remaining bits that do not contain any sample values.
    buffer = bytes(data_bytes[::-1])
    bit_string = bitstring.ConstBitStream(bytes=buffer)

    unused_bits = bit_string.read(number_of_unused_bits)
    unpacked_values = []

    for sample_index in range(number_of_samples):
        unpacked_values.append(bit_string.read(value_nbits))

    if dtype == 'uint':
        unpacked_values = [x.uint for x in unpacked_values]

    if dtype == 'intbe':
        # Note that because we have reversed the order of the bytes this is BE now rather than LE, as in the original format.
        unpacked_values = [x.intbe for x in unpacked_values]

    # Now we need to reverse the samples back to their original order.
    unpacked_values = unpacked_values[::-1]

    return unpacked_values
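A hedged usage sketch (input bytes assumed): two 16-bit samples packed into four bytes. Because the function reverses the byte order before reading, each sample comes back as a little-endian pair of the original bytes.

samples = unpack_bit_packed_values(b"\x01\x02\x03\x04", 16, 'uint')
print(samples)  # [513, 1027] == [0x0201, 0x0403]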
Example #7
def getbits_rom(f):
    # random rom file they gave me
    buff = bytearray()
    for b in f.read():
        # Reverse bits, swap nibbles
        buff += chr(munge(ord(b)))
    return bitstring.ConstBitStream(bytes=buff)
Example #8
def encode():
    st = bitstring.ConstBitStream(filename=sys.argv[2])
    f2 = open(sys.argv[3], 'w')
    first_code = True

    def write_b(b):
        nonlocal first_code
        nonlocal f2
        if first_code:
            f2.write(b)
            first_code = False
        else:
            f2.write(' ' + b)

    while True:
        try:
            a = st.read(18).uint
            b = num2word[a]
            write_b(b)
        except bitstring.ReadError:
            tail = st[st.pos:] + '0b1'
            if tail.len < 18:
                tail += bitstring.Bits(18 - tail.len)
            b = num2word[tail.uint]
            write_b(b)
            break
    f2.close()
Example #9
def bitarray_to_u5(barr):
    assert barr.len % 5 == 0
    ret = []
    s = bitstring.ConstBitStream(barr)
    while s.pos != s.len:
        ret.append(s.read(5).uint)
    return ret
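A small sketch (input assumed): ten bits split into two 5-bit groups.

print(bitarray_to_u5(bitstring.Bits('0b1111100001')))  # [31, 1]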
Example #10
    def __init__(self, user, record):
        md5 = hashlib.md5()
        md5.update(user.encode())
        md5.update(record)

        _hash = md5.hexdigest()

        try:
            _datetime = dt.datetime.strptime(record[0:10].decode(),
                                             '%y%m%d%H%M')
        except ValueError:
            _datetime = dt.datetime(2015, 1, 1)

        pressure_data = binascii.unhexlify(record[16:24])

        # blood pressure data is in 30 bits
        bs = bitstring.ConstBitStream(bytes=pressure_data)
        bs.pos += 2
        _pulse, _dia, _sys = bs.readlist(['uint:10, uint:10, uint:10'])

        _pp = _sys - _dia
        _map = _dia + _pp / 3
        super().__init__(hash=_hash,
                         datetime=_datetime,
                         sys=_sys,
                         dia=_dia,
                         pulse=_pulse,
                         pp=_pp,
                         map=_map)
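A minimal round-trip sketch of the 30-bit layout read above (values assumed): two padding bits followed by three 10-bit unsigned fields.

packed = bitstring.pack('pad:2, uint:10, uint:10, uint:10', 72, 80, 120)
rd = bitstring.ConstBitStream(packed)
rd.pos += 2
print(rd.readlist('uint:10, uint:10, uint:10'))  # [72, 80, 120]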
Example #11
    def __init__(self, data, offset, remaining=None):
        # okay, we need to calculate the size first
        self.size_in_bits = 0

        bs = bitstring.ConstBitStream(bytes=data)
        bs.bytepos = offset

        for field in self._struct:
            fmt, name, field_size = field
            # TODO: handle regular struct sizes later?
            # or merge back into Unpackable?

            if not isinstance(field_size, int):
                # look up the bit size from an earlier field
                field_size = self.__dict__[field_size]

            if fmt == 'UB':
                self.__dict__[name] = bs.read('uint:%d' % field_size)
            elif fmt == 'SB':
                self.__dict__[name] = bs.read('int:%d' % field_size)
            else:
                raise ValueError("unknown fmt " + fmt)

            self.size_in_bits += field_size

        bs.bytealign()
        self._size = bs.bytepos - offset
Example #12
def fixRIC(fileName, recordNumbers, correctRIC, recLen):
    numberRecords = os.stat(fileName).st_size / recLen
    fileHandle = bitstring.ConstBitStream(filename=fileName)
    newFileHandle = open(fileName + '.rw', 'wb')
    for recordCount in xrange(0, numberRecords):
        data = fileHandle.read(recLen * 8)
        if recordCount + 1 in recordNumbers and len(recordNumbers) > 0:
            dataStart = data[44 * 8:46 * 8].peek('uint:16')
            if debug:
                print 'Here is the data start: ' + str(dataStart)
                print 'Here is the RIC location: ' + str(data[
                    (dataStart + 8) * 8:(dataStart + 12) * 8].peek('int:32'))
            newdata = data[0:(dataStart + 8) * 8] + bitstring.pack(
                'int:32', correctRIC[0]) + data[(dataStart + 12) * 8:]
            newdata.tofile(newFileHandle)
            recordNumbers.pop(0)
            correctRIC.pop(0)
            if debug:
                print 'Here is how many bad records we have left: ' + str(
                    len(recordNumbers))
        else:
            data.tofile(newFileHandle)
    newFileHandle.close()
    if '.rw' in fileName:
        shutil.move(fileName + '.rw', fileName)
    return
Example #13
 def testConstBitStreamCopy(self):
     cbs = bitstring.ConstBitStream(100)
     cbs.pos = 50
     cbs_copy = copy.copy(cbs)
     self.assertEqual(cbs_copy.pos, 0)
     self.assertTrue(cbs._datastore is cbs_copy._datastore)
     self.assertTrue(cbs == cbs_copy)
Example #14
def fixSampleCnt(fileName, recordNumbers, correctNumbers, recLen):

    numberRecords = os.stat(fileName).st_size / recLen

    fileHandle = bitstring.ConstBitStream(filename=fileName)
    newFileHandle = open(fileName + '.rw', 'wb')

    for recordCount in xrange(0, numberRecords):
        data = fileHandle.read(recLen * 8)
        if recordCount + 1 in recordNumbers and len(recordNumbers) > 0:
            if debug:
                print 'Here is the sample count: ' + str(
                    data[30 * 8:32 * 8].peek('uint:16'))
            newdata = data[0:30 * 8] + bitstring.pack(
                'uint:16', correctNumbers[0]) + data[32 * 8:]
            newdata.tofile(newFileHandle)
            recordNumbers.pop(0)
            correctNumbers.pop(0)
            if debug:
                print 'Here is how many bad records we have left: ' + str(
                    len(recordNumbers))
        else:
            data.tofile(newFileHandle)
    newFileHandle.close()
    if '.rw' in fileName:
        shutil.move(fileName + '.rw', fileName)
    return
Example #15
def read_record_ETL4(f, pos):
    f = bitstring.ConstBitStream(filename=f)
    f.bytepos = pos * 2952
    r = f.readlist(
        '2*uint:36,uint:8,pad:28,uint:8,pad:28,4*uint:6,pad:12,15*uint:36,pad:1008,bytes:21888'
    )
    """
    print('Serial Data Number:', r[0])
    print('Serial Sheet Number:', r[1])
    print('JIS Code:', r[2])
    print('EBCDIC Code:', r[3])
    print('4 Character Code:', ''.join([t56s[c] for c in r[4:8]]))
    print('Evaluation of Individual Character Image:', r[8])
    print('Evaluation of Character Group:', r[9])
    print('Sample Position Y on Sheet:', r[10])
    print('Sample Position X on Sheet:', r[11])
    print('Male-Female Code:', r[12])
    print('Age of Writer:', r[13])
    print('Industry Classification Code:', r[14])
    print('Occupation Classification Code:', r[15])
    print('Sheet Gathering Date:', r[16])
    print('Scanning Date:', r[17])
    print('Number of X-Axis Sampling Points:', r[18])
    print('Number of Y-Axis Sampling Points:', r[19])
    print('Number of Levels of Pixel:', r[20])
    print('Magnification of Scanning Lens:', r[21])
    print('Serial Data Number (old):', r[22])
    """
    return r
Example #16
 def testAppendToBits(self):
     a = Bits(BitArray())
     with self.assertRaises(AttributeError):
         a.append('0b1')
     self.assertEqual(type(a), Bits)
     b = bitstring.ConstBitStream(bitstring.BitStream())
     self.assertEqual(type(b), bitstring.ConstBitStream)
Example #17
def main(source, destination):
    t56s = '0123456789[#@:>? ABCDEFGHI&.](<  JKLMNOPQR-$*);\'|/STUVWXYZ ,%="!'
    def T56(c):
        return t56s[c]
    
    with codecs.open('co59-utf8.txt', 'r', 'utf-8') as co59f:
        print(co59f)
        co59t = co59f.read()
    co59l = co59t.split()
    CO59 = {}
    for c in co59l:
        ch = c.split(':')
        co = ch[1].split(',')
        CO59[(int(co[0]),int(co[1]))] = ch[0]


    files = glob.glob(source)
    # filename = 'E:\kanji dataset\extract\ETL2\ETL2_1'
    filename = source
    #skip = 0
    f = bitstring.ConstBitStream(filename=filename)
    for skip in range(11420):
        f.pos = skip * 6 * 3660
        r = f.readlist('int:36,uint:6,pad:30,6*uint:6,6*uint:6,pad:24,2*uint:6,pad:180,bytes:2700') 
        # print(r[0], T56(r[1]), "".join(map(T56, r[2:8])), "".join(map(T56, r[8:14])), CO59[tuple(r[14:16])])
        iF = Image.frombytes('F', (60,60), r[16], 'bit', 6)
        iP = iF.convert('L')
        imagefilepath = destination+ '/%s' % CO59[tuple(r[14:16])]
        if not os.path.exists(imagefilepath):
            os.makedirs(imagefilepath)
        fn = '%s/{:d}.png'.format(r[0]) % imagefilepath
        iP.save(fn, 'PNG', bits=6)
        enhancer = ImageEnhance.Brightness(iP)
        iE = enhancer.enhance(4)
        iE.save(fn, 'PNG')
Example #18
 def last(self):
     self.pos = self.filesize - 188
     self.tsopen.seek(self.pos)
     self.bytes = self.tsopen.read(188)
     self.bits = bitstring.ConstBitStream(bytes=self.bytes, length=1504)
     self.packetnum = self.totalpackets
     tsdata = self.decodets()
     return tsdata    
Example #19
def decompress(inBytes, startOffset=0):
    # Define some useful constants.
    SEARCH_LOG2 = 8
    SEARCH_SIZE = 2**SEARCH_LOG2
    LOOKAHEAD_LOG2 = 4
    LOOKAHEAD_SIZE = 2**LOOKAHEAD_LOG2
    BIT_PASTCOPY = 0
    BIT_LITERAL = 1

    # Prepare to read the compressed bytes.
    inStream = bitstring.ConstBitStream(bytes=inBytes)
    inStream.bytepos = startOffset

    # Allocate memory for the decompression process.
    decompSize = inStream.read('uintle:16')
    decomp = bytearray([0x00] * decompSize)
    decompPos = 0
    window = bytearray([0x20] * SEARCH_SIZE)
    windowPos = 0xEF

    # Main decompression loop.
    while decompPos < decompSize:
        nextCommand = inStream.read('bool')

        if nextCommand == BIT_PASTCOPY:
            # 0: Pastcopy case.
            copySource = inStream.read(SEARCH_LOG2).uint
            copyLength = inStream.read(LOOKAHEAD_LOG2).uint
            copyLength += 2

            # Truncate copies that would exceed "decompSize" bytes.
            if (decompPos + copyLength) >= decompSize:
                copyLength = decompSize - decompPos

            for i in range(copyLength):
                decomp[decompPos] = window[copySource]
                decompPos += 1
                window[windowPos] = window[copySource]
                windowPos += 1
                windowPos %= SEARCH_SIZE
                copySource += 1
                copySource %= SEARCH_SIZE

        elif nextCommand == BIT_LITERAL:
            # 1: Literal case.
            literalByte = inStream.read('uint:8')
            decomp[decompPos] = literalByte
            decompPos += 1
            window[windowPos] = literalByte
            windowPos += 1
            windowPos %= SEARCH_SIZE

    # Calculate the end offset.
    inStream.bytealign()
    endOffset = inStream.bytepos

    # Return the decompressed data and end offset.
    return (decomp, endOffset)
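A hedged round-trip sketch (compressed input constructed by hand, not from the source): a 16-bit little-endian size of 3 followed by three literal commands, each a 1 bit and a raw byte.

blob = bitstring.pack('uintle:16', 3)
blob += bitstring.Bits('0b1, uint:8=65, 0b1, uint:8=66, 0b1, uint:8=67')
data, end_offset = decompress(blob.tobytes())
print(data, end_offset)  # bytearray(b'ABC') 6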
Example #20
def UnpackMessageFromSocket( sock ):
	zeroLenChunkMax = 2
	
	# Read the header bytes to get the messageID and length.
	headerBytes = (16+32) >> 3
	
	zeroLenChunkCount = 0
	chunks = []
	messageLen = 0
	while messageLen < headerBytes:
		# print headerBytes, messageLen, headerBytes - messageLen
		chunk = sock.recv( headerBytes - messageLen )
		if not chunk:
			zeroLenChunkCount += 1
			if zeroLenChunkCount < zeroLenChunkMax:
				continue
			raise RuntimeError( 'LLRP socket connection broken' )
		chunks.append( chunk )
		messageLen += len(chunk)
		zeroLenChunkCount = 0
	
	# Convert to a BitStream to get the message TypeCode and Length.
	s = bitstring.ConstBitStream( bytes=b''.join(chunks) )
	TypeCode = s.read('uintbe:16')
	TypeCode &= ((1<<10)-1)
	Length = s.read('uintbe:32')
	
	# print( 'UnpackMessageFromSocket: TypeCode={} Length={} {}'.format(TypeCode, Length, _messageClassFromTypeCode[TypeCode].__name__) )
	
	# Read the remaining message based on the Length.
	zeroLenChunkCount = 0
	while messageLen < Length:
		chunk = sock.recv( Length - messageLen )
		if not chunk:
			zeroLenChunkCount += 1
			if zeroLenChunkCount < zeroLenChunkMax:
				continue
			raise RuntimeError( 'LLRP socket connection broken' )
		chunks.append( chunk )
		messageLen += len(chunk)
		zeroLenChunkCount = 0
	
	# Convert the full message to a BitStream and parse it.
	s = bitstring.ConstBitStream( bytes=b''.join(chunks) )
	return _MessagePackUnpackLookup[TypeCode].unpack( s )
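A small sketch of the header layout this function expects (field values assumed): the type code sits in the low 10 bits of the first 16-bit word, followed by a 32-bit big-endian length.

hdr = bitstring.pack('uint:6, uint:10, uintbe:32', 1, 62, 10)
s = bitstring.ConstBitStream(bytes=hdr.tobytes())
type_code = s.read('uintbe:16') & ((1 << 10) - 1)
length = s.read('uintbe:32')
print(type_code, length)  # 62 10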
Example #21
def bitstream_from_file_object(file_input, begin_at_offset_in_bytes: int = 0):
    """
    :param file_input: opened file that contains CSME code binaries
                            (i.e. the input file provided by the user on the command line)
    :param begin_at_offset_in_bytes: where to fast-forward seek to in the file before attempting a search
    :return: a bitstring.ConstBitStream of the specified file's data, cued to the specified offset location
    """
    return bitstring.ConstBitStream(file_input,
                                    offset=begin_at_offset_in_bytes * 8)
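A hypothetical call (file name and offset are assumptions): open the image in binary mode and cue the stream 16 bytes in.

with open('csme_image.bin', 'rb') as f:
    stream = bitstream_from_file_object(f, begin_at_offset_in_bytes=16)
    print(stream.read('hex:32'))  # the four bytes at offset 16, as hex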
Example #22
File: parser.py Project: avishayp/noyad
 def receive(self):
     self.header = self.read(9)
     log.info('header: %s', self.header)
     parsed = bitstring.ConstBitStream(self.header).readlist(KP_MSG_HEADER)
     size = parsed[-1]
     self.payload = self.read(size * 5)
     log.info('payload: %s', self.payload)
     self.footer = self.read(4)
     log.info('footer: %s', self.footer)
Example #23
    def verify(self, msg, signature):
        hashedMsg = WinternitzPlusOTS.sha256Bytes(msg)
        hashedBitString = bitstring.ConstBitStream(hashedMsg)
        msgPubKey = []
        # Apply logic here

        if msgPubKey == self.publicKey:
            return True
        else:
            return False
Example #24
def reverse_bytewise(bitstream, dbg=False):
    result = []
    if dbg:
        print(bitstream.bin)
    for byte in bitstream.tobytes():
        if dbg:
            print(hex(byte))
        result.append(reverse_byte(byte))
    reverse_bytes = bitstring.ConstBitStream(bytes=result)
    return reverse_bytes
Example #25
def position_data_index(fn):
    """Given the filename of a position file, return the bit index of the first
    data record past the header.
    """
    hdr_end = "%%ENDHEADER\r\n"
    token = bstr.Bits(bytes=hdr_end.encode('ascii'))
    bits = bstr.ConstBitStream(filename=fn)
    bits.find(token)
    bits.read('bytes:%d' % len(hdr_end))
    return bits.pos
Example #26
    def __init__(self, filename, data):
        super(AppleSoft, self).__init__()

        self.filename = filename
        data = bitstring.ConstBitStream(data)

        # TODO: assert length is met
        self.length = data.read('uintle:16')

        self.lines = []
        self.program = {}
        last_line_number = -1
        last_memory = 0x801
        while data:
            next_memory, line_number = data.readlist('uintle:16, uintle:16')
            if not next_memory:
                break

            line = []
            bytes_read = 4
            while True:
                token = data.read('uint:8')
                bytes_read += 1
                if token == 0:
                    self.lines.append(line_number)
                    self.program[line_number] = ''.join(line)
                    break

                if token >= 0x80:
                    try:
                        line.append(' ' + TOKENS[token] + ' ')
                    except KeyError:
                        self.anomalies.append(
                            anomaly.Anomaly(
                                self, anomaly.CORRUPTION,
                                'Line number %d contains unexpected token: %02X'
                                % (line_number, token)))
                else:
                    line.append(chr(token))

            if last_memory + bytes_read != next_memory:
                self.anomalies.append(
                    anomaly.Anomaly(
                        self, anomaly.UNUSUAL, "%x + %x == %x != %x (gap %d)" %
                        (last_memory, bytes_read, last_memory + bytes_read,
                         next_memory, next_memory - last_memory - bytes_read)))

            if line_number <= last_line_number:
                self.anomalies.append(
                    anomaly.Anomaly(
                        self, anomaly.UNUSUAL, "%d <= %d: %s" %
                        (line_number, last_line_number, ''.join(line))))

            last_line_number = line_number
            last_memory = next_memory
Example #27
def decode(file_name):

    # Reading bits
    b = bs.ConstBitStream(filename=file_name)
    bitSequence = []
    try:
        print(chr(27) + "[2J ", end='')
        print('READING FILE ...')
        while True:
            bitSequence.append(b.read('uint:1'))
    except bs.ReadError:
        pass

    # Cutting padding
    padding = int("".join(str(x) for x in bitSequence[0:8]), 2)
    bitSequence = bitSequence[8:len(bitSequence) - padding + 1]

    code_length = 8
    i = 0
    root = HeapNode('NYT', 0)
    heap = [root]
    currentNode = root
    output = ''
    last_progress = -1

    while i <= len(bitSequence):

        progress = round(i * 100 / len(bitSequence))

        if progress > last_progress:
            last_progress = progress
            print(chr(27) + "[2J ", end='')
            print(f"PROGRESS = {round(i*100/len(bitSequence))}%")

        # Node is a leaf
        if currentNode.left is None and currentNode.right is None:
            # Add new letter
            if currentNode.char == 'NYT':
                char = int(''.join(map(str, bitSequence[i:i + code_length])),
                           2)
                i += code_length
            # Letter is in the heap
            else:
                char = currentNode.char
            add_to_heap(char, heap)
            currentNode = root
            output += chr(char)

        # Traversing tree looking for a leaf
        else:
            if i != len(bitSequence):
                bit = bitSequence[i]
                currentNode = currentNode.left if bit == 0 else currentNode.right
            i += 1
    return output
Example #28
    def load_entries_to_memory(self):
        file_name = 'raw_data/etl2_entries.obj'
        try:
            file_handler = open(file_name, 'rb')
            entries = pickle.load(file_handler)
            print('restored pickled etl2 data')
            return entries

        except:
            print('processing raw etl2 data')
            entries = []
            image_size = (60, 60)
            bits_per_pixel = 6
            for file_directory, num_items in self.files:
                file = bitstring.ConstBitStream(filename=file_directory)

                # loop through the items in each file
                for item_index in range(num_items):

                    file.pos = item_index * 6 * 3660
                    item_data = file.readlist(
                        'int:36,uint:6,pad:30,6*uint:6,6*uint:6,pad:24,2*uint:6,pad:180,bytes:2700'
                    )

                    # specifications about each item's data
                    # http://etlcdb.db.aist.go.jp/?page_id=1721
                    # 0 -> serial index
                    # 1 -> source, in T56
                    # 2:8 -> name of type of character, kanji, kana, in T56
                    # 8:14 -> name of font type, in T56
                    # 14:16 -> label
                    # 16 -> image bits
                    # print item_data[0], T56(r[1]), "".join(map(T56, item_data[2:8])), "".join(map(T56, r[8:14])), CO59[tuple(r[14:16])])

                    # save only the label & image
                    label = Etl2Dataset.CO59[tuple(item_data[14:16])]

                    # image is grayscale, use otsu's algorithm to binarize it
                    pil_image = Image.frombytes('F', image_size, item_data[16],
                                                'bit', bits_per_pixel)
                    # np_image = np.array(pil_image)
                    # global_threshold = filters.threshold_otsu(np_image)
                    # binarized_image = np_image > global_threshold
                    # fromarray '1' is buggy, convert array to 0 & 255 uint8,
                    # then build image with PIL as 'L' & convert to '1'
                    # pil_image = Image.fromarray((binarized_image * 255).astype(np.uint8), mode='L')

                    entries.append(
                        CharacterEntry(pil_image=pil_image, label=label))

            # save the data to file so we don't have to load it again
            file_handler = open(file_name, 'wb')
            pickle.dump(entries, file_handler)
            return entries
Example #29
 def __init__(self, irsb, data, max_inst, max_bytes, bytes_offset, opt_level=None,
                 traceflags=None, allow_lookback=None):
     super(GymratLifter, self).__init__(irsb, data, max_inst, max_bytes,
             bytes_offset, opt_level, traceflags, allow_lookback)
     self.logger = logging.getLogger('lifter')
     self.logger.setLevel(logging.DEBUG)
     if 'CData' in str(type(data)):
         thedata = "".join([chr(data[x]) for x in range(max_bytes)])
     else:
         thedata = data
     self.bitstrm = bitstring.ConstBitStream(bytes=thedata)
Example #30
 def testBaselineMemory(self):
     try:
         import pympler.asizeof.asizeof as size
     except ImportError:
         return
     # These values might be platform dependent, so don't fret too much.
     self.assertEqual(size(bitstring.ConstBitStream([0])), 64)
     self.assertEqual(size(bitstring.Bits([0])), 64)
     self.assertEqual(size(bitstring.BitStream([0])), 64)
     self.assertEqual(size(bitstring.BitArray([0])), 64)
     from bitstring.bitstore import ByteStore
     self.assertEqual(size(ByteStore(bytearray())), 100)