Example #1
def test_seek_delimiter_endline():
    f = BytesIO(b'123\n456\n789')

    # if at zero, stay at zero
    seek_delimiter(f, b'\n', 5)
    assert f.tell() == 0

    # choose the first block
    for bs in [1, 5, 100]:
        f.seek(1)
        seek_delimiter(f, b'\n', blocksize=bs)
        assert f.tell() == 4

    # handle long delimiters well, even with short blocksizes
    f = BytesIO(b'123abc456abc789')
    for bs in [1, 2, 3, 4, 5, 6, 10]:
        f.seek(1)
        seek_delimiter(f, b'abc', blocksize=bs)
        assert f.tell() == 6

    # End at the end
    f = BytesIO(b'123\n456')
    f.seek(5)
    seek_delimiter(f, b'\n', 5)
    assert f.tell() == 7
Example #2
class ZimCluster(object):
    """ A self contained zimcluster. """
    def __init__(self, blobs=(), *, compress=False):
        self.compress = compress
        self.relative_offset_table = [0]
        self.blobs_io = BytesIO()
        for blob in blobs:
            self.append(blob)
            
    def __len__(self):
        return len(self.relative_offset_table) - 1
    
    def raw_size(self):
        return self.blobs_io.tell()
    
    def append(self, blob):
        self.blobs_io.write(bytes(blob))
        self.relative_offset_table.append(self.blobs_io.tell())
    
    def raw_cluster(self):
        table_length = len(self.relative_offset_table)
        first_blob_offset = 4 * table_length
        offset_table = (first_blob_offset + x for x in self.relative_offset_table)
        format_string = "<"+str(table_length)+"I"
        output_table = struct.pack(format_string, *offset_table)
        self.blobs_io.seek(0)
        return output_table + self.blobs_io.read()

    def __bytes__(self):
        if self.compress:
            return bytes((4,)) + lzma.compress(self.raw_cluster())
        else:
            return bytes((1,)) + self.raw_cluster()
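
A quick usage sketch for the class above (a sketch only; it assumes the struct and lzma imports its methods rely on):

cluster = ZimCluster([b'first blob', b'second blob'])
assert len(cluster) == 2
raw = bytes(cluster)
# The first byte is the compression marker: 1 for uncompressed, 4 for LZMA.
assert raw[0:1] == b'\x01'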
Example #3
class TruncatedTailPipe(object):
    """
    Truncate the last `tail_size` bytes from the stream.
    """

    def __init__(self, output=None, tail_size=16):
        self.tail_size = tail_size
        self.output = output or BytesIO()
        self.buffer = BytesIO()

    def write(self, data):
        self.buffer.write(data)
        if self.buffer.tell() > self.tail_size:
            self._truncate_tail()

    def _truncate_tail(self):
        overflow_size = self.buffer.tell() - self.tail_size
        self.buffer.seek(0)
        self.output.write(self.buffer.read(overflow_size))
        remaining = self.buffer.read()
        self.buffer.seek(0)
        self.buffer.write(remaining)
        self.buffer.truncate()

    def close(self):
        return self.output
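
A short sketch of how the pipe behaves, assuming only BytesIO from the standard library:

pipe = TruncatedTailPipe(tail_size=4)
pipe.write(b'payload')
pipe.write(b'TAIL')
out = pipe.close()
assert out.getvalue() == b'payload'  # the trailing 4 bytes were held back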
Example #4
class FIFOBuffer(object):
    def __init__(self, source, buffer_size=4096):
        self._source = source
        self._buffer_size = buffer_size
        self._fifo = BytesIO()
        self._write_pos = 0
        self._read_pos = 0

    @property
    def size(self):
        return self._write_pos - self._read_pos

    def read_from_source(self, n):
        raise NotImplementedError

    def fill_buffer(self):
        self._fifo.seek(self._write_pos)
        data = self.read_from_source(self._buffer_size)
        self._fifo.write(data)
        self._write_pos = self._fifo.tell()
        return len(data) > 0

    def read(self, n=-1):
        while n is None or n < 0 or self.size < n:
            if not self.fill_buffer():
                break
        self._fifo.seek(self._read_pos)
        out = self._fifo.read(n)
        self._read_pos = self._fifo.tell()
        if self._read_pos > self._buffer_size:
            self._fifo = BytesIO(self._fifo.read())
            self._write_pos = self._fifo.tell()
            self._read_pos = 0
        return out
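
read_from_source() is left abstract above; a minimal concrete subclass might look like this (StreamFIFOBuffer is a hypothetical name, not from the original):

class StreamFIFOBuffer(FIFOBuffer):
    # Hypothetical subclass: the source is any file-like object.
    def read_from_source(self, n):
        return self._source.read(n)

fifo = StreamFIFOBuffer(BytesIO(b'abcdefgh'))
assert fifo.read(3) == b'abc'   # reads preserve FIFO order
assert fifo.read(5) == b'defgh'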
Example #5
def dump_message(file, headers, props=None, content=None):
    msg = Message()
    for (name, value) in headers:
        msg[name] = value
    payload = BytesIO()
    
    if props is not None:
        start = payload.tell()
        for (key, value) in props.items():
            payload.write("K {}\n".format(len(key)).encode("ascii"))
            payload.writelines((key.encode("ascii"), b"\n"))
            payload.write("V {}\n".format(len(value)).encode("ascii"))
            payload.writelines((value.encode("ascii"), b"\n"))
        payload.write(b"PROPS-END\n")
        
        msg["Prop-content-length"] = format(payload.tell() - start)
    
    if content is not None:
        msg["Text-content-length"] = format(len(content))
        payload.write(content)
    
    if props is not None or content is not None:
        payload = payload.getvalue()
        msg["Content-length"] = format(len(payload))
        
        # Workaround for Python issue 18324, "set_payload does not handle
        # binary payloads correctly", http://bugs.python.org/issue18324
        msg.set_payload(payload.decode("ascii", "surrogateescape"))
    
    BytesGenerator(file, mangle_from_=False).flatten(msg)
Example #6
 def make_binary(in_value):
     # Build the serialization tree and the scalar-value byte string.
     local_value_map = {}
     local_tree = _SerializeNode._make_node(
         local_value_map, in_value)
     local_scalar_binary = _SerializeNode._make_scalar_binary(
         local_value_map.values())
     # Write the binarc header to the stream.
     local_stream = BytesIO()
     local_stream.write(
         struct.pack(
             '<I',
             # 'RooT'
             (ord('R') << 24) |
             (ord('o') << 16) |
             (ord('o') <<  8) |
             (ord('T'))))
     assert local_stream.tell() % 4 == 0
     # Write the tag tree to the stream.
     local_scalar_offset = local_tree._initialize_container_offset(
         local_stream.tell() // 4 + 1)
     local_scalar_offset = ((local_scalar_offset + 1) // 2) * 2
     local_tree._write_node_tag(local_stream, local_scalar_offset)
     if isinstance(local_tree._value, tuple):
         local_tree._write_tag_tree(local_stream, local_scalar_offset)
     # Write the scalar-value byte string to the stream.
     local_stream.write(
         struct.pack(
             str(local_scalar_offset * 4 - local_stream.tell()) + 'x'))
     local_stream.write(local_scalar_binary)
     return local_stream.getvalue()
Example #7
class ContainerWriter(object):
    def __init__(self, fp, schema, sync_marker=None):
        self.writer = Writer(schema)
        self.fp = fp
        self.sync_marker = sync_marker or os.urandom(16)
        self.header_written = sync_marker is not None

        self.records = 0
        self.buffer = BytesIO()

    def write_header(self):
        assert not self.header_written, "Header is already written once"

        Writer(HEADER_SCHEMA).write(self.fp, {
            "magic": b"Obj\x01",
            "meta": {
                "avro.schema": json.dumps(self.schema.json).encode("utf8"),
                "avro.codec": b"null"
            },
            "sync": self.sync_marker
        })

        self.header_written = True

    def write(self, message):
        self.writer.write(self.buffer, message)
        self.records += 1

        if self.buffer.tell() > 1024 ** 2:
            self.flush()

    def flush(self):
        if not self.header_written:
            self.write_header()
            self.header_written = True

        if not self.records:
            return

        write_long(self.fp, self.records)
        write_long(self.fp, self.buffer.tell())
        self.fp.write(self.buffer.getbuffer())
        self.fp.write(self.sync_marker)
        self.fp.flush()

        self.records = 0
        self.buffer = BytesIO()

    @property
    def schema(self):
        """Returns the :class:`avrolight.schema.Schema` instance that this writer uses."""
        return self.writer.schema

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.flush()

    close = flush
Example #8
class ChunkTest(unittest.TestCase):
    def setUp(self):
        self.fp = BytesIO()
        sig = 0x01020304
        self.fp.write(struct.pack('<L', sig))
        long = 10
        self.fp.write(struct.pack('<L', long))
        short = 5
        self.fp.write(struct.pack('<H', short))
        self.fp.seek(0)

        self.fake_fp = BytesIO()
        sig = 0x04030201
        self.fake_fp.write(struct.pack('<L', sig))
        self.fake_fp.seek(0)

    def test_match(self):
        self.assertTrue(TwoFieldChunk.matches(self.fp))
        self.assertEqual(self.fp.tell(), 4)

        self.assertFalse(TwoFieldChunk.matches(self.fake_fp))
        self.assertEqual(self.fake_fp.tell(), 0)

    def test_populate(self):
        self.assertTrue(TwoFieldChunk.matches(self.fp))
        c = TwoFieldChunk(self.fp, None)
        c.populate()
        self.assertEqual(c.long, 10)
        self.assertEqual(c.short, 5)

    def tearDown(self):
        self.fp.close()
        self.fake_fp.close()
Example #9
class EchoLine(Connection):  # {{{

    bye_after_echo = False

    def connection_ready(self):
        self.rbuf = BytesIO()
        self.set_state(READ, self.read_line)

    def read_line(self, event):
        data = self.recv(1)
        if data:
            self.rbuf.write(data)
            if b"\n" == data:
                if self.rbuf.tell() < 3:
                    # Empty line
                    self.rbuf = BytesIO(b"bye" + self.rbuf.getvalue())
                    self.bye_after_echo = True
                self.set_state(WRITE, self.echo)
                self.rbuf.seek(0)

    def echo(self, event):
        pos = self.rbuf.tell()
        self.rbuf.seek(0, os.SEEK_END)
        left = self.rbuf.tell() - pos
        self.rbuf.seek(pos)
        sent = self.send(self.rbuf.read(512))
        if sent == left:
            self.rbuf = BytesIO()
            self.set_state(READ, self.read_line)
            if self.bye_after_echo:
                self.ready = False
        else:
            self.rbuf.seek(pos + sent)
Example #10
class Buffer(object):

    chunk_size = 2 ** 14

    def __init__(self):
        self.buffer = BytesIO()
        self.target = None
        self.buffered = True

    def attachTarget(self, target):
        self.target = target
        task.cooperate(self._gen_data())

    def _gen_data(self):
        current = self.buffer.tell()
        self.buffer.seek(0, 0)
        data = self.buffer.read(self.chunk_size)
        self.last = len(data)
        self.buffer.seek(current)
        while data:
            yield
            self.target.write(data)
            current = self.buffer.tell()
            self.buffer.seek(self.last)
            data = self.buffer.read(self.chunk_size)
            self.last += len(data)
            self.buffer.seek(current)
        self.buffered = False

    def write(self, data):
        if self.buffered:
            self.buffer.write(data)
        else:
            self.target.write(data)
Example #11
def unbind(filedata):
    """This is old code I didn't bother refactoring, sorry for the mess"""
    binded = BytesIO(filedata)

    binded.seek(-4, os.SEEK_END)
    indexsize = binded.read(4)
    indexsize = struct.unpack('<I', indexsize)[0]

    binded.seek(0 - (indexsize + 4), os.SEEK_END)
    endofdata = binded.tell()

    indexdata = binded.read(indexsize - 1)

    binded.seek(endofdata, os.SEEK_SET)

    files = []
    for line in reversed(indexdata.splitlines()):
        file = line.split(b'/')
        if (file[0] != b"__END_OF_SERIES_OF_BINDED_FILES__"):
            fstart = int(file[1], 10)
            fend = binded.tell()
            fsize = (fend - fstart)
            binded.seek(-fsize, os.SEEK_CUR)
            buffer = binded.read(fsize + 1)
            name = file[0].decode('utf-8') + getfileext(buffer)
            if (fstart > 0):
                binded.seek(fstart - 1, os.SEEK_SET)
            files.append((name, buffer))
    return files
Example #12
class ChunkBuffer:
    BUFFER_SIZE = 1 * 1024 * 1024

    def __init__(self, key, chunker_params=CHUNKER_PARAMS):
        self.buffer = BytesIO()
        self.packer = msgpack.Packer(unicode_errors='surrogateescape')
        self.chunks = []
        self.key = key
        self.chunker = Chunker(self.key.chunk_seed, *chunker_params)

    def add(self, item):
        self.buffer.write(self.packer.pack(StableDict(item)))
        if self.is_full():
            self.flush()

    def write_chunk(self, chunk):
        raise NotImplementedError

    def flush(self, flush=False):
        if self.buffer.tell() == 0:
            return
        self.buffer.seek(0)
        chunks = list(bytes(s) for s in self.chunker.chunkify(self.buffer))
        self.buffer.seek(0)
        self.buffer.truncate(0)
        # Leave the last partial chunk in the buffer unless flush is True
        end = None if flush or len(chunks) == 1 else -1
        for chunk in chunks[:end]:
            self.chunks.append(self.write_chunk(chunk))
        if end == -1:
            self.buffer.write(chunks[-1])

    def is_full(self):
        return self.buffer.tell() > self.BUFFER_SIZE
Example #13
 def handle_write(self):
     '''
     @see: dispatcher.handle_write
     '''
     assert self._writeq, 'Nothing to write'
     
     data, close = BytesIO(), False
     while self._writeq and data.tell() < self.bufferSize:
         content = self._writeq.popleft()
         if content is None:
             if self.close_connection: close = True
             break
         if isinstance(content, (bytes, memoryview)): data.write(content)
         elif isinstance(content, IInputStream):
             assert isinstance(content, IInputStream)
             byts = content.read(self.bufferSize - data.tell())
             if byts == b'':
                 if isinstance(content, IClosable): content.close()
                 continue
             data.write(byts)
             self._writeq.appendleft(content)
         else:
             while data.tell() < self.bufferSize:
                 try: byts = next(content)
                 except StopIteration: break
                 data.write(byts)
     
     sent = self.send(data.getbuffer())
     
     if close: self.close()
     elif sent < data.tell():
         self._writeq.appendleft(data.getbuffer()[sent:])
Example #14
    def __init__(self, strings=()):
        self.strings = OrderedDict((s, 0) for s in strings)

        self.records = []
        offset = 0
        buf = BytesIO()
        for key in tuple(self.strings.iterkeys()):
            utf8 = utf8_text(key[:self.MAX_STRING_LENGTH])
            l = len(utf8)
            sz_bytes = encint(l)
            raw = sz_bytes + utf8
            if 0xfbf8 - buf.tell() < 6 + len(raw):
                # Records in PDB files cannot be larger than 0x10000, so we
                # stop well before that.
                pad = 0xfbf8 - buf.tell()
                buf.write(b'\0' * pad)
                self.records.append(buf.getvalue())
                buf.seek(0), buf.truncate(0)
                offset = len(self.records) * 0x10000
            buf.write(raw)
            self.strings[key] = offset
            offset += len(raw)

        val = buf.getvalue()
        if val:
            self.records.append(align_block(val))
Example #15
    def test_no_preload(self):
        fp = BytesIO(b'foo')

        r = HTTPResponse(fp, preload_content=False)

        assert fp.tell() == 0
        assert r.data == b'foo'
        assert fp.tell() == len(b'foo')
Example #16
    def test_no_preload(self):
        fp = BytesIO(b'foo')

        r = HTTPResponse(fp, preload_content=False)

        self.assertEqual(fp.tell(), 0)
        self.assertEqual(r.data, b'foo')
        self.assertEqual(fp.tell(), len(b'foo'))
Example #17
    def write(self, file, encrypt=True):
        if encrypt:
            encrypted_file = file
            file = BytesIO()

        file.write(b'T6RP')
        file.write(pack('<HBB', self.version, self.character, self.rank))

        checksum_offset = file.tell()
        file.seek(4, 1) # For checksum
        file.write(pack('<BBB', self.unknown1, self.unknown2, self.key))

        file.write(pack('<B', self.unknown3))

        #TODO: find a more elegant method.
        n = 9 - len(self.date)
        file.write(self.date)
        file.write('\0' * n)
        n = 9 - len(self.name)
        file.write(self.name)
        file.write('\0' * n)

        file.write(pack('<HIIfI', self.unknown4, self.score, self.unknown5, self.slowdown, self.unknown6))

        stages_offsets_offset = file.tell()
        file.seek(7*4, 1) # Skip the stages offsets.

        stages_offsets = []
        for level in self.levels:
            if not level:
                stages_offsets.append(0)
                continue

            stages_offsets.append(file.tell())
            file.write(pack('<IHHBbbBI', level.score, level.random_seed,
                            level.point_items, level.power, level.lives,
                            level.bombs, level.difficulty, level.unknown))

            for time, keys, unknown in level.keys:
                file.write(pack('<IHH', time, keys, unknown))

            file.write(pack('<IHH', 9999999, 0, 0))

        file.seek(stages_offsets_offset)
        file.write(pack('<7I', *stages_offsets))

        # Write checksum
        file.seek(15)
        data = file.read()
        checksum = (sum(ord(c) for c in data) + 0x3f000318 + self.key) & 0xffffffff
        file.seek(checksum_offset)
        file.write(pack('<I', checksum))

        # Encrypt
        if encrypt:
            file.seek(0)
            encrypted_file.write(file.read(15))
            encrypted_file.write(b''.join(chr((ord(c) + self.key + 7*i) & 0xff) for i, c in enumerate(file.read())))
Example #18
def test_read_write_json(data):
    stream = BytesIO()
    total = write_json(stream, data)
    assert stream.tell() == total
    stream.seek(0)
    length = read_i64(stream)
    assert stream.tell() == total - length
    assert json.loads(stream.read(length).decode('utf-8')) == data
    stream.seek(0)
    assert read_json(stream) == data
Example #19
    def test_SDL_RWSeekTell(self):
        data = byteify("A Teststring of length 25", "utf-8")
        buf = BytesIO(data)
        rw = rwops.rw_from_object(buf)
        self.assertIsInstance(rw, rwops.SDL_RWops)

        pos = rwops.SDL_RWseek(rw, 0, rwops.RW_SEEK_END)
        self.assertTrue(pos == buf.tell() == len(data))
        pos = rwops.SDL_RWseek(rw, 0, rwops.RW_SEEK_SET)
        self.assertTrue(pos == buf.tell() == 0)

        pos = rwops.SDL_RWseek(rw, 15, rwops.RW_SEEK_CUR)
        self.assertTrue(pos == buf.tell() == 15)
        pos = rwops.SDL_RWseek(rw, -3, rwops.RW_SEEK_CUR)
        self.assertTrue(pos == buf.tell() == 12)
        pos = rwops.SDL_RWseek(rw, 7, rwops.RW_SEEK_CUR)
        self.assertTrue(pos == buf.tell() == 19)

        pos = rwops.SDL_RWseek(rw, -11, rwops.RW_SEEK_END)
        self.assertTrue(pos == buf.tell() == 14)

        pos = rwops.SDL_RWseek(rw, 8, rwops.RW_SEEK_SET)
        self.assertTrue(pos == buf.tell() == 8)

        pos = rwops.SDL_RWseek(rw, -2, rwops.RW_SEEK_SET)
        self.assertEqual(pos, -1)
        self.assertTrue(buf.tell() == 8)
        pos = rwops.SDL_RWseek(rw, 12, rwops.RW_SEEK_END)
        self.assertTrue(pos == buf.tell() == len(data) + 12)
Example #20
class Stream(Dictionary):
    def __init__(self, filter=None, **items):
        # (Streams are always indirectly referenced)
        self._data = BytesIO()
        try:
            self.filter = FilterPipeline(filter)
        except TypeError:
            self.filter = filter or PassThrough()
        super().__init__(indirect=True, **items)
        self._coder = None

    def direct_bytes(self, document):
        out = bytearray()
        self.reset()
        if not isinstance(self.filter, PassThrough):
            self['Filter'] = self.filter.name
            if self.filter.params:
                self['DecodeParms'] = self.filter.params
        if 'Length' in self:
            self['Length'].delete(document)
        assert self._data.tell() == self._data.seek(0, SEEK_END)
        self['Length'] = Integer(self._data.tell())
        out += super().direct_bytes(document)
        out += b'\nstream\n'
        out += self._data.getvalue()
        out += b'\nendstream'
        return out

    def read(self, n=-1):
        try:
            return self._coder.read(n)
        except AttributeError:
            self._data.seek(0)
            self._coder = self.filter.decoder(self._data)
            return self.read(n)

    def write(self, b, **kwargs):
        try:
            return self._coder.write(b)
        except AttributeError:
            self._data.seek(0)
            self._coder = self.filter.encoder(self._data, **kwargs)
            return self.write(b)

    def write_raw(self, b):
        return self._data.write(b)

    def reset(self):
        if self._coder:
            self._coder.close()
            self._coder = None

    def __getattr__(self, name):
        # almost as good as inheriting from BytesIO (which is not possible)
        return getattr(self._data, name)
Example #21
def extract_chunks(blob):
    """Splits the blob into chucks grouped by kind."""
    chunks = []
    stream = BytesIO(blob.bytes)
    current_pos = stream.tell()
    stream.seek(0, 2)
    length = stream.tell()
    stream.seek(current_pos, 0)
    while stream.tell() < length:
        chunks.append(read_chunk(stream))

    return chunks
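
The tell/seek/tell sequence above is the usual way to get a stream's length without consuming it; in isolation the idiom looks like this:

stream = BytesIO(b'some payload')
pos = stream.tell()   # remember the current position
stream.seek(0, 2)     # 2 == os.SEEK_END
length = stream.tell()
stream.seek(pos)      # restore the position
assert length == 12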
Example #22
def test_read_write_i64():
    stream = BytesIO()
    write_i64(stream, 10)
    assert stream.tell() == 8
    stream.seek(0)
    assert read_i64(stream) == 10
    write_i64(stream, 11, 12, 13)
    assert stream.tell() == 32
    stream.seek(8)
    assert read_i64(stream, 2) == (11, 12)
    assert read_i64(stream, 1) == (13,)
    assert read_i64(stream, 0) == ()
Example #23
        def test_dict_uint32_to_set_of_uint32s(self):
            dict_uint32_to_set_of_uint32s_value = { sys.maxint: set([ sys.maxint-1, 1, sys.maxint, 0 ]), 32: set([ 16, 8, 32, 64 ]), }

            f = BytesIO()
            write_dict_uint32_to_set_of_uint32s(f, dict_uint32_to_set_of_uint32s_value)
            write_offset = f.tell()

            f.seek(0, os.SEEK_SET)
            dict_uint32_to_set_of_uint32s_value2 = read_dict_uint32_to_set_of_uint32s(f)
            read_offset = f.tell()

            self.assertEqual(dict_uint32_to_set_of_uint32s_value, dict_uint32_to_set_of_uint32s_value2)
            self.assertEqual(write_offset, read_offset)
Example #24
        def test_dict_uint32_to_list_of_uint32s(self):
            test_value = { sys.maxint: [ sys.maxint-1, 1, sys.maxint, 0 ], 32: [ 16, 8, 32, 64 ], }

            f = BytesIO()
            write_dict_uint32_to_list_of_uint32s(f, test_value)
            write_offset = f.tell()

            f.seek(0, os.SEEK_SET)
            test_value2 = read_dict_uint32_to_list_of_uint32s(f)
            read_offset = f.tell()

            self.assertEqual(test_value, test_value2)
            self.assertEqual(write_offset, read_offset)
Example #25
        def test_set_of_uint32s(self):
            test_value = set(random.randint(0, sys.maxint) for v in range(random.randint(15, 30)))

            f = BytesIO()
            write_set_of_uint32s(f, test_value)
            write_offset = f.tell()

            f.seek(0, os.SEEK_SET)
            test_value2 = read_set_of_uint32s(f)
            read_offset = f.tell()

            self.assertEqual(test_value, test_value2)
            self.assertEqual(write_offset, read_offset)
Example #26
def main(fp):
    infont = False
    font_name = ''
    font_ids = set()
    try:
        for pos, line in enumerate(fp, start=1):
            line = line.strip()
            if line.startswith('#ifdef '):
                font_name = line.split(' ', 1)[1].split('_')[-1].lower()
                w, h = [int(x) for x in font_name.split('x')]
                font_id = (w, h)
                if font_id not in font_ids:
                    print('Font', font_name)
                    font_ids.add(font_id)
                    continue
                break
            if font_name:
                if line.startswith('__UG_FONT_DATA'):
                    infont = True
                    size = 0
                    io = BytesIO()
                    continue
            if infont:
                if line.startswith('{'):
                    end = line.index('}')
                    line = line[1:end]
                    data = bytes([int(b, 16) for b in line.split(',')])
                    data = rotate(data, w, h)
                    if not size:
                        size = len(data)
                    elif size != len(data):
                        raise RuntimeError('Incoherent size')
                    io.write(data)
            if line.startswith('#endif'):
                infont = False
                if not io:
                    font_name = ''
                    continue
                pos = io.tell()
                if pos:
                    with open('font%dx%d.bin' % (w, h), 'wb') as fp:
                        fp.write(bytes([w, h]))
                        fp.write(io.getvalue())
                print('Size: %d' % io.tell())
                io = None
                font_name = ''
    except Exception as ex:
        # from traceback import print_exc
        # print_exc(chain=False)
        print(ex)
        exit(0)
Example #27
class Buffer(object):
    def __init__(self):
        self.data = BytesIO()
        self.readonly = False

    def __len__(self):
        pos = self.data.tell()
        self.data.seek(0, os.SEEK_END)
        size = self.data.tell()
        self.data.seek(pos, os.SEEK_SET)
        return size

    def getData(self):
        return self.data.getvalue()

    def setReadOnly(self, readonly):
        self.readonly = readonly

    def getPos(self):
        return self.data.tell()

    def setPos(self, pos):
        self.data.seek(pos, os.SEEK_SET)

    def rewind(self):
        self.data.seek(0, os.SEEK_SET)

    def skip(self, count):
        assert not self.readonly
        self.data.seek(count, os.SEEK_CUR)

    def write(self, buf):
        assert not self.readonly
        self.data.write(buf)

    def writeByte(self, val):
        assert not self.readonly
        self.data.write(struct.pack("<B", val))

    def writeInt(self, val):
        assert not self.readonly
        self.data.write(struct.pack("<I", val))

    def read(self, count):
        return self.data.read(count)

    def readByte(self):
        return struct.unpack("<B", self.data.read(1))[0]

    def readInt(self):
        return struct.unpack("<I", self.data.read(4))[0]
Example #28
    def extract_chunks(cls, blob):
        chunks = OrderedDict()
        stream = BytesIO(blob.bytes)
        current_pos = stream.tell()
        stream.seek(0, 2)
        length = stream.tell()
        stream.seek(current_pos, 0)
        while stream.tell() < length:
            chunk = cls.read_chunk(stream)
            if not chunks.get(chunk.id):
                chunks[chunk.id] = []
            chunks[chunk.id].append(chunk)

        return chunks
Example #29
 def test_header_read_restore(self):
     # Test that reading a header restores the file position
     trk_fname = DATA['simple_trk_fname']
     bio = BytesIO()
     bio.write(b'Along my very merry way')
     hdr_pos = bio.tell()
     hdr_from_fname = TrkFile._read_header(trk_fname)
     with open(trk_fname, 'rb') as fobj:
         bio.write(fobj.read())
     bio.seek(hdr_pos)
     # Check header is as expected
     hdr_from_fname['_offset_data'] += hdr_pos  # Correct for start position
     assert_arr_dict_equal(TrkFile._read_header(bio), hdr_from_fname)
     # Check fileobject file position has not changed
     assert_equal(bio.tell(), hdr_pos)
Example #30
	def _readfromfp(self, fp):
		self._headers = fp.read(10).decode('utf-8')
		if self._msg_version_protocol == self._headers[:1]:
			self._msg_length = int(self._headers[2:10], base = 16)
			request = BytesIO()
			request.write(fp.read(self._msg_length))
			if request.tell() == 0:
				_logger.info(_("Error reading socket. Connection closed host:%s port:%s") % (self.connection.getpeername()))
			elif request.tell() != self._msg_length:
				_logger.info(_("Error reading socket host:%s port:%s") % (self.connection.getpeername()))
				#request.truncate()
				return request
			else:
				request.seek(0,0)
				return self._ctx[self._msg_content_type].load(request)
Example #31
class PyPyTwistedSandboxProtocol(ProcessProtocol, object):
    """A twisted version of pypy's sandlib.SandboxedProc"""
    def __init__(self, ended_deferred=None, timelimit=None, **kwargs):
        self.ended_deferred = ended_deferred

        self.__error = StringIO()

        self.exited = False
        if timelimit:

            def timesup():
                if not self.exited:
                    log.msg("Time limit reached. aborting.")
                    self.abort()

            reactor.callLater(timelimit, timesup)

        self.__instream = BytesIO()

        # This is a workaround for pypy's unmarshaller behavior. Since we get
        # strings in blocks from the twisted reactor, we may not have a
        # complete request in one call to outReceived(). However, the pypy
        # unmarshaler doesn't always throw errors when it requests more data
        # than is available, but rather silently returns the data it has. For
        # example, to unmarshal a length 10000 string, it will call
        # self.__instream.read(10000), but if less than that much of the string
        # was given in that call, it doesn't notice and just returns the
        # truncated string. This also means the next call with the rest of the
        # string will error because it is an invalid marshal request.
        #
        # So, we modify the read() method of this stringio object here so that
        # it raises EOFError in the event that not enough stream is available
        # to fulfill the request. That exception is caught in outReceived() and
        # the data is saved for the next call, where the new data is appended
        # and the marshal is restarted.
        oldread = self.__instream.read

        def newread(n):
            pos = self.__instream.tell()
            self.__instream.seek(0, 2)
            endpos = self.__instream.tell()
            self.__instream.seek(pos)
            if n > endpos - pos:
                raise EOFError("Not enough string to fulfill read request")
            return oldread(n)

        self.__instream.read = newread

    def connectionMade(self):
        # Because sandlib.write_exception() calls write() and flush() and we'd
        # like to be able to just pass the transport object
        self.transport.flush = lambda: None

    def errReceived(self, text):
        self.__error.write(text)

    @defer.inlineCallbacks
    def outReceived(self, text):
        self.__instream.write(text)
        #self.__instream.seek(0,2)
        #log.msg("Received {0} bytes of input (total {1}). Unmarshalling...".format(len(text), self.__instream.tell()))
        self.__instream.seek(0)
        try:
            fname = sandlib.marshal.load(self.__instream)
            args = sandlib.marshal.load(self.__instream)
        except EOFError as e:
            #log.msg("EOFError unmarshalling args ({0}). Deferring until we get more data".format(e))
            self.__instream.seek(0, 2)
            return
        except Exception as e:
            log.msg(traceback.format_exc())
            self.abort()
            return
        else:
            self.__instream.truncate(0)

        #log.msg("unmarshal successful. Sandbox func call: {0}{1!r}".format(fname, sandlib.shortrepr(args)))
        try:
            retval = self.handle_message(fname, *args)
            if isinstance(retval, defer.Deferred):
                answer, resulttype = (yield retval)
            else:
                answer, resulttype = retval
        except Exception as e:
            #log.msg("Raise exception: {1}, {0}".format(e, e.__class__.__name__))
            tb = sys.exc_info()[2]
            sandlib.write_exception(self.transport, e, tb)
        else:
            if not self.exited:
                #log.msg("Return: {0}".format(sandlib.shortrepr(answer)))
                sandlib.write_message(self.transport,
                                      0)  # error code - 0 for ok
                sandlib.write_message(self.transport, answer, resulttype)

    def abort(self):
        """Kill the process and bail out"""
        self.transport.loseConnection()
        if not self.exited:
            self.transport.signalProcess("KILL")
        if self.ended_deferred:
            self.ended_deferred.callback("Process aborted")
            self.ended_deferred = None

    def processExited(self, status):
        self.exited = True
        self.transport.loseConnection()

    def processEnded(self, reason):
        if self.ended_deferred:
            e = self.__error.getvalue()
            if e:
                self.ended_deferred.callback(e)
            else:
                self.ended_deferred.callback(
                    "Process exited with code {0}".format(
                        reason.value.exitCode))
            self.ended_deferred = None

    def handle_message(self, fnname, *args):
        if '__' in fnname:
            log.msg("Was going to exec {0} but it is unsafe".format(fnname))
            raise ValueError("unsafe fnname")
        try:
            handler = getattr(self, 'do_' + fnname.replace('.', '__'))
        except AttributeError:
            log.msg("Tried to exec {0} but no handler exists".format(fnname))
            raise RuntimeError("no handler for this function")
        resulttype = getattr(handler, 'resulttype', None)
        return handler(*args), resulttype
Example #32
 def test_read_uint32_returns_a_number(self):
     io = BytesIO(codecs.decode('DEADBEEF' + self.padding, 'hex_codec'))
     self.assertEqual(parser.read_size(io), 0xDEADBEEF)
     self.assertEqual(io.tell(), 4)
Example #33
class MultipartPart(object):

    def __init__(self, buffer_size=2 ** 16, memfile_limit=2 ** 18,
                 charset='latin1'):
        self.headerlist = []
        self.headers = None
        self.file = False
        self.size = 0
        self._buf = tob('')
        self.disposition, self.name, self.filename = None, None, None
        self.content_type, self.charset = None, charset
        self.memfile_limit = memfile_limit
        self.buffer_size = buffer_size

    def feed(self, line, nl=''):
        if self.file:
            return self.write_body(line, nl)
        return self.write_header(line, nl)

    def write_header(self, line, nl):
        line = line.decode(self.charset or 'latin1')
        if not nl:
            raise MultipartError('Unexpected end of line in header.')
        if not line.strip():  # blank line -> end of header segment
            self.finish_header()
        elif line[0] in ' \t' and self.headerlist:
            name, value = self.headerlist.pop()
            self.headerlist.append((name, value + line.strip()))
        else:
            if ':' not in line:
                raise MultipartError("Syntax error in header: No colon.")
            name, value = line.split(':', 1)
            self.headerlist.append((name.strip(), value.strip()))

    def write_body(self, line, nl):
        if not line and not nl:
            return  # This does not even flush the buffer
        if self.content_transfer_encoding and not nl:
            raise MultipartError('Line too long on transfer_encoded chunk.')
        if self.content_transfer_encoding == 'quoted-printable':
            if line.endswith(tob('=')):
                nl = tob('')
            line = quopri.decodestring(line)
        elif self.content_transfer_encoding == 'base64':
            line, nl = binascii.a2b_base64(line), tob('')
        self.size += len(line) + len(self._buf)
        self.file.write(self._buf + line)
        self._buf = nl
        if self.content_length > 0 and self.size > self.content_length:
            raise MultipartError('Size of body exceeds Content-Length header.')
        if self.size > self.memfile_limit and isinstance(self.file, BytesIO):
            self.file, old = TemporaryFile(mode='w+b'), self.file
            old.seek(0)
            copy_file(old, self.file, self.size, self.buffer_size)

    def finish_header(self):
        self.file = BytesIO()
        self.headers = Headers(self.headerlist)
        cdis = self.headers.get('Content-Disposition', '')
        ctype = self.headers.get('Content-Type', '')
        if not cdis:
            raise MultipartError('Content-Disposition header is missing.')
        self.disposition, self.options = parse_options_header(cdis)
        self.name = self.options.get('name')
        self.filename = self.options.get('filename')
        self.content_type, options = parse_options_header(ctype)
        self.charset = options.get('charset') or self.charset
        self.content_length = int(self.headers.get('Content-Length', '-1'))
        self.content_transfer_encoding = \
                self.headers.get('Content-Transfer-Encoding')
        if self.content_transfer_encoding not in \
                [None, 'base64', 'quoted-printable']:
            raise MultipartError('invalid Content-Transfer-Encoding')

    def is_buffered(self):
        ''' Return true if the data is fully buffered in memory.'''
        return isinstance(self.file, BytesIO)

    def value(self, limit):
        ''' Data decoded with the specified charset '''
        pos = self.file.tell()
        try:
            self.file.seek(0)
            val = self.file.read(limit)
            if self.file.read(1):
                raise MultipartError("Request too big. Increase mem_limit.")
        finally:
            self.file.seek(pos)
        return val.decode(self.charset)

    def save_as(self, path):
        fp = open(path, 'wb')
        pos = self.file.tell()
        try:
            self.file.seek(0)
            size = copy_file(self.file, fp)
        finally:
            self.file.seek(pos)
        return size
Example #34
    class Reader(Format.Reader):
        def _open(self, loop=False):
            if not _swf:
                load_lib()

            self._arg_loop = bool(loop)

            self._fp = self.request.get_file()

            # Check file ...
            tmp = self.request.firstbytes[0:3].decode('ascii', 'ignore')
            if tmp == 'FWS':
                pass  # OK
            elif tmp == 'CWS':
                # Compressed, we need to decompress
                bb = self._fp.read()
                bb = bb[:8] + zlib.decompress(bb[8:])
                # Wrap up in a file object
                self._fp = BytesIO(bb)
            else:
                raise IOError('This does not look like a valid SWF file')

            # Skip first bytes. This also tests support for seeking ...
            try:
                self._fp.seek(8)
                self._streaming_mode = False
            except Exception:
                self._streaming_mode = True
                self._fp_read(8)

            # Skip header
            # Note that the number of frames is there, which we could
            # potentially use, but the number of frames does not necessarily
            # correspond to the number of images.
            nbits = _swf.bits2int(self._fp_read(1), 5)
            nbits = 5 + nbits * 4
            Lrect = nbits / 8.0
            if Lrect % 1:
                Lrect += 1
            Lrect = int(Lrect)
            self._fp_read(Lrect + 3)

            # Now the rest is basically tags ...
            self._imlocs = []  # tuple (loc, sze, T, L1)
            if not self._streaming_mode:
                # Collect locations of frame, while skipping through the data
                # This does not read any of the tag *data*.
                try:
                    while True:
                        isimage, sze, T, L1 = self._read_one_tag()
                        loc = self._fp.tell()
                        if isimage:
                            # Still need to check if the format is right
                            format = ord(self._fp_read(3)[2:])
                            if format == 5:  # RGB or RGBA lossless
                                self._imlocs.append((loc, sze, T, L1))
                        self._fp.seek(loc + sze)  # Skip over tag
                except IndexError:
                    pass  # done reading

        def _fp_read(self, n):
            return read_n_bytes(self._fp, n)

        def _close(self):
            pass

        def _get_length(self):
            if self._streaming_mode:
                return np.inf
            else:
                return len(self._imlocs)

        def _get_data(self, index):
            # Check index
            if index < 0:
                raise IndexError('Index in swf file must be > 0')
            if not self._streaming_mode:
                if self._arg_loop and self._imlocs:
                    index = index % len(self._imlocs)
                if index >= len(self._imlocs):
                    raise IndexError('Index out of bounds')

            if self._streaming_mode:
                # Walk over tags until we find an image
                while True:
                    isimage, sze, T, L1 = self._read_one_tag()
                    bb = self._fp_read(sze)  # always read data
                    if isimage:
                        im = _swf.read_pixels(bb, 0, T, L1)  # can be None
                        if im is not None:
                            return im, {}

            else:
                # Go to corresponding location, read data, and convert to image
                loc, sze, T, L1 = self._imlocs[index]
                self._fp.seek(loc)
                bb = self._fp_read(sze)
                # Read_pixels should return an ndarray, since we checked the format
                im = _swf.read_pixels(bb, 0, T, L1)
                return im, {}

        def _read_one_tag(self):
            """ 
            Return (True, loc, size, T, L1) if an image that we can read.
            Return (False, loc, size, T, L1) if any other tag.
            """

            # Get head
            head = self._fp_read(6)
            if not head:  # pragma: no cover
                raise IndexError('Reached end of swf movie')

            # Determine type and length
            T, L1, L2 = _swf.get_type_and_len(head)
            if not L2:  # pragma: no cover
                raise RuntimeError('Invalid tag length, could not proceed')

            # Read data
            isimage = False
            sze = L2 - 6
            #bb = self._fp_read(L2 - 6)

            # Parse tag
            if T == 0:
                raise IndexError('Reached end of swf movie')
            elif T in [20, 36]:
                isimage = True
                #im = _swf.read_pixels(bb, 0, T, L1)  # can be None
            elif T in [6, 21, 35, 90]:  # pragma: no cover
                print('Ignoring JPEG image: cannot read JPEG.')
            else:
                pass  # Not an image tag

            # Done.  Return image. Can be None
            #return im
            return isimage, sze, T, L1

        def _get_meta_data(self, index):
            return {}  # This format does not support meta data
Example #35
                    len(scripts)))

        if compress:
            # BW2 archives are gzip compressed and always end with .gz
            bwopen = gzip.open
        else:
            bwopen = open

        print("Writing to", output)

        f = BytesIO()

        #with bwopen(output, "wb") as f:
        if True:
            f.write(b"RXET")
            fxet_size_offset = f.tell()
            f.write(b"ABCD")
            write_uint32(f, len(resinfo["Level name"]))
            f.write(bytes(resinfo["Level name"], encoding="ascii"))

            if is_bw2:
                f.write(b"FTBG")
            else:
                f.write(b"FTBX")

            texsection_size_offset = f.tell()
            f.write(b"BACD")
            write_uint32(f, len(textures))

            for texdata in textures:
                #f.write(texdata.data)
Example #36
 def convert_image_to_file(self, image, name):
     temp = BytesIO()
     image.save(temp, format='PNG')
     file_size = temp.tell()
     return InMemoryUploadedFile(temp, None, name, 'image/png', file_size,
                                 None)
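
The size-via-tell trick above works because image.save() leaves the write position at the end of the stream; reduced to plain bytes, the same idiom is:

temp = BytesIO()
temp.write(b'\x89PNG fake image data')
assert temp.tell() == len(temp.getvalue())  # position == bytes written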
Example #37
class DataIO(BinFormat):
    """
    This class simply wraps a binary file or a bytes string and implements
    both the file and bytes interface. It allows an input to be provided as
    files of bytes and manipulated as either a file or a bytes object.
    """
    def __init__(self, f):
        if isinstance(f, bytes):
            from io import BytesIO

            self.f = BytesIO(f)
        else:
            self.f = f

    def __getitem__(self, i):
        stay = self.f.tell()
        sta = i.start or stay
        self.f.seek(sta, 0)
        if i.stop is None:
            data = self.f.read()
        else:
            data = self.f.read(i.stop - sta)
        self.f.seek(stay, 0)
        return data

    def read(self, size=-1):
        return self.f.read(size)

    def readline(self, size=-1):
        return self.f.readline(size)

    def readlines(self, size=-1):
        return self.f.readlines(size)

    def xreadlines(self, size=-1):
        return self.f.xreadlines(size)

    def write(self, s):
        return self.f.write(s)

    def writelines(self, l):
        return self.f.writelines(l)

    def seek(self, offset, whence=0):
        return self.f.seek(offset, whence)

    def tell(self):
        return self.f.tell()

    def flush(self):
        return self.f.flush()

    def fileno(self):
        return self.f.fileno()

    def isatty(self):
        return self.f.isatty()

    def next(self):
        return self.f.next()

    def truncate(self, size=0):
        return self.f.truncate(size)

    def close(self):
        return self.f.close()

    @property
    def closed(self):
        return self.f.closed

    @property
    def encoding(self):
        return self.f.encoding

    @property
    def errors(self):
        return self.f.errors

    @property
    def mode(self):
        return self.f.mode

    @property
    def name(self):
        try:
            return self.f.name
        except AttributeError:
            s = bytes(self.f.getvalue())
            return "(sc-%s...)" % ("".join(["%02x" % x for x in s])[:8])

    filename = name

    @property
    def newlines(self):
        return self.f.newlines

    @property
    def softspace(self):
        return self.f.softspace
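
The slice support in __getitem__ lets callers peek at byte ranges without disturbing the read position; a small sketch (assuming the BinFormat base class and imports from the surrounding module):

d = DataIO(b'\x7fELF rest of file')
assert d[0:4] == b'\x7fELF'     # slice access restores the position
assert d.read(4) == b'\x7fELF'  # so file-style reads still start at 0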
Example #38
    class Writer(Format.Writer):
        def _open(self, fps=12, loop=True, html=False, compress=False):
            if not _swf:
                load_lib()

            self._arg_fps = int(fps)
            self._arg_loop = bool(loop)
            self._arg_html = bool(html)
            self._arg_compress = bool(compress)

            self._fp = self.request.get_file()
            self._framecounter = 0
            self._framesize = (100, 100)

            # For compress, we use an in-memory file object
            if self._arg_compress:
                self._fp_real = self._fp
                self._fp = BytesIO()

        def _close(self):
            self._complete()
            # Get size of (uncompressed) file
            sze = self._fp.tell()
            # set nframes, this is in the potentially compressed region
            self._fp.seek(self._location_to_save_nframes)
            self._fp.write(_swf.int2uint16(self._framecounter))
            # Compress body?
            if self._arg_compress:
                bb = self._fp.getvalue()
                self._fp = self._fp_real
                self._fp.write(bb[:8])
                self._fp.write(zlib.compress(bb[8:]))
                sze = self._fp.tell()  # renew sze value
            # set size
            self._fp.seek(4)
            self._fp.write(_swf.int2uint32(sze))
            self._fp = None  # Disable

            # Write html?
            if self._arg_html and os.path.isfile(self.request.filename):
                dirname, fname = os.path.split(self.request.filename)
                filename = os.path.join(dirname, fname[:-4] + '.html')
                w, h = self._framesize
                html = HTML % (fname, w, h, fname)
                with open(filename, 'wb') as f:
                    f.write(html.encode('utf-8'))

        def _write_header(self, framesize, fps):
            self._framesize = framesize
            # Called as soon as we know framesize; when we get first frame
            bb = b''
            bb += 'FC'[self._arg_compress].encode('ascii')
            bb += 'WS'.encode('ascii')  # signature bytes
            bb += _swf.int2uint8(8)  # version
            bb += '0000'.encode('ascii')  # FileLength (leave open for now)
            bb += _swf.Tag().make_rect_record(0, framesize[0], 0,
                                              framesize[1]).tobytes()
            bb += _swf.int2uint8(0) + _swf.int2uint8(fps)  # FrameRate
            self._location_to_save_nframes = len(bb)
            bb += '00'.encode('ascii')  # nframes (leave open for now)
            self._fp.write(bb)

            # Write some initial tags
            taglist = _swf.FileAttributesTag(), _swf.SetBackgroundTag(0, 0, 0)
            for tag in taglist:
                self._fp.write(tag.get_tag())

        def _complete(self):
            # What if no images were saved?
            if not self._framecounter:
                self._write_header((10, 10), self._arg_fps)
            # Write stop tag if we do not loop
            if not self._arg_loop:
                self._fp.write(_swf.DoActionTag('stop').get_tag())
            # finish with end tag
            self._fp.write('\x00\x00'.encode('ascii'))

        def _append_data(self, im, meta):
            # Correct shape and type
            if im.ndim == 3 and im.shape[-1] == 1:
                im = im[:, :, 0]
            im = image_as_uint8(im)
            # Get frame size
            wh = im.shape[1], im.shape[0]
            # Write header on first frame
            isfirstframe = False
            if self._framecounter == 0:
                isfirstframe = True
                self._write_header(wh, self._arg_fps)
            # Create tags
            bm = _swf.BitmapTag(im)
            sh = _swf.ShapeTag(bm.id, (0, 0), wh)
            po = _swf.PlaceObjectTag(1, sh.id, move=(not isfirstframe))
            sf = _swf.ShowFrameTag()
            # Write tags
            for tag in [bm, sh, po, sf]:
                self._fp.write(tag.get_tag())
            self._framecounter += 1

        def set_meta_data(self, meta):
            pass
Example #39
    def receive(self,
                command=None,
                prompts=None,
                answer=None,
                newline=True,
                prompt_retry_check=False,
                check_all=False):
        '''
        Handles receiving of output from command
        '''
        self._matched_prompt = None
        self._matched_cmd_prompt = None
        recv = BytesIO()
        handled = False
        command_prompt_matched = False
        matched_prompt_window = window_count = 0

        cache_socket_timeout = self._ssh_shell.gettimeout()
        command_timeout = self.get_option('persistent_command_timeout')
        self._validate_timeout_value(command_timeout,
                                     "persistent_command_timeout")
        if cache_socket_timeout != command_timeout:
            self._ssh_shell.settimeout(command_timeout)

        buffer_read_timeout = self.get_option('persistent_buffer_read_timeout')
        self._validate_timeout_value(buffer_read_timeout,
                                     "persistent_buffer_read_timeout")

        self._log_messages("command: %s" % command)
        while True:
            if command_prompt_matched:
                try:
                    signal.signal(signal.SIGALRM,
                                  self._handle_buffer_read_timeout)
                    signal.setitimer(signal.ITIMER_REAL, buffer_read_timeout)
                    data = self._ssh_shell.recv(256)
                    signal.alarm(0)
                    self._log_messages("response-%s: %s" %
                                       (window_count + 1, data))
                    # if data is still received on channel it indicates the prompt string
                    # is wrongly matched in between response chunks, continue to read
                    # remaining response.
                    command_prompt_matched = False

                    # restart command_timeout timer
                    signal.signal(signal.SIGALRM, self._handle_command_timeout)
                    signal.alarm(command_timeout)

                except AnsibleCmdRespRecv:
                    # reset socket timeout to global timeout
                    self._ssh_shell.settimeout(cache_socket_timeout)
                    return self._command_response
            else:
                data = self._ssh_shell.recv(256)
                self._log_messages("response-%s: %s" %
                                   (window_count + 1, data))
            # when a channel stream is closed, received data will be empty
            if not data:
                break

            recv.write(data)
            offset = recv.tell() - 256 if recv.tell() > 256 else 0
            recv.seek(offset)

            window = self._strip(recv.read())
            window_count += 1

            if prompts and not handled:
                handled = self._handle_prompt(window, prompts, answer, newline,
                                              False, check_all)
                matched_prompt_window = window_count
            elif prompts and handled and prompt_retry_check and matched_prompt_window + 1 == window_count:
                # Check again even when handled: if the same prompt repeats in
                # the next window (e.g. after a wrong enable password), the
                # answer value is wrong, so report this as an error.
                if self._handle_prompt(window, prompts, answer, newline,
                                       prompt_retry_check, check_all):
                    raise AnsibleConnectionFailure(
                        "For matched prompt '%s', answer is not valid" %
                        self._matched_cmd_prompt)

            if self._find_prompt(window):
                self._last_response = recv.getvalue()
                resp = self._strip(self._last_response)
                self._command_response = self._sanitize(resp, command)
                if buffer_read_timeout == 0.0:
                    # reset socket timeout to global timeout
                    self._ssh_shell.settimeout(cache_socket_timeout)
                    return self._command_response
                else:
                    command_prompt_matched = True
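
The offset arithmetic in receive() keeps only the trailing 256 bytes of the accumulated buffer in view; the core of that sliding window is:

recv = BytesIO()
recv.write(b'x' * 300)  # pretend 300 bytes have arrived so far
offset = recv.tell() - 256 if recv.tell() > 256 else 0
recv.seek(offset)
window = recv.read()    # only the last 256 bytes
assert len(window) == 256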
Example #40
 def test_skip_item_skips_a_non_empty_item(self):
     io = BytesIO(
         codecs.decode('00000004DEADBEEF' + self.padding, 'hex_codec'))
     parser.skip_item(io)
     self.assertEqual(io.tell(), 8)
Example #41
class XportReader(BaseIterator):
    __doc__ = _xport_reader_doc

    def __init__(self,
                 filepath_or_buffer,
                 index=None,
                 encoding="ISO-8859-1",
                 chunksize=None):

        self._encoding = encoding
        self._lines_read = 0
        self._index = index
        self._chunksize = chunksize

        if isinstance(filepath_or_buffer, str):
            (
                filepath_or_buffer,
                encoding,
                compression,
                should_close,
            ) = get_filepath_or_buffer(filepath_or_buffer, encoding=encoding)

        if isinstance(filepath_or_buffer, (str, bytes)):
            self.filepath_or_buffer = open(filepath_or_buffer, "rb")
        else:
            # Copy to BytesIO, and ensure no encoding
            contents = filepath_or_buffer.read()
            try:
                contents = contents.encode(self._encoding)
            except UnicodeEncodeError:
                pass
            self.filepath_or_buffer = BytesIO(contents)

        self._read_header()

    def close(self):
        self.filepath_or_buffer.close()

    def _get_row(self):
        return self.filepath_or_buffer.read(80).decode()

    def _read_header(self):
        self.filepath_or_buffer.seek(0)

        # read file header
        line1 = self._get_row()
        if line1 != _correct_line1:
            self.close()
            raise ValueError("Header record is not an XPORT file.")

        line2 = self._get_row()
        fif = [["prefix", 24], ["version", 8], ["OS", 8], ["_", 24],
               ["created", 16]]
        file_info = _split_line(line2, fif)
        if file_info["prefix"] != "SAS     SAS     SASLIB":
            self.close()
            raise ValueError("Header record has invalid prefix.")
        file_info["created"] = _parse_date(file_info["created"])
        self.file_info = file_info

        line3 = self._get_row()
        file_info["modified"] = _parse_date(line3[:16])

        # read member header
        header1 = self._get_row()
        header2 = self._get_row()
        headflag1 = header1.startswith(_correct_header1)
        headflag2 = header2 == _correct_header2
        if not (headflag1 and headflag2):
            self.close()
            raise ValueError("Member header not found")
        # usually 140, could be 135
        fieldnamelength = int(header1[-5:-2])

        # member info
        mem = [
            ["prefix", 8],
            ["set_name", 8],
            ["sasdata", 8],
            ["version", 8],
            ["OS", 8],
            ["_", 24],
            ["created", 16],
        ]
        member_info = _split_line(self._get_row(), mem)
        mem = [["modified", 16], ["_", 16], ["label", 40], ["type", 8]]
        member_info.update(_split_line(self._get_row(), mem))
        member_info["modified"] = _parse_date(member_info["modified"])
        member_info["created"] = _parse_date(member_info["created"])
        self.member_info = member_info

        # read field names
        types = {1: "numeric", 2: "char"}
        fieldcount = int(self._get_row()[54:58])
        datalength = fieldnamelength * fieldcount
        # round up to nearest 80
        if datalength % 80:
            datalength += 80 - datalength % 80
        fielddata = self.filepath_or_buffer.read(datalength)
        fields = []
        obs_length = 0
        while len(fielddata) >= fieldnamelength:
            # pull data for one field
            field, fielddata = (
                fielddata[:fieldnamelength],
                fielddata[fieldnamelength:],
            )

            # rest at end gets ignored, so if field is short, pad out
            # to match struct pattern below
            field = field.ljust(140)

            fieldstruct = struct.unpack(">hhhh8s40s8shhh2s8shhl52s", field)
            field = dict(zip(_fieldkeys, fieldstruct))
            del field["_"]
            field["ntype"] = types[field["ntype"]]
            fl = field["field_length"]
            if field["ntype"] == "numeric" and ((fl < 2) or (fl > 8)):
                self.close()
                msg = "Floating field width {0} is not between 2 and 8."
                raise TypeError(msg.format(fl))

            for k, v in field.items():
                try:
                    field[k] = v.strip()
                except AttributeError:
                    pass

            obs_length += field["field_length"]
            fields += [field]

        header = self._get_row()
        if not header == _correct_obs_header:
            self.close()
            raise ValueError("Observation header not found.")

        self.fields = fields
        self.record_length = obs_length
        self.record_start = self.filepath_or_buffer.tell()

        self.nobs = self._record_count()
        self.columns = [x["name"].decode() for x in self.fields]

        # Setup the dtype.
        dtypel = [("s" + str(i), "S" + str(field["field_length"]))
                  for i, field in enumerate(self.fields)]
        dtype = np.dtype(dtypel)
        self._dtype = dtype

    def __next__(self):
        return self.read(nrows=self._chunksize or 1)

    def _record_count(self):
        """
        Get number of records in file.

        This is maybe suboptimal because we have to seek to the end of
        the file.

        Side effect: returns file position to record_start.
        """

        self.filepath_or_buffer.seek(0, 2)
        total_records_length = (self.filepath_or_buffer.tell() -
                                self.record_start)

        if total_records_length % 80 != 0:
            warnings.warn("xport file may be corrupted")

        if self.record_length > 80:
            self.filepath_or_buffer.seek(self.record_start)
            return total_records_length // self.record_length

        self.filepath_or_buffer.seek(-80, 2)
        last_card = self.filepath_or_buffer.read(80)
        last_card = np.frombuffer(last_card, dtype=np.uint64)

        # 8 byte blank
        ix = np.flatnonzero(last_card == 2314885530818453536)

        if len(ix) == 0:
            tail_pad = 0
        else:
            tail_pad = 8 * len(ix)

        self.filepath_or_buffer.seek(self.record_start)

        return (total_records_length - tail_pad) // self.record_length

    def get_chunk(self, size=None):
        """
        Reads lines from an XPORT file and returns them as a DataFrame.

        Parameters
        ----------
        size : int, defaults to None
            Number of lines to read.  If None, reads whole file.

        Returns
        -------
        DataFrame
        """
        if size is None:
            size = self._chunksize
        return self.read(nrows=size)

    def _missing_double(self, vec):
        v = vec.view(dtype="u1,u1,u2,u4")
        miss = (v["f1"] == 0) & (v["f2"] == 0) & (v["f3"] == 0)
        miss1 = (((v["f0"] >= 0x41) & (v["f0"] <= 0x5A))
                 | (v["f0"] == 0x5F)
                 | (v["f0"] == 0x2E))
        miss &= miss1
        return miss

    @Appender(_read_method_doc)
    def read(self, nrows=None):

        if nrows is None:
            nrows = self.nobs

        read_lines = min(nrows, self.nobs - self._lines_read)
        read_len = read_lines * self.record_length
        if read_len <= 0:
            self.close()
            raise StopIteration
        raw = self.filepath_or_buffer.read(read_len)
        data = np.frombuffer(raw, dtype=self._dtype, count=read_lines)

        df = pd.DataFrame(index=range(read_lines))
        for j, x in enumerate(self.columns):
            vec = data["s%d" % j]
            ntype = self.fields[j]["ntype"]
            if ntype == "numeric":
                vec = _handle_truncated_float_vec(
                    vec, self.fields[j]["field_length"])
                miss = self._missing_double(vec)
                v = _parse_float_vec(vec)
                v[miss] = np.nan
            elif self.fields[j]["ntype"] == "char":
                v = [y.rstrip() for y in vec]

                if self._encoding is not None:
                    v = [y.decode(self._encoding) for y in v]

            df[x] = v

        if self._index is None:
            df.index = range(self._lines_read, self._lines_read + read_lines)
        else:
            df = df.set_index(self._index)

        self._lines_read += read_lines

        return df
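
A minimal usage sketch for the reader above; the file path and the process() helper are hypothetical, and only the methods visible in the snippet are used:

reader = XportReader('data.xpt', chunksize=1000)
try:
    while True:
        chunk = reader.get_chunk()  # DataFrame of up to 1000 rows
        process(chunk)              # hypothetical placeholder for real work
except StopIteration:
    pass
finally:
    reader.close()
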
Example #42
0
if __name__ == '__main__':
    # send_mail(
    #     'Test email from Django',
    #     'Testing, just a quick test~',
    #     '*****@*****.**',
    #     ['*****@*****.**'],
    # )

    img = Image.open(
        'D:/My File/Projects/DiurenCMS/media/user_uploads/1/avatar/icon.png'
    )  # type:Image.Image
    img = img.convert(mode='RGB')

    quality = 100
    limit = 100 * 1024

    while True:
        temp_io = BytesIO()
        img.save(temp_io, format='JPEG', quality=quality, optimize=True)
        print(quality, temp_io.tell())
        if temp_io.tell() > limit:
            quality -= 10
        else:
            break

    img.save(
        'D:/My File/Projects/DiurenCMS/media/user_uploads/1/avatar/icon.jpeg',
        format='JPEG',
        quality=quality,
        optimize=True)
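
temp_io.tell() works as a size probe here because Image.save() leaves the stream position at the end of the bytes it wrote. The same loop as a reusable sketch (compress_to_limit is a hypothetical helper name):

from io import BytesIO

def compress_to_limit(img, limit, start_quality=100, step=10):
    # Re-encode at decreasing JPEG quality until the output fits the limit.
    quality = start_quality
    while quality > step:
        temp_io = BytesIO()
        img.save(temp_io, format='JPEG', quality=quality, optimize=True)
        if temp_io.tell() <= limit:
            break
        quality -= step
    return quality
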
Example #43
0
    def __read(self):
        """
        Read the next frame(s) from the socket.

        :return: list of frames read
        :rtype: list(bytes)
        """
        fastbuf = BytesIO()
        while self.running:
            try:
                try:
                    c = self.receive()
                except exception.InterruptedException:
                    log.debug("socket read interrupted, restarting")
                    continue
            except Exception:
                log.debug("socket read error", exc_info=True)
                c = b''
            if c is None or len(c) == 0:
                raise exception.ConnectionClosedException()
            if c == b'\x0a' and not self.__recvbuf and not fastbuf.tell():
                #
                # EOL to an empty receive buffer: treat as heartbeat.
                # Note that this may misdetect an optional EOL at end of frame as heartbeat in case the
                # previous receive() got a complete frame whose NUL at end of frame happened to be the
                # last byte of that read. But that should be harmless in practice.
                #
                fastbuf.close()
                return [c]
            fastbuf.write(c)
            if b'\x00' in c:
                #
                # Possible end of frame
                #
                break
        self.__recvbuf += fastbuf.getvalue()
        fastbuf.close()
        result = []

        if self.__recvbuf and self.running:
            while True:
                pos = self.__recvbuf.find(b'\x00')

                if pos >= 0:
                    frame = self.__recvbuf[0:pos]
                    preamble_end_match = utils.PREAMBLE_END_RE.search(frame)
                    if preamble_end_match:
                        preamble_end = preamble_end_match.start()
                        content_length_match = BaseTransport.__content_length_re.search(
                            frame[0:preamble_end])
                        if content_length_match:
                            content_length = int(
                                content_length_match.group('value'))
                            content_offset = preamble_end_match.end()
                            frame_size = content_offset + content_length
                            if frame_size > len(frame):
                                #
                                # Frame contains NUL bytes, need to read more
                                #
                                if frame_size < len(self.__recvbuf):
                                    pos = frame_size
                                    frame = self.__recvbuf[0:pos]
                                else:
                                    #
                                    # Haven't read enough data yet, exit loop and wait for more to arrive
                                    #
                                    break
                    result.append(frame)
                    pos += 1
                    #
                    # Ignore optional EOLs at end of frame
                    #
                    while self.__recvbuf[pos:pos + 1] == b'\x0a':
                        pos += 1
                    self.__recvbuf = self.__recvbuf[pos:]
                else:
                    break
        return result
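
The parsing above splits the receive buffer on the NUL byte that terminates each STOMP frame, falling back to content-length when a body contains NULs of its own. A stripped-down sketch of just the NUL-split step, with a hypothetical buffer and no content-length handling:

recvbuf = b'CONNECTED\nversion:1.2\n\n\x00\nSEND\ndestination:/q\n\nhi\x00'
frames = []
while True:
    pos = recvbuf.find(b'\x00')
    if pos < 0:
        break
    frames.append(recvbuf[:pos])
    pos += 1
    while recvbuf[pos:pos + 1] == b'\x0a':  # skip optional trailing EOLs
        pos += 1
    recvbuf = recvbuf[pos:]
assert len(frames) == 2
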
Example #44
0
def test_read_id_returns_an_id(self):
    io = BytesIO(('ABCD' + self.padding).encode())
    self.assertEqual(parser.read_id(io), b'ABCD')
    self.assertEqual(io.tell(), 4)
Example #45
0
class ByteIO:
    @contextlib.contextmanager
    def save_current_pos(self):
        entry = self.tell()
        yield
        self.seek(entry)

    def __init__(self,
                 path_or_file_or_data: Union[str, Path, BinaryIO, bytes,
                                             bytearray] = None,
                 open_to_read=True):
        if hasattr(path_or_file_or_data, 'mode'):
            file = path_or_file_or_data
            self.file = file
        elif type(path_or_file_or_data) is str or isinstance(
                path_or_file_or_data, Path):
            mode = 'rb' if open_to_read else 'wb'
            self.file = open(path_or_file_or_data, mode)

        elif type(path_or_file_or_data) in [bytes, bytearray]:
            self.file = io.BytesIO(path_or_file_or_data)
        elif issubclass(type(path_or_file_or_data), io.IOBase):
            self.file = path_or_file_or_data
        else:
            self.file = BytesIO()

    @property
    def preview(self):
        with self.save_current_pos():
            return self.read(64)

    @property
    def preview_f(self):
        with self.save_current_pos():
            block = self.read(64)
            hex_values = split(
                split(binascii.hexlify(block).decode().upper(), 2), 4)
            return [' '.join(b) for b in hex_values]

    def __repr__(self):
        return "<ByteIO {}/{}>".format(self.tell(), self.size())

    def close(self):
        if hasattr(self.file, 'close'):
            self.file.close()

    def rewind(self, amount):
        self.file.seek(-amount, io.SEEK_CUR)

    def skip(self, amount):
        self.file.seek(amount, io.SEEK_CUR)

    def seek(self, off, pos=io.SEEK_SET):
        self.file.seek(off, pos)

    def tell(self):
        return self.file.tell()

    def size(self):
        curr_offset = self.tell()
        self.seek(0, io.SEEK_END)
        ret = self.tell()
        self.seek(curr_offset, io.SEEK_SET)
        return ret

    def fill(self, amount):
        for _ in range(amount):
            self._write(b'\x00')

    def insert_begin(self, to_insert):
        self.seek(0)
        buffer = self.read(-1)

        del self.file
        self.file = BytesIO()
        self.file.write(to_insert)
        self.file.write(buffer)
        self.file.seek(0)

    # ------------ PEEK SECTION ------------ #

    def _peek(self, size=1):
        with self.save_current_pos():
            return self.read(size)

    def peek(self, t):
        size = struct.calcsize(t)
        return struct.unpack(t, self._peek(size))[0]

    def peek_fmt(self, fmt):
        size = struct.calcsize(fmt)
        return struct.unpack(fmt, self._peek(size))

    def peek_uint64(self):
        return self.peek('Q')

    def peek_int64(self):
        return self.peek('q')

    def peek_uint32(self):
        return self.peek('I')

    def peek_int32(self):
        return self.peek('i')

    def peek_uint16(self):
        return self.peek('H')

    def peek_int16(self):
        return self.peek('h')

    def peek_uint8(self):
        return self.peek('B')

    def peek_int8(self):
        return self.peek('b')

    def peek_float(self):
        return self.peek('f')

    def peek_double(self):
        return self.peek('d')

    def peek_fourcc(self):
        with self.save_current_pos():
            return self.read_ascii_string(4)

    # ------------ READ SECTION ------------ #

    def read(self, size=-1) -> bytes:
        return self.file.read(size)

    def _read(self, t):
        return struct.unpack(t, self.file.read(struct.calcsize(t)))[0]

    def read_fmt(self, fmt):
        return struct.unpack(fmt, self.file.read(struct.calcsize(fmt)))

    def read_uint64(self):
        return self._read('Q')

    def read_int64(self):
        return self._read('q')

    def read_uint32(self):
        return self._read('I')

    def read_int32(self):
        return self._read('i')

    def read_uint16(self):
        return self._read('H')

    def read_int16(self):
        return self._read('h')

    def read_uint8(self):
        return self._read('B')

    def read_int8(self):
        return self._read('b')

    def read_float(self):
        return self._read('f')

    def read_double(self):
        return self._read('d')

    def read_ascii_string(self, length=None):
        if length is not None:
            buffer = self.file.read(length).strip(b'\x00')
            if b'\x00' in buffer:
                buffer = buffer[:buffer.index(b'\x00')]
            return buffer.decode('latin', errors='replace')

        buffer = bytearray()

        while True:
            chunk = self.read(32)
            chunk_end = chunk.find(b'\x00')
            if chunk_end >= 0:
                buffer += chunk[:chunk_end]
            else:
                buffer += chunk
            if chunk_end >= 0:
                self.seek(-(len(chunk) - chunk_end - 1), io.SEEK_CUR)
                return buffer.decode('latin', errors='replace')

    def read_fourcc(self):
        return self.read_ascii_string(4)

    def read_from_offset(self, offset, reader, **reader_args):
        if offset > self.size():
            raise OffsetOutOfBounds()
        with self.save_current_pos():
            self.seek(offset, io.SEEK_SET)
            ret = reader(**reader_args)
        return ret

    def read_source1_string(self, entry):
        offset = self.read_int32()
        if offset:
            with self.save_current_pos():
                self.seek(entry + offset)
                return self.read_ascii_string()
        else:
            return ""

    def read_source2_string(self):
        entry = self.tell()
        offset = self.read_int32()
        with self.save_current_pos():
            self.seek(entry + offset)
            return self.read_ascii_string()

    # ------------ WRITE SECTION ------------ #

    def _write(self, data):
        self.file.write(data)

    def write(self, t, value):
        self._write(struct.pack(t, value))

    def write_uint64(self, value):
        self.write('Q', value)

    def write_int64(self, value):
        self.write('q', value)

    def write_uint32(self, value):
        self.write('I', value)

    def write_int32(self, value):
        self.write('i', value)

    def write_uint16(self, value):
        self.write('H', value)

    def write_int16(self, value):
        self.write('h', value)

    def write_uint8(self, value):
        self.write('B', value)

    def write_int8(self, value):
        self.write('b', value)

    def write_float(self, value):
        self.write('f', value)

    def write_double(self, value):
        self.write('d', value)

    def write_ascii_string(self, string, zero_terminated=False, length=-1):
        pos = self.tell()
        for c in string:
            self._write(c.encode('ascii'))
        if zero_terminated:
            self._write(b'\x00')
        elif length != -1:
            to_fill = length - (self.tell() - pos)
            if to_fill > 0:
                for _ in range(to_fill):
                    self.write_uint8(0)

    def write_fourcc(self, fourcc):
        self.write_ascii_string(fourcc)

    def write_to_offset(self, offset, writer, value, fill_to_target=False):
        if offset > self.size() and not fill_to_target:
            raise OffsetOutOfBounds()
        curr_offset = self.tell()
        self.seek(offset, io.SEEK_SET)
        ret = writer(value)
        self.seek(curr_offset, io.SEEK_SET)
        return ret

    def read_float16(self):
        return self._read('e')

    def write_bytes(self, data):
        self._write(data)

    def __bool__(self):
        return self.tell() < self.size()

    def read_ascii_padded(self):
        string = self.read_ascii_string()
        self.skip(get_pad(string))
        return string
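
A small round-trip sketch for the wrapper above, using hypothetical data: the peek_* helpers leave the cursor in place, while the read_* helpers advance it.

import struct

data = struct.pack('I', 0xDEADBEEF) + struct.pack('H', 7) + b'name\x00'
buf = ByteIO(data)
assert buf.peek_uint32() == 0xDEADBEEF  # peek: cursor stays at 0
assert buf.read_uint32() == 0xDEADBEEF  # read: cursor advances to 4
assert buf.read_uint16() == 7
assert buf.read_ascii_string() == 'name'
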
Example #46
0
def test_read_item_returns_an_item(self):
    io = BytesIO(
        codecs.decode('00000004DEADBEEF' + self.padding, 'hex_codec'))
    self.assertEqual(parser.read_item(io),
                     codecs.decode('DEADBEEF', 'hex_codec'))
    self.assertEqual(io.tell(), 8)
Example #47
0
    def get(self, request, *args, **kwargs):
        """
        获取验证码
        ---
        """
        uid = str(uuid.uuid4())
        mp_src = hashlib.md5(uid.encode("UTF-8")).hexdigest()
        text = mp_src[0:4]
        request.session["captcha_code"] = text
        request.session.save()
        font_path = current_path + "/www/static/www/fonts/Vera.ttf"
        logger.debug("======> font path " + str(font_path))
        font = ImageFont.truetype(font_path, 22)

        size = self.getsize(font, text)
        size = (size[0] * 2, int(size[1] * 1.4))

        image = Image.new('RGBA', size)

        try:
            PIL_VERSION = int(NON_DIGITS_RX.sub('', Image.VERSION))
        except Exception:
            PIL_VERSION = 116
        xpos = 2

        charlist = []
        for char in text:
            charlist.append(char)

        for char in charlist:
            fgimage = Image.new('RGB', size, '#001100')
            charimage = Image.new('L', self.getsize(font, ' %s ' % char),
                                  '#000000')
            chardraw = ImageDraw.Draw(charimage)
            chardraw.text((0, 0), ' %s ' % char, font=font, fill='#ffffff')
            if PIL_VERSION >= 116:
                charimage = charimage.rotate(random.randrange(*(-35, 35)),
                                             expand=0,
                                             resample=Image.BICUBIC)
            else:
                charimage = charimage.rotate(random.randrange(*(-35, 35)),
                                             resample=Image.BICUBIC)
            charimage = charimage.crop(charimage.getbbox())
            maskimage = Image.new('L', size)

            maskimage.paste(charimage,
                            (xpos, from_top, xpos + charimage.size[0],
                             from_top + charimage.size[1]))
            size = maskimage.size
            image = Image.composite(fgimage, image, maskimage)
            xpos = xpos + 2 + charimage.size[0]

        image = image.crop((0, 0, xpos + 1, size[1]))

        ImageDraw.Draw(image)

        out = BytesIO()
        image.save(out, "PNG")
        out.seek(0)

        response = HttpResponse(content_type='image/png')
        response.write(out.read())
        response['Content-length'] = out.tell()

        return response
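
Note the Content-length trick at the end: after out.seek(0) and a full out.read(), out.tell() is back at the end of the stream, so it equals the number of bytes just written to the response. The same measurement in isolation, with a placeholder payload:

from io import BytesIO

out = BytesIO()
out.write(b'\x89PNG\r\n\x1a\n')  # stand-in for real image bytes
out.seek(0)
payload = out.read()
assert out.tell() == len(payload)  # tell() after a full read == total size
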
Example #48
0
class StatusQuery:
    def __init__(self):
        # this is used by the ProcessData method and is persistent to hold the buffer data
        self.data = BytesIO()
        # expected size of the next message; None until a header has been read
        self.wsz = None

    '''
		A helper function that breaks the incoming data stream into packets. Each time
		it is called it will try to return a message from the buffer. It should be called
		until it returns None which means no messages reside in the buffer. The data parameter
		is optional and can be None meaning do not place any more data into the buffer.
	'''

    def ProcessData(self, data):
        if data is not None:
            self.data.write(data)

        # do we need to read a message header and can we?
        if self.wsz is None and self.data.tell() >= 4:
            self.data.seek(0)
            sz = struct.unpack_from('>I', self.data.read(4))[0]
            self.wsz = sz
            self.data.seek(0, 2)

        if self.wsz is None or self.data.tell() - 4 < self.wsz:
            return None

        # place remaining data into new buffer
        self.data.seek(4)
        _ret = self.data.read(self.wsz)
        ndata = BytesIO()
        ndata.write(self.data.read())
        self.data = ndata

        # return the message and vector
        self.wsz = None
        return _ret

    '''
		This just reads until [uptodate] so it gets a current status of the operation. For long
		term monitoring one could continue to read from the socket and would receive updates.
	'''

    def Fetch(self, sock):
        self.wsz = None

        info = {}
        info['work'] = {}
        info['title'] = {}
        st = time.time()
        anydata = False
        while True:
            # TODO: maybe need some way of detecting if it is the right service
            #       instead of just waiting: look for a hello, or even drop on
            #       invalid messages and invalid lengths. I can see the current
            #       approach causing potentially lengthy waits for certain users.

            # after so long just consider it dead or not the right service..
            sock.settimeout(6)
            # read data if any
            data = sock.recv(1024)
            # if we've been doing this too long or the socket is closed
            if time.time() - st > 6 or not data:
                if anydata:
                    return info
                return None
            # process data into messages
            msg = True
            while msg is not None:
                msg = self.ProcessData(data)
                data = None
                if msg is None:
                    break
                # turn message into UTF8 string
                msg = msg.decode('utf8', 'ignore')
                parts = msg.split(':')
                # okay we have been brought up to date
                if parts[0] == '[uptodate]':
                    anydata = True
                    return info
                # update the info structure
                if parts[0] == '[title]':
                    info['title'][parts[1]] = parts[2]
                    anydata = True
                if parts[0] == '[add]':
                    info['work'][parts[1]] = {}
                    anydata = True
                if parts[0] == '[rem]':
                    del info['work'][parts[1]]
                    anydata = True
                if parts[0] == '[old]':
                    info['work'][parts[1]]['old'] = False
                    anydata = True
                if parts[0] == '[wkey]':
                    info['work'][parts[1]][parts[2]] = parts[3]
                    anydata = True
                # old way (replaced with [wkey])
                if parts[0] == '[status]':
                    info['work'][parts[1]]['status'] = parts[2]
                    anydata = True
                # old way (replaced with [wkey])
                if parts[0] == '[progress]':
                    info['work'][parts[1]]['progress'] = parts[2]
                    anydata = True

    def Scan(self):
        services = {}
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        for x in range(41000, 41100):
            try:
                try:
                    dummy = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    dummy.bind(('localhost', x))
                    dummy.close()
                    continue
                except Exception as e:
                    pass
                # try to connect
                sock.connect(('localhost', x))
                # fetch information (just the current)
                info = self.Fetch(sock)
                if info is not None:
                    services[x] = info
                sock.sendall(b'terminate')
                sock.close()
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            except Exception as e:
                print(e)
                pass
        return services
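
ProcessData assumes every message is preceded by its length as a 4-byte big-endian integer. A sketch of the matching sender side plus a round trip through the parser, relying on wsz being initialised in __init__ as above; frame_message is a hypothetical helper, not part of the original service:

import struct

def frame_message(payload: bytes) -> bytes:
    # Prefix the payload with a 4-byte big-endian length, matching the
    # '>I' header that ProcessData expects.
    return struct.pack('>I', len(payload)) + payload

q = StatusQuery()
assert q.ProcessData(frame_message(b'[title]:1:demo')) == b'[title]:1:demo'
assert q.ProcessData(None) is None  # buffer drained
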
Example #49
0
    def _parse_exports(self):
        """
        Parses the exports trie
        """
        l.debug("Parsing exports")
        blob = self.export_blob
        if blob is None:
            l.debug("Parsing exports done: No exports found")
            return

        # Note: some of these fields are currently unused; keep them to make the used variables explicit
        index = 0
        sym_str = b''
        # index,str
        nodes_to_do = [(0, b'')]
        blob_f = BytesIO(blob)  # easier to handle seeking here

        # constants
        #FLAGS_KIND_MASK = 0x03
        #FLAGS_KIND_REGULAR = 0x00
        #FLAGS_KIND_THREAD_LOCAL = 0x01
        #FLAGS_WEAK_DEFINITION = 0x04
        FLAGS_REEXPORT = 0x08
        FLAGS_STUB_AND_RESOLVER = 0x10

        try:
            while True:
                index, sym_str = nodes_to_do.pop()
                l.debug("Processing node %#x %r", index, sym_str)
                blob_f.seek(index, SEEK_SET)
                info_len = struct.unpack("B", blob_f.read(1))[0]
                if info_len > 127:
                    # special case
                    blob_f.seek(-1, SEEK_CUR)
                    tmp = read_uleb(blob, blob_f.tell())  # a bit kludgy
                    info_len = tmp[0]
                    blob_f.seek(tmp[1], SEEK_CUR)

                if info_len > 0:
                    # a symbol is complete
                    tmp = read_uleb(blob, blob_f.tell())
                    blob_f.seek(tmp[1], SEEK_CUR)
                    flags = tmp[0]
                    if flags & FLAGS_REEXPORT:
                        # REEXPORT: uleb:lib ordinal, zero-term str
                        tmp = read_uleb(blob, blob_f.tell())
                        blob_f.seek(tmp[1], SEEK_CUR)
                        lib_ordinal = tmp[0]
                        lib_sym_name = b''
                        char = blob_f.read(1)
                        while char != b'\0':
                            lib_sym_name += char
                            char = blob_f.read(1)
                        l.info("Found REEXPORT export %r: %d,%r", sym_str, lib_ordinal, lib_sym_name)
                        self.exports_by_name[sym_str.decode()] = (flags, lib_ordinal, lib_sym_name.decode())
                    elif flags & FLAGS_STUB_AND_RESOLVER:
                        # STUB_AND_RESOLVER: uleb: stub offset, uleb: resolver offset
                        l.warning("EXPORT: STUB_AND_RESOLVER found")
                        tmp = read_uleb(blob, blob_f.tell())
                        blob_f.seek(tmp[1], SEEK_CUR)
                        stub_offset = tmp[0]
                        tmp = read_uleb(blob, blob_f.tell())
                        blob_f.seek(tmp[1], SEEK_CUR)
                        resolver_offset = tmp[0]
                        l.info("Found STUB_AND_RESOLVER export %r: %#x,%#x'", sym_str, stub_offset, resolver_offset)
                        self.exports_by_name[sym_str.decode()] = (flags, stub_offset, resolver_offset)
                    else:
                        # normal: offset from mach header
                        tmp = read_uleb(blob, blob_f.tell())
                        blob_f.seek(tmp[1], SEEK_CUR)
                        symbol_offset = tmp[0] + self.segments[1].vaddr
                        l.info("Found normal export %r: %#x", sym_str, symbol_offset)
                        self.exports_by_name[sym_str.decode()] = (flags, symbol_offset)

                child_count = struct.unpack("B", blob_f.read(1))[0]
                for i in range(0, child_count):
                    child_str = sym_str
                    char = blob_f.read(1)
                    while char != b'\0':
                        child_str += char
                        char = blob_f.read(1)
                    tmp = read_uleb(blob, blob_f.tell())
                    blob_f.seek(tmp[1], SEEK_CUR)
                    next_node = tmp[0]
                    l.debug("%d. child: (%#x, %r)", i, next_node, child_str)
                    nodes_to_do.append((next_node, child_str))

        except IndexError:
            # List is empty we are done!
            l.debug("Done parsing exports")
Example #50
0
class picam(control.Control):
    """Set a connection socket to the camera."""

    __REQUIRED = ["width", "height", "ethernet"]

    def __init__(self):
        self.camera = PiCamera()

        # PiCamera Settings
        self.camera.resolution = (self.width, self.height)
        if not hasattr(self, 'framerate'):
            self.framerate = 24
        if not hasattr(self, 'frec'):
            self.frec = 0.02

        self.camera.framerate = self.framerate
        self.camera.contrast = 0
        self.camera.brightness = 50
        self.camera.video_stabilization = True
        self.camera.image_effect = 'none'
        self.camera.color_effects = None
        self.camera.rotation = 0

        self.camera.hflip = True if not hasattr(self, 'hflip') else self.hflip
        self.camera.vflip = True if not hasattr(self, 'vflip') else self.vflip

        #self.camera.sharpness = 0
        #self.camera.saturation = 0
        #self.camera.ISO = 0
        #self.camera.sure_compensation = 0
        #self.camera.exposure_mode = 'auto'
        #self.camera.meter_mode = 'average'
        #self.camera.awb_mode = 'auto'
        #self.camera.crop = (0.0, 0.0, 1.0, 1.0)

        # Stream settings
        self.buffer = BytesIO()
        self.clients = list()
        self.initPort = 9000

        self.ip = utils.get_ip_address(self.ethernet)

        self.start_worker(self.worker_read)
        self.start_thread(self.removeClosedConnections)

    def worker_read(self):
        """Main worker."""
        while self.worker_run:
            for foo in self.camera.capture_continuous(self.buffer, 'jpeg', use_video_port=True):
                # Wait until there are clients ready to receive frames...
                while len(self.clients) == 0:
                    time.sleep(2)
                try:
                    self.acceptConnections()
                    streamPosition = self.buffer.tell()
                    for c in self.clients:
                        if c.closed is False:
                            try:
                                if c.connection != 0:
                                    c.connection.write(
                                        struct.pack('<L', streamPosition))
                                    c.connection.flush()
                            except Exception as e:
                                closer = threading.Thread(
                                    target=self.setAsClosed, args=(c,))
                                closer.start()
                    self.buffer.seek(0)
                    readBuffer = self.buffer.read()
                    for c in self.clients:
                        if c.closed is False:
                            try:
                                if c.connection != 0:
                                    c.connection.write(readBuffer)
                            except Exception as e:
                                closer = threading.Thread(
                                    target=self.setAsClosed, args=(c,))
                                closer.start()
                    self.buffer.seek(0)
                    self.buffer.truncate()
                except Exception as e:
                    utils.format_exception(e)

    @Pyro4.expose
    def image(self):
        """Return IP and PORT to socket conection """
        newClient = ClientSocket(self.initPort + 1)
        self.clients.append(newClient)
        self.initPort = newClient.port
        while not (newClient.waitingForConnection):
            time.sleep(2)
        return self.ip, newClient.port

    def acceptConnections(self):
        """Accept connections from clients"""
        # print "Aceptando conexiones desde picamera"
        for c in self.clients:
            c.acceptConnection()

    def setAsClosed(self, client, exception=None):
        """Set client as closed"""
        # print "Client: ", client.getClient(), "closing."
        client.setClosed()
        try:
            client.connection.write(struct.pack('<L', 0))
            client.connection.close()
            client.serverSocket.close()
        except Exception:
            pass
        if (exception is not None):
            utils.format_exception(exception)

    def removeClosedConnections(self, sec=20):
        """Cleaner. Remove clients marked as closed every "sec" seconds."""
        while self.worker_run:
            time.sleep(sec)
            # print "Antes:", self.clients
            self.clients = [c for c in self.clients if not c.closed]
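
Each frame above goes out as a little-endian 4-byte length followed by the JPEG bytes, and setAsClosed sends a zero length to signal the end of the stream. A sketch of the matching client-side read; read_frame and the file-like connection are hypothetical:

import struct

def read_frame(connection):
    # Read one length-prefixed JPEG frame; a zero length means "stream over".
    header = connection.read(struct.calcsize('<L'))
    frame_len = struct.unpack('<L', header)[0]
    if not frame_len:
        return None
    return connection.read(frame_len)
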
Example #51
0
class YaraRule(object):
    def __init__(self, version, name='SwissCheese', ns='default'):

        self.rulename = name
        self.namespace = ns
        self.code_relocations = []
        self.target_version = version
        self.code = BytesIO()

    def addCode(self, bytecode, relocations):
        # offset = self.code.tell()
        offset = 0
        assert self.code.tell() == 0, \
            'multiple code offsets not supported as of yet'
        self.code.write(bytecode)
        for r in relocations:
            self.code_relocations.append(offset + r)

    def compile(self, fix_hash=True):
        # TODO:  assert code[-1] == YaraAssembler.END_OF_CODE, "code doesn't end with END_OF_CODE opcode"

        self.code.seek(0)
        reloc_buffer = ''
        file_hash = 0
        ns_c_str = make_c_str(self.namespace)
        rulename_c_str = make_c_str(self.rulename)
        struct_relocations = []

        with pragma.pack(1):
            BODY = struct([
                (YARA_RULES_FILE_HEADER, 'rules_file_hdr'),
                (YR_RULE, 'rule'),
                (YR_RULE, 'nullrule'),
                (YR_EXTERNAL_VARIABLE, 'nullexternal'),
                (YR_NAMESPACE, 'namespace'),
                (BYTE[len(ns_c_str)], 'ns_name'),
                (BYTE[len(rulename_c_str)], 'rule_name'),
                (YR_AC_MATCH_TABLE, 'match_table'),
                (EMPTY_TRANSTION_TABLE, 'transition_table'),
            ])

        body = BODY()
        rulehdroffset = offsetof('rules_file_hdr', body)

        body.rules_file_hdr.rules_list_head = offsetof('rule', body)
        struct_relocations.append(
            rulehdroffset +
            offsetof('rules_list_head', YARA_RULES_FILE_HEADER))

        body.rules_file_hdr.externals_list_head = offsetof(
            'nullexternal', body)
        struct_relocations.append(
            rulehdroffset +
            offsetof('externals_list_head', YARA_RULES_FILE_HEADER))

        body.rules_file_hdr.ac_match_table = offsetof('match_table', body)
        struct_relocations.append(
            rulehdroffset + offsetof('ac_match_table', YARA_RULES_FILE_HEADER))

        body.rules_file_hdr.ac_transition_table = offsetof(
            'transition_table', body)
        struct_relocations.append(
            rulehdroffset +
            offsetof('ac_transition_table', YARA_RULES_FILE_HEADER))

        file_code_start_offset = sizeof(body)
        body.rules_file_hdr.code_start = file_code_start_offset
        struct_relocations.append(
            rulehdroffset + offsetof('code_start', YARA_RULES_FILE_HEADER))

        body.rule.identifier = offsetof('rule_name', body)
        struct_relocations.append(
            offsetof('rule', body) + offsetof('identifier', YR_RULE))

        body.rule.ns = offsetof('namespace', body)
        struct_relocations.append(
            offsetof('rule', body) + offsetof('ns', YR_RULE))

        body.nullrule = BYTE(UNUSED) * sizeof(YR_RULE)
        body.nullrule.g_flags = RULE_GFLAGS_NULL
        body.ns_name = ns_c_str
        body.rule_name = rulename_c_str
        body.nullexternal = BYTE(UNUSED) * sizeof(YR_EXTERNAL_VARIABLE)
        body.nullexternal.type = EXTERNAL_VARIABLE_TYPE_NULL

        for r in struct_relocations:
            reloc_buffer += DWORD(r)

        self.code.seek(0)
        final_code_bytes = self.code.read()

        for r in self.code_relocations[:]:
            reloc_buffer += DWORD(file_code_start_offset + r)  # add base of code
            patched = QWORD(file_code_start_offset +
                            QWORD(final_code_bytes[r:r + 8]))
            final_code_bytes = (final_code_bytes[:r] + patched +
                                final_code_bytes[r + 8:])

        hdr = YR_HDR()

        body_bytes = bytes(body) + final_code_bytes
        if len(body_bytes) < 2048:
            padding = '\xCC' * (2048 - len(body_bytes))
            body_bytes += padding
        # padding for relocation check in 3.8.1; will only work on 32 bit
        body_bytes += '\xCC' * sizeof(DWORD)
        # finalize
        hdr.magic = [ord(l) for l in 'YARA']
        hdr.version.max_threads = self.target_version & 0xFFFF
        hdr.version.arena_ver = (self.target_version & 0xFFFF0000) >> 16
        hdr.size = len(body_bytes)

        hdr_bytes = bytes(hdr)
        file_hash = yr_hash(hdr_bytes)
        file_hash = yr_hash(body_bytes, file_hash)
        return (hdr_bytes + body_bytes + reloc_buffer + DWORD(-1) +
                DWORD(file_hash))
Example #52
0
class BaseModel:
    field_map = {}

    def __init__(self, data):
        self.result = {}
        if isinstance(data, BytesIO):
            self.data = data
            self.raw_data = data
        else:
            self.raw_data = data
            self.data = BytesIO(data)

    def unpack(self, fmt, stream):
        size = struct.calcsize(fmt)
        buf = stream.read(size)
        try:
            return struct.unpack(fmt, buf)
        except struct.error as e:
            logging.error("数据不全:{}".format(buf))

    def main(self):
        self.result = self.archive_d_a_archive_c()
        return self.result

    def decode(self):
        while True:
            j_flag = self.archive_d_j()
            if j_flag < 1:
                break
            j_flag_func_name = 'j_flag_{}'.format(j_flag)
            if hasattr(self, j_flag_func_name):
                getattr(self, j_flag_func_name)()
            else:
                try:
                    self.archive_d_i()
                except ValueError as e:
                    logging.error("数据错误不解析了 j_flag:{} 当前model:{}".format(
                        j_flag, self.__class__))
                    raise e

    def archive_d_b(self):
        flag, = self.unpack(">b", self.data)
        if flag == 0x54:
            data = 0x1
        elif flag in [0x46, 0x4e]:
            data = 0x0
        else:
            logging.error("archive_d_b抛错:unable to read boolean")
            raise ValueError()
        logging.info("找到bool:{}".format(data))
        return data

    def archive_d_c(self):
        flag, = self.unpack(">b", self.data)
        if flag == 0x49:
            data, = self.unpack(">i", self.data)
        elif flag == 0x4e:
            data = 0x00
        else:
            logging.error("archive_d_c抛错")
            raise ValueError()
        logging.info("找到int:{}".format(data))
        return data

    def archive_d_d(self):
        flag, = self.unpack(">b", self.data)
        if flag == 0x4c:
            data, = self.unpack(">q", self.data)
        elif flag == 0x4e:
            data = 0x0
        else:
            logging.error("archive_d_d抛错")
            raise ValueError()
        logging.info("archive_d_d找到string:{}".format(data))
        return data

    def archive_d_j(self):
        flag, = self.unpack(">b", self.data)
        if flag == 0x4d:
            data, = self.unpack(">h", self.data)
            data &= 0xffff
        elif flag == 0x5a:
            data = 0x00
        else:
            logging.error("archive_d_j抛错")
            raise ValueError()
        logging.info("当前model{}".format(self.__class__))
        return data

    def archive_d_e(self):
        flag, = self.unpack(">b", self.data)

        if flag == 0x44:
            data, = self.unpack(">d", self.data)
        elif flag == 0x4e:
            data = 0x0
        else:
            logging.error("archive_d_e抛错")
            raise ValueError()
        logging.info("当前model{}".format(self.__class__))
        return data

    def archive_d_g(self):
        flag, = self.unpack(">b", self.data)
        if flag == 0x53:
            length, = self.unpack(">h", self.data)
            length = 0xffff & length
            data, = self.unpack(">{}s".format(length), self.data)
        elif flag == 0x4e:
            data = b""
        elif flag == 0x42:
            length, = self.unpack(">i", self.data)
            j = (int(length / 0x1000) + 0x1) * 0x1000
            data, = self.unpack(">{}s".format(length), self.data)
        else:
            logging.error("archive_d_g抛错")
            raise ValueError()

        logging.info("找到string:{}".format(data.decode()))
        return data.decode()

    def archive_d_n(self):
        data = []
        flag, = self.unpack(">b", self.data)
        if flag == 0x4e:
            data = [""]
        elif flag == 0x41:
            length, = self.unpack(">h", self.data)
            length = 0xffff & length
            for i in range(length):
                data.append(self.archive_d_g())
        else:
            logging.error("archive_d_n抛错")
            raise ValueError()
        logging.info("找到string:{}".format(data))
        return data

    def archive_d_i(self):
        flag, = self.unpack(">b", self.data)
        if flag == 0x41:
            length, = self.unpack(">h", self.data)
            length = length & 0xffff
            for i in range(length):
                self.archive_d_i()
        elif flag == 0x44:
            self.unpack(">d", self.data)
        elif flag == 0x49:
            self.unpack(">i", self.data)
        elif flag == 0x4c:
            self.unpack(">q", self.data)
        elif flag == 0x4f:
            self.unpack(">h", self.data)
            while self.archive_d_j() > 0:
                self.archive_d_i()
        elif flag == 0x53:
            position, = self.unpack(">h", self.data)
            position = (position & 0xffff) + self.data.tell()
            self.data.seek(position)
        elif flag == 0x55:
            self.unpack(">i", self.data)
        elif flag in [
                0x46,
                0x4e,
                0x54,
        ]:
            pass
        elif flag in [
                0x42, 0x43, 0x45, 0x47, 0x48, 0x4a, 0x4b, 0x4d, 0x50, 0x51,
                0x52
        ]:
            raise ValueError("unable to skip object:")

    def archive_d_a_archive_c(self):
        flag, = self.unpack(">b", self.data)
        if flag == 0x4e:
            logging.info("创建了一个空对象")
            return {}
        elif flag == 0x4f:
            data, = self.unpack(">h", self.data)
            data &= 0xffff
            model_class = flag_model_map.get(data, None)
            if model_class:
                model_instance = model_class(self.data)
                model_instance.decode()
                return model_instance.result
            else:
                logging.error("archive_d_a_archive_c未找到此model:{}".format(
                    hex(data)))
                raise ValueError()
        else:
            logging.error("archive_d_a_archive_c抛错")
            raise ValueError()

    def archive_d_b_archive_c(self):
        result = []
        flag, = self.unpack(">b", self.data)
        if flag == 0x4e:
            logging.info("创建空对象")
            return []
        elif flag == 0x41:
            length, = self.unpack(">h", self.data)
            length = 0xffff & length
            for i in range(length):
                data = self.archive_d_a_archive_c()
                logging.info("创建了一个对象:{}".format(data))
                result.append(data)
            return result
        else:
            logging.error("抛错")
            raise ValueError()
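
The one-byte flags above are ASCII type tags: 0x49 is 'I' for int32, 0x4c is 'L' for int64, 0x53 is 'S' for string, 0x4e is 'N' for null, and so on. A quick sanity check of the int reader against a hand-built stream (hypothetical data):

import struct

# 'I' tag (0x49) followed by a big-endian int32
m = BaseModel(struct.pack('>bi', 0x49, 42))
assert m.archive_d_c() == 42
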
Example #53
0
class InputFile:
    max_buffer_size = 1024 * 1024

    def __init__(self, rfile, length):
        """File-like object used to provide a seekable view of request body data"""
        self._file = rfile
        self.length = length

        self._file_position = 0

        if length > self.max_buffer_size:
            self._buf = tempfile.TemporaryFile()
        else:
            self._buf = BytesIO()

    @property
    def _buf_position(self):
        rv = self._buf.tell()
        assert rv <= self._file_position
        return rv

    def read(self, bytes=-1):
        assert self._buf_position <= self._file_position

        if bytes < 0:
            bytes = self.length - self._buf_position
        bytes_remaining = min(bytes, self.length - self._buf_position)

        if bytes_remaining == 0:
            return b""

        if self._buf_position != self._file_position:
            buf_bytes = min(bytes_remaining,
                            self._file_position - self._buf_position)
            old_data = self._buf.read(buf_bytes)
            bytes_remaining -= buf_bytes
        else:
            old_data = b""

        assert bytes_remaining == 0 or self._buf_position == self._file_position, (
            "Before reading buffer position (%i) didn't match file position (%i)"
            % (self._buf_position, self._file_position))
        new_data = self._file.read(bytes_remaining)
        self._buf.write(new_data)
        self._file_position += bytes_remaining
        assert bytes_remaining == 0 or self._buf_position == self._file_position, (
            "After reading buffer position (%i) didn't match file position (%i)"
            % (self._buf_position, self._file_position))

        return old_data + new_data

    def tell(self):
        return self._buf_position

    def seek(self, offset):
        if offset > self.length or offset < 0:
            raise ValueError
        if offset <= self._file_position:
            self._buf.seek(offset)
        else:
            self.read(offset - self._file_position)

    def readline(self, max_bytes=None):
        if max_bytes is None:
            max_bytes = self.length - self._buf_position

        if self._buf_position < self._file_position:
            data = self._buf.readline(max_bytes)
            if data.endswith(b"\n") or len(data) == max_bytes:
                return data
        else:
            data = b""

        assert self._buf_position == self._file_position

        initial_position = self._file_position
        found = False
        buf = []
        max_bytes -= len(data)
        while not found:
            readahead = self.read(min(2, max_bytes))
            max_bytes -= len(readahead)
            for i, c in enumerate(readahead):
                if c == b"\n"[0]:
                    buf.append(readahead[:i + 1])
                    found = True
                    break
            if not found:
                buf.append(readahead)
            if not readahead or not max_bytes:
                break
        new_data = b"".join(buf)
        data += new_data
        self.seek(initial_position + len(new_data))
        return data

    def readlines(self):
        rv = []
        while True:
            data = self.readline()
            if data:
                rv.append(data)
            else:
                break
        return rv

    def __next__(self):
        data = self.readline()
        if data:
            return data
        else:
            raise StopIteration

    next = __next__

    def __iter__(self):
        return self
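
A short sketch of the buffering behaviour above, using hypothetical input: bytes already pulled from the underlying stream can be re-read by seeking back into the buffer.

from io import BytesIO

body = b'first line\nsecond line\n'
f = InputFile(BytesIO(body), length=len(body))  # BytesIO stands in for rfile
assert f.readline() == b'first line\n'
f.seek(0)  # rewind inside the already-buffered prefix
assert f.read() == body
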
Example #54
0
def decode(data: bytes, max_size: int = 4096) -> bytearray:
    """
    Performs LZSS decoding

    Parameters
    ----------
    data: bytes
        A string of bytes to decompress

    max_size: int
        Maximum size of uncompressed data, in bytes

    Returns
    -------
    bytearray
        A bytearray containing the uncompressed data
    """
    reader = BytesIO(data)
    length = len(reader.getbuffer())

    flags = 0       # current flag byte
    flags_used = 7  # bits already consumed; forces a flag read on the first pass

    out_data = bytearray()

    while len(out_data) < max_size:

        flags = flags >> 1
        flags_used = flags_used + 1

        # If all flag bits have been shifted out, read a new flag
        if flags_used == 8:

            if reader.tell() == length:
                break

            flags = reader.read(1)[0]
            flags_used = 0

        # Found an unencoded byte
        if (flags & 1) != 0:

            if reader.tell() == length:
                break

            out_data.append(reader.read(1)[0])

        # Found encoded data
        else:

            if reader.tell() == length:
                break

            code_offset = reader.read(1)[0]

            if reader.tell() == length:
                break

            code_length = reader.read(1)[0] + MAX_UNENCODED + 1

            for i in range(0, code_length):
                out_data.append(out_data[len(out_data) - (code_offset + 1)])

    return out_data
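
A quick sanity check of the decoder with hypothetical input: a flag byte of 0xFF marks the next eight bytes as literals, so 0xFF followed by eight bytes decodes to those bytes unchanged.

assert decode(b'\xff' + b'ABCDEFGH') == bytearray(b'ABCDEFGH')
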
Example #55
0
class PDFContentParser(PSStackParser):
    def __init__(self, streams):
        self.streams = streams
        self.istream = 0
        PSStackParser.__init__(self, None)
        return

    def fillfp(self):
        if not self.fp:
            if self.istream < len(self.streams):
                strm = stream_value(self.streams[self.istream])
                self.istream += 1
            else:
                raise PSEOF('Unexpected EOF, file truncated?')
            self.fp = BytesIO(strm.get_data())
        return

    def seek(self, pos):
        self.fillfp()
        PSStackParser.seek(self, pos)
        return

    def fillbuf(self):
        if self.charpos < len(self.buf):
            return
        while 1:
            self.fillfp()
            self.bufpos = self.fp.tell()
            self.buf = self.fp.read(self.BUFSIZ)
            if self.buf:
                break
            self.fp = None
        self.charpos = 0
        return

    def get_inline_data(self, pos, target=b'EI'):
        self.seek(pos)
        i = 0
        data = b''
        while i <= len(target):
            self.fillbuf()
            if i:
                c = self.buf[self.charpos]
                c = bytes((c, ))
                data += c
                self.charpos += 1
                if len(target) <= i and c.isspace():
                    i += 1
                elif i < len(target) and c == (bytes((target[i], ))):
                    i += 1
                else:
                    i = 0
            else:
                try:
                    j = self.buf.index(target[0], self.charpos)
                    data += self.buf[self.charpos:j + 1]
                    self.charpos = j + 1
                    i = 1
                except ValueError:
                    data += self.buf[self.charpos:]
                    self.charpos = len(self.buf)
        data = data[:-(len(target) + 1)]  # strip the last part
        data = re.sub(br'(\x0d\x0a|[\x0d\x0a])$', b'', data)
        return (pos, data)

    def flush(self):
        self.add_results(*self.popall())
        return

    KEYWORD_BI = KWD(b'BI')
    KEYWORD_ID = KWD(b'ID')
    KEYWORD_EI = KWD(b'EI')

    def do_keyword(self, pos, token):
        if token is self.KEYWORD_BI:
            # inline image within a content stream
            self.start_type(pos, 'inline')
        elif token is self.KEYWORD_ID:
            try:
                (_, objs) = self.end_type('inline')
                if len(objs) % 2 != 0:
                    error_msg = 'Invalid dictionary construct: {!r}' \
                        .format(objs)
                    raise PSTypeError(error_msg)
                d = {literal_name(k): v for (k, v) in choplist(2, objs)}
                (pos, data) = self.get_inline_data(pos + len(b'ID '))
                obj = PDFStream(d, data)
                self.push((pos, obj))
                self.push((pos, self.KEYWORD_EI))
            except PSTypeError:
                if settings.STRICT:
                    raise
        else:
            self.push((pos, token))
        return
Example #56
0
def test_read_payload_returns_a_payload(self):
    io = BytesIO(codecs.decode('FEEDDEADBEEF' + self.padding, 'hex_codec'))
    self.assertEqual(parser.read_payload(io, 6),
                     codecs.decode('FEEDDEADBEEF', 'hex_codec'))
    self.assertEqual(io.tell(), 6)
Example #57
0
class Renderer(object):
    """Helper class for building DNS wire-format messages.

    Most applications can use the higher-level L{dns.message.Message}
    class and its to_wire() method to generate wire-format messages.
    This class is for those applications which need finer control
    over the generation of messages.

    Typical use::

        r = dns.renderer.Renderer(id=1, flags=0x80, max_size=512)
        r.add_question(qname, qtype, qclass)
        r.add_rrset(dns.renderer.ANSWER, rrset_1)
        r.add_rrset(dns.renderer.ANSWER, rrset_2)
        r.add_rrset(dns.renderer.AUTHORITY, ns_rrset)
        r.add_edns(0, 0, 4096)
        r.add_rrset(dns.renderer.ADDITIONAL, ad_rrset_1)
        r.add_rrset(dns.renderer.ADDITIONAL, ad_rrset_2)
        r.write_header()
        r.add_tsig(keyname, secret, 300, 1, 0, '', request_mac)
        wire = r.get_wire()

    @ivar output: where rendering is written
    @type output: BytesIO object
    @ivar id: the message id
    @type id: int
    @ivar flags: the message flags
    @type flags: int
    @ivar max_size: the maximum size of the message
    @type max_size: int
    @ivar origin: the origin to use when rendering relative names
    @type origin: dns.name.Name object
    @ivar compress: the compression table
    @type compress: dict
    @ivar section: the section currently being rendered
    @type section: int (dns.renderer.QUESTION, dns.renderer.ANSWER,
    dns.renderer.AUTHORITY, or dns.renderer.ADDITIONAL)
    @ivar counts: list of the number of RRs in each section
    @type counts: int list of length 4
    @ivar mac: the MAC of the rendered message (if TSIG was used)
    @type mac: string
    """
    def __init__(self, id=None, flags=0, max_size=65535, origin=None):
        """Initialize a new renderer.

        @param id: the message id
        @type id: int
        @param flags: the DNS message flags
        @type flags: int
        @param max_size: the maximum message size; the default is 65535.
        If rendering results in a message greater than I{max_size},
        then L{dns.exception.TooBig} will be raised.
        @type max_size: int
        @param origin: the origin to use when rendering relative names
        @type origin: dns.name.Name or None.
        """

        self.output = BytesIO()
        if id is None:
            self.id = random.randint(0, 65535)
        else:
            self.id = id
        self.flags = flags
        self.max_size = max_size
        self.origin = origin
        self.compress = {}
        self.section = QUESTION
        self.counts = [0, 0, 0, 0]
        self.output.write(b'\x00' * 12)
        self.mac = ''

    def _rollback(self, where):
        """Truncate the output buffer at offset I{where}, and remove any
        compression table entries that pointed beyond the truncation
        point.

        @param where: the offset
        @type where: int
        """

        self.output.seek(where)
        self.output.truncate()
        keys_to_delete = []
        for k, v in self.compress.items():
            if v >= where:
                keys_to_delete.append(k)
        for k in keys_to_delete:
            del self.compress[k]

    def _set_section(self, section):
        """Set the renderer's current section.

        Sections must be rendered in order: QUESTION, ANSWER, AUTHORITY,
        ADDITIONAL.  Sections may be empty.

        @param section: the section
        @type section: int
        @raises dns.exception.FormError: an attempt was made to set
        a section value less than the current section.
        """

        if self.section != section:
            if self.section > section:
                raise dns.exception.FormError
            self.section = section

    def add_question(self, qname, rdtype, rdclass=dns.rdataclass.IN):
        """Add a question to the message.

        @param qname: the question name
        @type qname: dns.name.Name
        @param rdtype: the question rdata type
        @type rdtype: int
        @param rdclass: the question rdata class
        @type rdclass: int
        """

        self._set_section(QUESTION)
        before = self.output.tell()
        qname.to_wire(self.output, self.compress, self.origin)
        self.output.write(struct.pack("!HH", rdtype, rdclass))
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise dns.exception.TooBig
        self.counts[QUESTION] += 1

    def add_rrset(self, section, rrset, **kw):
        """Add the rrset to the specified section.

        Any keyword arguments are passed on to the rdataset's to_wire()
        routine.

        @param section: the section
        @type section: int
        @param rrset: the rrset
        @type rrset: dns.rrset.RRset object
        """

        self._set_section(section)
        before = self.output.tell()
        n = rrset.to_wire(self.output, self.compress, self.origin, **kw)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise dns.exception.TooBig
        self.counts[section] += n

    def add_rdataset(self, section, name, rdataset, **kw):
        """Add the rdataset to the specified section, using the specified
        name as the owner name.

        Any keyword arguments are passed on to the rdataset's to_wire()
        routine.

        @param section: the section
        @type section: int
        @param name: the owner name
        @type name: dns.name.Name object
        @param rdataset: the rdataset
        @type rdataset: dns.rdataset.Rdataset object
        """

        self._set_section(section)
        before = self.output.tell()
        n = rdataset.to_wire(name, self.output, self.compress, self.origin,
                             **kw)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise dns.exception.TooBig
        self.counts[section] += n

    def add_edns(self, edns, ednsflags, payload, options=None):
        """Add an EDNS OPT record to the message.

        @param edns: The EDNS level to use.
        @type edns: int
        @param ednsflags: EDNS flag values.
        @type ednsflags: int
        @param payload: The EDNS sender's payload field, which is the maximum
        size of UDP datagram the sender can handle.
        @type payload: int
        @param options: The EDNS options list
        @type options: list of dns.edns.Option instances
        @see: RFC 2671
        """

        # make sure the EDNS version in ednsflags agrees with edns
        ednsflags &= 0xFF00FFFF
        ednsflags |= (edns << 16)
        self._set_section(ADDITIONAL)
        before = self.output.tell()
        self.output.write(
            struct.pack('!BHHIH', 0, dns.rdatatype.OPT, payload, ednsflags, 0))
        if options is not None:
            lstart = self.output.tell()
            for opt in options:
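                # Write a 2-byte length placeholder (the second "H" packs 0),
                # render the option, then seek back and patch the real length.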
                stuff = struct.pack("!HH", opt.otype, 0)
                self.output.write(stuff)
                start = self.output.tell()
                opt.to_wire(self.output)
                end = self.output.tell()
                assert end - start < 65536
                self.output.seek(start - 2)
                stuff = struct.pack("!H", end - start)
                self.output.write(stuff)
                self.output.seek(0, 2)
            lend = self.output.tell()
            assert lend - lstart < 65536
            self.output.seek(lstart - 2)
            stuff = struct.pack("!H", lend - lstart)
            self.output.write(stuff)
            self.output.seek(0, 2)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise dns.exception.TooBig
        self.counts[ADDITIONAL] += 1

    def add_tsig(self,
                 keyname,
                 secret,
                 fudge,
                 id,
                 tsig_error,
                 other_data,
                 request_mac,
                 algorithm=dns.tsig.default_algorithm):
        """Add a TSIG signature to the message.

        @param keyname: the TSIG key name
        @type keyname: dns.name.Name object
        @param secret: the secret to use
        @type secret: string
        @param fudge: TSIG time fudge
        @type fudge: int
        @param id: the message id to encode in the tsig signature
        @type id: int
        @param tsig_error: TSIG error code; default is 0.
        @type tsig_error: int
        @param other_data: TSIG other data.
        @type other_data: string
        @param request_mac: This message is a response to the request which
        had the specified MAC.
        @type request_mac: string
        @param algorithm: the TSIG algorithm to use
        @type algorithm: dns.name.Name object
        """

        self._set_section(ADDITIONAL)
        before = self.output.tell()
        s = self.output.getvalue()
        (tsig_rdata, self.mac, ctx) = dns.tsig.sign(s,
                                                    keyname,
                                                    secret,
                                                    int(time.time()),
                                                    fudge,
                                                    id,
                                                    tsig_error,
                                                    other_data,
                                                    request_mac,
                                                    algorithm=algorithm)
        keyname.to_wire(self.output, self.compress, self.origin)
        self.output.write(
            struct.pack('!HHIH', dns.rdatatype.TSIG, dns.rdataclass.ANY, 0, 0))
        rdata_start = self.output.tell()
        self.output.write(tsig_rdata)
        after = self.output.tell()
        assert after - rdata_start < 65536
        if after >= self.max_size:
            self._rollback(before)
            raise dns.exception.TooBig
        self.output.seek(rdata_start - 2)
        self.output.write(struct.pack('!H', after - rdata_start))
        self.counts[ADDITIONAL] += 1
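        # The ADDITIONAL RR count lives at bytes 10-11 of the DNS header;
        # rewrite it in place now that the TSIG record has been appended.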
        self.output.seek(10)
        self.output.write(struct.pack('!H', self.counts[ADDITIONAL]))
        self.output.seek(0, 2)

    def write_header(self):
        """Write the DNS message header.

        Writing the DNS message header is done after all sections
        have been rendered, but before the optional TSIG signature
        is added.
        """

        self.output.seek(0)
        self.output.write(
            struct.pack('!HHHHHH', self.id, self.flags, self.counts[0],
                        self.counts[1], self.counts[2], self.counts[3]))
        self.output.seek(0, 2)

    def get_wire(self):
        """Return the wire format message.

        @rtype: string
        """

        return self.output.getvalue()
Example #58
    def _read(cls, io, **kwargs):
        """
        Read data from `io` according to the passed `read_excel` `kwargs` parameters.

        Parameters
        ----------
        io : str, bytes, ExcelFile, xlrd.Book, path object, or file-like object
            `io` parameter of `read_excel` function.
        **kwargs : dict
            Parameters of `read_excel` function.

        Returns
        -------
        new_query_compiler : BaseQueryCompiler
            Query compiler with imported data for further processing.
        """
        if (kwargs.get("engine", None) is not None
                and kwargs.get("engine") != "openpyxl"):
            warnings.warn(
                "Modin only implements parallel `read_excel` with `openpyxl` engine, "
                'please specify `engine=None` or `engine="openpyxl"` to '
                "use Modin's parallel implementation.")
            return cls.single_worker_read(io, **kwargs)
        if sys.version_info < (3, 7):
            warnings.warn(
                "Python 3.7 or higher required for parallel `read_excel`.")
            return cls.single_worker_read(io, **kwargs)

        from zipfile import ZipFile
        from openpyxl.worksheet.worksheet import Worksheet
        from openpyxl.worksheet._reader import WorksheetReader
        from openpyxl.reader.excel import ExcelReader
        from modin.backends.pandas.parsers import PandasExcelParser

        sheet_name = kwargs.get("sheet_name", 0)
        if sheet_name is None or isinstance(sheet_name, list):
            warnings.warn(
                "`read_excel` functionality is only implemented for a single sheet at a "
                "time. Multiple sheet reading coming soon!")
            return cls.single_worker_read(io, **kwargs)

        warnings.warn("Parallel `read_excel` is a new feature! Please email "
                      "[email protected] if you run into any problems.")

        # NOTE: ExcelReader() in read-only mode does not close file handle by itself
        # work around that by passing file object if we received some path
        io_file = open(io, "rb") if isinstance(io, str) else io
        try:
            ex = ExcelReader(io_file, read_only=True)
            ex.read()
            wb = ex.wb

            # Get shared strings
            ex.read_manifest()
            ex.read_strings()
            ws = Worksheet(wb)
        finally:
            if isinstance(io, str):
                # close only if it were us who opened the object
                io_file.close()

        pandas_kw = dict(kwargs)  # preserve original kwargs
        with ZipFile(io) as z:
            from io import BytesIO

            # Convert index to sheet name in file
            if isinstance(sheet_name, int):
                sheet_name = "sheet{}".format(sheet_name + 1)
            else:
                sheet_name = "sheet{}".format(
                    wb.sheetnames.index(sheet_name) + 1)
            if any(sheet_name.lower() in name for name in z.namelist()):
                sheet_name = sheet_name.lower()
            elif any(sheet_name.title() in name for name in z.namelist()):
                sheet_name = sheet_name.title()
            else:
                raise ValueError("Sheet {} not found".format(
                    sheet_name.lower()))
            # Pass this value to the workers
            kwargs["sheet_name"] = sheet_name

            f = z.open("xl/worksheets/{}.xml".format(sheet_name))
            f = BytesIO(f.read())
            total_bytes = cls.file_size(f)

            num_partitions = NPartitions.get()
            # Read some bytes from the sheet so we can extract the XML header and first
            # line. We need to make sure we get the first line of the data as well
            # because that is where the column names are. The header information will
            # be extracted and sent to all of the nodes.
            sheet_block = f.read(EXCEL_READ_BLOCK_SIZE)
            end_of_row_tag = b"</row>"
            while end_of_row_tag not in sheet_block:
                sheet_block += f.read(EXCEL_READ_BLOCK_SIZE)
            idx_of_header_end = sheet_block.index(end_of_row_tag) + len(
                end_of_row_tag)
            sheet_header = sheet_block[:idx_of_header_end]
            # Reset the file pointer to begin at the end of the header information.
            f.seek(idx_of_header_end)
            kwargs["_header"] = sheet_header
            footer = b"</sheetData></worksheet>"
            # Use openpyxml to parse the data
            reader = WorksheetReader(ws, BytesIO(sheet_header + footer),
                                     ex.shared_strings, False)
            # Attach cells to the worksheet
            reader.bind_cells()
            data = PandasExcelParser.get_sheet_data(
                ws, kwargs.get("convert_float", True))
            # Extract column names from parsed data.
            column_names = pandas.Index(data[0])
            index_col = kwargs.get("index_col", None)
            # Remove column names that are specified as `index_col`
            if index_col is not None:
                column_names = column_names.drop(column_names[index_col])

            if not all(column_names):
                # some column names are empty, use pandas reader to take the names from it
                pandas_kw["nrows"] = 1
                df = pandas.read_excel(io, **pandas_kw)
                column_names = df.columns

            # Compute partition metadata upfront so it is uniform for all partitions
            chunk_size = max(1, (total_bytes - f.tell()) // num_partitions)
            num_splits = min(len(column_names), num_partitions)
            kwargs["fname"] = io
            # Skiprows will be used to inform a partition how many rows come before it.
            kwargs["skiprows"] = 0
            row_count = 0
            data_ids = []
            index_ids = []
            dtypes_ids = []

            # Compute column metadata
            column_chunksize = compute_chunksize(
                pandas.DataFrame(columns=column_names), num_splits, axis=1)
            if column_chunksize > len(column_names):
                column_widths = [len(column_names)]
                # This prevents us from unnecessarily serializing a bunch of empty
                # objects.
                num_splits = 1
            else:
                column_widths = [
                    column_chunksize if len(column_names) >
                    (column_chunksize * (i + 1)) else 0 if len(column_names) <
                    (column_chunksize * i) else len(column_names) -
                    (column_chunksize * i) for i in range(num_splits)
                ]
            kwargs["num_splits"] = num_splits

            while f.tell() < total_bytes:
                args = kwargs
                args["skiprows"] = row_count + args["skiprows"]
                args["start"] = f.tell()
                chunk = f.read(chunk_size)
                # This edge case can happen when we have reached the end of the data
                # but not the end of the file.
                if b"<row" not in chunk:
                    break
                row_close_tag = b"</row>"
                row_count = re.subn(row_close_tag, b"", chunk)[1]

                # Make sure we are reading at least one row.
                while row_count == 0:
                    chunk += f.read(chunk_size)
                    row_count += re.subn(row_close_tag, b"", chunk)[1]

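                # Rewind the file pointer to just past the last complete
                # </row> so the next partition starts on a row boundary.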
                last_index = chunk.rindex(row_close_tag)
                f.seek(-(len(chunk) - last_index) + len(row_close_tag), 1)
                args["end"] = f.tell()

                # If there is no data, exit before triggering computation.
                if b"</row>" not in chunk and b"</sheetData>" in chunk:
                    break
                remote_results_list = cls.deploy(cls.parse, num_splits + 2,
                                                 args)
                data_ids.append(remote_results_list[:-2])
                index_ids.append(remote_results_list[-2])
                dtypes_ids.append(remote_results_list[-1])

                # The end of the spreadsheet
                if b"</sheetData>" in chunk:
                    break

        # Compute the index based on a sum of the lengths of each partition (by default)
        # or based on the column(s) that were requested.
        if index_col is None:
            row_lengths = cls.materialize(index_ids)
            new_index = pandas.RangeIndex(sum(row_lengths))
        else:
            index_objs = cls.materialize(index_ids)
            row_lengths = [len(o) for o in index_objs]
            new_index = index_objs[0].append(index_objs[1:])

        # Compute dtypes by getting collecting and combining all of the partitions. The
        # reported dtypes from differing rows can be different based on the inference in
        # the limited data seen by each worker. We use pandas to compute the exact dtype
        # over the whole column for each column. The index is set below.
        dtypes = cls.get_dtypes(dtypes_ids)

        data_ids = cls.build_partition(data_ids, row_lengths, column_widths)
        # Set the index for the dtypes to the column names
        if isinstance(dtypes, pandas.Series):
            dtypes.index = column_names
        else:
            dtypes = pandas.Series(dtypes, index=column_names)
        new_frame = cls.frame_cls(
            data_ids,
            new_index,
            column_names,
            row_lengths,
            column_widths,
            dtypes=dtypes,
        )
        new_query_compiler = cls.query_compiler_cls(new_frame)
        if index_col is None:
            new_query_compiler._modin_frame._apply_index_objs(axis=0)
        return new_query_compiler
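
# A hedged usage sketch of the public API this classmethod backs (the file
# name here is hypothetical): the parallel path above is taken when a single
# sheet is requested with the openpyxl engine on Python 3.7+.
import modin.pandas as pd

df = pd.read_excel("data.xlsx", sheet_name=0, engine="openpyxl")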
Example #59
import socket
import struct
from io import BytesIO
from time import sleep, time

from picamera import PiCamera

# create socket and connect to the host
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(('172.14.1.126', 8000))
connection = client_socket.makefile('wb')

try:
    with PiCamera() as camera:
        camera.resolution = (320, 240)  # pi camera resolution
        camera.framerate = 10  # 10 frames/sec
        sleep(2)  # give 2 secs for camera to initialize
        start = time()
        stream = BytesIO()

        # send jpeg format video stream
        for foo in camera.capture_continuous(stream,
                                             'jpeg',
                                             use_video_port=True):
            connection.write(struct.pack('<L', stream.tell()))
            connection.flush()
            stream.seek(0)
            connection.write(stream.read())
            if time() - start > 600:
                break
            stream.seek(0)
            stream.truncate()
    connection.write(struct.pack('<L', 0))
finally:
    connection.close()
    client_socket.close()
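
# A minimal receiving-side sketch matching the sender above (host/port and
# the zero-length end-of-stream marker are assumptions consistent with the
# client code): read the little-endian length prefix, then that many JPEG
# bytes.
import socket
import struct
from io import BytesIO

server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(('0.0.0.0', 8000))
server_socket.listen(1)
connection = server_socket.accept()[0].makefile('rb')
try:
    while True:
        frame_len = struct.unpack('<L',
                                  connection.read(struct.calcsize('<L')))[0]
        if frame_len == 0:  # the client writes a zero length when done
            break
        frame = BytesIO(connection.read(frame_len))  # one complete JPEG
        # ... hand `frame` to a decoder or display here ...
finally:
    connection.close()
    server_socket.close()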
Example #60
from io import BytesIO
from struct import unpack


class DataChunk:
    
    def __init__(self):
        self.times = 0
        self.expect_new = True
        self.current_msg_len = 0
        self.current_buffer = BytesIO()

    def handle_msg(self, data):
        self.handle_chunk(data)
    
    def handle_chunk(self, data):
        self.times += 1
        print(len(data))
        
    def process_chunk(self, data):
        datalen = len(data)
        if self.expect_new:
            if datalen >= 4:
                self.current_msg_len = unpack('i', data[0:4])[0]
                if (self.current_msg_len + 4) == datalen:
                    # We got the entire packet 
                    self.handle_msg(data[4:])
                    return
                elif (self.current_msg_len + 4) > datalen:
                    # We need some more bytes
                    self.current_buffer.write(data[4:])
                    self.expect_new = False
                else:
                    # We may have got more than one message
                    start = 4
                    while True:
                        self.handle_msg(data[start : start + self.current_msg_len])
                        start = start + self.current_msg_len
                        if start == datalen:
                            # We finished all the bytes and there is no incomplete messages
                            # in the bytes
                            self.expect_new = True
                            self.current_msg_len = -1
                            self.current_buffer = BytesIO()
                            break
                        if 4 <= (datalen - start):
                            self.current_msg_len = unpack('i', data[start : start + 4])[0]
                            start += 4
                            if (datalen - start) >= self.current_msg_len:
                                # we have this message also in this buffer
                                continue
                            else:
                                # This message is incomplete, wait for the next chunk
                                self.expect_new = False
                                self.current_buffer = BytesIO()
                                self.current_buffer.write(data[start:])
                                break
                        else:
                            # we don't even know the size of the current buffer
                            self.current_msg_len = -1
                            self.current_buffer = BytesIO()
                            self.current_buffer.write(data[start:])
                            self.expect_new = False
                            break
            else:
                # We haven't even received 4 bytes of data for this brand new 
                # packet
                self.expect_new = False
                self.current_buffer = BytesIO()
                self.current_buffer.write(data)
                self.current_msg_len = -1
        else:
            # Not a new message
            start = 0
            if self.current_msg_len == -1:
                # try to get the message len
                if datalen >= (4 - self.current_buffer.tell()):
                    #get the length of the data
                    start = 4 - self.current_buffer.tell()
                    self.current_buffer.write(data[0: start])
                    self.current_buffer.seek(0)
                    self.current_msg_len = unpack('i', self.current_buffer.read())[0]
                    self.current_buffer = BytesIO()
                else:
                    # Till now even the size of the data is not known
                    self.current_buffer.write(data)
                    return
            while start < datalen:
                if self.current_buffer is None:
                    self.current_buffer = BytesIO()
                if self.current_msg_len == -1:
                    if (datalen - start) < 4:
                        self.current_buffer.write(data[start:])
                        break
                    elif (datalen - start) == 4:
                        self.current_msg_len = unpack('i', data[start:])[0]
                        break
                    else:
                        self.current_msg_len = unpack('i', data[start: start + 4])[0]
                        start += 4
                if (datalen - start) >= (self.current_msg_len - self.current_buffer.tell()):
                    consume = self.current_msg_len - self.current_buffer.tell()
                    self.current_buffer.write(data[start: start + consume])
                    start += consume
                    self.current_msg_len = -1
                    self.current_buffer.seek(0)
                    self.handle_msg(self.current_buffer.read())
                    self.current_buffer = BytesIO()
                    if start == datalen:
                        self.expect_new = True
                else:
                    self.current_buffer.write(data[start:])
                    break
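
# A minimal exercise of the reassembly path above (a sketch, not part of the
# original class): one 5-byte message split across two "network reads";
# handle_msg fires once the second fragment completes the frame. Native-endian
# 'i' matches the unpack('i', ...) calls in process_chunk.
from struct import pack

chunk = DataChunk()
payload = b"hello"
framed = pack('i', len(payload)) + payload
chunk.process_chunk(framed[:3])   # not even the 4-byte length prefix yet
chunk.process_chunk(framed[3:])   # completes the prefix and the payload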