Example #1
def unpack_list_str_fl(fl: RawIOBase, head_len: int,
                       str_head_len: int) -> List[bytes]:
    len_lst = int.from_bytes(fl.read(head_len), "little")
    return [
        fl.read(int.from_bytes(fl.read(str_head_len), "little"))
        for _ in range(len_lst)
    ]
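A quick round-trip check of the wire format this reads (a little-endian element count, then a per-string length prefix before each payload); io.BytesIO stands in for the raw stream here, and the packed bytes are hand-built purely for illustration:

import io

buf = io.BytesIO(
    (2).to_bytes(2, "little")             # element count (head_len=2)
    + (3).to_bytes(1, "little") + b"foo"  # length prefix (str_head_len=1) + payload
    + (3).to_bytes(1, "little") + b"bar"
)
assert unpack_list_str_fl(buf, head_len=2, str_head_len=1) == [b"foo", b"bar"]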
Example #2
    def _read_block_into(in_stream: io.RawIOBase, out_stream: io.RawIOBase):
        MAGIC = 0x00000010

        HEADER_LENGTH = 0x10
        MAGIC_OFFSET = 0x00
        SOURCE_SIZE_OFFSET = 0x08
        RAW_SIZE_OFFSET = 0x0C

        BLOCK_PADDING = 0x80

        COMPRESSION_THRESHOLD = 0x7D00

        # Block:
        # 10h   Header
        # *     Data
        #
        # Header:
        # 4h    Magic
        # 4h    Unknown / Zero
        # 4h    Size in source
        # 4h    Raw size
        # -> If size in source >= 7D00h then data is uncompressed

        header = in_stream.read(HEADER_LENGTH)
        if len(header) != HEADER_LENGTH:
            raise EOFError

        magic_check, = struct.unpack_from('<l', header, MAGIC_OFFSET)
        source_size, = struct.unpack_from('<l', header, SOURCE_SIZE_OFFSET)
        raw_size, = struct.unpack_from('<l', header, RAW_SIZE_OFFSET)

        if magic_check != MAGIC:
            raise NotImplementedError("Magic number not present")

        is_compressed = source_size < COMPRESSION_THRESHOLD

        block_size = source_size if is_compressed else raw_size

        # Compressed blocks are padded so that header + data is a
        # multiple of BLOCK_PADDING (0x80) bytes.
        if is_compressed and (block_size + HEADER_LENGTH) % BLOCK_PADDING != 0:
            block_size += BLOCK_PADDING - (
                (block_size + HEADER_LENGTH) % BLOCK_PADDING)

        buffer = in_stream.read(block_size)
        if len(buffer) != block_size:
            raise EOFError

        if is_compressed:
            if raw_size != out_stream.write(zlib.decompress(buffer, -15)):
                raise RuntimeError(
                    "Inflated block does not match indicated size")
        else:
            out_stream.write(buffer)
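The padding adjustment above rounds the compressed payload up so that header plus data lands on a 0x80-byte boundary; a standalone sketch of the same rounding arithmetic:

HEADER_LENGTH = 0x10
BLOCK_PADDING = 0x80

def padded_block_size(source_size: int) -> int:
    # Round (header + data) up to the next 0x80-byte boundary.
    remainder = (source_size + HEADER_LENGTH) % BLOCK_PADDING
    if remainder:
        source_size += BLOCK_PADDING - remainder
    return source_size

assert padded_block_size(0x70) == 0x70  # 0x70 + 0x10 is already a multiple of 0x80
assert padded_block_size(0x71) == 0xF0  # rounded up to the next boundary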
Example #3
def statistiques(source: io.RawIOBase) -> Tuple[Compteur, int]:
    """
    Computes the byte-frequency statistics used for Huffman compression.

    Returns the counter and the total number of bytes read.
    """
    compteur = Compteur()
    source.seek(0)
    octet = source.read(1)
    total = 0
    while octet:
        compteur.incrementer(octet)
        total += 1
        octet = source.read(1)
    return (compteur, total)
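For comparison, a minimal sketch of the same byte-frequency count using the standard library's collections.Counter; Compteur's API is not shown above, so this is an equivalent illustration rather than a drop-in replacement:

import collections
import io

def byte_statistics(source: io.RawIOBase):
    source.seek(0)
    data = source.read()  # reads the whole stream into memory
    counts = collections.Counter(data[i:i + 1] for i in range(len(data)))
    return counts, len(data)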
Example #4
    def fromFile(file: io.RawIOBase) -> SfaTexture:
        """Instantiate texture from file."""
        self = SfaTexture()

        header = file.read(0x60)
        self.width, self.height = struct.unpack_from('>HH', header, 0x0A)
        self.numMipMaps = struct.unpack_from('>B', header, 0x19)[0] # grumble
        fmtId = struct.unpack_from('>B', header, 0x16)[0] # grumble
        self.format = ImageFormat(fmtId)

        bpp = BITS_PER_PIXEL[self.format]
        dataLen = self.width * self.height * bpp // 8
        return self._fromData(header, file.read(dataLen))
Example #5
    def upload(self, readable: io.RawIOBase, remote_path: str) -> None:
        append = False
        # The shell has a limit on the maximum command-line length;
        # see: getconf ARG_MAX
        while chunk := readable.read(self.upload_chunk_size):
            encoded = base64.b64encode(chunk).decode()
            result = self.exploit(
                'echo "{}" | base64 -d {} {}'.format(
                    encoded, '>>' if append else '>', remote_path
                )
            )
            # logging.debug(result)
            assert result == ''
            append = True
Example #6
def decode_by_char(f: io.RawIOBase) -> str:
    """Returns a ``str`` decoded from the characters in *f*.

    :param f: is expected to be a file object which has been
        opened in binary mode ('rb') or just read mode ('r').

    The *f* stream has one character or byte at a time read from it;
    each is decoded to a string and the individual strings are
    accumulated together.  Once the end of the file is reached or an
    element can no longer be decoded, the accumulated string is
    returned.
    """
    s = ''
    try:
        for elem in iter(lambda: f.read(1), b''):
            if isinstance(elem, str):
                s += elem
            else:
                s += elem.decode()
    except UnicodeError:
        # Expecting this to mean that we got to the end of decodable
        # bytes, so we're all done, and pass through to return s.
        pass

    return s
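A small demonstration of the caveat above: because bytes are decoded one at a time, any multi-byte UTF-8 sequence ends the accumulation at its first byte (io.BytesIO stands in for the binary file object):

import io

assert decode_by_char(io.BytesIO(b"abc")) == "abc"
assert decode_by_char(io.BytesIO("café latte".encode())) == "caf"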
Example #7
def read_header(f_in: io.RawIOBase) -> dict:
    """ reads the header of a file stream with CrossCloud encryption

    :param f_in: readable file-object
    :return: the header dict
    """
    magic_number = f_in.read(len(MAGIC_NUMBER))

    if magic_number != MAGIC_NUMBER:
        raise HeaderError('magic number is {}'.format(magic_number),
                          read_data=magic_number)

    header_length, = struct.unpack('<I', f_in.read(4))

    header_str = f_in.read(header_length).decode(ENCODING)
    return json.loads(header_str)
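A matching writer makes the layout concrete: magic bytes, a 4-byte little-endian header length, then the JSON header itself. MAGIC_NUMBER and ENCODING are module constants not shown above, so the values here are assumptions for illustration only:

import io
import json
import struct

MAGIC_NUMBER = b'CCE1'  # hypothetical value; the real constant lives in the module
ENCODING = 'utf-8'      # likewise an assumption

def write_header(f_out, header: dict) -> None:
    payload = json.dumps(header).encode(ENCODING)
    f_out.write(MAGIC_NUMBER)
    f_out.write(struct.pack('<I', len(payload)))
    f_out.write(payload)

buf = io.BytesIO()
write_header(buf, {'version': 1})
buf.seek(0)
assert read_header(buf) == {'version': 1}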
Example #8
    def write_input_stream(self, in_stream: io.RawIOBase, key: str, prefix=""):
        with open(self.get_full_path(key, prefix=prefix), 'wb') as fout:
            while True:
                r = in_stream.read(self.chunk_size)
                # read() returns b'' at EOF (and None only when a
                # non-blocking stream has no data), so check for both.
                if not r:
                    break
                fout.write(r)
Example #9
def search4cave(stream: io.RawIOBase, section_name: str, section_size: int,
                section_info, cave_size: int, virtaddr: int, _bytes: bytes):
    caves = []
    byte_count = 0

    base = stream.tell()
    offset = 0

    while section_size > 0:
        rb = stream.read(1)
        section_size -= 1
        offset += 1

        if _bytes not in rb:
            if byte_count >= cave_size:
                mr = MiningResult()
                mr.name = section_name
                mr.cave_begin = (base + offset) - byte_count - 1
                mr.cave_end = (base + offset) - 1
                mr.cave_size = byte_count
                mr.virtaddr = virtaddr + offset - byte_count - 1
                mr.info = section_info
                caves.append(mr)
            byte_count = 0
            continue
        byte_count += 1

    stream.seek(base)
    return caves
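A hand-built illustration, assuming the MiningResult class from the same module; note that a run of cave bytes reaching the very end of the section is only recorded once a non-cave byte follows it:

import io

stream = io.BytesIO(b"\xCC" + b"\x00" * 16 + b"\xCC\xCC")
caves = search4cave(stream, ".text", 19, None,
                    cave_size=16, virtaddr=0x401000, _bytes=b"\x00")
assert len(caves) == 1
assert caves[0].cave_begin == 1 and caves[0].cave_size == 16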
Example #10
    def __read(self, stream: io.RawIOBase):
        FILE_TYPE_OFFSET = 0x04
        FILE_LENGTH_OFFSET = 0x10
        FILE_LENGTH_SHIFT = 7

        self._buffer = stream.read(4)

        length, = struct.unpack_from('<l', self._buffer, 0)

        remaining = length - 4

        self._buffer += stream.read(remaining)

        self._file_type, = struct.unpack_from('<l', self._buffer, FILE_TYPE_OFFSET)
        self._length = struct.unpack_from(
            '<l', self._buffer, FILE_LENGTH_OFFSET)[0] << FILE_LENGTH_SHIFT

        self._end_of_header = stream.tell()
Example #11
    def unpack(self, fl: RawIOBase) -> list:
        byteorder = "little" if self.hl_lsb_first else "big"
        ln = int.from_bytes(fl.read(self.head_len), byteorder,
                            signed=False) + self.head_num_off
        rtn = [None] * ln
        for c in range(ln):
            rtn[c] = self.sub_dt.unpack(fl)
        return rtn
Example #12
def verifycave(stream: io.RawIOBase, cave_size, _byte: bytes):
    base = stream.tell()
    success = True
    while cave_size > 0:
        cave_size -= 1
        rb = stream.read(1)
        if _byte not in rb:
            success = False
            break
    stream.seek(base)
    return success
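A hand-built check with io.BytesIO; the stream position is restored afterwards, so both probes below start from offset 0:

import io

stream = io.BytesIO(b"\x00" * 8 + b"\x90")
assert verifycave(stream, 8, b"\x00") is True   # eight NUL bytes are available
assert verifycave(stream, 9, b"\x00") is False  # the ninth byte is not NUL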
Example #13
def readline(f: io.RawIOBase):
    # From: https://github.com/python/cpython/blob/d27e2f4d118e7a9909b6a3e5da06c5ff95806a85/Lib/_pyio.py#L525
    res = bytearray()
    while True:
        b = f.read(1)
        if not b:
            break
        res += b
        if res.endswith(b"\n"):
            break
    return bytes(res)
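A quick check with io.BytesIO, including the edge cases at end of stream:

import io

buf = io.BytesIO(b"first\nsecond")
assert readline(buf) == b"first\n"
assert readline(buf) == b"second"  # last line has no trailing newline
assert readline(buf) == b""        # at EOF, an empty bytes object is returned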
Example #14
def _verify_image(img_file: io.RawIOBase, verifier) -> None:
    # This method must be called from a native thread, as the file I/O may
    # not yield to other greenthreads in some cases, and since the update and
    # verify operations are CPU-bound there would not be any yielding either,
    # which could lead to thread starvation.
    while True:
        chunk = img_file.read(1024)
        if not chunk:
            break
        verifier.update(chunk)
    verifier.verify()
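The verifier only needs update() and verify() methods; a minimal stand-in for testing, using a hypothetical SHA-256 checker rather than whatever signature verifier the caller normally supplies:

import hashlib
import io

class Sha256Verifier:
    """Hypothetical verifier: compares a running SHA-256 to an expected digest."""

    def __init__(self, expected_hex: str):
        self._expected = expected_hex
        self._hasher = hashlib.sha256()

    def update(self, chunk: bytes) -> None:
        self._hasher.update(chunk)

    def verify(self) -> None:
        if self._hasher.hexdigest() != self._expected:
            raise ValueError("image hash mismatch")

data = b"image bytes"
_verify_image(io.BytesIO(data), Sha256Verifier(hashlib.sha256(data).hexdigest()))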
Example #15
    def write_input_stream(self, in_stream: io.RawIOBase, key: str, prefix=""):
        full_prefix = self.append_prefix(self.base_prefix, prefix)
        full_key = self.append_prefix(full_prefix, key)

        with smart_open.open("s3://%s/%s" % (self.bucket, full_key),
                             'wb',
                             transport_params={'client': self.client}) as fout:
            while True:
                r = in_stream.read(self.chunk_size)
                # read() returns b'' at EOF (None only when a non-blocking
                # stream has no data), so check for both.
                if not r:
                    break
                fout.write(r)
Example #16
    def fromFile(file: io.RawIOBase) -> SfaTexture:
        """Instantiate texture from file."""
        self = SfaTexture()

        header = file.read(0x60)
        self.width, self.height = struct.unpack_from('>HH', header, 0x0A)
        self.numMipMaps = struct.unpack_from('>B', header, 0x19)[0]  # grumble
        fmtId = struct.unpack_from('>B', header, 0x16)[0]  # grumble
        self.format = ImageFormat(fmtId)

        bpp = BITS_PER_PIXEL[self.format]
        dataLen = self.width * self.height * bpp // 8
        data = file.read(dataLen)
        self.image = decode_image(
            data,
            None,  # palette_data
            self.format,  # image_format
            None,  # palette_format
            0,  # num_colors (for palettes)
            self.width,
            self.height)
        return self
Example #17
def copy_to_websocket(
    ws: lomond.WebSocket, f: io.RawIOBase, ready_sem: threading.Semaphore
) -> None:
    ready_sem.acquire()

    try:
        while True:
            chunk = f.read(4096)
            if not chunk:
                break
            ws.send_binary(chunk)
    finally:
        f.close()
        ws.close()
Example #18
def deserialize(
    stream: io.RawIOBase,
    message: Union[Type[Message], Message],
    max_size: int = MB,
) -> Message:
    if isinstance(message, Message):
        record: Message = message
    else:
        record = message()

    contents = stream.read(max_size + 1)
    if len(contents) > max_size:
        raise BufferError('Message content length is greater than max_size')
    record.ParseFromString(contents)
    return record
Example #19
def unlock_account_with_passwordfile(
        account_manager: AccountManager,
        address_hex: AddressHex,
        password_file: io.RawIOBase,
) -> PrivateKey:
    password = password_file.read()

    try:
        return account_manager.get_privkey(address_hex, password)
    except ValueError:
        click.secho(
            f'Incorrect password for {address_hex} in file. Aborting ...',
            fg='red',
        )
        sys.exit(1)
Example #20
def read_event_file(inp: io.RawIOBase) -> Tuple[Dict, bytes]:
    """ Reads an event file, and returns the header (as a python dict)
        and the data (as bytes).

        TODO: This reads the whole file into memory.  It might be
        better to only read the header, if this causes problems later
        with large files.
    """
    file_bytes = inp.read()
    zero_pos = file_bytes.index(b'\x00')
    header_bytes = file_bytes[:zero_pos]
    header_str = header_bytes.decode('utf-8')
    header = json.loads(header_str)
    data = file_bytes[zero_pos + 1:]
    return header, data
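The layout is simply a JSON header, a NUL separator, then raw data; a round-trip built by hand (safe because JSON text never contains a NUL byte):

import io
import json

payload = json.dumps({'type': 'event'}).encode('utf-8') + b'\x00' + b'raw data'
header, data = read_event_file(io.BytesIO(payload))
assert header == {'type': 'event'}
assert data == b'raw data'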
Example #21
    def unpack(self, fl: RawIOBase) -> dict:
        byteorder = "little" if self.hl_lsb_first else "big"
        ln = int.from_bytes(fl.read(self.head_len), byteorder,
                            signed=False) + self.head_num_off
        key_t = self.key_t
        val_t = self.val_t
        rtn = {}
        if isinstance(key_t, DataArray):
            for _ in range(ln):
                key = tuple(key_t.unpack(fl))
                rtn[key] = val_t.unpack(fl)
        else:
            for _ in range(ln):
                key = key_t.unpack(fl)
                rtn[key] = val_t.unpack(fl)
        return rtn
Example #22
    def __init__(self, stream: io.RawIOBase):
        LENGTH = 0x50
        FORMAT_OFFSET = 0x04
        WIDTH_OFFSET = 0x08
        HEIGHT_OFFSET = 0x0A

        self._buffer = stream.read(LENGTH)
        if len(self._buffer) != LENGTH:
            raise EOFError

        self.__width, = struct.unpack_from('<h', self._buffer, WIDTH_OFFSET)
        self.__height, = struct.unpack_from('<h', self._buffer, HEIGHT_OFFSET)
        self.__imgformat = ImageFormat(struct.unpack_from('<h',
                                                          self._buffer,
                                                          FORMAT_OFFSET)[0])
        self.__end_of_header = stream.tell()
Example #23
    def wait_until_ready(self, channel: RawIOBase, timeout=60):
        """
        Sends ' ' (space) and waits for the corresponding ACK message. Once we
        have 10 of these in a row, we can be fairly certain the device is
        ready for ymodem.
        :param channel:
        :param timeout:
        :return:
        """
        success_count = 0
        while success_count < 10:
            while channel.readline():  # flush any existing data
                success_count = 0
            channel.write(b' ')
            result = channel.read()
            if result and result[0] == LightYModemProtocol.ack:
                success_count += 1
Example #24
    def wait_until_ready(self, channel: RawIOBase, timeout=60):
        """
        Sends ' ' (space) and waits for the corresponding ACK message. Once we
        have 2 of these in a row, we can be fairly certain the device is
        ready for ymodem.
        :param channel:
        :param timeout:
        :return:
        """
        success_count = 0
        while channel.readline():  # flush any existing data
            success_count = 0

        while success_count < 2:
            channel.write(b' ')
            result = channel.read()
            if result and result[0] == LightYModemProtocol.ack:
                success_count += 1
Example #25
    def compute(self, stream_in: RawIOBase, stream_out: RawIOBase = None):
        """Compute and return the hash for the given stream.

        The data is read from stream_in until EOF, fed to the hashers, and
        written unmodified to stream_out (unless it is None).

        :param stream_in: input stream.
        :type stream_in: io.RawIOBase

        :param stream_out: output stream.
        :type stream_out: io.RawIOBase
        """
        self._init_hashers()
        read_bytes = self.chunk_size
        while read_bytes == self.chunk_size:
            data = stream_in.read(self.chunk_size)
            read_bytes = len(data)
            for hasher in self._hashers.values():
                hasher.update(data)
            if stream_out is not None:
                stream_out.write(data)
Example #26
def autodetect_csv(stream: io.RawIOBase,
                   csv_options: CSVOptions) -> CSVOptions:
    """Autodetect the CSV dialect, encoding, header etc."""
    if not (csv_options.autodetect_encoding or csv_options.autodetect_header
            or csv_options.autodetect_dialect):
        return csv_options

    data = stream.read(csv_options.autodetect_sample_size)
    assert data

    if csv_options.autodetect_encoding:
        encoding = chardet.detect(data)["encoding"]
        if encoding == "ascii" or encoding is None:
            # ASCII is a subset of UTF-8. For safety, if chardet detected the
            # encoding as ASCII, use UTF-8 (a valid ASCII file is a valid
            # UTF-8 file, but not vice versa).
            # If we can't detect the encoding, fall back to UTF-8 as well
            # (hopefully the user passed ignore_decode_errors=True).
            encoding = "utf-8"
        csv_options = csv_options._replace(encoding=encoding)

    sample = data.decode(
        csv_options.encoding,
        errors="ignore" if csv_options.ignore_decode_errors else "strict")
    # Emulate universal newlines mode (convert \r, \r\n, \n into \n)
    sample = "\n".join(sample.splitlines())

    if csv_options.autodetect_dialect:
        dialect = csv.Sniffer().sniff(sample)
        # These are meant to be set, but mypy claims they might not be.
        csv_options = csv_options._replace(delimiter=dialect.delimiter or ",",
                                           quotechar=dialect.quotechar or '"')

    if csv_options.autodetect_header:
        has_header = csv.Sniffer().has_header(sample)
        csv_options = csv_options._replace(header=has_header)

    return csv_options
Example #27
def read_wad(f: io.RawIOBase):
    f.seek(0)
    header = f.read(256)

    if header[:4] != WAD_MAGIC:
        raise CommandError(
            f'File does not appear to be a Zwift WAD file. Expected '
            f'magic: {WAD_MAGIC}, actual: {header[:4]}')

    body_size = struct.unpack('<I', header[248:252])[0]
    wad_size = 256 + body_size
    actual_size = os.fstat(f.fileno()).st_size

    if actual_size < wad_size:
        raise CommandError(f'Truncated wad file: header implies '
                           f'{wad_size} bytes but file is {actual_size} bytes')
    if actual_size > wad_size:
        warnings.warn(
            f'wad file is larger than header implies. expected size: '
            f'{wad_size} bytes, actual size: {actual_size} bytes')

    entry_pointers = read_entry_pointers(f)

    return {'file': f, 'entry_pointers': entry_pointers}
Example #28
def bulk_copy(read_from: io.RawIOBase, write_to: io.RawIOBase):
    while True:
        chunk = read_from.read(BUFFER_SIZE)
        if not chunk:
            break
        write_to.write(chunk)
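A round-trip with in-memory streams; BUFFER_SIZE is a module constant not shown above, so the value here is only an assumption:

import io

BUFFER_SIZE = 64 * 1024  # assumed chunk size

src, dst = io.BytesIO(b"payload"), io.BytesIO()
bulk_copy(src, dst)
assert dst.getvalue() == b"payload"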
Example #29
def _buffer_is_mseed3(handle: io.RawIOBase) -> bool:
    return handle.read(2) == b"MS"
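This check consumes the first two bytes of the stream without seeking back; a quick illustration with io.BytesIO:

import io

assert _buffer_is_mseed3(io.BytesIO(b"MS3 record...")) is True
assert _buffer_is_mseed3(io.BytesIO(b"not miniSEED")) is False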
Example #30
    def unpack_from_io(cls, stream: io.RawIOBase, endianness=None):
        # Read exactly __size__ bytes and return the unpacked result.
        return cls.unpack(stream.read(cls.__size__), endianness)
Example #31
    def add_binary_file(self, remote_path: str, stream: RawIOBase):
        f = VMCreateNewFileItem()
        f.path = remote_path
        f.contents = base64.b64encode(stream.read())
        self.__vm.personality.append(f)
        return self
Example #32
    def _output_to_file(
        cls,
        data: OutputData,
        bs: bytes,
        datastd: OutputDataConfig,
        targetdir: str,
        stm: io.RawIOBase = None,
    ) -> bool:
        """输出到指定目录\n
        bs:要输出的数据\n
        datastd:数据对应的数据标准,用于构建文件名等\n
        targetdir:目标目录\n
        stm: 附带的数据流"""
        res: bool = False
        tmppath = None
        outfi = None
        try:
            with cls.tmpdir_locker:
                # Temporary path
                tmppath: str = cls._get_datapath(cls._tmpdir, datastd)
                if not isinstance(tmppath, str) or tmppath == "":
                    return res

                with open(tmppath, mode="wb") as fs:
                    fs.write(bs)
                    if stm is not None and stm.readable():
                        # stm.readinto(fs)
                        readlen = 1024 * 1024 * 1024
                        while True:
                            buf = stm.read(readlen)
                            if not buf:  # b'' at EOF, or None with no data
                                break
                            readcount = len(buf)
                            fs.write(buf)
                            if readcount < readlen:
                                break

            # A validation step was added here. If extended output modes are
            # needed later, writing to the temporary path and moving to the
            # target should be split into two functions, with the validation
            # step between the two calls, each implemented separately.

            if not data.validate_file(tmppath):
                # No error logging here; corrupted data is simply not written out.
                cls._logger.debug("Corrupted data: {}".format(tmppath))
                if os.path.isfile(tmppath):
                    os.remove(tmppath)
                return res

            with cls.outdir_locker:
                outfi: str = cls._get_datapath(targetdir, datastd)
                shutil.move(tmppath, outfi)
            res = True
        except Exception:
            if tmppath and os.path.isfile(tmppath):
                os.remove(tmppath)
            if outfi and os.path.isfile(outfi):
                os.remove(outfi)

            cls._logger.error("Output data segments sub error: {}".format(
                traceback.format_exc()))
        return res