Example #1
def copy_from_websocket(
    f: io.RawIOBase,
    ws: lomond.WebSocket,
    ready_sem: threading.Semaphore,
    cert_file: Optional[str],
    cert_name: Optional[str],
) -> None:
    try:
        for event in ws.connect(
            ping_rate=0,
            session_class=lambda socket: CustomSSLWebsocketSession(socket, cert_file, cert_name),
        ):
            if isinstance(event, lomond.events.Binary):
                f.write(event.data)
            elif isinstance(event, lomond.events.Ready):
                ready_sem.release()
            elif isinstance(
                event,
                (lomond.events.ConnectFail, lomond.events.Rejected, lomond.events.ProtocolError),
            ):
                raise Exception("Connection failed: {}".format(event))
            elif isinstance(event, (lomond.events.Closing, lomond.events.Disconnected)):
                break
    finally:
        f.close()
Example #2
    def __init__(self, fileno, open_descriptor, closefd=True):
        RawIOBase.__init__(self)

        self._closefd = closefd
        self._fileno = fileno
        self.mode = open_descriptor.fileio_mode
        make_nonblocking(fileno)
        readable = open_descriptor.can_read
        writable = open_descriptor.can_write

        self.hub = get_hub()
        io_watcher = self.hub.loop.io
        try:
            if readable:
                self._read_watcher = io_watcher(fileno, 1)

            if writable:
                self._write_watcher = io_watcher(fileno, 2)
        except:
            # If anything goes wrong, it's important to go ahead and
            # close these watchers *now*, especially under libuv, so
            # that they don't get eventually reclaimed by the garbage
            # collector at some random time, thanks to the C level
            # slot (even though we don't seem to have any actual references
            # at the Python level). Previously, if we didn't close now,
            # that random close in the future would cause issues if we had duplicated
            # the fileno (if a wrapping with statement had closed an open fileobject,
            # for example)

            # test__fileobject can show a failure if this doesn't happen
            # TRAVIS=true GEVENT_LOOP=libuv python -m gevent.tests.test__fileobject \
            #    TestFileObjectPosix.test_seek TestFileObjectThread.test_bufsize_0
            self.close()
            raise
Example #3
 def writeToFile(self, file: io.RawIOBase) -> None:
     """Write this texture to SFA-format file."""
     header = self._makeHeader()
     imageData, paletteData, colors = encode_image(
         self.image, self.format, None, mipmap_count=self.numMipMaps)
     file.write(header)
     file.write(imageData.getbuffer())
Example #4
 def close(self):
     if self._sock is None:
         return
     else:
         self._sock.close()
         self._sock = None
         RawIOBase.close(self)
Example #5
 def __init__(self, block):
     ''' Initialise with Block `block`.
 '''
     RawIOBase.__init__(self)
     self.isdir = False
     self.block = block
     self._offset = 0
Example #6
    def __init__(self, fileno, mode='r', closefd=True):
        RawIOBase.__init__(self) # Python 2: pylint:disable=no-member,non-parent-init-called

        self._closefd = closefd
        self._fileno = fileno
        make_nonblocking(fileno)
        readable = 'r' in mode
        writable = 'w' in mode

        self.hub = get_hub()
        io_watcher = self.hub.loop.io
        try:
            if readable:
                self._read_event = io_watcher(fileno, 1)

            if writable:
                self._write_event = io_watcher(fileno, 2)
        except:
            # If anything goes wrong, it's important to go ahead and
            # close these watchers *now*, especially under libuv, so
            # that they don't get eventually reclaimed by the garbage
            # collector at some random time, thanks to the C level
            # slot (even though we don't seem to have any actual references
            # at the Python level). Previously, if we didn't close now,
            # that random close in the future would cause issues if we had duplicated
            # the fileno (if a wrapping with statement had closed an open fileobject,
            # for example)

            # test__fileobject can show a failure if this doesn't happen
            # TRAVIS=true GEVENT_LOOP=libuv python -m gevent.tests.test__fileobject \
            #    TestFileObjectPosix.test_seek TestFileObjectThread.test_bufsize_0
            self.close()
            raise
Example #7
 def pack(self, data: list, fl: RawIOBase):
     byteorder = "little" if self.hl_lsb_first else "big"
     ln = len(data) - self.head_num_off
     assert ln >= 0
     fl.write(ln.to_bytes(self.head_len, byteorder, signed=False))
     for x in data:
         self.sub_dt.pack(x, fl)
Example #8
def unpack_list_str_fl(fl: RawIOBase, head_len: int,
                       str_head_len: int) -> List[bytes]:
    len_lst = int.from_bytes(fl.read(head_len), "little")
    return [
        fl.read(int.from_bytes(fl.read(str_head_len), "little"))
        for _ in range(len_lst)
    ]
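A quick round-trip sketch for unpack_list_str_fl using io.BytesIO; the head_len and str_head_len values are arbitrary choices for the illustration, and pack_list_str is a hypothetical counterpart written here only to build a matching payload.

import io

def pack_list_str(items, head_len=4, str_head_len=2) -> bytes:
    # list length first, then each item prefixed with its own length (all little-endian)
    out = len(items).to_bytes(head_len, "little")
    for item in items:
        out += len(item).to_bytes(str_head_len, "little") + item
    return out

payload = pack_list_str([b"spam", b"eggs"])
assert unpack_list_str_fl(io.BytesIO(payload), 4, 2) == [b"spam", b"eggs"]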
Example #9
def search4cave(stream: io.RawIOBase, section_name: str, section_size: int,
                section_info, cave_size: int, virtaddr: int, _bytes: bytes):
    caves = []
    byte_count = 0

    base = stream.tell()
    offset = 0

    # Scan the section byte by byte, counting consecutive occurrences of _bytes;
    # a run of at least cave_size bytes is recorded as a code cave.
    while section_size > 0:
        rb = stream.read(1)
        section_size -= 1
        offset += 1

        if _bytes not in rb:
            if byte_count >= cave_size:
                mr = MiningResult()
                mr.name = section_name
                mr.cave_begin = (base + offset) - byte_count - 1
                mr.cave_end = (base + offset) - 1
                mr.cave_size = byte_count
                mr.virtaddr = virtaddr + offset - byte_count - 1
                mr.info = section_info
                caves.append(mr)
            byte_count = 0
            continue
        byte_count += 1

    stream.seek(base)
    return caves
Example #10
 def download(self, remote_path: str, writable: io.RawIOBase) -> int:
     encoded = self.exploit('base64 {}'.format(remote_path))
     contents = base64.b64decode(encoded)
     try:
         return writable.write(contents)
     finally:
         writable.flush()
Example #11
    def __init__(self, name, mode="r", storage_parameters=None, **kwargs):

        RawIOBase.__init__(self)
        ObjectIOBase.__init__(self, name, mode=mode)

        if storage_parameters is not None:
            storage_parameters = storage_parameters.copy()

        try:
            self._cache["_head"] = storage_parameters.pop("airfs.raw_io._head")
        except (AttributeError, KeyError):
            pass

        try:
            self._system = storage_parameters.pop("airfs.system_cached")
        except (AttributeError, KeyError):
            self._system = None

        if not self._system:
            self._system = self._SYSTEM_CLASS(
                storage_parameters=storage_parameters, **kwargs
            )

        self._client_kwargs = self._system.get_client_kwargs(name)

        self._is_raw_of_buffered = False

        if self._writable:
            self._write_buffer = bytearray()

            if "a" in mode:
                if self._exists() == 1:
                    with handle_os_exceptions():
                        self._init_append()

                elif self._exists() == 0:
                    with handle_os_exceptions():
                        self._create()

                else:
                    raise PermissionError(
                        "Insufficient permission to check if file already exists."
                    )

            elif "x" in mode and self._exists() == 1:
                raise FileExistsError

            elif "x" in mode and self._exists() == -1:
                raise PermissionError(
                    "Insufficient permission to check if file already exists."
                )

            else:
                with handle_os_exceptions():
                    self._create()

        else:
            with handle_os_exceptions():
                self._head()
Example #12
 def close(self):
     """Close the SocketIO object. This doesn't close the underlying
     socket, except if all references to it have disappeared.
     """
     if self.closed:
         return
     RawIOBase.close(self)
     self._sock = None
Example #13
    def close(self):
        """Close open resources."""

        RawIOBase.close(self)

        if self.fd:
            self.fd.close()
            self.fd = None
Example #14
    def __init__(self, stream: RawIOBase, chunk_size=4096, *args, **kwargs):
        self.chunk_size = chunk_size

        # Get content-size
        stream.seek(0, os.SEEK_END)
        content_length = stream.tell()
        stream.seek(0, os.SEEK_SET)

        super().__init__(stream, content_len=content_length, *args, **kwargs)
Example #15
    def dump_detection_output(self, idx: Union[int, tuple],
                              detections: Target3DArray,
                              fout: RawIOBase) -> None:
        '''
        :param detections: detection result
        :param ids: auxiliary information for output, each item contains context name and timestamp
        :param fout: output file-like object
        '''
        try:
            from waymo_open_dataset import label_pb2
            from waymo_open_dataset.protos import metrics_pb2
        except ImportError:
            _logger.error(
                "Cannot find waymo_open_dataset, install the package at "
                "https://github.com/waymo-research/waymo-open-dataset, output will be skipped now."
            )
            return

        label_map = {
            WaymoObjectClass.Unknown: label_pb2.Label.TYPE_UNKNOWN,
            WaymoObjectClass.Vehicle: label_pb2.Label.TYPE_VEHICLE,
            WaymoObjectClass.Pedestrian: label_pb2.Label.TYPE_PEDESTRIAN,
            WaymoObjectClass.Sign: label_pb2.Label.TYPE_SIGN,
            WaymoObjectClass.Cyclist: label_pb2.Label.TYPE_CYCLIST
        }

        waymo_array = metrics_pb2.Objects()
        for target in detections:
            waymo_target = metrics_pb2.Object()

            # convert box parameters
            box = label_pb2.Label.Box()
            box.center_x = target.position[0]
            box.center_y = target.position[1]
            box.center_z = target.position[2]
            box.length = target.dimension[0]
            box.width = target.dimension[1]
            box.height = target.dimension[2]
            box.heading = target.yaw
            waymo_target.object.box.CopyFrom(box)

            # convert label
            waymo_target.object.type = label_map[target.tag_top]
            waymo_target.score = target.tag_top_score

            waymo_target.context_name = idx[
                0]  # the name of the sequence is the context
            waymo_target.frame_timestamp_micros = int(
                self.timestamp(idx) * 1e6)
            waymo_array.objects.append(waymo_target)

        bindata = waymo_array.SerializeToString()
        if isinstance(fout, (str, Path)):
            Path(fout).write_bytes(bindata)
        else:
            fout.write(bindata)
Example #16
 def pack(self, data: dict, fl: RawIOBase):
     byteorder = "little" if self.hl_lsb_first else "big"
     ln = len(data) - self.head_num_off
     assert ln >= 0
     fl.write(ln.to_bytes(self.head_len, byteorder, signed=False))
     key_t = self.key_t
     val_t = self.val_t
     for key in data:
         key_t.pack(key, fl)
         val_t.pack(data[key], fl)
Example #17
def write_header(outp: io.RawIOBase, header: Dict) -> None:
    """ Writes a header to the file.  The header must be a python dict
        that is convertable to JSON.

        After calling this, you can just write the events to the file.
    """
    dumped = json.dumps(header)
    encoded = dumped.encode('utf-8')
    outp.write(encoded)
    outp.write(b'\x00')
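A minimal usage sketch for write_header with an in-memory stream; the header contents are made up for the illustration.

import io
import json

buf = io.BytesIO()
write_header(buf, {"version": 1, "source": "demo"})
raw_header, _, rest = buf.getvalue().partition(b"\x00")
assert json.loads(raw_header) == {"version": 1, "source": "demo"}
assert rest == b""  # event payloads would follow the NUL terminator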
Example #18
def read_chunks(f: RawIOBase,
                buffer_size: int = 4 * 1024**2) -> Iterator[memoryview]:
    """
    Read a chunk of complete FASTA or FASTQ records from a file.
    The size of a chunk is at most buffer_size.
    f needs to be a file opened in binary mode.

    The yielded memoryview objects become invalid on the next iteration.
    """
    # This buffer is re-used in each iteration.
    buf = bytearray(buffer_size)

    # Read one byte to determine file format.
    # If there is a comment char, we assume FASTA!
    start = f.readinto(memoryview(buf)[0:1])
    if start == 1 and buf[0:1] == b'@':
        head = _fastq_head
    elif start == 1 and (buf[0:1] == b'#' or buf[0:1] == b'>'):
        head = _fasta_head
    elif start == 0:
        # Empty file
        return
    else:
        raise UnknownFileFormat('Input file format unknown')

    # Layout of buf
    #
    # |-- complete records --|
    # +---+------------------+---------+-------+
    # |   |                  |         |       |
    # +---+------------------+---------+-------+
    # ^   ^                   ^         ^       ^
    # 0   start               end       bufend  len(buf)
    #
    # buf[0:start] is the 'leftover' data that could not be processed
    # in the previous iteration because it contained an incomplete
    # FASTA or FASTQ record.

    while True:
        if start == len(buf):
            raise OverflowError('FASTA/FASTQ record does not fit into buffer')
        bufend = f.readinto(memoryview(buf)[start:]) + start  # type: ignore
        if start == bufend:
            # End of file
            break
        end = head(buf, bufend)
        assert end <= bufend
        if end > 0:
            yield memoryview(buf)[0:end]
        start = bufend - end
        assert start >= 0
        buf[0:start] = buf[end:bufend]

    if start > 0:
        yield memoryview(buf)[0:start]
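A usage sketch with an in-memory FASTA stream, assuming read_chunks and its module-level helpers (_fasta_head, _fastq_head, UnknownFileFormat) are importable from the same module.

import io

fasta = io.BytesIO(b">seq1\nACGT\n>seq2\nGGCCTT\n")
for chunk in read_chunks(fasta, buffer_size=128):
    data = bytes(chunk)  # copy: the memoryview is reused on the next iteration
    print(data.count(b">"), "records in this chunk")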
Example #19
def verifycave(stream: io.RawIOBase, cave_size, _byte: bytes):
    base = stream.tell()
    success = True
    while cave_size > 0:
        cave_size -= 1
        rb = stream.read(1)
        if _byte not in rb:
            success = False
            break
    stream.seek(base)
    return success
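A small self-check for verifycave with an in-memory stream; the offsets and sizes are arbitrary.

import io

blob = io.BytesIO(b"\x90\x90" + b"\x00" * 16 + b"\xcc")
blob.seek(2)                  # position at the start of the suspected cave
assert verifycave(blob, 16, b"\x00")
assert blob.tell() == 2       # the helper seeks back to where it started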
Example #20
def statistiques(source: io.RawIOBase) -> (Compteur, int):
    """
    Fonction de calcul des statistiques dans le cadre de la compression de Huffman
    """
    compteur = Compteur()
    source.seek(0)
    octet = source.read(1)
    iterator = 0
    while octet:
        compteur.incrementer(octet)
        iterator += 1
        octet = source.read(1)
    return (compteur, iterator)
Example #21
    def fromFile(file:io.RawIOBase) -> SfaTexture:
        """Instantiate texture from file."""
        self = SfaTexture()

        header = file.read(0x60)
        self.width, self.height = struct.unpack_from('>HH', header, 0x0A)
        self.numMipMaps = struct.unpack_from('>B', header, 0x19)[0] # grumble
        fmtId = struct.unpack_from('>B', header, 0x16)[0] # grumble
        self.format = ImageFormat(fmtId)

        bpp = BITS_PER_PIXEL[self.format]
        dataLen = self.width * self.height * bpp // 8
        return self._fromData(header, file.read(dataLen))
Example #22
def copy_to_websocket(
    ws: lomond.WebSocket, f: io.RawIOBase, ready_sem: threading.Semaphore
) -> None:
    ready_sem.acquire()

    try:
        while True:
            chunk = f.read(4096)
            if not chunk:
                break
            ws.send_binary(chunk)
    finally:
        f.close()
        ws.close()
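copy_to_websocket pairs naturally with copy_from_websocket from Example #1. A minimal wiring sketch, assuming the lomond package and that CustomSSLWebsocketSession from Example #1 is importable alongside the helpers; the endpoint URL is illustrative, and the process's own stdin/stdout are used as the raw streams.

import sys
import threading
import lomond

ws = lomond.WebSocket("wss://example.invalid/stream")  # hypothetical endpoint
ready = threading.Semaphore(0)

# Frames received from the websocket are written to stdout...
receiver = threading.Thread(
    target=copy_from_websocket,
    args=(sys.stdout.buffer.raw, ws, ready, None, None),
)
receiver.start()

# ...and bytes read from stdin are sent out as binary frames.
copy_to_websocket(ws, sys.stdin.buffer.raw, ready)
receiver.join()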
Example #23
 def upload(self, readable: io.RawIOBase, remote_path: str) -> None:
     append = False
     # The shell has a limit on the maximum command-line length; see getconf ARG_MAX.
     while (chunk := readable.read(self.upload_chunk_size)):
         encoded = base64.b64encode(chunk).decode()
         result = self.exploit(
             'echo "{}" | base64 -d {} {}'.format(
                 encoded, '>>' if append else '>', remote_path
             )
         )
         # logging.debug(result)
         assert result == ''
         append = True
Example #24
    def __init__(self, fileno, mode='r', closefd=True):
        RawIOBase.__init__(self)  # Python 2: pylint:disable=no-member,non-parent-init-called
        self._closefd = closefd
        self._fileno = fileno
        make_nonblocking(fileno)
        readable = 'r' in mode
        writable = 'w' in mode
        self.hub = get_hub()

        io_watcher = self.hub.loop.io
        if readable:
            self._read_event = io_watcher(fileno, 1)

        if writable:
            self._write_event = io_watcher(fileno, 2)
Example #25
    def __init__(self, stream: io.RawIOBase):
        LENGTH = 0x50
        FORMAT_OFFSET = 0x04
        WIDTH_OFFSET = 0x08
        HEIGHT_OFFSET = 0x0A

        self._buffer = stream.read(LENGTH)
        if len(self._buffer) != LENGTH:
            raise EOFError

        self.__width, = struct.unpack_from('<h', self._buffer, WIDTH_OFFSET)
        self.__height, = struct.unpack_from('<h', self._buffer, HEIGHT_OFFSET)
        self.__imgformat = ImageFormat(struct.unpack_from('<h',
                                                          self._buffer,
                                                          FORMAT_OFFSET)[0])
        self.__end_of_header = stream.tell()
Example #26
def read_header(f_in: io.RawIOBase) -> dict:
    """ reads the header of a file stream with CrossCloud encryption

    :param f_in: readable file-object
    :return: the header dict
    """
    magic_number = f_in.read(len(MAGIC_NUMBER))

    if magic_number != MAGIC_NUMBER:
        raise HeaderError('magic number is {}'.format(magic_number),
                          read_data=magic_number)

    header_length, = struct.unpack('<I', f_in.read(4))

    header_str = f_in.read(header_length).decode(ENCODING)
    return json.loads(header_str)
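A round-trip sketch, assuming the module-level MAGIC_NUMBER and ENCODING constants that read_header relies on; the header contents are illustrative.

import io
import json
import struct

header = {"algorithm": "demo", "version": 1}
body = json.dumps(header).encode(ENCODING)
stream = io.BytesIO(MAGIC_NUMBER + struct.pack("<I", len(body)) + body)
assert read_header(stream) == header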
Example #27
 def write_input_stream(self, in_stream: io.RawIOBase, key: str, prefix=""):
     with open(self.get_full_path(key, prefix=prefix), 'wb') as fout:
         while True:
             r = in_stream.read(self.chunk_size)
             if not r:  # stop at EOF (b'') or when a non-blocking read returns no data (None)
                 break
             fout.write(r)
Example #28
 def wait_until_ready(self, channel:RawIOBase, timeout=60):
     """
     sends ' ' (space) and waits for the corresponding ACK message. Once we have 10 of these in a row we can be fairly
     certain the device is ready for ymodem.
     :param channel:
     :param timeout:
     :return:
     """
     success_count = 0
     while success_count < 10:
         while channel.readline():       # flush any existing data
             success_count = 0
         channel.write(b' ')
         result = channel.read()
         if result and result[0]==LightYModemProtocol.ack:
             success_count += 1
Example #29
def decode_by_char(f: io.RawIOBase) -> str:
    """Returns a ``str`` decoded from the characters in *f*.

    :param f: is expected to be a file object which has been
        opened in binary mode ('rb') or just read mode ('r').

    The *f* stream will have one character or byte at a time read from it,
    and will attempt to decode each to a string and accumulate
    those individual strings together.  Once the end of the file is found
    or an element can no longer be decoded, the accumulated string will
    be returned.
    """
    s = ''
    try:
        for elem in iter(lambda: f.read(1), b''):
            if isinstance(elem, str):
                s += elem
            else:
                s += elem.decode()
    except UnicodeError:
        # Expecting this to mean that we got to the end of decodable
        # bytes, so we're all done, and pass through to return s.
        pass

    return s
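A short demonstration: because the function decodes one byte at a time, accumulation stops at the first byte that is not valid on its own (which also means multi-byte UTF-8 sequences end the result early).

import io

mixed = io.BytesIO(b"plain ascii text" + b"\xff" + b"binary tail")
print(decode_by_char(mixed))  # -> 'plain ascii text'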
Example #30
    def __init__(self, rf, inf):
        """Fill common fields"""

        RawIOBase.__init__(self)

        # standard io.* properties
        self.name = inf.filename
        self.mode = 'rb'

        self.rf = rf
        self.inf = inf
        self.crc_check = rf._crc_check
        self.fd = None
        self.CRC = 0
        self.remain = 0

        self._open()
Example #31
    def wait_until_ready(self, channel:RawIOBase, timeout=60):
        """
        sends ' ' (space) and waits for the corresponding ACK message. Once we have 2 of these in a row we can be fairly
        certain the device is ready for ymodem.
        :param channel:
        :param timeout:
        :return:
        """
        success_count = 0
        while channel.readline():  # flush any existing data
            success_count = 0

        while success_count < 2:
            channel.write(b' ')
            result = channel.read()
            if result and result[0]==LightYModemProtocol.ack:
                success_count += 1
Example #32
    def __read(self, stream: io.RawIOBase):
        FILE_TYPE_OFFSET = 0x04
        FILE_LENGTH_OFFSET = 0x10
        FILE_LENGTH_SHIFT = 7

        self._buffer = stream.read(4)

        length, = struct.unpack_from('<l', self._buffer, 0)

        remaining = length - 4

        self._buffer += stream.read(remaining)

        self._file_type, = struct.unpack_from('<l', self._buffer, FILE_TYPE_OFFSET)
        self._length = struct.unpack_from('<l', self._buffer, FILE_LENGTH_OFFSET)[0] << FILE_LENGTH_SHIFT

        self._end_of_header = stream.tell()
Example #33
 def __init__(self, fileno, mode='r', closefd=True):
     RawIOBase.__init__(self)
     self._closed = False
     self._closefd = closefd
     self._fileno = fileno
     make_nonblocking(fileno)
     self._readable = 'r' in mode
     self._writable = 'w' in mode
     self.hub = get_hub()
     io = self.hub.loop.io
     if self._readable:
         self._read_event = io(fileno, 1)
     else:
         self._read_event = None
     if self._writable:
         self._write_event = io(fileno, 2)
     else:
         self._write_event = None
Example #34
    def __init__(self, fileno, mode='r', closefd=True):
        RawIOBase.__init__(self) # Python 2: pylint:disable=no-member,non-parent-init-called
        self._closed = False
        self._closefd = closefd
        self._fileno = fileno
        make_nonblocking(fileno)
        self._readable = 'r' in mode
        self._writable = 'w' in mode
        self.hub = get_hub()

        io_watcher = self.hub.loop.io
        if self._readable:
            self._read_event = io_watcher(fileno, 1)

        if self._writable:
            self._write_event = io_watcher(fileno, 2)

        self._seekable = None
Example #35
 def __init__(self, sock):
     RawIOBase.__init__(self)
     self._sock = sock
Example #36
 def close(self):
     if self.closed:
         return
     RawIOBase.close(self)
     self.iter = None
Example #37
 def close(self):
     if self.closed:
         return
     RawIOBase.close(self)
     self.http_stream = None
Example #38
 def __init__(self, *args, **kwargs):
     RawIOBase.__init__(self)
     FtdiSerial.__init__(self, *args, **kwargs)

def bulk_copy(read_from: io.RawIOBase, write_to: io.RawIOBase):
    while True:
        chunk = read_from.read(BUFFER_SIZE)
        if not chunk:
            break
        write_to.write(chunk)
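A bulk_copy usage sketch; BUFFER_SIZE is referenced inside the function as a module-level constant, so a stand-in value is defined here for the illustration.

import io

BUFFER_SIZE = 64 * 1024
src = io.BytesIO(b"x" * 200_000)
dst = io.BytesIO()
bulk_copy(src, dst)
assert dst.getvalue() == src.getvalue()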