Example #1
 def serialize(self, data_out: BufferedIOBase, conf: Configuration, *args,
               **kwargs):
     rounded_sampling_rate = int(round((self.sampling_rate * 4096) / 44100))
     data_out.write(
         self.struct.pack(kwargs['end_section_offset'],
                          rounded_sampling_rate, self.flags.value, self.uk1,
                          self.size))
Example #2
def recv(sock: socket.socket, dest: io.BufferedIOBase) -> int:
    """
    Implementation of the receiving logic for receiving data over a slow,
    lossy, constrained network.

    Args:
        sock -- A socket object, constructed and initialized to communicate
                over a simulated lossy network.

    Return:
        The number of bytes written to the destination.
    """
    logger = util.logging.get_logger("project-receiver")
    # Naive solution, where we continually read data off the socket
    # until we don't receive any more data, and then return.
    num_bytes = 0
    while True:
        data = sock.recv(util.MAX_PACKET)
        if not data:
            break
        logger.info("Received %d bytes", len(data))
        dest.write(data)
        num_bytes += len(data)
        dest.flush()
    return num_bytes
Example #3
    def write(self, io_out: BufferedIOBase, v: Optional[Dict[str, Any]], otherfields: Dict[str, Any]) -> None:
        # If they didn't specify this tlvstream, it's empty.
        if v is None:
            return

        # Make a tuple of (fieldnum, val_to_bin, val) so we can sort into
        # ascending order as TLV spec requires.
        def write_raw_val(iobuf: BufferedIOBase, val: Any, otherfields: Dict[str, Any]) -> None:
            iobuf.write(val)

        def get_value(tup):
            """Get the field number (used as the sort key) from a (num, fun, val) tuple"""
            return tup[0]

        ordered: List[Tuple[int,
                            Callable[[BufferedIOBase, Any, Dict[str, Any]], None],
                            Any]] = []
        for fieldname in v:
            f = self.find_field(fieldname)
            if f is None:
                # fieldname can be an integer for a raw field.
                ordered.append((int(fieldname), write_raw_val, v[fieldname]))
            else:
                ordered.append((f.number, f.write, v[fieldname]))

        ordered.sort(key=get_value)

        for typenum, writefunc, val in ordered:
            buf = BytesIO()
            writefunc(cast(BufferedIOBase, buf), val, otherfields)
            BigSizeType.write(io_out, typenum)
            BigSizeType.write(io_out, len(buf.getvalue()))
            io_out.write(buf.getvalue())
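`BigSizeType.write` is used above but not shown in the snippet. BOLT 1 defines BigSize as a big-endian variable-length integer with one-byte tags 0xfd, 0xfe and 0xff; a minimal sketch consistent with that spec (`bigsize_write` is a hypothetical name, not the library's implementation):

from io import BufferedIOBase

def bigsize_write(io_out: BufferedIOBase, v: int) -> None:
    # Values below 0xfd fit in one byte; larger values get a tag byte
    # followed by a 2-, 4- or 8-byte big-endian integer.
    if v < 0xfd:
        io_out.write(bytes([v]))
    elif v <= 0xffff:
        io_out.write(b'\xfd' + v.to_bytes(2, 'big'))
    elif v <= 0xffffffff:
        io_out.write(b'\xfe' + v.to_bytes(4, 'big'))
    else:
        io_out.write(b'\xff' + v.to_bytes(8, 'big'))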
Example #4
    def __init__(self, bucket, key, *, chunk_size=DEFAULT_CHUNK_SIZE,
                 max_file_log_time=DEFAULT_ROTATION_TIME_SECS, max_file_size_bytes=MAX_FILE_SIZE_BYTES,
                 encoder='utf-8', workers=2, compress=False, key_id=None, secret=None, token=None):

        self.session = Session(aws_access_key_id=key_id, aws_secret_access_key=secret, aws_session_token=token)
        self.s3 = self.session.resource('s3')
        self.start_time = int(time.time())
        self.key = key.strip('/')
        self.chunk_size = chunk_size
        self.max_file_log_time = max_file_log_time
        self.max_file_size_bytes = max_file_size_bytes
        self.current_file_name = "{}_{}".format(key, int(time.time()))
        if compress:
            self.current_file_name = "{}.gz".format(self.current_file_name)
        self.encoder = encoder

        try:
            self.s3.meta.client.head_bucket(Bucket=bucket)
        except Exception:
            raise ValueError('Bucket %s does not exist, or missing permissions' % bucket)

        self._bucket = self.s3.Bucket(bucket)
        self._current_object = self._get_stream_object(self.current_file_name)
        # The work queues are used below but never created in this snippet;
        # presumably they are plain queue.Queue instances.
        self._rotation_queue = queue.Queue()
        self._stream_buffer_queue = queue.Queue()
        # Thread.start() returns None, so build the thread lists first and
        # start the threads afterwards.
        self.workers = [threading.Thread(target=task_worker, args=(self._rotation_queue,))
                        for _ in range(int(max(workers, MIN_WORKERS_NUM) / 2) + 1)]
        self.stream_bg_workers = [threading.Thread(target=task_worker, args=(self._stream_buffer_queue,))
                                  for _ in range(max(int(max(workers, MIN_WORKERS_NUM) / 2), 1))]
        for worker in self.workers + self.stream_bg_workers:
            worker.start()

        self._is_open = True
        self.compress = compress

        BufferedIOBase.__init__(self)
Example #5
def recv(sock: socket.socket, dest: io.BufferedIOBase) -> int:
    """
    Implementation of the receiving logic for receiving data over a slow,
    lossy, constrained network.

    Args:
        sock -- A socket object, constructed and initialized to communicate
                over a simulated lossy network.

    Return:
        The number of bytes written to the destination.
    """
    logger = homework5.logging.get_logger("hw5-receiver")
    # Naive solution, where we continually read data off the socket
    # until we don't receive any more data, and then return.
    num_bytes = 0
    sequenceNumber = 0
    while True:
        data = sock.recv(homework5.MAX_PACKET)
        if not data:
            break
        header = data[:4]
        data = data[4:]
        tempNumber = struct.unpack("i", header)[0]
        if data == b'':
            break

        logger.info("Received %d bytes", len(data))
        if tempNumber > sequenceNumber:
            sequenceNumber = tempNumber
            dest.write(data)
            num_bytes += len(data)
            dest.flush()
        sock.send(struct.pack("i", sequenceNumber))
    return num_bytes
Example #6
File: db2.py Project: Tankobot/p2pg
    def __init__(self, file_obj: BufferedIOBase,
                 key_map: KeyMap,
                 init=False, *,
                 portion=3/4):
        self.file = file_obj
        self.maps = [key_map]
        self._portion = portion

        # set the first and last pointers
        file_obj.seek(0)
        if init:
            # empty pointers
            self._first = None
            self._last = None
            # start fill tracking
            self._filled = 0
        else:
            # read pointers
            self._first = int.from_bytes(file_obj.read(32), 'little')
            self._last = int.from_bytes(file_obj.read(32), 'little')
            # get current fill
            self._filled = int.from_bytes(file_obj.read(32), 'little')
        # add offset for pointers
        self._pos = 64

        # initialize key cache
        self._keys = {}

        # track if currently expanding db
        self._expansion = None

        # set up iterable variable
        self._current = None
Example #7
    def __init__(self, key_id, secret, stream_name, region, partition, chunk_size=DEFAULT_CHUNK_SIZE, encoder='utf-8',
                 workers=2):

        self.kinesis = client('kinesis', region_name=region, aws_access_key_id=key_id,
                              aws_secret_access_key=secret)
        self.chunk_size = chunk_size
        self.stream_name = stream_name
        self.region = region
        self.tasks = queue.Queue()
        self.partition = partition
        self.encoder = encoder

        try:
            stream_desc = self.kinesis.describe_stream(StreamName=self.stream_name)
            if stream_desc['StreamDescription']['StreamStatus'] != 'ACTIVE':
                raise AssertionError
        except Exception:
            raise ValueError('Kinesis stream %s does not exist or is inactive, or permissions are insufficient' % stream_name)

        # Thread.start() returns None, so keep the Thread objects and start
        # them afterwards.
        self.workers = [threading.Thread(target=task_worker, args=(self.tasks,))
                        for _ in range(int(max(workers, MIN_WORKERS_NUM) / 2) + 1)]
        for worker in self.workers:
            worker.start()
        self._stream = BytesIO()

        self._is_open = True

        BufferedIOBase.__init__(self)
Example #8
    def unpack(
        self,
        buffer: io.BufferedIOBase,
        unicode: bool = None,
        version: Version = None,
        gx_lists: list[GXList] = None,
        gx_list_indices: dict[int, int] = None,
    ):
        if any(var is None
               for var in (unicode, version, gx_lists, gx_list_indices)):
            raise ValueError(
                "Not all required keywords were passed to `Material.unpack()`."
            )

        data = self.STRUCT.unpack(buffer)
        encoding = "utf-16-le" if unicode else "shift_jis_2004"
        self.name = read_chars_from_buffer(buffer,
                                           offset=data.pop("__name_offset"),
                                           encoding=encoding)
        self.mtd_path = read_chars_from_buffer(
            buffer, offset=data.pop("__mtd_path_offset"), encoding=encoding)
        gx_offset = data.pop("__gx_offset")
        if gx_offset == 0:
            self.gx_index = -1
        elif gx_offset in gx_list_indices:
            self.gx_index = gx_list_indices[gx_offset]
        else:
            gx_list_indices[gx_offset] = len(gx_lists)
            material_offset = buffer.tell()
            buffer.seek(gx_offset)
            gx_lists.append(GXList(buffer, version))
            buffer.seek(material_offset)
        self.set(**data)
Example #9
def read_socket_to_file(bufferedReader: BufferedReader, file: BufferedIOBase,
                        size):
    """
		Reads data from a socket to a file, logging progress if data is big.
	"""
    logProgress = False
    if size > big_file_threshold:
        logProgress = True

    try:
        pointer = 0
        while pointer < size:
            data = bufferedReader.read(min(buffer_size, size - pointer))
            if not data:
                # The connection closed early; handled by the except below.
                raise EOFError("Stream ended before the full file was received.")
            file.write(data)
            pointer += len(data)
            if logProgress:
                print("Receiving file",
                      pointer,
                      "/",
                      size,
                      "bytes received",
                      end='\r')
        if logProgress: print()
    except Exception:
        print("Error while receiving file.")
Example #10
    def __init__(self,
                 name,
                 mode='r',
                 buffer_size=None,
                 max_buffers=0,
                 max_workers=None,
                 **kwargs):

        if 'a' in mode:
            # TODO: Implement append mode and remove this exception
            raise NotImplementedError('Not implemented yet in Pycosio')

        BufferedIOBase.__init__(self)
        ObjectIOBase.__init__(self, name, mode=mode)
        WorkerPoolBase.__init__(self, max_workers)

        # Instantiate raw IO
        self._raw = self._RAW_CLASS(name, mode=mode, **kwargs)
        self._raw._is_raw_of_buffered = True

        # Link to RAW methods
        self._mode = self._raw.mode
        self._name = self._raw.name
        self._client_kwargs = self._raw._client_kwargs

        # Initializes buffer
        if not buffer_size or buffer_size < 0:
            self._buffer_size = self.DEFAULT_BUFFER_SIZE
        elif buffer_size < self.MINIMUM_BUFFER_SIZE:
            self._buffer_size = self.MINIMUM_BUFFER_SIZE
        elif (self.MAXIMUM_BUFFER_SIZE
              and buffer_size > self.MAXIMUM_BUFFER_SIZE):
            self._buffer_size = self.MAXIMUM_BUFFER_SIZE
        else:
            self._buffer_size = buffer_size

        # Initialize write mode
        if self._writable:
            self._max_buffers = max_buffers
            self._buffer_seek = 0
            self._write_buffer = bytearray(self._buffer_size)
            self._seekable = False
            self._write_futures = []
            self._raw_flush = self._raw._flush

            # Size used only with random write access
            # Value will be lazily evaluated later if needed.
            self._size_synched = False
            self._size = 0
            self._size_lock = Lock()

        # Initialize read mode
        else:
            self._size = self._raw._size
            self._read_range = self.raw._read_range
            if max_buffers:
                self._max_buffers = max_buffers
            else:
                self._max_buffers = ceil(self._size / self._buffer_size)
            self._read_queue = dict()
Example #11
    def write(self,
              stream: BufferedIOBase,
              nodes: List[SceneNode],
              mode=MeshWriter.OutputMode.BinaryMode) -> bool:
        Logger.log("i", "starting ChituCodeWriter.")
        if mode != MeshWriter.OutputMode.TextMode:
            Logger.log("e", "ChituCodeWriter does not support non-text mode.")
            self.setInformation(
                catalog.i18nc(
                    "@error:not supported",
                    "ChituCodeWriter does not support non-text mode."))
            return False
        gcode_textio = StringIO()  # Buffer the g-code as text before modifying it.
        gcode_writer = cast(
            MeshWriter,
            PluginRegistry.getInstance().getPluginObject("GCodeWriter"))
        success = gcode_writer.write(gcode_textio, None)

        if not success:
            self.setInformation(gcode_writer.getInformation())
            return False
        result = self.modify(gcode_textio.getvalue())
        stream.write(result)
        Logger.log("i", "ChituWriter done")
        return True
Example #12
 def serialize(self, data_out: BufferedIOBase, conf: Configuration, *args,
               **kwargs):
     rounded_sampling_rate = int(round((self.sampling_rate * 4096) / 48000))
     data_out.write(
         self.struct.pack(self.sampling_rate, rounded_sampling_rate,
                          self.volume_level, self.flags.value, self.uk1,
                          self.uk2, self.size))
Example #13
 @classmethod
 def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
           **kwargs):
     data_in.seek(4, SEEK_CUR)  # Group header offset
     n_sound_effects = int.from_bytes(data_in.read(4), 'little')
     data_in.seek(
         8, SEEK_CUR
     )  # End offset (4 bytes) | Sum of group VAGs' sizes (4 bytes)
     return cls(n_sound_effects=n_sound_effects)
Example #14
def readtimes(f: io.BufferedIOBase) -> List[float]:
    b = f.read(4)
    numof_times = struct.unpack_from("<L", b, 0)[0]
    times = [0.0 for _ in range(numof_times)]
    bs = f.read(8 * numof_times)
    for i, _ in enumerate(times):
        times[i] = struct.unpack_from("<d", bs, 8 * i)[0]
    return times
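For symmetry, a writer for the same layout (a little-endian uint32 count followed by that many float64 values) might look like this; `writetimes` is a hypothetical counterpart, not part of the project:

import io
import struct
from typing import List

def writetimes(f: io.BufferedIOBase, times: List[float]) -> None:
    # 4-byte little-endian count, then one 8-byte double per value.
    f.write(struct.pack("<L", len(times)))
    for t in times:
        f.write(struct.pack("<d", t))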
Example #15
 def uncompress(self, f_in: BufferedIOBase, f_out: BufferedIOBase):
     while True:
         start = uncompress_num(f_in)
         if start is None:
             break
         byte_len = uncompress_num(f_in)
         # print(hex(start), hex(byte_len))
         f_out.write(get_bytes(self.dict, start, byte_len))
Example #16
 def write(self, io_out: BufferedIOBase, v: int, otherfields: Dict[str, Any]) -> None:
     binval = struct.pack('>Q', v)
     while len(binval) != 0 and binval[0] == 0:
         binval = binval[1:]
     if len(binval) > self.maxbytes:
         raise ValueError('{} exceeds maximum {} capacity'
                          .format(v, self.name))
     io_out.write(binval)
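Because the writer strips leading zero bytes, a reader needs the field length from elsewhere (in a TLV stream it comes from the length prefix). A hypothetical counterpart under that assumption:

from io import BufferedIOBase

def read_truncated_u64(io_in: BufferedIOBase, length: int) -> int:
    # int.from_bytes treats the stripped leading bytes as zeros.
    return int.from_bytes(io_in.read(length), 'big')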
Example #17
def parse_message_pairs(stream: BufferedIOBase):
    parser = intialize_parser(parse_message_pair)

    data = stream.read(1024)
    while data:
        for rr in parse(parser, data):
            yield rr
        data = stream.read(1024)
Example #18
def download_file_from_output_directory(base_url: str, relative_path: str,
                                        output: BufferedIOBase):
    url = (base_url if base_url.endswith('/') else base_url +
           '/') + relative_path.replace('\\', '/').replace('//', '/')
    resp = requests.get(url, stream=True)
    for chunk in resp.iter_content(1024):
        if chunk:
            output.write(chunk)
Example #19
def recv(sock: socket.socket, dest: io.BufferedIOBase) -> int:
    """
    Implementation of the receiving logic for receiving data over a slow,
    lossy, constrained network.

    Args:
        sock -- A socket object, constructed and initialized to communicate
                over a simulated lossy network.

    Return:
        The number of bytes written to the destination.
    """
    global EXPECTED_SEQUENCE

    logger = homework5.logging.get_logger("hw5-receiver")

    num_bytes = 0
    while True:
        try:
            data = sock.recv(homework5.MAX_PACKET)

            # Kill the process as soon as there is nothing else to send
            if not data:
                break

            # Gather the packet and retrieve the sequence number and data
            new_packet = decode_packet(data)
            header_only = new_packet[0]
            data_only = new_packet[1]

            # Check if the packet received is not off
            if header_only == EXPECTED_SEQUENCE:

                # If the packet received also contains data, then send an ack
                if data_only is not None:
                    # Send an Acknowledgement that the data received corresponds
                    # to expected value
                    sock.send(make_packet(EXPECTED_SEQUENCE))

                    logger.info("Received %d bytes", len(data_only))

                    dest.write(data_only)
                    num_bytes += len(data_only)
                    dest.flush()

                    # Update the expected sequence if the data that we received
                    # is the one that was expected
                    update_sequence()

            # If the packet is out of sequence, resend the expected sequence number
            else:
                sock.send(make_packet(EXPECTED_SEQUENCE))

        # If there was a timeout, continue
        except socket.timeout:
            continue

    return num_bytes
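`make_packet` and `decode_packet` are project helpers that the snippet does not include. A minimal pairing consistent with how they are used above, assuming a 4-byte big-endian sequence-number header (the exact format is a guess):

import struct
from typing import Optional, Tuple

def make_packet(seq: int, payload: bytes = b"") -> bytes:
    # Sequence-number header followed by the (possibly empty) payload.
    return struct.pack(">I", seq) + payload

def decode_packet(packet: bytes) -> Tuple[int, Optional[bytes]]:
    seq = struct.unpack(">I", packet[:4])[0]
    payload = packet[4:] or None  # ack-only packets carry no data
    return seq, payload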
Example #20
def _read_data_frame(rfile: BufferedIOBase):
    frame = {}
    net_bytes = ord(rfile.read(1))
    frame["FIN"] = net_bytes >> 7
    frame["RSV1"] = (net_bytes & 0x40) >> 6
    frame["RSV2"] = (net_bytes & 0x20) >> 5
    frame["RSV3"] = (net_bytes & 0x10) >> 4
    frame["opcode"] = net_bytes & 0x0F

    if frame["RSV1"] != 0 or frame["RSV2"] != 0 or frame["RSV3"] != 0:
        raise FrameError(
            "Unsupported feature. RSV1, RSV2 or RSV3 has a non-zero value.",
            frame,
        )

    if not frame["opcode"] in OPCODE.values():
        raise FrameError("Unsupported opcode value.", frame)

    if frame["FIN"] == 0 and frame["opcode"] != OPCODE["continueation"]:
        raise FrameError(
            "FIN bit not set for a non-continueation frame.", frame
        )

    if frame["opcode"] in CONTROL_OPCODES and frame["FIN"] == 0:
        raise FrameError("FIN bit not set for a control frame.", frame)

    net_bytes = ord(rfile.read(1))
    mask_bit = net_bytes >> 7

    if mask_bit == 0:
        raise FrameError("Unmasked frame from client.", frame)

    length1 = net_bytes & 0x7F

    if frame["opcode"] in CONTROL_OPCODES and length1 > 125:
        raise FrameError("Control frame with invalid payload length.", frame)

    try:
        length = _read_payload_length(length1, rfile)
    except InvalidLengthError as error:
        raise FrameError(
            f"Invalid payload length of {error.length} bytes.", frame
        )

    masking_key = rfile.read(4)
    encoded_payload = rfile.read(length)
    frame["payload"] = _decode_payload(masking_key, encoded_payload)

    if frame["opcode"] == OPCODE["close"] and frame["payload"]:
        frame["status_code"] = int.from_bytes(
            frame["payload"][0:2], byteorder="big"
        )
        if length > 2:
            # /!\ may raise UnicodeError /!\
            frame["close_reason"] = frame["payload"][2:].decode()

    return frame
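`_decode_payload` is not shown here. RFC 6455 (section 5.3) unmasks client frames by XOR-ing each payload byte with the 4-byte masking key, cycled; a sketch of what the helper plausibly does:

def _decode_payload(masking_key: bytes, encoded_payload: bytes) -> bytes:
    # payload[i] XOR key[i mod 4], per RFC 6455.
    return bytes(b ^ masking_key[i % 4] for i, b in enumerate(encoded_payload))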
Example #21
def _read_payload_length(payload_length1: int, rfile: BufferedIOBase):
    final_length = payload_length1
    if payload_length1 == 126:
        final_length = int.from_bytes(rfile.read(2), byteorder="big")
    elif payload_length1 == 127:
        final_length = int.from_bytes(rfile.read(8), byteorder="big")
        if final_length >> 63 == 1:
            raise InvalidLengthError(final_length)
    return final_length
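A quick illustration of the three WebSocket length encodings, with BytesIO standing in for rfile (lengths up to 125 are encoded directly; 126 and 127 pull two and eight extra bytes):

import io

assert _read_payload_length(125, io.BytesIO(b"")) == 125
assert _read_payload_length(126, io.BytesIO((1000).to_bytes(2, "big"))) == 1000
assert _read_payload_length(127, io.BytesIO((70000).to_bytes(8, "big"))) == 70000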
Example #22
def recv(sock: socket.socket, dest: io.BufferedIOBase) -> int:
    """
    Implementation of the receiving logic for receiving data over a slow,
    lossy, constrained network.

    Args:
        sock -- A socket object, constructed and initialized to communicate
                over a simulated lossy network.

    Return:
        The number of bytes written to the destination.
    """
    logger = homework5.logging.get_logger("hw5-receiver")
    num_bytes = 0
    sequence = 0
    count = 0
    while True:
        data = sock.recv(homework5.MAX_PACKET)
        #print("\nRECEIVER DATA[0]->", data[:2])
        if data[0] == sequence:
            sock.send(bytes([sequence]))
            #print('\n')
            print("RECEVIER SENT THESE BYTES->", str(data[0]))
            buff = data[1:]
            logger.info("Received %d bytes", len(buff))
            #print('\n')
            dest.write(buff)
            count = 0
            num_bytes += len(buff)
            dest.flush()
            if sequence == 255:
                sequence = 0
            else:
                sequence+=1
            continue

        elif data[:2] == FIN:
            sock.send(FIN)
            # print("RECV CLOSED SUCCESSFULLY")
            sock.close()
            break

        if data[0] != sequence:
            # Acknowledge what actually arrived so the sender retransmits
            # the packet we are still waiting for.
            sock.send(bytes([data[0]]))
            print("WRONG SEQUENCE -- RECEIVER GOT THESE BYTES->", str(data[0]))
            print("looking for these bytes->", sequence)
            continue

        break
    return num_bytes
Example #23
File: db2.py Project: Tankobot/p2pg
    @classmethod
    def read_object(cls, file: BufferedIOBase):
        new_key = file.read(cls.key_size)

        obj_size = file.read(cls.key_size)
        obj_size = int.from_bytes(obj_size, 'little')

        new_data = file.read(obj_size)

        return cls(new_key, new_data)
Example #24
def compress_num(nums: list, f_out: BufferedIOBase):
    # print(hex(nums[0]), hex(nums[1]))
    # use most significant bit to tell the end of a number
    buf = bytearray()
    for num in nums:
        # emit the low seven bits per byte, least significant group first
        while num >= 128:
            buf.append(num & 0x7F)
            num >>= 7
        # the final byte carries the high bit set as a terminator
        buf.append(num + 128)
    f_out.write(buf)
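A decoder consistent with this encoding, and plausibly what `uncompress_num` in Example #15 does (the project's helper is not shown, so this is an assumption): accumulate 7-bit groups least significant first until a byte with the high bit set terminates the number, and return None at a clean end of stream.

import io
from typing import Optional

def uncompress_num(f_in: io.BufferedIOBase) -> Optional[int]:
    num = 0
    shift = 0
    while True:
        b = f_in.read(1)
        if not b:        # end of stream: no more numbers
            return None
        if b[0] & 0x80:  # terminator byte: high bit set, low bits are the last group
            return num | ((b[0] & 0x7F) << shift)
        num |= b[0] << shift
        shift += 7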
Example #25
    def __fill_buf(self, f_in: BufferedIOBase) -> bool:
        next_bytes = f_in.read(self.read_len)
        if len(next_bytes) == 0:
            return False

        cur_process = f_in.tell() * 100 // self.length
        if cur_process != self.process:
            self.process = cur_process
            print(str(cur_process) + "%")
        self.buf.extend(next_bytes)
        return True
Example #26
    def transfer_destination_io(self,
                                *,
                                source: str,
                                destination: io.BufferedIOBase,
                                chunk_size: int = 4096) -> None:
        """Transfer from source file to destination IO.

        Note that this can't use std{in,out}=open(...) due to LP #1849753.

        :param source: The source path, prefixed with <name:> for a path inside
            the instance.
        :param destination: An IO stream to write to.

        :raises MultipassError: On error.
        """
        command = [str(self.multipass_path), "transfer", source, "-"]
        proc = subprocess.Popen(
            command,
            stdin=subprocess.DEVNULL,
            stdout=subprocess.PIPE,
        )

        # Should never happen, but pyright makes noise.
        assert proc.stdout is not None

        while True:
            written = proc.stdout.read(chunk_size)
            if written:
                destination.write(written)

            if len(written) < chunk_size:
                logger.debug("Finished streaming standard output")
                break

        while True:
            try:
                out, _ = proc.communicate(timeout=1)
            except subprocess.TimeoutExpired:
                continue

            if out:
                destination.write(out)

            if proc.returncode == 0:
                logger.debug("Process completed")
                break

            if proc.returncode is not None:
                raise MultipassError(
                    command=command,
                    returncode=proc.returncode,
                    msg=f"Failed to transfer file {source!r} to destination.",
                )
Example #27
 def __init__(self, out=None, encoding='iso-8859-1', short_empty_elements=False):
     # Initialize XMLGenerator's own base class (ContentHandler).
     xmltodict.XMLGenerator.__bases__[0].__init__(self)
     # Expose `out.write` through a minimal writable object so TextIOWrapper
     # can take care of the encoding.
     buf = BufferedIOBase()
     buf.writable = lambda: True
     buf.write = out.write
     ioWrapper = TextIOWrapper(buf, encoding=encoding, errors='xmlcharrefreplace', newline='\n')
     self._write = ioWrapper.write
     self._flush = ioWrapper.flush
     self._ns_contexts = [{}]
     self._current_context = self._ns_contexts[-1]
     self._undeclared_ns_maps = []
     self._encoding = encoding
Example #28
    def unpack(self, buffer: io.BufferedIOBase):
        data = self.STRUCT.unpack(buffer)
        layout_offset = buffer.tell()

        buffer.seek(data.pop("__member_offset"))
        struct_offset = 0
        self.members = []
        for _ in range(data.pop("__member_count")):
            member = LayoutMember(buffer, dict(struct_offset=struct_offset))
            self.members.append(member)
            struct_offset += member.layout_type.size()
        buffer.seek(layout_offset)
Example #29
    def __init__(self,
                 name,
                 mode="r",
                 buffer_size=None,
                 max_buffers=0,
                 max_workers=None,
                 **kwargs):

        if "a" in mode:
            raise NotImplementedError('"a" mode not implemented yet')

        BufferedIOBase.__init__(self)
        ObjectIOBase.__init__(self, name, mode=mode)
        WorkerPoolBase.__init__(self, max_workers)

        self._raw = self._RAW_CLASS(name, mode=mode, **kwargs)
        self._raw._is_raw_of_buffered = True
        self._mode = self._raw.mode
        self._name = self._raw.name
        self._client_kwargs = self._raw._client_kwargs

        if not buffer_size or buffer_size < 0:
            self._buffer_size = self.DEFAULT_BUFFER_SIZE
        elif buffer_size < self.MINIMUM_BUFFER_SIZE:
            self._buffer_size = self.MINIMUM_BUFFER_SIZE
        elif self.MAXIMUM_BUFFER_SIZE and buffer_size > self.MAXIMUM_BUFFER_SIZE:
            self._buffer_size = self.MAXIMUM_BUFFER_SIZE
        else:
            self._buffer_size = buffer_size

        if self._writable:
            self._max_buffers = max_buffers
            self._buffer_seek = 0
            self._write_buffer = bytearray(self._buffer_size)
            self._seekable = False
            self._write_futures = []
            self._raw_flush = self._raw._flush

            # Size used only with random write access
            # Value will be lazily evaluated later if needed.
            self._size_synched = False
            self._size = 0
            self._size_lock = Lock()

        else:
            self._size = self._raw._size
            self._read_range = self.raw._read_range
            self._seekable = self.raw._seekable
            if max_buffers:
                self._max_buffers = max_buffers
            else:
                self._max_buffers = ceil(self._size / self._buffer_size)
            self._read_queue = dict()
Example #30
def _extract_next_file(
        archive_file: io.BufferedIOBase) -> Iterator[Tuple[str, bytes]]:
    """Extracts the next available file from the archive.

  Reads the next available file header section and yields its filename and
  content in bytes as a tuple. Stops when there are no more available files in
  the provided archive_file.

  Args:
    archive_file: The archive file object, of which cursor is pointing to the
      next available file header section.

  Yields:
    The name and content of the next available file in the given archive file.

  Raises:
    RuntimeError: The archive_file is in an unknown format.
  """
    while True:
        header = archive_file.read(60)
        if not header:
            return
        elif len(header) < 60:
            raise RuntimeError('Invalid file header format.')

        # For the details of the file header format, see:
        # https://en.wikipedia.org/wiki/Ar_(Unix)#File_header
        # We only need the file name and the size values.
        name, _, _, _, _, size, end = struct.unpack('=16s12s6s6s8s10s2s',
                                                    header)
        if end != b'`\n':
            raise RuntimeError('Invalid file header format.')

        # Convert the bytes into more natural types.
        name = name.decode('ascii').strip()
        size = int(size, base=10)
        odd_size = size % 2 == 1

        # Handle the extended filename scheme.
        if name.startswith('#1/'):
            filename_size = int(name[3:])
            name = archive_file.read(filename_size).decode('utf-8').strip(
                ' \x00')
            size -= filename_size

        file_content = archive_file.read(size)
        # The file contents are always 2 byte aligned, and 1 byte is padded at the
        # end in case the size is odd.
        if odd_size:
            archive_file.read(1)

        yield (name, file_content)
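A hypothetical usage sketch: a Unix ar archive starts with the 8-byte global magic b"!<arch>\n", which this generator expects the caller to have consumed before the first file header (the path below is made up):

with open("libexample.a", "rb") as archive_file:
    if archive_file.read(8) != b"!<arch>\n":
        raise RuntimeError("Not an ar archive.")
    for name, content in _extract_next_file(archive_file):
        print(name, len(content))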
Example #31
    def __init__(self,
                 stream_name: str,
                 partition_key: str,
                 *,
                 chunk_size: int = DEFAULT_CHUNK_SIZE,
                 encoder: str = 'utf-8',
                 workers: int = 1,
                 **boto_session_kwargs):
        """

        :param stream_name: Name of the Kinesis stream
        :type stream_name: str
        :param partition_key: Kinesis partition key used to group data by shards
        :type partition_key: str
        :param chunk_size: the size of a chunk of records for the rotation threshold (default 524288)
        :type chunk_size: int
        :param encoder: the encoder to be used for log records (default 'utf-8')
        :type encoder: str
        :param workers: the number of background workers that rotate log records (default 1)
        :type workers: int
        :param boto_session_kwargs: additional keyword arguments for the AWS Kinesis Resource
        :type boto_session_kwargs: boto3 resource keyword arguments
        """

        self._client = client('kinesis', **boto_session_kwargs)
        self.chunk_size = chunk_size
        self.stream_name = stream_name
        self.tasks = Queue()
        self.partition_key = partition_key
        self.encoder = encoder

        try:
            stream_desc = self._client.describe_stream(
                StreamName=self.stream_name)
            if stream_desc['StreamDescription']['StreamStatus'] != 'ACTIVE':
                raise AssertionError
        except Exception:
            raise ValueError(
                'Kinesis stream %s does not exist or is inactive, or permissions are insufficient'
                % stream_name)

        # Thread.start() returns None, so keep the Thread objects and start
        # them afterwards.
        self.workers = [
            threading.Thread(target=task_worker,
                             args=(self.tasks, ),
                             daemon=True)
            for _ in range(int(max(workers, MIN_WORKERS_NUM) / 2) + 1)
        ]
        for worker in self.workers:
            worker.start()
        self._stream = BytesIO()

        self._is_open = True

        BufferedIOBase.__init__(self)
Example #32
    def write(self, stream: BufferedIOBase, nodes: List[SceneNode], mode = MeshWriter.OutputMode.BinaryMode) -> bool:
        if mode != MeshWriter.OutputMode.BinaryMode:
            Logger.log("e", "GCodeGzWriter does not support text mode.")
            return False

        #Get the g-code from the g-code writer.
        gcode_textio = StringIO() #We have to convert the g-code into bytes.
        success = PluginRegistry.getInstance().getPluginObject("GCodeWriter").write(gcode_textio, None)
        if not success: #Writing the g-code failed. Then I can also not write the gzipped g-code.
            return False

        result = gzip.compress(gcode_textio.getvalue().encode("utf-8"))
        stream.write(result)
        return True
Example #33
def send_message(
    wfile: BufferedIOBase,
    payload: bytes,
    opcode=OPCODE["text"],
    rsv1=0,
    rsv2=0,
    rsv3=0,
):
    if len(payload) > 0x7FFFFFFFFFFFFFFF:
        raise ValueError(
            "Payload too big. Sending fragmented messages is not implemented."
        )
    frame = _encode_data_frame(1, opcode, rsv1, rsv2, rsv3, payload)
    wfile.write(frame)
Example #34
def merge_sort_stupid(fin: io.BufferedIOBase, fout: io.BufferedIOBase, memory_size: int, left=0, count=None):
    fout.seek(0)
    if count is None:
        count = content_length(fin, preserve_pos=False)

    if count <= memory_size:
        go_to_pos(fin, left)
        write_content(fout, sorted(read_content(fin, count=count)), batch_size=memory_size)
        return

    with tmp_file() as left_f, tmp_file() as right_f:
        merge_sort_stupid(fin, left_f, memory_size, left, count=count // 2)
        merge_sort_stupid(fin, right_f, memory_size, left + count // 2, count=count - count // 2)
        left_f.seek(0)
        right_f.seek(0)
        write_content(fout, heapq.merge(read_content(left_f, batch_size=memory_size // 2),
                                        read_content(right_f, batch_size=memory_size // 2)),
                      batch_size=memory_size)
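`tmp_file` is a project helper not shown in these examples. A plausible stand-in built on the standard library (an assumption, not the project's code), yielding a seekable binary temporary file that is deleted on exit:

import tempfile
from contextlib import contextmanager

@contextmanager
def tmp_file():
    with tempfile.TemporaryFile() as f:
        yield f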
Example #35
    def _check_sorted(self, source: io.BufferedIOBase, res: io.BufferedIOBase):
        hashes_size = 2**20

        def h(value):
            return hash(value) % hashes_size

        source.seek(0)
        source_content = list(itertools.repeat(0, hashes_size))
        for v in read_content(source):
            source_content[h(v)] += 1

        res.seek(0)
        res_content = list(itertools.repeat(0, hashes_size))
        prev = None
        for cur in read_content(res):
            res_content[h(cur)] += 1
            self.assertTrue(prev is None or prev <= cur)
            prev = cur

        self.assertTrue(source_content == res_content, 'Content differs')
Example #36
def read_content(f: io.BufferedIOBase, count=None, batch_size=_DEFAULT_BATCH_SIZE):
    while True:
        values_to_read = batch_size if count is None else min(count, batch_size)
        b = f.read(values_to_read * _VALUE_SIZE)
        if not b:
            return

        arr = _make_array()
        arr.frombytes(b)
        yield from arr
        if count is not None:
            count -= len(arr)

        if count == 0:
            return
Example #37
def content_length(f: io.BufferedIOBase, preserve_pos=True):
    if preserve_pos:
        pos = f.tell()

    f.seek(0, io.SEEK_END)
    res = f.tell() // _VALUE_SIZE

    if preserve_pos:
        f.seek(pos)

    return res
Example #38
def go_to_pos(f: io.BufferedIOBase, i: int):
    f.seek(i * _VALUE_SIZE)
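Examples #36 through #38 operate on the same flat binary file of fixed-size values. A self-contained demo under assumed definitions (`_VALUE_SIZE = 8` and `_make_array()` returning a signed 64-bit array("q") are guesses consistent with the arithmetic above):

import io
from array import array

_VALUE_SIZE = 8

def _make_array():
    return array("q")

f = io.BytesIO(array("q", [10, 20, 30, 40]).tobytes())
print(content_length(f))      # 4
go_to_pos(f, 2)
print(list(read_content(f)))  # [30, 40]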
Example #39
 def close(self):
     if not self.coroutine_result:
         result = call_result(self.coroutine.throw, EOFError, EOFError())
         if isinstance(result, RaiseResult):
             self.coroutine_result = result.generator_result()
     return BufferedIOBase.close(self)
Example #40
File: db2.py Project: Tankobot/p2pg
 def byte_object(self, file: BufferedIOBase):
     file.write(self.key)
     file.write(len(self.data).to_bytes(len(self.key), 'little'))
     file.write(self.data)
Example #41
	def __init__(self, stream: io.BufferedIOBase, new: Version = None):
		"""Parse a MIX from `stream`, which must be a buffered file object.
		
		If `new` is not None, initialize an empty MIX of this version instead.
		MixParseError is raised on parsing errors.
		"""
		
		# Initialize mandatory attributes
		self._dirty = False
		self._stream = None
		self._open = []
		
		# If stream is, for example, a raw I/O object, files could be destroyed
		# without ever raising an error, so check this.
		if not isinstance(stream, io.BufferedIOBase):
			raise TypeError("`stream` must be an instance of io.BufferedIOBase")
		
		if not stream.readable():
			raise ValueError("`stream` must be readable")
		
		if not stream.seekable():
			raise ValueError("`stream` must be seekable")
		
		if new is not None:
			# Start empty (new file)
			if type(new) is not Version:
				raise TypeError("`new` must be a Version enumeration member or None")
			if new is Version.RG:
				raise NotImplementedError("RG MIX files are not yet supported")
			self._stream = stream
			self._index = {}
			self._contents = []
			self._version = new
			self._flags = 0
			return
		
		# Parse an existing file
		filesize = stream.seek(0, io.SEEK_END)
		if filesize <= 6:
			raise MixParseError("File too small")
		stream.seek(0)
		
		first4 = stream.read(4)
		if first4 == b"MIX1":
			raise NotImplementedError("RG MIX files are not yet supported")
		elif first4[:2] == b"\x00\x00":
			# It seems we have a RA or TS MIX so check the flags
			flags = int.from_bytes(first4[2:], "little")
			if flags > 3:
				raise MixParseError("Unsupported properties")
			if flags & 2:
				raise NotImplementedError("Encrypted MIX files are not yet supported")
			
			# FIXME HERE: 80 bytes of westwood key_source if encrypted,
			#             to create a 56 byte long blowfish key from it.
			#
			#             They are followed by a number of 8 byte blocks,
			#             the first of them decrypting to filecount and bodysize.
			
			# Encrypted TS MIXes have a key.ini we can check for later,
			# so at this point assume Version.TS only if unencrypted.
			# Stock RA MIXes seem to be always encrypted.
			version = Version.TS
			
			# RA/TS MIXes hold their filecount after the flags,
			# whilst TD MIXes hold it in their first two bytes.
			filecount = int.from_bytes(stream.read(2), "little")
		else:
			version = Version.TD
			flags = 0
			filecount = int.from_bytes(first4[:2], "little")
			stream.seek(2)
			
		# From here it's the same for every unencrypted MIX
		bodysize    = int.from_bytes(stream.read(4), "little")
		indexoffset = stream.tell()
		indexsize   = filecount * 12
		bodyoffset  = indexoffset + indexsize

		# Check if data is sane
		# FIXME: Checksummed MIXes have 20 additional bytes after the body.
		if filesize - bodyoffset != bodysize:
			raise MixParseError("Incorrect filesize or invalid header")

		# OK, time to read the index
		index = {}
		for key, offset, size in struct.iter_unpack("<LLL", stream.read(indexsize)):
			offset += bodyoffset
			
			if offset + size > filesize:
				raise MixParseError("Content extends beyond end of file")

			index[key] = _MixNode(key, offset, size, size, None)

		if len(index) != filecount:
			raise MixParseError("Duplicate key")

		# Now read the names
		# TD/RA: 1422054725; TS: 913179935
		for dbkey in (1422054725, 913179935):
			if dbkey in index:
				stream.seek(index[dbkey].offset)
				header = stream.read(32)

				if header != b"XCC by Olaf van der Spek\x1a\x04\x17'\x10\x19\x80\x00":
					continue

				dbsize  = int.from_bytes(stream.read(4), "little")  # Total filesize

				if dbsize != index[dbkey].size or dbsize > 16777216:
					raise MixParseError("Invalid name table")

				# Skip four bytes for XCC type; 0 for LMD, 2 for XIF
				# Skip four bytes for DB version; Always zero
				stream.seek(8, io.SEEK_CUR)
				gameid = int.from_bytes(stream.read(4), "little")  # XCC Game ID
				
				# XCC saves alias numbers, so converting them
				# to `Version` is not straightforward.
				# FIXME: Check if Dune games and Nox also use MIX files
				if gameid == 0:
					if version is not Version.TD:
						continue
				elif gameid == 1:
					version = Version.RA
				elif 2 <= gameid <= 6 or gameid == 15:
					version = Version.TS
				else:
					continue
				
				namecount = int.from_bytes(stream.read(4), "little")
				bodysize  = dbsize - 53  # Size - Header - Last byte
				namelist  = stream.read(bodysize).split(b"\x00") if bodysize else []
				
				if len(namelist) != namecount:
					raise MixParseError("Invalid name table")
				
				# Remove Database from index
				del index[dbkey]
				
				# Add names to index
				names = False
				for name in namelist:
					name = name.decode(ENCODING, "ignore")
					key = genkey(name, version)
					if key in index:
						index[key].name = name
						names = True
				
				# XCC sometimes puts two Databases in a file by mistake,
				# so if no names were found, give it another try
				if names: break

		# Create a sorted list of all contents
		contents = sorted(index.values(), key=lambda node: node.offset)

		# Calculate alloc values
		# This is the size up to which a file may grow without needing a move
		for i in range(len(contents) - 1):
			contents[i].alloc = contents[i+1].offset - contents[i].offset

			if contents[i].alloc < contents[i].size:
				raise MixParseError("Overlapping file boundaries")

		# Populate the object
		self._stream = stream
		self._version = version
		self._index = index
		self._contents = contents
		self._flags = flags
Example #42
 def __init__(self):
     self.coroutine_result = None
     self.buffer = bytes()
     BufferedIOBase.__init__(self)