def _read(self, stream: IO, name: str): while stream.readable(): data = stream.read(1024).decode('utf-8') if len(data) == 0: break print(data) self.api_logs.outputs(**{name: data})
def download_file_stream(url: str, file_stream: IO, block_size: int = 1024 * 8, with_progress_bar: bool = True, fatal: bool = True) -> bool:
    """Stream the resource at *url* into *file_stream*.

    :param url: URL to download.
    :param file_stream: Writable binary sink for the downloaded bytes.
    :param block_size: Read granularity in bytes.
    :param with_progress_bar: Show a progress bar when the size is known.
    :param fatal: Whether a download failure should be logged as fatal.
    :return: ``True`` on a complete download, ``False`` otherwise.
    """
    try:
        download_stream = urllib.request.urlopen(url)
    except Exception as e:
        log(f'Failed on: {url}', fatal=False, log_type=LogType.ERROR)
        log(e, fatal=fatal, log_type=LogType.ERROR)
        return False
    with download_stream:
        headers: HTTPMessage = download_stream.info()
        expected = int(headers.get('Content-Length', failobj=0))
        # Progress bar only makes sense when the server told us the size.
        bar = DownloadProgressBar(expected) if with_progress_bar and expected > 0 else None
        received = 0
        while True:
            chunk: bytes = download_stream.read(block_size)
            if not chunk:
                break
            received += len(chunk)
            file_stream.write(chunk)
            if bar and not bar.run(len(chunk)):
                break
        # A short read (including an aborted progress bar) is a failure.
        if expected >= 0 and received < expected:
            return False
        return True
def __init__(self, f: IO):
    """Parse an extension-data structure from the binary stream *f*.

    Reads (big-endian): a u32 length; then, when the length is non-zero,
    a u32 data-block start address, 3 reserved word-alignment bytes, a u8
    entry count, that many ``Entry`` records, and finally the raw data
    block.

    :param f: Binary stream positioned at the start of the structure.
    """
    super().__init__()
    # Payload length in bytes; 0 means the structure is empty.
    self.length: int = read_u32(f, endianess=Endianess.BIG_ENDIAN)
    self.logger.debug(f"Length: {self.length}")
    if self.length != 0:
        # Offset at which the trailing data block begins.
        self.data_block_start_address: int = read_u32(
            f, endianess=Endianess.BIG_ENDIAN)
        self.logger.debug(
            f"Data Block Start Address: {self.data_block_start_address}")
        # NOTE(review): f.read(3) returns bytes, not int — annotation fixed.
        self.reserved_for_word_align: bytes = f.read(3)
        self.logger.debug(
            f"Reserved For Word Align: {self.reserved_for_word_align}")
        self.number_of_ext_data_entries: int = read_u8(
            f, endianess=Endianess.BIG_ENDIAN)
        self.logger.debug(
            f"Number of Ext Data Entries: {self.number_of_ext_data_entries}"
        )
        self.entries: List[Entry] = list()
        for entry_index in range(self.number_of_ext_data_entries):
            self.logger.debug(f"Reading Entry {entry_index}")
            self.entries.append(Entry(f))
        # Whatever remains of the payload after the entries: size is
        # derived from the declared length and the data-block offset.
        self.data_block: bytes = f.read(4 + self.length - self.data_block_start_address)
        self.logger.debug(f"Data Block: {hex_log_str(self.data_block)}")
def __init__(self, f: IO):
    """Parse an HDMV or BD-J object reference from the binary stream *f*.

    The superclass reads the common flags and derives ``object_type``;
    depending on it, this reads either a movie-object ID reference (HDMV)
    or a 5-character BD-J object file name, each followed by reserved
    padding.

    :param f: Binary stream positioned just past the common header.
    """
    super().__init__(f)
    if self.object_type == ObjectType.HDMV:
        self.object_flags: bytes = f.read(2)
        # BUGFIX: previously logged self.flags (read by the superclass)
        # under the "Object Flags" label.
        self.logger.debug(f"Object Flags: {hexlify(self.object_flags)}")
        # Top two bits must be 0b01 for this object kind.
        assert (self.object_flags[0] & 0b11000000) >> 6 == 0b01
        # ID of a movie object referenced by the top menu.
        self.mobj_id_ref: int = read_u16(f, endianess=Endianess.BIG_ENDIAN)
        self.logger.debug(f"Movie Object Id Reference: {self.mobj_id_ref}")
        self.reserved: bytes = f.read(4)
        self.logger.debug(f"Reserved: {hexlify(self.reserved)}")
    elif self.object_type == ObjectType.BDJ:
        self.object_flags: bytes = f.read(2)
        # BUGFIX: same mislabelled log as above — use object_flags.
        self.logger.debug(f"Object Flags: {hexlify(self.object_flags)}")
        # Top two bits must be 0b11 for this object kind.
        assert (self.object_flags[0] & 0b11000000) >> 6 == 0b11
        # File name of the BD-J object referenced by the top menu.
        # NOTE(review): decode() returns str, not int — annotation fixed.
        self.bdjo_file_name: str = f.read(5).decode("ASCII")
        self.logger.debug(f"BD-J Object File Name: {self.bdjo_file_name}")
        self.reserved: bytes = f.read(1)
        self.logger.debug(f"Reserved: {hexlify(self.reserved)}")
def _read(self, stream: IO, path: Path, is_err: bool): if not stream.readable(): return with open(str(path), 'ab') as f: while stream.readable(): data = stream.read(1024) if len(data) == 0: break f.write(data) self.ui_mode.on_bytes(data, is_err=is_err)
def _read(self, stream: IO, name: str): buffer = '' while stream.readable(): data = stream.read(1) if len(data) == 0: break buffer += data print(data, end='') if '\n' in buffer or len(buffer) > 100: self.api_logs.outputs(**{name: buffer}) buffer = '' if len(buffer) > 0: self.api_logs.outputs(**{name: buffer})
def write_pandas_out_to_file(self, file: IO):
    """ Writes the pandas data to the specified file, as bytes.

    NOTES: The data is written as bytes (i.e. in binary), and the buffer
    mode must be 'wb'. E.g. open(file_name, 'wb')
    The file will NOT be human-readable.

    :param file: The file object (or a buffer).
    :raises IOError: If *file* was opened in a non-binary mode.
    """
    # BUGFIX: plain buffers (e.g. io.BytesIO) have no .mode attribute, so
    # the old `file.mode` raised AttributeError despite the docstring
    # promising buffer support. Buffers are inherently binary — default
    # to 'b'.
    if 'b' not in getattr(file, 'mode', 'b'):
        raise IOError("Proto files must be binary use open(path,\"wb\")")
    if self.df_bytes is not None:
        file.write(self.df_bytes)
    elif not self.should_store_frames:
        logger.warning("pd DataFrames are not being stored anywhere")
def upload_from_file(self, file_stream: IO) -> Tuple[bytes, str, str]:
    """Store the PDF carried by *file_stream*, deduplicated by content hash.

    The file is only written when no object with the same hash already
    exists at the access provider.

    :param file_stream: Readable stream with the raw PDF bytes.
    :return: Tuple of (raw content, content hash, link to the stored file).
    """
    content = file_stream.read()
    file_hash = self.calc_hash(content)
    filename = f'{file_hash}.pdf'
    provider = self._file_access_provider
    link = provider.get_link_to_file(filename)
    if not provider.exists(filename):
        provider.save_file(filename, BytesIO(content))
    return content, file_hash, link
def __init__(self, f: IO):
    """Parse a title's HDMV or BD-J object reference from stream *f*.

    The superclass reads the common flags and derives ``object_type``;
    this additionally extracts the access type from the flags, then the
    playback type plus either a movie-object ID reference (HDMV) or a
    5-character BD-J object file name, each followed by reserved padding.

    :param f: Binary stream positioned just past the common header.
    """
    super().__init__(f)
    # Access type lives in bits 5-4 of the first common-flags byte.
    self.access_type: AccessType = AccessType((self.flags[0] & 0b00110000) >> 4)
    self.logger.debug(f"Access Type: {self.access_type}")
    if self.object_type == ObjectType.HDMV:
        self.object_flags: bytes = f.read(2)
        # BUGFIX: previously logged self.flags (read by the superclass)
        # under the "Object Flags" label.
        self.logger.debug(f"Object Flags: {hexlify(self.object_flags)}")
        self.hdmv_title_playback_type: HDMVTitlePlaybackType = HDMVTitlePlaybackType(
            (self.object_flags[0] & 0b11000000) >> 6)
        self.logger.debug(
            f"HDMV Title Playback Type: {self.hdmv_title_playback_type}")
        # ID of a movie object referenced by the top menu.
        self.mobj_id_ref: int = read_u16(f, endianess=Endianess.BIG_ENDIAN)
        self.logger.debug(f"Movie Object Id Reference: {self.mobj_id_ref}")
        self.reserved: bytes = f.read(4)
        self.logger.debug(f"Reserved: {hexlify(self.reserved)}")
    elif self.object_type == ObjectType.BDJ:
        self.object_flags: bytes = f.read(2)
        # BUGFIX: same mislabelled log as above — use object_flags.
        self.logger.debug(f"Object Flags: {hexlify(self.object_flags)}")
        self.bdj_title_playback_type: BDJTitlePlaybackType = BDJTitlePlaybackType(
            (self.object_flags[0] & 0b11000000) >> 6)
        self.logger.debug(
            f"BD-J Object Playback Type: {self.bdj_title_playback_type}")
        # File name of the BD-J object referenced by the top menu.
        # NOTE(review): decode() returns str, not int — annotation fixed.
        self.bdjo_file_name: str = f.read(5).decode("ASCII")
        self.logger.debug(f"BD-J Object File Name: {self.bdjo_file_name}")
        self.reserved: bytes = f.read(1)
        self.logger.debug(f"Reserved: {hexlify(self.reserved)}")
def __init__(self, f: IO):
    """Read the common 4-byte flags field from *f* and derive the object
    type from its top two bits.

    :param f: Binary stream positioned at the start of the flags field.
    """
    super().__init__()
    self.flags: bytes = f.read(4)
    self.logger.debug(f"Flags: {hexlify(self.flags)}")
    # Object type is encoded in bits 7-6 of the first flags byte.
    type_bits = (self.flags[0] & 0b11000000) >> 6
    self.object_type: ObjectType = ObjectType(type_bits)
    self.logger.debug(f"Object Type: {self.object_type}")
async def write_to_file(self, file: IO, reader: asyncio.StreamReader, data_len: int, iv: bytes):
    """Receive *data_len* encrypted bytes from *reader*, decrypt them
    chunk-wise with *iv*, and write the plaintext to *file*.

    :param file: Writable binary sink for the decrypted data.
    :param reader: Stream delivering the encrypted payload.
    :param data_len: Total number of encrypted bytes expected.
    :param iv: Initialization vector for the decryptor.
    :return: SHA-512 digest of the decrypted data.
    :raises ConnectionError: If the stream ends before *data_len* bytes
        arrive.
    """
    remaining = data_len
    file_hash = sha512()
    while remaining > 0:
        chunk = min(remaining, CHUNK)
        # BUGFIX: StreamReader.read(n) may legally return fewer than n
        # bytes; the old code raised ConnectionError on any short read.
        # Accumulate until the chunk is complete and raise only on a
        # genuine EOF.
        encrypted_data = b''
        while len(encrypted_data) < chunk:
            part = await reader.read(chunk - len(encrypted_data))
            if not part:  # peer closed before the payload completed
                raise ConnectionError
            encrypted_data += part
        data = self.encryptor.decrypt(encrypted_data, iv)
        file.write(data)
        file_hash.update(data)
        remaining -= chunk
    return file_hash.digest()
async def file_edit(sid, file: IO):
    """Handle a file-edit request: an 8-byte big-endian name length, the
    UTF-8 file name, then the file contents, copied chunk-wise into
    ``files/``. Replies to the client over *server* when done.

    :param sid: Session/connection identifier for the reply.
    :param file: Stream carrying the length-prefixed request payload.
    """
    file_name_len = int.from_bytes(file.read(8), 'big')
    file_name = file.read(file_name_len).decode()
    # SECURITY(review): file_name is client-supplied and interpolated into
    # a path unchecked — a name containing '../' escapes the files/
    # directory. Validate or sanitize it before trusting this handler.
    try:
        with open(f'files/{file_name}', 'wb') as f:
            while True:
                data = file.read(CHUNK)
                if not data:
                    break
                f.write(data)
    except (FileExistsError, FileNotFoundError):
        # Identical handlers merged. NOTE: open(..., 'wb') never raises
        # FileExistsError (only 'xb' does); kept for backward
        # compatibility.
        await server.send(sid, 'file_edit', b'File not found')
        return
    await server.send(sid, 'file_edit', b'Editing done')
def as_text(file: IO, reqs: List[Record]):
    """Write a third-party notices document for *reqs* to *file*.

    Emits a project header, each formatted record separated by newlines,
    and a closing horizontal rule, then flushes the file.

    :param file: Writable text sink.
    :param reqs: Records to include in the notices document.
    """
    formatted = [text_format(req) for req in reqs]
    nl = "\n"
    hr = "".join([nl, "=" * 80, nl])
    header = nl.join([
        project_name,
        "THIRD - PARTY SOFTWARE NOTICES AND INFORMATION",
        "This project incorporates components from the projects listed below.",
        hr,
        nl
    ])
    footer = nl.join([hr, nl])
    file.write(header)
    for entry in formatted:
        file.write(nl.join(entry))
    file.write(footer)
    file.flush()
def auto_flush_file(file: IO):
    """Background loop: flush *file* every 20 seconds for as long as the
    server is running, holding the shared lock around each flush.

    :param file: Open file object to be flushed periodically.
    """
    while glob.server_running:
        time.sleep(20)
        lockfile.acquire()
        try:
            file.flush()
        finally:
            lockfile.release()