Example #1
 def write_header(self, f: BinaryIO) -> None:
     """Writes the BSA header to f
     
     Args:
         f (BinaryIO): file-like output stream
     """
     if self.auto_file_flags:
         logging.debug("Determining file flags")
         self.file_flags = functools.reduce(operator.or_, (x.flags for x in self.walk_folders()), FileFlag.NONE)
     logging.info(f"Archive flags: {str(self.flags)}")
     logging.info(f"File flags: {str(self.file_flags)}")
     logging.info(f"Folders count: {self.non_empty_folders_count}")
     logging.info(f"Files count: {self.files_count}")
     logging.debug(f"Total folder length: {self.total_folder_name_length}")
     logging.debug(f"Total file names length: {self.total_file_name_length}")
     logging.info(f"Compression level: {self.compression_level if self.compress else 'no compression'}")
     logging.debug("Writing header")
     f.write(b"BSA\x00")
     f.write(
         struct.pack(
             "<LLLLLLLL",
             self.game.value,
             0x24,
             self.flags.value,
             self.non_empty_folders_count,
             self.files_count,
             self.total_folder_name_length,
             self.total_file_name_length,
             self.file_flags.value
         )
     )
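The header layout is easiest to verify by reading it back. A minimal sketch of the inverse, assuming the same 36-byte (0x24) layout; read_header and the field names are hypothetical:

import struct
from typing import BinaryIO

def read_header(f: BinaryIO) -> dict:
    # Hypothetical inverse of write_header above; field order mirrors
    # the struct.pack call (all fields little-endian uint32).
    magic = f.read(4)
    if magic != b"BSA\x00":
        raise ValueError("not a BSA archive")
    (version, offset, archive_flags, folder_count, file_count,
     folder_name_length, file_name_length, file_flags) = struct.unpack("<LLLLLLLL", f.read(32))
    return {
        "version": version,
        "offset": offset,  # always 0x24 in this writer
        "archive_flags": archive_flags,
        "folder_count": folder_count,
        "file_count": file_count,
        "folder_name_length": folder_name_length,
        "file_name_length": file_name_length,
        "file_flags": file_flags,
    }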
Example #2
 def write_files(self, f: BinaryIO) -> None:
     """Writes file data to f.
     
     Args:
         f (BinaryIO): file-like output stream
     """
     # TODO: name (bstring): full path and name of the file; only present if bit 9 of archiveFlags is set
     self.i = 0
     total = self.files_count
     for folder in self.sorted_folders:
         for file in folder.sorted_files:
             p = f"{file.folder.name}\\{file.name}"
             # logging.info(f"Writing {p:100s} [{(self.i * 100) / total:2.2f}%]")
             data_start = f.tell()
             with open(os.path.join(self.data_path, folder.name, file.name), "rb") as o:
                 if not self.compress:
                     f.write(o.read())
                 else:
                     uncompressed_data = o.read()
                     compressed_data = lz4.frame.compress(uncompressed_data, compression_level=self.compression_level)
                     f.write(struct.pack("<L", len(uncompressed_data)))
                     f.write(compressed_data)
             size = f.tell() - data_start
             f.seek(file.record_offset + 8)
             f.write(struct.pack("<LL", size + (4 if self.compress else 0), data_start))
             f.seek(0, os.SEEK_END)
             self.i += 1
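The seek-back dance in write_files is easier to see in isolation. A toy sketch of the same back-patching pattern on an in-memory stream (all names are illustrative):

import io
import os
import struct

buf = io.BytesIO()
record_offset = buf.tell()
buf.write(struct.pack("<LL", 0, 0))  # placeholder size and offset
data_start = buf.tell()
buf.write(b"payload")
size = buf.tell() - data_start
buf.seek(record_offset)
buf.write(struct.pack("<LL", size, data_start))  # patch the record
buf.seek(0, os.SEEK_END)  # resume appending at the end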
Example #3
 def write_file_names(self, f: BinaryIO) -> None:
     """Writes file names block to f.
     If the BSA flags do not include Flags.INCLUDE_FILE_NAMES, this method does nothing.
     
     Args:
         f (BinaryIO): file-like output stream
     """
     if (self.flags & Flags.INCLUDE_FILE_NAMES) == 0:
         return
     logging.debug("Writing file names")
     for folder in self.sorted_folders:
         for file in folder.sorted_files:
             f.write(file.name.encode("ascii"))
             f.write(b"\x00")
Example #4
 def _save_index(self, file: BinaryIO):
     start_time = time.time()
     max_bytes = 2**31 - 1  # single writes above 2 GiB can fail on some platforms
     print('Serializing index')
     data = pickle.dumps(self.index)
     elapsed_time = time.time() - start_time
     print('Index serialized; elapsed time: {}'.format(
         str(datetime.timedelta(seconds=elapsed_time))))
     start_time = time.time()
     print('Writing index')
     for i in tqdm(range(0, len(data), max_bytes)):
         file.write(data[i:i + max_bytes])
         file.flush()
     elapsed_time = time.time() - start_time
     print('Index saved; elapsed time: {}'.format(
         str(datetime.timedelta(seconds=elapsed_time))))
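A plausible counterpart for loading, under the same assumption that single reads of more than 2 GiB can fail on some platforms; _load_index is a hypothetical name, with pickle imported as above:

def _load_index(self, file: BinaryIO):
    max_bytes = 2**31 - 1
    chunks = []
    while True:
        # Read the pickled index back in chunks no larger than the
        # limit used when writing, then deserialize in one go.
        chunk = file.read(max_bytes)
        if not chunk:
            break
        chunks.append(chunk)
    self.index = pickle.loads(b''.join(chunks))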
Example #5
 def write_folder_records(self, f: BinaryIO) -> None:
     """Writes the folder records block to f
     
     Args:
         f (BinaryIO): file-like output stream
     """
     # Write a placeholder record for each folder; the offset field is patched later in write_file_records
     logging.debug("Writing folder records")
     logging.debug(f"Sorted folder hashes: {[x.tes_hash for x in self.sorted_folders]}")
     for folder in self.sorted_folders:
         folder.record_offset = f.tell()
         f.write(
             struct.pack(
                 "<QLLQ",
                 folder.tes_hash,
                 len(folder.files),
                 0,
                 0
             )
         )
Example #6
 def write_file_records(self, f: BinaryIO) -> None:
     """Writes the file records block to f
     
     Args:
         f (BinaryIO): file-like output stream
     """
     logging.debug("Writing file records")
     for folder in self.sorted_folders:
         logging.debug(f"Processing file records for folder {folder.name}")
         offset = f.tell()
         # Patch this folder's record: skip hash (8) + file count (4) + padding (4) to reach the offset field
         f.seek(folder.record_offset + 8 + 4 + 4)
         f.write(struct.pack("<Q", offset + self.total_file_name_length))
         f.seek(0, os.SEEK_END)
         if (self.flags & Flags.INCLUDE_DIRECTORY_NAMES) > 0:
             f.write(pack_str(folder.name))
         logging.debug(f"Sorted files in {folder.name}: {[x.tes_hash for x in folder.sorted_files]}")
         for file in folder.sorted_files:
             file.record_offset = f.tell()
             # size and offset stay zero here; write_files patches them in place
             f.write(
                 struct.pack(
                     "<QLL",
                     file.tes_hash,
                     0,
                     0
                 )
             )
Example #7
def write_certificate_to_file(
    certificate: Certificate,
    dest_file: BinaryIO,
    encoding: serialization.Encoding,
) -> None:
    """Writes a certificate to a file.

    Args:
        certificate: Certificate object to be saved to file.
        dest_file: BinaryIO object representing the file to be written to.
        encoding: The serialization format to use to save the certificate.

    Raises:
        X509CertificateError: In case the certificate cannot be saved to file.
    """

    try:
        cert_bytes = serialize_certificate(certificate, encoding)
        dest_file.write(cert_bytes)
    except Exception as err:
        raise X509CertificateError(
            'Error writing certificate to file: {}'.format(str(err))) from err
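A short usage sketch, assuming certificate is a cryptography x509 Certificate object; the output path is hypothetical:

from cryptography.hazmat.primitives import serialization

with open('cert.pem', 'wb') as dest:  # hypothetical output path
    write_certificate_to_file(certificate, dest, serialization.Encoding.PEM)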
Example #8
def dump(value: TSerializable, file_handle: typing.BinaryIO) -> None:
    """
    This function dumps a python object as a tnetstring and
    writes it to the given file.
    """
    file_handle.write(dumps(value))
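Since dump only needs a writable binary stream, an in-memory buffer is enough for a quick check:

import io

buf = io.BytesIO()
dump({'key': 'value'}, buf)
print(buf.getvalue())  # tnetstring bytes, e.g. b'14:3:key,5:value,}'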
Example #9
    def write(self, content, fp: BinaryIO):
        """Write `content` into a file-like object.

        Content should be a barcode rendered by this writer.
        """
        fp.write(content)
Example #11
def write_record(stream: BinaryIO, record: MarcRecord, encoding: str) -> None:
    """
    Save a record to a file in ISO 2709 format.

    :param stream: Output stream
    :param record: Record to save
    :param encoding: Text encoding
    :return: None
    """

    record_length = MARKER_LENGTH
    dictionary_length = 1  # Includes the directory terminator
    field_length: List[int] = []

    # First pass: compute the total record length
    for field in record.fields:

        if field.tag <= 0 or field.tag >= 1000:
            # The field tag cannot be encoded
            raise IrbisError('Bad tag: ' + str(field.tag))

        dictionary_length += 12  # One directory entry
        fldlen = 0
        if field.tag < 10:
            # Fixed fields have no subfields or indicators
            fldlen += len(field.value.encode(encoding))
        else:
            fldlen += 2  # Indicators
            if field.value:
                fldlen += len(field.value.encode(encoding))
            for subfield in field.subfields:
                code = subfield.code
                if code is None or ord(code) <= 32 or ord(code) >= 255:
                    raise IrbisError('Bad code: ' + safe_str(code))
                fldlen += 2  # Subfield delimiter and its code
                fldlen += len(subfield.value.encode(encoding))
        fldlen += 1  # Field delimiter

        if fldlen >= 10_000:
            # Field is too long
            raise IrbisError('Field too long: ' + str(fldlen))

        field_length.append(fldlen)
        record_length += fldlen

    record_length += dictionary_length  # Directory
    record_length += 1  # Record delimiter

    if record_length >= 100_000:
        # Record is too long
        raise IrbisError('Record too long: ' + str(record_length))

    # Second pass: encode the record
    dictionary_position = MARKER_LENGTH
    base_address = MARKER_LENGTH + dictionary_length
    current_address = base_address
    buffer = bytearray(record_length)
    for i in range(base_address):
        buffer[i] = 32  # Fill the leader and directory with spaces
    encode_int(buffer, 0, 5, record_length)
    encode_int(buffer, 12, 5, base_address)

    buffer[5] = ord('n')  # Record status
    buffer[6] = ord('a')  # Record type
    buffer[7] = ord('m')  # Bibliographic level
    buffer[8] = ord('2')
    buffer[10] = ord('2')
    buffer[11] = ord('2')
    buffer[17] = ord(' ')  # Encoding level
    buffer[18] = ord('i')  # Cataloging rules
    buffer[19] = ord(' ')  # Related record
    buffer[20] = ord('4')  # Field length
    buffer[21] = ord('5')  # Field offset
    buffer[22] = ord('0')

    # Encode the directory terminator
    buffer[base_address - 1] = FIELD_DELIMITER

    # Walk the fields
    for i, field in enumerate(record.fields):
        # Encode the directory entry
        encode_int(buffer, dictionary_position + 0, 3, field.tag)
        encode_int(buffer, dictionary_position + 3, 4, field_length[i])
        encode_int(buffer, dictionary_position + 7, 5,
                   current_address - base_address)

        # Encode the field itself
        if field.tag < 10:
            # Fixed fields have no subfields or indicators
            encode_str(buffer, current_address, field.value, encoding)
        else:
            # Two blank indicators
            buffer[current_address + 0] = 32
            buffer[current_address + 1] = 32
            current_address += 2

            # Field value up to the first subfield delimiter
            current_address = encode_str(buffer, current_address, field.value,
                                         encoding)

            # Subfields
            for subfield in field.subfields:
                buffer[current_address + 0] = SUBFIELD_DELIMITER
                buffer[current_address + 1] = ord(subfield.code)
                current_address += 2
                current_address = encode_str(buffer, current_address,
                                             subfield.value, encoding)
        buffer[current_address] = FIELD_DELIMITER
        current_address += 1
        dictionary_position += 12

    # Record terminator
    buffer[record_length - 2] = FIELD_DELIMITER
    buffer[record_length - 1] = RECORD_DELIMITER

    # Finally, write the encoded record
    stream.write(buffer)
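The helpers encode_int and encode_str are used above but not shown. Plausible sketches consistent with the call sites (zero-padded decimal digits for the leader and directory; encode_str returns the advanced position):

def encode_int(buffer: bytearray, position: int, length: int, value: int) -> None:
    # Sketch: render value as exactly `length` zero-padded ASCII digits.
    digits = str(value).rjust(length, '0')[-length:]
    buffer[position:position + length] = digits.encode('ascii')

def encode_str(buffer: bytearray, position: int, text: str, encoding: str) -> int:
    # Sketch: copy the encoded text into the buffer and return the
    # position just past it; an empty or None value writes nothing.
    if text:
        data = text.encode(encoding)
        buffer[position:position + len(data)] = data
        position += len(data)
    return position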