Пример #1
0
    def export_file(self, entry, output_dir):
        """ Export the file corresponding to that BHD data entry.

        Returns the relative file path on success, None on failure (entry not
        from this archive, or the BDT does not hold as many bytes as the
        entry claims).
        """
        if not self.is_entry_valid(entry):
            LOG.error("Tried to extract a file not from this archive.")
            return None

        # Fall back to the 8-digit hex hash as a file name when the hash is
        # not in the known file list.
        rel_path = self.filelist.get(entry.hash) or "{:08X}".format(entry.hash)
        LOG.info("Extracting {}".format(rel_path))

        file_content = self.bdt.read_entry(entry.offset, entry.size)
        content_len = len(file_content)
        if content_len != entry.size:
            LOG.error( "Tried to read {} bytes but only {} were available "
                       "(file '{}').".format(
                entry.size, content_len, rel_path
            ))
            return None

        output_path = os.path.join(output_dir, rel_path.lstrip("/"))
        # exist_ok=True avoids the isdir/makedirs TOCTOU race (and a crash
        # when a concurrent extraction creates the directory first).
        dir_path = os.path.dirname(output_path)
        if dir_path:
            os.makedirs(dir_path, exist_ok=True)
        with open(output_path, "wb") as output_file:
            output_file.write(file_content)
        return rel_path
Пример #2
0
 def load_decompressed_list(self, input_dir):
     """ Read the JSON list of files in input_dir that must be compressed
     again before being imported back into the archive. Return True when
     the list was loaded, False when no list file exists. """
     json_path = os.path.join(input_dir, self.DECOMPRESSED_LIST_NAME)
     if not os.path.isfile(json_path):
         LOG.info("No decompressed file list found in the input dir.")
         return False
     with open(json_path, "r") as json_file:
         self.decompressed_list = json.load(json_file)
     LOG.info("Loaded decompressed file list.")
     return True
Пример #3
0
 def _try_decompress(self, rel_path, base_rel_path, output_dir):
     """ Attempt to decompress the DCX file at rel_path inside output_dir.
     Does nothing when a file is already expected at base_rel_path; on
     success, base_rel_path is appended to the decompressed list. """
     if base_rel_path in self.filelist.values():
         LOG.info("Won't decompress {} because it conflicts with {}".format(
             rel_path, base_rel_path
         ))
         return
     dcx_path = os.path.join(
         output_dir, os.path.normpath(rel_path.lstrip("/"))
     )
     if ExternalArchive._decompress(dcx_path):
         self.decompressed_list.append(base_rel_path)
Пример #4
0
    def extract_all_files(self, output_dir, write_infos=True):
        """ Extract every entry of this archive into output_dir.

        When write_infos is True (default), a JSON metadata file is written
        for each extracted file, plus one general file for the whole BND, so
        that a later import_files call can reuse the exact same BND and entry
        properties and avoid breaking anything when a file is edited.
        """
        for bnd_entry in self.entries:
            rel_path = bnd_entry.get_joinable_path()
            LOG.info("Extracting {}".format(rel_path))
            bnd_entry.extract_file(
                os.path.join(output_dir, rel_path), write_infos
            )
        self._write_infos(output_dir)
Пример #5
0
    def import_file(self, data_dir, file_dir, file_name):
        """ Try to import the file file_name in file_dir, with data_dir as the
        archive root; create a data entry in the appropriate record, and write
        the file data in the BDT file. Return True on success. """
        file_path = os.path.join(file_dir, file_name)

        # A file is "unnamed" when its name is just a hex hash, e.g.
        # "192E66A4"; otherwise a hashable archive path like
        # "/chr/c5352.anibnd.dcx" is built from its location under data_dir.
        is_unnamed = self.UNNAMED_FILE_RE.match(file_name) is not None
        if is_unnamed:
            rel_path = file_name
        else:
            rel_path = "/" + ExternalArchive._get_rel_path(data_dir, file_path)
        LOG.info("Importing {}".format(rel_path))

        # Files in the decompressed list must be recompressed to a DCX first;
        # both paths are then retargeted at the freshly created .dcx file.
        if rel_path in self.decompressed_list:
            source_path = os.path.join(
                data_dir, os.path.normpath(rel_path.lstrip("/"))
            )
            if not ExternalArchive._compress(source_path):
                return False
            rel_path += ".dcx"
            file_path += ".dcx"

        # Write the data into the BDT; -1 written bytes means failure.
        offset, written = self.bdt.import_file(file_path)
        if written == -1:
            return False

        # Unnamed files are never decompressed, so their name already is the
        # hash; named files may have been renamed by the compression step, so
        # their relative path is only known (and hashed) at this point.
        if is_unnamed:
            entry_hash = int(file_name, 16)
        else:
            entry_hash = BhdDataEntry.hash_name(rel_path)

        data_entry = BhdDataEntry()
        data_entry.hash = entry_hash
        data_entry.offset = offset
        data_entry.size = written

        return self._update_record(rel_path, data_entry)
Пример #6
0
 def _save_files(self, bhd_path):
     """ Persist the archive: write the BHD to bhd_path, then close the BDT
     file handle so its data is flushed to disk. """
     LOG.info("Saving files to disk...")
     self.bhd.save(bhd_path)
     self.bdt.close()