def import_file(self, data_dir, file_dir, file_name):
    """ Import the file file_name found in file_dir, using data_dir as the
    archive root: write the file data into the BDT file and register a data
    entry in the matching record. Return True on success. """
    source_path = os.path.join(file_dir, file_name)

    # A name matching UNNAMED_FILE_RE is already a hash string (e.g.
    # "192E66A4"); any other file gets a hashable archive-relative path
    # such as "/chr/c5352.anibnd.dcx".
    unnamed = self.UNNAMED_FILE_RE.match(file_name) is not None
    if unnamed:
        archive_path = file_name
    else:
        archive_path = "/" + ExternalArchive._get_rel_path(data_dir, source_path)
    LOG.info("Importing {}".format(archive_path))

    # Files tracked as decompressed must first be recompressed into their
    # DCX form; from then on both paths refer to the generated .dcx file.
    if archive_path in self.decompressed_list:
        local_rel = os.path.normpath(archive_path.lstrip("/"))
        if not ExternalArchive._compress(os.path.join(data_dir, local_rel)):
            return False
        archive_path += ".dcx"
        source_path += ".dcx"

    # Write the data into the BDT; the result is an (offset, size) pair
    # where size == -1 signals a failed import.
    offset_and_size = self.bdt.import_file(source_path)
    if offset_and_size[1] == -1:
        return False

    # Unnamed files carry their hash directly in the file name; named files
    # (possibly renamed to .dcx above) are hashed from the relative path,
    # which is only final at this point.
    entry = BhdDataEntry()
    entry.hash = int(file_name, 16) if unnamed else BhdDataEntry.hash_name(archive_path)
    entry.offset, entry.size = offset_and_size
    return self._update_record(archive_path, entry)
def test_hash_name(self):
    """Check that hash_name maps the example archive path to its known hash."""
    # assertEquals is a deprecated alias (removed in Python 3.12);
    # the canonical method is assertEqual.
    self.assertEqual(BhdDataEntry.hash_name(EXAMPLE_NAME), EXAMPLE_HASH)