def save(self, file_path):
    with open(file_path, 'wb') as f:
        # Write magic header
        f.write(b'.EVS')

        # Write the number of entries
        common.write_uint32(f, len(self.entries))

        # Write entry offsets
        # Size of header + size of entry table
        previous_entry_end = 8 + 4 * len(self.entries)
        converted_entries = []
        for (entry_type, entry_parameters, entry_content) in self.entries:
            # Write offset
            common.write_uint32(f, previous_entry_end)

            # Calculate the size of this entry,
            # so that we know when the next entry starts

            # Convert UTF-8 to Shift-JIS
            entry_content = common.to_eva_sjis(entry_content)

            # Calculate the entry_size
            # parameters + content
            entry_size = 4 * len(entry_parameters) + len(entry_content)

            # Update previous_entry_end for the next iteration
            # Add 4 for the entry_type and entry_size fields, along with padding
            previous_entry_end += common.align_size(4 + entry_size, 4)

            # Add the entry to the converted entries
            converted_entries.append(
                (entry_type, entry_size, entry_parameters, entry_content))

        # Loop through entries
        for (entry_type, entry_size, entry_parameters, entry_content) in converted_entries:
            # Write entry type
            common.write_uint16(f, entry_type)

            # Write entry size
            common.write_uint16(f, entry_size)

            # Write the parameters
            for entry_parameter in entry_parameters:
                common.write_uint32(f, entry_parameter)

            # Write the content
            f.write(entry_content)

            # Add padding so the next entry starts on a 4-byte boundary
            entry_padding = common.align_size(entry_size, 4) - entry_size
            f.write(b'\0' * entry_padding)
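The `common.write_uint8`/`write_uint16`/`write_uint32` helpers used throughout these writers are not shown here. A minimal sketch of what they are assumed to do, packing little-endian integers with `struct` (hypothetical implementations; the real helpers and the byte order are assumptions):

import struct

def write_uint8(f, value):
    # Pack a single unsigned byte
    f.write(struct.pack('<B', value))

def write_uint16(f, value):
    # Pack an unsigned 16-bit integer, little-endian (assumed byte order)
    f.write(struct.pack('<H', value))

def write_uint32(f, value):
    # Pack an unsigned 32-bit integer, little-endian (assumed byte order)
    f.write(struct.pack('<I', value))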
def save(self, file_path):
    with open(file_path, 'wb') as f:
        # Write magic header
        f.write(b'BIND')

        # Write the size-field byte width
        # This determines how many bytes each entry-size field occupies
        common.write_uint16(f, self.size_byte_size)

        # Write the number of entries
        common.write_uint16(f, len(self.entries))

        # Write block size
        common.write_uint32(f, self.block_size)

        # Calculate header size, and padding
        # The header size is:
        # (16 + number of entries * self.size_byte_size) aligned to self.block_size
        header_size = 16 + len(self.entries) * self.size_byte_size
        padded_header_size = common.align_size(header_size, self.block_size)

        # Write header size
        common.write_uint32(f, padded_header_size)

        # Write the entry sizes
        for entry in self.entries:
            # Write the entry size
            if self.size_byte_size == 1:
                common.write_uint8(f, entry.get_size())
            elif self.size_byte_size == 2:
                common.write_uint16(f, entry.get_size())
            elif self.size_byte_size == 4:
                common.write_uint32(f, entry.get_size())

        # Insert padding
        f.write(b'\0' * (padded_header_size - header_size))

        # Write entries
        for entry in self.entries:
            entry_size = entry.get_size()
            padded_entry_size = common.align_size(entry_size, self.block_size)
            f.write(entry.content)
            f.write(b'\0' * (padded_entry_size - entry_size))
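`common.align_size` is relied on by nearly every writer here to round a size up to a block or word boundary. A minimal sketch of the assumed behaviour (hypothetical implementation, not the project's actual helper):

def align_size(size, alignment):
    # Round size up to the next multiple of alignment,
    # e.g. align_size(17, 16) == 32 and align_size(16, 16) == 16
    return ((size + alignment - 1) // alignment) * alignment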
def save(self, file_path):
    with open(file_path, 'wb') as f:
        # Write magic header
        f.write(b'TEXT')

        # Write number of entries
        common.write_uint32(f, len(self.entries))

        # Write size of header
        common.write_uint32(f, 16)

        # Write content start offset
        # Size of each entry * number of entries + header size
        content_start_offset = 8 * len(self.entries) + 16
        common.write_uint32(f, content_start_offset)

        # Generate the string offsets
        previous_string_end = content_start_offset
        converted_strings = []
        for (unknown_first, unknown_second, string_content) in self.strings:
            # If the string is None, because its contents were outside the end of the file,
            # then recreate the same conditions by injecting a dummy
            if string_content is None:
                converted_strings.append((unknown_first, unknown_second, None,
                                          previous_string_end))
                continue

            # Calculate the offset of this string based on the end of the previous string
            string_offset = previous_string_end

            # Add the implicit null terminator
            string_content += '\0'

            # Convert unicode to Shift-JIS
            raw_string_content = common.to_eva_sjis(string_content)

            # Calculate how much memory this string takes
            raw_string_content = common.zero_pad_and_align_string(raw_string_content)
            string_padded_size = len(raw_string_content)

            # Update previous_string_end for the next iteration (+ 8 for the two unknowns)
            previous_string_end += string_padded_size + 8

            # Add the string to converted strings
            converted_strings.append((unknown_first, unknown_second,
                                      raw_string_content, string_offset))

        # Write entries
        for (entry_unknown, entry_string_index) in self.entries:
            # Write unknown
            common.write_uint32(f, entry_unknown)

            # Write string offset
            # Index 3 of the tuple is the string offset
            common.write_uint32(f, converted_strings[entry_string_index][3])

        # Write the actual strings (which may be referenced by multiple entries)
        # They should already be in order of appearance
        for (unknown_first, unknown_second, string_content, string_offset) in converted_strings:
            # This feature is possibly used by the game to put a dynamic string at the end
            if string_content is None:
                continue

            # Write the unknowns
            common.write_uint32(f, unknown_first)
            common.write_uint32(f, unknown_second)

            # Write the string
            f.write(string_content)
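`common.zero_pad_and_align_string` is not shown either; judging from how it is used here and in the `.EVS` writer below, it is assumed to guarantee a trailing null terminator and pad the bytes out to a 4-byte boundary. A hypothetical sketch under that assumption:

def zero_pad_and_align_string(raw):
    # Append a null terminator, then pad with further nulls
    # until the length is a multiple of 4 (assumed behaviour)
    raw += b'\0'
    raw += b'\0' * (-len(raw) % 4)
    return raw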
def save(self, file_path):
    with open(file_path, 'wb') as f:
        f.write(b'HGAR')
        common.write_uint16(f, self.version)

        number_of_files = self.get_total_files()
        common.write_uint16(f, number_of_files)

        # Calculate the header size to be able to calculate the file start offsets
        # Magic + Version + nFiles + FileOffsets
        size_of_header = 4 + 2 + 2 + 4 * number_of_files
        if self.version == 3:
            # Unknowns
            size_of_header += 8 * number_of_files
            # Add long names
            for file in self.files:
                size_of_header += 4 + len(file.long_name)

        # Write file start offsets
        file_offset = size_of_header
        for file in self.files:
            common.write_uint32(f, file_offset)
            # ShortName + Identifier + Size
            file_offset += 0xC + 4 + 4
            # Actual file content
            file_offset += common.calculate_word_aligned_length(file.size)

        if self.version == 3:
            # Write unknowns
            for file in self.files:
                common.write_uint32(f, file.unknown_first)
                common.write_uint32(f, file.unknown_last)

            # Write long names
            for number, file in enumerate(self.files):
                common.write_uint32(f, number)
                f.write(file.long_name)

        for file in self.files:
            # Write short name
            short_name_name, short_name_extension = (file.short_name + b'. ').split(b'.', 1)
            formatted_short_name = (short_name_name + b' ' * 8)[0:8] + b'.' + \
                (short_name_extension + b' ' * 3)[0:3]
            f.write(formatted_short_name)

            # Write encoded identifier
            file.encode_identifier(self.identifier_limit)
            common.write_uint32(f, file.encoded_identifier)

            # Write file size
            common.write_uint32(f, file.size)

            # Write content
            f.write(file.content)

            # Write padding
            padding_length = 4 - (file.size & 3)
            if padding_length != 4:
                f.write(b'\0' * padding_length)
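As a worked example of the short-name formatting above, a hypothetical name such as `b'EV01.BIN'` splits on the first `.` and is padded to a fixed 8.3 layout of exactly 12 bytes:

short_name = b'EV01.BIN'                    # example name only
name, ext = (short_name + b'. ').split(b'.', 1)
formatted = (name + b' ' * 8)[0:8] + b'.' + (ext + b' ' * 3)[0:3]
# formatted == b'EV01    .BIN'  (8 name bytes + '.' + 3 extension bytes)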
def save(self, file_path):
    with open(file_path, 'wb') as f:
        # Calculate various sizes

        # Divisions sizes
        divisions_padded_size = 0
        divisions_padding = 0
        if self.has_extended_header:
            divisions_size = 12 + len(self.divisions) * 8
            divisions_padded_size = common.align_size(divisions_size, 16)
            divisions_padding = divisions_padded_size - divisions_size

        # PP offset
        pp_offset = 16 + divisions_padded_size

        # Palette total, PP format, bytes_per_pixel, and tile_width
        palette_total = len(self.palette)
        pp_format = 0x13
        bytes_per_pixel = 1
        bytes_per_pixel_ppd_size = bytes_per_pixel
        tile_width = 16
        if palette_total == 0:
            pp_format = 0x8800
            bytes_per_pixel = 4
            bytes_per_pixel_ppd_size = 1
            tile_width = 4
        elif palette_total == 16:
            pp_format = 0x14
            bytes_per_pixel = 0.5
            bytes_per_pixel_ppd_size = bytes_per_pixel
            tile_width = 32
        elif palette_total == 256:
            pp_format = 0x13
            bytes_per_pixel = 1
            bytes_per_pixel_ppd_size = bytes_per_pixel
            tile_width = 16
        else:
            raise Exception('Unknown palette total, %s' % palette_total)

        # Sixteenths resolution and storage resolution
        ppd_sixteenths_width = common.align_size(self.width, 16)
        ppd_sixteenths_height = common.align_size(self.height, 8)
        storage_width = common.align_size(self.width, tile_width)
        storage_height = common.align_size(self.height, 8)
        number_of_pixels = storage_width * storage_height

        # PPD size
        # The size also includes the PPD header in the count, which is 0x20 bytes
        ppd_size = int(number_of_pixels * bytes_per_pixel_ppd_size) + 0x20

        # Calculate the number of bytes
        number_of_bytes = int(number_of_pixels * bytes_per_pixel)

        # Calculate headers
        pp_header = 0x00007070 | ((pp_format & 0xFFFF) << 16)
        ppd_header = 0x00647070 | ((pp_format & 0xFF) << 24)
        ppc_header = 0x00637070

        # Begin writing

        # Write magic header
        f.write(b'HGPT')

        # Write pp offset
        common.write_uint16(f, pp_offset)

        # Write if it has an extended header
        common.write_uint16(f, 1 if self.has_extended_header else 0)

        # Write number of divisions
        common.write_uint16(f, len(self.divisions))

        # Write unknowns
        common.write_uint16(f, 0x0001)
        # ff ff ff ff in pictures with an extended header
        # 00 00 00 00 in pictures without an extended header
        common.write_uint32(
            f, 0xFFFFFFFF if self.has_extended_header else 0x00000000)

        if self.has_extended_header:
            # Write the number of divisions again
            common.write_uint16(f, len(self.divisions))

            # Write unknown
            # 0x0013 is the most common
            # 0x0014 is the other occurrence
            common.write_uint16(f, self.unknown_three)

            # Write division name
            f.write((self.division_name + b'\0' * 8)[0:8])

            # Write divisions
            for division in self.divisions:
                common.write_uint16(f, division[0])  # division_start_x
                common.write_uint16(f, division[1])  # division_start_y
                common.write_uint16(f, division[2])  # division_width
                common.write_uint16(f, division[3])  # division_height

            # Add zero padding
            f.write(b'\0' * divisions_padding)

        # Write PP header
        common.write_uint32(f, pp_header)

        # Write display dimensions
        common.write_uint16(f, self.width)   # display_width
        common.write_uint16(f, self.height)  # display_height

        # Write zero padding
        f.write(b'\0' * 8)

        # Write ppd header
        common.write_uint32(f, ppd_header)

        # Write display dimensions again
        common.write_uint16(f, self.width)   # ppd_display_width
        common.write_uint16(f, self.height)  # ppd_display_height

        # Write zero padding
        f.write(b'\0' * 4)

        # Write ppd sixteenths dimensions
        common.write_uint16(f, ppd_sixteenths_width)
        common.write_uint16(f, ppd_sixteenths_height)

        # Write ppd_size
        common.write_uint32(f, ppd_size)

        # Write zero padding
        f.write(b'\0' * 12)

        # Re-tile
        tiled_image_data = [0] * number_of_pixels
        tile_height = 8
        tile_size = tile_width * tile_height
        tile_row = tile_size * (storage_width // tile_width)
        for y in range(0, self.height):
            for x in range(0, self.width):
                tile_y = y // tile_height
                tile_x = x // tile_width
                tile_sub_y = y % tile_height
                tile_sub_x = x % tile_width
                tiled_image_data[tile_y * tile_row + tile_x * tile_size +
                                 tile_sub_y * tile_width + tile_sub_x] = \
                    self.content[y * self.width + x]

        # Write image data
        cache_last_pixel = None
        for i in range(0, number_of_pixels):
            if number_of_bytes <= 0:
                break
            if pp_format == 0x13:
                common.write_uint8(f, tiled_image_data[i])
                number_of_bytes -= 1
            elif pp_format == 0x14:
                if (i & 1) == 0:
                    # Even write: cache the low nibble
                    cache_last_pixel = (tiled_image_data[i] & 0xF)
                else:
                    # Odd write: combine with the cached nibble and emit a byte
                    common.write_uint8(
                        f, cache_last_pixel | ((tiled_image_data[i] & 0xF) << 4))
                    number_of_bytes -= 1
            elif pp_format == 0x8800:
                # Write full RGBA
                common.write_uint8(f, tiled_image_data[i][0])
                common.write_uint8(f, tiled_image_data[i][1])
                common.write_uint8(f, tiled_image_data[i][2])
                common.write_uint8(f, encode_alpha(tiled_image_data[i][3]))
                number_of_bytes -= 4

        # Write palette
        if pp_format == 0x8800:
            # There is no palette
            pass
        else:
            # Write ppc header
            common.write_uint32(f, ppc_header)

            # Write zero padding
            f.write(b'\0' * 2)

            # Write number of palette entries (but needing to be divided by 8)
            common.write_uint16(f, palette_total // 8)

            # Write zero padding
            f.write(b'\0' * 8)

            # Write palette
            for c in self.palette:
                common.write_uint8(f, c[0])
                common.write_uint8(f, c[1])
                common.write_uint8(f, c[2])
                common.write_uint8(f, encode_alpha(c[3]))
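To illustrate the re-tiling arithmetic above: with the 256-colour settings (`tile_width = 16`, `tile_height = 8`) and an example storage width of 128 pixels, the source pixel at (x=20, y=10) lands at the following tiled index (values here are purely illustrative):

tile_width, tile_height = 16, 8
storage_width = 128                                    # example value
tile_size = tile_width * tile_height                   # 128
tile_row = tile_size * (storage_width // tile_width)   # 1024

x, y = 20, 10
index = ((y // tile_height) * tile_row       # 1 * 1024
         + (x // tile_width) * tile_size     # 1 * 128
         + (y % tile_height) * tile_width    # 2 * 16
         + (x % tile_width))                 # + 4
# index == 1188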
def save(self, file_path):
    with open(file_path, 'wb') as f:
        # Write magic header
        f.write(b'TEXT')

        # Write number of entries
        common.write_uint32(f, len(self.entries))

        # Write size of header
        common.write_uint32(f, 16)

        # Write content start offset
        # Size of each entry * number of entries + header size
        content_start_offset = 8 * len(self.entries) + 16
        common.write_uint32(f, content_start_offset)

        # Generate the string offsets
        previous_string_end = content_start_offset
        converted_strings = []
        for (unknown_first, unknown_second, string_content) in self.strings:
            # Calculate the offset of this string based on the end of the previous string
            string_offset = previous_string_end

            # Convert UTF-8 to Shift-JIS
            string_content = common.to_eva_sjis(string_content)

            # Calculate how much memory this string takes
            string_padded_size = common.align_size(len(string_content), 4)

            # Append null terminators to ensure 32-bit alignment
            string_content = (string_content + b'\0\0\0\0')[:string_padded_size]

            # Update previous_string_end for the next iteration (+ 8 for the two unknowns)
            previous_string_end += string_padded_size + 8

            # Add the string to converted strings
            converted_strings.append((unknown_first, unknown_second,
                                      string_content, string_offset))

        # Write entries
        for (entry_unknown, entry_string_index) in self.entries:
            # Write unknown
            common.write_uint32(f, entry_unknown)

            # Write string offset
            # Index 3 of the tuple is the string offset
            common.write_uint32(f, converted_strings[entry_string_index][3])

        # Write the actual strings (which may be referenced by multiple entries)
        # They should already be in order of appearance
        for (unknown_first, unknown_second, string_content, string_offset) in converted_strings:
            # Write the unknowns
            common.write_uint32(f, unknown_first)
            common.write_uint32(f, unknown_second)

            # Write the string
            f.write(string_content)
def save(self, file_path):
    with open(file_path, 'wb') as f:
        # Write magic header
        f.write(b'.EVS')

        # Write the number of entries
        common.write_uint32(f, len(self.entries))

        # Write entry offsets
        # Size of header + size of entry table
        previous_entry_end = 8 + 4 * len(self.entries)
        converted_entries = []
        for (entry_type, entry_parameters, entry_content) in self.entries:
            has_content_section = entry_type in HAS_CONTENT_SECTION

            # Write offset
            common.write_uint32(f, previous_entry_end)

            # Calculate the size of this entry,
            # so that we know when the next entry starts

            # Calculate how much memory this string takes
            if has_content_section:
                # Convert UTF-8 to Shift-JIS
                raw_entry_content = common.to_eva_sjis(entry_content)

                # We add a single null terminator and only count that terminator here;
                # the extra alignment padding is included in the offset calculation below
                string_terminated_size = len(raw_entry_content) + 1
                raw_entry_content = common.zero_pad_and_align_string(raw_entry_content)
            else:
                string_terminated_size = 0
                raw_entry_content = b''

            # Calculate the entry_size
            # parameters + content
            entry_size = 4 * len(entry_parameters) + string_terminated_size

            # Update previous_entry_end for the next iteration
            # Add 4 for the entry_type and entry_size fields (omitted from entry_size itself)
            previous_entry_end += common.align_size(4 + entry_size, 4)

            # Add the entry to the converted entries
            converted_entries.append((entry_type, entry_size, entry_parameters,
                                      raw_entry_content))

        # Loop through entries
        for (entry_type, entry_size, entry_parameters, raw_entry_content) in converted_entries:
            # Write entry type
            common.write_uint16(f, entry_type)

            # Write entry size
            common.write_uint16(f, entry_size)

            # Write the parameters
            for entry_parameter in entry_parameters:
                common.write_uint32(f, entry_parameter)

            # Write the content
            f.write(raw_entry_content)
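As a concrete example of the offset bookkeeping above (example values only): an entry with three parameters whose content encodes to 3 bytes of Shift-JIS is counted as follows:

entry_size = 4 * 3 + (3 + 1)          # parameters + content + null terminator = 16
header_fields = 4                     # entry_type (2 bytes) + entry_size (2 bytes)
next_offset_delta = (header_fields + entry_size + 3) // 4 * 4   # aligned to 4 -> 20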
def save(self, file_path):
    with open(file_path, 'wb') as f:
        common.write_uint32(f, self.size)
        result = zlib.compress(self.content)
        # Skip the 2-byte zlib header and the 4-byte checksum at the end
        f.write(result[2:-4])
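Because the 2-byte zlib header and the 4-byte Adler-32 trailer are stripped, the stored payload is a raw DEFLATE stream, so a matching reader would decompress it with a negative window-bits value. A sketch (not the project's actual loader; path, byte order, and the meaning of the leading size field are assumptions):

import zlib

with open('compressed.bin', 'rb') as f:                   # hypothetical path
    stored_size = int.from_bytes(f.read(4), 'little')     # assumed little-endian size field
    raw_deflate = f.read()

# Negative wbits tells zlib to expect a raw DEFLATE stream (no header/trailer)
content = zlib.decompress(raw_deflate, -zlib.MAX_WBITS)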