def save(self, file_path):
    with open(file_path, 'wb') as f:
        file_size = common.get_file_size(f)

        # Write magic header
        f.write('.EVS')

        # Write the number of entries
        common.write_uint32(f, len(self.entries))

        # Write entry offsets
        # Size of header + size of entry table
        previous_entry_end = 8 + 4 * len(self.entries)
        converted_entries = []
        for (entry_type, entry_parameters, entry_content) in self.entries:
            # Write offset
            common.write_uint32(f, previous_entry_end)

            # Calculate the size of this entry,
            # so that we know when the next entry starts

            # Convert UTF8 to Shift-JIS
            entry_content = common.to_eva_sjis(entry_content)

            # Calculate the entry_size
            # parameters + content
            entry_size = 4 * len(entry_parameters) + len(entry_content)

            # Update previous_entry_end for the next iteration
            # Add 4 for the entry_type and entry_size fields, along with padding
            previous_entry_end += common.align_size(4 + entry_size, 4)

            # Add the entry to the converted entries
            converted_entries.append(
                (entry_type, entry_size, entry_parameters, entry_content))

        # Loop through entries
        for (entry_type, entry_size, entry_parameters,
             entry_content) in converted_entries:
            # Write entry type
            common.write_uint16(f, entry_type)

            # Write entry size
            common.write_uint16(f, entry_size)

            # Write the parameters
            for entry_parameter in entry_parameters:
                common.write_uint32(f, entry_parameter)

            # Write the content
            f.write(entry_content)

            # Add padding
            entry_padding = (common.align_size(entry_size, 4) - entry_size)
            f.write('\0' * entry_padding)
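# Note: the offset and padding arithmetic in save() above relies on
# common.align_size(size, alignment), whose implementation is not shown in
# this file. The assumed behaviour (round size up to the next multiple of
# alignment) is sketched below purely for illustration and may differ from
# the real helper.
def _align_size_sketch(size, alignment):
    # e.g. _align_size_sketch(13, 4) == 16, _align_size_sketch(16, 4) == 16
    return ((size + alignment - 1) // alignment) * alignment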
def save(self, file_path):
    with open(file_path, 'wb') as f:
        # Write magic header
        f.write(b'BIND')

        # Write the "size byte size"
        # This determines how many bytes each entry-size field occupies
        common.write_uint16(f, self.size_byte_size)

        # Write the number of entries
        common.write_uint16(f, len(self.entries))

        # Write block size
        common.write_uint32(f, self.block_size)

        # Calculate header size, and padding
        # The header size is:
        # (16 + number of entries * self.size_byte_size) aligned to self.block_size
        header_size = 16 + len(self.entries) * self.size_byte_size
        padded_header_size = common.align_size(header_size, self.block_size)

        # Write header size
        common.write_uint32(f, padded_header_size)

        # Write the entry sizes
        for entry in self.entries:
            # Write the entry size
            if self.size_byte_size == 1:
                common.write_uint8(f, entry.get_size())
            elif self.size_byte_size == 2:
                common.write_uint16(f, entry.get_size())
            elif self.size_byte_size == 4:
                common.write_uint32(f, entry.get_size())

        # Insert padding
        f.write(b'\0' * (padded_header_size - header_size))

        # Write entries
        for entry in self.entries:
            entry_size = entry.get_size()
            padded_entry_size = common.align_size(entry_size, self.block_size)
            f.write(entry.content)
            f.write(b'\0' * (padded_entry_size - entry_size))
def open(self, file_path):
    with open(file_path, 'rb') as f:
        file_size = common.get_file_size(f)

        # Read magic header
        magic_number = f.read(4)
        if magic_number != 'BIND':
            raise Exception('Not a BIND file! Missing BIND header id.')

        # Read the "size byte size"
        # This determines how many bytes each entry-size field occupies
        self.size_byte_size = common.read_uint16(f)
        if self.size_byte_size not in (1, 2, 4):
            raise Exception(
                'Illegal BIND size byte size: %s' % self.size_byte_size)

        # Read number of entries
        number_of_entries = common.read_uint16(f)

        # Read block size
        self.block_size = common.read_uint32(f)

        # Read header size
        # The header size is:
        # (16 + number of entries * self.size_byte_size) aligned to self.block_size
        header_size = common.read_uint32(f)

        # Load the entry sizes
        processed_entries = []
        previous_entry_end = header_size
        for i in xrange(0, number_of_entries):
            # Calculate the offset of this entry based on the end of the previous entry
            entry_offset = previous_entry_end

            # Read the entry size
            entry_size = 0
            if self.size_byte_size == 1:
                entry_size = common.read_uint8(f)
            elif self.size_byte_size == 2:
                entry_size = common.read_uint16(f)
            elif self.size_byte_size == 4:
                entry_size = common.read_uint32(f)

            # Calculate how much memory this entry takes
            entry_padded_size = common.align_size(entry_size, self.block_size)

            # Update previous_entry_end for the next iteration
            previous_entry_end += entry_padded_size

            # Add the entry to processed entries
            processed_entries.append((entry_offset, entry_size))

        # Read entries
        for (entry_offset, entry_size) in processed_entries:
            f.seek(entry_offset)
            content = f.read(entry_size)
            new_entry = self.add_entry(content)
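# Worked example of the BIND header/offset arithmetic used in open() and
# save() above (the concrete numbers are illustrative assumptions): with
# size_byte_size = 2, block_size = 0x800 and 3 entries, header_size is
# 16 + 3 * 2 = 22, which is padded up to 0x800, so the first entry starts at
# offset 0x800 and each following entry starts at the previous offset plus
# that entry's size aligned to the block size.
def _bind_entry_offsets_sketch(entry_sizes, size_byte_size=2, block_size=0x800):
    # Returns the (offset, size) pairs the reader and writer agree on.
    header_size = 16 + len(entry_sizes) * size_byte_size
    offset = ((header_size + block_size - 1) // block_size) * block_size
    result = []
    for entry_size in entry_sizes:
        result.append((offset, entry_size))
        offset += ((entry_size + block_size - 1) // block_size) * block_size
    return result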
def save(self, file_path):
    with open(file_path, 'wb') as f:
        counter = 0
        offset = 0
        for entry in self.entries:
            # Each entry is written back-to-back, padded to the archive block size
            wave_size = entry.get_size()
            block_aligned_wave_size = common.align_size(
                wave_size, WaveArchive.BLOCK_SIZE)
            print('#\tWriting entry %s at offset 0x%X, size %s' %
                  (counter, offset, wave_size))
            f.write(entry.content)

            # Pad up to the next block boundary
            padding_size = block_aligned_wave_size - wave_size
            f.write(b'\0' * padding_size)
            counter += 1
            offset += block_aligned_wave_size
def open(self, file_path):
    with open(file_path, 'rb') as f:
        archive_size = common.get_file_size(f)
        while f.tell() < archive_size:
            # Each entry is a complete RIFF WAVE file
            magic_number = f.read(4).decode('ascii', 'ignore')
            if magic_number != 'RIFF':
                raise Exception('Not a WAVE entry!')

            # The RIFF chunk size excludes the 8-byte chunk header
            wave_size = common.read_uint32(f) + 8
            f.seek(-8, os.SEEK_CUR)
            content = f.read(wave_size)
            new_entry = self.add_entry(content)

            # Find how many blocks are needed to contain wave_size
            # and skip that amount of remaining bytes
            block_aligned_wave_size = common.align_size(
                wave_size, WaveArchive.BLOCK_SIZE)
            f.seek(block_aligned_wave_size - wave_size, os.SEEK_CUR)
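# Hypothetical usage sketch for the archive methods above: unpack every RIFF
# WAVE entry to its own file. The no-argument WaveArchive() constructor and
# the file names are assumptions; only open(), self.entries and
# entry.content appear in the code above.
def _dump_wave_archive_sketch():
    archive = WaveArchive()
    archive.open('voices.bin')  # hypothetical input path
    for index, entry in enumerate(archive.entries):
        with open('voice_%03d.wav' % index, 'wb') as out:
            out.write(entry.content)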
def open(self, file_path):
    with open(file_path, 'rb') as f:
        file_size = common.get_file_size(f)

        # Read magic header
        magic_number = f.read(4)
        if magic_number != 'HGPT':
            raise Exception('Not an HGPT file! Missing HGPT header id.')

        # Read pp offset
        pp_offset = common.read_uint16(f)
        if pp_offset < 0x10:
            raise Exception(
                'PP offset less than 0x10, PS2 variant not supported!')

        # Read if it has an extended header
        has_extended_header = common.read_uint16(f)
        if has_extended_header not in (0, 1):
            raise Exception('Unknown has_extended_header value: %s' %
                            has_extended_header)
        self.has_extended_header = has_extended_header = (
            has_extended_header == 1)

        # Read number of divisions
        number_of_divisions = common.read_uint16(f)

        # Read unknowns
        unknown_one = common.read_uint16(f)
        if unknown_one != 0x0001:
            raise Exception('First unknown is not 0x0001: %08X' % unknown_one)

        # ff ff ff ff in pictures with extended header
        # 00 00 00 00 in pictures w/o extended header
        unknown_two = common.read_uint32(f)

        # Load divisions
        if has_extended_header:
            # Read number of divisions (again)
            number_of_divisions_repeat = common.read_uint16(f)
            if number_of_divisions != number_of_divisions_repeat:
                raise Exception(
                    'Number of divisions and its repeat don\'t match: %s != %s'
                    % (number_of_divisions, number_of_divisions_repeat))

            # Read unknown
            # 0x0013 is the most common
            # 0x0014 is the other occurrence
            unknown_three = common.read_uint16(f)
            if unknown_three != 0x0013:
                print('# Warning: UnknownThree (0x%X) != 0x0013' %
                      unknown_three)

            # Read division name
            self.division_name = (f.read(8) + '\0' * 8)[0:8]

            # Read divisions
            for i in xrange(0, number_of_divisions):
                division_start_x = common.read_uint16(f)
                division_start_y = common.read_uint16(f)
                division_width = common.read_uint16(f)
                division_height = common.read_uint16(f)
                self.divisions.append((division_start_x, division_start_y,
                                       division_width, division_height))

            # Calculate and skip zero padding
            divisions_size = 12 + 8 * number_of_divisions
            divisions_padded_size = common.align_size(divisions_size, 16)
            divisions_padding = divisions_padded_size - divisions_size
            f.seek(divisions_padding, os.SEEK_CUR)

        # Check that it's the correct pp_offset
        if f.tell() != pp_offset:
            raise Exception('Incorrect pp offset')

        # Read pp header
        pp_header = common.read_uint32(f)
        if pp_header & 0x0000FFFF != 0x00007070:
            raise Exception('Missing pp header! %08X' % pp_header)

        # Decipher pp format
        pp_format = (pp_header >> 16) & 0xFFFF
        if pp_format not in (0x13, 0x14, 0x8800):
            raise Exception('PP format (0x%X) is unknown' % pp_format)

        # Calculate values that depend on the pp format
        bytes_per_pixel = 1
        bytes_per_pixel_ppd_size = bytes_per_pixel
        tile_width = 16
        if pp_format == 0x8800:
            bytes_per_pixel = 4
            bytes_per_pixel_ppd_size = 1
            tile_width = 4
        elif pp_format == 0x13:
            bytes_per_pixel = 1
            bytes_per_pixel_ppd_size = bytes_per_pixel
            tile_width = 16
        elif pp_format == 0x14:
            bytes_per_pixel = 0.5
            bytes_per_pixel_ppd_size = bytes_per_pixel
            tile_width = 32

        # Read picture display dimensions
        self.width = pp_display_width = common.read_uint16(f)
        self.height = pp_display_height = common.read_uint16(f)

        # Skip zero padding
        f.seek(2 * 4, os.SEEK_CUR)

        # Read ppd header
        ppd_header = common.read_uint32(f)
        if ppd_header & 0x00FFFFFF != 0x00647070:
            raise Exception('Missing ppd header!')

        # Decipher ppd format
        ppd_format = (ppd_header >> 24) & 0xFF
        if ppd_format != (pp_format & 0xFF):
            raise Exception(
                'PPD format (0x%X) does not match PP format (0x%X)' %
                (ppd_format, pp_format & 0xFF))

        # Read ppd display resolution
        ppd_display_width = common.read_uint16(f)
        ppd_display_height = common.read_uint16(f)
        if pp_display_width != ppd_display_width:
            raise Exception(
                'PP display width (%s) != PPD display width (%s)' %
                (pp_display_width, ppd_display_width))
        if pp_display_height != ppd_display_height:
            raise Exception(
                'PP display height (%s) != PPD display height (%s)' %
                (pp_display_height, ppd_display_height))

        # Skip zero padding
        f.seek(4, os.SEEK_CUR)

        # Read ppd sixteenths resolution
        ppd_sixteenths_width = common.read_uint16(f)
        ppd_sixteenths_height = common.read_uint16(f)

        # Calculate ppd sixteenths resolution (using the pp_display resolution)
        calculated_sixteenths_width = common.align_size(pp_display_width, 16)
        calculated_sixteenths_height = common.align_size(pp_display_height, 8)
        if (calculated_sixteenths_width != ppd_sixteenths_width) or (
                calculated_sixteenths_height != ppd_sixteenths_height):
            raise Exception(
                'PPD sixteenths resolution (%s x %s) doesn\'t match the calculated sixteenths resolution (%s x %s)'
                % (ppd_sixteenths_width, ppd_sixteenths_height,
                   calculated_sixteenths_width, calculated_sixteenths_height))

        # Calculate storage resolution (using the pp_display resolution)
        # This is the resolution that ties in with the ppd_size
        calculated_storage_width = common.align_size(pp_display_width,
                                                     tile_width)
        calculated_storage_height = common.align_size(pp_display_height, 8)
        number_of_pixels = calculated_storage_width * calculated_storage_height

        # Read ppd_size, which is the size of the ppd header + number of pixels
        # The ppd header is 0x20 bytes in size
        ppd_size = common.read_uint32(f)
        calculated_ppd_size = int(
            number_of_pixels * bytes_per_pixel_ppd_size) + 0x20
        if calculated_ppd_size != ppd_size:
            raise Exception(
                'PPD size %s does not match the calculated ppd size %s' %
                (ppd_size, calculated_ppd_size))

        # Calculate the number of bytes
        number_of_bytes = int(number_of_pixels * bytes_per_pixel)

        # Skip padding
        f.seek(3 * 4, os.SEEK_CUR)

        # Read the tiled image data
        # The image data is stored in a scrambled tiled format, so we'll have to reprocess it
        tiled_image_data = [0] * number_of_pixels
        cache_last_pixel = None
        for i in xrange(0, number_of_pixels):
            if number_of_bytes <= 0:
                break
            if pp_format == 0x13:
                tiled_image_data[i] = common.read_uint8(f)
                number_of_bytes -= 1
            elif pp_format == 0x14:
                if (i & 1) == 0:
                    # Even read: fetch a new byte holding two 4-bit pixels
                    cache_last_pixel = common.read_uint8(f)
                else:
                    # Odd read: the byte has now been fully consumed
                    number_of_bytes -= 1
                tiled_image_data[i] = cache_last_pixel & 0xF
                cache_last_pixel = cache_last_pixel >> 4
            elif pp_format == 0x8800:
                # Read full RGBA
                tiled_image_data[i] = (common.read_uint8(f),
                                       common.read_uint8(f),
                                       common.read_uint8(f),
                                       decode_alpha(common.read_uint8(f)))
                number_of_bytes -= 4

        # Skip unread bytes
        f.seek(number_of_bytes, os.SEEK_CUR)

        # Un-tile and store the information as the content
        self.content = [0] * (pp_display_width * pp_display_height)
        tile_height = 8
        tile_size = tile_width * tile_height
        tile_row = tile_size * int(calculated_storage_width / tile_width)
        for y in xrange(0, pp_display_height):
            for x in xrange(0, pp_display_width):
                tile_y = int(y / tile_height)
                tile_x = int(x / tile_width)
                tile_sub_y = y % tile_height
                tile_sub_x = x % tile_width
                self.content[y * pp_display_width + x] = tiled_image_data[
                    tile_y * tile_row + tile_x * tile_size +
                    tile_sub_y * tile_width + tile_sub_x]

        # Check if the optional ppc header exists
        if pp_format == 0x8800:
            self.palette = []
        else:
            # Read ppc header
            ppc_header = common.read_uint32(f)
            if ppc_header & 0xFFFFFFFF != 0x00637070:
                raise Exception('Missing ppc header!')

            # Skip zero padding
            f.seek(2, os.SEEK_CUR)

            # Read the number of palette entries
            # It needs to be multiplied by 8 to get the correct total
            palette_total = common.read_uint16(f) * 8

            # Skip zero padding
            f.seek(2 * 4, os.SEEK_CUR)

            # Read palette
            self.palette = [(common.read_uint8(f), common.read_uint8(f),
                             common.read_uint8(f),
                             decode_alpha(common.read_uint8(f)))
                            for i in xrange(0, palette_total)]
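# The un-tiling loop above maps a display-space pixel (x, y) to an index in
# the tiled buffer. The same arithmetic, pulled out into a standalone helper
# for illustration (tile height is fixed at 8; tile width depends on the pp
# format: 16 for 0x13, 32 for 0x14, 4 for 0x8800):
def _tiled_index_sketch(x, y, storage_width, tile_width, tile_height=8):
    tile_size = tile_width * tile_height
    tile_row = tile_size * (storage_width // tile_width)
    tile_y, tile_sub_y = divmod(y, tile_height)
    tile_x, tile_sub_x = divmod(x, tile_width)
    return (tile_y * tile_row + tile_x * tile_size +
            tile_sub_y * tile_width + tile_sub_x)

# For example, with tile_width = 16 and storage_width = 128, the pixel at
# (x=17, y=1) falls in tile (1, 0) at sub-position (1, 1), giving index
# 0 * 1024 + 1 * 128 + 1 * 16 + 1 = 145.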
def save(self, file_path):
    with open(file_path, 'wb') as f:
        # Calculate various sizes

        # Divisions sizes
        divisions_padded_size = 0
        divisions_padding = 0
        if self.has_extended_header:
            divisions_size = 12 + len(self.divisions) * 8
            divisions_padded_size = common.align_size(divisions_size, 16)
            divisions_padding = divisions_padded_size - divisions_size

        # PP offset
        pp_offset = 16 + divisions_padded_size

        # Palette total, PP format, bytes_per_pixel, and tile_width
        palette_total = len(self.palette)
        pp_format = 0x13
        bytes_per_pixel = 1
        bytes_per_pixel_ppd_size = bytes_per_pixel
        tile_width = 16
        if palette_total == 0:
            pp_format = 0x8800
            bytes_per_pixel = 4
            bytes_per_pixel_ppd_size = 1
            tile_width = 4
        elif palette_total == 16:
            pp_format = 0x14
            bytes_per_pixel = 0.5
            bytes_per_pixel_ppd_size = bytes_per_pixel
            tile_width = 32
        elif palette_total == 256:
            pp_format = 0x13
            bytes_per_pixel = 1
            bytes_per_pixel_ppd_size = bytes_per_pixel
            tile_width = 16
        else:
            raise Exception('Unknown palette total, %s' % palette_total)

        # Sixteenths resolution and storage resolution
        ppd_sixteenths_width = common.align_size(self.width, 16)
        ppd_sixteenths_height = common.align_size(self.height, 8)
        storage_width = common.align_size(self.width, tile_width)
        storage_height = common.align_size(self.height, 8)
        number_of_pixels = storage_width * storage_height

        # PPD size
        # The size also includes the PPD header in the count, which is 0x20 bytes
        ppd_size = int(number_of_pixels * bytes_per_pixel_ppd_size) + 0x20

        # Calculate the number of bytes
        number_of_bytes = int(number_of_pixels * bytes_per_pixel)

        # Calculate headers
        pp_header = 0x00007070 | ((pp_format & 0xFFFF) << 16)
        ppd_header = 0x00647070 | ((pp_format & 0xFF) << 24)
        ppc_header = 0x00637070

        # Begin writing

        # Write magic header
        f.write('HGPT')

        # Write pp offset
        common.write_uint16(f, pp_offset)

        # Write if it has an extended header
        common.write_uint16(f, 1 if self.has_extended_header else 0)

        # Write number of divisions
        common.write_uint16(f, len(self.divisions))

        # Write unknowns
        common.write_uint16(f, 0x0001)
        # ff ff ff ff in pictures with extended header
        # 00 00 00 00 in pictures w/o extended header
        common.write_uint32(
            f, 0xFFFFFFFF if self.has_extended_header else 0x00000000)

        if self.has_extended_header:
            # Write the number of divisions again
            common.write_uint16(f, len(self.divisions))

            # Write unknown
            # 0x0013 is the most common
            # 0x0014 is the other occurrence
            common.write_uint16(f, self.unknown_three)

            # Write division name
            f.write((self.division_name + '\0' * 8)[0:8])

            # Write divisions
            for division in self.divisions:
                common.write_uint16(f, division[0])  # division_start_x
                common.write_uint16(f, division[1])  # division_start_y
                common.write_uint16(f, division[2])  # division_width
                common.write_uint16(f, division[3])  # division_height

            # Add zero padding
            f.write('\0' * divisions_padding)

        # Write PP header
        common.write_uint32(f, pp_header)

        # Write display dimensions
        common.write_uint16(f, self.width)  # display_width
        common.write_uint16(f, self.height)  # display_height

        # Write zero padding
        f.write('\0' * 8)

        # Write ppd header
        common.write_uint32(f, ppd_header)

        # Write display dimensions again
        common.write_uint16(f, self.width)  # ppd_display_width
        common.write_uint16(f, self.height)  # ppd_display_height

        # Write zero padding
        f.write('\0' * 4)

        # Write ppd sixteenths dimensions
        common.write_uint16(f, ppd_sixteenths_width)
        common.write_uint16(f, ppd_sixteenths_height)

        # Write ppd_size
        common.write_uint32(f, ppd_size)

        # Write zero padding
        f.write('\0' * 12)

        # Re-tile
        tiled_image_data = [0] * number_of_pixels
        tile_height = 8
        tile_size = tile_width * tile_height
        tile_row = tile_size * int(storage_width / tile_width)
        for y in xrange(0, self.height):
            for x in xrange(0, self.width):
                tile_y = int(y / tile_height)
                tile_x = int(x / tile_width)
                tile_sub_y = y % tile_height
                tile_sub_x = x % tile_width
                tiled_image_data[tile_y * tile_row + tile_x * tile_size +
                                 tile_sub_y * tile_width +
                                 tile_sub_x] = self.content[y * self.width + x]

        # Write image data
        cache_last_pixel = None
        for i in xrange(0, number_of_pixels):
            if number_of_bytes <= 0:
                break
            if pp_format == 0x13:
                common.write_uint8(f, tiled_image_data[i])
                number_of_bytes -= 1
            elif pp_format == 0x14:
                if (i & 1) == 0:
                    # Even write: keep the low nibble until its pair arrives
                    cache_last_pixel = (tiled_image_data[i] & 0xF)
                else:
                    # Odd write: combine both nibbles into one byte
                    common.write_uint8(
                        f,
                        cache_last_pixel | ((tiled_image_data[i] & 0xF) << 4))
                    number_of_bytes -= 1
            elif pp_format == 0x8800:
                # Write full RGBA
                common.write_uint8(f, tiled_image_data[i][0])
                common.write_uint8(f, tiled_image_data[i][1])
                common.write_uint8(f, tiled_image_data[i][2])
                common.write_uint8(f, encode_alpha(tiled_image_data[i][3]))
                number_of_bytes -= 4

        # Write palette
        if pp_format == 0x8800:
            # There is no palette
            pass
        else:
            # Write ppc header
            common.write_uint32(f, ppc_header)

            # Write zero padding
            f.write('\0' * 2)

            # Write number of palette entries (divided by 8, mirroring the reader)
            common.write_uint16(f, palette_total / 8)

            # Write zero padding
            f.write('\0' * 8)

            # Write palette
            for c in self.palette:
                common.write_uint8(f, c[0])
                common.write_uint8(f, c[1])
                common.write_uint8(f, c[2])
                common.write_uint8(f, encode_alpha(c[3]))
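# In the 0x14 (4 bits per pixel) branch above, two palette indices share one
# byte: the even pixel occupies the low nibble and the odd pixel the high
# nibble. A standalone illustration of that packing and the matching unpack
# used on the read side:
def _pack_4bpp_pair_sketch(even_index, odd_index):
    return (even_index & 0xF) | ((odd_index & 0xF) << 4)

def _unpack_4bpp_pair_sketch(byte_value):
    return (byte_value & 0xF, (byte_value >> 4) & 0xF)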
def save(self, file_path):
    with open(file_path, 'wb') as f:
        # Write magic header
        f.write('TEXT')

        # Write number of entries
        common.write_uint32(f, len(self.entries))

        # Write size of header
        common.write_uint32(f, 16)

        # Write content start offset
        # Size of each entry * number of entries + header size
        content_start_offset = 8 * len(self.entries) + 16
        common.write_uint32(f, content_start_offset)

        # Generate the string offsets
        previous_string_end = content_start_offset
        converted_strings = []
        for (unknown_first, unknown_second, string_content) in self.strings:
            # Calculate the offset of this string based on the end of the previous string
            string_offset = previous_string_end

            # Convert UTF8 to Shift-JIS
            string_content = common.to_eva_sjis(string_content)

            # Calculate how much memory this string takes
            string_padded_size = common.align_size(len(string_content), 4)

            # Append null-terminators to ensure a 32-bit alignment
            string_content = (string_content + '\0\0\0\0')[:string_padded_size]

            # Update previous_string_end for the next iteration (+ 8 for the two unknowns)
            previous_string_end += string_padded_size + 8

            # Add the string to converted strings
            converted_strings.append(
                (unknown_first, unknown_second, string_content, string_offset))

        # Write entries
        for (entry_unknown, entry_string_index) in self.entries:
            # Write unknown
            common.write_uint32(f, entry_unknown)

            # Write string offset
            # Index 3 of the converted tuple is the string offset
            common.write_uint32(f, converted_strings[entry_string_index][3])

        # Write the actual strings (which may be referenced by multiple entries)
        # They should already be in order of appearance
        for (unknown_first, unknown_second, string_content,
             string_offset) in converted_strings:
            # Write the unknowns
            common.write_uint32(f, unknown_first)
            common.write_uint32(f, unknown_second)

            # Write the string
            f.write(string_content)
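# The string offsets generated above place each Shift-JIS string after two
# uint32 "unknown" fields and pad it to a 4-byte boundary. A standalone
# sketch of that offset calculation (the byte strings passed in are assumed
# to already be Shift-JIS encoded):
def _text_string_offsets_sketch(encoded_strings, content_start_offset):
    offsets = []
    offset = content_start_offset
    for encoded in encoded_strings:
        offsets.append(offset)
        padded_size = ((len(encoded) + 3) // 4) * 4
        offset += padded_size + 8  # + 8 for the two unknown uint32 fields
    return offsets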
def save(self, file_path):
    with open(file_path, 'wb') as f:
        file_size = common.get_file_size(f)

        # Write magic header
        f.write(b'.EVS')

        # Write the number of entries
        common.write_uint32(f, len(self.entries))

        # Write entry offsets
        # Size of header + size of entry table
        previous_entry_end = 8 + 4 * len(self.entries)
        converted_entries = []
        for (entry_type, entry_parameters, entry_content) in self.entries:
            has_content_section = entry_type in HAS_CONTENT_SECTION

            # Write offset
            common.write_uint32(f, previous_entry_end)

            # Calculate the size of this entry,
            # so that we know when the next entry starts

            # Calculate how much memory this content takes
            if has_content_section:
                # Convert UTF8 to Shift-JIS
                raw_entry_content = common.to_eva_sjis(entry_content)

                # We add a single null terminator and only count the single null terminator,
                # but later we include the extra alignment padding in the size calculation
                string_terminated_size = len(raw_entry_content) + 1
                raw_entry_content = common.zero_pad_and_align_string(
                    raw_entry_content)
            else:
                string_terminated_size = 0
                raw_entry_content = b''

            # Calculate the entry_size
            # parameters + content
            entry_size = 4 * len(entry_parameters) + string_terminated_size

            # Update previous_entry_end for the next iteration
            # Add 4 for the entry_type and entry_size fields (omitted in the entry_size calculation)
            previous_entry_end += common.align_size(4 + entry_size, 4)

            # Add the entry to the converted entries
            converted_entries.append(
                (entry_type, entry_size, entry_parameters, raw_entry_content))

        # Loop through entries
        for (entry_type, entry_size, entry_parameters,
             raw_entry_content) in converted_entries:
            # Write entry type
            common.write_uint16(f, entry_type)

            # Write entry size
            common.write_uint16(f, entry_size)

            # Write the parameters
            for entry_parameter in entry_parameters:
                common.write_uint32(f, entry_parameter)

            # Write the content
            f.write(raw_entry_content)
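# common.zero_pad_and_align_string() used above is not shown in this file.
# Based on how its result is consumed (at least one null terminator, total
# length a multiple of 4), an assumed sketch could look like this; the real
# helper may differ.
def _zero_pad_and_align_string_sketch(raw_bytes):
    padded_size = ((len(raw_bytes) + 1 + 3) // 4) * 4  # +1 for the terminator
    return (raw_bytes + b'\0' * 4)[:padded_size]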
def decompress_as(self, file_path):
    # wbits=-15 selects a raw deflate stream (no zlib header or checksum)
    result = zlib.decompress(self.content, -15)
    with open(file_path, 'wb') as f:
        # Pad the decompressed data to a 4-byte boundary
        aligned_size = common.align_size(len(result), 4)
        f.write((result + b'\0\0\0\0')[:aligned_size])
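# A matching compression sketch for the raw deflate framing used by
# decompress_as() above. Assumes the archive expects the same headerless
# deflate stream; the function name is an illustration, not part of the
# original code.
def _compress_raw_deflate_sketch(data):
    compressor = zlib.compressobj(9, zlib.DEFLATED, -15)
    return compressor.compress(data) + compressor.flush()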