Example #1
 def _gen_hash_table(self, data_to_hash, block_size=0x4000):
     hash_table = b''
     n = 0
     while True:
         block = data_to_hash[n * block_size:(n + 1) * block_size]
         if not block:
             break
         hash_table += sha256(pad_to(block, multiple=block_size))
         n += 1
     return hash_table
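
This snippet relies on two project helpers, sha256 and pad_to, whose definitions are not shown here. A minimal sketch of what they could look like, assuming sha256 returns the raw digest bytes and pad_to zero-pads either to a multiple of a block size or to an absolute length (both keyword forms appear in these examples):

import hashlib


def sha256(data):
    # Assumed helper: raw 32-byte SHA-256 digest of `data`.
    return hashlib.sha256(data).digest()


def pad_to(data, multiple=None, length=None):
    # Assumed helper: zero-pad `data` to a multiple of `multiple` bytes,
    # or to an absolute `length` in bytes. Already-aligned data is unchanged.
    if multiple is not None:
        length = -(-len(data) // multiple) * multiple
    return data + b'\x00' * (length - len(data))
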
Example #2
 def add(self, entry):
     assert isinstance(entry, self.entry_type), 'Invalid entry type'
     if (len(self.data) <= self.entry_nb) and (self.entry_type is DirEntry):
         self.data.append({
             'Parent': pk_u32(entry.offsets['Parent']),
             'Sibling': pk_u32(entry.offsets['Sibling']),
             'Child': pk_u32(entry.offsets['Child']),
             'File': pk_u32(entry.offsets['File']),
             'PreviousHash': pk_u32(ROMFS_ENTRY_EMPTY),
             'NameLength': pk_u32(len(entry.name)),
             'Name': pad_to(entry.name.encode(), multiple=4)
         })
     elif len(self.data) <= self.entry_nb:
         self.data.append({
             'Parent': pk_u32(entry.offsets['Parent']),
             'Sibling': pk_u32(entry.offsets['Sibling']),
             'DataOffset': pk_u64(entry.offsets['Data']),
             'Size': pk_u64(entry.offsets['Size']),
             'PreviousHash': pk_u32(ROMFS_ENTRY_EMPTY),
             'NameLength': pk_u32(len(entry.name)),
             'Name': pad_to(entry.name.encode(), multiple=4)
         })
     else:
         raise ValueError('Table is already filled up')
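
pk_u32 and pk_u64 are likewise project helpers that are not shown in this snippet. Assuming the table entries are little-endian, a minimal sketch of the packers, plus a hypothetical serialize_entry that flattens one of the dicts built above by concatenating its values in insertion order:

import struct


def pk_u32(value):
    # Assumed helper: pack an unsigned 32-bit little-endian integer.
    return struct.pack('<I', value)


def pk_u64(value):
    # Assumed helper: pack an unsigned 64-bit little-endian integer.
    return struct.pack('<Q', value)


def serialize_entry(entry_dict):
    # Hypothetical flattening step: dicts preserve insertion order
    # (Python 3.7+), so joining the values reproduces the field layout above.
    return b''.join(entry_dict.values())
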
Example #3
    def _buffered_repack(self, disp):
        header, footer = self._get_metadata()

        yield header
        cur_file = self.first_file
        while cur_file is not None:
            if disp:
                print('Appending %s...' % cur_file.path_in_romfs)
            inf = self.open(cur_file)
            while True:
                buf = inf.read(0x10000)
                if not buf:
                    break
                yield pad_to(buf, multiple=0x10)
            inf.close()
            cur_file = cur_file.next
        yield footer
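
Since _buffered_repack is a generator that yields the header, each file's padded contents, and the footer in order, a caller can stream the rebuilt image to disk without buffering it all in memory. A hypothetical usage sketch (the RomFS('extracted_dir') constructor and the output path are assumptions, not part of the original API):

romfs = RomFS('extracted_dir')              # assumed constructor
with open('repacked.romfs', 'wb') as out:   # hypothetical output path
    for chunk in romfs._buffered_repack(disp=True):
        out.write(chunk)
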
Example #4
    def _gen_header(self):
        dir_hash_table_offset = ROMFS_FILEPARTITION_OFS + self.data_size
        dir_hash_table_length = 4 * self.dir_hash_table_entry_count
        dir_table_offset = dir_hash_table_offset + dir_hash_table_length
        dir_table_length = len(self.dir_table)
        file_hash_table_offset = dir_table_offset + dir_table_length
        file_hash_table_length = 4 * self.file_hash_table_entry_count
        file_table_offset = file_hash_table_offset + file_hash_table_length
        file_table_length = len(self.file_table)

        header = b''
        header += pk_u64(ROMFS_HEADER_LENGTH)
        header += pk_u64(dir_hash_table_offset)
        header += pk_u64(dir_hash_table_length)
        header += pk_u64(dir_table_offset)
        header += pk_u64(dir_table_length)
        header += pk_u64(file_hash_table_offset)
        header += pk_u64(file_hash_table_length)
        header += pk_u64(file_table_offset)
        header += pk_u64(file_table_length)
        header += pk_u64(ROMFS_FILEPARTITION_OFS)
        header = pad_to(header, length=ROMFS_FILEPARTITION_OFS)
        return header
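
The header written above is ten consecutive u64 fields (0x50 bytes) followed by zero padding up to ROMFS_FILEPARTITION_OFS, so it can be read back with a single struct.unpack_from. A minimal sketch, assuming the fields are little-endian as in the packers sketched earlier (parse_header is hypothetical):

import struct


def parse_header(header):
    # Unpack the ten u64 fields emitted by _gen_header, in the same order.
    names = ('header_length',
             'dir_hash_table_offset', 'dir_hash_table_length',
             'dir_table_offset', 'dir_table_length',
             'file_hash_table_offset', 'file_hash_table_length',
             'file_table_offset', 'file_table_length',
             'file_partition_offset')
    return dict(zip(names, struct.unpack_from('<10Q', header)))
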
Example #5
    def _gen_hash_tree(self, disp=True):
        # If we want to yield the section in the right order,
        # we have no choice but to completely hash the romfs beforehand
        if disp:
            print('Hashing RomFS...')
        lvl_5 = b''
        data_to_hash = b''
        block_size = self.section_header.lvls[5].block_size
        for buf in super(HashTreeWrappedRomFS,
                         self)._buffered_repack(disp=False):
            data_to_hash += buf
            while len(data_to_hash) >= block_size:
                lvl_5 += sha256(data_to_hash[:block_size])
                data_to_hash = data_to_hash[block_size:]
        if data_to_hash:
            lvl_5 += sha256(pad_to(data_to_hash, multiple=block_size))
        self.lvl_5_size = len(lvl_5)

        if disp:
            print('Hashing levels...')
        lvl_4 = self._gen_hash_table(
            lvl_5, block_size=self.section_header.lvls[4].block_size)
        self.lvl_4_size = len(lvl_4)
        lvl_3 = self._gen_hash_table(
            lvl_4, block_size=self.section_header.lvls[3].block_size)
        self.lvl_3_size = len(lvl_3)
        lvl_2 = self._gen_hash_table(
            lvl_3, block_size=self.section_header.lvls[2].block_size)
        self.lvl_2_size = len(lvl_2)
        lvl_1 = self._gen_hash_table(
            lvl_2, block_size=self.section_header.lvls[1].block_size)
        self.lvl_1_size = len(lvl_1)
        self.master_hash = self._gen_hash_table(
            lvl_1, block_size=self.section_header.lvls[0].block_size)

        return lvl_1, lvl_2, lvl_3, lvl_4, lvl_5
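
The five levels form a Merkle-style integrity tree: level 5 holds the digests of the RomFS data blocks, each higher level hashes the level below it, and the master hash covers level 1. A minimal verification sketch under that assumption, reusing the sha256/pad_to helpers sketched after Example #1 (verify_block is hypothetical, not part of the original class):

def verify_block(data_block, block_index, lvl_5, block_size):
    # Compare one (padded) data block against its 32-byte digest in level 5.
    expected = lvl_5[block_index * 32:(block_index + 1) * 32]
    return sha256(pad_to(data_block, multiple=block_size)) == expected
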