def write_nse(fname, time_stamps, remarks=''):
    """Write out the given time stamps into a .nse file.

    :param fname: path of the output file.
    :param time_stamps: iterable of integer timestamps, one per record.
    :param remarks: free-text remark, NUL-padded to the 16 KiB header.
    """
    # Record layout: u64 timestamp, two u32 ids, 8 u32 params, 32 i16
    # samples ('=' -> native byte order, standard sizes, no padding).
    fmt = '=QII8I32h'
    dwScNumber = 1
    dwCellNumber = 1
    # dnParams (8 u32) and snData (32 i16) are all written as zeros.
    garbage = [0] * 40
    with open(fname, 'wb') as fout:
        # BUG FIX: a file opened in 'wb' mode needs bytes; the original
        # wrote a str header, which raises TypeError on Python 3.
        # latin-1 is byte-transparent, so Python 2 behaviour is unchanged
        # for any 8-bit remark text.
        header = remarks.ljust(16 * 1024, '\x00').encode('latin-1')
        fout.write(header)
        for ts in time_stamps:
            fout.write(pk(fmt, ts, dwScNumber, dwCellNumber, *garbage))
def decrypt_block_ecb(self, block):
    """Decrypt a single cipher block in ECB mode.

    Undoes the encryption rounds in reverse order: round-key XOR,
    inverse column mix (skipped for the last encryption round),
    inverse row shift, and inverse S-box substitution, finishing with
    the initial whitening key.
    """
    assert len(block) == self.block_size
    state = list(up('>IIII', block))
    # Walk the rounds backwards, inverting each encryption step.
    for rnd in range(self.num_rounds, 0, -1):
        state = [w ^ k for w, k in zip(state, self.keys[rnd])]
        # The final encryption round has no column mix, so its inverse
        # is skipped on the first (highest) round here.
        if rnd != self.num_rounds:
            state = self.unmix_columns(state)
        state = self.unshift_columns(state)
        state = [self.send_through_sbox(w, self.sbox_dec) for w in state]
    # Undo the initial whitening with round key 0.
    state = [w ^ k for w, k in zip(state, self.keys[0])]
    return pk('>IIII', *state)
def write_nse(fname, time_stamps, remarks=''):
    """Write out the given time stamps into a .nse file.

    :param fname: path of the output file.
    :param time_stamps: iterable of integer timestamps, one per record.
    :param remarks: free-text remark, NUL-padded to the 16 KiB header.
    """
    # Record layout: u64 timestamp, two u32 ids, 8 u32 params, 32 i16
    # samples ('=' -> native byte order, standard sizes, no padding).
    fmt = '=QII8I32h'
    dwScNumber = 1
    dwCellNumber = 1
    # dnParams (8 u32) and snData (32 i16) are all written as zeros.
    garbage = [0] * 40
    with open(fname, 'wb') as fout:
        # BUG FIX: a file opened in 'wb' mode needs bytes; the original
        # wrote a str header, which raises TypeError on Python 3.
        # latin-1 is byte-transparent, so Python 2 behaviour is unchanged.
        header = remarks.ljust(16 * 1024, '\x00').encode('latin-1')
        fout.write(header)
        for ts in time_stamps:
            fout.write(pk(fmt, ts, dwScNumber, dwCellNumber, *garbage))
def Q(delay, w, h, x, y, tidx):
    """Build a GIF graphic-control extension plus one solid image block.

    The image covers w*h pixels at (x, y), every pixel using palette
    index tidx, LZW-compressed.  Returns the raw bytes of both blocks.
    """
    assert 0 <= tidx <= 255
    assert 0 <= delay < 2 ** 16
    out = BIO('')
    # Graphic Control Extension: 0x21 introducer, 0xF9 label, block size
    # 4, packed flags 0b0000_0101 (reserved bits per the original).
    out.write('\x21\xF9\x04\x05')
    out.write(pk('H', delay))  # frame delay, unsigned short
    out.write(pk('B', tidx))   # transparent colour index, unsigned char
    out.write('\x00')          # extension block terminator
    # Image Descriptor: 0x2C separator, then left, top, width, height.
    out.write('\x2c')
    for field in (x, y, w, h):
        out.write(pk('H', field))
    out.write('\x00')          # flags: no local colour table
    LZWMinimumCodeSize = 8
    # A solid frame: the whole index stream is the same palette index,
    # repeated w*h times, then LZW-compressed.
    cmprs, _ = lzwlib.Lzwg.compress([tidx] * (w * h), LZWMinimumCodeSize)
    out.write(pk('B', LZWMinimumCodeSize) + WB(cmprs))
    out.seek(0)
    return out.read()
def make_standard(exp):
    """Convert an experimental package image into a standard one.

    Clears the experimental flag in the outer header, then sets flag
    bit 0 on the 'mesosphere' (CONTENT_TYPE_KRN) content entry instead.
    """
    _, metadata_offset, is_exp = up('<III', exp[:12])
    assert is_exp == 1
    # Zero the experimental flag (the u32 at offset 8).
    std = exp[:8] + pk('<I', 0) + exp[12:]
    # Parse the mesosphere content header to locate the content table.
    magic, size, code_ofs, content_ofs, num_contents, ver, sup_ver, rev = up(
        '<IIIIIIII', exp[metadata_offset:metadata_offset + 0x20])
    for i in range(num_contents):
        entry_ofs = content_ofs + 0x20 * i
        start, size, cnt_type, flag0, flag1, flag2, pad = up(
            '<IIBBBBI', exp[entry_ofs:entry_ofs + 0x10])
        if cnt_type != 10:  # CONTENT_TYPE_KRN
            continue
        name_len = len(b'mesosphere') + 1
        assert exp[entry_ofs + 0x10:entry_ofs + 0x10 + name_len] == b'mesosphere\x00'
        assert flag0 == 0 and flag1 == 0 and flag2 == 0
        # Re-pack the entry with flag0 bit 0 set.
        patched = pk('<IIBBBBI', start, size, cnt_type, flag0 | 0x1,
                     flag1, flag2, pad)
        std = std[:entry_ofs] + patched + std[entry_ofs + 0x10:]
    return std
def convert_image(image_fn):
    """Convert a splash image into the raw BGRA framebuffer layout.

    A 1280x720 landscape image is rotated to 720x1280 portrait first;
    each row of output is padded out to SPLASH_SCREEN_STRIDE pixels.
    """
    splash = Image.open(image_fn, 'r')
    if splash.size == (1280, 720):
        splash = splash.transpose(Image.ROTATE_90)
    w, h = splash.size
    assert w == 720
    assert h == 1280
    splash = splash.convert('RGBA')
    # Pixels are emitted as little-endian B, G, R, A bytes.
    row_pad = b'\x00' * ((SPLASH_SCREEN_STRIDE - SPLASH_SCREEN_HEIGHT) * 4)
    splash_bin = b''
    for row in range(SPLASH_SCREEN_WIDTH):
        for col in range(SPLASH_SCREEN_HEIGHT):
            r, g, b, a = splash.getpixel((col, row))
            splash_bin += pk('<BBBB', b, g, r, a)
        splash_bin += row_pad
    return splash_bin
def main(argc, argv):
    # Packs kernel_ldr.bin and kernel.bin (plus an optional ini.bin) into a
    # single page-aligned mesosphere image targeting firmware 13.0.0.
    if argc != 4:
        print('Usage: %s kernel_ldr.bin kernel.bin output.bin' % argv[0])
        return 1
    with open(argv[1], 'rb') as f:
        kernel_ldr = f.read()
    with open(argv[2], 'rb') as f:
        kernel = f.read()
    # The metadata block sits 4 bytes in and starts with the 'MSS0' magic.
    kernel_metadata_offset = 4
    assert (kernel_metadata_offset <= len(kernel) - 0x40)
    assert (kernel[kernel_metadata_offset:kernel_metadata_offset + 4] == b'MSS0')
    bss_start, bss_end, kernel_end = up('<III', kernel[kernel_metadata_offset + 0x30:kernel_metadata_offset + 0x3C])
    assert (bss_end >= bss_start)
    assert (bss_end == kernel_end)
    assert (len(kernel) <= kernel_end)
    # Zero-pad the kernel image out to its declared end (covers .bss).
    if len(kernel) < kernel_end:
        kernel += b'\x00' * (kernel_end - len(kernel))
    assert (kernel_end == len(kernel))
    embedded_ini = b''
    try:
        with open('ini.bin', 'rb') as f:
            embedded_ini = f.read()
    except:
        # ini.bin is optional; a missing file just means no embedded INI.
        pass
    embedded_ini_offset = align_up(kernel_end, 0x1000)
    embedded_ini_end = embedded_ini_offset + len(embedded_ini)
    # TODO: Create and embed an INI, eventually.
    # When no INI is embedded, reserve one empty page where it would go.
    kernel_ldr_offset = align_up(embedded_ini_end, 0x1000) + (0x1000 if len(embedded_ini) == 0 else 0)
    kernel_ldr_end = kernel_ldr_offset + len(kernel_ldr)
    mesosphere_end = align_up(kernel_ldr_end, 0x1000)
    with open(argv[3], 'wb') as f:
        f.write(kernel[:kernel_metadata_offset + 4])
        # Overwrite the metadata words with the INI/loader offsets and the
        # target firmware version (13.0.0).
        f.write(pk('<QQI', embedded_ini_offset, kernel_ldr_offset, atmosphere_target_firmware(13, 0, 0)))
        f.write(kernel[kernel_metadata_offset + 0x18:])
        f.seek(embedded_ini_offset)
        f.write(embedded_ini)
        f.seek(embedded_ini_end)
        f.seek(kernel_ldr_offset)
        f.write(kernel_ldr)
        f.seek(mesosphere_end)
        # Trailing zero page so the output ends on a page boundary.
        f.write(b'\x00'*0x1000)
    return 0
def main(argc, argv):
    # Fixed-path variant: builds mesosphere.bin from the kernel_ldr/ and
    # kernel/ build outputs, targeting firmware 10.1.0.  Takes no arguments.
    if argc != 1:
        print('Usage: %s' % argv[0])
        return 1
    with open('kernel_ldr/kernel_ldr.bin', 'rb') as f:
        kernel_ldr = f.read()
    with open('kernel/kernel.bin', 'rb') as f:
        kernel = f.read()
    # The metadata block sits 4 bytes in and starts with the 'MSS0' magic.
    kernel_metadata_offset = 4
    assert (kernel_metadata_offset <= len(kernel) - 0x40)
    assert (kernel[kernel_metadata_offset:kernel_metadata_offset + 4] == b'MSS0')
    kernel_end = up(
        '<I', kernel[kernel_metadata_offset + 0x38:kernel_metadata_offset + 0x3C])[0]
    # NOTE(review): unlike the argv-based variants of this tool, this one
    # asserts kernel_end >= len(kernel) but never pads the kernel out to
    # kernel_end before computing offsets -- confirm that is intended.
    assert (kernel_end >= len(kernel))
    embedded_ini = b''
    try:
        with open('ini.bin', 'rb') as f:
            embedded_ini = f.read()
    except:
        # ini.bin is optional.
        pass
    # The INI always gets its own page here (unconditional +0x1000).
    embedded_ini_offset = align_up(kernel_end, 0x1000) + 0x1000
    embedded_ini_end = embedded_ini_offset + len(
        embedded_ini)  # TODO: Create and embed an INI, eventually.
    kernel_ldr_offset = align_up(embedded_ini_end, 0x1000) + 0x1000
    kernel_ldr_end = kernel_ldr_offset + len(kernel_ldr)
    mesosphere_end = align_up(kernel_ldr_end, 0x1000)
    with open('mesosphere.bin', 'wb') as f:
        f.write(kernel[:kernel_metadata_offset + 4])
        # Patch the metadata with the INI/loader offsets and target firmware.
        f.write(
            pk('<QQI', embedded_ini_offset, kernel_ldr_offset,
               atmosphere_target_firmware(10, 1, 0)))
        f.write(kernel[kernel_metadata_offset + 0x18:])
        f.seek(embedded_ini_offset)
        f.write(embedded_ini)
        f.seek(embedded_ini_end)
        f.seek(kernel_ldr_offset)
        f.write(kernel_ldr)
        f.seek(mesosphere_end)
        # Trailing zero page so the output ends on a page boundary.
        f.write(b'\x00' * 0x1000)
    return 0
def WB(buf):
    """Split buf into GIF data sub-blocks.

    Each sub-block is a one-byte length (at most 0xFF) followed by that
    many payload bytes; a zero-length block terminates the sequence.
    """
    # Number of 255-byte chunks, rounding up for a partial final chunk.
    # (// is identical to / for Python 2 ints, just more explicit.)
    blockcount = len(buf) // 0xFF
    if len(buf) % 0xFF:
        blockcount += 1
    pieces = []
    for i in xrange(0, blockcount * 0xFF, 0xFF):
        chunk = buf[i:i + 0xFF]
        pieces.append(pk('B', len(chunk)) + chunk)
    # A 0x00 length byte marks the end of the block sequence.
    return ''.join(pieces) + '\x00'
def testGetValveCfg(s):
    # Sends a hand-built "get valve config" request frame over socket s and
    # dumps the raw reply.  NOTE(review): uses Python 2 print statements.
    # frame head
    data = pk('B', 0x02)
    data += pk('B', 0X06)
    # payload length
    data += pk('<H', 0x0006)
    # payload
    # 0x0901000a matches the device-id constants used by the other frame
    # builders in this file -- presumably the target device id; confirm.
    data += pk('<I', 0x0901000a)
    data += pk('<H', 0x0008)
    # frame tail (fixed CRC-field constant + ETX byte, as in other frames)
    data += pk('<H', 0xd1b8)
    data += pk('B', 0x03)
    print "testGetValveCfg frame on network is:"
    print repr(data)
    print "send testGetValveCfg data to server"
    s.sendall(data)
    data = s.recv(4096)
    print('received', repr(data))
def hook_code(uc, address, size, self):
    # Per-instruction Unicorn hook: patches instructions the emulator cannot
    # run, records function calls, and reconstructs the MMU mappings plus
    # VBAR/TTBR values from observed system-register writes.
    assert size == 4
    insn = up('<I', uc.mem_read(address, 4))[0]
    self.restore_instructions(uc)
    # Swap known-invalid instructions for their recorded replacements.
    if insn in self.invalid_instructions:
        self.patch_instruction(uc, address, pk('<I', self.invalid_instructions[insn]))
    # The previous instruction was a call; this address is its target.
    if self.calling_function:
        self.add_function_call(uc, address)
        self.calling_function = False
    # 0x94000000 opcode group: a call instruction (BL) -- record the
    # target on the next hook invocation.
    if (insn & 0xFC000000) in [0x94000000]:
        self.calling_function = True
    # First MSR in the 0xD51E1000 group with no mappings yet: replay the
    # recorded calls to the (single) MMU-mapping function to rebuild the
    # virtual<->physical tables.
    if (insn & 0xFFFFFFE0) == 0xD51E1000 and len(self.virt_to_phys) == 0:
        mmu_funcs = [
            func for func in self.function_calls if self.is_mmu_func(func)
        ]
        assert len(mmu_funcs) == 1
        mmu_func = mmu_funcs[0]
        for call in self.function_calls[mmu_func]:
            # First five call arguments: L3 table, vaddr, paddr, size, attr.
            l3_tab, vaddr, paddr, size, attr = tuple(call[:5])
            self.l3_table = l3_tab
            if vaddr != paddr:
                # Non-identity mapping: mirror the physical contents at the
                # virtual address inside the emulator.
                assert vaddr >= 0x80000000
                uc.mem_map(vaddr, size)
                mem = uc.mem_read(paddr, size)
                uc.mem_write(vaddr, str(mem))
                self.virt_to_phys[vaddr] = (paddr, size, attr)
                self.phys_to_virt[paddr] = (vaddr, size, attr)
            self.mappings.append((vaddr, paddr, size, attr))
    if (insn & 0xFFFFFFE0) == 0xD51EC000:  # VBAR set
        # Low 5 bits select the source general-purpose register.
        which = insn & 0x1F
        self.vbar = uc.reg_read(Emulator.REGISTER_IDS[which])
    if (insn & 0xFFFFFFE0) == 0xD51E2000:  # TTBR set
        which = insn & 0x1F
        self.ttbr = uc.reg_read(Emulator.REGISTER_IDS[which])
        # The translation table is expected to be 0x7C0-aligned within 2K.
        assert (self.ttbr & 0x7FF) == 0x7C0
def locate_fields(full):
    """Locate the packed field-pointer table inside the binary blob.

    Finds the in-memory addresses of four marker strings (their file
    offset plus LOAD_BASE), then returns the offset where those four
    addresses appear as consecutive little-endian u64 values.
    """
    start = ['TestU64', 'TestU32', 'TestI64', 'TestI32']
    # COMPAT FIX: encode the NUL-terminated needle so searching works when
    # 'full' is bytes (Python 3); .encode('ascii') is an identity round-trip
    # for ASCII str on Python 2, so behaviour there is unchanged.
    inds = [LOAD_BASE + full.index(('%s\x00' % s).encode('ascii')) for s in start]
    target = pk('<QQQQ', inds[0], inds[1], inds[2], inds[3])
    return full.index(target)
def put_qword(z, target, val):
    """Return z with the 8 bytes at offset target replaced by val (LE u64)."""
    prefix = z[:target]
    suffix = z[target + 8:]
    return prefix + pk('<Q', val) + suffix
def put_dword(z, target, val):
    """Return z with the 4 bytes at offset target replaced by val (LE u32)."""
    prefix = z[:target]
    suffix = z[target + 4:]
    return prefix + pk('<I', val) + suffix
def E(f, s, o):
    # GIF flag encoder: copies GIF stream f to output o, embedding the
    # mutated flag s as a leading comment block and appending one extra
    # (graphic-control, image) block pair per flag letter.
    # Headers of file
    global_colors, bgcoloridx, size_count, hh, ww = F(f)
    # (mutated) Flag: mp looks like the shuffled text and ks a per-letter
    # work list that is consumed below -- confirm against M()'s definition.
    mp, ks = M(s)
    # By here, current file cursor should be at the end of the header
    hdr_end = f.tell()
    # Go back to file beginning
    f.seek(0)
    # Write header string to output
    o.write(f.read(hdr_end))
    fc = 0
    # write these 2 bytes:
    # 0x21 is the extension introducer
    # 0xfe is the COMMENT label
    o.write('\x21\xFE')
    # write the mapped flag with 'RDBNB' prepended, split into GIF
    # sub-blocks by WB
    o.write(WB('RDBNB' + mp))
    o.flush()
    # iterate on the generator C returns
    for t, buf in C(f):
        print('.', end='')
        # comment sections are dropped from the output entirely
        if t == T.EC:
            continue
        # graphic control label
        if t == T.EG:
            # while the flag is still being encoded, shrink each delay
            if ks:
                # reads 2 bytes from buffer ([4,5]) and parses as H - unsigned short
                # NOTE(review): 'delay - 3' below only works if 'up' returns
                # a scalar here, not struct.unpack's tuple -- confirm the
                # helper's definition in this script.
                delay = up('<H', buf[4:6])
                # all delays of gif were over 6
                assert delay >= 6
                # changes the delay. decrease by 3
                buf = buf[:4] + pk('<H', delay - 3) + buf[6:]
            obuf = buf
        # image separator
        elif t == T.I:
            fc += 1
            total_raw_blocks_data = ''
            bf = BIO(buf)
            pref = bf.read(10)
            # reads the code size but parses it as char
            LZWMinimumCodeSize = ord(bf.read(1))
            # read block data
            total_raw_blocks_data = k(bf)
            indices, dcmprsdcodes = lzwlib.Lzwg.decompress(
                total_raw_blocks_data, LZWMinimumCodeSize)
            # < = little endian
            # B = unsigned char
            # H = unsigned short
            xxx = unpack('<B H H H H B', pref)
            # round-trip: re-compress the image data unchanged
            cmprs, codes = lzwlib.Lzwg.compress(indices, LZWMinimumCodeSize)
            obuf = pref + pk('B', LZWMinimumCodeSize) + WB(cmprs)
            # pop ks here - only on image sections: append one encoded
            # letter block pair (built by h) after each original image
            if ks:
                mpindx, isup = ks.pop(0)
                obuf += h(mpindx, isup, ww, hh, len(global_colors) - 1)
        else:
            obuf = buf
        # write to output
        o.write(obuf)
        o.flush()
    # eventually every per-letter key must have been consumed
    assert not ks, ''
    return 0
def __init__(self, data):
    """Capture the frame type and build the payload.

    Payload layout: little-endian u32 device id followed by one status
    byte, both taken from the incoming data record.
    """
    self.ftype = data[TYPE]
    payload = pk('<I', data[DEVID])
    payload += pk('B', data[STATUS])
    self.payload = payload
def groupNetData(self):
    """Serialize this device-info record into its wire format.

    Layout (little-endian): head, type, length, device id, model,
    hw/fw versions, language, year/month/day/hour/minute/second,
    CRC field and trailing ETX byte.
    """
    # COMPAT FIX: start from a bytes literal so concatenating the bytes
    # returned by struct.pack works on Python 3; b'' is str on Python 2,
    # so Python 2 behaviour is unchanged.
    data = b''
    data += pk('B', self.head)
    data += pk('B', self.type)
    data += pk('<H', self.len)
    data += pk('<I', self.devid)
    data += pk('<I', self.model)
    data += pk('<I', self.hwVer)
    data += pk('<I', self.fwVer)
    data += pk('B', self.lang)
    data += pk('<H', self.year)
    data += pk('B', self.month)
    data += pk('B', self.day)
    data += pk('B', self.hour)
    data += pk('B', self.minute)
    data += pk('B', self.second)
    data += pk('<H', self.crc)
    data += pk('B', self.etx)
    return data
def __init__(self):
    """Pre-build a valve-configuration frame containing 8 valve entries."""
    frame = pk('B', 0x02)      # frame head
    frame += pk('B', 0X05)     # frame type
    frame += pk('<H', 0x0036)  # payload length
    frame += pk('<I', 0x0901000a)
    frame += pk('<H', 0x0008)
    for i in range(8):
        # Each valve entry is written twice: id byte + u16 (0x000a + i).
        for _ in range(2):
            frame += pk('B', i)
            frame += pk('<H', 0x000a + i)
    frame += pk('<H', 0xd1b8)  # frame tail constant
    frame += pk('B', 0x03)     # ETX
    self.data = frame
def repack_nso(path_original, path_patch, path_out):
    # Rebuild an NSO image, replacing its compressed .text segment with a
    # freshly LZ4-compressed patched text, and fix up the header hash,
    # compressed size, and segment offsets accordingly.
    nso = b''
    patched_text_hash = b''
    # Read the original NSO
    with open(path_original, 'rb') as f:
        nso = bytearray(f.read())
    # Read the patched text
    with open(path_patch, 'rb') as f:
        data = f.read()
        # NOTE(review): 'sha256' here must return the raw 32-byte digest
        # (it is spliced into out_nso[0xA0:0xC0] below) -- confirm the
        # helper's definition.
        patched_text_hash = sha256(data)
        # store_size=False: raw LZ4 block without a size prefix; the NSO
        # header tracks segment sizes itself.
        compressed_patched_text = lz4.block.compress(data, store_size=False)
    # Segment locations come from the NSO header (u32 fields).
    text_off = up('<I', nso[0x10:0x14])[0]
    text_compressed_size = len(compressed_patched_text)
    # Retrieve original rodata segment
    rodata_off = up('<I', nso[0x20:0x24])[0]
    rodata_compressed_size = up('<I', nso[0x64:0x68])[0]
    compressed_rodata = nso[rodata_off:rodata_off + rodata_compressed_size]
    # Retrieve original data segment
    data_off = up('<I', nso[0x30:0x34])[0]
    data_compressed_size = up('<I', nso[0x68:0x6C])[0]
    compressed_data = nso[data_off:data_off + data_compressed_size]
    # Set to the offsets of the output nso: segments are packed
    # back-to-back right after the new text.
    rodata_off = text_off + text_compressed_size
    data_off = rodata_off + rodata_compressed_size
    # Create the output nso
    out_nso = bytearray(data_off + data_compressed_size)
    # Copy over the original header
    # NOTE(review): assumes text_off == 0x100 (the copy targets a fixed
    # 0x100-byte destination slice) -- confirm for the inputs used.
    out_nso[0x0:0x100] = nso[0x0:text_off]
    # Write the new text hash
    out_nso[0xA0:0xC0] = patched_text_hash
    # Write the new compressed text size
    out_nso[0x60:0x64] = pk('<I', text_compressed_size)
    # Correct the header offsets
    out_nso[0x20:0x24] = pk('<I', rodata_off)  # rodata offset
    out_nso[0x30:0x34] = pk('<I', data_off)  # data offset
    # Write new data
    out_nso[text_off:text_off + text_compressed_size] = compressed_patched_text
    out_nso[rodata_off:rodata_off + rodata_compressed_size] = compressed_rodata
    out_nso[data_off:data_off + data_compressed_size] = compressed_data
    print('text:')
    print('\nrodata:')
    print('Offset {}'.format(rodata_off))
    print('len(compressed_rodata) {}'.format(len(compressed_rodata)))
    print('Original header size {}'.format(rodata_compressed_size))
    print('\ndata:')
    print('Offset {}'.format(data_off))
    # NOTE(review): label says compressed_rodata but this prints the size
    # of the data segment.
    print('len(compressed_rodata) {}'.format(len(compressed_data)))
    print('Original header size {}'.format(data_compressed_size))
    with open(path_out, 'wb') as f:
        f.write(out_nso)
def sxor(s1, s2):
    """XOR two equal-length byte strings together, byte by byte."""
    assert len(s1) == len(s2)
    out = b''
    for x, y in zip(s1, s2):
        out += pk('B', x ^ y)
    return out
def find_types(full, num_fields):
    """Find the field-type table by searching for its known u32 prefix.

    Returns num_fields little-endian u32 values starting at the match.
    """
    # COMPAT FIX: on Python 3, range() is not a list (the original
    # 'range(10) + [...]' raises TypeError) and the needle must be bytes.
    # list(range(10)) and b''.join are byte-for-byte identical on
    # Python 2 (b'' is str there), so old behaviour is preserved.
    KNOWN = list(range(10)) + [4, 4, 2, 4]
    ind = full.index(b''.join(pk('<I', i) for i in KNOWN))
    return list(up('<' + 'I' * num_fields, full[ind:ind + 4 * num_fields]))
def decrypt(encryptedFile, decodedFile):
    # Decoder for the GIF flag encoder: reads the shuffled flag from the
    # leading comment block, then recovers each letter's position and case
    # from the (graphic-control, image) block pairs the encoder appended.
    # Headers of file
    global_colors, bgcoloridx, size_count, hh, ww = readHeader(encryptedFile)
    # By here, current file cursor should be at the end of the header
    hdr_end = encryptedFile.tell()
    # Go back to file beginning
    encryptedFile.seek(0)
    # Write header string to output
    decodedFile.write(encryptedFile.read(hdr_end))
    # Reaching here means the header section of decoded file is done
    # Moving on ...
    # The first block should be a comment section which includes the secret flag (mutated)
    # should look like:
    # 0x21 extension introducer
    # 0xfe comment label
    # [{
    #    blockSize, RDBNB+flag
    # }]
    # 0x00
    readFirstComment(encryptedFile)
    encryptedFlag = 'OE7AUKL}_GY#0FR{!HMTWS'
    originalFlag = ''
    print('Finished reading first block (comment)')
    print 'The flag is: {0}'.format(encryptedFlag)
    print 'Flag size: {0}'.format(len(encryptedFlag))
    print 'Now pointing to the next section...'
    firstPotential = 0
    secondPotential = 0
    encodedFlag = []
    # After this, there should be no comment sections at all, because the
    # encoder doesn't write any comments.
    # NOTE(review): 'f' is not defined in this function -- it presumably
    # should be 'encryptedFile' (or a module-level global); confirm.
    gen = readBlocks(f)
    # Iterate on sections using the generator
    for t, buf in gen:
        if t == BlockType.GRAPHIC_CONTROL:
            # Get delay
            # NOTE(review): the equality tests below require 'up' to return
            # a scalar here, not struct.unpack's tuple -- confirm the helper.
            delay = up('<H', buf[4:6])
            # Flags
            flags = up('<B', buf[3:4])
            # Trans Color Idx
            transColIdx = up('<B', buf[6:7])
            # Encoder-added control blocks have flags=5, delay=3 and a
            # palette index within the 64-colour table.
            if flags == 5 and delay == 3 and (transColIdx >= 0 and transColIdx <= 63):
                secondPotential += 1
                encodedFlag.append(
                    (BlockType.GRAPHIC_CONTROL, secondPotential, {
                        'delay': delay,
                        'flags': flags,
                        'transColIdx': transColIdx
                    }))
            obuf = buf
        elif t == BlockType.IMAGE:
            total_raw_blocks_data = ''
            # re-read buffer
            bf = BIO(buf)
            # skip first 10 bytes to get to lzw size
            pref = bf.read(10)
            # reads the code size but parses it as char
            LZWMinimumCodeSize = ord(bf.read(1))
            # Read all blocks of image data
            total_raw_blocks_data = readImageBlock(bf)
            # Decompress the data (compressed by lzw)
            indices, dcmprsdcodes = lzwlib.Lzwg.decompress(
                total_raw_blocks_data, LZWMinimumCodeSize)
            # < = little endian
            # B = unsigned char = 1 byte
            # H = unsigned short = 2 bytes
            sep, leftp, topp, width, height, flags = unpack(
                '<B H H H H B', pref)
            # Encoder-added images use min code size 8 and zeroed flags.
            if (LZWMinimumCodeSize == 8 and flags == 0):
                firstPotential += 1
                encodedFlag.append((BlockType.IMAGE, firstPotential, {
                    'leftp': leftp,
                    'topp': topp,
                    'width': width,
                    'height': height,
                    'flags': flags,
                    'minCodeSize': LZWMinimumCodeSize,
                    'indices': indices,
                    'dcmprsdcodes': dcmprsdcodes
                }))
            cmprs, codes = lzwlib.Lzwg.compress(indices, LZWMinimumCodeSize)
            obuf = pref + pk('B', LZWMinimumCodeSize) + writeBlock(cmprs)
            # until flag has been encoded, 2 more blocks are added:
            # 1. Graphic Control Extension Block with the following:
            # 1.1. block size = 4 (constant ?)
            # 1.2. flags = 0000 0101 = 5 = bit 0 & bit 2
            # 1.3. delay = 3
            # 1.4. transp color idx = if even then lowercase, else uppercase
            # 2. Image Block
            # 2.1. left pos, top pos, width, height = nothing useful now
            # 2.2. flags = 0
            # 2.3. Local color table = 0
            # 2.4. lzw min code size = 8
            # 2.5. blocks = array of length (width*height) containing only [tidx (from 1.4)]
        # if not graphic control or image, just write without mutating
        else:
            obuf = buf
        # NOTE(review): 'o' is not defined in this function -- it presumably
        # should be 'decodedFile'; confirm.
        o.write(obuf)
        o.flush()
    # Pair each graphic-control record with the following image record:
    # the image geometry encodes the letter's position in the shuffled
    # flag and the control block's palette index parity encodes its case.
    i = 0
    while i < len(encodedFlag):
        grphic = encodedFlag[i]
        img = encodedFlag[i + 1]
        i += 2
        leftp = img[2]['leftp']
        topp = img[2]['topp']
        width = img[2]['width']
        height = img[2]['height']
        letterIdxInEnc = firstDecFunc(leftp, topp, width, height)
        isLower = grphic[2]['transColIdx'] % 2 == 0
        originalFlag += encryptedFlag[
            letterIdxInEnc] if not isLower else encryptedFlag[
                letterIdxInEnc].lower()
    print originalFlag
    return 0
def write_header(f, all_kips, wb_size, tk_size, xf_size, ex_size, ms_size, fs_size, rb_size, git_revision, major, minor, micro, relstep, s_major, s_minor, s_micro, s_relstep):
    # Emits the PK31 package header: metadata block, one 0x20-byte content
    # meta per fixed firmware component and per KIP, then the KIP metas,
    # padding each region with 0xCC filler out to fixed offsets.
    # Unpack kips
    emummc, kips = all_kips
    # Write magic as PK31 magic.
    f.write(b'PK31')
    # Write metadata offset (value 0x20).
    f.write(pk('<I', 0x20))
    # Write flags
    f.write(pk('<I', 0x00000000))
    # Write meso_size
    f.write(pk('<I', ms_size))
    # Write num_kips
    f.write(pk('<I', len(KIP_NAMES)))
    # Write reserved1 (0xCC filler)
    f.write(b'\xCC' * 0xC)
    # Write legacy magic
    f.write(b'FSS0')
    # Write total size
    f.write(pk('<I', 0x800000))
    # Write reserved2
    f.write(pk('<I', 0xCCCCCCCC))
    # Write content_header_offset
    f.write(pk('<I', 0x40))
    # Write num_content_headers (8 fixed components + the KIPs);
    f.write(pk('<I', 8 + len(KIP_NAMES)))
    # Write supported_hos_version (packed low-to-high: relstep..major);
    f.write(pk('<BBBB', s_relstep, s_micro, s_minor, s_major))
    # Write release_version;
    f.write(pk('<BBBB', relstep, micro, minor, major))
    # Write git_revision;
    f.write(pk('<I', git_revision))
    # Content metas: '<IIBBBBI16s' = offset, size, type byte, three flag
    # bytes, pad, then the 16-byte name.
    f.write(
        pk('<IIBBBBI16s', 0x000800, wb_size, 2, 0, 0, 0, 0xCCCCCCCC,
           b'warmboot'))
    f.write(
        pk('<IIBBBBI16s', 0x002000, tk_size, 12, 0, 0, 0, 0xCCCCCCCC,
           b'tsec_keygen'))
    f.write(
        pk('<IIBBBBI16s', 0x004000, xf_size, 11, 0, 0, 0, 0xCCCCCCCC,
           b'exosphere_fatal'))
    f.write(
        pk('<IIBBBBI16s', 0x048000, ex_size, 1, 0, 0, 0, 0xCCCCCCCC,
           b'exosphere'))
    f.write(
        pk('<IIBBBBI16s', 0x056000, ms_size, 10, 0, 0, 0, 0xCCCCCCCC,
           b'mesosphere'))
    f.write(
        pk('<IIBBBBI16s', 0x7C0000, fs_size, 0, 0, 0, 0, 0xCCCCCCCC,
           b'fusee'))
    f.write(
        pk('<IIBBBBI16s', 0x7E0000, rb_size, 3, 0, 0, 0, 0xCCCCCCCC,
           b'rebootstub'))
    f.write(
        pk('<IIBBBBI16s', 0x100000, len(emummc), 8, 0, 0, 0, 0xCCCCCCCC,
           b'emummc'))
    # KIP payloads are packed after emummc, each 16-byte aligned.
    ofs = (0x100000 + len(emummc) + 0xF) & ~0xF
    for kip_name in KIP_NAMES:
        kip_data = kips[kip_name]
        f.write(
            pk('<IIBBBBI16s', ofs, len(kip_data), 6, 0, 0, 0, 0xCCCCCCCC,
               kip_name))
        ofs += len(kip_data)
        ofs += 0xF
        ofs &= ~0xF
    # Pad to kip metas.
    f.write(b'\xCC' * (0x400 - 0x40 - (0x20 * (8 + len(KIP_NAMES)))))
    # Write emummc_meta. */
    write_kip_meta(f, emummc, 0x100000)
    # Write kip metas (same offset walk as the content metas above)
    ofs = (0x100000 + len(emummc) + 0xF) & ~0xF
    for kip_name in KIP_NAMES:
        kip_data = kips[kip_name]
        write_kip_meta(f, kip_data, ofs)
        ofs += len(kip_data)
        ofs += 0xF
        ofs &= ~0xF
    # Pad to end of header
    f.write(b'\xCC' * (0x800 - (0x400 + (1 + len(KIP_NAMES)) * 0x30)))
def groupFrame(self):
    """Assemble a full frame: head byte + encoded (type, payload) body."""
    prefix = pk('B', FRAME_HEAD)
    body = encodePayload(pk('B', self.ftype), self.payload)
    return prefix + body
def find_categories(full, num_fields):
    """Find the category table by searching for its known u32 prefix.

    Returns num_fields little-endian u32 values starting at the match.
    """
    KNOWN = [0] * 10 + [1] * 2 + [0x3B] * 2
    # COMPAT FIX: join as bytes so this works when 'full' is bytes
    # (Python 3); b'' is str on Python 2, so behaviour there is unchanged.
    ind = full.index(b''.join(pk('<I', i) for i in KNOWN))
    return list(up('<' + 'I' * num_fields, full[ind:ind + 4 * num_fields]))
def gen_rhfs0_head(self, upd_list, norm_list, sec_list, sec_fileSizes, sec_shalist):
    # Builds the HFS0 headers for an XCI-style layout: update, normal and
    # secure partition headers, plus the root header that indexes them.
    # Each partition header is padded up to a multiple of 0x200 bytes;
    # the multiplier (number of 0x200 sectors) is returned per partition.
    hreg = 0x200
    # Per-entry hashed-region size field (little-endian u32).
    hashregion = hreg.to_bytes(0x04, byteorder='little')
    #UPD HEADER
    filesNb = len(upd_list)
    stringTable = '\x00'.join(str(nca) for nca in upd_list)
    # 0x10 fixed header + 0x40 per entry + the string table.
    headerSize = 0x10 + (filesNb) * 0x40 + len(stringTable)
    upd_multiplier = math.ceil(headerSize / 0x200)
    remainder = 0x200 * upd_multiplier - headerSize
    headerSize += remainder
    # NOTE(review): fileSizes/fileOffsets/shalist stay empty for this
    # partition, so the entry loop below raises IndexError unless
    # upd_list is empty -- apparently only empty update partitions are
    # supported here; confirm.
    fileSizes = list()
    fileOffsets = list()
    shalist = list()
    fileNamesLengths = [
        len(os.path.basename(file)) + 1 for file in upd_list
    ]  # +1 for the \x00
    stringTableOffsets = [
        sum(fileNamesLengths[:n]) for n in range(filesNb)
    ]
    upd_header = b''
    upd_header += b'HFS0'
    upd_header += pk('<I', filesNb)
    upd_header += pk('<I', len(stringTable) + remainder)
    upd_header += b'\x00\x00\x00\x00'
    # One 0x40-byte entry per file: offset, size, string-table offset,
    # hashed-region size, 8 bytes padding, SHA-256 of the hashed region.
    for n in range(filesNb):
        upd_header += pk('<Q', fileOffsets[n])
        upd_header += pk('<Q', fileSizes[n])
        upd_header += pk('<I', stringTableOffsets[n])
        upd_header += hashregion
        upd_header += b'\x00\x00\x00\x00\x00\x00\x00\x00'
        upd_header += bytes.fromhex(shalist[n])
    upd_header += stringTable.encode()
    upd_header += remainder * b'\x00'
    updSize = len(upd_header) + sum(fileSizes)
    #print (hx(upd_header))
    #NORMAL HEADER
    filesNb = len(norm_list)
    stringTable = '\x00'.join(str(nca) for nca in norm_list)
    headerSize = 0x10 + (filesNb) * 0x40 + len(stringTable)
    norm_multiplier = math.ceil(headerSize / 0x200)
    remainder = 0x200 * norm_multiplier - headerSize
    headerSize += remainder
    # NOTE(review): same empty-list caveat as the update partition above.
    fileSizes = list()
    fileOffsets = list()
    shalist = list()
    fileNamesLengths = [
        len(os.path.basename(file)) + 1 for file in norm_list
    ]  # +1 for the \x00
    stringTableOffsets = [
        sum(fileNamesLengths[:n]) for n in range(filesNb)
    ]
    norm_header = b''
    norm_header += b'HFS0'
    norm_header += pk('<I', filesNb)
    norm_header += pk('<I', len(stringTable) + remainder)
    norm_header += b'\x00\x00\x00\x00'
    for n in range(filesNb):
        norm_header += pk('<Q', fileOffsets[n])
        norm_header += pk('<Q', fileSizes[n])
        norm_header += pk('<I', stringTableOffsets[n])
        norm_header += hashregion
        norm_header += b'\x00\x00\x00\x00\x00\x00\x00\x00'
        norm_header += bytes.fromhex(shalist[n])
    norm_header += stringTable.encode()
    norm_header += remainder * b'\x00'
    normSize = len(norm_header) + sum(fileSizes)
    #print (hx(norm_header))
    #SECURE HEADER
    filesNb = len(sec_list)
    stringTable = '\x00'.join(str(nca) for nca in sec_list)
    headerSize = 0x10 + (filesNb) * 0x40 + len(stringTable)
    sec_multiplier = math.ceil(headerSize / 0x200)
    remainder = 0x200 * sec_multiplier - headerSize
    headerSize += remainder
    # The secure partition gets real sizes and hashes from the caller.
    fileSizes = sec_fileSizes
    fileOffsets = [sum(fileSizes[:n]) for n in range(filesNb)]
    shalist = sec_shalist
    fileNamesLengths = [
        len(os.path.basename(file)) + 1 for file in sec_list
    ]  # +1 for the \x00
    stringTableOffsets = [
        sum(fileNamesLengths[:n]) for n in range(filesNb)
    ]
    sec_header = b''
    sec_header += b'HFS0'
    sec_header += pk('<I', filesNb)
    sec_header += pk('<I', len(stringTable) + remainder)
    sec_header += b'\x00\x00\x00\x00'
    for n in range(filesNb):
        sec_header += pk('<Q', fileOffsets[n])
        sec_header += pk('<Q', fileSizes[n])
        sec_header += pk('<I', stringTableOffsets[n])
        sec_header += hashregion
        sec_header += b'\x00\x00\x00\x00\x00\x00\x00\x00'
        sec_header += bytes.fromhex(shalist[n])
    sec_header += stringTable.encode()
    sec_header += remainder * b'\x00'
    secSize = len(sec_header) + sum(fileSizes)
    #print (hx(sec_header))
    #ROOT HEADER
    # Hashed-region sizes for the three partition headers (their padded
    # header sizes).
    root_hreg = list()
    hr = 0x200 * upd_multiplier
    root_hreg.append(hr.to_bytes(4, byteorder='little'))
    hr = 0x200 * norm_multiplier
    root_hreg.append(hr.to_bytes(4, byteorder='little'))
    hr = 0x200 * sec_multiplier
    root_hreg.append(hr.to_bytes(4, byteorder='little'))
    root_list = list()
    root_list.append("update")
    root_list.append("normal")
    root_list.append("secure")
    fileSizes = list()
    fileSizes.append(updSize)
    fileSizes.append(normSize)
    fileSizes.append(secSize)
    #print(fileSizes)
    filesNb = len(root_list)
    #print(filesNb)
    stringTable = '\x00'.join(os.path.basename(file) for file in root_list)
    #print(stringTable)
    headerSize = 0x10 + (filesNb) * 0x40 + len(stringTable)
    root_multiplier = math.ceil(headerSize / 0x200)
    remainder = 0x200 * root_multiplier - headerSize
    headerSize += remainder
    #print(headerSize)
    fileOffsets = [sum(fileSizes[:n]) for n in range(filesNb)]
    # Root entries are hashed over each partition header blob.
    shalist = list()
    sha = sha256(upd_header).hexdigest()
    shalist.append(sha)
    sha = sha256(norm_header).hexdigest()
    shalist.append(sha)
    sha = sha256(sec_header).hexdigest()
    shalist.append(sha)
    fileNamesLengths = [
        len(os.path.basename(file)) + 1 for file in root_list
    ]  # +1 for the \x00
    stringTableOffsets = [
        sum(fileNamesLengths[:n]) for n in range(filesNb)
    ]
    root_header = b''
    root_header += b'HFS0'
    root_header += pk('<I', filesNb)
    root_header += pk('<I', len(stringTable) + remainder)
    root_header += b'\x00\x00\x00\x00'
    for n in range(filesNb):
        root_header += pk('<Q', fileOffsets[n])
        root_header += pk('<Q', fileSizes[n])
        root_header += pk('<I', stringTableOffsets[n])
        root_header += root_hreg[n]
        root_header += b'\x00\x00\x00\x00\x00\x00\x00\x00'
        root_header += bytes.fromhex(shalist[n])
    root_header += stringTable.encode()
    root_header += remainder * b'\x00'
    #print (hx(root_header))
    rootSize = len(root_header) + sum(fileSizes)
    return root_header, upd_header, norm_header, sec_header, rootSize, upd_multiplier, norm_multiplier, sec_multiplier
def repack_nso(path_original, path_patch, path_out):
    """Repack an NSO image with a freshly compressed, patched .text.

    The rodata/data segments are carried over unchanged; the header
    hash, compressed-text size and segment offsets are updated to match.
    """
    # Load the original image and the replacement text.
    with open(path_original, 'rb') as fin:
        image = bytearray(fin.read())
    with open(path_patch, 'rb') as fin:
        plain_text = fin.read()
    new_text_hash = sha256(plain_text)
    # store_size=False: raw LZ4 block, no size prefix (the header tracks it).
    new_text = lz4.block.compress(plain_text, store_size=False)
    new_text_size = len(new_text)
    # Original segment locations, straight from the NSO header.
    text_off = up('<I', image[0x10:0x14])[0]
    old_rodata_off = up('<I', image[0x20:0x24])[0]
    rodata_size = up('<I', image[0x64:0x68])[0]
    rodata_blob = image[old_rodata_off:old_rodata_off + rodata_size]
    old_data_off = up('<I', image[0x30:0x34])[0]
    data_size = up('<I', image[0x68:0x6C])[0]
    data_blob = image[old_data_off:old_data_off + data_size]
    # Output segments are packed back-to-back right after the new text.
    rodata_off = text_off + new_text_size
    data_off = rodata_off + rodata_size
    output = bytearray(data_off + data_size)
    # Carry over the original header, then patch the changed fields.
    output[0x0:0x100] = image[0x0:text_off]
    output[0xA0:0xC0] = new_text_hash
    output[0x60:0x64] = pk('<I', new_text_size)
    output[0x20:0x24] = pk('<I', rodata_off)  # rodata offset
    output[0x30:0x34] = pk('<I', data_off)  # data offset
    # Splice the three segments into place.
    output[text_off:text_off + new_text_size] = new_text
    output[rodata_off:rodata_off + rodata_size] = rodata_blob
    output[data_off:data_off + data_size] = data_blob
    print('text:')
    print('\nrodata:')
    print('Offset {}'.format(rodata_off))
    print('len(compressed_rodata) {}'.format(len(rodata_blob)))
    print('Original header size {}'.format(rodata_size))
    print('\ndata:')
    print('Offset {}'.format(data_off))
    print('len(compressed_rodata) {}'.format(len(data_blob)))
    print('Original header size {}'.format(data_size))
    with open(path_out, 'wb') as fout:
        fout.write(output)
def main(argc, argv):
    # Packs kernel_ldr + kernel + optional KIP1 initial processes (wrapped
    # in an INI1 container) into a page-aligned mesosphere image targeting
    # firmware 13.0.0.
    if argc < 4:
        print(
            'Usage: %s kernel_ldr.bin kernel.bin output.bin [initial_process.kip ...]'
            % argv[0])
        return 1
    with open(argv[1], 'rb') as f:
        kernel_ldr = f.read()
    with open(argv[2], 'rb') as f:
        kernel = f.read()
    # The metadata block sits 4 bytes in and starts with the 'MSS0' magic.
    kernel_metadata_offset = 4
    assert (kernel_metadata_offset <= len(kernel) - 0x40)
    assert (kernel[kernel_metadata_offset:kernel_metadata_offset + 4] == b'MSS0')
    bss_start, bss_end, kernel_end = up(
        '<III', kernel[kernel_metadata_offset + 0x30:kernel_metadata_offset + 0x3C])
    assert (bss_end >= bss_start)
    assert (bss_end == kernel_end)
    assert (len(kernel) <= kernel_end)
    # Zero-pad the kernel image out to its declared end (covers .bss).
    if len(kernel) < kernel_end:
        kernel += b'\x00' * (kernel_end - len(kernel))
    assert (kernel_end == len(kernel))
    # Collect KIP1 initial processes from the command line; unreadable or
    # non-KIP1 files are skipped silently.
    embedded_kips = b''
    num_kips = 0
    for kip_file in argv[4:]:
        try:
            with open(kip_file, 'rb') as f:
                data = f.read()
                if data.startswith(b'KIP1'):
                    embedded_kips += data
                    num_kips += 1
        except:
            pass
    if num_kips > 0:
        # INI1 container header: magic, total size, kip count, reserved.
        embedded_ini_header = pk('<4sIII', b'INI1',
                                 len(embedded_kips) + 0x10, num_kips, 0)
    else:
        embedded_ini_header = b''
    embedded_ini_offset = align_up(kernel_end, 0x1000)
    embedded_ini_end = embedded_ini_offset + len(embedded_ini_header) + len(
        embedded_kips)
    # When no INI is embedded, reserve one empty page where it would go.
    kernel_ldr_offset = align_up(embedded_ini_end, 0x1000) + (
        0x1000 if len(embedded_ini_header) == 0 else 0)
    kernel_ldr_end = kernel_ldr_offset + len(kernel_ldr)
    mesosphere_end = align_up(kernel_ldr_end, 0x1000)
    with open(argv[3], 'wb') as f:
        f.write(kernel[:kernel_metadata_offset + 4])
        # Patch the metadata with INI/loader offsets and target firmware.
        f.write(
            pk('<QQI', embedded_ini_offset, kernel_ldr_offset,
               atmosphere_target_firmware(13, 0, 0)))
        f.write(kernel[kernel_metadata_offset + 0x18:])
        f.seek(embedded_ini_offset)
        f.write(embedded_ini_header)
        f.write(embedded_kips)
        f.seek(embedded_ini_end)
        f.seek(kernel_ldr_offset)
        f.write(kernel_ldr)
        f.seek(mesosphere_end)
        # Trailing zero page so the output ends on a page boundary.
        f.write(b'\x00' * 0x1000)
    return 0
def __init__(self):
    # Pre-builds a formula-configuration frame: 20 formulas, each with
    # 4 steps of 8 pump records (pump id byte + two u16 fields).
    # NOTE(review): Python 2 print statements at the end.
    data = pk('B' ,0x02)
    data += pk('B', 0X07)
    # Payload length 0x0cec = 3308 = 4 + 2 + 2 + 20 * (1 + 4 * (1 + 8 * 5)),
    # which matches the writes below exactly.
    data += pk('<H', 0x0cec)# length
    data += pk('<I', 0x0901000a)
    data += pk('<H', 0x0002)
    data += pk('<H', 20)
    for i in range(20):
        # formula type
        data += pk('B', i)
        for j in range(4):
            #step
            data += pk('B', 1)
            # prio
            for h in range(8):
                # pump
                data += pk('B', h)
                # pump type
                data += pk('<H', h)
                # open
                data += pk('<H', h)
                # volume
    # frame tail constant + ETX, as in the other frame builders
    data += pk('<H', 0xd1b8)
    data += pk('B', 0x03)
    self.data = data
    print "data len is"
    print len(data)
def pk_u64(nb, endianness='<'):
    """Pack nb as an unsigned 64-bit integer (little-endian by default)."""
    fmt = '%sQ' % endianness
    return pk(fmt, nb)
def __init__(self, month=4, day=1, devId=0x09010009, washId=1):
    """Pre-build a wash-record frame with two timestamps and 8 entries."""
    parts = [
        pk('B', 0x02),         # frame head
        pk('B', 0X02),         # frame type
        pk('<H', 0x0036),      # payload length
        pk('<I', devId),
        pk('<H', washId),
        pk('<I', 0x00000203),
        pk('<H', 0x0006),
        pk('B', 0x02),
        pk('B', 0x02),
        # first timestamp: year 0x07e1, month, day, then three bytes
        # (0x10/0x39/0x32 -- presumably hh:mm:ss; confirm)
        pk('<H', 0x07e1),
        pk('B', month),
        pk('B', day),
        pk('B', 0x10),
        pk('B', 0x39),
        pk('B', 0x32),
        # second timestamp, same shape
        pk('<H', 0x07e1),
        pk('B', month),
        pk('B', day),
        pk('B', 0x10),
        pk('B', 0x3a),
        pk('B', 0x0f),
        pk('<H', 0x0008),
    ]
    for i in range(8):
        parts.append(pk('B', i))
        parts.append(pk('<H', 0x0005 * (i + 1)))
    parts.append(pk('<H', 0xab14))  # frame tail constant
    parts.append(pk('B', 0x03))     # ETX
    self.data = b''.join(parts)
def __init__(self, data):
    """Capture the frame type and pre-pack the device id as LE u32."""
    devid_bytes = pk('<I', data[DEVID])
    self.ftype = data[TYPE]
    self.tmpbuf = devid_bytes