def dump(self):
        print self._header
        print ""

        if self._text:
            print "Text:"
            hexdump(self._text)
            print ""

        if self._data:
            print "Data:"
            hexdump(self._data)
            print ""

        print "Symbols:"
        for symbol in self._symbols:
            print " ", symbol.as_string(self._strings)
        print ""

        if self._text_relocs:
            print "Text relocations:"
            for reloc in self._text_relocs:
                print " ", reloc.as_string(self._strings)
            print ""

        if self._data_relocs:
            print "Data relocations:"
            for reloc in self._text_relocs:
                print " ", reloc.as_string(self._strings)
            print ""
Beispiel #2
0
def ReadFile(path):
    """Parse an Amiga Hunk file at *path* and return the list of parsed hunks.

    Marks the file 'executable' when a HUNK_HEADER is seen, or 'library'
    when more than one HUNK_UNIT is present.  Raises NotImplementedError
    for hunk types with no registered parser class.
    """
    with HunkFile(path) as hf:
        hunks = []
        units = 0

        while not hf.eof():
            # Peek at the hunk id; rollback restores the read position.
            with hf.rollback():
                hunkId = hf.readLong()

            type_ = Hunk.getType(hunkId)

            # BUG FIX: compare strings with '==', not 'is'.  'is' tests
            # object identity and only worked by accident of interning.
            if type_ == 'HUNK_HEADER':
                hf.type = 'executable'

            if type_ == 'HUNK_UNIT':
                units += 1
                if units > 1:
                    hf.type = 'library'

            hunk = HunkClassMap.get(type_, None)

            if not hunk:
                raise NotImplementedError('%s not handled.' % type_)

            try:
                hunks.append(hunk.parse(hf))
            except ValueError:
                log.error('Parse error at position 0x%x.', hf.tell())
                util.hexdump(hf.read())

        return hunks
Beispiel #3
0
  def dump(self):
    print self._header
    print ''

    if self._text:
      print 'Text:'
      hexdump(self._text)
      print ''

    if self._data:
      print 'Data:'
      hexdump(self._data)
      print ''

    print 'Symbols:'
    for symbol in self._symbols:
      print ' ', symbol.as_string(self._strings)
    print ''

    if self._text_relocs:
      print 'Text relocations:'
      for reloc in self._text_relocs:
        print ' ', reloc.as_string(self._strings)
      print ''

    if self._data_relocs:
      print 'Data relocations:'
      for reloc in self._text_relocs:
        print ' ', reloc.as_string(self._strings)
      print ''
Beispiel #4
0
    def writeFile(self, f, decrypted_chunks):
        # Reassemble a backed-up file from its decrypted chunks, writing it
        # under outputFolder (characters illegal in filenames replaced with
        # '_'), then unwrap the per-file key and decrypt the protection
        # layer when the file carries an EncryptionKey attribute.
        path = os.path.join(self.outputFolder,
                            re.sub(r'[:|*<>?"]', "_", f.RelativePath))
        print path
        makedirs(os.path.dirname(path))
        ff = open(path, "wb")
        # SHA-1 over the written chunks; the digest is computed but not
        # checked here — presumably verified elsewhere (TODO confirm).
        h = hashlib.sha1()
        for i in xrange(len(decrypted_chunks)):
            d = decrypted_chunks[i]
            h.update(d)
            ff.write(d)
        ff.close()

        if f.Attributes.EncryptionKey:
            EncryptionKey = f.Attributes.EncryptionKey
            #ProtectionClass = f.Attributes.ProtectionClass
            hexdump(EncryptionKey)
            # Protection class is big-endian at offset 0x18 of the key blob
            # and must agree with the file's ProtectionClass attribute.
            ProtectionClass = struct.unpack(">L", EncryptionKey[0x18:0x1C])[0]
            assert ProtectionClass == f.Attributes.ProtectionClass
            #EncryptionKeyVersion=2 => starts with keybag uuid
            if f.Attributes.EncryptionKeyVersion and f.Attributes.EncryptionKeyVersion == 2:
                assert self.kb.uuid == EncryptionKey[:0x10]
                keyLength = struct.unpack(">L", EncryptionKey[0x20:0x24])[0]
                assert keyLength == 0x48
                wrapped_key = EncryptionKey[0x24:]
            else:  #XXX old format ios 5 backup
                wrapped_key = EncryptionKey[0x1C:]
            print "ProtectionClass= %d" % ProtectionClass
            filekey = self.kb.unwrapCurve25519(ProtectionClass, wrapped_key)
            if not filekey:
                print "Failed to unwrap file key for file %s !!!" % f.RelativePath
            else:
                print "filekey", filekey.encode("hex")
                self.decryptProtectedFile(path, filekey,
                                          f.Attributes.DecryptedSize)
Beispiel #5
0
    def writeFile(self, f, decrypted_chunks):
        # Reassemble a backup file from decrypted chunks and, if the file is
        # individually protected (EncryptionKey attribute present), unwrap
        # its file key and decrypt it in place.
        path = os.path.join(self.outputFolder, re.sub(r'[:|*<>?"]', "_", f.RelativePath))
        print path
        makedirs(os.path.dirname(path))
        ff = open(path, "wb")
        # SHA-1 accumulated over the chunks; not checked in this method.
        h = hashlib.sha1()
        for i in xrange(len(decrypted_chunks)):
            d = decrypted_chunks[i]
            h.update(d)
            ff.write(d)
        ff.close()

        if f.Attributes.EncryptionKey:
            EncryptionKey = f.Attributes.EncryptionKey
            #ProtectionClass = f.Attributes.ProtectionClass
            hexdump(EncryptionKey)
            # Big-endian protection class at blob offset 0x18; must match
            # the file's ProtectionClass attribute.
            ProtectionClass = struct.unpack(">L", EncryptionKey[0x18:0x1C])[0]
            assert ProtectionClass == f.Attributes.ProtectionClass
            #EncryptionKeyVersion=2 => starts with keybag uuid
            if f.Attributes.EncryptionKeyVersion and f.Attributes.EncryptionKeyVersion == 2:
                assert self.kb.uuid == EncryptionKey[:0x10]
                keyLength = struct.unpack(">L", EncryptionKey[0x20:0x24])[0]
                assert keyLength == 0x48
                wrapped_key = EncryptionKey[0x24:]
            else:#XXX old format ios 5 backup
                wrapped_key = EncryptionKey[0x1C:]
            print "ProtectionClass= %d" % ProtectionClass
            filekey = self.kb.unwrapCurve25519(ProtectionClass, wrapped_key)
            if not filekey:
                print "Failed to unwrap file key for file %s !!!" % f.RelativePath
            else:
                print "filekey",filekey.encode("hex")
                self.decryptProtectedFile(path, filekey, f.Attributes.DecryptedSize)
Beispiel #6
0
def validate_readv(expecteds, actual, msg):
    # Validate *actual* (a byte string) against the set of acceptable values
    # in *expecteds*.  Integer results are delegated to validate_readiv.
    # On mismatch, print both sides (str2hex and/or hexdump form, per the
    # do_str2hex / do_hexdump globals) and raise if do_exception is set.
    if expecteds is None:
        # No expectation recorded for this read — nothing to check.
        return
    if type(actual) is int:
        return validate_readiv(expecteds, actual, msg)

    if actual not in expecteds:
        print 'Failed %s' % msg
        for expected in expecteds:
            if do_str2hex:
                print '  Expected; %d' % (len(expected), )
                print str2hex(expected, prefix='    ')
            else:
                print '  Expected:   %d %s' % (len(expected),
                                               binascii.hexlify(expected))
            if do_hexdump:
                hexdump(expected, indent='    ')
        if do_str2hex:
            print '  Actual; %d' % (len(actual), )
            print str2hex(actual, prefix='    ')
        else:
            print '  Actual:   %d %s' % (len(actual), binascii.hexlify(actual))
        if do_hexdump:
            hexdump(actual, indent='    ')
        if do_exception:
            raise Exception('failed validate: %s' % msg)
def ReadFile(path):
  """Parse an Amiga Hunk file and return its list of parsed hunks.

  Sets hf.type to 'executable' on HUNK_HEADER, or 'library' when more
  than one HUNK_UNIT occurs.  Raises NotImplementedError for unhandled
  hunk types.
  """
  with HunkFile(path) as hf:
    hunks = []
    units = 0

    while not hf.eof():
      # Peek at the hunk id without consuming it.
      with hf.rollback():
        hunkId = hf.readLong()

      type_ = Hunk.getType(hunkId)

      # BUG FIX: string equality must use '==', not the identity test 'is'.
      if type_ == 'HUNK_HEADER':
        hf.type = 'executable'

      if type_ == 'HUNK_UNIT':
        units += 1
        if units > 1:
          hf.type = 'library'

      hunk = HunkClassMap.get(type_, None)

      if not hunk:
        raise NotImplementedError('%s not handled.' % type_)

      try:
        hunks.append(hunk.parse(hf))
      except ValueError:
        log.error('Parse error at position 0x%x.', hf.tell())
        util.hexdump(hf.read())

    return hunks
Beispiel #8
0
def validate_readv(expecteds, actual, msg):
    # Validate *actual* against the acceptable byte strings in *expecteds*;
    # integer results are delegated to validate_readiv.  On mismatch, print
    # expected/actual in hex per the do_str2hex / do_hexdump globals, and
    # raise when do_exception is set.
    if expecteds is None:
        return
    if type(actual) is int:
        return validate_readiv(expecteds, actual, msg)

    if actual not in expecteds:
        print 'Failed %s' % msg
        for expected in expecteds:
            if do_str2hex:
                print '  Expected; %d' % (len(expected),)
                print str2hex(expected, prefix='    ')
            else:
                print '  Expected:   %d %s' % (len(expected), binascii.hexlify(expected))
            if do_hexdump:
                hexdump(expected, indent='    ')
        if do_str2hex:
            print '  Actual; %d' % (len(actual),)
            print str2hex(actual, prefix='    ')
        else:
            print '  Actual:   %d %s' % (len(actual), binascii.hexlify(actual))
        if do_hexdump:
            hexdump(actual, indent='    ')
        if do_exception:
            raise Exception('failed validate: %s' % msg)
Beispiel #9
0
    def _recv_cmd(self, c, noprint=False):
        msg = c.read_device(0x140)

        pos = 0x0
        cursize = 0x20
        msgauth = msg[pos:pos + cursize]
        pos += cursize
        if msgauth != nxsm_auth:
            raise Exception("TransportCmd: Recv-msg auth is invalid.")

        cursize = 0x4
        magic = struct.unpack('<I', msg[pos:pos + cursize])[0]
        pos += cursize
        if magic != 0x4d53584e:
            raise Exception(
                "TransportCmd: Recv-msg magic is invalid: 0x%08x." % magic)
        cursize = 0x4
        version = struct.unpack('<I', msg[pos:pos + cursize])[0]
        pos += cursize
        if version != 1:
            raise Exception(
                "TransportCmd: Recv-msg version is invalid: 0x%08x." % version)
        cursize = 0x4
        raw_data_size = struct.unpack('<I', msg[pos:pos + cursize])[0]
        pos += cursize
        if raw_data_size >= (0x100 >> 2):
            raise Exception(
                "TransportCmd: Recv-msg raw_data_size is too large: 0x%x." %
                raw_data_size)

        cursize = 0x4
        tmp_types = struct.unpack('<BBBB', msg[pos:pos + cursize])
        pos += cursize
        cursize = 0x10
        tmp_sizes = struct.unpack('<IIII', msg[pos:pos + cursize])
        pos += cursize

        for i in range(4):
            if i < len(self.buffer_types):
                self.buffer_types[i] = tmp_types[i]
                self.buffer_sizes[i] = tmp_sizes[i]

        cursize = raw_data_size << 2
        self.rawdata = msg[pos:pos + cursize]
        pos += cursize

        if not noprint:
            print "Raw msg reply:"
            print hexdump(msg)
            print "Rawdata payload:"
            print hexdump(self.rawdata[0x4:])
            print "Retval: 0x%x" % self.recv_ret
        self.recv_ret = struct.unpack('<I', self.rawdata[0x0:0x0 + 0x4])[0]

        for i in range(4):
            if i < len(self.buffer_types):
                self.buffers[i] = ''
                if self.buffer_types[i] == 1:  # To host
                    self.buffers[i] = c.read_device(self.buffer_sizes[i])
Beispiel #10
0
 def read(self, addr, length):
     """Read and hexdump a RAM region; *addr* and *length* are hex strings."""
     self._checkConnected()
     start = int(addr, 16)
     count = int(length, 16)
     payload = self.client.read(start, count)
     # Acknowledge the transfer so the device can proceed.
     self.client.conn.send(b"\xAA")
     hexdump(payload, start)
Beispiel #11
0
    def dump(self):
        print '{0} (format: {1!r})'.format(self.type, self.fmt)

        if self.fmt is 'GNU':
            for symbol in self.data[0]:
                print ' ', symbol.as_string(self.data[1])
        else:
            util.hexdump(self.data)
  def dump(self):
    print '{0} (format: {1!r})'.format(self.type, self.fmt)

    if self.fmt is 'GNU':
      for symbol in self.data[0]:
        print ' ', symbol.as_string(self.data[1])
    else:
      util.hexdump(self.data)
 def do_xxd(self, p):
     """Hexdump a file from the volume.  Usage: xxd <path> [length]."""
     args = p.split()
     file_path = self.get_path(args[0])
     contents = self.volume.readFile(file_path, returnString=True)
     if not contents:
         return
     # An optional second argument limits how many bytes are dumped.
     limit = int(args[1]) if len(args) > 1 else None
     hexdump(contents if limit is None else contents[:limit])
 def do_xxd(self, p):
     """Hexdump a volume file, optionally truncated to N bytes."""
     tokens = p.split()
     data = self.volume.readFile(self.get_path(tokens[0]), returnString=True)
     if not data:
         return
     # Truncate when a byte count was supplied as the second argument.
     if len(tokens) > 1:
         data = data[:int(tokens[1])]
     hexdump(data)
Beispiel #15
0
 def do_xxd(self, p):
     # Hexdump a file from the volume.  Usage: xxd <path> [length].
     # Without an explicit length the dump is capped at 0x200 bytes.
     t = p.split()
     path = self.get_path(t[0])
     data = self.volume.readFile(path, returnString=True)
     if not data:
         return
     if len(t) > 1:
         hexdump(data[:int(t[1])])
     else:
         hexdump(data[:0x200])
         if len(data) > 0x200:
             print "Output truncated to %d bytes" % 0x200
Beispiel #16
0
 def do_hexdump(self, p):
     """Hexdump a file fetched over AFC.  Usage: hexdump <name> [length]."""
     args = p.split(" ")
     if len(args) < 1:
         return
     # A second argument, when exactly one is given, limits the dump size.
     limit = int(args[1]) if len(args) == 2 else 0
     contents = self.afc.get_file_contents(self.curdir + "/" + args[0])
     if not contents:
         return
     hexdump(contents[:limit] if limit else contents)
Beispiel #17
0
 def do_hexdump(self, p):
     """Dump the contents of a remote file as hex (optional byte limit)."""
     parts = p.split(" ")
     if len(parts) < 1:
         return
     length = 0
     if len(parts) == 2:
         length = int(parts[1])
     data = self.afc.get_file_contents(self.curdir + "/" + parts[0])
     if not data:
         return
     if length:
         data = data[:length]
     hexdump(data)
Beispiel #18
0
 def watch(self, addr, length):
     """Continuously hexdump a RAM region until interrupted with Ctrl-C."""
     self._checkConnected()
     start = int(addr, 16)
     count = int(length, 16)
     try:
         while True:
             snapshot = self.client.read(start, count)
             # Clear the screen and home the cursor before each refresh.
             print("\x1B[2J\x1B[H", end='')
             hexdump(snapshot, start)
             # ACK; BB=retry, CC=cancel
             self.client.conn.send(b"\xAA")
             time.sleep(0.01)
     except KeyboardInterrupt:
         self.client.conn.send(b"\xAA")
Beispiel #19
0
def dumpCurrentUser(buf):
    # Parse and pretty-print a PowerPoint CurrentUser record: the 8-byte
    # record header, the 20-byte fixed body, the ANSI user name, the
    # release version, and finally the UTF-16 user name as a hexdump.

    rVer, rInst, rType, rLen = getHdr(buf[:8])

    printHdr(rVer, rInst, rType, rLen, 1, 1, 0)

    # Fixed-size fields occupy bytes 8..28 of the record.
    (
        size,
        headerToken,
        offsetToCurrentEdit,
        lenUserName,
        docFileVersion,
        majorVersion,
        minorVersion,
        unused,
    ) = struct.unpack("IIIHHBBH", buf[8:28])
    # Variable-length fields follow the fixed header.
    ansiUserName = buf[28 : 28 + lenUserName]
    relVersion = struct.unpack("I", buf[28 + lenUserName : 28 + lenUserName + 4])[0]
    uniUserName = buf[28 + lenUserName + 4 :]

    print (
        "\tDWORD size %#x (%d)\n\tDWORD headerToken %#x (%d)\n"
        "\tDWORD offsetToCurrentEdit %#x (%d)\n\tWORD lenUserName %#x (%d)\n"
        "\tWORD docFileVersion %#x (%d)\n\tBYTE majorVersion %#x (%d)\n"
        "\tBYTE minorVersion %#x (%d)\n\tWORD unused %#x (%d)\n"
        "\tASCIIZ AnsiUserName '%s'\n\tDWORD relVersion %#x (%d)\n"
        "\tUNICODE uniUserName:\n"
        % (
            size,
            size,
            headerToken,
            headerToken,
            offsetToCurrentEdit,
            offsetToCurrentEdit,
            lenUserName,
            lenUserName,
            docFileVersion,
            docFileVersion,
            majorVersion,
            majorVersion,
            minorVersion,
            minorVersion,
            unused,
            unused,
            ansiUserName,
            relVersion,
            relVersion,
        )
    )
    print hexdump(uniUserName, indent=12)
Beispiel #20
0
def dumpPPD(buf, maxLen, depth, count_offset, recList=None):
    # Recursively walk a PowerPoint record stream, printing each record.
    # Containers (version nibble 0xf) recurse; atoms are printed via their
    # descriptor / custom printer, or hexdumped when unknown.  count_offset
    # is a two-slot list used as a mutable in/out parameter carrying the
    # running record count and absolute stream offset.  Atoms are appended
    # to recList when one is supplied.  Returns the number of bytes read.

    nRead = 0
    while nRead  < maxLen:

        #read the header, check for atom/container
        rVer, rInst, rType, rLen = getHdr(buf[nRead:nRead + PPT_HDR_LEN])
        nRead += PPT_HDR_LEN

        # Version nibble 0xf marks a container record, otherwise an atom.
        if (rVer & 0xf) == 0xf:
            isAtom = False
            prefix = "["*depth + "Container"
        else:
            isAtom = True
            prefix = "{"*depth + "Atom"

        #try to look up record description
        try:
            rDesc = recDict[rType]
            rName = rDesc[0]
        except KeyError:
            rDesc = None
            rName = "!!!Unknown"

        #pretty print the header
        print "%s(%d) %s (%#x, %d) size %#x (%d), instance %#x version %#x [offset %#x (%d)]" % \
                (prefix, count_offset[REC_COUNT],
                rName, rType, rType, rLen, rLen, rInst, rVer,
                count_offset[REC_OFFSET], count_offset[REC_OFFSET])

        #update the mutable reference parameters: record count, total offset
        count_offset[REC_COUNT] += 1
        count_offset[REC_OFFSET] += PPT_HDR_LEN

        if not isAtom:
            # Recurse into the container body, clamped to what remains.
            nRead += dumpPPD(buf[nRead:maxLen], min(maxLen - nRead, rLen), depth + 1, count_offset, recList)
            print "]"*depth, "End container %s" % rName
        else:

            #only save atoms
            if recList is not None:
                recList.append((rType, buf[nRead:nRead+(min(rLen, maxLen - nRead))]))

            if rDesc:
                printAtom(buf[nRead:nRead+(min(rLen, maxLen - nRead))], rLen, rDesc)

                #look for a manual rec printer
                try:
                    handler = customPrinters[rType]
                    handler(buf[nRead:nRead+(min(rLen, maxLen - nRead))])
                except KeyError:
                    pass
            else:
                # NOTE(review): nRead has already been advanced past the
                # header above, so the extra +PPT_HDR_LEN here appears to
                # skip 8 bytes of atom data — verify against the format.
                print("No descriptor, dumping:\n",
                    hexdump(buf[nRead+PPT_HDR_LEN:nRead+PPT_HDR_LEN+rLen], indent=8))

            nRead += rLen
            count_offset[REC_OFFSET] += rLen

    return nRead
Beispiel #21
0
def decode_nsec3_rdata(pkt, offset, rdlen):
    """Decode NSEC3 rdata into presentation format:
    "hashalg flags iterations salt next-hashed-owner rrtypes"."""

    # Base32hex ("extended hex", 0-9A-V) is required for the hashed owner
    # name; Python's b32encode emits standard base32 (A-Z2-7), so translate
    # between the two alphabets.
    b32_to_ext_hex = bytes.maketrans(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567',
                                     b'0123456789ABCDEFGHIJKLMNOPQRSTUV')

    end_rdata = offset + rdlen
    # Fixed header: hash algorithm, flags, iterations, salt length.
    hashalg, flags, iterations, saltlen = struct.unpack(
        '!BBHB', pkt[offset:offset + 5])
    salt = hexdump(pkt[offset + 5:offset + 5 + saltlen])
    offset += (5 + saltlen)
    hashlen, = struct.unpack('!B', pkt[offset:offset + 1])
    offset += 1

    hashed_next_owner = base64.b32encode(pkt[offset:offset + hashlen])
    hashed_next_owner = hashed_next_owner.translate(b32_to_ext_hex).decode()
    offset += hashlen
    # The remainder of the rdata is the type bitmap: repeated
    # (window number, length, bitmap) triples.
    type_bitmap = pkt[offset:end_rdata]
    p = type_bitmap
    rrtypelist = []
    while p:
        windownum, winlen = struct.unpack('BB', p[0:2])
        bitmap = p[2:2 + winlen]
        rrtypelist += decode_typebitmap(windownum, bitmap)
        p = p[2 + winlen:]
    rrtypes = ' '.join(rrtypelist)
    result = "%d %d %d %s %s %s" % \
             (hashalg, flags, iterations, salt, hashed_next_owner, rrtypes)
    return result
Beispiel #22
0
 def __str__(self):
     """Render the section: name, type, flags, zero-padded offset, and a
     hexdump of the content."""
     # ELF32 offsets print as 8 hex digits, ELF64 as 16.
     width = 8 if self._class == 'ELF32' else 16
     lines = [
         'Section ' + self.name,
         '---',
         'Type:     ' + self.type,
         'Flags:    ' + self.flags,
         'Offset:   ' + '0x{num:0{width}x}'.format(num=self.offset,
                                                   width=width),
         'Content:\n' + hexdump(self.content, self.offset),
     ]
     return '\n'.join(lines) + '\n'
Beispiel #23
0
def dump_loop(dev, req, max_read, dump_addr, dump_len, do_hexdump=True):
    bin = ''
    to_dump = dump_len
    addr_cur = dump_addr
    while to_dump > 0:
        l_this = max(to_dump, to_dump - max_read)
        l_this = min(l_this, max_read)
        print
        print 'Addr 0x%04X, len 0x%04X' % (addr_cur, l_this)
        
        res = dev.controlRead(0xC0, 0xB0, req, addr_cur, l_this, timeout=1000)
        bin += res
        if do_hexdump:
            hexdump(res)
        if len(res) != l_this:
            print "WARNING: wanted 0x%04X bytes but got 0x%04X" % (dump_len, len(res),)
        to_dump -= l_this
        addr_cur += l_this
    return bin
def msoDrawPrinter(data, dLen, depth=2):
    # Walk MSODRAWING sub-records.  Each record starts with an 8-byte
    # header: a packed DWORD (version nibble, instance, record type "fbt")
    # followed by a DWORD length.  Version 0xf marks a container, which is
    # recursed into; other records are printed via their descriptor's
    # custom printer / format, or hexdumped.
    off = 0
    while off <= dLen - 8:
        try:
            vif, l = struct.unpack("<LL", data[off:off+8])
        except struct.error:
            return

        # Unpack the combined version/instance/type DWORD.
        ver = vif & 0xf
        inst = (vif & 0xfff0) >> 4
        fbt = (vif & 0xffff0000) >> 16

        try:
            desc = msoDrawTypeMap[fbt]
            print(" "*(depth*4) + "Subtype %s [%#x (%d)] ver %#x inst %#x offset %#x, len %#x (%d) (%s)" %
                    (desc[0], fbt, fbt, ver, inst, off, l, l, desc[1]))
            if ver == 0xf:
                # Container: recurse into the enclosed records.
                msoDrawPrinter(data[off+8:off+8+l], l, depth + 1)
            else:

                #CONTINUE records can make the actual length of the property span across
                #multiple records, which we don't currently support, so we chop it at what
                #we have left in the current record
                lenLeft = min(l, dLen - off - 8)
                if lenLeft != l:
                    print "!!Warning, data chopped short, wanted %d, got %d, CONTINUE rec?" % \
                            (l, lenLeft)

                #print out sub-record data with custom printer if available
                if desc[2]:
                    if isinstance(desc[2],  list):
                        printFmt(data[off+8:off+8+lenLeft], lenLeft, desc[2][0], desc[2][1], depth + 1)
                    elif callable(desc[2]):
                        desc[2](data[off+8:off+8+lenLeft], lenLeft, inst, depth + 1)
                else:
                    print hexdump(data[off+8:off+8+lenLeft], indent=(depth + 1)*4),
        except KeyError:
            # Unknown record type: print raw header info and hexdump body.
            print " "*(depth*4) + "obj.subtype %#x (%d) ver %#x inst %#x obj.sublen %#x (%d)" % \
                        (fbt, fbt, ver, inst, l, l)
            print hexdump(data[off+8:min(off+8+l, dLen - 8)], indent=(depth + 1)*4),
        off += l + 8
    return -1
def optPrinter(data, dLen, propCount, depth):
    # Print an OPT record: first *propCount* packed 6-byte property
    # descriptors (2-byte id/flags, 4-byte op), then the trailing data of
    # any "complex" properties, whose op field holds their byte length.

    cProps = []
    off = 0

    #first we have an array of property descriptors packed together
    while off <= dLen - 6 and propCount > 0:
        try:
            pff, op = struct.unpack("<HL", data[off:off+6])
        except struct.error:
            return

        # Split the id/flags word: 14-bit property id, blip-id flag,
        # complex flag.
        pid = pff & 0x3fff
        fBid = (pff & 0x4000) >> 14
        fComplex = (pff & 0x8000) >> 15

        #op contains the length
        if fComplex:
            cProps.append((op, pid))

        print(" "*(depth*4) + "Prop ID %#x (%d), Blip ID %#x (%d), Complex %#x, Op %#x (%#d)" %
                        (pid, pid, fBid, fBid, fComplex, op, op))

        off += 6
        propCount -= 1

    #dump the data for any complex properties
    if off < dLen:

        print("\n" + " "*(depth*4) + "Dumping data for %d complex props\n" % len(cProps))

        for (cLen, pid) in cProps:

            if off + cLen > dLen:
                print "Warning: breaking early, not enough to print remaining props"
                break

            lenLeft = min(cLen, dLen - off)
            print(" "*(depth*4) + "Dumping property %#x (%d)...\n" % (pid, pid))
            print hexdump(data[off:off+lenLeft], indent=(depth)*4)

            off += cLen
Beispiel #26
0
def decode_ds_rdata(pkt, offset, rdlen):
    """Decode DS-style rdata: "keytag algorithm digesttype hex-digest"."""
    keytag, alg, digesttype = struct.unpack('!HBB', pkt[offset:offset + 4])
    digest = hexdump(pkt[offset + 4:offset + rdlen])
    # Debug mode annotates the numeric codes with their mnemonic names.
    if options['DEBUG']:
        return "%d %d(%s) %d(%s) %s" % (keytag, alg, dnssec_alg[alg],
                                        digesttype,
                                        dnssec_digest[digesttype], digest)
    return "%d %d %d %s" % (keytag, alg, digesttype, digest)
def objSubPrinter(data, dLen, depth=2):
    # Walk the OBJ record's sub-records.  Each has a 4-byte header
    # (2-byte type, 2-byte length); known subtypes are printed via their
    # registered handler or hexdumped, unknown ones print raw header info.
    off = 0
    while off <= dLen - 4:
        t, l = struct.unpack("<HH", data[off:off+4])
        try:
            desc = objSubTypeMap[t]
            print("        Subtype %s [%#x (%d)] offset %#x, len %#x (%d) (%s)" %
                    (desc[0], t, t, off, l, l, desc[1]))

            # Clamp to what actually remains in this record.
            lenLeft = min(l, dLen - off - 4)

            if desc[2]:
                desc[2](data[off+4:off+4+lenLeft], lenLeft, depth + 1)
            else:
                print hexdump(data[off+4:off+4+lenLeft], indent=(depth + 1)*4),
        except KeyError:
            print "        obj.subtype %#x (%d) obj.sublen %#x (%d)" % (t, t, l, l)
        off += l + 4
    return -1
Beispiel #28
0
    def _request_device(self, command, params='', accept=None, debug=False):
        """Send a framed command to the device and return (response, cmd).

        Retries up to four times ("knocks").  *accept* optionally lists
        extra reply command codes to accept besides *command*.  Raises
        AssertionError on timeout, framing or checksum errors.
        """
        # BUG FIX: the original used a mutable default (accept=[]) and
        # appended to it, so accepted command codes leaked across calls
        # (and into caller-supplied lists).  Work on a private copy.
        accept = list(accept) if accept is not None else []
        accept.append(command)

        req = struct.pack('>BHB', self.CMD_PREFIX, 1+len(params), command)
        req += params
        req += struct.pack('>B', self._calc_checksum(req[1:]))

        self._port.timeout = 2
        resp_h = None
        for knock in range(4):
            self._drain()
            if debug:
                self.log.debug("Write:\n%s" % hexdump(req))
            self._port.write(req)
            resp_h = self._port.read(3)
            if len(resp_h) < 3:
                continue
            (cmd, resp_len) = struct.unpack('>BH', resp_h)
            if cmd not in accept:
                self.log.error('Unexpected response %s' % hexdump(resp_h))
                self._drain()
                continue
            break
        if not resp_h:
            raise AssertionError('No answer from device')
        if len(resp_h) < 3:
            raise AssertionError('Communication error')
        if debug:
            self.log.debug('%d bytes to receive' % resp_len)
        resp = self._port.read(resp_len)
        cksum = self._port.read(1)
        if debug:
            self.log.debug("Read:\n%s" % hexdump(resp))
        self._port.timeout = 1
        if not len(cksum):
            raise AssertionError('Communication error')
        rcksum = ord(cksum)
        dcksum = self._calc_checksum(resp_h[1:], resp)
        if rcksum != dcksum:
            raise AssertionError('Comm. error, checksum error 0x%02x/0x%02x' \
                                    % (rcksum, dcksum))
        return (resp, cmd)
Beispiel #29
0
 def _request_device(self, command, params='', accept=None, debug=False):
     """Send a framed command and return (response_payload, reply_cmd).

     Retries up to four times; *accept* may list additional acceptable
     reply command codes.  Raises AssertionError on timeout, framing or
     checksum errors.
     """
     # BUG FIX: mutable default argument accept=[] was appended to on every
     # call, leaking accepted command codes across calls.  Copy instead.
     accept = list(accept) if accept is not None else []
     accept.append(command)
     req = struct.pack('>BHB', self.CMD_PREFIX, 1 + len(params), command)
     req += params
     req += struct.pack('>B', self._calc_checksum(req[1:]))
     self._port.timeout = 2
     resp_h = None
     for knock in range(4):
         self._drain()
         if debug:
             self.log.debug("Write:\n%s" % hexdump(req))
         self._port.write(req)
         resp_h = self._port.read(3)
         if len(resp_h) < 3:
             continue
         (cmd, resp_len) = struct.unpack('>BH', resp_h)
         if cmd not in accept:
             self.log.error('Unexpected response %s' % hexdump(resp_h))
             self._drain()
             continue
         break
     if not resp_h:
         raise AssertionError('No answer from device')
     if len(resp_h) < 3:
         raise AssertionError('Communication error')
     if debug:
         self.log.debug('%d bytes to receive' % resp_len)
     resp = self._port.read(resp_len)
     cksum = self._port.read(1)
     if debug:
         self.log.debug("Read:\n%s" % hexdump(resp))
     self._port.timeout = 1
     if not len(cksum):
         raise AssertionError('Communication error')
     rcksum = ord(cksum)
     dcksum = self._calc_checksum(resp_h[1:], resp)
     if rcksum != dcksum:
         raise AssertionError('Comm. error, checksum error 0x%02x/0x%02x' \
                                 % (rcksum, dcksum))
     return (resp, cmd)
Beispiel #30
0
def decode_rr(pkt, offset, hexrdata):
    """ Decode a resource record, given DNS packet and offset"""

    # Returns (domainname, rrtype, rrclass, ttl, rdata, new_offset), where
    # rdata is a presentation-format string (or hex when hexrdata is set).
    orig_offset = offset
    domainname, offset = name_from_wire_message(pkt, offset)
    # Fixed RR header after the owner name: type, class, TTL, rdata length.
    rrtype, rrclass, ttl, rdlen = \
            struct.unpack("!HHIH", pkt[offset:offset+10])
    offset += 10
    rdata = pkt[offset:offset + rdlen]
    if hexrdata:
        rdata = hexdump(rdata)
    elif options["generic"]:
        rdata = generic_rdata_encoding(rdata, rdlen)
    elif rrtype == 1:  # A
        rdata = socket.inet_ntop(socket.AF_INET, rdata)
    elif rrtype in [2, 5, 12, 39]:  # NS, CNAME, PTR, DNAME
        rdata, _ = name_from_wire_message(pkt, offset)
        rdata = rdata.text()
    elif rrtype == 6:  # SOA
        rdata = decode_soa_rdata(pkt, offset, rdlen)
    elif rrtype == 15:  # MX
        mx_pref, = struct.unpack('!H', pkt[offset:offset + 2])
        rdata, _ = name_from_wire_message(pkt, offset + 2)
        rdata = "%d %s" % (mx_pref, rdata.text())
    elif rrtype in [16, 99]:  # TXT, SPF
        rdata = decode_txt_rdata(rdata, rdlen)
    elif rrtype == 28:  # AAAA
        rdata = socket.inet_ntop(socket.AF_INET6, rdata)
    elif rrtype == 33:  # SRV
        rdata = decode_srv_rdata(pkt, offset)
    elif rrtype == 41:  # OPT
        # OPT pseudo-RR carries EDNS options; rdata left as raw bytes here.
        pass
    elif rrtype in [43, 59, 32769]:  # [C]DS, DLV
        rdata = decode_ds_rdata(pkt, offset, rdlen)
    elif rrtype == 45:  # IPSECKEY
        rdata = decode_ipseckey_rdata(pkt, offset, rdlen)
    elif rrtype in [46, 24]:  # RRSIG, SIG
        rdata = decode_rrsig_rdata(pkt, offset, rdlen)
    elif rrtype == 47:  # NSEC
        rdata = decode_nsec_rdata(pkt, offset, rdlen)
    elif rrtype in [48, 25, 60]:  # [C]DNSKEY, KEY
        rdata = decode_dnskey_rdata(pkt, offset, rdlen)
    elif rrtype == 50:  # NSEC3
        rdata = decode_nsec3_rdata(pkt, offset, rdlen)
    elif rrtype == 51:  # NSEC3PARAM
        rdata = decode_nsec3param_rdata(rdata)
    elif rrtype == 257:  # CAA
        rdata = decode_caa_rdata(rdata)
    else:  # use RFC 3597
        rdata = generic_rdata_encoding(rdata, rdlen)
    offset += rdlen
    return (domainname, rrtype, rrclass, ttl, rdata, offset)
Beispiel #31
0
    def tree(path):
        # Recursively print a directory tree with file sizes; when the
        # --dump option is set, also hexdump the first 64 bytes of each
        # file under an "objects" directory.
        for root, dirs, files in os.walk(path):
            base = os.path.basename(root)
            # Indent proportionally to the directory depth below *path*.
            idnt = '    ' * (root.replace(path, '').count(os.sep))
            print('%s%s/' % (idnt, base))
            for f in files:
                pn = os.path.join(root, f)
                print('    %s%s [%s]' % (idnt, f, os.stat(pn).st_size))

                # dump some content of blobs
                if opts.dump and "objects" == base:
                    print(util.hexdump(util.read_file(pn, 32 * 2)))
                    print
Beispiel #32
0
    def tree(path):
        # Walk *path* recursively, printing a tree of directories and files
        # with sizes; with --dump, hexdump the first 64 bytes of blobs
        # found under an "objects" directory.
        for root, dirs, files in os.walk(path):
            base = os.path.basename(root)
            # Depth below *path* determines the indentation level.
            idnt = '    ' * (root.replace(path, '').count(os.sep))
            print('%s%s/' % (idnt, base))
            for f in files:
                pn = os.path.join(root, f)
                print('    %s%s [%s]' % (idnt, f, os.stat(pn).st_size))

                # dump some content of blobs
                if opts.dump and "objects" == base:
                    print(util.hexdump(util.read_file(pn, 32*2)))
                    print
Beispiel #33
0
def dumpCurrentUser(buf):
    # Parse and pretty-print a PowerPoint CurrentUser record: 8-byte record
    # header, 20-byte fixed body, ANSI user name, release version, and the
    # UTF-16 user name as a hexdump.

    rVer, rInst, rType, rLen = getHdr(buf[:8])

    printHdr(rVer, rInst, rType, rLen, 1, 1, 0)

    # Fixed-size fields occupy bytes 8..28 of the record.
    (size, headerToken, offsetToCurrentEdit, lenUserName, docFileVersion,
            majorVersion, minorVersion, unused) = struct.unpack("IIIHHBBH", buf[8:28])
    # Variable-length fields follow the fixed header.
    ansiUserName = buf[28:28+lenUserName]
    relVersion = struct.unpack("I", buf[28+lenUserName:28+lenUserName+4])[0]
    uniUserName = buf[28+lenUserName+4:]

    print("\tDWORD size %#x (%d)\n\tDWORD headerToken %#x (%d)\n"
            "\tDWORD offsetToCurrentEdit %#x (%d)\n\tWORD lenUserName %#x (%d)\n"
            "\tWORD docFileVersion %#x (%d)\n\tBYTE majorVersion %#x (%d)\n"
            "\tBYTE minorVersion %#x (%d)\n\tWORD unused %#x (%d)\n"
            "\tASCIIZ AnsiUserName '%s'\n\tDWORD relVersion %#x (%d)\n"
            "\tUNICODE uniUserName:\n" % (
                    size, size, headerToken, headerToken, offsetToCurrentEdit,
                    offsetToCurrentEdit, lenUserName, lenUserName, docFileVersion,
                    docFileVersion, majorVersion, majorVersion, minorVersion,
                    minorVersion, unused, unused, ansiUserName,
                    relVersion, relVersion))
    print hexdump(uniUserName, indent=12)
Beispiel #34
0
    def _recv_cmd(self, c, noprint=False):
        msg = c.read_device(0x11c)

        magic = struct.unpack('<I', msg[0x0:0x0 + 0x4])[0]
        if magic != 0x4d53584e:
            raise Exception(
                "TransportCmd: Recv-msg magic is invalid: 0x%08x." % magic)
        raw_data_size = struct.unpack('<I', msg[0x4:0x4 + 0x4])[0]
        if raw_data_size >= (0x100 >> 2):
            raise Exception(
                "TransportCmd: Recv-msg raw_data_size is too large: 0x%x." %
                raw_data_size)

        tmp_types = struct.unpack('<BBBB', msg[0x8:0x8 + 0x4])
        tmp_sizes = struct.unpack('<IIII', msg[0xc:0xc + 0x10])

        for i in range(4):
            if i < len(self.buffer_types):
                self.buffer_types[i] = tmp_types[i]
                self.buffer_sizes[i] = tmp_sizes[i]

        self.rawdata = msg[0x1c:0x1c + (raw_data_size << 2)]

        if not noprint:
            print "Raw msg reply:"
            print hexdump(msg)
            print "Rawdata payload:"
            print hexdump(self.rawdata[0x4:])
            print "Retval: 0x%x" % self.recv_ret
        self.recv_ret = struct.unpack('<I', self.rawdata[0x0:0x0 + 0x4])[0]

        for i in range(4):
            if i < len(self.buffer_types):
                self.buffers[i] = ''
                if self.buffer_types[i] == 1:  # To host
                    self.buffers[i] = c.read_device(self.buffer_sizes[i])
Beispiel #35
0
def dump(ole):
    """Walk the 'Workbook' OLE stream and pretty-print every BIFF record.

    Each record is a 2-byte type + 2-byte length header followed by a
    payload.  BOF/EOF (0x809/0xa) nesting is counted; on a bogus record
    length the routine tries to resynchronize via recover(), hexdumping
    the remainder if that fails.  Returns True on a clean walk, False
    when recovery failed.
    """
    # The Workbook stream holds the BIFF record chain.
    wb = ole.openstream("Workbook")
    buf = wb.read()
    wbLen = len(buf)
    print("*" * 80 +
          "\n[*]Dumping Workbook stream %#x (%d) bytes...\n") % (wbLen, wbLen)

    bofCount = offset = count = 0
    while offset + XL_HDRLEN <= wbLen:

        # Little-endian record header: WORD type, WORD length.
        rType, rLen = struct.unpack("<HH", buf[offset:offset + XL_HDRLEN])
        nLeft = wbLen - offset - XL_HDRLEN

        if rLen > nLeft:
            print "!!Invalid record length (%#x, %d) only have %#x (%d) left" % (
                rLen, rLen, nLeft, nLeft)
            print "!!Attempting to recover from error"

            #assume 0 length record and advance past type/len
            oOff = offset
            success, offset = recover(buf, wbLen, offset + XL_HDRLEN)
            if success:
                print "!!Recovered from error, skipped %#x (%d) bytes\n" % (
                    offset - oOff, offset - oOff)
            else:
                print "!!Couldn't recover from length error, hexdumping rest of stream"
                sys.stdout.write(hexdump(buf[oOff:]))
                return False
        else:
            # Track BOF/EOF nesting to show substream structure.
            if rType == 0x809:
                bofCount += 1
                print "[ii]BOF record: current count %d" % bofCount
            elif rType == 0xa:
                bofCount -= 1
                print "[ii]EOF record: current count %d" % bofCount
            printRec(rType, rLen,
                     buf[offset + XL_HDRLEN:offset + XL_HDRLEN + rLen], count,
                     offset)

            offset += XL_HDRLEN + rLen
            count += 1

    return True
Beispiel #36
0
def dump(ole):
    """Walk the 'Workbook' OLE stream and pretty-print every BIFF record.

    Duplicate of the other dump() variant in this paste: 2+2-byte
    record headers, BOF/EOF (0x809/0xa) nesting count, recover() resync
    on bad lengths.  Returns True on a clean walk, False when recovery
    failed.
    """
    # The Workbook stream holds the BIFF record chain.
    wb = ole.openstream("Workbook")
    buf = wb.read()
    wbLen = len(buf)
    print ("*"*80+ "\n[*]Dumping Workbook stream %#x (%d) bytes...\n") % (wbLen, wbLen)

    bofCount = offset = count = 0
    while offset + XL_HDRLEN <= wbLen:

        # Little-endian record header: WORD type, WORD length.
        rType, rLen = struct.unpack("<HH", buf[offset:offset + XL_HDRLEN])
        nLeft = wbLen - offset - XL_HDRLEN

        if rLen > nLeft:
            print "!!Invalid record length (%#x, %d) only have %#x (%d) left" % (rLen, rLen, nLeft,
                    nLeft)
            print "!!Attempting to recover from error"

            #assume 0 length record and advance past type/len
            oOff = offset
            success, offset = recover(buf, wbLen, offset + XL_HDRLEN)
            if success:
                print "!!Recovered from error, skipped %#x (%d) bytes\n" % (offset - oOff, offset -
                        oOff)
            else:
                print "!!Couldn't recover from length error, hexdumping rest of stream"
                sys.stdout.write(hexdump(buf[oOff:]))
                return False
        else:
            # Track BOF/EOF nesting to show substream structure.
            if rType == 0x809:
                bofCount += 1
                print "[ii]BOF record: current count %d" % bofCount
            elif rType == 0xa:
                bofCount -= 1
                print "[ii]EOF record: current count %d" % bofCount
            printRec(rType, rLen, buf[offset+XL_HDRLEN:offset+XL_HDRLEN+rLen], count, offset)

            offset += XL_HDRLEN + rLen
            count += 1

    return True
Beispiel #37
0
def test_hexdump():
    """Exercise util.hexdump and the file_test*/file_dump_test1 helpers
    against fixed sample files, returning all output concatenated so a
    caller can inspect or diff it."""
    parts = []

    sample = b'\x41\x42\x43'
    parts.append('\n')
    parts.append(util.hexdump(sample))
    parts.append('\n')

    for path in ('C:/test/a.txt', 'C:/test/127.txt',
                 'C:/test/128.txt', 'C:/test/129.txt'):
        parts.append(file_test1(path))

    #----------------------------
    for path in ('C:/test/127.txt', 'C:/test/128.txt', 'C:/test/129.txt'):
        parts.append(file_test2(path))

    #----------------------------
    parts.append(file_test2('C:/test/32.txt'))

    #---------------------------- variants with parts suppressed
    parts.append('no ascii-------------')
    parts.append(file_test3('C:/test/128.txt'))

    parts.append('no addr-------------')
    parts.append(file_test4('C:/test/128.txt'))

    parts.append('no header-------------')
    parts.append(file_test5('C:/test/128.txt'))

    #----------------------------
    parts.append('file_dump-------------')
    parts.append(file_dump_test1('C:/test/32.txt'))
    parts.append(file_dump_test1('C:/test/127.txt'))

    return ''.join(parts)
Beispiel #38
0
def print_optrr(rcode, rrclass, ttl, rdata):
    """Pretty-print an EDNS OPT pseudo-RR.

    rcode: low 4 bits of the response code from the DNS header.
    rrclass: OPT CLASS field (requestor's UDP payload size).
    ttl: OPT TTL field packing extended-rcode high bits, version, flags.
    rdata: concatenated EDNS option TLVs.
    """
    # The OPT TTL packs: extended-rcode high byte, EDNS version, flags word.
    packed_ttl = struct.pack('!I', ttl)
    ercode_hi, version, z = struct.unpack('!BBH', packed_ttl)
    # Full 12-bit extended rcode = (high 8 bits << 4) | header rcode.
    ercode = (ercode_hi << 4) | rcode
    flags = []
    if z & 0x8000:  # DO (DNSSEC OK) bit
        flags.append("do")
    print(";; OPT: edns_version=%d, udp_payload=%d, flags=%s, ercode=%d(%s)" %
          (version, rrclass, ' '.join(flags), ercode, rc.get_name(ercode)))
    blob = rdata
    # Walk the option TLVs: 2-byte code, 2-byte length, then data.
    while blob:
        ocode, olen = struct.unpack('!HH', blob[:4])
        odesc = edns_opt.get(ocode, "Unknown")
        print(";; OPT code=%d (%s), length=%d" % (ocode, odesc, olen))
        data_raw = blob[4:4 + olen]
        data_out = hexdump(data_raw)
        if ocode == 3:
            # NSID: frequently printable ASCII, show it alongside the hex.
            human_readable_data = ''
            try:
                human_readable_data = data_raw.decode('ascii')
            except (TypeError, UnicodeDecodeError):
                pass
            if human_readable_data:
                data_out = '%s (%s)' % (data_out, human_readable_data)
        elif ocode in [5, 6, 7]:
            # DAU/DHU/N3U: lists of one-byte algorithm numbers.
            data_out = ' '.join([str(x) for x in data_raw])
        elif ocode == 15:
            # Extended DNS Error: 2-byte info-code plus optional text.
            info_code, = struct.unpack('!H', data_raw[0:2])
            extra_text = data_raw[2:]
            info_code_desc = extended_error.get(info_code, "Unknown")
            data_out = "{} ({})".format(info_code, info_code_desc)
            if extra_text:
                data_out += " :{}".format(extra_text)
        print(";; DATA: %s" % data_out)
        blob = blob[4 + olen:]
Beispiel #39
0
def fwd(self, data):
   """Log the received bytes as a hexdump, then hand them to the first
   base class's dataReceived for normal processing."""
   self.factory.log.debug('received: %s', '\n' + hexdump(data))
   # NOTE(review): __bases__[0] bypasses the MRO; assumes a single
   # relevant base class -- confirm against the class definition.
   self.__class__.__bases__[0].dataReceived(self, data)
Beispiel #40
0
def dumpPropSetStream(ole, streamName):

    stream = ole.openstream(streamName)
    data = stream.read()
    dLen = len(data)
    if streamName[0] == "\x05":
        streamName = "\\x05" + streamName[1:]

    print "*" * 80, "\n[*]Dumping '%s' stream %#x (%d) bytes\n" % (streamName,
                                                                   dLen, dLen)

    #get the header
    (wByteOrder, wFormat, dwOsVer, clsid,
     cSections) = struct.unpack("HHI16sI", data[:28])
    curOffset = 28

    print "\nDumping property set header:\n"
    print "wByteOrder %#hx, wFormat %#hx, dwOsVer %#x" % (wByteOrder, wFormat,
                                                          dwOsVer)
    print "CLSID [%s] cSections %#x (%d)\n" % (clsidStr(clsid), cSections,
                                               cSections)

    #get each section header
    sections = []
    for i in xrange(cSections):

        #fmtid/offset
        (fmtid, dwOffset) = struct.unpack("16sI",
                                          data[curOffset:curOffset + 20])
        sections.append((fmtid, dwOffset))
        curOffset += 20

    #for each section
    print "Dumping sections:\n"
    for (sectionID, sectionOffset) in sections:

        props = []
        (cbSection,
         cProps) = struct.unpack("II", data[sectionOffset:sectionOffset + 8])
        print "Section\tFMTID [%s] sectionOffset %#x (%d)" % (
            clsidStr(fmtid), sectionOffset, sectionOffset)
        print "\tcbSection %#x (%d) cProps %#x (%d)" % (cbSection, cbSection,
                                                        cProps, cProps)

        #get the offset/length of each property
        curOffset = sectionOffset + 8
        last = None
        for i in xrange(cProps):

            (propid, offset) = struct.unpack("II",
                                             data[curOffset:curOffset + 8])

            cur = [propid, offset]
            props.append(cur)

            #fill in length of last property
            if last:
                last.append(offset - last[1])
            last = cur
            curOffset += 8

        #fill in the last offset
        last.append(len(data) - sectionOffset - last[1])

        #display the properties
        for prop in props:
            pId = prop[0]
            pOff = prop[1]
            pSz = prop[2]

            #take into account the section start
            realOffset = pOff + sectionOffset
            pType = struct.unpack("I", data[realOffset:realOffset + 4])[0]
            try:
                origType = pType
                pType = pType & 0xfff
                pName = ""
                if pType & VT_VECTOR:
                    pName = "VECTOR|"
                    pType &= ~VT_VECTOR
                if pType & VT_ARRAY:
                    pName = "ARRAY|"
                    pType &= ~VT_ARRAY
                if pType & VT_BYREF:
                    pName = "BYREF|"
                    pType &= ~VT_BYREF
                if pType & VT_RESERVED:
                    pName = "RESERVED|"
                    pType &= ~VT_RESERVED
                pName += propTypes[pType]
            except KeyError:
                pName = "!!UNKNOWN"

            print "    Prop ID %#x (%d) Masked Type %#x (%s) (Orig Type %#x), offset %#x (%d) size %#x (%d)" % \
                    (pId, pId, pType, pName, origType, pOff, pOff, pSz, pSz)
            pData = data[realOffset + 4:realOffset + 4 + pSz - 4]
            print hexdump(pData, indent=8)
Beispiel #41
0
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Replay captured USB packets')
    parser.add_argument('--verbose', '-v', action='store_true', help='verbose')
    parser.add_argument('fout', nargs='?', default=None, help='File out')
    args = parser.parse_args()

    # Open the first matching USB device on a fresh context.
    usbcontext = usb1.USBContext()
    dev = open_dev(usbcontext)

    print
    print 'Reading memory'

    # The FX2 has eight kbytes of internal program/data RAM,
    # Only the internal eight kbytes and scratch pad 0.5 kbytes RAM spaces have the following access:

    '''
    The available RAM spaces are 8 kbytes from
    0x0000-0x1FFF (code/data) and 512 bytes from 0xE000-0xE1FF (scratch pad RAM).
    '''
    # Read the full 8 KB code/data RAM region.
    r = ram_r(dev, 0x0000, 0x2000)
    #r = ram_r(dev, 0xE000, 0x2000)
    if args.fout:
        # NOTE(review): the '.hex' branch is unimplemented -- an Intel-hex
        # style filename silently writes nothing; confirm this is intended.
        if args.fout.find('.hex') >= 0:
            pass
        else:
            open(args.fout, 'w').write(r)
    else:
        # No output file given: hexdump the RAM image to stdout.
        hexdump(r)
Beispiel #42
0
def printRec(rType, rLen, rData, rCount, rOffset):
    
    #attempt to lookup the description for this record
    try:
        rec = recDict[rType]
        rName, rDesc, rFmt, rFNames = rec

        print "[%d]Record %s [%#x (%d)] offset %#x (%d), len %#x (%d) (%s)" % (
                rCount, rName, rType, rType, rOffset, rOffset, rLen, rLen, rDesc)
        
        fieldCount = offset = 0
        
        #skip last element due to trailing '%' in format string
        for fmt in rFmt.split("%")[:-1]:

            nLeft = rLen - offset
            fieldName = rFNames[fieldCount]

            if fmt == "1":
                if ensure(fieldName, 1, nLeft):
                    val = struct.unpack("B", rData[offset:offset+1])[0]
                    print "        BYTE %s = %#x (%d)" % (fieldName, val, val)
                    offset += 1
            elif fmt == "2":
                if ensure(fieldName, 2, nLeft):
                    val = struct.unpack("H", rData[offset:offset+2])[0]
                    print "        WORD %s = %#x (%d)" % (fieldName, val, val)
                    offset += 2
            elif fmt == "4":
                if ensure(fieldName, 4, nLeft):
                    val = struct.unpack("I", rData[offset:offset+4])[0]
                    print "        DWORD %s = %#x (%d)" % (fieldName, val, val)
                    offset += 4
            elif fmt[0] == "f":
                dataLen = int(fmt[1:])
                if dataLen > nLeft:
                    print "        Warning, field %s is longer (%d) than data left (%d). Dumping what's left:" % \
                                (fieldName, dataLen, nLeft)
                    sys.stdout.write(hexdump(rData[offset:], indent=12))
                else:
                    print "        Field %s is %#x (%d) bytes, dumping:" % (fieldName, dataLen,
                                                                        dataLen)
                    sys.stdout.write(hexdump(rData[offset:offset+dataLen], indent=12))
                
                offset += dataLen
            elif fmt == "v":
                print "        Field '%s' is variable length, dumping rest of record:" % (fieldName)
                sys.stdout.write(hexdump(rData[offset:], indent=12))
                break
            elif fmt[0] == "[":
                try:
                    handler = extraPrinters[fmt]
                    o = handler(rData[offset:], nLeft)
                    if o == -1:
                        break
                    else:
                        offset += o
                except KeyError:
                    print "Error: no handler defined for custom format '%s'" % (fmt)
            else:
                print "ERROR:Invalid format in record format string [%s]. Developer error!!" % (f)
                sys.exit(1)
            
            fieldCount += 1
    except KeyError:
        print "WARNING:No record description for id %#x (%d) len %#x (%d)" % (rType, rType, rLen, rLen)
        sys.stdout.write(hexdump(rData, indent=8))
    
    return
Beispiel #43
0
 def get_trackpoints(self, track):
     """Obtain the trackpoints of an activity.

     Requests the catalog header for *track*, decodes the per-lap
     summaries, then pulls trackpoint packets until the device signals
     no more points.  Normally returns (laps, tp); the "no more point"
     early exit returns tp alone.
     """
     (resp, ack) = self._request_device(self.CMD_TP_GET_HDR, 
                                        struct.pack('>HH', 1, track))
     # NOTE(review): 'trackpoints' is never used below.
     trackpoints = []
     start = 0
     # New catalog entry
     end = start+struct.calcsize('>%s' % (self.TP_CAT_FMT))
     if end > len(resp):
         raise AssertionError('Missing data in response %d / %d' % \
                                 (end, len(resp)))
     # there are 6 trailing bytes whose signification is yet to be
     # discovered, decoded here into the silent variable '_'
     (yy,mm,dd,hh,mn,ss,lap,dtime,dst,kcal,mspd,mhr,ahr,cmi,cmd,_,track,idx) = \
         struct.unpack('>%s' % (self.TP_CAT_FMT), 
                       resp[start:end])
     #if lap > 1:
     #    raise AssertionError('Multi-lap entries not supported')
     rem_lap = lap
     laps = []
     count = 0
     # Decode one lap summary per reported lap; 'count' ends up as the
     # last lap's end point index, i.e. the total point count.
     while rem_lap > 0:
         start = end
         end = start + struct.calcsize('%s' % (self.TP_DB_LAP))
         (stop,ttime,tdst,kcal,mspd,mhr,ahr,sidx,eidx) = \
             struct.unpack('>%s' % (self.TP_DB_LAP), 
                           resp[start:end])
         lap = {
             'acctime' : stop,
             'tottime' : ttime,
             'totdist' : tdst,
             'kcal' :  kcal,
             'maxspeed': mspd,
             'maxheart': mhr,
             'avgheart': ahr,
             'startptidx': sidx,
             'endptidx': eidx,
             'points' : []}
         laps.append(lap)
         count = eidx
         rem_lap -= 1
     # dtime is in tenths of a second; split into whole seconds + ms.
     lap_sec = dtime//10
     lap_msec = (dtime-lap_sec*10)*100
     tp = { 'start': datetime.datetime(2000+yy,mm,dd,hh,mn,ss),
            'time' : datetime.timedelta(0, lap_sec, 0, lap_msec),
            'distance': dst,
            'kcal' : kcal,
            'maxspeed' : mspd,
            'maxheart' : mhr,
            'avgheart' : ahr,
            'cmlplus' : cmi,
            'cmlmin' : cmd,
            'count' : count,
            # NOTE(review): binds the *last* lap dict, not the 'laps'
            # list -- looks like a typo for 'laps'; confirm intent.
            'laps' : lap,
            'points' : []}
     rem_tp = count
     print 'Points: %d' % count 
     
     # Fetch trackpoint packets until the device runs out of points.
     while rem_tp > 0:
         (resp, ack) = self._request_device(self.CMD_TP_GET_NEXT,
                                            accept=[self.ACK_TP_GET_NONE,
                                                    self.CMD_TP_GET_HDR])
         if ack == self.ACK_TP_GET_NONE:
             # no more point
             # NOTE(review): this early return gives tp only, unlike the
             # (laps, tp) tuple at the end -- confirm callers handle it.
             return tp
         start = 0
         end = start+struct.calcsize('>%s' % self.TP_CAT_FMT)
         header = struct.unpack('>%s' % self.TP_CAT_FMT, resp[start:end])
         entry_len = struct.calcsize('>%s' % self.TP_ENT_FMT)
         start = end
         points = []
         # Decode every full entry left in this packet, tagging each
         # point with the lap it belongs to.
         while start+entry_len <= len(resp):
             lapIdx = self._get_lap_index_for_point(laps, count-rem_tp)
             (x,y,z,s,h,d) = struct.unpack('>%s' % self.TP_ENT_FMT, 
                                           resp[start:start+entry_len])
             points.append((lapIdx,x,y,z,s,h,d))
             rem_tp -= 1
             start += entry_len
         tp['points'].extend(points)
         # Crude progress bar on a single line.
         pc = (50*(count-rem_tp))/count
         progress = '%s%s: %d%%' % ('+'*pc, '.'*(50-pc), 2*pc)
         print 'TP: ', progress, '\r', 
         sys.stdout.flush()
         if start < len(resp):
             self.log.error("Remaining bytes: %s" % hexdump(resp[start:]))
     print ''
     return (laps, tp)
    def readFileHax(self, filename, filerecord, filekeys):
        """Best-effort undelete of a file from raw NAND pages.

        Tries every candidate key in *filekeys* against all flash
        versions of the file's first logical block until one decrypts
        correctly, then sweeps forward through the FTL weave sequence
        collecting the remaining pages of the file's extents.  Missing
        pages are substituted with self.blankPage.  Writes the
        recovered (possibly partial) file and returns True, or False
        when no key decrypts the first block.
        """
        lba0 = self.first_lba + filerecord.dataFork.HFSPlusExtentDescriptor[0].startBlock
        filekey = None
        # NOTE(review): good_usn is never used; first_usn stays 0, so the
        # output filename prefix below always contains 0 -- confirm intent.
        good_usn = None
        first_usn = 0
        lba0_versions = self.nand.ftl.findAllVersions(lba0)
        print "%d versions for first lba" % len(lba0_versions)
        # Key search: first (key, page-version) pair that decrypts sanely
        # fixes both the file key and the starting weave sequence.
        for k in filekeys:
            for addr in lba0_versions:
                ciphertext = self.nand.ftl.readPage1(addr, key=None, lpn=lba0)
                if not ciphertext:
                    continue
                d = self.decryptFileBlock2(ciphertext, k, lba0, 0)
                if isDecryptedCorrectly(d):
                    hexdump(d[:16])
                    filekey = k
                    weaveSeq = addr[0]
                    break
        if not filekey:
            return False
        logicalSize = filerecord.dataFork.logicalSize
        missing_pages = 0
        file_pages = []
        # Collect all logical block numbers covered by the file's extents.
        lbns = []
        for extent in self.volume.getAllExtents(filerecord.dataFork, filerecord.fileID):
            for bn in xrange(extent.startBlock, extent.startBlock + extent.blockCount):
                lbns.append(self.first_lba + bn)
        datas = {}
        
        first_block = True
        done = False
        # Sweep a window of the weave sequence for pages belonging to the
        # file, keyed by byte offset within the file.
        for weaveSeq,lbn,ce,block,page in self.nand.ftl.findPagesInRange(weaveSeq, weaveSeq+50000):
            if not lbn in lbns:
                continue
            idx = lbns.index(lbn)
            ciphertext = self.nand.ftl.readPage1((weaveSeq,ce,block,page), key=None, lpn=lbn)
            if not ciphertext:
                continue
            ciphertext = self.decryptFileBlock2(ciphertext, filekey, lbn, idx*self.pageSize)
            if idx == 0:
                if not isDecryptedCorrectly(ciphertext):
                    continue
            datas[idx*self.pageSize] = (ciphertext, lbn - self.first_lba)
            #if idx == len(lbns):
            if len(datas) == len(lbns):
                done=True
                break
            # NOTE(review): unreachable -- the loop already broke above
            # whenever 'done' was set.
            if done:
                break
        # Reassemble the file in order, padding gaps with a blank page.
        cleartext = ""
        decrypt_offset = 0
        for i in xrange(0,logicalSize, self.pageSize):
            if datas.has_key(i):
                ciphertext, lbn = datas[i]
                cleartext += ciphertext
            else:
                cleartext += self.blankPage
                missing_pages += 1
            decrypt_offset += self.pageSize

        print "Recovered %d:%s %d missing pages, size %s, created %s, contentModDate %s" % \
            (filerecord.fileID, filename.encode("utf-8"), missing_pages, sizeof_fmt(logicalSize), hfs_date(filerecord.createDate), hfs_date(filerecord.contentModDate))
        filename =  "%d_%d_%s" % (filerecord.fileID, first_usn, filename)
        if missing_pages == 0:
            filename = "OK_" + filename
            self.okfiles += 1
        if True:#exactSize:
            cleartext = cleartext[:logicalSize]
        self.writeUndeletedFile(filename, cleartext)
        return True
Beispiel #45
0
def gbld_mod_parser(hdr, km, istr):
    """Render a 'mod' entry: the key/mode tag on the first line,
    followed by a hexdump of the instruction-stream payload."""
    payload_dump = util.hexdump(istr.data)
    return 'mod {}:\n{}'.format(km, payload_dump)
Beispiel #46
0
def decode_nsec3param_rdata(rdata):
    """Return the presentation form of NSEC3PARAM rdata:
    '<hashalg> <flags> <iterations> <salt-hex>'."""
    # Fixed 5-byte prefix: BYTE alg, BYTE flags, WORD iterations, BYTE saltlen.
    alg, flg, iters, slen = struct.unpack('!BBHB', rdata[:5])
    salt_hex = hexdump(rdata[5:5 + slen])
    return "%d %d %d %s" % (alg, flg, iters, salt_hex)
Beispiel #47
0
def dumpPropSetStream(ole, streamName):
        
    stream = ole.openstream(streamName)
    data = stream.read()
    dLen = len(data)
    if streamName[0] == "\x05":
        streamName = "\\x05" + streamName[1:]

    print "*"*80, "\n[*]Dumping '%s' stream %#x (%d) bytes\n" % (streamName, dLen, dLen)
    
    #get the header
    (wByteOrder, wFormat, dwOsVer, clsid, cSections) = struct.unpack("HHI16sI", data[:28])
    curOffset = 28
    
    print "\nDumping property set header:\n"
    print "wByteOrder %#hx, wFormat %#hx, dwOsVer %#x" % (wByteOrder, wFormat, dwOsVer)
    print "CLSID [%s] cSections %#x (%d)\n" % (clsidStr(clsid), cSections, cSections)

    #get each section header
    sections = []
    for i in xrange(cSections):
        
        #fmtid/offset
        (fmtid, dwOffset) = struct.unpack("16sI", data[curOffset:curOffset + 20])
        sections.append((fmtid, dwOffset))
        curOffset += 20
     
    #for each section
    print "Dumping sections:\n"
    for (sectionID, sectionOffset) in sections:
        
        props = []
        (cbSection, cProps) = struct.unpack("II", data[sectionOffset:sectionOffset + 8])
        print "Section\tFMTID [%s] sectionOffset %#x (%d)" % (clsidStr(fmtid), sectionOffset, sectionOffset)
        print "\tcbSection %#x (%d) cProps %#x (%d)" % (cbSection, cbSection, cProps, cProps)

        #get the offset/length of each property
        curOffset = sectionOffset + 8
        last = None
        for i in xrange(cProps):
        
            (propid, offset) = struct.unpack("II", data[curOffset:curOffset + 8])
        
            cur = [propid, offset]
            props.append(cur)

            #fill in length of last property
            if last:
                last.append(offset - last[1])
            last = cur
            curOffset += 8

        #fill in the last offset
        last.append(len(data) - sectionOffset - last[1])

        #display the properties
        for prop in props:
            pId = prop[0]
            pOff = prop[1]
            pSz = prop[2]

            #take into account the section start
            realOffset = pOff + sectionOffset
            pType = struct.unpack("I", data[realOffset:realOffset + 4])[0]
            try:
                origType = pType
                pType = pType & 0xfff
                pName = ""
                if pType & VT_VECTOR:
                    pName = "VECTOR|"
                    pType &= ~VT_VECTOR
                if pType & VT_ARRAY:
                    pName = "ARRAY|"
                    pType &= ~VT_ARRAY
                if pType & VT_BYREF:
                    pName = "BYREF|"
                    pType &= ~VT_BYREF
                if pType & VT_RESERVED:
                    pName = "RESERVED|"
                    pType &= ~VT_RESERVED
                pName += propTypes[pType]
            except KeyError:
                pName = "!!UNKNOWN"
            
            print "    Prop ID %#x (%d) Masked Type %#x (%s) (Orig Type %#x), offset %#x (%d) size %#x (%d)" % \
                    (pId, pId, pType, pName, origType, pOff, pOff, pSz, pSz)
            pData = data[realOffset + 4:realOffset + 4 + pSz - 4]
            print hexdump(pData, indent=8)
Beispiel #48
0
def printAtom(rData, rLen, rDesc):

    # see if actual len is equal to real len
    if len(rData) != rLen:
        print "!!Warning: claimed len %#x (%d), actual %#x (%d)" % (rLen, rLen, len(rData), len(rData))
        rLen = len(rData)

    rFmt = rDesc[2]
    rFNames = rDesc[3]
    fieldCount = offset = 0

    # skip last element due to trailing '%' in format string
    for fmt in rFmt.split("%")[:-1]:

        nLeft = rLen - offset

        fieldType = rFNames[fieldCount][0]
        fieldName = rFNames[fieldCount][1]
        fieldDesc = rFNames[fieldCount][2]

        if fieldType in COMPLEX_TYPES:
            isComplex = True
        else:
            isComplex = False

        if fmt == "1":
            if ensure(fieldName, 1, nLeft):
                val = struct.unpack("B", rData[offset : offset + 1])[0]
                if isComplex:
                    print "        %s.BYTE %s = %#x (%d)" % (fieldType, fieldName, val, val)
                else:
                    print "        BYTE %s = %#x (%d)" % (fieldName, val, val)
                offset += 1
        elif fmt == "2":
            if ensure(fieldName, 2, nLeft):
                val = struct.unpack("H", rData[offset : offset + 2])[0]
                if isComplex:
                    print "        %s.WORD %s = %#x (%d)" % (fieldType, fieldName, val, val)
                else:
                    print "        WORD %s = %#x (%d)" % (fieldName, val, val)
                offset += 2
        elif fmt == "4":
            if ensure(fieldName, 4, nLeft):
                val = struct.unpack("I", rData[offset : offset + 4])[0]
                if isComplex:
                    print "        %s.DWORD %s = %#x (%d)" % (fieldType, fieldName, val, val)
                else:
                    print "        DWORD %s = %#x (%d)" % (fieldName, val, val)
                offset += 4
        elif fmt == "s":
            print "        Field '%s' is variable length ASCIIZ, dumping rest of record:" % (fieldName)
            sys.stdout.write(hexdump(rData[offset:], indent=12))
            break
        elif fmt == "u":
            print "        Field '%s' is variable length UNICODE, dumping rest of record:" % (fieldName)
            sys.stdout.write(hexdump(rData[offset:], indent=12))
            break
        elif fmt == "v":
            print "        Field '%s' is variable length DATA, dumping rest of record:" % (fieldName)
            sys.stdout.write(hexdump(rData[offset:], indent=12))
            break
        else:
            print "ERROR:Invalid format in record format string [%s]. Developer error!!" % (f)
            sys.exit(1)

        fieldCount += 1
 def dump(self):
   print '{0} {1}'.format(self.type, ', '.join(self.flags))
   if self.data:
     util.hexdump(self.data)
   else:
     print '  [empty]'
Beispiel #50
0
 def do_BAG1(self, p):
     print "BAG1 locker from effaceable storage"
     bag1 = self.image.lockers.get("BAG1")
     hexdump(bag1)
     print "IV:", bag1[4:20].encode("hex")
     print "Key:", bag1[20:].encode("hex")
Beispiel #51
0
    # Excel: the Workbook stream holds the BIFF record chain.
    if ole.exists("Workbook"):
        print "\n[**]Detected Excel file %s" % fName
        dumpXL.dump(ole)

    # PowerPoint: records live in the 'PowerPoint Document' stream.
    if ole.exists("PowerPoint Document"):
        print "\n[**]Detected PowerPoint file %s" % fName
        dumpPPT.dump(ole, xole=extractOLE)

    # Property-set streams: names start with the \x05 control byte.
    if ole.exists("\x05SummaryInformation"):
        dumpPropSetStream(ole, "\x05SummaryInformation")

    # Extended document properties.
    if ole.exists("\x05DocumentSummaryInformation"):
        dumpPropSetStream(ole, "\x05DocumentSummaryInformation")

    # Verbose mode: hexdump every leaf stream in the compound file.
    if verbose:
        paths = ole.getPaths()
        for path in paths:

            #dont try to read a storage (paths ending in '/')
            if path[-1] != "/":
                print "Opening %s" % (path)
                stream = ole.openstream(path)
                buf = stream.read()
                sys.stdout.write(util.hexdump(buf, indent=4))
Beispiel #52
0
def generic_rdata_encoding(rdata, rdlen):
    """Return the RFC 3597 generic RDATA presentation form:
    '\\# <rdlen> <hex-of-rdata>'."""
    hex_blob = hexdump(rdata)
    return r"\# %d %s" % (rdlen, hex_blob)
Beispiel #53
0
    # Excel: the Workbook stream holds the BIFF record chain.
    if ole.exists("Workbook"):
        print "\n[**]Detected Excel file %s" % fName
        dumpXL.dump(ole)

    # PowerPoint: records live in the 'PowerPoint Document' stream.
    if ole.exists("PowerPoint Document"):
        print "\n[**]Detected PowerPoint file %s" % fName
        dumpPPT.dump(ole, xole=extractOLE)

    # Property-set streams: names start with the \x05 control byte.
    if ole.exists("\x05SummaryInformation"):
        dumpPropSetStream(ole, "\x05SummaryInformation")

    # Extended document properties.
    if ole.exists("\x05DocumentSummaryInformation"):
        dumpPropSetStream(ole, "\x05DocumentSummaryInformation")

    # Verbose mode: hexdump every leaf stream in the compound file.
    if verbose:
        paths = ole.getPaths()
        for path in paths:

            #dont try to read a storage (paths ending in '/')
            if path[-1] != "/":
                print "Opening %s" % (path)
                stream = ole.openstream(path)
                buf = stream.read()
                sys.stdout.write(util.hexdump(buf, indent=4))
Beispiel #54
0
def dumpPPD(buf, maxLen, depth, count_offset, recList=None):

    nRead = 0
    while nRead < maxLen:

        # read the header, check for atom/container
        rVer, rInst, rType, rLen = getHdr(buf[nRead : nRead + PPT_HDR_LEN])
        nRead += PPT_HDR_LEN

        if (rVer & 0xF) == 0xF:
            isAtom = False
            prefix = "[" * depth + "Container"
        else:
            isAtom = True
            prefix = "{" * depth + "Atom"

        # try to look up record description
        try:
            rDesc = recDict[rType]
            rName = rDesc[0]
        except KeyError:
            rDesc = None
            rName = "!!!Unknown"

        # pretty print the header
        print "%s(%d) %s (%#x, %d) size %#x (%d), instance %#x version %#x [offset %#x (%d)]" % (
            prefix,
            count_offset[REC_COUNT],
            rName,
            rType,
            rType,
            rLen,
            rLen,
            rInst,
            rVer,
            count_offset[REC_OFFSET],
            count_offset[REC_OFFSET],
        )

        # update ghetoo reference parameters, record count and total offset
        count_offset[REC_COUNT] += 1
        count_offset[REC_OFFSET] += PPT_HDR_LEN

        if not isAtom:
            nRead += dumpPPD(buf[nRead:maxLen], min(maxLen - nRead, rLen), depth + 1, count_offset, recList)
            print "]" * depth, "End container %s" % rName
        else:

            # only save atoms
            if recList is not None:
                recList.append((rType, buf[nRead : nRead + (min(rLen, maxLen - nRead))]))

            if rDesc:
                printAtom(buf[nRead : nRead + (min(rLen, maxLen - nRead))], rLen, rDesc)

                # look for a manual rec printer
                try:
                    handler = customPrinters[rType]
                    handler(buf[nRead : nRead + (min(rLen, maxLen - nRead))])
                except KeyError:
                    pass
            else:
                print (
                    "No descriptor, dumping:\n",
                    hexdump(buf[nRead + PPT_HDR_LEN : nRead + PPT_HDR_LEN + rLen], indent=8),
                )

            nRead += rLen
            count_offset[REC_OFFSET] += rLen

    return nRead