def getTag(self, tagNum):
    # Return (pageFlags, tagData) for page tag number `tagNum`.
    #
    # The tag array lives at the *end* of the page data, growing backwards:
    # tag 0 is the last 4 bytes, tag 1 the 4 bytes before it, and so on.
    #
    # :param tagNum: zero-based tag index within this page
    # :return: tuple (pageFlags, tagData)
    if self.record['FirstAvailablePageTag'] < tagNum:
        LOG.error('Trying to grab an unknown tag 0x%x' % tagNum)
        # NOTE(review): bare `raise` with no active exception raises a
        # TypeError here instead of a descriptive error.
        raise
    tags = self.data[-4 * self.record['FirstAvailablePageTag']:]
    # Tag offsets are relative to the end of the page header.
    baseOffset = len(self.record)
    # Trim the array from the end until the wanted tag is the last entry.
    for i in range(tagNum):
        tags = tags[:-4]
    tag = tags[-4:]
    if self.__DBHeader['Version'] == 0x620 and self.__DBHeader[
            'FileFormatRevision'] >= 17 and self.__DBHeader[
            'PageSize'] > 8192:
        # Windows 7+ large-page format: 15-bit size/offset fields; the page
        # flags are stored in the top 3 bits of the second data byte.
        valueSize = unpack('<H', tag[:2])[0] & 0x7fff
        valueOffset = unpack('<H', tag[2:])[0] & 0x7fff
        tmpData = list(self.data[baseOffset + valueOffset:][:valueSize])
        pageFlags = ord(tmpData[1]) >> 5
        # Mask the flag bits out of the data before returning it.
        tmpData[1] = chr(ord(tmpData[1]) & 0x1f)
        tagData = "".join(tmpData)
    else:
        # Legacy format: 13-bit size/offset, flags in the top 3 bits of the
        # offset word.
        valueSize = unpack('<H', tag[:2])[0] & 0x1fff
        pageFlags = (unpack('<H', tag[2:])[0] & 0xe000) >> 13
        valueOffset = unpack('<H', tag[2:])[0] & 0x1fff
        tagData = self.data[baseOffset + valueOffset:][:valueSize]
    #return pageFlags, self.data[baseOffset+valueOffset:][:valueSize]
    return pageFlags, tagData
def get_address(self):
    # Return the address as a dotted-quad string when the protocol is IP;
    # for any other protocol, log an error and return the raw bytes.
    raw = get_bytes(self.buffer, 5, self.get_address_length())
    if self.get_protocol() != AddressDetails.PROTOCOL_IP:
        LOG.error("Address not IP")
        return raw
    return socket.inet_ntoa(raw)
def getTag(self, tagNum):
    # Return (pageFlags, tagData) for page tag number `tagNum`.
    # Tags are 4-byte entries stored at the end of the page data, growing
    # backwards (tag 0 occupies the final 4 bytes).
    if self.record['FirstAvailablePageTag'] < tagNum:
        LOG.error('Trying to grab an unknown tag 0x%x' % tagNum)
        # NOTE(review): bare `raise` with no active exception -> TypeError
        raise
    tags = self.data[-4*self.record['FirstAvailablePageTag']:]
    # Tag offsets are relative to the end of the page header.
    baseOffset = len(self.record)
    # Trim entries from the end until the wanted tag is the last one.
    for i in range(tagNum):
        tags = tags[:-4]
    tag = tags[-4:]
    if self.__DBHeader['Version'] == 0x620 and self.__DBHeader['FileFormatRevision'] >= 17 and self.__DBHeader['PageSize'] > 8192:
        # Windows 7+ large-page format: 15-bit size/offset; flags live in the
        # top 3 bits of the second data byte and must be masked out.
        valueSize = unpack('<H', tag[:2])[0] & 0x7fff
        valueOffset = unpack('<H',tag[2:])[0] & 0x7fff
        tmpData = list(self.data[baseOffset+valueOffset:][:valueSize])
        pageFlags = ord(tmpData[1]) >> 5
        tmpData[1] = chr(ord(tmpData[1]) & 0x1f)
        tagData = "".join(tmpData)
    else:
        # Legacy format: 13-bit size/offset, flags in the high bits of the
        # offset word.
        valueSize = unpack('<H', tag[:2])[0] & 0x1fff
        pageFlags = (unpack('<H', tag[2:])[0] & 0xe000) >> 13
        valueOffset = unpack('<H',tag[2:])[0] & 0x1fff
        tagData = self.data[baseOffset+valueOffset:][:valueSize]
    #return pageFlags, self.data[baseOffset+valueOffset:][:valueSize]
    return pageFlags, tagData
def __addItem(self, entry):
    # Register one catalog entry into the in-memory table map, dispatching
    # on the entry's catalog type (table / column / index / long value).
    header = ESENT_DATA_DEFINITION_HEADER(entry['EntryData'])
    catalogEntry = ESENT_CATALOG_DATA_DEFINITION_ENTRY(entry['EntryData'][len(header):])
    itemName = self.__parseItemName(entry)
    entryType = catalogEntry['Type']

    if entryType == CATALOG_TYPE_TABLE:
        # New table: build its skeleton and make it the current table so the
        # columns/indexes that follow attach to it.
        table = OrderedDict()
        table['TableEntry'] = entry
        table['Columns'] = OrderedDict()
        table['Indexes'] = OrderedDict()
        table['LongValues'] = OrderedDict()
        self.__tables[itemName] = table
        self.__currentTable = itemName
    elif entryType == CATALOG_TYPE_COLUMN:
        # Attach the parsed header/record to the entry, then store it.
        entry['Header'] = header
        entry['Record'] = catalogEntry
        self.__tables[self.__currentTable]['Columns'][itemName] = entry
    elif entryType == CATALOG_TYPE_INDEX:
        self.__tables[self.__currentTable]['Indexes'][itemName] = entry
    elif entryType == CATALOG_TYPE_LONG_VALUE:
        self.__addLongValue(entry)
    else:
        LOG.error('Unknown type 0x%x' % entryType)
        # NOTE(review): bare `raise` with no active exception -> TypeError
        raise
def __init__(self, hive, isRemote=False):
    # Open a registry hive, either from a remote file-like object or from a
    # local path, parse the REGF header and locate the root key.
    #
    # :param hive: path to a hive file, or an already-constructed remote
    #              file object when isRemote is True
    # :param isRemote: when True, `hive` is a remote object with an open()
    #                  method instead of a local path
    self.__hive = hive
    if isRemote is True:
        self.fd = self.__hive
        self.__hive.open()
    else:
        self.fd = open(hive, "rb")
    data = self.fd.read(4096)
    self.__regf = REG_REGF(data)
    self.indent = ""
    self.rootKey = self.__findRootKey()
    if self.rootKey is None:
        LOG.error("Can't find root key!")
    # BUGFIX: was `and`, which only warned when the major version differed
    # AND the minor exceeded 5 — unsupported formats such as 1.6 or 2.0
    # slipped through silently. Supported format is 1.x with minor <= 5.
    elif self.__regf["MajorVersion"] != 1 or self.__regf["MinorVersion"] > 5:
        LOG.warning(
            "Unsupported version (%d.%d) - things might not work!"
            % (self.__regf["MajorVersion"], self.__regf["MinorVersion"])
        )
def __init__(self, hive, isRemote=False):
    # Open a registry hive (remote file object or local path), parse the
    # REGF header and locate the root key.
    #
    # :param hive: path to a hive file, or a remote file object when
    #              isRemote is True
    # :param isRemote: when True, `hive` exposes open() and read() itself
    self.__hive = hive
    if isRemote is True:
        self.fd = self.__hive
        self.__hive.open()
    else:
        self.fd = open(hive, 'rb')
    data = self.fd.read(4096)
    self.__regf = REG_REGF(data)
    self.indent = ''
    self.rootKey = self.__findRootKey()
    if self.rootKey is None:
        LOG.error("Can't find root key!")
    # BUGFIX: was `and`, so versions like 1.6 or 2.0 never triggered the
    # warning; the supported format is 1.x with minor <= 5, hence `or`.
    elif self.__regf['MajorVersion'] != 1 or self.__regf[
            'MinorVersion'] > 5:
        LOG.warning(
            "Unsupported version (%d.%d) - things might not work!"
            % (self.__regf['MajorVersion'], self.__regf['MinorVersion']))
def openFile(self, treeId, pathName, desiredAccess = FILE_READ_DATA | FILE_WRITE_DATA, shareMode = FILE_SHARE_READ,
             creationOption = FILE_NON_DIRECTORY_FILE, creationDisposition = FILE_OPEN,
             fileAttributes = FILE_ATTRIBUTE_NORMAL, impersonationLevel = SMB2_IL_IMPERSONATION, securityFlags = 0,
             oplockLevel = SMB2_OPLOCK_LEVEL_NONE, createContexts = None):
    """
    opens a remote file

    :param HANDLE treeId: a valid handle for the share where the file is to be opened
    :param string pathName: the path name to open
    :return: a valid file descriptor, if not raises a SessionError exception.
    """
    # NOTE(review): only the SMB1 (SMB_DIALECT) path is present in this
    # excerpt; for other dialects the function falls off the end and returns
    # None — confirm whether an SMB2/3 branch was lost in extraction.
    if self.getDialect() == smb.SMB_DIALECT:
        _, flags2 = self._SMBConnection.get_flags()
        # SMB1 expects backslash separators; encode UTF-16LE when the
        # negotiated flags request Unicode strings.
        pathName = pathName.replace('/', '\\')
        pathName = pathName.encode('utf-16le') if flags2 & smb.SMB.FLAGS2_UNICODE else pathName

        # Build the NT_CREATE_ANDX request by hand from the parameters.
        ntCreate = smb.SMBCommand(smb.SMB.SMB_COM_NT_CREATE_ANDX)
        ntCreate['Parameters'] = smb.SMBNtCreateAndX_Parameters()
        ntCreate['Data'] = smb.SMBNtCreateAndX_Data(flags=flags2)
        ntCreate['Parameters']['FileNameLength']= len(pathName)
        ntCreate['Parameters']['AccessMask'] = desiredAccess
        ntCreate['Parameters']['FileAttributes']= fileAttributes
        ntCreate['Parameters']['ShareAccess'] = shareMode
        ntCreate['Parameters']['Disposition'] = creationDisposition
        ntCreate['Parameters']['CreateOptions'] = creationOption
        ntCreate['Parameters']['Impersonation'] = impersonationLevel
        ntCreate['Parameters']['SecurityFlags'] = securityFlags
        ntCreate['Parameters']['CreateFlags'] = 0x16
        ntCreate['Data']['FileName'] = pathName

        if flags2 & smb.SMB.FLAGS2_UNICODE:
            # Unicode strings need a padding byte before the name.
            ntCreate['Data']['Pad'] = 0x0

        if createContexts is not None:
            # SMB1 has no create-context concept; logged, not fatal.
            LOG.error("CreateContexts not supported in SMB1")

        try:
            return self._SMBConnection.nt_create_andx(treeId, pathName, cmd = ntCreate)
        except (smb.SessionError, smb3.SessionError), e:
            # Normalize both SMB1 and SMB2/3 errors into one exception type.
            raise SessionError(e.get_error_code())
def __addItem(self, entry):
    # Register one catalog entry into the in-memory table map, dispatching
    # on its catalog type: tables create a new skeleton and become the
    # "current" table; columns/indexes attach to the current table; long
    # values are delegated to __addLongValue.
    dataDefinitionHeader = ESENT_DATA_DEFINITION_HEADER(entry['EntryData'])
    # The catalog record follows immediately after the definition header.
    catalogEntry = ESENT_CATALOG_DATA_DEFINITION_ENTRY(entry['EntryData'][len(dataDefinitionHeader):])
    itemName = self.__parseItemName(entry)

    if catalogEntry['Type'] == CATALOG_TYPE_TABLE:
        self.__tables[itemName] = OrderedDict()
        self.__tables[itemName]['TableEntry'] = entry
        self.__tables[itemName]['Columns'] = OrderedDict()
        self.__tables[itemName]['Indexes'] = OrderedDict()
        self.__tables[itemName]['LongValues'] = OrderedDict()
        # Subsequent column/index entries belong to this table.
        self.__currentTable = itemName
    elif catalogEntry['Type'] == CATALOG_TYPE_COLUMN:
        self.__tables[self.__currentTable]['Columns'][itemName] = entry
        self.__tables[self.__currentTable]['Columns'][itemName]['Header'] = dataDefinitionHeader
        self.__tables[self.__currentTable]['Columns'][itemName]['Record'] = catalogEntry
    elif catalogEntry['Type'] == CATALOG_TYPE_INDEX:
        self.__tables[self.__currentTable]['Indexes'][itemName] = entry
    elif catalogEntry['Type'] == CATALOG_TYPE_LONG_VALUE:
        self.__addLongValue(entry)
    else:
        LOG.error('Unknown type 0x%x' % catalogEntry['Type'])
        # NOTE(review): bare `raise` with no active exception -> TypeError
        raise
def __init__(self,data):
    # Depending on the type of data we'll end up building a different struct
    # (the catalog type is a little-endian uint16 at offset 4 of the raw
    # entry). The chosen layout is parsed twice: once as selected, then
    # again with the common trailer appended.
    dataType = unpack('<H', data[4:][:2])[0]
    self.structure = self.fixed
    if dataType == CATALOG_TYPE_TABLE:
        self.structure += self.other + self.table_stuff
    elif dataType == CATALOG_TYPE_COLUMN:
        self.structure += self.column_stuff
    elif dataType == CATALOG_TYPE_INDEX:
        self.structure += self.other + self.index_stuff
    elif dataType == CATALOG_TYPE_LONG_VALUE:
        self.structure += self.other + self.lv_stuff
    elif dataType == CATALOG_TYPE_CALLBACK:
        LOG.error('CallBack types not supported!')
        # NOTE(review): bare `raise` with no active exception -> TypeError
        raise
    else:
        # Unknown type: reset to an empty layout and initialize, so only the
        # common fields below end up being parsed.
        LOG.error('Unknown catalog type 0x%x' % dataType)
        self.structure = ()
        Structure.__init__(self,data)
    # Append the fields shared by every catalog type and (re-)parse.
    self.structure += self.common
    Structure.__init__(self,data)
def __init__(self, data):
    # Depending on the type of data we'll end up building a different struct.
    # The catalog type is a little-endian uint16 at byte offset 4; the
    # matching layout fragments are concatenated, the common trailer is
    # appended, and the buffer is parsed via Structure.__init__.
    dataType = unpack('<H', data[4:][:2])[0]
    self.structure = self.fixed
    if dataType == CATALOG_TYPE_TABLE:
        self.structure += self.other + self.table_stuff
    elif dataType == CATALOG_TYPE_COLUMN:
        self.structure += self.column_stuff
    elif dataType == CATALOG_TYPE_INDEX:
        self.structure += self.other + self.index_stuff
    elif dataType == CATALOG_TYPE_LONG_VALUE:
        self.structure += self.other + self.lv_stuff
    elif dataType == CATALOG_TYPE_CALLBACK:
        LOG.error('CallBack types not supported!')
        # NOTE(review): bare `raise` with no active exception -> TypeError
        raise
    else:
        # Unknown type: fall back to an empty layout so that only the common
        # fields (added below) get parsed.
        LOG.error('Unknown catalog type 0x%x' % dataType)
        self.structure = ()
        Structure.__init__(self, data)
    # Every catalog type shares these trailing fields; re-parse with them.
    self.structure += self.common
    Structure.__init__(self, data)
def __tagToRecord(self, cursor, tag):
    # Decode one page-tag blob into an OrderedDict keyed by column name.
    #
    # So my brain doesn't forget, the data record is composed of:
    #   Header
    #   Fixed Size Data (ID < 127)
    #       The easiest to parse. Their size is fixed in the record. You can
    #       get its size from the Column Record, field SpaceUsage.
    #   Variable Size Data (127 < ID < 255)
    #       At VariableSizeOffset you get an array of two bytes per variable
    #       entry, pointing to the length of the value. Values start at:
    #           numEntries = LastVariableDataType - 127
    #           VariableSizeOffset + numEntries * 2 (bytes)
    #   Tagged Data ( > 255 )
    #       After the Variable Size Value, there's more data for the tagged
    #       values. Right at the beginning there's another array
    #       (taggedItems), pointing to the values, size.
    #
    # Not every column needs to be present in the record (saves space), so
    # we walk over all the table's columns and assign data when we find it;
    # otherwise the column is left as None.
    #
    # ToDo: Better complete this description
    record = OrderedDict()
    taggedItems = OrderedDict()
    taggedItemsParsed = False

    dataDefinitionHeader = ESENT_DATA_DEFINITION_HEADER(tag)
    #dataDefinitionHeader.dump()
    # Two bytes of size-array per variable entry precede the variable data.
    variableDataBytesProcessed = (dataDefinitionHeader['LastVariableDataType'] - 127) * 2
    prevItemLen = 0
    tagLen = len(tag)
    fixedSizeOffset = len(dataDefinitionHeader)
    variableSizeOffset = dataDefinitionHeader['VariableSizeOffset']

    columns = cursor['TableData']['Columns']

    for column in columns.keys():
        columnRecord = columns[column]['Record']
        #columnRecord.dump()
        if columnRecord['Identifier'] <= dataDefinitionHeader['LastFixedSize']:
            # Fixed Size column data type, still available data
            record[column] = tag[fixedSizeOffset:][:columnRecord['SpaceUsage']]
            fixedSizeOffset += columnRecord['SpaceUsage']
        elif 127 < columnRecord['Identifier'] <= dataDefinitionHeader['LastVariableDataType']:
            # Variable data type: sizes are cumulative, so each item's length
            # is this entry minus the previous one.
            index = columnRecord['Identifier'] - 127 - 1
            itemLen = unpack('<H',tag[variableSizeOffset+index*2:][:2])[0]

            if itemLen & 0x8000:
                # Empty item (high bit set in the size word)
                itemLen = prevItemLen
                record[column] = None
            else:
                itemValue = tag[variableSizeOffset+variableDataBytesProcessed:][:itemLen-prevItemLen]
                record[column] = itemValue

            #if columnRecord['Identifier'] <= dataDefinitionHeader['LastVariableDataType']:
            variableDataBytesProcessed +=itemLen-prevItemLen

            prevItemLen = itemLen
        elif columnRecord['Identifier'] > 255:
            # Have we parsed the tagged items already? Parse the whole
            # tagged-item directory once, on the first tagged column seen.
            if taggedItemsParsed is False and (variableDataBytesProcessed+variableSizeOffset) < tagLen:
                index = variableDataBytesProcessed+variableSizeOffset
                #hexdump(tag[index:])
                endOfVS = self.__pageSize
                firstOffsetTag = (unpack('<H', tag[index+2:][:2])[0] & 0x3fff) + variableDataBytesProcessed+variableSizeOffset
                while True:
                    taggedIdentifier = unpack('<H', tag[index:][:2])[0]
                    index += 2
                    taggedOffset = (unpack('<H', tag[index:][:2])[0] & 0x3fff)
                    # As of Windows 7 and later ( version 0x620 revision 0x11) the
                    # tagged data type flags are always present
                    if self.__DBHeader['Version'] == 0x620 and self.__DBHeader['FileFormatRevision'] >= 17 and self.__DBHeader['PageSize'] > 8192:
                        flagsPresent = 1
                    else:
                        flagsPresent = (unpack('<H', tag[index:][:2])[0] & 0x4000)
                    index += 2
                    if taggedOffset < endOfVS:
                        endOfVS = taggedOffset
                    # Size is filled in below once the next offset is known.
                    taggedItems[taggedIdentifier] = (taggedOffset, tagLen, flagsPresent)
                    #print "ID: %d, Offset:%d, firstOffset:%d, index:%d, flag: 0x%x" % (taggedIdentifier, taggedOffset,firstOffsetTag,index, flagsPresent)
                    if index >= firstOffsetTag:
                        # We reached the end of the variable size array
                        break

                # Calculate length of variable items: each item's size is the
                # distance to the next item's offset.
                # Ugly.. should be redone
                prevKey = taggedItems.keys()[0]
                for i in range(1,len(taggedItems)):
                    offset0, length, flags = taggedItems[prevKey]
                    offset, _, _ = taggedItems.items()[i][1]
                    taggedItems[prevKey] = (offset0, offset-offset0, flags)
                    #print "ID: %d, Offset: %d, Len: %d, flags: %d" % (prevKey, offset0, offset-offset0, flags)
                    prevKey = taggedItems.keys()[i]
                taggedItemsParsed = True

            # Tagged data type
            if taggedItems.has_key(columnRecord['Identifier']):
                offsetItem = variableDataBytesProcessed + variableSizeOffset + taggedItems[columnRecord['Identifier']][0]
                itemSize = taggedItems[columnRecord['Identifier']][1]
                # If item have flags, we should skip them
                if taggedItems[columnRecord['Identifier']][2] > 0:
                    itemFlag = ord(tag[offsetItem:offsetItem+1])
                    offsetItem += 1
                    itemSize -= 1
                else:
                    itemFlag = 0

                #print "ID: %d, itemFlag: 0x%x" %( columnRecord['Identifier'], itemFlag)
                if itemFlag & (TAGGED_DATA_TYPE_COMPRESSED ):
                    # Compressed tagged data is not implemented.
                    LOG.error('Unsupported tag column: %s, flag:0x%x' % (column, itemFlag))
                    record[column] = None
                elif itemFlag & TAGGED_DATA_TYPE_MULTI_VALUE:
                    # ToDo: Parse multi-values properly
                    LOG.debug('Multivalue detected in column %s, returning raw results' % (column))
                    record[column] = (hexlify(tag[offsetItem:][:itemSize]),)
                else:
                    record[column] = tag[offsetItem:][:itemSize]
            else:
                record[column] = None
        else:
            record[column] = None

        # If we understand the data type, we unpack it and cast it accordingly
        # otherwise, we just encode it in hex
        if type(record[column]) is tuple:
            # A multi value data, we won't decode it, just leave it this way
            record[column] = record[column][0]
        elif columnRecord['ColumnType'] == JET_coltypText or columnRecord['ColumnType'] == JET_coltypLongText:
            # Let's handle strings
            if record[column] is not None:
                if columnRecord['CodePage'] not in StringCodePages:
                    LOG.error('Unknown codepage 0x%x'% columnRecord['CodePage'])
                    # NOTE(review): bare `raise` with no active exception
                    raise
                stringDecoder = StringCodePages[columnRecord['CodePage']]
                record[column] = record[column].decode(stringDecoder)
        else:
            unpackData = ColumnTypeSize[columnRecord['ColumnType']]
            if record[column] is not None:
                if unpackData is None:
                    # No known fixed decode: keep it as hex.
                    record[column] = hexlify(record[column])
                else:
                    unpackStr = unpackData[1]
                    unpackSize = unpackData[0]
                    record[column] = unpack(unpackStr, record[column])[0]

    return record
def dump(self):
    # Debug helper: print the page header, every tag descriptor, and the
    # decoded header/entries for this page (Python 2 print statements).
    baseOffset = len(self.record)
    self.record.dump()
    tags = self.data[-4*self.record['FirstAvailablePageTag']:]

    print "FLAGS: "
    self.printFlags()
    print

    # Walk the tag array from the end of the page (tag 0 is the last entry).
    for i in range(self.record['FirstAvailablePageTag']):
        tag = tags[-4:]
        if self.__DBHeader['Version'] == 0x620 and self.__DBHeader['FileFormatRevision'] > 11 and self.__DBHeader['PageSize'] > 8192:
            # Large-page format: 15-bit fields; flags live in the data bytes.
            valueSize = unpack('<H', tag[:2])[0] & 0x7fff
            valueOffset = unpack('<H',tag[2:])[0] & 0x7fff
            hexdump((self.data[baseOffset+valueOffset:][:6]))
            pageFlags = ord(self.data[baseOffset+valueOffset:][1]) >> 5
            #print "TAG FLAG: 0x%x " % (unpack('<L', self.data[baseOffset+valueOffset:][:4]) ) >> 5
            #print "TAG FLAG: 0x " , ord(self.data[baseOffset+valueOffset:][0])
        else:
            # Legacy format: 13-bit size/offset, flags in the offset word.
            valueSize = unpack('<H', tag[:2])[0] & 0x1fff
            pageFlags = (unpack('<H', tag[2:])[0] & 0xe000) >> 13
            valueOffset = unpack('<H',tag[2:])[0] & 0x1fff
        print "TAG %-8d offset:0x%-6x flags:0x%-4x valueSize:0x%x" % (i,valueOffset,pageFlags,valueSize)
        #hexdump(self.getTag(i)[1])
        tags = tags[:-4]

    # Tag 0 always holds the page header; pick the header type by page flags.
    if self.record['PageFlags'] & FLAGS_ROOT > 0:
        rootHeader = ESENT_ROOT_HEADER(self.getTag(0)[1])
        rootHeader.dump()
    elif self.record['PageFlags'] & FLAGS_LEAF == 0:
        # Branch Header
        flags, data = self.getTag(0)
        branchHeader = ESENT_BRANCH_HEADER(data)
        branchHeader.dump()
    else:
        # Leaf Header
        flags, data = self.getTag(0)
        if self.record['PageFlags'] & FLAGS_SPACE_TREE > 0:
            # Space Tree
            spaceTreeHeader = ESENT_SPACE_TREE_HEADER(data)
            spaceTreeHeader.dump()
        else:
            leafHeader = ESENT_LEAF_HEADER(data)
            leafHeader.dump()

    # Print the leaf/branch tags
    for tagNum in range(1,self.record['FirstAvailablePageTag']):
        flags, data = self.getTag(tagNum)
        if self.record['PageFlags'] & FLAGS_LEAF == 0:
            # Branch page
            branchEntry = ESENT_BRANCH_ENTRY(flags, data)
            branchEntry.dump()
        elif self.record['PageFlags'] & FLAGS_LEAF > 0:
            # Leaf page
            if self.record['PageFlags'] & FLAGS_SPACE_TREE > 0:
                # Space Tree
                spaceTreeEntry = ESENT_SPACE_TREE_ENTRY(data)
                #spaceTreeEntry.dump()
            elif self.record['PageFlags'] & FLAGS_INDEX > 0:
                # Index Entry
                indexEntry = ESENT_INDEX_ENTRY(data)
                #indexEntry.dump()
            elif self.record['PageFlags'] & FLAGS_LONG_VALUE > 0:
                # Long Page Value
                LOG.error('Long value still not supported')
                # NOTE(review): bare `raise` with no active exception
                raise
            else:
                # Table Value
                leafEntry = ESENT_LEAF_ENTRY(flags, data)
                dataDefinitionHeader = ESENT_DATA_DEFINITION_HEADER(leafEntry['EntryData'])
                dataDefinitionHeader.dump()
                catalogEntry = ESENT_CATALOG_DATA_DEFINITION_ENTRY(leafEntry['EntryData'][len(dataDefinitionHeader):])
                catalogEntry.dump()
                hexdump(leafEntry['EntryData'])
def __tagToRecord(self, cursor, tag):
    # Decode one page-tag blob into an OrderedDict keyed by column name.
    #
    # So my brain doesn't forget, the data record is composed of:
    #   Header
    #   Fixed Size Data (ID < 127)
    #       The easiest to parse. Their size is fixed in the record. You can
    #       get its size from the Column Record, field SpaceUsage.
    #   Variable Size Data (127 < ID < 255)
    #       At VariableSizeOffset you get an array of two bytes per variable
    #       entry, pointing to the length of the value. Values start at:
    #           numEntries = LastVariableDataType - 127
    #           VariableSizeOffset + numEntries * 2 (bytes)
    #   Tagged Data ( > 255 )
    #       After the Variable Size Value, there's more data for the tagged
    #       values. Right at the beginning there's another array
    #       (taggedItems), pointing to the values, size.
    #
    # Not every column needs to be present in the record (saves space), so
    # we walk over all the table's columns and assign data when we find it;
    # otherwise the column stays None.
    #
    # ToDo: Better complete this description
    record = OrderedDict()
    taggedItems = OrderedDict()
    taggedItemsParsed = False

    dataDefinitionHeader = ESENT_DATA_DEFINITION_HEADER(tag)
    #dataDefinitionHeader.dump()
    # Two bytes of size-array per variable entry precede the variable data.
    variableDataBytesProcessed = (
        dataDefinitionHeader['LastVariableDataType'] - 127) * 2
    prevItemLen = 0
    tagLen = len(tag)
    fixedSizeOffset = len(dataDefinitionHeader)
    variableSizeOffset = dataDefinitionHeader['VariableSizeOffset']

    columns = cursor['TableData']['Columns']

    for column in columns.keys():
        columnRecord = columns[column]['Record']
        #columnRecord.dump()
        if columnRecord['Identifier'] <= dataDefinitionHeader['LastFixedSize']:
            # Fixed Size column data type, still available data
            record[column] = tag[fixedSizeOffset:][:columnRecord['SpaceUsage']]
            fixedSizeOffset += columnRecord['SpaceUsage']
        elif 127 < columnRecord['Identifier'] <= dataDefinitionHeader['LastVariableDataType']:
            # Variable data type: sizes are cumulative, so each item's length
            # is this entry minus the previous one.
            index = columnRecord['Identifier'] - 127 - 1
            itemLen = unpack('<H', tag[variableSizeOffset + index * 2:][:2])[0]

            if itemLen & 0x8000:
                # Empty item (high bit set in the size word)
                itemLen = prevItemLen
                record[column] = None
            else:
                itemValue = tag[variableSizeOffset + variableDataBytesProcessed:][:itemLen - prevItemLen]
                record[column] = itemValue

            #if columnRecord['Identifier'] <= dataDefinitionHeader['LastVariableDataType']:
            variableDataBytesProcessed += itemLen - prevItemLen

            prevItemLen = itemLen
        elif columnRecord['Identifier'] > 255:
            # Have we parsed the tagged items already? The directory is
            # parsed once, on the first tagged column encountered.
            if taggedItemsParsed is False and (variableDataBytesProcessed + variableSizeOffset) < tagLen:
                index = variableDataBytesProcessed + variableSizeOffset
                #hexdump(tag[index:])
                endOfVS = self.__pageSize
                firstOffsetTag = (unpack('<H', tag[index + 2:][:2])[0] & 0x3fff) + variableDataBytesProcessed + variableSizeOffset
                while True:
                    taggedIdentifier = unpack('<H', tag[index:][:2])[0]
                    index += 2
                    taggedOffset = (unpack('<H', tag[index:][:2])[0] & 0x3fff)
                    # As of Windows 7 and later ( version 0x620 revision 0x11) the
                    # tagged data type flags are always present
                    if self.__DBHeader['Version'] == 0x620 and self.__DBHeader['FileFormatRevision'] >= 17 and self.__DBHeader['PageSize'] > 8192:
                        flagsPresent = 1
                    else:
                        flagsPresent = (unpack('<H', tag[index:][:2])[0] & 0x4000)
                    index += 2
                    if taggedOffset < endOfVS:
                        endOfVS = taggedOffset
                    # Size is corrected below once the next offset is known.
                    taggedItems[taggedIdentifier] = (taggedOffset, tagLen, flagsPresent)
                    #print "ID: %d, Offset:%d, firstOffset:%d, index:%d, flag: 0x%x" % (taggedIdentifier, taggedOffset,firstOffsetTag,index, flagsPresent)
                    if index >= firstOffsetTag:
                        # We reached the end of the variable size array
                        break

                # Calculate length of variable items: each item's size is the
                # distance to the next item's offset.
                # Ugly.. should be redone
                prevKey = taggedItems.keys()[0]
                for i in range(1, len(taggedItems)):
                    offset0, length, flags = taggedItems[prevKey]
                    offset, _, _ = taggedItems.items()[i][1]
                    taggedItems[prevKey] = (offset0, offset - offset0, flags)
                    #print "ID: %d, Offset: %d, Len: %d, flags: %d" % (prevKey, offset0, offset-offset0, flags)
                    prevKey = taggedItems.keys()[i]
                taggedItemsParsed = True

            # Tagged data type
            if taggedItems.has_key(columnRecord['Identifier']):
                offsetItem = variableDataBytesProcessed + variableSizeOffset + taggedItems[columnRecord['Identifier']][0]
                itemSize = taggedItems[columnRecord['Identifier']][1]
                # If item have flags, we should skip them
                if taggedItems[columnRecord['Identifier']][2] > 0:
                    itemFlag = ord(tag[offsetItem:offsetItem + 1])
                    offsetItem += 1
                    itemSize -= 1
                else:
                    itemFlag = 0

                #print "ID: %d, itemFlag: 0x%x" %( columnRecord['Identifier'], itemFlag)
                if itemFlag & (TAGGED_DATA_TYPE_COMPRESSED):
                    # Compressed tagged data is not implemented.
                    LOG.error('Unsupported tag column: %s, flag:0x%x' % (column, itemFlag))
                    record[column] = None
                elif itemFlag & TAGGED_DATA_TYPE_MULTI_VALUE:
                    # ToDo: Parse multi-values properly
                    LOG.debug('Multivalue detected in column %s, returning raw results' % (column))
                    record[column] = (hexlify(tag[offsetItem:][:itemSize]), )
                else:
                    record[column] = tag[offsetItem:][:itemSize]
            else:
                record[column] = None
        else:
            record[column] = None

        # If we understand the data type, we unpack it and cast it accordingly
        # otherwise, we just encode it in hex
        if type(record[column]) is tuple:
            # A multi value data, we won't decode it, just leave it this way
            record[column] = record[column][0]
        elif columnRecord['ColumnType'] == JET_coltypText or columnRecord['ColumnType'] == JET_coltypLongText:
            # Let's handle strings
            if record[column] is not None:
                if columnRecord['CodePage'] not in StringCodePages:
                    LOG.error('Unknown codepage 0x%x' % columnRecord['CodePage'])
                    # NOTE(review): bare `raise` with no active exception
                    raise
                stringDecoder = StringCodePages[columnRecord['CodePage']]
                record[column] = record[column].decode(stringDecoder)
        else:
            unpackData = ColumnTypeSize[columnRecord['ColumnType']]
            if record[column] is not None:
                if unpackData is None:
                    # No known fixed decode: keep it as hex.
                    record[column] = hexlify(record[column])
                else:
                    unpackStr = unpackData[1]
                    unpackSize = unpackData[0]
                    record[column] = unpack(unpackStr, record[column])[0]

    return record
def dump(self):
    # Debug helper: print the page header, all tag descriptors, and the
    # decoded header/entries for this page (Python 2 print statements).
    baseOffset = len(self.record)
    self.record.dump()
    tags = self.data[-4 * self.record['FirstAvailablePageTag']:]

    print "FLAGS: "
    self.printFlags()
    print

    # Walk the tag array from the end of the page (tag 0 is the last entry).
    for i in range(self.record['FirstAvailablePageTag']):
        tag = tags[-4:]
        if self.__DBHeader['Version'] == 0x620 and self.__DBHeader['FileFormatRevision'] > 11 and self.__DBHeader['PageSize'] > 8192:
            # Large-page format: 15-bit fields; flags live in the data bytes.
            valueSize = unpack('<H', tag[:2])[0] & 0x7fff
            valueOffset = unpack('<H', tag[2:])[0] & 0x7fff
            hexdump((self.data[baseOffset + valueOffset:][:6]))
            pageFlags = ord(self.data[baseOffset + valueOffset:][1]) >> 5
            #print "TAG FLAG: 0x%x " % (unpack('<L', self.data[baseOffset+valueOffset:][:4]) ) >> 5
            #print "TAG FLAG: 0x " , ord(self.data[baseOffset+valueOffset:][0])
        else:
            # Legacy format: 13-bit size/offset, flags in the offset word.
            valueSize = unpack('<H', tag[:2])[0] & 0x1fff
            pageFlags = (unpack('<H', tag[2:])[0] & 0xe000) >> 13
            valueOffset = unpack('<H', tag[2:])[0] & 0x1fff
        print "TAG %-8d offset:0x%-6x flags:0x%-4x valueSize:0x%x" % (i, valueOffset, pageFlags, valueSize)
        #hexdump(self.getTag(i)[1])
        tags = tags[:-4]

    # Tag 0 always holds the page header; pick the header type by page flags.
    if self.record['PageFlags'] & FLAGS_ROOT > 0:
        rootHeader = ESENT_ROOT_HEADER(self.getTag(0)[1])
        rootHeader.dump()
    elif self.record['PageFlags'] & FLAGS_LEAF == 0:
        # Branch Header
        flags, data = self.getTag(0)
        branchHeader = ESENT_BRANCH_HEADER(data)
        branchHeader.dump()
    else:
        # Leaf Header
        flags, data = self.getTag(0)
        if self.record['PageFlags'] & FLAGS_SPACE_TREE > 0:
            # Space Tree
            spaceTreeHeader = ESENT_SPACE_TREE_HEADER(data)
            spaceTreeHeader.dump()
        else:
            leafHeader = ESENT_LEAF_HEADER(data)
            leafHeader.dump()

    # Print the leaf/branch tags
    for tagNum in range(1, self.record['FirstAvailablePageTag']):
        flags, data = self.getTag(tagNum)
        if self.record['PageFlags'] & FLAGS_LEAF == 0:
            # Branch page
            branchEntry = ESENT_BRANCH_ENTRY(flags, data)
            branchEntry.dump()
        elif self.record['PageFlags'] & FLAGS_LEAF > 0:
            # Leaf page
            if self.record['PageFlags'] & FLAGS_SPACE_TREE > 0:
                # Space Tree
                spaceTreeEntry = ESENT_SPACE_TREE_ENTRY(data)
                #spaceTreeEntry.dump()
            elif self.record['PageFlags'] & FLAGS_INDEX > 0:
                # Index Entry
                indexEntry = ESENT_INDEX_ENTRY(data)
                #indexEntry.dump()
            elif self.record['PageFlags'] & FLAGS_LONG_VALUE > 0:
                # Long Page Value
                LOG.error('Long value still not supported')
                # NOTE(review): bare `raise` with no active exception
                raise
            else:
                # Table Value
                leafEntry = ESENT_LEAF_ENTRY(flags, data)
                dataDefinitionHeader = ESENT_DATA_DEFINITION_HEADER(leafEntry['EntryData'])
                dataDefinitionHeader.dump()
                catalogEntry = ESENT_CATALOG_DATA_DEFINITION_ENTRY(leafEntry['EntryData'][len(dataDefinitionHeader):])
                catalogEntry.dump()
                hexdump(leafEntry['EntryData'])
# # Description: # RFC 4493 implementation (http://www.ietf.org/rfc/rfc4493.txt) # RFC 4615 implementation (http://www.ietf.org/rfc/rfc4615.txt) # # NIST SP 800-108 Section 5.1, with PRF HMAC-SHA256 implementation # (http://tools.ietf.org/html/draft-irtf-cfrg-kdf-uses-00#ref-SP800-108) # # [MS-LSAD] Section 5.1.2 # [MS-SAMR] Section 2.2.11.1.1 from mitmflib.impacket import LOG try: from Crypto.Cipher import DES, AES, ARC4 except Exception: LOG.error("Warning: You don't have any crypto installed. You need PyCrypto") LOG.error("See http://www.pycrypto.org/") from struct import pack, unpack from mitmflib.impacket.structure import Structure import hmac, hashlib def Generate_Subkey(K): # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # + Algorithm Generate_Subkey + # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # + + # + Input : K (128-bit key) + # + Output : K1 (128-bit first subkey) + # + K2 (128-bit second subkey) + # +-------------------------------------------------------------------+