def get_location_information_wifi(backup_dir, outputFile):
    """Parse the binary wifi location cache "cache.wifi" in backup_dir.

    Writes one formatted line per cache entry to outputFile and returns a
    list of [key, latitude, longitude, accuracy, time] entries (all strings
    except key).  Returns an empty list when no wifi cache is found.
    """
    wifiPositionList = []
    cf = None
    try:
        cf = open(backup_dir + "/cache.wifi", 'rb')
        # header: 2-byte version, 2-byte entry count (big endian)
        dbVersion, dbEntriesNumber = struct.unpack('>hh', cf.read(4))
        for _ in range(dbEntriesNumber):
            # each record: 2-byte key length, key bytes, then 32 bytes of
            # accuracy/confidence/latitude/longitude/timestamp
            key = cf.read(struct.unpack('>h', cf.read(2))[0])
            (accuracy, confidence, latitude, longitude, readtime) = struct.unpack('>iiddQ', cf.read(32))
            # timestamps are stored in milliseconds
            timestamp = time.localtime(readtime / 1000)
            outputFile.write('%25s %7d %5d %10f %10f %s \n' % (key, accuracy, confidence, latitude, longitude, time.strftime("%x %X %z", timestamp)))
            wifiPositionList.append([key, str(latitude), str(longitude), str(accuracy), time.strftime("%x %X", timestamp)])
    except Exception:
        _adel_log.log("LocationInfo: ----> no Wifi cache found", 2)
    finally:
        # the original closed the file inside the try block only, leaking the
        # handle whenever a truncated record raised mid-loop
        if cf is not None:
            cf.close()
    return wifiPositionList
def parse_table_btree_interior_cell(page_hex_string, page_offset):
    """Parse one table b-tree interior cell at page_offset (in bytes).

    page_hex_string -- page content as a hex string (2 characters per byte)
    page_offset     -- byte offset of the cell within the page

    Returns [left_child_pointer, row_id].
    NOTE: local names are referenced by the "%(name)s" % vars() log calls
    below and must not be renamed.
    """
    # 1 byte is represented by two characters in the hexString, so internally we need to calculate the offset in nibbles
    page_offset_in_bytes = page_offset # store for log reasons only
    page_offset = page_offset * 2 # now dealing with nibbles because we treat a string (1 character = 1 nibble)
    _adel_log.log("parse_table_btree_interior_cell: ----> parsing b-tree interior cell at offset %(page_offset_in_bytes)s...." % vars(), 4)
    # Get total number of bytes of payload
    # 4-byte big-endian page number of the left child
    left_child_pointer = int(page_hex_string[page_offset:(page_offset + (4 * 2))], 16)
    _adel_log.log("parse_table_btree_interior_cell: OK - left child pointer is: %(left_child_pointer)s" % vars(), 4)
    # Get row_id
    # a varint is at most 9 bytes long, so a 9-byte window is sufficient
    row_id_string = page_hex_string[(page_offset + (4 * 2)):(page_offset + ((4 + 9) * 2))]
    row_id_tuple = _sqliteVarInt.parse_next_var_int(row_id_string)
    row_id = row_id_tuple[0]
    _adel_log.log("parse_table_btree_interior_cell: ----> row_id (index) is: %(row_id)s...." % vars(), 4)
    # Build tuple of node contents
    node_tuple = [left_child_pointer, row_id]
    _adel_log.log("parse_table_btree_interior_cell: OK - returning tuple of node content: %(node_tuple)s" % vars(), 4)
    _adel_log.log("parse_table_btree_interior_cell: ----> b-tree interior cell at offset %(page_offset_in_bytes)s parsed" % vars(), 4)
    return node_tuple
def get_location_information(backup_dir, device_name):
    """Collect location traces from all supported sources and plot them.

    Gathers EXIF, Twitter, Google Maps, cell, wifi and browser positions,
    writes a formatted table to backup_dir/LocationInformation.log and
    hands every list to _locationInformation.createMap().
    """
    _adel_log.log("\n############ LOCATION INFORMATION ############\n", 2)
    output_file = open(backup_dir + "/LocationInformation.log", 'a+')
    output_file.write(('%25s %6s %11s %11s %11s %5s \n' % ('key', 'accuracy', 'confidence', 'latitude', 'longitude', 'time')))
    # EXIF data is only available for a real device dump; "local" analysis
    # reads the databases directly from backup_dir
    if device_name != "local":
        picture_position_list = _getEXIF.get_exif_information(backup_dir, output_file)
        backup_dir = backup_dir + "/databases"
    else:
        picture_position_list = ""
    twitter_position_list = _locationInformation.get_location_information_twitter(backup_dir, output_file)
    gmaps_position_list = _locationInformation.get_location_information_gmaps(backup_dir, output_file)
    cell_position_list = _locationInformation.get_location_information_cell(backup_dir, output_file)
    wifi_position_list = _locationInformation.get_location_information_wifi(backup_dir, output_file)
    browser_position_list = _locationInformation.get_location_information_browser(backup_dir, output_file)
    _locationInformation.createMap(backup_dir, cell_position_list, wifi_position_list, picture_position_list, twitter_position_list, gmaps_position_list, browser_position_list)
    output_file.close()
def dump_standard_databases(database, backup_dir, hash_value):
    """Pull a standard database from the device via "adb pull".

    Logs the transfer statistics and a SHA-256 digest of the pulled file
    and appends "<database> -> <digest>" to the hash_value log file.

    database   -- file name of the database to pull
    backup_dir -- local directory the database is pulled into
    hash_value -- open, writable file object for the hash log
    """
    database_location = find_db_location(database)
    try:
        dbprocess = subprocess.Popen(['adb', 'pull', database_location, backup_dir],
                                     stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        dbprocess.wait()
        # adb prints its transfer statistics in parentheses on stderr
        transfer_info = dbprocess.communicate()[1].split("(")[1].split(")")[0]
        # Hash the pulled file's CONTENT -- the original code hashed the
        # path string itself, which produced a meaningless digest.
        local_copy = open(backup_dir + "/" + database, 'rb')
        sha256_digest = hashlib.sha256(local_copy.read()).hexdigest()
        local_copy.close()
        _adel_log.log(database + " -> " + transfer_info + " -> " + sha256_digest, 3)
        hash_value.write(database + " -> " + sha256_digest + " \n")
    except Exception as e:
        #print ("Error: ----> " + str(e))
        _adel_log.log("dumpDBs: ----> " + database + " doesn't exist!!", 2)
def open_db(file_name):
    """Open the SQLite database file file_name for low-level page access.

    On success, initialises the module-level file state and returns the
    file size in bytes.  Returns 1 when another file is already open and
    0 when file_name does not exist.
    """
    global DB_FILE_NAME
    global DB_FO
    global DB_FO_SET
    global DB_FILESIZE_IN_BYTES
    global DB_PAGESIZE_IN_BYTES
    global DB_RESERVED_SPACE
    # Ensure that there is no file open yet
    # NOTE(review): DB_FO_SET == 0 means "a file is currently open" and 1
    # means "closed" (see close_db) -- the flag reads inverted to its name.
    if DB_FO_SET == 0:
        _adel_log.log("open_db: ----> WARNING! database file " + str(file_name).split("/")[-1] + " could not be opened, because a file is already open", 2)
        return 1
    # Ensure that the database file exists
    if os.path.exists(file_name):
        DB_FILE_NAME = file_name
        DB_FO = open(DB_FILE_NAME, "rb")
        DB_FO_SET = 0
        DB_FILESIZE_IN_BYTES = os.path.getsize(file_name)
        _adel_log.log("open_db: ----> database file " + str(file_name).split("/")[-1] + " successfully loaded", 3)
        _adel_log.log("open_db: ----> database file size is " + str(DB_FILESIZE_IN_BYTES) + " bytes", 3)
        # quick and dirty hack: retrieve database page size
        # alternatively we could parse the database header and store its values here
        # (2-byte big-endian integer at file offset 16 of the SQLite header)
        DB_FO.seek(16, 0)
        DB_PAGESIZE_IN_BYTES = int(DB_FO.read(2).encode("hex"), 16)  # str.encode("hex") is Python 2 only
        # quick and dirty hack: retrieve database page reserved space
        # alternatively we could parse the database header and store its values here
        DB_FO.seek(20, 0)
        DB_RESERVED_SPACE = int(DB_FO.read(1).encode("hex"), 16)
        return DB_FILESIZE_IN_BYTES
    # file does not exist
    _adel_log.log("open_db: ----> ERROR! could not open database file " + str(file_name).split("/")[-1], 1)
    return 0
def parse_table_btree_leaf_page(page_hex_string, page_offset):
    """Parse a table b-tree leaf page (page type 13).

    Returns a list with the parsed content of every cell on the page, or
    an empty list when the header is not a valid leaf page header.
    """
    header = parse_btree_page_header(page_hex_string, page_offset)
    num_header_fields = len(header)
    # a valid table b-tree leaf page header has 6 fields and type byte 13
    if num_header_fields != 6 or header[0] != 13:
        _adel_log.log("parse_table_btree_leaf_page: ERROR - invalid page type in table b-tree leaf page header", 1)
        _adel_log.log("                             Page header was said to start at page offset: " + str(page_offset), 1)
        _adel_log.log("                             Printing page content....", 1)
        _adel_log.log(page_hex_string, 1)
        return []
    # header[-1] is the header length, header[2] the number of cells
    cell_pointers = parse_cell_pointer_array(page_hex_string, page_offset + header[num_header_fields - 1], header[2])
    # parse every cell the pointer array references
    return [parse_table_btree_leaf_cell(page_hex_string, pointer, cell_pointers, header[1])
            for pointer in cell_pointers]
def parse_free_blocks(page_hex_strings, next_free_block_pointer):
    """Walk the free block chain of a b-tree page.

    page_hex_strings        -- page content as a hex string (2 chars per byte)
    next_free_block_pointer -- byte offset of the first free block (0 = none)

    Returns a list of [offset, length] pairs, one per free block.
    """
    if next_free_block_pointer == 0:
        # nothing chained on this page
        return []
    _adel_log.log("parse_free_blocks: ----> parsing b-tree page free block chain....", 4)
    free_blocks = []
    while next_free_block_pointer != 0:
        # each free block starts with a 2-byte next pointer followed by a
        # 2-byte length field (offsets doubled: 2 hex chars per byte)
        block_start = next_free_block_pointer * 2
        block_length = int(page_hex_strings[block_start + 4:block_start + 8], 16)
        free_blocks.append([next_free_block_pointer, block_length])
        _adel_log.log("parse_free_blocks: OK - append free block tuple to list [offset, length]: %(next_free_block_pointer)s" % vars(), 4)
        next_free_block_pointer = int(page_hex_strings[block_start:block_start + 4], 16)
    _adel_log.log("parse_free_blocks: OK - returning list of free block pointers: %(free_blocks)s" % vars(), 3)
    _adel_log.log("parse_free_blocks: ----> b-tree page free block chain parsed", 4)
    return free_blocks
def parse_table_btree_leaf_page(page_hex_string, page_offset):
    """Parse a table b-tree leaf page (page type 13).

    Returns a list with the parsed content of every cell on the page, or
    an empty list when the header is not a valid leaf page header.
    """
    # Parse the page header
    header = parse_btree_page_header(page_hex_string, page_offset)
    # Ensure that we deal with a correct page
    headerLength = len(header)
    if (headerLength != 6 or header[0] != 13): # no valid headerLength
        _adel_log.log("parse_table_btree_leaf_page: ERROR - invalid page type in table b-tree leaf page header", 1)
        _adel_log.log("                             Page header was said to start at page offset: " + str(page_offset), 1)
        _adel_log.log("                             Printing page content....", 1)
        _adel_log.log(page_hex_string, 1)
        return []
    # Initialize resulting list
    content_list = []
    # Parse cell pointer array (header[-1] is the header length, header[2] the cell count)
    cell_pointers = parse_cell_pointer_array(page_hex_string, (page_offset + header[headerLength - 1]), header[2])
    # parse cells
    for cell_pointer in cell_pointers:
        content_list.append(parse_table_btree_leaf_cell(page_hex_string, cell_pointer, cell_pointers, header[1]))
    return content_list
def parse_table_btree_page(page_hex_string, page_offset):
    """Dispatch a b-tree page to the matching parser based on its type byte.

    Only table b-tree pages (types 5 and 13) are supported; index b-tree
    pages (types 2 and 10) fall through to the error path below.
    Returns the parsed cell list, or an empty list on an unknown type.
    """
    nibble_offset = page_offset * 2
    page_type = int(page_hex_string[nibble_offset:nibble_offset + 2], 16)
    if page_type == 5:
        # table b-tree interior page
        return parse_table_btree_interior_page(page_hex_string, page_offset)
    if page_type == 13:
        # table b-tree leaf page
        return parse_table_btree_leaf_page(page_hex_string, page_offset)
    # TODO: index b-tree pages (2 = interior, 10 = leaf) are ignored right now
    _adel_log.log("parse_table_btree_page: ERROR - invalid page type in table b-tree page header", 1)
    _adel_log.log("                        Page header was said to start at page offset: " + str(page_offset), 1)
    _adel_log.log("                        Printing page content....", 1)
    _adel_log.log(page_hex_string, 1)
    return []
def parse_cell_pointer_array(pageHexString, pageOffset, elements):
    """Parse the cell pointer array of a b-tree page.

    pageHexString -- page content as a hex string (2 chars per byte)
    pageOffset    -- byte offset of the array within the page
    elements      -- number of 2-byte pointers to read

    Returns the list of cell pointers (byte offsets within the page).
    """
    # work in nibbles: one byte is two characters in the hex string
    array_start = pageOffset * 2
    _adel_log.log("parse_cell_pointer_array: ----> parsing b-tree page cell pointer array....", 4)
    cell_pointers = []
    for index in range(elements):
        # each array element is a 2-byte integer (4 hex characters)
        begin = array_start + index * 4
        current_pointer = int(pageHexString[begin:begin + 4], 16)
        _adel_log.log("parse_cell_pointer_array: OK - append cell pointer to list: %(current_pointer)s" % vars(), 4)
        cell_pointers.append(current_pointer)
    _adel_log.log("parse_cell_pointer_array: OK - returning list of cell pointers: %(cell_pointers)s" % vars(), 3)
    _adel_log.log("parse_cell_pointer_array: ----> b-tree page cell pointer array parsed", 4)
    return cell_pointers
def parse_overflow_page_chain(page_hex_string):
    """Recursively collect the payload of an overflow page chain.

    page_hex_string -- overflow page content as a hex string

    Returns the concatenated payload (hex string) of this page and every
    page that follows it in the chain; "" on empty or too-short input.
    """
    if len(page_hex_string) == 0:
        _adel_log.log("parse_overflow_page_chain: WARNING! empty hexadecimal page string received", 2)
        return ""
    if len(page_hex_string) < 8:
        _adel_log.log("parse_overflow_page_chain: WARNING! hexadecimal page string is too short: " + str(page_hex_string), 2)
        return ""
    # everything after the 4-byte next-page pointer is payload
    overflow_page_content = page_hex_string[8:]
    # first 4 bytes: page number of the next overflow page, 0 terminates the chain
    next_overflow_page_number = int(page_hex_string[0:8], 16)
    _adel_log.log("parse_overflow_page_chain: OK - overflow page parsed" % vars(), 4)
    if next_overflow_page_number == 0:
        # last page of the chain
        return overflow_page_content
    _adel_log.log("parse_overflow_page_chain: ----> parsing next overflow page in chain, page number is: %(next_overflow_page_number)s...." % vars(), 4)
    return overflow_page_content + parse_overflow_page_chain(_sqliteFileHandler.read_page(next_overflow_page_number))
def parse_record(record_hex_string):
    """Parse one SQLite record (header + body) from a hex string.

    record_hex_string -- record content as a hex string (2 chars per byte)

    Returns the list of decoded column values in record order.
    """
    _adel_log.log("parse_record: ----> parsing record header....", 4)
    # a varint is at most 9 bytes (18 hex chars); first varint = header length
    header_length_tuple = _sqliteVarInt.parse_next_var_int(record_hex_string[0:18])
    header_string = record_hex_string[(header_length_tuple[1] * 2):(header_length_tuple[0] * 2)]
    # the header is a list of serial types, one per column
    record_header_field_list = _sqliteVarInt.parse_all_var_ints(header_string)
    _adel_log.log("parse_record: OK - record header field list is %(record_header_field_list)s" % vars(), 4)
    # walk the record body: each serial type tells us how many bytes to consume
    content_offset = header_length_tuple[0] * 2
    content_list = []
    # iterate the serial types directly instead of keeping a parallel manual
    # index that shadowed the (unused) loop variable in the original
    for serial_type in record_header_field_list:
        entry_content = parse_content_entry(serial_type, record_hex_string, content_offset)
        content_list.append(entry_content[0])
        content_offset += entry_content[1] * 2
    _adel_log.log("parse_record: OK - returning list of record contents", 4) #: %(content_list)s" %vars(), 4)
    _adel_log.log("parse_record: ----> record header parsed", 4)
    return content_list
def get_location_information_wifi(backup_dir, outputFile):
    """Parse the binary wifi location cache "cache.wifi" in backup_dir.

    Writes one formatted line per entry to outputFile and returns a list of
    [key, latitude, longitude, accuracy, time] entries.  Returns an empty
    list when no wifi cache exists.
    """
    wifiPositionList = []
    cf = None
    try:
        cf = open(backup_dir + "/cache.wifi", 'rb')
        # header: 2-byte version, 2-byte entry count (big endian)
        dbVersion, dbEntriesNumber = struct.unpack('>hh', cf.read(4))
        for _ in range(dbEntriesNumber):
            key = cf.read(struct.unpack('>h', cf.read(2))[0])
            (accuracy, confidence, latitude, longitude, readtime) = struct.unpack('>iiddQ', cf.read(32))
            # timestamps are stored in milliseconds
            timestamp = time.localtime(readtime / 1000)
            outputFile.write('%25s %7d %5d %10f %10f %s \n' % (key, accuracy, confidence, latitude, longitude, time.strftime("%x %X %z", timestamp)))
            wifiPositionList.append([key, str(latitude), str(longitude), str(accuracy), time.strftime("%x %X", timestamp)])
    except Exception:
        _adel_log.log("LocationInfo: ----> no Wifi cache found", 2)
    finally:
        # always release the handle: the original closed it inside the try
        # block and leaked it whenever a truncated record raised mid-loop
        if cf is not None:
            cf.close()
    return wifiPositionList
def get_location_information(backup_dir, device_name):
    """Collect location traces from every supported source and plot a map.

    Writes a formatted table to backup_dir/LocationInformation.log and
    hands all gathered position lists to _locationInformation.createMap().
    """
    _adel_log.log("\n############ LOCATION INFORMATION ############\n", 2)
    log_file = open(backup_dir + "/LocationInformation.log", 'a+')
    log_file.write(('%25s %6s %11s %11s %11s %5s \n' % ('key', 'accuracy', 'confidence', 'latitude', 'longitude', 'time')))
    # EXIF data is only available for a real device dump; "local" analysis
    # reads the databases straight out of backup_dir
    if device_name == "local":
        exif_positions = ""
    else:
        exif_positions = _getEXIF.get_exif_information(backup_dir, log_file)
        backup_dir = backup_dir + "/databases"
    twitter_positions = _locationInformation.get_location_information_twitter(backup_dir, log_file)
    gmaps_positions = _locationInformation.get_location_information_gmaps(backup_dir, log_file)
    cell_positions = _locationInformation.get_location_information_cell(backup_dir, log_file)
    wifi_positions = _locationInformation.get_location_information_wifi(backup_dir, log_file)
    browser_positions = _locationInformation.get_location_information_browser(backup_dir, log_file)
    _locationInformation.createMap(backup_dir, cell_positions, wifi_positions, exif_positions, twitter_positions, gmaps_positions, browser_positions)
    log_file.close()
def close_db():
    """Close the currently open database file and reset the module state.

    DB_FO_SET acts as an "is open" flag: 0 means a file is open, 1 means
    no file is open (see open_db).  Logs a warning when nothing is open.
    """
    global DB_FILE_NAME
    global DB_FO
    global DB_FO_SET
    global DB_FILESIZE_IN_BYTES
    global DB_PAGESIZE_IN_BYTES
    # ensure that the database file is opened
    if DB_FO_SET == 0:
        DB_FO.close()
        _adel_log.log("close_db: ----> database file \"" + str(DB_FILE_NAME) + "\" closed", 3)
        # reset every piece of module state so the next open_db starts clean
        DB_FILE_NAME = None
        DB_FO = None
        DB_FO_SET = 1
        DB_FILESIZE_IN_BYTES = None
        DB_PAGESIZE_IN_BYTES = None
    else:
        _adel_log.log("close_db: ----> WARNING! database file could not be closed, because none is open", 2)
def analyzeDBs(file_dir, os_version, xml_dir, device_name, os_version2):
    """Drive the database analysis stage and generate the final XML report."""
    phone_config = PhoneConfig("xml/phone_configs.xml", device_name, os_version2)
    # run the analysis module over every dumped database
    _adel_log.log("analyzeDBs: ----> starting to parse and analyze the databases....", 0)
    _analyzeDB.phone_info(file_dir, os_version, xml_dir, device_name, phone_config)
    twitter_databases = _dumpFiles.get_twitter_sqlite_files(file_dir, os_version)
    _analyzeDB.analyze(file_dir, os_version2, xml_dir, twitter_databases, phone_config)
    _adel_log.log("analyzeDBs: ----> all databases parsed and analyzed....", 0)
    # build the report out of the generated XML files
    _adel_log.log("createReport: ----> creating report....", 0)
    _createReport.report(xml_dir)
    _adel_log.log("ADEL MAIN: ----> report \033[0;32m" + xml_dir + "/report.xml\033[m created", 0)
def compare(backup_dir):
    """Re-hash every pulled database and compare with the stored hash log.

    Reads backup_dir/databases/hash_values.log (lines of the form
    "<database> -> <sha256> "), recomputes each file's SHA-256 and logs a
    match or mismatch per database.
    """
    _adel_log.log("\n############ HASH COMPARISION ############\n", 2)
    _adel_log.log("compareHash: ----> starting to compare calculated hash values", 0)
    hash_value_file = open(backup_dir + "/databases/hash_values.log", "a+")
    # "a+" can position the file pointer at EOF; rewind before reading,
    # otherwise the loop below silently compares nothing
    hash_value_file.seek(0)
    for line in hash_value_file:
        database = line.split(" ")[0]
        hash_value_old = line.split(" ")[2]
        # Hash the file's CONTENT -- the original code hashed the path
        # string itself, so every comparison was meaningless.
        db_file = open(backup_dir + "/databases/" + database, 'rb')
        hash_value_new = hashlib.sha256(db_file.read()).hexdigest()
        db_file.close()
        if hash_value_old != hash_value_new:
            _adel_log.log("hash_comparision -> hash value mismatch on database: " + database, 2)
        else:
            _adel_log.log("hash_comparision -> hash value match on database: " + database, 3)
    hash_value_file.close()
def parse_table_btree_interior_cell(page_hex_string, page_offset):
    """Parse one table b-tree interior cell at page_offset (in bytes).

    Returns [left_child_pointer, row_id].  The local names below are
    referenced by the "%(name)s" % vars() log calls and must not change.
    """
    # 1 byte is represented by two characters in the hexString, so internally we need to calculate the offset in nibbles
    page_offset_in_bytes = page_offset # store for log reasons only
    page_offset = page_offset * 2 # now dealing with nibbles because we treat a string (1 character = 1 nibble)
    _adel_log.log("parse_table_btree_interior_cell: ----> parsing b-tree interior cell at offset %(page_offset_in_bytes)s...." % vars(), 4)
    # Get total number of bytes of payload
    # 4-byte big-endian page number of the left child
    left_child_pointer = int(page_hex_string[page_offset:(page_offset + (4 * 2))], 16)
    _adel_log.log("parse_table_btree_interior_cell: OK - left child pointer is: %(left_child_pointer)s" % vars(), 4)
    # Get row_id (a varint is at most 9 bytes, so a 9-byte window suffices)
    row_id_string = page_hex_string[(page_offset + (4 * 2)):(page_offset + ((4 + 9) * 2))]
    row_id_tuple = _sqliteVarInt.parse_next_var_int(row_id_string)
    row_id = row_id_tuple[0]
    _adel_log.log("parse_table_btree_interior_cell: ----> row_id (index) is: %(row_id)s...." % vars(), 4)
    # Build tuple of node contents
    node_tuple = [left_child_pointer, row_id]
    _adel_log.log("parse_table_btree_interior_cell: OK - returning tuple of node content: %(node_tuple)s" % vars(), 4)
    _adel_log.log("parse_table_btree_interior_cell: ----> b-tree interior cell at offset %(page_offset_in_bytes)s parsed" % vars(), 4)
    return node_tuple
def dump_standard_databases(database, backup_dir, hash_value):
    """Pull a standard database from the device via "adb pull".

    Logs the transfer statistics and a SHA-256 digest of the pulled file
    and appends "<database> -> <digest>" to the hash_value log file.
    """
    database_location = find_db_location(database)
    try:
        dbprocess = subprocess.Popen(['adb', 'pull', database_location, backup_dir],
                                     stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        dbprocess.wait()
        # adb prints its transfer statistics in parentheses on stderr
        transfer_info = dbprocess.communicate()[1].split("(")[1].split(")")[0]
        # Hash the pulled file's CONTENT -- the original code hashed the
        # path string itself, which produced a meaningless digest.
        local_copy = open(backup_dir + "/" + database, 'rb')
        sha256_digest = hashlib.sha256(local_copy.read()).hexdigest()
        local_copy.close()
        _adel_log.log(database + " -> " + transfer_info + " -> " + sha256_digest, 3)
        hash_value.write(database + " -> " + sha256_digest + " \n")
    except Exception as e:
        #print ("Error: ----> " + str(e))
        _adel_log.log("dumpDBs: ----> " + database + " doesn't exist!!", 2)
def close_db():
    """Close the currently open database file and reset the module state.

    DB_FO_SET acts as an "is open" flag: 0 means a file is open, 1 means
    no file is open (see open_db).  Logs a warning when nothing is open.
    """
    global DB_FILE_NAME
    global DB_FO
    global DB_FO_SET
    global DB_FILESIZE_IN_BYTES
    global DB_PAGESIZE_IN_BYTES
    # ensure that the database file is opened
    if DB_FO_SET == 0:
        DB_FO.close()
        _adel_log.log("close_db: ----> database file \"" + str(DB_FILE_NAME) + "\" closed", 3)
        # reset all module state so the next open_db starts clean
        DB_FILE_NAME = None
        DB_FO = None
        DB_FO_SET = 1
        DB_FILESIZE_IN_BYTES = None
        DB_PAGESIZE_IN_BYTES = None
    else:
        _adel_log.log("close_db: ----> WARNING! database file could not be closed, because none is open", 2)
def determine_serial_type_content_size(serial_type):
    """Return the content length in bytes for a STRING/BLOB serial type.

    SQLite serial types >= 12 encode variable-length content: even values
    are strings of length (N-12)/2, odd values are BLOBs of length (N-13)/2.
    Returns 1 for invalid serial types so the caller can skip one byte and
    keep going.
    """
    # guard clause: only serial types >= 12 are variable-length
    if serial_type < 12:
        _adel_log.log("determine_serial_type_content_size: WARNING! invalid serial type (must be >= 12): %(serial_type)s" % vars(), 2)
        return 1 # at least one byte has an invalid serial type, thus return 1 so the program has a chance to continue with the next byte
    if serial_type % 2 == 0:
        # even serial type = String in the database
        serialTypeLength = (serial_type - 12) / 2
        _adel_log.log("determine_serial_type_content_size: OK - serial type is a STRING of length: %(serialTypeLength)s" % vars(), 4)
    else:
        # odd serial type = BLOB in the database
        serialTypeLength = (serial_type - 13) / 2
        _adel_log.log("determine_serial_type_content_size: OK - serial type is a BLOB of length: %(serialTypeLength)s" % vars(), 4)
    return serialTypeLength

#-----------------Example-------------------
#if __name__ == "__main__":
#    db = open("sql3_test.db", "rb")
#    db.seek(0, 0)
#    print parseBTreeHeader(db.read(1024), 100)
#    db.close()
#-----------------Example-------------------
def select_smartphone(self):
    """Pick the matching <phone> config node from the parsed XML document.

    First looks for an exact device/model/os_version match, then falls back
    to the first node with a matching device (logging a warning), and exits
    the program when no config matches the device at all.
    """
    phones = self.xml_doc.getElementsByTagName("phone")
    # pass 1: exact match on device, model and OS version
    for phone in phones:
        device_ok = phone.getAttribute("device") == self.device
        model_ok = phone.getAttribute("model") == self.model
        version_ok = phone.getAttribute("os_version") == self.os_version
        if device_ok and model_ok and version_ok:
            self.phone = phone
            _adel_log.log("PhoneConfig: ----> Found right config in the database.", 3)
            return
    # pass 2: settle for the first entry with the right device name
    for phone in phones:
        if phone.getAttribute("device") == self.device:
            self.phone = phone
            _adel_log.log("PhoneConfig: ----> The ADEL configuration for this phone \"" + self.device + " " + phone.getAttribute("model") + " Android " + phone.getAttribute("os_version") + "\" is different from the real phone \"" + self.device + " " + self.model + " Android " + self.os_version + "\"! Please check output carefully!", 2)
            return
    # no usable configuration at all -> abort
    _adel_log.log("PhoneConfig: ----> No suitable config found for " + str(self.device), 2)
    sys.exit(1)
def get_twitter_sqlite_files(backup_dir, os_version):
    """Pull every Twitter SQLite database from the device into backup_dir.

    Lists /data/data/com.twitter.android/databases/ once via "adb shell ls",
    pulls every *.db file and logs its transfer statistics plus a SHA-256
    digest of the pulled file.  Returns the list of pulled file names.
    """
    _adel_log.log("\n############ DUMP TWITTER SQLite FILES ############\n", 2)
    twitterdbnamelist = []
    twitter_db_dir = '/data/data/com.twitter.android/databases/'
    try:
        # Run "adb shell ls" ONCE and iterate its output -- the original
        # re-spawned the ls process for each of 6 hard-coded indices.
        ls_output = subprocess.Popen(['adb', 'shell', 'ls', twitter_db_dir],
                                     stdout=subprocess.PIPE).communicate()[0]
        for file_name in ls_output.split():
            if ".db" not in file_name:
                continue
            try:
                pull = subprocess.Popen(['adb', 'pull', twitter_db_dir + file_name, backup_dir],
                                        stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
                pull.wait()
                # adb prints its transfer statistics in parentheses on stderr
                transfer_info = pull.communicate()[1].split("(")[1].split(")")[0]
                # Hash the pulled file's CONTENT (the original hashed the path
                # string, and also forgot the "/" separator in that path).
                local_copy = open(backup_dir + "/" + file_name, 'rb')
                file_hash = hashlib.sha256(local_copy.read()).hexdigest()
                local_copy.close()
                # only record the database once it was pulled successfully
                twitterdbnamelist.append(file_name)
                _adel_log.log(file_name + " -> " + transfer_info + " -> " + file_hash, 3)
            except Exception:
                continue
    except Exception:
        _adel_log.log("dumpDBs: ----> Twitter database doesn't exist!", 2)
    return twitterdbnamelist
def read_page(page_number):
    """Return the content of database page page_number as a hex string.

    Pages are 1-indexed as in the SQLite file format.  Returns an empty
    string for an invalid page number or when no database file is open
    (DB_FO_SET == 0 means "open", see open_db/close_db).
    """
    global DB_FO
    global DB_FO_SET
    global DB_PAGESIZE_IN_BYTES
    if (page_number < 1): # database file not open
        _adel_log.log("read_page: ----> ERROR! invalid page number received, cannot read database page: " + str(page_number), 1)
        return ""
    if (DB_FO_SET == 0): # database file open
        fileOffset = ((page_number - 1) * DB_PAGESIZE_IN_BYTES)
        DB_FO.seek(fileOffset, 0)
        # transform to hex string for log output
        fileOffset = hex(fileOffset)
        _adel_log.log("read_page: ----> database page " + str(page_number) + " read, file offset: " + str(fileOffset), 3)
        # NOTE: str.encode("hex") is Python 2 only
        return DB_FO.read(DB_PAGESIZE_IN_BYTES).encode("hex")
    else: # database file not open
        _adel_log.log("read_page: ----> ERROR! page could not be read, because database file is not open, call open_db() first", 1)
        return ""
def parse_cell_pointer_array(pageHexString, pageOffset, elements): # 1 byte is represented by two characters in the hexString, so internally we need to calculate the offset in nibbles pageOffset = pageOffset * 2 _adel_log.log("parse_cell_pointer_array: ----> parsing b-tree page cell pointer array....", 4) cell_pointers = [] i = 4 while i <= (elements * 4): # times 4, because a cell pointer element is a 2-byte integer (= 4 nibbles and 4 characters in pageHexString) current_pointer = int(pageHexString[pageOffset + (i - 4):(pageOffset + i)], 16) _adel_log.log("parse_cell_pointer_array: OK - append cell pointer to list: %(current_pointer)s" % vars(), 4) cell_pointers.append(current_pointer) i += 4 _adel_log.log("parse_cell_pointer_array: OK - returning list of cell pointers: %(cell_pointers)s" % vars(), 3) _adel_log.log("parse_cell_pointer_array: ----> b-tree page cell pointer array parsed", 4) return cell_pointers
def sms_messages_to_xml(xml_dir, sms_list):
    """Write all extracted SMS messages to xml_dir/sms_Messages.xml.

    sms_list -- list of rows, each a 9-element sequence of strings in the
    order id, thread_id, number, person, date, read, type, subject, body.
    """
    _adel_log.log("############ XML OUTPUT GENERATION -> SMS MESSAGES ############ \n", 2)
    # Create the minidom document
    doc = Document()
    xml = doc.createElement("SMS_Messages")
    doc.appendChild(xml)
    # one child element per column, in the fixed order of the sms_list rows;
    # the data-driven loop replaces nine copy-pasted stanzas that also
    # shadowed the builtins "id" and "type"
    tags = ("id", "thread_id", "number", "person", "date", "read", "type", "subject", "body")
    for row in sms_list:
        sms_message = doc.createElement("SMS_Message")
        xml.appendChild(sms_message)
        for tag, value in zip(tags, row):
            element = doc.createElement(tag)
            sms_message.appendChild(element)
            element.appendChild(doc.createTextNode(value))
    # render once, then log and write the same serialisation
    pretty = make_pretty_xml(doc.toprettyxml(indent=" ", encoding="UTF-8"))
    _adel_log.log(pretty, 3)
    xml_sms_messages = open(xml_dir + "/sms_Messages.xml", "a+")
    xml_sms_messages.write(pretty)
    xml_sms_messages.close()
    _adel_log.log("xmlParser: ----> sms_Messages.xml created!", 4)
def calendar_to_xml(xml_dir, calendar_list):
    """Write all extracted calendar entries to xml_dir/calendar.xml.

    calendar_list -- list of rows, each a 9-element sequence of strings in
    the order id, calendarName, title, eventLocation, description, all_day,
    start, end, has_alarm.
    """
    _adel_log.log("############ XML OUTPUT GENERATION -> CALENDAR ENTRIES ############ \n", 2)
    # Create the minidom document
    doc = Document()
    xml = doc.createElement("Calendar_Entries")
    doc.appendChild(xml)
    # one child element per column, in the fixed order of the rows; the
    # data-driven loop replaces nine copy-pasted stanzas that also shadowed
    # the builtin "id"
    tags = ("id", "calendarName", "title", "eventLocation", "description", "all_day", "start", "end", "has_alarm")
    for row in calendar_list:
        calendar_entry = doc.createElement("Calendar_Entry")
        xml.appendChild(calendar_entry)
        for tag, value in zip(tags, row):
            element = doc.createElement(tag)
            calendar_entry.appendChild(element)
            element.appendChild(doc.createTextNode(value))
    # render once, then log and write the same serialisation
    pretty = make_pretty_xml(doc.toprettyxml(indent=" ", encoding="UTF-8"))
    _adel_log.log(pretty, 3)
    xml_calendar = open(xml_dir + "/calendar.xml", "a+")
    xml_calendar.write(pretty)
    xml_calendar.close()
    _adel_log.log("xmlParser: ----> calendar.xml created!", 4)
def parse_free_blocks(page_hex_strings, next_free_block_pointer):
    """Walk the free block chain of a b-tree page.

    page_hex_strings        -- page content as a hex string (2 chars per byte)
    next_free_block_pointer -- byte offset of the first free block (0 = none)

    Returns a list of [offset, length] pairs, one per free block.
    """
    # Check whether there are any free blocks on this page
    if (next_free_block_pointer == 0):
        # No free blocks on this page
        return []
    # Parse the free block list
    _adel_log.log("parse_free_blocks: ----> parsing b-tree page free block chain....", 4)
    free_blocks = []
    while (next_free_block_pointer != 0):
        # We have a free block in the chain: 2-byte next pointer followed
        # by a 2-byte length field (offsets doubled: 2 hex chars per byte)
        free_blocks.append([next_free_block_pointer, int(page_hex_strings[((next_free_block_pointer + 2) * 2):((next_free_block_pointer + 4) * 2)], 16)])
        _adel_log.log("parse_free_blocks: OK - append free block tuple to list [offset, length]: %(next_free_block_pointer)s" % vars(), 4)
        next_free_block_pointer = int(page_hex_strings[(next_free_block_pointer * 2):((next_free_block_pointer + 2) * 2)], 16)
    # Return results
    _adel_log.log("parse_free_blocks: OK - returning list of free block pointers: %(free_blocks)s" % vars(), 3)
    _adel_log.log("parse_free_blocks: ----> b-tree page free block chain parsed", 4)
    return free_blocks
def analyzeDBs(file_dir, os_version, xml_dir, device_name, os_version2): config = PhoneConfig("xml/phone_configs.xml", device_name, os_version2) # Call the analysis module _adel_log.log( "analyzeDBs: ----> starting to parse and analyze the databases....", 0) _analyzeDB.phone_info(file_dir, os_version, xml_dir, device_name, config) twitter_dbname_list = _dumpFiles.get_twitter_sqlite_files( file_dir, os_version) _analyzeDB.analyze(file_dir, os_version2, xml_dir, twitter_dbname_list, config) _adel_log.log("analyzeDBs: ----> all databases parsed and analyzed....", 0) # Create report _adel_log.log("createReport: ----> creating report....", 0) _createReport.report(xml_dir) _adel_log.log( "ADEL MAIN: ----> report \033[0;32m" + xml_dir + "/report.xml\033[m created", 0)
def sms_messages_to_xml(xml_dir, sms_list):
    """Write all extracted SMS messages to xml_dir/sms_Messages.xml.

    sms_list -- list of rows, each a 9-element sequence of strings in the
    order id, thread_id, number, person, date, read, type, subject, body.
    """
    _adel_log.log("############ XML OUTPUT GENERATION -> SMS MESSAGES ############ \n", 2)
    # Create the minidom document
    doc = Document()
    xml = doc.createElement("SMS_Messages")
    doc.appendChild(xml)
    # one child element per column, in the fixed order of the sms_list rows;
    # the data-driven loop replaces nine copy-pasted stanzas that also
    # shadowed the builtins "id" and "type"
    tags = ("id", "thread_id", "number", "person", "date", "read", "type", "subject", "body")
    for row in sms_list:
        sms_message = doc.createElement("SMS_Message")
        xml.appendChild(sms_message)
        for tag, value in zip(tags, row):
            element = doc.createElement(tag)
            sms_message.appendChild(element)
            element.appendChild(doc.createTextNode(value))
    # render once, then log and write the same serialisation
    pretty = make_pretty_xml(doc.toprettyxml(indent=" ", encoding="UTF-8"))
    _adel_log.log(pretty, 3)
    xml_sms_messages = open(xml_dir + "/sms_Messages.xml", "a+")
    xml_sms_messages.write(pretty)
    xml_sms_messages.close()
    _adel_log.log("xmlParser: ----> sms_Messages.xml created!", 4)
def calendar_to_xml(xml_dir, calendar_list):
    """Write all calendar entries in *calendar_list* to <xml_dir>/calendar.xml.

    calendar_list: list of 9-element string lists in the column order
    [id, calendarName, title, eventLocation, description, all_day,
    start, end, has_alarm].
    """
    _adel_log.log("############ XML OUTPUT GENERATION -> CALENDAR ENTRIES ############ \n", 2)
    # Tag names in the same order as the columns of each calendar_list entry;
    # replaces the original's per-field copy-paste (which also shadowed `id`).
    field_names = ["id", "calendarName", "title", "eventLocation",
                   "description", "all_day", "start", "end", "has_alarm"]
    # Create the minidom document
    doc = Document()
    xml = doc.createElement("Calendar_Entries")
    doc.appendChild(xml)
    for entry in calendar_list:
        # Create one <Calendar_Entry> element per calendar row
        calendar_entry = doc.createElement("Calendar_Entry")
        xml.appendChild(calendar_entry)
        for tag, value in zip(field_names, entry):
            node = doc.createElement(tag)
            calendar_entry.appendChild(node)
            node.appendChild(doc.createTextNode(value))
    # Print our newly created XML file to the log
    _adel_log.log(make_pretty_xml(doc.toprettyxml(indent=" ", encoding="UTF-8")), 3)
    # create xml file (context manager guarantees the handle is closed)
    with open(xml_dir + "/calendar.xml", "a+") as xml_calendar:
        xml_calendar.write(make_pretty_xml(doc.toprettyxml(indent=" ", encoding="UTF-8")))
    _adel_log.log("xmlParser: ----> calendar.xml created!", 4)
def open_db(file_name):
    # Open the SQLite database file *file_name* for binary reading and cache
    # its geometry in module-level globals (file size, page size, reserved
    # space per page).
    # Returns: the file size in bytes on success, 1 if a database file is
    # already open, 0 if *file_name* does not exist.
    global DB_FILE_NAME
    global DB_FO
    global DB_FO_SET
    global DB_FILESIZE_IN_BYTES
    global DB_PAGESIZE_IN_BYTES
    global DB_RESERVED_SPACE
    # Ensure that there is no file open yet
    # (convention used by this module: DB_FO_SET == 0 means "a file is open")
    if DB_FO_SET == 0:
        _adel_log.log("open_db: ----> WARNING! database file " + str(file_name).split("/")[-1] + " could not be opened, because a file is already open", 2)
        return 1
    # Ensure that the database file exists
    if os.path.exists(file_name):
        DB_FILE_NAME = file_name
        DB_FO = open(DB_FILE_NAME, "rb")
        DB_FO_SET = 0  # mark the database file as open (see convention above)
        DB_FILESIZE_IN_BYTES = os.path.getsize(file_name)
        _adel_log.log("open_db: ----> database file " + str(file_name).split("/")[-1] + " successfully loaded", 3)
        _adel_log.log("open_db: ----> database file size is " + str(DB_FILESIZE_IN_BYTES) + " bytes", 3)
        # quick and dirty hack: retrieve database page size
        # alternatively we could parse the database header and store its values here
        # (big-endian 2-byte value at file offset 16; Python 2 str.encode("hex"))
        DB_FO.seek(16, 0)
        DB_PAGESIZE_IN_BYTES = int(DB_FO.read(2).encode("hex"), 16)
        # quick and dirty hack: retrieve database page reserved space
        # alternatively we could parse the database header and store its values here
        # (1-byte value at file offset 20: reserved bytes at the end of each page)
        DB_FO.seek(20, 0)
        DB_RESERVED_SPACE = int(DB_FO.read(1).encode("hex"), 16)
        return DB_FILESIZE_IN_BYTES
    # File does not exist -> error out with return code 0
    _adel_log.log("open_db: ----> ERROR! could not open database file " + str(file_name).split("/")[-1], 1)
    return 0
def parse_overflow_page_chain(page_hex_string):
    """Concatenate the payload of a chain of SQLite overflow pages.

    page_hex_string: hex string of the first overflow page. Each overflow
    page starts with a 4-byte (8-nibble) big-endian pointer to the next
    overflow page in the chain, 0 if this is the last one; the rest of the
    page is payload. Returns the concatenated payload as a hex string, ""
    on an empty/invalid first page.

    The walk is iterative: the original recursive formulation could hit the
    Python recursion limit on very long chains. Also drops a stray
    `% vars()` that was applied to a log string with no placeholders.
    """
    overflow_page_content = ""
    while True:
        if (len(page_hex_string) == 0):
            _adel_log.log("parse_overflow_page_chain: WARNING! empty hexadecimal page string received", 2)
            return overflow_page_content
        if (len(page_hex_string) < 8):
            _adel_log.log("parse_overflow_page_chain: WARNING! hexadecimal page string is too short: " + str(page_hex_string), 2)
            return overflow_page_content
        # First 8 nibbles: 4-byte pointer to the next overflow page,
        # 00 00 00 00 if no further overflow page exists
        next_overflow_page_number = int(page_hex_string[0:8], 16)
        # Append all payload of this page
        overflow_page_content += page_hex_string[8:]
        _adel_log.log("parse_overflow_page_chain: OK - overflow page parsed", 4)
        if next_overflow_page_number == 0:
            # Reached the last overflow page in the chain
            return overflow_page_content
        _adel_log.log("parse_overflow_page_chain: ----> parsing next overflow page in chain, page number is: %(next_overflow_page_number)s...." % vars(), 4)
        page_hex_string = _sqliteFileHandler.read_page(next_overflow_page_number)
def parse_record(record_hex_string):
    """Parse one SQLite record (header + content) from a hex string.

    Returns the list of decoded column values. Offsets are in nibbles
    (1 byte == 2 hex characters).
    """
    # parse the record header: it starts with a varint giving the total
    # header length in bytes
    _adel_log.log("parse_record: ----> parsing record header....", 4)
    header_length_tuple = _sqliteVarInt.parse_next_var_int(record_hex_string[0:18])
    header_string = record_hex_string[(header_length_tuple[1] * 2):(header_length_tuple[0] * 2)]
    record_header_field_list = _sqliteVarInt.parse_all_var_ints(header_string)
    _adel_log.log("parse_record: OK - record header field list is %(record_header_field_list)s" % vars(), 4)
    # Walk the serial types and cut the matching content out of the record.
    # (The original kept a redundant manual `element` counter while ignoring
    # the loop variable; iterate the serial types directly instead.)
    content_offset = header_length_tuple[0] * 2  # nibble offset of the first content byte
    content_list = []
    for serial_type in record_header_field_list:
        entry_content = parse_content_entry(serial_type, record_hex_string, content_offset)
        content_list.append(entry_content[0])
        content_offset += entry_content[1] * 2
    # Return the record content list
    _adel_log.log("parse_record: OK - returning list of record contents", 4)
    _adel_log.log("parse_record: ----> record header parsed", 4)
    return content_list
def crack(backup_dir):
    """Look up the device's gesture.key screen-lock hash in the rainbow table.

    Reads <backup_dir>/databases/gesture.key, hex-encodes each line and
    queries the RainbowTable in SQLITE_DB for the matching pattern. Results
    are only logged; nothing is returned.
    """
    try:
        # Context managers close the key file and DB connection even on error
        # (the original leaked both), and `except Exception` replaces the
        # bare `except:` while keeping the best-effort behavior.
        with open(backup_dir + "/databases/gesture.key", "rb") as f:
            for line in f:
                lookup_hash = hexlify(line).decode()
                _adel_log.log("Screenlock: ----> Screenlock Hash: \033[0;32m" + lookup_hash + "\033[m\n", 0)
                conn = sqlite3.connect(SQLITE_DB)
                try:
                    cur = conn.cursor()
                    cur.execute("SELECT pattern FROM RainbowTable WHERE hash = ?", (lookup_hash,))
                    result = cur.fetchone()
                finally:
                    conn.close()
                if result:
                    gesture = result[0]
                    _adel_log.log("Screenlock: ----> Screenlock Gesture: \033[0;32m" + gesture + "\033[m\n", 0)
    except Exception:
        _adel_log.log("Screenlock: ----> Can't find gesture in RainbowTable !!!\n", 2)
def crack(backup_dir):
    """Look up the device's gesture.key screen-lock hash in the rainbow table.

    Duplicate of the variant above but with log messages lacking the
    trailing newline — message strings are preserved byte-for-byte.
    """
    try:
        # Context managers close the key file and DB connection even on error
        # (the original leaked both); `except Exception` replaces bare except.
        with open(backup_dir + "/databases/gesture.key", "rb") as f:
            for line in f:
                lookup_hash = hexlify(line).decode()
                _adel_log.log("Screenlock: ----> Screenlock Hash: \033[0;32m" + lookup_hash + "\033[m", 0)
                conn = sqlite3.connect(SQLITE_DB)
                try:
                    cur = conn.cursor()
                    cur.execute("SELECT pattern FROM RainbowTable WHERE hash = ?", (lookup_hash,))
                    result = cur.fetchone()
                finally:
                    conn.close()
                if result:
                    gesture = result[0]
                    _adel_log.log("Screenlock: ----> Screenlock Gesture: \033[0;32m" + gesture + "\033[m", 0)
    except Exception:
        _adel_log.log("Screenlock: ----> Can't find gesture in RainbowTable !!!", 2)
def select_smartphone(self):
    """Select the phone configuration node matching this handset.

    Prefers an exact device/model/os_version match; otherwise falls back to
    the first entry for the same device (with a warning); exits if no entry
    for the device exists at all.
    """
    phones = self.xml_doc.getElementsByTagName("phone")
    # First pass: exact match on device, model and OS version
    for candidate in phones:
        device_ok = candidate.getAttribute("device") == self.device
        model_ok = candidate.getAttribute("model") == self.model
        version_ok = candidate.getAttribute("os_version") == self.os_version
        if device_ok and model_ok and version_ok:
            self.phone = candidate
            _adel_log.log("PhoneConfig: ----> Found right config in the database.", 3)
            return
    # Second pass: settle for the first config of the same device family
    for candidate in phones:
        if candidate.getAttribute("device") != self.device:
            continue
        self.phone = candidate
        _adel_log.log("PhoneConfig: ----> The ADEL configuration for this phone \"" + self.device + " " + candidate.getAttribute("model") + " Android " + candidate.getAttribute("os_version") + "\" is different from the real phone \"" + self.device + " " + self.model + " Android " + self.os_version + "\"! Please check output carefully!", 2)
        return
    # Nothing usable at all -> abort the run
    _adel_log.log("PhoneConfig: ----> No suitable config found for " + str(self.device), 2)
    sys.exit(1)
def parse_table_btree_page(page_hex_string, page_offset):
    """Dispatch a b-tree page to the matching parser based on its type byte.

    page_offset is in bytes; the page type is the first byte of the page
    header. Returns the parser's result, or [] for an unknown page type.
    """
    # The first header byte encodes the page type
    page_type = int(page_hex_string[(page_offset * 2):((page_offset + 1) * 2)], 16)
    # Page types 2 (index b-tree interior) and 10 (index b-tree leaf) are
    # deliberately ignored for now:
    # TODO: define and implement index cell parsers (do the methods for
    # TABLES also work for INDICES?)
    if (page_type == 5):
        # table b-tree interior page
        return parse_table_btree_interior_page(page_hex_string, page_offset)
    if (page_type == 13):
        # Table b-tree leaf page
        return parse_table_btree_leaf_page(page_hex_string, page_offset)
    # Unknown page type: dump diagnostics and return an empty result
    _adel_log.log("parse_table_btree_page: ERROR - invalid page type in table b-tree page header", 1)
    _adel_log.log(" Page header was said to start at page offset: " + str(page_offset), 1)
    _adel_log.log(" Printing page content....", 1)
    _adel_log.log(page_hex_string, 1)
    return []
def smartphone_info_to_xml(xml_dir, smartphone_infolist):
    """Write the smartphone metadata in *smartphone_infolist* to <xml_dir>/info.xml.

    smartphone_infolist: 7 strings in the order [account_name, account_type,
    imsi, android_id, handheld_id, model, android_version].
    """
    _adel_log.log(
        "############ XML OUTPUT GENERATION -> SMARTPHONE INFOS ############ \n", 2)
    # Tag names in the same order as the entries of smartphone_infolist;
    # replaces the original's per-field copy-paste.
    field_names = ["account_name", "account_type", "imsi", "android_id",
                   "handheld_id", "model", "android_version"]
    # Create the minidom document
    doc = Document()
    info = doc.createElement("smartphone_info")
    doc.appendChild(info)
    for tag, value in zip(field_names, smartphone_infolist):
        node = doc.createElement(tag)
        info.appendChild(node)
        node.appendChild(doc.createTextNode(value))
    # Print our newly created XML file to the log
    _adel_log.log(
        make_pretty_xml(doc.toprettyxml(indent=" ", encoding="UTF-8")), 3)
    # create xml file (context manager guarantees the handle is closed)
    with open(xml_dir + "/info.xml", "a+") as xml_info:
        xml_info.write(
            make_pretty_xml(doc.toprettyxml(indent=" ", encoding="UTF-8")))
    _adel_log.log("xmlParser: ----> info.xml created!", 4)
def call_log_to_xml(xml_dir, callLogList):
    """Write all call log entries in *callLogList* to <xml_dir>/call_logs.xml.

    callLogList: list of 6-element string lists in the column order
    [id, number, date, duration, type, name].
    """
    _adel_log.log(
        "############ XML OUTPUT GENERATION -> CALL LOG ENTRIES ############ \n", 2)
    # Tag names in the same order as the columns of each callLogList entry;
    # replaces the original's per-field copy-paste, which also shadowed the
    # builtins `id` and `type`.
    field_names = ["id", "number", "date", "duration", "type", "name"]
    # Create the minidom document
    doc = Document()
    xml = doc.createElement("Call_Log_Entries")
    doc.appendChild(xml)
    for entry in callLogList:
        # Create one <Call_Log_Entry> element per call
        call_log_entry = doc.createElement("Call_Log_Entry")
        xml.appendChild(call_log_entry)
        for tag, value in zip(field_names, entry):
            node = doc.createElement(tag)
            call_log_entry.appendChild(node)
            node.appendChild(doc.createTextNode(value))
    # Print our newly created XML file to the log
    _adel_log.log(
        make_pretty_xml(doc.toprettyxml(indent=" ", encoding="UTF-8")), 3)
    # create xml file (context manager guarantees the handle is closed)
    with open(xml_dir + "/call_logs.xml", "a+") as xml_callLogs:
        xml_callLogs.write(
            make_pretty_xml(doc.toprettyxml(indent=" ", encoding="UTF-8")))
    _adel_log.log("xmlParser: ----> call_logs.xml created!", 4)
def get_twitter_sqlite_files(backup_dir, os_version):
    """Pull all Twitter SQLite databases from the device via adb.

    Lists /data/data/com.twitter.android/databases/ on the device, pulls
    every *.db file into *backup_dir*, logs its SHA-256 digest, and returns
    the list of pulled database file names. Best-effort: any failure logs a
    message and the remaining files are still attempted.
    """
    _adel_log.log("\n############ DUMP TWITTER SQLite FILES ############\n", 2)
    twitterdbnamelist = []
    try:
        # Iterate over the full directory listing instead of the original's
        # hard-coded first six entries (range(6)).
        listing = subprocess.Popen(['adb', 'shell', 'ls', '/data/data/com.twitter.android/databases/'],
                                   stdout=subprocess.PIPE).communicate()[0].split()
        for file_name in listing:
            try:
                if ".db" not in file_name:
                    continue
                twitterdbnamelist.append(file_name)
                twitter_db = '/data/data/com.twitter.android/databases/' + file_name
                twitter_db_name = subprocess.Popen(['adb', 'pull', twitter_db, backup_dir],
                                                   stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
                twitter_db_name.wait()
                # BUG FIX: the original hashed the path STRING
                # (backup_dir + file_name, even missing the "/"), which is
                # forensically meaningless -- hash the pulled file's content.
                with open(backup_dir + "/" + file_name, "rb") as pulled:
                    digest = hashlib.sha256(pulled.read()).hexdigest()
                _adel_log.log(file_name + " -> " + twitter_db_name.communicate()[1].split("(")[1].split(")")[0] + " -> " + digest, 3)
            except Exception:
                # keep going with the next directory entry
                continue
    except Exception:
        _adel_log.log("dumpDBs: ----> twitter.db doesn't exist!!", 2)
    return twitterdbnamelist
def call_log_to_xml(xml_dir, callLogList):
    """Write all call log entries in *callLogList* to <xml_dir>/call_logs.xml.

    callLogList: list of 6-element string lists in the column order
    [id, number, date, duration, type, name].
    """
    _adel_log.log("############ XML OUTPUT GENERATION -> CALL LOG ENTRIES ############ \n", 2)
    # Tag names in the same order as the columns of each callLogList entry;
    # replaces the original's per-field copy-paste, which also shadowed the
    # builtins `id` and `type`.
    field_names = ["id", "number", "date", "duration", "type", "name"]
    # Create the minidom document
    doc = Document()
    xml = doc.createElement("Call_Log_Entries")
    doc.appendChild(xml)
    for entry in callLogList:
        # Create one <Call_Log_Entry> element per call
        call_log_entry = doc.createElement("Call_Log_Entry")
        xml.appendChild(call_log_entry)
        for tag, value in zip(field_names, entry):
            node = doc.createElement(tag)
            call_log_entry.appendChild(node)
            node.appendChild(doc.createTextNode(value))
    # Print our newly created XML file to the log
    _adel_log.log(make_pretty_xml(doc.toprettyxml(indent=" ", encoding="UTF-8")), 3)
    # create xml file (context manager guarantees the handle is closed)
    with open(xml_dir + "/call_logs.xml", "a+") as xml_callLogs:
        xml_callLogs.write(make_pretty_xml(doc.toprettyxml(indent=" ", encoding="UTF-8")))
    _adel_log.log("xmlParser: ----> call_logs.xml created!", 4)
def smartphone_info_to_xml(xml_dir, smartphone_infolist):
    """Write the smartphone metadata in *smartphone_infolist* to <xml_dir>/info.xml.

    smartphone_infolist: 7 strings in the order [account_name, account_type,
    imsi, android_id, handheld_id, model, android_version].
    """
    _adel_log.log("############ XML OUTPUT GENERATION -> SMARTPHONE INFOS ############ \n", 2)
    # Tag names in the same order as the entries of smartphone_infolist;
    # replaces the original's per-field copy-paste.
    field_names = ["account_name", "account_type", "imsi", "android_id",
                   "handheld_id", "model", "android_version"]
    # Create the minidom document
    doc = Document()
    info = doc.createElement("smartphone_info")
    doc.appendChild(info)
    for tag, value in zip(field_names, smartphone_infolist):
        node = doc.createElement(tag)
        info.appendChild(node)
        node.appendChild(doc.createTextNode(value))
    # Print our newly created XML file to the log
    _adel_log.log(make_pretty_xml(doc.toprettyxml(indent=" ", encoding="UTF-8")), 3)
    # create xml file (context manager guarantees the handle is closed)
    with open(xml_dir + "/info.xml", "a+") as xml_info:
        xml_info.write(make_pretty_xml(doc.toprettyxml(indent=" ", encoding="UTF-8")))
    _adel_log.log("xmlParser: ----> info.xml created!", 4)
def read_page(page_number):
    # Return the raw content of database page *page_number* (1-based, per the
    # SQLite file format) as a hexadecimal string, or "" on error.
    # Requires open_db() to have been called first (module convention:
    # DB_FO_SET == 0 means the database file is open).
    global DB_FO
    global DB_FO_SET
    global DB_PAGESIZE_IN_BYTES
    if (page_number < 1):
        # invalid page number: SQLite pages are numbered starting at 1
        _adel_log.log("read_page: ----> ERROR! invalid page number received, cannot read database page: " + str(page_number), 1)
        return ""
    if (DB_FO_SET == 0):
        # database file open
        fileOffset = ((page_number - 1) * DB_PAGESIZE_IN_BYTES)
        DB_FO.seek(fileOffset, 0)
        # transform to hex string for log output
        fileOffset = hex(fileOffset)
        _adel_log.log("read_page: ----> database page " + str(page_number) + " read, file offset: " + str(fileOffset), 3)
        # Python 2: str.encode("hex") yields the page bytes as a hex string
        return DB_FO.read(DB_PAGESIZE_IN_BYTES).encode("hex")
    else:
        # database file not open
        _adel_log.log("read_page: ----> ERROR! page could not be read, because database file is not open, call open_db() first", 1)
        return ""
def determine_serial_type_content_size(serial_type):
    """Return the content length in bytes for a string/BLOB serial type.

    SQLite record format: serial types >= 12 encode variable-length values;
    even types have length (N - 12) / 2, odd types (N - 13) / 2.
    NOTE(review): the log labels below call even types STRING and odd types
    BLOB, which looks swapped relative to the SQLite file format spec (even
    = BLOB, odd = string) -- lengths are unaffected; confirm before relying
    on the labels. Messages are kept unchanged here.
    """
    if serial_type < 12:
        _adel_log.log("determine_serial_type_content_size: WARNING! invalid serial type (must be >= 12): %(serial_type)s" % vars(), 2)
        # at least one byte has an invalid serial type, thus return 1 so the
        # program has a chance to continue with the next byte
        return 1
    if serial_type % 2 == 0:
        # even serial type; floor division keeps the result an int under
        # Python 3 as well (true division `/` would yield a float there)
        serialTypeLength = (serial_type - 12) // 2
        _adel_log.log("determine_serial_type_content_size: OK - serial type is a STRING of length: %(serialTypeLength)s" % vars(), 4)
        return serialTypeLength
    # odd serial type
    serialTypeLength = (serial_type - 13) // 2
    _adel_log.log("determine_serial_type_content_size: OK - serial type is a BLOB of length: %(serialTypeLength)s" % vars(), 4)
    return serialTypeLength
def createMap(backup_dir, cellPositionList, wifiPositionList, picturePositionList, twitterPositionList, gMapsPositionList, browserPositionList):
    """Generate <backup_dir>/map.html plotting every recovered position.

    Every *PositionList entry is [label, latitude, longitude, accuracy,
    timestamp] (all strings). Cell and wifi entries whose latitude is
    "0.000000" are treated as invalid and skipped.

    Fixes two bugs in the original:
    * the cell loop tested cellPositionList[0][1] (always the FIRST entry's
      latitude) instead of the current entry's;
    * the generated JavaScript iterated "browsersList", which is undefined
      (the data object is named "browserList"), so browser positions never
      rendered.
    """
    backup_dir = backup_dir.split("/")[0]
    mapFile = open(backup_dir + "/map.html", "a+")
    mapFile.write('''<!DOCTYPE html>
<html>
<head>
<meta name="viewport" content="initial-scale=1.0, user-scalable=no" />
<style type="text/css">
html { height: 100% }
body { height: 100%; margin: 0px; padding: 0px }
#map_canvas { height: 100% }
</style>
<script type="text/javascript" src="http://maps.google.com/maps/api/js?sensor=false">
</script>
<script type="text/javascript">
var cellList = {};\n''')
    for i in range(0, len(cellPositionList)):
        # skip invalid fixes (latitude 0.000000); BUG FIX: test entry i,
        # not entry 0
        if cellPositionList[i][1] == "0.000000":
            continue
        title = "'Cell-ID: " + cellPositionList[i][0] + " -> Time: " + cellPositionList[i][4] + "'"
        mapFile.write("cellList['" + str(i) + "'] = {center: new google.maps.LatLng(" + cellPositionList[i][1] + ", " + cellPositionList[i][2] + "), accuracy: " + cellPositionList[i][3] + ", title: " + title + "};\n")
    mapFile.write('''var wifiList = {};\n''')
    for j in range(0, len(wifiPositionList)):
        if wifiPositionList[j][1] == "0.000000":
            continue
        title = "'Wifi-MAC: " + wifiPositionList[j][0] + " -> Time: " + wifiPositionList[j][4] + "'"
        mapFile.write("wifiList['" + str(j) + "'] = {center: new google.maps.LatLng(" + wifiPositionList[j][1] + ", " + wifiPositionList[j][2] + "), accuracy: " + wifiPositionList[j][3] + ", title: " + title + "};\n")
    mapFile.write('''var exifList = {};\n''')
    for k in range(0, len(picturePositionList)):
        title = "'Picture: " + picturePositionList[k][0] + " -> Time: " + picturePositionList[k][4] + "'"
        mapFile.write("exifList['" + str(k) + "'] = {center: new google.maps.LatLng(" + picturePositionList[k][1] + ", " + picturePositionList[k][2] + "), accuracy: " + picturePositionList[k][3] + ", title: " + title + "};\n")
    mapFile.write('''var twitterList = {};\n''')
    for l in range(0, len(twitterPositionList)):
        title = "'Message: " + twitterPositionList[l][0] + " -> Time: " + twitterPositionList[l][4] + "'"
        mapFile.write("twitterList['" + str(l) + "'] = {center: new google.maps.LatLng(" + twitterPositionList[l][1] + ", " + twitterPositionList[l][2] + "), accuracy: " + twitterPositionList[l][3] + ", title: " + title + "};\n")
    mapFile.write('''var gMapsList = {};\n''')
    for m in range(0, len(gMapsPositionList)):
        title = "'Destination: " + gMapsPositionList[m][0] + " -> Time: " + gMapsPositionList[m][4] + "'"
        mapFile.write("gMapsList['" + str(m) + "'] = {center: new google.maps.LatLng(" + gMapsPositionList[m][1] + ", " + gMapsPositionList[m][2] + "), accuracy: " + gMapsPositionList[m][3] + ", title: " + title + "};\n")
    mapFile.write('''var browserList = {};\n''')
    for n in range(0, len(browserPositionList)):
        title = "'" + browserPositionList[n][0] + " -> Time: " + browserPositionList[n][4] + "'"
        mapFile.write("browserList['" + str(n) + "'] = {center: new google.maps.LatLng(" + browserPositionList[n][1] + ", " + browserPositionList[n][2] + "), accuracy: " + browserPositionList[n][3] + ", title: " + title + "};\n")
    # Static map-initialization script; the browser loop now correctly
    # references "browserList" (was "browsersList").
    mapFile.write('''function initialize() {
var mapOptions = {zoom: 7, center: new google.maps.LatLng(51.163375, 10.447683), mapTypeId: google.maps.MapTypeId.ROADMAP};
var map = new google.maps.Map(document.getElementById("map_canvas"), mapOptions);
for (var cell in cellList) { var accuracy = {strokeColor: "#0000FF", strokeOpacity: 0.7, strokeWeight: 2, fillColor: "#0000FF", fillOpacity: 0.15, map: map, center: cellList[cell].center, radius: cellList[cell].accuracy}; var marker = new google.maps.Marker({position: cellList[cell].center, map: map, title: cellList[cell].title, icon: '../xml/cell.png'}); cityCircle = new google.maps.Circle(accuracy); }
for (var wifi in wifiList) { var accuracy = {strokeColor: "#9e7151", strokeOpacity: 0.7, strokeWeight: 2, fillColor: "#9e7151", fillOpacity: 0.15, map: map, center: wifiList[wifi].center, radius: wifiList[wifi].accuracy}; var marker = new google.maps.Marker({position: wifiList[wifi].center, map: map, title: wifiList[wifi].title, icon: '../xml/wifi.png'}); cityCircle = new google.maps.Circle(accuracy); }
for (var exif in exifList) { var accuracy = {strokeColor: "#076e33", strokeOpacity: 0.7, strokeWeight: 2, fillColor: "#09a133", fillOpacity: 0.15, map: map, center: exifList[exif].center, radius: exifList[exif].accuracy}; var marker = new google.maps.Marker({position: exifList[exif].center, map: map, title: exifList[exif].title, icon: '../xml/jpg.png'}); cityCircle = new google.maps.Circle(accuracy); }
for (var twitter in twitterList) { var accuracy = {strokeColor: "#383838", strokeOpacity: 0.7, strokeWeight: 2, fillColor: "#a8a8a8", fillOpacity: 0.15, map: map, center: twitterList[twitter].center, radius: twitterList[twitter].accuracy}; var marker = new google.maps.Marker({position: twitterList[twitter].center, map: map, title: twitterList[twitter].title, icon: '../xml/twitter.png'}); cityCircle = new google.maps.Circle(accuracy); }
for (var gMap in gMapsList) { var accuracy = {strokeColor: "#ffffff", strokeOpacity: 0.8, strokeWeight: 2, fillColor: "#ffffff", fillOpacity: 0.3, map: map, center: gMapsList[gMap].center, radius: gMapsList[gMap].accuracy}; var marker = new google.maps.Marker({position: gMapsList[gMap].center, map: map, title: gMapsList[gMap].title, icon: '../xml/g_maps.png'}); cityCircle = new google.maps.Circle(accuracy); }
for (var browser in browserList) { var accuracy = {strokeColor: "#000000", strokeOpacity: 0.7, strokeWeight: 2, fillColor: "#000000", fillOpacity: 0.15, map: map, center: browserList[browser].center, radius: browserList[browser].accuracy}; var marker = new google.maps.Marker({position: browserList[browser].center, map: map, title: browserList[browser].title, icon: '../xml/g_maps.png'}); cityCircle = new google.maps.Circle(accuracy); }
}
</script>
</head>
<body onload="initialize()">
<div id="map_canvas" style="width:100%; height:100%"></div>
</body>
</html>''')
    mapFile.close()
    _adel_log.log("LocationInfo: ----> Location map \033[0;32m" + backup_dir + "/map.html\033[m created", 0)
def twitter_to_xml (xml_dir, twitter_list, tweet_list):
    # Write the Twitter user entries in *twitter_list* (first entry = the
    # account owner) and their tweets from *tweet_list* (a mapping keyed by
    # int user id; each tweet is [message, source, source_url, created]) to
    # <xml_dir>/twitter_<owner_user_id>.xml.
    _adel_log.log("############ XML OUTPUT GENERATION -> TWITTER ENTRIES ############ \n", 2)
    # Create the minidom document
    doc = Document()
    twitter_entries = doc.createElement("Twitter_Entries")
    doc.appendChild(twitter_entries)
    ## Entry generated is User_ID, User_Name, Real_Name, description, location (if given), profile_created, updated, followers, friends
    # Column indices of each twitter_list entry
    USER_ID = 0
    USER_NAME = 1
    REAL_NAME = 2
    DESCRIPTION = 3
    LOCATION = 4
    PROFILE_CREATED = 5
    UPDATED = 6
    FOLLOWERS = 7
    FRIENDS = 8
    for i in range (0,len(twitter_list)):
        # Index 0 holds the account owner; everybody else is a plain user
        if i == 0:
            user_entry_node = doc.createElement("Twitter_Account_Owner")
        else:
            user_entry_node = doc.createElement("User_Entry")
        twitter_entries.appendChild(user_entry_node)
        user_id_node = doc.createElement("User_id")
        user_entry_node.appendChild(user_id_node)
        user_id_node_text = doc.createTextNode(twitter_list[i][USER_ID])
        user_id_node.appendChild(user_id_node_text)
        user_name_node = doc.createElement("User_Name")
        user_entry_node.appendChild(user_name_node)
        user_name_node_text = doc.createTextNode(twitter_list[i][USER_NAME])
        user_name_node.appendChild(user_name_node_text)
        real_name_node = doc.createElement("Real_Name")
        user_entry_node.appendChild(real_name_node)
        real_name_node_text = doc.createTextNode(twitter_list[i][REAL_NAME])
        real_name_node.appendChild(real_name_node_text)
        description_node = doc.createElement("Description")
        user_entry_node.appendChild(description_node)
        description_node_text = doc.createTextNode(twitter_list[i][DESCRIPTION])
        description_node.appendChild(description_node_text)
        location_node = doc.createElement("Location")
        user_entry_node.appendChild(location_node)
        location_node_text = doc.createTextNode(twitter_list[i][LOCATION])
        location_node.appendChild(location_node_text)
        profile_created_node = doc.createElement("Profile_created")
        user_entry_node.appendChild(profile_created_node)
        profile_created_node_text = doc.createTextNode(twitter_list[i][PROFILE_CREATED])
        profile_created_node.appendChild(profile_created_node_text)
        updated_node = doc.createElement("Updated")
        user_entry_node.appendChild(updated_node)
        updated_note_text = doc.createTextNode(twitter_list[i][UPDATED])
        updated_node.appendChild(updated_note_text)
        followers_node = doc.createElement("Followers")
        user_entry_node.appendChild(followers_node)
        followers_node_text = doc.createTextNode(twitter_list[i][FOLLOWERS])
        followers_node.appendChild(followers_node_text)
        friends_node = doc.createElement("Friends")
        user_entry_node.appendChild(friends_node)
        friends_node_text = doc.createTextNode(twitter_list[i][FRIENDS])
        friends_node.appendChild(friends_node_text)
        # Attach this user's tweets, if any were recovered
        user_id = int(twitter_list[i][USER_ID])
        #print user_id
        if user_id in tweet_list:
            tweets = tweet_list[user_id]
            tweets_node = doc.createElement("Tweets")
            user_entry_node.appendChild(tweets_node)
            for j in range (0,len(tweets)):
                tweet_node = doc.createElement("Tweet")
                tweets_node.appendChild(tweet_node)
                data_node = doc.createElement("Tweet_created")
                tweet_node.appendChild(data_node)
                data_node_text = doc.createTextNode(tweets[j][3])
                data_node.appendChild(data_node_text)
                message_node = doc.createElement("Message")
                tweet_node.appendChild(message_node)
                message_node_text = doc.createTextNode(tweets[j][0])
                message_node.appendChild(message_node_text)
                source_node = doc.createElement("Source")
                tweet_node.appendChild(source_node)
                source_node_text = doc.createTextNode(tweets[j][1])
                source_node.appendChild(source_node_text)
                source_url_node = doc.createElement("Source_Url")
                tweet_node.appendChild(source_url_node)
                source_url_node_text = doc.createTextNode(tweets[j][2])
                source_url_node.appendChild(source_url_node_text)
    # Print our newly created XML files to Log
    _adel_log.log(make_pretty_xml(doc.toprettyxml(indent=" ", encoding="UTF-8")), 3)
    # Create xml file, named after the account owner's user id
    # (NOTE(review): raises IndexError if twitter_list is empty)
    twitter_xml_name = "twitter_" + twitter_list[0][USER_ID] + ".xml"
    xml_twitter = open(xml_dir + "/" + twitter_xml_name, "a+")
    xml_twitter.write(make_pretty_xml(doc.toprettyxml(indent=" ", encoding="UTF-8")))
    xml_twitter.close()
    _adel_log.log("xmlParser: ----> " + twitter_xml_name + " created!", 4)
def facebook_to_xml (xml_dir, UserDict, FriendsList, ConnList):
    # Write the Facebook friends in *FriendsList* to <xml_dir>/facebook.xml.
    # UserDict (account owner data, keys documented below) and ConnList are
    # currently not written out.
    _adel_log.log("############ XML OUTPUT GENERATION -> FACEBOOK ENTRIES ############ \n", 2)
    # Create the minidom document
    doc = Document()
    fb_entries = doc.createElement("Facebook_Entries")
    doc.appendChild(fb_entries)
    # UserDict is a dictionary structure containing the FB_user informations. Following keys are available:
    # uid -> Facebook User ID
    # secret \
    # access token \
    # session_key --> these three seem to be hash values for cryptographic purpose
    # first_name
    # last_name -> as given in the account
    # name -> screen name for friendslists (?)
    # username -> loginname for FB account
    # machine_id -> another hash to pinpoint used device (?)
    # pic_square -> Link to account picture
    # Entry generated for FriendsList is User_ID, Last_Name, First_Name,
    # Birthday, E-mail (if given)
    # Column indices of each FriendsList entry
    USER_ID = 0
    NAME = 1
    BIRTHDAY = 2
    E_MAIL = 3
    for i in range (0, len(FriendsList)):
        user_entry_node = doc.createElement("Friends_Entry")
        fb_entries.appendChild(user_entry_node)
        user_id_node = doc.createElement("Facebook_User_id")
        user_entry_node.appendChild(user_id_node)
        user_id_note_text = doc.createTextNode(FriendsList[i][USER_ID])
        user_id_node.appendChild(user_id_note_text)
        user_name_node = doc.createElement("Friends_Name")
        user_entry_node.appendChild(user_name_node)
        user_name_node_text = doc.createTextNode(FriendsList[i][NAME])
        user_name_node.appendChild(user_name_node_text)
        birthday_note = doc.createElement("Birthday")
        user_entry_node.appendChild(birthday_note)
        birthday_note_text = doc.createTextNode(FriendsList[i][BIRTHDAY])
        birthday_note.appendChild(birthday_note_text)
        email_node = doc.createElement("E-Mail_Adress")
        user_entry_node.appendChild(email_node)
        email_node_text = doc.createTextNode(FriendsList[i][E_MAIL])
        email_node.appendChild(email_node_text)
        # Disabled copy of the tweet-export logic from twitter_to_xml, kept
        # as a no-op string literal exactly as in the original:
        """ user_id = int(TwitterList[i][USER_ID]) #print user_id if user_id in TweetList: tweets = TweetList[user_id] tweetsNode = doc.createElement("Tweets") user_entry_node.appendChild(tweetsNode) for j in range (0,len(tweets)): tweetNode = doc.createElement("Tweet") tweetsNode.appendChild(tweetNode) dateNode = doc.createElement("Tweet_created") tweetNode.appendChild(dateNode) dateNode_text = doc.createTextNode(tweets[j][3]) dateNode.appendChild(dateNode_text) messageNode = doc.createElement("Message") tweetNode.appendChild(messageNode) messageNode_text = doc.createTextNode(str(tweets[j][0])) messageNode.appendChild(messageNode_text) sourceNode = doc.createElement("Source") tweetNode.appendChild(sourceNode) sourceNode_text = doc.createTextNode(tweets[j][1]) sourceNode.appendChild(sourceNode_text) sourceUrlNode = doc.createElement("Source_Url") tweetNode.appendChild(sourceUrlNode) sourceUrlNode_text = doc.createTextNode(tweets[j][2]) sourceUrlNode.appendChild(sourceUrlNode_text) """
    # Print our newly created XML files to Log
    _adel_log.log(make_pretty_xml(doc.toprettyxml(indent=" ", encoding="UTF-8")), 3)
    # Create xml file
    xml_fb = open(xml_dir + "/facebook.xml", "a+")
    xml_fb.write(make_pretty_xml(doc.toprettyxml(indent=" ", encoding="UTF-8")))
    xml_fb.close()
    _adel_log.log("xmlParser: ----> facebook.xml created!", 4)
def contacts_to_xml(xml_dir, contactsList):
    """Write the extracted contact records to <xml_dir>/contacts.xml.

    contactsList is a list of contact records; each record is a sequence of
    13 string fields in the fixed positional order given by field_tags
    below. The generated XML is also echoed to the log.
    """
    _adel_log.log("############ XML OUTPUT GENERATION -> CONTACTS ############ \n", 2)
    # One XML tag per positional field of a contact record
    # (list index == field index in the record)
    field_tags = [
        "id",                   # 0
        "photo_id",             # 1
        "times_contacted",      # 2
        "last_time_contacted",  # 3
        "starred",              # 4
        "number",               # 5
        "display_name",         # 6
        "lastname",             # 7
        "firstname",            # 8
        "company",              # 9
        "email",                # 10
        "url",                  # 11
        "address",              # 12
    ]
    # Create the minidom document
    doc = Document()
    xml = doc.createElement("contacts")
    doc.appendChild(xml)
    for record in contactsList:
        # Create the <contact> element with one child node per field.
        # The original code had 13 copy-pasted stanzas; this loop produces
        # the identical document. zip() also tolerates short records instead
        # of raising IndexError.
        contact = doc.createElement("contact")
        xml.appendChild(contact)
        for tag, value in zip(field_tags, record):
            field_node = doc.createElement(tag)
            contact.appendChild(field_node)
            field_node.appendChild(doc.createTextNode(value))
    # Print our newly created XML files to Log
    _adel_log.log(make_pretty_xml(doc.toprettyxml(indent=" ", encoding="UTF-8")), 3)
    # create xml file
    xml_contacts = open(xml_dir + "/contacts.xml", "a+")
    xml_contacts.write(make_pretty_xml(doc.toprettyxml(indent=" ", encoding="UTF-8")))
    xml_contacts.close()
    _adel_log.log("xmlParser: ----> contacts.xml created!", 4)
def parse_content_entry(serial_type, record_hex_string, content_offset):
    """Decode one record value of the given SQLite serial type.

    record_hex_string is the record payload as a hex string (2 characters
    per byte); content_offset is the offset of the value IN NIBBLES.

    Returns [content, size_in_bytes], or None for a negative serial type.
    """
    # initial checks
    if serial_type < 0:
        _adel_log.log(
            "getEntryContent: WARNING! invalid serial type (must be >= 0): %(serial_type)s"
            % vars(), 2)
        return None
    _adel_log.log(
        "getEntryContent: ----> retrieving serial type content at relative offset: %(content_offset)s...."
        % vars(), 4)
    # Initialise result list
    entry_content_list = []
    if serial_type == 0:
        # Defined as NULL, zero bytes in length
        entry_content_list.append(None)
        entry_content_list.append(0)
        _adel_log.log(
            "getEntryContent: OK - serial type is: NULL, zero bytes in length", 4)
        return entry_content_list
    # Serial types 1-6 are big-endian twos-complement integers that differ
    # only in stored width; map each to (content size in bytes, log text).
    # Note: serial type 5 stores 6 bytes and type 6 stores 8 bytes.
    int_types = {
        1: (1, "8-bit twos-complement integer"),
        2: (2, "Big-endian 16-bit twos-complement integer"),
        3: (3, "Big-endian 24-bit twos-complement integer"),
        4: (4, "Big-endian 32-bit twos-complement integer"),
        5: (6, "Big-endian 48-bit twos-complement integer"),
        6: (8, "Big-endian 64-bit twos-complement integer"),
    }
    if serial_type in int_types:
        size_in_bytes, description = int_types[serial_type]
        entryContent = _helpersBinaryOperations.twos_complement_to_int(
            int(record_hex_string[(content_offset):(content_offset + size_in_bytes * 2)], 16),
            size_in_bytes * 8)
        entry_content_list.append(entryContent)
        entry_content_list.append(size_in_bytes)
        _adel_log.log(
            "getEntryContent: OK - serial type is: %s: %s" % (description, entryContent), 4)
        return entry_content_list
    if serial_type == 7:
        # Reinterpret the 8 content bytes as an IEEE 754 double; pack/unpack
        # both use native order, so the round-trip preserves the bit pattern
        entryContent = struct.unpack(
            'd',
            struct.pack(
                'Q',
                int(record_hex_string[(content_offset):(content_offset + 16)], 16)))[0]
        entry_content_list.append(entryContent)
        entry_content_list.append(8)
        _adel_log.log(
            "getEntryContent: OK - serial type is: Big-endian IEEE 754-2008 64-bit floating point number: %(entryContent)s"
            % vars(), 4)
        return entry_content_list
    if serial_type == 8:
        # Integer constant 0 (only schema format > 4), zero bytes in length
        entry_content_list.append(0)
        entry_content_list.append(0)
        _adel_log.log(
            "getEntryContent: OK - serial type is an integer constant: 0, zero bytes in length", 4)
        return entry_content_list
    if serial_type == 9:
        # Integer constant 1 (only schema format > 4), zero bytes in length
        entry_content_list.append(1)
        entry_content_list.append(0)
        _adel_log.log(
            "getEntryContent: OK - serial type is an integer constant: 1, zero bytes in length", 4)
        return entry_content_list
    if serial_type in (10, 11):
        # Not used, reserved for expansion
        _adel_log.log(
            "getEntryContent: WARNING! invalid serial type (not used, reserved for expansion): %(serial_type)s"
            % vars(), 2)
        entry_content_list.append(None)
        entry_content_list.append(0)
        return entry_content_list
    # serial_type >= 12: either a STRING or a BLOB
    entrySize = determine_serial_type_content_size(serial_type)
    entryContent = record_hex_string[(content_offset):(content_offset + (entrySize * 2))]
    # build return list
    entry_content_list.append(
        _helpersStringOperations.hexstring_to_ascii(entryContent))
    entry_content_list.append(entrySize)
    return entry_content_list
def parse_db(file_name):
    """Parse an sqlite3 database file and return its table contents.

    Each element of the returned list describes one CREATE TABLE statement
    found in the schema: the first entry is the parsed column definition
    list (from parse_sql_statement_params), followed by one list per data
    row. Returns [] when the file cannot be opened, the first page cannot
    be read, or the text encoding is unsupported.
    """
    global DB_FILE_SIZE_IN_BYTES
    global ROW_ID_COLUMN
    _adel_log.log("\n############ SQLite PARSER -> " + file_name + " ############ \n", 2)
    _adel_log.log("parse_db: ----> parsing sqlite3 database file....", 3)
    # Open the database
    DB_FILE_SIZE_IN_BYTES = _sqliteFileHandler.open_db(file_name)
    if DB_FILE_SIZE_IN_BYTES == 0:
        # file could not be opened correctly
        return []
    # Read first page of database file
    first_page_hex_string = _sqliteFileHandler.read_page(1)
    # ensure that read page could retrieve an existing page
    if (first_page_hex_string == ""):
        _adel_log.log("parse_db: ERROR - cannot read first page of database", 1)
        return []
    # Parse the database header on the first page (first 100 bytes in the database file)
    parse_db_header(first_page_hex_string)
    # encodings > 1 are the UTF-16 variants -- not supported here
    if HEADER_DATABASE_TEXT_ENCODING > 1:
        _adel_log.log("parse_db: ERROR - database text encoding " + str(HEADER_DATABASE_TEXT_ENCODING) + " not supported in this version of FSP", 1)
        return []
    # Parse database schema (first page of the database file is root b-tree page for the schema btree)
    # Database schema is stored in a well defined way (sqlite master table)
    # CREATE TABLE sqlite_master(
    #     type text,     # must be one of the following: ['table', 'index', 'view', 'trigger']
    #     name text,
    #     tbl_name text,
    #     rootpage integer,
    #     sql text
    # );
    _adel_log.log("\nparseDB: ----> parsing sqlite3 database SCHEMA....", 3)
    db_schemata = _sqlitePageParser.parse_table_btree_page(first_page_hex_string, 100) # 100 bytes database file header
    _adel_log.log("parse_db: ----> sqlite3 database SCHEMA parsed", 3)
    # Initialize the resulting content list
    result_list = []
    final_list = []
    # loop through all schemata of the database
    for db_schema in db_schemata:
        if len(db_schema) != 5 + 1: # +1 due to manually added leading rowID
            _adel_log.log("parse_db: WARNING! invalid length of database schema statement entry detected: ", 2)
            _adel_log.log(str(db_schema), 2)
            continue
        # Reset result list for new element
        result_list = []
        # Parse this database element (table, index, view or trigger)
        if (_helpersStringOperations.starts_with_string(str(db_schema[1]), "TABLE") == 0):
            # PARSE TABLE STATEMENT
            # Ensure that we treat a valid schema
            db_schemata_statement = db_schema[len(db_schema) - 1]
            if ((db_schemata_statement == None) or (db_schemata_statement == "")):
                _adel_log.log("parse_db: WARNING! missing database schema statement entry detected, printing schema statement:", 2)
                _adel_log.log(str(db_schema), 3)
                continue
            sql_statement = (db_schema[5]) # db_schema[5] is expected to be the "sql text" as defined in sqlite_master
            _adel_log.log("\nparseDB: ----> parsing new database structure with SQL statement:", 3)
            _adel_log.log(str(sql_statement), 3)
            # Extract and check command (expected to be CREATE)
            command_tuple = _helpersStringOperations.split_at_first_occurrence(sql_statement, " ")
            if (len(command_tuple) == 0):
                _adel_log.log("parse_db: WARNING! invalid sql COMMAND detected, continuing with next database element (e.g. next table)", 2)
                continue
            if (_helpersStringOperations.starts_with_string(str(command_tuple[0]), "CREATE") != 0):
                _adel_log.log("parse_db: WARNING! invalid sql COMMAND detected, expected \"CREATE\" but found: " + str(command_tuple[0]), 2)
                _adel_log.log(" continuing with next database element (e.g. next table)", 2)
                continue
            # Extract and check first command operand (expected to be TEMP, TEMPORARY, TABLE or VIRTUAL TABLE)
            type_tuple = _helpersStringOperations.split_at_first_occurrence(command_tuple[1], " ")
            if len(type_tuple) == 0:
                _adel_log.log("parse_db: WARNING! invalid sql COMMAND TYPE detected, continuing with next database element (e.g. next table)", 2)
                continue
            # According to the syntax diagrams of the sqlite SQL create table statement there are TEMP or TEMPORARY key words allowed at this place
            if (_helpersStringOperations.starts_with_string(str(type_tuple[0]), "TEMP") == 0 or _helpersStringOperations.starts_with_string(str(type_tuple[0]), "TEMPORARY") == 0 or _helpersStringOperations.starts_with_string(str(type_tuple[0]), "VIRTUAL") == 0):
                # Ignore and proceed with next fragement (must then be TABLE)
                type_tuple = _helpersStringOperations.split_at_first_occurrence(type_tuple[1], " ")
                if len(type_tuple) == 0:
                    _adel_log.log("parse_db: WARNING! invalid sql COMMAND TYPE after TEMP(ORARY) detected, continuing with next database element (e.g. next table)", 2)
                    continue
            # This fragment must be table
            if (_helpersStringOperations.starts_with_string(str(type_tuple[0]), "TABLE") != 0):
                _adel_log.log("parse_db: WARNING! invalid sql COMMAND TYPE detected, expected \"TABLE\" but found: " + str(type_tuple[0]), 2)
                _adel_log.log(" continuing with next database element (e.g. next table)", 2)
                continue
            # Extract and check second command operand (expected to be table name)
            name_tuple = []
            next_space = _helpersStringOperations.split_at_first_occurrence(type_tuple[1], " ")
            next_parenthesis = _helpersStringOperations.split_at_first_occurrence(type_tuple[1], "(")
            # NOTE(review): next_space and next_parenthesis are the split
            # result sequences, not character positions, so "<" compares
            # them lexicographically -- looks like positional comparison was
            # intended; verify against _helpersStringOperations
            if (next_space < next_parenthesis):
                # "IF NOT EXISTS" statement possible
                if (_helpersStringOperations.starts_with_string(str(_helpersStringOperations.crop_whitespace(type_tuple[1])), "IF") == 0):
                    type_tuple[1] = type_tuple[1][2:]
                    if (_helpersStringOperations.starts_with_string(str(_helpersStringOperations.crop_whitespace(type_tuple[1])), "NOT") == 0):
                        type_tuple[1] = type_tuple[1][3:]
                        if (_helpersStringOperations.starts_with_string(str(_helpersStringOperations.crop_whitespace(type_tuple[1])), "EXISTS") == 0):
                            type_tuple[1] = type_tuple[1][6:]
                type_tuple[1] = _helpersStringOperations.crop_whitespace(type_tuple[1])
                # Extract name tuple
                name_tuple = _helpersStringOperations.split_at_first_occurrence(type_tuple[1], " ")
                if len(name_tuple) == 0:
                    name_tuple = _helpersStringOperations.split_at_first_occurrence(type_tuple[1], "(")
                    if len(name_tuple) == 0:
                        _adel_log.log("parse_db: WARNING! invalid sql COMMAND TYPE NAME detected, continuing with next database element (e.g. next table)", 2)
                        continue
                    # Append leading opening parenthesis that we cut off before
                    name_tuple[1] = "(" + str(name_tuple[1])
                else:
                    # "AS ..." statement possible
                    tmp_string = _helpersStringOperations.crop_whitespace(name_tuple[1])
                    if (tmp_string.startswith("AS")):
                        _adel_log.log("parse_db: OK - \"AS\" statement detected: " + str(tmp_string), 3)
                        _adel_log.log("parse_db: OK - no data stored, thus continuing with next database element (e.g. next table)", 3)
                        continue
            else:
                name_tuple = _helpersStringOperations.split_at_first_occurrence(type_tuple[1], "(")
                if len(name_tuple) == 0:
                    _adel_log.log("parse_db: WARNING! invalid sql COMMAND TYPE NAME detected, continuing with next database element (e.g. next table)", 2)
                    continue
                # Append leading opening parenthesis that we cut off before
                name_tuple[1] = "(" + str(name_tuple[1])
            # Now ready to parse TABLE
            _adel_log.log("parse_db: ----> parsing database structure " + str(type_tuple[0]) + " \"" + str(name_tuple[0]) + "\"", 3)
            _adel_log.log("parse_db: ----> parsing SQL statement of " + str(type_tuple[0]) + "....", 3)
            _adel_log.log("parse_db: OK - SQL statement is of type: " + str(command_tuple[0]) + " " + str(type_tuple[0]), 3)
            # Parse and append sql statement
            name_tuple[1] = _helpersStringOperations.cut_first_last_exclude(name_tuple[1], "(", ")")
            result_list.append(parse_sql_statement_params(name_tuple[1]))
            # Ensure we deal with a real table, virtual tables have no b-tree and thus the b-tree root page pointer is 0
            if (db_schema[4] == 0):
                _adel_log.log("parse_db: OK - this table holds no content (e.g. virtual table), continuing with next database element (e.g. next table)", 3)
                _adel_log.log("parse_db: ----> database structure " + str(type_tuple[0]) + " \"" + str(name_tuple[0]) + "\" parsed", 3)
                # Append result from table, index, view or trigger to final list
                final_list.append(result_list)
                continue
            # Parse and append table contents
            btree_root_page_string = _sqliteFileHandler.read_page(db_schema[4])
            # Ensure that read page could retrieve an existing page
            if (btree_root_page_string == ""):
                _adel_log.log("parse_db: ERROR - could not refer to b-tree root page: " + str(db_schema[4]), 1)
                _adel_log.log(" continuing with next database element (e.g. next table)", 1)
                continue
            _adel_log.log("parse_db: ----> parsing contents of " + str(type_tuple[0]) + "....", 3)
            table_contents = _sqlitePageParser.parse_table_btree_page(btree_root_page_string, 0)
            # Check whether the table contains a dedicated row ID column
            if (ROW_ID_COLUMN == 0):
                # Table has no dedicated row ID column, add "rowID" to the table statement (the rowID is already extractet)
                index_of_last_element_in_result_list = len(result_list) - 1
                temp_list = result_list[index_of_last_element_in_result_list]
                result_list[index_of_last_element_in_result_list] = [["rowID", "INTEGER"]]
                for element in range(len(temp_list)):
                    result_list[index_of_last_element_in_result_list].append(temp_list[element])
                # Append table contents to the result list
                for row in table_contents:
                    result_list.append(row)
            else:
                # Table has a dedicated row ID column (integer primary key column), link values stored as row ID in the b-tree to this column (at the place of this column)
                # Append table contents to the result list
                for row in table_contents:
                    # Replace "None" entries in integer primary key column of each row through the actual row ID
                    row[ROW_ID_COLUMN] = row[0]
                    # Delete manually appended row ID column (in parse_sql_statement_params)
                    temp_row = row
                    row = []
                    for index in range(len(temp_row) - 1):
                        row.append(temp_row[index + 1])
                    # Append corrected row
                    result_list.append(row)
            # Append result from table, index, view or trigger to final list
            final_list.append(result_list)
            _adel_log.log("parse_db: ----> database structure " + str(type_tuple[0]) + " \"" + str(name_tuple[0]) + "\" parsed", 3)
            # TODO: comment out the following print statements in productive environment
            #_adel_log.log("\n_sqliteParser.py:234, parse_db ----> printing database schema for " + str(type_tuple[0]) + " \"" + str(name_tuple[0]) + "\" for test purposes:", 4)
            #_adel_log.log(str(db_schema[len(db_schema) - 1]), 4)
            #_adel_log.log("\n_sqliteParser.py:236, parse_db ----> printing database contents for " + str(name_tuple[0]) + "\" for test purposes:", 4)
            #for result in result_list:
            #    _adel_log.log(str(result), 4)
            # comment out the above print statements in productive environment
        # PARSE INDEX STATEMENT
        #if ((str(db_schema[1]) == "INDEX") or (str(db_schema[1]) == "Index") or (str(db_schema[1]) == "index")):
            # TODO: implement if necessary
            # IGNORED RIGHT NOW
        # PARSE VIEW STATEMENT
        #if ((str(db_schema[1]) == "VIEW") or (str(db_schema[1]) == "View") or (str(db_schema[1]) == "view")):
            # TODO: implement if necessary
            # IGNORED RIGHT NOW
        # PARSE TRIGGER STATEMENT
        #if ((str(db_schema[1]) == "TRIGGER") or (str(db_schema[1]) == "Trigger") or (str(db_schema[1]) == "trigger")):
            # TODO: implement if necessary
            # IGNORED RIGHT NOW
    _adel_log.log("\nparseDB: ----> returning contents of the database file", 3)
    # Close the database file
    _sqliteFileHandler.close_db()
    _adel_log.log("parse_db: ----> sqlite3 database file parsed", 3)
    return final_list
def run(argv):
    """Entry point of ADEL (Python 2).

    Parses the command line, then either dumps databases from a connected
    Android device via adb (-d) or analyzes an already dumped directory
    (-db). Exits with status 1 on missing arguments and 3 when no device
    or emulator is connected.
    """
    # Manual
    usage = "\033[0;32m adel.py -d <device/backup_folder> -l <loglevel>\033[m"
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-l', '--loglevel', default=4, const='4', nargs='?', help='The loglevel is an optional value between 0 (no logging) and 4 (full debug logging).')
    parser.add_argument('-d', '--device', default=False, nargs='?', help='Use the device name of the smartphone to load the correct config.')
    parser.add_argument('-db', '--database', default=False, nargs='?', help='Absolute path for already dumped databases')
    options = parser.parse_args(argv[1:])
    if not options.device:
        print "Illegal number of arguments"
        print usage
        sys.exit(1)
    else:
        mode = options.device
    # Clamp the log level to the valid range 0..4 (default 4)
    if (int(options.loglevel) >= 0) and (int(options.loglevel) <= 4):
        _adel_log.LOG_LEVEL_GLOBAL = int(options.loglevel)
    else:
        _adel_log.LOG_LEVEL_GLOBAL = 4
    # Programm header
    os.system("clear")
    print """\033[0;32m
     _____  ________  ___________.____
    /  _  \ \______ \ \_   _____/|    |
   /  /_\  \ |    |  \ |    __)_ |    |
  /    |    \|    `   \|        \|    |___
  \____|__  /_______  /_______  /|_______ \\
          \/        \/        \/         \/

 Android Data Extractor Lite v2.2 BETA
\033[m"""
    print "\n"
    print "ADEL MAIN: ----> starting script...."
    if not options.database:
        # Opening the connection to the smartphone or emulator
        print "ADEL MAIN: ----> Trying to connect to smartphone or emulator...."
        # Check if there is a smartphone or emulator connected
        # (adb devices prints 4 header tokens; a 5th token means a device line exists)
        if len(subprocess.Popen(['adb', 'devices'], stdout=subprocess.PIPE).communicate(0)[0].split()) > 4:
            # Create backup directory
            try:
                device_name = subprocess.Popen(['adb', 'devices'], stdout=subprocess.PIPE).communicate(0)[0].split()[4]
            except:
                print "\033[0;31mADEL MAIN: ----> ERROR! No Android smartphone connected !!\033[m"
                sys.exit(3) # indicates that no smartphone or emulator was connected to the PC
            print "dumpDBs: ----> opening connection to device: \033[0;32m" + device_name + "\033[m"
            # Starting the deamon with root privileges
            subprocess.Popen(['adb', 'root'], stdout=subprocess.PIPE).communicate(0)[0]
            backup_dir = DATE + "__" + TIME[0] + "-" + TIME[1] + "-" + TIME[2] + "__" + device_name
            os.mkdir(backup_dir)
            # Create log file and log directory if LOGLEVEL is > 0
            if _adel_log.LOG_LEVEL_GLOBAL > 0:
                log_file = backup_dir + "/log/adel.log"
                os.mkdir(backup_dir + "/log")
                _adel_log.FILE_HANDLE = open(log_file, "a+")
                _adel_log.log("\n# (c) mspreitzenbarth & sschmitt 2012 \n\n\n _____ ________ ___________.____ \n / _ \ \______ \ \_ _____/| | \n / /_\ \ | | \ | __)_ | | \n / | \| ` \| \| |___ \n \____|__ /_______ /_______ /|_______ \ \n \/ \/ \/ \/ \n Android Data Extractor Lite v2.0", 2)
                _adel_log.log("dumpDBs: ----> evidence directory \033[0;32m" + backup_dir + "\033[m created", 0)
                _adel_log.log("ADEL MAIN: ----> log file \033[0;32m" + log_file + "\033[m created", 0)
                _adel_log.log("ADEL MAIN: ----> log level: \033[0;32m" + str(_adel_log.LOG_LEVEL_GLOBAL) + "\033[m", 0)
            # Get Android OS version which is running on the connected device
            try:
                os_version = subprocess.Popen(['adb', 'shell', 'getprop', 'ro.build.version.release'], stdout=subprocess.PIPE).communicate(0)[0].split()[0]
            except:
                os_version = subprocess.Popen(['adb', 'shell', 'getprop', 'ro.build.version.release'], stdout=subprocess.PIPE).communicate(0)[0]
            os_version2 = os_version.replace(".", "")
            if len(os_version2) < 3:
                # NOTE(review): os_version2.join("0") joins a 1-character
                # iterable, so it always evaluates to "0" regardless of
                # os_version2 -- padding "os_version2 + '0'" was probably
                # intended; verify before relying on os_version2 downstream
                os_version2 = os_version2.join("0")
            _adel_log.log("dumpDBs: ----> device is running\033[0;32m Android OS " + os_version + "\033[m", 0)
            # Do the main logging action
            _adel_log.log("\n############ SMARTPHONE & GLOBAL INFO ############\n", 2)
            _adel_log.log("dumpDBs: Date: " + DATE + " " + TIME[0] + ":" + TIME[1] + ":" + TIME[2], 3)
            _adel_log.log("dumpDBs: Device Name: " + device_name, 3)
            _adel_log.log("dumpDBs: Device is running: Android v" + os_version, 3)
            _adel_log.log("dumpDBs: Log Level: " + str(_adel_log.LOG_LEVEL_GLOBAL), 3)
            # Call the dumping and analysing methods and create output directory
            xml_dir = backup_dir + "/xml"
            os.mkdir(xml_dir)
            # Copy the xml stylesheet to the evidence directory
            shutil.copy("xml/report.xsl", xml_dir + "/report.xsl")
            file_dir = backup_dir + "/databases"
            os.mkdir(file_dir)
            dumpDBs(file_dir, os_version, device_name)
            _getGestureLock.crack(backup_dir)
            get_location_information(backup_dir, device_name, os_version)
            analyzeDBs(file_dir, os_version, xml_dir, options.device, os_version2)
            _compareHashValues.compare(backup_dir)
            # Killing deamon and closing the logfile
            _adel_log.log("ADEL MAIN: ----> stopping script....", 0)
            print "\n"
            print "\033[0;32m (c) m.spreitzenbarth & s.schmitt 2012\033[m"
            print "\n\n"
            # Close log file if any was created (log level must be > 0)
            if _adel_log.LOG_LEVEL_GLOBAL > 0:
                _adel_log.FILE_HANDLE.close()
        else:
            print "\033[0;31mADEL MAIN: ----> ERROR! No Android smartphone connected !!\033[m"
            print "ADEL MAIN: ----> stopping script...."
            subprocess.Popen(['adb', 'kill-server'], stdout=subprocess.PIPE).communicate(0)[0]
            print "\n"
            print "\033[0;32m (c) m.spreitzenbarth & s.schmitt 2012\033[m"
            print "\n\n"
            sys.exit(3) # indicates that no smartphone or emulator was connected to the PC
    else:
        # Define global variables
        # -db mode: work on an already dumped directory, no device needed.
        # NOTE(review): os_version is hard-wired to "2.3.3" in this branch
        backup_dir = options.database
        file_dir = backup_dir
        device_name = "local"
        os_version = "2.3.3"
        os_version2 = "233"
        # Create log file and log directory if LOGLEVEL is > 0
        if _adel_log.LOG_LEVEL_GLOBAL > 0:
            log_file = backup_dir + "/log/adel.log"
            os.mkdir(backup_dir + "/log")
            _adel_log.FILE_HANDLE = open(log_file, "a+")
            _adel_log.log("\n# (c) mspreitzenbarth & sschmitt 2011 \n\n\n _____ ________ ___________.____ \n / _ \ \______ \ \_ _____/| | \n / /_\ \ | | \ | __)_ | | \n / | \| ` \| \| |___ \n \____|__ /_______ /_______ /|_______ \ \n \/ \/ \/ \/ \n Android Data Extractor Lite v2.0", 2)
            _adel_log.log("dumpDBs: ----> evidence directory \033[0;32m" + backup_dir + "\033[m created", 0)
            _adel_log.log("ADEL MAIN: ----> log file \033[0;32m" + log_file + "\033[m created", 0)
            _adel_log.log("ADEL MAIN: ----> log level: \033[0;32m" + str(_adel_log.LOG_LEVEL_GLOBAL) + "\033[m", 0)
            _adel_log.log("dumpDBs: ----> using configuration for\033[0;32m Android 2.3.3\033[m", 0)
        # Do the main logging action
        _adel_log.log("\n############ SMARTPHONE & GLOBAL INFO ############\n", 2)
        _adel_log.log("dumpDBs: Date: " + DATE + " " + TIME[0] + ":" + TIME[1] + ":" + TIME[2], 3)
        _adel_log.log("dumpDBs: Log Level: " + str(_adel_log.LOG_LEVEL_GLOBAL), 3)
        # Call the dumping and analysing methods and create output directory
        xml_dir = backup_dir + "/xml"
        os.mkdir(xml_dir)
        # Copy the xml stylesheet to the evidence directory
        shutil.copy("xml/report.xsl", xml_dir + "/report.xsl")
        dumpDBs(file_dir, os_version, device_name)
        get_location_information(backup_dir, device_name)
        analyzeDBs(file_dir, os_version, xml_dir, options.device, os_version2)
        # Killing deamon and closing the logfile
        _adel_log.log("ADEL MAIN: ----> stopping script....", 0)
        print "\n"
        print "\033[0;32m (c) m.spreitzenbarth & s.schmitt 2012\033[m"
        print "\n\n"
        # Close log file if any was created (log level must be > 0)
        if _adel_log.LOG_LEVEL_GLOBAL > 0:
            _adel_log.FILE_HANDLE.close()
def parse_table_btree_interior_page(page_hex_string, page_offset):
    """Parse a table b-tree interior page and return the concatenated row
    contents of every referenced child page (recursing via
    parse_table_btree_page). Returns [] on an invalid page header.
    """
    # Parse and validate the page header: a table b-tree interior page has
    # 7 header fields and page type 5
    header = parse_btree_page_header(page_hex_string, page_offset)
    field_count = len(header)
    if field_count != 7 or header[0] != 5:
        _adel_log.log("parse_table_btree_interior_page: ERROR - invalid page type in table b-tree interior page header", 1)
        _adel_log.log(" Page header was said to start at page offset: " + str(page_offset), 1)
        _adel_log.log(" Printing page content....", 1)
        _adel_log.log(page_hex_string, 1)
        return []
    # The cell pointer array sits directly behind the header (header[-1] is
    # the header length, header[2] the number of cells)
    cell_pointers = parse_cell_pointer_array(
        page_hex_string, (page_offset + header[field_count - 1]), header[2])
    # One [child_page_number, row_id] tuple per cell ...
    node_pointers = [
        parse_table_btree_interior_cell(page_hex_string, pointer)
        for pointer in cell_pointers
    ]
    # ... plus the right-most child pointer carried in the header itself
    node_pointers.append([header[5], 0])
    # Recursively collect the contents of every child page
    content_list = []
    for node in node_pointers:
        _adel_log.log(
            "parse_table_btree_interior_page: ----> fetching child page to parse, page number: "
            + str(node[0]) + "....", 3)
        child_page = _sqliteFileHandler.read_page(node[0])
        # Skip pointers that do not resolve to a readable page
        if child_page == "":
            _adel_log.log(
                "parse_table_btree_interior_page: ERROR - invalid node tuple detected, cannot reference child page pointer: "
                + str(node), 1)
            continue
        content_list.extend(parse_table_btree_page(child_page, 0))
        _adel_log.log(
            "parse_table_btree_interior_page: ----> child page parsed, page number: "
            + str(node[0]) + "....", 4)
    return content_list
def parse_btree_page_header(page_hex_string, page_offset):
    """Parse the 8- or 12-byte b-tree page header at page_offset (bytes).

    Returns [page_type, first_freeblock_offset, cell_count,
    content_area_offset, fragmented_free_bytes, (right_most_pointer only
    for interior pages,) header_length_in_bytes].
    """
    # 1 byte is represented by two characters in the hexString, so internally we need to calculate the offset in nibbles
    page_offset = page_offset * 2
    _adel_log.log(
        "parse_btree_page_header: ----> parsing b-tree page header structure....",
        4)

    def read_header_field(byte_pos, byte_len):
        # Big-endian unsigned integer located at header byte `byte_pos`
        start = page_offset + byte_pos * 2
        return int(page_hex_string[start:(start + byte_len * 2)], 16)

    # B-tree header byte 0: b-tree page type (2/5 = interior, 10/13 = leaf)
    btree_page_type = read_header_field(0, 1)
    if btree_page_type in (2, 5, 10, 13):
        _adel_log.log(
            "parse_btree_page_header: OK - sqlite b-tree page type (must be 2,5,10 or 13): %(btree_page_type)s"
            % vars(), 4)
    else:
        _adel_log.log(
            "parse_btree_page_header: WARNING! - invalid sqlite b-tree page type (must be 2,5,10 or 13): %(btree_page_type)s"
            % vars(), 2)
    # B-tree header bytes 1-2: bytes offset into the page of the first freeblock
    btree_number_of_bytes_offset_in_first_free_block = read_header_field(1, 2)
    _adel_log.log(
        "parse_btree_page_header: OK - bytes offset into the page of the first freeblock: %(btree_number_of_bytes_offset_in_first_free_block)s"
        % vars(), 4)
    # B-tree header bytes 3-4: number of cells on this page
    btree_number_of_cells = read_header_field(3, 2)
    _adel_log.log(
        "parse_btree_page_header: OK - number of cells on this page: %(btree_number_of_cells)s"
        % vars(), 4)
    # B-tree header bytes 5-6: offset of the first byte of cell content area
    btree_offset_of_first_byte_content = read_header_field(5, 2)
    _adel_log.log(
        "parse_btree_page_header: OK - offset of the first byte of cell content area: %(btree_offset_of_first_byte_content)s"
        % vars(), 4)
    # B-tree header byte 7: number of fragmented free bytes
    btree_number_of_fragmented_free_bytes = read_header_field(7, 1)
    _adel_log.log(
        "parse_btree_page_header: OK - number of fragmented free bytes: %(btree_number_of_fragmented_free_bytes)s"
        % vars(), 4)
    # Build list of well defined header elements
    header_elements = [
        btree_page_type,
        btree_number_of_bytes_offset_in_first_free_block,
        btree_number_of_cells,
        btree_offset_of_first_byte_content,
        btree_number_of_fragmented_free_bytes
    ]
    # Optional field, bytes 8-11: right-most child pointer, present only on
    # interior pages (types 2 and 5); its presence fixes the header length
    if btree_page_type in (2, 5):
        btree_right_most_pointer = read_header_field(8, 4)
        header_elements.append(btree_right_most_pointer)
        length = 12
        _adel_log.log(
            "parse_btree_page_header: OK - right-most pointer: %(btree_right_most_pointer)s"
            % vars(), 4)
    else:
        length = 8
        _adel_log.log(
            "parse_btree_page_header: OK - page is a b-tree leaf page and thus does not include a right-most pointer",
            4)
    # Return list of header elements; the last entry is the header length in bytes
    header_elements.append(length)
    _adel_log.log(
        "parse_btree_page_header: OK - returning list of header elements: %(header_elements)s"
        % vars(), 3)
    _adel_log.log(
        "parse_btree_page_header: ----> b-tree page header structure parsed",
        4)
    return header_elements
def parse_table_btree_leaf_cell(page_hex_string, page_offset, cell_pointers, free_block_pointer):
    """Parse one table b-tree leaf cell and return its record contents.

    page_hex_string -- the complete page as a hex string (2 chars per byte)
    page_offset -- byte offset of the cell inside the page
    cell_pointers -- byte offsets of all cells on this page; used to locate the
                     end of this record when its payload overflows
    free_block_pointer -- offset of the first freeblock on this page

    Returns [row_id, column_0, column_1, ...], or [] when the overflow page
    pointer at the end of an overflowing record cannot be resolved.
    """
    # 1 byte is represented by two characters in the hexString, so internally we need to calculate the offset in nibbles
    page_offset_in_bytes = page_offset # store for log reasons only
    page_offset = page_offset * 2 # now dealing with nibbles because we treat a string (1 character = 1 nibble)
    db_page_size_in_bytes = _sqliteFileHandler.DB_PAGESIZE_IN_BYTES
    usable_page_space = db_page_size_in_bytes - _sqliteFileHandler.DB_RESERVED_SPACE
    _adel_log.log("parse_table_btree_leaf_cell: ----> parsing b-tree leaf cell at offset %(page_offset_in_bytes)s...." % vars(), 4)
    # Get total number of bytes of payload
    bytes_of_payload_tuple = _sqliteVarInt.parse_next_var_int(page_hex_string[page_offset:(page_offset + 18)]) # a variable integer can be maximum 9 byte (= 18 nibbles) long
    bytes_of_payload = bytes_of_payload_tuple[0]
    _adel_log.log("parse_table_btree_leaf_cell: OK - payload is %(bytes_of_payload)s bytes long" % vars(), 4)
    # Get row_id (second varint of the cell; bytes_of_payload_tuple[1] is the
    # byte length of the first varint, so this slice starts right after it)
    row_id_string = page_hex_string[(page_offset + (bytes_of_payload_tuple[1] * 2)):(page_offset + (bytes_of_payload_tuple[1] + 9) * 2)]
    row_id_tuple = _sqliteVarInt.parse_next_var_int(row_id_string)
    row_id = row_id_tuple[0]
    _adel_log.log("parse_table_btree_leaf_cell: ----> extracting contents for row_id %(row_id)s...." % vars(), 4)
    # Check for overflow pages and append content of those pages, if any
    # Calculate the overflow limits for table b-tree leaf cell
    remaining_page_space = db_page_size_in_bytes - page_offset_in_bytes
    if (bytes_of_payload > (remaining_page_space)):
        # We expext content to overflow, because there is not enough space left on this page
        # NOTE(review): the "% vars()" below is a no-op (the string has no
        # format fields) but harmless; kept byte-identical.
        _adel_log.log("parse_table_btree_leaf_cell: OK - payload is too large for this page, there are overflow pages" % vars(), 4)
        # Check at which position the next cell starts
        next_cell = usable_page_space
        for cell_pointer in cell_pointers:
            if (cell_pointer > page_offset_in_bytes) and (cell_pointer < next_cell):
                next_cell = cell_pointer
        # Check at which position the next freeblock starts (we ignore theoretically possible freebytes in this case,
        # Because we expect no freebyte at the end of a cell that overflows to another page
        next_free_block = usable_page_space
        free_blocks = parse_free_blocks(page_hex_string, free_block_pointer)
        for free_block in free_blocks:
            if (free_block[0] > page_offset_in_bytes) and (free_block[0] < next_free_block):
                next_free_block = free_block[0]
        # Get the end of this record: either closest following cell or closest following freeblock or end of page
        end_of_record = usable_page_space
        # Check of the end of this record is given through a following cell
        if (next_cell != usable_page_space) and ((next_cell <= next_free_block) or (next_free_block == usable_page_space)):
            # next element is not end of page but a cell
            end_of_record = next_cell
        # Check of the end of this record is given through a following free block
        if (next_free_block != usable_page_space) and ((next_free_block < next_cell) or (next_cell == usable_page_space)):
            # Next element is not end of page but a free block
            end_of_record = next_free_block
        # Cut record hex string from the beginning to the offset of the next following element
        record_hex_string = page_hex_string[(page_offset + ((bytes_of_payload_tuple[1] + row_id_tuple[1]) * 2)):(end_of_record * 2)]
        # NOTE(review): integer division under Python 2; under Python 3 this
        # would yield a float and break the slicing below — this module
        # appears to target Python 2, confirm before porting.
        record_hex_string_length = len(record_hex_string) / 2 # string length is count in nibbles, we need bytes here
        # Save overflow page pointer at the end of record hex string
        # (the last 4 bytes of the on-page portion hold the first overflow page number)
        first_overflow_page_number = int(record_hex_string[((record_hex_string_length - 4) * 2):(record_hex_string_length * 2)], 16)
        _adel_log.log("parse_table_btree_leaf_cell: ----> parsing overflow page chain beginning at page %(first_overflow_page_number)s...." % vars(), 4)
        # Cut off overflow page number from record_hex_string
        record_hex_string = record_hex_string[(0):((record_hex_string_length - 4) * 2)]
        first_overflow_page_string = _sqliteFileHandler.read_page(first_overflow_page_number)
        # Ensure that read page could retrieve an existing page
        if (first_overflow_page_string == ""):
            _adel_log.log("parse_table_btree_leaf_cell: ERROR - invalid overflow page pointer, cannot reference first overflow page: " + str(first_overflow_page_number), 1)
            return []
        # Append content from overflow pages
        record_hex_string += parse_overflow_page_chain(first_overflow_page_string)
        # Ensure correct length of string (maybe not all bytes of the last overflow page in the chain contain content)
        record_hex_string_length = len(record_hex_string) / 2 # string length is count in nibbles, we need bytes here
        if (bytes_of_payload < record_hex_string_length):
            # Cut record hex string again
            record_hex_string = record_hex_string[:bytes_of_payload * 2]
    else:
        # The entire payload is stored on this page
        record_hex_string = page_hex_string[(page_offset + ((bytes_of_payload_tuple[1] + row_id_tuple[1]) * 2)):(page_offset + ((bytes_of_payload_tuple[1] + row_id_tuple[1] + bytes_of_payload_tuple[0]) * 2))]
    # Parse the record
    read_content_list = parse_record(record_hex_string)
    # Build the resulting list (including the row_id used sqlite internally)
    cell_content_list = []
    cell_content_list.append(row_id)
    for element in range(len(read_content_list)):
        cell_content_list.append(read_content_list[element])
    # Return results
    _adel_log.log("parse_table_btree_leaf_cell: OK - returning list of cell contents", 4)
    _adel_log.log("parse_table_btree_leaf_cell: ----> b-tree leaf cell at offset %(page_offset_in_bytes)s parsed" % vars(), 4)
    return cell_content_list
def dumpDBs(file_dir, os_version, device_name):
    """Back up all SQLite databases from the device into file_dir.

    Thin wrapper around _dumpFiles.get_SQLite_files that brackets the dump
    with progress log messages.
    """
    log = _adel_log.log
    log("dumpDBs: ----> dumping all SQLite databases....", 0)
    _dumpFiles.get_SQLite_files(file_dir, os_version, device_name)
    log("dumpDBs: ----> all SQLite databases dumped", 0)
    # Empty message separates this section visually in the log output.
    log("", 3)
def _write_marker_entries(map_file, js_variable, position_list, title_prefix, skip_zero_latitude=False):
    """Emit one JavaScript marker entry per position record into map_file.

    Each record is expected as [name, latitude, longitude, accuracy, time]
    (all strings, as produced by the get_location_information_* parsers).
    When skip_zero_latitude is set, entries whose latitude is the literal
    "0.000000" are dropped (unresolved cell/wifi fixes). Skipped indices
    simply leave gaps in the JavaScript object, which is harmless because
    the generated script iterates it with for-in.
    """
    for index, entry in enumerate(position_list):
        if skip_zero_latitude and entry[1] == "0.000000":
            continue
        title = "'" + title_prefix + entry[0] + " -> Time: " + entry[4] + "'"
        map_file.write(js_variable + "['" + str(index) + "'] = {center: new google.maps.LatLng(" + entry[1] + ", " + entry[2] + "), accuracy: " + entry[3] + ", title: " + title + "};\n")


def createMap(backup_dir, cellPositionList, wifiPositionList, picturePositionList, twitterPositionList, gMapsPositionList, browserPositionList):
    """Write map.html visualizing all recovered positions on a Google Map.

    Each *PositionList argument is a list of [name, lat, lng, accuracy, time]
    string records. The map is written to <backup root>/map.html.

    BUG FIXES vs. the previous version:
    - the cell loop tested cellPositionList[0][1] (always the first record)
      instead of the current record, so zero-latitude filtering was all-or-
      nothing; it now checks each entry.
    - the generated JavaScript iterated an undefined variable "browsersList"
      while declaring "browserList", raising a ReferenceError in the browser
      and never rendering the browser-location markers.
    """
    backup_dir = backup_dir.split("/")[0]
    # NOTE(review): "a+" appends, so re-running ADEL appends a second HTML
    # document to an existing map.html — kept for backward compatibility.
    mapFile = open(backup_dir + "/map.html", "a+")
    mapFile.write('''<!DOCTYPE html>
<html>
<head>
<meta name="viewport" content="initial-scale=1.0, user-scalable=no" />
<style type="text/css">
  html { height: 100% }
  body { height: 100%; margin: 0px; padding: 0px }
  #map_canvas { height: 100% }
</style>
<script type="text/javascript" src="http://maps.google.com/maps/api/js?sensor=false">
</script>
<script type="text/javascript">
var cellList = {};\n''')
    _write_marker_entries(mapFile, "cellList", cellPositionList, "Cell-ID: ", skip_zero_latitude=True)
    mapFile.write('''var wifiList = {};\n''')
    _write_marker_entries(mapFile, "wifiList", wifiPositionList, "Wifi-MAC: ", skip_zero_latitude=True)
    mapFile.write('''var exifList = {};\n''')
    _write_marker_entries(mapFile, "exifList", picturePositionList, "Picture: ")
    mapFile.write('''var twitterList = {};\n''')
    _write_marker_entries(mapFile, "twitterList", twitterPositionList, "Message: ")
    mapFile.write('''var gMapsList = {};\n''')
    _write_marker_entries(mapFile, "gMapsList", gMapsPositionList, "Destination: ")
    mapFile.write('''var browserList = {};\n''')
    # Browser entries carry their own label in entry[0], hence no prefix.
    _write_marker_entries(mapFile, "browserList", browserPositionList, "")
    mapFile.write('''function initialize() {
  var mapOptions = {zoom: 7, center: new google.maps.LatLng(51.163375, 10.447683), mapTypeId: google.maps.MapTypeId.ROADMAP};
  var map = new google.maps.Map(document.getElementById("map_canvas"), mapOptions);
  for (var cell in cellList) {
    var accuracy = {strokeColor: "#0000FF", strokeOpacity: 0.7, strokeWeight: 2, fillColor: "#0000FF", fillOpacity: 0.15, map: map, center: cellList[cell].center, radius: cellList[cell].accuracy};
    var marker = new google.maps.Marker({position: cellList[cell].center, map: map, title: cellList[cell].title, icon: '../xml/cell.png'});
    cityCircle = new google.maps.Circle(accuracy);
  }
  for (var wifi in wifiList) {
    var accuracy = {strokeColor: "#9e7151", strokeOpacity: 0.7, strokeWeight: 2, fillColor: "#9e7151", fillOpacity: 0.15, map: map, center: wifiList[wifi].center, radius: wifiList[wifi].accuracy};
    var marker = new google.maps.Marker({position: wifiList[wifi].center, map: map, title: wifiList[wifi].title, icon: '../xml/wifi.png'});
    cityCircle = new google.maps.Circle(accuracy);
  }
  for (var exif in exifList) {
    var accuracy = {strokeColor: "#076e33", strokeOpacity: 0.7, strokeWeight: 2, fillColor: "#09a133", fillOpacity: 0.15, map: map, center: exifList[exif].center, radius: exifList[exif].accuracy};
    var marker = new google.maps.Marker({position: exifList[exif].center, map: map, title: exifList[exif].title, icon: '../xml/jpg.png'});
    cityCircle = new google.maps.Circle(accuracy);
  }
  for (var twitter in twitterList) {
    var accuracy = {strokeColor: "#383838", strokeOpacity: 0.7, strokeWeight: 2, fillColor: "#a8a8a8", fillOpacity: 0.15, map: map, center: twitterList[twitter].center, radius: twitterList[twitter].accuracy};
    var marker = new google.maps.Marker({position: twitterList[twitter].center, map: map, title: twitterList[twitter].title, icon: '../xml/twitter.png'});
    cityCircle = new google.maps.Circle(accuracy);
  }
  for (var gMap in gMapsList) {
    var accuracy = {strokeColor: "#ffffff", strokeOpacity: 0.8, strokeWeight: 2, fillColor: "#ffffff", fillOpacity: 0.3, map: map, center: gMapsList[gMap].center, radius: gMapsList[gMap].accuracy};
    var marker = new google.maps.Marker({position: gMapsList[gMap].center, map: map, title: gMapsList[gMap].title, icon: '../xml/g_maps.png'});
    cityCircle = new google.maps.Circle(accuracy);
  }
  for (var browser in browserList) {
    var accuracy = {strokeColor: "#000000", strokeOpacity: 0.7, strokeWeight: 2, fillColor: "#000000", fillOpacity: 0.15, map: map, center: browserList[browser].center, radius: browserList[browser].accuracy};
    var marker = new google.maps.Marker({position: browserList[browser].center, map: map, title: browserList[browser].title, icon: '../xml/g_maps.png'});
    cityCircle = new google.maps.Circle(accuracy);
  }
}
</script>
</head>
<body onload="initialize()">
<div id="map_canvas" style="width:100%; height:100%"></div>
</body>
</html>''')
    mapFile.close()
    _adel_log.log("LocationInfo: ----> Location map \033[0;32m" + backup_dir + "/map.html\033[m created", 0)
def get_SQLite_files(backup_dir, os_version, device_name):
    """Pull SQLite databases and related artefacts from the device via adb.

    backup_dir -- local directory the files are pulled into; a hash log is
                  appended to <backup_dir>/hash_values.log
    os_version -- Android version (compared numerically against 2.0 to pick
                  the contacts database name)
    device_name -- "local" suppresses the picture dump at the end

    Every pull is best-effort: each section is wrapped in a bare except so a
    missing database (or a failed adb transfer) is logged and skipped.
    The "... -> (N KB/s ...)" fragment in each log line is parsed out of
    adb's transfer summary on stderr via communicate(0)[1]; when adb prints
    an error instead, the chained split()s raise and the except path logs
    "doesn't exist".

    NOTE(review): every hashlib.sha256(...) call below hashes the PATH
    STRING (e.g. backup_dir + "/accounts.db"), not the pulled file's
    contents — the recorded "hashes" are constant per backup directory and
    forensically meaningless. Hashing should read the file bytes instead;
    left unchanged here to avoid altering recorded evidence logs.
    """
    hash_value_file = backup_dir + "/hash_values.log"
    hash_value = open(hash_value_file, "a+")
    _adel_log.log("\n############ DUMP SQLite FILES ############\n", 2)
    # Standard applications
    # Accounts database (IMSI, Account_Name, Account_Type, sha1_hash)
    try:
        accountdb = subprocess.Popen(['adb', 'pull', '/data/system/accounts.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        accountdb.wait()
        _adel_log.log("accounts.db -> " + accountdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/accounts.db").hexdigest(), 3)
        hash_value.write("accounts.db -> " + hashlib.sha256(backup_dir + "/accounts.db").hexdigest() + " \n")
    except:
        _adel_log.log("dumpDBs: ----> accounts.db doesn't exist!!", 2)
    # Contacts database () — renamed contacts2.db from Android 2.0 onwards
    if os_version < 2.0:
        contactsdb_name = "contacts.db"
    else:
        contactsdb_name = "contacts2.db"
    try:
        contactsdb = subprocess.Popen(['adb', 'pull', '/data/data/com.android.providers.contacts/databases/' + contactsdb_name, backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        contactsdb.wait()
        # NOTE(review): log line hardcodes "/contacts2.db" even when
        # contacts.db was pulled; the hash_value line uses the right name.
        _adel_log.log(contactsdb_name + " -> " + contactsdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/contacts2.db").hexdigest(), 3)
        hash_value.write(contactsdb_name + " -> " + hashlib.sha256(backup_dir + "/" + contactsdb_name).hexdigest() + " \n")
    except:
        _adel_log.log("dumpDBs: ----> contacts.db doesn't exist!!", 2)
    # MMS and SMS database ()
    try:
        smsdb = subprocess.Popen(['adb', 'pull', '/data/data/com.android.providers.telephony/databases/mmssms.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        smsdb.wait()
        _adel_log.log("mmssms.db -> " + smsdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/mmssms.db").hexdigest(), 3)
        hash_value.write("mmssms.db -> " + hashlib.sha256(backup_dir + "/mmssms.db").hexdigest() + " \n")
    except:
        _adel_log.log("dumpDBs: ----> mmssms.db doesn't exist!!", 2)
    # Calendar database ()
    try:
        calendardb = subprocess.Popen(['adb', 'pull', '/data/data/com.android.providers.calendar/databases/calendar.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        calendardb.wait()
        _adel_log.log("calendar.db -> " + calendardb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/calendar.db").hexdigest(), 3)
        hash_value.write("calendar.db -> " + hashlib.sha256(backup_dir + "/calendar.db").hexdigest() + " \n")
    except:
        _adel_log.log("dumpDBs: ----> calendar.db doesn't exist!!", 2)
    # Settings database ()
    try:
        settingsdb = subprocess.Popen(['adb', 'pull', '/data/data/com.android.providers.settings/databases/settings.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        settingsdb.wait()
        _adel_log.log("settings.db -> " + settingsdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/settings.db").hexdigest(), 3)
        hash_value.write("settings.db -> " + hashlib.sha256(backup_dir + "/settings.db").hexdigest() + " \n")
    except:
        _adel_log.log("dumpDBs: ----> settingsdb.db doesn't exist!!", 2)
    # Location caches (cell & wifi) — "chache" spelling in the log/hash names
    # is a historical typo kept for log compatibility; the pulled files keep
    # their real names cache.cell / cache.wifi.
    try:
        cachecell = subprocess.Popen(['adb', 'pull', '/data/data/com.google.android.location/files/cache.cell', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        cachecell.wait()
        _adel_log.log("chache.cell-> " + cachecell.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/chache.cell").hexdigest(), 3)
        hash_value.write("chache.cell -> " + hashlib.sha256(backup_dir + "/chache.cell").hexdigest() + " \n")
    except:
        _adel_log.log("dumpDBs: ----> chache.cell - cell cache doesn't exist!!", 2)
    try:
        cachewifi = subprocess.Popen(['adb', 'pull', '/data/data/com.google.android.location/files/cache.wifi', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        cachewifi.wait()
        _adel_log.log("chache.wifi-> " + cachewifi.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/chache.wifi").hexdigest(), 3)
        hash_value.write("chache.wifi -> " + hashlib.sha256(backup_dir + "/chache.wifi").hexdigest() + " \n")
    except:
        _adel_log.log("dumpDBs: ----> chache.wifi - wifi cache doesn't exist!!", 2)
    # Optional applications and databases ----> analyzing is not implemented right now
    # (these are logged but not written to hash_values.log)
    # Downloaded data and apps database ()
    try:
        downloadsdb = subprocess.Popen(['adb', 'pull', '/data/data/com.android.providers.downloads/databases/downloads.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        downloadsdb.wait()
        _adel_log.log("downloads.db -> " + downloadsdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/downloads.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs: ----> downloads.db doesn't exist!!", 2)
    # User dictionary database ()
    try:
        userdb = subprocess.Popen(['adb', 'pull', '/data/data/com.android.providers.userdictionary/databases/user_dict.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        userdb.wait()
        _adel_log.log("user_dict.db -> " + userdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/user_dict.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs: ----> user_dict.db doesn't exist!!", 2)
    # Phone database ()
    try:
        phonedb = subprocess.Popen(['adb', 'pull', '/data/data/com.android.providers.telephony/databases/telephony.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        phonedb.wait()
        _adel_log.log("telephony.db -> " + phonedb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/telephony.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs: ----> telephony.db doesn't exist!!", 2)
    # Automated dictionary database ()
    try:
        autodb = subprocess.Popen(['adb', 'pull', '/data/data/com.android.inputmethod.latin/databases/auto_dict.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        autodb.wait()
        _adel_log.log("auto_dict.db -> " + autodb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/auto_dict.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs: ----> auto_dict.db doesn't exist!!", 2)
    # Weather data database ()
    try:
        weatherdb = subprocess.Popen(['adb', 'pull', '/data/data/com.google.android.apps.genie.geniewidget/databases/weather.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        weatherdb.wait()
        _adel_log.log("weather.db -> " + weatherdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/weather.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs: ----> weather.db doesn't exist!!", 2)
    # Samsung weather-clock widget (file has no .db extension on the device)
    try:
        weatherdb = subprocess.Popen(['adb', 'pull', '/data/data/com.sec.android.widgetapp.weatherclock/databases/WeatherClock', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        weatherdb.wait()
        _adel_log.log("WeatherClock.db -> " + weatherdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/WeatherClock.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs: ----> weather widget doesn't exist!!", 2)
    # Google-Mail programm database ()
    try:
        gmaildb = subprocess.Popen(['adb', 'pull', '/data/data/com.google.android.gm/databases/gmail.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        gmaildb.wait()
        _adel_log.log("gmail.db -> " + gmaildb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/gmail.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs: ----> gmail.db doesn't exist!!", 2)
    # Other Email Accounts than Gmail ()
    try:
        providerdb = subprocess.Popen(['adb', 'pull', '/data/data/com.android.email/databases/EmailProvider.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        providerdb.wait()
        _adel_log.log("EmailProvider.db -> " + providerdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/EmailProvider.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs: ----> EmailProvider.db doesn't exist!!", 2)
    # Clock and alarms database ()
    try:
        alarmdb = subprocess.Popen(['adb', 'pull', '/data/data/com.android.deskclock/databases/alarms.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        alarmdb.wait()
        _adel_log.log("alarms.db -> " + alarmdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/alarms.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs: ----> alarms.db doesn't exist!!", 2)
    # Twitter database () — the db file name varies per account, so list the
    # databases directory and pull anything ending in .db; only the first 6
    # directory entries are probed.
    try:
        for i in range(6):
            try:
                file_name = subprocess.Popen(['adb', 'shell', 'ls', '/data/data/com.twitter.android/databases/'], stdout=subprocess.PIPE).communicate(0)[0].split()[i]
                if ".db" in file_name:
                    twitter_db = '/data/data/com.twitter.android/databases/' + file_name
                    twitter_db_name = subprocess.Popen(['adb', 'pull', twitter_db, backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
                    twitter_db_name.wait()
                    # NOTE(review): backup_dir + file_name lacks a "/" separator
                    _adel_log.log(file_name + " -> " + twitter_db_name.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + file_name).hexdigest(), 3)
                else:
                    continue
            except:
                continue
    except:
        _adel_log.log("dumpDBs: ----> twitter.db doesn't exist!!", 2)
    # Google-Talk database ()
    try:
        gtalkdb = subprocess.Popen(['adb', 'pull', '/data/data/com.google.android.gsf/databases/talk.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        gtalkdb.wait()
        _adel_log.log("talk.db -> " + gtalkdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/talk.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs: ----> talk.db (Google-Talk) doesn't exist!!", 2)
    # Search and download the Google-Mail mail database () — first file whose
    # name starts with "mailstore" among the first 6 directory entries
    try:
        for i in range(6):
            file_name = subprocess.Popen(['adb', 'shell', 'ls', '/data/data/com.google.android.gm/databases/'], stdout=subprocess.PIPE).communicate(0)[0].split()[i]
            if file_name.startswith('mailstore'):
                mail_db = '/data/data/com.google.android.gm/databases/' + file_name
                emaildb = subprocess.Popen(['adb', 'pull', mail_db, backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
                emaildb.wait()
                _adel_log.log(file_name + " -> " + emaildb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + file_name).hexdigest(), 3)
                break
            else:
                continue
    except:
        _adel_log.log("dumpDBs: ----> Google-Mail database doesn't exist!!", 2)
    # Google+ database — same directory-scan pattern as Twitter above
    try:
        for i in range(6):
            try:
                file_name = subprocess.Popen(['adb', 'shell', 'ls', '/data/data/com.google.android.apps.plus/databases/'], stdout=subprocess.PIPE).communicate(0)[0].split()[i]
                if ".db" in file_name:
                    plus_db = '/data/data/com.google.android.apps.plus/databases/' + file_name
                    plus_db_name = subprocess.Popen(['adb', 'pull', plus_db, backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
                    plus_db_name.wait()
                    _adel_log.log(file_name + " -> " + plus_db_name.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + file_name).hexdigest(), 3)
                else:
                    continue
            except:
                continue
    except:
        _adel_log.log("dumpDBs: ----> Google+ database doesn't exist!!", 2)
    # Google-Maps database — navigation history first (pulled under a .db
    # name locally), then any .db files in the maps databases directory
    try:
        try:
            maps_file_name = subprocess.Popen(['adb', 'pull', '/data/data/com.google.android.apps.maps/databases/da_destination_history', backup_dir + "/da_destination_history.db"], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
            maps_file_name.wait()
            # NOTE(review): hash path lacks a "/" before the file name
            _adel_log.log("da_destination_history -> " + maps_file_name.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "da_destination_history.db").hexdigest(), 3)
        except:
            _adel_log.log("dumpDBs: ----> Google-Maps navigation history doesn't exist!!", 2)
        for i in range(6):
            try:
                file_name = subprocess.Popen(['adb', 'shell', 'ls', '/data/data/com.google.android.apps.maps/databases/'], stdout=subprocess.PIPE).communicate(0)[0].split()[i]
                if ".db" in file_name:
                    maps_db = '/data/data/com.google.android.apps.maps/databases/' + file_name
                    maps_db_name = subprocess.Popen(['adb', 'pull', maps_db, backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
                    maps_db_name.wait()
                    _adel_log.log(file_name + " -> " + maps_db_name.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + file_name).hexdigest(), 3)
                else:
                    continue
            except:
                continue
    except:
        _adel_log.log("dumpDBs: ----> Google-Maps database doesn't exist!!", 2)
    # Facebook database
    try:
        facebook = subprocess.Popen(['adb', 'pull', '/data/data/com.facebook.katana/databases/fb.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        facebook.wait()
        _adel_log.log("fb.db -> " + facebook.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/fb.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs: ----> Facebook database doesn't exist!!", 2)
    # Browser GPS database
    try:
        browserGPS = subprocess.Popen(['adb', 'pull', '/data/data/com.android.browser/app_geolocation/CachedGeoposition.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        browserGPS.wait()
        _adel_log.log("CachedGeoposition.db -> " + browserGPS.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/CachedGeoposition.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs: ----> CachedGeoposition.db (Browser) doesn't exist!!", 2)
    # Gesture Lock File
    try:
        gesture = subprocess.Popen(['adb', 'pull', '/data/system/gesture.key', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        gesture.wait()
        _adel_log.log("gesture.key -> " + gesture.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/gesture.key").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs: ----> No gesture lock found!", 2)
    # Password Lock File
    try:
        password = subprocess.Popen(['adb', 'pull', '/data/system/password.key', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        password.wait()
        _adel_log.log("password.key -> " + password.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/password.key").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs: ----> No password lock found!", 2)
    # Stored files (pictures, documents, etc.)
    if device_name != "local":
        # Pictures — pulled into <backup root>/pictures/
        picture_dir = backup_dir.split("/")[0] + "/pictures/"
        os.mkdir(picture_dir)
        try:
            _adel_log.log("dumpDBs: ----> dumping pictures (internal_sdcard)....", 0)
            pictures = subprocess.Popen(['adb', 'pull', '/sdcard/DCIM/Camera/', picture_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
            pictures.wait()
        except:
            _adel_log.log("dumpDBs: ----> No pictures on the internal SD-card found !!", 2)
        try:
            _adel_log.log("dumpDBs: ----> dumping pictures (external_sdcard)....", 0)
            pictures = subprocess.Popen(['adb', 'pull', '/sdcard/external_sd/DCIM/Camera/', picture_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
            pictures.wait()
        except:
            _adel_log.log("dumpDBs: ----> No pictures on the external SD-card found !!", 2)
        try:
            _adel_log.log("dumpDBs: ----> dumping screen captures (internal_sdcard)....", 0)
            pictures = subprocess.Popen(['adb', 'pull', '/sdcard/ScreenCapture/', picture_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
            pictures.wait()
        except:
            _adel_log.log("dumpDBs: ----> No screen captures on the internal SD-card found !!", 2)
    hash_value.close()