def _parse_memories_db(db_file, db_file_name, report_folder):
    """Parse the memories database: run each artifact query and report on it.

    Opens db_file read-only, runs the memories, MEO and snap-media queries
    in turn, hands any rows to the matching parser, and logs when a section
    has no data.
    """
    db = open_sqlite_db_readonly(db_file)
    cursor = db.cursor()

    # (query, row parser, label used in the "no data" log message)
    sections = (
        (MEMORIES_ENTRY_QUERY, _parse_memories_entry, 'memories'),
        (MEO_QUERY, _parse_meo, 'MEO (My Eyes Only)'),
        (SNAP_MEDIA_QUERY, _parse_snap_media, 'snap media memories'),
    )
    for query, parse_rows, label in sections:
        row_count, rows = _perform_query(cursor, query)
        if row_count > 0 and rows:
            parse_rows(row_count, rows, report_folder, db_file_name)
        else:
            logfunc(f'No {APP_NAME} {label} data found')

    cursor.close()
    db.close()
def get_Zapya(files_found, report_folder, seeker, wrap_text):
    """Report Zapya file-transfer records from the transfer table."""
    file_found = str(files_found[0])
    db = open_sqlite_db_readonly(file_found)
    cursor = db.cursor()
    cursor.execute('''
    SELECT device, name, direction, createtime, path, title FROM transfer
    ''')
    transfer_rows = cursor.fetchall()
    if transfer_rows:
        report = ArtifactHtmlReport('Zapya')
        report.start_artifact_report(report_folder, 'Zapya')
        report.add_script()
        data_headers = ('Device', 'Name', 'direction', 'createtime', 'path', 'title')
        # Each fetched row already holds exactly the six reported columns.
        data_list = [tuple(row) for row in transfer_rows]
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()
        tsv(report_folder, data_headers, data_list, 'Zapya')
        timeline(report_folder, 'Zapya', data_list, data_headers)
    else:
        logfunc('No Zapya data available')
    db.close()
    return
def get_installedappsGass(files_found, report_folder, seeker, wrap_text):
    """Report installed application bundle IDs from the GMS app_info table."""
    file_found = str(files_found[0])
    db = open_sqlite_db_readonly(file_found)
    cursor = db.cursor()
    cursor.execute('''
    SELECT package_name FROM app_info
    ''')
    app_rows = cursor.fetchall()
    if app_rows:
        report = ArtifactHtmlReport('Installed Apps')
        report.start_artifact_report(report_folder, 'Installed Apps (GMS)')
        report.add_script()
        data_headers = ('Bundle ID',)  # single-element tuple: the comma matters
        data_list = [(row[0],) for row in app_rows]
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()
        tsv(report_folder, data_headers, data_list, 'installed apps - GMS')
    else:
        logfunc('No Installed Apps data available')
    db.close()
    return
def get_chrome(files_found, report_folder, seeker, wrap_text):
    """Report browsing history from every Chrome/Chromium 'History' database found."""
    for file_found in files_found:
        file_found = str(file_found)
        # Only the main 'History' database — skip -journal and other files.
        if os.path.basename(file_found) != 'History':
            continue
        # Skip sbin/.magisk/mirror/data/.. , it should be duplicate data??
        if '.magisk' in file_found and 'mirror' in file_found:
            continue

        browser_name = get_browser_name(file_found)
        if 'app_sbrowser' in file_found:
            browser_name = 'Browser'

        db = open_sqlite_db_readonly(file_found)
        cursor = db.cursor()
        # Chrome stores times as microseconds since 1601-01-01 (WebKit epoch).
        cursor.execute('''
        select datetime(last_visit_time / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch"),
               url,
               title,
               visit_count,
               hidden
        from urls
        ''')
        history_rows = cursor.fetchall()
        if history_rows:
            report = ArtifactHtmlReport(f'{browser_name} History')
            # check for existing and get next name for report file, so report
            # from another file does not get overwritten
            report_path = os.path.join(report_folder, f'{browser_name} History.temphtml')
            report_path = get_next_unused_name(report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder, os.path.basename(report_path))
            report.add_script()
            data_headers = ('Last Visit Time', 'URL', 'Title', 'Visit Count', 'Hidden')
            data_list = []
            for row in history_rows:
                first_col = textwrap.fill(row[0], width=100) if wrap_text else row[0]
                data_list.append((first_col, row[1], row[2], row[3], row[4]))
            report.write_artifact_data_table(data_headers, data_list, file_found)
            report.end_artifact_report()
            tsv(report_folder, data_headers, data_list, f'{browser_name} History')
            timeline(report_folder, f'{browser_name} History', data_list, data_headers)
        else:
            logfunc(f'No {browser_name} history data available')
        db.close()
def get_pSettings(files_found, report_folder, seeker, wrap_text):
    """Report Google partner settings (name/value pairs)."""
    file_found = str(files_found[0])
    db = open_sqlite_db_readonly(file_found)
    cursor = db.cursor()
    cursor.execute('''
    select name, value from partner
    ''')
    partner_rows = cursor.fetchall()
    if partner_rows:
        report = ArtifactHtmlReport('Partner Settings')
        report.start_artifact_report(report_folder, 'Partner Settings')
        report.add_script()
        data_headers = ('Name', 'Value')
        data_list = [(name, value) for name, value in partner_rows]
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()
        tsv(report_folder, data_headers, data_list, 'partner settings')
    else:
        logfunc('No Partner Settings data available')
    db.close()
    return
def get_chromeNetworkActionPredictor(files_found, report_folder, seeker, wrap_text):
    """Report the browser's Network Action Predictor table (typed text vs predicted URL)."""
    for file_found in files_found:
        file_found = str(file_found)
        if not file_found.endswith('Network Action Predictor'):
            continue  # Skip all other files

        browser_name = get_browser_name(file_found)
        if 'app_sbrowser' in file_found:
            browser_name = 'Browser'
        elif '.magisk' in file_found and 'mirror' in file_found:
            continue  # Skip sbin/.magisk/mirror/data/.. , it should be duplicate data??

        db = open_sqlite_db_readonly(file_found)
        cursor = db.cursor()
        cursor.execute('''
        select user_text, url, number_of_hits, number_of_misses
        from network_action_predictor
        ''')
        predictor_rows = cursor.fetchall()
        if predictor_rows:
            report = ArtifactHtmlReport(f'{browser_name} - Network Action Predictor')
            # check for existing and get next name for report file, so report
            # from another file does not get overwritten
            report_path = os.path.join(report_folder,
                                       f'{browser_name} - Network Action Predictor.temphtml')
            report_path = get_next_unused_name(report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder, os.path.basename(report_path))
            report.add_script()
            data_headers = ('User Text', 'URL', 'Number of Hits', 'Number of Misses')
            data_list = [tuple(row) for row in predictor_rows]
            report.write_artifact_data_table(data_headers, data_list, file_found)
            report.end_artifact_report()
            tsv(report_folder, data_headers, data_list,
                f'{browser_name} - Network Action Predictor')
        else:
            logfunc(f'No {browser_name} - Network Action Predictor data available')
        db.close()
    return
def process_accounts_ce(folder, uid, report_folder):
    """Report credential-encrypted account entries (name/type/password) for one user id."""
    db = open_sqlite_db_readonly(folder)
    cursor = db.cursor()
    # Query to create report
    cursor.execute('''
    SELECT name, type, password FROM accounts
    ''')
    account_rows = cursor.fetchall()
    if account_rows:
        report = ArtifactHtmlReport('Accounts_ce')
        report.start_artifact_report(report_folder, f'accounts_ce_{uid}')
        report.add_script()
        data_headers = ('Name', 'Type', 'Password')
        data_list = [tuple(row) for row in account_rows]
        report.write_artifact_data_table(data_headers, data_list, folder)
        report.end_artifact_report()
        tsv(report_folder, data_headers, data_list, f'accounts ce {uid}')
    else:
        logfunc(f'No accounts_ce_{uid} data available')
    db.close()
def get_kikMessages(files_found, report_folder, seeker):
    """Report Kik messages joined with user and attachment details.

    Picks the first *.sqlite path from files_found. Previously, when no
    match existed (only -wal/-journal files, or an empty list), the stale
    or undefined loop variable was used and a non-database file was opened;
    the for/else guard below logs and returns instead.
    """
    for file_found in files_found:
        file_found = str(file_found)
        if file_found.endswith('.sqlite'):
            break
    else:
        # No Kik database among the matched files.
        logfunc('No Kik data available')
        return

    db = open_sqlite_db_readonly(file_found)
    cursor = db.cursor()
    # Timestamps are Apple epoch (seconds since 2001-01-01), hence +978307200.
    cursor.execute('''
    SELECT
    datetime(ZKIKMESSAGE.ZRECEIVEDTIMESTAMP +978307200,'UNIXEPOCH') AS RECEIVEDTIME,
    datetime(ZKIKMESSAGE.ZTIMESTAMP +978307200,'UNIXEPOCH') as TIMESTAMP,
    ZKIKMESSAGE.ZBODY,
    case ZKIKMESSAGE.ZTYPE
        when 1 then 'rcvd'
        when 2 then 'sent'
        when 3 then 'grp admin'
        when 4 then 'grp msg'
        else 'unkn'
    end as 'Type',
    ZKIKMESSAGE.ZUSER,
    ZKIKUSER.ZDISPLAYNAME,
    ZKIKUSER.ZUSERNAME,
    ZKIKATTACHMENT.ZCONTENT
    from ZKIKMESSAGE
    left join ZKIKUSER on ZKIKMESSAGE.ZUSER = ZKIKUSER.Z_PK
    left join ZKIKATTACHMENT on ZKIKMESSAGE.Z_PK = ZKIKATTACHMENT.ZMESSAGE
    ''')
    all_rows = cursor.fetchall()
    usageentries = len(all_rows)
    data_list = []
    if usageentries > 0:
        for row in all_rows:
            data_list.append((row[0], row[1], row[2], row[3], row[4],
                              row[5], row[6], row[7]))
        description = 'Kik Messages'
        report = ArtifactHtmlReport('Kik Messages')
        report.start_artifact_report(report_folder, 'Kik Messages', description)
        report.add_script()
        data_headers = ('Received Time', 'Timestamp', 'Message', 'Type', 'User',
                        'Display Name', 'Username', 'Content')
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()
        tsvname = 'Kik Messages'
        tsv(report_folder, data_headers, data_list, tsvname)
        tlactivity = 'Kik Messages'
        timeline(report_folder, tlactivity, data_list, data_headers)
    else:
        logfunc('No Kik data available')
    db.close()
    return
def get_Cast(files_found, report_folder, seeker, wrap_text):
    """Report Google Cast device discovery records from the DeviceInfo table."""
    file_found = str(files_found[0])
    db = open_sqlite_db_readonly(file_found)
    cursor = db.cursor()
    # Zero millisecond timestamps are rendered as empty strings.
    # NOTE: 'cloud_devcie_id' is the column name as stored in the schema.
    cursor.execute('''
    SELECT
    case last_published_timestamp_millis
        when 0 then ''
        else datetime(last_published_timestamp_millis/1000, 'unixepoch')
    end as "Last Published Timestamp",
    device_id,
    capabilities,
    device_version,
    friendly_name,
    model_name,
    receiver_metrics_id,
    service_instance_name,
    service_address,
    service_port,
    supported_criteria,
    rcn_enabled_status,
    hotspot_bssid,
    cloud_devcie_id,
    case last_discovered_timestamp_millis
        when 0 then ''
        else datetime(last_discovered_timestamp_millis/1000, 'unixepoch')
    end as "Last Discovered Timestamp",
    case last_discovered_by_ble_timestamp_millis
        when 0 then ''
        else datetime(last_discovered_by_ble_timestamp_millis/1000, 'unixepoch')
    end as "Last Discovered By BLE Timestamp"
    from DeviceInfo
    ''')
    device_rows = cursor.fetchall()
    if device_rows:
        report = ArtifactHtmlReport('Cast')
        report.start_artifact_report(report_folder, 'Cast')
        report.add_script()
        data_headers = ('Last Published Timestamp', 'Device ID (SSDP UDN)', 'Capabilities',
                        'Device Version', 'Device Friendly Name', 'Device Model Name',
                        'Receiver Metrics ID', 'Service Instance Name', 'Device IP Address',
                        'Device Port', 'Supported Criteria', 'RCN Enabled Status',
                        'Hotspot BSSID', 'Cloud Device ID', 'Last Discovered Timestamp',
                        'Last Discovered By BLE Timestamp')
        # Rows carry exactly the sixteen reported columns, in order.
        data_list = [tuple(row) for row in device_rows]
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()
        tsv(report_folder, data_headers, data_list, 'Cast')
        timeline(report_folder, 'Cast', data_list, data_headers)
    else:
        logfunc('No Cast data available')
    db.close()
    return
def get_DocList(files_found, report_folder, seeker, wrap_text):
    """Report Google Drive DocList entries (file metadata and timestamps).

    The query is wrapped in a try/except because older app versions lack
    the EntryView table/columns; that case is treated as "no data". The
    previous bare ``except:`` swallowed every exception (including
    KeyboardInterrupt); it is narrowed to sqlite3.Error here.
    """
    import sqlite3  # local import: file's top-level import block is outside this view

    file_found = str(files_found[0])
    db = open_sqlite_db_readonly(file_found)
    cursor = db.cursor()
    all_rows = []
    try:
        cursor.execute('''
        select
        case creationTime
            when 0 then ''
            else datetime("creationTime"/1000, 'unixepoch')
        end as creationTime,
        title,
        owner,
        case lastModifiedTime
            when 0 then ''
            else datetime("lastModifiedTime"/1000, 'unixepoch')
        end as lastModifiedTime,
        case lastOpenedTime
            when 0 then ''
            else datetime("lastOpenedTime"/1000, 'unixepoch')
        end as lastOpenedTime,
        lastModifierAccountAlias,
        lastModifierAccountName,
        kind,
        shareableUri,
        htmlUri,
        md5Checksum,
        size
        from EntryView
        ''')
        all_rows = cursor.fetchall()
    except sqlite3.Error:
        pass  # schema mismatch — report as no data below
    usageentries = len(all_rows)
    if usageentries > 0:
        report = ArtifactHtmlReport('DocList')
        report.start_artifact_report(report_folder, 'DocList')
        report.add_script()
        data_headers = ('Created Date', 'File Name', 'Owner', 'Modified Date', 'Opened Date',
                        'Last Modifier Account Alias', 'Last Modifier Account Name',
                        'File Type', 'Shareable URI', 'HTML URI', 'MD5 Checkusm', 'Size')
        data_list = []
        for row in all_rows:
            data_list.append((row[0], row[1], row[2], row[3], row[4], row[5],
                              row[6], row[7], row[8], row[9], row[10], row[11],))
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()
        tsvname = f'Google Drive - DocList'
        tsv(report_folder, data_headers, data_list, tsvname)
        tlactivity = f'Google Drive - DocList'
        timeline(report_folder, tlactivity, data_list, data_headers)
    else:
        logfunc('No Google Drive - DocList data available')
    db.close()
    return
def get_chromeTopSites(files_found, report_folder, seeker, wrap_text):
    """Report the browser 'Top Sites' table for each matching profile database.

    The query is guarded because the top_sites schema varies across browser
    versions; a failed query is reported as "no data". The previous bare
    ``except:`` is narrowed to sqlite3.Error so real bugs still surface.
    """
    import sqlite3  # local import: file's top-level import block is outside this view

    for file_found in files_found:
        file_found = str(file_found)
        if os.path.basename(file_found) != 'Top Sites':
            continue  # skip -journal and other files
        browser_name = get_browser_name(file_found)
        if 'app_sbrowser' in file_found:
            browser_name = 'Browser'
        elif '.magisk' in file_found and 'mirror' in file_found:
            continue  # Skip sbin/.magisk/mirror/data/.. , it should be duplicate data??
        db = open_sqlite_db_readonly(file_found)
        cursor = db.cursor()
        all_rows = []
        try:
            cursor.execute('''
            select url, url_rank, title, redirects
            FROM top_sites ORDER by url_rank asc
            ''')
            all_rows = cursor.fetchall()
        except sqlite3.Error:
            pass  # schema mismatch — treat as no data
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport(f'{browser_name} Top Sites')
            # check for existing and get next name for report file, so report
            # from another file does not get overwritten
            report_path = os.path.join(report_folder,
                                       f'{browser_name} Top Sites.temphtml')
            report_path = get_next_unused_name(report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder, os.path.basename(report_path))
            report.add_script()
            data_headers = ('URL', 'Rank', 'Title', 'Redirects')
            data_list = []
            for row in all_rows:
                data_list.append((row[0], row[1], row[2], row[3]))
            report.write_artifact_data_table(data_headers, data_list, file_found)
            report.end_artifact_report()
            tsvname = f'{browser_name} top sites'
            tsv(report_folder, data_headers, data_list, tsvname)
        else:
            logfunc(f'No {browser_name} Top Sites data available')
        db.close()
def get_cashApp(files_found, report_folder, seeker, wrap_text):
    """Report Cash App payment transactions with sender/recipient details."""
    for file_found in files_found:
        file_found = str(file_found)
        if not file_found.endswith('.db'):
            continue
        db = open_sqlite_db_readonly(file_found)
        cursor = db.cursor()
        cursor.execute('''Select
        payment.role,
        payment.sender_id,
        CASE WHEN customer.cashtag IS NULL THEN '***NO CASH TAG PRESENT***' ELSE customer.cashtag END,
        customer.customer_display_name,
        payment.recipient_id,
        CASE WHEN customer1.cashtag IS NULL THEN '***NO CASH TAG PRESENT***' ELSE customer1.cashtag END,
        customer1.customer_display_name,
        payment.state,
        datetime(payment.display_date / 1000.0, 'unixepoch'),
        CASE WHEN json_extract (payment.render_data, '$."note"') IS NULL THEN '***NO NOTE SUBMITTED***' ELSE json_extract (payment.render_data, '$."note"') END,
        printf("$%.2f", json_extract(payment.render_data, '$."amount"."amount"') / 100.0)
        From payment
        Inner Join customer On customer.customer_id = payment.sender_id
        Inner Join customer customer1 On payment.recipient_id = customer1.customer_id
        ORDER BY payment.display_date DESC
        ''')
        payment_rows = cursor.fetchall()
        if payment_rows:
            report = ArtifactHtmlReport('Transactions')
            report.start_artifact_report(report_folder, 'Transactions')
            report.add_script()
            data_headers = ('Transaction Date', 'User Account Role', 'Sender Display Name',
                            'Sender Unique ID', 'Sender Cashtag', 'Recipient Display Name',
                            'Recipient Unique ID', 'Recipient Cashtag', 'Transaction Amount',
                            'Transaction Status', 'Note')
            # Reorder the query columns into report order (date first).
            data_list = [(row[8], row[0], row[3], row[1], row[2], row[6],
                          row[4], row[5], row[10], row[7], row[9])
                         for row in payment_rows]
            report.write_artifact_data_table(data_headers, data_list, file_found)
            report.end_artifact_report()
            tsv(report_folder, data_headers, data_list, 'Cash App Transactions')
            timeline(report_folder, 'Cash App Transactions', data_list, data_headers)
        else:
            logfunc('No Cash App Transactions data available')
        db.close()
    return
def get_airGuard(files_found, report_folder, seeker, wrap_text):
    """Report AirGuard tracker sightings (beacon observations joined with devices).

    Fix: the original query aliased ``beacon.longitude AS "Latitude"`` and
    ``beacon.latitude AS "Longitude"``, so the report columns and the KML
    coordinates were swapped/mirrored. The columns are now selected in the
    order the headers (and the KML generator) expect.
    """
    file_found = str(files_found[0])
    db = open_sqlite_db_readonly(file_found)
    cursor = db.cursor()
    cursor.execute('''
    SELECT
    device.lastSeen AS "Last Time Device Seen",
    beacon.receivedAt AS "Time (Local)",
    beacon.deviceAddress AS "Device MAC Address",
    beacon.latitude AS "Latitude",
    beacon.longitude AS "Longitude",
    beacon.rssi AS "Signal Strength (RSSI)",
    device.firstDiscovery AS "First Time Device Seen",
    device.lastNotificationSent as "Last Time User Notified"
    FROM
    beacon
    LEFT JOIN device on device.address=beacon.deviceAddress
    ''')
    all_rows = cursor.fetchall()
    usageentries = len(all_rows)
    if usageentries > 0:
        report = ArtifactHtmlReport('AirGuard AirTag Tracker')
        report.start_artifact_report(report_folder, 'AirGuard AirTag Tracker')
        report.add_script()
        data_headers = ('Last Time Device Seen', 'Time (Local)', 'Device MAC Address',
                        'Latitude', 'Longitude', 'Signal Strength (RSSI)',
                        'First Time Device Seen', 'Last Time User Notified')
        # KML generation keys off 'Timestamp'/'Latitude'/'Longitude' headers.
        data_headers_kml = ('Timestamp', 'Time (Local)', 'Device MAC Address',
                            'Latitude', 'Longitude', 'Signal Strength (RSSI)',
                            'First Time Device Seen', 'Last Time User Notified')
        data_list = []
        for row in all_rows:
            data_list.append((row[0], row[1], row[2], row[3], row[4],
                              row[5], row[6], row[7]))
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()
        tsvname = f'AirGuard AirTag Tracker'
        tsv(report_folder, data_headers, data_list, tsvname)
        tlactivity = f'AirGuard AirTag Tracker'
        timeline(report_folder, tlactivity, data_list, data_headers)
        kmlactivity = 'AirGuard AirTag Tracker'
        kmlgen(report_folder, kmlactivity, data_list, data_headers_kml)
    else:
        logfunc('No AirGuard AirTag Tracker data available')
    db.close()
def get_safariWebsearch(files_found, report_folder, seeker):
    """Report Safari search terms parsed from history URLs containing 'search?q='."""
    file_found = str(files_found[0])
    db = open_sqlite_db_readonly(file_found)
    cursor = db.cursor()
    cursor.execute("""
    select
    datetime(history_visits.visit_time+978307200,'unixepoch'),
    history_items.url,
    history_items.visit_count,
    history_visits.title,
    case history_visits.origin
        when 1 then "icloud synced"
        when 0 then "visited local device"
        else history_visits.origin
    end "icloud sync",
    history_visits.load_successful,
    history_visits.id,
    history_visits.redirect_source,
    history_visits.redirect_destination
    from history_items, history_visits
    where history_items.id = history_visits.history_item
    and history_items.url like '%search?q=%'
    """)
    search_rows = cursor.fetchall()
    data_list = []
    if search_rows:
        for row in search_rows:
            # The WHERE clause guarantees 'search?q=' is present in the URL;
            # take the query-string value and undo '+' encoding of spaces.
            term = row[1].split('search?q=')[1].split('&')[0].replace('+', ' ')
            data_list.append((row[0], term, row[1], row[2], row[3], row[4],
                              row[5], row[6], row[7], row[8]))
        description = ''
        report = ArtifactHtmlReport('Safari Browser')
        report.start_artifact_report(report_folder, 'Search Terms', description)
        report.add_script()
        data_headers = ('Visit Time', 'Search Term', 'URL', 'Visit Count', 'Title',
                        'iCloud Sync', 'Load Successful', 'Visit ID',
                        'Redirect Source', 'Redirect Destination')
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()
        tsv(report_folder, data_headers, data_list, 'Safari Web Search')
        timeline(report_folder, 'Safari Web Search', data_list, data_headers)
    else:
        logfunc('No data available in table')
    db.close()
    return
def get_wellbeing(files_found, report_folder, seeker, wrap_text):
    """Report Digital Wellbeing usage events from the app_usage database."""
    for file_found in files_found:
        file_found = str(file_found)
        if not file_found.endswith('app_usage'):
            continue  # Skip all other files

        db = open_sqlite_db_readonly(file_found)
        cursor = db.cursor()
        cursor.execute('''
        SELECT
        events._id,
        datetime(events.timestamp /1000, 'UNIXEPOCH') as timestamps,
        packages.package_name,
        events.type,
        case
            when events.type = 1 THEN 'ACTIVITY_RESUMED'
            when events.type = 2 THEN 'ACTIVITY_PAUSED'
            when events.type = 12 THEN 'NOTIFICATION'
            when events.type = 18 THEN 'KEYGUARD_HIDDEN & || Device Unlock'
            when events.type = 19 THEN 'FOREGROUND_SERVICE_START'
            when events.type = 20 THEN 'FOREGROUND_SERVICE_STOP'
            when events.type = 23 THEN 'ACTIVITY_STOPPED'
            when events.type = 26 THEN 'DEVICE_SHUTDOWN'
            when events.type = 27 THEN 'DEVICE_STARTUP'
            else events.type
        END as eventtype
        FROM events
        INNER JOIN packages ON events.package_id=packages._id
        ''')
        event_rows = cursor.fetchall()
        if event_rows:
            report = ArtifactHtmlReport('Wellbeing events')
            report.start_artifact_report(report_folder, 'Events')
            report.add_script()
            data_headers = ('Timestamp', 'Package ID', 'Event Type')
            # Only timestamp, package name and decoded type are reported;
            # the raw _id and numeric type columns are dropped.
            data_list = [(row[1], row[2], row[4]) for row in event_rows]
            report.write_artifact_data_table(data_headers, data_list, file_found)
            report.end_artifact_report()
            tsv(report_folder, data_headers, data_list, 'wellbeing - events')
            timeline(report_folder, 'Wellbeing - Events', data_list, data_headers)
        else:
            logfunc('No Wellbeing event data available')
        db.close()
    return
def get_firefoxDownloads(files_found, report_folder, seeker, wrap_text):
    """Report Firefox downloads from the mozac_downloads_database."""
    for file_found in files_found:
        file_found = str(file_found)
        # skip -journal and other files
        if os.path.basename(file_found) != 'mozac_downloads_database':
            continue

        db = open_sqlite_db_readonly(file_found)
        cursor = db.cursor()
        cursor.execute('''
        SELECT
        datetime(created_at/1000,'unixepoch') AS CreatedDate,
        file_name AS FileName,
        url AS URL,
        content_type AS MimeType,
        content_length AS FileSize,
        CASE status
            WHEN 3 THEN 'Paused'
            WHEN 4 THEN 'Canceled'
            WHEN 5 THEN 'Failed'
            WHEN 6 THEN 'Finished'
        END AS Status,
        destination_directory AS DestDir
        FROM downloads
        ''')
        download_rows = cursor.fetchall()
        if download_rows:
            report = ArtifactHtmlReport('Firefox - Downloads')
            report.start_artifact_report(report_folder, 'Firefox - Downloads')
            report.add_script()
            data_headers = ('Created Timestamp', 'File Name', 'URL', 'MIME Type',
                            'File Size (Bytes)', 'Status', 'Destination Directory')
            data_list = [tuple(row) for row in download_rows]
            report.write_artifact_data_table(data_headers, data_list, file_found)
            report.end_artifact_report()
            tsv(report_folder, data_headers, data_list, 'Firefox - Downloads')
            timeline(report_folder, 'Firefox - Downloads', data_list, data_headers)
        else:
            logfunc('No Firefox - Downloads data available')
        db.close()
def get_shareit(files_found, report_folder, seeker, wrap_text):
    """Report SHAREit transfer history (direction, peer ids, file path).

    The query is guarded because the history/item schema differs between app
    versions; a failed query is reported as "no data". The previous bare
    ``except:`` is narrowed to sqlite3.Error so unrelated bugs still surface.
    """
    import sqlite3  # local import: file's top-level import block is outside this view

    for file_found in files_found:
        file_found = str(file_found)
        if file_found.endswith('history.db'):
            break

    source_file = file_found.replace(seeker.directory, '')
    db = open_sqlite_db_readonly(file_found)
    cursor = db.cursor()
    all_rows = []
    try:
        cursor.execute('''
        SELECT
        case history_type when 1 then "Incoming" else "Outgoing" end direction,
        case history_type when 1 then device_id else null end from_id,
        case history_type when 1 then null else device_id end to_id,
        device_name,
        description,
        timestamp/1000 as timestamp,
        file_path
        FROM history
        JOIN item where history.content_id = item.item_id
        ''')
        all_rows = cursor.fetchall()
    except sqlite3.Error:
        pass  # schema mismatch — treat as no data
    usageentries = len(all_rows)
    if usageentries > 0:
        report = ArtifactHtmlReport('Shareit file transfer')
        report.start_artifact_report(report_folder, 'shareit file transfer')
        report.add_script()
        data_headers = ('direction', 'from_id', 'to_id', 'device_name',
                        'description', 'timestamp', 'file_path')
        data_list = []
        for row in all_rows:
            # NOTE(review): fromtimestamp renders in the examiner's local
            # timezone — confirm this matches the rest of the report suite.
            timestamp = datetime.datetime.fromtimestamp(
                int(row[5])).strftime('%Y-%m-%d %H:%M:%S')
            data_list.append((row[0], row[1], row[2], row[3], row[4],
                              timestamp, row[6]))
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()
        tsvname = f'Shareit file transfer'
        tsv(report_folder, data_headers, data_list, tsvname, source_file)
        tlactivity = f'Shareit file transfer'
        timeline(report_folder, tlactivity, data_list, data_headers)
    else:
        logfunc('No Shareit file transfer data available')
    db.close()
    return
def get_googlemaplocation(files_found, report_folder, seeker, wrap_text):
    """Report Google Maps destination history (time, destination, source coordinates).

    The query is guarded because destination_history may be absent in some
    app versions; a failed query is reported as "no data". The previous bare
    ``except:`` is narrowed to sqlite3.Error so unrelated bugs still surface.
    """
    import sqlite3  # local import: file's top-level import block is outside this view

    source_file = ''
    for file_found in files_found:
        file_found = str(file_found)
        if 'journal' in file_found:
            # Journal files are recorded as a source but not parsed.
            source_file = file_found.replace(seeker.directory, '')
            continue
        source_file = file_found.replace(seeker.directory, '')
        db = open_sqlite_db_readonly(file_found)
        cursor = db.cursor()
        all_rows = []
        try:
            cursor.execute('''
            SELECT time/1000, dest_lat, dest_lng, dest_title, dest_address,
                   source_lat, source_lng
            FROM destination_history;
            ''')
            all_rows = cursor.fetchall()
        except sqlite3.Error:
            pass  # table absent/changed — treat as no data
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport('Google Map Locations')
            report.start_artifact_report(report_folder, 'Google Map Locations')
            report.add_script()
            data_headers = ('timestamp', 'destination_latitude', 'destination_longitude',
                            'destination_title', 'destination_address',
                            'source_latitude', 'source_longitude')
            data_list = []
            for row in all_rows:
                timestamp = datetime.datetime.fromtimestamp(
                    int(row[0])).strftime('%Y-%m-%d %H:%M:%S')
                # Coordinates are stored as fixed-point ints; convertGeo
                # renders them as decimal degrees.
                data_list.append((timestamp,
                                  convertGeo(str(row[1])), convertGeo(str(row[2])),
                                  row[3], row[4],
                                  convertGeo(str(row[5])), convertGeo(str(row[6]))))
            report.write_artifact_data_table(data_headers, data_list, file_found)
            report.end_artifact_report()
            tsvname = f'Google Map Locations'
            tsv(report_folder, data_headers, data_list, tsvname, source_file)
        else:
            logfunc('No Google Map Locations found')
        db.close()
def get_wellbeingURLs(files_found, report_folder, seeker, wrap_text):
    """Report Digital Wellbeing component (URL) events from the app_usage database."""
    for file_found in files_found:
        file_found = str(file_found)
        if not file_found.endswith('app_usage'):
            continue  # Skip all other files

        db = open_sqlite_db_readonly(file_found)
        cursor = db.cursor()
        cursor.execute('''
        SELECT
        datetime(component_events.timestamp/1000, "UNIXEPOCH") as timestamp,
        component_events._id,
        components.package_id,
        packages.package_name,
        components.component_name as website,
        CASE
            when component_events.type=1 THEN 'ACTIVITY_RESUMED'
            when component_events.type=2 THEN 'ACTIVITY_PAUSED'
            else component_events.type
        END as eventType
        FROM component_events
        INNER JOIN components ON component_events.component_id=components._id
        INNER JOIN packages ON components.package_id=packages._id
        ORDER BY timestamp
        ''')
        url_rows = cursor.fetchall()
        if url_rows:
            report = ArtifactHtmlReport('Wellbeing URL events')
            report.start_artifact_report(report_folder, 'URL Events')
            report.add_script()
            data_headers = ('Timestamp', 'Event ID', 'Package ID',
                            'Package Name', 'Website', 'Event')
            data_list = [tuple(row) for row in url_rows]
            report.write_artifact_data_table(data_headers, data_list, file_found)
            report.end_artifact_report()
            tsv(report_folder, data_headers, data_list, 'wellbeing - URL events')
            timeline(report_folder, 'Wellbeing - URL Events', data_list, data_headers)
        else:
            logfunc('No Wellbeing URL event data available')
        db.close()
    return
def get_addressBook(files_found, report_folder, seeker):
    """Report iOS Address Book contacts with a parsed phone number.

    Fix: the phone parser did ``row[1].split(" +")[1]`` unguarded, which
    raises IndexError whenever c16Phone is non-null but contains no ' +'
    separator (e.g. a purely local-format number). It now falls back to the
    raw stored value in that case.
    """
    file_found = str(files_found[0])
    db = open_sqlite_db_readonly(file_found)
    cursor = db.cursor()
    cursor.execute('''
    SELECT
    ABPerson.ROWID,
    c16Phone,
    FIRST,
    MIDDLE,
    LAST,
    c17Email,
    DATETIME(CREATIONDATE+978307200,'UNIXEPOCH'),
    DATETIME(MODIFICATIONDATE+978307200,'UNIXEPOCH'),
    NAME
    FROM ABPerson
    LEFT OUTER JOIN ABStore ON ABPerson.STOREID = ABStore.ROWID
    LEFT OUTER JOIN ABPersonFullTextSearch_content on ABPerson.ROWID = ABPersonFullTextSearch_content.ROWID
    ''')
    all_rows = cursor.fetchall()
    usageentries = len(all_rows)
    if usageentries > 0:
        data_list = []
        for row in all_rows:
            phone_number = ''
            if row[1] is not None:
                # c16Phone typically embeds the number as ' +<digits> ...';
                # extract the first international-format number if present.
                parts = row[1].split(" +")
                if len(parts) > 1:
                    phone_number = "+{}".format(parts[1].split(" ")[0])
                else:
                    # No ' +' separator — keep the raw value instead of
                    # crashing (original raised IndexError here).
                    phone_number = row[1]
            data_list.append((row[0], phone_number, row[2], row[3], row[4],
                              row[5], row[6], row[7], row[8]))
        report = ArtifactHtmlReport('Address Book Contacts')
        report.start_artifact_report(report_folder, 'Address Book Contacts')
        report.add_script()
        data_headers = ('Contact ID', 'Contact Number', 'First Name', 'Middle Name',
                        'Last Name', 'Email Address', 'Creation Date',
                        'Modification Date', 'Storage Place')
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()
        tsvname = 'Address Book'
        tsv(report_folder, data_headers, data_list, tsvname)
        tlactivity = 'Address Book'
        timeline(report_folder, tlactivity, data_list, data_headers)
    else:
        logfunc('No Address Book data available')
    db.close()
    return
def get_Turbo(files_found, report_folder, seeker, wrap_text):
    """Report Device Health Services (Turbo) battery events."""
    file_found = str(files_found[0])
    db = open_sqlite_db_readonly(file_found)
    cursor = db.cursor()
    cursor.execute('''
    select
    case timestamp_millis
        when 0 then ''
        else datetime(timestamp_millis/1000,'unixepoch')
    End as D_T,
    battery_level,
    case charge_type
        when 0 then ''
        when 1 then 'Charging Rapidly'
        when 2 then 'Charging Slowly'
        when 3 then 'Charging Wirelessly'
    End as C_Type,
    case battery_saver
        when 2 then ''
        when 1 then 'Enabled'
    End as B_Saver,
    timezone
    from battery_event
    ''')
    battery_rows = cursor.fetchall()
    if battery_rows:
        report = ArtifactHtmlReport('Turbo')
        report.start_artifact_report(report_folder, 'Turbo')
        report.add_script()
        data_headers = ('Date/Time', 'Battery %', 'Charge Type', 'Battery Saver', 'Timezone')
        data_list = [tuple(row) for row in battery_rows]
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()
        tsv(report_folder, data_headers, data_list, 'Turbo')
        timeline(report_folder, 'Turbo', data_list, data_headers)
    else:
        logfunc('No Turbo data available')
    db.close()
    return
def get_tileAppDb(files_found, report_folder, seeker):
    """Report Tile tracker metadata and last-known geolocation from tile-TileNetworkDB.sqlite."""
    for file_found in files_found:
        file_found = str(file_found)
        if file_found.endswith('tile-TileNetworkDB.sqlite'):
            break

    db = open_sqlite_db_readonly(file_found)
    cursor = db.cursor()
    # Core Data timestamps are seconds since 2001-01-01, hence '31 years'.
    cursor.execute('''
    SELECT
    datetime(ZTIMESTAMP,'unixepoch','31 years'),
    ZNAME,
    datetime(ZACTIVATION_TIMESTAMP,'unixepoch','31 years'),
    datetime(ZREGISTRATION_TIMESTAMP,'unixepoch','31 years'),
    ZALTITUDE,
    ZLATITUDE,
    ZLONGITUDE,
    ZID,
    ZNODE_TYPE,
    ZSTATUS,
    ZIS_LOST,
    datetime(ZLAST_LOST_TILE_COMMUNITY_CONNECTION,'unixepoch','31 years')
    FROM ZTILENTITY_NODE
    INNER JOIN ZTILENTITY_TILESTATE ON ZTILENTITY_NODE.ZTILE_STATE = ZTILENTITY_TILESTATE.Z_PK
    ''')
    tile_rows = cursor.fetchall()
    data_list = [tuple(row) for row in tile_rows]
    if data_list:
        description = ''
        report = ArtifactHtmlReport('Tile App - Tile Information & Geolocation')
        report.start_artifact_report(report_folder, 'Tile App DB Info & Geolocation', description)
        report.add_script()
        data_headers = ('Timestamp', 'Tile Name', 'Activation Timestamp',
                        'Registration Timestamp', 'Altitude', 'Latitude', 'Longitude',
                        'Tile ID', 'Tile Type', 'Status', 'Is Lost?',
                        'Last Community Connection')
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()
        tsv(report_folder, data_headers, data_list, 'Tile App DB Info Geolocation')
        timeline(report_folder, 'Tile App DB Info Geolocation', data_list, data_headers)
        kmlgen(report_folder, 'Tile App DB Info Geolocation', data_list, data_headers)
    else:
        logfunc('No Tile App DB data available')
    db.close()
    return
def get_cloudkitServerSharedData(file_found, report_folder, seeker):
    """Extract CloudKit share participants from ZICCLOUDSYNCINGOBJECT.

    Each ZSERVERSHAREDATA blob is dumped to a .bplist file in the report
    folder and deserialized; participant identities are collected into a
    dictionary keyed by CloudKit record ID (last occurrence wins), then
    reported as a de-duplicated list.
    """
    user_dictionary = {}

    db = open_sqlite_db_readonly(file_found)
    cursor = db.cursor()
    cursor.execute('''
    SELECT Z_PK, ZSERVERSHAREDATA
    FROM ZICCLOUDSYNCINGOBJECT
    WHERE ZSERVERSHAREDATA NOT NULL
    ''')

    for pk, blob in cursor.fetchall():
        # Preserve the raw serialized plist alongside the report output.
        dump_path = os.path.join(report_folder, f'zserversharedata_{pk}.bplist')
        with open(dump_path, 'wb') as dump_file:
            dump_file.write(blob)

        deserialized_plist = nd.deserialize_plist(io.BytesIO(blob))
        for item in deserialized_plist:
            if 'Participants' not in item:
                continue
            for participant in item['Participants']:
                identity = participant['UserIdentity']
                lookup = identity['LookupInfo']
                names = identity['NameComponents']['NS.nameComponentsPrivate']
                record_id = identity['UserRecordID']['RecordName']
                user_dictionary[record_id] = [
                    record_id,
                    lookup['EmailAddress'],
                    lookup['PhoneNumber'],
                    names['NS.namePrefix'],
                    names['NS.givenName'],
                    names['NS.middleName'],
                    names['NS.familyName'],
                    names['NS.nameSuffix'],
                    names['NS.nickname'],
                ]
    db.close()

    # Build the array after dealing with all the files
    user_list = list(user_dictionary.values())
    if len(user_list) > 0:
        description = 'CloudKit Participants - Cloudkit accounts participating in CloudKit shares.'
        report = ArtifactHtmlReport('Participants')
        report.start_artifact_report(report_folder, 'Participants', description)
        report.add_script()
        user_headers = ('Record ID', 'Email Address', 'Phone Number', 'Name Prefix', 'First Name',
                        'Middle Name', 'Last Name', 'Name Suffix', 'Nickname')
        report.write_artifact_data_table(user_headers, user_list, '', write_location=False)
        report.end_artifact_report()

        tsv(report_folder, user_headers, user_list, 'Cloudkit Participants')
    else:
        logfunc('No Cloudkit - Cloudkit Participants data available')
def get_chromeOfflinePages(files_found, report_folder, seeker, wrap_text):
    """Report saved offline pages from Chromium-based browsers.

    Iterates every discovered OfflinePages.db, labels rows with the owning
    browser, and writes one HTML report, TSV, and timeline per database.
    """
    for file_found in files_found:
        file_found = str(file_found)
        if not os.path.basename(file_found) == 'OfflinePages.db':  # skip -journal and other files
            continue

        browser_name = get_browser_name(file_found)
        if file_found.find('app_sbrowser') >= 0:
            browser_name = 'Browser'
        elif file_found.find('.magisk') >= 0 and file_found.find('mirror') >= 0:
            continue  # Skip sbin/.magisk/mirror/data/.. , it should be duplicate data??

        db = open_sqlite_db_readonly(file_found)
        cursor = db.cursor()
        cursor.execute('''
        SELECT
        datetime(creation_time / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch") as creation_time,
        datetime(last_access_time / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch") as last_access_time,
        online_url,
        file_path,
        title,
        access_count,
        file_size
        from offlinepages_v1
        ''')

        all_rows = cursor.fetchall()
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport(f'{browser_name} - Offline Pages')
            # check for existing and get next name for report file, so report from another file does not get overwritten
            report_path = os.path.join(report_folder, f'{browser_name} - Offline Pages.temphtml')
            report_path = get_next_unused_name(report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder, os.path.basename(report_path))
            report.add_script()
            data_headers = ('Creation Time', 'Last Access Time', 'Online URL', 'File Path',
                            'Title', 'Access Count', 'File Size')
            data_list = []
            for row in all_rows:
                # Bug fix: textwrap.fill raises AttributeError on None, and
                # online_url can be NULL in the database. Only wrap when the
                # URL is a non-empty string.
                url = textwrap.fill(row[2], width=75) if (wrap_text and row[2]) else row[2]
                data_list.append((row[0], row[1], url, row[3], row[4], row[5], row[6]))

            report.write_artifact_data_table(data_headers, data_list, file_found)
            report.end_artifact_report()

            tsvname = f'{browser_name} - Offline Pages'
            tsv(report_folder, data_headers, data_list, tsvname)

            tlactivity = f'{browser_name} - Offline Pages'
            timeline(report_folder, tlactivity, data_list, data_headers)
        else:
            logfunc(f'No {browser_name} - Offline Pages data available')

        db.close()
def _parse_app_database(db_file, db_file_name, report_folder):
    """Run the chat-message query against db_file and report any results.

    Delegates row parsing to _parse_chat_messages; logs a "no data"
    message when the query matches nothing.
    """
    db = open_sqlite_db_readonly(db_file)
    cursor = db.cursor()

    messages_count, rows = _perform_query(cursor, CHAT_MESSAGES_QUERY)
    if messages_count > 0 and rows:
        _parse_chat_messages(messages_count, rows, report_folder, db_file_name)
    else:
        logfunc(f'No {APP_NAME} chat data found')

    cursor.close()
    db.close()
def get_appleWalletTransactions(files_found, report_folder, seeker):
    """Report Apple Wallet payment transactions (PAYMENT_TRANSACTION table).

    Converts Apple epoch timestamps to datetimes and cent amounts to
    currency units in SQL, then writes an HTML report, TSV, and timeline.
    """
    db_path = str(files_found[0])
    db = open_sqlite_db_readonly(db_path)
    cursor = db.cursor()
    cursor.execute('''SELECT
    DATETIME(TRANSACTION_DATE + 978307200,'UNIXEPOCH'),
    MERCHANT_NAME,
    LOCALITY,
    ADMINISTRATIVE_AREA,
    CAST(AMOUNT AS REAL)/100,
    CURRENCY_CODE,
    DATETIME(LOCATION_DATE + 978307200,'UNIXEPOCH'),
    LOCATION_LATITUDE,
    LOCATION_LONGITUDE,
    LOCATION_ALTITUDE,
    PEER_PAYMENT_COUNTERPART_HANDLE,
    PEER_PAYMENT_MEMO,
    TRANSACTION_STATUS,
    TRANSACTION_TYPE
    FROM PAYMENT_TRANSACTION
    ''')

    rows = cursor.fetchall()
    if len(rows) > 0:
        data_list = [tuple(row[0:14]) for row in rows]

        report = ArtifactHtmlReport('Transactions')
        report.start_artifact_report(report_folder, 'Transactions')
        report.add_script()
        data_headers = ('Transaction Date', 'Merchant', 'Locality', 'Administrative Area',
                        'Currency Amount', 'Currency Type', 'Location Date', 'Latitude',
                        'Longitude', 'Altitude', 'Peer Payment Handle', 'Payment Memo',
                        'Transaction Status', 'Transaction Type')
        report.write_artifact_data_table(data_headers, data_list, db_path)
        report.end_artifact_report()

        tsv(report_folder, data_headers, data_list, 'Apple Wallet Transactions')
        timeline(report_folder, 'Apple Wallet Transactions', data_list, data_headers)
    else:
        logfunc('No Apple Wallet Transactions available')

    db.close()
    return
def get_locationDparkedhistorical(files_found, report_folder, seeker):
    """Report historical parked-vehicle records from routined (iOS 11+).

    Bails out (returning an empty tuple, as callers of the original did)
    on unsupported iOS versions; otherwise writes an HTML report plus
    TSV, timeline, and KML outputs.
    """
    iOSversion = scripts.artifacts.artGlobals.versionf
    if version.parse(iOSversion) < version.parse("11"):
        logfunc("Unsupported version for RoutineD Parked Historical " + iOSversion)
        return ()

    db_path = str(files_found[0])
    db = open_sqlite_db_readonly(db_path)
    cursor = db.cursor()
    cursor.execute("""
    select
    datetime(zrtvehicleeventhistorymo.zdate + 978307200, 'unixepoch'),
    datetime(zrtvehicleeventhistorymo.zlocdate + 978307200, 'unixepoch'),
    zlocuncertainty,
    zidentifier,
    zloclatitude,
    zloclongitude
    from
    zrtvehicleeventhistorymo
    """)

    rows = cursor.fetchall()
    if len(rows) > 0:
        data_list = [(r[0], r[1], r[2], r[3], r[4], r[5]) for r in rows]

        report = ArtifactHtmlReport('RoutineD Parked Vehicle Historical')
        report.start_artifact_report(report_folder, 'Parked Vehicle Historical', '')
        report.add_script()
        data_headers = ('Timestamp', 'Location Date', 'Location Uncertainty', 'Identifier',
                        'Latitude', 'Longitude')
        report.write_artifact_data_table(data_headers, data_list, db_path)
        report.end_artifact_report()

        tsv(report_folder, data_headers, data_list, 'RoutineD Parked Vehicle Historical')
        timeline(report_folder, 'RoutineD Parked Vehicle Historical', data_list, data_headers)
        kmlgen(report_folder, 'RoutineD Parked Vehicle Historical', data_list, data_headers)
    else:
        logfunc('No data available in Routine Parked Vehicle Historical')

    db.close()
    return
def get_googleFitGMS(files_found, report_folder, seeker, wrap_text):
    """Report Google Fit (GMS) activity sessions from each discovered database.

    NOTE(review): the filename filter below was already commented out in
    the original, so every file in files_found is queried; preserved as-is.
    """
    for file_found in files_found:
        file_found = str(file_found)
        # if not file_found.endswith('fitness.db'):
        #    continue # Skip all other files

        db = open_sqlite_db_readonly(file_found)
        cursor = db.cursor()
        cursor.execute('''
        SELECT
        datetime(Sessions.start_time/1000,'unixepoch') AS "Activity Start Time",
        datetime(Sessions.end_time/1000,'unixepoch') AS "Activity End Time",
        Sessions.app_package AS "Contributing App",
        CASE
        WHEN Sessions.activity=7 THEN "Walking"
        WHEN Sessions.activity=8 THEN "Running"
        WHEN Sessions.activity=72 THEN "Sleeping"
        ELSE Sessions.activity
        END AS "Activity Type",
        Sessions.name AS "Activity Name",
        Sessions.description AS "Activity Description"
        FROM Sessions
        ORDER BY "Activity Start Time" ASC
        ''')

        rows = cursor.fetchall()
        if len(rows) > 0:
            report = ArtifactHtmlReport('Google Fit (GMS)')
            report.start_artifact_report(report_folder, 'Activity Sessions')
            report.add_script()
            data_headers = ('Activity Start Time', 'Activity End Time', 'Contributing App',
                            'Activity Type', 'Activity Name', 'Activity Description')
            data_list = [(r[0], r[1], r[2], r[3], r[4], r[5]) for r in rows]
            report.write_artifact_data_table(data_headers, data_list, file_found)
            report.end_artifact_report()

            tsv(report_folder, data_headers, data_list, 'Google Fit (GMS) - Activity Sessions')
            timeline(report_folder, 'Google Fit (GMS) - Activity Sessions', data_list, data_headers)
        else:
            logfunc('No Google Fit (GMS) - Activity Sessions data available')

        db.close()
def get_googleMessages(files_found, report_folder, seeker, wrap_text):
    """Report Google Messages conversations from bugle_db.

    Joins parts/messages/participants/conversations to recover sender,
    conversation name, text, and attachment details per message part.
    """
    for file_found in files_found:
        file_found = str(file_found)
        if not file_found.endswith('bugle_db'):
            continue  # Skip all other files

        db = open_sqlite_db_readonly(file_found)
        cursor = db.cursor()
        cursor.execute('''
        SELECT
        datetime(parts.timestamp/1000,'unixepoch') AS "Timestamp (UTC)",
        parts.content_type AS "Message Type",
        conversations.name AS "Other Participant/Conversation Name",
        participants.display_destination AS "Message Sender",
        parts.text AS "Message",
        CASE
        WHEN parts.file_size_bytes=-1 THEN "N/A"
        ELSE parts.file_size_bytes
        END AS "Attachment Byte Size",
        parts.local_cache_path AS "Attachment Location"
        FROM parts
        JOIN messages ON messages._id=parts.message_id
        JOIN participants ON participants._id=messages.sender_id
        JOIN conversations ON conversations._id=parts.conversation_id
        ORDER BY "Timestamp (UTC)" ASC
        ''')

        rows = cursor.fetchall()
        if len(rows) > 0:
            report = ArtifactHtmlReport('Google Messages')
            report.start_artifact_report(report_folder, 'Google Messages')
            report.add_script()
            data_headers = ('Message Timestamp (UTC)', 'Message Type',
                            'Other Participant/Conversation Name', 'Message Sender', 'Message',
                            'Attachment Byte Size', 'Attachment Location')
            data_list = [(r[0], r[1], r[2], r[3], r[4], r[5], r[6]) for r in rows]
            report.write_artifact_data_table(data_headers, data_list, file_found)
            report.end_artifact_report()

            tsv(report_folder, data_headers, data_list, 'Google Messages')
            timeline(report_folder, 'Google Messages', data_list, data_headers)
        else:
            logfunc('No Google Messages data available')

        db.close()
def get_firefoxCookies(files_found, report_folder, seeker, wrap_text):
    """Report Firefox cookies from cookies.sqlite (moz_cookies table).

    Converts the microsecond access/creation timestamps and the
    second-resolution expiry to datetimes in SQL, then writes an HTML
    report, TSV, and timeline per discovered database.
    """
    for file_found in files_found:
        file_found = str(file_found)
        if not os.path.basename(file_found) == 'cookies.sqlite':  # skip -journal and other files
            continue

        db = open_sqlite_db_readonly(file_found)
        cursor = db.cursor()
        cursor.execute('''
        SELECT
        datetime(lastAccessed/1000000,'unixepoch') AS LastAccessedDate,
        datetime(creationTime/1000000,'unixepoch') AS CreationDate,
        host AS Host,
        name AS Name,
        value AS Value,
        datetime(expiry,'unixepoch') AS ExpirationDate,
        path AS Path
        from moz_cookies
        ORDER BY lastAccessedDate ASC
        ''')

        rows = cursor.fetchall()
        if len(rows) > 0:
            report = ArtifactHtmlReport('Firefox - Cookies')
            report.start_artifact_report(report_folder, 'Firefox - Cookies')
            report.add_script()
            data_headers = ('Last Accessed Timestamp', 'Created Timestamp', 'Host', 'Name',
                            'Value', 'Expiration Timestamp', 'Path')
            data_list = [(r[0], r[1], r[2], r[3], r[4], r[5], r[6]) for r in rows]
            report.write_artifact_data_table(data_headers, data_list, file_found)
            report.end_artifact_report()

            tsv(report_folder, data_headers, data_list, 'Firefox - Cookies')
            timeline(report_folder, 'Firefox - Cookies', data_list, data_headers)
        else:
            logfunc('No Firefox - Cookies data available')

        db.close()