def get_chromeNetworkActionPredictor(files_found, report_folder, seeker,
                                     wrap_text):

    for file_found in files_found:
        file_found = str(file_found)
        if not file_found.endswith('Network Action Predictor'):
            continue  # Skip all other files

        browser_name = get_browser_name(file_found)
        if file_found.find('app_sbrowser') >= 0:
            browser_name = 'Browser'
        elif file_found.find('.magisk') >= 0 and file_found.find(
                'mirror') >= 0:
            continue  # Skip sbin/.magisk/mirror/data/..; likely duplicate data

        db = open_sqlite_db_readonly(file_found)
        cursor = db.cursor()
        cursor.execute('''
        select
        user_text,
        url,
        number_of_hits,
        number_of_misses
        from network_action_predictor
        ''')

        all_rows = cursor.fetchall()
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport(
                f'{browser_name} - Network Action Predictor')
            #check for existing and get next name for report file, so report from another file does not get overwritten
            report_path = os.path.join(
                report_folder,
                f'{browser_name} - Network Action Predictor.temphtml')
            report_path = get_next_unused_name(
                report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder,
                                         os.path.basename(report_path))
            report.add_script()
            data_headers = (
                'User Text', 'URL', 'Number of Hits', 'Number of Misses')
            data_list = []
            for row in all_rows:
                data_list.append((row[0], row[1], row[2], row[3]))

            report.write_artifact_data_table(data_headers, data_list,
                                             file_found)
            report.end_artifact_report()

            tsvname = f'{browser_name} - Network Action Predictor'
            tsv(report_folder, data_headers, data_list, tsvname)

        else:
            logfunc(
                f'No {browser_name} - Network Action Predictor data available')

        db.close()
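
Every parser in this file names its report with a throwaway '.temphtml' suffix, asks get_next_unused_name for a collision-free path, and then slices the 9-character suffix back off. The helper itself is not shown in these excerpts; a hypothetical sketch consistent with that usage (ALEAPP's real implementation may differ):

import os

def get_next_unused_name(path):
    # Hypothetical sketch: return `path` unchanged if it is unused,
    # otherwise insert a counter before the extension until a free
    # name is found (report.temphtml -> report_1.temphtml -> ...).
    if not os.path.exists(path):
        return path
    base, ext = os.path.splitext(path)
    n = 1
    while os.path.exists(f'{base}_{n}{ext}'):
        n += 1
    return f'{base}_{n}{ext}'
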
Example #2
def get_chrome(files_found, report_folder, seeker, wrap_text):

    for file_found in files_found:
        file_found = str(file_found)
        if not os.path.basename(
                file_found) == 'History':  # skip -journal and other files
            continue
        elif file_found.find('.magisk') >= 0 and file_found.find(
                'mirror') >= 0:
            continue  # Skip sbin/.magisk/mirror/data/..; likely duplicate data
        browser_name = get_browser_name(file_found)
        if file_found.find('app_sbrowser') >= 0:
            browser_name = 'Browser'

        db = open_sqlite_db_readonly(file_found)
        cursor = db.cursor()
        cursor.execute('''
        select
            datetime(last_visit_time / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch"),
            url,
            title,
            visit_count,
            hidden
        from urls  
        ''')

        all_rows = cursor.fetchall()
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport(f'{browser_name} History')
            #check for existing and get next name for report file, so report from another file does not get overwritten
            report_path = os.path.join(report_folder,
                                       f'{browser_name} History.temphtml')
            report_path = get_next_unused_name(
                report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder,
                                         os.path.basename(report_path))
            report.add_script()
            data_headers = ('Last Visit Time', 'URL', 'Title', 'Visit Count',
                            'Hidden')
            data_list = []
            for row in all_rows:
                if wrap_text:
                    # wrap the long URL, not the fixed-width timestamp
                    data_list.append((row[0], textwrap.fill(row[1], width=100),
                                      row[2], row[3], row[4]))
                else:
                    data_list.append((row[0], row[1], row[2], row[3], row[4]))
            report.write_artifact_data_table(data_headers, data_list,
                                             file_found)
            report.end_artifact_report()

            tsvname = f'{browser_name} History'
            tsv(report_folder, data_headers, data_list, tsvname)

            tlactivity = f'{browser_name} History'
            timeline(report_folder, tlactivity, data_list, data_headers)
        else:
            logfunc(f'No {browser_name} history data available')

        db.close()
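
The last_visit_time expression above converts Chrome's WebKit timestamps, which count microseconds since 1601-01-01 UTC, by shifting them onto the Unix epoch with strftime('%s', '1601-01-01'). The same conversion in Python, as a minimal sketch:

from datetime import datetime, timedelta, timezone

# WebKit/Chrome timestamps: microseconds since 1601-01-01 UTC. This mirrors
# datetime(x / 1000000 + strftime('%s', '1601-01-01'), 'unixepoch').
WEBKIT_EPOCH = datetime(1601, 1, 1, tzinfo=timezone.utc)

def webkit_to_datetime(microseconds):
    if not microseconds:
        return None  # 0 means "never set" in these tables
    return WEBKIT_EPOCH + timedelta(microseconds=int(microseconds))
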
Example #3
def get_chromeWebsearch(files_found, report_folder, seeker):

    for file_found in files_found:
        file_found = str(file_found)
        if not os.path.basename(
                file_found) == 'History':  # skip -journal and other files
            continue
        browser_name = 'Chrome'
        if file_found.find('app_sbrowser') >= 0:
            browser_name = 'Browser'
        elif file_found.find('.magisk') >= 0 and file_found.find(
                'mirror') >= 0:
            continue  # Skip sbin/.magisk/mirror/data/..; likely duplicate data

        db = sqlite3.connect(file_found)
        cursor = db.cursor()
        cursor.execute('''
        SELECT
            url,
            title,
            visit_count,
            datetime(last_visit_time / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch")
        FROM urls
        WHERE url like '%search?q=%'
        ''')

        all_rows = cursor.fetchall()
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport(f'{browser_name} Search Terms')
            #check for existing and get next name for report file, so report from another file does not get overwritten
            report_path = os.path.join(
                report_folder, f'{browser_name} Search Terms.temphtml')
            report_path = get_next_unused_name(
                report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder,
                                         os.path.basename(report_path))
            report.add_script()
            data_headers = ('Search Term', 'URL', 'Title', 'Visit Count',
                            'Last Visit Time')
            data_list = []
            for row in all_rows:
                search = row[0].split('search?q=')[1].split('&')[0]
                search = search.replace('+', ' ')
                data_list.append((search, (textwrap.fill(row[0], width=100)),
                                  row[1], row[2], row[3]))

            report.write_artifact_data_table(data_headers, data_list,
                                             file_found)
            report.end_artifact_report()

            tsvname = f'{browser_name} search terms'
            tsv(report_folder, data_headers, data_list, tsvname)
        else:
            logfunc(f'No {browser_name} web search terms data available')

        db.close()
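
This older version slices the URL at 'search?q=' and only swaps '+' for spaces, so percent-encoded characters stay encoded (the get_chrome in example #13 below adds urllib.parse.unquote). A sketch using the standard library's query parser instead, which handles both encodings and parameter order:

from urllib.parse import parse_qs, urlsplit

def extract_search_term(url):
    # parse_qs decodes both '+' and percent-escapes, and tolerates
    # extra query parameters in any order.
    values = parse_qs(urlsplit(url).query).get('q')
    return values[0] if values else None

# extract_search_term('https://example.com/search?q=hello+world%21')
# -> 'hello world!'
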
Example #4
def get_chromeTopSites(files_found, report_folder, seeker, wrap_text):

    for file_found in files_found:
        file_found = str(file_found)
        if not os.path.basename(
                file_found) == 'Top Sites':  # skip -journal and other files
            continue
        browser_name = get_browser_name(file_found)
        if file_found.find('app_sbrowser') >= 0:
            browser_name = 'Browser'
        elif file_found.find('.magisk') >= 0 and file_found.find(
                'mirror') >= 0:
            continue  # Skip sbin/.magisk/mirror/data/..; likely duplicate data

        db = open_sqlite_db_readonly(file_found)
        cursor = db.cursor()
        try:
            cursor.execute('''
            select
            url,
            url_rank,
            title,
            redirects
            FROM
            top_sites ORDER by url_rank asc
            ''')

            all_rows = cursor.fetchall()
            usageentries = len(all_rows)
        except Exception:
            usageentries = 0  # table missing or schema differs in older versions

        if usageentries > 0:
            report = ArtifactHtmlReport(f'{browser_name} Top Sites')
            #check for existing and get next name for report file, so report from another file does not get overwritten
            report_path = os.path.join(report_folder,
                                       f'{browser_name} Top Sites.temphtml')
            report_path = get_next_unused_name(
                report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder,
                                         os.path.basename(report_path))
            report.add_script()
            data_headers = ('URL', 'Rank', 'Title', 'Redirects')
            data_list = []
            for row in all_rows:
                data_list.append((row[0], row[1], row[2], row[3]))

            report.write_artifact_data_table(data_headers, data_list,
                                             file_found)
            report.end_artifact_report()

            tsvname = f'{browser_name} top sites'
            tsv(report_folder, data_headers, data_list, tsvname)
        else:
            logfunc(f'No {browser_name} Top Sites data available')

        db.close()
Example #5
def get_chromeOfflinePages(files_found, report_folder, seeker, wrap_text):
    
    for file_found in files_found:
        file_found = str(file_found)
        if not os.path.basename(file_found) == 'OfflinePages.db': # skip -journal and other files
            continue
        browser_name = get_browser_name(file_found)
        if file_found.find('app_sbrowser') >= 0:
            browser_name = 'Browser'
        elif file_found.find('.magisk') >= 0 and file_found.find('mirror') >= 0:
            continue  # Skip sbin/.magisk/mirror/data/..; likely duplicate data

        db = open_sqlite_db_readonly(file_found)
        cursor = db.cursor()
        cursor.execute('''
        SELECT
        datetime(creation_time / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch") as creation_time,
        datetime(last_access_time / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch") as last_access_time,
        online_url,
        file_path,
        title,
        access_count,
        file_size
        from offlinepages_v1
        ''')

        all_rows = cursor.fetchall()
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport(f'{browser_name} - Offline Pages')
            #check for existing and get next name for report file, so report from another file does not get overwritten
            report_path = os.path.join(report_folder, f'{browser_name} - Offline Pages.temphtml')
            report_path = get_next_unused_name(report_path)[:-9] # remove .temphtml
            report.start_artifact_report(report_folder, os.path.basename(report_path))
            report.add_script()
            data_headers = ('Creation Time', 'Last Access Time', 'Online URL', 'File Path', 'Title', 'Access Count', 'File Size')
            data_list = []
            for row in all_rows:
                if wrap_text:
                    data_list.append((row[0],row[1],(textwrap.fill(row[2], width=75)),row[3],row[4],row[5],row[6]))
                else:
                    data_list.append((row[0],row[1],row[2],row[3],row[4],row[5],row[6]))
            report.write_artifact_data_table(data_headers, data_list, file_found)
            report.end_artifact_report()
            
            tsvname = f'{browser_name} - Offline Pages'
            tsv(report_folder, data_headers, data_list, tsvname)
            
            tlactivity = f'{browser_name} - Offline Pages'
            timeline(report_folder, tlactivity, data_list, data_headers)
        else:
            logfunc(f'No {browser_name} - Offline Pages data available')
        
        db.close()
Example #6
def get_chromeBookmarks(files_found, report_folder, seeker, wrap_text):
    
    for file_found in files_found:
        file_found = str(file_found)
        if not os.path.basename(file_found) == 'Bookmarks': # skip -journal and other files
            continue
        elif file_found.find('.magisk') >= 0 and file_found.find('mirror') >= 0:
            continue  # Skip sbin/.magisk/mirror/data/..; likely duplicate data
        browser_name = get_browser_name(file_found)
        if file_found.find('app_sbrowser') >= 0:
            browser_name = 'Browser'

        with open(file_found, 'r', encoding='utf-8') as f:
            dataa = json.load(f)
        data_list = []
        for x, y in dataa.items():
            flag = 0
            if isinstance(y,dict):
                for key, value in y.items():
                    if isinstance(value,dict):
                        for keyb, valueb in value.items():
                            if keyb == 'children':
                                if len(valueb) > 0:
                                    url = valueb[0]['url']
                                    dateadd = valueb[0]['date_added']
                                    dateaddconv = datetime.datetime(1601, 1, 1) + datetime.timedelta(microseconds=int(dateadd))
                                    name = valueb[0]['name']
                                    typed = valueb[0]['type']
                                    flag = 1
                            if keyb == 'name' and flag == 1:
                                flag = 0
                                parent = valueb
                                data_list.append((dateaddconv, url, name, parent, typed))
        num_entries = len(data_list)
        if num_entries > 0:
            report = ArtifactHtmlReport(f'{browser_name} - Bookmarks')
            #check for existing and get next name for report file, so report from another file does not get overwritten
            report_path = os.path.join(report_folder, f'{browser_name} - Bookmarks.temphtml')
            report_path = get_next_unused_name(report_path)[:-9] # remove .temphtml
            report.start_artifact_report(report_folder, os.path.basename(report_path))
            report.add_script()
            data_headers = ('Added Date', 'URL', 'Name', 'Parent', 'Type') 
            report.write_artifact_data_table(data_headers, data_list, file_found)
            report.end_artifact_report()
            
            tsvname = f'{browser_name} - Bookmarks'
            tsv(report_folder, data_headers, data_list, tsvname)
            
            tlactivity = f'{browser_name} - Bookmarks'
            timeline(report_folder, tlactivity, data_list, data_headers)
        else:
            logfunc(f'No {browser_name} - Bookmarks data available')
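
Note that the nested loops above only read valueb[0], the first child of each root folder, and never descend into subfolders. A sketch of a recursive walk over the same Bookmarks JSON that collects every bookmark (structure assumptions noted in the comments):

import datetime

def walk_bookmarks(node, parent_name, out):
    # Chrome's Bookmarks file is JSON: folder nodes carry type 'folder'
    # and a 'children' list, leaf nodes carry type 'url'.
    if node.get('type') == 'url':
        added = datetime.datetime(1601, 1, 1) + datetime.timedelta(
            microseconds=int(node.get('date_added', 0)))
        out.append((added, node.get('url'), node.get('name'),
                    parent_name, node.get('type')))
    for child in node.get('children', []):
        walk_bookmarks(child, node.get('name'), out)

# Usage sketch, collecting from every root folder:
# data_list = []
# for root in dataa.get('roots', {}).values():
#     if isinstance(root, dict):
#         walk_bookmarks(root, root.get('name'), data_list)
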
Example #7
def get_chromeCookies(files_found, report_folder, seeker, wrap_text):

    for file_found in files_found:
        file_found = str(file_found)
        if not os.path.basename(
                file_found) == 'Cookies':  # skip -journal and other files
            continue
        browser_name = get_browser_name(file_found)
        if file_found.find('app_sbrowser') >= 0:
            browser_name = 'Browser'
        elif file_found.find('.magisk') >= 0 and file_found.find(
                'mirror') >= 0:
            continue  # Skip sbin/.magisk/mirror/data/..; likely duplicate data

        db = open_sqlite_db_readonly(file_found)
        cursor = db.cursor()
        cursor.execute('''
        SELECT
        CASE
            last_access_utc 
            WHEN
                "0" 
            THEN
                "" 
            ELSE
                datetime(last_access_utc / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch")
        END AS "last_access_utc", 
        host_key,
        name,
        value,
        CASE
            creation_utc 
            WHEN
                "0" 
            THEN
                "" 
            ELSE
                datetime(creation_utc / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch")
        END AS "creation_utc", 
        CASE
            expires_utc 
            WHEN
                "0" 
            THEN
                "" 
            ELSE
                datetime(expires_utc / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch")
        END AS "expires_utc", 
        path
        FROM
        cookies
        ''')

        all_rows = cursor.fetchall()
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport(f'{browser_name} - Cookies')
            #check for existing and get next name for report file, so report from another file does not get overwritten
            report_path = os.path.join(report_folder,
                                       f'{browser_name} - Cookies.temphtml')
            report_path = get_next_unused_name(
                report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder,
                                         os.path.basename(report_path))
            report.add_script()
            data_headers = ('Last Access Date', 'Host', 'Name', 'Value',
                            'Created Date', 'Expiration Date', 'Path')
            data_list = []
            for row in all_rows:
                if wrap_text:
                    data_list.append(
                        (row[0], row[1], (textwrap.fill(row[2], width=50)),
                         row[3], row[4], row[5], row[6]))
                else:
                    data_list.append((row[0], row[1], row[2], row[3], row[4],
                                      row[5], row[6]))

            report.write_artifact_data_table(data_headers, data_list,
                                             file_found)
            report.end_artifact_report()

            tsvname = f'{browser_name} - Cookies'
            tsv(report_folder, data_headers, data_list, tsvname)

            tlactivity = f'{browser_name} - Cookies'
            timeline(report_folder, tlactivity, data_list, data_headers)
        else:
            logfunc(f'No {browser_name} - Cookies data available')

        db.close()
Example #8
def get_chromeLoginData(files_found, report_folder, seeker):

    for file_found in files_found:
        file_found = str(file_found)
        if not os.path.basename(
                file_found) == 'Login Data':  # skip -journal and other files
            continue
        browser_name = 'Chrome'
        if file_found.find('app_sbrowser') >= 0:
            browser_name = 'Browser'
        elif file_found.find('.magisk') >= 0 and file_found.find(
                'mirror') >= 0:
            continue  # Skip sbin/.magisk/mirror/data/..; likely duplicate data

        db = sqlite3.connect(file_found)
        cursor = db.cursor()
        cursor.execute('''
        SELECT
        username_value,
        password_value,
        CASE date_created 
            WHEN "0" THEN "" 
            ELSE datetime(date_created / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch")
            END AS "date_created_win_epoch", 
        CASE date_created WHEN "0" THEN "" 
            ELSE datetime(date_created / 1000000 + (strftime('%s', '1970-01-01')), "unixepoch")
            END AS "date_created_unix_epoch", 
        origin_url,
        blacklisted_by_user
        FROM logins
        ''')

        all_rows = cursor.fetchall()
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport(f'{browser_name} Login Data')
            #check for existing and get next name for report file, so report from another file does not get overwritten
            report_path = os.path.join(report_folder,
                                       f'{browser_name} Login Data.temphtml')
            report_path = get_next_unused_name(
                report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder,
                                         os.path.basename(report_path))
            report.add_script()
            data_headers = ('Username', 'Password', 'Created Time',
                            'Origin URL', 'Blacklisted by User')
            data_list = []
            for row in all_rows:
                password = ''
                password_enc = row[1]
                if password_enc:
                    password = decrypt(password_enc).decode("utf-8", 'replace')
                valid_date = get_valid_date(row[2], row[3])
                data_list.append(
                    (row[0], password, valid_date, row[4], row[5]))

            report.write_artifact_data_table(data_headers, data_list,
                                             file_found)
            report.end_artifact_report()

            tsvname = f'{browser_name} login data'
            tsv(report_folder, data_headers, data_list, tsvname)
        else:
            logfunc(f'No {browser_name} Login Data available')

        db.close()
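
The logins table's date_created switched epoch conventions across Chrome versions, so the query reads it both as a WebKit-epoch and as a Unix-epoch value and get_valid_date picks one. That helper is not shown in these excerpts; a hypothetical sketch (ALEAPP's real logic may differ):

def get_valid_date(win_epoch_date, unix_epoch_date):
    # Hypothetical sketch: the wrong epoch reading lands far outside a
    # sane range (circa 1651 or 2388), so keep whichever rendered string
    # falls in a plausible window. String comparison works because
    # SQLite's datetime() emits 'YYYY-MM-DD HH:MM:SS'.
    for candidate in (win_epoch_date, unix_epoch_date):
        if candidate and '1990-01-01' <= candidate <= '2100-01-01':
            return candidate
    return ''
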
Example #9
def get_chromeDownloads(files_found, report_folder, seeker):

    for file_found in files_found:
        file_found = str(file_found)
        if not os.path.basename(
                file_found) == 'History':  # skip -journal and other files
            continue
        browser_name = 'Chrome'
        if file_found.find('app_sbrowser') >= 0:
            browser_name = 'Browser'
        elif file_found.find('.magisk') >= 0 and file_found.find(
                'mirror') >= 0:
            continue  # Skip sbin/.magisk/mirror/data/..; likely duplicate data

        db = sqlite3.connect(file_found)
        cursor = db.cursor()

        # check for last_access_time column, an older version of chrome db (32) does not have it
        if does_column_exist_in_db(db, 'downloads', 'last_access_time'):
            last_access_time_query = '''
            CASE last_access_time 
                WHEN "0" 
                THEN "" 
                ELSE datetime(last_access_time / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch")
            END AS "Last Access Time"'''
        else:
            last_access_time_query = "'' as last_access_query"

        cursor.execute(f'''
        SELECT tab_url,
        CASE start_time  
            WHEN "0" 
            THEN "" 
            ELSE datetime(start_time / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch")
        END AS "Start Time", 
        CASE end_time 
            WHEN "0" 
            THEN "" 
            ELSE datetime(end_time / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch")
        END AS "End Time", 
        {last_access_time_query}, 
        target_path, state, opened, received_bytes, total_bytes
        FROM downloads
        ''')

        all_rows = cursor.fetchall()
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport(f'{browser_name} Downloads')
            #check for existing and get next name for report file, so report from another file does not get overwritten
            report_path = os.path.join(report_folder,
                                       f'{browser_name} Downloads.temphtml')
            report_path = get_next_unused_name(
                report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder,
                                         os.path.basename(report_path))
            report.add_script()
            data_headers = ('URL', 'Start Time', 'End Time',
                            'Last Access Time', 'Target Path', 'State',
                            'Opened?', 'Received Bytes', 'Total Bytes')
            data_list = []
            for row in all_rows:
                data_list.append((row[0], row[1], row[2], row[3], row[4],
                                  row[5], row[6], row[7], row[8]))

            report.write_artifact_data_table(data_headers, data_list,
                                             file_found)
            report.end_artifact_report()

            tsvname = f'{browser_name} downloads'
            tsv(report_folder, data_headers, data_list, tsvname)
        else:
            logfunc(f'No {browser_name} download data available')

        db.close()
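
does_column_exist_in_db guards against the older schema; the helper is not shown in these excerpts. A hypothetical sketch using SQLite's PRAGMA table_info, the usual way to probe for a column:

def does_column_exist_in_db(db, table_name, col_name):
    # Hypothetical sketch; ALEAPP's real helper may differ.
    # PRAGMA table_info rows are (cid, name, type, notnull, dflt_value, pk).
    cursor = db.cursor()
    cursor.execute(f'PRAGMA table_info({table_name})')
    return any(row[1].lower() == col_name.lower()
               for row in cursor.fetchall())
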
Example #10
def get_chromeMediaHistory(files_found, report_folder, seeker, wrap_text):

    for file_found in files_found:
        file_found = str(file_found)
        if not file_found.endswith('Media History'):
            continue  # Skip all other files

        browser_name = get_browser_name(file_found)
        if file_found.find('app_sbrowser') >= 0:
            browser_name = 'Browser'
        elif file_found.find('.magisk') >= 0 and file_found.find(
                'mirror') >= 0:
            continue  # Skip sbin/.magisk/mirror/data/..; likely duplicate data

        db = open_sqlite_db_readonly(file_found)
        cursor = db.cursor()
        cursor.execute('''
        select
        datetime(last_updated_time_s-11644473600, 'unixepoch') as last_updated_time_s,
            origin_id,
            url,
            strftime('%H:%M:%S',position_ms/1000, 'unixepoch') as position_ms,
            strftime('%H:%M:%S',duration_ms/1000, 'unixepoch') as duration_ms,
            title,
            artist,
            album,
            source_title
        from playbackSession
        ''')

        all_rows = cursor.fetchall()
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport(
                f'{browser_name} Media History - Sessions')
            #check for existing and get next name for report file, so report from another file does not get overwritten
            report_path = os.path.join(
                report_folder,
                f'{browser_name} Media History - Sessions.temphtml')
            report_path = get_next_unused_name(
                report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder,
                                         os.path.basename(report_path))
            report.add_script()
            data_headers = (
                'Last Updated', 'Origin ID', 'URL', 'Position', 'Duration',
                'Title', 'Artist', 'Album', 'Source Title')
            data_list = []
            for row in all_rows:
                data_list.append((row[0], row[1], row[2], row[3], row[4],
                                  row[5], row[6], row[7], row[8]))

            report.write_artifact_data_table(data_headers, data_list,
                                             file_found)
            report.end_artifact_report()

            tsvname = f'{browser_name} Media History - Sessions'
            tsv(report_folder, data_headers, data_list, tsvname)

            tlactivity = f'{browser_name} Media History - Sessions'
            timeline(report_folder, tlactivity, data_list, data_headers)
        else:
            logfunc('No Browser Media History - Sessions data available')

        cursor.execute('''
        select
            datetime(last_updated_time_s-11644473600, 'unixepoch') as last_updated_time_s,
            id,
            origin_id,
            url,
            strftime('%H:%M:%S',watch_time_s, 'unixepoch') as watch_time_s,
            case has_audio
                when 0 then ''
                when 1 then 'Yes'
            end as has_audio,
            case has_video
                when 0 then ''
                when 1 then 'Yes'
            end as has_video  
        from playback
        ''')

        all_rows = cursor.fetchall()
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport(
                f'{browser_name} Media History - Playbacks')
            #check for existing and get next name for report file, so report from another file does not get overwritten
            report_path = os.path.join(
                report_folder,
                f'{browser_name} Media History - Playbacks.temphtml')
            report_path = get_next_unused_name(
                report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder,
                                         os.path.basename(report_path))
            report.add_script()
            data_headers = (
                'Last Updated', 'ID', 'Origin ID', 'URL', 'Watch Time',
                'Has Audio', 'Has Video')
            data_list = []
            for row in all_rows:
                data_list.append(
                    (row[0], row[1], row[2], row[3], row[4], row[5], row[6]))

            report.write_artifact_data_table(data_headers, data_list,
                                             file_found)
            report.end_artifact_report()

            tsvname = f'{browser_name} Media History - Playbacks'
            tsv(report_folder, data_headers, data_list, tsvname)

            tlactivity = f'{browser_name} Media History - Playbacks'
            timeline(report_folder, tlactivity, data_list, data_headers)
        else:
            logfunc('No Browser Media History - Playbacks data available')

        cursor.execute('''
        select
            datetime(last_updated_time_s-11644473600, 'unixepoch') as last_updated_time_s,
            id,
            origin,
            cast(aggregate_watchtime_audio_video_s/86400 as integer) || ':' || strftime('%H:%M:%S', aggregate_watchtime_audio_video_s ,'unixepoch') as aggregate_watchtime_audio_video_s
        from origin
        ''')

        all_rows = cursor.fetchall()
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport(
                f'{browser_name} Media History - Origins')
            #check for existing and get next name for report file, so report from another file does not get overwritten
            report_path = os.path.join(
                report_folder,
                f'{browser_name} Media History - Origins.temphtml')
            report_path = get_next_unused_name(
                report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder,
                                         os.path.basename(report_path))
            report.add_script()
            data_headers = (
                'Last Updated', 'ID', 'Origin', 'Aggregate Watchtime')
            data_list = []
            for row in all_rows:
                data_list.append((row[0], row[1], row[2], row[3]))

            report.write_artifact_data_table(data_headers, data_list,
                                             file_found)
            report.end_artifact_report()

            tsvname = f'{browser_name} Media History - Origins'
            tsv(report_folder, data_headers, data_list, tsvname)

            tlactivity = f'{browser_name} Media History - Origins'
            timeline(report_folder, tlactivity, data_list, data_headers)
        else:
            logfunc('No Browser Media History - Origins data available')

        db.close()
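
The sessions and playback queries format durations with strftime('%H:%M:%S', secs, 'unixepoch'), which silently wraps past 24 hours; that is why the origins query prepends a whole-day count. The same day-aware formatting in Python, as a small sketch:

def format_watchtime(total_seconds):
    # Mirrors days || ':' || strftime('%H:%M:%S', ...) from the origins
    # query; strftime alone would wrap after 24 hours.
    days, rem = divmod(int(total_seconds), 86400)
    hours, rem = divmod(rem, 3600)
    minutes, seconds = divmod(rem, 60)
    return f'{days}:{hours:02}:{minutes:02}:{seconds:02}'

# format_watchtime(90061) -> '1:01:01:01'
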
Example #11
def get_Cello(files_found, report_folder, seeker, wrap_text):
    file_found = get_cello_db_path(files_found)
    if not file_found:
        logfunc('Error: Could not get Cello.db path')
        return

    if report_folder[-1] == slash:
        folder_name = os.path.basename(report_folder[:-1])
    else:
        folder_name = os.path.basename(report_folder)

    db = open_sqlite_db_readonly(file_found)
    cursor = db.cursor()
    cursor.execute('''
    SELECT
        case created_date
            when 0 then ''
            else datetime(created_date/1000, 'unixepoch')
        end as created_date,
        title,
        case modified_date
            when 0 then ''
            else datetime(modified_date/1000, 'unixepoch')
        end as modified_date,
        case shared_with_me_date
            when 0 then ''
            else datetime(shared_with_me_date/1000, 'unixepoch')
        end as shared_with_me_date,
        case modified_by_me_date
            when 0 then ''
            else datetime(modified_by_me_date/1000, 'unixepoch')
        end as modified_by_me_date,
        case viewed_by_me_date
            when 0 then ''
            else datetime(viewed_by_me_date/1000, 'unixepoch')
        end as viewed_by_me_date,
        mime_type,
        Quota_bytes,
        case is_folder
            when 0 then ''
            when 1 then 'Yes'
        end as is_folder,
        case is_owner
            when 0 then ''
            when 1 then 'Yes'
        end as is_owner,
        case trashed
            when 0 then ''
            when 1 then 'Yes'
        end as trashed,
        (SELECT value from item_properties where key='offlineStatus' and item_stable_id=stable_id) as offline_status,
        (SELECT json_extract(value, '$.blobKey') from item_properties where key LIKE 'com.google.android.apps.docs:content_metadata%' and item_stable_id=stable_id) as content_metadata
    FROM items
    ''')

    all_rows = cursor.fetchall()
    usageentries = len(all_rows)
    if usageentries > 0:
        report = ArtifactHtmlReport('Cello')
        report.start_artifact_report(report_folder, 'Cello')
        report.add_script()
        data_headers = ('Created Date','File Name','Modified Date','Shared with User Date','Modified by User Date','Viewed by User Date','Mime Type', \
                        'Offline','Quota Size','Folder','User is Owner','Deleted')
        data_list = []
        tsv_list = []
        for row in all_rows:
            doc_name = row[1]
            offline_status = "No"
            if row[11] == 1:  # file is offline
                offline_status = "Yes"
                offline_path_name = row[12]
                if offline_path_name not in (None, ''):
                    offline_path = get_offline_path(files_found,
                                                    offline_path_name)
                    if offline_path:
                        destination_path = get_next_unused_name(
                            os.path.join(report_folder, doc_name))
                        shutil.copy2(offline_path, destination_path)
                        dest_name = os.path.basename(destination_path)
                        doc_name = f"<a href=\"{folder_name}/{dest_name}\" target=\"_blank\" style=\"color:green; font-weight:bolder\">{doc_name}</a>"
                    else:
                        logfunc(f'File {doc_name} not present offline!')
                else:
                    logfunc(f'File {doc_name} not present offline!')
            if row[8] == "Yes":
                doc_name = '<i data-feather="folder"></i> ' + doc_name
            else:
                if doc_name.startswith('<a href'):
                    doc_name = '<i data-feather="file" stroke="green"></i> ' + doc_name
                else:
                    doc_name = '<i data-feather="file"></i> ' + doc_name
            data_list.append(
                (row[0], doc_name, row[2], row[3], row[4], row[5], row[6],
                 offline_status, row[7], row[8], row[9], row[10]))
            tsv_list.append(
                (row[0], row[1], row[2], row[3], row[4], row[5], row[6],
                 offline_status, row[7], row[8], row[9], row[10]))

        report.write_artifact_data_table(data_headers,
                                         data_list,
                                         file_found,
                                         html_escape=False)
        report.end_artifact_report()

        tsvname = 'Google Drive - Cello'
        tsv(report_folder, data_headers, tsv_list, tsvname)

        tlactivity = 'Google Drive - Cello'
        timeline(report_folder, tlactivity, tsv_list, data_headers)
    else:
        logfunc('No Google Drive - Cello data available')

    db.close()
    return
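
Unlike the WebKit microsecond timestamps in the Chrome databases above, Cello stores plain Unix timestamps in milliseconds, hence datetime(x/1000, 'unixepoch'). The equivalent conversion in Python:

from datetime import datetime, timezone

def drive_ms_to_datetime(ms):
    # Cello's date columns are Unix milliseconds; 0 means unset.
    return datetime.fromtimestamp(ms / 1000, tz=timezone.utc) if ms else None
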
Example #12
def get_chromeAutofill(files_found, report_folder, seeker, wrap_text):

    for file_found in files_found:
        file_found = str(file_found)
        if not os.path.basename(
                file_found) == 'Web Data':  # skip -journal and other files
            continue
        browser_name = get_browser_name(file_found)
        if file_found.find('app_sbrowser') >= 0:
            browser_name = 'Browser'
        elif file_found.find('.magisk') >= 0 and file_found.find(
                'mirror') >= 0:
            continue  # Skip sbin/.magisk/mirror/data/..; likely duplicate data

        db = open_sqlite_db_readonly(file_found)
        cursor = db.cursor()

        cursor.execute('''
        select
            datetime(date_created, 'unixepoch'),
            name,
            value,
            datetime(date_last_used, 'unixepoch'),
            count
        from autofill
        ''')

        all_rows = cursor.fetchall()
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport(f'{browser_name} Autofill - Entries')
            #check for existing and get next name for report file, so report from another file does not get overwritten
            report_path = os.path.join(
                report_folder, f'{browser_name} Autofill - Entries.temphtml')
            report_path = get_next_unused_name(
                report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder,
                                         os.path.basename(report_path))
            report.add_script()
            data_headers = ('Date Created', 'Field', 'Value', 'Date Last Used',
                            'Count')
            data_list = []
            for row in all_rows:
                data_list.append((row[0], row[1], row[2], row[3], row[4]))

            report.write_artifact_data_table(data_headers, data_list,
                                             file_found)
            report.end_artifact_report()

            tsvname = f'{browser_name} Autofill - Entries'
            tsv(report_folder, data_headers, data_list, tsvname)

            tlactivity = f'{browser_name} Autofill - Entries'
            timeline(report_folder, tlactivity, data_list, data_headers)
        else:
            logfunc(f'No {browser_name} Autofill - Entries data available')

        cursor.execute('''
        select
            datetime(date_modified, 'unixepoch'),
            autofill_profiles.guid,
            autofill_profile_names.first_name,
            autofill_profile_names.middle_name,
            autofill_profile_names.last_name,
            autofill_profile_emails.email,
            autofill_profile_phones.number,
            autofill_profiles.company_name,
            autofill_profiles.street_address,
            autofill_profiles.city,
            autofill_profiles.state,
            autofill_profiles.zipcode,
            datetime(use_date, 'unixepoch'),
            autofill_profiles.use_count
        from autofill_profiles
        inner join autofill_profile_emails ON autofill_profile_emails.guid = autofill_profiles.guid
        inner join autofill_profile_phones ON autofill_profiles.guid = autofill_profile_phones.guid
        inner join autofill_profile_names ON autofill_profile_phones.guid = autofill_profile_names.guid
        ''')

        all_rows = cursor.fetchall()
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport(f'{browser_name} Autofill - Profiles')
            #check for existing and get next name for report file, so report from another file does not get overwritten
            report_path = os.path.join(
                report_folder, f'{browser_name} Autofill - Profiles.temphtml')
            report_path = get_next_unused_name(
                report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder,
                                         os.path.basename(report_path))
            report.add_script()
            data_headers = ('Date Modified', 'GUID', 'First Name',
                            'Middle Name', 'Last Name', 'Email',
                            'Phone Number', 'Company Name', 'Address', 'City',
                            'State', 'Zip Code', 'Date Last Used', 'Use Count')
            data_list = []
            for row in all_rows:
                data_list.append((row[0], row[1], row[2], row[3], row[4],
                                  row[5], row[6], row[7], row[8], row[9],
                                  row[10], row[11], row[12], row[13]))

            report.write_artifact_data_table(data_headers, data_list,
                                             file_found)
            report.end_artifact_report()

            tsvname = f'{browser_name} Autofill - Profiles'
            tsv(report_folder, data_headers, data_list, tsvname)

            tlactivity = f'{browser_name} Autofill - Profiles'
            timeline(report_folder, tlactivity, data_list, data_headers)
        else:
            logfunc(f'No {browser_name} Autofill - Profiles data available')

        db.close()
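
One caveat in the profiles query above: the INNER JOINs silently drop any profile that lacks a matching email or phone row. A sketch of the same join rewritten with LEFT JOINs, which keeps those profiles and returns NULL for the missing columns (abbreviated column list):

profiles_query = '''
select
    datetime(date_modified, 'unixepoch'),
    autofill_profiles.guid,
    autofill_profile_names.first_name,
    autofill_profile_emails.email,
    autofill_profile_phones.number
from autofill_profiles
left join autofill_profile_emails on autofill_profile_emails.guid = autofill_profiles.guid
left join autofill_profile_phones on autofill_profile_phones.guid = autofill_profiles.guid
left join autofill_profile_names on autofill_profile_names.guid = autofill_profiles.guid
'''
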
Example #13
File: chrome.py  Project: stark4n6/ALEAPP
def get_chrome(files_found, report_folder, seeker, wrap_text):

    for file_found in files_found:
        file_found = str(file_found)
        if not os.path.basename(
                file_found) == 'History':  # skip -journal and other files
            continue
        browser_name = get_browser_name(file_found)
        if file_found.find('app_sbrowser') >= 0:
            browser_name = 'Browser'
        elif file_found.find('.magisk') >= 0 and file_found.find(
                'mirror') >= 0:
            continue  # Skip sbin/.magisk/mirror/data/..; likely duplicate data

        db = open_sqlite_db_readonly(file_found)
        cursor = db.cursor()

        #Web History
        cursor.execute('''
        SELECT
        datetime(last_visit_time/1000000 + (strftime('%s','1601-01-01')),'unixepoch') AS LastVisitDate,
        url AS URL,
        title AS Title,
        visit_count AS VisitCount,
        typed_count AS TypedCount,
        id AS ID,
        CASE hidden
            WHEN 0 THEN ''
            WHEN 1 THEN 'Yes'
        END as Hidden
        FROM urls  
        ''')

        all_rows = cursor.fetchall()
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport(f'{browser_name} - Web History')
            #check for existing and get next name for report file, so report from another file does not get overwritten
            report_path = os.path.join(
                report_folder, f'{browser_name} - Web History.temphtml')
            report_path = get_next_unused_name(
                report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder,
                                         os.path.basename(report_path))
            report.add_script()
            data_headers = ('Last Visit Time', 'URL', 'Title', 'Visit Count',
                            'Typed Count', 'ID', 'Hidden')
            data_list = []
            for row in all_rows:
                if wrap_text:
                    data_list.append((row[0], textwrap.fill(row[1], width=100),
                                      row[2], row[3], row[4], row[5], row[6]))
                else:
                    data_list.append((row[0], row[1], row[2], row[3], row[4],
                                      row[5], row[6]))
            report.write_artifact_data_table(data_headers, data_list,
                                             file_found)
            report.end_artifact_report()

            tsvname = f'{browser_name} - Web History'
            tsv(report_folder, data_headers, data_list, tsvname)

            tlactivity = f'{browser_name} - Web History'
            timeline(report_folder, tlactivity, data_list, data_headers)
        else:
            logfunc(f'No {browser_name} - Web History data available')

        #Web Visits
        cursor.execute('''
        SELECT
        datetime(visits.visit_time/1000000 + (strftime('%s','1601-01-01')),'unixepoch'),
        urls.url,
        urls.title,
        CASE visits.visit_duration
            WHEN 0 THEN ''
            ELSE strftime('%H:%M:%f', visits.visit_duration / 1000000.000,'unixepoch')
        END as Duration,
        CASE visits.transition & 0xff
            WHEN 0 THEN 'LINK'
            WHEN 1 THEN 'TYPED'
            WHEN 2 THEN 'AUTO_BOOKMARK'
            WHEN 3 THEN 'AUTO_SUBFRAME'
            WHEN 4 THEN 'MANUAL_SUBFRAME'
            WHEN 5 THEN 'GENERATED'
            WHEN 6 THEN 'START_PAGE'
            WHEN 7 THEN 'FORM_SUBMIT'
            WHEN 8 THEN 'RELOAD'
            WHEN 9 THEN 'KEYWORD'
            WHEN 10 THEN 'KEYWORD_GENERATED'
            ELSE NULL
        END AS CoreTransitionType,
        trim((CASE WHEN visits.transition & 0x00800000 THEN 'BLOCKED, ' ELSE '' END ||
        CASE WHEN visits.transition & 0x01000000 THEN 'FORWARD_BACK, ' ELSE '' END ||
        CASE WHEN visits.transition & 0x02000000 THEN 'FROM_ADDRESS_BAR, ' ELSE '' END ||
        CASE WHEN visits.transition & 0x04000000 THEN 'HOME_PAGE, ' ELSE '' END ||
        CASE WHEN visits.transition & 0x08000000 THEN 'FROM_API, ' ELSE '' END ||
        CASE WHEN visits.transition & 0x10000000 THEN 'CHAIN_START, ' ELSE '' END ||
        CASE WHEN visits.transition & 0x20000000 THEN 'CHAIN_END, ' ELSE '' END ||
        CASE WHEN visits.transition & 0x40000000 THEN 'CLIENT_REDIRECT, ' ELSE '' END ||
        CASE WHEN visits.transition & 0x80000000 THEN 'SERVER_REDIRECT, ' ELSE '' END ||
        CASE WHEN visits.transition & 0xC0000000 THEN 'IS_REDIRECT_MASK, ' ELSE '' END),', ')
        AS Qualifiers,
        Query2.url AS FromURL
        FROM visits
        LEFT JOIN urls ON visits.url = urls.id
        LEFT JOIN (SELECT urls.url,urls.title,visits.visit_time,visits.id FROM visits LEFT JOIN urls ON visits.url = urls.id) Query2 ON visits.from_visit = Query2.id  
        ''')

        all_rows = cursor.fetchall()
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport(f'{browser_name} - Web Visits')
            #check for existing and get next name for report file, so report from another file does not get overwritten
            report_path = os.path.join(
                report_folder, f'{browser_name} - Web Visits.temphtml')
            report_path = get_next_unused_name(
                report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder,
                                         os.path.basename(report_path))
            report.add_script()
            data_headers = ('Visit Timestamp', 'URL', 'Title', 'Duration',
                            'Transition Type', 'Qualifier(s)',
                            'From Visit URL')
            data_list = []
            for row in all_rows:
                if wrap_text:
                    data_list.append((row[0], textwrap.fill(row[1], width=100),
                                      row[2], row[3], row[4], row[5], row[6]))
                else:
                    data_list.append((row[0], row[1], row[2], row[3], row[4],
                                      row[5], row[6]))
            report.write_artifact_data_table(data_headers, data_list,
                                             file_found)
            report.end_artifact_report()

            tsvname = f'{browser_name} - Web Visits'
            tsv(report_folder, data_headers, data_list, tsvname)

            tlactivity = f'{browser_name} - Web Visits'
            timeline(report_folder, tlactivity, data_list, data_headers)
        else:
            logfunc(f'No {browser_name} - Web Visits data available')

        #Web Search
        cursor.execute('''
        SELECT
            url,
            title,
            visit_count,
            datetime(last_visit_time / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch")
        FROM urls
        WHERE url like '%search?q=%'
        ''')

        all_rows = cursor.fetchall()
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport(f'{browser_name} - Search Terms')
            #check for existing and get next name for report file, so report from another file does not get overwritten
            report_path = os.path.join(
                report_folder, f'{browser_name} - Search Terms.temphtml')
            report_path = get_next_unused_name(
                report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder,
                                         os.path.basename(report_path))
            report.add_script()
            data_headers = ('Last Visit Time', 'Search Term', 'URL', 'Title',
                            'Visit Count')
            data_list = []
            for row in all_rows:
                search = row[0].split('search?q=')[1].split('&')[0]
                search = urllib.parse.unquote(search).replace('+', ' ')
                if wrap_text:
                    data_list.append(
                        (row[3], search, (textwrap.fill(row[0], width=100)),
                         row[1], row[2]))
                else:
                    data_list.append((row[3], search, row[0], row[1], row[2]))

            report.write_artifact_data_table(data_headers, data_list,
                                             file_found)
            report.end_artifact_report()

            tsvname = f'{browser_name} - Search Terms'
            tsv(report_folder, data_headers, data_list, tsvname)

            tlactivity = f'{browser_name} - Search Terms'
            timeline(report_folder, tlactivity, data_list, data_headers)
        else:
            logfunc(f'No {browser_name} - Search Terms data available')

        #Downloads
        # check for last_access_time column, an older version of chrome db (32) does not have it
        if does_column_exist_in_db(db, 'downloads', 'last_access_time'):
            last_access_time_query = '''
            CASE last_access_time 
                WHEN "0" 
                THEN "" 
                ELSE datetime(last_access_time / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch")
            END AS "Last Access Time"'''
        else:
            last_access_time_query = "'' as last_access_query"

        cursor.execute(f'''
        SELECT 
        CASE start_time  
            WHEN "0" 
            THEN "" 
            ELSE datetime(start_time / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch")
        END AS "Start Time", 
        CASE end_time 
            WHEN "0" 
            THEN "" 
            ELSE datetime(end_time / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch")
        END AS "End Time", 
        {last_access_time_query},
        tab_url, 
        target_path, 
        CASE state
            WHEN "0" THEN "In Progress"
            WHEN "1" THEN "Complete"
            WHEN "2" THEN "Canceled"
            WHEN "3" THEN "Interrupted"
            WHEN "4" THEN "Interrupted"
        END,
        CASE danger_type
            WHEN "0" THEN ""
            WHEN "1" THEN "Dangerous"
            WHEN "2" THEN "Dangerous URL"
            WHEN "3" THEN "Dangerous Content"
            WHEN "4" THEN "Content May Be Malicious"
            WHEN "5" THEN "Uncommon Content"
            WHEN "6" THEN "Dangerous But User Validated"
            WHEN "7" THEN "Dangerous Host"
            WHEN "8" THEN "Potentially Unwanted"
            WHEN "9" THEN "Allowlisted by Policy"
            WHEN "10" THEN "Pending Scan"
            WHEN "11" THEN "Blocked - Password Protected"
            WHEN "12" THEN "Blocked - Too Large"
            WHEN "13" THEN "Warning - Sensitive Content"
            WHEN "14" THEN "Blocked - Sensitive Content"
            WHEN "15" THEN "Safe - Deep Scanned"
            WHEN "16" THEN "Dangerous, But User Opened"
            WHEN "17" THEN "Prompt For Scanning"
            WHEN "18" THEN "Blocked - Unsupported Type"
        END,
        CASE interrupt_reason
            WHEN "0" THEN ""
            WHEN "1" THEN "File Error"
            WHEN "2" THEN "Access Denied"
            WHEN "3" THEN "Disk Full"
            WHEN "5" THEN "Path Too Long"
            WHEN "6" THEN "File Too Large"
            WHEN "7" THEN "Virus"
            WHEN "10" THEN "Temporary Problem"
            WHEN "11" THEN "Blocked"
            WHEN "12" THEN "Security Check Failed"
            WHEN "13" THEN "Resume Error"
            WHEN "20" THEN "Network Error"
            WHEN "21" THEN "Operation Timed Out"
            WHEN "22" THEN "Connection Lost"
            WHEN "23" THEN "Server Down"
            WHEN "30" THEN "Server Error"
            WHEN "31" THEN "Range Request Error"
            WHEN "32" THEN "Server Precondition Error"
            WHEN "33" THEN "Unable To Get File"
            WHEN "34" THEN "Server Unauthorized"
            WHEN "35" THEN "Server Certificate Problem"
            WHEN "36" THEN "Server Access Forbidden"
            WHEN "37" THEN "Server Unreachable"
            WHEN "38" THEN "Content Lenght Mismatch"
            WHEN "39" THEN "Cross Origin Redirect"
            WHEN "40" THEN "Canceled"
            WHEN "41" THEN "Browser Shutdown"
            WHEN "50" THEN "Browser Crashed"
        END,
        opened, 
        received_bytes, 
        total_bytes
        FROM downloads
        ''')

        all_rows = cursor.fetchall()
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport(f'{browser_name} - Downloads')
            #check for existing and get next name for report file, so report from another file does not get overwritten
            report_path = os.path.join(report_folder,
                                       f'{browser_name} - Downloads.temphtml')
            report_path = get_next_unused_name(
                report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder,
                                         os.path.basename(report_path))
            report.add_script()
            data_headers = ('Start Time', 'End Time', 'Last Access Time',
                            'URL', 'Target Path', 'State', 'Danger Type',
                            'Interrupt Reason', 'Opened?', 'Received Bytes',
                            'Total Bytes')
            data_list = []
            for row in all_rows:
                data_list.append(
                    (row[0], row[1], row[2], row[3], row[4], row[5], row[6],
                     row[7], row[8], row[9], row[10]))

            report.write_artifact_data_table(data_headers, data_list,
                                             file_found)
            report.end_artifact_report()

            tsvname = f'{browser_name} - Downloads'
            tsv(report_folder, data_headers, data_list, tsvname)

            tlactivity = f'{browser_name} - Downloads'
            timeline(report_folder, tlactivity, data_list, data_headers)
        else:
            logfunc(f'No {browser_name} - Downloads data available')

        #Search Terms
        cursor.execute('''
        SELECT
            url_id,
            term,
            id,
            url,
            datetime(last_visit_time / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch")
        FROM keyword_search_terms, urls
        WHERE url_id = id
        ''')

        all_rows = cursor.fetchall()
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport(
                f'{browser_name} - Keyword Search Terms')
            #check for existing and get next name for report file, so report from another file does not get overwritten
            report_path = os.path.join(
                report_folder,
                f'{browser_name} - Keyword Search Terms.temphtml')
            report_path = get_next_unused_name(
                report_path)[:-9]  # remove .temphtml
            report.start_artifact_report(report_folder,
                                         os.path.basename(report_path))
            report.add_script()
            data_headers = ('Last Visit Time', 'Term', 'URL')
            data_list = []
            for row in all_rows:
                if wrap_text:
                    data_list.append(
                        (row[4], row[1], (textwrap.fill(row[3], width=100))))
                else:
                    data_list.append((row[4], row[1], row[3]))

            report.write_artifact_data_table(data_headers, data_list,
                                             file_found)
            report.end_artifact_report()

            tsvname = f'{browser_name} - Keyword Search Terms'
            tsv(report_folder, data_headers, data_list, tsvname)

            tlactivity = f'{browser_name} - Keyword Search Terms'
            timeline(report_folder, tlactivity, data_list, data_headers)
        else:
            logfunc(f'No {browser_name} - Keyword Search Terms data available')

        db.close()
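
The visits.transition decoding above packs a core type into the low byte and OR-ed qualifier flags into the high bits. The same decoding in Python, mirroring the SQL CASE expressions (the composite 0xC0000000 redirect mask is omitted since its two bits are already reported individually):

CORE_TRANSITIONS = {
    0: 'LINK', 1: 'TYPED', 2: 'AUTO_BOOKMARK', 3: 'AUTO_SUBFRAME',
    4: 'MANUAL_SUBFRAME', 5: 'GENERATED', 6: 'START_PAGE',
    7: 'FORM_SUBMIT', 8: 'RELOAD', 9: 'KEYWORD', 10: 'KEYWORD_GENERATED',
}

QUALIFIER_MASKS = [
    (0x00800000, 'BLOCKED'), (0x01000000, 'FORWARD_BACK'),
    (0x02000000, 'FROM_ADDRESS_BAR'), (0x04000000, 'HOME_PAGE'),
    (0x08000000, 'FROM_API'), (0x10000000, 'CHAIN_START'),
    (0x20000000, 'CHAIN_END'), (0x40000000, 'CLIENT_REDIRECT'),
    (0x80000000, 'SERVER_REDIRECT'),
]

def decode_transition(transition):
    # Low byte holds the core type; high bits are OR-ed qualifier flags.
    core = CORE_TRANSITIONS.get(transition & 0xff)
    qualifiers = [name for mask, name in QUALIFIER_MASKS if transition & mask]
    return core, qualifiers

# decode_transition(0x10000001) -> ('TYPED', ['CHAIN_START'])
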