def running(self, progressMonitor, callback):
    """Dump the selected apps from the selected devices over ADB in the
    background, then report the outcome through *callback*.

    Error contract: a failure while adding one device's dump is
    NONCRITICAL; a failure of the whole extraction — or ending up with
    no data source at all — is CRITICAL.
    """
    progressMonitor.setIndeterminate(True)

    produced_sources = []
    failure_messages = []
    outcome = DataSourceProcessorCallback.DataSourceProcessorResult.NO_ERRORS

    try:
        dump_by_serial = Extractor(self.selected_apps, self.selected_devices, progressMonitor).dump_apps()
        for serial, folder in dump_by_serial.items():
            # One fileset per device; the timestamp keeps names unique.
            fileset_name = "ADB_{}_{}".format(serial, int(time.time()))
            try:
                produced_sources.append(PsyUtils.add_to_fileset(fileset_name, folder, notify=False))
            except Exception as e:
                text = "Extractor Failed for {} for {}!".format(serial, e)
                logging.error(text)
                failure_messages.append(text)
                outcome = DataSourceProcessorCallback.DataSourceProcessorResult.NONCRITICAL_ERRORS
    except Exception as e:
        # The extraction itself blew up: nothing useful was produced.
        text = "Global Extractor Failed. Aborting: {}".format(e)
        logging.error(text)
        failure_messages.append(text)
        outcome = DataSourceProcessorCallback.DataSourceProcessorResult.CRITICAL_ERRORS

    if not produced_sources:
        outcome = DataSourceProcessorCallback.DataSourceProcessorResult.CRITICAL_ERRORS

    callback.done(outcome, failure_messages, produced_sources)
def generateReport(self, settings, progressBar):
    """Entry point called by Autopsy's report wizard.

    Autopsy 4.16 changed GeneralReportModule.generateReport() to receive a
    GeneralReportSettings object instead of the report directory string, so
    the real base directory must be extracted before delegating.

    :param settings: base report directory string (Autopsy < 4.16) or a
        GeneralReportSettings object (Autopsy >= 4.16).
    :param progressBar: ReportProgressPanel used to report status.
    """
    autopsy_version = PsyUtils.get_autopsy_version()
    baseReportDir = settings
    # BUGFIX: the old check (major == 4 and minor >= 16) treated any future
    # major version (5+) as the pre-4.16 API and passed the settings object
    # where a path string is expected. Newer majors keep the new API.
    if (autopsy_version["major"] > 4
            or (autopsy_version["major"] == 4 and autopsy_version["minor"] >= 16)):
        baseReportDir = settings.getReportDirectoryPath()
    self.report.generateReport(baseReportDir, progressBar)
def running(self, progressMonitor, callback):
    """Dump the selected apps from the selected devices over ADB in the
    background, then hand the produced data sources to Autopsy via *callback*.

    Error contract: a failure while adding one device's dump is
    NONCRITICAL; a failure of the whole extraction — or no data source
    produced at all — is CRITICAL.
    """
    progressMonitor.setIndeterminate(True)
    newDataSources = []
    errors = []
    result = DataSourceProcessorCallback.DataSourceProcessorResult.NO_ERRORS
    # BUGFIX: the extraction was previously unguarded — an exception from
    # Extractor()/dump_apps() escaped this method and callback.done() was
    # never called, leaving the add-data-source wizard hanging. Guarded now,
    # consistent with the sibling implementation of this method.
    try:
        extractor = Extractor(self.selected_apps, self.selected_devices, progressMonitor)
        folders = extractor.dump_apps()
        for serial, folder in folders.items():
            try:
                # One fileset per device; the timestamp keeps names unique.
                data_source = PsyUtils.add_to_fileset("ADB_{}_{}".format(
                    serial, int(time.time())), folder, notify=False)
                newDataSources.append(data_source)
            except Exception as e:
                message = "Extractor Failed for {} for {}!".format(serial, e)
                logging.error(message)
                errors.append(message)
                result = DataSourceProcessorCallback.DataSourceProcessorResult.NONCRITICAL_ERRORS
    except Exception as e:
        message = "Global Extractor Failed. Aborting: {}".format(e)
        logging.error(message)
        errors.append(message)
        result = DataSourceProcessorCallback.DataSourceProcessorResult.CRITICAL_ERRORS
    if len(newDataSources) == 0:
        result = DataSourceProcessorCallback.DataSourceProcessorResult.CRITICAL_ERRORS
    callback.done(result, errors, newDataSources)
def __init__(self, settings):
    """Bind this ingest module to the current case: set up logging, keep
    the ingest settings, grab case services and prepare the output folder."""
    current_case = Case.getCurrentCase()
    # Route this module's log records into the main autopsy log file.
    Utils.setup_custom_logger(os.path.join(current_case.getLogDirectoryPath(), "autopsy.log.0"))
    # Ingest context is provided later, in startUp().
    self.context = None
    # Settings chosen in the ingest-module configuration panel.
    self.settings = settings
    # Helper with Autopsy-specific utility methods.
    self.utils = PsyUtils()
    # File manager for the current case.
    self.fileManager = current_case.getServices().getFileManager()
    # Module output folder; created if it does not exist yet.
    self.temp_module_path = os.path.join(current_case.getModulesOutputDirAbsPath(), "AndroidForensics")
    Utils.check_and_generate_folder(self.temp_module_path)
def generateReport(self, baseReportDir, progressBar):
    """Build the HTML report for the case.

    First pass: render one HTML report per Report.json previously produced
    by the ingest module (under the "FAMA" modules output folder). If none
    exist, fall back to a generic "classic" report built from the case's
    blackboard artifacts. Finally an HTML index over all reports is written.

    :param baseReportDir: directory where the report files are written.
    :param progressBar: ReportProgressPanel used to report status/completion.
    """
    logging.info("Starting Report Module")
    progressBar.setIndeterminate(True)
    self.fileManager = Case.getCurrentCase().getServices().getFileManager()
    progressBar.updateStatusLabel("Finding source data")
    # Ingest output lives under <modules output>/FAMA/<fileset>/<app>/<n>/
    self.tempDirectory = os.path.join(Case.getCurrentCase().getModulesOutputDirAbsPath(), "FAMA")
    if not os.path.exists(self.tempDirectory):
        # Nothing was ingested yet — the report module depends on it.
        progressBar.complete(ReportStatus.ERROR)
        progressBar.updateStatusLabel("Run Ingest Module first!")
        return
    progressBar.updateStatusLabel("Creating report")
    # Case metadata is handed to the report generator via the environment.
    # NOTE(review): getNumber()/getExaminer() presumably return strings even
    # when unset — confirm, os.environ assignment would fail on None.
    os.environ["CASE_NAME"] = Case.getCurrentCase().getName()
    os.environ["CASE_NUMBER"] = Case.getCurrentCase().getNumber()
    os.environ["EXAMINER"] = Case.getCurrentCase().getExaminer()
    reports = {}
    reports["reports"] = []
    # --- Android Analyzer smart report: one HTML per Report.json found ---
    for fileset in os.listdir(self.tempDirectory):
        fileset_path = os.path.join(self.tempDirectory, fileset)
        for app_id in os.listdir(fileset_path):
            app_path = os.path.join(fileset_path, app_id)
            for app_report in os.listdir(app_path):
                report = os.path.join(app_path, app_report, "Report.json")
                if os.path.exists(report):
                    report_content = Utils.read_json(report)
                    report_path = Analyzer.generate_html_report(report_content, os.path.join(app_path, app_report))
                    Case.getCurrentCase().addReport(report_path, "Report", "Forensics Report")
                    reports["reports"].append(Analyzer.generate_report_summary(report_content, app_report, fileset = fileset))
    # --- Classic report: fall back to blackboard artifacts when no smart
    # report was produced above ---
    if len(reports["reports"]) == 0:
        report = {}
        report["header"] = {
            "report_name": "Generated Report",
            "report_date": int(time.time()) * 1000,  # epoch milliseconds
            "app_name": "Generic",
            "app_id": "Generated Report"
        }
        has_row = False
        for artifact in PsyUtils.get_artifacts_list():
            artifact_name = artifact.getDisplayName()
            report[artifact_name] = []
            # Fetch every blackboard artifact of this type for the case.
            command = "WHERE (blackboard_artifacts.artifact_type_id = '{}')".format(artifact.getTypeID())
            rows = Case.getCurrentCase().getSleuthkitCase().getMatchingArtifacts(command)
            for row in rows:
                has_row = True
                item = {}
                atts = row.getAttributes()
                for att in atts:
                    # encode(...,'ignore') drops characters the HTML template
                    # could not handle under Jython's default codec.
                    item[att.getAttributeTypeDisplayName()] = str(att.getDisplayString().encode('utf-8','ignore'))
                report[artifact_name].append(item)
        if not has_row:
            # No smart reports and no artifacts either: nothing to render.
            progressBar.complete(ReportStatus.ERROR)
            progressBar.updateStatusLabel("Nothing to report!")
            return
        report_path = os.path.join(baseReportDir, report["header"]["app_id"], "Generic")
        reporthtml = Analyzer.generate_html_report(report, report_path)
        Case.getCurrentCase().addReport(reporthtml, "Report", "Forensics Report")
        reports["reports"].append(Analyzer.generate_report_summary(report, "Generic"))
    # Index page linking all generated reports.
    report_file_path = Analyzer.generate_html_index(reports, baseReportDir)
    Case.getCurrentCase().addReport(report_file_path, "Report", "Forensics Report")
    progressBar.updateStatusLabel("Done")
    progressBar.complete(ReportStatus.COMPLETE)
def initComponents(self):
    """Build the Swing settings panel: title, analysis-method radio buttons
    (only on Autopsy <= 4.17, where ADB live extraction is selectable) and
    one checkbox per supported application."""
    self.apps_checkboxes_list = []
    self.setLayout(BoxLayout(self, BoxLayout.PAGE_AXIS))
    self.setPreferredSize(Dimension(300, 0))
    # --- title ---
    self.p_title = SettingsUtils.createPanel()
    self.lb_title = JLabel("Forensic Analysis for Mobile Apps")
    self.lb_title.setFont(self.lb_title.getFont().deriveFont(Font.BOLD, 15))
    self.p_title.add(self.lb_title)
    self.add(self.p_title)
    # --- info area and method panel (populated per Autopsy version) ---
    self.p_info = SettingsUtils.createPanel()
    self.p_info.setPreferredSize(Dimension(300, 20))
    self.lb_info = SettingsUtils.createInfoLabel("")
    self.lb_info2 = SettingsUtils.createInfoLabel("")
    self.sp2 = SettingsUtils.createSeparators(1)
    self.p_method = SettingsUtils.createPanel()
    self.bg_method = ButtonGroup()
    autopsy_version = PsyUtils.get_autopsy_version()
    # Up to Autopsy 4.17 the user picks the analysis method here; later
    # versions select the method elsewhere, so only an info label is shown.
    if ((autopsy_version["major"] == 4 and autopsy_version["minor"] <= 17) or autopsy_version["major"] < 4):
        self.p_info.add(self.lb_info)
        self.p_info.add(self.lb_info2, BorderLayout.SOUTH)
        self.rb_selectedDatasource = SettingsUtils.createRadioButton(
            "Analyze selected datasource", "method_datasource", self.onMethodChange)
        self.bg_method.add(self.rb_selectedDatasource)
        self.rb_liveExtraction = SettingsUtils.createRadioButton(
            "Live extraction with ADB", "method_adb", self.onMethodChange)
        # Datasource analysis is the default method.
        self.rb_selectedDatasource.setSelected(True)
        self.bg_method.add(self.rb_liveExtraction)
        self.p_method.add(JLabel("Analysis method"))
        self.p_method.add(self.rb_selectedDatasource)
        self.p_method.add(self.rb_liveExtraction)
    else:
        self.p_info.add(
            SettingsUtils.createInfoLabel(
                "It will analyze the data source with previously selected method and index the forensic artifacts."
            ))
    self.add(self.p_method)
    # --- one checkbox per supported app, sorted by display name ---
    self.p_apps = SettingsUtils.createPanel(True)
    sorted_items = OrderedDict(sorted(Utils.get_all_packages().items()))
    for app, app_id in sorted_items.iteritems():
        checkbox = SettingsUtils.addApplicationCheckbox(app, app_id, self.getSelectedApps)
        # NOTE(review): the checkbox is added both to this panel and to
        # p_apps; Swing reparents on the second add, so only the p_apps
        # placement survives — confirm the first add is intentional.
        self.add(checkbox)
        self.apps_checkboxes_list.append(checkbox)
        self.p_apps.add(checkbox)
    self.add(self.p_apps)
    self.add(self.p_info)
class ProjectIngestModule(DataSourceIngestModule):
    """Data-source ingest module: optionally extracts app data over ADB,
    then analyzes dumps / imported reports / mounted images found in the
    data source and posts the resulting artifacts to the blackboard."""

    def __init__(self, settings):
        # Route this module's log records into the main autopsy log file.
        Utils.setup_custom_logger(os.path.join(Case.getCurrentCase().getLogDirectoryPath(), "autopsy.log.0"))
        # Ingest context, provided later in startUp().
        self.context = None
        # Module settings chosen in the ingest settings panel.
        self.settings = settings
        # Autopsy utility methods instance.
        self.utils = PsyUtils()
        # File manager for this case.
        self.fileManager = Case.getCurrentCase().getServices().getFileManager()
        # Output folder for analysis results; created if missing.
        self.temp_module_path = os.path.join(Case.getCurrentCase().getModulesOutputDirAbsPath(), "AndroidForensics")
        Utils.check_and_generate_folder(self.temp_module_path)

    def startUp(self, context):
        """Called by Autopsy when ingest starts; captures context and the
        analysis method chosen in the settings panel."""
        self.context = context
        # One of: "method_adb", "method_importfile", or datasource analysis.
        self.method = self.settings.getSetting('method')

    def process(self, dataSource, progressBar):
        """Top-level ingest entry point: builds the list of data sources to
        analyze (extracting over ADB first if that method was chosen) and
        processes each one."""
        self.progressBar = progressBar
        # Progress is reported on a 0-100 scale.
        progressBar.switchToDeterminate(100)
        # Data sources that will actually be processed below.
        data_sources = []
        if self.method == "method_adb":
            # Apps selected for extraction in the settings panel.
            self.apps = json.loads(self.settings.getSetting('apps'))
            jobs = len(self.apps)*3 #extract, analyser, index
            self.progressJob = ProgressJob(progressBar, jobs)
            # A single Extract instance so all app dumps share one folder.
            extract = Extract()
            self.progressJob.next_job("Extracting from ADB")
            logging.info("Starting ADB")
            # Maps device serial -> list of dump folders for that device.
            folders = {}
            for app_id in self.apps:
                for serial, folder in extract.dump_from_adb(app_id).items():
                    if not folders.get(serial):
                        folders[serial] = []
                    # Avoid registering the same folder twice per device.
                    if not folder in folders[serial]:
                        folders[serial].append(folder)
                self.progressJob.next_job("Extracting {}".format(app_id))
            # One logical fileset (data source) per device.
            for serial, folders_list in folders.items():
                datasource_name = dataSource.getName() + "_ADB_{}_{}".format(serial, int(time.time()))
                self.utils.add_to_fileset(datasource_name, folders_list)
                # add_to_fileset does not return the created data source, so
                # look it up by name among the case's data sources.
                for case_datasources in Case.getCurrentCase().getDataSources():
                    if case_datasources.getName() == datasource_name:
                        data_sources.append(case_datasources)
                        break
            logging.info("Ending ADB")
        else:
            # Analyze the data source selected in the wizard as-is
            # (json report import, tar.gz dumps, or a mounted image).
            logging.info("Using Selected Datasource")
            data_sources.append(dataSource)
        # Size the progress bar by the amount of work ahead.
        if self.method == "method_importfile":
            self.progressJob = ProgressJob(progressBar, len(data_sources)) #indexing ( x1)
        else:
            jobs = 0
            for source in data_sources:
                jobs = jobs + len(self.fileManager.findFiles(source, "%_internal.tar.gz"))
            self.progressJob = ProgressJob(progressBar, 2 * jobs) #indexing and analying
        for source in data_sources:
            self.process_by_datasource(source)
        self.progressJob.next_job("Done")

    def process_by_datasource(self, dataSource):
        """Analyze one data source. Three supported layouts, tried in order:
        imported Report.json files, *_internal/_external.tar.gz dumps, or a
        mounted filesystem containing /data/data/<app>."""
        # Re-running ingest on the same data source: wipe only its output.
        temp_directory = os.path.join(self.temp_module_path, dataSource.getName().replace(":","_"))
        Utils.remove_folder(temp_directory)
        Utils.check_and_generate_folder(self.temp_module_path)
        self.progressJob.change_text("Analyzing Information for {}".format(dataSource.getName()))
        if self.method == "method_importfile":
            # --- import previously generated Report.json files ---
            json_report = "Report.json"
            # app_id -> count of reports seen, used to number report folders.
            reports_by_app = {}
            json_reports = self.fileManager.findFiles(dataSource, json_report)
            for report in json_reports:
                # The report header tells us which app it belongs to.
                info = Utils.read_json(report.getLocalPath())
                app_id = info["header"]["app_id"]
                self.progressJob.next_job("Processing report {} ".format(app_id))
                # Several reports may exist for one app; number them.
                if not reports_by_app.get(app_id):
                    reports_by_app[app_id] = 1
                else:
                    reports_by_app[app_id] += 1
                report_folder_path = os.path.join(temp_directory, app_id, str(reports_by_app[app_id]))
                Utils.check_and_generate_folder(report_folder_path)
                # Copy the json report into the module output folder.
                report_location = os.path.join(report_folder_path, "Report.json")
                copyfile(report.getLocalPath(), report_location)
                item = {}
                item["report"] = report_location
                item["file"] = report
                item["app"] = Utils.find_app_name(app_id)
                self.process_report(item, dataSource)
        else:
            # app_id -> list of dump base folders already analyzed.
            reports_by_app = {}
            # --- look for tar.gz dumps in the data source ---
            internal = "%_internal.tar.gz"
            external = "%_external.tar.gz"
            dumps = []
            dumps.extend(self.fileManager.findFiles(dataSource, internal))
            dumps.extend(self.fileManager.findFiles(dataSource, external))
            if dumps:
                # Dumps found: the data source is not a mounted image.
                for base in dumps:
                    # The app id is the dump filename minus its suffix.
                    app_id = base.getName().replace('_internal.tar.gz', '').replace('_external.tar.gz','')
                    self.progressJob.next_job("Processing report {} ".format(app_id))
                    if not reports_by_app.get(app_id):
                        reports_by_app[app_id] = []
                    # internal+external dumps share a folder; skip if this
                    # folder was already analyzed for the app.
                    base_path = os.path.dirname(base.getLocalPath())
                    if base_path in reports_by_app[app_id]:
                        continue
                    reports_by_app[app_id].append(base_path)
                    # Report folder is numbered per dump of the same app.
                    report_folder_path = os.path.join(temp_directory, app_id, str(len(reports_by_app[app_id])))
                    Utils.check_and_generate_folder(report_folder_path)
                    self.progressJob.change_text("Analyzing Information for {} ({})".format(dataSource.getName(), app_id))
                    # Analyze the dump and generate the json report.
                    analyzer = Analyzer(app_id, base_path, report_folder_path)
                    analyzer.generate_report()
                    report_location = os.path.join(report_folder_path, "Report.json")
                    item = {}
                    item["report"] = report_location
                    item["file"] = base
                    item["app"] = Utils.find_app_name(app_id)
                    self.process_report(item, dataSource)
            else:
                # --- mounted-image case ---
                base_path = None
                base = None
                # Hack: Autopsy exposes real local paths only on files, not
                # folders, so scan files until one under /data/data/ reveals
                # the mount's base path.
                files = self.fileManager.findFiles(dataSource, "%")
                for x in files:
                    # Keep a reference file to attach artifacts to later.
                    if not base:
                        base = x
                    if x.getLocalPath() and '/data/data/' in x.getParentPath():
                        # Normalize slashes across operating systems.
                        local = Utils.replace_slash_platform(x.getLocalPath())
                        if Utils.get_platform().startswith("windows"):
                            base_path = local.split("\\data\\data\\")[0]
                        else:
                            base_path = local.split("/data/data/")[0]
                        break
                if base_path:
                    # Analyze every supported app present in the mount.
                    for app_id in Utils.get_all_packages().values():
                        if os.path.exists(os.path.join(base_path, "data", "data", app_id)):
                            # A mount yields at most one report per app.
                            report_number = 1
                            report_folder_path = os.path.join(temp_directory, app_id, str(report_number)) #report path
                            Utils.check_and_generate_folder(report_folder_path)
                            self.progressJob.change_text("Analyzing Information for {} ({})".format(dataSource.getName(), app_id))
                            analyzer = Analyzer(app_id, base_path, report_folder_path)
                            analyzer.generate_report()
                            report_location = os.path.join(report_folder_path, "Report.json")
                            item = {}
                            item["report"] = report_location
                            item["file"] = base
                            item["app"] = Utils.find_app_name(app_id)
                            self.process_report(item, dataSource)
        return IngestModule.ProcessResult.OK

    def process_report(self, report, dataSource):
        """Hand one generated report to the app-specific autopsy module
        (modules.autopsy.<app>) so it can post blackboard artifacts."""
        try:
            m = __import__("modules.autopsy.{}".format(report["app"]), fromlist=[None])
            self.progressJob.next_job("Processing report {} ".format(report["app"]))
        # A standalone analysis module can exist without a matching autopsy
        # module; in that case there is simply nothing to index.
        except:
            logging.warning("Autopsy Module not found for {}".format(report["app"]))
            return
        self.module_psy = m.ModulePsy(report["app"])
        # Initialize the module's blackboard artifact types/menus.
        self.module_psy.initialize(self.context)
        #### FIX NUMBER OF REPORTS
        self.module_psy.process_report(dataSource.getName(), report["file"], 0, report["report"])
def __init__(self, module_name):
    """Bind this module to the current Sleuthkit case and build the
    uppercase prefix used when labelling its output."""
    # Ingest context is supplied later by the caller.
    self.context = None
    # E.g. "whatsapp" -> "WHATSAPP: ".
    self.module_name = module_name.upper() + ": "
    # Autopsy helper utilities.
    self.utils = PsyUtils()
    # Sleuthkit case backing the current Autopsy case.
    self.case = Case.getCurrentCase().getSleuthkitCase()