def run(self, evidence, result):
  """Run the Jenkins worker.

  Args:
      evidence (Evidence object):  The evidence to process
      result (TurbiniaTaskResult): The object to place task results into.

  Returns:
      TurbiniaTaskResult object.
  """
  # What type of evidence we should output.
  output_evidence = ReportText()

  # Where to store the resulting output file.
  output_file_name = 'jenkins_analysis.txt'
  output_file_path = os.path.join(self.output_dir, output_file_name)

  # Set the output file as the data source for the output evidence.
  output_evidence.local_path = output_file_path

  try:
    collected_artifacts = extract_artifacts(
        artifact_names=['JenkinsConfigFile'], disk_path=evidence.local_path,
        output_dir=os.path.join(self.output_dir, 'artifacts'))
  except TurbiniaException as e:
    result.close(self, success=False, status=str(e))
    return result

  version = None
  credentials = []
  for filepath in collected_artifacts:
    with open(filepath, 'r') as input_file:
      config = input_file.read()

    extracted_version = self._extract_jenkins_version(config)
    extracted_credentials = self._extract_jenkins_credentials(config)

    # Keep the last version seen; accumulate credentials across all configs.
    if extracted_version:
      version = extracted_version
    credentials.extend(extracted_credentials)

  analysis_report = self.analyze_jenkins(version, credentials)
  output_evidence.text_data = analysis_report
  # Expose the report on the result, consistent with the other analysis tasks.
  result.report_data = analysis_report

  # Write the report to the output file. The file must be opened in binary
  # mode ('wb') because we write encoded bytes; text mode ('w') would raise
  # a TypeError on the bytes payload.
  with open(output_file_path, 'wb') as fh:
    fh.write(output_evidence.text_data.encode('utf8'))
    fh.write('\n'.encode('utf8'))

  # Add the resulting evidence to the result object.
  result.add_evidence(output_evidence, evidence.config)
  if analysis_report:
    status = analysis_report[0].strip()
  else:
    status = 'Jenkins analysis found no potential issues'
  result.close(self, success=True, status=status)

  return result
def run(self, evidence, result):
  """Run the Linux Account worker.

  Args:
      evidence (Evidence object):  The evidence to process
      result (TurbiniaTaskResult): The object to place task results into.

  Returns:
      TurbiniaTaskResult object.
  """
  # Destination for the generated report file.
  report_name = 'linux_account_analysis.txt'
  report_path = os.path.join(self.output_dir, report_name)

  # The evidence type we emit, backed by the report file.
  output_evidence = ReportText(source_path=report_path)

  try:
    extracted_paths = extract_artifacts(
        artifact_names=['LoginPolicyConfiguration'],
        disk_path=evidence.local_path, output_dir=self.output_dir,
        credentials=evidence.credentials)
  except TurbiniaException as exception:
    result.close(self, success=False, status=str(exception))
    return result

  # Only the first extracted shadow file is analysed.
  shadow_path = next(
      (path for path in extracted_paths if path.endswith('shadow')), None)
  if shadow_path is None:
    result.close(self, success=True, status='No shadow files found')
    return result

  # Read the shadow file contents.
  with open(shadow_path, 'r') as shadow_handle:
    shadow_lines = shadow_handle.readlines()

  hashnames = self._extract_linux_credentials(shadow_lines)
  bruteforce_timeout = self.task_config.get('bruteforce_timeout')
  (report, priority, summary) = self.analyse_shadow_file(
      shadow_lines, hashnames, timeout=bruteforce_timeout)

  output_evidence.text_data = report
  result.report_priority = priority
  result.report_data = report

  # Persist the report to disk.
  with open(report_path, 'wb') as report_handle:
    report_handle.write(output_evidence.text_data.encode('utf-8'))

  # Attach the generated evidence to the task result.
  result.add_evidence(output_evidence, evidence.config)
  result.close(self, success=True, status=summary)
  return result
def run(self, evidence, result):
  """Run Hadoop specific analysis on the evidences.

  Args:
      evidence (Evidence object):  The evidence we will process
      result (TurbiniaTaskResult): The object to place task results into.

  Returns:
      TurbiniaTaskResult object.
  """
  # The evidence type produced by this task.
  report_evidence = ReportText()

  # Destination for the generated report file.
  report_filename = 'hadoop_analysis.txt'
  report_path = os.path.join(self.output_dir, report_filename)
  report_evidence.local_path = report_path

  try:
    # We don't use FileArtifactExtractionTask as it export one evidence per
    # file extracted
    artifacts_dir = os.path.join(self.output_dir, 'artifacts')
    extracted_paths = extract_artifacts(
        artifact_names=['HadoopAppRoot'], disk_path=evidence.local_path,
        output_dir=artifacts_dir)

    (report_lines, priority, summary) = self._AnalyzeHadoopAppRoot(
        extracted_paths, artifacts_dir)
    if not report_lines:
      raise TurbiniaException(
          'Report generated by _AnalyzeHadoopAppRoot() is empty')

    report_evidence.text_data = '\n'.join(report_lines)
    result.report_data = report_evidence.text_data

    # Persist the report to disk as UTF-8 bytes.
    with open(report_path, 'wb') as report_handle:
      report_handle.write(report_evidence.text_data.encode('utf8'))
      report_handle.write('\n'.encode('utf8'))

    result.add_evidence(report_evidence, evidence.config)
    result.report_priority = priority
    result.close(self, success=True, status=summary)
  except TurbiniaException as exception:
    result.close(self, success=False, status=str(exception))
    return result
  return result
def _collect_windows_files(self, evidence):
  """Extract artifacts using image_export.

  Args:
      evidence (Evidence object):  The evidence to process

  Returns:
      location (str): The file path to the extracted evidence.
      number of artifacts (int): The number of files extracted.

  Raises:
      TurbiniaException: If artifact extraction fails or yields no files.
  """
  try:
    collected_artifacts = extract_artifacts(
        artifact_names=['WindowsSystemRegistryFiles'],
        disk_path=evidence.local_path, output_dir=self.output_dir)
  except TurbiniaException as e:
    raise TurbiniaException('artifact extraction failed: {}'.format(str(e)))

  # Guard against an empty result: indexing [0] below would otherwise raise
  # a bare IndexError instead of a task-level error.
  if not collected_artifacts:
    raise TurbiniaException(
        'artifact extraction found no WindowsSystemRegistryFiles')

  # Extract base dir from our list of collected artifacts
  location = os.path.dirname(collected_artifacts[0])

  return (location, len(collected_artifacts))