def run(self, evidence, result): """Run the Jenkins worker. Args: evidence (Evidence object): The evidence to process result (TurbiniaTaskResult): The object to place task results into. Returns: TurbiniaTaskResult object. """ # What type of evidence we should output. output_evidence = ReportText() # Where to store the resulting output file. output_file_name = 'jenkins_analysis.txt' output_file_path = os.path.join(self.output_dir, output_file_name) # Set the output file as the data source for the output evidence. output_evidence.local_path = output_file_path try: collected_artifacts = extract_artifacts( artifact_names=['JenkinsConfigFile'], disk_path=evidence.local_path, output_dir=os.path.join(self.output_dir, 'artifacts')) except TurbiniaException as e: result.close(self, success=False, status=str(e)) return result version = None credentials = [] for filepath in collected_artifacts: with open(filepath, 'r') as input_file: config = input_file.read() extracted_version = self._extract_jenkins_version(config) extracted_credentials = self._extract_jenkins_credentials(config) if extracted_version: version = extracted_version credentials.extend(extracted_credentials) analysis_report = self.analyze_jenkins(version, credentials) output_evidence.text_data = analysis_report # Write the report to the output file. with open(output_file_path, 'w') as fh: fh.write(output_evidence.text_data.encode('utf8')) fh.write('\n'.encode('utf8')) # Add the resulting evidence to the result object. result.add_evidence(output_evidence, evidence.config) if analysis_report: status = analysis_report[0].strip() else: status = 'Jenkins analysis found no potential issues' result.close(self, success=True, status=status) return result
def run(self, evidence, result): """Run the sshd_config analysis worker. Args: evidence (Evidence object): The evidence to process result (TurbiniaTaskResult): The object to place task results into. Returns: TurbiniaTaskResult object. """ # What type of evidence we should output. output_evidence = ReportText() # Where to store the resulting output file. output_file_name = 'sshd_config_analysis.txt' output_file_path = os.path.join(self.output_dir, output_file_name) # Set the output file as the data source for the output evidence. output_evidence.local_path = output_file_path # Read the input file with open(evidence.local_path, 'r') as input_file: sshd_config = input_file.read() analysis = self.analyse_sshd_config(sshd_config) output_evidence.text_data = analysis # Write the report to the output file. with open(output_file_path, 'w') as fh: fh.write(output_evidence.text_data.encode('utf-8')) # Add the resulting evidence to the result object. result.add_evidence(output_evidence, evidence.config) result.close(self, success=True) return result
def run(self, evidence, result): """Run Hadoop specific analysis on the evidences. Args: evidence (Evidence object): The evidence we will process result (TurbiniaTaskResult): The object to place task results into. Returns: TurbiniaTaskResult object. """ # What type of evidence we should output. output_evidence = ReportText() # Where to store the resulting output file. output_file_name = 'hadoop_analysis.txt' output_file_path = os.path.join(self.output_dir, output_file_name) output_evidence.local_path = output_file_path try: # We don't use FileArtifactExtractionTask as it export one evidence per # file extracted output_dir = os.path.join(self.output_dir, 'artifacts') collected_artifacts = extract_artifacts( artifact_names=['HadoopAppRoot'], disk_path=evidence.local_path, output_dir=output_dir) (report, priority, summary) = self._AnalyzeHadoopAppRoot(collected_artifacts, output_dir) if not report: raise TurbiniaException( 'Report generated by _AnalyzeHadoopAppRoot() is empty') output_evidence.text_data = '\n'.join(report) result.report_data = output_evidence.text_data # Write the report to the output file. with open(output_file_path, 'wb') as fh: fh.write(output_evidence.text_data.encode('utf8')) fh.write('\n'.encode('utf8')) result.add_evidence(output_evidence, evidence.config) result.report_priority = priority result.close(self, success=True, status=summary) except TurbiniaException as e: result.close(self, success=False, status=str(e)) return result return result
def run(self, evidence, result): """Run the Wordpress access log analysis worker. Args: evidence (Evidence object): The evidence to process result (TurbiniaTaskResult): The object to place task results into. Returns: TurbiniaTaskResult object. """ # What type of evidence we should output. output_evidence = ReportText() # Where to store the resulting output file. output_file_name = 'wp_acces_log_analysis.txt' output_file_path = os.path.join(self.output_dir, output_file_name) # Set the output file as the data source for the output evidence. output_evidence.local_path = output_file_path # Change open function if file is GZIP compressed. open_function = open if evidence.local_path.lower().endswith('gz'): open_function = gzip.open # Read the input file with open_function(evidence.local_path, 'rb') as input_file: access_logs_content = input_file.read().decode('utf-8') (report, priority, summary) = self.analyze_wp_access_logs(access_logs_content) output_evidence.text_data = report result.report_data = report result.report_priority = priority # Write the report to the output file. with open(output_file_path, 'wb') as fh: fh.write(output_evidence.text_data.encode('utf-8')) # Add the resulting evidence to the result object. result.add_evidence(output_evidence, evidence.config) result.close(self, success=True, status=summary) return result
def run(self, evidence, result): """Run the Jenkins worker. Args: evidence (Evidence object): The evidence to process result (TurbiniaTaskResult): The object to place task results into. Returns: TurbiniaTaskResult object. """ # What type of evidence we should output. output_evidence = ReportText() # Where to store the resulting output file. output_file_name = 'jenkins_analysis.txt' output_file_path = os.path.join(self.output_dir, output_file_name) # Set the output file as the data source for the output evidence. output_evidence.local_path = output_file_path # TODO(aarontp): We should find a more optimal solution for this because # this requires traversing the entire filesystem and extracting more files # than we need. Tracked in https://github.com/google/turbinia/issues/402 try: collected_artifacts = extract_files(file_name='config.xml', disk_path=evidence.local_path, output_dir=os.path.join( self.output_dir, 'artifacts')) except TurbiniaException as e: result.close(self, success=False, status=str(e)) return result jenkins_artifacts = [] jenkins_re = re.compile( r'^.*jenkins[^\/]*(\/users\/[^\/]+)*\/config\.xml$') for collected_artifact in collected_artifacts: if re.match(jenkins_re, collected_artifact): jenkins_artifacts.append(collected_artifact) version = None credentials = [] for filepath in jenkins_artifacts: with open(filepath, 'r') as input_file: config = input_file.read() extracted_version = self._extract_jenkins_version(config) extracted_credentials = self._extract_jenkins_credentials(config) if extracted_version: version = extracted_version credentials.extend(extracted_credentials) (report, priority, summary) = self.analyze_jenkins(version, credentials) output_evidence.text_data = report result.report_data = report result.report_priority = priority # Write the report to the output file. with open(output_file_path, 'w') as fh: fh.write(output_evidence.text_data.encode('utf8')) fh.write('\n'.encode('utf8')) # Add the resulting evidence to the result object. result.add_evidence(output_evidence, evidence.config) result.close(self, success=True, status=summary) return result