Esempio n. 1
0
  def run(self, evidence, result):
    """Run the Redis configuration analysis worker.

    Args:
        evidence (Evidence object):  The evidence we will process.
        result (TurbiniaTaskResult): The object to place task results into.

    Returns:
        TurbiniaTaskResult object.
    """
    # The analysis report is written to this file in the task output directory.
    report_path = os.path.join(self.output_dir, 'redis_config_analysis.txt')

    # The report file doubles as the data source of the output evidence.
    output_evidence = ReportText(source_path=report_path)

    # Load the Redis configuration to analyse.
    with open(evidence.local_path, 'r') as config_file:
      redis_config = config_file.read()

    report, priority, summary = self.analyse_redis_config(redis_config)
    output_evidence.text_data = report
    result.report_priority = priority
    result.report_data = report

    # Persist the report as UTF-8 encoded bytes.
    with open(report_path, 'wb') as report_file:
      report_file.write(output_evidence.text_data.encode('utf-8'))

    result.add_evidence(output_evidence, evidence.config)
    result.close(self, success=True, status=summary)
    return result
Esempio n. 2
0
    def run(self, evidence, result):
        """Run the sshd_config analysis worker.

        Args:
            evidence (Evidence object):  The evidence to process
            result (TurbiniaTaskResult): The object to place task results into.

        Returns:
            TurbiniaTaskResult object.
        """
        # What type of evidence we should output.
        output_evidence = ReportText()

        # Where to store the resulting output file.
        output_file_name = 'sshd_config_analysis.txt'
        output_file_path = os.path.join(self.output_dir, output_file_name)
        # Set the output file as the data source for the output evidence.
        output_evidence.local_path = output_file_path

        # Read the input file
        with open(evidence.local_path, 'r') as input_file:
            sshd_config = input_file.read()

        analysis = self.analyse_sshd_config(sshd_config)
        output_evidence.text_data = analysis

        # Write the report to the output file. The file must be opened in
        # binary mode ('wb') because the payload is encoded to bytes; the
        # previous text mode ('w') raised TypeError on the bytes write.
        with open(output_file_path, 'wb') as fh:
            fh.write(output_evidence.text_data.encode('utf-8'))

        # Add the resulting evidence to the result object.
        result.add_evidence(output_evidence, evidence.config)
        result.close(self, success=True)
        return result
Esempio n. 3
0
    def run(self, evidence, result):
        """Run the Jenkins worker.

        Args:
            evidence (Evidence object):  The evidence to process
            result (TurbiniaTaskResult): The object to place task results into.

        Returns:
            TurbiniaTaskResult object.
        """
        # What type of evidence we should output.
        output_evidence = ReportText()

        # Where to store the resulting output file.
        output_file_name = 'jenkins_analysis.txt'
        output_file_path = os.path.join(self.output_dir, output_file_name)

        # Set the output file as the data source for the output evidence.
        output_evidence.local_path = output_file_path

        try:
            collected_artifacts = extract_artifacts(
                artifact_names=['JenkinsConfigFile'],
                disk_path=evidence.local_path,
                output_dir=os.path.join(self.output_dir, 'artifacts'))
        except TurbiniaException as e:
            result.close(self, success=False, status=str(e))
            return result

        version = None
        credentials = []
        for filepath in collected_artifacts:
            with open(filepath, 'r') as input_file:
                config = input_file.read()

            extracted_version = self._extract_jenkins_version(config)
            extracted_credentials = self._extract_jenkins_credentials(config)

            # Keep the last version found across all config files.
            if extracted_version:
                version = extracted_version

            credentials.extend(extracted_credentials)

        analysis_report = self.analyze_jenkins(version, credentials)
        output_evidence.text_data = analysis_report

        # Write the report to the output file. Binary mode ('wb') is required
        # because the payload is encoded to bytes; the previous text mode
        # ('w') raised TypeError on the bytes write.
        with open(output_file_path, 'wb') as fh:
            fh.write(output_evidence.text_data.encode('utf8'))
            fh.write('\n'.encode('utf8'))

        # Add the resulting evidence to the result object.
        result.add_evidence(output_evidence, evidence.config)
        if analysis_report:
            status = analysis_report[0].strip()
        else:
            status = 'Jenkins analysis found no potential issues'
        result.close(self, success=True, status=status)

        return result
Esempio n. 4
0
    def run(self, evidence, result):
        """Run the Wordpress Creds worker.

        Args:
            evidence (Evidence object):  The evidence to process
            result (TurbiniaTaskResult): The object to place task results into.

        Returns:
            TurbiniaTaskResult object.
        """
        # The analysis report lands in this file in the task output directory.
        report_path = os.path.join(self.output_dir,
                                   'wordpress_creds_analysis.txt')

        # The report file is the data source for the output evidence.
        output_evidence = ReportText(source_path=report_path)

        try:
            location, num_files = self._collect_wordpress_file(evidence)
            if num_files == 0:
                result.close(self,
                             success=True,
                             status='No Wordpress database found')
                return result
        except TurbiniaException as e:
            result.close(
                self, success=False,
                status='Error retrieving Wordpress database: {0:s}'.format(
                    str(e)))
            return result

        try:
            creds, hashnames = self._extract_wordpress_hashes(location)
        except TurbiniaException as e:
            result.close(self, success=False, status=str(e))
            return result

        timeout = self.task_config.get('bruteforce_timeout')
        report, priority, summary = self._analyse_wordpress_creds(
            creds, hashnames, timeout=timeout)
        output_evidence.text_data = report
        result.report_data = report
        result.report_priority = priority

        # Persist the report, UTF-8 encoded, with a trailing newline.
        with open(report_path, 'wb') as report_file:
            report_file.write(output_evidence.text_data.encode('utf8'))
            report_file.write('\n'.encode('utf8'))

        result.add_evidence(output_evidence, evidence.config)
        result.close(self, success=True, status=summary)

        return result
Esempio n. 5
0
    def run(self, evidence, result):
        """Run the Linux Account worker.

        Args:
            evidence (Evidence object):  The evidence to process
            result (TurbiniaTaskResult): The object to place task results into.
        Returns:
            TurbiniaTaskResult object.
        """
        # The analysis report lands in this file in the task output directory.
        report_path = os.path.join(self.output_dir,
                                   'linux_account_analysis.txt')

        # The report file is the data source for the output evidence.
        output_evidence = ReportText(source_path=report_path)

        try:
            artifacts = extract_artifacts(
                artifact_names=['LoginPolicyConfiguration'],
                disk_path=evidence.local_path,
                output_dir=self.output_dir,
                credentials=evidence.credentials)
        except TurbiniaException as e:
            result.close(self, success=False, status=str(e))
            return result

        # Only the first artifact whose path ends in 'shadow' is analysed;
        # the task returns as soon as it has been processed.
        for artifact_path in artifacts:
            if not artifact_path.endswith('shadow'):
                continue

            with open(artifact_path, 'r') as shadow:
                shadow_lines = shadow.readlines()

            hashnames = self._extract_linux_credentials(shadow_lines)
            timeout = self.task_config.get('bruteforce_timeout')
            report, priority, summary = self.analyse_shadow_file(
                shadow_lines, hashnames, timeout=timeout)
            output_evidence.text_data = report
            result.report_priority = priority
            result.report_data = report

            # Persist the report as UTF-8 encoded bytes.
            with open(report_path, 'wb') as report_file:
                report_file.write(output_evidence.text_data.encode('utf-8'))

            result.add_evidence(output_evidence, evidence.config)
            result.close(self, success=True, status=summary)
            return result

        result.close(self, success=True, status='No shadow files found')
        return result
Esempio n. 6
0
    def run(self, evidence, result):
        """Run Hadoop specific analysis on the evidences.

        Args:
            evidence (Evidence object):  The evidence we will process
            result (TurbiniaTaskResult): The object to place task results into.

        Returns:
            TurbiniaTaskResult object.
        """
        # What type of evidence we should output.
        output_evidence = ReportText()

        # The analysis report lands in this file in the task output directory.
        report_path = os.path.join(self.output_dir, 'hadoop_analysis.txt')
        output_evidence.local_path = report_path

        try:
            # FileArtifactExtractionTask is not used here because it emits
            # one evidence object per extracted file.
            artifacts_dir = os.path.join(self.output_dir, 'artifacts')
            artifacts = extract_artifacts(artifact_names=['HadoopAppRoot'],
                                          disk_path=evidence.local_path,
                                          output_dir=artifacts_dir)

            report, priority, summary = self._AnalyzeHadoopAppRoot(
                artifacts, artifacts_dir)
            if not report:
                raise TurbiniaException(
                    'Report generated by _AnalyzeHadoopAppRoot() is empty')

            output_evidence.text_data = '\n'.join(report)
            result.report_data = output_evidence.text_data

            # Persist the report, UTF-8 encoded, with a trailing newline.
            with open(report_path, 'wb') as report_file:
                report_file.write(output_evidence.text_data.encode('utf8'))
                report_file.write('\n'.encode('utf8'))

            result.add_evidence(output_evidence, evidence.config)
            result.report_priority = priority
            result.close(self, success=True, status=summary)
        except TurbiniaException as e:
            result.close(self, success=False, status=str(e))

        return result
Esempio n. 7
0
  def run(self, evidence, result):
    """Run the Windows Account worker.

    Args:
        evidence (Evidence object):  The evidence to process
        result (TurbiniaTaskResult): The object to place task results into.
    Returns:
        TurbiniaTaskResult object.
    """
    # The analysis report lands in this file in the task output directory.
    report_path = os.path.join(self.output_dir, 'windows_account_analysis.txt')

    # The report file is the data source for the output evidence.
    output_evidence = ReportText(source_path=report_path)

    try:
      location, num_files = self._collect_windows_files(evidence)
    except TurbiniaException as e:
      result.close(
          self, success=True,
          status='No Windows account files found: {0:s}'.format(str(e)))
      return result

    # At least two collected files are required to proceed.
    if num_files < 2:
      result.close(self, success=True, status='No Windows account files found')
      return result

    try:
      creds, hashnames = self._extract_windows_hashes(
          result, os.path.join(location, 'Windows', 'System32', 'config'))
    except TurbiniaException as e:
      result.close(
          self, success=False,
          status='Unable to extract hashes from registry files: {0:s}'.format(
              str(e)))
      return result

    timeout = self.task_config.get('bruteforce_timeout')
    report, priority, summary = self._analyse_windows_creds(
        creds, hashnames, timeout=timeout)
    output_evidence.text_data = report
    result.report_priority = priority
    result.report_data = report

    # Persist the report as UTF-8 encoded bytes.
    with open(report_path, 'wb') as report_file:
      report_file.write(output_evidence.text_data.encode('utf-8'))

    result.add_evidence(output_evidence, evidence.config)
    result.close(self, success=True, status=summary)
    return result
Esempio n. 8
0
    def run(self, evidence, result):
        """Task to execute fsstat.

        Args:
            evidence (Evidence object):  The evidence we will process.
            result (TurbiniaTaskResult): The object to place task results into.

        Returns:
            TurbiniaTaskResult object.
        """
        fsstat_output = os.path.join(self.output_dir, 'fsstat.txt')
        # Since fsstat does not support XFS, we won't run it when we know the
        # partition is XFS.
        if evidence.path_spec.type_indicator == 'XFS':
            message = 'Not running fsstat since partition is XFS'
            result.log(message)
            # The keyword is 'success', matching every other close() call in
            # this codebase; the previous 'successful=' raised a TypeError.
            result.close(self, success=True, status=message)
        else:
            output_evidence = ReportText(source_path=fsstat_output)
            cmd = ['sudo', 'fsstat', evidence.device_path]
            result.log('Running fsstat as [{0!s}]'.format(cmd))
            self.execute(cmd,
                         result,
                         stdout_file=fsstat_output,
                         new_evidence=[output_evidence],
                         close=True)

        return result
Esempio n. 9
0
    def run(self, evidence, result):
        """Task to execute fsstat.

        Args:
            evidence (Evidence object):  The evidence we will process.
            result (TurbiniaTaskResult): The object to place task results into.

        Returns:
            TurbiniaTaskResult object.
        """
        fsstat_output = os.path.join(self.output_dir, 'fsstat.txt')

        if evidence.path_spec is None:
            message = 'Could not run fsstat since partition does not have a path_spec'
            result.log(message)
            result.close(self, success=False, status=message)
            return result

        # fsstat does not support some filesystems, so skip partitions we
        # know it cannot handle.
        if evidence.path_spec.type_indicator in ('APFS', 'XFS'):
            message = 'Not running fsstat since partition is not supported'
            result.log(message)
            result.close(self, success=True, status=message)
            return result

        output_evidence = ReportText(source_path=fsstat_output)
        command = ['sudo', 'fsstat', evidence.device_path]
        result.log('Running fsstat as [{0!s}]'.format(command))
        self.execute(command,
                     result,
                     stdout_file=fsstat_output,
                     new_evidence=[output_evidence],
                     close=True)

        return result
Esempio n. 10
0
    def run(self, evidence, result):
        """Task to index a disk with dfDewey.

        Args:
            evidence (Evidence object):  The evidence we will process.
            result (TurbiniaTaskResult): The object to place task results into.

        Returns:
            TurbiniaTaskResult object.
        """
        config.LoadConfig()

        dfdewey_output = os.path.join(self.output_dir, 'dfdewey.txt')
        success = True
        status_summary = ''

        if self.task_config.get('case'):
            cmd = []
            # Forward the DFDEWEY_* datastore settings to the child process
            # through its environment.
            config_vars = [
                configvar for configvar in dir(config)
                if configvar.startswith('DFDEWEY_')
            ]
            env = os.environ.copy()
            for configvar in config_vars:
                configval = getattr(config, configvar)
                # DFDEWEY_OS_URL is only forwarded when it has a value.
                if configvar != 'DFDEWEY_OS_URL' or configval:
                    # Reuse the value fetched above instead of re-reading it
                    # with a second getattr() call.
                    env[configvar] = '{0!s}'.format(configval)

            cmd.append('dfdewey')
            cmd.append(self.task_config.get('case'))
            cmd.append(evidence.local_path)
            if self.task_config.get('search'):
                cmd.extend(['-s', self.task_config.get('search')])
            output_evidence = ReportText(source_path=dfdewey_output)

            result.log('Running dfDewey as [{0:s}]'.format(' '.join(cmd)))
            ret, _ = self.execute(cmd,
                                  result,
                                  stdout_file=dfdewey_output,
                                  new_evidence=[output_evidence],
                                  close=True,
                                  env=env)
            status_summary = 'dfDewey executed with [{0:s}]'.format(
                ' '.join(cmd))
            if ret != 0:
                success = False
                status_summary = 'dfDewey execution failed. Return code: {0:d}'.format(
                    ret)
                result.log(status_summary)
        else:
            status_summary = (
                'Not running dfDewey. Case was not provided in task config.')
            result.log(status_summary)

        result.close(self, success=success, status=status_summary)
        return result
Esempio n. 11
0
    def run(self, evidence, result):
        """Run the Wordpress access log analysis worker.

        Args:
            evidence (Evidence object):  The evidence to process
            result (TurbiniaTaskResult): The object to place task results into.

        Returns:
            TurbiniaTaskResult object.
        """
        # What type of evidence we should output.
        output_evidence = ReportText()

        # The analysis report lands in this file in the task output directory.
        report_path = os.path.join(self.output_dir,
                                   'wp_acces_log_analysis.txt')
        output_evidence.local_path = report_path

        # Use gzip.open for compressed logs, the builtin open otherwise.
        opener = open
        if evidence.local_path.lower().endswith('gz'):
            opener = gzip.open

        # Read and decode the access log.
        with opener(evidence.local_path, 'rb') as log_file:
            access_logs_content = log_file.read().decode('utf-8')

        report, priority, summary = self.analyze_wp_access_logs(
            access_logs_content)
        output_evidence.text_data = report
        result.report_data = report
        result.report_priority = priority

        # Persist the report as UTF-8 encoded bytes.
        with open(report_path, 'wb') as report_file:
            report_file.write(output_evidence.text_data.encode('utf-8'))

        result.add_evidence(output_evidence, evidence.config)
        result.close(self, success=True, status=summary)
        return result
Esempio n. 12
0
    def run(self, evidence, result):
        """Test Stat task.

        Args:
            evidence: Path to data to process.
            result: TurbiniaTaskResult to populate with results.

        Returns:
            TurbiniaTaskResult object.
        """
        report = ReportText()
        result.log('Running stat on evidence {0:s}'.format(
            evidence.local_path))
        report.text_data = str(os.stat(evidence.local_path))
        with open(os.path.join(self.output_dir, 'report.txt'), 'w') as f:
            f.write(report.text_data)

        # Pass the evidence config along with the new evidence, matching the
        # add_evidence() calls elsewhere in this codebase.
        result.add_evidence(report, evidence.config)
        # close() takes the task as its first argument; the previous call
        # omitted it.
        result.close(self, success=True)

        return result
Esempio n. 13
0
    def run(self, evidence, result):
        """Run the cron analysis worker.

        Args:
            evidence (Evidence object):  The evidence we will process.
            result (TurbiniaTaskResult): The object to place task results into.

        Returns:
            TurbiniaTaskResult object.
        """
        # The analysis report lands in this file in the task output directory.
        report_path = os.path.join(self.output_dir, 'cron_analysis.txt')

        # The report file is the data source for the output evidence.
        output_evidence = ReportText(source_path=report_path)

        # Load the crontab; undecodable content fails the task cleanly.
        with open(evidence.local_path, 'r') as crontab_file:
            try:
                crontab = crontab_file.read()
            except UnicodeDecodeError as exception:
                message = 'Error parsing cron file {0:s}: {1!s}'.format(
                    evidence.local_path, exception)
                result.log(message)
                result.close(self, success=False, status=message)
                return result

        report, priority, summary = self.analyse_crontab(crontab)
        output_evidence.text_data = report
        result.report_priority = priority
        result.report_data = report

        # Persist the report as UTF-8 encoded bytes.
        with open(report_path, 'wb') as report_file:
            report_file.write(output_evidence.text_data.encode('utf-8'))

        result.add_evidence(output_evidence, evidence.config)
        result.close(self, success=True, status=summary)
        return result
Esempio n. 14
0
    def run(self, evidence, result):
        """Test Stat task.

        Args:
            evidence: Path to data to process.
            result: TurbiniaTaskResult to populate with results.

        Returns:
            TurbiniaTaskResult: object.
        """
        result.log('Running stat on evidence {0:s}'.format(
            evidence.source_path))
        report_path = os.path.join(self.output_dir, 'report.txt')
        stat_report = ReportText(source_path=report_path)
        stat_report.text_data = str(os.stat(evidence.source_path))

        # Persist the stat output next to the other task artifacts.
        with open(report_path, 'w') as report_file:
            report_file.write(stat_report.text_data)

        result.add_evidence(stat_report, evidence.config)
        result.close(self, success=True)

        return result
Esempio n. 15
0
    def run(self, evidence, result):
        """Run the Jupyter worker.

        Args:
            evidence (Evidence object):  The evidence to process
            result (TurbiniaTaskResult): The object to place task results into.

        Returns:
            TurbiniaTaskResult object.
        """
        # Where to store the resulting output file.
        output_file_name = 'jupyter_analysis.txt'
        output_file_path = os.path.join(self.output_dir, output_file_name)

        # What type of evidence we should output.
        output_evidence = ReportText(source_path=output_file_path)

        # Read the config file. A context manager guarantees the handle is
        # closed; the previous bare open().read() leaked the file object.
        with open(evidence.local_path, 'r') as config_file:
            jupyter_config = config_file.read()

        # Extract the config and return the report
        (report, priority, summary) = self.analyse_config(jupyter_config)
        output_evidence.text_data = report
        result.report_priority = priority
        result.report_data = report

        # Write the report to the output file.
        with open(output_file_path, 'w', encoding='utf-8') as fh:
            fh.write(output_evidence.text_data)
            fh.write('\n')

        # Add the resulting evidence to the result object.
        result.add_evidence(output_evidence, evidence.config)
        result.close(self, success=True, status=summary)

        return result
Esempio n. 16
0
  def run(self, evidence, result):
    """Run the Loki worker.

    Args:
        evidence (Evidence object):  The evidence to process
        result (TurbiniaTaskResult): The object to place task results into.
    Returns:
        TurbiniaTaskResult object.
    """
    # The analysis report lands in this file in the task output directory.
    report_path = os.path.join(self.output_dir, 'loki_analysis.txt')

    # The report file is the data source for the output evidence.
    output_evidence = ReportText(source_path=report_path)

    try:
      report, priority, summary = self.runLoki(result, evidence)
    except TurbiniaException as e:
      result.close(
          self, success=False, status='Unable to run Loki: {0:s}'.format(
              str(e)))
      return result

    output_evidence.text_data = report
    result.report_priority = priority
    result.report_data = report

    # Persist the report as UTF-8 encoded bytes.
    with open(report_path, 'wb') as report_file:
      report_file.write(output_evidence.text_data.encode('utf-8'))

    result.add_evidence(output_evidence, evidence.config)
    result.close(self, success=True, status=summary)
    return result
Esempio n. 17
0
class StatJob(TurbiniaJob):
  """Job to run Stat."""

  # The types of evidence that this Job will process. Reference the classes
  # directly; type(RawDisk()) just returns RawDisk after needlessly
  # constructing (and discarding) an instance.
  evidence_input = [RawDisk, Directory]
  evidence_output = [ReportText]

  def __init__(self):
    super(StatJob, self).__init__(name='StatJob')

  def create_tasks(self, evidence):
    """Create task for Stat.

    Args:
      evidence: List of evidence object to process

    Returns:
        A list of StatTasks.
    """
    return [StatTask() for _ in evidence]
Esempio n. 18
0
    def run(self, evidence, result):
        """Run the Gitlab worker.

        Args:
            evidence (Evidence object):  The evidence we will process.
            result (TurbiniaTaskResult): The object to place task results into.

        Returns:
            TurbiniaTaskResult object.
        """
        # Where to store the resulting output file.
        output_file_name = 'gitlab_analysis.txt'
        output_file_path = os.path.join(self.output_dir, output_file_name)
        # Set the output file as the data source for the output evidence.
        output_evidence = ReportText(source_path=output_file_path)

        # Partial reports and summaries from the individual log checks are
        # collected here and joined into one report at the end.
        reports = []
        summaries = []

        # Grep for exif in workhorse logs
        (r, priority, s) = self._is_exif_in_logs(
            result, evidence.local_path,
            os.path.join('var', 'log', 'gitlab', 'workhorse.log'))
        if r != '':
            reports.append(r)
        if s != '':
            summaries.append(s)

        # Same exif check against the rotated gitlab-workhorse log files.
        (r, p2, s) = self._is_exif_in_logs(
            result, evidence.local_path,
            os.path.join('var', 'log', 'gitlab', 'gitlab-workhorse', '@*'))
        if r != '':
            reports.append(r)
        if s != '':
            summaries.append(s)
        # Keep the lower value — presumably the more severe priority, since a
        # LOW final priority is treated as "nothing found" below. TODO confirm.
        if p2 < priority:
            priority = p2

        # TODO: Check for Metasploit Module
        # 'https://packetstormsecurity.com/files/160441/GitLab-File-Read-Remote-Code-Execution.html'

        # Check nginx access logs for path-traversal attempts.
        (r, p3, s) = self._is_traversal_in_logs(
            result, evidence.local_path,
            os.path.join('var', 'log', 'gitlab', 'nginx', '*access*'))
        if r != '':
            reports.append(r)
        if s != '':
            summaries.append(s)
        if p3 < priority:
            priority = p3

        # NOTE(review): 'priority.LOW' reads the LOW attribute off the
        # priority value itself; this only works if the value exposes LOW
        # (e.g. an enum member). Verify this should not be 'Priority.LOW'.
        if priority == priority.LOW:
            result.close(self,
                         success=True,
                         status='No Gitlab exploitation found')
            return result

        report = " ".join(reports)
        summary = " ".join(summaries)

        output_evidence.text_data = report
        result.report_priority = priority
        result.report_data = report

        # Write the report to the output file.
        with open(output_file_path, 'wb') as fh:
            fh.write(output_evidence.text_data.encode('utf-8'))

        # Add the resulting evidence to the result object.
        result.add_evidence(output_evidence, evidence.config)
        result.close(self, success=True, status=summary)
        return result
Esempio n. 19
0
    def run(self, evidence, result):
        """Run the Jenkins worker.

        Args:
            evidence (Evidence object):  The evidence to process
            result (TurbiniaTaskResult): The object to place task results into.

        Returns:
            TurbiniaTaskResult object.
        """
        # The analysis report lands in this file in the task output directory.
        report_path = os.path.join(self.output_dir, 'jenkins_analysis.txt')

        # The report file is the data source for the output evidence.
        output_evidence = ReportText(source_path=report_path)

        # TODO(aarontp): We should find a more optimal solution for this because
        # this requires traversing the entire filesystem and extracting more files
        # than we need.  Tracked in https://github.com/google/turbinia/issues/402
        try:
            collected_artifacts = extract_files(
                file_name='config.xml',
                disk_path=evidence.local_path,
                output_dir=os.path.join(self.output_dir, 'artifacts'))
        except TurbiniaException as e:
            result.close(self, success=False, status=str(e))
            return result

        # Keep only config.xml files that live under a Jenkins directory.
        jenkins_re = re.compile(
            r'^.*jenkins[^\/]*(\/users\/[^\/]+)*\/config\.xml$')
        jenkins_artifacts = [
            artifact for artifact in collected_artifacts
            if re.match(jenkins_re, artifact)
        ]

        version = None
        credentials = []
        for config_path in jenkins_artifacts:
            with open(config_path, 'r') as config_file:
                config = config_file.read()

            # The last version found across all config files wins.
            found_version = self._extract_jenkins_version(config)
            if found_version:
                version = found_version

            credentials.extend(self._extract_jenkins_credentials(config))

        report, priority, summary = self.analyze_jenkins(version, credentials)
        output_evidence.text_data = report
        result.report_data = report
        result.report_priority = priority

        # Persist the report, UTF-8 encoded, with a trailing newline.
        with open(report_path, 'wb') as report_file:
            report_file.write(output_evidence.text_data.encode('utf8'))
            report_file.write('\n'.encode('utf8'))

        result.add_evidence(output_evidence, evidence.config)
        result.close(self, success=True, status=summary)

        return result