Example #1
File: engine.py  Project: vonnopsled/plaso
  def PreprocessSources(self, source_path_specs, resolver_context=None):
    """Preprocesses the sources.

    Args:
      source_path_specs: list of path specifications (instances of
                         dfvfs.PathSpec) to process.
      resolver_context: Optional resolver context (instance of dfvfs.Context).
                        The default is None which will use the built in context
                        which is not multi process safe. Note that every thread
                        or process must have its own resolver context.
    """
    for source_path_spec in source_path_specs:
      file_system, mount_point = self.GetSourceFileSystem(
          source_path_spec, resolver_context=resolver_context)

      try:
        searcher = file_system_searcher.FileSystemSearcher(
            file_system, mount_point)
        platform = preprocess_interface.GuessOS(searcher)
        if platform:
          self.knowledge_base.platform = platform

          preprocess_manager.PreprocessPluginsManager.RunPlugins(
              platform, file_system, mount_point, self.knowledge_base)

      finally:
        file_system.Close()

      if platform:
        break
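
A minimal usage sketch for PreprocessSources: the engine object and the mount location are illustrative assumptions; only the dfVFS path-spec factory call and the method call itself follow from the code above.

# Sketch only: driving PreprocessSources from a front-end (names assumed).
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory

source_path_spec = path_spec_factory.Factory.NewPathSpec(
    dfvfs_definitions.TYPE_INDICATOR_OS, location=u'/mnt/evidence')

engine = SomeEngine()  # hypothetical: any engine exposing PreprocessSources.
engine.PreprocessSources([source_path_spec])

# The guessed platform, if any, is now available on the knowledge base.
print engine.knowledge_base.platform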
Example #2
  def _Preprocess(self, file_system, mount_point):
    """Preprocesses the image.

    Args:
      file_system: the file system object (instance of vfs.FileSystem)
                   to be preprocessed.
      mount_point: the mount point path specification (instance of
                   path.PathSpec) that refers to the base location
                   of the file system.
    """
    if self._knowledge_base is not None:
      return

    self._knowledge_base = knowledge_base.KnowledgeBase()

    logging.info(u'Guessing OS')

    searcher = file_system_searcher.FileSystemSearcher(
        file_system, mount_point)
    platform = preprocess_interface.GuessOS(searcher)
    logging.info(u'OS: {0:s}'.format(platform))

    logging.info(u'Running preprocess.')

    preprocess_manager.PreprocessPluginsManager.RunPlugins(
        platform, file_system, mount_point, self._knowledge_base)

    logging.info(u'Preprocess done, saving files from image.')
Example #3
File: engine.py  Project: iwm911/plaso
  def PreprocessSource(self, pre_obj, platform, resolver_context=None):
    """Preprocesses the source and fills the preprocessing object.

    Args:
      pre_obj: the preprocessing object (instance of PreprocessObject).
      platform: string that indicates the platform (operating system).
      resolver_context: Optional resolver context (instance of dfvfs.Context).
                        The default is None. Note that every thread or process
                        must have its own resolver context.
    """
    searcher = self.GetSourceFileSystemSearcher(
        resolver_context=resolver_context)
    if not platform:
      platform = preprocess_interface.GuessOS(searcher)
    pre_obj.guessed_os = platform

    plugin_list = preprocessors.PreProcessList(pre_obj)

    for weight in plugin_list.GetWeightList(platform):
      for plugin in plugin_list.GetWeight(platform, weight):
        try:
          plugin.Run(searcher)
        except (IOError, errors.PreProcessFail) as exception:
          logging.warning((
              u'Unable to run preprocessor: {0:s} for attribute: {1:s} '
              u'with error: {2:s}').format(
                  plugin.plugin_name, plugin.ATTRIBUTE, exception))
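
In this older variant the results are written into a preprocessing object rather than a knowledge base. A minimal calling sketch follows; the engine instance and the PreprocessObject constructor location are assumptions for illustration.

# Sketch: the pre-object based variant (engine and constructor names assumed).
pre_obj = event.PreprocessObject()  # assumed constructor location
engine.PreprocessSource(pre_obj, u'Windows')
print pre_obj.guessed_os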
Example #4
  def PreprocessSource(self, platform, resolver_context=None):
    """Preprocesses the source and fills the preprocessing object.

    Args:
      platform: string that indicates the platform (operating system).
      resolver_context: Optional resolver context (instance of dfvfs.Context).
                        The default is None. Note that every thread or process
                        must have its own resolver context.
    """
    searcher = self.GetSourceFileSystemSearcher(
        resolver_context=resolver_context)
    if not platform:
      platform = preprocess_interface.GuessOS(searcher)
    self.knowledge_base.platform = platform

    preprocess_manager.PreprocessPluginsManager.RunPlugins(
        platform, searcher, self.knowledge_base)
Example #5
  def _Preprocess(self, searcher):
    """Preprocesses the image.

    Args:
      searcher: The file system searcher object (instance of
                dfvfs.FileSystemSearcher).
    """
    if self._knowledge_base is not None:
      return

    self._knowledge_base = knowledge_base.KnowledgeBase()

    logging.info(u'Guessing OS')

    platform = preprocess_interface.GuessOS(searcher)
    logging.info(u'OS: {0:s}'.format(platform))

    logging.info(u'Running preprocess.')

    preprocess_manager.PreprocessPluginsManager.RunPlugins(
        platform, searcher, self._knowledge_base)

    logging.info(u'Preprocess done, saving files from image.')
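
Examples #2 through #5 share the same flow: build a file system searcher, guess the operating system, then run the preprocessing plugins. A condensed sketch of that flow, using the searcher-based RunPlugins signature from Examples #4 and #5 and the same module names as the snippets above; file_system and mount_point are assumed to come from an earlier dfVFS source scan that is not shown here.

# Sketch of the shared preprocessing flow (searcher-based plugin signature).
from dfvfs.helpers import file_system_searcher

searcher = file_system_searcher.FileSystemSearcher(file_system, mount_point)
platform = preprocess_interface.GuessOS(searcher)

knowledge_base_object = knowledge_base.KnowledgeBase()
preprocess_manager.PreprocessPluginsManager.RunPlugins(
    platform, searcher, knowledge_base_object)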
Example #6
def Main():
    """Start the tool."""
    multiprocessing.freeze_support()

    front_end = Log2TimelineFrontend()

    epilog = ("""
      Example usage:

      Run the tool against an image (full kitchen sink)
          log2timeline.py /cases/mycase/plaso.dump image.dd

      Instead of answering questions, indicate some of the options on the
      command line (including data from particular VSS stores).
          log2timeline.py -o 63 --vss_stores 1,2 /cases/plaso_vss.dump image.E01

      And that's how you build a timeline using log2timeline...
      """)
    description = ("""
      log2timeline is the main front-end to the plaso back-end, used to
      collect and correlate events extracted from a filesystem.

      More information can be gathered from here:
        http://plaso.kiddaland.net/usage/log2timeline
      """)
    arg_parser = argparse.ArgumentParser(
        description=textwrap.dedent(description),
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent(epilog),
        add_help=False)

    # Create few argument groups to make formatting help messages clearer.
    info_group = arg_parser.add_argument_group('Informational Arguments')
    function_group = arg_parser.add_argument_group('Functional Arguments')
    deep_group = arg_parser.add_argument_group('Deep Analysis Arguments')
    performance_group = arg_parser.add_argument_group('Performance Arguments')

    function_group.add_argument(
        '-z',
        '--zone',
        '--timezone',
        dest='timezone',
        action='store',
        type=str,
        default='UTC',
        help=
        ('Define the timezone of the IMAGE (not the output). This is usually '
         'discovered automatically by preprocessing but might need to be '
         'specifically set if preprocessing does not properly detect or to '
         'overwrite the detected time zone.'))

    function_group.add_argument(
        '-t',
        '--text',
        dest='text_prepend',
        action='store',
        type=unicode,
        default=u'',
        metavar='TEXT',
        help=(r'Define a free form text string that is prepended to each path '
              r'to make it easier to distinguish one record from another in a '
              r'timeline (like c:\, or host_w_c:\)'))

    function_group.add_argument(
        '--parsers',
        dest='parsers',
        type=str,
        action='store',
        default='',
        metavar='PARSER_LIST',
        help=(
            'Define a list of parsers to use by the tool. This is a comma '
            'separated list where each entry can be either a name of a parser '
            'or a parser list. Each entry can be prepended with a minus sign '
            'to negate the selection (exclude it). The list match is an '
            'exact match while an individual parser matching is a case '
            'insensitive substring match, with support for glob patterns. '
            'Examples would be: "reg" that matches the substring "reg" in '
            'all parser names or the glob pattern "sky[pd]" that would match '
            'all parsers that have the string "skyp" or "skyd" in their names. '
            'All matching is case insensitive.'))

    info_group.add_argument('-h',
                            '--help',
                            action='help',
                            help='Show this help message and exit.')

    info_group.add_argument(
        '--logfile',
        action='store',
        metavar='FILENAME',
        dest='logfile',
        type=unicode,
        default=u'',
        help=
        ('If defined all log messages will be redirected to this file instead '
         'of the default STDERR.'))

    function_group.add_argument(
        '-p',
        '--preprocess',
        dest='preprocess',
        action='store_true',
        default=False,
        help=('Turn on preprocessing. Preprocessing is turned on by default '
              'when parsing image files, however if a mount point is being '
              'parsed then this parameter needs to be set manually.'))

    performance_group.add_argument(
        '--buffer_size',
        '--buffer-size',
        '--bs',
        dest='buffer_size',
        action='store',
        default=0,
        help='The buffer size for the output (defaults to 196MiB).')

    performance_group.add_argument(
        '--workers',
        dest='workers',
        action='store',
        type=int,
        default=0,
        help=('The number of worker threads [defaults to available system '
              'CPUs minus three].'))

    function_group.add_argument(
        '-i',
        '--image',
        dest='image',
        action='store_true',
        default=False,
        help=(
            'Indicates that this is an image instead of a regular file. It is '
            'not necessary to include this option if -o (offset) is used, since '
            'this option is then assumed. Use this when parsing an image with an '
            'offset of zero.'))

    front_end.AddVssProcessingOptions(deep_group)

    performance_group.add_argument(
        '--single_thread',
        '--single-thread',
        '--single_process',
        '--single-process',
        dest='single_process',
        action='store_true',
        default=False,
        help=(u'Indicate that the tool should run in a single process.'))

    function_group.add_argument(
        '-f',
        '--file_filter',
        '--file-filter',
        dest='file_filter',
        action='store',
        type=unicode,
        default=None,
        help=
        ('List of files to include for targeted collection of files to parse, '
         'one line per file path, setup is /path|file - where each element '
         'can contain either a variable set in the preprocessing stage or a '
         'regular expression'))

    deep_group.add_argument('--scan_archives',
                            dest='open_files',
                            action='store_true',
                            default=False,
                            help=argparse.SUPPRESS)

    # This option is "hidden" for the time being, still left in there for testing
    # purposes, but hidden from the tool usage and help messages.
    #    help=('Indicate that the tool should try to open files to extract embedd'
    #          'ed files within them, for instance to extract files from compress'
    #          'ed containers, etc. Be AWARE THAT THIS IS EXTREMELY SLOW.'))

    front_end.AddImageOptions(function_group)

    function_group.add_argument(
        '--partition',
        dest='partition_number',
        action='store',
        type=int,
        default=None,
        help=('Choose a partition number from a disk image. This partition '
              'number should correspond to the partition number on the disk '
              'image, starting from partition 1.'))

    # Build the version information.
    version_string = u'log2timeline - plaso back-end {0:s}'.format(
        plaso.GetVersion())

    info_group.add_argument('-v',
                            '--version',
                            action='version',
                            version=version_string,
                            help='Show the current version of the back-end.')

    info_group.add_argument(
        '--info',
        dest='show_info',
        action='store_true',
        default=False,
        help='Print out information about supported plugins and parsers.')

    info_group.add_argument(
        '--show_memory_usage',
        '--show-memory-usage',
        action='store_true',
        default=False,
        dest='foreman_verbose',
        help=(u'Indicates that basic memory usage should be included in the '
              u'output of the process monitor. If this option is not set the '
              u'tool only displays basic status and counter information.'))

    info_group.add_argument(
        '--disable_worker_monitor',
        '--disable-worker-monitor',
        action='store_false',
        default=True,
        dest='foreman_enabled',
        help=
        (u'Turn off the foreman. The foreman monitors all worker processes '
         u'and periodically prints out information about all running workers. '
         u'By default the foreman is run, but it can be turned off using this '
         u'parameter.'))

    function_group.add_argument(
        '--use_old_preprocess',
        '--use-old-preprocess',
        dest='old_preprocess',
        action='store_true',
        default=False,
        help=(
            'Only used in conjunction when appending to a previous storage '
            'file. When this option is used then a new preprocessing object '
            'is not calculated and instead the last one that got added to '
            'the storage file is used. This can be handy when parsing an image '
            'that contains more than a single partition.'))

    function_group.add_argument(
        '--output',
        dest='output_module',
        action='store',
        type=unicode,
        default='',
        help=(
            'Bypass the storage module directly storing events according to '
            'the output module. This means that the output will not be in the '
            'pstorage format but in the format chosen by the output module. '
            '[Please note this feature is EXPERIMENTAL at this time, use at '
            'your own risk (e.g. sqlite output does not yet work)]'))

    info_group.add_argument('-d',
                            '--debug',
                            dest='debug',
                            action='store_true',
                            default=False,
                            help='Turn on debug information in the tool.')

    arg_parser.add_argument(
        'output',
        action='store',
        metavar='STORAGE_FILE',
        nargs='?',
        type=unicode,
        help=('The path to the output file; if the file exists it will get '
              'appended to.'))

    arg_parser.add_argument(
        'source',
        action='store',
        metavar='SOURCE',
        nargs='?',
        type=unicode,
        help=
        ('The path to the source device, file or directory. If the source is '
         'a supported storage media device or image file, archive file or '
         'a directory, the files within are processed recursively.'))

    arg_parser.add_argument(
        'filter',
        action='store',
        metavar='FILTER',
        nargs='?',
        default=None,
        type=unicode,
        help=('A filter that can be used to filter the dataset before it '
              'is written into storage. More information about the filters'
              ' and their usage can be found here: http://plaso.kiddaland.'
              'net/usage/filters'))

    # Properly prepare the attributes according to local encoding.
    if front_end.preferred_encoding == 'ascii':
        logging.warning(
            u'The preferred encoding of your system is ASCII, which is not optimal '
            u'for the typically non-ASCII characters that need to be parsed and '
            u'processed. The tool will most likely crash and die, perhaps in a way '
            u'that may not be recoverable. A five second delay is introduced to '
            u'give you time to cancel the runtime and reconfigure your preferred '
            u'encoding; otherwise continue at your own risk.')
        time.sleep(5)

    u_argv = [x.decode(front_end.preferred_encoding) for x in sys.argv]
    sys.argv = u_argv
    options = arg_parser.parse_args()

    if options.timezone == 'list':
        front_end.ListTimeZones()
        return True

    if options.show_info:
        front_end.ListPluginInformation()
        return True

    format_str = (
        u'%(asctime)s [%(levelname)s] (%(processName)-10s) PID:%(process)d '
        u'<%(module)s> %(message)s')

    if options.debug:
        if options.logfile:
            logging.basicConfig(level=logging.DEBUG,
                                format=format_str,
                                filename=options.logfile)
        else:
            logging.basicConfig(level=logging.DEBUG, format=format_str)

        logging_filter = LoggingFilter()
        root_logger = logging.getLogger()
        root_logger.addFilter(logging_filter)
    elif options.logfile:
        logging.basicConfig(level=logging.INFO,
                            format=format_str,
                            filename=options.logfile)
    else:
        logging.basicConfig(level=logging.INFO, format=format_str)

    if not options.output:
        arg_parser.print_help()
        print u''
        arg_parser.print_usage()
        print u''
        logging.error(u'Wrong usage: need to define an output.')
        return False

    try:
        front_end.ParseOptions(options, 'source')
        front_end.SetStorageFile(options.output)
    except errors.BadConfigOption as exception:
        arg_parser.print_help()
        print u''
        logging.error(u'{0:s}'.format(exception))
        return False

    # Check to see if we are trying to parse a mount point.
    if getattr(options, 'recursive', False):
        searcher = front_end.GetSourceFileSystemSearcher()

        guessed_os = preprocess_interface.GuessOS(searcher)
        if guessed_os != 'None':
            options.preprocess = True
            logging.info((u'Running against a mount point [{0:s}]. Turning on '
                          u'preprocessing.').format(guessed_os))
            logging.warning(
                u'It is highly recommended to run the tool directly against '
                u'the image, instead of parsing a mount point (you may get '
                u'inconsistent results depending on the driver you use to mount '
                u'the image). Please consider running against the raw image. '
                u'Processing will continue in 5 seconds.')
            time.sleep(5)

    # Configure the foreman (monitors workers).
    front_end.SetShowMemoryInformation(show_memory=options.foreman_verbose)
    front_end.SetRunForeman(run_foreman=options.foreman_enabled)

    try:
        front_end.ProcessSource(options)
        logging.info(u'Processing completed.')
    except KeyboardInterrupt:
        logging.warning(u'Aborted by user.')
        front_end.CleanUpAfterAbort()
        return False
    return True
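
Main() returns True on success and False on error; a plausible entry-point wrapper maps that onto the process exit code. This is a sketch only and the tool's actual wrapper may differ.

if __name__ == '__main__':
    # Sketch: translate the boolean result into a conventional exit status.
    if not Main():
        sys.exit(1)
    sys.exit(0)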
Example #7
File: preg.py  Project: f-s-p/plaso
  def GetHivesAndCollectors(
      self, options, registry_types=None, plugin_names=None):
    """Returns a list of discovered Registry hives and collectors.

    Args:
      options: the command line arguments (instance of argparse.Namespace).
      registry_types: an optional list of Registry types, e.g. NTUSER, SAM, etc.
                      that should be included. Defaults to None.
      plugin_names: an optional list of strings containing the name of the
                    plugin(s) or an empty string for all the types. Defaults to
                    None.

    Returns:
      A tuple of hives and searchers, where hives is a list that contains
      either a string (location of a Registry hive) or path specs (instance of
      dfvfs.path.path_spec.PathSpec). The searchers is a list of tuples that
      contain the name of the searcher and a searcher object (instance of
      dfvfs.helpers.file_system_searcher.FileSystemSearcher) or None (if no
      searcher is required).

    Raises:
      ValueError: If neither registry_types nor plugin_names is passed
                  as a parameter.
      BadConfigOption: If the source scanner is unable to complete due to
                       a source scanner error or back end error in dfvfs.
    """
    if registry_types is None and plugin_names is None:
      raise ValueError(
          u'Missing registry_types or plugin_names value.')

    if plugin_names is None:
      plugin_names = []
    else:
      plugin_names = [plugin_name.lower() for plugin_name in plugin_names]

    # TODO: use non-preprocess collector with filter to collect hives.

    # TODO: rewrite to always use collector or equiv.
    if not self._source_path:
      searchers = [(u'', None)]
      return registry_types, searchers

    try:
      self.ScanSource(options)
    except errors.SourceScannerError as exception:
      raise errors.BadConfigOption((
          u'Unable to scan for a supported filesystem with error: {0:s}\n'
          u'Most likely the image format is not supported by the '
          u'tool.').format(exception))

    searchers = self._GetSearchersForImage(self.GetSourcePathSpec().parent)
    _, searcher = searchers[0]

    # Run preprocessing on image.
    platform = preprocess_interface.GuessOS(searcher)

    preprocess_manager.PreprocessPluginsManager.RunPlugins(
        platform, searcher, PregCache.knowledge_base_object)

    # Create the keyword list if plugins are used.
    plugins_list = parsers_manager.ParsersManager.GetWindowsRegistryPlugins()
    if plugin_names:
      if registry_types is None:
        registry_types = []
      for plugin_name in plugin_names:
        if not plugin_name.startswith('winreg_'):
          plugin_name = u'winreg_{0:s}'.format(plugin_name)

        for plugin_cls in plugins_list.GetAllKeyPlugins():
          if plugin_name == plugin_cls.NAME.lower():
            # If a plugin is available for every Registry type
            # we need to make sure all Registry hives are included.
            if plugin_cls.REG_TYPE == u'any':
              for available_type in PregHiveHelper.REG_TYPES.iterkeys():
                if available_type == u'Unknown':
                  continue

                if available_type not in registry_types:
                  registry_types.append(available_type)

            if plugin_cls.REG_TYPE not in registry_types:
              registry_types.append(plugin_cls.REG_TYPE)

    # Find all the Registry paths we need to check.
    paths = []
    if registry_types:
      for registry_type in registry_types:
        paths.extend(self._GetRegistryFilePaths(
            registry_type=registry_type.upper()))
    else:
      for plugin_name in plugin_names:
        paths.extend(self._GetRegistryFilePaths(plugin_name=plugin_name))

    hives = []
    for path in paths:
      hives.extend(self._FindRegistryPaths(searcher, path))

    return hives, searchers
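
A hedged usage sketch for GetHivesAndCollectors: the front-end object and the options namespace are assumptions for illustration, while the call signature and the shape of the returned tuple follow from the docstring above.

# Sketch only: collect NTUSER hives and their searchers (object names assumed).
options = arg_parser.parse_args()  # argparse.Namespace built elsewhere
hives, searchers = preg_front_end.GetHivesAndCollectors(
    options, registry_types=[u'NTUSER'])

for hive in hives:
  # Each entry is either a location string or a dfvfs path specification.
  print hive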