Example #1
  def _ProcessEvent(self, mediator, event, event_data, event_data_stream):
    """Processes an event.

    Args:
      mediator (AnalysisMediator): mediates interactions between
          analysis plugins and other components, such as storage and dfvfs.
      event (EventObject): event.
      event_data (EventData): event data.
      event_data_stream (EventDataStream): event data stream.
    """
    try:
      self._analysis_plugin.ExamineEvent(
          mediator, event, event_data, event_data_stream)

    except Exception as exception:  # pylint: disable=broad-except
      # TODO: write analysis error and change logger to debug only.

      logger.warning('Unhandled exception while processing event object.')
      logger.exception(exception)
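
The broad except above keeps a faulty analysis plugin from taking down the whole worker: one bad event is logged and the caller moves on to the next. Below is a minimal, self-contained sketch of that catch-log-continue pattern; FlakyPlugin and the event tuples are hypothetical stand-ins, not plaso APIs:

import logging

logger = logging.getLogger(__name__)


class FlakyPlugin(object):
  """Hypothetical plugin whose ExamineEvent sometimes raises."""

  def ExamineEvent(self, mediator, event, event_data, event_data_stream):
    if event is None:
      raise RuntimeError('plugin bug')


def ProcessEvents(plugin, mediator, events):
  """Processes events, surviving per-event plugin failures."""
  for event, event_data, event_data_stream in events:
    try:
      plugin.ExamineEvent(mediator, event, event_data, event_data_stream)
    except Exception as exception:  # pylint: disable=broad-except
      # One broken event must not stop processing of the remaining events.
      logger.warning('Unhandled exception while processing event object.')
      logger.exception(exception)
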
Example #2
    def _ProcessPathSpec(self, extraction_worker, parser_mediator, path_spec):
        """Processes a path specification.

        Args:
          extraction_worker (worker.ExtractionWorker): extraction worker.
          parser_mediator (ParserMediator): parser mediator.
          path_spec (dfvfs.PathSpec): path specification.
        """
        excluded_find_specs = None
        if self._collection_filters_helper:
            excluded_find_specs = (
                self._collection_filters_helper.excluded_file_system_find_specs)

        self._current_display_name = parser_mediator.GetDisplayNameForPathSpec(
            path_spec)

        try:
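            # Keep a reference to the file system so later path specifications
            # can reuse it (presumably avoiding repeated opens of the image).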
            self._CacheFileSystem(path_spec)

            extraction_worker.ProcessPathSpec(
                parser_mediator,
                path_spec,
                excluded_find_specs=excluded_find_specs)

        except dfvfs_errors.CacheFullError:
            # TODO: signal engine of failure.
            self._abort = True
            logger.error((
                'ABORT: detected cache full error while processing path spec: '
                '{0:s}').format(self._current_display_name))

        except Exception as exception:  # pylint: disable=broad-except
            parser_mediator.ProduceExtractionWarning(
                ('unable to process path specification with error: '
                 '{0!s}').format(exception),
                path_spec=path_spec)

            if self._processing_configuration.debug_output:
                logger.warning((
                    'Unhandled exception while processing path specification: '
                    '{0:s}.').format(self._current_display_name))
                logger.exception(exception)
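
Note the two-tier error handling above: dfvfs_errors.CacheFullError is fatal (the worker sets self._abort and will leave its main loop), while any other exception becomes an extraction warning and processing continues with the next path specification. A rough sketch of the same fatal-versus-recoverable split follows; FatalError, handle_item and report_warning are hypothetical stand-ins for the plaso machinery:

import logging

logger = logging.getLogger(__name__)


class FatalError(Exception):
    """Hypothetical error that should stop the whole worker."""


def process_items(items, handle_item, report_warning):
    """Processes items, aborting on fatal errors only."""
    abort = False
    for item in items:
        try:
            handle_item(item)
        except FatalError:
            # Fatal condition: stop consuming further items.
            abort = True
            logger.error('ABORT: fatal error while processing: {0!s}'.format(
                item))
            break
        except Exception as exception:  # pylint: disable=broad-except
            # Recoverable: record a warning and move to the next item.
            report_warning(
                'unable to process item with error: {0!s}'.format(exception))
    return abort
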
Example #3
    def _Main(self):
        """The main loop."""
        # We need a resolver context per process to prevent multiprocessing
        # issues with file objects stored in images.
        self._resolver_context = context.Context()

        for credential_configuration in (
                self._processing_configuration.credentials):
            resolver.Resolver.key_chain.SetCredential(
                credential_configuration.path_spec,
                credential_configuration.credential_type,
                credential_configuration.credential_data)

        self._parser_mediator = self._CreateParserMediator(
            self._knowledge_base, self._resolver_context,
            self._processing_configuration)

        # We need to initialize the parser and hasher objects after the process
        # has forked; otherwise, on Windows, the "fork" will fail with
        # a PickleError for Python modules that cannot be pickled.
        self._extraction_worker = worker.EventExtractionWorker(
            parser_filter_expression=(
                self._processing_configuration.parser_filter_expression))

        self._extraction_worker.SetExtractionConfiguration(
            self._processing_configuration.extraction)

        self._parser_mediator.StartProfiling(
            self._processing_configuration.profiling, self._name,
            self._process_information)
        self._StartProfiling(self._processing_configuration.profiling)

        if self._analyzers_profiler:
            self._extraction_worker.SetAnalyzersProfiler(
                self._analyzers_profiler)

        if self._processing_profiler:
            self._extraction_worker.SetProcessingProfiler(
                self._processing_profiler)

        logger.debug('Worker: {0!s} (PID: {1:d}) started.'.format(
            self._name, self._pid))

        self._status = definitions.STATUS_INDICATOR_RUNNING

        try:
            logger.debug(
                '{0!s} (PID: {1:d}) started monitoring task queue.'.format(
                    self._name, self._pid))

            while not self._abort:
                try:
                    task = self._task_queue.PopItem()
                except (errors.QueueClose, errors.QueueEmpty) as exception:
                    logger.debug(
                        'ConsumeItems exiting with exception: {0!s}.'.format(
                            type(exception)))
                    break

                if isinstance(task, plaso_queue.QueueAbort):
                    logger.debug(
                        'ConsumeItems exiting, dequeued QueueAbort object.')
                    break

                self._ProcessTask(task)

            logger.debug(
                '{0!s} (PID: {1:d}) stopped monitoring task queue.'.format(
                    self._name, self._pid))

        # All exceptions need to be caught here to prevent the process
        # from being killed by an uncaught exception.
        except Exception as exception:  # pylint: disable=broad-except
            logger.warning(
                'Unhandled exception in process: {0!s} (PID: {1:d}).'.format(
                    self._name, self._pid))
            logger.exception(exception)

            self._abort = True

        if self._analyzers_profiler:
            self._extraction_worker.SetAnalyzersProfiler(None)

        if self._processing_profiler:
            self._extraction_worker.SetProcessingProfiler(None)

        self._StopProfiling()
        self._parser_mediator.StopProfiling()

        self._extraction_worker = None
        self._file_system_cache = []
        self._parser_mediator = None
        self._resolver_context = None

        if self._abort:
            self._status = definitions.STATUS_INDICATOR_ABORTED
        else:
            self._status = definitions.STATUS_INDICATOR_COMPLETED

        logger.debug('Worker: {0!s} (PID: {1:d}) stopped.'.format(
            self._name, self._pid))

        try:
            self._task_queue.Close(abort=self._abort)
        except errors.QueueAlreadyClosed:
            logger.error('Queue for {0:s} was already closed.'.format(
                self.name))
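
The loop above exits on two kinds of signals: queue-level exceptions (errors.QueueClose, errors.QueueEmpty) and a QueueAbort sentinel object that the foreman places on the queue, which lets a producer request a clean shutdown without closing the queue. A compact sketch of the same sentinel-driven consumer, using the standard library queue module rather than plaso's plaso_queue:

import queue

_QUEUE_ABORT = object()  # stands in for plaso_queue.QueueAbort


def consume_tasks(task_queue, process_task, timeout=2.0):
    """Pops tasks until the queue drains or an abort sentinel arrives."""
    while True:
        try:
            task = task_queue.get(timeout=timeout)
        except queue.Empty:
            # Comparable to errors.QueueEmpty in the worker above.
            break
        if task is _QUEUE_ABORT:
            # Comparable to dequeuing a QueueAbort object.
            break
        process_task(task)


task_queue = queue.Queue()
task_queue.put('task-1')
task_queue.put(_QUEUE_ABORT)
consume_tasks(task_queue, print)
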
Example #4
    def _Main(self):
        """The main loop."""
        self._StartProfiling(self._processing_configuration.profiling)

        logger.debug('Analysis plugin: {0!s} (PID: {1:d}) started'.format(
            self._name, self._pid))

        # Creating the threading event in the constructor will cause a pickle
        # error on Windows when an analysis process is created.
        self._foreman_status_wait_event = threading.Event()
        self._status = definitions.STATUS_INDICATOR_ANALYZING

        task = tasks.Task()
        task.storage_format = definitions.STORAGE_FORMAT_SQLITE
        # TODO: temporary solution.
        task.identifier = self._analysis_plugin.plugin_name

        self._task = task

        task_storage_writer = self._storage_factory.CreateTaskStorageWriter(
            definitions.STORAGE_FORMAT_SQLITE)

        if self._serializers_profiler:
            task_storage_writer.SetSerializersProfiler(
                self._serializers_profiler)

        if self._storage_profiler:
            task_storage_writer.SetStorageProfiler(self._storage_profiler)

        storage_file_path = self._GetTaskStorageFilePath(
            definitions.STORAGE_FORMAT_SQLITE, task)
        task_storage_writer.Open(path=storage_file_path)

        self._analysis_mediator = self._CreateAnalysisMediator(
            self._session, self._knowledge_base,
            self._processing_configuration, self._data_location)
        self._analysis_mediator.SetStorageWriter(task_storage_writer)

        # TODO: set event_filter_expression in mediator.

        task_storage_writer.AddAttributeContainer(task)

        try:
            logger.debug(
                '{0!s} (PID: {1:d}) started monitoring event queue.'.format(
                    self._name, self._pid))

            while not self._abort:
                try:
                    queued_object = self._event_queue.PopItem()

                except (errors.QueueClose, errors.QueueEmpty) as exception:
                    logger.debug(
                        'ConsumeItems exiting with exception: {0!s}.'.format(
                            type(exception)))
                    break

                if isinstance(queued_object, plaso_queue.QueueAbort):
                    logger.debug(
                        'ConsumeItems exiting, dequeued QueueAbort object.')
                    break

                self._ProcessEvent(self._analysis_mediator, *queued_object)

                self._number_of_consumed_events += 1

            logger.debug(
                '{0!s} (PID: {1:d}) stopped monitoring event queue.'.format(
                    self._name, self._pid))

            if not self._abort:
                self._status = definitions.STATUS_INDICATOR_REPORTING

                self._analysis_mediator.ProduceAnalysisReport(
                    self._analysis_plugin)

        # All exceptions need to be caught here to prevent the process
        # from being killed by an uncaught exception.
        except Exception as exception:  # pylint: disable=broad-except
            logger.warning(
                'Unhandled exception in process: {0!s} (PID: {1:d}).'.format(
                    self._name, self._pid))
            logger.exception(exception)

            self._abort = True

        finally:
            task.aborted = self._abort
            task_storage_writer.UpdateAttributeContainer(task)

            task_storage_writer.Close()

            if self._serializers_profiler:
                task_storage_writer.SetSerializersProfiler(None)

            if self._storage_profiler:
                task_storage_writer.SetStorageProfiler(None)

        try:
            self._FinalizeTaskStorageWriter(
                definitions.STORAGE_FORMAT_SQLITE, task)
        except IOError as exception:
            logger.warning(
                'Unable to finalize task storage with error: {0!s}'.format(
                    exception))

        if self._abort:
            self._status = definitions.STATUS_INDICATOR_ABORTED
        else:
            self._status = definitions.STATUS_INDICATOR_COMPLETED

        logger.debug('Waiting for foreman status wait event.')
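        # Block, bounded by _FOREMAN_STATUS_WAIT, presumably so the foreman
        # can collect the final status before the plugin state is torn down.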
        self._foreman_status_wait_event.clear()
        self._foreman_status_wait_event.wait(self._FOREMAN_STATUS_WAIT)

        logger.debug('Analysis plugin: {0!s} (PID: {1:d}) stopped'.format(
            self._name, self._pid))

        self._StopProfiling()

        self._analysis_mediator = None
        self._foreman_status_wait_event = None
        self._task = None

        try:
            self._event_queue.Close(abort=self._abort)
        except errors.QueueAlreadyClosed:
            logger.error('Queue for {0:s} was already closed.'.format(
                self.name))
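
The try/except/finally above guarantees the task container is updated and the task storage writer closed even when the plugin raises. A minimal sketch of that lifecycle follows; the writer's update and close methods are hypothetical, not the plaso storage API:

import logging

logger = logging.getLogger(__name__)


def run_with_writer(writer, task, consume_events):
    """Runs an event loop, always finalizing the writer."""
    aborted = False
    try:
        consume_events()
    except Exception as exception:  # pylint: disable=broad-except
        # Keep the process alive; mark the task as aborted instead.
        logger.warning('Unhandled exception in process.')
        logger.exception(exception)
        aborted = True
    finally:
        # Runs on success, failure and abort alike.
        task.aborted = aborted
        writer.update(task)
        writer.close()
    return aborted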