Example #1
    def testMain(self):
        """Tests the _Main function."""
        output_task_queue = zeromq_queue.ZeroMQBufferedReplyBindQueue(
            delay_open=True,
            linger_seconds=0,
            maximum_items=1,
            name='test output task queue',
            timeout_seconds=self._QUEUE_TIMEOUT)
        output_task_queue.Open()

        input_task_queue = zeromq_queue.ZeroMQRequestConnectQueue(
            delay_open=True,
            linger_seconds=0,
            name='test input task queue',
            port=output_task_queue.port,
            timeout_seconds=self._QUEUE_TIMEOUT)

        with shared_test_lib.TempDirectory() as temp_directory:
            configuration = configurations.ProcessingConfiguration()
            configuration.task_storage_path = temp_directory

            test_process = extraction_process.ExtractionWorkerProcess(
                input_task_queue, None, None, configuration, name='TestWorker')

            test_process.start()

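            # Push an abort item to wake the worker and signal it to stop,
            # then close the queue without waiting for remaining items.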
            output_task_queue.PushItem(plaso_queue.QueueAbort(), block=False)
            output_task_queue.Close(abort=True)
Example #2
  def testMain(self):
    """Tests the _Main function."""
    output_event_queue = zeromq_queue.ZeroMQPushBindQueue(
        name='test output event queue', timeout_seconds=self._QUEUE_TIMEOUT)
    output_event_queue.Open()

    input_event_queue = zeromq_queue.ZeroMQPullConnectQueue(
        name='test input event queue', delay_open=True,
        port=output_event_queue.port,
        timeout_seconds=self._QUEUE_TIMEOUT)

    session = sessions.Session()
    analysis_plugin = TestAnalysisPlugin()

    with shared_test_lib.TempDirectory() as temp_directory:
      # Set up the 'processed' directory for the task storage file generated
      # by the analysis plugin.
      os.mkdir(os.path.join(temp_directory, 'processed'))

      configuration = configurations.ProcessingConfiguration()
      configuration.task_storage_path = temp_directory

      test_process = analysis_process.AnalysisProcess(
          input_event_queue, None, session, analysis_plugin, configuration,
          name='TestAnalysis')
      test_process._FOREMAN_STATUS_WAIT = 1

      test_process.start()

      output_event_queue.PushItem(plaso_queue.QueueAbort(), block=False)
      output_event_queue.Close(abort=True)

      # Sleep for 1 second to allow the analysis process to terminate
      # before the temporary directory is removed.
      time.sleep(1)
Example #3
    def _StopAnalysisProcesses(self, abort=False):
        """Stops the analysis processes.

        Args:
          abort (bool): True to indicate that the stop is issued on abort.
        """
        logger.debug('Stopping analysis processes.')
        self._StopMonitoringProcesses()

        if abort:
            # Signal all the processes to abort.
            self._AbortTerminate()

        # Wake the processes to make sure that they are not blocked
        # waiting for new items in the queue.
        for event_queue in self._event_queues.values():
            event_queue.PushItem(plaso_queue.QueueAbort(), block=False)

        # Try waiting for the processes to exit normally.
        self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)
        for event_queue in self._event_queues.values():
            event_queue.Close(abort=abort)

        if abort:
            # Kill any remaining processes.
            self._AbortKill()
        else:
            # Check if the processes are still alive and terminate them if necessary.
            self._AbortTerminate()
            self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)

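            # The processes have already been terminated, so close the event
            # queues without waiting for any remaining items.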
            for event_queue in self._event_queues.values():
                event_queue.Close(abort=True)
Example #4
    def _StopExtractionProcesses(self, abort=False):
        """Stops the extraction processes.

        Args:
          abort (bool): True to indicate that the stop is issued on abort.
        """
        logger.debug('Stopping extraction processes.')
        self._StopMonitoringProcesses()

        if abort:
            # Signal all the processes to abort.
            self._AbortTerminate()

        logger.debug('Emptying task queue.')
        self._task_queue.Empty()

        # Wake the processes to make sure that they are not blocked
        # waiting for new items in the queue.
        for _ in self._processes_per_pid:
            try:
                self._task_queue.PushItem(plaso_queue.QueueAbort(),
                                          block=False)
            except errors.QueueFull:
                logger.warning(
                    'Task queue full, unable to push abort message.')

        # Try waiting for the processes to exit normally.
        self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)
        self._task_queue.Close(abort=abort)

        if not abort:
            # Check if the processes are still alive and terminate them if necessary.
            self._AbortTerminate()
            self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)
            self._task_queue.Close(abort=True)

        # Kill any lingering processes.
        self._AbortKill()
Example #5
    def _AnalyzeEvents(self,
                       storage_writer,
                       analysis_plugins,
                       event_filter=None):
        """Analyzes events in a Plaso storage.

        Args:
          storage_writer (StorageWriter): storage writer.
          analysis_plugins (dict[str, AnalysisPlugin]): analysis plugins that
              should be run and their names.
          event_filter (Optional[EventObjectFilter]): event filter.

        Returns:
          collections.Counter: counter containing information about the events
              processed and filtered.

        Raises:
          RuntimeError: if a non-recoverable situation is encountered.
        """
        self._status = definitions.STATUS_INDICATOR_RUNNING
        self._number_of_consumed_analysis_reports = 0
        self._number_of_consumed_events = 0
        self._number_of_consumed_event_tags = 0
        self._number_of_consumed_sources = 0
        self._number_of_produced_analysis_reports = 0
        self._number_of_produced_events = 0
        self._number_of_produced_event_tags = 0
        self._number_of_produced_sources = 0

        number_of_filtered_events = 0

        logger.debug('Processing events.')

        filter_limit = getattr(event_filter, 'limit', None)

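        # Iterate over the events in sorted order, resolving the associated
        # event data, event data stream and event tag before filtering.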
        for event in storage_writer.GetSortedEvents():
            event_data_identifier = event.GetEventDataIdentifier()
            event_data = storage_writer.GetAttributeContainerByIdentifier(
                events.EventData.CONTAINER_TYPE, event_data_identifier)

            event_data_stream_identifier = (
                event_data.GetEventDataStreamIdentifier())
            if event_data_stream_identifier:
                event_data_stream = storage_writer.GetAttributeContainerByIdentifier(
                    events.EventDataStream.CONTAINER_TYPE,
                    event_data_stream_identifier)
            else:
                event_data_stream = None

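            # Look up an existing tag for this event via the event tag index.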
            event_identifier = event.GetIdentifier()
            event_tag = self._event_tag_index.GetEventTagByIdentifier(
                storage_writer, event_identifier)

            if event_filter:
                filter_match = event_filter.Match(event, event_data,
                                                  event_data_stream, event_tag)
            else:
                filter_match = None

            # pylint: disable=singleton-comparison
            if filter_match == False:
                number_of_filtered_events += 1
                continue

            for event_queue in self._event_queues.values():
                # TODO: Check for premature exit of analysis plugins.
                event_queue.PushItem((event, event_data, event_data_stream))

            self._number_of_consumed_events += 1

            if (event_filter and filter_limit
                    and filter_limit == self._number_of_consumed_events):
                break

        logger.debug('Finished pushing events to analysis plugins.')
        # Signal that we have finished adding events.
        for event_queue in self._event_queues.values():
            event_queue.PushItem(plaso_queue.QueueAbort(), block=False)

        logger.debug('Processing analysis plugin results.')

        # TODO: use a task based approach.
        plugin_names = list(analysis_plugins.keys())
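        # Poll each plugin until its task storage is ready to be merged and
        # then merge its attribute containers into the main storage.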
        while plugin_names:
            for plugin_name in list(plugin_names):
                if self._abort:
                    break

                # TODO: temporary solution.
                task = tasks.Task()
                task.storage_format = definitions.STORAGE_FORMAT_SQLITE
                task.identifier = plugin_name

                merge_ready = self._CheckTaskReadyForMerge(
                    definitions.STORAGE_FORMAT_SQLITE, task)
                if merge_ready:
                    self._PrepareMergeTaskStorage(
                        definitions.STORAGE_FORMAT_SQLITE, task)
                    self._status = definitions.STATUS_INDICATOR_MERGING

                    event_queue = self._event_queues[plugin_name]
                    del self._event_queues[plugin_name]

                    event_queue.Close()

                    task_storage_reader = self._GetMergeTaskStorage(
                        definitions.STORAGE_FORMAT_SQLITE, task)

                    try:
                        merge_helper = merge_helpers.AnalysisTaskMergeHelper(
                            task_storage_reader, task.identifier)

                        logger.debug('Starting merge of task: {0:s}'.format(
                            merge_helper.task_identifier))

                        number_of_containers = self._MergeAttributeContainers(
                            storage_writer, merge_helper)

                        logger.debug(
                            'Merged {0:d} containers of task: {1:s}'.format(
                                number_of_containers,
                                merge_helper.task_identifier))

                    finally:
                        task_storage_reader.Close()

                    self._RemoveMergeTaskStorage(
                        definitions.STORAGE_FORMAT_SQLITE, task)

                    self._status = definitions.STATUS_INDICATOR_RUNNING

                    # TODO: temporary solution.
                    plugin_names.remove(plugin_name)

        events_counter = collections.Counter()
        events_counter['Events filtered'] = number_of_filtered_events
        events_counter['Events processed'] = self._number_of_consumed_events

        return events_counter