Example #1
    def PopItem(self):
        """Pops an item off the queue.

    If no ZeroMQ socket has been created, one will be created the first
    time this method is called.

    Raises:
      QueueEmpty: If the queue is empty, and no item could be popped within the
                  queue timeout.
      zmq.error.ZMQError: If an error is encountered by ZeroMQ.
    """
        logging.debug(u'Pop on {0:s} queue, port {1:d}'.format(
            self.name, self.port))
        if not self._zmq_socket:
            self._CreateZMQSocket()
        try:
            return self._queue.get(timeout=self._buffer_timeout_seconds)
        except Queue.Empty:
            return plaso_queue.QueueAbort()
        except zmq.error.ZMQError as exception:
            if exception.errno == errno.EINTR:
                logging.error(u'ZMQ syscall interrupted in {0:s}.'.format(
                    self.name))
                return plaso_queue.QueueAbort()
            else:
                raise
        except KeyboardInterrupt:
            self.Close(abort=True)
            raise
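The QueueAbort return value doubles as an end-of-stream sentinel for consumers. As a minimal sketch (not part of the original code; ConsumeItems and process_item are illustrative names, and the plaso_queue import path is an assumption), a consumer loop built on this PopItem might look like:

from plaso.engine import plaso_queue  # assumed import path

def ConsumeItems(queue, process_item):
    # PopItem returns a QueueAbort instance on timeout or interruption,
    # so the loop treats it as an end-of-stream marker.
    while True:
        item = queue.PopItem()
        if isinstance(item, plaso_queue.QueueAbort):
            break
        process_item(item)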
Example #2
  def _StopAnalysisProcesses(self, abort=False):
    """Stops the analysis processes.

    Args:
      abort (bool): True to indicate the stop is issued on abort.
    """
    logger.debug('Stopping analysis processes.')
    self._StopMonitoringProcesses()

    if abort:
      # Signal all the processes to abort.
      self._AbortTerminate()

    # Wake the processes to make sure that they are not blocked waiting
    # for new queue items.
    for event_queue in self._event_queues.values():
      event_queue.PushItem(plaso_queue.QueueAbort(), block=False)

    # Try waiting for the processes to exit normally.
    self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)
    for event_queue in self._event_queues.values():
      event_queue.Close(abort=abort)

    if abort:
      # Kill any remaining processes.
      self._AbortKill()
    else:
      # Check if the processes are still alive and terminate them if necessary.
      self._AbortTerminate()
      self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)

      for event_queue in self._event_queues.values():
        event_queue.Close(abort=True)
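This stop sequence follows a common escalation pattern: wake blocked consumers with a sentinel, give them a chance to exit cleanly via a join with a timeout, then terminate whatever is left. A minimal stdlib sketch of the same pattern, assuming multiprocessing workers that stop on a sentinel (all names are illustrative):

import queue

SENTINEL = None  # stand-in for plaso_queue.QueueAbort()

def StopWorkers(workers, work_queue, join_timeout=5.0):
    # Wake any worker blocked on a get() so it can observe shutdown.
    for _ in workers:
        try:
            work_queue.put_nowait(SENTINEL)
        except queue.Full:
            pass

    # Prefer a clean exit: join with a timeout first.
    for worker in workers:
        worker.join(timeout=join_timeout)

    # Escalate: terminate anything still alive.
    for worker in workers:
        if worker.is_alive():
            worker.terminate()
            worker.join()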
Example #3
    def testMain(self):
        """Tests the _Main function."""
        output_task_queue = zeromq_queue.ZeroMQBufferedReplyBindQueue(
            delay_open=True,
            linger_seconds=0,
            maximum_items=1,
            name='test output task queue',
            timeout_seconds=self._QUEUE_TIMEOUT)
        output_task_queue.Open()

        input_task_queue = zeromq_queue.ZeroMQRequestConnectQueue(
            delay_open=True,
            linger_seconds=0,
            name='test input task queue',
            port=output_task_queue.port,
            timeout_seconds=self._QUEUE_TIMEOUT)

        configuration = configurations.ProcessingConfiguration()

        test_process = worker_process.WorkerProcess(input_task_queue,
                                                    None,
                                                    None,
                                                    None,
                                                    None,
                                                    configuration,
                                                    name='TestWorker')

        test_process.start()

        output_task_queue.PushItem(plaso_queue.QueueAbort(), block=False)
        output_task_queue.Close(abort=True)
Example #4
  def testMain(self):
    """Tests the _Main function."""
    output_event_queue = zeromq_queue.ZeroMQPushBindQueue(
        name='test output event queue', timeout_seconds=self._QUEUE_TIMEOUT)
    output_event_queue.Open()

    input_event_queue = zeromq_queue.ZeroMQPullConnectQueue(
        name='test input event queue', delay_open=True,
        port=output_event_queue.port,
        timeout_seconds=self._QUEUE_TIMEOUT)

    session = sessions.Session()
    storage_writer = self._CreateStorageWriter(session)
    analysis_plugin = TestAnalysisPlugin()

    configuration = configurations.ProcessingConfiguration()

    test_process = analysis_process.AnalysisProcess(
        input_event_queue, storage_writer, None, analysis_plugin, configuration,
        name='TestAnalysis')
    test_process._FOREMAN_STATUS_WAIT = 1

    test_process.start()

    output_event_queue.PushItem(plaso_queue.QueueAbort(), block=False)
    output_event_queue.Close(abort=True)
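Under the hood this bind/connect pairing is plain ZeroMQ PUSH/PULL. A minimal pyzmq sketch of the transport the test exercises (endpoint and payload are illustrative):

import zmq

context = zmq.Context()

# The push side binds and exposes its port, as ZeroMQPushBindQueue does.
push_socket = context.socket(zmq.PUSH)
port = push_socket.bind_to_random_port('tcp://127.0.0.1')

# The pull side connects to that port, as ZeroMQPullConnectQueue does.
pull_socket = context.socket(zmq.PULL)
pull_socket.setsockopt(zmq.RCVTIMEO, 1000)  # receive timeout in milliseconds
pull_socket.connect('tcp://127.0.0.1:{0:d}'.format(port))

push_socket.send_pyobj({'message': 'hello'})
print(pull_socket.recv_pyobj())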
Example #5
    def _ZeroMQResponder(self, source_queue, socket, terminate_event):
        """Listens for requests and replies to clients.

    Args:
      source_queue: The queue to uses to pull items from.
      socket: The socket to listen to, and send responses to.
      terminate_event: The event that signals that the queue should terminate.

    Raises:
      QueueEmpty: If a timeout occurs when trying to reply to a request.
      zmq.error.ZMQError: If an error occurs in ZeroMQ.
    """
        logging.debug(u'ZeroMQ responder thread started')
        while not terminate_event.isSet():
            try:
                # We need to receive a request before we send.
                _ = socket.recv()
            except zmq.error.Again:
                logging.debug(u'{0:s} did not receive a request within the '
                              u'timeout of {1:d} seconds.'.format(
                                  self.name, self.timeout_seconds))
                continue
            except zmq.error.ZMQError as exception:
                if exception.errno == errno.EINTR:
                    logging.error(u'ZMQ syscall interrupted in {0:s}.'.format(
                        self.name))
                    break
                else:
                    raise

            try:
                if self._closed:
                    item = source_queue.get_nowait()
                else:
                    item = source_queue.get(True, self._buffer_timeout_seconds)
            except Queue.Empty:
                item = plaso_queue.QueueAbort()

            try:
                self._zmq_socket.send_pyobj(item)
            except zmq.error.Again:
                logging.debug(
                    u'{0:s} could not reply to a request within {1:d} seconds.'
                    .format(self.name, self.timeout_seconds))
                raise errors.QueueEmpty
            except zmq.error.ZMQError as exception:
                if exception.errno == errno.EINTR:
                    logging.error(u'ZMQ syscall interrupted in {0:s}.'.format(
                        self.name))
                    break
                else:
                    raise
        socket.close(self._linger_seconds)
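The "receive a request before we send" comment reflects the REQ/REP lockstep: a REP socket must receive before it may reply, and zmq.error.Again signals that the receive timed out. A minimal pyzmq sketch of that lockstep (endpoint, loop bound, and reply payload are illustrative):

import zmq

context = zmq.Context()
reply_socket = context.socket(zmq.REP)
reply_socket.setsockopt(zmq.RCVTIMEO, 1000)  # receive timeout in milliseconds
port = reply_socket.bind_to_random_port('tcp://127.0.0.1')

for _ in range(10):  # bounded here; the responder above loops until signaled
    try:
        _ = reply_socket.recv()  # a request must arrive first
    except zmq.error.Again:
        continue  # no request within the timeout; poll again
    reply_socket.send_pyobj('next item')  # the reply unblocks the requester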
Example #6
    def _ZeroMQResponder(self, source_queue, socket, terminate_event):
        """Listens for requests and replies to clients.

    Args:
      source_queue: The queue to uses to pull items from.
      socket: The socket to listen to, and send responses to.
      terminate_event: The event that signals that the queue should terminate.

    Raises:
      zmq.error.ZMQError: If an error is encountered by ZeroMQ.
    """
        logging.debug(u'ZeroMQ responder thread started')
        while not terminate_event.isSet():
            try:
                item = socket.recv_pyobj()
            except zmq.error.Again:
                # No item received within timeout.
                item = plaso_queue.QueueAbort()
            except zmq.error.ZMQError as exception:
                if exception.errno == errno.EINTR:
                    logging.error(u'ZMQ syscall interrupted in {0:s}.'.format(
                        self.name))
                    break
                else:
                    raise

            retries = 0
            while retries < self._DOWNSTREAM_QUEUE_MAX_TRIES:
                try:
                    self._queue.put(item, timeout=self._buffer_timeout_seconds)
                    break
                except Queue.Full:
                    logging.debug(u'Queue {0:s} buffer limit hit.'.format(
                        self.name))
                    retries += 1
                    if retries >= self._DOWNSTREAM_QUEUE_MAX_TRIES:
                        logging.error(
                            u'Queue {0:s} unserved for too long, '
                            u'aborting'.format(self.name))
                        break
                    else:
                        time.sleep(self._DOWNSTREAM_QUEUE_SLEEP_TIME)
                        continue
        logging.info(u'Queue {0:s} responder exiting.'.format(self.name))
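The inner loop is a bounded-retry put: on a full buffer, sleep and try again a fixed number of times before giving up. A generic stdlib sketch of the same pattern (names and defaults are illustrative):

import queue
import time

def PutWithRetries(buffer_queue, item, max_tries=3, sleep_time=0.5,
                   timeout=1.0):
    # Returns True if the item was queued, False if the buffer stayed full.
    for attempt in range(max_tries):
        try:
            buffer_queue.put(item, timeout=timeout)
            return True
        except queue.Full:
            if attempt + 1 < max_tries:
                time.sleep(sleep_time)
    return False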
Example #7
    def _StopExtractionProcesses(self, abort=False):
        """Stops the extraction processes.

    Args:
      abort (bool): True to indicated the stop is issued on abort.
    """
        logger.debug('Stopping extraction processes.')
        self._StopMonitoringProcesses()

        # Note that multiprocessing.Queue is very sensitive regarding
        # blocking on either a get or a put. So we try to prevent using
        # any blocking behavior.

        if abort:
            # Signal all the processes to abort.
            self._AbortTerminate()

        logger.debug('Emptying task queue.')
        self._task_queue.Empty()

        # Wake the processes to make sure that they are not blocked waiting
        # for new queue items.
        for _ in self._processes_per_pid:
            try:
                self._task_queue.PushItem(plaso_queue.QueueAbort(),
                                          block=False)
            except errors.QueueFull:
                logger.warning(
                    'Task queue full, unable to push abort message.')

        # Try waiting for the processes to exit normally.
        self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)
        self._task_queue.Close(abort=abort)

        if not abort:
            # Check if the processes are still alive and terminate them if necessary.
            self._AbortTerminate()
            self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)
            self._task_queue.Close(abort=True)

        # Kill any lingering processes.
        self._AbortKill()
Example #8
    def _StopAnalysisProcesses(self, abort=False):
        """Stops the analysis processes.

    Args:
      abort (bool): True to indicated the stop is issued on abort.
    """
        logger.debug('Stopping analysis processes.')
        self._StopMonitoringProcesses()

        # Note that multiprocessing.Queue is very sensitive regarding
        # blocking on either a get or a put. So we try to prevent using
        # any blocking behavior.

        if abort:
            # Signal all the processes to abort.
            self._AbortTerminate()

        if not self._use_zeromq:
            logger.debug('Emptying queues.')
            for event_queue in self._event_queues.values():
                event_queue.Empty()

        # Wake the processes to make sure that they are not blocked waiting
        # for new queue items.
        for event_queue in self._event_queues.values():
            event_queue.PushItem(plaso_queue.QueueAbort(), block=False)

        # Try waiting for the processes to exit normally.
        self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)
        for event_queue in self._event_queues.values():
            event_queue.Close(abort=abort)

        if abort:
            # Kill any remaining processes.
            self._AbortKill()
        else:
            # Check if the processes are still alive and terminate them if necessary.
            self._AbortTerminate()
            self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)

            for event_queue in self._event_queues.values():
                event_queue.Close(abort=True)
Example #9
    def testMain(self):
        """Tests the _Main function."""
        output_event_queue = zeromq_queue.ZeroMQPushBindQueue(
            name='test output event queue',
            timeout_seconds=self._QUEUE_TIMEOUT)
        output_event_queue.Open()

        input_event_queue = zeromq_queue.ZeroMQPullConnectQueue(
            name='test input event queue',
            delay_open=True,
            port=output_event_queue.port,
            timeout_seconds=self._QUEUE_TIMEOUT)

        session = sessions.Session()
        analysis_plugin = TestAnalysisPlugin()

        with shared_test_lib.TempDirectory() as temp_directory:
            # Set up the 'processed' directory for the task storage file
            # generated by the analysis plugin.
            os.mkdir(os.path.join(temp_directory, 'processed'))

            configuration = configurations.ProcessingConfiguration()
            configuration.task_storage_path = temp_directory

            test_process = analysis_process.AnalysisProcess(
                input_event_queue,
                None,
                session,
                analysis_plugin,
                configuration,
                name='TestAnalysis')
            test_process._FOREMAN_STATUS_WAIT = 1

            test_process.start()

            output_event_queue.PushItem(plaso_queue.QueueAbort(), block=False)
            output_event_queue.Close(abort=True)

            # Sleep for 1 second to allow the analysis process to terminate
            # before the temporary directory is removed.
            time.sleep(1)
Example #10
    def PushItem(self, item, block=True):
        """Push an item on to the queue.

    If no ZeroMQ socket has been created, one will be created the first time
    this method is called.

    Args:
      item: The item to push on to the queue.
      block: Optional argument to indicate whether the push should be performed
             in blocking or non-block mode.

    Raises:
      KeyboardInterrupt: If the process is sent a KeyboardInterrupt while
                         pushing an item.
      zmq.error.Again: If it was not possible to push the item to the queue
                       within the timeout.
      zmq.error.ZMQError: If a ZeroMQ specific error occurs.
    """
        logging.debug(u'Push on {0:s} queue, port {1:d}'.format(
            self.name, self.port))
        if not self._zmq_socket:
            self._CreateZMQSocket()
        try:
            if block:
                self._zmq_socket.send_pyobj(item)
            else:
                self._zmq_socket.send_pyobj(item, zmq.DONTWAIT)
        except zmq.error.Again:
            logging.error(u'{0:s} unable to push item, raising.'.format(
                self.name))
            raise
        except zmq.error.ZMQError as exception:
            if exception.errno == errno.EINTR:
                logging.error(u'ZMQ syscall interrupted in {0:s}.'.format(
                    self.name))
                return plaso_queue.QueueAbort()
            else:
                raise
        except KeyboardInterrupt:
            self.Close(abort=True)
            raise
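The block flag maps onto ZeroMQ's zmq.DONTWAIT send flag: a non-blocking send raises zmq.error.Again immediately when the item cannot be queued, instead of waiting out the send timeout. A minimal pyzmq sketch (socket setup is illustrative; with no connected peer the non-blocking send fails fast):

import zmq

context = zmq.Context()
push_socket = context.socket(zmq.PUSH)
push_socket.setsockopt(zmq.SNDTIMEO, 1000)  # bounds blocking sends, in ms
push_socket.bind('tcp://127.0.0.1:5555')

try:
    # With zmq.DONTWAIT the send fails fast instead of blocking.
    push_socket.send_pyobj({'item': 1}, zmq.DONTWAIT)
except zmq.error.Again:
    print('send buffer full; item not queued')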
Example #11
    def PopItem(self):
        """Pops an item off the queue.

    If no ZeroMQ socket has been created, one will be created the first
    time this method is called.

    Returns:
      The item retrieved from the queue, as a deserialized Python object.

    Raises:
      KeyboardInterrupt: If the process is sent a KeyboardInterrupt while
                         popping an item.
      QueueEmpty: If the queue is empty, and no item could be popped within the
                  queue timeout.
      zmq.error.ZMQError: If an error occurs in ZeroMQ.

    """
        logging.debug(u'Pop on {0:s} queue, port {1:d}'.format(
            self.name, self.port))
        if not self._zmq_socket:
            self._CreateZMQSocket()
        try:
            self._zmq_socket.send_pyobj(None)
            return self._zmq_socket.recv_pyobj()
        except zmq.error.Again:
            raise errors.QueueEmpty
        except zmq.error.ZMQError as exception:
            if exception.errno == errno.EINTR:
                logging.error(
                    u'ZMQ syscall interrupted in {0:s}. Queue '
                    u'aborting.'.format(self.name))
                return plaso_queue.QueueAbort()
            else:
                raise
        except KeyboardInterrupt:
            self.Close(abort=True)
            raise
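This is the requester half of the REQ/REP pair shown in Example #5: the client sends a content-free request, then blocks on the reply until the receive timeout. A minimal pyzmq sketch of the same round trip (the endpoint is illustrative and assumes a REP responder is listening there):

import zmq

context = zmq.Context()
request_socket = context.socket(zmq.REQ)
request_socket.setsockopt(zmq.RCVTIMEO, 1000)  # receive timeout in ms
request_socket.connect('tcp://127.0.0.1:5555')

request_socket.send_pyobj(None)  # the payload is irrelevant; it is a signal
try:
    item = request_socket.recv_pyobj()
except zmq.error.Again:
    # Mirrors the QueueEmpty behavior above: no reply within the timeout.
    raise TimeoutError('no reply within the queue timeout')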
Example #12
  def _AnalyzeEvents(self, storage_writer, analysis_plugins, event_filter=None):
    """Analyzes events in a plaso storage.

    Args:
      storage_writer (StorageWriter): storage writer.
      analysis_plugins (dict[str, AnalysisPlugin]): analysis plugins that
          should be run and their names.
      event_filter (Optional[EventObjectFilter]): event filter.

    Returns:
      collections.Counter: counter containing information about the events
          processed and filtered.

    Raises:
      RuntimeError: if a non-recoverable situation is encountered.
    """
    self._status = definitions.STATUS_INDICATOR_RUNNING
    self._number_of_consumed_events = 0
    self._number_of_consumed_reports = 0
    self._number_of_consumed_sources = 0
    self._number_of_consumed_warnings = 0
    self._number_of_produced_events = 0
    self._number_of_produced_reports = 0
    self._number_of_produced_sources = 0
    self._number_of_produced_warnings = 0

    number_of_filtered_events = 0

    logger.debug('Processing events.')

    filter_limit = getattr(event_filter, 'limit', None)

    for event in storage_writer.GetSortedEvents():
      event_data_identifier = event.GetEventDataIdentifier()
      event_data = storage_writer.GetEventDataByIdentifier(
          event_data_identifier)

      event_data_stream_identifier = event_data.GetEventDataStreamIdentifier()
      if event_data_stream_identifier:
        event_data_stream = storage_writer.GetEventDataStreamByIdentifier(
            event_data_stream_identifier)
      else:
        event_data_stream = None

      event_identifier = event.GetIdentifier()
      event_tag = self._event_tag_index.GetEventTagByIdentifier(
          storage_writer, event_identifier)

      if event_filter:
        filter_match = event_filter.Match(
            event, event_data, event_data_stream, event_tag)
      else:
        filter_match = None

      # pylint: disable=singleton-comparison
      if filter_match == False:
        number_of_filtered_events += 1
        continue

      for event_queue in self._event_queues.values():
        # TODO: Check for premature exit of analysis plugins.
        event_queue.PushItem((event, event_data, event_data_stream))

      self._number_of_consumed_events += 1

      if (event_filter and filter_limit and
          filter_limit == self._number_of_consumed_events):
        break

    logger.debug('Finished pushing events to analysis plugins.')
    # Signal that we have finished adding events.
    for event_queue in self._event_queues.values():
      event_queue.PushItem(plaso_queue.QueueAbort(), block=False)

    logger.debug('Processing analysis plugin results.')

    # TODO: use a task based approach.
    plugin_names = list(analysis_plugins.keys())
    while plugin_names:
      for plugin_name in list(plugin_names):
        if self._abort:
          break

        # TODO: temporary solution.
        task = tasks.Task()
        task.storage_format = definitions.STORAGE_FORMAT_SQLITE
        task.identifier = plugin_name

        merge_ready = storage_writer.CheckTaskReadyForMerge(task)
        if merge_ready:
          storage_writer.PrepareMergeTaskStorage(task)
          self._status = definitions.STATUS_INDICATOR_MERGING

          event_queue = self._event_queues[plugin_name]
          del self._event_queues[plugin_name]

          event_queue.Close()

          storage_merge_reader = storage_writer.StartMergeTaskStorage(task)

          storage_merge_reader.MergeAttributeContainers(
              callback=self._MergeEventTag)
          # TODO: temporary solution.
          plugin_names.remove(plugin_name)

          self._status = definitions.STATUS_INDICATOR_RUNNING

          self._number_of_produced_event_tags = (
              storage_writer.number_of_event_tags)
          self._number_of_produced_reports = (
              storage_writer.number_of_analysis_reports)

    try:
      storage_writer.StopTaskStorage(abort=self._abort)
    except (IOError, OSError) as exception:
      logger.error('Unable to stop task storage with error: {0!s}'.format(
          exception))

    if self._abort:
      logger.debug('Processing aborted.')
    else:
      logger.debug('Processing completed.')

    events_counter = collections.Counter()
    events_counter['Events filtered'] = number_of_filtered_events
    events_counter['Events processed'] = self._number_of_consumed_events

    return events_counter
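The event loop above fans every surviving event out to each per-plugin queue and then closes the stream with one QueueAbort per queue. A condensed sketch of that fan-out-and-signal pattern, assuming stdlib-style queues (SENTINEL and FanOutEvents are illustrative stand-ins):

SENTINEL = object()  # stand-in for plaso_queue.QueueAbort()

def FanOutEvents(events, event_queues):
    # Returns the number of events pushed to every consumer queue.
    number_of_events = 0
    for event in events:
        for event_queue in event_queues.values():
            event_queue.put(event)
        number_of_events += 1
    # One sentinel per queue signals that no more events will arrive.
    for event_queue in event_queues.values():
        event_queue.put(SENTINEL)
    return number_of_events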
Example #13
  def _AnalyzeEvents(self, storage_writer, analysis_plugins, event_filter=None):
    """Analyzes events in a plaso storage.

    Args:
      storage_writer (StorageWriter): storage writer.
      analysis_plugins (list[AnalysisPlugin]): analysis plugins that should
          be run.
      event_filter (Optional[FilterObject]): event filter.

    Raises:
      RuntimeError: if a non-recoverable situation is encountered.
    """
    self._status = definitions.PROCESSING_STATUS_RUNNING
    self._number_of_consumed_errors = 0
    self._number_of_consumed_events = 0
    self._number_of_consumed_reports = 0
    self._number_of_consumed_sources = 0
    self._number_of_produced_errors = 0
    self._number_of_produced_events = 0
    self._number_of_produced_reports = 0
    self._number_of_produced_sources = 0

    number_of_filtered_events = 0

    logging.debug(u'Processing events.')

    filter_limit = getattr(event_filter, u'limit', None)

    for event in storage_writer.GetEvents():
      if event_filter:
        filter_match = event_filter.Match(event)
      else:
        filter_match = None

      # pylint: disable=singleton-comparison
      if filter_match == False:
        number_of_filtered_events += 1
        continue

      for event_queue in self._event_queues.values():
        # TODO: Check for premature exit of analysis plugins.
        event_queue.PushItem(event)

      self._number_of_consumed_events += 1

      if (event_filter and filter_limit and
          filter_limit == self._number_of_consumed_events):
        break

    logging.debug(u'Finished pushing events to analysis plugins.')
    # Signal that we have finished adding events.
    for event_queue in self._event_queues.values():
      event_queue.PushItem(plaso_queue.QueueAbort(), block=False)

    logging.debug(u'Processing analysis plugin results.')

    # TODO: use a task based approach.
    plugin_names = [plugin.plugin_name for plugin in analysis_plugins]
    while plugin_names:
      for plugin_name in list(plugin_names):
        if self._abort:
          break

        # TODO: temporary solution.
        task = tasks.Task()
        task.identifier = plugin_name

        merge_ready = storage_writer.CheckTaskReadyForMerge(task)
        if merge_ready:
          self._status = definitions.PROCESSING_STATUS_MERGING

          event_queue = self._event_queues[plugin_name]
          del self._event_queues[plugin_name]

          event_queue.Close()

          storage_merge_reader = storage_writer.StartMergeTaskStorage(task)

          storage_merge_reader.MergeAttributeContainers()
          # TODO: temporary solution.
          plugin_names.remove(plugin_name)

          self._status = definitions.PROCESSING_STATUS_RUNNING

          self._number_of_produced_event_tags = (
              storage_writer.number_of_event_tags)
          self._number_of_produced_reports = (
              storage_writer.number_of_analysis_reports)

    try:
      storage_writer.StopTaskStorage(abort=self._abort)
    except (IOError, OSError) as exception:
      logging.error(u'Unable to stop task storage with error: {0!s}'.format(
          exception))

    if self._abort:
      logging.debug(u'Processing aborted.')
    else:
      logging.debug(u'Processing completed.')

    events_counter = collections.Counter()
    events_counter[u'Events filtered'] = number_of_filtered_events
    events_counter[u'Events processed'] = self._number_of_consumed_events

    return events_counter