Example #1
 def test_start_stop_dumper(self):
     executions_queue = self.get_queue()
     sleep_interval = 0.01
     dumper = Dumper(queue=executions_queue, sleep_interval=sleep_interval,
                     export_dir='/tmp', batch_size=1, max_files_per_sleep=5,
                     file_prefix='st2-stuff-', file_format='json')
     dumper.start()
     # Call stop after at least one batch was written to disk.
     eventlet.sleep(10 * sleep_interval)
     dumper.stop()
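For orientation, the exporter class in the next example hands the Dumper a plain Queue.Queue filled with ActionExecutionAPI objects; a minimal producer-side sketch, with a stand-in payload that is only an assumption:

import Queue

executions_queue = Queue.Queue()
# Presumably the dumper drains this queue in batches of batch_size and
# writes each batch under export_dir; the dict is only a stand-in payload.
executions_queue.put_nowait({'id': 'placeholder-execution'})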
Example #2
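The class is shown without its module preamble; a rough sketch of the imports it relies on, where the exact module paths are assumptions based on the StackStorm (st2) code layout rather than part of the original snippet:

import Queue

import eventlet
from oslo_config import cfg

from st2common import log as logging
from st2common.models.api.execution import ActionExecutionAPI
from st2common.models.db.execution import ActionExecutionDB
from st2common.persistence.execution import ActionExecution
from st2common.persistence.marker import DumperMarker
from st2common.transport import consumers
from st2common.util import isotime
from st2exporter.exporter.dumper import Dumper

LOG = logging.getLogger(__name__)
# COMPLETION_STATUSES: module-level collection of terminal execution
# statuses (succeeded, failed, ...) defined alongside this class.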
class ExecutionsExporter(consumers.MessageHandler):
    message_type = ActionExecutionDB

    def __init__(self, connection, queues):
        super(ExecutionsExporter, self).__init__(connection, queues)
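        # In-memory queue handing completed executions from this consumer to the dumper.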
        self.pending_executions = Queue.Queue()
        self._dumper = Dumper(queue=self.pending_executions,
                              export_dir=cfg.CONF.exporter.dump_dir)
        self._consumer_thread = None

    def start(self, wait=False):
        LOG.info('Bootstrapping executions from db...')
        try:
            self._bootstrap()
        except Exception:
            LOG.exception('Unable to bootstrap executions from db. Aborting.')
            raise
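        # Run the message consumer in a green thread so the dumper can run concurrently.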
        self._consumer_thread = eventlet.spawn(super(ExecutionsExporter, self).start, wait=True)
        self._dumper.start()
        if wait:
            self.wait()

    def wait(self):
        self._consumer_thread.wait()
        self._dumper.wait()

    def shutdown(self):
        self._dumper.stop()
        super(ExecutionsExporter, self).shutdown()

    def process(self, execution):
        LOG.debug('Got execution from queue: %s', execution)
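        # Only export executions that have reached a terminal (completed) state.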
        if execution.status not in COMPLETION_STATUSES:
            return
        execution_api = ActionExecutionAPI.from_model(execution, mask_secrets=True)
        self.pending_executions.put_nowait(execution_api)
        LOG.debug("Added execution to queue.")

    def _bootstrap(self):
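        # Resume from the last exported timestamp if a dumper marker was recorded.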
        marker = self._get_export_marker_from_db()
        LOG.info('Using marker %s...', marker)
        missed_executions = self._get_missed_executions_from_db(export_marker=marker)
        LOG.info('Found %d executions not exported yet...', len(missed_executions))

        for missed_execution in missed_executions:
            if missed_execution.status not in COMPLETION_STATUSES:
                continue
            execution_api = ActionExecutionAPI.from_model(missed_execution, mask_secrets=True)
            try:
                LOG.debug('Missed execution %s', execution_api)
                self.pending_executions.put_nowait(execution_api)
            except Exception:
                LOG.exception('Failed adding execution to in-memory queue.')
                continue
        LOG.info('Bootstrapped executions...')

    def _get_export_marker_from_db(self):
        try:
            markers = DumperMarker.get_all()
        except Exception:
            return None
        else:
            if len(markers) >= 1:
                marker = markers[0]
                return isotime.parse(marker.marker)
            else:
                return None

    def _get_missed_executions_from_db(self, export_marker=None):
        if not export_marker:
            return self._get_all_executions_from_db()

        # XXX: Should adapt this query to get only executions with status
        # in COMPLETION_STATUSES.
        filters = {'end_timestamp__gt': export_marker}
        LOG.info('Querying for executions with filters: %s', filters)
        return ActionExecution.query(**filters)

    def _get_all_executions_from_db(self):
        return ActionExecution.get_all()  # XXX: Paginated call.
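To round this off, a hedged sketch of how a service might drive the handler using only the methods shown above; the AMQP URL, exchange, and queue names are assumptions, and plain kombu primitives stand in for whatever transport helpers st2 actually uses:

import kombu

connection = kombu.Connection('amqp://guest:guest@127.0.0.1:5672//')
exchange = kombu.Exchange('st2.execution', type='topic')
queues = [kombu.Queue('st2.exporter.work', exchange, routing_key='#')]

exporter = ExecutionsExporter(connection, queues)
try:
    # start(wait=True) blocks: it bootstraps missed executions, spawns the
    # consumer green thread, starts the dumper and then waits on both.
    exporter.start(wait=True)
except (KeyboardInterrupt, SystemExit):
    exporter.shutdown()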