Example #1
    def process(self, pre_ack_response):
        trigger_instance, message = self._decompose_pre_ack_process_response(
            pre_ack_response)
        if not trigger_instance:
            raise ValueError("No trigger_instance provided for processing.")

        get_driver().inc_counter("trigger.%s.processed" %
                                 (trigger_instance.trigger))

        try:
            # Use trace_context from the message and if not found create a new context
            # and use the trigger_instance.id as trace_tag.
            trace_context = message.get(TRACE_CONTEXT, None)
            if not trace_context:
                trace_context = {
                    TRACE_ID: "trigger_instance-%s" % str(trigger_instance.id)
                }
            # add a trace or update an existing trace with trigger_instance
            trace_service.add_or_update_given_trace_context(
                trace_context=trace_context,
                trigger_instances=[
                    trace_service.get_trace_component_for_trigger_instance(
                        trigger_instance)
                ],
            )

            container_utils.update_trigger_instance_status(
                trigger_instance,
                trigger_constants.TRIGGER_INSTANCE_PROCESSING)

            with CounterWithTimer(key="rule.processed"):
                with Timer(key="trigger.%s.processed" %
                           (trigger_instance.trigger)):
                    self.rules_engine.handle_trigger_instance(trigger_instance)

            container_utils.update_trigger_instance_status(
                trigger_instance, trigger_constants.TRIGGER_INSTANCE_PROCESSED)
        except:
            # TODO : Capture the reason for failure.
            container_utils.update_trigger_instance_status(
                trigger_instance,
                trigger_constants.TRIGGER_INSTANCE_PROCESSING_FAILED)
            # This could be a large message but at least in case of an exception
            # we get to see more context.
            # Beyond this point code cannot really handle the exception anyway so
            # eating up the exception.
            LOG.exception("Failed to handle trigger_instance %s.",
                          trigger_instance)
            return
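The CounterWithTimer and Timer context managers used above come from st2common.metrics and report through the same driver returned by get_driver(). As a rough illustration only (a hypothetical stand-in, not the st2common implementation), such a helper would increment a counter on entry and submit the elapsed time for the wrapped block on exit:

# Illustrative sketch only -- a stand-in for the idea behind CounterWithTimer,
# not the actual st2common.metrics implementation.
import time


class SketchCounterWithTimer(object):
    """Count an event and time the wrapped block via a metrics driver."""

    def __init__(self, key, driver):
        self._key = key
        self._driver = driver  # any object exposing inc_counter() and time()
        self._start = None

    def __enter__(self):
        self._driver.inc_counter(self._key)
        self._start = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Report the elapsed wall-clock time (in seconds) for the block.
        self._driver.time(self._key, time.time() - self._start)
        return False  # never suppress exceptions raised inside the block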
Example #2
    def create_rule_enforcers(self, trigger_instance, matching_rules):
        """
        Creates a RuleEnforcer for each matching rule.

        This method is trigger_instance specific, therefore if creation of 1 RuleEnforcer
        fails it is likely that all will be broken.
        """
        enforcers = []
        for matching_rule in matching_rules:

            get_driver().inc_counter(
                format_metrics_key(key='rule.%s' % matching_rule))

            enforcers.append(RuleEnforcer(trigger_instance, matching_rule))
        return enforcers
Example #3
        def custom_start_response(status, headers, exc_info=None):
            status_code = int(status.split(' ')[0])

            metrics_driver = get_driver()
            metrics_driver.inc_counter('%s.response.status.%s' % (self._service_name,
                                                                  status_code))

            return start_response(status, headers, exc_info)
Example #4
        def custom_start_response(status, headers, exc_info=None):
            status_code = int(status.split(' ')[0])

            metrics_driver = get_driver()
            metrics_driver.inc_counter('%s.response.status.%s' %
                                       (self._service_name, status_code))

            return start_response(status, headers, exc_info)
Example #5
    def create_rule_enforcers(self, trigger_instance, matching_rules):
        """
        Creates a RuleEnforcer for each matching rule.

        This method is trigger_instance specific, therefore if creation of 1 RuleEnforcer
        fails it is likely that all will be broken.
        """
        enforcers = []
        for matching_rule in matching_rules:

            get_driver().inc_counter(
                format_metrics_key(
                    key='rule.%s' % matching_rule
                )
            )

            enforcers.append(RuleEnforcer(trigger_instance, matching_rule))
        return enforcers
Example #6
    def __call__(self, environ, start_response):
        request = Request(environ)

        metrics_driver = get_driver()

        key = '%s.request.total' % (self._service_name)
        metrics_driver.inc_counter(key)

        key = '%s.request.method.%s' % (self._service_name, request.method)
        metrics_driver.inc_counter(key)

        path = request.path.replace('/', '_')
        key = '%s.request.path.%s' % (self._service_name, path)
        metrics_driver.inc_counter(key)

        if self._service_name == 'stream':
            # For stream service, we also record current number of open connections.
            # Due to the way stream service works, we need to utilize eventlet posthook to
            # correctly set the counter when the connection is closed / full response is returned.
            # See http://eventlet.net/doc/modules/wsgi.html#non-standard-extension-to-support-post-
            # hooks for details

            # Increase request counter
            key = '%s.request' % (self._service_name)
            metrics_driver.inc_counter(key)

            # Increase "total number of connections" gauge
            metrics_driver.inc_gauge('stream.connections', 1)

            start_time = get_datetime_utc_now()

            def update_metrics_hook(env):
                # Hook which is called at the very end after all the response has been sent and
                # connection closed
                time_delta = (get_datetime_utc_now() - start_time)
                duration = time_delta.total_seconds()

                # Send total request time
                metrics_driver.time(key, duration)

                # Decrease "current number of connections" gauge
                metrics_driver.dec_gauge('stream.connections', 1)

            # NOTE: Some tests mock environ, in which case the 'eventlet.posthooks' key is not available
            if 'eventlet.posthooks' in environ:
                environ['eventlet.posthooks'].append(
                    (update_metrics_hook, (), {}))

            return self.app(environ, start_response)
        else:
            # Track and time current number of processing requests
            key = '%s.request' % (self._service_name)

            with CounterWithTimer(key=key):
                return self.app(environ, start_response)
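These examples only touch a handful of driver methods (inc_counter, dec_counter, inc_gauge, dec_gauge and time). When exercising middleware like the one above in tests, a minimal in-memory stand-in for the object returned by get_driver() is often enough; the sketch below is such a hypothetical test double, not one of the real st2common metrics drivers:

# Minimal in-memory stand-in exposing only the driver methods exercised in
# these examples. Intended as a test double; the real drivers live in
# st2common.metrics.
from collections import defaultdict


class InMemoryMetricsDriver(object):
    def __init__(self):
        self.counters = defaultdict(int)   # key -> running count
        self.gauges = defaultdict(int)     # key -> current gauge value
        self.timings = defaultdict(list)   # key -> recorded durations (seconds)

    def inc_counter(self, key, amount=1):
        self.counters[key] += amount

    def dec_counter(self, key, amount=1):
        self.counters[key] -= amount

    def inc_gauge(self, key, amount=1):
        self.gauges[key] += amount

    def dec_gauge(self, key, amount=1):
        self.gauges[key] -= amount

    def time(self, key, value):
        self.timings[key].append(value)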
Example #7
    def process(self, pre_ack_response):
        trigger_instance, message = self._decompose_pre_ack_process_response(pre_ack_response)
        if not trigger_instance:
            raise ValueError('No trigger_instance provided for processing.')

        get_driver().inc_counter('trigger.%s.processed' % (trigger_instance.trigger))

        try:
            # Use trace_context from the message and if not found create a new context
            # and use the trigger_instance.id as trace_tag.
            trace_context = message.get(TRACE_CONTEXT, None)
            if not trace_context:
                trace_context = {
                    TRACE_ID: 'trigger_instance-%s' % str(trigger_instance.id)
                }
            # add a trace or update an existing trace with trigger_instance
            trace_service.add_or_update_given_trace_context(
                trace_context=trace_context,
                trigger_instances=[
                    trace_service.get_trace_component_for_trigger_instance(trigger_instance)
                ]
            )

            container_utils.update_trigger_instance_status(
                trigger_instance, trigger_constants.TRIGGER_INSTANCE_PROCESSING)

            with CounterWithTimer(key='rule.processed'):
                with Timer(key='trigger.%s.processed' % (trigger_instance.trigger)):
                    self.rules_engine.handle_trigger_instance(trigger_instance)

            container_utils.update_trigger_instance_status(
                trigger_instance, trigger_constants.TRIGGER_INSTANCE_PROCESSED)
        except:
            # TODO : Capture the reason for failure.
            container_utils.update_trigger_instance_status(
                trigger_instance, trigger_constants.TRIGGER_INSTANCE_PROCESSING_FAILED)
            # This could be a large message but at least in case of an exception
            # we get to see more context.
            # Beyond this point code cannot really handle the exception anyway so
            # eating up the exception.
            LOG.exception('Failed to handle trigger_instance %s.', trigger_instance)
            return
Example #8
    def create_rule_enforcers(self, trigger_instance, matching_rules):
        """
        Creates a RuleEnforcer for each matching rule.

        This method is trigger_instance specific, therefore if creation of 1 RuleEnforcer
        fails it is likely that all will be broken.
        """
        metrics_driver = get_driver()

        enforcers = []
        for matching_rule in matching_rules:
            metrics_driver.inc_counter("rule.matched")
            metrics_driver.inc_counter("rule.%s.matched" % (matching_rule.ref))

            enforcers.append(RuleEnforcer(trigger_instance, matching_rule))
        return enforcers
Example #9
    def __call__(self, environ, start_response):
        request = Request(environ)

        try:
            endpoint, _ = self.router.match(request)
        except NotFoundException:
            endpoint = {}

        # NOTE: We don't track per request and response metrics for /v1/executions/<id> and some
        # other endpoints because this would result in a lot of unique metrics which is an
        # anti-pattern and causes unnecessary load on the metrics server.
        submit_metrics = endpoint.get('x-submit-metrics', True)
        operation_id = endpoint.get('operationId', None)
        is_get_one_endpoint = bool(operation_id) and (operation_id.endswith('.get') or
                                                      operation_id.endswith('.get_one'))

        if is_get_one_endpoint:
            # NOTE: We don't submit metrics for any get one API endpoint since this would result
            # in potentially too many unique metrics
            submit_metrics = False

        if not submit_metrics:
            LOG.debug('Not submitting request metrics for path: %s' % (request.path))
            return self.app(environ, start_response)

        metrics_driver = get_driver()

        key = '%s.request.total' % (self._service_name)
        metrics_driver.inc_counter(key)

        key = '%s.request.method.%s' % (self._service_name, request.method)
        metrics_driver.inc_counter(key)

        path = request.path.replace('/', '_')
        key = '%s.request.path.%s' % (self._service_name, path)
        metrics_driver.inc_counter(key)

        if self._service_name == 'stream':
            # For stream service, we also record current number of open connections.
            # Due to the way stream service works, we need to utilize eventlet posthook to
            # correctly set the counter when the connection is closed / full response is returned.
            # See http://eventlet.net/doc/modules/wsgi.html#non-standard-extension-to-support-post-
            # hooks for details

            # Increase request counter
            key = '%s.request' % (self._service_name)
            metrics_driver.inc_counter(key)

            # Increase "total number of connections" gauge
            metrics_driver.inc_gauge('stream.connections', 1)

            start_time = get_datetime_utc_now()

            def update_metrics_hook(env):
                # Hook which is called at the very end after all the response has been sent and
                # connection closed
                time_delta = (get_datetime_utc_now() - start_time)
                duration = time_delta.total_seconds()

                # Send total request time
                metrics_driver.time(key, duration)

                # Decrease "current number of connections" gauge
                metrics_driver.dec_gauge('stream.connections', 1)

            # NOTE: Some tests mock environ, in which case the 'eventlet.posthooks' key is not available
            if 'eventlet.posthooks' in environ:
                environ['eventlet.posthooks'].append((update_metrics_hook, (), {}))

            return self.app(environ, start_response)
        else:
            # Track and time current number of processing requests
            key = '%s.request' % (self._service_name)

            with CounterWithTimer(key=key):
                return self.app(environ, start_response)
Example #10
def create_request(liveaction):
    """
    Create an action execution.

    :return: (liveaction, execution)
    :rtype: tuple
    """
    # We import this here to avoid conflicts w/ runners that might import this
    # file since the runners don't have the config context by default.
    from st2common.metrics.base import get_driver

    # Use the user context from the parent action execution. Subtasks in a workflow
    # action can be invoked by a system user and so we want to use the user context
    # from the original workflow action.
    parent_context = executions.get_parent_context(liveaction)
    if parent_context:
        parent_user = parent_context.get('user', None)
        if parent_user:
            liveaction.context['user'] = parent_user

    # Validate action.
    action_db = action_utils.get_action_by_ref(liveaction.action)
    if not action_db:
        raise ValueError('Action "%s" cannot be found.' % liveaction.action)
    if not action_db.enabled:
        raise ValueError('Unable to execute. Action "%s" is disabled.' %
                         liveaction.action)

    runnertype_db = action_utils.get_runnertype_by_name(
        action_db.runner_type['name'])

    if not hasattr(liveaction, 'parameters'):
        liveaction.parameters = dict()

    # Validate action parameters.
    schema = util_schema.get_schema_for_action_parameters(action_db)
    validator = util_schema.get_validator()
    util_schema.validate(liveaction.parameters,
                         schema,
                         validator,
                         use_default=True,
                         allow_default_none=True)

    # validate that no immutable params are being overriden. Although possible to
    # ignore the override it is safer to inform the user to avoid surprises.
    immutables = _get_immutable_params(action_db.parameters)
    immutables.extend(_get_immutable_params(runnertype_db.runner_parameters))
    overridden_immutables = [
        p for p in six.iterkeys(liveaction.parameters) if p in immutables
    ]
    if len(overridden_immutables) > 0:
        raise ValueError(
            'Override of immutable parameter(s) %s is unsupported.' %
            str(overridden_immutables))

    # Set notification settings for action.
    # XXX: There are cases when we don't want notifications to be sent for a particular
    # execution. So we should look at liveaction.parameters['notify']
    # and not set liveaction.notify.
    if not _is_notify_empty(action_db.notify):
        liveaction.notify = action_db.notify

    # Write to database and send to message queue.
    liveaction.status = action_constants.LIVEACTION_STATUS_REQUESTED
    liveaction.start_timestamp = date_utils.get_datetime_utc_now()

    # Set the "action_is_workflow" attribute
    liveaction.action_is_workflow = action_db.is_workflow()

    # Publish creation after both liveaction and actionexecution are created.
    liveaction = LiveAction.add_or_update(liveaction, publish=False)

    # Get trace_db if it exists. This could throw. If it throws, we have to cleanup
    # liveaction object so we don't see things in requested mode.
    trace_db = None
    try:
        _, trace_db = trace_service.get_trace_db_by_live_action(liveaction)
    except db_exc.StackStormDBObjectNotFoundError as e:
        _cleanup_liveaction(liveaction)
        raise trace_exc.TraceNotFoundException(str(e))

    execution = executions.create_execution_object(liveaction, publish=False)

    if trace_db:
        trace_service.add_or_update_given_trace_db(
            trace_db=trace_db,
            action_executions=[
                trace_service.get_trace_component_for_action_execution(
                    execution, liveaction)
            ])

    get_driver().inc_counter('action.executions.%s' % (liveaction.status))

    return liveaction, execution
Example #11
def update_liveaction_status(status=None, result=None, context=None, end_timestamp=None,
                             liveaction_id=None, runner_info=None, liveaction_db=None,
                             publish=True):
    """
        Update the status of the specified LiveAction to the value provided in
        new_status.

        The LiveAction may be specified using either liveaction_id, or as an
        liveaction_db instance.
    """

    if (liveaction_id is None) and (liveaction_db is None):
        raise ValueError('Must specify an liveaction_id or an liveaction_db when '
                         'calling update_LiveAction_status')

    if liveaction_db is None:
        liveaction_db = get_liveaction_by_id(liveaction_id)

    if status not in LIVEACTION_STATUSES:
        raise ValueError('Attempting to set status for LiveAction "%s" '
                         'to unknown status string. Unknown status is "%s"'
                         % (liveaction_db, status))

    # If liveaction_db status is set then we need to decrement the counter
    # because it is transitioning to a new state
    if liveaction_db.status:
        get_driver().dec_counter(
            format_metrics_key(
                liveaction_db=liveaction_db,
                key='action.%s' % liveaction_db.status
            )
        )

    # If status is provided then we need to increment the timer because the action
    # is transitioning into this new state
    if status:
        get_driver().inc_counter(
            format_metrics_key(
                liveaction_db=liveaction_db,
                key='action.%s' % status
            )
        )

    extra = {'liveaction_db': liveaction_db}
    LOG.debug('Updating ActionExecution: "%s" with status="%s"', liveaction_db.id, status,
              extra=extra)

    # If liveaction is already canceled, then do not allow status to be updated.
    if liveaction_db.status == LIVEACTION_STATUS_CANCELED and status != LIVEACTION_STATUS_CANCELED:
        LOG.info('Unable to update ActionExecution "%s" with status="%s". '
                 'ActionExecution is already canceled.', liveaction_db.id, status, extra=extra)
        return liveaction_db

    old_status = liveaction_db.status
    liveaction_db.status = status

    if result:
        liveaction_db.result = result

    if context:
        liveaction_db.context.update(context)

    if end_timestamp:
        liveaction_db.end_timestamp = end_timestamp

    if runner_info:
        liveaction_db.runner_info = runner_info

    liveaction_db = LiveAction.add_or_update(liveaction_db)

    LOG.debug('Updated status for LiveAction object.', extra=extra)

    if publish and status != old_status:
        LiveAction.publish_status(liveaction_db)
        LOG.debug('Published status for LiveAction object.', extra=extra)

    return liveaction_db
Example #12
    def __call__(self, environ, start_response):
        request = Request(environ)

        try:
            endpoint, _ = self.router.match(request)
        except NotFoundException:
            endpoint = {}

        # NOTE: We don't track per request and response metrics for /v1/executions/<id> and some
        # other endpoints because this would result in a lot of unique metrics which is an
        # anti-pattern and causes unnecessary load on the metrics server.
        submit_metrics = endpoint.get("x-submit-metrics", True)
        operation_id = endpoint.get("operationId", None)
        is_get_one_endpoint = bool(operation_id) and (
            operation_id.endswith(".get") or operation_id.endswith(".get_one"))

        if is_get_one_endpoint:
            # NOTE: We don't submit metrics for any get one API endpoint since this would result
            # in potentially too many unique metrics
            submit_metrics = False

        if not submit_metrics:
            LOG.debug("Not submitting request metrics for path: %s" %
                      (request.path))
            return self.app(environ, start_response)

        metrics_driver = get_driver()

        key = "%s.request.total" % (self._service_name)
        metrics_driver.inc_counter(key)

        key = "%s.request.method.%s" % (self._service_name, request.method)
        metrics_driver.inc_counter(key)

        path = request.path.replace("/", "_")
        key = "%s.request.path.%s" % (self._service_name, path)
        metrics_driver.inc_counter(key)

        if self._service_name == "stream":
            # For stream service, we also record current number of open connections.
            # Due to the way stream service works, we need to utilize eventlet posthook to
            # correctly set the counter when the connection is closed / full response is returned.
            # See http://eventlet.net/doc/modules/wsgi.html#non-standard-extension-to-support-post-
            # hooks for details

            # Increase request counter
            key = "%s.request" % (self._service_name)
            metrics_driver.inc_counter(key)

            # Increase "total number of connections" gauge
            metrics_driver.inc_gauge("stream.connections", 1)

            start_time = get_datetime_utc_now()

            def update_metrics_hook(env):
                # Hook which is called at the very end after all the response has been sent and
                # connection closed
                time_delta = get_datetime_utc_now() - start_time
                duration = time_delta.total_seconds()

                # Send total request time
                metrics_driver.time(key, duration)

                # Decrease "current number of connections" gauge
                metrics_driver.dec_gauge("stream.connections", 1)

            # NOTE: Some tests mock environ, in which case the 'eventlet.posthooks' key is not available
            if "eventlet.posthooks" in environ:
                environ["eventlet.posthooks"].append(
                    (update_metrics_hook, (), {}))

            return self.app(environ, start_response)
        else:
            # Track and time current number of processing requests
            key = "%s.request" % (self._service_name)

            with CounterWithTimer(key=key):
                return self.app(environ, start_response)
Example #13
def update_liveaction_status(
    status=None,
    result=None,
    context=None,
    end_timestamp=None,
    liveaction_id=None,
    runner_info=None,
    liveaction_db=None,
    publish=True,
):
    """
    Update the status of the specified LiveAction to the value provided in
    new_status.

    The LiveAction may be specified using either liveaction_id, or as an
    liveaction_db instance.
    """

    if (liveaction_id is None) and (liveaction_db is None):
        raise ValueError(
            "Must specify an liveaction_id or an liveaction_db when "
            "calling update_LiveAction_status"
        )

    if liveaction_db is None:
        liveaction_db = get_liveaction_by_id(liveaction_id)

    if status not in LIVEACTION_STATUSES:
        raise ValueError(
            'Attempting to set status for LiveAction "%s" '
            'to unknown status string. Unknown status is "%s"' % (liveaction_db, status)
        )

    if (
        result
        and cfg.CONF.system.validate_output_schema
        and status == LIVEACTION_STATUS_SUCCEEDED
    ):
        action_db = get_action_by_ref(liveaction_db.action)
        runner_db = get_runnertype_by_name(action_db.runner_type["name"])
        result, status = output_schema.validate_output(
            runner_db.output_schema,
            action_db.output_schema,
            result,
            status,
            runner_db.output_key,
        )

    # If liveaction_db status is set then we need to decrement the counter
    # because it is transitioning to a new state
    if liveaction_db.status:
        get_driver().dec_counter("action.executions.%s" % (liveaction_db.status))

    # If status is provided then we need to increment the timer because the action
    # is transitioning into this new state
    if status:
        get_driver().inc_counter("action.executions.%s" % (status))

    extra = {"liveaction_db": liveaction_db}
    LOG.debug(
        'Updating ActionExecution: "%s" with status="%s"',
        liveaction_db.id,
        status,
        extra=extra,
    )

    # If liveaction is already canceled, then do not allow status to be updated.
    if (
        liveaction_db.status == LIVEACTION_STATUS_CANCELED
        and status != LIVEACTION_STATUS_CANCELED
    ):
        LOG.info(
            'Unable to update ActionExecution "%s" with status="%s". '
            "ActionExecution is already canceled.",
            liveaction_db.id,
            status,
            extra=extra,
        )
        return liveaction_db

    old_status = liveaction_db.status
    liveaction_db.status = status

    if result:
        liveaction_db.result = result

    if context:
        liveaction_db.context.update(context)

    if end_timestamp:
        liveaction_db.end_timestamp = end_timestamp

    if runner_info:
        liveaction_db.runner_info = runner_info

    # TODO: This is not efficient. Perform direct partial update and only update
    # manipulated fields
    liveaction_db = LiveAction.add_or_update(liveaction_db)

    LOG.debug("Updated status for LiveAction object.", extra=extra)

    if publish and status != old_status:
        LiveAction.publish_status(liveaction_db)
        LOG.debug("Published status for LiveAction object.", extra=extra)

    return liveaction_db
Example #14
def update_liveaction_status(status=None, result=None, context=None, end_timestamp=None,
                             liveaction_id=None, runner_info=None, liveaction_db=None,
                             publish=True):
    """
        Update the status of the specified LiveAction to the value provided in
        new_status.

        The LiveAction may be specified using either liveaction_id, or as an
        liveaction_db instance.
    """

    if (liveaction_id is None) and (liveaction_db is None):
        raise ValueError('Must specify an liveaction_id or an liveaction_db when '
                         'calling update_LiveAction_status')

    if liveaction_db is None:
        liveaction_db = get_liveaction_by_id(liveaction_id)

    if status not in LIVEACTION_STATUSES:
        raise ValueError('Attempting to set status for LiveAction "%s" '
                         'to unknown status string. Unknown status is "%s"'
                         % (liveaction_db, status))

    # If liveaction_db status is set then we need to decrement the counter
    # because it is transitioning to a new state
    if liveaction_db.status:
        get_driver().dec_counter('action.executions.%s' % (liveaction_db.status))

    # If status is provided then we need to increment the timer because the action
    # is transitioning into this new state
    if status:
        get_driver().inc_counter('action.executions.%s' % (status))

    extra = {'liveaction_db': liveaction_db}
    LOG.debug('Updating ActionExecution: "%s" with status="%s"', liveaction_db.id, status,
              extra=extra)

    # If liveaction is already canceled, then do not allow status to be updated.
    if liveaction_db.status == LIVEACTION_STATUS_CANCELED and status != LIVEACTION_STATUS_CANCELED:
        LOG.info('Unable to update ActionExecution "%s" with status="%s". '
                 'ActionExecution is already canceled.', liveaction_db.id, status, extra=extra)
        return liveaction_db

    old_status = liveaction_db.status
    liveaction_db.status = status

    if result:
        liveaction_db.result = result

    if context:
        liveaction_db.context.update(context)

    if end_timestamp:
        liveaction_db.end_timestamp = end_timestamp

    if runner_info:
        liveaction_db.runner_info = runner_info

    liveaction_db = LiveAction.add_or_update(liveaction_db)

    LOG.debug('Updated status for LiveAction object.', extra=extra)

    if publish and status != old_status:
        LiveAction.publish_status(liveaction_db)
        LOG.debug('Published status for LiveAction object.', extra=extra)

    return liveaction_db
Example #15
def update_liveaction_status(status=None, result=None, context=None, end_timestamp=None,
                             liveaction_id=None, runner_info=None, liveaction_db=None,
                             publish=True):
    """
        Update the status of the specified LiveAction to the value provided in
        new_status.

        The LiveAction may be specified using either liveaction_id, or as an
        liveaction_db instance.
    """

    if (liveaction_id is None) and (liveaction_db is None):
        raise ValueError('Must specify an liveaction_id or an liveaction_db when '
                         'calling update_LiveAction_status')

    if liveaction_db is None:
        liveaction_db = get_liveaction_by_id(liveaction_id)

    if status not in LIVEACTION_STATUSES:
        raise ValueError('Attempting to set status for LiveAction "%s" '
                         'to unknown status string. Unknown status is "%s"'
                         % (liveaction_db, status))

    if result and cfg.CONF.system.validate_output_schema and status == LIVEACTION_STATUS_SUCCEEDED:
        action_db = get_action_by_ref(liveaction_db.action)
        runner_db = get_runnertype_by_name(action_db.runner_type['name'])
        result, status = output_schema.validate_output(
            runner_db.output_schema,
            action_db.output_schema,
            result,
            status,
            runner_db.output_key,
        )

    # If liveaction_db status is set then we need to decrement the counter
    # because it is transitioning to a new state
    if liveaction_db.status:
        get_driver().dec_counter('action.executions.%s' % (liveaction_db.status))

    # If status is provided then we need to increment the timer because the action
    # is transitioning into this new state
    if status:
        get_driver().inc_counter('action.executions.%s' % (status))

    extra = {'liveaction_db': liveaction_db}
    LOG.debug('Updating ActionExecution: "%s" with status="%s"', liveaction_db.id, status,
              extra=extra)

    # If liveaction is already canceled, then do not allow status to be updated.
    if liveaction_db.status == LIVEACTION_STATUS_CANCELED and status != LIVEACTION_STATUS_CANCELED:
        LOG.info('Unable to update ActionExecution "%s" with status="%s". '
                 'ActionExecution is already canceled.', liveaction_db.id, status, extra=extra)
        return liveaction_db

    old_status = liveaction_db.status
    liveaction_db.status = status

    if result:
        liveaction_db.result = result

    if context:
        liveaction_db.context.update(context)

    if end_timestamp:
        liveaction_db.end_timestamp = end_timestamp

    if runner_info:
        liveaction_db.runner_info = runner_info

    # TODO: This is not efficient. Perform direct partial update and only update
    # manipulated fields
    liveaction_db = LiveAction.add_or_update(liveaction_db)

    LOG.debug('Updated status for LiveAction object.', extra=extra)

    if publish and status != old_status:
        LiveAction.publish_status(liveaction_db)
        LOG.debug('Published status for LiveAction object.', extra=extra)

    return liveaction_db
Example #16
def create_request(liveaction, action_db=None, runnertype_db=None):
    """
    Create an action execution.

    :param action_db: Action model to operate on. If not provided, one is retrieved from the
                      database using values from "liveaction".
    :type action_db: :class:`ActionDB`

    :param runnertype_db: Runner model to operate on. If not provided, one is retrieved from the
                          database using values from "liveaction".
    :type runnertype_db: :class:`RunnerTypeDB`

    :return: (liveaction, execution)
    :rtype: tuple
    """
    # We import this here to avoid conflicts w/ runners that might import this
    # file since the runners don't have the config context by default.
    from st2common.metrics.base import get_driver

    # Use the user context from the parent action execution. Subtasks in a workflow
    # action can be invoked by a system user and so we want to use the user context
    # from the original workflow action.
    parent_context = executions.get_parent_context(liveaction) or {}
    parent_user = parent_context.get('user', None)

    if parent_user:
        liveaction.context['user'] = parent_user

    # Validate action
    if not action_db:
        action_db = action_utils.get_action_by_ref(liveaction.action)

    if not action_db:
        raise ValueError('Action "%s" cannot be found.' % liveaction.action)
    if not action_db.enabled:
        raise ValueError('Unable to execute. Action "%s" is disabled.' % liveaction.action)

    if not runnertype_db:
        runnertype_db = action_utils.get_runnertype_by_name(action_db.runner_type['name'])

    if not hasattr(liveaction, 'parameters'):
        liveaction.parameters = dict()

    # For consistency add pack to the context here in addition to RunnerContainer.dispatch() method
    liveaction.context['pack'] = action_db.pack

    # Validate action parameters.
    schema = util_schema.get_schema_for_action_parameters(action_db, runnertype_db)
    validator = util_schema.get_validator()
    util_schema.validate(liveaction.parameters, schema, validator, use_default=True,
                         allow_default_none=True)

    # validate that no immutable params are being overriden. Although possible to
    # ignore the override it is safer to inform the user to avoid surprises.
    immutables = _get_immutable_params(action_db.parameters)
    immutables.extend(_get_immutable_params(runnertype_db.runner_parameters))
    overridden_immutables = [p for p in six.iterkeys(liveaction.parameters) if p in immutables]
    if len(overridden_immutables) > 0:
        raise ValueError('Override of immutable parameter(s) %s is unsupported.'
                         % str(overridden_immutables))

    # Set notification settings for action.
    # XXX: There are cases when we don't want notifications to be sent for a particular
    # execution. So we should look at liveaction.parameters['notify']
    # and not set liveaction.notify.
    if not _is_notify_empty(action_db.notify):
        liveaction.notify = action_db.notify

    # Write to database and send to message queue.
    liveaction.status = action_constants.LIVEACTION_STATUS_REQUESTED
    liveaction.start_timestamp = date_utils.get_datetime_utc_now()

    # Set the "action_is_workflow" attribute
    liveaction.action_is_workflow = action_db.is_workflow()

    # Publish creation after both liveaction and actionexecution are created.
    liveaction = LiveAction.add_or_update(liveaction, publish=False)
    # Get trace_db if it exists. This could throw. If it throws, we have to cleanup
    # liveaction object so we don't see things in requested mode.
    trace_db = None
    try:
        _, trace_db = trace_service.get_trace_db_by_live_action(liveaction)
    except db_exc.StackStormDBObjectNotFoundError as e:
        _cleanup_liveaction(liveaction)
        raise trace_exc.TraceNotFoundException(six.text_type(e))

    execution = executions.create_execution_object(liveaction=liveaction, action_db=action_db,
                                                   runnertype_db=runnertype_db, publish=False)

    if trace_db:
        trace_service.add_or_update_given_trace_db(
            trace_db=trace_db,
            action_executions=[
                trace_service.get_trace_component_for_action_execution(execution, liveaction)
            ])

    get_driver().inc_counter('action.executions.%s' % (liveaction.status))

    return liveaction, execution
Example #17
def create_request(
    liveaction, action_db=None, runnertype_db=None, validate_params=True
):
    """
    Create an action execution.

    :param action_db: Action model to operate on. If not provided, one is retrieved from the
                      database using values from "liveaction".
    :type action_db: :class:`ActionDB`

    :param runnertype_db: Runner model to operate on. If not provided, one is retrieved from the
                          database using values from "liveaction".
    :type runnertype_db: :class:`RunnerTypeDB`
    :param validate_params: Whether to validate parameters against schema. Defaults to True, but
                          set to False when raising a request to report an error.
    :type validate_params: ``bool``

    :return: (liveaction, execution)
    :rtype: tuple
    """
    # We import this here to avoid conflicts w/ runners that might import this
    # file since the runners don't have the config context by default.
    from st2common.metrics.base import get_driver

    # Use the user context from the parent action execution. Subtasks in a workflow
    # action can be invoked by a system user and so we want to use the user context
    # from the original workflow action.
    parent_context = executions.get_parent_context(liveaction) or {}
    parent_user = parent_context.get("user", None)

    if parent_user:
        liveaction.context["user"] = parent_user

    # Validate action
    if not action_db:
        action_db = action_utils.get_action_by_ref(liveaction.action)

    if not action_db:
        raise ValueError('Action "%s" cannot be found.' % liveaction.action)
    if not action_db.enabled:
        raise ValueError(
            'Unable to execute. Action "%s" is disabled.' % liveaction.action
        )

    if not runnertype_db:
        runnertype_db = action_utils.get_runnertype_by_name(
            action_db.runner_type["name"]
        )

    if not hasattr(liveaction, "parameters"):
        liveaction.parameters = dict()

    # For consistency add pack to the context here in addition to RunnerContainer.dispatch() method
    liveaction.context["pack"] = action_db.pack

    # Validate action parameters.
    schema = util_schema.get_schema_for_action_parameters(action_db, runnertype_db)
    validator = util_schema.get_validator()
    if validate_params:
        util_schema.validate(
            liveaction.parameters,
            schema,
            validator,
            use_default=True,
            allow_default_none=True,
        )

    # validate that no immutable params are being overriden. Although possible to
    # ignore the override it is safer to inform the user to avoid surprises.
    immutables = _get_immutable_params(action_db.parameters)
    immutables.extend(_get_immutable_params(runnertype_db.runner_parameters))
    overridden_immutables = [
        p for p in six.iterkeys(liveaction.parameters) if p in immutables
    ]
    if len(overridden_immutables) > 0:
        raise ValueError(
            "Override of immutable parameter(s) %s is unsupported."
            % str(overridden_immutables)
        )

    # Set notification settings for action.
    # XXX: There are cases when we don't want notifications to be sent for a particular
    # execution. So we should look at liveaction.parameters['notify']
    # and not set liveaction.notify.
    if not _is_notify_skipped(liveaction) and not _is_notify_empty(action_db.notify):
        liveaction.notify = action_db.notify

    # Write to database and send to message queue.
    liveaction.status = action_constants.LIVEACTION_STATUS_REQUESTED
    liveaction.start_timestamp = date_utils.get_datetime_utc_now()

    # Set the "action_is_workflow" attribute
    liveaction.action_is_workflow = action_db.is_workflow()

    # Publish creation after both liveaction and actionexecution are created.
    liveaction = LiveAction.add_or_update(liveaction, publish=False)
    # Get trace_db if it exists. This could throw. If it throws, we have to cleanup
    # liveaction object so we don't see things in requested mode.
    trace_db = None
    try:
        _, trace_db = trace_service.get_trace_db_by_live_action(liveaction)
    except db_exc.StackStormDBObjectNotFoundError as e:
        _cleanup_liveaction(liveaction)
        raise trace_exc.TraceNotFoundException(six.text_type(e))

    execution = executions.create_execution_object(
        liveaction=liveaction,
        action_db=action_db,
        runnertype_db=runnertype_db,
        publish=False,
    )

    if trace_db:
        trace_service.add_or_update_given_trace_db(
            trace_db=trace_db,
            action_executions=[
                trace_service.get_trace_component_for_action_execution(
                    execution, liveaction
                )
            ],
        )

    get_driver().inc_counter("action.executions.%s" % (liveaction.status))

    return liveaction, execution