class SensorService(object):
    """
    Facade handed to each sensor instance. It exposes the "public" operations
    a sensor is allowed to perform: namespaced logging and trigger dispatch.
    """

    def __init__(self, sensor_wrapper):
        self._sensor_wrapper = sensor_wrapper
        self._logger = sensor_wrapper._logger
        self._dispatcher = TriggerDispatcher(self._logger)

    def get_logger(self, name):
        """
        Retrieve an instance of a logger to be used by the sensor class.

        The returned logger is a child of the sensor wrapper's logger so sensor
        log records propagate up through the wrapper's handlers.

        :param name: Suffix appended to the wrapper logger's name.
        :type name: ``str``
        """
        child_name = '%s.%s' % (self._sensor_wrapper._logger.name, name)
        child = logging.getLogger(child_name)
        child.propagate = True
        return child

    def dispatch(self, trigger, payload=None):
        """
        Method which dispatches the trigger.

        :param trigger: Full name / reference of the trigger.
        :type trigger: ``str``

        :param payload: Trigger payload.
        :type payload: ``dict``
        """
        self._dispatcher.dispatch(trigger, payload=payload)
def _inject_instances(trigger, rate_per_trigger, duration, payload=None, max_throughput=False):
    """
    Dispatch instances of the provided trigger for ``duration`` seconds.

    :param trigger: Full name / reference of the trigger to dispatch.
    :type trigger: ``str``

    :param rate_per_trigger: Target dispatch rate (triggers / second). A falsy
                             value disables the inter-dispatch sleep so triggers
                             are emitted as fast as possible.

    :param duration: How long to keep dispatching, in seconds.

    :param payload: Payload to dispatch with every trigger instance.
    :type payload: ``dict``

    :param max_throughput: Kept for interface compatibility; callers signal max
                           throughput by passing a falsy ``rate_per_trigger``.
    """
    payload = payload or {}

    start = date_utils.get_datetime_utc_now()
    elapsed = 0.0
    count = 0

    dispatcher = TriggerDispatcher()
    while elapsed < duration:
        dispatcher.dispatch(trigger, payload)

        if rate_per_trigger:
            # NOTE: We decrease sleep delay for 56% to take into account overhead / delay because
            # of the call to dispatcher.dispatch method.
            delta = random.expovariate(rate_per_trigger)
            eventlet.sleep(delta * 0.56)

        # Use total_seconds() instead of .seconds: the latter is only the
        # "seconds" component of the timedelta and wraps for long durations.
        elapsed = (date_utils.get_datetime_utc_now() - start).total_seconds()
        count += 1

    # Guard against ZeroDivisionError when duration <= 0 (loop never ran).
    actual_rate = int(count / elapsed) if elapsed else 0

    print('%s: Emitted %d triggers in %d seconds (actual rate=%s triggers / second)' %
          (trigger, count, elapsed, actual_rate))

    # NOTE: Due to the overhead of dispatcher.dispatch call, we allow for 10% of deviation from
    # requested rate before warning
    if rate_per_trigger and (actual_rate < (rate_per_trigger * 0.9)):
        print('')
        print('Warning, requested rate was %s triggers / second, but only achieved %s '
              'triggers / second' % (rate_per_trigger, actual_rate))
        print('To increase the throughput you will likely need to run multiple instances of '
              'this script in parallel.')
class Inquirer(ActionRunner):
    """
    This runner implements the ability to ask for more input during a workflow.

    It dispatches an inquiry trigger referencing the current action execution
    and, when the live action has a parent, requests that the root live action
    be paused; the live action itself is left in a pending state.
    """

    def __init__(self, runner_id):
        super(Inquirer, self).__init__(runner_id=runner_id)
        # Dispatcher used by run() to emit the inquiry trigger.
        self.trigger_dispatcher = TriggerDispatcher(LOG)

    def pre_run(self):
        """
        Pull this runner's configuration out of ``runner_parameters``.
        """
        super(Inquirer, self).pre_run()

        # TODO: This is awful, but the way "runner_parameters" and other variables get
        #       assigned on the runner instance is even worse. Those arguments should
        #       be passed to the constructor.
        self.schema = self.runner_parameters.get(RUNNER_SCHEMA, DEFAULT_SCHEMA)
        self.roles_param = self.runner_parameters.get(RUNNER_ROLES, [])
        self.users_param = self.runner_parameters.get(RUNNER_USERS, [])
        self.route = self.runner_parameters.get(RUNNER_ROUTE, "")
        # Default TTL is 1440 -- presumably minutes (one day); confirm against the schema.
        self.ttl = self.runner_parameters.get(RUNNER_TTL, 1440)

    def run(self, action_parameters):
        """
        Dispatch the inquiry trigger and report the live action as pending.

        :param action_parameters: Action parameters (not used by this runner).

        :return: ``(status, result, context)`` tuple; status is always
                 ``LIVEACTION_STATUS_PENDING``.
        :rtype: ``tuple``
        """
        liveaction_db = action_utils.get_liveaction_by_id(self.liveaction_id)
        exc = ActionExecution.get(liveaction__id=str(liveaction_db.id))

        # Assemble and dispatch trigger
        trigger_ref = ResourceReference.to_string_reference(
            pack=INQUIRY_TRIGGER['pack'],
            name=INQUIRY_TRIGGER['name']
        )
        trigger_payload = {
            "id": str(exc.id),
            "route": self.route
        }
        self.trigger_dispatcher.dispatch(trigger_ref, trigger_payload)

        # We only want to request a pause if this has a parent
        if liveaction_db.context.get("parent"):
            # Get the root liveaction and request that it pauses
            root_liveaction = action_service.get_root_liveaction(liveaction_db)
            action_service.request_pause(
                root_liveaction,
                self.context.get('user', None)
            )

        result = {
            "schema": self.schema,
            "roles": self.roles_param,
            "users": self.users_param,
            "route": self.route,
            "ttl": self.ttl
        }
        return (LIVEACTION_STATUS_PENDING, result, None)
def _inject_instances(trigger, rate_per_trigger, duration, payload=None):
    """
    Dispatch instances of the provided trigger at the requested rate for the
    given duration.

    :param trigger: Full name / reference of the trigger to dispatch.
    :type trigger: ``str``

    :param rate_per_trigger: Mean dispatch rate used as the lambda of the
                             exponential inter-arrival delay.

    :param duration: How long to keep dispatching.

    :param payload: Payload to dispatch with every trigger instance. Defaults
                    to an empty dict.
    :type payload: ``dict``
    """
    # NOTE: A fresh dict per call replaces the original mutable default
    # argument (payload={}), which was shared across invocations.
    payload = payload if payload is not None else {}

    start = date_utils.get_datetime_utc_now()
    elapsed = 0.0
    count = 0

    dispatcher = TriggerDispatcher()
    while elapsed < duration:
        dispatcher.dispatch(trigger, payload)
        delta = random.expovariate(rate_per_trigger)
        eventlet.sleep(delta)

        # NOTE(review): elapsed is divided by 60 (minutes?) yet the message
        # below reports "seconds" -- the units look inconsistent; confirm the
        # intended unit of ``duration`` before changing behavior.
        elapsed = (date_utils.get_datetime_utc_now() - start).seconds / 60.0
        count += 1

    print("%s: Emitted %d triggers in %d seconds" % (trigger, count, elapsed))
class TriggerInstanceResendController(TriggerInstanceControllerMixin, resource.ResourceController):
    """
    Controller which allows a previously stored trigger instance to be
    re-emitted (re-dispatched) through the trigger dispatcher.
    """

    supported_filters = {}

    def __init__(self, *args, **kwargs):
        super(TriggerInstanceResendController, self).__init__(*args, **kwargs)
        self.trigger_dispatcher = TriggerDispatcher(LOG)

    class TriggerInstancePayload(object):
        """
        Simple holder for an optional trigger-instance payload dict.
        """

        def __init__(self, payload=None):
            # Falsy payloads (None, {}) are normalized to an empty dict.
            self.payload = payload or {}

        def validate(self):
            """
            Ensure the payload, when present, is a dict.

            NOTE(review): ``assert`` is stripped when Python runs with -O, so
            this validation silently disappears there; consider raising an
            explicit exception instead.
            """
            if self.payload:
                assert isinstance(self.payload, dict)

            return True

    def post(self, trigger_instance_id):
        """
        Re-send the provided trigger instance optionally specifying override parameters.

        Handles requests:

            POST /triggerinstance/<id>/re_emit
            POST /triggerinstance/<id>/re_send
        """
        # Note: We only really need parameters here
        existing_trigger_instance = self._get_one_by_id(id=trigger_instance_id,
                                                        permission_type=None,
                                                        requester_user=None)

        # Record the id of the instance being re-sent so consumers can trace
        # the new instance back to the original one.
        new_payload = copy.deepcopy(existing_trigger_instance.payload)
        new_payload['__context'] = {
            'original_id': trigger_instance_id
        }

        try:
            self.trigger_dispatcher.dispatch(existing_trigger_instance.trigger,
                                             new_payload)
            # NOTE(review): "succesfully" is a typo in this user-facing message;
            # left untouched here since it is runtime output.
            return {
                'message': 'Trigger instance %s succesfully re-sent.' % trigger_instance_id,
                'payload': new_payload
            }
        except Exception as e:
            abort(http_client.INTERNAL_SERVER_ERROR, six.text_type(e))
def _refire_trigger_instance(trigger_instance_id, log_):
    """
    Look up a stored trigger instance by id and dispatch it again with its
    original trigger reference and payload.

    :param trigger_instance_id: Id of the TriggerInstance to re-fire.
    :param log_: Logger handed to the dispatcher.
    """
    instance = TriggerInstance.get_by_id(trigger_instance_id)
    TriggerDispatcher(log_).dispatch(trigger=instance.trigger,
                                     payload=instance.payload)
class WebhooksController(RestController):
    """
    REST controller which exposes registered webhook endpoints and dispatches
    incoming webhook payloads as triggers. Hook registrations are kept in sync
    via a TriggerWatcher listening for webhook trigger CUD events.
    """

    def __init__(self, *args, **kwargs):
        super(WebhooksController, self).__init__(*args, **kwargs)
        # Map of hook URL -> sanitized trigger dict for all registered hooks.
        self._hooks = {}
        self._base_url = '/webhooks/'
        self._trigger_types = WEBHOOK_TRIGGER_TYPES.keys()

        self._trigger_dispatcher = TriggerDispatcher(LOG)
        # Exclusive watcher queue named after this class.
        queue_suffix = self.__class__.__name__
        self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,
                                               update_handler=self._handle_update_trigger,
                                               delete_handler=self._handle_delete_trigger,
                                               trigger_types=self._trigger_types,
                                               queue_suffix=queue_suffix,
                                               exclusive=True)
        self._trigger_watcher.start()
        self._register_webhook_trigger_types()

    @jsexpose()
    def get_all(self):
        """
        Return all webhook triggers known by this controller.
        """
        # Return only the hooks known by this controller.
        return [trigger for trigger in six.itervalues(self._hooks)]

    @jsexpose()
    def get_one(self, name):
        """
        Return the webhook trigger registered under ``name`` or 404.
        """
        hook = self._hooks.get(name, None)

        if not hook:
            abort(http_client.NOT_FOUND)
            return

        return hook

    @request_user_has_webhook_permission(permission_type=PermissionType.WEBHOOK_SEND)
    @jsexpose(arg_types=[str], status_code=http_client.ACCEPTED)
    def post(self, *args, **kwargs):
        """
        Handle an incoming webhook POST: parse the body, build a trace context
        and dispatch the payload as a trigger for the matching hook.
        """
        # Path segments form the hook name.
        hook = '/'.join(args)  # TODO: There must be a better way to do this.

        # Note: For backward compatibility reasons we default to application/json if content
        # type is not explicitly provided
        content_type = pecan.request.headers.get('Content-Type', 'application/json')
        body = pecan.request.body

        try:
            body = self._parse_request_body(content_type=content_type, body=body)
        except Exception as e:
            self._log_request('Failed to parse request body: %s.' % (str(e)), pecan.request)
            msg = 'Failed to parse request body "%s": %s' % (body, str(e))
            return pecan.abort(http_client.BAD_REQUEST, msg)

        headers = self._get_headers_as_dict(pecan.request.headers)

        # If the webhook contains a trace-tag header use that, else create a unique trace-tag.
        trace_context = self._create_trace_context(trace_tag=headers.pop(TRACE_TAG_HEADER, None),
                                                   hook=hook)

        # The special "st2" hook carries an explicit trigger/payload pair.
        if hook == 'st2' or hook == 'st2/':
            return self._handle_st2_webhook(body, trace_context=trace_context)

        if not self._is_valid_hook(hook):
            self._log_request('Invalid hook.', pecan.request)
            msg = 'Webhook %s not registered with st2' % hook
            return pecan.abort(http_client.NOT_FOUND, msg)

        trigger = self._get_trigger_for_hook(hook)
        payload = {}

        payload['headers'] = headers
        payload['body'] = body
        self._trigger_dispatcher.dispatch(trigger, payload=payload, trace_context=trace_context)

        return body

    def _parse_request_body(self, content_type, body):
        """
        Parse the raw request body based on its Content-Type.

        :raises ValueError: For unsupported content types.
        """
        if content_type == 'application/json':
            self._log_request('Parsing request body as JSON', request=pecan.request)
            body = json.loads(body)
        elif content_type in ['application/x-www-form-urlencoded', 'multipart/form-data']:
            self._log_request('Parsing request body as form encoded data', request=pecan.request)
            body = urlparse.parse_qs(body)
        else:
            raise ValueError('Unsupported Content-Type: "%s"' % (content_type))

        return body

    def _handle_st2_webhook(self, body, trace_context):
        """
        Dispatch a generic "st2" webhook whose body specifies the trigger
        reference and payload explicitly.
        """
        trigger = body.get('trigger', None)
        payload = body.get('payload', None)
        if not trigger:
            msg = 'Trigger not specified.'
            return pecan.abort(http_client.BAD_REQUEST, msg)

        self._trigger_dispatcher.dispatch(trigger, payload=payload, trace_context=trace_context)

        return body

    def _is_valid_hook(self, hook):
        # TODO: Validate hook payload with payload_schema.
        return hook in self._hooks

    def _get_trigger_for_hook(self, hook):
        return self._hooks[hook]

    def _register_webhook_trigger_types(self):
        """
        Ensure all built-in webhook trigger types exist in the database.
        """
        for trigger_type in WEBHOOK_TRIGGER_TYPES.values():
            trigger_service.create_trigger_type_db(trigger_type)

    def _create_trace_context(self, trace_tag, hook):
        """
        Build a TraceContext, generating a unique tag when none was provided.
        """
        # if no trace_tag then create a unique one
        if not trace_tag:
            trace_tag = 'webhook-%s-%s' % (hook, uuid.uuid4().hex)
        return TraceContext(trace_tag=trace_tag)

    def add_trigger(self, trigger):
        """
        Start serving the webhook URL declared in the trigger's parameters.
        """
        # Note: Permission checking for creating and deleting a webhook is done during rule
        # creation
        url = trigger['parameters']['url']
        LOG.info('Listening to endpoint: %s', urljoin(self._base_url, url))
        self._hooks[url] = trigger

    def update_trigger(self, trigger):
        # Intentionally a no-op: webhook registrations have nothing to update.
        pass

    def remove_trigger(self, trigger):
        """
        Stop serving the webhook URL declared in the trigger's parameters.
        """
        # Note: Permission checking for creating and deleting a webhook is done during rule
        # creation
        url = trigger['parameters']['url']

        if url in self._hooks:
            LOG.info('Stop listening to endpoint: %s', urljoin(self._base_url, url))
            del self._hooks[url]

    def _get_headers_as_dict(self, headers):
        """
        Copy a headers mapping into a plain dict.
        """
        headers_dict = {}
        for key, value in headers.items():
            headers_dict[key] = value
        return headers_dict

    def _log_request(self, msg, request, log_method=LOG.debug):
        """
        Log a message together with the request's headers and body.
        """
        headers = self._get_headers_as_dict(request.headers)
        body = str(request.body)
        log_method('%s\n\trequest.header: %s.\n\trequest.body: %s.', msg, headers, body)

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        LOG.debug('Calling "add_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        LOG.debug('Calling "update_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.remove_trigger(trigger=trigger)

    def _sanitize_trigger(self, trigger):
        """
        Return the trigger's raw data with its id converted to a plain string.
        """
        sanitized = trigger._data
        if 'id' in sanitized:
            # Friendly objectid rather than the MongoEngine representation.
            sanitized['id'] = str(sanitized['id'])
        return sanitized
class SensorService(object):
    """
    Instance of this class is passed to the sensor instance and exposes "public"
    methods which can be called by the sensor.
    """

    def __init__(self, sensor_wrapper):
        self._sensor_wrapper = sensor_wrapper
        self._logger = self._sensor_wrapper._logger
        self._dispatcher = TriggerDispatcher(self._logger)
        # Datastore access is scoped to this sensor's pack / class name.
        self._datastore_service = SensorDatastoreService(
            logger=self._logger,
            pack_name=self._sensor_wrapper._pack,
            class_name=self._sensor_wrapper._class_name,
            api_username='******')
        self._client = None

    @property
    def datastore_service(self):
        # Read-only accessor for the underlying datastore service.
        return self._datastore_service

    def get_logger(self, name):
        """
        Retrieve an instance of a logger to be used by the sensor class.
        """
        logger_name = '%s.%s' % (self._sensor_wrapper._logger.name, name)
        logger = logging.getLogger(logger_name)
        logger.propagate = True
        return logger

    ##################################
    # General methods
    ##################################

    def get_user_info(self):
        # Delegates to the datastore service.
        return self._datastore_service.get_user_info()

    ##################################
    # Sensor related methods
    ##################################

    def dispatch(self, trigger, payload=None, trace_tag=None):
        """
        Method which dispatches the trigger.

        :param trigger: Full name / reference of the trigger.
        :type trigger: ``str``

        :param payload: Trigger payload.
        :type payload: ``dict``

        :param trace_tag: Tracer to track the triggerinstance.
        :type trace_tag: ``str``
        """
        # Empty strings and None both result in no trace context.
        trace_context = TraceContext(trace_tag=trace_tag) if trace_tag else None
        self._logger.debug('Added trace_context %s to trigger %s.', trace_context, trigger)
        self.dispatch_with_context(trigger, payload=payload, trace_context=trace_context)

    def dispatch_with_context(self, trigger, payload=None, trace_context=None):
        """
        Method which dispatches the trigger.

        :param trigger: Full name / reference of the trigger.
        :type trigger: ``str``

        :param payload: Trigger payload.
        :type payload: ``dict``

        :param trace_context: Trace context to associate with Trigger.
        :type trace_context: ``st2common.api.models.api.trace.TraceContext``
        """
        # Tracks whether the payload complies with the trigger_type schema.
        is_valid = True
        try:
            validate_trigger_payload(trigger_type_ref=trigger, payload=payload)
        except (ValidationError, Exception) as e:
            is_valid = False
            self._logger.warn('Failed to validate payload (%s) for trigger "%s": %s' %
                              (str(payload), trigger, str(e)))

        # If validation is disabled, still dispatch a trigger even if it failed validation
        # This condition prevents unexpected restriction.
        if not is_valid and cfg.CONF.system.validate_trigger_payload:
            self._logger.warn('Trigger payload validation failed and validation is enabled, not '
                              'dispatching a trigger "%s" (%s)' % (trigger, str(payload)))
            return None

        self._logger.debug('Dispatching trigger %s with payload %s.', trigger, payload)
        self._dispatcher.dispatch(trigger, payload=payload, trace_context=trace_context)

    ##################################
    # Methods for datastore management
    ##################################

    def list_values(self, local=True, prefix=None):
        # Thin delegation to the datastore service.
        return self.datastore_service.list_values(local=local, prefix=prefix)

    def get_value(self, name, local=True, scope=SYSTEM_SCOPE, decrypt=False):
        # Thin delegation to the datastore service.
        return self.datastore_service.get_value(name=name, local=local, scope=scope,
                                                decrypt=decrypt)

    def set_value(self, name, value, ttl=None, local=True, scope=SYSTEM_SCOPE, encrypt=False):
        # Thin delegation to the datastore service.
        return self.datastore_service.set_value(name=name, value=value, ttl=ttl, local=local,
                                                scope=scope, encrypt=encrypt)

    def delete_value(self, name, local=True, scope=SYSTEM_SCOPE):
        # Thin delegation to the datastore service.
        return self.datastore_service.delete_value(name=name, local=local, scope=scope)
class TriggerDispatcherService(object):
    """
    Class for handling dispatching of trigger.
    """

    def __init__(self, logger):
        self._logger = logger
        self._dispatcher = TriggerDispatcher(self._logger)

    def dispatch(self, trigger, payload=None, trace_tag=None, throw_on_validation_error=False):
        """
        Method which dispatches the trigger.

        :param trigger: Reference to the TriggerTypeDB (<pack>.<name>) or TriggerDB object.
        :type trigger: ``str``

        :param payload: Trigger payload.
        :type payload: ``dict``

        :param trace_tag: Tracer to track the triggerinstance.
        :type trace_tag: ``str``

        :param throw_on_validation_error: True to throw on validation error (if
                                          validate_payload is True) instead of logging the error.
        :type throw_on_validation_error: ``boolean``
        """
        # Empty strings and None both result in no trace context.
        trace_context = TraceContext(trace_tag=trace_tag) if trace_tag else None
        self._logger.debug('Added trace_context %s to trigger %s.', trace_context, trigger)
        return self.dispatch_with_context(trigger, payload=payload, trace_context=trace_context,
                                          throw_on_validation_error=throw_on_validation_error)

    def dispatch_with_context(self, trigger, payload=None, trace_context=None,
                              throw_on_validation_error=False):
        """
        Method which dispatches the trigger.

        :param trigger: Reference to the TriggerTypeDB (<pack>.<name>) or TriggerDB object.
        :type trigger: ``str``

        :param payload: Trigger payload.
        :type payload: ``dict``

        :param trace_context: Trace context to associate with Trigger.
        :type trace_context: ``st2common.api.models.api.trace.TraceContext``

        :param throw_on_validation_error: True to throw on validation error (if
                                          validate_payload is True) instead of logging the error.
        :type throw_on_validation_error: ``boolean``
        """
        # Note: We perform validation even if it's disabled in the config so we can at least warn
        # the user if validation fails (but not throw if it's disabled)
        try:
            validate_trigger_payload(trigger_type_ref=trigger, payload=payload,
                                     throw_on_inexistent_trigger=True)
        except (ValidationError, ValueError, Exception) as e:
            self._logger.warn('Failed to validate payload (%s) for trigger "%s": %s' %
                              (str(payload), trigger, six.text_type(e)))

            # If validation is disabled, still dispatch a trigger even if it failed validation
            # This condition prevents unexpected restriction.
            if cfg.CONF.system.validate_trigger_payload:
                msg = ('Trigger payload validation failed and validation is enabled, not '
                       'dispatching a trigger "%s" (%s): %s' % (trigger, str(payload),
                                                                six.text_type(e)))

                if throw_on_validation_error:
                    raise ValueError(msg)

                self._logger.warn(msg)
                return None

        # Reached when validation passed, or when it failed but payload
        # validation is disabled in the config.
        self._logger.debug('Dispatching trigger %s with payload %s.', trigger, payload)
        return self._dispatcher.dispatch(trigger, payload=payload, trace_context=trace_context)
class ActionsController(resource.ContentPackResourceController):
    """
    Implements the RESTful web endpoint that handles
    the lifecycle of Actions in the system.
    """

    views = ActionViewsController()

    model = ActionAPI
    access = Action
    supported_filters = {
        'name': 'name',
        'pack': 'pack',
        'tags': 'name'
    }

    query_options = {
        'sort': ['pack', 'name']
    }

    valid_exclude_attributes = [
        'parameters',
        'notify'
    ]

    def __init__(self, *args, **kwargs):
        super(ActionsController, self).__init__(*args, **kwargs)
        # Used to emit internal triggers when pack data files are written.
        self._trigger_dispatcher = TriggerDispatcher(LOG)

    def get_all(self, exclude_attributes=None, include_attributes=None, sort=None, offset=0,
                limit=None, requester_user=None, **raw_filters):
        """
        List actions, delegating filtering / pagination to the base controller.
        """
        return super(ActionsController, self)._get_all(exclude_fields=exclude_attributes,
                                                       include_fields=include_attributes,
                                                       sort=sort,
                                                       offset=offset,
                                                       limit=limit,
                                                       raw_filters=raw_filters,
                                                       requester_user=requester_user)

    def get_one(self, ref_or_id, requester_user):
        """
        Retrieve a single action by reference or id.
        """
        return super(ActionsController, self)._get_one(ref_or_id,
                                                       requester_user=requester_user,
                                                       permission_type=PermissionType.ACTION_VIEW)

    def post(self, action, requester_user):
        """
        Create a new action.

        Handles requests:
            POST /actions/
        """
        permission_type = PermissionType.ACTION_CREATE
        rbac_utils = get_rbac_backend().get_utils_class()
        rbac_utils.assert_user_has_resource_api_permission(user_db=requester_user,
                                                           resource_api=action,
                                                           permission_type=permission_type)

        try:
            # Perform validation
            validate_not_part_of_system_pack(action)
            action_validator.validate_action(action)
        except (ValidationError, ValueError, ValueValidationException,
                InvalidActionParameterException) as e:
            LOG.exception('Unable to create action data=%s', action)
            abort(http_client.BAD_REQUEST, six.text_type(e))
            return

        # Write pack data files to disk (if any are provided)
        data_files = getattr(action, 'data_files', [])
        written_data_files = []

        if data_files:
            written_data_files = self._handle_data_files(pack_ref=action.pack,
                                                         data_files=data_files)

        action_model = ActionAPI.to_model(action)

        LOG.debug('/actions/ POST verified ActionAPI object=%s', action)
        action_db = Action.add_or_update(action_model)
        LOG.debug('/actions/ POST saved ActionDB object=%s', action_db)

        # Dispatch an internal trigger for each written data file. This way users
        # can automate committing these files to git using a StackStorm rule.
        if written_data_files:
            self._dispatch_trigger_for_written_data_files(action_db=action_db,
                                                          written_data_files=written_data_files)

        # NOTE(review): 'acion_db' looks like a typo of 'action_db' (the delete
        # method spells it correctly) -- left as-is since it is a runtime key.
        extra = {'acion_db': action_db}
        LOG.audit('Action created. Action.id=%s' % (action_db.id), extra=extra)

        action_api = ActionAPI.from_model(action_db)

        return Response(json=action_api, status=http_client.CREATED)

    def put(self, action, ref_or_id, requester_user):
        """
        Update an existing action, optionally writing new pack data files.
        """
        action_db = self._get_by_ref_or_id(ref_or_id=ref_or_id)

        # Assert permissions
        permission_type = PermissionType.ACTION_MODIFY
        rbac_utils = get_rbac_backend().get_utils_class()
        rbac_utils.assert_user_has_resource_db_permission(user_db=requester_user,
                                                          resource_db=action_db,
                                                          permission_type=permission_type)

        action_id = action_db.id

        if not getattr(action, 'pack', None):
            action.pack = action_db.pack

        # Perform validation
        validate_not_part_of_system_pack(action)
        action_validator.validate_action(action)

        # Write pack data files to disk (if any are provided)
        data_files = getattr(action, 'data_files', [])
        written_data_files = []

        if data_files:
            written_data_files = self._handle_data_files(pack_ref=action.pack,
                                                         data_files=data_files)

        try:
            action_db = ActionAPI.to_model(action)
            LOG.debug('/actions/ PUT incoming action: %s', action_db)
            # Preserve the original id so add_or_update performs an update.
            action_db.id = action_id
            action_db = Action.add_or_update(action_db)
            LOG.debug('/actions/ PUT after add_or_update: %s', action_db)
        except (ValidationError, ValueError) as e:
            LOG.exception('Unable to update action data=%s', action)
            abort(http_client.BAD_REQUEST, six.text_type(e))
            return

        # Dispatch an internal trigger for each written data file. This way users
        # can automate committing these files to git using a StackStorm rule.
        if written_data_files:
            self._dispatch_trigger_for_written_data_files(action_db=action_db,
                                                          written_data_files=written_data_files)

        action_api = ActionAPI.from_model(action_db)
        LOG.debug('PUT /actions/ client_result=%s', action_api)

        return action_api

    def delete(self, ref_or_id, requester_user):
        """
        Delete an action.

        Handles requests:
            POST /actions/1?_method=delete
            DELETE /actions/1
            DELETE /actions/mypack.myaction
        """
        action_db = self._get_by_ref_or_id(ref_or_id=ref_or_id)
        action_id = action_db.id

        permission_type = PermissionType.ACTION_DELETE
        rbac_utils = get_rbac_backend().get_utils_class()
        rbac_utils.assert_user_has_resource_db_permission(user_db=requester_user,
                                                          resource_db=action_db,
                                                          permission_type=permission_type)

        try:
            validate_not_part_of_system_pack(action_db)
        except ValueValidationException as e:
            abort(http_client.BAD_REQUEST, six.text_type(e))

        LOG.debug('DELETE /actions/ lookup with ref_or_id=%s found object: %s',
                  ref_or_id, action_db)

        try:
            Action.delete(action_db)
        except Exception as e:
            LOG.error('Database delete encountered exception during delete of id="%s". '
                      'Exception was %s', action_id, e)
            abort(http_client.INTERNAL_SERVER_ERROR, six.text_type(e))
            return

        extra = {'action_db': action_db}
        LOG.audit('Action deleted. Action.id=%s' % (action_db.id), extra=extra)

        return Response(status=http_client.NO_CONTENT)

    def _handle_data_files(self, pack_ref, data_files):
        """
        Method for handling action data files.

        This method performs two tasks:

        1. Writes files to disk
        2. Updates affected PackDB model
        """
        # Write files to disk
        written_file_paths = self._write_data_files_to_disk(pack_ref=pack_ref,
                                                            data_files=data_files)

        # Update affected PackDB model (update a list of files)
        # Update PackDB
        self._update_pack_model(pack_ref=pack_ref, data_files=data_files,
                                written_file_paths=written_file_paths)

        return written_file_paths

    def _write_data_files_to_disk(self, pack_ref, data_files):
        """
        Write files to disk.
        """
        written_file_paths = []

        for data_file in data_files:
            file_path = data_file['file_path']
            content = data_file['content']

            # Resolve the file path relative to the pack's action resources.
            file_path = get_pack_resource_file_abs_path(pack_ref=pack_ref,
                                                        resource_type='action',
                                                        file_path=file_path)

            LOG.debug('Writing data file "%s" to "%s"' % (str(data_file), file_path))

            try:
                self._write_data_file(pack_ref=pack_ref, file_path=file_path, content=content)
            except (OSError, IOError) as e:
                # Throw a more user-friendly exception on Permission denied error
                if e.errno == errno.EACCES:
                    msg = ('Unable to write data to "%s" (permission denied). Make sure '
                           'permissions for that pack directory are configured correctly so '
                           'st2api can write to it.' % (file_path))
                    raise ValueError(msg)

                raise e

            written_file_paths.append(file_path)

        return written_file_paths

    def _update_pack_model(self, pack_ref, data_files, written_file_paths):
        """
        Update PackDB models (update files list).
        """
        # A list of paths relative to the pack directory for new files
        file_paths = []
        for file_path in written_file_paths:
            file_path = get_relative_path_to_pack_file(pack_ref=pack_ref, file_path=file_path)
            file_paths.append(file_path)

        pack_db = Pack.get_by_ref(pack_ref)
        # De-duplicate via a set, then store back as a list.
        pack_db.files = set(pack_db.files)
        pack_db.files.update(set(file_paths))
        pack_db.files = list(pack_db.files)
        pack_db = Pack.add_or_update(pack_db)

        return pack_db

    def _write_data_file(self, pack_ref, file_path, content):
        """
        Write data file on disk.
        """
        # Throw if pack directory doesn't exist
        pack_base_path = get_pack_base_path(pack_name=pack_ref)
        if not os.path.isdir(pack_base_path):
            raise ValueError('Directory for pack "%s" doesn\'t exist' % (pack_ref))

        # Create pack sub-directory tree if it doesn't exist
        directory = os.path.dirname(file_path)

        if not os.path.isdir(directory):
            # NOTE: We apply same permission bits as we do on pack install. If we don't do that,
            # st2api won't be able to write to pack sub-directory
            mode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH
            os.makedirs(directory, mode)

        with open(file_path, 'w') as fp:
            fp.write(content)

    def _dispatch_trigger_for_written_data_files(self, action_db, written_data_files):
        """
        Emit one internal "file written" trigger per written data file.
        """
        trigger = ACTION_FILE_WRITTEN_TRIGGER['name']
        host_info = get_host_info()

        for file_path in written_data_files:
            payload = {
                'ref': action_db.ref,
                'file_path': file_path,
                'host_info': host_info
            }
            self._trigger_dispatcher.dispatch(trigger=trigger, payload=payload)
class SensorService(object):
    """
    Facade object handed to every sensor instance. It exposes the "public"
    operations a sensor may perform: namespaced logging, trigger dispatch
    (optionally tagged with a trace context) and datastore access.
    """

    def __init__(self, sensor_wrapper):
        self._sensor_wrapper = sensor_wrapper
        self._logger = sensor_wrapper._logger
        self._dispatcher = TriggerDispatcher(self._logger)
        self._datastore_service = DatastoreService(
            logger=self._logger,
            pack_name=sensor_wrapper._pack,
            class_name=sensor_wrapper._class_name,
            api_username='******')
        self._client = None

    def get_logger(self, name):
        """
        Retrieve an instance of a logger to be used by the sensor class.

        :param name: Suffix appended to the sensor wrapper logger's name.
        :type name: ``str``
        """
        full_name = '%s.%s' % (self._sensor_wrapper._logger.name, name)
        child = logging.getLogger(full_name)
        child.propagate = True
        return child

    def dispatch(self, trigger, payload=None, trace_tag=None):
        """
        Dispatch the trigger, wrapping an optional trace tag into a TraceContext.

        :param trigger: Full name / reference of the trigger.
        :type trigger: ``str``

        :param payload: Trigger payload.
        :type payload: ``dict``

        :param trace_tag: Tracer to track the triggerinstance.
        :type trace_tag: ``str``
        """
        if trace_tag:
            context = TraceContext(trace_tag=trace_tag)
        else:
            # Empty strings and None alike mean "no trace context".
            context = None
        self.dispatch_with_context(trigger, payload=payload, trace_context=context)

    def dispatch_with_context(self, trigger, payload=None, trace_context=None):
        """
        Dispatch the trigger with an explicit (possibly None) trace context.

        :param trigger: Full name / reference of the trigger.
        :type trigger: ``str``

        :param payload: Trigger payload.
        :type payload: ``dict``

        :param trace_context: Trace context to associate with Trigger.
        :type trace_context: ``st2common.api.models.api.trace.TraceContext``
        """
        self._dispatcher.dispatch(trigger, payload=payload, trace_context=trace_context)

    ##################################
    # Methods for datastore management
    ##################################

    def list_values(self, local=True, prefix=None):
        """Proxy to the datastore service's list_values."""
        return self._datastore_service.list_values(local, prefix)

    def get_value(self, name, local=True):
        """Proxy to the datastore service's get_value."""
        return self._datastore_service.get_value(name, local)

    def set_value(self, name, value, ttl=None, local=True):
        """Proxy to the datastore service's set_value."""
        return self._datastore_service.set_value(name, value, ttl, local)

    def delete_value(self, name, local=True):
        """Proxy to the datastore service's delete_value."""
        return self._datastore_service.delete_value(name, local)
class TriggerDispatcherService(object):
    """
    Class for handling dispatching of trigger.
    """

    def __init__(self, logger):
        self._logger = logger
        self._dispatcher = TriggerDispatcher(self._logger)

    def dispatch(self, trigger, payload=None, trace_tag=None, throw_on_validation_error=False):
        """
        Method which dispatches the trigger.

        :param trigger: Reference to the TriggerTypeDB (<pack>.<name>) or TriggerDB object.
        :type trigger: ``str``

        :param payload: Trigger payload.
        :type payload: ``dict``

        :param trace_tag: Tracer to track the triggerinstance.
        :type trace_tag: ``str``

        :param throw_on_validation_error: True to throw on validation error (if
                                          validate_payload is True) instead of logging the error.
        :type throw_on_validation_error: ``boolean``
        """
        # Empty strings and None both result in no trace context.
        trace_context = TraceContext(trace_tag=trace_tag) if trace_tag else None
        self._logger.debug("Added trace_context %s to trigger %s.", trace_context, trigger)
        return self.dispatch_with_context(
            trigger,
            payload=payload,
            trace_context=trace_context,
            throw_on_validation_error=throw_on_validation_error,
        )

    def dispatch_with_context(self, trigger, payload=None, trace_context=None,
                              throw_on_validation_error=False):
        """
        Method which dispatches the trigger.

        :param trigger: Reference to the TriggerTypeDB (<pack>.<name>) or TriggerDB object.
        :type trigger: ``str``

        :param payload: Trigger payload.
        :type payload: ``dict``

        :param trace_context: Trace context to associate with Trigger.
        :type trace_context: ``st2common.api.models.api.trace.TraceContext``

        :param throw_on_validation_error: True to throw on validation error (if
                                          validate_payload is True) instead of logging the error.
        :type throw_on_validation_error: ``boolean``
        """
        # Note: We perform validation even if it's disabled in the config so we can at least warn
        # the user if validation fails (but not throw if it's disabled)
        try:
            validate_trigger_payload(
                trigger_type_ref=trigger,
                payload=payload,
                throw_on_inexistent_trigger=True,
            )
        except (ValidationError, ValueError, Exception) as e:
            self._logger.warn(
                'Failed to validate payload (%s) for trigger "%s": %s'
                % (str(payload), trigger, six.text_type(e)))

            # If validation is disabled, still dispatch a trigger even if it failed validation
            # This condition prevents unexpected restriction.
            if cfg.CONF.system.validate_trigger_payload:
                msg = (
                    "Trigger payload validation failed and validation is enabled, not "
                    'dispatching a trigger "%s" (%s): %s'
                    % (trigger, str(payload), six.text_type(e)))

                if throw_on_validation_error:
                    raise ValueError(msg)

                self._logger.warn(msg)
                return None

        # Reached when validation passed, or when it failed but payload
        # validation is disabled in the config.
        self._logger.debug("Dispatching trigger %s with payload %s.", trigger, payload)
        return self._dispatcher.dispatch(trigger, payload=payload, trace_context=trace_context)
class St2Timer(object):
    """
    A timer interface that uses APScheduler 3.0.

    Timer triggers (interval / date / cron) are translated into APScheduler
    jobs; when a job fires, a trigger instance is dispatched.
    """
    def __init__(self, local_timezone=None):
        self._timezone = local_timezone
        self._scheduler = BlockingScheduler(timezone=self._timezone)
        # Maps trigger id -> APScheduler job id so jobs can be removed later.
        self._jobs = {}
        self._trigger_types = TIMER_TRIGGER_TYPES.keys()
        self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,
                                               update_handler=self._handle_update_trigger,
                                               delete_handler=self._handle_delete_trigger,
                                               trigger_types=self._trigger_types,
                                               queue_suffix='timers')
        self._trigger_dispatcher = TriggerDispatcher(LOG)

    def start(self):
        """
        Register the timer trigger types and start the watcher and scheduler.
        """
        self._register_timer_trigger_types()
        self._trigger_watcher.start()
        self._scheduler.start()

    def cleanup(self):
        self._scheduler.shutdown(wait=True)

    def add_trigger(self, trigger):
        self._add_job_to_scheduler(trigger)

    def update_trigger(self, trigger):
        self.remove_trigger(trigger)
        self.add_trigger(trigger)

    def remove_trigger(self, trigger):
        """
        Unschedule the job associated with the provided trigger (no-op with a
        log message if no job is known for it).
        """
        # NOTE: "trigger_id" avoids shadowing the builtin "id".
        trigger_id = trigger['id']

        try:
            job_id = self._jobs[trigger_id]
        except KeyError:
            LOG.info('Job not found: %s', trigger_id)
            return

        self._scheduler.remove_job(job_id)
        # Also drop the mapping so self._jobs doesn't accumulate stale entries
        # for triggers which have already been removed.
        del self._jobs[trigger_id]

    def _add_job_to_scheduler(self, trigger):
        """
        Validate the trigger parameters and schedule the corresponding
        APScheduler job (skipping already-expired one-shot timers).
        """
        trigger_type_ref = trigger['type']
        trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
        try:
            jsonschema.validate(trigger['parameters'],
                                trigger_type['parameters_schema'])
        except jsonschema.ValidationError as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'], e, exc_info=True)
            raise  # Or should we just return?

        time_spec = trigger['parameters']
        time_zone = aps_utils.astimezone(trigger['parameters'].get('timezone'))

        time_type = None

        if trigger_type['name'] == 'st2.IntervalTimer':
            unit = time_spec.get('unit', None)
            value = time_spec.get('delta', None)
            time_type = IntervalTrigger(**{unit: value, 'timezone': time_zone})
        elif trigger_type['name'] == 'st2.DateTimer':
            # Raises an exception if date string isn't a valid one.
            dat = date_parser.parse(time_spec.get('date', None))
            time_type = DateTrigger(dat, timezone=time_zone)
        elif trigger_type['name'] == 'st2.CronTimer':
            cron = time_spec.copy()
            cron['timezone'] = time_zone
            time_type = CronTrigger(**cron)

        utc_now = date_utils.get_datetime_utc_now()

        if hasattr(time_type, 'run_date') and utc_now > time_type.run_date:
            LOG.warning('Not scheduling expired timer: %s : %s',
                        trigger['parameters'], time_type.run_date)
        else:
            self._add_job(trigger, time_type)

    def _add_job(self, trigger, time_type, replace=True):
        """
        Add the job to the scheduler and record the trigger id -> job id mapping.
        """
        try:
            job = self._scheduler.add_job(self._emit_trigger_instance,
                                          trigger=time_type,
                                          args=[trigger],
                                          replace_existing=replace)
            LOG.info('Job %s scheduled.', job.id)
            self._jobs[trigger['id']] = job.id
        except Exception as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'], e, exc_info=True)

    def _emit_trigger_instance(self, trigger):
        """
        Scheduler callback - dispatch a trigger instance for the fired timer.
        """
        utc_now = date_utils.get_datetime_utc_now()
        LOG.info('Timer fired at: %s. Trigger: %s', str(utc_now), trigger)

        payload = {
            'executed_at': str(utc_now),
            'schedule': trigger['parameters'].get('time')
        }
        self._trigger_dispatcher.dispatch(trigger, payload)

    def _register_timer_trigger_types(self):
        return trigger_services.add_trigger_models(TIMER_TRIGGER_TYPES.values())

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        LOG.debug('Calling "add_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        LOG.debug('Calling "update_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.remove_trigger(trigger=trigger)

    def _sanitize_trigger(self, trigger):
        """
        Convert a trigger model into a plain dict with a string id.
        """
        sanitized = trigger._data
        if 'id' in sanitized:
            # Friendly objectid rather than the MongoEngine representation.
            sanitized['id'] = str(sanitized['id'])
        return sanitized
class WebhooksController(RestController):
    """
    Pecan REST controller which receives inbound webhook HTTP requests and
    dispatches a trigger instance for every trigger registered for the hook.
    """

    def __init__(self, *args, **kwargs):
        super(WebhooksController, self).__init__(*args, **kwargs)
        self._hooks = HooksHolder()
        self._base_url = '/webhooks/'
        self._trigger_types = WEBHOOK_TRIGGER_TYPES.keys()

        self._trigger_dispatcher = TriggerDispatcher(LOG)
        queue_suffix = self.__class__.__name__
        # Watch trigger CRUD events so the set of known hooks stays in sync with
        # the triggers stored in the database.
        self._trigger_watcher = TriggerWatcher(
            create_handler=self._handle_create_trigger,
            update_handler=self._handle_update_trigger,
            delete_handler=self._handle_delete_trigger,
            trigger_types=self._trigger_types,
            queue_suffix=queue_suffix,
            exclusive=True)
        self._trigger_watcher.start()
        self._register_webhook_trigger_types()

    @jsexpose()
    def get_all(self):
        """List all hooks known by this controller."""
        # Return only the hooks known by this controller.
        return self._hooks.get_all()

    @jsexpose()
    def get_one(self, name):
        """Return one trigger registered for the given hook, or 404."""
        triggers = self._hooks.get_triggers_for_hook(name)

        if not triggers:
            abort(http_client.NOT_FOUND)
            return

        # For demonstration purpose return 1st
        return triggers[0]

    @request_user_has_webhook_permission(permission_type=PermissionType.WEBHOOK_SEND)
    @jsexpose(arg_types=[str], status_code=http_client.ACCEPTED)
    def post(self, *args, **kwargs):
        """
        Accept an inbound webhook request, parse its body based on the request
        Content-Type and dispatch a trigger instance per registered trigger.
        """
        hook = '/'.join(args)  # TODO: There must be a better way to do this.

        # Note: For backward compatibility reasons we default to application/json if content
        # type is not explicitly provided
        content_type = pecan.request.headers.get('Content-Type', 'application/json')
        content_type = parse_content_type_header(content_type=content_type)[0]
        body = pecan.request.body

        try:
            body = self._parse_request_body(content_type=content_type, body=body)
        except Exception as e:
            self._log_request('Failed to parse request body: %s.' % (str(e)), pecan.request)
            msg = 'Failed to parse request body "%s": %s' % (body, str(e))
            return pecan.abort(http_client.BAD_REQUEST, msg)

        headers = self._get_headers_as_dict(pecan.request.headers)
        # If webhook contains a trace-tag use that, else create a unique trace-tag.
        trace_context = self._create_trace_context(
            trace_tag=headers.pop(TRACE_TAG_HEADER, None), hook=hook)

        if hook == 'st2' or hook == 'st2/':
            # Special-cased system hook: trigger name is carried in the body.
            return self._handle_st2_webhook(body, trace_context=trace_context)

        if not self._is_valid_hook(hook):
            self._log_request('Invalid hook.', pecan.request)
            msg = 'Webhook %s not registered with st2' % hook
            return pecan.abort(http_client.NOT_FOUND, msg)

        triggers = self._hooks.get_triggers_for_hook(hook)
        payload = {}

        payload['headers'] = headers
        payload['body'] = body
        # Dispatch trigger instance for each of the trigger found
        for trigger in triggers:
            self._trigger_dispatcher.dispatch(trigger, payload=payload,
                                              trace_context=trace_context)

        return body

    def _parse_request_body(self, content_type, body):
        """
        Parse the raw request body according to the (already normalized)
        content type; raises ValueError for unsupported types.
        """
        if content_type == 'application/json':
            self._log_request('Parsing request body as JSON', request=pecan.request)
            body = json.loads(body)
        elif content_type in ['application/x-www-form-urlencoded', 'multipart/form-data']:
            self._log_request('Parsing request body as form encoded data',
                              request=pecan.request)
            body = urlparse.parse_qs(body)
        else:
            raise ValueError('Unsupported Content-Type: "%s"' % (content_type))

        return body

    def _handle_st2_webhook(self, body, trace_context):
        """Dispatch a trigger explicitly named in the request body."""
        trigger = body.get('trigger', None)
        payload = body.get('payload', None)
        if not trigger:
            msg = 'Trigger not specified.'
            return pecan.abort(http_client.BAD_REQUEST, msg)
        self._trigger_dispatcher.dispatch(trigger, payload=payload,
                                          trace_context=trace_context)

        return body

    def _is_valid_hook(self, hook):
        # TODO: Validate hook payload with payload_schema.
        return hook in self._hooks

    def _register_webhook_trigger_types(self):
        for trigger_type in WEBHOOK_TRIGGER_TYPES.values():
            trigger_service.create_trigger_type_db(trigger_type)

    def _create_trace_context(self, trace_tag, hook):
        # if no trace_tag then create a unique one
        if not trace_tag:
            trace_tag = 'webhook-%s-%s' % (hook, uuid.uuid4().hex)
        return TraceContext(trace_tag=trace_tag)

    def add_trigger(self, trigger):
        """Start serving the webhook endpoint described by the trigger."""
        # Note: Permission checking for creating and deleting a webhook is done during rule
        # creation
        url = self._get_normalized_url(trigger)
        LOG.info('Listening to endpoint: %s', urljoin(self._base_url, url))
        self._hooks.add_hook(url, trigger)

    def update_trigger(self, trigger):
        pass

    def remove_trigger(self, trigger):
        """Stop serving the webhook endpoint described by the trigger."""
        # Note: Permission checking for creating and deleting a webhook is done during rule
        # creation
        url = self._get_normalized_url(trigger)

        removed = self._hooks.remove_hook(url, trigger)
        if removed:
            LOG.info('Stop listening to endpoint: %s', urljoin(self._base_url, url))

    def _get_normalized_url(self, trigger):
        """
        remove the trailing and leading / so that the hook url and those coming
        from trigger parameters end up being the same.
        """
        return trigger['parameters']['url'].strip('/')

    def _get_headers_as_dict(self, headers):
        """Copy a webob-style headers object into a plain dict."""
        headers_dict = {}
        for key, value in headers.items():
            headers_dict[key] = value
        return headers_dict

    def _log_request(self, msg, request, log_method=LOG.debug):
        headers = self._get_headers_as_dict(request.headers)
        body = str(request.body)
        log_method('%s\n\trequest.header: %s.\n\trequest.body: %s.', msg, headers, body)

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        LOG.debug('Calling "add_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        LOG.debug('Calling "update_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.remove_trigger(trigger=trigger)

    def _sanitize_trigger(self, trigger):
        """Convert a trigger model into a plain API dict."""
        sanitized = TriggerAPI.from_model(trigger).to_dict()
        return sanitized
class ActionsController(resource.ContentPackResourceController):
    """
    Implements the RESTful web endpoint that handles
    the lifecycle of Actions in the system.
    """

    views = ActionViewsController()

    model = ActionAPI
    access = Action
    supported_filters = {
        'name': 'name',
        'pack': 'pack',
        'tags': 'name'
    }

    query_options = {
        'sort': ['pack', 'name']
    }

    valid_exclude_attributes = [
        'parameters',
        'notify'
    ]

    def __init__(self, *args, **kwargs):
        super(ActionsController, self).__init__(*args, **kwargs)
        self._trigger_dispatcher = TriggerDispatcher(LOG)

    def get_all(self, exclude_attributes=None, include_attributes=None, sort=None, offset=0,
                limit=None, requester_user=None, **raw_filters):
        """List actions, delegating filtering / pagination to the base controller."""
        return super(ActionsController, self)._get_all(exclude_fields=exclude_attributes,
                                                       include_fields=include_attributes,
                                                       sort=sort,
                                                       offset=offset,
                                                       limit=limit,
                                                       raw_filters=raw_filters,
                                                       requester_user=requester_user)

    def get_one(self, ref_or_id, requester_user):
        """Return one action by reference or id (subject to RBAC view permission)."""
        return super(ActionsController, self)._get_one(ref_or_id,
                                                       requester_user=requester_user,
                                                       permission_type=PermissionType.ACTION_VIEW)

    def post(self, action, requester_user):
        """
        Create a new action.

        Handles requests:
            POST /actions/
        """
        permission_type = PermissionType.ACTION_CREATE
        rbac_utils = get_rbac_backend().get_utils_class()
        rbac_utils.assert_user_has_resource_api_permission(user_db=requester_user,
                                                           resource_api=action,
                                                           permission_type=permission_type)

        try:
            # Perform validation
            validate_not_part_of_system_pack(action)
            action_validator.validate_action(action)
        except (ValidationError, ValueError,
                ValueValidationException, InvalidActionParameterException) as e:
            LOG.exception('Unable to create action data=%s', action)
            abort(http_client.BAD_REQUEST, six.text_type(e))
            return

        # Write pack data files to disk (if any are provided)
        data_files = getattr(action, 'data_files', [])
        written_data_files = []
        if data_files:
            written_data_files = self._handle_data_files(pack_ref=action.pack,
                                                         data_files=data_files)

        action_model = ActionAPI.to_model(action)

        LOG.debug('/actions/ POST verified ActionAPI object=%s', action)
        action_db = Action.add_or_update(action_model)
        LOG.debug('/actions/ POST saved ActionDB object=%s', action_db)

        # Dispatch an internal trigger for each written data file. This way user
        # can automate committing these files to git using a StackStorm rule
        if written_data_files:
            self._dispatch_trigger_for_written_data_files(action_db=action_db,
                                                          written_data_files=written_data_files)

        # NOTE(review): 'acion_db' looks like a typo for 'action_db' -- confirm no log
        # consumers rely on the misspelled key before renaming it.
        extra = {'acion_db': action_db}
        LOG.audit('Action created. Action.id=%s' % (action_db.id), extra=extra)
        action_api = ActionAPI.from_model(action_db)

        return Response(json=action_api, status=http_client.CREATED)

    def put(self, action, ref_or_id, requester_user):
        """Update an existing action identified by ``ref_or_id``."""
        action_db = self._get_by_ref_or_id(ref_or_id=ref_or_id)

        # Assert permissions
        permission_type = PermissionType.ACTION_MODIFY
        rbac_utils = get_rbac_backend().get_utils_class()
        rbac_utils.assert_user_has_resource_db_permission(user_db=requester_user,
                                                          resource_db=action_db,
                                                          permission_type=permission_type)

        action_id = action_db.id

        if not getattr(action, 'pack', None):
            # Preserve the original pack when the update omits it.
            action.pack = action_db.pack

        # Perform validation
        validate_not_part_of_system_pack(action)
        action_validator.validate_action(action)

        # Write pack data files to disk (if any are provided)
        data_files = getattr(action, 'data_files', [])
        written_data_files = []
        if data_files:
            written_data_files = self._handle_data_files(pack_ref=action.pack,
                                                         data_files=data_files)

        try:
            action_db = ActionAPI.to_model(action)
            LOG.debug('/actions/ PUT incoming action: %s', action_db)
            action_db.id = action_id
            action_db = Action.add_or_update(action_db)
            LOG.debug('/actions/ PUT after add_or_update: %s', action_db)
        except (ValidationError, ValueError) as e:
            LOG.exception('Unable to update action data=%s', action)
            abort(http_client.BAD_REQUEST, six.text_type(e))
            return

        # Dispatch an internal trigger for each written data file. This way user
        # can automate committing these files to git using a StackStorm rule
        if written_data_files:
            self._dispatch_trigger_for_written_data_files(action_db=action_db,
                                                          written_data_files=written_data_files)

        action_api = ActionAPI.from_model(action_db)
        LOG.debug('PUT /actions/ client_result=%s', action_api)

        return action_api

    def delete(self, ref_or_id, requester_user):
        """
        Delete an action.

        Handles requests:
            POST /actions/1?_method=delete
            DELETE /actions/1
            DELETE /actions/mypack.myaction
        """
        action_db = self._get_by_ref_or_id(ref_or_id=ref_or_id)
        action_id = action_db.id

        permission_type = PermissionType.ACTION_DELETE
        rbac_utils = get_rbac_backend().get_utils_class()
        rbac_utils.assert_user_has_resource_db_permission(user_db=requester_user,
                                                          resource_db=action_db,
                                                          permission_type=permission_type)

        try:
            validate_not_part_of_system_pack(action_db)
        except ValueValidationException as e:
            abort(http_client.BAD_REQUEST, six.text_type(e))

        LOG.debug('DELETE /actions/ lookup with ref_or_id=%s found object: %s',
                  ref_or_id, action_db)

        try:
            Action.delete(action_db)
        except Exception as e:
            LOG.error('Database delete encountered exception during delete of id="%s". '
                      'Exception was %s', action_id, e)
            abort(http_client.INTERNAL_SERVER_ERROR, six.text_type(e))
            return

        extra = {'action_db': action_db}
        LOG.audit('Action deleted. Action.id=%s' % (action_db.id), extra=extra)
        return Response(status=http_client.NO_CONTENT)

    def _handle_data_files(self, pack_ref, data_files):
        """
        Method for handling action data files.

        This method performs two tasks:

        1. Writes files to disk
        2. Updates affected PackDB model
        """
        # Write files to disk
        written_file_paths = self._write_data_files_to_disk(pack_ref=pack_ref,
                                                            data_files=data_files)

        # Update affected PackDB model (update a list of files)
        # Update PackDB
        self._update_pack_model(pack_ref=pack_ref, data_files=data_files,
                                written_file_paths=written_file_paths)

        return written_file_paths

    def _write_data_files_to_disk(self, pack_ref, data_files):
        """
        Write files to disk.
        """
        written_file_paths = []

        for data_file in data_files:
            file_path = data_file['file_path']
            content = data_file['content']

            file_path = get_pack_resource_file_abs_path(pack_ref=pack_ref,
                                                        resource_type='action',
                                                        file_path=file_path)

            LOG.debug('Writing data file "%s" to "%s"' % (str(data_file), file_path))

            try:
                self._write_data_file(pack_ref=pack_ref, file_path=file_path, content=content)
            except (OSError, IOError) as e:
                # Throw a more user-friendly exception on Permission denied error
                if e.errno == errno.EACCES:
                    msg = ('Unable to write data to "%s" (permission denied). Make sure '
                           'permissions for that pack directory are configured correctly so '
                           'st2api can write to it.' % (file_path))
                    raise ValueError(msg)

                raise e

            written_file_paths.append(file_path)

        return written_file_paths

    def _update_pack_model(self, pack_ref, data_files, written_file_paths):
        """
        Update PackDB models (update files list).
        """
        file_paths = []  # A list of paths relative to the pack directory for new files
        for file_path in written_file_paths:
            file_path = get_relative_path_to_pack_file(pack_ref=pack_ref, file_path=file_path)
            file_paths.append(file_path)

        pack_db = Pack.get_by_ref(pack_ref)
        # Round-trip through a set to de-duplicate the stored file list.
        pack_db.files = set(pack_db.files)
        pack_db.files.update(set(file_paths))
        pack_db.files = list(pack_db.files)
        pack_db = Pack.add_or_update(pack_db)

        return pack_db

    def _write_data_file(self, pack_ref, file_path, content):
        """
        Write data file on disk.
        """
        # Throw if pack directory doesn't exist
        pack_base_path = get_pack_base_path(pack_name=pack_ref)
        if not os.path.isdir(pack_base_path):
            raise ValueError('Directory for pack "%s" doesn\'t exist' % (pack_ref))

        # Create pack sub-directory tree if it doesn't exist
        directory = os.path.dirname(file_path)

        if not os.path.isdir(directory):
            # NOTE: We apply same permission bits as we do on pack install. If we don't do
            # that, st2api won't be able to write to pack sub-directory
            mode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH
            os.makedirs(directory, mode)

        with open(file_path, 'w') as fp:
            fp.write(content)

    def _dispatch_trigger_for_written_data_files(self, action_db, written_data_files):
        """Dispatch an internal st2 trigger for every data file written to disk."""
        trigger = ACTION_FILE_WRITTEN_TRIGGER['name']
        host_info = get_host_info()

        for file_path in written_data_files:
            payload = {
                'ref': action_db.ref,
                'file_path': file_path,
                'host_info': host_info
            }
            self._trigger_dispatcher.dispatch(trigger=trigger, payload=payload)
class WebhooksController(pecan.rest.RestController):
    """
    Legacy webhook controller which maps a hook URL directly to a single
    generic webhook trigger and dispatches it for each POST request.
    """

    def __init__(self, *args, **kwargs):
        super(WebhooksController, self).__init__(*args, **kwargs)
        self._hooks = {}
        self._base_url = '/webhooks/'
        self._trigger_types = [GENERIC_WEBHOOK_TRIGGER_REF]
        self._trigger_dispatcher = TriggerDispatcher(LOG)
        # Keep the known hooks in sync with trigger CRUD events.
        self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,
                                               update_handler=self._handle_update_trigger,
                                               delete_handler=self._handle_delete_trigger,
                                               trigger_types=self._trigger_types)
        self._trigger_watcher.start()

    @jsexpose(str, status_code=http_client.ACCEPTED)
    def post(self, *args, **kwargs):
        """Accept a JSON webhook request and dispatch the registered trigger."""
        hook = '/'.join(args)  # TODO: There must be a better way to do this.
        LOG.info('POST /webhooks/ with hook=%s', hook)

        if not self._is_valid_hook(hook):
            msg = 'Webhook %s not registered with st2' % hook
            return pecan.abort(http_client.NOT_FOUND, msg)

        body = pecan.request.body
        try:
            body = json.loads(body)
        except ValueError:
            msg = 'Invalid JSON body: %s' % (body)
            return pecan.abort(http_client.BAD_REQUEST, msg)

        trigger = self._get_trigger_for_hook(hook)
        payload = {}
        payload['headers'] = self._get_headers_as_dict(pecan.request.headers)
        payload['body'] = body
        self._trigger_dispatcher.dispatch(trigger, payload=payload)

        return body

    def _is_valid_hook(self, hook):
        # TODO: Validate hook payload with payload_schema.
        return hook in self._hooks

    def _get_trigger_for_hook(self, hook):
        return self._hooks[hook]

    def add_trigger(self, trigger):
        """Start serving the webhook endpoint described by the trigger."""
        url = trigger['parameters']['url']
        LOG.info('Listening to endpoint: %s', urljoin(self._base_url, url))
        self._hooks[url] = trigger

    def update_trigger(self, trigger):
        pass

    def remove_trigger(self, trigger):
        """Stop serving the webhook endpoint described by the trigger."""
        url = trigger['parameters']['url']

        if url in self._hooks:
            LOG.info('Stop listening to endpoint: %s', urljoin(self._base_url, url))
            del self._hooks[url]

    def _get_headers_as_dict(self, headers):
        """Copy a webob-style headers object into a plain dict."""
        headers_dict = {}
        for key, value in headers.items():
            headers_dict[key] = value
        return headers_dict

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        LOG.debug('Calling "add_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        LOG.debug('Calling "update_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.remove_trigger(trigger=trigger)

    def _sanitize_trigger(self, trigger):
        """Convert a trigger model into a plain dict with a string id."""
        sanitized = trigger._data
        if 'id' in sanitized:
            # Friendly objectid rather than the MongoEngine representation.
            sanitized['id'] = str(sanitized['id'])
        return sanitized
class WebhooksController(RestController):
    """
    Webhook controller which exposes registered hooks and dispatches a trigger
    for each inbound webhook request (including the special 'st2' hook).
    """

    def __init__(self, *args, **kwargs):
        super(WebhooksController, self).__init__(*args, **kwargs)
        self._hooks = {}
        self._base_url = '/webhooks/'
        self._trigger_types = WEBHOOK_TRIGGER_TYPES.keys()
        self._trigger_dispatcher = TriggerDispatcher(LOG)
        # Keep the known hooks in sync with trigger CRUD events.
        self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,
                                               update_handler=self._handle_update_trigger,
                                               delete_handler=self._handle_delete_trigger,
                                               trigger_types=self._trigger_types,
                                               queue_suffix='webhooks')
        self._trigger_watcher.start()
        self._register_webhook_trigger_types()

    @jsexpose()
    def get_all(self):
        """List all hooks known by this controller."""
        # Return only the hooks known by this controller.
        return [trigger for trigger in six.itervalues(self._hooks)]

    @jsexpose()
    def get_one(self, name):
        """Return the trigger registered for the given hook name, or 404."""
        hook = self._hooks.get(name, None)

        if not hook:
            abort(http_client.NOT_FOUND)
            return

        return hook

    @jsexpose(arg_types=[str], status_code=http_client.ACCEPTED)
    def post(self, *args, **kwargs):
        """Accept a JSON webhook request and dispatch the corresponding trigger."""
        hook = '/'.join(args)  # TODO: There must be a better way to do this.

        body = pecan.request.body
        try:
            body = json.loads(body)
        except ValueError:
            self._log_request('Invalid JSON body.', pecan.request)
            msg = 'Invalid JSON body: %s' % (body)
            return pecan.abort(http_client.BAD_REQUEST, msg)

        if hook == 'st2' or hook == 'st2/':
            # Special-cased system hook: trigger name is carried in the body.
            return self._handle_st2_webhook(body)

        if not self._is_valid_hook(hook):
            self._log_request('Invalid hook.', pecan.request)
            msg = 'Webhook %s not registered with st2' % hook
            return pecan.abort(http_client.NOT_FOUND, msg)

        trigger = self._get_trigger_for_hook(hook)
        payload = {}
        payload['headers'] = self._get_headers_as_dict(pecan.request.headers)
        payload['body'] = body
        self._trigger_dispatcher.dispatch(trigger, payload=payload)

        return body

    def _handle_st2_webhook(self, body):
        """Dispatch a trigger explicitly named in the request body."""
        trigger = body.get('trigger', None)
        payload = body.get('payload', None)
        if not trigger:
            msg = 'Trigger not specified.'
            return pecan.abort(http_client.BAD_REQUEST, msg)
        self._trigger_dispatcher.dispatch(trigger, payload=payload)

        return body

    def _is_valid_hook(self, hook):
        # TODO: Validate hook payload with payload_schema.
        return hook in self._hooks

    def _get_trigger_for_hook(self, hook):
        return self._hooks[hook]

    def _register_webhook_trigger_types(self):
        for trigger_type in WEBHOOK_TRIGGER_TYPES.values():
            trigger_service.create_trigger_type_db(trigger_type)

    def add_trigger(self, trigger):
        """Start serving the webhook endpoint described by the trigger."""
        url = trigger['parameters']['url']
        LOG.info('Listening to endpoint: %s', urljoin(self._base_url, url))
        self._hooks[url] = trigger

    def update_trigger(self, trigger):
        pass

    def remove_trigger(self, trigger):
        """Stop serving the webhook endpoint described by the trigger."""
        url = trigger['parameters']['url']

        if url in self._hooks:
            LOG.info('Stop listening to endpoint: %s', urljoin(self._base_url, url))
            del self._hooks[url]

    def _get_headers_as_dict(self, headers):
        """Copy a webob-style headers object into a plain dict."""
        headers_dict = {}
        for key, value in headers.items():
            headers_dict[key] = value
        return headers_dict

    def _log_request(self, msg, request, log_method=LOG.debug):
        headers = self._get_headers_as_dict(request.headers)
        body = str(request.body)
        log_method('%s\n\trequest.header: %s.\n\trequest.body: %s.', msg, headers, body)

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        LOG.debug('Calling "add_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        LOG.debug('Calling "update_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.remove_trigger(trigger=trigger)

    def _sanitize_trigger(self, trigger):
        """Convert a trigger model into a plain dict with a string id."""
        sanitized = trigger._data
        if 'id' in sanitized:
            # Friendly objectid rather than the MongoEngine representation.
            sanitized['id'] = str(sanitized['id'])
        return sanitized
class WebhooksController(RestController):
    """
    Webhook controller variant which enforces webhook-send RBAC permission and
    associates a trace context (from header or generated) with every dispatch.
    """

    def __init__(self, *args, **kwargs):
        super(WebhooksController, self).__init__(*args, **kwargs)
        self._hooks = {}
        self._base_url = '/webhooks/'
        self._trigger_types = WEBHOOK_TRIGGER_TYPES.keys()
        self._trigger_dispatcher = TriggerDispatcher(LOG)
        # Keep the known hooks in sync with trigger CRUD events.
        self._trigger_watcher = TriggerWatcher(
            create_handler=self._handle_create_trigger,
            update_handler=self._handle_update_trigger,
            delete_handler=self._handle_delete_trigger,
            trigger_types=self._trigger_types,
            queue_suffix=self.__class__.__name__,
            exclusive=True)
        self._trigger_watcher.start()
        self._register_webhook_trigger_types()

    @jsexpose()
    def get_all(self):
        """List all hooks known by this controller."""
        # Return only the hooks known by this controller.
        return [trigger for trigger in six.itervalues(self._hooks)]

    @jsexpose()
    def get_one(self, name):
        """Return the trigger registered for the given hook name, or 404."""
        hook = self._hooks.get(name, None)

        if not hook:
            abort(http_client.NOT_FOUND)
            return

        return hook

    @request_user_has_webhook_permission(permission_type=PermissionType.WEBHOOK_SEND)
    @jsexpose(arg_types=[str], status_code=http_client.ACCEPTED)
    def post(self, *args, **kwargs):
        """Accept a JSON webhook request and dispatch the corresponding trigger."""
        hook = '/'.join(args)  # TODO: There must be a better way to do this.

        body = pecan.request.body
        try:
            body = json.loads(body)
        except ValueError:
            self._log_request('Invalid JSON body.', pecan.request)
            msg = 'Invalid JSON body: %s' % (body)
            return pecan.abort(http_client.BAD_REQUEST, msg)

        headers = self._get_headers_as_dict(pecan.request.headers)
        # If webhook contains a trace-tag use that, else create a unique trace-tag.
        trace_context = self._create_trace_context(
            trace_tag=headers.pop(TRACE_TAG_HEADER, None), hook=hook)

        if hook == 'st2' or hook == 'st2/':
            # Special-cased system hook: trigger name is carried in the body.
            return self._handle_st2_webhook(body, trace_context=trace_context)

        if not self._is_valid_hook(hook):
            self._log_request('Invalid hook.', pecan.request)
            msg = 'Webhook %s not registered with st2' % hook
            return pecan.abort(http_client.NOT_FOUND, msg)

        trigger = self._get_trigger_for_hook(hook)
        payload = {}
        payload['headers'] = headers
        payload['body'] = body
        self._trigger_dispatcher.dispatch(trigger, payload=payload,
                                          trace_context=trace_context)

        return body

    def _handle_st2_webhook(self, body, trace_context):
        """Dispatch a trigger explicitly named in the request body."""
        trigger = body.get('trigger', None)
        payload = body.get('payload', None)
        if not trigger:
            msg = 'Trigger not specified.'
            return pecan.abort(http_client.BAD_REQUEST, msg)
        self._trigger_dispatcher.dispatch(trigger, payload=payload,
                                          trace_context=trace_context)

        return body

    def _is_valid_hook(self, hook):
        # TODO: Validate hook payload with payload_schema.
        return hook in self._hooks

    def _get_trigger_for_hook(self, hook):
        return self._hooks[hook]

    def _register_webhook_trigger_types(self):
        for trigger_type in WEBHOOK_TRIGGER_TYPES.values():
            trigger_service.create_trigger_type_db(trigger_type)

    def _create_trace_context(self, trace_tag, hook):
        # if no trace_tag then create a unique one
        if not trace_tag:
            trace_tag = 'webhook-%s-%s' % (hook, uuid.uuid4().hex)
        return TraceContext(trace_tag=trace_tag)

    def add_trigger(self, trigger):
        """Start serving the webhook endpoint described by the trigger."""
        # Note: Permission checking for creating and deleting a webhook is done during rule
        # creation
        url = trigger['parameters']['url']
        LOG.info('Listening to endpoint: %s', urljoin(self._base_url, url))
        self._hooks[url] = trigger

    def update_trigger(self, trigger):
        pass

    def remove_trigger(self, trigger):
        """Stop serving the webhook endpoint described by the trigger."""
        # Note: Permission checking for creating and deleting a webhook is done during rule
        # creation
        url = trigger['parameters']['url']

        if url in self._hooks:
            LOG.info('Stop listening to endpoint: %s', urljoin(self._base_url, url))
            del self._hooks[url]

    def _get_headers_as_dict(self, headers):
        """Copy a webob-style headers object into a plain dict."""
        headers_dict = {}
        for key, value in headers.items():
            headers_dict[key] = value
        return headers_dict

    def _log_request(self, msg, request, log_method=LOG.debug):
        headers = self._get_headers_as_dict(request.headers)
        body = str(request.body)
        log_method('%s\n\trequest.header: %s.\n\trequest.body: %s.', msg, headers, body)

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        LOG.debug('Calling "add_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        LOG.debug('Calling "update_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.remove_trigger(trigger=trigger)

    def _sanitize_trigger(self, trigger):
        """Convert a trigger model into a plain dict with a string id."""
        sanitized = trigger._data
        if 'id' in sanitized:
            # Friendly objectid rather than the MongoEngine representation.
            sanitized['id'] = str(sanitized['id'])
        return sanitized
class SensorService(object):
    """
    Instance of this class is passed to the sensor instance and exposes "public"
    methods which can be called by the sensor.
    """

    def __init__(self, sensor_wrapper):
        wrapper = sensor_wrapper
        self._sensor_wrapper = wrapper
        self._logger = wrapper._logger
        self._dispatcher = TriggerDispatcher(self._logger)
        self._datastore_service = DatastoreService(logger=self._logger,
                                                   pack_name=wrapper._pack,
                                                   class_name=wrapper._class_name,
                                                   api_username='******')

        self._client = None

    def get_logger(self, name):
        """
        Retrieve an instance of a logger to be used by the sensor class.

        The returned logger is a child of the sensor wrapper's logger and
        propagates its records up the logger hierarchy.
        """
        parent_name = self._sensor_wrapper._logger.name
        child_logger = logging.getLogger('%s.%s' % (parent_name, name))
        child_logger.propagate = True

        return child_logger

    def dispatch(self, trigger, payload=None, trace_tag=None):
        """
        Method which dispatches the trigger.

        :param trigger: Full name / reference of the trigger.
        :type trigger: ``str``

        :param payload: Trigger payload.
        :type payload: ``dict``

        :param trace_tag: Tracer to track the triggerinstance.
        :type trace_tag: ``str``
        """
        # An empty / missing trace tag (including an empty string) results in
        # no trace context being attached.
        if trace_tag:
            context = TraceContext(trace_tag=trace_tag)
        else:
            context = None

        self.dispatch_with_context(trigger, payload=payload, trace_context=context)

    def dispatch_with_context(self, trigger, payload=None, trace_context=None):
        """
        Method which dispatches the trigger together with an explicit trace context.

        :param trigger: Full name / reference of the trigger.
        :type trigger: ``str``

        :param payload: Trigger payload.
        :type payload: ``dict``

        :param trace_context: Trace context to associate with Trigger.
        :type trace_context: ``st2common.api.models.api.trace.TraceContext``
        """
        self._dispatcher.dispatch(trigger, payload=payload, trace_context=trace_context)

    ##################################
    # Methods for datastore management
    ##################################

    def list_values(self, local=True, prefix=None):
        """Delegate to the datastore service."""
        return self._datastore_service.list_values(local, prefix)

    def get_value(self, name, local=True):
        """Delegate to the datastore service."""
        return self._datastore_service.get_value(name, local)

    def set_value(self, name, value, ttl=None, local=True):
        """Delegate to the datastore service."""
        return self._datastore_service.set_value(name, value, ttl, local)

    def delete_value(self, name, local=True):
        """Delegate to the datastore service."""
        return self._datastore_service.delete_value(name, local)
class St2Timer(object):
    """
    A timer interface that uses APScheduler 3.0.

    Watches timer trigger CRUD events and schedules / unschedules the
    corresponding APScheduler jobs, dispatching a trigger instance each time a
    job fires.
    """

    def __init__(self, local_timezone=None):
        # local_timezone may be None, in which case the scheduler default is used.
        self._timezone = local_timezone
        self._scheduler = BlockingScheduler(timezone=self._timezone)
        # Maps trigger id -> APScheduler job id so jobs can be removed later.
        self._jobs = {}
        self._trigger_types = list(TIMER_TRIGGER_TYPES.keys())
        self._trigger_watcher = TriggerWatcher(
            create_handler=self._handle_create_trigger,
            update_handler=self._handle_update_trigger,
            delete_handler=self._handle_delete_trigger,
            trigger_types=self._trigger_types,
            queue_suffix=self.__class__.__name__,
            exclusive=True)
        self._trigger_dispatcher = TriggerDispatcher(LOG)

    def start(self):
        # Register trigger types before watching, then block in the scheduler.
        self._register_timer_trigger_types()
        self._trigger_watcher.start()
        self._scheduler.start()

    def cleanup(self):
        self._scheduler.shutdown(wait=True)

    def add_trigger(self, trigger):
        self._add_job_to_scheduler(trigger)

    def update_trigger(self, trigger):
        # An update is implemented as remove + re-add.
        self.remove_trigger(trigger)
        self.add_trigger(trigger)

    def remove_trigger(self, trigger):
        trigger_id = trigger['id']

        try:
            job_id = self._jobs[trigger_id]
        except KeyError:
            LOG.info('Job not found: %s', trigger_id)
            return

        self._scheduler.remove_job(job_id)
        del self._jobs[trigger_id]

    def _add_job_to_scheduler(self, trigger):
        """
        Validate the trigger parameters and schedule the matching APScheduler
        job (interval, date or cron based on the trigger type name).
        """
        trigger_type_ref = trigger['type']
        trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
        try:
            util_schema.validate(instance=trigger['parameters'],
                                 schema=trigger_type['parameters_schema'],
                                 cls=util_schema.CustomValidator,
                                 use_default=True,
                                 allow_default_none=True)
        except jsonschema.ValidationError as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'], e, exc_info=True)
            raise  # Or should we just return?

        time_spec = trigger['parameters']
        time_zone = aps_utils.astimezone(trigger['parameters'].get('timezone'))

        time_type = None

        if trigger_type['name'] == 'st2.IntervalTimer':
            unit = time_spec.get('unit', None)
            value = time_spec.get('delta', None)
            time_type = IntervalTrigger(**{unit: value, 'timezone': time_zone})
        elif trigger_type['name'] == 'st2.DateTimer':
            # Raises an exception if date string isn't a valid one.
            dat = date_parser.parse(time_spec.get('date', None))
            time_type = DateTrigger(dat, timezone=time_zone)
        elif trigger_type['name'] == 'st2.CronTimer':
            cron = time_spec.copy()
            cron['timezone'] = time_zone

            time_type = CronTrigger(**cron)

        utc_now = date_utils.get_datetime_utc_now()
        if hasattr(time_type, 'run_date') and utc_now > time_type.run_date:
            # One-shot (date) timers whose run date already passed are skipped.
            LOG.warning('Not scheduling expired timer: %s : %s',
                        trigger['parameters'], time_type.run_date)
        else:
            self._add_job(trigger, time_type)
        return time_type

    def _add_job(self, trigger, time_type, replace=True):
        try:
            job = self._scheduler.add_job(self._emit_trigger_instance,
                                          trigger=time_type,
                                          args=[trigger],
                                          replace_existing=replace)
            LOG.info('Job %s scheduled.', job.id)
            self._jobs[trigger['id']] = job.id
        except Exception as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'], e, exc_info=True)

    def _emit_trigger_instance(self, trigger):
        """
        APScheduler job callback: dispatch a trigger instance for a fired timer.
        """
        utc_now = date_utils.get_datetime_utc_now()
        # debug logging is reasonable for this one. A high resolution timer will end up
        # trashing standard logs.
        LOG.debug('Timer fired at: %s. Trigger: %s', str(utc_now), trigger)

        payload = {
            'executed_at': str(utc_now),
            'schedule': trigger['parameters'].get('time')
        }

        trace_context = TraceContext(trace_tag='%s-%s' % (self._get_trigger_type_name(trigger),
                                                          trigger.get('name', uuid.uuid4().hex)))
        self._trigger_dispatcher.dispatch(trigger, payload, trace_context=trace_context)

    def _get_trigger_type_name(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
        return trigger_type['name']

    def _register_timer_trigger_types(self):
        return trigger_services.add_trigger_models(list(TIMER_TRIGGER_TYPES.values()))

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        LOG.debug('Calling "add_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        LOG.debug('Calling "update_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.remove_trigger(trigger=trigger)

    def _sanitize_trigger(self, trigger):
        # Convert the DB model into a plain dict before handing it to the
        # scheduler machinery.
        sanitized = TriggerAPI.from_model(trigger).to_dict()
        return sanitized
class ProcessSensorContainer(object):
    """
    Sensor container which runs sensors in a separate process.
    """

    def __init__(self, sensors, poll_interval=5):
        """
        :param sensors: A list of sensor dicts.
        :type sensors: ``list`` of ``dict``

        :param poll_interval: How often (in seconds) to poll child processes
                              for their exit status.
        :type poll_interval: ``int``
        """
        self._sensors = {}  # maps sensor_id -> sensor object
        self._processes = {}  # maps sensor_id -> sensor process

        self._dispatcher = TriggerDispatcher(LOG)

        self.poll_interval = poll_interval
        self.stopped = False

        sensors = sensors or []
        for sensor_obj in sensors:
            sensor_id = self._get_sensor_id(sensor=sensor_obj)
            self._sensors[sensor_id] = sensor_obj

    def run(self):
        self._run_all_sensors()

        try:
            while not self.stopped:
                # Poll for all running processes.
                # NOTE: Snapshot the keys into a list because polling may
                # remove entries from self._sensors - mutating a dict while
                # iterating a live view raises RuntimeError on Python 3.
                sensor_ids = list(self._sensors.keys())

                if len(sensor_ids) >= 1:
                    self._poll_sensors_for_results(sensor_ids)

                eventlet.sleep(self.poll_interval)
        except greenlet.GreenletExit:
            # This exception is thrown when sensor container manager
            # kills the thread which runs process container. Not sure
            # if this is the best thing to do.
            self.stopped = True
            return SUCCESS_EXIT_CODE
        except Exception:
            # NOTE: Previously a bare "except:" - narrowed so SystemExit /
            # KeyboardInterrupt are not swallowed.
            LOG.exception('Container failed to run sensors.')
            self.stopped = True
            return FAILURE_EXIT_CODE

        self.stopped = True
        LOG.error('Process container quit. It shouldn\'t.')

    def _poll_sensors_for_results(self, sensor_ids):
        """
        Check each tracked sensor process; dispatch an exit trigger and drop
        bookkeeping for any process which has died.
        """
        for sensor_id in sensor_ids:
            process = self._processes[sensor_id]
            status = process.poll()

            if status is not None:
                # Dead process detected
                LOG.info('Process for sensor %s has exited with code %s',
                         self._sensors[sensor_id]['ref'], status)
                sensor = self._sensors[sensor_id]
                self._dispatch_trigger_for_sensor_exit(sensor=sensor,
                                                       exit_code=status)
                self._delete_sensors(sensor_id)

    def running(self):
        return len(self._processes)

    def shutdown(self):
        LOG.info('Container shutting down. Invoking cleanup on sensors.')
        self.stopped = True

        # Snapshot keys: _stop_sensor_process deletes entries from self._sensors.
        sensor_ids = list(self._sensors.keys())
        for sensor_id in sensor_ids:
            self._stop_sensor_process(sensor_id=sensor_id)

        LOG.info('All sensors are shut down.')

        self._sensors = {}
        self._processes = {}

    def add_sensor(self, sensor):
        """
        Add a new sensor to the container.

        :type sensor: ``dict``

        :return: ``True`` if the sensor was started, ``False`` if it is
                 already running.
        :rtype: ``bool``
        """
        sensor_id = self._get_sensor_id(sensor=sensor)

        if sensor_id in self._sensors:
            LOG.warning('Sensor %s already exists and running.', sensor_id)
            return False

        self._spawn_sensor_process(sensor=sensor)
        LOG.debug('Sensor %s started.', sensor_id)
        self._sensors[sensor_id] = sensor
        return True

    def remove_sensor(self, sensor):
        """
        Remove an existing sensor from the container.

        :type sensor: ``dict``

        :return: ``True`` if the sensor was stopped, ``False`` if it is not
                 running in this container.
        :rtype: ``bool``
        """
        sensor_id = self._get_sensor_id(sensor=sensor)

        if sensor_id not in self._sensors:
            LOG.warning('Sensor %s isn\'t running in this container.', sensor_id)
            return False

        self._stop_sensor_process(sensor_id=sensor_id)
        LOG.debug('Sensor %s stopped.', sensor_id)
        return True

    def _run_all_sensors(self):
        # Snapshot keys: we may delete sensors we fail to start while iterating.
        sensor_ids = list(self._sensors.keys())

        for sensor_id in sensor_ids:
            sensor_obj = self._sensors[sensor_id]
            LOG.info('Running sensor %s', sensor_id)

            try:
                self._spawn_sensor_process(sensor=sensor_obj)
            except Exception as e:
                # NOTE: e.message does not exist on Python 3; use str(e).
                LOG.warning(str(e), exc_info=True)

                # Disable sensor which we are unable to start
                del self._sensors[sensor_id]
                continue

            LOG.info('Sensor %s started' % sensor_id)

    def _spawn_sensor_process(self, sensor):
        """
        Spawn a new process for the provided sensor.

        New process uses isolated Python binary from a virtual environment
        belonging to the sensor pack.

        :raises Exception: If the pack virtualenv is missing or the process
                           cannot be spawned.
        """
        sensor_id = self._get_sensor_id(sensor=sensor)
        virtualenv_path = get_sandbox_virtualenv_path(pack=sensor['pack'])
        python_path = get_sandbox_python_binary_path(pack=sensor['pack'])

        if virtualenv_path and not os.path.isdir(virtualenv_path):
            msg = PACK_VIRTUALENV_DOESNT_EXIST % (sensor['pack'], sensor['pack'])
            raise Exception(msg)

        trigger_type_refs = sensor['trigger_types'] or []
        trigger_type_refs = ','.join(trigger_type_refs)

        parent_args = json.dumps(sys.argv[1:])

        args = [
            python_path,
            WRAPPER_SCRIPT_PATH,
            '--pack=%s' % (sensor['pack']),
            '--file-path=%s' % (sensor['file_path']),
            '--class-name=%s' % (sensor['class_name']),
            '--trigger-type-refs=%s' % (trigger_type_refs),
            '--parent-args=%s' % (parent_args)
        ]

        if sensor['poll_interval']:
            args.append('--poll-interval=%s' % (sensor['poll_interval']))

        env = os.environ.copy()
        env['PYTHONPATH'] = get_sandbox_python_path(inherit_from_parent=True,
                                                    inherit_parent_virtualenv=True)

        # Include full api URL and API token specific to that sensor
        ttl = (24 * 60 * 60)
        temporary_token = create_token(username='******', ttl=ttl)

        env[API_URL_ENV_VARIABLE_NAME] = get_full_public_api_url()
        env[AUTH_TOKEN_ENV_VARIABLE_NAME] = temporary_token.token

        # TODO 1: Purge temporary token when service stops or sensor process dies
        # TODO 2: Store metadata (wrapper process id) with the token and delete
        # tokens for old, dead processes on startup
        cmd = ' '.join(args)
        LOG.debug('Running sensor subprocess (cmd="%s")', cmd)

        # TODO: Intercept stdout and stderr for aggregated logging purposes
        try:
            process = subprocess.Popen(args=args, stdin=None, stdout=None,
                                       stderr=None, shell=False, env=env)
        except Exception as e:
            message = ('Failed to spawn process for sensor %s ("%s"): %s' %
                       (sensor_id, cmd, str(e)))
            raise Exception(message)

        self._dispatch_trigger_for_sensor_spawn(sensor=sensor, process=process, cmd=cmd)

        self._processes[sensor_id] = process
        return process

    def _stop_sensor_process(self, sensor_id, exit_timeout=5):
        """
        Stop a sensor process for the provided sensor.

        :param sensor_id: Sensor ID.
        :type sensor_id: ``str``

        :param exit_timeout: How long to wait for process to exit after
                             sending SIGTERM signal. If the process doesn't
                             exit in this amount of seconds, SIGKILL signal
                             will be sent to the process.
        :type exit_timeout: ``int``
        """
        process = self._processes[sensor_id]

        # Terminate the process and wait for up to stop_timeout seconds for the
        # process to exit
        process.terminate()

        timeout = 0
        sleep_delay = 1
        # Initialize so the post-loop check is well-defined even if
        # exit_timeout <= 0 and the loop body never runs.
        status = None

        while timeout < exit_timeout:
            status = process.poll()

            if status is not None:
                # Process has exited
                break

            timeout += sleep_delay
            time.sleep(sleep_delay)

        if status is None:
            # Process hasn't exited yet, forcefully kill it
            process.kill()

        self._delete_sensors(sensor_id)

    def _get_sensor_id(self, sensor):
        """
        Return unique identifier for the provider sensor dict.

        :type sensor: ``dict``

        :rtype: ``str``
        """
        sensor_id = sensor['ref']
        return sensor_id

    def _dispatch_trigger_for_sensor_spawn(self, sensor, process, cmd):
        # Internal trigger emitted whenever a sensor process is started.
        trigger = 'st2.sensor.process_spawn'
        now = int(time.time())
        payload = {
            'id': sensor['class_name'],
            'timestamp': now,
            'pid': process.pid,
            'cmd': cmd
        }
        self._dispatcher.dispatch(trigger, payload=payload)

    def _dispatch_trigger_for_sensor_exit(self, sensor, exit_code):
        # Internal trigger emitted whenever a sensor process dies.
        trigger = 'st2.sensor.process_exit'
        now = int(time.time())
        payload = {
            'id': sensor['class_name'],
            'timestamp': now,
            'exit_code': exit_code
        }
        self._dispatcher.dispatch(trigger, payload=payload)

    def _delete_sensors(self, sensor_id):
        """
        Remove all bookkeeping for the provided sensor id (no-op for unknown ids).
        """
        if sensor_id in self._processes:
            del self._processes[sensor_id]

        if sensor_id in self._sensors:
            del self._sensors[sensor_id]
class SensorService(object):
    """
    Instance of this class is passed to the sensor instance and exposes "public"
    methods which can be called by the sensor.
    """

    def __init__(self, sensor_wrapper):
        self._sensor_wrapper = sensor_wrapper
        self._logger = self._sensor_wrapper._logger
        self._dispatcher = TriggerDispatcher(self._logger)
        # Datastore operations are delegated to a service scoped to this
        # sensor's pack and class name.
        self._datastore_service = SensorDatastoreService(
            logger=self._logger,
            pack_name=self._sensor_wrapper._pack,
            class_name=self._sensor_wrapper._class_name,
            api_username='******')

        self._client = None

    def get_logger(self, name):
        """
        Retrieve an instance of a logger to be used by the sensor class.

        :param name: Logger name suffix; appended to the wrapper logger's name.
        :type name: ``str``

        :rtype: :class:`logging.Logger`
        """
        logger_name = '%s.%s' % (self._sensor_wrapper._logger.name, name)
        logger = logging.getLogger(logger_name)
        logger.propagate = True

        return logger

    def dispatch(self, trigger, payload=None, trace_tag=None):
        """
        Method which dispatches the trigger.

        :param trigger: Full name / reference of the trigger.
        :type trigger: ``str``

        :param payload: Trigger payload.
        :type payload: ``dict``

        :param trace_tag: Tracer tag used to track the resulting trigger instance.
        :type trace_tag: ``str``
        """
        # A falsy trace_tag (None or empty string) means no trace context is
        # associated with this dispatch.
        trace_context = TraceContext(trace_tag=trace_tag) if trace_tag else None
        self._logger.debug('Added trace_context %s to trigger %s.', trace_context, trigger)
        self.dispatch_with_context(trigger, payload=payload, trace_context=trace_context)

    def dispatch_with_context(self, trigger, payload=None, trace_context=None):
        """
        Method which dispatches the trigger.

        :param trigger: Full name / reference of the trigger.
        :type trigger: ``str``

        :param payload: Trigger payload.
        :type payload: ``dict``

        :param trace_context: Trace context to associate with Trigger.
        :type trace_context: ``st2common.api.models.api.trace.TraceContext``
        """
        # Tracks whether the payload complies with the trigger_type schema.
        is_valid = True
        try:
            validate_trigger_payload(trigger_type_ref=trigger, payload=payload)
        except Exception as e:
            # NOTE: The previous "(ValidationError, Exception)" tuple was
            # redundant - Exception already covers ValidationError.
            is_valid = False
            self._logger.warning(
                'Failed to validate payload (%s) for trigger "%s": %s' %
                (str(payload), trigger, str(e)))

        # If validation is disabled, still dispatch a trigger even if it failed validation
        # This condition prevents unexpected restriction.
        if not is_valid and cfg.CONF.system.validate_trigger_payload:
            self._logger.warning(
                'Trigger payload validation failed and validation is enabled, not '
                'dispatching a trigger "%s" (%s)' % (trigger, str(payload)))
            return None

        self._logger.debug('Dispatching trigger %s with payload %s.', trigger, payload)
        self._dispatcher.dispatch(trigger, payload=payload, trace_context=trace_context)

    ##################################
    # Methods for datastore management
    ##################################

    # The methods below are thin proxies to the datastore service; see
    # SensorDatastoreService for parameter semantics.

    def list_values(self, local=True, prefix=None):
        return self._datastore_service.list_values(local, prefix)

    def get_value(self, name, local=True):
        return self._datastore_service.get_value(name, local)

    def set_value(self, name, value, ttl=None, local=True):
        return self._datastore_service.set_value(name, value, ttl, local)

    def delete_value(self, name, local=True):
        return self._datastore_service.delete_value(name, local)
class St2Timer(object):
    """
    A timer interface that uses APScheduler 3.0.

    Watches timer trigger CRUD events and schedules / unschedules the
    corresponding APScheduler jobs, dispatching a trigger instance each time a
    job fires.
    """

    def __init__(self, local_timezone=None):
        self._timezone = local_timezone
        self._scheduler = BlockingScheduler(timezone=self._timezone)
        # Maps trigger id -> APScheduler job id so jobs can be removed later.
        self._jobs = {}
        # NOTE: Wrap in list() - on Python 3 dict.keys() returns a view, and
        # downstream consumers expect a concrete list.
        self._trigger_types = list(TIMER_TRIGGER_TYPES.keys())
        self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,
                                               update_handler=self._handle_update_trigger,
                                               delete_handler=self._handle_delete_trigger,
                                               trigger_types=self._trigger_types,
                                               queue_suffix=self.__class__.__name__,
                                               exclusive=True)
        self._trigger_dispatcher = TriggerDispatcher(LOG)

    def start(self):
        # Register trigger types before watching, then block in the scheduler.
        self._register_timer_trigger_types()
        self._trigger_watcher.start()
        self._scheduler.start()

    def cleanup(self):
        self._scheduler.shutdown(wait=True)

    def add_trigger(self, trigger):
        self._add_job_to_scheduler(trigger)

    def update_trigger(self, trigger):
        # An update is implemented as remove + re-add.
        self.remove_trigger(trigger)
        self.add_trigger(trigger)

    def remove_trigger(self, trigger):
        trigger_id = trigger['id']

        try:
            job_id = self._jobs[trigger_id]
        except KeyError:
            LOG.info('Job not found: %s', trigger_id)
            return

        self._scheduler.remove_job(job_id)
        del self._jobs[trigger_id]

    def _add_job_to_scheduler(self, trigger):
        """
        Validate the trigger parameters and schedule the matching APScheduler
        job (interval, date or cron based on the trigger type name).
        """
        trigger_type_ref = trigger['type']
        trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
        try:
            jsonschema.validate(trigger['parameters'],
                                trigger_type['parameters_schema'])
        except jsonschema.ValidationError as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'], e, exc_info=True)
            raise  # Or should we just return?

        time_spec = trigger['parameters']
        time_zone = aps_utils.astimezone(trigger['parameters'].get('timezone'))

        time_type = None

        if trigger_type['name'] == 'st2.IntervalTimer':
            unit = time_spec.get('unit', None)
            value = time_spec.get('delta', None)
            time_type = IntervalTrigger(**{unit: value, 'timezone': time_zone})
        elif trigger_type['name'] == 'st2.DateTimer':
            # Raises an exception if date string isn't a valid one.
            dat = date_parser.parse(time_spec.get('date', None))
            time_type = DateTrigger(dat, timezone=time_zone)
        elif trigger_type['name'] == 'st2.CronTimer':
            cron = time_spec.copy()
            cron['timezone'] = time_zone

            time_type = CronTrigger(**cron)

        utc_now = date_utils.get_datetime_utc_now()
        if hasattr(time_type, 'run_date') and utc_now > time_type.run_date:
            # One-shot (date) timers whose run date already passed are skipped.
            LOG.warning('Not scheduling expired timer: %s : %s',
                        trigger['parameters'], time_type.run_date)
        else:
            self._add_job(trigger, time_type)
        return time_type

    def _add_job(self, trigger, time_type, replace=True):
        try:
            job = self._scheduler.add_job(self._emit_trigger_instance,
                                          trigger=time_type,
                                          args=[trigger],
                                          replace_existing=replace)
            LOG.info('Job %s scheduled.', job.id)
            self._jobs[trigger['id']] = job.id
        except Exception as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'], e, exc_info=True)

    def _emit_trigger_instance(self, trigger):
        """
        APScheduler job callback: dispatch a trigger instance for a fired timer.
        """
        utc_now = date_utils.get_datetime_utc_now()
        # debug logging is reasonable for this one. A high resolution timer will end up
        # trashing standard logs.
        LOG.debug('Timer fired at: %s. Trigger: %s', str(utc_now), trigger)

        payload = {
            'executed_at': str(utc_now),
            'schedule': trigger['parameters'].get('time')
        }

        trace_context = TraceContext(trace_tag='%s-%s' % (self._get_trigger_type_name(trigger),
                                                          trigger.get('name', uuid.uuid4().hex)))
        self._trigger_dispatcher.dispatch(trigger, payload, trace_context=trace_context)

    def _get_trigger_type_name(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
        return trigger_type['name']

    def _register_timer_trigger_types(self):
        # NOTE: Wrap in list() for Python 3 where dict.values() is a view;
        # consistent with the other St2Timer implementation in this code base.
        return trigger_services.add_trigger_models(list(TIMER_TRIGGER_TYPES.values()))

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        LOG.debug('Calling "add_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        LOG.debug('Calling "update_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.remove_trigger(trigger=trigger)

    def _sanitize_trigger(self, trigger):
        # NOTE(review): this reaches into the model's private _data attribute -
        # presumably a MongoEngine document; confirm before refactoring.
        sanitized = trigger._data
        if 'id' in sanitized:
            # Friendly objectid rather than the MongoEngine representation.
            sanitized['id'] = str(sanitized['id'])
        return sanitized
class SensorService(object):
    """
    Instance of this class is passed to the sensor instance and exposes "public"
    methods which can be called by the sensor.
    """

    # Separator between the sensor-local key prefix and the key name.
    DATASTORE_NAME_SEPARATOR = ':'

    def __init__(self, sensor_wrapper):
        self._sensor_wrapper = sensor_wrapper
        self._logger = self._sensor_wrapper._logger
        self._dispatcher = TriggerDispatcher(self._logger)
        # API client is instantiated lazily in _get_api_client().
        self._client = None

    def get_logger(self, name):
        """
        Retrieve an instance of a logger to be used by the sensor class.

        :param name: Logger name suffix; appended to the wrapper logger's name.
        :type name: ``str``

        :rtype: :class:`logging.Logger`
        """
        logger_name = '%s.%s' % (self._sensor_wrapper._logger.name, name)
        logger = logging.getLogger(logger_name)
        logger.propagate = True
        return logger

    def dispatch(self, trigger, payload=None, trace_tag=None):
        """
        Method which dispatches the trigger.

        :param trigger: Full name / reference of the trigger.
        :type trigger: ``str``

        :param payload: Trigger payload.
        :type payload: ``dict``

        :param trace_tag: Tracer tag used to track the resulting trigger instance.
        :type trace_tag: ``str``
        """
        # A falsy trace_tag (None or empty string) means no trace context is
        # associated with this dispatch.
        trace_context = TraceContext(trace_tag=trace_tag) if trace_tag else None
        self.dispatch_with_context(trigger, payload=payload, trace_context=trace_context)

    def dispatch_with_context(self, trigger, payload=None, trace_context=None):
        """
        Method which dispatches the trigger.

        :param trigger: Full name / reference of the trigger.
        :type trigger: ``str``

        :param payload: Trigger payload.
        :type payload: ``dict``

        :param trace_context: Trace context to associate with Trigger.
        :type trace_context: ``st2common.api.models.api.trace.TraceContext``
        """
        self._dispatcher.dispatch(trigger, payload=payload, trace_context=trace_context)

    ##################################
    # Methods for datastore management
    ##################################

    def list_values(self, local=True, prefix=None):
        """
        Retrieve all the datastores items.

        :param local: List values from a namespace local to this sensor. Defaults to True.
        :type: local: ``bool``

        :param prefix: Optional key name prefix / startswith filter.
        :type prefix: ``str``

        :rtype: ``list`` of :class:`KeyValuePair`
        """
        client = self._get_api_client()

        self._logger.audit('Retrieving all the value from the datastore')

        key_prefix = self._get_full_key_prefix(local=local, prefix=prefix)
        kvps = client.keys.get_all(prefix=key_prefix)
        return kvps

    def get_value(self, name, local=True):
        """
        Retrieve a value from the datastore for the provided key.

        By default, value is retrieved from the namespace local to the sensor. If you want to
        retrieve a global value from a datastore, pass local=False to this method.

        :param name: Key name.
        :type name: ``str``

        :param local: Retrieve value from a namespace local to the sensor. Defaults to True.
        :type: local: ``bool``

        :rtype: ``str`` or ``None``
        """
        name = self._get_full_key_name(name=name, local=local)

        client = self._get_api_client()

        self._logger.audit('Retrieving value from the datastore (name=%s)', name)

        try:
            kvp = client.keys.get_by_id(id=name)
        except Exception:
            # Missing key (or any API error) is reported as "no value".
            return None

        if kvp:
            return kvp.value

        return None

    def set_value(self, name, value, ttl=None, local=True):
        """
        Set a value for the provided key.

        By default, value is set in a namespace local to the sensor. If you want to
        set a global value, pass local=False to this method.

        :param name: Key name.
        :type name: ``str``

        :param value: Key value.
        :type value: ``str``

        :param ttl: Optional TTL (in seconds).
        :type ttl: ``int``

        :param local: Set value in a namespace local to the sensor. Defaults to True.
        :type: local: ``bool``

        :return: ``True`` on success, ``False`` otherwise.
        :rtype: ``bool``
        """
        name = self._get_full_key_name(name=name, local=local)

        # Datastore values are stored as strings.
        value = str(value)
        client = self._get_api_client()

        self._logger.audit('Setting value in the datastore (name=%s)', name)

        instance = KeyValuePair()
        instance.id = name
        instance.name = name
        instance.value = value

        if ttl:
            instance.ttl = ttl

        client.keys.update(instance=instance)
        return True

    def delete_value(self, name, local=True):
        """
        Delete the provided key.

        By default, value is deleted from a namespace local to the sensor. If you want to
        delete a global value, pass local=False to this method.

        :param name: Name of the key to delete.
        :type name: ``str``

        :param local: Delete a value in a namespace local to the sensor. Defaults to True.
        :type: local: ``bool``

        :return: ``True`` on success, ``False`` otherwise.
        :rtype: ``bool``
        """
        name = self._get_full_key_name(name=name, local=local)

        client = self._get_api_client()

        instance = KeyValuePair()
        instance.id = name
        instance.name = name

        self._logger.audit('Deleting value from the datastore (name=%s)', name)

        try:
            client.keys.delete(instance=instance)
        except Exception:
            return False

        return True

    def _get_api_client(self):
        """
        Retrieve API client instance.

        :raises ValueError: If the API URL / auth token environment variables
                            are not set.
        """
        # TODO: API client is really unfriendly and needs to be re-designed and
        # improved
        api_url = os.environ.get(API_URL_ENV_VARIABLE_NAME, None)
        auth_token = os.environ.get(AUTH_TOKEN_ENV_VARIABLE_NAME, None)

        if not api_url or not auth_token:
            raise ValueError(
                '%s and %s environment variable must be set' %
                (API_URL_ENV_VARIABLE_NAME, AUTH_TOKEN_ENV_VARIABLE_NAME))

        if not self._client:
            # NOTE(review): auth_token is validated above but never passed to
            # Client - presumably the client reads it from the environment
            # itself; confirm.
            self._client = Client(api_url=api_url)

        return self._client

    def _get_full_key_name(self, name, local):
        """
        Retrieve a full key name, prefixing it with the sensor-local prefix
        when ``local`` is True.

        :rtype: ``str``
        """
        if local:
            name = self._get_key_name_with_sensor_prefix(name=name)

        return name

    def _get_full_key_prefix(self, local, prefix=None):
        """
        Build the key prefix used for listing: the sensor-local prefix plus the
        optional user-supplied prefix (or just the user prefix for global keys).
        """
        if local:
            key_prefix = self._get_sensor_local_key_name_prefix()

            if prefix:
                key_prefix += prefix
        else:
            key_prefix = prefix

        return key_prefix

    def _get_sensor_local_key_name_prefix(self):
        """
        Retrieve key prefix which is local to this sensor.
        """
        key_prefix = self._get_datastore_key_prefix() + self.DATASTORE_NAME_SEPARATOR
        return key_prefix

    def _get_key_name_with_sensor_prefix(self, name):
        """
        Retrieve a full key name which is local to the current sensor.

        :param name: Base datastore key name.
        :type name: ``str``

        :rtype: ``str``
        """
        prefix = self._get_datastore_key_prefix()
        full_name = prefix + self.DATASTORE_NAME_SEPARATOR + name
        return full_name

    def _get_datastore_key_prefix(self):
        # Namespace keys by "<pack>.<sensor class name>".
        prefix = '%s.%s' % (self._sensor_wrapper._pack, self._sensor_wrapper._class_name)
        return prefix
class SensorService(object):
    """
    Instance of this class is passed to the sensor instance and exposes "public"
    methods which can be called by the sensor.
    """

    # Separator between the sensor-local key prefix and the key name.
    DATASTORE_NAME_SEPARATOR = ':'

    def __init__(self, sensor_wrapper):
        self._sensor_wrapper = sensor_wrapper
        self._logger = self._sensor_wrapper._logger
        self._dispatcher = TriggerDispatcher(self._logger)
        # API client is instantiated lazily in _get_api_client().
        self._client = None

    def get_logger(self, name):
        """
        Retrieve an instance of a logger to be used by the sensor class.

        :param name: Logger name suffix; appended to the wrapper logger's name.
        :type name: ``str``

        :rtype: :class:`logging.Logger`
        """
        logger_name = '%s.%s' % (self._sensor_wrapper._logger.name, name)
        logger = logging.getLogger(logger_name)
        logger.propagate = True
        return logger

    def dispatch(self, trigger, payload=None, trace_tag=None):
        """
        Method which dispatches the trigger.

        :param trigger: Full name / reference of the trigger.
        :type trigger: ``str``

        :param payload: Trigger payload.
        :type payload: ``dict``

        :param trace_tag: Tracer tag used to track the resulting trigger instance.
        :type trace_tag: ``str``
        """
        # A falsy trace_tag (None or empty string) means no trace context is
        # associated with this dispatch.
        trace_context = TraceContext(trace_tag=trace_tag) if trace_tag else None
        self.dispatch_with_context(trigger, payload=payload, trace_context=trace_context)

    def dispatch_with_context(self, trigger, payload=None, trace_context=None):
        """
        Method which dispatches the trigger.

        :param trigger: Full name / reference of the trigger.
        :type trigger: ``str``

        :param payload: Trigger payload.
        :type payload: ``dict``

        :param trace_context: Trace context to associate with Trigger.
        :type trace_context: ``st2common.api.models.api.trace.TraceContext``
        """
        self._dispatcher.dispatch(trigger, payload=payload, trace_context=trace_context)

    ##################################
    # Methods for datastore management
    ##################################

    def list_values(self, local=True, prefix=None):
        """
        Retrieve all the datastores items.

        :param local: List values from a namespace local to this sensor. Defaults to True.
        :type: local: ``bool``

        :param prefix: Optional key name prefix / startswith filter.
        :type prefix: ``str``

        :rtype: ``list`` of :class:`KeyValuePair`
        """
        client = self._get_api_client()

        self._logger.audit('Retrieving all the value from the datastore')

        # Local listing is restricted to this sensor's "<pack>.<class>:" prefix,
        # optionally narrowed further by the user-supplied prefix.
        if local:
            key_prefix = self._get_datastore_key_prefix() + self.DATASTORE_NAME_SEPARATOR

            if prefix:
                key_prefix += prefix
        else:
            key_prefix = prefix

        kvps = client.keys.get_all(prefix=key_prefix)
        return kvps

    def get_value(self, name, local=True):
        """
        Retrieve a value from the datastore for the provided key.

        By default, value is retrieved from the namespace local to the sensor. If you want to
        retrieve a global value from a datastore, pass local=False to this method.

        :param name: Key name.
        :type name: ``str``

        :param local: Retrieve value from a namespace local to the sensor. Defaults to True.
        :type: local: ``bool``

        :rtype: ``str`` or ``None``
        """
        if local:
            name = self._get_key_name_with_sensor_prefix(name=name)

        client = self._get_api_client()

        self._logger.audit('Retrieving value from the datastore (name=%s)', name)

        try:
            kvp = client.keys.get_by_id(id=name)
        except Exception:
            # Missing key (or any API error) is reported as "no value".
            return None

        if kvp:
            return kvp.value

        return None

    def set_value(self, name, value, ttl=None, local=True):
        """
        Set a value for the provided key.

        By default, value is set in a namespace local to the sensor. If you want to
        set a global value, pass local=False to this method.

        :param name: Key name.
        :type name: ``str``

        :param value: Key value.
        :type value: ``str``

        :param ttl: Optional TTL (in seconds).
        :type ttl: ``int``

        :param local: Set value in a namespace local to the sensor. Defaults to True.
        :type: local: ``bool``

        :return: ``True`` on success, ``False`` otherwise.
        :rtype: ``bool``
        """
        if local:
            name = self._get_key_name_with_sensor_prefix(name=name)

        # Datastore values are stored as strings.
        value = str(value)
        client = self._get_api_client()

        self._logger.audit('Setting value in the datastore (name=%s)', name)

        instance = KeyValuePair()
        instance.id = name
        instance.name = name
        instance.value = value

        if ttl:
            instance.ttl = ttl

        client.keys.update(instance=instance)
        return True

    def delete_value(self, name, local=True):
        """
        Delete the provided key.

        By default, value is deleted from a namespace local to the sensor. If you want to
        delete a global value, pass local=False to this method.

        :param name: Name of the key to delete.
        :type name: ``str``

        :param local: Delete a value in a namespace local to the sensor. Defaults to True.
        :type: local: ``bool``

        :return: ``True`` on success, ``False`` otherwise.
        :rtype: ``bool``
        """
        if local:
            name = self._get_key_name_with_sensor_prefix(name=name)

        client = self._get_api_client()

        instance = KeyValuePair()
        instance.id = name
        instance.name = name

        self._logger.audit('Deleting value from the datastore (name=%s)', name)

        try:
            client.keys.delete(instance=instance)
        except Exception:
            return False

        return True

    def _get_api_client(self):
        """
        Retrieve API client instance.

        :raises ValueError: If the API URL / auth token environment variables
                            are not set.
        """
        # TODO: API client is really unfriendly and needs to be re-designed and
        # improved
        api_url = os.environ.get(API_URL_ENV_VARIABLE_NAME, None)
        auth_token = os.environ.get(AUTH_TOKEN_ENV_VARIABLE_NAME, None)

        if not api_url or not auth_token:
            raise ValueError('%s and %s environment variable must be set' %
                             (API_URL_ENV_VARIABLE_NAME, AUTH_TOKEN_ENV_VARIABLE_NAME))

        if not self._client:
            # NOTE(review): auth_token is validated above but never passed to
            # Client - presumably the client reads it from the environment
            # itself; confirm.
            self._client = Client(api_url=api_url)

        return self._client

    def _get_key_name_with_sensor_prefix(self, name):
        """
        Retrieve a full key name which is local to the current sensor.

        :param name: Base datastore key name.
        :type name: ``str``

        :rtype: ``str``
        """
        prefix = self._get_datastore_key_prefix()
        full_name = prefix + self.DATASTORE_NAME_SEPARATOR + name
        return full_name

    def _get_datastore_key_prefix(self):
        # Namespace keys by "<pack>.<sensor class name>".
        prefix = '%s.%s' % (self._sensor_wrapper._pack, self._sensor_wrapper._class_name)
        return prefix
class ActionsController(resource.ContentPackResourceController):
    """
    Implements the RESTful web endpoint that handles
    the lifecycle of Actions in the system.
    """
    views = ActionViewsController()

    model = ActionAPI
    access = Action
    supported_filters = {
        'name': 'name',
        'pack': 'pack'
    }

    query_options = {
        'sort': ['pack', 'name']
    }

    # Attributes which the client may exclude from list responses.
    valid_exclude_attributes = [
        'parameters',
        'notify'
    ]

    include_reference = True

    def __init__(self, *args, **kwargs):
        super(ActionsController, self).__init__(*args, **kwargs)
        self._trigger_dispatcher = TriggerDispatcher(LOG)

    @request_user_has_permission(permission_type=PermissionType.ACTION_LIST)
    @jsexpose()
    def get_all(self, exclude_attributes=None, **kwargs):
        """
        Retrieve all the actions.

        :param exclude_attributes: Comma-delimited list of attributes to
                                   exclude from the response objects.
        """
        if exclude_attributes:
            exclude_fields = exclude_attributes.split(',')
        else:
            exclude_fields = None

        exclude_fields = self._validate_exclude_fields(exclude_fields)

        return super(ActionsController, self)._get_all(exclude_fields=exclude_fields, **kwargs)

    @request_user_has_resource_db_permission(permission_type=PermissionType.ACTION_VIEW)
    @jsexpose(arg_types=[str])
    def get_one(self, ref_or_id):
        """
        Retrieve a single action by reference or id.
        """
        return super(ActionsController, self)._get_one(ref_or_id)

    @jsexpose(body_cls=ActionCreateAPI, status_code=http_client.CREATED)
    @request_user_has_resource_api_permission(permission_type=PermissionType.ACTION_CREATE)
    def post(self, action):
        """
        Create a new action.

        Handles requests:
            POST /actions/
        """
        try:
            # Perform validation
            validate_not_part_of_system_pack(action)
            action_validator.validate_action(action)
        except (ValidationError, ValueError,
                ValueValidationException, InvalidActionParameterException) as e:
            LOG.exception('Unable to create action data=%s', action)
            abort(http_client.BAD_REQUEST, str(e))
            return

        # Write pack data files to disk (if any are provided)
        data_files = getattr(action, 'data_files', [])
        written_data_files = []
        if data_files:
            written_data_files = self._handle_data_files(pack_name=action.pack,
                                                         data_files=data_files)

        action_model = ActionAPI.to_model(action)

        LOG.debug('/actions/ POST verified ActionAPI object=%s', action)
        action_db = Action.add_or_update(action_model)
        LOG.debug('/actions/ POST saved ActionDB object=%s', action_db)

        # Dispatch an internal trigger for each written data file. This way the
        # user can automate committing these files to git using a StackStorm rule
        if written_data_files:
            self._dispatch_trigger_for_written_data_files(action_db=action_db,
                                                          written_data_files=written_data_files)

        # Fixed misspelled "acion_db" extra key so audit entries are consistent
        # with the key used by delete().
        extra = {'action_db': action_db}
        LOG.audit('Action created. Action.id=%s' % (action_db.id), extra=extra)
        action_api = ActionAPI.from_model(action_db)

        return action_api

    @request_user_has_resource_db_permission(permission_type=PermissionType.ACTION_MODIFY)
    @jsexpose(arg_types=[str], body_cls=ActionCreateAPI)
    def put(self, action_ref_or_id, action):
        """
        Update an existing action.

        Handles requests:
            PUT /actions/1
            PUT /actions/mypack.myaction
        """
        action_db = self._get_by_ref_or_id(ref_or_id=action_ref_or_id)

        # Assert permissions
        action_id = action_db.id

        if not getattr(action, 'pack', None):
            action.pack = action_db.pack

        # Perform validation
        validate_not_part_of_system_pack(action)
        action_validator.validate_action(action)

        # Write pack data files to disk (if any are provided)
        data_files = getattr(action, 'data_files', [])
        written_data_files = []
        if data_files:
            written_data_files = self._handle_data_files(pack_name=action.pack,
                                                         data_files=data_files)

        try:
            action_db = ActionAPI.to_model(action)
            LOG.debug('/actions/ PUT incoming action: %s', action_db)
            # Preserve the existing id so add_or_update performs an update.
            action_db.id = action_id
            action_db = Action.add_or_update(action_db)
            LOG.debug('/actions/ PUT after add_or_update: %s', action_db)
        except (ValidationError, ValueError) as e:
            LOG.exception('Unable to update action data=%s', action)
            abort(http_client.BAD_REQUEST, str(e))
            return

        # Dispatch an internal trigger for each written data file. This way the
        # user can automate committing these files to git using a StackStorm rule
        if written_data_files:
            self._dispatch_trigger_for_written_data_files(action_db=action_db,
                                                          written_data_files=written_data_files)

        action_api = ActionAPI.from_model(action_db)
        LOG.debug('PUT /actions/ client_result=%s', action_api)

        return action_api

    @request_user_has_resource_db_permission(permission_type=PermissionType.ACTION_DELETE)
    @jsexpose(arg_types=[str], status_code=http_client.NO_CONTENT)
    def delete(self, action_ref_or_id):
        """
        Delete an action.

        Handles requests:
            POST /actions/1?_method=delete
            DELETE /actions/1
            DELETE /actions/mypack.myaction
        """
        action_db = self._get_by_ref_or_id(ref_or_id=action_ref_or_id)
        action_id = action_db.id

        try:
            validate_not_part_of_system_pack(action_db)
        except ValueValidationException as e:
            abort(http_client.BAD_REQUEST, str(e))

        LOG.debug('DELETE /actions/ lookup with ref_or_id=%s found object: %s',
                  action_ref_or_id, action_db)

        try:
            Action.delete(action_db)
        except Exception as e:
            LOG.error('Database delete encountered exception during delete of id="%s". '
                      'Exception was %s', action_id, e)
            abort(http_client.INTERNAL_SERVER_ERROR, str(e))
            return

        extra = {'action_db': action_db}
        LOG.audit('Action deleted. Action.id=%s' % (action_db.id), extra=extra)
        return None

    def _handle_data_files(self, pack_name, data_files):
        """
        Method for handling action data files.

        This method performs two tasks:

        1. Writes files to disk
        2. Updates affected PackDB model
        """
        # Write files to disk
        written_file_paths = self._write_data_files_to_disk(pack_name=pack_name,
                                                            data_files=data_files)

        # Update affected PackDB model (update a list of files)
        # Update PackDB
        self._update_pack_model(pack_name=pack_name, data_files=data_files,
                                written_file_paths=written_file_paths)

        return written_file_paths

    def _write_data_files_to_disk(self, pack_name, data_files):
        """
        Write files to disk.

        :return: List of absolute paths of files which were written.
        """
        written_file_paths = []

        for data_file in data_files:
            file_path = data_file['file_path']
            content = data_file['content']

            file_path = get_pack_resource_file_abs_path(pack_name=pack_name,
                                                        resource_type='action',
                                                        file_path=file_path)

            LOG.debug('Writing data file "%s" to "%s"' % (str(data_file), file_path))
            self._write_data_file(pack_name=pack_name, file_path=file_path, content=content)
            written_file_paths.append(file_path)

        return written_file_paths

    def _update_pack_model(self, pack_name, data_files, written_file_paths):
        """
        Update PackDB models (update files list).
        """
        file_paths = []  # A list of paths relative to the pack directory for new files
        for file_path in written_file_paths:
            file_path = get_relative_path_to_pack(pack_name=pack_name, file_path=file_path)
            file_paths.append(file_path)

        pack_db = Pack.get_by_ref(pack_name)
        # De-duplicate while merging the newly written file paths.
        pack_db.files = set(pack_db.files)
        pack_db.files.update(set(file_paths))
        pack_db.files = list(pack_db.files)
        pack_db = Pack.add_or_update(pack_db)

        return pack_db

    def _write_data_file(self, pack_name, file_path, content):
        """
        Write data file on disk.

        :raises ValueError: If the pack directory doesn't exist.
        """
        # Throw if pack directory doesn't exist
        pack_base_path = get_pack_base_path(pack_name=pack_name)
        if not os.path.isdir(pack_base_path):
            raise ValueError('Directory for pack "%s" doesn\'t exist' % (pack_name))

        # Create pack sub-directory tree if it doesn't exist
        directory = os.path.dirname(file_path)

        if not os.path.isdir(directory):
            os.makedirs(directory)

        with open(file_path, 'w') as fp:
            fp.write(content)

    def _dispatch_trigger_for_written_data_files(self, action_db, written_data_files):
        """
        Dispatch an internal ACTION_FILE_WRITTEN trigger for each written file.
        """
        trigger = ACTION_FILE_WRITTEN_TRIGGER['name']
        host_info = get_host_info()

        for file_path in written_data_files:
            payload = {
                'ref': action_db.ref,
                'file_path': file_path,
                'host_info': host_info
            }
            self._trigger_dispatcher.dispatch(trigger=trigger, payload=payload)
class WebhooksController(object):
    """
    Controller which exposes registered webhook endpoints and dispatches
    trigger instances for incoming webhook requests.
    """

    def __init__(self, *args, **kwargs):
        self._hooks = HooksHolder()
        self._base_url = '/webhooks/'
        self._trigger_types = list(WEBHOOK_TRIGGER_TYPES.keys())

        self._trigger_dispatcher = TriggerDispatcher(LOG)
        queue_suffix = self.__class__.__name__
        self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,
                                               update_handler=self._handle_update_trigger,
                                               delete_handler=self._handle_delete_trigger,
                                               trigger_types=self._trigger_types,
                                               queue_suffix=queue_suffix,
                                               exclusive=True)
        # Keep the in-memory hook registry in sync with trigger CUD events.
        self._trigger_watcher.start()
        self._register_webhook_trigger_types()

    def get_all(self):
        # Return only the hooks known by this controller.
        return self._hooks.get_all()

    def get_one(self, url, requester_user):
        """
        Return the first trigger registered for the given hook URL, or 404.
        """
        triggers = self._hooks.get_triggers_for_hook(url)

        if not triggers:
            abort(http_client.NOT_FOUND)
            return

        permission_type = PermissionType.WEBHOOK_VIEW
        rbac_utils.assert_user_has_resource_db_permission(user_db=requester_user,
                                                          resource_db=WebhookDB(name=url),
                                                          permission_type=permission_type)

        # For demonstration purpose return 1st
        return triggers[0]

    def post(self, hook, webhook_body_api, headers, requester_user):
        """
        Handle an incoming webhook request: authorize, then dispatch trigger
        instance(s) with the request headers and body as the payload.
        """
        body = webhook_body_api.data

        permission_type = PermissionType.WEBHOOK_SEND
        rbac_utils.assert_user_has_resource_db_permission(user_db=requester_user,
                                                          resource_db=WebhookDB(name=hook),
                                                          permission_type=permission_type)

        headers = self._get_headers_as_dict(headers)

        # If webhook contains a trace-tag use that else create a unique trace-tag.
        trace_context = self._create_trace_context(trace_tag=headers.pop(TRACE_TAG_HEADER, None),
                                                   hook=hook)

        if hook == 'st2' or hook == 'st2/':
            # When using st2 or system webhook, body needs to always be a dict
            if not isinstance(body, dict):
                type_string = get_json_type_for_python_value(body)
                msg = ('Webhook body needs to be an object, got: %s' % (type_string))
                raise ValueError(msg)

            trigger = body.get('trigger', None)
            payload = body.get('payload', None)

            if not trigger:
                msg = 'Trigger not specified.'
                return abort(http_client.BAD_REQUEST, msg)

            self._trigger_dispatcher.dispatch(trigger, payload=payload,
                                              trace_context=trace_context)
        else:
            if not self._is_valid_hook(hook):
                self._log_request('Invalid hook.', headers, body)
                msg = 'Webhook %s not registered with st2' % hook
                return abort(http_client.NOT_FOUND, msg)

            triggers = self._hooks.get_triggers_for_hook(hook)
            payload = {}

            payload['headers'] = headers
            payload['body'] = body
            # Dispatch trigger instance for each of the trigger found
            for trigger in triggers:
                self._trigger_dispatcher.dispatch(trigger, payload=payload,
                                                  trace_context=trace_context)

        return Response(json=body, status=http_client.ACCEPTED)

    def _is_valid_hook(self, hook):
        # TODO: Validate hook payload with payload_schema.
        return hook in self._hooks

    def _register_webhook_trigger_types(self):
        # Ensure the webhook trigger types exist in the database.
        for trigger_type in WEBHOOK_TRIGGER_TYPES.values():
            trigger_service.create_trigger_type_db(trigger_type)

    def _create_trace_context(self, trace_tag, hook):
        # if no trace_tag then create a unique one
        if not trace_tag:
            trace_tag = 'webhook-%s-%s' % (hook, uuid.uuid4().hex)
        return TraceContext(trace_tag=trace_tag)

    def add_trigger(self, trigger):
        # Note: Permission checking for creating and deleting a webhook is done during rule
        # creation
        url = self._get_normalized_url(trigger)
        LOG.info('Listening to endpoint: %s', urljoin(self._base_url, url))
        self._hooks.add_hook(url, trigger)

    def update_trigger(self, trigger):
        # Hook registrations are keyed by URL only, so updates are a no-op.
        pass

    def remove_trigger(self, trigger):
        # Note: Permission checking for creating and deleting a webhook is done during rule
        # creation
        url = self._get_normalized_url(trigger)

        removed = self._hooks.remove_hook(url, trigger)
        if removed:
            LOG.info('Stop listening to endpoint: %s', urljoin(self._base_url, url))

    def _get_normalized_url(self, trigger):
        """
        remove the trailing and leading / so that the hook url and those coming
        from trigger parameters end up being the same.
        """
        return trigger['parameters']['url'].strip('/')

    def _get_headers_as_dict(self, headers):
        # Convert a header mapping object into a plain dict.
        headers_dict = {}
        for key, value in headers.items():
            headers_dict[key] = value
        return headers_dict

    def _log_request(self, msg, headers, body, log_method=LOG.debug):
        headers = self._get_headers_as_dict(headers)
        body = str(body)
        log_method('%s\n\trequest.header: %s.\n\trequest.body: %s.', msg, headers, body)

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        LOG.debug('Calling "add_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        LOG.debug('Calling "update_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.remove_trigger(trigger=trigger)

    def _sanitize_trigger(self, trigger):
        # Convert the trigger model into a plain dict for internal bookkeeping.
        sanitized = TriggerAPI.from_model(trigger).to_dict()
        return sanitized
class WebhooksController(RestController):
    """
    Pecan controller which exposes registered webhook endpoints and dispatches
    trigger instances for incoming webhook requests.
    """

    def __init__(self, *args, **kwargs):
        super(WebhooksController, self).__init__(*args, **kwargs)
        self._hooks = HooksHolder()
        self._base_url = "/webhooks/"
        # NOTE(review): under Python 3 .keys() is a view, not a list --
        # confirm TriggerWatcher handles a view (other versions wrap in list()).
        self._trigger_types = WEBHOOK_TRIGGER_TYPES.keys()

        self._trigger_dispatcher = TriggerDispatcher(LOG)
        queue_suffix = self.__class__.__name__
        self._trigger_watcher = TriggerWatcher(
            create_handler=self._handle_create_trigger,
            update_handler=self._handle_update_trigger,
            delete_handler=self._handle_delete_trigger,
            trigger_types=self._trigger_types,
            queue_suffix=queue_suffix,
            exclusive=True,
        )
        # Keep the in-memory hook registry in sync with trigger CUD events.
        self._trigger_watcher.start()
        self._register_webhook_trigger_types()

    @jsexpose()
    def get_all(self):
        # Return only the hooks known by this controller.
        return self._hooks.get_all()

    @jsexpose()
    def get_one(self, name):
        # Return the first trigger registered for the given hook name, or 404.
        triggers = self._hooks.get_triggers_for_hook(name)

        if not triggers:
            abort(http_client.NOT_FOUND)
            return

        # For demonstration purpose return 1st
        return triggers[0]

    @request_user_has_webhook_permission(permission_type=PermissionType.WEBHOOK_SEND)
    @jsexpose(arg_types=[str], status_code=http_client.ACCEPTED)
    def post(self, *args, **kwargs):
        """
        Handle an incoming webhook request: parse the body per Content-Type,
        then dispatch trigger instance(s) for the hook.
        """
        hook = "/".join(args)  # TODO: There must be a better way to do this.

        # Note: For backward compatibility reasons we default to application/json if content
        # type is not explicitly provided
        content_type = pecan.request.headers.get("Content-Type", "application/json")
        content_type = parse_content_type_header(content_type=content_type)[0]
        body = pecan.request.body

        try:
            body = self._parse_request_body(content_type=content_type, body=body)
        except Exception as e:
            self._log_request("Failed to parse request body: %s." % (str(e)), pecan.request)
            msg = 'Failed to parse request body "%s": %s' % (body, str(e))
            return pecan.abort(http_client.BAD_REQUEST, msg)

        headers = self._get_headers_as_dict(pecan.request.headers)

        # If webhook contains a trace-tag use that else create a unique trace-tag.
        trace_context = self._create_trace_context(trace_tag=headers.pop(TRACE_TAG_HEADER, None),
                                                   hook=hook)

        if hook == "st2" or hook == "st2/":
            return self._handle_st2_webhook(body, trace_context=trace_context)

        if not self._is_valid_hook(hook):
            self._log_request("Invalid hook.", pecan.request)
            msg = "Webhook %s not registered with st2" % hook
            return pecan.abort(http_client.NOT_FOUND, msg)

        triggers = self._hooks.get_triggers_for_hook(hook)
        payload = {}

        payload["headers"] = headers
        payload["body"] = body
        # Dispatch trigger instance for each of the trigger found
        for trigger in triggers:
            self._trigger_dispatcher.dispatch(trigger, payload=payload,
                                              trace_context=trace_context)

        return body

    def _parse_request_body(self, content_type, body):
        """
        Parse the raw request body based on the request Content-Type.

        :raises ValueError: For an unsupported Content-Type.
        """
        if content_type == "application/json":
            self._log_request("Parsing request body as JSON", request=pecan.request)
            body = json.loads(body)
        elif content_type in ["application/x-www-form-urlencoded", "multipart/form-data"]:
            self._log_request("Parsing request body as form encoded data", request=pecan.request)
            body = urlparse.parse_qs(body)
        else:
            raise ValueError('Unsupported Content-Type: "%s"' % (content_type))

        return body

    def _handle_st2_webhook(self, body, trace_context):
        # The system "st2" webhook expects an explicit trigger + payload in the body.
        trigger = body.get("trigger", None)
        payload = body.get("payload", None)
        if not trigger:
            msg = "Trigger not specified."
            return pecan.abort(http_client.BAD_REQUEST, msg)

        self._trigger_dispatcher.dispatch(trigger, payload=payload, trace_context=trace_context)

        return body

    def _is_valid_hook(self, hook):
        # TODO: Validate hook payload with payload_schema.
        return hook in self._hooks

    def _register_webhook_trigger_types(self):
        # Ensure the webhook trigger types exist in the database.
        for trigger_type in WEBHOOK_TRIGGER_TYPES.values():
            trigger_service.create_trigger_type_db(trigger_type)

    def _create_trace_context(self, trace_tag, hook):
        # if no trace_tag then create a unique one
        if not trace_tag:
            trace_tag = "webhook-%s-%s" % (hook, uuid.uuid4().hex)
        return TraceContext(trace_tag=trace_tag)

    def add_trigger(self, trigger):
        # Note: Permission checking for creating and deleting a webhook is done during rule
        # creation
        url = self._get_normalized_url(trigger)
        LOG.info("Listening to endpoint: %s", urljoin(self._base_url, url))
        self._hooks.add_hook(url, trigger)

    def update_trigger(self, trigger):
        # Hook registrations are keyed by URL only, so updates are a no-op.
        pass

    def remove_trigger(self, trigger):
        # Note: Permission checking for creating and deleting a webhook is done during rule
        # creation
        url = self._get_normalized_url(trigger)

        removed = self._hooks.remove_hook(url, trigger)
        if removed:
            LOG.info("Stop listening to endpoint: %s", urljoin(self._base_url, url))

    def _get_normalized_url(self, trigger):
        """
        remove the trailing and leading / so that the hook url and those coming
        from trigger parameters end up being the same.
        """
        return trigger["parameters"]["url"].strip("/")

    def _get_headers_as_dict(self, headers):
        # Convert a header mapping object into a plain dict.
        headers_dict = {}
        for key, value in headers.items():
            headers_dict[key] = value
        return headers_dict

    def _log_request(self, msg, request, log_method=LOG.debug):
        headers = self._get_headers_as_dict(request.headers)
        body = str(request.body)
        log_method("%s\n\trequest.header: %s.\n\trequest.body: %s.", msg, headers, body)

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        LOG.debug('Calling "add_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        LOG.debug('Calling "update_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.remove_trigger(trigger=trigger)

    def _sanitize_trigger(self, trigger):
        # NOTE(review): reaches into the MongoEngine document's private _data
        # attribute -- presumably safe for this model version; verify on upgrade.
        sanitized = trigger._data
        if "id" in sanitized:
            # Friendly objectid rather than the MongoEngine representation.
            sanitized["id"] = str(sanitized["id"])
        return sanitized
class ActionsController(resource.ContentPackResourceController):
    """
    Implements the RESTful web endpoint that handles
    the lifecycle of Actions in the system.
    """
    views = ActionViewsController()

    model = ActionAPI
    access = Action
    supported_filters = {
        'name': 'name',
        'pack': 'pack'
    }

    query_options = {
        'sort': ['pack', 'name']
    }

    include_reference = True

    def __init__(self, *args, **kwargs):
        super(ActionsController, self).__init__(*args, **kwargs)
        self._trigger_dispatcher = TriggerDispatcher(LOG)

    @request_user_has_permission(permission_type=PermissionType.ACTION_VIEW)
    @jsexpose()
    def get_all(self, **kwargs):
        """
        Retrieve all the actions.
        """
        return super(ActionsController, self)._get_all(**kwargs)

    @request_user_has_resource_db_permission(permission_type=PermissionType.ACTION_VIEW)
    @jsexpose(arg_types=[str])
    def get_one(self, ref_or_id):
        """
        Retrieve a single action by reference or id.
        """
        return super(ActionsController, self)._get_one(ref_or_id)

    @jsexpose(body_cls=ActionCreateAPI, status_code=http_client.CREATED)
    @request_user_has_resource_api_permission(permission_type=PermissionType.ACTION_CREATE)
    def post(self, action):
        """
        Create a new action.

        Handles requests:
            POST /actions/
        """
        # Perform validation
        validate_not_part_of_system_pack(action)
        action_validator.validate_action(action)

        # Write pack data files to disk (if any are provided)
        data_files = getattr(action, 'data_files', [])
        written_data_files = []
        if data_files:
            written_data_files = self._handle_data_files(pack_name=action.pack,
                                                         data_files=data_files)

        action_model = ActionAPI.to_model(action)

        LOG.debug('/actions/ POST verified ActionAPI object=%s', action)
        action_db = Action.add_or_update(action_model)
        LOG.debug('/actions/ POST saved ActionDB object=%s', action_db)

        # Dispatch an internal trigger for each written data file. This way the
        # user can automate committing these files to git using a StackStorm rule
        if written_data_files:
            self._dispatch_trigger_for_written_data_files(action_db=action_db,
                                                          written_data_files=written_data_files)

        # Fixed misspelled "acion_db" extra key so audit entries are consistent
        # with the key used by delete().
        extra = {'action_db': action_db}
        LOG.audit('Action created. Action.id=%s' % (action_db.id), extra=extra)
        action_api = ActionAPI.from_model(action_db)

        return action_api

    @request_user_has_resource_db_permission(permission_type=PermissionType.ACTION_MODIFY)
    @jsexpose(arg_types=[str], body_cls=ActionCreateAPI)
    def put(self, action_ref_or_id, action):
        """
        Update an existing action.

        Handles requests:
            PUT /actions/1
            PUT /actions/mypack.myaction
        """
        action_db = self._get_by_ref_or_id(ref_or_id=action_ref_or_id)

        # Assert permissions
        action_id = action_db.id

        if not getattr(action, 'pack', None):
            action.pack = action_db.pack

        # Perform validation
        validate_not_part_of_system_pack(action)
        action_validator.validate_action(action)

        # Write pack data files to disk (if any are provided)
        data_files = getattr(action, 'data_files', [])
        written_data_files = []
        if data_files:
            written_data_files = self._handle_data_files(pack_name=action.pack,
                                                         data_files=data_files)

        try:
            action_db = ActionAPI.to_model(action)
            # Preserve the existing id so add_or_update performs an update.
            action_db.id = action_id
            action_db = Action.add_or_update(action_db)
        except (ValidationError, ValueError) as e:
            LOG.exception('Unable to update action data=%s', action)
            abort(http_client.BAD_REQUEST, str(e))
            return

        # Dispatch an internal trigger for each written data file. This way the
        # user can automate committing these files to git using a StackStorm rule
        if written_data_files:
            self._dispatch_trigger_for_written_data_files(action_db=action_db,
                                                          written_data_files=written_data_files)

        action_api = ActionAPI.from_model(action_db)
        LOG.debug('PUT /actions/ client_result=%s', action_api)

        return action_api

    @request_user_has_resource_db_permission(permission_type=PermissionType.ACTION_DELETE)
    @jsexpose(arg_types=[str], status_code=http_client.NO_CONTENT)
    def delete(self, action_ref_or_id):
        """
        Delete an action.

        Handles requests:
            POST /actions/1?_method=delete
            DELETE /actions/1
            DELETE /actions/mypack.myaction
        """
        action_db = self._get_by_ref_or_id(ref_or_id=action_ref_or_id)
        action_id = action_db.id

        try:
            validate_not_part_of_system_pack(action_db)
        except ValueValidationException as e:
            abort(http_client.BAD_REQUEST, str(e))

        LOG.debug('DELETE /actions/ lookup with ref_or_id=%s found object: %s',
                  action_ref_or_id, action_db)

        try:
            Action.delete(action_db)
        except Exception as e:
            LOG.error('Database delete encountered exception during delete of id="%s". '
                      'Exception was %s', action_id, e)
            abort(http_client.INTERNAL_SERVER_ERROR, str(e))
            return

        extra = {'action_db': action_db}
        LOG.audit('Action deleted. Action.id=%s' % (action_db.id), extra=extra)
        return None

    def _handle_data_files(self, pack_name, data_files):
        """
        Method for handling action data files.

        This method performs two tasks:

        1. Writes files to disk
        2. Updates affected PackDB model
        """
        # Write files to disk
        written_file_paths = self._write_data_files_to_disk(pack_name=pack_name,
                                                            data_files=data_files)

        # Update affected PackDB model (update a list of files)
        # Update PackDB
        self._update_pack_model(pack_name=pack_name, data_files=data_files,
                                written_file_paths=written_file_paths)

        return written_file_paths

    def _write_data_files_to_disk(self, pack_name, data_files):
        """
        Write files to disk.

        :return: List of absolute paths of files which were written.
        """
        written_file_paths = []

        for data_file in data_files:
            file_path = data_file['file_path']
            content = data_file['content']

            file_path = get_pack_resource_file_abs_path(pack_name=pack_name,
                                                        resource_type='action',
                                                        file_path=file_path)

            LOG.debug('Writing data file "%s" to "%s"' % (str(data_file), file_path))
            self._write_data_file(pack_name=pack_name, file_path=file_path, content=content)
            written_file_paths.append(file_path)

        return written_file_paths

    def _update_pack_model(self, pack_name, data_files, written_file_paths):
        """
        Update PackDB models (update files list).
        """
        file_paths = []  # A list of paths relative to the pack directory for new files
        for file_path in written_file_paths:
            file_path = get_relative_path_to_pack(pack_name=pack_name, file_path=file_path)
            file_paths.append(file_path)

        pack_db = Pack.get_by_ref(pack_name)
        # De-duplicate while merging the newly written file paths.
        pack_db.files = set(pack_db.files)
        pack_db.files.update(set(file_paths))
        pack_db.files = list(pack_db.files)
        pack_db = Pack.add_or_update(pack_db)

        return pack_db

    def _write_data_file(self, pack_name, file_path, content):
        """
        Write data file on disk.

        :raises ValueError: If the pack directory doesn't exist.
        """
        # Throw if pack directory doesn't exist
        pack_base_path = get_pack_base_path(pack_name=pack_name)
        if not os.path.isdir(pack_base_path):
            raise ValueError('Directory for pack "%s" doesn\'t exist' % (pack_name))

        # Create pack sub-directory tree if it doesn't exist
        directory = os.path.dirname(file_path)

        if not os.path.isdir(directory):
            os.makedirs(directory)

        with open(file_path, 'w') as fp:
            fp.write(content)

    def _dispatch_trigger_for_written_data_files(self, action_db, written_data_files):
        """
        Dispatch an internal ACTION_FILE_WRITTEN trigger for each written file.
        """
        trigger = ACTION_FILE_WRITTEN_TRIGGER['name']
        host_info = get_host_info()

        for file_path in written_data_files:
            payload = {
                'ref': action_db.ref,
                'file_path': file_path,
                'host_info': host_info
            }
            self._trigger_dispatcher.dispatch(trigger=trigger, payload=payload)
class WebhooksController(RestController):
    """
    Pecan controller which exposes registered webhook endpoints and dispatches
    a trigger instance for incoming webhook requests. Hooks are tracked in a
    plain dict keyed by URL (one trigger per hook).
    """

    def __init__(self, *args, **kwargs):
        super(WebhooksController, self).__init__(*args, **kwargs)
        self._hooks = {}
        self._base_url = "/webhooks/"
        # NOTE(review): under Python 3 .keys() is a view, not a list --
        # confirm TriggerWatcher handles a view.
        self._trigger_types = WEBHOOK_TRIGGER_TYPES.keys()

        self._trigger_dispatcher = TriggerDispatcher(LOG)
        self._trigger_watcher = TriggerWatcher(
            create_handler=self._handle_create_trigger,
            update_handler=self._handle_update_trigger,
            delete_handler=self._handle_delete_trigger,
            trigger_types=self._trigger_types,
            queue_suffix="webhooks",
        )
        # Keep the in-memory hook registry in sync with trigger CUD events.
        self._trigger_watcher.start()
        self._register_webhook_trigger_types()

    @jsexpose()
    def get_all(self):
        # Return only the hooks known by this controller.
        return [trigger for trigger in six.itervalues(self._hooks)]

    @jsexpose()
    def get_one(self, name):
        # Return the trigger registered for the given hook name, or 404.
        hook = self._hooks.get(name, None)

        if not hook:
            abort(http_client.NOT_FOUND)
            return

        return hook

    @request_user_has_webhook_permission(permission_type=PermissionType.WEBHOOK_SEND)
    @jsexpose(arg_types=[str], status_code=http_client.ACCEPTED)
    def post(self, *args, **kwargs):
        """
        Handle an incoming webhook request: parse the JSON body, then dispatch
        a trigger instance for the hook.
        """
        hook = "/".join(args)  # TODO: There must be a better way to do this.

        body = pecan.request.body
        try:
            body = json.loads(body)
        except ValueError:
            self._log_request("Invalid JSON body.", pecan.request)
            msg = "Invalid JSON body: %s" % (body)
            return pecan.abort(http_client.BAD_REQUEST, msg)

        headers = self._get_headers_as_dict(pecan.request.headers)

        # If webhook contains a trace-tag use that else create a unique trace-tag.
        trace_context = self._create_trace_context(trace_tag=headers.pop(TRACE_TAG_HEADER, None),
                                                   hook=hook)

        if hook == "st2" or hook == "st2/":
            return self._handle_st2_webhook(body, trace_context=trace_context)

        if not self._is_valid_hook(hook):
            self._log_request("Invalid hook.", pecan.request)
            msg = "Webhook %s not registered with st2" % hook
            return pecan.abort(http_client.NOT_FOUND, msg)

        trigger = self._get_trigger_for_hook(hook)
        payload = {}

        payload["headers"] = headers
        payload["body"] = body
        self._trigger_dispatcher.dispatch(trigger, payload=payload, trace_context=trace_context)

        return body

    def _handle_st2_webhook(self, body, trace_context):
        # The system "st2" webhook expects an explicit trigger + payload in the body.
        trigger = body.get("trigger", None)
        payload = body.get("payload", None)
        if not trigger:
            msg = "Trigger not specified."
            return pecan.abort(http_client.BAD_REQUEST, msg)

        self._trigger_dispatcher.dispatch(trigger, payload=payload, trace_context=trace_context)

        return body

    def _is_valid_hook(self, hook):
        # TODO: Validate hook payload with payload_schema.
        return hook in self._hooks

    def _get_trigger_for_hook(self, hook):
        return self._hooks[hook]

    def _register_webhook_trigger_types(self):
        # Ensure the webhook trigger types exist in the database.
        for trigger_type in WEBHOOK_TRIGGER_TYPES.values():
            trigger_service.create_trigger_type_db(trigger_type)

    def _create_trace_context(self, trace_tag, hook):
        # if no trace_tag then create a unique one
        if not trace_tag:
            trace_tag = "webhook-%s-%s" % (hook, uuid.uuid4().hex)
        return TraceContext(trace_tag=trace_tag)

    def add_trigger(self, trigger):
        # Note: Permission checking for creating and deleting a webhook is done during rule
        # creation
        url = trigger["parameters"]["url"]
        LOG.info("Listening to endpoint: %s", urljoin(self._base_url, url))
        self._hooks[url] = trigger

    def update_trigger(self, trigger):
        # Hook registrations are keyed by URL only, so updates are a no-op.
        pass

    def remove_trigger(self, trigger):
        # Note: Permission checking for creating and deleting a webhook is done during rule
        # creation
        url = trigger["parameters"]["url"]

        if url in self._hooks:
            LOG.info("Stop listening to endpoint: %s", urljoin(self._base_url, url))
            del self._hooks[url]

    def _get_headers_as_dict(self, headers):
        # Convert a header mapping object into a plain dict.
        headers_dict = {}
        for key, value in headers.items():
            headers_dict[key] = value
        return headers_dict

    def _log_request(self, msg, request, log_method=LOG.debug):
        headers = self._get_headers_as_dict(request.headers)
        body = str(request.body)
        log_method("%s\n\trequest.header: %s.\n\trequest.body: %s.", msg, headers, body)

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        LOG.debug('Calling "add_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        LOG.debug('Calling "update_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.remove_trigger(trigger=trigger)

    def _sanitize_trigger(self, trigger):
        # NOTE(review): reaches into the MongoEngine document's private _data
        # attribute -- presumably safe for this model version; verify on upgrade.
        sanitized = trigger._data
        if "id" in sanitized:
            # Friendly objectid rather than the MongoEngine representation.
            sanitized["id"] = str(sanitized["id"])
        return sanitized