Example #1
    def _get_action_alias_db_by_name(self, name):
        """
        Retrieve ActionAlias DB object for the provided alias name.
        """
        base_pack_path = self._get_base_pack_path()
        pack_yaml_path = os.path.join(base_pack_path, MANIFEST_FILE_NAME)

        if os.path.isfile(pack_yaml_path):
            # 1. First, try to infer the pack name from the pack metadata file
            meta_loader = MetaLoader()
            pack_metadata = meta_loader.load(pack_yaml_path)
            pack = get_pack_ref_from_metadata(metadata=pack_metadata)
        else:
            # 2. If pack.yaml is not available, fall back to the directory name
            # Note: For this to work, the directory name needs to match the pack name
            _, pack = os.path.split(base_pack_path)

        pack_loader = ContentPackLoader()
        registrar = AliasesRegistrar(use_pack_cache=False)

        aliases_path = pack_loader.get_content_from_pack(pack_dir=base_pack_path,
                                                         content_type='aliases')
        aliases = registrar._get_aliases_from_pack(aliases_dir=aliases_path)
        for alias_path in aliases:
            action_alias_db = registrar._get_action_alias_db(pack=pack,
                                                             action_alias=alias_path)

            if action_alias_db.name == name:
                return action_alias_db

        raise ValueError('Alias with name "%s" not found' % (name))
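
The pack-reference lookup at the top of this method can be reused on its own. Below is a minimal sketch of the same pattern; the import paths and the MANIFEST_FILE_NAME value are assumptions, since the snippet above does not show its imports.

import os

# Assumed import paths; the example above does not show its imports.
from st2common.content.loader import MetaLoader
from st2common.util.pack import get_pack_ref_from_metadata

MANIFEST_FILE_NAME = 'pack.yaml'  # assumed value of the constant used above


def infer_pack_ref(base_pack_path):
    # Prefer the pack.yaml metadata; fall back to the directory name.
    pack_yaml_path = os.path.join(base_pack_path, MANIFEST_FILE_NAME)

    if os.path.isfile(pack_yaml_path):
        pack_metadata = MetaLoader().load(pack_yaml_path)
        return get_pack_ref_from_metadata(metadata=pack_metadata)

    # For this fallback to work, the directory name must match the pack name.
    _, pack = os.path.split(base_pack_path)
    return pack
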
Example #2
def register_runners(runner_dirs=None,
                     experimental=False,
                     fail_on_failure=True):
    """ Register runners
    """
    LOG.debug('Start : register runners')
    runner_count = 0
    runner_loader = RunnersLoader()

    if not runner_dirs:
        runner_dirs = content_utils.get_runners_base_paths()

    runners = runner_loader.get_runners(runner_dirs)

    for runner, path in runners.iteritems():
        LOG.info('Runner "%s"' % (runner))
        runner_manifest = os.path.join(path, MANIFEST_FILE_NAME)
        meta_loader = MetaLoader()
        runner_types = meta_loader.load(runner_manifest)
        for runner_type in runner_types:
            runner_count += register_runner(runner_type, experimental)

    LOG.debug('End : register runners')

    return runner_count
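
A short usage sketch for register_runners(): call it with no arguments to scan the default runner base paths, or pass an explicit list of directories; it returns the number of runners registered. The import path is an assumption, the directory is illustrative, and a working database connection (see db_setup in Example #33) is assumed.

# Assumed import path; the explicit directory below is illustrative only.
from st2common.bootstrap.runnersregistrar import register_runners

# Runner directories are discovered via content_utils.get_runners_base_paths().
count = register_runners()
print('Registered %d runners' % (count))

# Or point the registrar at explicit runner directories.
count = register_runners(runner_dirs=['/opt/stackstorm/runners'])
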
Example #3
 def __init__(self, runner_id):
     super(ActionChainRunner, self).__init__(runner_id=runner_id)
     self.chain_holder = None
     self._meta_loader = MetaLoader()
     self._stopped = False
     self._skip_notify_tasks = []
     self._chain_notify = None
Example #4
 def __init__(self, runner_id):
     super(ActionChainRunner, self).__init__(runner_id=runner_id)
     self.chain_holder = None
     self._meta_loader = MetaLoader()
     self._skip_notify_tasks = []
     self._display_published = True
     self._chain_notify = None
Example #5
    def _get_action_alias_db_by_name(self, name):
        """
        Retrieve ActionAlias DB object for the provided alias name.
        """
        base_pack_path = self._get_base_pack_path()
        pack_yaml_path = os.path.join(base_pack_path, MANIFEST_FILE_NAME)

        if os.path.isfile(pack_yaml_path):
            # 1. First, try to infer the pack name from the pack metadata file
            meta_loader = MetaLoader()
            pack_metadata = meta_loader.load(pack_yaml_path)
            pack = get_pack_ref_from_metadata(metadata=pack_metadata)
        else:
            # 2. If pack.yaml is not available, fall back to the directory name
            # Note: For this to work, the directory name needs to match the pack name
            _, pack = os.path.split(base_pack_path)

        pack_loader = ContentPackLoader()
        registrar = AliasesRegistrar(use_pack_cache=False)

        aliases_path = pack_loader.get_content_from_pack(pack_dir=base_pack_path,
                                                         content_type='aliases')
        aliases = registrar._get_aliases_from_pack(aliases_dir=aliases_path)
        for alias_path in aliases:
            action_alias_db = registrar._get_action_alias_db(pack=pack,
                                                             action_alias=alias_path,
                                                             ignore_metadata_file_error=True)

            if action_alias_db.name == name:
                return action_alias_db

        raise ValueError('Alias with name "%s" not found' % (name))
Example #6
def register_runners(runner_dir=None, experimental=False, fail_on_failure=True):
    """ Register runners
    """
    LOG.debug('Start : register runners')
    runner_count = 0
    runner_loader = RunnersLoader()

    if runner_dir:
        assert isinstance(runner_dir, list)
        runner_dirs = runner_dir
    else:
        runner_dirs = content_utils.get_runners_base_paths()

    runners = runner_loader.get_runners(runner_dirs)

    for runner, path in runners.iteritems():
        LOG.info('Runner "%s"' % (runner))
        runner_manifest = os.path.join(path, MANIFEST_FILE_NAME)
        meta_loader = MetaLoader()
        runner_types = meta_loader.load(runner_manifest)
        for runner_type in runner_types:
            runner_count += register_runner(runner_type, experimental)

    LOG.debug('End : register runners')

    return runner_count
Example #7
File: base.py Project: tzmvp/st2
    def __init__(self,
                 use_pack_cache=True,
                 use_runners_cache=False,
                 fail_on_failure=False):
        """
        :param use_pack_cache: True to cache which packs have been registered in memory, making
                               sure each pack is only registered once.
        :type use_pack_cache: ``bool``

        :param use_runners_cache: True to cache RunnerTypeDB objects in memory to reduce load on
                                  the database.
        :type use_runners_cache: ``bool``

        :param fail_on_failure: Throw an exception if resource registration fails.
        :type fail_on_failure: ``bool``
        """
        self._use_pack_cache = use_pack_cache
        self._use_runners_cache = use_runners_cache
        self._fail_on_failure = fail_on_failure

        self._meta_loader = MetaLoader()
        self._pack_loader = ContentPackLoader()

        # Maps runner name -> RunnerTypeDB
        self._runner_type_db_cache = {}
Example #8
    def __init__(self):
        base_path = cfg.CONF.system.base_path

        self._rbac_definitions_path = os.path.join(base_path, 'rbac/')
        self._role_definitions_path = os.path.join(self._rbac_definitions_path, 'roles/')
        self._role_assignments_path = os.path.join(self._rbac_definitions_path, 'assignments/')
        self._role_maps_path = os.path.join(self._rbac_definitions_path, 'mappings/')
        self._meta_loader = MetaLoader()
Example #9
 def __init__(self, use_pack_cache=True):
     """
     :param use_pack_cache: True to cache which packs have been registered in memory, making
                            sure each pack is only registered once.
     :type use_pack_cache: ``bool``
     """
     self._use_pack_cache = use_pack_cache
     self._meta_loader = MetaLoader()
     self._pack_loader = ContentPackLoader()
Example #10
class RuleTester(object):
    def __init__(self, rule_file_path, trigger_instance_file_path):
        """
        :param rule_file_path: Path to the file containing rule definition.
        :type rule_file_path: ``str``

        :param trigger_instance_file_path: Path to the file containing trigger instance definition.
        :type trigger_instance_file_path: ``str``
        """
        self._rule_file_path = rule_file_path
        self._trigger_instance_file_path = trigger_instance_file_path
        self._meta_loader = MetaLoader()

    def evaluate(self):
        """
        Evaluate trigger instance against the rule.

        :return: ``True`` if the rule matches, ``False`` otherwise.
        :rtype: ``boolean``
        """
        rule_db = self._get_rule_db_from_file(file_path=self._rule_file_path)
        trigger_instance_db = \
            self._get_trigger_instance_db_from_file(file_path=self._trigger_instance_file_path)

        trigger_ref = ResourceReference.from_string_reference(
            trigger_instance_db['trigger'])

        trigger_db = TriggerDB(pack=trigger_ref.pack,
                               name=trigger_ref.name,
                               type=trigger_ref.ref)

        matcher = RulesMatcher(trigger_instance=trigger_instance_db,
                               trigger=trigger_db,
                               rules=[rule_db])
        matching_rules = matcher.get_matching_rules()
        return len(matching_rules) >= 1

    def _get_rule_db_from_file(self, file_path):
        data = self._meta_loader.load(file_path=file_path)
        pack = data.get('pack', 'unknown')
        name = data.get('name', 'unknown')
        trigger = data['trigger']['type']
        criteria = data.get('criteria', None)

        rule_db = RuleDB(pack=pack,
                         name=name,
                         trigger=trigger,
                         criteria=criteria,
                         action={},
                         enabled=True)
        return rule_db

    def _get_trigger_instance_db_from_file(self, file_path):
        data = self._meta_loader.load(file_path=file_path)
        instance = TriggerInstanceDB(**data)
        return instance
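
Typical use of the RuleTester above: point it at a rule file and a trigger-instance file, then call evaluate(). A minimal sketch; the import path and file paths are assumptions.

# Assumed import path; the file paths are illustrative.
from st2reactor.rules.tester import RuleTester

tester = RuleTester(rule_file_path='/tmp/my_rule.yaml',
                    trigger_instance_file_path='/tmp/my_trigger_instance.yaml')

if tester.evaluate():
    print('Trigger instance matches the rule')
else:
    print('Trigger instance does not match the rule')
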
Example #11
    def __init__(self, rule_file_path, trigger_instance_file_path):
        """
        :param rule_file_path: Path to the file containing rule definition.
        :type rule_file_path: ``str``

        :param trigger_instance_file_path: Path to the file containing trigger instance definition.
        :type trigger_instance_file_path: ``str``
        """
        self._rule_file_path = rule_file_path
        self._trigger_instance_file_path = trigger_instance_file_path
        self._meta_loader = MetaLoader()
Example #12
    def __init__(self, use_pack_cache=True, fail_on_failure=False):
        """
        :param use_pack_cache: True to cache which packs have been registered in memory, making
                               sure each pack is only registered once.
        :type use_pack_cache: ``bool``

        :param fail_on_failure: Throw an exception if resource registration fails.
        :type fail_on_failure: ``bool``
        """
        self._use_pack_cache = use_pack_cache
        self._fail_on_failure = fail_on_failure

        self._meta_loader = MetaLoader()
        self._pack_loader = ContentPackLoader()
Example #13
class RuleTester(object):
    def __init__(self, rule_file_path, trigger_instance_file_path):
        """
        :param rule_file_path: Path to the file containing rule definition.
        :type rule_file_path: ``str``

        :param trigger_instance_file_path: Path to the file containing trigger instance definition.
        :type trigger_instance_file_path: ``str``
        """
        self._rule_file_path = rule_file_path
        self._trigger_instance_file_path = trigger_instance_file_path
        self._meta_loader = MetaLoader()

    def evaluate(self):
        """
        Evaluate trigger instance against the rule.

        :return: ``True`` if the rule matches, ``False`` otherwise.
        :rtype: ``boolean``
        """
        rule_db = self._get_rule_db_from_file(file_path=self._rule_file_path)
        trigger_instance_db = \
            self._get_trigger_instance_db_from_file(file_path=self._trigger_instance_file_path)

        trigger_ref = ResourceReference.from_string_reference(trigger_instance_db['trigger'])

        trigger_db = TriggerDB()
        trigger_db.pack = trigger_ref.pack
        trigger_db.name = trigger_ref.name
        trigger_db.type = trigger_ref.ref

        matcher = RulesMatcher(trigger_instance=trigger_instance_db, trigger=trigger_db,
                               rules=[rule_db])
        matching_rules = matcher.get_matching_rules()
        return len(matching_rules) >= 1

    def _get_rule_db_from_file(self, file_path):
        data = self._meta_loader.load(file_path=file_path)
        rule_db = RuleDB()
        rule_db.trigger = data['trigger']['type']
        rule_db.criteria = data.get('criteria', None)
        rule_db.action = {}
        rule_db.enabled = True
        return rule_db

    def _get_trigger_instance_db_from_file(self, file_path):
        data = self._meta_loader.load(file_path=file_path)
        instance = TriggerInstanceDB(**data)
        return instance
Example #14
def _load_config_schemas():
    config_schemas = {}

    packs = ContentPackLoader().get_packs(content_utils.get_packs_base_paths())

    for pack_name, pack_dir in six.iteritems(packs):
        config_schema_path = os.path.join(pack_dir, CONFIG_SCHEMA_FILE_NAME)

        if not os.path.isfile(config_schema_path):
            # Note: Config schema is optional
            continue

        values = MetaLoader().load(config_schema_path)

        if not values:
            raise ValueError('Config schema "%s" is empty and invalid.' %
                             (config_schema_path))

        content = {}
        content['pack'] = pack_name
        content['attributes'] = values

        config_schema_api = ConfigSchemaAPI(**content)
        config_schema_api = config_schema_api.validate()
        config_schemas[pack_name] = values

    return config_schemas
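
The per-pack step inside _load_config_schemas() can be sketched in isolation. The import paths and the CONFIG_SCHEMA_FILE_NAME value are not shown above and are assumptions here.

import os

# Assumed import paths and constant value.
from st2common.content.loader import MetaLoader
from st2common.models.api.pack import ConfigSchemaAPI

CONFIG_SCHEMA_FILE_NAME = 'config.schema.yaml'


def load_config_schema_for_pack(pack_name, pack_dir):
    # The config schema is optional, so a missing file is not an error.
    config_schema_path = os.path.join(pack_dir, CONFIG_SCHEMA_FILE_NAME)
    if not os.path.isfile(config_schema_path):
        return None

    values = MetaLoader().load(config_schema_path)
    if not values:
        raise ValueError('Config schema "%s" is empty and invalid.' % (config_schema_path))

    # validate() raises if the attributes do not conform to the API model.
    ConfigSchemaAPI(pack=pack_name, attributes=values).validate()
    return values
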
Example #15
    def __init__(self):
        base_path = cfg.CONF.system.base_path
        rbac_definitions_path = os.path.join(base_path, 'rbac/')

        self._role_definitions_path = os.path.join(rbac_definitions_path, 'roles/')
        self._role_assignments_path = os.path.join(rbac_definitions_path, 'assignments/')
        self._meta_loader = MetaLoader()
Example #18
def get_pack_metadata(pack_dir):
    """
    Return parsed metadata for a particular pack directory.

    :rtype: ``dict``
    """
    manifest_path = os.path.join(pack_dir, MANIFEST_FILE_NAME)

    if not os.path.isfile(manifest_path):
        raise ValueError('Pack "%s" is missing %s file' % (pack_dir, MANIFEST_FILE_NAME))

    meta_loader = MetaLoader()
    content = meta_loader.load(manifest_path)
    if not content:
        raise ValueError('Pack "%s" metadata file is empty' % (pack_dir))

    return content
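
Usage is a one-liner: pass the pack directory and get the parsed pack.yaml back as a dict. The import path below is an assumption and the pack directory is illustrative.

# Assumed import path; the pack directory is illustrative.
from st2common.util.pack import get_pack_metadata

metadata = get_pack_metadata(pack_dir='/opt/stackstorm/packs/examples')
print('%s %s' % (metadata.get('name'), metadata.get('version')))
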
Example #20
File: base.py Project: hejin/st2
 def __init__(self, use_pack_cache=True):
     """
     :param use_pack_cache: True to cache which packs have been registered in memory, making
                            sure each pack is only registered once.
     :type use_pack_cache: ``bool``
     """
     self._use_pack_cache = use_pack_cache
     self._meta_loader = MetaLoader()
     self._pack_loader = ContentPackLoader()
Example #22
File: base.py Project: Bala96/st2
    def __init__(self, use_pack_cache=True, fail_on_failure=False):
        """
        :param use_pack_cache: True to cache which packs have been registered in memory, making
                               sure each pack is only registered once.
        :type use_pack_cache: ``bool``

        :param fail_on_failure: Throw an exception if resource registration fails.
        :type fail_on_failure: ``bool``
        """
        self._use_pack_cache = use_pack_cache
        self._fail_on_failure = fail_on_failure

        self._meta_loader = MetaLoader()
        self._pack_loader = ContentPackLoader()
Example #23
def _load_actions():
    actions = {}
    action_dirs = ContentPackLoader().get_content(content_utils.get_packs_base_paths(), 'actions')

    for pack in action_dirs:
        for action_path in ActionsRegistrar().get_resources_from_pack(action_dirs[pack]):
            content = MetaLoader().load(action_path)
            ref = pack + "." + content['name']

            action_api = ActionAPI(pack=pack, **content)
            action_api.validate()
            # action_validator.validate_action(action_api)
            actions[ref] = ActionAPI.to_model(action_api)

    return actions
Example #24
File: base.py Project: chadpatt/st2
class RunnerTestCase(unittest2.TestCase):
    meta_loader = MetaLoader()

    def assertCommonSt2EnvVarsAvailableInEnv(self, env):
        """
        Method which asserts that the common ST2 environment variables are present in the provided
        environment.
        """
        for var_name in COMMON_ACTION_ENV_VARIABLES:
            self.assertIn(var_name, env)
        self.assertEqual(env["ST2_ACTION_API_URL"], get_full_public_api_url())
        self.assertIsNotNone(env[AUTH_TOKEN_ENV_VARIABLE_NAME])

    def loader(self, path):
        """Load the runner config"""
        return self.meta_loader.load(path)
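
A hedged sketch of how a runner test might build on RunnerTestCase and use the loader() helper to read its runner metadata file. The base-class import path, the file name, and the test layout are assumptions.

import os

# Assumed import path for the base class shown above.
from st2tests.base import RunnerTestCase


class LocalRunnerMetadataTestCase(RunnerTestCase):
    def test_runner_metadata_parses(self):
        # Illustrative path: a runner.yaml sitting next to this test module.
        metadata_path = os.path.join(os.path.dirname(__file__), 'runner.yaml')
        runner_metadata = self.loader(metadata_path)
        self.assertTrue(runner_metadata)
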
Example #25
class RulesRegistrar(object):
    def __init__(self):
        self._meta_loader = MetaLoader()

    def _get_json_rules_from_pack(self, rules_dir):
        return glob.glob(rules_dir + '/*.json')

    def _get_yaml_rules_from_pack(self, rules_dir):
        rules = glob.glob(rules_dir + '/*.yaml')
        rules.extend(glob.glob(rules_dir + '/*.yml'))
        return rules

    def _get_rules_from_pack(self, rules_dir):
        rules = self._get_json_rules_from_pack(rules_dir) or []
        rules.extend(self._get_yaml_rules_from_pack(rules_dir) or [])
        return rules

    def _register_rules_from_pack(self, pack, rules):
        for rule in rules:
            LOG.debug('Loading rule from %s.', rule)
            try:
                content = self._meta_loader.load(rule)
                rule_api = RuleAPI(**content)
                rule_db = RuleAPI.to_model(rule_api)
                try:
                    rule_db.id = Rule.get_by_name(rule_api.name).id
                except ValueError:
                    LOG.info('Rule %s not found. Creating new one.', rule)
                try:
                    rule_db = Rule.add_or_update(rule_db)
                    LOG.audit('Rule updated. Rule %s from %s.', rule_db, rule)
                except Exception:
                    LOG.exception('Failed to create rule %s.', rule_api.name)
            except:
                LOG.exception('Failed registering rule from %s.', rule)

    def register_rules_from_packs(self, base_dir):
        pack_loader = ContentPackLoader()
        dirs = pack_loader.get_content(base_dir=base_dir,
                                       content_type='rules')
        for pack, rules_dir in six.iteritems(dirs):
            try:
                LOG.info('Registering rules from pack: %s', pack)
                rules = self._get_rules_from_pack(rules_dir)
                self._register_rules_from_pack(pack, rules)
            except:
                LOG.exception('Failed registering all rules from pack: %s', rules_dir)
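
Registering rules with the class above follows the usual bootstrap pattern: iterate the pack base directories and call register_rules_from_packs() for each. A sketch; the import paths are assumptions and a working database connection is assumed.

# Assumed import paths; db_setup() is assumed to have been called.
from st2reactor.bootstrap.rulesregistrar import RulesRegistrar
import st2common.content.utils as content_utils

registrar = RulesRegistrar()
for base_dir in content_utils.get_packs_base_paths():
    registrar.register_rules_from_packs(base_dir=base_dir)
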
Example #26
class RulesRegistrar(object):
    def __init__(self):
        self._meta_loader = MetaLoader()

    def _get_json_rules_from_pack(self, rules_dir):
        return glob.glob(rules_dir + '/*.json')

    def _get_yaml_rules_from_pack(self, rules_dir):
        rules = glob.glob(rules_dir + '/*.yaml')
        rules.extend(glob.glob(rules_dir + '/*.yml'))
        return rules

    def _get_rules_from_pack(self, rules_dir):
        rules = self._get_json_rules_from_pack(rules_dir) or []
        rules.extend(self._get_yaml_rules_from_pack(rules_dir) or [])
        return rules

    def _register_rules_from_pack(self, pack, rules):
        for rule in rules:
            LOG.debug('Loading rule from %s.', rule)
            try:
                content = self._meta_loader.load(rule)
                rule_api = RuleAPI(**content)
                rule_db = RuleAPI.to_model(rule_api)
                try:
                    rule_db.id = Rule.get_by_name(rule_api.name).id
                except ValueError:
                    LOG.info('Rule %s not found. Creating new one.', rule)
                try:
                    rule_db = Rule.add_or_update(rule_db)
                    LOG.audit('Rule updated. Rule %s from %s.', rule_db, rule)
                except Exception:
                    LOG.exception('Failed to create rule %s.', rule_api.name)
            except:
                LOG.exception('Failed registering rule from %s.', rule)

    def register_rules_from_packs(self, base_dir):
        pack_loader = ContentPackLoader()
        dirs = pack_loader.get_content(base_dir=base_dir, content_type='rules')
        for pack, rules_dir in six.iteritems(dirs):
            try:
                LOG.info('Registering rules from pack: %s', pack)
                rules = self._get_rules_from_pack(rules_dir)
                self._register_rules_from_pack(pack, rules)
            except:
                LOG.exception('Failed registering all rules from pack: %s',
                              rules_dir)
Example #27
File: base.py Project: nzlosh/st2
    def __init__(self, use_pack_cache=True, use_runners_cache=False, fail_on_failure=False):
        """
        :param use_pack_cache: True to cache which packs have been registered in memory, making
                               sure each pack is only registered once.
        :type use_pack_cache: ``bool``

        :param use_runners_cache: True to cache RunnerTypeDB objects in memory to reduce load on
                                  the database.
        :type use_runners_cache: ``bool``

        :param fail_on_failure: Throw an exception if resource registration fails.
        :type fail_on_failure: ``bool``
        """
        self._use_pack_cache = use_pack_cache
        self._use_runners_cache = use_runners_cache
        self._fail_on_failure = fail_on_failure

        self._meta_loader = MetaLoader()
        self._pack_loader = ContentPackLoader()

        # Maps runner name -> RunnerTypeDB
        self._runner_type_db_cache = {}
Example #28
File: base.py Project: ruslantum/st2
class ResourceRegistrar(object):
    ALLOWED_EXTENSIONS = []

    def __init__(self):
        self._meta_loader = MetaLoader()
        self._pack_loader = ContentPackLoader()

    def get_resources_from_pack(self, resources_dir):
        resources = []
        for ext in self.ALLOWED_EXTENSIONS:
            resources_glob = resources_dir

            if resources_dir.endswith('/'):
                resources_glob = resources_dir + ext
            else:
                resources_glob = resources_dir + '/*' + ext

            resource_files = glob.glob(resources_glob)
            resources.extend(resource_files)

        resources = sorted(resources)
        return resources

    def register_packs(self, base_dirs):
        """
        Register packs in all the provided directories.
        """
        packs = self._pack_loader.get_packs(base_dirs=base_dirs)

        registered_count = 0
        for pack_name, pack_path in six.iteritems(packs):
            self.register_pack(pack_name=pack_name, pack_dir=pack_path)
            registered_count += 1

        return registered_count

    def register_pack(self, pack_name, pack_dir):
        """
        Register pack in the provided directory.
        """
        if pack_name in REGISTERED_PACKS_CACHE:
            # This pack has already been registered during this register content run
            return

        LOG.debug('Registering pack: %s' % (pack_name))
        REGISTERED_PACKS_CACHE[pack_name] = True

        try:
            pack_db = self._register_pack(pack_name=pack_name, pack_dir=pack_dir)
        except Exception:
            LOG.exception('Failed to register pack "%s"' % (pack_name))
            return None

        return pack_db

    def _register_pack(self, pack_name, pack_dir):
        """
        Register a pack (create a DB object in the system).

        Note: Pack registration now happens when registering the content and not when installing
        a pack using packs.install. Eventually this will be moved to the pack management API.
        """
        manifest_path = os.path.join(pack_dir, MANIFEST_FILE_NAME)

        if not os.path.isfile(manifest_path):
            raise ValueError('Pack "%s" is missing %s file' % (pack_name, MANIFEST_FILE_NAME))

        content = self._meta_loader.load(manifest_path)
        if not content:
            raise ValueError('Pack "%s" metadata file is empty' % (pack_name))

        content['ref'] = pack_name
        pack_api = PackAPI(**content)
        pack_db = PackAPI.to_model(pack_api)

        try:
            pack_db.id = Pack.get_by_ref(pack_name).id
        except ValueError:
            LOG.debug('Pack %s not found. Creating new one.', pack_name)

        pack_db = Pack.add_or_update(pack_db)
        LOG.debug('Pack %s registered.' % (pack_name))
        return pack_db
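
register_packs() is typically driven from the configured pack base paths and returns the number of packs registered. A minimal sketch; the ResourceRegistrar import matches the one shown in Example #33 below, and a working database connection is assumed.

# Imports as used elsewhere in these examples; db_setup() is assumed to
# have been called so Pack objects can be written to the database.
from st2common.bootstrap.base import ResourceRegistrar
import st2common.content.utils as content_utils

registrar = ResourceRegistrar()
registered_count = registrar.register_packs(base_dirs=content_utils.get_packs_base_paths())
print('Registered %d packs' % (registered_count))
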
Example #29
class RBACDefinitionsLoader(object):
    """
    A class which loads role definitions and user role assignments from files on
    disk.
    """

    def __init__(self):
        base_path = cfg.CONF.system.base_path

        self._rbac_definitions_path = os.path.join(base_path, 'rbac/')
        self._role_definitions_path = os.path.join(self._rbac_definitions_path, 'roles/')
        self._role_assignments_path = os.path.join(self._rbac_definitions_path, 'assignments/')
        self._role_maps_path = os.path.join(self._rbac_definitions_path, 'mappings/')
        self._meta_loader = MetaLoader()

    def load(self):
        """
        :return: Dict with the following keys: roles, role_assignments, group_to_role_maps
        :rtype: ``dict``
        """
        result = {}
        result['roles'] = self.load_role_definitions()
        result['role_assignments'] = self.load_user_role_assignments()
        result['group_to_role_maps'] = self.load_group_to_role_maps()

        return result

    def load_role_definitions(self):
        """
        Load all the role definitions.

        :rtype: ``dict``
        """
        LOG.info('Loading role definitions from "%s"' % (self._role_definitions_path))
        file_paths = self._get_role_definitions_file_paths()

        result = {}
        for file_path in file_paths:
            LOG.debug('Loading role definition from: %s' % (file_path))
            role_definition_api = self.load_role_definition_from_file(file_path=file_path)
            role_name = role_definition_api.name
            enabled = getattr(role_definition_api, 'enabled', True)

            if role_name in result:
                raise ValueError('Duplicate definition file found for role "%s"' % (role_name))

            if not enabled:
                LOG.debug('Skipping disabled role "%s"' % (role_name))
                continue

            result[role_name] = role_definition_api

        return result

    def load_user_role_assignments(self):
        """
        Load all the user role assignments.

        :rtype: ``dict``
        """
        LOG.info('Loading user role assignments from "%s"' % (self._role_assignments_path))
        file_paths = self._get_role_assiginments_file_paths()

        result = {}
        for file_path in file_paths:
            LOG.debug('Loading user role assignments from: %s' % (file_path))
            role_assignment_api = self.load_user_role_assignments_from_file(file_path=file_path)
            username = role_assignment_api.username
            enabled = getattr(role_assignment_api, 'enabled', True)

            if username in result:
                raise ValueError('Duplicate definition file found for user "%s"' % (username))

            if not enabled:
                LOG.debug('Skipping disabled role assignment for user "%s"' % (username))
                continue

            result[username] = role_assignment_api

        return result

    def load_group_to_role_maps(self):
        """
        Load all the remote group to local role mappings.

        :rtype: ``dict``
        """
        LOG.info('Loading group to role map definitions from "%s"' % (self._role_maps_path))
        file_paths = self._get_group_to_role_maps_file_paths()

        result = {}
        for file_path in file_paths:
            LOG.debug('Loading group to role mapping from: %s' % (file_path))
            group_to_role_map_api = self.load_group_to_role_map_assignment_from_file(
                file_path=file_path)

            group_name = group_to_role_map_api.group
            result[group_name] = group_to_role_map_api

        return result

    def load_role_definition_from_file(self, file_path):
        """
        Load role definition from file.

        :param file_path: Path to the role definition file.
        :type file_path: ``str``

        :return: Role definition.
        :rtype: :class:`RoleDefinitionFileFormatAPI`
        """
        content = self._meta_loader.load(file_path)

        if not content:
            msg = ('Role definition file "%s" is empty and invalid' % file_path)
            raise ValueError(msg)

        role_definition_api = RoleDefinitionFileFormatAPI(**content)
        role_definition_api = role_definition_api.validate()

        return role_definition_api

    def load_user_role_assignments_from_file(self, file_path):
        """
        Load user role assignments from file.

        :param file_path: Path to the user role assignment file.
        :type file_path: ``str``

        :return: User role assignments.
        :rtype: :class:`UserRoleAssignmentFileFormatAPI`
        """
        content = self._meta_loader.load(file_path)

        if not content:
            msg = ('Role assignment file "%s" is empty and invalid' % file_path)
            raise ValueError(msg)

        user_role_assignment_api = UserRoleAssignmentFileFormatAPI(**content)
        user_role_assignment_api.file_path = file_path[file_path.rfind('assignments/'):]
        user_role_assignment_api = user_role_assignment_api.validate()

        return user_role_assignment_api

    def load_group_to_role_map_assignment_from_file(self, file_path):
        content = self._meta_loader.load(file_path)

        if not content:
            msg = ('Group to role map assignment file "%s" is empty and invalid' % (file_path))
            raise ValueError(msg)

        group_to_role_map_api = AuthGroupToRoleMapAssignmentFileFormatAPI(**content)
        group_to_role_map_api.file_path = file_path[file_path.rfind('mappings/'):]
        group_to_role_map_api = group_to_role_map_api.validate()

        return group_to_role_map_api

    def _get_role_definitions_file_paths(self):
        """
        Retrieve a list of paths for all the role definitions.

        Notes: Roles are sorted in an alphabetical order based on the role name.

        :rtype: ``list``
        """
        glob_str = self._role_definitions_path + '*.yaml'
        file_paths = glob.glob(glob_str)
        file_paths = sorted(file_paths, key=functools.cmp_to_key(compare_path_file_name))
        return file_paths

    def _get_role_assiginments_file_paths(self):
        """
        Retrieve a list of paths for all the user role assignments.

        Notes: Assignments are sorted in an alphabetical order based on the username.

        :rtype: ``list``
        """
        glob_str = self._role_assignments_path + '*.yaml'
        file_paths = glob.glob(glob_str)
        file_paths = sorted(file_paths, key=functools.cmp_to_key(compare_path_file_name))
        return file_paths

    def _get_group_to_role_maps_file_paths(self):
        """
        Retrieve a list of paths for remote group to local role mapping assignment files.

        :rtype: ``list``
        """
        glob_str = self._role_maps_path + '*.yaml'
        file_paths = glob.glob(glob_str)
        file_paths = sorted(file_paths, key=functools.cmp_to_key(compare_path_file_name))
        return file_paths
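
Typical use of RBACDefinitionsLoader: construct it (cfg.CONF.system.base_path must be set, i.e. the st2 config has been parsed) and call load(), which returns a dict with 'roles', 'role_assignments' and 'group_to_role_maps' keys. The import path below is an assumption.

# Assumed import path; requires cfg.CONF.system.base_path to be configured.
from st2common.rbac.loader import RBACDefinitionsLoader

loader = RBACDefinitionsLoader()
definitions = loader.load()

print(sorted(definitions['roles'].keys()))
print(sorted(definitions['role_assignments'].keys()))
print(sorted(definitions['group_to_role_maps'].keys()))
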
Example #30
 def __init__(self):
     self.meta_loader = MetaLoader()
Example #31
class ActionsRegistrar(object):
    def __init__(self):
        self._meta_loader = MetaLoader()

    def _get_actions_from_pack(self, actions_dir):
        actions = []
        for ext in ALLOWED_EXTS:
            actions_ext = glob.glob(actions_dir + '/*' + ext)
            # Exclude global actions configuration file
            config_file = 'actions/config' + ext
            actions_ext = [file_path for file_path in actions_ext if
                           config_file not in file_path] or []
            actions.extend(actions_ext)
        return actions

    def _register_action(self, pack, action):
        content = self._meta_loader.load(action)
        action_ref = ResourceReference(pack=pack, name=str(content['name']))
        model = action_utils.get_action_by_ref(action_ref)
        if not model:
            model = ActionDB()
        model.name = content['name']
        model.description = content['description']
        model.enabled = content['enabled']
        model.pack = pack
        model.entry_point = content['entry_point']
        model.parameters = content.get('parameters', {})
        runner_type = str(content['runner_type'])
        valid_runner_type, runner_type_db = self._has_valid_runner_type(runner_type)
        if valid_runner_type:
            model.runner_type = {'name': runner_type_db.name}
        else:
            LOG.exception('Runner type %s doesn\'t exist.', runner_type)
            raise ValueError('Runner type %s doesn\'t exist.' % (runner_type))

        try:
            model = Action.add_or_update(model)
            LOG.audit('Action created. Action %s from %s.', model, action)
        except Exception:
            LOG.exception('Failed to write action to db %s.', model.name)
            raise

    def _has_valid_runner_type(self, runner_type):
        try:
            return True, action_utils.get_runnertype_by_name(runner_type)
        except:
            return False, None

    def _register_actions_from_pack(self, pack, actions):
        for action in actions:
            try:
                LOG.debug('Loading action from %s.', action)
                self._register_action(pack, action)
            except Exception:
                LOG.exception('Unable to register action: %s', action)
                continue

    # XXX: Requirements for actions are tricky because actions can execute remotely.
    # Currently, this method is unused.
    def _is_requirements_ok(self, actions_dir):
        rqmnts_file = os.path.join(actions_dir, 'requirements.txt')

        if not os.path.exists(rqmnts_file):
            return True

        missing = RequirementsValidator.validate(rqmnts_file)
        if missing:
            LOG.warning('Actions in %s missing dependencies: %s', actions_dir, ','.join(missing))
            return False
        return True

    def register_actions_from_packs(self, base_dir):
        pack_loader = ContentPackLoader()
        dirs = pack_loader.get_content(base_dir=base_dir,
                                       content_type='actions')
        for pack, actions_dir in six.iteritems(dirs):
            try:
                LOG.debug('Registering actions from pack: %s, dir: %s', pack, actions_dir)
                actions = self._get_actions_from_pack(actions_dir)
                self._register_actions_from_pack(pack, actions)
            except:
                LOG.exception('Failed registering all actions from pack: %s', actions_dir)
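
Driving the ActionsRegistrar above looks much like the rules case: loop over the pack base directories and register the actions found in each. A sketch; the import paths are assumptions and a working database connection is assumed.

# Assumed import paths; db_setup() is assumed to have been called.
from st2common.bootstrap.actionsregistrar import ActionsRegistrar
import st2common.content.utils as content_utils

registrar = ActionsRegistrar()
for base_dir in content_utils.get_packs_base_paths():
    registrar.register_actions_from_packs(base_dir=base_dir)
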
Example #32
File: base.py Project: hejin/st2
class ResourceRegistrar(object):
    ALLOWED_EXTENSIONS = []

    def __init__(self, use_pack_cache=True):
        """
        :param use_pack_cache: True to cache which packs have been registered in memory, making
                               sure each pack is only registered once.
        :type use_pack_cache: ``bool``
        """
        self._use_pack_cache = use_pack_cache
        self._meta_loader = MetaLoader()
        self._pack_loader = ContentPackLoader()

    def get_resources_from_pack(self, resources_dir):
        resources = []
        for ext in self.ALLOWED_EXTENSIONS:
            resources_glob = resources_dir

            if resources_dir.endswith("/"):
                resources_glob = resources_dir + ext
            else:
                resources_glob = resources_dir + "/*" + ext

            resource_files = glob.glob(resources_glob)
            resources.extend(resource_files)

        resources = sorted(resources)
        return resources

    def register_packs(self, base_dirs):
        """
        Register packs in all the provided directories.
        """
        packs = self._pack_loader.get_packs(base_dirs=base_dirs)

        registered_count = 0
        for pack_name, pack_path in six.iteritems(packs):
            self.register_pack(pack_name=pack_name, pack_dir=pack_path)
            registered_count += 1

        return registered_count

    def register_pack(self, pack_name, pack_dir):
        """
        Register pack in the provided directory.
        """
        if self._use_pack_cache and pack_name in REGISTERED_PACKS_CACHE:
            # This pack has already been registered during this register content run
            return

        LOG.debug("Registering pack: %s" % (pack_name))
        REGISTERED_PACKS_CACHE[pack_name] = True

        try:
            pack_db = self._register_pack(pack_name=pack_name, pack_dir=pack_dir)
        except Exception:
            LOG.exception('Failed to register pack "%s"' % (pack_name))
            return None

        return pack_db

    def _register_pack(self, pack_name, pack_dir):
        """
        Register a pack (create a DB object in the system).

        Note: Pack registration now happens when registering the content and not when installing
        a pack using packs.install. Eventually this will be moved to the pack management API.
        """
        manifest_path = os.path.join(pack_dir, MANIFEST_FILE_NAME)

        if not os.path.isfile(manifest_path):
            raise ValueError('Pack "%s" is missing %s file' % (pack_name, MANIFEST_FILE_NAME))

        content = self._meta_loader.load(manifest_path)
        if not content:
            raise ValueError('Pack "%s" metadata file is empty' % (pack_name))

        content["ref"] = pack_name

        # Include a list of pack files
        pack_file_list = get_file_list(directory=pack_dir, exclude_patterns=EXCLUDE_FILE_PATTERNS)
        content["files"] = pack_file_list

        pack_api = PackAPI(**content)
        pack_db = PackAPI.to_model(pack_api)

        try:
            pack_db.id = Pack.get_by_ref(pack_name).id
        except ValueError:
            LOG.debug("Pack %s not found. Creating new one.", pack_name)

        pack_db = Pack.add_or_update(pack_db)
        LOG.debug("Pack %s registered." % (pack_name))
        return pack_db
Example #33
from st2common.bootstrap.base import ResourceRegistrar
import st2common.content.utils as content_utils
from st2common.models.api.action import ActionAPI
from st2common.models.api.sensor import SensorTypeAPI
from st2common.models.api.rule import RuleAPI
from st2common.service_setup import db_setup
from st2common.service_setup import db_teardown
from st2common.models.system.common import ResourceReference
from st2common.persistence.rule import Rule
from st2common.persistence.sensor import SensorType
from st2common.persistence.action import Action

registrar = ResourceRegistrar()
registrar.ALLOWED_EXTENSIONS = ['.yaml', '.yml', '.json']

meta_loader = MetaLoader()

API_MODELS_ARTIFACT_TYPES = {
    'actions': ActionAPI,
    'sensors': SensorTypeAPI,
    'rules': RuleAPI
}

API_MODELS_PERSISTENT_MODELS = {
    Action: ActionAPI,
    SensorType: SensorTypeAPI,
    Rule: RuleAPI
}


def do_register_cli_opts(opts, ignore_errors=False):
Example #34
class ActionChainRunner(ActionRunner):

    def __init__(self, runner_id):
        super(ActionChainRunner, self).__init__(runner_id=runner_id)
        self.chain_holder = None
        self._meta_loader = MetaLoader()
        self._stopped = False
        self._skip_notify_tasks = []
        self._display_published = False
        self._chain_notify = None

    def pre_run(self):
        super(ActionChainRunner, self).pre_run()

        chainspec_file = self.entry_point
        LOG.debug('Reading action chain from %s for action %s.', chainspec_file,
                  self.action)

        try:
            chainspec = self._meta_loader.load(file_path=chainspec_file,
                                               expected_type=dict)
        except Exception as e:
            message = ('Failed to parse action chain definition from "%s": %s' %
                       (chainspec_file, str(e)))
            LOG.exception('Failed to load action chain definition.')
            raise runnerexceptions.ActionRunnerPreRunError(message)

        try:
            self.chain_holder = ChainHolder(chainspec, self.action_name)
        except json_schema_exceptions.ValidationError as e:
            # preserve the whole nasty jsonschema message as that is better to get to the
            # root cause
            message = str(e)
            LOG.exception('Failed to instantiate ActionChain.')
            raise runnerexceptions.ActionRunnerPreRunError(message)
        except Exception as e:
            message = e.message or str(e)
            LOG.exception('Failed to instantiate ActionChain.')
            raise runnerexceptions.ActionRunnerPreRunError(message)

        # Runner attributes are set lazily. So these steps
        # should happen outside the constructor.
        if getattr(self, 'liveaction', None):
            self._chain_notify = getattr(self.liveaction, 'notify', None)
        if self.runner_parameters:
            self._skip_notify_tasks = self.runner_parameters.get('skip_notify', [])
            self._display_published = self.runner_parameters.get('display_published', False)

        # Perform some pre-run chain validation
        try:
            self.chain_holder.validate()
        except Exception as e:
            raise runnerexceptions.ActionRunnerPreRunError(e.message)

    def run(self, action_parameters):
        # holds final result we store.
        result = {'tasks': []}
        # published variables are to be stored for display.
        if self._display_published:
            result[PUBLISHED_VARS_KEY] = {}
        context_result = {}  # holds result which is used for the template context purposes
        top_level_error = None  # stores a reference to a top level error
        fail = True
        action_node = None

        try:
            # initialize vars once we have the action_parameters. This allows
            # vars to refer to action_parameters.
            self.chain_holder.init_vars(action_parameters)
        except Exception as e:
            error = 'Failed initializing ``vars`` in chain.'

            LOG.exception(error)

            trace = traceback.format_exc(10)
            top_level_error = {
                'error': error,
                'traceback': trace
            }
            result['error'] = top_level_error['error']
            result['traceback'] = top_level_error['traceback']
            return (LIVEACTION_STATUS_FAILED, result, None)

        try:
            action_node = self.chain_holder.get_next_node()
        except Exception as e:
            LOG.exception('Failed to get starting node.')

            error = ('Failed to get starting node. Lookup failed: %s' % (str(e)))
            trace = traceback.format_exc(10)
            top_level_error = {
                'error': error,
                'traceback': trace
            }

        parent_context = {
            'execution_id': self.execution_id
        }
        if getattr(self.liveaction, 'context', None):
            parent_context.update(self.liveaction.context)

        while action_node:
            fail = False
            timeout = False
            error = None
            liveaction = None

            created_at = date_utils.get_datetime_utc_now()

            try:
                liveaction = self._get_next_action(
                    action_node=action_node, parent_context=parent_context,
                    action_params=action_parameters, context_result=context_result)
            except InvalidActionReferencedException as e:
                error = ('Failed to run task "%s". Action with reference "%s" doesn\'t exist.' %
                         (action_node.name, action_node.ref))
                LOG.exception(error)

                fail = True
                top_level_error = {
                    'error': error,
                    'traceback': traceback.format_exc(10)
                }
                break
            except ParameterRenderingFailedException as e:
                # Rendering parameters failed before we even got to running this action, abort and
                # fail the whole action chain
                LOG.exception('Failed to run action "%s".', action_node.name)

                fail = True
                error = ('Failed to run task "%s". Parameter rendering failed: %s' %
                         (action_node.name, str(e)))
                trace = traceback.format_exc(10)
                top_level_error = {
                    'error': error,
                    'traceback': trace
                }
                break

            try:
                liveaction = self._run_action(liveaction)
            except Exception as e:
                # Save the traceback and error message
                LOG.exception('Failure in running action "%s".', action_node.name)

                error = {
                    'error': 'Task "%s" failed: %s' % (action_node.name, str(e)),
                    'traceback': traceback.format_exc(10)
                }
                context_result[action_node.name] = error
            else:
                # Update context result
                context_result[action_node.name] = liveaction.result

                # Render and publish variables
                rendered_publish_vars = ActionChainRunner._render_publish_vars(
                    action_node=action_node, action_parameters=action_parameters,
                    execution_result=liveaction.result, previous_execution_results=context_result,
                    chain_vars=self.chain_holder.vars)

                if rendered_publish_vars:
                    self.chain_holder.vars.update(rendered_publish_vars)
                    if self._display_published:
                        result[PUBLISHED_VARS_KEY].update(rendered_publish_vars)
            finally:
                # Record result and resolve a next node based on the task success or failure
                updated_at = date_utils.get_datetime_utc_now()

                format_kwargs = {'action_node': action_node, 'liveaction_db': liveaction,
                                 'created_at': created_at, 'updated_at': updated_at}

                if error:
                    format_kwargs['error'] = error

                task_result = self._format_action_exec_result(**format_kwargs)
                result['tasks'].append(task_result)

                if self.liveaction_id:
                    self._stopped = action_service.is_action_canceled_or_canceling(
                        self.liveaction_id)

                if self._stopped:
                    LOG.info('Chain execution (%s) canceled by user.', self.liveaction_id)
                    status = LIVEACTION_STATUS_CANCELED
                    return (status, result, None)

                try:
                    if not liveaction:
                        fail = True
                        action_node = self.chain_holder.get_next_node(action_node.name,
                                                                      condition='on-failure')
                    elif liveaction.status in LIVEACTION_FAILED_STATES:
                        if liveaction and liveaction.status == LIVEACTION_STATUS_TIMED_OUT:
                            timeout = True
                        else:
                            fail = True
                        action_node = self.chain_holder.get_next_node(action_node.name,
                                                                      condition='on-failure')
                    elif liveaction.status == LIVEACTION_STATUS_CANCELED:
                        # User canceled an action (task) in the workflow - cancel the execution of
                        # rest of the workflow
                        self._stopped = True
                        LOG.info('Chain execution (%s) canceled by user.', self.liveaction_id)
                    elif liveaction.status == LIVEACTION_STATUS_SUCCEEDED:
                        action_node = self.chain_holder.get_next_node(action_node.name,
                                                                      condition='on-success')
                except Exception as e:
                    LOG.exception('Failed to get next node "%s".', action_node.name)

                    fail = True
                    error = ('Failed to get next node "%s". Lookup failed: %s' %
                             (action_node.name, str(e)))
                    trace = traceback.format_exc(10)
                    top_level_error = {
                        'error': error,
                        'traceback': trace
                    }
                    # reset action_node here so that chain breaks on failure.
                    action_node = None
                    break

                if self._stopped:
                    LOG.info('Chain execution (%s) canceled by user.', self.liveaction_id)
                    status = LIVEACTION_STATUS_CANCELED
                    return (status, result, None)

        if fail:
            status = LIVEACTION_STATUS_FAILED
        elif timeout:
            status = LIVEACTION_STATUS_TIMED_OUT
        else:
            status = LIVEACTION_STATUS_SUCCEEDED

        if top_level_error:
            # Include top level error information
            result['error'] = top_level_error['error']
            result['traceback'] = top_level_error['traceback']

        return (status, result, None)

    @staticmethod
    def _render_publish_vars(action_node, action_parameters, execution_result,
                             previous_execution_results, chain_vars):
        """
        If no output is specified on the action_node the output is the entire execution_result.
        If any output is specified then only those variables are published as output of an
        execution of this action_node.
        The output variable can refer to a variable from the execution_result,
        previous_execution_results or chain_vars.
        """
        if not action_node.publish:
            return {}

        context = {}
        context.update(action_parameters)
        context.update({action_node.name: execution_result})
        context.update(previous_execution_results)
        context.update(chain_vars)
        context.update({RESULTS_KEY: previous_execution_results})
        context.update({SYSTEM_SCOPE: KeyValueLookup(scope=SYSTEM_SCOPE)})
        context.update({
            DATASTORE_PARENT_SCOPE: {
                SYSTEM_SCOPE: KeyValueLookup(scope=FULL_SYSTEM_SCOPE)
            }
        })

        try:
            rendered_result = jinja_utils.render_values(mapping=action_node.publish,
                                                        context=context)
        except Exception as e:
            key = getattr(e, 'key', None)
            value = getattr(e, 'value', None)
            msg = ('Failed rendering value for publish parameter "%s" in task "%s" '
                   '(template string=%s): %s' % (key, action_node.name, value, str(e)))
            raise ParameterRenderingFailedException(msg)

        return rendered_result

    @staticmethod
    def _resolve_params(action_node, original_parameters, results, chain_vars, chain_context):
        # setup context with original parameters and the intermediate results.
        context = {}
        context.update(original_parameters)
        context.update(results)
        context.update(chain_vars)
        context.update({RESULTS_KEY: results})
        context.update({SYSTEM_SCOPE: KeyValueLookup(scope=SYSTEM_SCOPE)})
        context.update({
            DATASTORE_PARENT_SCOPE: {
                SYSTEM_SCOPE: KeyValueLookup(scope=FULL_SYSTEM_SCOPE)
            }
        })
        context.update({ACTION_CONTEXT_KV_PREFIX: chain_context})
        try:
            rendered_params = jinja_utils.render_values(mapping=action_node.get_parameters(),
                                                        context=context)
        except Exception as e:
            LOG.exception('Jinja rendering for parameter "%s" failed.' % (e.key))

            key = getattr(e, 'key', None)
            value = getattr(e, 'value', None)
            msg = ('Failed rendering value for action parameter "%s" in task "%s" '
                   '(template string=%s): %s') % (key, action_node.name, value, str(e))
            raise ParameterRenderingFailedException(msg)
        LOG.debug('Rendered params: %s: Type: %s', rendered_params, type(rendered_params))
        return rendered_params

    def _get_next_action(self, action_node, parent_context, action_params, context_result):
        # Verify that the referenced action exists
        # TODO: We do another lookup in cast_param, refactor to reduce number of lookups
        task_name = action_node.name
        action_ref = action_node.ref
        action_db = action_db_util.get_action_by_ref(ref=action_ref)

        if not action_db:
            error = 'Task :: %s - Action with ref %s not registered.' % (task_name, action_ref)
            raise InvalidActionReferencedException(error)

        resolved_params = ActionChainRunner._resolve_params(
            action_node=action_node, original_parameters=action_params,
            results=context_result, chain_vars=self.chain_holder.vars,
            chain_context={'parent': parent_context})

        liveaction = self._build_liveaction_object(
            action_node=action_node,
            resolved_params=resolved_params,
            parent_context=parent_context)

        return liveaction

    def _run_action(self, liveaction, wait_for_completion=True, sleep_delay=1.0):
        """
        :param sleep_delay: Number of seconds to wait during "is completed" polls.
        :type sleep_delay: ``float``
        """
        try:
            # request return canceled
            liveaction, _ = action_service.request(liveaction)
        except Exception as e:
            liveaction.status = LIVEACTION_STATUS_FAILED
            LOG.exception('Failed to schedule liveaction.')
            raise e

        while (wait_for_completion and liveaction.status not in LIVEACTION_COMPLETED_STATES):
            eventlet.sleep(sleep_delay)
            liveaction = action_db_util.get_liveaction_by_id(liveaction.id)

        return liveaction

    def _build_liveaction_object(self, action_node, resolved_params, parent_context):
        liveaction = LiveActionDB(action=action_node.ref)

        # Setup notify for task in chain.
        notify = self._get_notify(action_node)
        if notify:
            liveaction.notify = notify
            LOG.debug('%s: Task notify set to: %s', action_node.name, liveaction.notify)

        liveaction.context = {
            'parent': parent_context,
            'chain': vars(action_node)
        }
        liveaction.parameters = action_param_utils.cast_params(action_ref=action_node.ref,
                                                               params=resolved_params)
        return liveaction

    def _get_notify(self, action_node):
        if action_node.name not in self._skip_notify_tasks:
            if action_node.notify:
                task_notify = NotificationsHelper.to_model(action_node.notify)
                return task_notify
            elif self._chain_notify:
                return self._chain_notify

        return None

    def _format_action_exec_result(self, action_node, liveaction_db, created_at, updated_at,
                                   error=None):
        """
        Format ActionExecution result so it can be used in the final action result output.

        :rtype: ``dict``
        """
        assert isinstance(created_at, datetime.datetime)
        assert isinstance(updated_at, datetime.datetime)

        result = {}

        execution_db = None
        if liveaction_db:
            execution_db = ActionExecution.get(liveaction__id=str(liveaction_db.id))

        result['id'] = action_node.name
        result['name'] = action_node.name
        result['execution_id'] = str(execution_db.id) if execution_db else None
        result['workflow'] = None

        result['created_at'] = isotime.format(dt=created_at)
        result['updated_at'] = isotime.format(dt=updated_at)

        if error or not liveaction_db:
            result['state'] = LIVEACTION_STATUS_FAILED
        else:
            result['state'] = liveaction_db.status

        if error:
            result['result'] = error
        else:
            result['result'] = liveaction_db.result

        return result
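The fragment above resolves per-task notifications in _get_notify: tasks listed in skip_notify get no notify, a task's own notify block takes precedence, and otherwise the chain-level notify is inherited. Below is a minimal, self-contained sketch of that precedence; TaskNode and the sample data are hypothetical stand-ins, not the actual StackStorm models.

# Illustrative sketch only - mirrors the precedence implemented by _get_notify above.
from collections import namedtuple

TaskNode = namedtuple('TaskNode', ['name', 'notify'])


def resolve_notify(task, chain_notify, skip_notify_tasks):
    # Skip list wins, then the task's own notify, then the chain-level notify.
    if task.name in skip_notify_tasks:
        return None
    return task.notify or chain_notify


tasks = [
    TaskNode(name='task1', notify={'on-complete': {'routes': ['slack']}}),
    TaskNode(name='task2', notify=None),
    TaskNode(name='task3', notify=None),
]
chain_notify = {'on-failure': {'routes': ['email']}}

for task in tasks:
    print(task.name, resolve_notify(task, chain_notify, skip_notify_tasks=['task3']))
# task1 uses its own notify, task2 inherits the chain notify, task3 is skipped (None)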
Example #35
0
class RBACDefinitionsLoader(object):
    """
    A class which loads role definitions and user role assignments from files on
    disk.
    """

    def __init__(self):
        base_path = cfg.CONF.system.base_path
        rbac_definitions_path = os.path.join(base_path, 'rbac/')

        self._role_definitions_path = os.path.join(rbac_definitions_path, 'roles/')
        self._role_assignments_path = os.path.join(rbac_definitions_path, 'assignments/')
        self._meta_loader = MetaLoader()

    def load(self):
        """
        :return: Dict with the following keys: roles, role_assignments
        :rtype: ``dict``
        """
        result = {}
        result['roles'] = self.load_role_definitions()
        result['role_assignments'] = self.load_user_role_assignments()

        return result

    def load_role_definitions(self):
        """
        Load all the role definitions.

        :rtype: ``dict``
        """
        LOG.info('Loading role definitions from "%s"' % (self._role_definitions_path))
        file_paths = self._get_role_definitions_file_paths()

        result = {}
        for file_path in file_paths:
            LOG.debug('Loading role definition from: %s' % (file_path))
            role_definition_api = self.load_role_definition_from_file(file_path=file_path)
            role_name = role_definition_api.name

            if role_name in result:
                raise ValueError('Duplicate definition file found for role "%s"' % (role_name))

            result[role_name] = role_definition_api

        return result

    def load_user_role_assignments(self):
        """
        Load all the user role assignments.

        :rtype: ``dict``
        """
        LOG.info('Loading user role assignments from "%s"' % (self._role_assignments_path))
        file_paths = self._get_role_assignments_file_paths()

        result = {}
        for file_path in file_paths:
            LOG.debug('Loading user role assignments from: %s' % (file_path))
            role_assignment_api = self.load_user_role_assignments_from_file(file_path=file_path)
            username = role_assignment_api.username

            if username in result:
                raise ValueError('Duplicate definition file found for user "%s"' % (username))

            result[username] = role_assignment_api

        return result

    def load_role_definition_from_file(self, file_path):
        """
        Load role definition from file.

        :param file_path: Path to the role definition file.
        :type file_path: ``str``

        :return: Role definition.
        :rtype: :class:`RoleDefinitionFileFormatAPI`
        """
        content = self._meta_loader.load(file_path)

        role_definition_api = RoleDefinitionFileFormatAPI(**content)
        role_definition_api.validate()

        return role_definition_api

    def load_user_role_assignments_from_file(self, file_path):
        """
        Load user role assignments from file.

        :param file_path: Path to the user role assignment file.
        :type file_path: ``str``

        :return: User role assignments.
        :rtype: :class:`UserRoleAssignmentFileFormatAPI`
        """
        content = self._meta_loader.load(file_path)

        user_role_assignment_api = UserRoleAssignmentFileFormatAPI(**content)
        user_role_assignment_api.validate()

        return user_role_assignment_api

    def _get_role_definitions_file_paths(self):
        """
        Retrieve a list of paths for all the role definitions.

        Note: Roles are sorted alphabetically based on the role name.

        :rtype: ``list``
        """
        glob_str = self._role_definitions_path + '*.yaml'
        file_paths = glob.glob(glob_str)
        file_paths = sorted(file_paths, cmp=compare_path_file_name)
        return file_paths

    def _get_role_assignments_file_paths(self):
        """
        Retrieve a list of paths for all the user role assignments.

        Note: Assignments are sorted alphabetically based on the username.

        :rtype: ``list``
        """
        glob_str = self._role_assignments_path + '*.yaml'
        file_paths = glob.glob(glob_str)
        file_paths = sorted(file_paths, cmp=compare_path_file_name)
        return file_paths
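A hedged usage sketch for the RBACDefinitionsLoader example above. It assumes oslo.config has already been initialized so that cfg.CONF.system.base_path points at a directory containing rbac/roles/*.yaml and rbac/assignments/*.yaml; the print statements are illustrative only.

# Hedged usage sketch - assumes cfg.CONF.system.base_path is configured and the
# rbac/roles/ and rbac/assignments/ directories contain valid YAML definitions.
loader = RBACDefinitionsLoader()
definitions = loader.load()

for role_name in sorted(definitions['roles'].keys()):
    print('Loaded role definition: %s' % (role_name))

for username in sorted(definitions['role_assignments'].keys()):
    print('Loaded role assignment for user: %s' % (username))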
Example #36
0
class FixturesLoader(object):
    def __init__(self):
        self.meta_loader = MetaLoader()

    def save_fixtures_to_db(self,
                            fixtures_pack="generic",
                            fixtures_dict=None,
                            use_object_ids=False):
        """
        Loads fixtures specified in fixtures_dict into the database
        and returns DB models for the fixtures.

        fixtures_dict should be of the form:
        {
            'actions': ['action-1.yaml', 'action-2.yaml'],
            'rules': ['rule-1.yaml'],
            'liveactions': ['execution-1.yaml']
        }

        :param fixtures_pack: Name of the pack to load fixtures from.
        :type fixtures_pack: ``str``

        :param fixtures_dict: Dictionary specifying the fixtures to load for each type.
        :type fixtures_dict: ``dict``

        :param use_object_ids: Use the object id primary key from the fixture file (if available)
                               when storing objects in the database. By default, the id in the
                               file is discarded and a new one is generated.
        :type use_object_ids: ``bool``

        :rtype: ``dict``
        """
        if fixtures_dict is None:
            fixtures_dict = {}

        fixtures_pack_path = self._validate_fixtures_pack(fixtures_pack)
        self._validate_fixture_dict(fixtures_dict, allowed=ALLOWED_DB_FIXTURES)

        db_models = {}
        for fixture_type, fixtures in six.iteritems(fixtures_dict):
            API_MODEL = FIXTURE_API_MODEL.get(fixture_type, None)
            PERSISTENCE_MODEL = FIXTURE_PERSISTENCE_MODEL.get(
                fixture_type, None)

            loaded_fixtures = {}
            for fixture in fixtures:
                # Guard against copy-and-paste and similar typos
                if fixture in loaded_fixtures:
                    msg = 'Fixture "%s" is specified twice, probably a typo.' % (
                        fixture)
                    raise ValueError(msg)

                fixture_dict = self.meta_loader.load(
                    self._get_fixture_file_path_abs(fixtures_pack_path,
                                                    fixture_type, fixture))
                api_model = API_MODEL(**fixture_dict)
                db_model = API_MODEL.to_model(api_model)

                # Make sure we also set and use object id if that functionality is used
                if use_object_ids and "id" in fixture_dict:
                    db_model.id = fixture_dict["id"]

                db_model = PERSISTENCE_MODEL.add_or_update(db_model)
                loaded_fixtures[fixture] = db_model

            db_models[fixture_type] = loaded_fixtures

        return db_models

    def load_fixtures(self, fixtures_pack="generic", fixtures_dict=None):
        """
        Loads fixtures specified in fixtures_dict. We
        simply want to load the meta into dict objects.

        fixtures_dict should be of the form:
        {
            'actionchains': ['actionchain1.yaml', 'actionchain2.yaml'],
            'workflows': ['workflow.yaml']
        }

        :param fixtures_pack: Name of the pack to load fixtures from.
        :type fixtures_pack: ``str``

        :param fixtures_dict: Dictionary specifying the fixtures to load for each type.
        :type fixtures_dict: ``dict``

        :rtype: ``dict``
        """
        if not fixtures_dict:
            return {}
        fixtures_pack_path = self._validate_fixtures_pack(fixtures_pack)
        self._validate_fixture_dict(fixtures_dict)

        all_fixtures = {}
        for fixture_type, fixtures in six.iteritems(fixtures_dict):
            loaded_fixtures = {}
            for fixture in fixtures:
                fixture_dict = self.meta_loader.load(
                    self._get_fixture_file_path_abs(fixtures_pack_path,
                                                    fixture_type, fixture))
                loaded_fixtures[fixture] = fixture_dict
            all_fixtures[fixture_type] = loaded_fixtures

        return all_fixtures

    def load_models(self, fixtures_pack="generic", fixtures_dict=None):
        """
        Loads fixtures specified in fixtures_dict as db models. This method must be
        used for fixtures that have associated DB models. We simply want to load the
        meta as DB models but don't want to save them to db.

        fixtures_dict should be of the form:
        {
            'actions': ['action-1.yaml', 'action-2.yaml'],
            'rules': ['rule-1.yaml'],
            'liveactions': ['execution-1.yaml']
        }

        :param fixtures_pack: Name of the pack to load fixtures from.
        :type fixtures_pack: ``str``

        :param fixtures_dict: Dictionary specifying the fixtures to load for each type.
        :type fixtures_dict: ``dict``

        :rtype: ``dict``
        """
        if not fixtures_dict:
            return {}
        fixtures_pack_path = self._validate_fixtures_pack(fixtures_pack)
        self._validate_fixture_dict(fixtures_dict, allowed=ALLOWED_DB_FIXTURES)

        all_fixtures = {}
        for fixture_type, fixtures in six.iteritems(fixtures_dict):

            API_MODEL = FIXTURE_API_MODEL.get(fixture_type, None)

            loaded_models = {}
            for fixture in fixtures:
                fixture_dict = self.meta_loader.load(
                    self._get_fixture_file_path_abs(fixtures_pack_path,
                                                    fixture_type, fixture))
                api_model = API_MODEL(**fixture_dict)
                db_model = API_MODEL.to_model(api_model)
                loaded_models[fixture] = db_model
            all_fixtures[fixture_type] = loaded_models

        return all_fixtures

    def delete_fixtures_from_db(self,
                                fixtures_pack="generic",
                                fixtures_dict=None,
                                raise_on_fail=False):
        """
        Deletes fixtures specified in fixtures_dict from the database.

        fixtures_dict should be of the form:
        {
            'actions': ['action-1.yaml', 'action-2.yaml'],
            'rules': ['rule-1.yaml'],
            'liveactions': ['execution-1.yaml']
        }

        :param fixtures_pack: Name of the pack to delete fixtures from.
        :type fixtures_pack: ``str``

        :param fixtures_dict: Dictionary specifying the fixtures to delete for each type.
        :type fixtures_dict: ``dict``

        :param raise_on_fail: Optional. If True, raise an exception if deleting any fixture fails.
        :type raise_on_fail: ``boolean``
        """
        if not fixtures_dict:
            return
        fixtures_pack_path = self._validate_fixtures_pack(fixtures_pack)
        self._validate_fixture_dict(fixtures_dict)

        for fixture_type, fixtures in six.iteritems(fixtures_dict):
            API_MODEL = FIXTURE_API_MODEL.get(fixture_type, None)
            PERSISTENCE_MODEL = FIXTURE_PERSISTENCE_MODEL.get(
                fixture_type, None)
            for fixture in fixtures:
                fixture_dict = self.meta_loader.load(
                    self._get_fixture_file_path_abs(fixtures_pack_path,
                                                    fixture_type, fixture))
                # Note: once we have a consistent reference mechanism for every
                # model, we can simply get and delete the object. Until then,
                # these model conversions are necessary.
                api_model = API_MODEL(**fixture_dict)
                db_model = API_MODEL.to_model(api_model)
                try:
                    PERSISTENCE_MODEL.delete(db_model)
                except Exception:
                    if raise_on_fail:
                        raise

    def delete_models_from_db(self, models_dict, raise_on_fail=False):
        """
        Deletes models specified in models_dict from the database.

        models_dict should be of the form:
        {
            'actions': [ACTION1, ACTION2],
            'rules': [RULE1],
            'liveactions': [EXECUTION]
        }

        :param models_dict: Dictionary specifying the models to delete for each type.
        :type models_dict: ``dict``

        :param raise_on_fail: Optional. If True, raise an exception if deleting any model fails.
        :type raise_on_fail: ``boolean``
        """
        for model_type, models in six.iteritems(models_dict):
            PERSISTENCE_MODEL = FIXTURE_PERSISTENCE_MODEL.get(model_type, None)
            for model in models:
                try:
                    PERSISTENCE_MODEL.delete(model)
                except Exception:
                    if raise_on_fail:
                        raise

    def _validate_fixtures_pack(self, fixtures_pack):
        fixtures_pack_path = self._get_fixtures_pack_path(fixtures_pack)

        if not self._is_fixture_pack_exists(fixtures_pack_path):
            raise Exception("Fixtures pack not found " +
                            "in fixtures path %s." % get_fixtures_base_path())
        return fixtures_pack_path

    def _validate_fixture_dict(self, fixtures_dict, allowed=ALLOWED_FIXTURES):
        fixture_types = list(fixtures_dict.keys())
        for fixture_type in fixture_types:
            if fixture_type not in allowed:
                raise Exception(
                    "Disallowed fixture type: %s. Valid fixture types are: %s"
                    % (fixture_type, ", ".join(allowed)))

    def _is_fixture_pack_exists(self, fixtures_pack_path):
        return os.path.exists(fixtures_pack_path)

    def _get_fixture_file_path_abs(self, fixtures_pack_path, fixtures_type,
                                   fixture_name):
        return os.path.join(fixtures_pack_path, fixtures_type, fixture_name)

    def _get_fixtures_pack_path(self, fixtures_pack_name):
        return os.path.join(get_fixtures_base_path(), fixtures_pack_name)

    def get_fixture_file_path_abs(self, fixtures_pack, fixtures_type,
                                  fixture_name):
        return os.path.join(get_fixtures_base_path(), fixtures_pack,
                            fixtures_type, fixture_name)
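A short, hedged sketch of how a test might drive the FixturesLoader above. The 'generic' pack name and the fixture file names are assumptions; they must exist under the fixtures base path for this to run.

# Hedged test-style usage of FixturesLoader; the pack name and YAML file names
# below are assumptions and must exist under get_fixtures_base_path().
FIXTURES_PACK = 'generic'
TEST_FIXTURES = {
    'actions': ['action-1.yaml'],
    'rules': ['rule-1.yaml'],
}

loader = FixturesLoader()

# Load the raw fixture dicts without touching the database.
fixtures = loader.load_fixtures(fixtures_pack=FIXTURES_PACK, fixtures_dict=TEST_FIXTURES)
print(sorted(fixtures['actions'].keys()))

# Persist the fixtures to the database and clean them up afterwards.
db_models = loader.save_fixtures_to_db(fixtures_pack=FIXTURES_PACK, fixtures_dict=TEST_FIXTURES)
try:
    action_db = db_models['actions']['action-1.yaml']
    print('Saved action fixture: %s' % (action_db.name))
finally:
    loader.delete_fixtures_from_db(fixtures_pack=FIXTURES_PACK, fixtures_dict=TEST_FIXTURES)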
Example #37
0
class ActionChainRunner(ActionRunner):
    def __init__(self, runner_id):
        super(ActionChainRunner, self).__init__(runner_id=runner_id)
        self.chain_holder = None
        self._meta_loader = MetaLoader()
        self._skip_notify_tasks = []
        self._display_published = True
        self._chain_notify = None

    def pre_run(self):
        super(ActionChainRunner, self).pre_run()

        chainspec_file = self.entry_point
        LOG.debug('Reading action chain from %s for action %s.',
                  chainspec_file, self.action)

        try:
            chainspec = self._meta_loader.load(file_path=chainspec_file,
                                               expected_type=dict)
        except Exception as e:
            message = (
                'Failed to parse action chain definition from "%s": %s' %
                (chainspec_file, six.text_type(e)))
            LOG.exception('Failed to load action chain definition.')
            raise runner_exc.ActionRunnerPreRunError(message)

        try:
            self.chain_holder = ChainHolder(chainspec, self.action_name)
        except json_schema_exc.ValidationError as e:
            # Preserve the full jsonschema message since it makes it easier to
            # get to the root cause.
            message = six.text_type(e)
            LOG.exception('Failed to instantiate ActionChain.')
            raise runner_exc.ActionRunnerPreRunError(message)
        except Exception as e:
            message = six.text_type(e)
            LOG.exception('Failed to instantiate ActionChain.')
            raise runner_exc.ActionRunnerPreRunError(message)

        # Runner attributes are set lazily. So these steps
        # should happen outside the constructor.
        if getattr(self, 'liveaction', None):
            self._chain_notify = getattr(self.liveaction, 'notify', None)
        if self.runner_parameters:
            self._skip_notify_tasks = self.runner_parameters.get(
                'skip_notify', [])
            self._display_published = self.runner_parameters.get(
                'display_published', True)

        # Perform some pre-run chain validation
        try:
            self.chain_holder.validate()
        except Exception as e:
            raise runner_exc.ActionRunnerPreRunError(six.text_type(e))

    def run(self, action_parameters):
        # Run the action chain.
        return self._run_chain(action_parameters)

    def cancel(self):
        # Identify the list of action executions that are workflows and cascade pause.
        for child_exec_id in self.execution.children:
            child_exec = ActionExecution.get(id=child_exec_id,
                                             raise_exception=True)
            if (child_exec.runner['name']
                    in action_constants.WORKFLOW_RUNNER_TYPES
                    and child_exec.status
                    in action_constants.LIVEACTION_CANCELABLE_STATES):
                action_service.request_cancellation(
                    LiveAction.get(id=child_exec.liveaction['id']),
                    self.context.get('user', None))

        return (action_constants.LIVEACTION_STATUS_CANCELING,
                self.liveaction.result, self.liveaction.context)

    def pause(self):
        # Identify the list of action executions that are workflows and cascade pause.
        for child_exec_id in self.execution.children:
            child_exec = ActionExecution.get(id=child_exec_id,
                                             raise_exception=True)
            if (child_exec.runner['name']
                    in action_constants.WORKFLOW_RUNNER_TYPES
                    and child_exec.status
                    == action_constants.LIVEACTION_STATUS_RUNNING):
                action_service.request_pause(
                    LiveAction.get(id=child_exec.liveaction['id']),
                    self.context.get('user', None))

        return (action_constants.LIVEACTION_STATUS_PAUSING,
                self.liveaction.result, self.liveaction.context)

    def resume(self):
        # Restore runner and action parameters since they are not provided on resume.
        runner_parameters, action_parameters = param_utils.render_final_params(
            self.runner_type.runner_parameters, self.action.parameters,
            self.liveaction.parameters, self.liveaction.context)

        # Assign runner parameters needed for pre-run.
        if runner_parameters:
            self.runner_parameters = runner_parameters

        # Restore chain holder if it is not initialized.
        if not self.chain_holder:
            self.pre_run()

        # Change the status of the liveaction from resuming to running.
        self.liveaction = action_service.update_status(
            self.liveaction,
            action_constants.LIVEACTION_STATUS_RUNNING,
            publish=False)

        # Run the action chain.
        return self._run_chain(action_parameters, resuming=True)

    def _run_chain(self, action_parameters, resuming=False):
        # Set chain status to fail unless explicitly set to succeed.
        chain_status = action_constants.LIVEACTION_STATUS_FAILED

        # Result holds the final result that the chain stores in the database.
        result = {'tasks': []}

        # Save published variables into the result if specified.
        if self._display_published:
            result[PUBLISHED_VARS_KEY] = {}

        context_result = {
        }  # Holds result which is used for the template context purposes
        top_level_error = None  # Stores a reference to a top level error
        action_node = None
        last_task = None

        try:
            # Initialize vars with the action parameters.
            # This allows action parameters to be referenced from vars.
            self.chain_holder.init_vars(action_parameters)
        except Exception as e:
            chain_status = action_constants.LIVEACTION_STATUS_FAILED
            m = 'Failed initializing ``vars`` in chain.'
            LOG.exception(m)
            top_level_error = self._format_error(e, m)
            result.update(top_level_error)
            return (chain_status, result, None)

        # Restore state on resuming an existing chain execution.
        if resuming:
            # Restore vars, if any, from the liveaction.
            ctx_vars = self.liveaction.context.pop('vars', {})
            self.chain_holder.restore_vars(ctx_vars)

            # Restore result if any from the liveaction.
            if self.liveaction and hasattr(
                    self.liveaction, 'result') and self.liveaction.result:
                result = self.liveaction.result

            # Initialize or rebuild existing context_result from liveaction
            # which holds the result used for resolving context in Jinja template.
            for task in result.get('tasks', []):
                context_result[task['name']] = task['result']

            # Restore or initialize the top_level_error
            # that stores a reference to a top level error.
            if 'error' in result or 'traceback' in result:
                top_level_error = {
                    'error': result.get('error'),
                    'traceback': result.get('traceback')
                }

        # If there are no executed tasks in the chain, then get the first node.
        if len(result['tasks']) <= 0:
            try:
                action_node = self.chain_holder.get_next_node()
            except Exception as e:
                m = 'Failed to get the starting node for the chain.'
                LOG.exception(m)
                top_level_error = self._format_error(e, m)

            # If there is no action node to run next, then mark the chain successful.
            if not action_node:
                chain_status = action_constants.LIVEACTION_STATUS_SUCCEEDED

        # Otherwise, figure out the last task executed and
        # its state to determine where to begin executing.
        else:
            last_task = result['tasks'][-1]
            action_node = self.chain_holder.get_node(last_task['name'])
            liveaction = action_db_util.get_liveaction_by_id(
                last_task['liveaction_id'])

            # If the liveaction of the last task has changed, update the result entry.
            if liveaction.status != last_task['state']:
                updated_task_result = self._get_updated_action_exec_result(
                    action_node, liveaction, last_task)
                del result['tasks'][-1]
                result['tasks'].append(updated_task_result)

                # Also need to update context_result so the updated result
                # is available to Jinja expressions
                updated_task_name = updated_task_result['name']
                context_result[updated_task_name][
                    'result'] = updated_task_result['result']

            # If the last task was canceled, then cancel the chain altogether.
            if liveaction.status == action_constants.LIVEACTION_STATUS_CANCELED:
                chain_status = action_constants.LIVEACTION_STATUS_CANCELED
                return (chain_status, result, None)

            # If the last task was paused, then stay on this action node.
            # This is explicitly put here for clarity.
            if liveaction.status == action_constants.LIVEACTION_STATUS_PAUSED:
                pass

            # If the last task succeeded, then get the next on-success action node.
            if liveaction.status == action_constants.LIVEACTION_STATUS_SUCCEEDED:
                chain_status = action_constants.LIVEACTION_STATUS_SUCCEEDED
                action_node = self.chain_holder.get_next_node(
                    last_task['name'], condition='on-success')

            # If the last task failed, then get the next on-failure action node.
            if liveaction.status in action_constants.LIVEACTION_FAILED_STATES:
                chain_status = action_constants.LIVEACTION_STATUS_FAILED
                action_node = self.chain_holder.get_next_node(
                    last_task['name'], condition='on-failure')

        # Setup parent context.
        parent_context = {'execution_id': self.execution_id}

        if getattr(self.liveaction, 'context', None):
            parent_context.update(self.liveaction.context)

        # Run the action chain until there are no more tasks.
        while action_node:
            error = None
            liveaction = None
            last_task = result['tasks'][-1] if len(
                result['tasks']) > 0 else None
            created_at = date_utils.get_datetime_utc_now()

            try:
                # If last task was paused, then fetch the liveaction and resume it first.
                if last_task and last_task[
                        'state'] == action_constants.LIVEACTION_STATUS_PAUSED:
                    liveaction = action_db_util.get_liveaction_by_id(
                        last_task['liveaction_id'])
                    del result['tasks'][-1]
                else:
                    liveaction = self._get_next_action(
                        action_node=action_node,
                        parent_context=parent_context,
                        action_params=action_parameters,
                        context_result=context_result)
            except action_exc.InvalidActionReferencedException as e:
                chain_status = action_constants.LIVEACTION_STATUS_FAILED
                m = (
                    'Failed to run task "%s". Action with reference "%s" doesn\'t exist.'
                    % (action_node.name, action_node.ref))
                LOG.exception(m)
                top_level_error = self._format_error(e, m)
                break
            except action_exc.ParameterRenderingFailedException as e:
                # Rendering parameters failed before we even got to running this action,
                # abort and fail the whole action chain
                chain_status = action_constants.LIVEACTION_STATUS_FAILED
                m = 'Failed to run task "%s". Parameter rendering failed.' % action_node.name
                LOG.exception(m)
                top_level_error = self._format_error(e, m)
                break
            except db_exc.StackStormDBObjectNotFoundError as e:
                chain_status = action_constants.LIVEACTION_STATUS_FAILED
                m = 'Failed to resume task "%s". Unable to find liveaction.' % action_node.name
                LOG.exception(m)
                top_level_error = self._format_error(e, m)
                break

            try:
                # If last task was paused, then fetch the liveaction and resume it first.
                if last_task and last_task[
                        'state'] == action_constants.LIVEACTION_STATUS_PAUSED:
                    LOG.info('Resume task %s for chain %s.', action_node.name,
                             self.liveaction.id)
                    liveaction = self._resume_action(liveaction)
                else:
                    LOG.info('Run task %s for chain %s.', action_node.name,
                             self.liveaction.id)
                    liveaction = self._run_action(liveaction)
            except Exception as e:
                # Save the traceback and error message
                m = 'Failed running task "%s".' % action_node.name
                LOG.exception(m)
                error = self._format_error(e, m)
                context_result[action_node.name] = error
            else:
                # Update context result
                context_result[action_node.name] = liveaction.result

                # Render and publish variables
                rendered_publish_vars = ActionChainRunner._render_publish_vars(
                    action_node=action_node,
                    action_parameters=action_parameters,
                    execution_result=liveaction.result,
                    previous_execution_results=context_result,
                    chain_vars=self.chain_holder.vars)

                if rendered_publish_vars:
                    self.chain_holder.vars.update(rendered_publish_vars)
                    if self._display_published:
                        result[PUBLISHED_VARS_KEY].update(
                            rendered_publish_vars)
            finally:
                # Record result and resolve a next node based on the task success or failure
                updated_at = date_utils.get_datetime_utc_now()

                task_result = self._format_action_exec_result(action_node,
                                                              liveaction,
                                                              created_at,
                                                              updated_at,
                                                              error=error)

                result['tasks'].append(task_result)

                try:
                    if not liveaction:
                        chain_status = action_constants.LIVEACTION_STATUS_FAILED
                        action_node = self.chain_holder.get_next_node(
                            action_node.name, condition='on-failure')
                    elif liveaction.status == action_constants.LIVEACTION_STATUS_TIMED_OUT:
                        chain_status = action_constants.LIVEACTION_STATUS_TIMED_OUT
                        action_node = self.chain_holder.get_next_node(
                            action_node.name, condition='on-failure')
                    elif liveaction.status == action_constants.LIVEACTION_STATUS_CANCELED:
                        LOG.info(
                            'Chain execution (%s) canceled because task "%s" is canceled.',
                            self.liveaction_id, action_node.name)
                        chain_status = action_constants.LIVEACTION_STATUS_CANCELED
                        action_node = None
                    elif liveaction.status == action_constants.LIVEACTION_STATUS_PAUSED:
                        LOG.info(
                            'Chain execution (%s) paused because task "%s" is paused.',
                            self.liveaction_id, action_node.name)
                        chain_status = action_constants.LIVEACTION_STATUS_PAUSED
                        self._save_vars()
                        action_node = None
                    elif liveaction.status == action_constants.LIVEACTION_STATUS_PENDING:
                        LOG.info(
                            'Chain execution (%s) paused because task "%s" is pending.',
                            self.liveaction_id, action_node.name)
                        chain_status = action_constants.LIVEACTION_STATUS_PAUSED
                        self._save_vars()
                        action_node = None
                    elif liveaction.status in action_constants.LIVEACTION_FAILED_STATES:
                        chain_status = action_constants.LIVEACTION_STATUS_FAILED
                        action_node = self.chain_holder.get_next_node(
                            action_node.name, condition='on-failure')
                    elif liveaction.status == action_constants.LIVEACTION_STATUS_SUCCEEDED:
                        chain_status = action_constants.LIVEACTION_STATUS_SUCCEEDED
                        action_node = self.chain_holder.get_next_node(
                            action_node.name, condition='on-success')
                    else:
                        action_node = None
                except Exception as e:
                    chain_status = action_constants.LIVEACTION_STATUS_FAILED
                    m = 'Failed to get next node "%s".' % action_node.name
                    LOG.exception(m)
                    top_level_error = self._format_error(e, m)
                    action_node = None
                    break

            if action_service.is_action_canceled_or_canceling(
                    self.liveaction.id):
                LOG.info('Chain execution (%s) canceled by user.',
                         self.liveaction.id)
                chain_status = action_constants.LIVEACTION_STATUS_CANCELED
                return (chain_status, result, None)

            if action_service.is_action_paused_or_pausing(self.liveaction.id):
                LOG.info('Chain execution (%s) paused by user.',
                         self.liveaction.id)
                chain_status = action_constants.LIVEACTION_STATUS_PAUSED
                self._save_vars()
                return (chain_status, result, self.liveaction.context)

        if top_level_error and isinstance(top_level_error, dict):
            result.update(top_level_error)

        return (chain_status, result, self.liveaction.context)

    def _format_error(self, e, msg):
        return {
            'error': '%s. %s' % (msg, six.text_type(e)),
            'traceback': traceback.format_exc(10)
        }

    def _save_vars(self):
        # Save the context vars in the liveaction context.
        self.liveaction.context['vars'] = self.chain_holder.vars

    @staticmethod
    def _render_publish_vars(action_node, action_parameters, execution_result,
                             previous_execution_results, chain_vars):
        """
        If no output is specified on the action_node the output is the entire execution_result.
        If any output is specified then only those variables are published as output of an
        execution of this action_node.
        The output variable can refer to a variable from the execution_result,
        previous_execution_results or chain_vars.
        """
        if not action_node.publish:
            return {}

        context = {}
        context.update(action_parameters)
        context.update({action_node.name: execution_result})
        context.update(previous_execution_results)
        context.update(chain_vars)
        context.update({RESULTS_KEY: previous_execution_results})

        context.update({
            kv_constants.SYSTEM_SCOPE:
            kv_service.KeyValueLookup(scope=kv_constants.SYSTEM_SCOPE)
        })

        context.update({
            kv_constants.DATASTORE_PARENT_SCOPE: {
                kv_constants.SYSTEM_SCOPE:
                kv_service.KeyValueLookup(scope=kv_constants.FULL_SYSTEM_SCOPE)
            }
        })

        try:
            rendered_result = jinja_utils.render_values(
                mapping=action_node.publish, context=context)
        except Exception as e:
            key = getattr(e, 'key', None)
            value = getattr(e, 'value', None)
            msg = (
                'Failed rendering value for publish parameter "%s" in task "%s" '
                '(template string=%s): %s' %
                (key, action_node.name, value, six.text_type(e)))
            raise action_exc.ParameterRenderingFailedException(msg)

        return rendered_result

    @staticmethod
    def _resolve_params(action_node, original_parameters, results, chain_vars,
                        chain_context):
        # setup context with original parameters and the intermediate results.
        chain_parent = chain_context.get('parent', {})
        pack = chain_parent.get('pack')
        user = chain_parent.get('user')

        config = get_config(pack, user)

        context = {}
        context.update(original_parameters)
        context.update(results)
        context.update(chain_vars)
        context.update({RESULTS_KEY: results})

        context.update({
            kv_constants.SYSTEM_SCOPE:
            kv_service.KeyValueLookup(scope=kv_constants.SYSTEM_SCOPE)
        })

        context.update({
            kv_constants.DATASTORE_PARENT_SCOPE: {
                kv_constants.SYSTEM_SCOPE:
                kv_service.KeyValueLookup(scope=kv_constants.FULL_SYSTEM_SCOPE)
            }
        })
        context.update(
            {action_constants.ACTION_CONTEXT_KV_PREFIX: chain_context})
        context.update({pack_constants.PACK_CONFIG_CONTEXT_KV_PREFIX: config})
        try:
            rendered_params = jinja_utils.render_values(
                mapping=action_node.get_parameters(), context=context)
        except Exception as e:
            key = getattr(e, 'key', None)
            value = getattr(e, 'value', None)

            LOG.exception('Jinja rendering for parameter "%s" failed.' % (key))
            msg = (
                'Failed rendering value for action parameter "%s" in task "%s" '
                '(template string=%s): %s') % (key, action_node.name, value,
                                               six.text_type(e))
            raise action_exc.ParameterRenderingFailedException(msg)
        LOG.debug('Rendered params: %s: Type: %s', rendered_params,
                  type(rendered_params))
        return rendered_params

    def _get_next_action(self, action_node, parent_context, action_params,
                         context_result):
        # Verify that the referenced action exists
        # TODO: We do another lookup in cast_param, refactor to reduce number of lookups
        task_name = action_node.name
        action_ref = action_node.ref
        action_db = action_db_util.get_action_by_ref(ref=action_ref)

        if not action_db:
            error = 'Task :: %s - Action with ref %s not registered.' % (
                task_name, action_ref)
            raise action_exc.InvalidActionReferencedException(error)

        resolved_params = ActionChainRunner._resolve_params(
            action_node=action_node,
            original_parameters=action_params,
            results=context_result,
            chain_vars=self.chain_holder.vars,
            chain_context={'parent': parent_context})

        liveaction = self._build_liveaction_object(
            action_node=action_node,
            resolved_params=resolved_params,
            parent_context=parent_context)

        return liveaction

    def _run_action(self,
                    liveaction,
                    wait_for_completion=True,
                    sleep_delay=1.0):
        """
        :param sleep_delay: Number of seconds to wait during "is completed" polls.
        :type sleep_delay: ``float``
        """
        try:
            liveaction, _ = action_service.request(liveaction)
        except Exception as e:
            liveaction.status = action_constants.LIVEACTION_STATUS_FAILED
            LOG.exception('Failed to schedule liveaction.')
            raise e

        while (wait_for_completion and liveaction.status
               not in (action_constants.LIVEACTION_COMPLETED_STATES + [
                   action_constants.LIVEACTION_STATUS_PAUSED,
                   action_constants.LIVEACTION_STATUS_PENDING
               ])):
            eventlet.sleep(sleep_delay)
            liveaction = action_db_util.get_liveaction_by_id(liveaction.id)

        return liveaction

    def _resume_action(self,
                       liveaction,
                       wait_for_completion=True,
                       sleep_delay=1.0):
        """
        :param sleep_delay: Number of seconds to wait during "is completed" polls.
        :type sleep_delay: ``float``
        """
        try:
            user = self.context.get('user', None)
            liveaction, _ = action_service.request_resume(liveaction, user)
        except Exception as e:
            liveaction.status = action_constants.LIVEACTION_STATUS_FAILED
            LOG.exception('Failed to request resume for liveaction.')
            raise e

        while (wait_for_completion and liveaction.status
               not in (action_constants.LIVEACTION_COMPLETED_STATES +
                       [action_constants.LIVEACTION_STATUS_PAUSED])):
            eventlet.sleep(sleep_delay)
            liveaction = action_db_util.get_liveaction_by_id(liveaction.id)

        return liveaction

    def _build_liveaction_object(self, action_node, resolved_params,
                                 parent_context):
        liveaction = LiveActionDB(action=action_node.ref)

        # Setup notify for task in chain.
        notify = self._get_notify(action_node)
        if notify:
            liveaction.notify = notify
            LOG.debug('%s: Task notify set to: %s', action_node.name,
                      liveaction.notify)

        liveaction.context = {
            'parent': parent_context,
            'chain': vars(action_node)
        }
        liveaction.parameters = action_param_utils.cast_params(
            action_ref=action_node.ref, params=resolved_params)
        return liveaction

    def _get_notify(self, action_node):
        if action_node.name not in self._skip_notify_tasks:
            if action_node.notify:
                task_notify = NotificationsHelper.to_model(action_node.notify)
                return task_notify
            elif self._chain_notify:
                return self._chain_notify

        return None

    def _get_updated_action_exec_result(self, action_node, liveaction,
                                        prev_task_result):
        if liveaction.status in action_constants.LIVEACTION_COMPLETED_STATES:
            created_at = isotime.parse(prev_task_result['created_at'])
            updated_at = liveaction.end_timestamp
        else:
            created_at = isotime.parse(prev_task_result['created_at'])
            updated_at = isotime.parse(prev_task_result['updated_at'])

        return self._format_action_exec_result(action_node, liveaction,
                                               created_at, updated_at)

    def _format_action_exec_result(self,
                                   action_node,
                                   liveaction_db,
                                   created_at,
                                   updated_at,
                                   error=None):
        """
        Format ActionExecution result so it can be used in the final action result output.

        :rtype: ``dict``
        """
        assert isinstance(created_at, datetime.datetime)
        assert isinstance(updated_at, datetime.datetime)

        result = {}

        execution_db = None
        if liveaction_db:
            execution_db = ActionExecution.get(
                liveaction__id=str(liveaction_db.id))

        result['id'] = action_node.name
        result['name'] = action_node.name
        result['execution_id'] = str(execution_db.id) if execution_db else None
        result['liveaction_id'] = str(
            liveaction_db.id) if liveaction_db else None
        result['workflow'] = None

        result['created_at'] = isotime.format(dt=created_at)
        result['updated_at'] = isotime.format(dt=updated_at)

        if error or not liveaction_db:
            result['state'] = action_constants.LIVEACTION_STATUS_FAILED
        else:
            result['state'] = liveaction_db.status

        if error:
            result['result'] = error
        else:
            result['result'] = liveaction_db.result

        return result
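The _run_chain method in the example above returns a (status, result, context) tuple, where result['tasks'] accumulates one entry per executed task in the shape produced by _format_action_exec_result. The sketch below illustrates that shape with made-up values; the exact status strings and the 'published' key name (PUBLISHED_VARS_KEY) are assumptions.

# Illustrative only - the approximate shape of a chain result; all ids,
# timestamps and status strings below are made up for the example.
example_chain_result = {
    'tasks': [
        {
            'id': 'task1',
            'name': 'task1',
            'execution_id': '5f1a0000000000000000aaaa',
            'liveaction_id': '5f1a0000000000000000bbbb',
            'workflow': None,
            'created_at': '2023-01-01T00:00:00.000000Z',
            'updated_at': '2023-01-01T00:00:05.000000Z',
            'state': 'succeeded',
            'result': {'stdout': 'ok', 'return_code': 0},
        },
    ],
    # Assumed to correspond to PUBLISHED_VARS_KEY when display_published is True.
    'published': {'some_var': 'value'},
}

chain_status = 'succeeded'
chain_context = {'user': 'stanley'}
chain_return = (chain_status, example_chain_result, chain_context)
print(chain_return[0], len(chain_return[1]['tasks']))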
Example #38
0
class ActionChainRunner(ActionRunner):

    def __init__(self, runner_id):
        super(ActionChainRunner, self).__init__(runner_id=runner_id)
        self.chain_holder = None
        self._meta_loader = MetaLoader()
        self._skip_notify_tasks = []
        self._display_published = True
        self._chain_notify = None

    def pre_run(self):
        super(ActionChainRunner, self).pre_run()

        chainspec_file = self.entry_point
        LOG.debug('Reading action chain from %s for action %s.', chainspec_file,
                  self.action)

        try:
            chainspec = self._meta_loader.load(file_path=chainspec_file,
                                               expected_type=dict)
        except Exception as e:
            message = ('Failed to parse action chain definition from "%s": %s' %
                       (chainspec_file, six.text_type(e)))
            LOG.exception('Failed to load action chain definition.')
            raise runner_exc.ActionRunnerPreRunError(message)

        try:
            self.chain_holder = ChainHolder(chainspec, self.action_name)
        except json_schema_exc.ValidationError as e:
            # Preserve the full jsonschema message since it makes it easier to
            # get to the root cause.
            message = six.text_type(e)
            LOG.exception('Failed to instantiate ActionChain.')
            raise runner_exc.ActionRunnerPreRunError(message)
        except Exception as e:
            message = six.text_type(e)
            LOG.exception('Failed to instantiate ActionChain.')
            raise runner_exc.ActionRunnerPreRunError(message)

        # Runner attributes are set lazily. So these steps
        # should happen outside the constructor.
        if getattr(self, 'liveaction', None):
            self._chain_notify = getattr(self.liveaction, 'notify', None)
        if self.runner_parameters:
            self._skip_notify_tasks = self.runner_parameters.get('skip_notify', [])
            self._display_published = self.runner_parameters.get('display_published', True)

        # Perform some pre-run chain validation
        try:
            self.chain_holder.validate()
        except Exception as e:
            raise runner_exc.ActionRunnerPreRunError(six.text_type(e))

    def run(self, action_parameters):
        # Run the action chain.
        return self._run_chain(action_parameters)

    def cancel(self):
        # Identify the list of action executions that are workflows and cascade pause.
        for child_exec_id in self.execution.children:
            child_exec = ActionExecution.get(id=child_exec_id, raise_exception=True)
            if (child_exec.runner['name'] in action_constants.WORKFLOW_RUNNER_TYPES and
                    child_exec.status in action_constants.LIVEACTION_CANCELABLE_STATES):
                action_service.request_cancellation(
                    LiveAction.get(id=child_exec.liveaction['id']),
                    self.context.get('user', None)
                )

        return (
            action_constants.LIVEACTION_STATUS_CANCELING,
            self.liveaction.result,
            self.liveaction.context
        )

    def pause(self):
        # Identify the list of action executions that are workflows and cascade pause.
        for child_exec_id in self.execution.children:
            child_exec = ActionExecution.get(id=child_exec_id, raise_exception=True)
            if (child_exec.runner['name'] in action_constants.WORKFLOW_RUNNER_TYPES and
                    child_exec.status == action_constants.LIVEACTION_STATUS_RUNNING):
                action_service.request_pause(
                    LiveAction.get(id=child_exec.liveaction['id']),
                    self.context.get('user', None)
                )

        return (
            action_constants.LIVEACTION_STATUS_PAUSING,
            self.liveaction.result,
            self.liveaction.context
        )

    def resume(self):
        # Restore runner and action parameters since they are not provided on resume.
        runner_parameters, action_parameters = param_utils.render_final_params(
            self.runner_type.runner_parameters,
            self.action.parameters,
            self.liveaction.parameters,
            self.liveaction.context
        )

        # Assign runner parameters needed for pre-run.
        if runner_parameters:
            self.runner_parameters = runner_parameters

        # Restore chain holder if it is not initialized.
        if not self.chain_holder:
            self.pre_run()

        # Change the status of the liveaction from resuming to running.
        self.liveaction = action_service.update_status(
            self.liveaction,
            action_constants.LIVEACTION_STATUS_RUNNING,
            publish=False
        )

        # Run the action chain.
        return self._run_chain(action_parameters, resuming=True)

    def _run_chain(self, action_parameters, resuming=False):
        # Set chain status to fail unless explicitly set to succeed.
        chain_status = action_constants.LIVEACTION_STATUS_FAILED

        # Result holds the final result that the chain stores in the database.
        result = {'tasks': []}

        # Save published variables into the result if specified.
        if self._display_published:
            result[PUBLISHED_VARS_KEY] = {}

        context_result = {}  # Holds result which is used for the template context purposes
        top_level_error = None  # Stores a reference to a top level error
        action_node = None
        last_task = None

        try:
            # Initialize vars with the action parameters.
            # This allows action parameters to be referenced from vars.
            self.chain_holder.init_vars(action_parameters)
        except Exception as e:
            chain_status = action_constants.LIVEACTION_STATUS_FAILED
            m = 'Failed initializing ``vars`` in chain.'
            LOG.exception(m)
            top_level_error = self._format_error(e, m)
            result.update(top_level_error)
            return (chain_status, result, None)

        # Restore state on resuming an existing chain execution.
        if resuming:
            # Restore vars, if any, from the liveaction.
            ctx_vars = self.liveaction.context.pop('vars', {})
            self.chain_holder.restore_vars(ctx_vars)

            # Restore result if any from the liveaction.
            if self.liveaction and hasattr(self.liveaction, 'result') and self.liveaction.result:
                result = self.liveaction.result

            # Initialize or rebuild existing context_result from liveaction
            # which holds the result used for resolving context in Jinja template.
            for task in result.get('tasks', []):
                context_result[task['name']] = task['result']

            # Restore or initialize the top_level_error
            # that stores a reference to a top level error.
            if 'error' in result or 'traceback' in result:
                top_level_error = {
                    'error': result.get('error'),
                    'traceback': result.get('traceback')
                }

        # If there are no executed tasks in the chain, then get the first node.
        if len(result['tasks']) <= 0:
            try:
                action_node = self.chain_holder.get_next_node()
            except Exception as e:
                m = 'Failed to get the starting node for the chain.'
                LOG.exception(m)
                top_level_error = self._format_error(e, m)

            # If there is no action node to run next, then mark the chain successful.
            if not action_node:
                chain_status = action_constants.LIVEACTION_STATUS_SUCCEEDED

        # Otherwise, figure out the last task executed and
        # its state to determine where to begin executing.
        else:
            last_task = result['tasks'][-1]
            action_node = self.chain_holder.get_node(last_task['name'])
            liveaction = action_db_util.get_liveaction_by_id(last_task['liveaction_id'])

            # If the liveaction of the last task has changed, update the result entry.
            if liveaction.status != last_task['state']:
                updated_task_result = self._get_updated_action_exec_result(
                    action_node, liveaction, last_task)
                del result['tasks'][-1]
                result['tasks'].append(updated_task_result)

                # Also need to update context_result so the updated result
                # is available to Jinja expressions
                updated_task_name = updated_task_result['name']
                context_result[updated_task_name]['result'] = updated_task_result['result']

            # If the last task was canceled, then cancel the chain altogether.
            if liveaction.status == action_constants.LIVEACTION_STATUS_CANCELED:
                chain_status = action_constants.LIVEACTION_STATUS_CANCELED
                return (chain_status, result, None)

            # If the last task was paused, then stay on this action node.
            # This is explicitly put here for clarity.
            if liveaction.status == action_constants.LIVEACTION_STATUS_PAUSED:
                pass

            # If the last task succeeded, then get the next on-success action node.
            if liveaction.status == action_constants.LIVEACTION_STATUS_SUCCEEDED:
                chain_status = action_constants.LIVEACTION_STATUS_SUCCEEDED
                action_node = self.chain_holder.get_next_node(
                    last_task['name'], condition='on-success')

            # If the last task failed, then get the next on-failure action node.
            if liveaction.status in action_constants.LIVEACTION_FAILED_STATES:
                chain_status = action_constants.LIVEACTION_STATUS_FAILED
                action_node = self.chain_holder.get_next_node(
                    last_task['name'], condition='on-failure')

        # Setup parent context.
        parent_context = {
            'execution_id': self.execution_id
        }

        if getattr(self.liveaction, 'context', None):
            parent_context.update(self.liveaction.context)

        # Run the action chain until there are no more tasks.
        while action_node:
            error = None
            liveaction = None
            last_task = result['tasks'][-1] if len(result['tasks']) > 0 else None
            created_at = date_utils.get_datetime_utc_now()

            try:
                # If last task was paused, then fetch the liveaction and resume it first.
                if last_task and last_task['state'] == action_constants.LIVEACTION_STATUS_PAUSED:
                    liveaction = action_db_util.get_liveaction_by_id(last_task['liveaction_id'])
                    del result['tasks'][-1]
                else:
                    liveaction = self._get_next_action(
                        action_node=action_node, parent_context=parent_context,
                        action_params=action_parameters, context_result=context_result)
            except action_exc.InvalidActionReferencedException as e:
                chain_status = action_constants.LIVEACTION_STATUS_FAILED
                m = ('Failed to run task "%s". Action with reference "%s" doesn\'t exist.' %
                     (action_node.name, action_node.ref))
                LOG.exception(m)
                top_level_error = self._format_error(e, m)
                break
            except action_exc.ParameterRenderingFailedException as e:
                # Rendering parameters failed before we even got to running this action,
                # abort and fail the whole action chain
                chain_status = action_constants.LIVEACTION_STATUS_FAILED
                m = 'Failed to run task "%s". Parameter rendering failed.' % action_node.name
                LOG.exception(m)
                top_level_error = self._format_error(e, m)
                break
            except db_exc.StackStormDBObjectNotFoundError as e:
                chain_status = action_constants.LIVEACTION_STATUS_FAILED
                m = 'Failed to resume task "%s". Unable to find liveaction.' % action_node.name
                LOG.exception(m)
                top_level_error = self._format_error(e, m)
                break

            try:
                # If last task was paused, then fetch the liveaction and resume it first.
                if last_task and last_task['state'] == action_constants.LIVEACTION_STATUS_PAUSED:
                    LOG.info('Resume task %s for chain %s.', action_node.name, self.liveaction.id)
                    liveaction = self._resume_action(liveaction)
                else:
                    LOG.info('Run task %s for chain %s.', action_node.name, self.liveaction.id)
                    liveaction = self._run_action(liveaction)
            except Exception as e:
                # Save the traceback and error message
                m = 'Failed running task "%s".' % action_node.name
                LOG.exception(m)
                error = self._format_error(e, m)
                context_result[action_node.name] = error
            else:
                # Update context result
                context_result[action_node.name] = liveaction.result

                # Render and publish variables
                rendered_publish_vars = ActionChainRunner._render_publish_vars(
                    action_node=action_node, action_parameters=action_parameters,
                    execution_result=liveaction.result, previous_execution_results=context_result,
                    chain_vars=self.chain_holder.vars)

                if rendered_publish_vars:
                    self.chain_holder.vars.update(rendered_publish_vars)
                    if self._display_published:
                        result[PUBLISHED_VARS_KEY].update(rendered_publish_vars)
            finally:
                # Record result and resolve a next node based on the task success or failure
                updated_at = date_utils.get_datetime_utc_now()

                task_result = self._format_action_exec_result(
                    action_node,
                    liveaction,
                    created_at,
                    updated_at,
                    error=error
                )

                result['tasks'].append(task_result)

                try:
                    if not liveaction:
                        chain_status = action_constants.LIVEACTION_STATUS_FAILED
                        action_node = self.chain_holder.get_next_node(
                            action_node.name, condition='on-failure')
                    elif liveaction.status == action_constants.LIVEACTION_STATUS_TIMED_OUT:
                        chain_status = action_constants.LIVEACTION_STATUS_TIMED_OUT
                        action_node = self.chain_holder.get_next_node(
                            action_node.name, condition='on-failure')
                    elif liveaction.status == action_constants.LIVEACTION_STATUS_CANCELED:
                        LOG.info('Chain execution (%s) canceled because task "%s" is canceled.',
                                 self.liveaction_id, action_node.name)
                        chain_status = action_constants.LIVEACTION_STATUS_CANCELED
                        action_node = None
                    elif liveaction.status == action_constants.LIVEACTION_STATUS_PAUSED:
                        LOG.info('Chain execution (%s) paused because task "%s" is paused.',
                                 self.liveaction_id, action_node.name)
                        chain_status = action_constants.LIVEACTION_STATUS_PAUSED
                        self._save_vars()
                        action_node = None
                    elif liveaction.status == action_constants.LIVEACTION_STATUS_PENDING:
                        LOG.info('Chain execution (%s) paused because task "%s" is pending.',
                                 self.liveaction_id, action_node.name)
                        chain_status = action_constants.LIVEACTION_STATUS_PAUSED
                        self._save_vars()
                        action_node = None
                    elif liveaction.status in action_constants.LIVEACTION_FAILED_STATES:
                        chain_status = action_constants.LIVEACTION_STATUS_FAILED
                        action_node = self.chain_holder.get_next_node(
                            action_node.name, condition='on-failure')
                    elif liveaction.status == action_constants.LIVEACTION_STATUS_SUCCEEDED:
                        chain_status = action_constants.LIVEACTION_STATUS_SUCCEEDED
                        action_node = self.chain_holder.get_next_node(
                            action_node.name, condition='on-success')
                    else:
                        action_node = None
                except Exception as e:
                    chain_status = action_constants.LIVEACTION_STATUS_FAILED
                    m = 'Failed to get next node "%s".' % action_node.name
                    LOG.exception(m)
                    top_level_error = self._format_error(e, m)
                    action_node = None
                    break

            if action_service.is_action_canceled_or_canceling(self.liveaction.id):
                LOG.info('Chain execution (%s) canceled by user.', self.liveaction.id)
                chain_status = action_constants.LIVEACTION_STATUS_CANCELED
                return (chain_status, result, None)

            if action_service.is_action_paused_or_pausing(self.liveaction.id):
                LOG.info('Chain execution (%s) paused by user.', self.liveaction.id)
                chain_status = action_constants.LIVEACTION_STATUS_PAUSED
                self._save_vars()
                return (chain_status, result, self.liveaction.context)

        if top_level_error and isinstance(top_level_error, dict):
            result.update(top_level_error)

        return (chain_status, result, self.liveaction.context)

    def _format_error(self, e, msg):
        return {
            'error': '%s. %s' % (msg, six.text_type(e)),
            'traceback': traceback.format_exc(10)
        }

    def _save_vars(self):
        # Save the context vars in the liveaction context.
        self.liveaction.context['vars'] = self.chain_holder.vars

    @staticmethod
    def _render_publish_vars(action_node, action_parameters, execution_result,
                             previous_execution_results, chain_vars):
        """
        If no output is specified on the action_node the output is the entire execution_result.
        If any output is specified then only those variables are published as output of an
        execution of this action_node.
        The output variable can refer to a variable from the execution_result,
        previous_execution_results or chain_vars.
        """
        if not action_node.publish:
            return {}

        context = {}
        context.update(action_parameters)
        context.update({action_node.name: execution_result})
        context.update(previous_execution_results)
        context.update(chain_vars)
        context.update({RESULTS_KEY: previous_execution_results})

        context.update({
            kv_constants.SYSTEM_SCOPE: kv_service.KeyValueLookup(
                scope=kv_constants.SYSTEM_SCOPE)
        })

        context.update({
            kv_constants.DATASTORE_PARENT_SCOPE: {
                kv_constants.SYSTEM_SCOPE: kv_service.KeyValueLookup(
                    scope=kv_constants.FULL_SYSTEM_SCOPE)
            }
        })

        try:
            rendered_result = jinja_utils.render_values(mapping=action_node.publish,
                                                        context=context)
        except Exception as e:
            key = getattr(e, 'key', None)
            value = getattr(e, 'value', None)
            msg = ('Failed rendering value for publish parameter "%s" in task "%s" '
                   '(template string=%s): %s' % (key, action_node.name, value, six.text_type(e)))
            raise action_exc.ParameterRenderingFailedException(msg)

        return rendered_result

    @staticmethod
    def _resolve_params(action_node, original_parameters, results, chain_vars, chain_context):
        # setup context with original parameters and the intermediate results.
        chain_parent = chain_context.get('parent', {})
        pack = chain_parent.get('pack')
        user = chain_parent.get('user')

        config = get_config(pack, user)

        context = {}
        context.update(original_parameters)
        context.update(results)
        context.update(chain_vars)
        context.update({RESULTS_KEY: results})

        context.update({
            kv_constants.SYSTEM_SCOPE: kv_service.KeyValueLookup(
                scope=kv_constants.SYSTEM_SCOPE)
        })

        context.update({
            kv_constants.DATASTORE_PARENT_SCOPE: {
                kv_constants.SYSTEM_SCOPE: kv_service.KeyValueLookup(
                    scope=kv_constants.FULL_SYSTEM_SCOPE)
            }
        })
        context.update({action_constants.ACTION_CONTEXT_KV_PREFIX: chain_context})
        context.update({pack_constants.PACK_CONFIG_CONTEXT_KV_PREFIX: config})
        try:
            rendered_params = jinja_utils.render_values(mapping=action_node.get_parameters(),
                                                        context=context)
        except Exception as e:
            LOG.exception('Jinja rendering for parameter "%s" failed.', getattr(e, 'key', None))

            key = getattr(e, 'key', None)
            value = getattr(e, 'value', None)
            msg = ('Failed rendering value for action parameter "%s" in task "%s" '
                   '(template string=%s): %s') % (key, action_node.name, value, six.text_type(e))
            raise action_exc.ParameterRenderingFailedException(msg)
        LOG.debug('Rendered params: %s: Type: %s', rendered_params, type(rendered_params))
        return rendered_params

    def _get_next_action(self, action_node, parent_context, action_params, context_result):
        # Verify that the referenced action exists
        # TODO: We do another lookup in cast_param, refactor to reduce number of lookups
        task_name = action_node.name
        action_ref = action_node.ref
        action_db = action_db_util.get_action_by_ref(ref=action_ref)

        if not action_db:
            error = 'Task :: %s - Action with ref %s not registered.' % (task_name, action_ref)
            raise action_exc.InvalidActionReferencedException(error)

        resolved_params = ActionChainRunner._resolve_params(
            action_node=action_node, original_parameters=action_params,
            results=context_result, chain_vars=self.chain_holder.vars,
            chain_context={'parent': parent_context})

        liveaction = self._build_liveaction_object(
            action_node=action_node,
            resolved_params=resolved_params,
            parent_context=parent_context)

        return liveaction

    def _run_action(self, liveaction, wait_for_completion=True, sleep_delay=1.0):
        """
        :param sleep_delay: Number of seconds to wait during "is completed" polls.
        :type sleep_delay: ``float``
        """
        try:
            liveaction, _ = action_service.request(liveaction)
        except Exception as e:
            liveaction.status = action_constants.LIVEACTION_STATUS_FAILED
            LOG.exception('Failed to schedule liveaction.')
            raise e

        while (wait_for_completion and liveaction.status not in (
                action_constants.LIVEACTION_COMPLETED_STATES +
                [action_constants.LIVEACTION_STATUS_PAUSED,
                 action_constants.LIVEACTION_STATUS_PENDING])):
            eventlet.sleep(sleep_delay)
            liveaction = action_db_util.get_liveaction_by_id(liveaction.id)

        return liveaction

    def _resume_action(self, liveaction, wait_for_completion=True, sleep_delay=1.0):
        """
        :param sleep_delay: Number of seconds to wait during "is completed" polls.
        :type sleep_delay: ``float``
        """
        try:
            user = self.context.get('user', None)
            liveaction, _ = action_service.request_resume(liveaction, user)
        except Exception as e:
            liveaction.status = action_constants.LIVEACTION_STATUS_FAILED
            LOG.exception('Failed to schedule liveaction.')
            raise e

        while (wait_for_completion and liveaction.status not in (
                action_constants.LIVEACTION_COMPLETED_STATES +
                [action_constants.LIVEACTION_STATUS_PAUSED])):
            eventlet.sleep(sleep_delay)
            liveaction = action_db_util.get_liveaction_by_id(liveaction.id)

        return liveaction

    def _build_liveaction_object(self, action_node, resolved_params, parent_context):
        liveaction = LiveActionDB(action=action_node.ref)

        # Setup notify for task in chain.
        notify = self._get_notify(action_node)
        if notify:
            liveaction.notify = notify
            LOG.debug('%s: Task notify set to: %s', action_node.name, liveaction.notify)

        liveaction.context = {
            'parent': parent_context,
            'chain': vars(action_node)
        }
        liveaction.parameters = action_param_utils.cast_params(action_ref=action_node.ref,
                                                               params=resolved_params)
        return liveaction

    def _get_notify(self, action_node):
        if action_node.name not in self._skip_notify_tasks:
            if action_node.notify:
                task_notify = NotificationsHelper.to_model(action_node.notify)
                return task_notify
            elif self._chain_notify:
                return self._chain_notify

        return None

    def _get_updated_action_exec_result(self, action_node, liveaction, prev_task_result):
        if liveaction.status in action_constants.LIVEACTION_COMPLETED_STATES:
            created_at = isotime.parse(prev_task_result['created_at'])
            updated_at = liveaction.end_timestamp
        else:
            created_at = isotime.parse(prev_task_result['created_at'])
            updated_at = isotime.parse(prev_task_result['updated_at'])

        return self._format_action_exec_result(action_node, liveaction, created_at, updated_at)

    def _format_action_exec_result(self, action_node, liveaction_db, created_at, updated_at,
                                   error=None):
        """
        Format ActionExecution result so it can be used in the final action result output.

        :rtype: ``dict``
        """
        assert isinstance(created_at, datetime.datetime)
        assert isinstance(updated_at, datetime.datetime)

        result = {}

        execution_db = None
        if liveaction_db:
            execution_db = ActionExecution.get(liveaction__id=str(liveaction_db.id))

        result['id'] = action_node.name
        result['name'] = action_node.name
        result['execution_id'] = str(execution_db.id) if execution_db else None
        result['liveaction_id'] = str(liveaction_db.id) if liveaction_db else None
        result['workflow'] = None

        result['created_at'] = isotime.format(dt=created_at)
        result['updated_at'] = isotime.format(dt=updated_at)

        if error or not liveaction_db:
            result['state'] = action_constants.LIVEACTION_STATUS_FAILED
        else:
            result['state'] = liveaction_db.status

        if error:
            result['result'] = error
        else:
            result['result'] = liveaction_db.result

        return result
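
For reference, each entry appended to result['tasks'] by _format_action_exec_result carries the fields below. Only the key names come from the method above; every value in this sketch is hypothetical.

# Illustrative only -- the values are made up.
task_result = {
    'id': 'create_vm',                            # action_node.name
    'name': 'create_vm',                          # action_node.name
    'execution_id': '54c6bca1e5a3a9157c21efa0',   # or None if no ActionExecution was found
    'liveaction_id': '54c6bca0e5a3a9157c21ef9f',  # or None if no liveaction was created
    'workflow': None,
    'created_at': '2017-03-01T12:00:00.000000Z',
    'updated_at': '2017-03-01T12:00:05.000000Z',
    'state': 'succeeded',                         # forced to failed when error is set
    'result': {'stdout': 'vm created', 'return_code': 0}
}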
Example #39
0
File: tester.py Project: rlugojr/st2
class RuleTester(object):
    def __init__(self, rule_file_path=None, rule_ref=None, trigger_instance_file_path=None, trigger_instance_id=None):
        """
        :param rule_file_path: Path to the file containing rule definition.
        :type rule_file_path: ``str``

        :param trigger_instance_file_path: Path to the file containing trigger instance definition.
        :type trigger_instance_file_path: ``str``
        """
        self._rule_file_path = rule_file_path
        self._rule_ref = rule_ref
        self._trigger_instance_file_path = trigger_instance_file_path
        self._trigger_instance_id = trigger_instance_id
        self._meta_loader = MetaLoader()

    def evaluate(self):
        """
        Evaluate trigger instance against the rule.

        :return: ``True`` if the rule matches, ``False`` otherwise.
        :rtype: ``boolean``
        """

        rule_db = self._get_rule_db()
        trigger_instance_db, trigger_db = self._get_trigger_instance_db()

        # The trigger check needs to be performed here as that is not performed
        # by RulesMatcher.
        if rule_db.trigger != trigger_db.ref:
            LOG.info('rule.trigger "%s" and trigger.ref "%s" do not match.', rule_db.trigger, trigger_db.ref)
            return False

        # Check if rule matches criteria.
        matcher = RulesMatcher(
            trigger_instance=trigger_instance_db, trigger=trigger_db, rules=[rule_db], extra_info=True
        )
        matching_rules = matcher.get_matching_rules()

        # Rule does not match so early exit.
        if len(matching_rules) < 1:
            return False

        # Check if rule can be enforced
        try:
            enforcer = RuleEnforcer(trigger_instance=trigger_instance_db, rule=rule_db)
            params = enforcer.get_resolved_parameters()
            LOG.info("Action parameters resolved to:")
            for param in six.iteritems(params):
                LOG.info("\t%s: %s", param[0], param[1])
            return True
        except (UndefinedError, ValueError) as e:
            LOG.error("Failed to resolve parameters\n\tOriginal error : %s", str(e))
            return False
        except Exception:
            LOG.exception("Failed to resolve parameters.")
            return False

    def _get_rule_db(self):
        if self._rule_file_path:
            return self._get_rule_db_from_file(file_path=os.path.realpath(self._rule_file_path))
        elif self._rule_ref:
            return Rule.get_by_ref(self._rule_ref)
        raise ValueError("One of _rule_file_path or _rule_ref should be specified.")

    def _get_trigger_instance_db(self):
        if self._trigger_instance_file_path:
            return self._get_trigger_instance_db_from_file(file_path=os.path.realpath(self._trigger_instance_file_path))
        elif self._trigger_instance_id:
            trigger_instance_db = TriggerInstance.get_by_id(self._trigger_instance_id)
            trigger_db = Trigger.get_by_ref(trigger_instance_db.trigger)
            return trigger_instance_db, trigger_db
        raise ValueError("One of _trigger_instance_file_path or" "_trigger_instance_id should be specified.")

    def _get_rule_db_from_file(self, file_path):
        data = self._meta_loader.load(file_path=file_path)
        pack = data.get("pack", "unknown")
        name = data.get("name", "unknown")
        trigger = data["trigger"]["type"]
        criteria = data.get("criteria", None)
        action = data.get("action", {})

        rule_db = RuleDB(pack=pack, name=name, trigger=trigger, criteria=criteria, action=action, enabled=True)

        return rule_db

    def _get_trigger_instance_db_from_file(self, file_path):
        data = self._meta_loader.load(file_path=file_path)
        instance = TriggerInstanceDB(**data)

        trigger_ref = ResourceReference.from_string_reference(instance["trigger"])
        trigger_db = TriggerDB(pack=trigger_ref.pack, name=trigger_ref.name, type=trigger_ref.ref)
        return instance, trigger_db
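
A minimal usage sketch for this tester, assuming a rule definition and a trigger instance stored in local YAML files (both paths are hypothetical):

tester = RuleTester(rule_file_path='/opt/rules/my_rule.yaml',
                    trigger_instance_file_path='/opt/instances/instance_1.yaml')
matches = tester.evaluate()
print('Rule matched and parameters resolved' if matches
      else 'Rule did not match or parameter resolution failed')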
Example #40
0
def main(metadata_path, output_path, print_source=False):
    metadata_path = os.path.abspath(metadata_path)
    metadata_dir = os.path.dirname(metadata_path)

    meta_loader = MetaLoader()
    data = meta_loader.load(metadata_path)

    action_name = data['name']
    entry_point = data['entry_point']

    workflow_metadata_path = os.path.join(metadata_dir, entry_point)
    chainspec = meta_loader.load(workflow_metadata_path)

    chain_holder = ChainHolder(chainspec, 'workflow')

    graph_label = '%s action-chain workflow visualization' % (action_name)

    graph_attr = {
        'rankdir': 'TD',
        'labelloc': 't',
        'fontsize': '15',
        'label': graph_label
    }
    node_attr = {}
    dot = Digraph(comment='Action chain work-flow visualization',
                  node_attr=node_attr, graph_attr=graph_attr, format='png')
    #  dot.body.extend(['rankdir=TD', 'size="10,5"'])

    # Add all nodes
    node = chain_holder.get_next_node()
    while node:
        dot.node(node.name, node.name)
        node = chain_holder.get_next_node(curr_node_name=node.name)

    # Add connections
    node = chain_holder.get_next_node()
    processed_nodes = sets.Set([node.name])
    nodes = [node]
    while nodes:
        previous_node = nodes.pop()
        success_node = chain_holder.get_next_node(curr_node_name=previous_node.name,
                                                  condition='on-success')
        failure_node = chain_holder.get_next_node(curr_node_name=previous_node.name,
                                                  condition='on-failure')

        # Add success node (if any)
        if success_node:
            dot.edge(previous_node.name, success_node.name, constraint='true',
                     color='green', label='on success')
            if success_node.name not in processed_nodes:
                nodes.append(success_node)
                processed_nodes.add(success_node.name)

        # Add failure node (if any)
        if failure_node:
            dot.edge(previous_node.name, failure_node.name, constraint='true',
                     color='red', label='on failure')
            if failure_node.name not in processed_nodes:
                nodes.append(failure_node)
                processed_nodes.add(failure_node.name)

    if print_source:
        print(dot.source)

    if output_path:
        output_path = os.path.join(output_path, action_name)
    else:
        output_path = os.path.join(os.getcwd(), action_name)

    dot.format = 'png'
    dot.render(output_path)

    print('Graph saved at %s' % (output_path + '.png'))
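
Invoked as a small script, main() only needs the action metadata path and an output directory; it loads the chain spec referenced by entry_point and writes <output_path>/<action_name>.png. The paths below are hypothetical:

if __name__ == '__main__':
    # Render the action-chain graph for a (hypothetical) example action.
    main(metadata_path='/opt/stackstorm/packs/examples/actions/my_chain.yaml',
         output_path='/tmp/chain_graphs',
         print_source=True)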
Example #41
0
File: tester.py Project: automotola/st2
class RuleTester(object):
    def __init__(self, rule_file_path=None, rule_ref=None, trigger_instance_file_path=None,
                 trigger_instance_id=None):
        """
        :param rule_file_path: Path to the file containing rule definition.
        :type rule_file_path: ``str``

        :param trigger_instance_file_path: Path to the file containing trigger instance definition.
        :type trigger_instance_file_path: ``str``
        """
        self._rule_file_path = rule_file_path
        self._rule_ref = rule_ref
        self._trigger_instance_file_path = trigger_instance_file_path
        self._trigger_instance_id = trigger_instance_id
        self._meta_loader = MetaLoader()

    def evaluate(self):
        """
        Evaluate trigger instance against the rule.

        :return: ``True`` if the rule matches, ``False`` otherwise.
        :rtype: ``boolean``
        """

        rule_db = self._get_rule_db()
        trigger_instance_db, trigger_db = self._get_trigger_instance_db()

        # The trigger check needs to be performed here as that is not performed
        # by RulesMatcher.
        if rule_db.trigger != trigger_db.ref:
            LOG.info('rule.trigger "%s" and trigger.ref "%s" do not match.',
                     rule_db.trigger, trigger_db.ref)
            return False

        matcher = RulesMatcher(trigger_instance=trigger_instance_db, trigger=trigger_db,
                               rules=[rule_db], extra_info=True)
        matching_rules = matcher.get_matching_rules()

        return len(matching_rules) >= 1

    def _get_rule_db(self):
        if self._rule_file_path:
            return self._get_rule_db_from_file(
                file_path=os.path.realpath(self._rule_file_path))
        elif self._rule_ref:
            return Rule.get_by_ref(self._rule_ref)
        raise ValueError('One of _rule_file_path or _rule_ref should be specified.')

    def _get_trigger_instance_db(self):
        if self._trigger_instance_file_path:
            return self._get_trigger_instance_db_from_file(
                file_path=os.path.realpath(self._trigger_instance_file_path))
        elif self._trigger_instance_id:
            trigger_instance_db = TriggerInstance.get_by_id(self._trigger_instance_id)
            trigger_db = Trigger.get_by_ref(trigger_instance_db.trigger)
            return trigger_instance_db, trigger_db
        raise ValueError('One of _trigger_instance_file_path or '
                         '_trigger_instance_id should be specified.')

    def _get_rule_db_from_file(self, file_path):
        data = self._meta_loader.load(file_path=file_path)
        pack = data.get('pack', 'unknown')
        name = data.get('name', 'unknown')
        trigger = data['trigger']['type']
        criteria = data.get('criteria', None)

        rule_db = RuleDB(pack=pack, name=name, trigger=trigger, criteria=criteria, action={},
                         enabled=True)
        return rule_db

    def _get_trigger_instance_db_from_file(self, file_path):
        data = self._meta_loader.load(file_path=file_path)
        instance = TriggerInstanceDB(**data)

        trigger_ref = ResourceReference.from_string_reference(instance['trigger'])
        trigger_db = TriggerDB(pack=trigger_ref.pack, name=trigger_ref.name, type=trigger_ref.ref)
        return instance, trigger_db
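
_get_rule_db_from_file only relies on a handful of keys in the loaded rule file. A sketch of the mapping MetaLoader might return for such a file (all values are hypothetical):

# Hypothetical rule definition as returned by MetaLoader.load().
data = {
    'pack': 'examples',
    'name': 'on_error_line',
    'trigger': {'type': 'linux.file_watch.line'},
    'criteria': {'trigger.line': {'type': 'icontains', 'pattern': 'error'}}
}
# This would produce roughly:
# RuleDB(pack='examples', name='on_error_line', trigger='linux.file_watch.line',
#        criteria=data['criteria'], action={}, enabled=True)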
Example #42
0
class ActionChainRunner(ActionRunner):
    def __init__(self, runner_id):
        super(ActionChainRunner, self).__init__(runner_id=runner_id)
        self.chain_holder = None
        self._meta_loader = MetaLoader()
        self._stopped = False
        self._skip_notify_tasks = []
        self._display_published = True
        self._chain_notify = None

    def pre_run(self):
        super(ActionChainRunner, self).pre_run()

        chainspec_file = self.entry_point
        LOG.debug('Reading action chain from %s for action %s.',
                  chainspec_file, self.action)

        try:
            chainspec = self._meta_loader.load(file_path=chainspec_file,
                                               expected_type=dict)
        except Exception as e:
            message = (
                'Failed to parse action chain definition from "%s": %s' %
                (chainspec_file, str(e)))
            LOG.exception('Failed to load action chain definition.')
            raise runnerexceptions.ActionRunnerPreRunError(message)

        try:
            self.chain_holder = ChainHolder(chainspec, self.action_name)
        except json_schema_exceptions.ValidationError as e:
            # preserve the whole nasty jsonschema message as that is better to get to the
            # root cause
            message = str(e)
            LOG.exception('Failed to instantiate ActionChain.')
            raise runnerexceptions.ActionRunnerPreRunError(message)
        except Exception as e:
            message = e.message or str(e)
            LOG.exception('Failed to instantiate ActionChain.')
            raise runnerexceptions.ActionRunnerPreRunError(message)

        # Runner attributes are set lazily. So these steps
        # should happen outside the constructor.
        if getattr(self, 'liveaction', None):
            self._chain_notify = getattr(self.liveaction, 'notify', None)
        if self.runner_parameters:
            self._skip_notify_tasks = self.runner_parameters.get(
                'skip_notify', [])
            self._display_published = self.runner_parameters.get(
                'display_published', True)

        # Perform some pre-run chain validation
        try:
            self.chain_holder.validate()
        except Exception as e:
            raise runnerexceptions.ActionRunnerPreRunError(e.message)

    def run(self, action_parameters):
        # holds final result we store.
        result = {'tasks': []}
        # published variables are to be stored for display.
        if self._display_published:
            result[PUBLISHED_VARS_KEY] = {}
        context_result = {
        }  # holds result which is used for the template context purposes
        top_level_error = None  # stores a reference to a top level error
        fail = True
        action_node = None

        try:
            # initialize vars once we have the action_parameters. This allows
            # vars to refer to action_parameters.
            self.chain_holder.init_vars(action_parameters)
        except Exception as e:
            error = 'Failed initializing ``vars`` in chain.'

            LOG.exception(error)

            trace = traceback.format_exc(10)
            top_level_error = {'error': error, 'traceback': trace}
            result['error'] = top_level_error['error']
            result['traceback'] = top_level_error['traceback']
            return (LIVEACTION_STATUS_FAILED, result, None)

        try:
            action_node = self.chain_holder.get_next_node()
        except Exception as e:
            # Note: action_node is still None here, so its name cannot be referenced.
            LOG.exception('Failed to get starting node.')

            error = ('Failed to get starting node. Lookup failed: %s' % str(e))
            trace = traceback.format_exc(10)
            top_level_error = {'error': error, 'traceback': trace}

        parent_context = {'execution_id': self.execution_id}
        if getattr(self.liveaction, 'context', None):
            parent_context.update(self.liveaction.context)

        while action_node:
            fail = False
            timeout = False
            error = None
            liveaction = None

            created_at = date_utils.get_datetime_utc_now()

            try:
                liveaction = self._get_next_action(
                    action_node=action_node,
                    parent_context=parent_context,
                    action_params=action_parameters,
                    context_result=context_result)
            except InvalidActionReferencedException as e:
                error = (
                    'Failed to run task "%s". Action with reference "%s" doesn\'t exist.'
                    % (action_node.name, action_node.ref))
                LOG.exception(error)

                fail = True
                top_level_error = {
                    'error': error,
                    'traceback': traceback.format_exc(10)
                }
                break
            except ParameterRenderingFailedException as e:
                # Rendering parameters failed before we even got to running this action, abort and
                # fail the whole action chain
                LOG.exception('Failed to run action "%s".', action_node.name)

                fail = True
                error = (
                    'Failed to run task "%s". Parameter rendering failed: %s' %
                    (action_node.name, str(e)))
                trace = traceback.format_exc(10)
                top_level_error = {'error': error, 'traceback': trace}
                break

            try:
                liveaction = self._run_action(liveaction)
            except Exception as e:
                # Save the traceback and error message
                LOG.exception('Failure in running action "%s".',
                              action_node.name)

                error = {
                    'error':
                    'Task "%s" failed: %s' % (action_node.name, str(e)),
                    'traceback': traceback.format_exc(10)
                }
                context_result[action_node.name] = error
            else:
                # Update context result
                context_result[action_node.name] = liveaction.result

                # Render and publish variables
                rendered_publish_vars = ActionChainRunner._render_publish_vars(
                    action_node=action_node,
                    action_parameters=action_parameters,
                    execution_result=liveaction.result,
                    previous_execution_results=context_result,
                    chain_vars=self.chain_holder.vars)

                if rendered_publish_vars:
                    self.chain_holder.vars.update(rendered_publish_vars)
                    if self._display_published:
                        result[PUBLISHED_VARS_KEY].update(
                            rendered_publish_vars)
            finally:
                # Record result and resolve a next node based on the task success or failure
                updated_at = date_utils.get_datetime_utc_now()

                format_kwargs = {
                    'action_node': action_node,
                    'liveaction_db': liveaction,
                    'created_at': created_at,
                    'updated_at': updated_at
                }

                if error:
                    format_kwargs['error'] = error

                task_result = self._format_action_exec_result(**format_kwargs)
                result['tasks'].append(task_result)

                if self.liveaction_id:
                    self._stopped = action_service.is_action_canceled_or_canceling(
                        self.liveaction_id)

                if self._stopped:
                    LOG.info('Chain execution (%s) canceled by user.',
                             self.liveaction_id)
                    status = LIVEACTION_STATUS_CANCELED
                    return (status, result, None)

                try:
                    if not liveaction:
                        fail = True
                        action_node = self.chain_holder.get_next_node(
                            action_node.name, condition='on-failure')
                    elif liveaction.status in LIVEACTION_FAILED_STATES:
                        if liveaction and liveaction.status == LIVEACTION_STATUS_TIMED_OUT:
                            timeout = True
                        else:
                            fail = True
                        action_node = self.chain_holder.get_next_node(
                            action_node.name, condition='on-failure')
                    elif liveaction.status == LIVEACTION_STATUS_CANCELED:
                        # User canceled an action (task) in the workflow - cancel the execution of
                        # rest of the workflow
                        self._stopped = True
                        LOG.info('Chain execution (%s) canceled by user.',
                                 self.liveaction_id)
                    elif liveaction.status == LIVEACTION_STATUS_SUCCEEDED:
                        action_node = self.chain_holder.get_next_node(
                            action_node.name, condition='on-success')
                except Exception as e:
                    LOG.exception('Failed to get next node "%s".',
                                  action_node.name)

                    fail = True
                    error = (
                        'Failed to get next node "%s". Lookup failed: %s' %
                        (action_node.name, str(e)))
                    trace = traceback.format_exc(10)
                    top_level_error = {'error': error, 'traceback': trace}
                    # reset action_node here so that chain breaks on failure.
                    action_node = None
                    break

                if self._stopped:
                    LOG.info('Chain execution (%s) canceled by user.',
                             self.liveaction_id)
                    status = LIVEACTION_STATUS_CANCELED
                    return (status, result, None)

        if fail:
            status = LIVEACTION_STATUS_FAILED
        elif timeout:
            status = LIVEACTION_STATUS_TIMED_OUT
        else:
            status = LIVEACTION_STATUS_SUCCEEDED

        if top_level_error:
            # Include top level error information
            result['error'] = top_level_error['error']
            result['traceback'] = top_level_error['traceback']

        return (status, result, None)

    @staticmethod
    def _render_publish_vars(action_node, action_parameters, execution_result,
                             previous_execution_results, chain_vars):
        """
        If no output is specified on the action_node the output is the entire execution_result.
        If any output is specified then only those variables are published as output of an
        execution of this action_node.
        The output variable can refer to a variable from the execution_result,
        previous_execution_results or chain_vars.
        """
        if not action_node.publish:
            return {}

        context = {}
        context.update(action_parameters)
        context.update({action_node.name: execution_result})
        context.update(previous_execution_results)
        context.update(chain_vars)
        context.update({RESULTS_KEY: previous_execution_results})
        context.update({SYSTEM_SCOPE: KeyValueLookup(scope=SYSTEM_SCOPE)})
        context.update({
            DATASTORE_PARENT_SCOPE: {
                SYSTEM_SCOPE: KeyValueLookup(scope=FULL_SYSTEM_SCOPE)
            }
        })

        try:
            rendered_result = jinja_utils.render_values(
                mapping=action_node.publish, context=context)
        except Exception as e:
            key = getattr(e, 'key', None)
            value = getattr(e, 'value', None)
            msg = (
                'Failed rendering value for publish parameter "%s" in task "%s" '
                '(template string=%s): %s' %
                (key, action_node.name, value, str(e)))
            raise ParameterRenderingFailedException(msg)

        return rendered_result

    @staticmethod
    def _resolve_params(action_node, original_parameters, results, chain_vars,
                        chain_context):
        # setup context with original parameters and the intermediate results.
        chain_parent = chain_context.get('parent', {})
        pack = chain_parent.get('pack')
        user = chain_parent.get('user')

        config = get_config(pack, user)

        context = {}
        context.update(original_parameters)
        context.update(results)
        context.update(chain_vars)
        context.update({RESULTS_KEY: results})
        context.update({SYSTEM_SCOPE: KeyValueLookup(scope=SYSTEM_SCOPE)})
        context.update({
            DATASTORE_PARENT_SCOPE: {
                SYSTEM_SCOPE: KeyValueLookup(scope=FULL_SYSTEM_SCOPE)
            }
        })
        context.update({ACTION_CONTEXT_KV_PREFIX: chain_context})
        context.update({PACK_CONFIG_CONTEXT_KV_PREFIX: config})
        try:
            rendered_params = jinja_utils.render_values(
                mapping=action_node.get_parameters(), context=context)
        except Exception as e:
            LOG.exception('Jinja rendering for parameter "%s" failed.',
                          getattr(e, 'key', None))

            key = getattr(e, 'key', None)
            value = getattr(e, 'value', None)
            msg = (
                'Failed rendering value for action parameter "%s" in task "%s" '
                '(template string=%s): %s') % (key, action_node.name, value,
                                               str(e))
            raise ParameterRenderingFailedException(msg)
        LOG.debug('Rendered params: %s: Type: %s', rendered_params,
                  type(rendered_params))
        return rendered_params

    def _get_next_action(self, action_node, parent_context, action_params,
                         context_result):
        # Verify that the referenced action exists
        # TODO: We do another lookup in cast_param, refactor to reduce number of lookups
        task_name = action_node.name
        action_ref = action_node.ref
        action_db = action_db_util.get_action_by_ref(ref=action_ref)

        if not action_db:
            error = 'Task :: %s - Action with ref %s not registered.' % (
                task_name, action_ref)
            raise InvalidActionReferencedException(error)

        resolved_params = ActionChainRunner._resolve_params(
            action_node=action_node,
            original_parameters=action_params,
            results=context_result,
            chain_vars=self.chain_holder.vars,
            chain_context={'parent': parent_context})

        liveaction = self._build_liveaction_object(
            action_node=action_node,
            resolved_params=resolved_params,
            parent_context=parent_context)

        return liveaction

    def _run_action(self,
                    liveaction,
                    wait_for_completion=True,
                    sleep_delay=1.0):
        """
        :param sleep_delay: Number of seconds to wait during "is completed" polls.
        :type sleep_delay: ``float``
        """
        try:
            # Request execution of the liveaction; it may come back already canceled.
            liveaction, _ = action_service.request(liveaction)
        except Exception as e:
            liveaction.status = LIVEACTION_STATUS_FAILED
            LOG.exception('Failed to schedule liveaction.')
            raise e

        while (wait_for_completion
               and liveaction.status not in LIVEACTION_COMPLETED_STATES):
            eventlet.sleep(sleep_delay)
            liveaction = action_db_util.get_liveaction_by_id(liveaction.id)

        return liveaction

    def _build_liveaction_object(self, action_node, resolved_params,
                                 parent_context):
        liveaction = LiveActionDB(action=action_node.ref)

        # Setup notify for task in chain.
        notify = self._get_notify(action_node)
        if notify:
            liveaction.notify = notify
            LOG.debug('%s: Task notify set to: %s', action_node.name,
                      liveaction.notify)

        liveaction.context = {
            'parent': parent_context,
            'chain': vars(action_node)
        }
        liveaction.parameters = action_param_utils.cast_params(
            action_ref=action_node.ref, params=resolved_params)
        return liveaction

    def _get_notify(self, action_node):
        if action_node.name not in self._skip_notify_tasks:
            if action_node.notify:
                task_notify = NotificationsHelper.to_model(action_node.notify)
                return task_notify
            elif self._chain_notify:
                return self._chain_notify

        return None

    def _format_action_exec_result(self,
                                   action_node,
                                   liveaction_db,
                                   created_at,
                                   updated_at,
                                   error=None):
        """
        Format ActionExecution result so it can be used in the final action result output.

        :rtype: ``dict``
        """
        assert isinstance(created_at, datetime.datetime)
        assert isinstance(updated_at, datetime.datetime)

        result = {}

        execution_db = None
        if liveaction_db:
            execution_db = ActionExecution.get(
                liveaction__id=str(liveaction_db.id))

        result['id'] = action_node.name
        result['name'] = action_node.name
        result['execution_id'] = str(execution_db.id) if execution_db else None
        result['workflow'] = None

        result['created_at'] = isotime.format(dt=created_at)
        result['updated_at'] = isotime.format(dt=updated_at)

        if error or not liveaction_db:
            result['state'] = LIVEACTION_STATUS_FAILED
        else:
            result['state'] = liveaction_db.status

        if error:
            result['result'] = error
        else:
            result['result'] = liveaction_db.result

        return result
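
To make _render_publish_vars concrete: a task's publish mapping is rendered with Jinja against a context that holds the action parameters, this task's result (under the task name), all previous task results, and the chain vars. A sketch with hypothetical task and variable names:

# Hypothetical publish mapping as it might appear on an action_node.
publish = {
    'vm_id': '{{ create_vm.id }}',           # field from this task's execution_result
    'requested_by': '{{ get_user.stdout }}'  # field from a previous task's result
}
# jinja_utils.render_values(mapping=publish, context=context) might then return
# {'vm_id': 'i-0abc123', 'requested_by': 'stanley'}, which run() merges into
# self.chain_holder.vars and, when display_published is set, into result[PUBLISHED_VARS_KEY].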
Example #43
0
class SensorsRegistrar(object):
    def __init__(self):
        self._meta_loader = MetaLoader()

    def _get_sensors_from_pack(self, sensors_dir):
        sensors = glob.glob(sensors_dir + '/*.yaml')
        sensors.extend(glob.glob(sensors_dir + '/*.yml'))
        return sensors

    def _register_sensors_from_pack(self, pack, sensors):
        for sensor in sensors:
            try:
                self._register_sensor_from_pack(pack=pack, sensor=sensor)
            except Exception as e:
                LOG.debug('Failed to register sensor "%s": %s', sensor, str(e))
            else:
                LOG.debug('Sensor "%s" successfully registered', sensor)

    def _register_sensor_from_pack(self, pack, sensor):
        sensor_metadata_file_path = sensor

        LOG.debug('Loading sensor from %s.', sensor_metadata_file_path)
        metadata = self._meta_loader.load(file_path=sensor_metadata_file_path)

        class_name = metadata.get('class_name', None)
        entry_point = metadata.get('entry_point', None)
        description = metadata.get('description', None)
        trigger_types = metadata.get('trigger_types', [])
        poll_interval = metadata.get('poll_interval', None)

        # Add TriggerType models to the DB
        trigger_type_dbs = container_utils.add_trigger_models(pack=pack,
                                                              trigger_types=trigger_types)

        # Populate a list of references belonging to this sensor
        trigger_type_refs = []
        for trigger_type_db, _ in trigger_type_dbs:
            ref_obj = trigger_type_db.get_reference()
            trigger_type_ref = ref_obj.ref
            trigger_type_refs.append(trigger_type_ref)

        if entry_point and class_name:
            sensors_dir = os.path.dirname(sensor_metadata_file_path)
            sensor_file_path = os.path.join(sensors_dir, entry_point)
            # Add Sensor model to the DB
            sensor_obj = {
                'name': class_name,
                'description': description,
                'class_name': class_name,
                'file_path': sensor_file_path,
                'trigger_types': trigger_type_refs,
                'poll_interval': poll_interval
            }
            container_utils.add_sensor_model(pack=pack, sensor=sensor_obj)

    def register_sensors_from_packs(self, base_dir):
        pack_loader = ContentPackLoader()
        dirs = pack_loader.get_content(base_dir=base_dir, content_type='sensors')

        # Add system sensors to the core pack
        dirs['core'] = SYSTEM_SENSORS_PATH

        for pack, sensors_dir in six.iteritems(dirs):
            try:
                LOG.info('Registering sensors from pack: %s', pack)
                sensors = self._get_sensors_from_pack(sensors_dir)
                self._register_sensors_from_pack(pack=pack, sensors=sensors)
            except Exception as e:
                LOG.exception('Failed registering all sensors from pack "%s": %s', sensors_dir,
                              str(e))
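
_register_sensor_from_pack reads a fixed set of keys from each sensor metadata file. A sketch of what MetaLoader might return for one of them (the class, file, and trigger names are hypothetical):

# Hypothetical sensor metadata as returned by MetaLoader.load().
metadata = {
    'class_name': 'FileWatchSensor',
    'entry_point': 'file_watch_sensor.py',
    'description': 'Emits a trigger for every new line in the watched files.',
    'poll_interval': 30,
    'trigger_types': [
        {
            'name': 'file_watch.line',
            'description': 'A new line was written to a watched file.',
            'payload_schema': {'type': 'object'}
        }
    ]
}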
Example #44
0
File: base.py Project: LindsayHill/st2
class ResourceRegistrar(object):
    ALLOWED_EXTENSIONS = []

    def __init__(self, use_pack_cache=True, fail_on_failure=False):
        """
        :param use_pack_cache: True to cache which packs have been registered in memory and
                               make sure packs are only registered once.
        :type use_pack_cache: ``bool``

        :param fail_on_failure: Throw an exception if resource registration fails.
        :type fail_on_failure: ``bool``
        """
        self._use_pack_cache = use_pack_cache
        self._fail_on_failure = fail_on_failure

        self._meta_loader = MetaLoader()
        self._pack_loader = ContentPackLoader()
        self._runner_loader = RunnersLoader()

    def get_resources_from_pack(self, resources_dir):
        resources = []
        for ext in self.ALLOWED_EXTENSIONS:
            resources_glob = resources_dir

            if resources_dir.endswith('/'):
                resources_glob = resources_dir + ext
            else:
                resources_glob = resources_dir + '/*' + ext

            resource_files = glob.glob(resources_glob)
            resources.extend(resource_files)

        resources = sorted(resources)
        return resources

    def get_registered_packs(self):
        """
        Return a list of registered packs.

        :rtype: ``list``
        """
        return REGISTERED_PACKS_CACHE.keys()

    def register_packs(self, base_dirs):
        """
        Register packs in all the provided directories.
        """
        packs = self._pack_loader.get_packs(base_dirs=base_dirs)

        registered_count = 0
        for pack_name, pack_path in six.iteritems(packs):
            self.register_pack(pack_name=pack_name, pack_dir=pack_path)
            registered_count += 1

        return registered_count

    def register_pack(self, pack_name, pack_dir):
        """
        Register pack in the provided directory.
        """
        if self._use_pack_cache and pack_name in REGISTERED_PACKS_CACHE:
            # This pack has already been registered during this register content run
            return

        LOG.debug('Registering pack: %s' % (pack_name))
        REGISTERED_PACKS_CACHE[pack_name] = True

        try:
            pack_db, _ = self._register_pack(pack_name=pack_name, pack_dir=pack_dir)
        except Exception:
            LOG.exception('Failed to register pack "%s"' % (pack_name))
            return None

        return pack_db

    def _register_pack(self, pack_name, pack_dir):
        """
        Register a pack and corresponding pack config schema (create a DB object in the system).

        Note: Pack registration now happens when registering the content and not when installing
        a pack using packs.install. Eventually this will be moved to the pack management API.
        """
        # 1. Register pack
        pack_db = self._register_pack_db(pack_name=pack_name, pack_dir=pack_dir)

        # 2. Register corresponding pack config schema
        config_schema_db = self._register_pack_config_schema_db(pack_name=pack_name,
                                                                pack_dir=pack_dir)

        return pack_db, config_schema_db

    def _register_pack_db(self, pack_name, pack_dir):
        pack_name = pack_name or ''
        manifest_path = os.path.join(pack_dir, MANIFEST_FILE_NAME)

        if not os.path.isfile(manifest_path):
            raise ValueError('Pack "%s" is missing %s file' % (pack_name, MANIFEST_FILE_NAME))

        content = self._meta_loader.load(manifest_path)
        if not content:
            raise ValueError('Pack "%s" metadata file is empty' % (pack_name))

        # The rules for the pack ref are as follows:
        # 1. If the ref attribute is available, we use that
        # 2. If pack_name is available, we use that (this only applies to packs
        #    which are in sub-directories)
        # 3. If the ref attribute is not available, but the pack name is and it meets the valid
        #    name criteria, we use that
        content['ref'] = get_pack_ref_from_metadata(metadata=content,
                                                    pack_directory_name=pack_name)

        # Include a list of pack files
        pack_file_list = get_file_list(directory=pack_dir, exclude_patterns=EXCLUDE_FILE_PATTERNS)
        content['files'] = pack_file_list

        pack_api = PackAPI(**content)
        pack_api.validate()
        pack_db = PackAPI.to_model(pack_api)

        try:
            pack_db.id = Pack.get_by_ref(content['ref']).id
        except StackStormDBObjectNotFoundError:
            LOG.debug('Pack %s not found. Creating new one.', pack_name)

        pack_db = Pack.add_or_update(pack_db)
        LOG.debug('Pack %s registered.' % (pack_name))
        return pack_db

    def _register_pack_config_schema_db(self, pack_name, pack_dir):
        config_schema_path = os.path.join(pack_dir, CONFIG_SCHEMA_FILE_NAME)

        if not os.path.isfile(config_schema_path):
            # Note: Config schema is optional
            return None

        content = {}
        values = self._meta_loader.load(config_schema_path)
        content['pack'] = pack_name
        content['attributes'] = values

        config_schema_api = ConfigSchemaAPI(**content)
        config_schema_db = ConfigSchemaAPI.to_model(config_schema_api)

        try:
            config_schema_db.id = ConfigSchema.get_by_pack(pack_name).id
        except StackStormDBObjectNotFoundError:
            LOG.debug('Config schema for pack %s not found. Creating new one.', pack_name)

        config_schema_db = ConfigSchema.add_or_update(config_schema_db)
        LOG.debug('Config schema for pack %s registered.' % (pack_name))
        return config_schema_db

    def register_runner(self):
        pass
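
A minimal sketch of driving this registrar directly (the packs directory is hypothetical):

registrar = ResourceRegistrar(use_pack_cache=False, fail_on_failure=True)
registered = registrar.register_packs(base_dirs=['/opt/stackstorm/packs'])
print('Registered %s pack(s)' % registered)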
Example #45
0
class ActionChainRunner(ActionRunner):
    def __init__(self, runner_id):
        super(ActionChainRunner, self).__init__(runner_id=runner_id)
        self.chain_holder = None
        self._meta_loader = MetaLoader()

    def pre_run(self):
        chainspec_file = self.entry_point
        LOG.debug('Reading action chain from %s for action %s.',
                  chainspec_file, self.action)

        try:
            chainspec = self._meta_loader.load(file_path=chainspec_file,
                                               expected_type=dict)
        except Exception as e:
            message = (
                'Failed to parse action chain definition from "%s": %s' %
                (chainspec_file, str(e)))
            LOG.exception('Failed to load action chain definition.')
            raise runnerexceptions.ActionRunnerPreRunError(message)

        try:
            self.chain_holder = ChainHolder(chainspec, self.action_name)
        except Exception as e:
            message = e.message or str(e)
            LOG.exception('Failed to instantiate ActionChain.')
            raise runnerexceptions.ActionRunnerPreRunError(message)

    def run(self, action_parameters):
        result = {'tasks': []}  # holds final result we store
        context_result = {
        }  # holds result which is used for the template context purposes
        top_level_error = None  # stores a reference to a top level error
        fail = True
        action_node = None

        try:
            action_node = self.chain_holder.get_next_node()
        except Exception as e:
            # Note: action_node is still None here, so its name cannot be referenced.
            LOG.exception('Failed to get starting node.')

            error = ('Failed to get starting node. Lookup failed: %s' % str(e))
            trace = traceback.format_exc(10)
            top_level_error = {'error': error, 'traceback': trace}

        while action_node:
            fail = False
            error = None
            resolved_params = None
            liveaction = None

            created_at = datetime.datetime.now()

            try:
                resolved_params = ActionChainRunner._resolve_params(
                    action_node=action_node,
                    original_parameters=action_parameters,
                    results=context_result,
                    chain_vars=self.chain_holder.vars)
            except Exception as e:
                # Rendering parameters failed before we even got to running this action, abort and
                # fail the whole action chain
                LOG.exception('Failed to run action "%s".', action_node.name)

                fail = True
                error = (
                    'Failed to run task "%s". Parameter rendering failed: %s' %
                    (action_node.name, str(e)))
                trace = traceback.format_exc(10)
                top_level_error = {'error': error, 'traceback': trace}
                break

            # Verify that the referenced action exists
            # TODO: We do another lookup in cast_param, refactor to reduce number of lookups
            action_ref = action_node.ref
            action_db = action_db_util.get_action_by_ref(ref=action_ref)

            if not action_db:
                error = (
                    'Failed to run task "%s". Action with reference "%s" doesn\'t exist.'
                    % (action_node.name, action_ref))
                LOG.exception(error)

                fail = True
                top_level_error = {'error': error, 'traceback': error}
                break

            try:
                liveaction = ActionChainRunner._run_action(
                    action_node=action_node,
                    parent_execution_id=self.liveaction_id,
                    params=resolved_params)
            except Exception as e:
                # Save the traceback and error message
                LOG.exception('Failure in running action "%s".',
                              action_node.name)

                error = {
                    'error':
                    'Task "%s" failed: %s' % (action_node.name, str(e)),
                    'traceback': traceback.format_exc(10)
                }
                context_result[action_node.name] = error
            else:
                # Update context result
                context_result[action_node.name] = liveaction.result

                # Render and publish variables
                rendered_publish_vars = ActionChainRunner._render_publish_vars(
                    action_node=action_node,
                    action_parameters=action_parameters,
                    execution_result=liveaction.result,
                    previous_execution_results=context_result,
                    chain_vars=self.chain_holder.vars)

                if rendered_publish_vars:
                    self.chain_holder.vars.update(rendered_publish_vars)
            finally:
                # Record result and resolve a next node based on the task success or failure
                updated_at = datetime.datetime.now()

                format_kwargs = {
                    'action_node': action_node,
                    'liveaction_db': liveaction,
                    'created_at': created_at,
                    'updated_at': updated_at
                }

                if error:
                    format_kwargs['error'] = error

                task_result = self._format_action_exec_result(**format_kwargs)
                result['tasks'].append(task_result)

                try:
                    if not liveaction or liveaction.status == LIVEACTION_STATUS_FAILED:
                        fail = True
                        action_node = self.chain_holder.get_next_node(
                            action_node.name, condition='on-failure')
                    elif liveaction.status == LIVEACTION_STATUS_SUCCEEDED:
                        action_node = self.chain_holder.get_next_node(
                            action_node.name, condition='on-success')
                except Exception as e:
                    LOG.exception('Failed to get next node "%s".',
                                  action_node.name)

                    fail = True
                    error = (
                        'Failed to get next node "%s". Lookup failed: %s' %
                        (action_node.name, str(e)))
                    trace = traceback.format_exc(10)
                    top_level_error = {'error': error, 'traceback': trace}
                    # reset action_node here so that chain breaks on failure.
                    action_node = None

        if fail:
            status = LIVEACTION_STATUS_FAILED
        else:
            status = LIVEACTION_STATUS_SUCCEEDED

        if top_level_error:
            # Include top level error information
            result['error'] = top_level_error['error']
            result['traceback'] = top_level_error['traceback']

        return (status, result, None)

    @staticmethod
    def _render_publish_vars(action_node, action_parameters, execution_result,
                             previous_execution_results, chain_vars):
        """
        If no output is specified on the action_node the output is the entire execution_result.
        If any output is specified then only those variables are published as output of an
        execution of this action_node.
        The output variable can refer to a variable from the execution_result,
        previous_execution_results or chain_vars.
        """
        if not action_node.publish:
            return {}

        context = {}
        context.update(action_parameters)
        context.update({action_node.name: execution_result})
        context.update(previous_execution_results)
        context.update(chain_vars)
        context.update({RESULTS_KEY: previous_execution_results})
        context.update({SYSTEM_KV_PREFIX: KeyValueLookup()})
        rendered_result = jinja_utils.render_values(
            mapping=action_node.publish, context=context)
        return rendered_result

    @staticmethod
    def _resolve_params(action_node, original_parameters, results, chain_vars):
        # setup context with original parameters and the intermediate results.
        context = {}
        context.update(original_parameters)
        context.update(results)
        context.update(chain_vars)
        context.update({RESULTS_KEY: results})
        context.update({SYSTEM_KV_PREFIX: KeyValueLookup()})
        rendered_params = jinja_utils.render_values(mapping=action_node.params,
                                                    context=context)
        LOG.debug('Rendered params: %s: Type: %s', rendered_params,
                  type(rendered_params))
        return rendered_params

    @staticmethod
    def _run_action(action_node,
                    parent_execution_id,
                    params,
                    wait_for_completion=True):
        execution = LiveActionDB(action=action_node.ref)
        execution.parameters = action_param_utils.cast_params(
            action_ref=action_node.ref, params=params)
        if action_node.notify:
            execution.notify = NotificationsHelper.to_model(action_node.notify)
        execution.context = {
            'parent': str(parent_execution_id),
            'chain': vars(action_node)
        }

        liveaction, _ = action_service.schedule(execution)
        while (wait_for_completion
               and liveaction.status != LIVEACTION_STATUS_SUCCEEDED
               and liveaction.status != LIVEACTION_STATUS_FAILED):
            eventlet.sleep(1)
            liveaction = action_db_util.get_liveaction_by_id(liveaction.id)
        return liveaction

    def _format_action_exec_result(self,
                                   action_node,
                                   liveaction_db,
                                   created_at,
                                   updated_at,
                                   error=None):
        """
        Format ActionExecution result so it can be used in the final action result output.

        :rtype: ``dict``
        """
        assert (isinstance(created_at, datetime.datetime))
        assert (isinstance(updated_at, datetime.datetime))

        result = {}

        execution_db = None
        if liveaction_db:
            execution_db = ActionExecution.get(
                liveaction__id=str(liveaction_db.id))

        result['id'] = action_node.name
        result['name'] = action_node.name
        result['execution_id'] = str(execution_db.id) if execution_db else None
        result['workflow'] = None

        result['created_at'] = isotime.format(dt=created_at)
        result['updated_at'] = isotime.format(dt=updated_at)

        if error or not liveaction_db:
            result['state'] = LIVEACTION_STATUS_FAILED
        else:
            result['state'] = liveaction_db.status

        if error:
            result['result'] = error
        else:
            result['result'] = liveaction_db.result

        return result
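For context, a minimal standalone sketch of the MetaLoader call that pre_run() above relies on. The import path and the chain file path are assumptions for illustration, not part of the original example:

from st2common.content.loader import MetaLoader  # assumed import path

meta_loader = MetaLoader()
# expected_type=dict makes the loader reject YAML/JSON documents whose
# top-level value is not a mapping (e.g. a bare list or scalar).
chainspec = meta_loader.load(file_path='actions/chains/my_chain.yaml',  # placeholder path
                             expected_type=dict)
print(chainspec)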
Example #46
File: tester.py Project: zwunix/st2
class RuleTester(object):
    def __init__(self,
                 rule_file_path=None,
                 rule_ref=None,
                 trigger_instance_file_path=None,
                 trigger_instance_id=None):
        """
        :param rule_file_path: Path to the file containing rule definition.
        :type rule_file_path: ``str``

        :param trigger_instance_file_path: Path to the file containing trigger instance definition.
        :type trigger_instance_file_path: ``str``
        """
        self._rule_file_path = rule_file_path
        self._rule_ref = rule_ref
        self._trigger_instance_file_path = trigger_instance_file_path
        self._trigger_instance_id = trigger_instance_id
        self._meta_loader = MetaLoader()

    def evaluate(self):
        """
        Evaluate trigger instance against the rule.

        :return: ``True`` if the rule matches, ``False`` otherwise.
        :rtype: ``boolean``
        """

        rule_db = self._get_rule_db()
        trigger_instance_db, trigger_db = self._get_trigger_instance_db()

        # The trigger check needs to be performed here as that is not performed
        # by RulesMatcher.
        if rule_db.trigger != trigger_db.ref:
            LOG.info('rule.trigger "%s" and trigger.ref "%s" do not match.',
                     rule_db.trigger, trigger_db.ref)
            return False

        # Check if rule matches criteria.
        matcher = RulesMatcher(trigger_instance=trigger_instance_db,
                               trigger=trigger_db,
                               rules=[rule_db],
                               extra_info=True)
        matching_rules = matcher.get_matching_rules()

        # Rule does not match so early exit.
        if len(matching_rules) < 1:
            return False

        # Check if rule can be enforced
        enforcer = RuleEnforcer(trigger_instance=trigger_instance_db,
                                rule=rule_db)

        runner_type_db = mock.Mock()
        runner_type_db.runner_parameters = {}
        action_db = mock.Mock()
        action_db.parameters = {}
        params = rule_db.action.parameters  # pylint: disable=no-member

        context, additional_contexts = enforcer.get_action_execution_context(
            action_db=action_db, trace_context=None)

        # Note: We only return partially resolved parameters.
        # To be able to return all parameters we would need access to corresponding ActionDB,
        # RunnerTypeDB and ConfigDB object, but this would add a dependency on the database and the
        # tool is meant to be used standalone.
        try:
            params = enforcer.get_resolved_parameters(
                action_db=action_db,
                runnertype_db=runner_type_db,
                params=params,
                context=context,
                additional_contexts=additional_contexts)

            LOG.info('Action parameters resolved to:')
            for param in six.iteritems(params):
                LOG.info('\t%s: %s', param[0], param[1])
            return True
        except (UndefinedError, ValueError) as e:
            LOG.error('Failed to resolve parameters\n\tOriginal error : %s',
                      six.text_type(e))
            return False
        except:
            LOG.exception('Failed to resolve parameters.')
            return False

    def _get_rule_db(self):
        if self._rule_file_path:
            return self._get_rule_db_from_file(
                file_path=os.path.realpath(self._rule_file_path))
        elif self._rule_ref:
            return Rule.get_by_ref(self._rule_ref)
        raise ValueError(
            'One of _rule_file_path or _rule_ref should be specified.')

    def _get_trigger_instance_db(self):
        if self._trigger_instance_file_path:
            return self._get_trigger_instance_db_from_file(
                file_path=os.path.realpath(self._trigger_instance_file_path))
        elif self._trigger_instance_id:
            trigger_instance_db = TriggerInstance.get_by_id(
                self._trigger_instance_id)
            trigger_db = Trigger.get_by_ref(trigger_instance_db.trigger)
            return trigger_instance_db, trigger_db
        raise ValueError('One of _trigger_instance_file_path or '
                         '_trigger_instance_id should be specified.')

    def _get_rule_db_from_file(self, file_path):
        data = self._meta_loader.load(file_path=file_path)
        pack = data.get('pack', 'unknown')
        name = data.get('name', 'unknown')
        trigger = data['trigger']['type']
        criteria = data.get('criteria', None)
        action = data.get('action', {})

        rule_db = RuleDB(pack=pack,
                         name=name,
                         trigger=trigger,
                         criteria=criteria,
                         action=action,
                         enabled=True)
        rule_db.id = 'rule_tester_rule'

        return rule_db

    def _get_trigger_instance_db_from_file(self, file_path):
        data = self._meta_loader.load(file_path=file_path)
        instance = TriggerInstanceDB(**data)
        instance.id = 'rule_tester_instance'

        trigger_ref = ResourceReference.from_string_reference(
            instance['trigger'])
        trigger_db = TriggerDB(pack=trigger_ref.pack,
                               name=trigger_ref.name,
                               type=trigger_ref.ref)
        return instance, trigger_db
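A hedged usage sketch for the RuleTester class above; the file paths are placeholders and assume rule and trigger instance YAML definitions exist on disk:

# Placeholder paths -- point these at real rule / trigger instance YAML files.
tester = RuleTester(rule_file_path='rules/sample_rule.yaml',
                    trigger_instance_file_path='fixtures/sample_trigger_instance.yaml')

if tester.evaluate():
    print('Rule matched and action parameters resolved.')
else:
    print('Rule did not match (or parameter resolution failed).')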
Example #47
class FixturesLoader(object):
    def __init__(self):
        self.meta_loader = MetaLoader()

    def save_fixtures_to_db(self, fixtures_pack='generic', fixtures_dict=None):
        """
        Loads fixtures specified in fixtures_dict into the database
        and returns DB models for the fixtures.

        fixtures_dict should be of the form:
        {
            'actions': ['action-1.yaml', 'action-2.yaml'],
            'rules': ['rule-1.yaml'],
            'liveactions': ['execution-1.yaml']
        }

        :param fixtures_pack: Name of the pack to load fixtures from.
        :type fixtures_pack: ``str``

        :param fixtures_dict: Dictionary specifying the fixtures to load for each type.
        :type fixtures_dict: ``dict``

        :rtype: ``dict``
        """
        if fixtures_dict is None:
            fixtures_dict = {}
        fixtures_pack_path = self._validate_fixtures_pack(fixtures_pack)
        self._validate_fixture_dict(fixtures_dict, allowed=ALLOWED_DB_FIXTURES)

        db_models = {}
        for fixture_type, fixtures in six.iteritems(fixtures_dict):

            API_MODEL = FIXTURE_API_MODEL.get(fixture_type, None)
            PERSISTENCE_MODEL = FIXTURE_PERSISTENCE_MODEL.get(fixture_type, None)

            loaded_fixtures = {}
            for fixture in fixtures:
                fixture_dict = self.meta_loader.load(
                    self._get_fixture_file_path_abs(fixtures_pack_path, fixture_type, fixture))
                api_model = API_MODEL(**fixture_dict)
                db_model = API_MODEL.to_model(api_model)
                db_model = PERSISTENCE_MODEL.add_or_update(db_model)
                loaded_fixtures[fixture] = db_model

            db_models[fixture_type] = loaded_fixtures

        return db_models

    def load_fixtures(self, fixtures_pack='generic', fixtures_dict=None):
        """
        Loads fixtures specified in fixtures_dict. We
        simply want to load the meta into dict objects.

        fixtures_dict should be of the form:
        {
            'actionchains': ['actionchain1.yaml', 'actionchain2.yaml'],
            'workflows': ['workflow.yaml']
        }

        :param fixtures_pack: Name of the pack to load fixtures from.
        :type fixtures_pack: ``str``

        :param fixtures_dict: Dictionary specifying the fixtures to load for each type.
        :type fixtures_dict: ``dict``

        :rtype: ``dict``
        """
        if not fixtures_dict:
            return {}
        fixtures_pack_path = self._validate_fixtures_pack(fixtures_pack)
        self._validate_fixture_dict(fixtures_dict)

        all_fixtures = {}
        for fixture_type, fixtures in six.iteritems(fixtures_dict):
            loaded_fixtures = {}
            for fixture in fixtures:
                fixture_dict = self.meta_loader.load(
                    self._get_fixture_file_path_abs(fixtures_pack_path, fixture_type, fixture))
                loaded_fixtures[fixture] = fixture_dict
            all_fixtures[fixture_type] = loaded_fixtures

        return all_fixtures

    def load_models(self, fixtures_pack='generic', fixtures_dict=None):
        """
        Loads fixtures specified in fixtures_dict as db models. This method must be
        used for fixtures that have associated DB models. We simply want to load the
        meta as DB models but don't want to save them to db.

        fixtures_dict should be of the form:
        {
            'actions': ['action-1.yaml', 'action-2.yaml'],
            'rules': ['rule-1.yaml'],
            'liveactions': ['execution-1.yaml']
        }

        :param fixtures_pack: Name of the pack to load fixtures from.
        :type fixtures_pack: ``str``

        :param fixtures_dict: Dictionary specifying the fixtures to load for each type.
        :type fixtures_dict: ``dict``

        :rtype: ``dict``
        """
        if not fixtures_dict:
            return {}
        fixtures_pack_path = self._validate_fixtures_pack(fixtures_pack)
        self._validate_fixture_dict(fixtures_dict, allowed=ALLOWED_DB_FIXTURES)

        all_fixtures = {}
        for fixture_type, fixtures in six.iteritems(fixtures_dict):

            API_MODEL = FIXTURE_API_MODEL.get(fixture_type, None)

            loaded_models = {}
            for fixture in fixtures:
                fixture_dict = self.meta_loader.load(
                    self._get_fixture_file_path_abs(fixtures_pack_path, fixture_type, fixture))
                api_model = API_MODEL(**fixture_dict)
                db_model = API_MODEL.to_model(api_model)
                loaded_models[fixture] = db_model
            all_fixtures[fixture_type] = loaded_models

        return all_fixtures

    def delete_fixtures_from_db(self, fixtures_pack='generic', fixtures_dict=None,
                                raise_on_fail=False):
        """
        Deletes fixtures specified in fixtures_dict from the database.

        fixtures_dict should be of the form:
        {
            'actions': ['action-1.yaml', 'action-2.yaml'],
            'rules': ['rule-1.yaml'],
            'liveactions': ['execution-1.yaml']
        }

        :param fixtures_pack: Name of the pack to delete fixtures from.
        :type fixtures_pack: ``str``

        :param fixtures_dict: Dictionary specifying the fixtures to delete for each type.
        :type fixtures_dict: ``dict``

        :param raise_on_fail: Optional. If True, raise an exception if deleting any fixture fails.
        :type raise_on_fail: ``boolean``
        """
        if not fixtures_dict:
            return
        fixtures_pack_path = self._validate_fixtures_pack(fixtures_pack)
        self._validate_fixture_dict(fixtures_dict)

        for fixture_type, fixtures in six.iteritems(fixtures_dict):
            API_MODEL = FIXTURE_API_MODEL.get(fixture_type, None)
            PERSISTENCE_MODEL = FIXTURE_PERSISTENCE_MODEL.get(fixture_type, None)
            for fixture in fixtures:
                fixture_dict = self.meta_loader.load(
                    self._get_fixture_file_path_abs(fixtures_pack_path, fixture_type, fixture))
                # Note that when we have a reference mechanism consistent for
                # every model, we can just do a get and delete the object. Until
                # then, these model conversions are necessary.
                api_model = API_MODEL(**fixture_dict)
                db_model = API_MODEL.to_model(api_model)
                try:
                    PERSISTENCE_MODEL.delete(db_model)
                except:
                    if raise_on_fail:
                        raise

    def delete_models_from_db(self, models_dict, raise_on_fail=False):
        """
        Deletes models specified in models_dict from the database.

        models_dict should be of the form:
        {
            'actions': [ACTION1, ACTION2],
            'rules': [RULE1],
            'liveactions': [EXECUTION]
        }

        :param models_dict: Dictionary specifying the models to delete for each type.
        :type models_dict: ``dict``

        :param raise_on_fail: Optional. If True, raise an exception if deleting any model fails.
        :type raise_on_fail: ``boolean``
        """
        for model_type, models in six.iteritems(models_dict):
            PERSISTENCE_MODEL = FIXTURE_PERSISTENCE_MODEL.get(model_type, None)
            for model in models:
                try:
                    PERSISTENCE_MODEL.delete(model)
                except:
                    if raise_on_fail:
                        raise

    def _validate_fixtures_pack(self, fixtures_pack):
        fixtures_pack_path = self._get_fixtures_pack_path(fixtures_pack)

        if not self._is_fixture_pack_exists(fixtures_pack_path):
            raise Exception('Fixtures pack not found in fixtures path %s.' %
                            (get_fixtures_base_path()))
        return fixtures_pack_path

    def _validate_fixture_dict(self, fixtures_dict, allowed=ALLOWED_FIXTURES):
        fixture_types = fixtures_dict.keys()
        for fixture_type in fixture_types:
            if fixture_type not in allowed:
                raise Exception('Disallowed fixture type: %s' % fixture_type)

    def _is_fixture_pack_exists(self, fixtures_pack_path):
        return os.path.exists(fixtures_pack_path)

    def _get_fixture_file_path_abs(self, fixtures_pack_path, fixtures_type, fixture_name):
        return os.path.join(fixtures_pack_path, fixtures_type, fixture_name)

    def _get_fixtures_pack_path(self, fixtures_pack_name):
        return os.path.join(get_fixtures_base_path(), fixtures_pack_name)

    def get_fixture_file_path_abs(self, fixtures_pack, fixtures_type, fixture_name):
        return os.path.join(get_fixtures_base_path(), fixtures_pack, fixtures_type, fixture_name)
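A usage sketch for FixturesLoader, assuming the referenced fixture files exist under the 'generic' fixtures pack (the file names are taken from the docstrings above):

loader = FixturesLoader()

# Load raw fixture dicts without touching the database.
fixtures = loader.load_fixtures(fixtures_pack='generic',
                                fixtures_dict={'actionchains': ['actionchain1.yaml']})

# Load fixtures as DB models and persist them (e.g. from a test's setUp).
db_models = loader.save_fixtures_to_db(fixtures_pack='generic',
                                       fixtures_dict={'actions': ['action-1.yaml'],
                                                      'rules': ['rule-1.yaml']})

# Clean up afterwards (e.g. from tearDown).
loader.delete_fixtures_from_db(fixtures_pack='generic',
                               fixtures_dict={'actions': ['action-1.yaml'],
                                              'rules': ['rule-1.yaml']})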
Example #48
class ResourceRegistrar(object):
    ALLOWED_EXTENSIONS = []

    def __init__(self, use_pack_cache=True):
        """
        :param use_pack_cache: True to cache which packs have been registered in memory and make
                               sure packs are only registered once.
        :type use_pack_cache: ``bool``
        """
        self._use_pack_cache = use_pack_cache
        self._meta_loader = MetaLoader()
        self._pack_loader = ContentPackLoader()

    def get_resources_from_pack(self, resources_dir):
        resources = []
        for ext in self.ALLOWED_EXTENSIONS:
            resources_glob = resources_dir

            if resources_dir.endswith('/'):
                resources_glob = resources_dir + ext
            else:
                resources_glob = resources_dir + '/*' + ext

            resource_files = glob.glob(resources_glob)
            resources.extend(resource_files)

        resources = sorted(resources)
        return resources

    def register_packs(self, base_dirs):
        """
        Register packs in all the provided directories.
        """
        packs = self._pack_loader.get_packs(base_dirs=base_dirs)

        registered_count = 0
        for pack_name, pack_path in six.iteritems(packs):
            self.register_pack(pack_name=pack_name, pack_dir=pack_path)
            registered_count += 1

        return registered_count

    def register_pack(self, pack_name, pack_dir):
        """
        Register pack in the provided directory.
        """
        if self._use_pack_cache and pack_name in REGISTERED_PACKS_CACHE:
            # This pack has already been registered during this register content run
            return

        LOG.debug('Registering pack: %s' % (pack_name))
        REGISTERED_PACKS_CACHE[pack_name] = True

        try:
            pack_db = self._register_pack(pack_name=pack_name,
                                          pack_dir=pack_dir)
        except Exception:
            LOG.exception('Failed to register pack "%s"' % (pack_name))
            return None

        return pack_db

    def _register_pack(self, pack_name, pack_dir):
        """
        Register a pack (create a DB object in the system).

        Note: Pack registration now happens when registering the content and not when installing
        a pack using packs.install. Eventually this will be moved to the pack management API.
        """
        manifest_path = os.path.join(pack_dir, MANIFEST_FILE_NAME)

        if not os.path.isfile(manifest_path):
            raise ValueError('Pack "%s" is missing %s file' %
                             (pack_name, MANIFEST_FILE_NAME))

        content = self._meta_loader.load(manifest_path)
        if not content:
            raise ValueError('Pack "%s" metadata file is empty' % (pack_name))

        content['ref'] = pack_name

        # Include a list of pack files
        pack_file_list = get_file_list(directory=pack_dir,
                                       exclude_patterns=EXCLUDE_FILE_PATTERNS)
        content['files'] = pack_file_list

        pack_api = PackAPI(**content)
        pack_db = PackAPI.to_model(pack_api)

        try:
            pack_db.id = Pack.get_by_ref(pack_name).id
        except ValueError:
            LOG.debug('Pack %s not found. Creating new one.', pack_name)

        pack_db = Pack.add_or_update(pack_db)
        LOG.debug('Pack %s registered.' % (pack_name))
        return pack_db
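A minimal sketch of driving the registrar above; the base directory is a placeholder and assumes a standard StackStorm packs layout:

registrar = ResourceRegistrar(use_pack_cache=False)

# Register every pack found under the provided base directories.
count = registrar.register_packs(base_dirs=['/opt/stackstorm/packs'])
print('Registered %d pack(s)' % (count))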
Example #49
 def __init__(self, runner_id):
     super(ActionChainRunner, self).__init__(runner_id=runner_id)
     self.chain_holder = None
     self._meta_loader = MetaLoader()
Example #50
class ActionChainRunner(ActionRunner):

    def __init__(self, runner_id):
        super(ActionChainRunner, self).__init__(runner_id=runner_id)
        self.chain_holder = None
        self._meta_loader = MetaLoader()
        self._stopped = False

    def pre_run(self):
        chainspec_file = self.entry_point
        LOG.debug('Reading action chain from %s for action %s.', chainspec_file,
                  self.action)

        try:
            chainspec = self._meta_loader.load(file_path=chainspec_file,
                                               expected_type=dict)
        except Exception as e:
            message = ('Failed to parse action chain definition from "%s": %s' %
                       (chainspec_file, str(e)))
            LOG.exception('Failed to load action chain definition.')
            raise runnerexceptions.ActionRunnerPreRunError(message)

        try:
            self.chain_holder = ChainHolder(chainspec, self.action_name)
        except Exception as e:
            message = e.message or str(e)
            LOG.exception('Failed to instantiate ActionChain.')
            raise runnerexceptions.ActionRunnerPreRunError(message)

    def run(self, action_parameters):
        result = {'tasks': []}  # holds final result we store
        context_result = {}  # holds result which is used for the template context purposes
        top_level_error = None  # stores a reference to a top level error
        fail = True
        action_node = None

        try:
            action_node = self.chain_holder.get_next_node()
        except Exception as e:
            # Note: action_node is still None at this point, so don't dereference it.
            LOG.exception('Failed to get starting node.')

            error = ('Failed to get starting node. Lookup failed: %s' % (str(e)))
            trace = traceback.format_exc(10)
            top_level_error = {
                'error': error,
                'traceback': trace
            }

        while action_node:
            fail = False
            error = None
            resolved_params = None
            liveaction = None

            created_at = datetime.datetime.now()

            try:
                resolved_params = ActionChainRunner._resolve_params(
                    action_node=action_node, original_parameters=action_parameters,
                    results=context_result, chain_vars=self.chain_holder.vars)
            except Exception as e:
                # Rendering parameters failed before we even got to running this action, abort and
                # fail the whole action chain
                LOG.exception('Failed to run action "%s".', action_node.name)

                fail = True
                error = ('Failed to run task "%s". Parameter rendering failed: %s' %
                         (action_node.name, str(e)))
                trace = traceback.format_exc(10)
                top_level_error = {
                    'error': error,
                    'traceback': trace
                }
                break

            # Verify that the referenced action exists
            # TODO: We do another lookup in cast_param, refactor to reduce number of lookups
            action_ref = action_node.ref
            action_db = action_db_util.get_action_by_ref(ref=action_ref)

            if not action_db:
                error = ('Failed to run task "%s". Action with reference "%s" doesn\'t exist.' %
                         (action_node.name, action_ref))
                LOG.exception(error)

                fail = True
                top_level_error = {
                    'error': error,
                    'traceback': error
                }
                break

            try:
                liveaction = self._run_action(
                    action_node=action_node, parent_execution_id=self.liveaction_id,
                    params=resolved_params)
            except Exception as e:
                # Save the traceback and error message
                LOG.exception('Failure in running action "%s".', action_node.name)

                error = {
                    'error': 'Task "%s" failed: %s' % (action_node.name, str(e)),
                    'traceback': traceback.format_exc(10)
                }
                context_result[action_node.name] = error
            else:
                # Update context result
                context_result[action_node.name] = liveaction.result

                # Render and publish variables
                rendered_publish_vars = ActionChainRunner._render_publish_vars(
                    action_node=action_node, action_parameters=action_parameters,
                    execution_result=liveaction.result, previous_execution_results=context_result,
                    chain_vars=self.chain_holder.vars)

                if rendered_publish_vars:
                    self.chain_holder.vars.update(rendered_publish_vars)
            finally:
                # Record result and resolve a next node based on the task success or failure
                updated_at = datetime.datetime.now()

                format_kwargs = {'action_node': action_node, 'liveaction_db': liveaction,
                                 'created_at': created_at, 'updated_at': updated_at}

                if error:
                    format_kwargs['error'] = error

                task_result = self._format_action_exec_result(**format_kwargs)
                result['tasks'].append(task_result)

                if self.liveaction_id:
                    self._stopped = action_service.is_action_canceled(self.liveaction_id)

                if not self._stopped:
                    try:
                        if not liveaction or liveaction.status == LIVEACTION_STATUS_FAILED:
                            fail = True
                            action_node = self.chain_holder.get_next_node(action_node.name,
                                                                          condition='on-failure')
                        elif liveaction.status == LIVEACTION_STATUS_SUCCEEDED:
                            action_node = self.chain_holder.get_next_node(action_node.name,
                                                                          condition='on-success')
                    except Exception as e:
                        LOG.exception('Failed to get next node "%s".', action_node.name)

                        fail = True
                        error = ('Failed to get next node "%s". Lookup failed: %s' %
                                 (action_node.name, str(e)))
                        trace = traceback.format_exc(10)
                        top_level_error = {
                            'error': error,
                            'traceback': trace
                        }
                        # reset action_node here so that chain breaks on failure.
                        action_node = None
                else:
                    LOG.info('Chain execution (%s) canceled by user.', self.liveaction_id)
                    status = LIVEACTION_STATUS_CANCELED
                    return (status, result, None)

        if fail:
            status = LIVEACTION_STATUS_FAILED
        else:
            status = LIVEACTION_STATUS_SUCCEEDED

        if top_level_error:
            # Include top level error information
            result['error'] = top_level_error['error']
            result['traceback'] = top_level_error['traceback']

        return (status, result, None)

    @staticmethod
    def _render_publish_vars(action_node, action_parameters, execution_result,
                             previous_execution_results, chain_vars):
        """
        If no output is specified on the action_node the output is the entire execution_result.
        If any output is specified then only those variables are published as output of an
        execution of this action_node.
        The output variable can refer to a variable from the execution_result,
        previous_execution_results or chain_vars.
        """
        if not action_node.publish:
            return {}

        context = {}
        context.update(action_parameters)
        context.update({action_node.name: execution_result})
        context.update(previous_execution_results)
        context.update(chain_vars)
        context.update({RESULTS_KEY: previous_execution_results})
        context.update({SYSTEM_KV_PREFIX: KeyValueLookup()})
        rendered_result = jinja_utils.render_values(mapping=action_node.publish, context=context)
        return rendered_result

    @staticmethod
    def _resolve_params(action_node, original_parameters, results, chain_vars):
        # setup context with original parameters and the intermediate results.
        context = {}
        context.update(original_parameters)
        context.update(results)
        context.update(chain_vars)
        context.update({RESULTS_KEY: results})
        context.update({SYSTEM_KV_PREFIX: KeyValueLookup()})
        rendered_params = jinja_utils.render_values(mapping=action_node.params, context=context)
        LOG.debug('Rendered params: %s: Type: %s', rendered_params, type(rendered_params))
        return rendered_params

    def _run_action(self, action_node, parent_execution_id, params, wait_for_completion=True):
        liveaction = LiveActionDB(action=action_node.ref)
        liveaction.parameters = action_param_utils.cast_params(action_ref=action_node.ref,
                                                               params=params)
        if action_node.notify:
            liveaction.notify = NotificationsHelper.to_model(action_node.notify)

        liveaction.context = {
            'parent': str(parent_execution_id),
            'chain': vars(action_node)
        }

        liveaction, _ = action_service.schedule(liveaction)

        while (wait_for_completion and
               liveaction.status != LIVEACTION_STATUS_SUCCEEDED and
               liveaction.status != LIVEACTION_STATUS_FAILED):
            eventlet.sleep(1)
            liveaction = action_db_util.get_liveaction_by_id(liveaction.id)

        return liveaction

    def _format_action_exec_result(self, action_node, liveaction_db, created_at, updated_at,
                                   error=None):
        """
        Format ActionExecution result so it can be used in the final action result output.

        :rtype: ``dict``
        """
        assert(isinstance(created_at, datetime.datetime))
        assert(isinstance(updated_at, datetime.datetime))

        result = {}

        execution_db = None
        if liveaction_db:
            execution_db = ActionExecution.get(liveaction__id=str(liveaction_db.id))

        result['id'] = action_node.name
        result['name'] = action_node.name
        result['execution_id'] = str(execution_db.id) if execution_db else None
        result['workflow'] = None

        result['created_at'] = isotime.format(dt=created_at)
        result['updated_at'] = isotime.format(dt=updated_at)

        if error or not liveaction_db:
            result['state'] = LIVEACTION_STATUS_FAILED
        else:
            result['state'] = liveaction_db.status

        if error:
            result['result'] = error
        else:
            result['result'] = liveaction_db.result

        return result
Example #51
 def __init__(self, runner_id):
     super(ActionChainRunner, self).__init__(runner_id=runner_id)
     self.chain_holder = None
     self._meta_loader = MetaLoader()
Example #52
File: loader.py Project: nzlosh/st2
class RBACDefinitionsLoader(object):
    """
    A class which loads role definitions and user role assignments from files on
    disk.
    """

    def __init__(self):
        base_path = cfg.CONF.system.base_path

        self._rbac_definitions_path = os.path.join(base_path, 'rbac/')
        self._role_definitions_path = os.path.join(self._rbac_definitions_path, 'roles/')
        self._role_assignments_path = os.path.join(self._rbac_definitions_path, 'assignments/')
        self._role_maps_path = os.path.join(self._rbac_definitions_path, 'mappings/')
        self._meta_loader = MetaLoader()

    def load(self):
        """
        :return: Dict with the following keys: roles, role_assignments, group_to_role_maps
        :rtype: ``dict``
        """
        result = {}
        result['roles'] = self.load_role_definitions()
        result['role_assignments'] = self.load_user_role_assignments()
        result['group_to_role_maps'] = self.load_group_to_role_maps()

        return result

    def load_role_definitions(self):
        """
        Load all the role definitions.

        :rtype: ``dict``
        """
        LOG.info('Loading role definitions from "%s"' % (self._role_definitions_path))
        file_paths = self._get_role_definitions_file_paths()

        result = {}
        for file_path in file_paths:
            LOG.debug('Loading role definition from: %s' % (file_path))
            role_definition_api = self.load_role_definition_from_file(file_path=file_path)
            role_name = role_definition_api.name
            enabled = getattr(role_definition_api, 'enabled', True)

            if role_name in result:
                raise ValueError('Duplicate definition file found for role "%s"' % (role_name))

            if not enabled:
                LOG.debug('Skipping disabled role "%s"' % (role_name))
                continue

            result[role_name] = role_definition_api

        return result

    def load_user_role_assignments(self):
        """
        Load all the user role assignments.

        :rtype: ``dict``
        """
        LOG.info('Loading user role assignments from "%s"' % (self._role_assignments_path))
        file_paths = self._get_role_assiginments_file_paths()

        result = {}
        for file_path in file_paths:
            LOG.debug('Loading user role assignments from: %s' % (file_path))
            role_assignment_api = self.load_user_role_assignments_from_file(file_path=file_path)
            username = role_assignment_api.username
            enabled = getattr(role_assignment_api, 'enabled', True)

            if username in result:
                raise ValueError('Duplicate definition file found for user "%s"' % (username))

            if not enabled:
                LOG.debug('Skipping disabled role assignment for user "%s"' % (username))
                continue

            result[username] = role_assignment_api

        return result

    def load_group_to_role_maps(self):
        """
        Load all the remote group to local role mappings.

        :rtype: ``dict``
        """
        LOG.info('Loading group to role map definitions from "%s"' % (self._role_maps_path))
        file_paths = self._get_group_to_role_maps_file_paths()

        result = {}
        for file_path in file_paths:
            LOG.debug('Loading group to role mapping from: %s' % (file_path))
            group_to_role_map_api = self.load_group_to_role_map_assignment_from_file(
                file_path=file_path)

            group_name = group_to_role_map_api.group
            result[group_name] = group_to_role_map_api

        return result

    def load_role_definition_from_file(self, file_path):
        """
        Load role definition from file.

        :param file_path: Path to the role definition file.
        :type file_path: ``str``

        :return: Role definition.
        :rtype: :class:`RoleDefinitionFileFormatAPI`
        """
        content = self._meta_loader.load(file_path)

        if not content:
            msg = ('Role definition file "%s" is empty and invalid' % file_path)
            raise ValueError(msg)

        role_definition_api = RoleDefinitionFileFormatAPI(**content)
        role_definition_api = role_definition_api.validate()

        return role_definition_api

    def load_user_role_assignments_from_file(self, file_path):
        """
        Load user role assignments from file.

        :param file_path: Path to the user role assignment file.
        :type file_path: ``str``

        :return: User role assignments.
        :rtype: :class:`UserRoleAssignmentFileFormatAPI`
        """
        content = self._meta_loader.load(file_path)

        if not content:
            msg = ('Role assignment file "%s" is empty and invalid' % file_path)
            raise ValueError(msg)

        user_role_assignment_api = UserRoleAssignmentFileFormatAPI(**content)
        user_role_assignment_api.file_path = file_path[file_path.rfind('assignments/'):]
        user_role_assignment_api = user_role_assignment_api.validate()

        return user_role_assignment_api

    def load_group_to_role_map_assignment_from_file(self, file_path):
        content = self._meta_loader.load(file_path)

        if not content:
            msg = ('Group to role map assignment file "%s" is empty and invalid' % (file_path))
            raise ValueError(msg)

        group_to_role_map_api = AuthGroupToRoleMapAssignmentFileFormatAPI(**content)
        group_to_role_map_api.file_path = file_path[file_path.rfind('mappings/'):]
        group_to_role_map_api = group_to_role_map_api.validate()

        return group_to_role_map_api

    def _get_role_definitions_file_paths(self):
        """
        Retrieve a list of paths for all the role definitions.

        Note: Roles are sorted in alphabetical order based on the role name.

        :rtype: ``list``
        """
        glob_str = self._role_definitions_path + '*.yaml'
        file_paths = glob.glob(glob_str)
        file_paths = sorted(file_paths, key=functools.cmp_to_key(compare_path_file_name))
        return file_paths

    def _get_role_assiginments_file_paths(self):
        """
        Retrieve a list of paths for all the user role assignments.

        Note: Assignments are sorted in alphabetical order based on the username.

        :rtype: ``list``
        """
        glob_str = self._role_assignments_path + '*.yaml'
        file_paths = glob.glob(glob_str)
        file_paths = sorted(file_paths, key=functools.cmp_to_key(compare_path_file_name))
        return file_paths

    def _get_group_to_role_maps_file_paths(self):
        """
        Retrieve a list of paths for remote group to local role mapping assignment files.

        :rtype: ``list``
        """
        glob_str = self._role_maps_path + '*.yaml'
        file_paths = glob.glob(glob_str)
        file_paths = sorted(file_paths, key=functools.cmp_to_key(compare_path_file_name))
        return file_paths
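A short usage sketch, assuming cfg.CONF has already been parsed so that system.base_path points at a directory containing the rbac/ tree described above:

loader = RBACDefinitionsLoader()
definitions = loader.load()

for role_name in definitions['roles']:
    print('Loaded role definition: %s' % (role_name))

for username in definitions['role_assignments']:
    print('Loaded role assignment for user: %s' % (username))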
Example #53
File: base.py Project: ruslantum/st2
 def __init__(self):
     self._meta_loader = MetaLoader()
     self._pack_loader = ContentPackLoader()
Example #54
def main(metadata_path, output_path, print_source=False):
    metadata_path = os.path.abspath(metadata_path)
    metadata_dir = os.path.dirname(metadata_path)

    meta_loader = MetaLoader()
    data = meta_loader.load(metadata_path)

    action_name = data['name']
    entry_point = data['entry_point']

    workflow_metadata_path = os.path.join(metadata_dir, entry_point)
    chainspec = meta_loader.load(workflow_metadata_path)

    chain_holder = ChainHolder(chainspec, 'workflow')

    graph_label = '%s action-chain workflow visualization' % (action_name)

    graph_attr = {
        'rankdir': 'TD',
        'labelloc': 't',
        'fontsize': '15',
        'label': graph_label
    }
    node_attr = {}
    dot = Digraph(comment='Action chain work-flow visualization',
                  node_attr=node_attr,
                  graph_attr=graph_attr,
                  format='png')
    #  dot.body.extend(['rankdir=TD', 'size="10,5"'])

    # Add all nodes
    node = chain_holder.get_next_node()
    while node:
        dot.node(node.name, node.name)
        node = chain_holder.get_next_node(curr_node_name=node.name)

    # Add connections
    node = chain_holder.get_next_node()
    processed_nodes = set([node.name])
    nodes = [node]
    while nodes:
        previous_node = nodes.pop()
        success_node = chain_holder.get_next_node(
            curr_node_name=previous_node.name, condition='on-success')
        failure_node = chain_holder.get_next_node(
            curr_node_name=previous_node.name, condition='on-failure')

        # Add success node (if any)
        if success_node:
            dot.edge(previous_node.name,
                     success_node.name,
                     constraint='true',
                     color='green',
                     label='on success')
            if success_node.name not in processed_nodes:
                nodes.append(success_node)
                processed_nodes.add(success_node.name)

        # Add failure node (if any)
        if failure_node:
            dot.edge(previous_node.name,
                     failure_node.name,
                     constraint='true',
                     color='red',
                     label='on failure')
            if failure_node.name not in processed_nodes:
                nodes.append(failure_node)
                processed_nodes.add(failure_node.name)

    if print_source:
        print(dot.source)

    if output_path:
        output_path = os.path.join(output_path, action_name)
    else:
        output_path = os.path.join(os.getcwd(), action_name)

    dot.format = 'png'
    dot.render(output_path)

    print('Graph saved at %s' % (output_path + '.png'))
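A hedged invocation sketch for the visualization function above; the metadata path is a placeholder, and it assumes the graphviz Python package and the dot binary are installed:

# Renders <action name>.png into the current working directory (output_path is
# None, so the else branch above is taken) and also prints the DOT source.
main(metadata_path='packs/examples/actions/my_chain.yaml',
     output_path=None,
     print_source=True)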
Example #55
File: tester.py Project: nzlosh/st2
class RuleTester(object):
    def __init__(self, rule_file_path=None, rule_ref=None, trigger_instance_file_path=None,
                 trigger_instance_id=None):
        """
        :param rule_file_path: Path to the file containing rule definition.
        :type rule_file_path: ``str``

        :param trigger_instance_file_path: Path to the file containing trigger instance definition.
        :type trigger_instance_file_path: ``str``
        """
        self._rule_file_path = rule_file_path
        self._rule_ref = rule_ref
        self._trigger_instance_file_path = trigger_instance_file_path
        self._trigger_instance_id = trigger_instance_id
        self._meta_loader = MetaLoader()

    def evaluate(self):
        """
        Evaluate trigger instance against the rule.

        :return: ``True`` if the rule matches, ``False`` otherwise.
        :rtype: ``boolean``
        """

        rule_db = self._get_rule_db()
        trigger_instance_db, trigger_db = self._get_trigger_instance_db()

        # The trigger check needs to be performed here as that is not performed
        # by RulesMatcher.
        if rule_db.trigger != trigger_db.ref:
            LOG.info('rule.trigger "%s" and trigger.ref "%s" do not match.',
                     rule_db.trigger, trigger_db.ref)
            return False

        # Check if rule matches criteria.
        matcher = RulesMatcher(trigger_instance=trigger_instance_db, trigger=trigger_db,
                               rules=[rule_db], extra_info=True)
        matching_rules = matcher.get_matching_rules()

        # Rule does not match so early exit.
        if len(matching_rules) < 1:
            return False

        # Check if rule can be enforced
        enforcer = RuleEnforcer(trigger_instance=trigger_instance_db, rule=rule_db)

        runner_type_db = mock.Mock()
        runner_type_db.runner_parameters = {}
        action_db = mock.Mock()
        action_db.parameters = {}
        params = rule_db.action.parameters  # pylint: disable=no-member

        context, additional_contexts = enforcer.get_action_execution_context(action_db=action_db,
                                                                             trace_context=None)

        # Note: We only return partially resolved parameters.
        # To be able to return all parameters we would need access to corresponding ActionDB,
        # RunnerTypeDB and ConfigDB object, but this would add a dependency on the database and the
        # tool is meant to be used standalone.
        try:
            params = enforcer.get_resolved_parameters(action_db=action_db,
                                                      runnertype_db=runner_type_db,
                                                      params=params,
                                                      context=context,
                                                      additional_contexts=additional_contexts)

            LOG.info('Action parameters resolved to:')
            for param in six.iteritems(params):
                LOG.info('\t%s: %s', param[0], param[1])
            return True
        except (UndefinedError, ValueError) as e:
            LOG.error('Failed to resolve parameters\n\tOriginal error : %s', six.text_type(e))
            return False
        except:
            LOG.exception('Failed to resolve parameters.')
            return False

    def _get_rule_db(self):
        if self._rule_file_path:
            return self._get_rule_db_from_file(
                file_path=os.path.realpath(self._rule_file_path))
        elif self._rule_ref:
            return Rule.get_by_ref(self._rule_ref)
        raise ValueError('One of _rule_file_path or _rule_ref should be specified.')

    def _get_trigger_instance_db(self):
        if self._trigger_instance_file_path:
            return self._get_trigger_instance_db_from_file(
                file_path=os.path.realpath(self._trigger_instance_file_path))
        elif self._trigger_instance_id:
            trigger_instance_db = TriggerInstance.get_by_id(self._trigger_instance_id)
            trigger_db = Trigger.get_by_ref(trigger_instance_db.trigger)
            return trigger_instance_db, trigger_db
        raise ValueError('One of _trigger_instance_file_path or '
                         '_trigger_instance_id should be specified.')

    def _get_rule_db_from_file(self, file_path):
        data = self._meta_loader.load(file_path=file_path)
        pack = data.get('pack', 'unknown')
        name = data.get('name', 'unknown')
        trigger = data['trigger']['type']
        criteria = data.get('criteria', None)
        action = data.get('action', {})

        rule_db = RuleDB(pack=pack, name=name, trigger=trigger, criteria=criteria, action=action,
                         enabled=True)
        rule_db.id = 'rule_tester_rule'

        return rule_db

    def _get_trigger_instance_db_from_file(self, file_path):
        data = self._meta_loader.load(file_path=file_path)
        instance = TriggerInstanceDB(**data)
        instance.id = 'rule_tester_instance'

        trigger_ref = ResourceReference.from_string_reference(instance['trigger'])
        trigger_db = TriggerDB(pack=trigger_ref.pack, name=trigger_ref.name, type=trigger_ref.ref)
        return instance, trigger_db
Example #56
class RuleTester(object):
    def __init__(self, rule_file_path=None, rule_ref=None, trigger_instance_file_path=None,
                 trigger_instance_id=None):
        """
        :param rule_file_path: Path to the file containing rule definition.
        :type rule_file_path: ``str``

        :param trigger_instance_file_path: Path to the file containing trigger instance definition.
        :type trigger_instance_file_path: ``str``
        """
        self._rule_file_path = rule_file_path
        self._rule_ref = rule_ref
        self._trigger_instance_file_path = trigger_instance_file_path
        self._trigger_instance_id = trigger_instance_id
        self._meta_loader = MetaLoader()

    def evaluate(self):
        """
        Evaluate trigger instance against the rule.

        :return: ``True`` if the rule matches, ``False`` otherwise.
        :rtype: ``boolean``
        """

        rule_db = self._get_rule_db()
        trigger_instance_db, trigger_db = self._get_trigger_instance_db()

        # The trigger check needs to be performed here as that is not performed
        # by RulesMatcher.
        if rule_db.trigger != trigger_db.ref:
            LOG.info('rule.trigger "%s" and trigger.ref "%s" do not match.',
                     rule_db.trigger, trigger_db.ref)
            return False

        # Check if rule matches criteria.
        matcher = RulesMatcher(trigger_instance=trigger_instance_db, trigger=trigger_db,
                               rules=[rule_db], extra_info=True)
        matching_rules = matcher.get_matching_rules()

        # Rule does not match so early exit.
        if len(matching_rules) < 1:
            return False

        # Check if rule can be enforced
        try:
            enforcer = RuleEnforcer(trigger_instance=trigger_instance_db, rule=rule_db)
            params = enforcer.get_resolved_parameters()
            LOG.info('Action parameters resolved to:')
            for param in six.iteritems(params):
                LOG.info('\t%s: %s', param[0], param[1])
            return True
        except (UndefinedError, ValueError) as e:
            LOG.error('Failed to resolve parameters\n\tOriginal error : %s', str(e))
            return False
        except:
            LOG.exception('Failed to resolve parameters.')
            return False

    def _get_rule_db(self):
        if self._rule_file_path:
            return self._get_rule_db_from_file(
                file_path=os.path.realpath(self._rule_file_path))
        elif self._rule_ref:
            return Rule.get_by_ref(self._rule_ref)
        raise ValueError('One of _rule_file_path or _rule_ref should be specified.')

    def _get_trigger_instance_db(self):
        if self._trigger_instance_file_path:
            return self._get_trigger_instance_db_from_file(
                file_path=os.path.realpath(self._trigger_instance_file_path))
        elif self._trigger_instance_id:
            trigger_instance_db = TriggerInstance.get_by_id(self._trigger_instance_id)
            trigger_db = Trigger.get_by_ref(trigger_instance_db.trigger)
            return trigger_instance_db, trigger_db
        raise ValueError('One of _trigger_instance_file_path or '
                         '_trigger_instance_id should be specified.')

    def _get_rule_db_from_file(self, file_path):
        data = self._meta_loader.load(file_path=file_path)
        pack = data.get('pack', 'unknown')
        name = data.get('name', 'unknown')
        trigger = data['trigger']['type']
        criteria = data.get('criteria', None)
        action = data.get('action', {})

        rule_db = RuleDB(pack=pack, name=name, trigger=trigger, criteria=criteria, action=action,
                         enabled=True)

        return rule_db

    def _get_trigger_instance_db_from_file(self, file_path):
        data = self._meta_loader.load(file_path=file_path)
        instance = TriggerInstanceDB(**data)

        trigger_ref = ResourceReference.from_string_reference(instance['trigger'])
        trigger_db = TriggerDB(pack=trigger_ref.pack, name=trigger_ref.name, type=trigger_ref.ref)
        return instance, trigger_db
Example #57
File: base.py Project: sjoerdapp/st2
class ResourceRegistrar(object):
    ALLOWED_EXTENSIONS = []

    def __init__(self, use_pack_cache=True, fail_on_failure=False):
        """
        :param use_pack_cache: True to cache which packs have been registered in memory and make
                               sure packs are only registered once.
        :type use_pack_cache: ``bool``

        :param fail_on_failure: Throw an exception if resource registration fails.
        :type fail_on_failure: ``bool``
        """
        self._use_pack_cache = use_pack_cache
        self._fail_on_failure = fail_on_failure

        self._meta_loader = MetaLoader()
        self._pack_loader = ContentPackLoader()
        self._runner_loader = RunnersLoader()

    def get_resources_from_pack(self, resources_dir):
        resources = []
        for ext in self.ALLOWED_EXTENSIONS:
            resources_glob = resources_dir

            if resources_dir.endswith('/'):
                resources_glob = resources_dir + '*' + ext
            else:
                resources_glob = resources_dir + '/*' + ext

            resource_files = glob.glob(resources_glob)
            resources.extend(resource_files)

        resources = sorted(resources)
        return resources

    def get_registered_packs(self):
        """
        Return a list of registered packs.

        :rtype: ``list``
        """
        return REGISTERED_PACKS_CACHE.keys()

    def register_packs(self, base_dirs):
        """
        Register packs in all the provided directories.
        """
        packs = self._pack_loader.get_packs(base_dirs=base_dirs)

        registered_count = 0
        for pack_name, pack_path in six.iteritems(packs):
            self.register_pack(pack_name=pack_name, pack_dir=pack_path)
            registered_count += 1

        return registered_count

    def register_pack(self, pack_name, pack_dir):
        """
        Register pack in the provided directory.
        """
        if self._use_pack_cache and pack_name in REGISTERED_PACKS_CACHE:
            # This pack has already been registered during this register content run
            return

        LOG.debug('Registering pack: %s' % (pack_name))
        REGISTERED_PACKS_CACHE[pack_name] = True

        try:
            pack_db, _ = self._register_pack(pack_name=pack_name,
                                             pack_dir=pack_dir)
        except Exception:
            LOG.exception('Failed to register pack "%s"' % (pack_name))
            return None

        return pack_db

    def _register_pack(self, pack_name, pack_dir):
        """
        Register a pack and corresponding pack config schema (create a DB object in the system).

        Note: Pack registration now happens when registering the content and not when installing
        a pack using packs.install. Eventually this will be moved to the pack management API.
        """
        # 1. Register pack
        pack_db = self._register_pack_db(pack_name=pack_name,
                                         pack_dir=pack_dir)

        # 2. Register corresponding pack config schema
        config_schema_db = self._register_pack_config_schema_db(
            pack_name=pack_name, pack_dir=pack_dir)

        return pack_db, config_schema_db

    def _register_pack_db(self, pack_name, pack_dir):
        content = get_pack_metadata(pack_dir=pack_dir)

        # The rules for the pack ref are as follows:
        # 1. If the ref attribute is available, we use that
        # 2. If pack_name is available, we use that (this only applies to packs
        #    which are in sub-directories)
        # 3. If the ref attribute is not available, but the pack name is and it meets the
        #    valid name criteria, we use that
        content['ref'] = get_pack_ref_from_metadata(
            metadata=content, pack_directory_name=pack_name)

        # Include a list of pack files
        pack_file_list = get_file_list(directory=pack_dir,
                                       exclude_patterns=EXCLUDE_FILE_PATTERNS)
        content['files'] = pack_file_list

        pack_api = PackAPI(**content)
        pack_api.validate()
        pack_db = PackAPI.to_model(pack_api)

        try:
            pack_db.id = Pack.get_by_ref(content['ref']).id
        except StackStormDBObjectNotFoundError:
            LOG.debug('Pack %s not found. Creating new one.', pack_name)

        pack_db = Pack.add_or_update(pack_db)
        LOG.debug('Pack %s registered.' % (pack_name))
        return pack_db

    def _register_pack_config_schema_db(self, pack_name, pack_dir):
        config_schema_path = os.path.join(pack_dir, CONFIG_SCHEMA_FILE_NAME)

        if not os.path.isfile(config_schema_path):
            # Note: Config schema is optional
            return None

        content = {}
        values = self._meta_loader.load(config_schema_path)
        content['pack'] = pack_name
        content['attributes'] = values

        config_schema_api = ConfigSchemaAPI(**content)
        config_schema_db = ConfigSchemaAPI.to_model(config_schema_api)

        try:
            config_schema_db.id = ConfigSchema.get_by_pack(pack_name).id
        except StackStormDBObjectNotFoundError:
            LOG.debug('Config schema for pack %s not found. Creating new one.',
                      pack_name)

        config_schema_db = ConfigSchema.add_or_update(config_schema_db)
        LOG.debug('Config schema for pack %s registered.' % (pack_name))
        return config_schema_db

    def register_runner(self):
        pass
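
A brief usage sketch for this registrar (the subclass name, extensions and paths below are hypothetical; in st2 the concrete registrars, e.g. for aliases or rules, follow this pattern of overriding ALLOWED_EXTENSIONS and reusing register_pack()):

class YAMLResourceRegistrar(ResourceRegistrar):
    # Only pick up YAML definitions from a pack's resource directory.
    ALLOWED_EXTENSIONS = ['.yaml', '.yml']

registrar = YAMLResourceRegistrar(use_pack_cache=False)

# Register the pack itself (creates or updates the Pack DB entry) ...
pack_db = registrar.register_pack(pack_name='examples',
                                  pack_dir='/opt/stackstorm/packs/examples')

# ... then enumerate its resource files for per-resource registration.
resource_files = registrar.get_resources_from_pack(
    resources_dir='/opt/stackstorm/packs/examples/rules')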
Example #58
0
File: base.py Project: tzmvp/st2
class ResourceRegistrar(object):
    ALLOWED_EXTENSIONS = []

    def __init__(self,
                 use_pack_cache=True,
                 use_runners_cache=False,
                 fail_on_failure=False):
        """
        :param use_pack_cache: True to cache which packs have been registered in memory and to
                               make sure each pack is only registered once.
        :type use_pack_cache: ``bool``

        :param use_runners_cache: True to cache RunnerTypeDB objects in memory to reduce load on
                                  the database.
        :type use_runners_cache: ``bool``

        :param fail_on_failure: Throw an exception if resource registration fails.
        :type fail_on_failure: ``bool``
        """
        self._use_pack_cache = use_pack_cache
        self._use_runners_cache = use_runners_cache
        self._fail_on_failure = fail_on_failure

        self._meta_loader = MetaLoader()
        self._pack_loader = ContentPackLoader()

        # Maps runner name -> RunnerTypeDB
        self._runner_type_db_cache = {}

    def get_resources_from_pack(self, resources_dir):
        resources = []
        for ext in self.ALLOWED_EXTENSIONS:
            resources_glob = resources_dir

            if resources_dir.endswith('/'):
                resources_glob = resources_dir + '*' + ext
            else:
                resources_glob = resources_dir + '/*' + ext

            resource_files = glob.glob(resources_glob)
            resources.extend(resource_files)

        resources = sorted(resources)
        return resources

    def get_registered_packs(self):
        """
        Return a list of registered packs.

        :rtype: ``list``
        """
        return list(REGISTERED_PACKS_CACHE.keys())

    def register_packs(self, base_dirs):
        """
        Register packs in all the provided directories.
        """
        packs = self._pack_loader.get_packs(base_dirs=base_dirs)

        registered_count = 0
        for pack_name, pack_path in six.iteritems(packs):
            self.register_pack(pack_name=pack_name, pack_dir=pack_path)
            registered_count += 1

        return registered_count

    def register_pack(self, pack_name, pack_dir):
        """
        Register pack in the provided directory.
        """
        if self._use_pack_cache and pack_name in REGISTERED_PACKS_CACHE:
            # This pack has already been registered during this register content run
            return

        LOG.debug('Registering pack: %s' % (pack_name))
        REGISTERED_PACKS_CACHE[pack_name] = True

        try:
            pack_db, _ = self._register_pack(pack_name=pack_name,
                                             pack_dir=pack_dir)
        except Exception as e:
            if self._fail_on_failure:
                msg = 'Failed to register pack "%s": %s' % (pack_name, str(e))
                raise ValueError(msg)

            LOG.exception('Failed to register pack "%s"' % (pack_name))
            return None

        return pack_db

    def _register_pack(self, pack_name, pack_dir):
        """
        Register a pack and corresponding pack config schema (create a DB object in the system).

        Note: Pack registration now happens when registering the content and not when installing
        a pack using packs.install. Eventually this will be moved to the pack management API.
        """
        # 1. Register pack
        pack_db = self._register_pack_db(pack_name=pack_name,
                                         pack_dir=pack_dir)

        # Display a warning if the pack contains a deprecated config.yaml file. Support for
        # those files was removed in v2.4.0.
        config_path = os.path.join(pack_dir, 'config.yaml')
        if os.path.isfile(config_path):
            LOG.error(
                'Pack "%s" contains a deprecated config.yaml file (%s). '
                'Support for "config.yaml" files has been deprecated in StackStorm v1.6.0 '
                'in favor of config.schema.yaml config schema files and config files in the '
                '/opt/stackstorm/configs/ directory. Support for config.yaml files was '
                'removed in v2.4.0, so please migrate. For more '
                'information, please refer to %s' %
                (pack_db.name, config_path,
                 'https://docs.stackstorm.com/reference/pack_configs.html'))

        # 2. Register corresponding pack config schema
        config_schema_db = self._register_pack_config_schema_db(
            pack_name=pack_name, pack_dir=pack_dir)

        return pack_db, config_schema_db

    def _register_pack_db(self, pack_name, pack_dir):
        content = get_pack_metadata(pack_dir=pack_dir)

        # The rules for the pack ref are as follows:
        # 1. If the ref attribute is available, we use that
        # 2. If pack_name is available, we use that (this only applies to packs
        #    which are in sub-directories)
        # 3. If the ref attribute is not available, but the pack name is and it meets the
        #    valid name criteria, we use that
        content['ref'] = get_pack_ref_from_metadata(
            metadata=content, pack_directory_name=pack_name)

        # Include a list of pack files
        pack_file_list = get_file_list(directory=pack_dir,
                                       exclude_patterns=EXCLUDE_FILE_PATTERNS)
        content['files'] = pack_file_list
        content['path'] = pack_dir

        pack_api = PackAPI(**content)
        pack_api.validate()
        pack_db = PackAPI.to_model(pack_api)

        try:
            pack_db.id = Pack.get_by_ref(content['ref']).id
        except StackStormDBObjectNotFoundError:
            LOG.debug('Pack %s not found. Creating new one.', pack_name)

        pack_db = Pack.add_or_update(pack_db)
        LOG.debug('Pack %s registered.' % (pack_name))
        return pack_db

    def _register_pack_config_schema_db(self, pack_name, pack_dir):
        config_schema_path = os.path.join(pack_dir, CONFIG_SCHEMA_FILE_NAME)

        if not os.path.isfile(config_schema_path):
            # Note: Config schema is optional
            return None

        values = self._meta_loader.load(config_schema_path)

        if not values:
            raise ValueError('Config schema "%s" is empty and invalid.' %
                             (config_schema_path))

        content = {}
        content['pack'] = pack_name
        content['attributes'] = values

        config_schema_api = ConfigSchemaAPI(**content)
        config_schema_api = config_schema_api.validate()
        config_schema_db = ConfigSchemaAPI.to_model(config_schema_api)

        try:
            config_schema_db.id = ConfigSchema.get_by_pack(pack_name).id
        except StackStormDBObjectNotFoundError:
            LOG.debug('Config schema for pack %s not found. Creating new one.',
                      pack_name)

        config_schema_db = ConfigSchema.add_or_update(config_schema_db)
        LOG.debug('Config schema for pack %s registered.' % (pack_name))
        return config_schema_db
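
To round off the section, a short sketch of how fail_on_failure changes the behaviour of this newer registrar (the base directory is illustrative; with the default fail_on_failure=False a broken pack is only logged and skipped, whereas True makes register_pack() raise a ValueError that propagates out of register_packs()):

registrar = ResourceRegistrar(use_pack_cache=True,
                              use_runners_cache=True,
                              fail_on_failure=True)

try:
    count = registrar.register_packs(base_dirs=['/opt/stackstorm/packs'])
    print('Registered %d pack(s)' % count)
except ValueError as e:
    # The first pack that fails to register aborts the whole run.
    print('Pack registration failed: %s' % e)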