Example n. 1
0
def ext_pillar(minion_id,
               pillar,  # pylint: disable=W0613
               bucket,
               key=None,
               keyid=None,
               verify_ssl=True,
               location=None,
               multiple_env=False,
               environment='base',
               prefix='',
               service_url=None,
               kms_keyid=None,
               s3_cache_expire=30,  # cache for 30 seconds
               s3_sync_on_update=True):  # sync cache on update rather than jit

    '''
    Compile pillar data for ``minion_id`` from SLS files kept in an S3
    bucket, using a locally cached copy of the bucket contents.

    ``key``/``keyid``/``kms_keyid`` are the S3 credentials, ``bucket`` the
    bucket name, and ``prefix`` an optional sub-path inside the bucket.
    When ``multiple_env`` is set, each saltenv lives in its own
    sub-directory of the cache; otherwise everything belongs to
    ``environment``.
    '''

    s3_creds = S3Credentials(key, keyid, bucket, service_url, verify_ssl,
                             kms_keyid, location)

    # normpath is needed to remove appended '/' if root is empty string.
    pillar_dir = os.path.normpath(os.path.join(_get_cache_dir(), environment,
                                               bucket))
    if prefix:
        pillar_dir = os.path.normpath(os.path.join(pillar_dir, prefix))

    # Anti-recursion guard: the Pillar object re-calls ext_pillar
    # functions, so stop once our cache dir is already the pillar root.
    if __opts__['pillar_roots'].get(environment, []) == [pillar_dir]:
        return {}

    metadata = _init(s3_creds, bucket, multiple_env, environment, prefix, s3_cache_expire)

    if s3_sync_on_update:
        # sync the buckets to the local cache
        log.info('Syncing local pillar cache from S3...')
        for saltenv, env_meta in six.iteritems(metadata):
            for bucket, files in six.iteritems(_find_files(env_meta)):
                for file_path in files:
                    cached_file_path = _get_cached_file_name(bucket, saltenv,
                                                             file_path)
                    log.info('{0} - {1} : {2}'.format(bucket, saltenv,
                                                      file_path))
                    # load the file from S3 if not in the cache or too old
                    _get_file_from_s3(s3_creds, metadata, saltenv, bucket,
                                      file_path, cached_file_path)

        log.info('Sync local pillar cache from S3 completed.')

    opts = deepcopy(__opts__)
    # In multiple-env mode each saltenv has its own sub-directory.
    opts['pillar_roots'][environment] = [os.path.join(pillar_dir, environment)] if multiple_env else [pillar_dir]

    # Avoid recursively re-adding this same pillar
    opts['ext_pillar'] = [x for x in opts['ext_pillar'] if 's3' not in x]

    pil = Pillar(opts, __grains__, minion_id, environment)

    compiled_pillar = pil.compile_pillar()

    return compiled_pillar
Example n. 2
0
def ext_pillar(minion_id, pillar, repo_string):
    '''
    Compile pillar data for the minion from the configured git repository.

    ``repo_string`` is "<branch> <repo-url>".
    '''
    # repo_string is "<branch> <repo-url>"
    branch, repo_location = repo_string.strip().split()

    # The 'master' branch is exposed as the 'base' saltenv.
    env = 'base' if branch == 'master' else branch

    # Refresh the checkout first; give up quietly if the update failed.
    if not update(branch, repo_location):
        return {}

    repo = init(branch, repo_location)

    # Anti-recursion guard: the Pillar object re-calls ext_pillar
    # functions, so stop once our working dir is already the pillar root.
    if __opts__['pillar_roots'][env] == [repo.working_dir]:
        return {}

    opts = deepcopy(__opts__)
    opts['pillar_roots'][env] = [repo.working_dir]

    return Pillar(opts, __grains__, minion_id, 'base').compile_pillar()
Example n. 3
0
def ext_pillar(minion_id,
               repo_string,
               pillar_dirs):
    '''
    Compile pillar data for the minion from the configured git repository.

    ``repo_string`` is "<branch>[:<env>] <repo-url> [key=val ...]"; the
    only recognized extra parameter is ``root``, a sub-directory of the
    checkout to use as the pillar root.  ``pillar_dirs`` is a dict shared
    across calls, used to remember which (directory, branch) combos have
    already been processed and thereby break recursion.
    '''
    # Anti-recursion guard: when re-invoked without a pillar_dirs dict,
    # do nothing rather than loop forever.
    if pillar_dirs is None:
        return
    # split the branch, repo name and optional extra (key=val) parameters.
    options = repo_string.strip().split()
    branch_env = options[0]
    repo_location = options[1]
    root = ''

    for extraopt in options[2:]:
        # Support multiple key=val attributes as custom parameters.
        DELIM = '='
        if DELIM not in extraopt:
            log.error('Incorrectly formatted extra parameter. '
                      'Missing {0!r}: {1}'.format(DELIM, extraopt))
        key, val = _extract_key_val(extraopt, DELIM)
        if key == 'root':
            root = val
        else:
            log.warning('Unrecognized extra parameter: {0}'.format(key))

    # environment is "different" from the branch
    branch, _, environment = branch_env.partition(':')

    gitpil = GitPillar(branch, repo_location, __opts__)
    branch = gitpil.branch

    # Without an explicit env, 'master' maps to 'base' and any other
    # branch maps to an env of the same name.
    if environment == '':
        if branch == 'master':
            environment = 'base'
        else:
            environment = branch

    # normpath is needed to remove appended '/' if root is empty string.
    pillar_dir = os.path.normpath(os.path.join(gitpil.working_dir, root))

    pillar_dirs.setdefault(pillar_dir, {})

    if pillar_dirs[pillar_dir].get(branch, False):
        return {}  # we've already seen this combo

    # Mark this (dir, branch) combo as processed for later calls.
    pillar_dirs[pillar_dir].setdefault(branch, True)

    # Don't recurse forever-- the Pillar object will re-call the ext_pillar
    # function
    if __opts__['pillar_roots'].get(branch, []) == [pillar_dir]:
        return {}

    opts = deepcopy(__opts__)

    opts['pillar_roots'][environment] = [pillar_dir]

    pil = Pillar(opts, __grains__, minion_id, branch)

    return pil.compile_pillar()
Example n. 4
0
def ext_pillar(minion_id, repo, pillar_dirs):
    '''
    Checkout the ext_pillar sources and compile the resulting pillar SLS
    '''
    # A plain string means the pre-Beryllium configuration schema.
    if isinstance(repo, six.string_types):
        return _legacy_git_pillar(minion_id, repo, pillar_dirs)

    opts = copy.deepcopy(__opts__)
    opts['pillar_roots'] = {}
    gitfs_pillar = salt.utils.gitfs.GitPillar(opts)
    gitfs_pillar.init_remotes(repo, PER_REMOTE_OVERRIDES)
    gitfs_pillar.checkout()

    merge_strategy = __opts__.get(
        'pillar_source_merging_strategy',
        'smart'
    )

    merged = {}
    for pillar_dir, env in six.iteritems(gitfs_pillar.pillar_dirs):
        log.debug(
            'git_pillar is processing pillar SLS from {0} for pillar '
            'env \'{1}\''.format(pillar_dir, env)
        )
        # Every checked-out directory mapped to this env serves as a
        # pillar root while compiling it.
        same_env_dirs = [
            d for (d, e) in six.iteritems(gitfs_pillar.pillar_dirs)
            if env == e
        ]
        opts['pillar_roots'] = {env: same_env_dirs}
        local_pillar = Pillar(opts, __grains__, minion_id, env)
        merged = salt.utils.dictupdate.merge(
            merged,
            local_pillar.compile_pillar(ext=False),
            strategy=merge_strategy
        )
    return merged
Example n. 5
0
def ext_pillar(minion_id,
               pillar,  # pylint: disable=W0613
               bucket,
               key,
               keyid,
               verify_ssl,
               multiple_env=False,
               environment='base',
               prefix='',
               service_url=None):
    '''
    Compile pillar data for the minion from SLS files kept in an S3
    bucket, using a locally cached copy of the bucket contents.

    ``key``/``keyid`` are the S3 credentials and ``prefix`` an optional
    sub-path inside the bucket.  With ``multiple_env`` enabled the bucket
    metadata is split per saltenv; otherwise everything belongs to
    ``environment``.
    '''

    s3_creds = S3Credentials(key, keyid, bucket, service_url, verify_ssl)

    # normpath is needed to remove appended '/' if root is empty string.
    pillar_dir = os.path.normpath(os.path.join(_get_cache_dir(), environment,
                                               bucket))
    if prefix:
        pillar_dir = os.path.normpath(os.path.join(pillar_dir, prefix))

    # Anti-recursion guard: the Pillar object re-calls ext_pillar
    # functions, so stop once our cache dir is already the pillar root.
    if __opts__['pillar_roots'].get(environment, []) == [pillar_dir]:
        return {}

    metadata = _init(s3_creds, bucket, multiple_env, environment, prefix)

    # NOTE(review): _s3_sync_on_update is a module-level flag; presumably
    # populated from master config -- TODO confirm where it is set.
    if _s3_sync_on_update:
        # sync the buckets to the local cache
        log.info('Syncing local pillar cache from S3...')
        # .items() instead of the Python-2-only .iteritems() so this also
        # runs under Python 3 (semantics unchanged on Python 2).
        for saltenv, env_meta in metadata.items():
            for bucket, files in _find_files(env_meta).items():
                for file_path in files:
                    cached_file_path = _get_cached_file_name(bucket, saltenv,
                                                             file_path)
                    log.info('{0} - {1} : {2}'.format(bucket, saltenv,
                                                      file_path))
                    # load the file from S3 if not in the cache or too old
                    _get_file_from_s3(s3_creds, metadata, saltenv, bucket,
                                      file_path, cached_file_path)

        log.info('Sync local pillar cache from S3 completed.')

    opts = deepcopy(__opts__)
    opts['pillar_roots'][environment] = [pillar_dir]

    pil = Pillar(opts, __grains__, minion_id, environment)

    compiled_pillar = pil.compile_pillar()

    return compiled_pillar
Example n. 6
0
    def test_from_upper(self):
        '''Check whole calling stack from parent Pillar instance

        This test is closer to what happens in real life, and demonstrates
        how ``compile_pillar()`` is called twice.

        This kind of test should/would become non-necessary, once git_pillar,
        all these pillar are called exactly in the same way (git is an
        exception for now), and don't recurse.
        '''
        # Register this repo as a git ext_pillar and compile through a
        # top-level Pillar instance instead of calling ext_pillar directly.
        git_pillar.__opts__['ext_pillar'] = [
            dict(git=self.conf_line)
        ]
        pil = Pillar(git_pillar.__opts__,
                     git_pillar.__grains__,
                     'myminion', None)
        self.assertEqual(PILLAR_CONTENT, pil.compile_pillar(pillar_dirs={}))
Example n. 7
0
def ext_pillar(minion_id, repo, pillar_dirs):
    '''
    Compile pillar data from the configured git remotes.
    '''
    # A string config means the pre-Beryllium (legacy) schema.
    if isinstance(repo, six.string_types):
        return _legacy_git_pillar(minion_id, repo, pillar_dirs)

    opts = copy.deepcopy(__opts__)
    opts['pillar_roots'] = {}
    gitfs_pillar = salt.utils.gitfs.GitPillar(opts)
    gitfs_pillar.init_remotes(repo, PER_REMOTE_PARAMS)
    gitfs_pillar.checkout()

    combined = {}
    # Compile each checked-out pillar dir in its own environment and
    # fold the results together (later dirs overwrite earlier keys).
    for pillar_dir, env in six.iteritems(gitfs_pillar.pillar_dirs):
        opts['pillar_roots'] = {env: [pillar_dir]}
        combined.update(
            Pillar(opts, __grains__, minion_id, env).compile_pillar(ext=False)
        )
    return combined
Example n. 8
0
def ext_pillar(minion_id, pillar, repo_string):
    '''
    Compile pillar data for the minion from the configured git repository.

    ``repo_string`` is "<branch> <repo-url> [key=val ...]"; the only
    recognized extra parameter is ``root``, a sub-directory of the
    checkout to use as the pillar root.
    '''
    # split the branch, repo name and optional extra (key=val) parameters.
    options = repo_string.strip().split()
    branch = options[0]
    repo_location = options[1]
    root = ''

    for extraopt in options[2:]:
        # Support multiple key=val attributes as custom parameters.
        DELIM = '='
        if DELIM not in extraopt:
            log.error(("Incorrectly formatted extra parameter."
                       " Missing '%s': %s"), DELIM, extraopt)
        key, val = _extract_key_val(extraopt, DELIM)
        if key == 'root':
            root = val
        else:
            log.warning("Unrecognized extra parameter: %s", key)

    gitpil = GitPillar(branch, repo_location, __opts__)

    # environment is "different" from the branch: 'master' is exposed as
    # the 'base' saltenv.  (Conditional expression instead of the obscure
    # `cond and a or b` idiom.)
    branch = 'base' if branch == 'master' else branch

    # normpath is needed to remove appended '/' if root is empty string.
    pillar_dir = os.path.normpath(os.path.join(gitpil.working_dir, root))

    # Don't recurse forever-- the Pillar object will re-call the ext_pillar
    # function
    if __opts__['pillar_roots'].get(branch, []) == [pillar_dir]:
        return {}

    gitpil.update()

    opts = deepcopy(__opts__)

    opts['pillar_roots'][branch] = [pillar_dir]

    pil = Pillar(opts, __grains__, minion_id, branch)

    return pil.compile_pillar()
def ext_pillar(minion_id,
               pillar,  # pylint: disable=W0613
               repo_string):
    '''
    Compile pillar data for the minion from the configured SVN repository.

    ``repo_string`` is "<branch> <repo-url> [key=val ...]"; the only
    recognized extra parameter is ``root``, a sub-directory of the
    checkout to use as the pillar root.
    '''
    # split the branch, repo name and optional extra (key=val) parameters.
    options = repo_string.strip().split()
    branch = options[0]
    repo_location = options[1]
    root = ''

    for extraopt in options[2:]:
        # Support multiple key=val attributes as custom parameters.
        DELIM = '='
        if DELIM not in extraopt:
            log.error('Incorrectly formatted extra parameter. '
                      'Missing {0!r}: {1}'.format(DELIM, extraopt))
        key, val = _extract_key_val(extraopt, DELIM)
        if key == 'root':
            root = val
        else:
            log.warning('Unrecognized extra parameter: {0}'.format(key))

    svnpil = SvnPillar(branch, repo_location, root, __opts__)

    # environment is "different" from the branch: 'trunk' is exposed as
    # the 'base' saltenv.  (Conditional expression instead of the obscure
    # `cond and a or b` idiom.)
    branch = 'base' if branch == 'trunk' else branch

    pillar_dir = svnpil.pillar_dir()
    log.debug("[pillar_roots][{0}] = {1}".format(branch, pillar_dir))

    # Don't recurse forever-- the Pillar object will re-call the ext_pillar
    # function
    if __opts__['pillar_roots'].get(branch, []) == [pillar_dir]:
        return {}
    svnpil.update()
    opts = deepcopy(__opts__)
    opts['pillar_roots'][branch] = [pillar_dir]
    pil = Pillar(opts, __grains__, minion_id, branch)
    return pil.compile_pillar()
Example n. 10
0
def ext_pillar(pillar, repo_string):
    '''
    Compile pillar data from the configured git repository.

    ``repo_string`` is "<branch> <repo-url>".  The minion id is taken
    from ``__grains__['id']`` since this signature has no ``minion_id``
    parameter.
    '''
    # split the branch and repo name
    branch, repo_location = repo_string.strip().split()

    # environment is "different" from the branch
    branch_env = branch
    if branch_env == 'master':
        branch_env = 'base'

    # make sure you have the branch
    # NOTE(review): this tests branch_env (master already mapped to
    # 'base') against envs() -- verify envs() performs the same mapping.
    if branch_env not in envs(branch, repo_location):
        # don't have that branch
        logging.warning('Unable to get branch {0} of git repo {1}, branch does not exist'.format(branch, repo_location))
        return {}

    # get the repo
    repo = init(branch, repo_location)

    # Don't recurse forever-- the Pillar object will re-call the ext_pillar function
    if __opts__['pillar_roots'][branch_env] == [repo.working_dir]:
        return {}

    update(branch, repo_location)
    git_ = repo.git

    git_.checkout(branch)

    opts = deepcopy(__opts__)

    opts['pillar_roots'][branch_env] = [repo.working_dir]

    pil = Pillar(opts, __grains__, __grains__['id'], 'base')

    return pil.compile_pillar()
Example n. 11
0
def ext_pillar(minion_id, pillar, repo_string):
    '''
    Compile pillar data for the minion from the configured git repository.

    ``repo_string`` is "<branch> <repo-url>".
    '''
    # split the branch and repo name
    branch, repo_location = repo_string.strip().split()

    gitpil = GitPillar(branch, repo_location, __opts__)

    # environment is "different" from the branch: 'master' is exposed as
    # the 'base' saltenv.  (Conditional expression instead of the obscure
    # `cond and a or b` idiom.)
    branch = 'base' if branch == 'master' else branch

    # Don't recurse forever-- the Pillar object will re-call the ext_pillar
    # function
    if __opts__['pillar_roots'].get(branch, []) == [gitpil.working_dir]:
        return {}

    opts = deepcopy(__opts__)

    opts['pillar_roots'][branch] = [gitpil.working_dir]

    pil = Pillar(opts, __grains__, minion_id, 'base')

    return pil.compile_pillar()
Example n. 12
0
def _legacy_git_pillar(minion_id, repo_string, pillar_dirs):
    '''
    Support pre-Beryllium config schema

    ``repo_string`` is "<branch>[:<env>] <repo-url> [key=val ...]"; the
    only recognized extra parameter is ``root``, a sub-directory of the
    checkout to use as the pillar root.  ``pillar_dirs`` is a dict shared
    across calls that tracks (directory, branch) combos already processed
    so re-entrant calls do not recurse forever.
    '''
    salt.utils.warn_until(
        'Oxygen',
        'The git ext_pillar configuration is deprecated. Please refer to the '
        'documentation at '
        'https://docs.saltstack.com/en/latest/ref/pillar/all/salt.pillar.git_pillar.html '
        'for more information. This configuration will no longer be supported '
        'as of the Oxygen release of Salt.')
    # Anti-recursion guard: when re-invoked without a pillar_dirs dict,
    # do nothing.
    if pillar_dirs is None:
        return
    # split the branch, repo name and optional extra (key=val) parameters.
    options = repo_string.strip().split()
    branch_env = options[0]
    repo_location = options[1]
    root = ''

    for extraopt in options[2:]:
        # Support multiple key=val attributes as custom parameters.
        DELIM = '='
        if DELIM not in extraopt:
            log.error(
                'Legacy git_pillar: Incorrectly formatted extra parameter '
                '\'%s\' within \'%s\' missing \'%s\')', extraopt, repo_string,
                DELIM)
        key, val = _extract_key_val(extraopt, DELIM)
        if key == 'root':
            root = val
        else:
            log.error(
                'Legacy git_pillar: Unrecognized extra parameter \'%s\' '
                'in \'%s\'', key, repo_string)

    # environment is "different" from the branch
    cfg_branch, _, environment = branch_env.partition(':')

    gitpil = _LegacyGitPillar(cfg_branch, repo_location, __opts__)
    branch = gitpil.branch

    # Without an explicit env, 'master' maps to 'base' and any other
    # branch maps to an env of the same name.
    if environment == '':
        if branch == 'master':
            environment = 'base'
        else:
            environment = branch

    # normpath is needed to remove appended '/' if root is empty string.
    pillar_dir = os.path.normpath(os.path.join(gitpil.working_dir, root))
    log.debug('Legacy git_pillar: pillar_dir for \'%s\' is \'%s\'',
              repo_string, pillar_dir)
    log.debug('Legacy git_pillar: branch for \'%s\' is \'%s\'', repo_string,
              branch)

    pillar_dirs.setdefault(pillar_dir, {})

    # A dynamic '__env__' branch is always refreshed; otherwise skip
    # combos that were already processed in this run.
    if cfg_branch == '__env__' and branch not in ['master', 'base']:
        gitpil.update()
    elif pillar_dirs[pillar_dir].get(branch, False):
        log.debug('Already processed pillar_dir \'%s\' for \'%s\'', pillar_dir,
                  repo_string)
        return {}  # we've already seen this combo

    pillar_dirs[pillar_dir].setdefault(branch, True)

    # Don't recurse forever-- the Pillar object will re-call the ext_pillar
    # function
    if __opts__['pillar_roots'].get(branch, []) == [pillar_dir]:
        return {}

    opts = copy.deepcopy(__opts__)

    opts['pillar_roots'][environment] = [pillar_dir]
    opts['__git_pillar'] = True

    pil = Pillar(opts, __grains__, minion_id, branch)

    return pil.compile_pillar(ext=False)
Example n. 13
0
    def test_no_loop(self):
        '''Check that the reinstantiation of a pillar object does not recurse.

        This test goes in great details of patching that the dedicated
        utilities might do in a simpler way.
        Namely, we replace the main ``ext_pillar`` entry function by one
        that keeps count of its calls.

        Otherwise, the fact that the :class:`MaximumRecursion` error is caught
        can go in the way on the testing.

        On the current code base, this test fails if the two first lines of
        :func:``git_pillar.ext_pillar`::

            if pillar_dirs is None:
                return

        are replaced by::

            if pillar_dirs is None:
                pillar_dirs = {}

        .. note:: the explicit anti-recursion protection does not prevent
                  looping between two different Git pillars.

        This test will help subsequent refactors, and also as a base for other
        external pillars of the same kind.
        '''
        # Configure two git ext_pillars so that a recursion bug would
        # ping-pong between them.
        repo2 = os.path.join(self.tmpdir, 'repo_pillar2')
        conf_line2 = 'master file://{0}'.format(repo2)
        subprocess.check_call(['git', 'clone', self.repo_path, repo2])
        git_pillar.__opts__['ext_pillar'] = [
            dict(git=self.conf_line),
            dict(git=conf_line2),
        ]
        git_pillar._update(*conf_line2.split(None, 1))

        pil = Pillar(git_pillar.__opts__,
                     git_pillar.__grains__,
                     'myminion', 'base')

        orig_ext_pillar = pil.ext_pillars['git']
        orig_ext_pillar.count = 0

        def ext_pillar_count_calls(minion_id, repo_string, pillar_dirs):
            orig_ext_pillar.count += 1
            if orig_ext_pillar.count > 6:
                # going all the way to an infinite loop is harsh on the
                # test machine
                raise RuntimeError("Infinite loop detected")
            return orig_ext_pillar(minion_id, repo_string, pillar_dirs)

        from salt.loader import LazyLoader
        orig_getitem = LazyLoader.__getitem__

        # Serve the counting wrapper in place of the real git ext_pillar.
        def __getitem__(self, key):
            if key == 'git.ext_pillar':
                return ext_pillar_count_calls
            return orig_getitem(self, key)

        try:
            LazyLoader.__getitem__ = __getitem__
            self.assertEqual(PILLAR_CONTENT, pil.compile_pillar(pillar_dirs={}))
            self.assertTrue(orig_ext_pillar.count < 7)
        finally:
            LazyLoader.__getitem__ = orig_getitem
Example n. 14
0
def ext_pillar(
    minion_id,
    pillar,  # pylint: disable=W0613
    container,
    connection_string,
    multiple_env=False,
    environment="base",
    blob_cache_expire=30,
    blob_sync_on_update=True,
):
    """
    Compile pillar data for the minion from SLS files kept in an Azure
    Blob container, using a locally cached copy of the container contents.

    :param container: The name of the target Azure Blob Container.

    :param connection_string: The connection string to use to access the specified Azure Blob Container.

    :param multiple_env: Specifies whether the pillar should interpret top level folders as pillar environments.
        Defaults to false.

    :param environment: Specifies which environment the container represents when in single environment mode. Defaults
        to 'base' and is ignored if multiple_env is set as True.

    :param blob_cache_expire: Specifies expiration time of the Azure Blob metadata cache file. Defaults to 30s.

    :param blob_sync_on_update: Specifies if the cache is synced on update. Defaults to True.

    """
    # normpath is needed to remove appended '/' if root is empty string.
    pillar_dir = os.path.normpath(
        os.path.join(_get_cache_dir(), environment, container))

    # Anti-recursion guard: the Pillar object re-calls ext_pillar
    # functions, so stop once our cache dir is already the pillar root.
    if __opts__["pillar_roots"].get(environment, []) == [pillar_dir]:
        return {}

    metadata = _init(connection_string, container, multiple_env, environment,
                     blob_cache_expire)

    log.debug("Blob metadata: %s", metadata)

    if blob_sync_on_update:
        # sync the containers to the local cache
        log.info("Syncing local pillar cache from Azure Blob...")
        for saltenv, env_meta in metadata.items():
            for container, files in _find_files(env_meta).items():
                for file_path in files:
                    cached_file_path = _get_cached_file_name(
                        container, saltenv, file_path)
                    log.info("%s - %s : %s", container, saltenv, file_path)
                    # load the file from Azure Blob if not in the cache or too old
                    _get_file_from_blob(
                        connection_string,
                        metadata,
                        saltenv,
                        container,
                        file_path,
                        cached_file_path,
                    )

        log.info("Sync local pillar cache from Azure Blob completed.")

    opts = deepcopy(__opts__)
    # In multiple-env mode each saltenv has its own sub-directory.
    opts["pillar_roots"][environment] = ([
        os.path.join(pillar_dir, environment)
    ] if multiple_env else [pillar_dir])

    # Avoid recursively re-adding this same pillar
    opts["ext_pillar"] = [
        x for x in opts["ext_pillar"] if "azureblob" not in x
    ]

    pil = Pillar(opts, __grains__, minion_id, environment)

    compiled_pillar = pil.compile_pillar(ext=False)

    return compiled_pillar
Example n. 15
0
    def test_no_loop(self):
        '''Check that the reinstantiation of a pillar object does not recurse.

        This test goes in great details of patching that the dedicated
        utilities might do in a simpler way.
        Namely, we replace the main ``ext_pillar`` entry function by one
        that keeps count of its calls.

        Otherwise, the fact that the :class:`MaximumRecursion` error is caught
        can go in the way on the testing.

        On the current code base, this test fails if the two first lines of
        :func:``git_pillar.ext_pillar`::

            if pillar_dirs is None:
                return

        are replaced by::

            if pillar_dirs is None:
                pillar_dirs = {}

        .. note:: the explicit anti-recursion protection does not prevent
                  looping between two different Git pillars.

        This test will help subsequent refactors, and also as a base for other
        external pillars of the same kind.
        '''
        # Configure two git ext_pillars so that a recursion bug would
        # ping-pong between them.
        repo2 = os.path.join(self.tmpdir, 'repo_pillar2')
        conf_line2 = 'master file://{0}'.format(repo2)
        subprocess.check_call(['git', 'clone', self.repo_path, repo2])
        git_pillar.__opts__['ext_pillar'] = [
            dict(git=self.conf_line),
            dict(git=conf_line2),
        ]

        pil = Pillar(git_pillar.__opts__,
                     git_pillar.__grains__,
                     'myminon', 'base')

        orig_ext_pillar = pil.ext_pillars['git']
        orig_ext_pillar.count = 0

        def ext_pillar_count_calls(minion_id, repo_string, pillar_dirs):
            orig_ext_pillar.count += 1
            if orig_ext_pillar.count > 6:
                # going all the way to an infinite loop is harsh on the
                # test machine
                raise RuntimeError("Infinite loop detected")
            return orig_ext_pillar(minion_id, repo_string, pillar_dirs)

        from salt.loader import LazyLoader
        orig_getitem = LazyLoader.__getitem__

        # Serve the counting wrapper in place of the real git ext_pillar.
        def __getitem__(self, key):
            if key == 'git.ext_pillar':
                return ext_pillar_count_calls
            return orig_getitem(self, key)

        # Restore the class attribute even when an assertion fails, so a
        # failure here cannot leak the patched loader into other tests.
        try:
            LazyLoader.__getitem__ = __getitem__
            self.assertEqual(PILLAR_CONTENT, pil.compile_pillar(pillar_dirs={}))
            self.assertTrue(orig_ext_pillar.count < 7)
        finally:
            LazyLoader.__getitem__ = orig_getitem
Example n. 16
0
def _legacy_git_pillar(minion_id, repo_string, pillar_dirs):
    '''
    Support pre-Beryllium config schema

    ``repo_string`` is "<branch>[:<env>] <repo-url> [key=val ...]"; the
    only recognized extra parameter is ``root``, a sub-directory of the
    checkout to use as the pillar root.  ``pillar_dirs`` is a dict shared
    across calls that tracks (directory, branch) combos already processed
    so re-entrant calls do not recurse forever.
    '''
    # Anti-recursion guard: when re-invoked without a pillar_dirs dict,
    # do nothing.
    if pillar_dirs is None:
        return
    # split the branch, repo name and optional extra (key=val) parameters.
    options = repo_string.strip().split()
    branch_env = options[0]
    repo_location = options[1]
    root = ''

    for extraopt in options[2:]:
        # Support multiple key=val attributes as custom parameters.
        DELIM = '='
        if DELIM not in extraopt:
            log.error('Incorrectly formatted extra parameter. '
                      'Missing \'{0}\': {1}'.format(DELIM, extraopt))
        key, val = _extract_key_val(extraopt, DELIM)
        if key == 'root':
            root = val
        else:
            log.warning('Unrecognized extra parameter: {0}'.format(key))

    # environment is "different" from the branch
    cfg_branch, _, environment = branch_env.partition(':')

    gitpil = _LegacyGitPillar(cfg_branch, repo_location, __opts__)
    branch = gitpil.branch

    # Without an explicit env, 'master' maps to 'base' and any other
    # branch maps to an env of the same name.
    if environment == '':
        if branch == 'master':
            environment = 'base'
        else:
            environment = branch

    # normpath is needed to remove appended '/' if root is empty string.
    pillar_dir = os.path.normpath(os.path.join(gitpil.working_dir, root))

    pillar_dirs.setdefault(pillar_dir, {})

    # A dynamic '__env__' branch is always refreshed; otherwise skip
    # combos that were already processed in this run.
    if cfg_branch == '__env__' and branch not in ['master', 'base']:
        gitpil.update()
    elif pillar_dirs[pillar_dir].get(branch, False):
        return {}  # we've already seen this combo

    pillar_dirs[pillar_dir].setdefault(branch, True)

    # Don't recurse forever-- the Pillar object will re-call the ext_pillar
    # function
    if __opts__['pillar_roots'].get(branch, []) == [pillar_dir]:
        return {}

    opts = copy.deepcopy(__opts__)

    opts['pillar_roots'][environment] = [pillar_dir]
    opts['__git_pillar'] = True

    pil = Pillar(opts, __grains__, minion_id, branch)

    return pil.compile_pillar(ext=False)
Example n. 17
0
def ext_pillar(minion_id, pillar, *repos):  # pylint: disable=unused-argument
    '''
    Checkout the ext_pillar sources and compile the resulting pillar SLS

    ``repos`` are the configured git_pillar remotes.  Each checked-out
    pillar directory is compiled in its own saltenv and the results are
    merged using the configured ``pillar_source_merging_strategy``.
    '''
    opts = copy.deepcopy(__opts__)
    opts['pillar_roots'] = {}
    opts['__git_pillar'] = True
    git_pillar = salt.utils.gitfs.GitPillar(
        opts,
        repos,
        per_remote_overrides=PER_REMOTE_OVERRIDES,
        per_remote_only=PER_REMOTE_ONLY,
        global_only=GLOBAL_ONLY)
    if __opts__.get('__role') == 'minion':
        # If masterless, fetch the remotes. We'll need to remove this once
        # we make the minion daemon able to run standalone.
        git_pillar.fetch_remotes()
    git_pillar.checkout()
    ret = {}
    merge_strategy = __opts__.get('pillar_source_merging_strategy', 'smart')
    merge_lists = __opts__.get('pillar_merge_lists', False)
    for pillar_dir, env in six.iteritems(git_pillar.pillar_dirs):
        # Map env if env == '__env__' before checking the env value
        if env == '__env__':
            env = opts.get('pillarenv') \
                or opts.get('saltenv') \
                or opts.get('git_pillar_base')
            log.debug('__env__ maps to %s', env)

        # If pillarenv is set, only grab pillars with that match pillarenv
        if opts['pillarenv'] and env != opts['pillarenv']:
            log.debug(
                'env \'%s\' for pillar dir \'%s\' does not match '
                'pillarenv \'%s\', skipping', env, pillar_dir,
                opts['pillarenv'])
            continue
        if pillar_dir in git_pillar.pillar_linked_dirs:
            log.debug(
                'git_pillar is skipping processing on %s as it is a '
                'mounted repo', pillar_dir)
            continue
        else:
            log.debug(
                'git_pillar is processing pillar SLS from %s for pillar '
                'env \'%s\'', pillar_dir, env)

        pillar_roots = [pillar_dir]

        if __opts__['git_pillar_includes']:
            # Add the rest of the pillar_dirs in this environment to the
            # list, excluding the current pillar_dir being processed. This
            # is because it was already specified above as the first in the
            # list, so that its top file is sourced from the correct
            # location and not from another git_pillar remote.
            pillar_roots.extend([
                d for (d, e) in six.iteritems(git_pillar.pillar_dirs)
                if env == e and d != pillar_dir
            ])

        opts['pillar_roots'] = {env: pillar_roots}

        local_pillar = Pillar(opts, __grains__, minion_id, env)
        ret = salt.utils.dictupdate.merge(
            ret,
            local_pillar.compile_pillar(ext=False),
            strategy=merge_strategy,
            merge_lists=merge_lists)
    return ret
Example n. 18
0
def ext_pillar(
        minion_id,
        pillar,  # pylint: disable=W0613
        bucket,
        key=None,
        keyid=None,
        verify_ssl=True,
        location=None,
        multiple_env=False,
        environment='base',
        prefix='',
        service_url=None,
        kms_keyid=None,
        s3_cache_expire=30,  # cache for 30 seconds
        s3_sync_on_update=True,  # sync cache on update rather than jit
        path_style=False,
        https_enable=True):
    '''
    Compile pillar data for ``minion_id`` from SLS files kept in an S3
    bucket, using a locally cached copy of the bucket contents.

    ``key``/``keyid``/``kms_keyid`` are the S3 credentials, ``bucket`` the
    bucket name, and ``prefix`` an optional sub-path inside the bucket.
    When ``multiple_env`` is set, each saltenv lives in its own
    sub-directory of the cache; otherwise everything belongs to
    ``environment``.
    '''

    s3_creds = S3Credentials(key, keyid, bucket, service_url, verify_ssl,
                             kms_keyid, location, path_style, https_enable)

    # normpath is needed to remove appended '/' if root is empty string.
    pillar_dir = os.path.normpath(
        os.path.join(_get_cache_dir(), environment, bucket))
    if prefix:
        pillar_dir = os.path.normpath(os.path.join(pillar_dir, prefix))

    # Anti-recursion guard: the Pillar object re-calls ext_pillar
    # functions, so stop once our cache dir is already the pillar root.
    if __opts__['pillar_roots'].get(environment, []) == [pillar_dir]:
        return {}

    metadata = _init(s3_creds, bucket, multiple_env, environment, prefix,
                     s3_cache_expire)

    if s3_sync_on_update:
        # sync the buckets to the local cache
        log.info('Syncing local pillar cache from S3...')
        for saltenv, env_meta in six.iteritems(metadata):
            for bucket, files in six.iteritems(_find_files(env_meta)):
                for file_path in files:
                    cached_file_path = _get_cached_file_name(
                        bucket, saltenv, file_path)
                    log.info('{0} - {1} : {2}'.format(bucket, saltenv,
                                                      file_path))
                    # load the file from S3 if not in the cache or too old
                    _get_file_from_s3(s3_creds, metadata, saltenv, bucket,
                                      file_path, cached_file_path)

        log.info('Sync local pillar cache from S3 completed.')

    opts = deepcopy(__opts__)
    # In multiple-env mode each saltenv has its own sub-directory.
    opts['pillar_roots'][environment] = [
        os.path.join(pillar_dir, environment)
    ] if multiple_env else [pillar_dir]

    # Avoid recursively re-adding this same pillar
    opts['ext_pillar'] = [x for x in opts['ext_pillar'] if 's3' not in x]

    pil = Pillar(opts, __grains__, minion_id, environment)

    compiled_pillar = pil.compile_pillar(ext=False)

    return compiled_pillar