Ejemplo n.º 1
0
    def _get_group_variables(self, groupname, vault_password=None):
        """Collect the variables that apply to a single group.

        Merge order (later merges win): vars-plugin results, then the
        variables set by inventory parsers, then group_vars/ files.
        Raises Exception when the group is unknown.
        """
        group = self.get_group(groupname)
        if group is None:
            raise Exception("group not found: %s" % groupname)

        merged = {}

        # Ask every vars plugin that implements get_group_vars for
        # group-specific variables and fold each result in.
        for plugin in self._vars_plugins:
            if not hasattr(plugin, 'get_group_vars'):
                continue
            result = plugin.get_group_vars(group, vault_password=vault_password)
            if result is not None:
                merged = utils.combine_vars(merged, result)

        # Variables set by the inventory parsers themselves.
        merged = utils.combine_vars(merged, group.get_variables())

        # Finally, variables loaded from group_vars/ files.
        merged = utils.combine_vars(merged, self.get_group_vars(group))

        return merged
Ejemplo n.º 2
0
    def _get_host_variables(self, hostname, vault_password=None):
        """Collect the variables that apply to a single host.

        Merge order (later merges win): plugin.run results, plugin
        get_host_vars results, per-host vars from the inventory parser,
        then host_vars/ files.  Raises AnsibleError for unknown hosts.
        """
        host = self.get_host(hostname)
        if host is None:
            raise errors.AnsibleError("host not found: %s" % hostname)

        merged = {}

        # plugin.run retrieves all vars (including group vars) for the host.
        for plugin in self._vars_plugins:
            if not hasattr(plugin, 'run'):
                continue
            result = plugin.run(host, vault_password=vault_password)
            if result is not None:
                merged = utils.combine_vars(merged, result)

        # plugin.get_host_vars retrieves just host-specific vars.
        for plugin in self._vars_plugins:
            if not hasattr(plugin, 'get_host_vars'):
                continue
            result = plugin.get_host_vars(host, vault_password=vault_password)
            if result is not None:
                merged = utils.combine_vars(merged, result)

        # Still need to check InventoryParser per-host vars, which for
        # InventoryScript means one call per host (not performant).
        if self.parser is not None:
            merged = utils.combine_vars(merged, self.parser.get_host_variables(host))

        # Variables loaded from host_vars/ files.
        merged = utils.combine_vars(merged, self.get_host_vars(host))

        return merged
Ejemplo n.º 3
0
        def update_vars_cache(host, inject, data, filename):

            """ update a host's varscache with new var data """

            data = utils.combine_vars(inject, data)
            self.playbook.VARS_CACHE[host] = utils.combine_vars(self.playbook.VARS_CACHE.get(host, {}), data)
            # BUG FIX: the original passed `filename4` here, a name that is
            # not defined in this function (the parameter is `filename`),
            # which raises NameError unless a `filename4` happens to exist
            # in the enclosing scope.
            self.playbook.callbacks.on_import_for_host(host, filename)
Ejemplo n.º 4
0
 def _execute_accelerate_module(self):
     """Launch the accelerate daemon on the remote host.

     Builds the daemon's argument string, injects the inventory
     variables for the target host, then ships and runs the
     'accelerate' module over the plain SSH connection.
     """
     params = (
         base64.b64encode(self.key.__str__()),
         str(self.accport),
         constants.ACCELERATE_DAEMON_TIMEOUT,
         int(utils.VERBOSITY),
         self.runner.accelerate_ipv6,
     )
     args = "password=%s port=%s minutes=%d debug=%d ipv6=%s" % params
     if constants.ACCELERATE_MULTI_KEY:
         args += " multi_key=yes"

     # Start from the daemon password, then layer on the inventory
     # variables for the configured accelerate host (or this host).
     inject = dict(password=self.key)
     target = getattr(self.runner, 'accelerate_inventory_host', False)
     if not target:
         target = self.host
     inject = utils.combine_vars(
         inject, self.runner.inventory.get_variables(target))

     vvvv("attempting to start up the accelerate daemon...")
     self.ssh.connect()
     tmp_path = self.runner._make_tmp_path(self.ssh)
     return self.runner._execute_module(self.ssh,
                                        tmp_path,
                                        'accelerate',
                                        args,
                                        inject=inject)
Ejemplo n.º 5
0
        def update_vars_cache(host, inject, data, filename):
            """ update a host's varscache with new var data """

            data = utils.combine_vars(inject, data)
            self.playbook.VARS_CACHE[host] = utils.combine_vars(
                self.playbook.VARS_CACHE.get(host, {}), data)
            # BUG FIX: was `filename4`, an undefined name in this scope
            # (NameError at call time); the parameter is `filename`.
            self.playbook.callbacks.on_import_for_host(host, filename)
Ejemplo n.º 6
0
    def _get_variables(self, hostname, vault_password=None):
        """Return the fully merged variables for *hostname*.

        Merge order (later merges win): plugin.run results, plugin
        get_host_vars results, inventory-parser host vars, per-host
        parser vars, then host_vars/ files.  Raises AnsibleError for
        unknown hosts.
        """
        host = self.get_host(hostname)
        if host is None:
            raise errors.AnsibleError("host not found: %s" % hostname)

        merged = {}

        # plugin.run retrieves all vars (also from groups) for the host.
        for plugin in self._vars_plugins:
            if hasattr(plugin, 'run'):
                result = plugin.run(host, vault_password=vault_password)
                if result is not None:
                    merged = utils.combine_vars(merged, result)

        # plugin.get_host_vars retrieves just host-specific vars.
        for plugin in self._vars_plugins:
            if hasattr(plugin, 'get_host_vars'):
                result = plugin.get_host_vars(host, vault_password=vault_password)
                if result is not None:
                    merged = utils.combine_vars(merged, result)

        # Host variables set by the inventory parsers.
        merged = utils.combine_vars(merged, host.get_variables())

        # Still need to ask the InventoryParser for per-host vars, which
        # for InventoryScript means one call per host (not performant).
        if self.parser is not None:
            merged = utils.combine_vars(merged, self.parser.get_host_variables(host))

        # Variables from host_vars/ files.
        merged = utils.combine_vars(merged, self.get_host_vars(host))

        return merged
Ejemplo n.º 7
0
 def set_playbook_basedir(self, dir):
     """
     sets the base directory of the playbook so inventory can use it as a
     basedir for host_ and group_vars, and other things.
     """
     # Nothing to do when the basedir is unchanged.
     if dir == self._playbook_basedir:
         return
     self._playbook_basedir = dir

     # Fold in group vars from group_vars/ files under the new basedir.
     for group in self.groups:
         new_vars = self.get_group_vars(group, new_pb_basedir=True)
         group.vars = utils.combine_vars(group.vars, new_vars)

     # Fold in host vars from host_vars/ files.
     ### HACK: in 2.0 subset isn't a problem.  Never port this to 2.x
     ### Fixes: https://github.com/ansible/ansible/issues/13557
     saved_subset = self._subset
     self._subset = None
     for host in self.get_hosts():
         new_vars = self.get_host_vars(host, new_pb_basedir=True)
         host.vars = utils.combine_vars(host.vars, new_vars)
     self._subset = saved_subset

     # Invalidate the per-host / per-group variable caches.
     self._vars_per_host = {}
     self._vars_per_group = {}
Ejemplo n.º 8
0
 def _build_role_dependencies(self, roles, dep_stack, passed_vars={}, level=0):
     # this number is arbitrary, but it seems sane
     if level > 20:
         raise errors.AnsibleError("too many levels of recursion while resolving role dependencies")
     for role in roles:
         role_path,role_vars = self._get_role_path(role)
         role_vars = utils.combine_vars(role_vars, passed_vars)
         vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'vars')))
         vars_data = {}
         if os.path.isfile(vars):
             vars_data = utils.parse_yaml_from_file(vars)
             if vars_data:
                 role_vars = utils.combine_vars(vars_data, role_vars)
         defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults')))
         defaults_data = {}
         if os.path.isfile(defaults):
             defaults_data = utils.parse_yaml_from_file(defaults)
         # the meta directory contains the yaml that should
         # hold the list of dependencies (if any)
         meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'meta')))
         if os.path.isfile(meta):
             data = utils.parse_yaml_from_file(meta)
             if data:
                 dependencies = data.get('dependencies',[])
                 for dep in dependencies:
                     (dep_path,dep_vars) = self._get_role_path(dep)
                     meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'meta')))
                     if os.path.isfile(meta):
                         meta_data = utils.parse_yaml_from_file(meta)
                         if meta_data:
                             allow_dupes = utils.boolean(meta_data.get('allow_duplicates',''))
                             if not allow_dupes:
                                 if dep in self.included_roles:
                                     continue
                                 else:
                                     self.included_roles.append(dep)
                     dep_vars = utils.combine_vars(passed_vars, dep_vars)
                     dep_vars = utils.combine_vars(role_vars, dep_vars)
                     vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'vars')))
                     vars_data = {}
                     if os.path.isfile(vars):
                         vars_data = utils.parse_yaml_from_file(vars)
                         if vars_data:
                             dep_vars = utils.combine_vars(vars_data, dep_vars)
                     defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'defaults')))
                     dep_defaults_data = {}
                     if os.path.isfile(defaults):
                         dep_defaults_data = utils.parse_yaml_from_file(defaults)
                     if 'role' in dep_vars:
                         del dep_vars['role']
                     self._build_role_dependencies([dep], dep_stack, passed_vars=dep_vars, level=level+1)
                     dep_stack.append([dep,dep_path,dep_vars,dep_defaults_data])
         # only add the current role when we're at the top level,
         # otherwise we'll end up in a recursive loop 
         if level == 0:
             dep_stack.append([role,role_path,role_vars,defaults_data])
     return dep_stack
Ejemplo n.º 9
0
    def get_variables(self):
        """Return the merged variable dict for this host.

        Group variables are merged shallowest group first (so deeper,
        more specific groups override), then the host's own vars, plus
        the magic inventory_hostname / inventory_hostname_short /
        group_names entries.
        """
        groups = self.get_groups()
        merged = {}
        # Sort by depth so deeper groups are merged later and win.
        for grp in sorted(groups, key=lambda g: g.depth):
            merged = utils.combine_vars(merged, grp.get_variables())
        # Host-level vars beat any group vars.
        merged = utils.combine_vars(merged, self.vars)
        merged['inventory_hostname'] = self.name
        merged['inventory_hostname_short'] = self.name.split('.')[0]
        merged['group_names'] = sorted(g.name for g in groups if g.name != 'all')
        return merged
Ejemplo n.º 10
0
 def _execute_accelerate_module(self):
     """Start the accelerate daemon remotely and return the module result."""
     encoded_key = base64.b64encode(self.key.__str__())
     args = "password=%s port=%s debug=%d ipv6=%s" % (
         encoded_key, str(self.accport), int(utils.VERBOSITY),
         self.runner.accelerate_ipv6)
     # Seed the inject dict with the daemon password, then layer on the
     # inventory vars for the accelerate host (or this host).
     inject = dict(password=self.key)
     if getattr(self.runner, 'accelerate_inventory_host', False):
         host_vars = self.runner.inventory.get_variables(self.runner.accelerate_inventory_host)
     else:
         host_vars = self.runner.inventory.get_variables(self.host)
     inject = utils.combine_vars(inject, host_vars)
     vvvv("attempting to start up the accelerate daemon...")
     self.ssh.connect()
     tmp_path = self.runner._make_tmp_path(self.ssh)
     return self.runner._execute_module(self.ssh, tmp_path, 'accelerate', args, inject=inject)
Ejemplo n.º 11
0
 def _execute_accelerate_module(self):
     """Start the accelerate daemon remotely and return the module result."""
     args = "password=%s port=%s debug=%d" % (
         base64.b64encode(self.key.__str__()),
         str(self.accport),
         int(utils.VERBOSITY),
     )
     # Daemon password plus the inventory vars for the target host:
     # the dedicated accelerate host when set, otherwise this host.
     inject = dict(password=self.key)
     target = self.runner.accelerate_inventory_host or self.host
     inject = utils.combine_vars(inject, self.runner.inventory.get_variables(target))
     vvvv("attempting to start up the accelerate daemon...")
     self.ssh.connect()
     tmp_path = self.runner._make_tmp_path(self.ssh)
     return self.runner._execute_module(self.ssh, tmp_path, 'accelerate', args, inject=inject)
Ejemplo n.º 12
0
    def get_variables(self):
        """Return the merged variables for this host.

        Combines group variables (shallow groups first, so deeper
        groups take precedence), then host variables, then the
        built-in inventory_* / group_names entries.
        """
        results = {}
        all_groups = self.get_groups()
        for grp in sorted(all_groups, key=lambda g: g.depth):
            results = utils.combine_vars(results, grp.get_variables())
        results = utils.combine_vars(results, self.vars)
        results['inventory_hostname'] = self.name
        results['inventory_hostname_short'] = self.name.split('.')[0]
        results['group_names'] = sorted(
            g.name for g in all_groups if g.name != 'all')
        return results
Ejemplo n.º 13
0
 def get_variables(self):
     # Return the merged variables for the current host.
     results = {}
     groups = self.get_groups() # collect all parent groups of this host
     for group in sorted(groups, key=lambda g: g.depth): # sort the groups by their depth
         # This is where group depth gets used: groups are merged in depth
         # order, so when two groups define the same variable, which value
         # survives depends on combine_vars' merge direction.  The original
         # (Chinese) comment claimed the shallowest group wins --
         # NOTE(review): confirm against combine_vars' semantics.
         results = utils.combine_vars(results, group.get_variables())
     # Host vars are merged after group vars; for duplicate names the
     # host's own value takes precedence.
     results = utils.combine_vars(results, self.vars)
     results['inventory_hostname'] = self.name # inventory_hostname is accessible across hosts
     results['inventory_hostname_short'] = self.name.split('.')[0] # short form, also accessible across hosts
     results['group_names'] = sorted([ g.name for g in groups if g.name != 'all']) # group_names likewise
     return results
Ejemplo n.º 14
0
def get_inject_vars(self, host):
    """Build the full 'inject' variable dict for *host*, printing the
    name of each layer and a diff after it is merged (debug helper;
    uses the Python 2 print statement).

    Layers are merged lowest precedence first: defaults, group vars
    (shallow to deep), host vars, setup cache, play vars, play file
    vars, role vars, module vars, vars cache, role params, extra vars.
    """

    host_variables = self.inventory.get_variables(
        host, vault_password=self.vault_pass)
    ansible_host = self.inventory.get_host(host)

    # Keep track of variables in the order they will be merged
    to_merge = [
        ('Default Variables', self.default_vars),
    ]

    # Group variables, sorted shallowest first so deeper groups are
    # merged later (and therefore override).
    groups = ansible_host.get_groups()
    for group in sorted(groups, key=lambda g: g.depth):
        to_merge.append(
            ("Group Variables ({})".format(group.name), group.get_variables())
        )

    combined_cache = self.get_combined_cache()

    # use combined_cache and host_variables to template the module_vars
    # we update the inject variables with the data we're about to template
    # since some of the variables we'll be replacing may be contained there too
    module_vars_inject = combine_vars(
        host_variables, combined_cache.get(host, {}))
    module_vars_inject = combine_vars(
        self.module_vars, module_vars_inject)
    module_vars = template.template(
        self.basedir, self.module_vars, module_vars_inject)

    inject = {}
    to_merge.extend([
        ('Host Variables', ansible_host.vars),
        ('Setup Cache', self.setup_cache.get(host, {})),
        ('Play Variables', self.play_vars),
        ('Play File Variables', self.play_file_vars),
        ('Role Variables', self.role_vars),
        ('Module Variables', module_vars),
        ('Variables Cache', self.vars_cache.get(host, {})),
        ('Role Parameters', self.role_params),
        ('Extra Variables', self.extra_vars),
    ])
    # Merge each layer in turn, showing what each one changed.
    for name, value in to_merge:
        old_inject = inject
        inject = combine_vars(inject, value)
        print name
        show_diff(old_inject, inject)

    return inject
Ejemplo n.º 15
0
def get_inject_vars(self, host):
    """Assemble the complete 'inject' dict for *host*, printing each
    merge layer's name and a diff of its effect (debug helper; Python 2
    print statement).

    Precedence grows down the to_merge list: defaults, group vars
    (shallow to deep), host vars, setup cache, play vars, play file
    vars, role vars, module vars, vars cache, role params, extra vars.
    """

    host_variables = self.inventory.get_variables(
        host, vault_password=self.vault_pass)
    ansible_host = self.inventory.get_host(host)

    # Keep track of variables in the order they will be merged
    to_merge = [
        ('Default Variables', self.default_vars),
    ]

    # Group variables, shallowest group first so deeper groups are
    # merged later and override.
    groups = ansible_host.get_groups()
    for group in sorted(groups, key=lambda g: g.depth):
        to_merge.append(
            ("Group Variables ({})".format(group.name), group.get_variables())
        )

    combined_cache = self.get_combined_cache()

    # use combined_cache and host_variables to template the module_vars
    # we update the inject variables with the data we're about to template
    # since some of the variables we'll be replacing may be contained there too
    module_vars_inject = combine_vars(
        host_variables, combined_cache.get(host, {}))
    module_vars_inject = combine_vars(
        self.module_vars, module_vars_inject)
    module_vars = template.template(
        self.basedir, self.module_vars, module_vars_inject)

    inject = {}
    to_merge.extend([
        ('Host Variables', ansible_host.vars),
        ('Setup Cache', self.setup_cache.get(host, {})),
        ('Play Variables', self.play_vars),
        ('Play File Variables', self.play_file_vars),
        ('Role Variables', self.role_vars),
        ('Module Variables', module_vars),
        ('Variables Cache', self.vars_cache.get(host, {})),
        ('Role Parameters', self.role_params),
        ('Extra Variables', self.extra_vars),
    ])
    # Apply each layer in order, reporting what it changed.
    for name, value in to_merge:
        old_inject = inject
        inject = combine_vars(inject, value)
        print name
        show_diff(old_inject, inject)

    return inject
Ejemplo n.º 16
0
def boilerplate_module(modfile, args, interpreter, check):
    """ simulate what ansible does with new style modules """

    #module_fh = open(modfile)
    #module_data = module_fh.read()
    #module_fh.close()

    replacer = module_common.ModuleReplacer()

    #included_boilerplate = module_data.find(module_common.REPLACER) != -1 or module_data.find("import ansible.module_utils") != -1

    complex_args = {}
    if args.startswith("@"):
        # Argument is a YAML file (JSON is a subset of YAML)
        complex_args = utils.combine_vars(complex_args,
                                          utils.parse_yaml_from_file(args[1:]))
        args = ''
    elif args.startswith("{"):
        # Argument is a YAML document (not a file)
        complex_args = utils.combine_vars(complex_args, utils.parse_yaml(args))
        args = ''

    inject = {}
    if interpreter:
        if '=' not in interpreter:
            print 'interpreter must by in the form of ansible_python_interpreter=/usr/bin/python'
            sys.exit(1)
        interpreter_type, interpreter_path = interpreter.split('=')
        if not interpreter_type.startswith('ansible_'):
            interpreter_type = 'ansible_%s' % interpreter_type
        if not interpreter_type.endswith('_interpreter'):
            interpreter_type = '%s_interpreter' % interpreter_type
        inject[interpreter_type] = interpreter_path

    if check:
        complex_args['CHECKMODE'] = True

    (module_data, module_style,
     shebang) = replacer.modify_module(modfile, complex_args, args, inject)

    modfile2_path = os.path.expanduser("~/.ansible_module_generated")
    print "* including generated source, if any, saving to: %s" % modfile2_path
    print "* this may offset any line numbers in tracebacks/debuggers!"
    modfile2 = open(modfile2_path, 'w')
    modfile2.write(module_data)
    modfile2.close()
    modfile = modfile2_path

    return (modfile2_path, module_style)
Ejemplo n.º 17
0
def monkeypatch_get_variables(self, hostname, vault_password=None):
    """Replacement get_variables: merge vars-plugin results, then the
    host's own vars, then any parser-provided per-host vars."""
    host = self.get_host(hostname)
    if host is None:
        raise errors.AnsibleError("host not found: %s" % hostname)

    merged = {}
    for plugin in self._vars_plugins:
        plugin_vars = plugin.run(host)
        if plugin_vars is not None:
            merged = utils.combine_vars(merged, plugin_vars)

    merged = utils.combine_vars(merged, host.get_variables())
    if self.parser is not None:
        merged = utils.combine_vars(merged, self.parser.get_host_variables(host))
    return merged
Ejemplo n.º 18
0
def monkeypatch_get_variables(self, hostname, vault_password=None):
    """Patched get_variables: vars plugins first, then the host's own
    vars, then per-host vars from the inventory parser."""
    host = self.get_host(hostname)
    if host is None:
        raise errors.AnsibleError("host not found: %s" % hostname)

    collected = {}
    plugin_results = (plugin.run(host) for plugin in self._vars_plugins)
    for result in plugin_results:
        if result is not None:
            collected = utils.combine_vars(collected, result)

    collected = utils.combine_vars(collected, host.get_variables())
    if self.parser is not None:
        collected = utils.combine_vars(
            collected, self.parser.get_host_variables(host))
    return collected
Ejemplo n.º 19
0
        def update_vars_cache(host, data, target_filename=None):
            """Merge *data* into the playbook's VARS_CACHE entry for
            *host*, firing the on_import callback when a filename is
            supplied."""
            cache = self.playbook.VARS_CACHE
            cache[host] = utils.combine_vars(cache.get(host, {}), data)
            if target_filename:
                self.playbook.callbacks.on_import_for_host(host, target_filename)
Ejemplo n.º 20
0
    def playbook_on_play_start(self, pattern):
        # Called at the start of each play: snapshot play-level vars,
        # refresh the audited variable values, and log the play's
        # connection/privilege settings.
        self.inventory = self.playbook.inventory

        # Don't log empty plays
        hosts_in_play = self.inventory.list_hosts(self.play.hosts)
        if len(hosts_in_play) == 0:
            return

        # Combine inventory vars, global vars and extra vars
        self.my_vars = utils.combine_vars(self.my_vars, self.play.vars)

        # These are not used until `playbook_on_stats`
        for myvar in self.audit_vars:
            val = get_dotted_val_in_dict(self.my_vars, myvar)
            self.audit_vars[myvar] = val

        # su/sudo/become attributes may be absent depending on the
        # Ansible version, hence the defensive getattr calls.
        self.logger.log('playbook_on_play_start', {
            'name': self.play.name,
            'remote_user': self.play.remote_user,
            'su': getattr(self.play, 'su', None),
            'su_user': getattr(self.play, 'su_user', None),
            'sudo': getattr(self.play, 'sudo', None),
            'sudo_user': getattr(self.play, 'sudo_user', None),
            'become': getattr(self.play, 'become', None),
            'become_method': getattr(self.play, 'become_method', None),
            'become_user': getattr(self.play, 'become_user', None),
            'serial': self.play.serial,
            'max_fail_percentage': self.play.max_fail_pct,
            'hosts': self.play.hosts,
            })
Ejemplo n.º 21
0
 def _execute_accelerate_module(self):
     """Push and run the 'accelerate' module over SSH to start the daemon."""
     encoded = base64.b64encode(self.key.__str__())
     args = "password=%s port=%s" % (encoded, str(self.accport))
     # Daemon password plus this host's inventory variables.
     inject = utils.combine_vars(
         dict(password=self.key),
         self.runner.inventory.get_variables(self.host))
     self.ssh.connect()
     tmp_path = self.runner._make_tmp_path(self.ssh)
     return self.runner._execute_module(self.ssh, tmp_path, 'accelerate', args, inject=inject)
Ejemplo n.º 22
0
    def _merge_hosts(self, host, newhost):
        """
        Merge all of instance newhost into host.
        Combines two Host objects that refer to the same host name.
        """

        # name
        if host.name != newhost.name: # hosts must share a name to be merged
            raise errors.AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))

        # group membership relation
        for newgroup in newhost.groups: # walk the incoming host's group list
            # dict with existing groups:
            hostgroups = dict([(g.name, g) for g in host.groups]) # name -> group for the surviving host
            # check if new group is already known as a group
            # groups the surviving host already belongs to are skipped
            if newgroup.name not in hostgroups:
                if newgroup.name not in self.groups: # register the group in self.groups if unseen
                    # group does not exist yet in self, import him
                    self.groups[newgroup.name] = newgroup
                # group now exists but doesn't have host yet
                self.groups[newgroup.name].add_host(host) # attach the surviving host object to it

        # variables
        # Merge the two hosts' variables with newhost's values winning,
        # i.e. a host defined in a later-parsed file overrides earlier ones.
        host.vars = utils.combine_vars(host.vars, newhost.vars)
Ejemplo n.º 23
0
    def _merge_groups(self, group, newgroup):
        """ Merge all of instance newgroup into group,
            update parent/child relationships
            group lists may still contain group objects that exist in self with
            same name, but was instanciated as a different object in some other
            inventory parser; these are handled later """

        # name
        if group.name != newgroup.name: # groups must share a name to be merged
            raise errors.AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))

        # depth
        group.depth = max([group.depth, newgroup.depth]) # keep the deepest depth seen for this group

        # hosts list (host objects are by now already added to self.hosts)
        # (the caller has already registered these hosts before merging)
        for host in newgroup.hosts: # walk every host of the incoming group
            grouphosts = dict([(h.name, h) for h in group.hosts]) # name -> host map of the existing group
            if host.name in grouphosts: # host already present in the existing group?
                # same host name but different object, merge
                self._merge_hosts(grouphosts[host.name], host)
            else:
                # new membership, add host to group from self
                # group from self will also be added again to host.groups, but
                # as different object
                group.add_host(self.hosts[host.name]) # attach the canonical host object to the group
                # now remove this the old object for group in host.groups

                # The add_host call above can leave host.groups holding two
                # group objects with the same name: host.groups is a plain
                # list built via append, so same-named duplicates are
                # possible (Group.hosts works the same way).  Find the stale
                # duplicate object and drop it.
                for hostgroup in [g for g in host.groups]:
                    if hostgroup.name == group.name and hostgroup != self.groups[group.name]:
                        self.hosts[host.name].groups.remove(hostgroup)


        # group child membership relation
        for newchild in newgroup.child_groups: # walk the incoming group's child groups
            # dict with existing child groups:
            childgroups = dict([(g.name, g) for g in group.child_groups])
            # check if child of new group is already known as a child
            # only add children not already present; child groups don't
            # touch real host objects, so this is the simple case
            if newchild.name not in childgroups:
                self.groups[group.name].add_child_group(newchild)

        # group parent membership relation
        for newparent in newgroup.parent_groups: # walk the incoming group's parent groups
            # dict with existing parent groups:
            parentgroups = dict([(g.name, g) for g in group.parent_groups])
            # check if parent of new group is already known as a parent
            if newparent.name not in parentgroups: # unknown parent: register it, then link this group in as its child
                if newparent.name not in self.groups:
                    # group does not exist yet in self, import him
                    self.groups[newparent.name] = newparent
                # group now exists but not yet as a parent here
                self.groups[newparent.name].add_child_group(group)

        # variables
        group.vars = utils.combine_vars(group.vars, newgroup.vars) # merge vars for same-named groups; the newer group's values win
Ejemplo n.º 24
0
    def __init__(self, runner, host, port, user, password, private_key_file,
                 *args, **kwargs):
        # Connection plugin setup: stash the connection parameters and
        # pull instance_id / sliver_name out of the host's inventory
        # variables.
        self.runner = runner
        self.host = host
        self.ipv6 = ':' in self.host  # bare IPv6 addresses contain colons
        self.port = port
        self.user = str(user)
        self.password = password
        self.private_key_file = private_key_file
        self.HASHED_KEY_MAGIC = "|1|"
        self.has_pipelining = True
        #self.instance_id = "instance-00000045" # C.get_config(C.p, "xos", "instance_id", "INSTANCE_ID", None)
        #self.sliver_name = "onlab_hpc-355" # C.get_config(C.p, "xos", "sliver_name", "SLIVER_NAME", None)

        inject = {}
        inject = utils.combine_vars(
            inject, self.runner.inventory.get_variables(self.host))

        # NOTE(review): these raise KeyError if the inventory does not
        # define instance_id / sliver_name for this host -- confirm that
        # is the intended failure mode.
        self.instance_id = inject["instance_id"]
        self.sliver_name = inject["sliver_name"]

        # Serialize creation of the cp directory across worker processes
        # (presumably the SSH ControlPersist socket dir -- confirm).
        fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
        self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp',
                                                  mode=0700)
        fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
Ejemplo n.º 25
0
        def process_files(filename,
                          filename2,
                          filename3,
                          filename4,
                          host=None):
            """Decide where freshly parsed vars belong: the per-host
            vars cache (possibly firing an import callback) or the
            play's own vars.  Returns True when the file yielded data,
            False otherwise."""

            data = utils.parse_yaml_from_file(
                filename4, vault_password=self.vault_password)
            if not data:
                # nothing parsed -- we did not process this file
                return False
            if type(data) != dict:
                raise errors.AnsibleError(
                    "%s must be stored as a dictionary/hash" % filename4)
            if host is None:
                # play-scoped vars: merge straight into self.vars
                self.vars = utils.combine_vars(self.vars, data)
            else:
                # host-scoped vars: choose which filename to report to
                # the on_import callback (None means no callback fires)
                target_filename = None
                if utils.contains_vars(filename2):
                    if utils.contains_vars(filename3):
                        target_filename = filename4
                    else:
                        target_filename = filename3
                update_vars_cache(host,
                                  data,
                                  target_filename=target_filename)
            # we did process this file
            return True
Ejemplo n.º 26
0
        def update_vars_cache(host, data, target_filename=None):
            """Fold *data* into VARS_CACHE[host]; report the import via
            the playbook callbacks when target_filename is given."""
            existing = self.playbook.VARS_CACHE.get(host, {})
            self.playbook.VARS_CACHE[host] = utils.combine_vars(existing, data)
            if target_filename:
                self.playbook.callbacks.on_import_for_host(host, target_filename)
Ejemplo n.º 27
0
        def process_files(filename,
                          filename2,
                          filename3,
                          filename4,
                          host=None):
            """ pseudo-algorithm for deciding where new vars should go:
            per-host data lands in the vars cache (via the enclosing
            scope's update_vars_cache / inject), non-host data lands in
            self.vars """

            data = utils.parse_yaml_from_file(
                filename4, vault_password=self.vault_password)
            if data:
                if type(data) != dict:
                    raise errors.AnsibleError(
                        "%s must be stored as a dictionary/hash" % filename4)
                if host is not None:
                    if self._has_vars_in(
                            filename2) and not self._has_vars_in(filename3):
                        # running a host specific pass and has host specific variables
                        # load into setup cache
                        update_vars_cache(host, inject, data, filename4)
                    elif self._has_vars_in(
                            filename3) and not self._has_vars_in(filename4):
                        # handle mixed scope variables in filepath
                        update_vars_cache(host, inject, data, filename4)

                elif not self._has_vars_in(filename4):
                    # found a non-host specific variable, load into vars and NOT
                    # the setup cache.
                    # FIX: the original had an inner `if host is not None:
                    # self.vars.update(data)` branch here, but this elif is
                    # only reachable when host is None, so that branch was
                    # dead code and has been removed.
                    self.vars = utils.combine_vars(self.vars, data)
Ejemplo n.º 28
0
 def set_playbook_basedir(self, dir):
     """
     sets the base directory of the playbook so inventory can use it as a
     basedir for host_ and group_vars, and other things.
     """
     # No-op when the basedir hasn't actually changed.
     if dir == self._playbook_basedir:
         return
     self._playbook_basedir = dir
     # Re-read group_vars/ files relative to the new basedir.
     for group in self.groups:
         extra = self.get_group_vars(group, new_pb_basedir=True)
         group.vars = utils.combine_vars(group.vars, extra)
     # Re-read host_vars/ files likewise.
     for host in self.get_hosts():
         extra = self.get_host_vars(host, new_pb_basedir=True)
         host.vars = utils.combine_vars(host.vars, extra)
     # Drop the cached per-host / per-group variable results.
     self._vars_per_host = {}
     self._vars_per_group = {}
Ejemplo n.º 29
0
 def get_variables(self, hostname, **kwargs):
     """Collect base inventory variables for *hostname*, then layer any
     host-specific variables on top, and wrap the result."""
     combined = BaseInventory.get_variables(self, hostname, **kwargs)
     host_getter = getattr(self, 'get_host_variables', None)
     if host_getter is not None:
         # host-specific values override the base inventory values
         extra = host_getter(hostname, vault_password=self._vault_password)
         combined = utils.combine_vars(combined, extra)
     return PloyInventoryDict(combined)
Ejemplo n.º 30
0
 def set_playbook_basedir(self, dir):
     """
     sets the base directory of the playbook so inventory can use it as a
     basedir for host_ and group_vars, and other things.

     Re-reads group_vars/ and host_vars/ relative to the new basedir and
     invalidates the per-host/per-group variable caches.
     """
     # Only update things if dir is a different playbook basedir
     if dir != self._playbook_basedir:
         self._playbook_basedir = dir
         # get group vars from group_vars/ files
         for group in self.groups:
             group.vars = utils.combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True))
         # get host vars from host_vars/ files
         for host in self.get_hosts():
             host.vars = utils.combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True))
         # invalidate cache so freshly merged vars are recomputed on next access
         self._vars_per_host = {}
         self._vars_per_group = {}
Ejemplo n.º 31
0
    def _get_vars(self):
        """Build the play's variable dict from the 'vars' section and
        'vars_prompt' entries, with playbook extra_vars layered on last
        (extra_vars always win; values supplied there are never prompted).
        """
        if self.vars is None:
            self.vars = {}

        if type(self.vars) not in [dict, list]:
            raise errors.AnsibleError("'vars' section must contain only key/value pairs")

        vars = {}

        # a list-form vars section is a sequence of one-entry mappings
        if type(self.vars) == list:
            for entry in self.vars:
                if getattr(entry, 'items', None) is None:
                    raise errors.AnsibleError("expecting a key-value pair in 'vars' section")
                key, value = entry.items()[0]
                vars[key] = value
        else:
            vars.update(self.vars)

        if type(self.vars_prompt) == list:
            # full form: list of dicts with name/prompt/default/... keys
            for var in self.vars_prompt:
                if 'name' not in var:
                    raise errors.AnsibleError("'vars_prompt' item is missing 'name:'")
                vname = var['name']
                # extra_vars win; only prompt when not already supplied
                if vname in self.playbook.extra_vars:
                    continue
                vars[vname] = self.playbook.callbacks.on_vars_prompt(
                    vname,
                    var.get("private", True),
                    var.get("prompt", vname),
                    var.get("encrypt", None),
                    var.get("confirm", False),
                    var.get("salt_size", None),
                    var.get("salt", None),
                    var.get("default", None),
                )
        elif type(self.vars_prompt) == dict:
            # legacy short form: {varname: prompt-text}, never private/encrypted
            for (vname, prompt) in self.vars_prompt.iteritems():
                if vname in self.playbook.extra_vars:
                    continue
                vars[vname] = self.playbook.callbacks.on_vars_prompt(
                    varname=vname, private=False, prompt="%s: " % prompt, default=None)
        else:
            raise errors.AnsibleError("'vars_prompt' section is malformed, see docs")

        if type(self.playbook.extra_vars) == dict:
            # command-line -e values override everything gathered above
            vars = utils.combine_vars(vars, self.playbook.extra_vars)

        return vars
Ejemplo n.º 32
0
    def _get_vars(self):
        ''' load the vars section from a play, accounting for all sorts of variable features
        including loading from yaml files, prompting, and conditional includes of the first
        file found in a list.

        Precedence (lowest to highest): the 'vars' section, prompted
        values, then playbook extra_vars.  Variables already present in
        extra_vars are never prompted for. '''

        if self.vars is None:
            self.vars = {}

        if type(self.vars) not in [dict, list]:
            raise errors.AnsibleError("'vars' section must contain only key/value pairs")

        vars = {}

        # translate a list of vars into a dict
        if type(self.vars) == list:
            for item in self.vars:
                # each list entry must be a one-key mapping (e.g. "- foo: bar")
                if getattr(item, 'items', None) is None:
                    raise errors.AnsibleError("expecting a key-value pair in 'vars' section")
                k, v = item.items()[0]
                vars[k] = v
        else:
            vars.update(self.vars)

        if type(self.vars_prompt) == list:
            # full form: list of dicts with name/prompt/default/... keys
            for var in self.vars_prompt:
                if not 'name' in var:
                    raise errors.AnsibleError("'vars_prompt' item is missing 'name:'")

                vname = var['name']
                prompt = var.get("prompt", vname)
                default = var.get("default", None)
                private = var.get("private", True)

                confirm = var.get("confirm", False)
                encrypt = var.get("encrypt", None)
                salt_size = var.get("salt_size", None)
                salt = var.get("salt", None)

                # extra_vars win: only prompt when the value wasn't supplied
                if vname not in self.playbook.extra_vars:
                    vars[vname] = self.playbook.callbacks.on_vars_prompt(
                                     vname, private, prompt, encrypt, confirm, salt_size, salt, default
                                  )

        elif type(self.vars_prompt) == dict:
            # legacy short form: {varname: prompt-text}; never private/encrypted
            for (vname, prompt) in self.vars_prompt.iteritems():
                prompt_msg = "%s: " % prompt
                if vname not in self.playbook.extra_vars:
                    vars[vname] = self.playbook.callbacks.on_vars_prompt(
                                     varname=vname, private=False, prompt=prompt_msg, default=None
                                  )

        else:
            raise errors.AnsibleError("'vars_prompt' section is malformed, see docs")

        if type(self.playbook.extra_vars) == dict:
            # command-line -e values override everything gathered above
            vars = utils.combine_vars(vars, self.playbook.extra_vars)

        return vars
Ejemplo n.º 33
0
    def _get_group_variables(self, groupname, vault_password=None):
        """Return the merged variable dict for the named group.

        Plugin-contributed group vars are merged first, then group_vars/
        file values on top (files win on conflict).  Raises Exception when
        the group is unknown.
        """
        group = self.get_group(groupname)
        if group is None:
            raise Exception("group not found: %s" % groupname)

        merged = {}

        # per-group vars contributed by vars plugins
        for plugin in self._vars_plugins:
            if not hasattr(plugin, 'get_group_vars'):
                continue
            contributed = plugin.get_group_vars(group, vault_password=vault_password)
            if contributed is not None:
                merged = utils.combine_vars(merged, contributed)

        # group_vars/ files have the final say
        merged = utils.combine_vars(merged, self.get_group_vars(group))

        return merged
Ejemplo n.º 34
0
def parse_extra_vars(extras, vault_pass=None):
    """Parse ``--extra-vars`` style option strings into one dict.

    Each entry may be ``@file.yml`` (a YAML/JSON file, optionally
    vault-encrypted), inline YAML/JSON (starting with ``[`` or ``{``),
    or ``key=value`` pairs.  Later entries override earlier ones.

    :param extras: iterable of raw extra-vars option strings
    :param vault_pass: optional vault password for encrypted var files
    :returns: combined dict of all parsed variables
    """
    inject_ansible_paths()
    from ansible import utils
    extra_vars = {}
    for extra_vars_opt in extras:
        if extra_vars_opt.startswith("@"):
            # Argument is a YAML file (JSON is a subset of YAML)
            kw = {}
            if vault_pass:
                kw['vault_password'] = vault_pass
            # BUG FIX: vault_password must be passed to parse_yaml_from_file,
            # not to combine_vars (which only accepts the two dicts) —
            # previously any vault_pass caused a TypeError.
            extra_vars = utils.combine_vars(
                extra_vars, utils.parse_yaml_from_file(extra_vars_opt[1:], **kw))
        elif extra_vars_opt and extra_vars_opt[0] in '[{':
            # Arguments as inline YAML/JSON
            extra_vars = utils.combine_vars(extra_vars, utils.parse_yaml(extra_vars_opt))
        else:
            # Arguments as key=value pairs
            extra_vars = utils.combine_vars(extra_vars, utils.parse_kv(extra_vars_opt))
    return extra_vars
Ejemplo n.º 35
0
 def _execute_accelerate_module(self):
     """Launch the accelerate daemon on the remote host over SSH.

     Builds the daemon argument string (base64-encoded key, port,
     timeout, verbosity, ipv6 flag), then connects and runs the
     'accelerate' module in a fresh temp path, returning the module
     execution result.
     """
     args = "password=%s port=%s minutes=%d debug=%d ipv6=%s" % (
         base64.b64encode(self.key.__str__()),
         str(self.accport),
         constants.ACCELERATE_DAEMON_TIMEOUT,
         int(utils.VERBOSITY),
         self.runner.accelerate_ipv6,
     )
     if constants.ACCELERATE_MULTI_KEY:
         args += " multi_key=yes"
     # the daemon needs the key to authenticate our follow-up connections
     inject = dict(password=self.key)
     # prefer an explicitly configured accelerate inventory host, else the
     # target host itself, for connection variables
     if getattr(self.runner, 'accelerate_inventory_host', False):
         inject = utils.combine_vars(inject, self.runner.inventory.get_variables(self.runner.accelerate_inventory_host))
     else:
         inject = utils.combine_vars(inject, self.runner.inventory.get_variables(self.host))
     vvvv("attempting to start up the accelerate daemon...")
     self.ssh.connect()
     tmp_path = self.runner._make_tmp_path(self.ssh)
     return self.runner._execute_module(self.ssh, tmp_path, 'accelerate', args, inject=inject)
Ejemplo n.º 36
0
    def _merge_groups(self, group, newgroup):
        """Fold ``newgroup`` (same name, instantiated by another parser)
        into ``group``: reconcile hosts, child/parent links and vars.
        Duplicate group objects lingering in other lists are handled by
        later passes.
        """
        # refuse to merge differently-named groups
        if group.name != newgroup.name:
            raise errors.AnsibleError(
                "Cannot merge group %s with %s" % (group.name, newgroup.name))

        # keep the deeper of the two depths
        group.depth = max([group.depth, newgroup.depth])

        # hosts (host objects are by now already present in self.hosts)
        for host in newgroup.hosts:
            existing_hosts = dict([(h.name, h) for h in group.hosts])
            if host.name in existing_hosts:
                # same host name but a different object: merge them
                self._merge_hosts(existing_hosts[host.name], host)
            else:
                # new membership: attach the canonical host object from self
                # (the canonical group will also be re-added to host.groups,
                # but as a different object)
                group.add_host(self.hosts[host.name])
                # drop the stale same-named group object from the host's list
                for hostgroup in list(host.groups):
                    if (hostgroup.name == group.name
                            and hostgroup != self.groups[group.name]):
                        self.hosts[host.name].groups.remove(hostgroup)

        # adopt child groups not yet known to the surviving group
        for newchild in newgroup.child_groups:
            known_children = dict([(g.name, g) for g in group.child_groups])
            if newchild.name not in known_children:
                self.groups[group.name].add_child_group(newchild)

        # adopt parent groups not yet known to the surviving group
        for newparent in newgroup.parent_groups:
            known_parents = dict([(g.name, g) for g in group.parent_groups])
            if newparent.name not in known_parents:
                if newparent.name not in self.groups:
                    # parent not yet tracked by self: import it
                    self.groups[newparent.name] = newparent
                # link the surviving group under that (canonical) parent
                self.groups[newparent.name].add_child_group(group)

        # variables: newgroup's values win on key conflicts
        group.vars = utils.combine_vars(group.vars, newgroup.vars)
Ejemplo n.º 37
0
    def run(self, host):
        """Return group_vars/host_vars for *host* from the inventory dir.

        Groups are processed shallowest-first so deeper (more specific)
        groups override; host_vars are merged last and win over all
        group values.  Returns None when the inventory has no basedir
        (e.g. passed in via the API).
        """
        inventory = self.inventory

        ordered_groups = sorted(inventory.groups_for_host(host.name),
                                key=lambda g: g.depth)
        basedir = inventory.basedir()
        if basedir is None:
            # could happen when inventory is passed in via the API
            return

        results = {}

        # group_vars: bare name, .yml and .yaml variants are all merged
        for group in ordered_groups:
            prefix = os.path.join(basedir, "group_vars/%s" % group.name)
            for path in (prefix, prefix + '.yml', prefix + '.yaml'):
                if not os.path.exists(path):
                    continue
                data = utils.parse_yaml_from_file(path)
                if type(data) != dict:
                    raise errors.AnsibleError(
                        "%s must be stored as a dictionary/hash" % path)
                results = utils.combine_vars(results, data)

        # host_vars for the host itself, overriding all group values
        prefix = os.path.join(basedir, "host_vars/%s" % host.name)
        for path in (prefix, prefix + '.yml', prefix + '.yaml'):
            if not os.path.exists(path):
                continue
            data = utils.parse_yaml_from_file(path)
            if type(data) != dict:
                raise errors.AnsibleError(
                    "%s must be stored as a dictionary/hash" % path)
            results = utils.combine_vars(results, data)

        return results
Ejemplo n.º 38
0
    def _load_role_defaults(self, defaults_files):
        """Merge role default variables from every existing file in
        *defaults_files*; later files override earlier ones.  Missing
        and empty files are skipped silently."""
        merged = {}
        for path in defaults_files:
            if not os.path.exists(path):
                continue
            loaded = utils.parse_yaml_from_file(path, vault_password=self.vault_password)
            if not loaded:
                continue
            if type(loaded) != dict:
                raise errors.AnsibleError(
                    "%s must be stored as dictionary/hash: %s" % (path, type(loaded)))
            merged = utils.combine_vars(merged, loaded)

        return merged
Ejemplo n.º 39
0
    def _load_role_defaults(self, defaults_files):
        # process default variables
        #
        # Merge every existing defaults file into one dict; later files
        # override earlier ones.  Missing and empty files are skipped
        # silently; a non-dict file is an error.
        default_vars = {}
        for filename in defaults_files:
            if os.path.exists(filename):
                new_default_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password)
                if new_default_vars:
                    if type(new_default_vars) != dict:
                        raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename, type(new_default_vars)))
                    default_vars = utils.combine_vars(default_vars, new_default_vars)

        return default_vars
Ejemplo n.º 40
0
    def _merge_groups(self, group, newgroup):
        """ Merge all of instance newgroup into group,
            update parent/child relationships
            group lists may still contain group objects that exist in self with
            same name, but was instanciated as a different object in some other
            inventory parser; these are handled later """

        # name: refuse to merge differently-named groups
        if group.name != newgroup.name:
            raise errors.AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))

        # depth: keep the deeper of the two
        group.depth = max([group.depth, newgroup.depth])

        # hosts list (host objects are by now already added to self.hosts)
        for host in newgroup.hosts:
            grouphosts = dict([(h.name, h) for h in group.hosts])
            if host.name in grouphosts:
                # same host name but different object, merge
                self._merge_hosts(grouphosts[host.name], host)
            else:
                # new membership, add host to group from self
                # group from self will also be added again to host.groups, but
                # as different object
                group.add_host(self.hosts[host.name])
                # now remove this the old object for group in host.groups
                for hostgroup in [g for g in host.groups]:
                    # keep only the canonical group object tracked in self.groups
                    if hostgroup.name == group.name and hostgroup != self.groups[group.name]:
                        self.hosts[host.name].groups.remove(hostgroup)


        # group child membership relation
        for newchild in newgroup.child_groups:
            # dict with existing child groups:
            childgroups = dict([(g.name, g) for g in group.child_groups])
            # check if child of new group is already known as a child
            if newchild.name not in childgroups:
                self.groups[group.name].add_child_group(newchild)

        # group parent membership relation
        for newparent in newgroup.parent_groups:
            # dict with existing parent groups:
            parentgroups = dict([(g.name, g) for g in group.parent_groups])
            # check if parent of new group is already known as a parent
            if newparent.name not in parentgroups:
                if newparent.name not in self.groups:
                    # group does not exist yet in self, import him
                    self.groups[newparent.name] = newparent
                # group now exists but not yet as a parent here
                self.groups[newparent.name].add_child_group(group)

        # variables: newgroup's values win on key conflicts
        group.vars = utils.combine_vars(group.vars, newgroup.vars)
Ejemplo n.º 41
0
    def _prepare_vars(self, data, defaults={}):
        """Build the merged vars dict for a play datastructure.

        :param data: parsed play datastructure; may contain 'vars' (dict
            or list of dicts) and 'vars_files' keys
        :param defaults: base values, overridden by the play's own vars
        :returns: plain dict of merged variables

        NOTE: the mutable default for *defaults* is kept for interface
        compatibility; it is never mutated (copied below).
        """
        if isinstance(defaults, dict):
            vars = defaults.copy()
        else:
            vars = dict()

        if 'vars' in data:
            if isinstance(data['vars'], dict):
                vars = utils.combine_vars(vars, data['vars'])

            elif isinstance(data['vars'], list):
                # BUG FIX: previously this tested and iterated `vars`
                # (always a dict at this point) instead of data['vars'],
                # so list-form vars sections were silently dropped.
                # nobody should really do this, but handle vars: a=1 b=2
                for v in data['vars']:
                    vars = utils.combine_vars(vars, v)

        if 'vars_files' in data:
            # TODO vars_files needs to be done per host?
            pass

        # TODO do we need to run through template

        return dict(vars)
Ejemplo n.º 42
0
 def set_playbook_basedir(self, dir):
     """
     sets the base directory of the playbook so inventory can use it as a
     basedir for host_ and group_vars, and other things.

     Temporarily clears the host subset so host_vars are loaded for ALL
     hosts, not just the restricted set (see HACK note below).
     """
     # Only update things if dir is a different playbook basedir
     if dir != self._playbook_basedir:
         self._playbook_basedir = dir
         # get group vars from group_vars/ files
         for group in self.groups:
             group.vars = utils.combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True))
         # get host vars from host_vars/ files
         ### HACK: in 2.0 subset isn't a problem.  Never port this to 2.x
         ### Fixes: https://github.com/ansible/ansible/issues/13557
         # save the subset, disable it while loading host_vars, restore after
         old_subset =  self._subset
         self._subset = None
         for host in self.get_hosts():
             host.vars = utils.combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True))
         self._subset = old_subset
         # invalidate cache so freshly merged vars are recomputed on access
         self._vars_per_host = {}
         self._vars_per_group = {}
Ejemplo n.º 43
0
    def run(self, host):
        """Load profile variables for *host*.

        Walks from the configured profile directory up to the profiles/
        root, merging each vars.yml found; values from deeper (nearer)
        directories win over their ancestors.  Returns an empty dict
        when no config or profiles path is available.
        """
        results = {}

        # Load config
        config = self.get_config()
        if config is None:
            return results

        # Path to the 'profiles/' directory
        profiles_path = self.get_profiles_path()
        if profiles_path is None:
            return results

        # Absolute path of the selected profile folder under profiles/
        profile_path = os.path.join(profiles_path, config['profile'])
        if not (os.path.exists(profile_path) and os.path.isdir(profile_path)):
            raise errors.AnsibleError("There is no such profile: %s" % profile_path)

        current_path = os.path.abspath(profile_path)

        # Traverse directories up, until we reach (and process) profiles_path
        while True:
            vars_path = os.path.join(current_path, "vars.yml")

            if (os.path.exists(vars_path)
                    and os.path.isfile(vars_path)
                    and os.stat(vars_path).st_size != 0):
                data = utils.parse_yaml_from_file(vars_path)
                if type(data) != dict:
                    raise errors.AnsibleError("%s must be stored as a dictionary/hash" % vars_path)

                # previously collected (deeper) values override this level's
                results = utils.combine_vars(data, results)

            # stop once the profiles root itself has been processed
            if current_path == profiles_path:
                break

            # step to the parent directory
            current_path = os.path.abspath(os.path.join(current_path, os.pardir))

        # all done, results is a dictionary of variables
        return results
Ejemplo n.º 44
0
    def playbook_on_start(self):
        """Callback fired when the playbook starts: snapshot merged vars
        and write an audit log entry describing this invocation."""
        # These are not used until `playbook_on_play_start`
        self.my_vars = self.playbook.global_vars
        self.my_vars = utils.combine_vars(
            self.my_vars, self.playbook.extra_vars)

        # This gets us the user that originally spawed the ansible process.
        # Watch out: On Linux, if you (yes, you) started some process that
        # starts ansible (i.e. jenkins), then you (yes, you) will be the one
        # listed as the user running ansible, even though you started jenkins
        # indirectly (e.g. using "sudo service jenkins start").
        if self.log_logname:
            p = Popen(['logname'], stdout=PIPE)
            logname = p.stdout.readline().rstrip('\n')
            p.terminate()
        else:
            logname = None

        # su/sudo/become attributes vary across ansible versions, hence getattr
        log_entry = {
            'playbook': self.playbook.filename,
            'hosts': self.playbook.inventory.list_hosts(),
            'inventory': self.playbook.inventory.host_list,
            'only_tags': self.playbook.only_tags,
            'skip_tags': self.playbook.skip_tags,
            'check_mode': self.playbook.check,
            'automation_on_behalf_of': self.playbook.extra_vars.get(
                'automation_on_behalf_of', ''),
            'remote_user': self.playbook.remote_user,
            'su': getattr(self.playbook, 'su', None),
            'su_user': getattr(self.playbook, 'su_user', None),
            'sudo': getattr(self.playbook, 'sudo', None),
            'sudo_user': getattr(self.playbook, 'sudo_user', None),
            'become': getattr(self.playbook, 'become', None),
            'become_method': getattr(self.playbook, 'become_method', None),
            'become_user': getattr(self.playbook, 'become_user', None),
            'USER': os.getenv('USER'),
            'SUDO_USER': os.getenv('SUDO_USER'),
            'logname': logname,
        }

        self.logger.log('playbook_on_start', log_entry)
Ejemplo n.º 45
0
  def init(self):
    """
    To instantiate a new environment, we create a set
    of inventory files for that environment.

    See individual playbooks for more info.

    Returns a dict with the executed playbook context and this env.
    Raises ValidationError on bad names, existing environments (without
    --force) or unsupported regions.
    """

    # additional options which need to be processed with the init command
    for extra_vars_opt in self.options.extra_vars:
      self.extra_vars = utils.combine_vars(self.extra_vars,
                        utils.parse_yaml(extra_vars_opt))

    # basic string checking to prevent failures later in playbook
    if not re.match('^[a-zA-z]{1}[a-zA-Z0-9-]*', self.options.env_name):
      raise ValidationError(
        'The environment name must match the following\
  regexp: "[a-zA-z]{1}[a-zA-Z0-9-]*" ')

    # check to make sure this inventory does not already exist
    # (--force re-initializes an existing environment in place)
    if (os.path.isdir((config['env_path'] + self.options.env_name)) and not
      self.options.force):
        raise ValidationError(
          'There is already an environment with name "%s".  Use -f, --force to \
  update the inventory variables for this environment. ' % self.options.env_name)

    # check that this is one of the working regions
    if self.extra_vars['region'] not in config['regions']:
      raise ValidationError(
          'Only the following regions are possible when using this tool due to \
availability of the ECS-optimized images used to run the Docker app: {}'.format(
          config['regions']))

    # TODO: validate images, eip and cloudformation limits?

    # run the init playbook with the merged extra vars
    playbooks = ['init.yml']
    playbook_context = PlaybookExecution(
      playbooks, config, self.proj_inventory_path, self.options, self.extra_vars)
    playbook_context.run()

    return {'playbook_results': playbook_context, 'env': self}
Ejemplo n.º 46
0
    def _merge_hosts(self,host, newhost):
        """Fold ``newhost`` (same name, instantiated by another parser)
        into ``host``: adopt missing group memberships and merge vars."""
        # refuse to merge differently-named hosts
        if host.name != newhost.name:
            raise errors.AnsibleError(
                "Cannot merge host %s with %s" % (host.name, newhost.name))

        # adopt any group memberships host does not have yet
        for newgroup in newhost.groups:
            known_groups = dict([(g.name, g) for g in host.groups])
            if newgroup.name in known_groups:
                continue
            if newgroup.name not in self.groups:
                # group not yet tracked by self: import it
                self.groups[newgroup.name] = newgroup
            # group now exists but doesn't have this host yet
            self.groups[newgroup.name].add_host(host)

        # variables: newhost's values win on key conflicts
        host.vars = utils.combine_vars(host.vars, newhost.vars)
Ejemplo n.º 47
0
    def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
        """Connection plugin init: record connection parameters and pull
        instance_id / sliver_name from the host's inventory variables.

        Raises KeyError when the inventory does not define 'instance_id'
        and 'sliver_name' for this host.
        """
        self.runner = runner
        self.host = host
        # a bare IPv6 address contains ':'
        self.ipv6 = ":" in self.host
        self.port = port
        self.user = str(user)
        self.password = password
        self.private_key_file = private_key_file
        self.HASHED_KEY_MAGIC = "|1|"
        self.has_pipelining = True
        # self.instance_id = "instance-00000045" # C.get_config(C.p, "xos", "instance_id", "INSTANCE_ID", None)
        # self.sliver_name = "onlab_hpc-355" # C.get_config(C.p, "xos", "sliver_name", "SLIVER_NAME", None)

        # read per-host inventory vars; instance_id/sliver_name are required
        inject = {}
        inject = utils.combine_vars(inject, self.runner.inventory.get_variables(self.host))

        self.instance_id = inject["instance_id"]
        self.sliver_name = inject["sliver_name"]

        # serialize creation of the control-path dir across runner processes
        # NOTE: octal literal 0700 is Python 2 syntax (0o700 in Python 3)
        fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
        self.cp_dir = utils.prepare_writeable_dir("$HOME/.ansible/cp", mode=0700)
        fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
Ejemplo n.º 48
0
        def process_files(filename, filename2, filename3, filename4, host=None):
            """Decide where variables loaded from filename4 should live.

            Returns True when the file yielded data, False otherwise.
            """
            data = utils.parse_yaml_from_file(filename4, vault_password=self.vault_password)
            if not data:
                # we did not process this file
                return False
            if type(data) != dict:
                raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename4)
            if host is None:
                # non-host-specific vars merge straight into play vars
                self.vars = utils.combine_vars(self.vars, data)
            else:
                # host pass: pick the cache key based on where templating
                # introduced host-specific parts into the path
                if not self._has_vars_in(filename2):
                    target_filename = None
                elif self._has_vars_in(filename3):
                    target_filename = filename4
                else:
                    target_filename = filename3
                update_vars_cache(host, data, target_filename=target_filename)
            # we did process this file
            return True
Ejemplo n.º 49
0
 def _get_variables(self, hostname, **kwargs):
     """Assemble all ansible variables for *hostname*.

     Merge order (lowest to highest precedence): plugin-provided
     get_ansible_vars, vars-plugin run()/get_host_vars(), inventory
     host vars, parser host vars, host_vars/ files, then the values
     derived from the instance config.
     """
     host = self.get_host(hostname)
     if host is None:
         raise errors.AnsibleError("host not found: %s" % hostname)
     instance = self.ctrl.instances[hostname]
     # baseline every host gets: connection type plus ploy bookkeeping objects
     result = dict(
         ansible_connection='execnet_connection',
         _ploy_instance=instance,
         _ploy_instances=self.ctrl.instances)
     for k, v in instance.config.items():
         if k == 'password' and instance.config['password-fallback']:
             result['ansible_ssh_pass'] = v
         elif k.startswith('ansible_'):
             # pass through native ansible_* settings unchanged
             result[k] = v
         elif k.startswith('ansible-'):
             # dashed form: strip the prefix, dashes become underscores
             result[k[len('ansible-'):].replace('-', '_')] = v
         else:
             # expose remaining config both as ploy_* and legacy awsome_*
             result['ploy_%s' % k.replace('-', '_')] = v
             result['awsome_%s' % k.replace('-', '_')] = v
     vars = {}
     # plugin-contributed vars come first (lowest precedence)
     for plugin in self.ctrl.plugins.values():
         if 'get_ansible_vars' not in plugin:
             continue
         vars = utils.combine_vars(vars, plugin['get_ansible_vars'](instance))
     # vars plugins: run() results first, then host-specific get_host_vars()
     vars_results = [plugin.run(host) for plugin in self._vars_plugins if hasattr(plugin, 'run')]
     for updated in vars_results:
         if updated is not None:
             vars = utils.combine_vars(vars, updated)
     vars_results = [plugin.get_host_vars(host) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')]
     for updated in vars_results:
         if updated is not None:
             vars = utils.combine_vars(vars, updated)
     vars = utils.combine_vars(vars, host.get_variables())
     if self.parser is not None:
         vars = utils.combine_vars(vars, self.parser.get_host_variables(host))
     if hasattr(self, 'get_host_vars'):
         vars = utils.combine_vars(vars, self.get_host_vars(host))
     # instance-config derived values win over everything else
     vars = utils.combine_vars(vars, result)
     return PloyInventoryDict(vars)
Ejemplo n.º 50
0
        def process_files(filename, filename2, filename3, filename4, host=None):

            """ pseudo-algorithm for deciding where new vars should go:
            host-specific passes feed the setup cache, non-host-specific
            data merges into play vars. """

            data = utils.parse_yaml_from_file(filename4, vault_password=self.vault_password)
            if not data:
                return
            if type(data) != dict:
                raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename4)
            if host is not None:
                if self._has_vars_in(filename2) and not self._has_vars_in(filename3):
                    # running a host specific pass and has host specific variables
                    # load into setup cache
                    update_vars_cache(host, inject, data, filename4)
                elif self._has_vars_in(filename3) and not self._has_vars_in(filename4):
                    # handle mixed scope variables in filepath
                    update_vars_cache(host, inject, data, filename4)
            elif not self._has_vars_in(filename4):
                # found a non-host specific variable, load into vars and NOT
                # the setup cache.
                # BUG FIX: the old `if host is not None: self.vars.update(data)`
                # branch here was unreachable (host is always None inside this
                # elif) and has been removed; behavior is unchanged.
                self.vars = utils.combine_vars(self.vars, data)
Ejemplo n.º 51
0
    def _update_vars_files_for_host(self, host):
        """Load the play's vars_files, resolving templated filenames.

        When ``host`` is None this is a host-independent pass and resolved
        variables are merged into ``self.vars``.  When a host is given, the
        filenames may reference host facts; files whose names only resolve
        once host variables are injected are loaded into that host's
        SETUP_CACHE instead of the play-wide vars.
        """

        # vars_files may be given as a single item; normalize to a list
        if type(self.vars_files) != list:
            self.vars_files = [ self.vars_files ]

        if host is not None:
            # template context for the host-specific filename pass:
            # inventory variables plus any facts gathered so far
            inject = {}
            inject.update(self.playbook.inventory.get_variables(host))
            inject.update(self.playbook.SETUP_CACHE[host])

        for filename in self.vars_files:

            if type(filename) == list:

                # a list entry means "first found wins": try each candidate
                # in order, load the first that exists, fail if none do
                found = False
                sequence = []
                for real_filename in filename:
                    # two-stage templating: play vars first, then (when a
                    # host is given) host/fact variables
                    filename2 = template(self.basedir, real_filename, self.vars)
                    filename3 = filename2
                    if host is not None:
                        filename3 = template(self.basedir, filename2, inject)
                    filename4 = utils.path_dwim(self.basedir, filename3)
                    sequence.append(filename4)
                    if os.path.exists(filename4):
                        found = True
                        data = utils.parse_yaml_from_file(filename4)
                        if type(data) != dict:
                            raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename4)
                        if host is not None:
                            if self._has_vars_in(filename2) and not self._has_vars_in(filename3):
                                # the filename only resolved once host facts
                                # were injected, so its contents are host
                                # specific: store them in the per host
                                # SETUP_CACHE
                                self.playbook.SETUP_CACHE[host].update(data)
                                self.playbook.callbacks.on_import_for_host(host, filename4)
                        elif not self._has_vars_in(filename4):
                            # found a non-host specific variable, load into vars and NOT
                            # the setup cache
                            self.vars.update(data)
                    elif host is not None:
                        self.playbook.callbacks.on_not_import_for_host(host, filename4)
                    if found:
                        break
                if not found and host is not None:
                    raise errors.AnsibleError(
                        "%s: FATAL, no files matched for vars_files import sequence: %s" % (host, sequence)
                    )

            else:
                # plain (non-list) entry: a single filename, load it!

                filename2 = template(self.basedir, filename, self.vars)
                filename3 = filename2
                if host is not None:
                    filename3 = template(self.basedir, filename2, inject)
                filename4 = utils.path_dwim(self.basedir, filename3)
                if self._has_vars_in(filename4):
                    # still contains unresolved variables; skip on this pass
                    continue
                new_vars = utils.parse_yaml_from_file(filename4)
                if new_vars:
                    if type(new_vars) != dict:
                        raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename4, type(new_vars)))
                    if host is not None and self._has_vars_in(filename2) and not self._has_vars_in(filename3):
                        # running a host specific pass and has host specific variables
                        # load into setup cache
                        self.playbook.SETUP_CACHE[host] = utils.combine_vars(
                            self.playbook.SETUP_CACHE[host], new_vars)
                        self.playbook.callbacks.on_import_for_host(host, filename4)
                    elif host is None:
                        # running a non-host specific pass and we can update the global vars instead
                        self.vars = utils.combine_vars(self.vars, new_vars)
Ejemplo n.º 52
0
    def _load_tasks(self, tasks, vars=None, default_vars=None, sudo_vars=None, additional_conditions=None, original_file=None, role_name=None):
        ''' handle task and handler include statements '''
        # Recursively expands 'include:' entries into Task objects, threading
        # vars, sudo settings and conditionals down into the included files.
        #
        # tasks                 -- list of raw task dicts (may be None)
        # vars                  -- extra variables for this include level
        # default_vars          -- role default variables (lowest precedence)
        # sudo_vars             -- inherited sudo/sudo_user settings
        # additional_conditions -- conditionals inherited from parent includes
        # original_file         -- path of the file these tasks came from
        # role_name             -- role these tasks belong to, if any
        #
        # Returns a flat list of Task objects.

        results = []
        if tasks is None:
            # support empty handler files, and the like.
            tasks = []
        if additional_conditions is None:
            additional_conditions = []
        if vars is None:
            vars = {}
        if default_vars is None:
            default_vars = {}
        if sudo_vars is None:
            sudo_vars = {}

        old_conditions = list(additional_conditions)

        for x in tasks:

            # prevent assigning the same conditions to each task on an include
            included_additional_conditions = list(old_conditions)

            if not isinstance(x, dict):
                raise errors.AnsibleError("expecting dict; got: %s" % x)

            # evaluate sudo vars for current and child tasks
            included_sudo_vars = {}
            for k in ["sudo", "sudo_user"]:
                if k in x:
                    included_sudo_vars[k] = x[k]
                elif k in sudo_vars:
                    included_sudo_vars[k] = sudo_vars[k]
                    x[k] = sudo_vars[k]

            if 'meta' in x:
                if x['meta'] == 'flush_handlers':
                    results.append(Task(self, x))
                    continue

            task_vars = self.vars.copy()
            task_vars.update(vars)
            if original_file:
                task_vars['_original_file'] = original_file

            if 'include' in x:
                tokens = shlex.split(str(x['include']))
                items = ['']
                included_additional_conditions = list(additional_conditions)
                include_vars = {}
                for k in x:
                    if k.startswith("with_"):
                        utils.deprecated("include + with_items is an unsupported feature and has been undocumented for many releases because of this", "1.5")
                        plugin_name = k[5:]
                        if plugin_name not in utils.plugins.lookup_loader:
                            raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name))
                        terms = template(self.basedir, x[k], task_vars)
                        items = utils.plugins.lookup_loader.get(plugin_name, basedir=self.basedir, runner=None).run(terms, inject=task_vars)
                    elif k.startswith("when_"):
                        included_additional_conditions.insert(0, utils.compile_when_to_only_if("%s %s" % (k[5:], x[k])))
                    elif k == 'when':
                        if type(x[k]) is str:
                            included_additional_conditions.insert(0, utils.compile_when_to_only_if("jinja2_compare %s" % x[k]))
                        elif type(x[k]) is list:
                            for i in x[k]:
                                included_additional_conditions.insert(0, utils.compile_when_to_only_if("jinja2_compare %s" % i))
                    elif k in ("include", "vars", "default_vars", "only_if", "sudo", "sudo_user", "role_name"):
                        continue
                    else:
                        include_vars[k] = x[k]

                # role defaults apply unless the include sets its own
                default_vars = x.get('default_vars', {})
                if not default_vars:
                    default_vars = self.default_vars
                else:
                    default_vars = utils.combine_vars(self.default_vars, default_vars)

                # append the vars defined with the include (from above)
                # as well as the old-style 'vars' element. The old-style
                # vars are given higher precedence here (just in case)
                task_vars = utils.combine_vars(task_vars, include_vars)
                if 'vars' in x:
                    task_vars = utils.combine_vars(task_vars, x['vars'])

                if 'only_if' in x:
                    included_additional_conditions.append(x['only_if'])

                new_role = None
                if 'role_name' in x:
                    new_role = x['role_name']

                for item in items:
                    mv = task_vars.copy()
                    mv['item'] = item
                    # key=value tokens after the filename become variables
                    for t in tokens[1:]:
                        (k, v) = t.split("=", 1)
                        mv[k] = template(self.basedir, v, mv)
                    dirname = self.basedir
                    if original_file:
                        dirname = os.path.dirname(original_file)
                    include_file = template(dirname, tokens[0], mv)
                    include_filename = utils.path_dwim(dirname, include_file)
                    data = utils.parse_yaml_from_file(include_filename)
                    if 'role_name' in x and data is not None:
                        # BUGFIX: use a distinct loop variable here -- the
                        # original reused 'x', clobbering the current task
                        # dict and corrupting the "'role_name' in x" check
                        # (and the rest of this loop) on subsequent items.
                        # Also guard against non-dict entries, which would
                        # otherwise fail on item assignment.
                        for y in data:
                            if isinstance(y, dict) and 'include' in y:
                                y['role_name'] = new_role
                    loaded = self._load_tasks(data, mv, default_vars, included_sudo_vars, list(included_additional_conditions), original_file=include_filename, role_name=new_role)
                    results += loaded
            elif type(x) == dict:
                task = Task(self, x, module_vars=task_vars, default_vars=default_vars, additional_conditions=list(additional_conditions), role_name=role_name)
                results.append(task)
            else:
                raise Exception("unexpected task type")

        # apply play-level tags to every resulting task
        for x in results:
            if self.tags is not None:
                x.tags.extend(self.tags)

        return results
Ejemplo n.º 53
0
    def _build_role_dependencies(self, roles, dep_stack, passed_vars=None, level=0):
        ''' recursively resolve role dependencies (from meta/main) onto dep_stack '''
        # Each stack entry is [name, path, vars, defaults].  Variables flow
        # downward: passed_vars < role vars/main < dependency declaration.

        # BUGFIX: avoid a mutable default argument ({} would be shared
        # between all calls that omit passed_vars)
        if passed_vars is None:
            passed_vars = {}
        # this number is arbitrary, but it seems sane
        if level > 20:
            raise errors.AnsibleError("too many levels of recursion while resolving role dependencies")
        for role in roles:
            role_path, role_vars = self._get_role_path(role)
            role_vars = utils.combine_vars(passed_vars, role_vars)
            vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'vars')))
            vars_data = {}
            if os.path.isfile(vars):
                vars_data = utils.parse_yaml_from_file(vars)
                if vars_data:
                    role_vars = utils.combine_vars(vars_data, role_vars)
            defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults')))
            defaults_data = {}
            if os.path.isfile(defaults):
                defaults_data = utils.parse_yaml_from_file(defaults)
            # the meta directory contains the yaml that should
            # hold the list of dependencies (if any)
            meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'meta')))
            if os.path.isfile(meta):
                data = utils.parse_yaml_from_file(meta)
                if data:
                    dependencies = data.get('dependencies', [])
                    for dep in dependencies:
                        allow_dupes = False
                        (dep_path, dep_vars) = self._get_role_path(dep)
                        meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'meta')))
                        if os.path.isfile(meta):
                            meta_data = utils.parse_yaml_from_file(meta)
                            if meta_data:
                                allow_dupes = utils.boolean(meta_data.get('allow_duplicates', ''))

                        # if tags are set from this role, merge them
                        # into the tags list for the dependent role
                        if "tags" in passed_vars:
                            for included_role_dep in dep_stack:
                                included_dep_name = included_role_dep[0]
                                included_dep_vars = included_role_dep[2]
                                if included_dep_name == dep:
                                    if "tags" in included_dep_vars:
                                        included_dep_vars["tags"] = list(set(included_dep_vars["tags"] + passed_vars["tags"]))
                                    else:
                                        # BUGFIX: Python 2 lists have no
                                        # .copy() method; copy via list()
                                        included_dep_vars["tags"] = list(passed_vars["tags"])

                        dep_vars = utils.combine_vars(passed_vars, dep_vars)
                        dep_vars = utils.combine_vars(role_vars, dep_vars)
                        vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'vars')))
                        vars_data = {}
                        if os.path.isfile(vars):
                            vars_data = utils.parse_yaml_from_file(vars)
                            if vars_data:
                                dep_vars = utils.combine_vars(vars_data, dep_vars)
                        defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'defaults')))
                        dep_defaults_data = {}
                        if os.path.isfile(defaults):
                            dep_defaults_data = utils.parse_yaml_from_file(defaults)
                        if 'role' in dep_vars:
                            del dep_vars['role']

                        if "tags" in passed_vars:
                            if not self._is_valid_tag(passed_vars["tags"]):
                                # one of the tags specified for this role was in the
                                # skip list, or we're limiting the tags and it didn't
                                # match one, so we just skip it completely
                                continue

                        if not allow_dupes:
                            if dep in self.included_roles:
                                # skip back to the top, since we don't want to
                                # do anything else with this role
                                continue
                            else:
                                self.included_roles.append(dep)

                        # pass along conditionals from roles to dep roles
                        if type(role) is dict:
                            if 'when' in passed_vars:
                                if 'when' in dep_vars:
                                    tmpcond = []

                                    if type(passed_vars['when']) is str:
                                        tmpcond.append(passed_vars['when'])
                                    elif type(passed_vars['when']) is list:
                                        # BUGFIX: was tmpcond.join(...), which
                                        # raised AttributeError (lists have no
                                        # join); extend like the branch below
                                        tmpcond += passed_vars['when']

                                    if type(dep_vars['when']) is str:
                                        tmpcond.append(dep_vars['when'])
                                    elif type(dep_vars['when']) is list:
                                        tmpcond += dep_vars['when']

                                    if len(tmpcond) > 0:
                                        dep_vars['when'] = tmpcond
                                else:
                                    dep_vars['when'] = passed_vars['when']

                        self._build_role_dependencies([dep], dep_stack, passed_vars=dep_vars, level=level + 1)
                        dep_stack.append([dep, dep_path, dep_vars, dep_defaults_data])

            # only add the current role when we're at the top level,
            # otherwise we'll end up in a recursive loop
            if level == 0:
                self.included_roles.append(role)
                dep_stack.append([role, role_path, role_vars, defaults_data])
        return dep_stack
Ejemplo n.º 54
0
    def _update_vars_files_for_host(self, host):
        """Load the play's vars_files, resolving templated filenames.

        When ``host`` is None this is a host-independent pass and resolved
        variables are merged into ``self.vars``.  When a host is given, the
        filenames may reference host facts; files whose names only resolve
        once host variables are injected are loaded into that host's
        SETUP_CACHE instead of the play-wide vars.
        """

        # vars_files may be given as a single item; normalize to a list
        if type(self.vars_files) != list:
            self.vars_files = [ self.vars_files ]

        if host is not None:
            # template context for the host-specific filename pass:
            # inventory variables plus any facts gathered so far
            inject = {}
            inject.update(self.playbook.inventory.get_variables(host))
            inject.update(self.playbook.SETUP_CACHE[host])

        for filename in self.vars_files:

            if type(filename) == list:

                # a list entry means "first found wins": try each candidate
                # in order, load the first that exists, fail if none do
                found = False
                sequence = []
                for real_filename in filename:
                    # two-stage templating: play vars first, then (when a
                    # host is given) host/fact variables
                    filename2 = template(self.basedir, real_filename, self.vars)
                    filename3 = filename2
                    if host is not None:
                        filename3 = template(self.basedir, filename2, inject)
                    filename4 = utils.path_dwim(self.basedir, filename3)
                    sequence.append(filename4)
                    if os.path.exists(filename4):
                        found = True
                        data = utils.parse_yaml_from_file(filename4)
                        if type(data) != dict:
                            raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename4)
                        if host is not None:
                            if self._has_vars_in(filename2) and not self._has_vars_in(filename3):
                                # the filename only resolved once host facts
                                # were injected, so its contents are host
                                # specific: store them in the per host
                                # SETUP_CACHE
                                self.playbook.SETUP_CACHE[host].update(data)
                                self.playbook.callbacks.on_import_for_host(host, filename4)
                        elif not self._has_vars_in(filename4):
                            # found a non-host specific variable, load into vars and NOT
                            # the setup cache
                            self.vars.update(data)
                    elif host is not None:
                        self.playbook.callbacks.on_not_import_for_host(host, filename4)
                    if found:
                        break
                if not found and host is not None:
                    raise errors.AnsibleError(
                        "%s: FATAL, no files matched for vars_files import sequence: %s" % (host, sequence)
                    )

            else:
                # plain (non-list) entry: a single filename, load it!

                filename2 = template(self.basedir, filename, self.vars)
                filename3 = filename2
                if host is not None:
                    filename3 = template(self.basedir, filename2, inject)
                filename4 = utils.path_dwim(self.basedir, filename3)
                if self._has_vars_in(filename4):
                    # still contains unresolved variables; skip on this pass
                    continue
                new_vars = utils.parse_yaml_from_file(filename4)
                if new_vars:
                    if type(new_vars) != dict:
                        raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename4, type(new_vars)))
                    if host is not None and self._has_vars_in(filename2) and not self._has_vars_in(filename3):
                        # running a host specific pass and has host specific variables
                        # load into setup cache
                        self.playbook.SETUP_CACHE[host] = utils.combine_vars(
                            self.playbook.SETUP_CACHE[host], new_vars)
                        self.playbook.callbacks.on_import_for_host(host, filename4)
                    elif host is None:
                        # running a non-host specific pass and we can update the global vars instead
                        self.vars = utils.combine_vars(self.vars, new_vars)
Ejemplo n.º 55
0
    def _load_tasks(self,
                    tasks,
                    vars=None,
                    default_vars=None,
                    sudo_vars=None,
                    additional_conditions=None,
                    original_file=None,
                    role_name=None):
        ''' handle task and handler include statements '''
        # Recursively expands 'include:' entries into Task objects, threading
        # vars, sudo settings and conditionals down into the included files.
        #
        # tasks                 -- list of raw task dicts (may be None)
        # vars                  -- extra variables for this include level
        # default_vars          -- role default variables (lowest precedence)
        # sudo_vars             -- inherited sudo/sudo_user settings
        # additional_conditions -- conditionals inherited from parent includes
        # original_file         -- path of the file these tasks came from
        # role_name             -- role these tasks belong to, if any
        #
        # Returns a flat list of Task objects.

        results = []
        if tasks is None:
            # support empty handler files, and the like.
            tasks = []
        if additional_conditions is None:
            additional_conditions = []
        if vars is None:
            vars = {}
        if default_vars is None:
            default_vars = {}
        if sudo_vars is None:
            sudo_vars = {}

        old_conditions = list(additional_conditions)

        for x in tasks:

            # prevent assigning the same conditions to each task on an include
            included_additional_conditions = list(old_conditions)

            if not isinstance(x, dict):
                raise errors.AnsibleError(
                    "expecting dict; got: %s, error in %s" %
                    (x, original_file))

            # evaluate sudo vars for current and child tasks
            included_sudo_vars = {}
            for k in ["sudo", "sudo_user"]:
                if k in x:
                    included_sudo_vars[k] = x[k]
                elif k in sudo_vars:
                    included_sudo_vars[k] = sudo_vars[k]
                    x[k] = sudo_vars[k]

            if 'meta' in x:
                if x['meta'] == 'flush_handlers':
                    results.append(Task(self, x))
                    continue

            task_vars = self.vars.copy()
            task_vars.update(vars)
            if original_file:
                task_vars['_original_file'] = original_file

            if 'include' in x:
                tokens = shlex.split(str(x['include']))
                items = ['']
                included_additional_conditions = list(additional_conditions)
                include_vars = {}
                for k in x:
                    if k.startswith("with_"):
                        if original_file:
                            offender = " (in %s)" % original_file
                        else:
                            offender = ""
                        utils.deprecated(
                            "include + with_items is a removed deprecated feature"
                            + offender,
                            "1.5",
                            removed=True)
                    elif k.startswith("when_"):
                        utils.deprecated(
                            "\"when_<criteria>:\" is a removed deprecated feature, use the simplified 'when:' conditional directly",
                            None,
                            removed=True)
                    elif k in ("include", "vars", "default_vars", "when",
                               "sudo", "sudo_user", "role_name", "no_log"):
                        # BUGFIX: 'when' is now excluded here and handled
                        # exactly once below -- previously str/list
                        # conditions were added both here and in the
                        # post-loop block, duplicating each condition
                        continue
                    else:
                        include_vars[k] = x[k]

                # role defaults apply unless the include sets its own
                default_vars = x.get('default_vars', {})
                if not default_vars:
                    default_vars = self.default_vars
                else:
                    default_vars = utils.combine_vars(self.default_vars,
                                                      default_vars)

                # append the vars defined with the include (from above)
                # as well as the old-style 'vars' element. The old-style
                # vars are given higher precedence here (just in case)
                task_vars = utils.combine_vars(task_vars, include_vars)
                if 'vars' in x:
                    task_vars = utils.combine_vars(task_vars, x['vars'])

                # single point where 'when' conditions are attached; this
                # also covers unicode and bool values
                if 'when' in x:
                    if isinstance(x['when'], (basestring, bool)):
                        included_additional_conditions.append(x['when'])
                    elif isinstance(x['when'], list):
                        included_additional_conditions.extend(x['when'])

                new_role = None
                if 'role_name' in x:
                    new_role = x['role_name']

                for item in items:
                    mv = task_vars.copy()
                    mv['item'] = item
                    # key=value tokens after the filename become variables
                    for t in tokens[1:]:
                        (k, v) = t.split("=", 1)
                        mv[k] = template(self.basedir, v, mv)
                    dirname = self.basedir
                    if original_file:
                        dirname = os.path.dirname(original_file)
                    include_file = template(dirname, tokens[0], mv)
                    include_filename = utils.path_dwim(dirname, include_file)
                    data = utils.parse_yaml_from_file(
                        include_filename, vault_password=self.vault_password)
                    if 'role_name' in x and data is not None:
                        # propagate the role name into nested includes
                        for y in data:
                            if isinstance(y, dict) and 'include' in y:
                                y['role_name'] = new_role
                    loaded = self._load_tasks(
                        data,
                        mv,
                        default_vars,
                        included_sudo_vars,
                        list(included_additional_conditions),
                        original_file=include_filename,
                        role_name=new_role)
                    results += loaded
            elif type(x) == dict:
                task = Task(self,
                            x,
                            module_vars=task_vars,
                            default_vars=default_vars,
                            additional_conditions=list(additional_conditions),
                            role_name=role_name)
                results.append(task)
            else:
                raise Exception("unexpected task type")

        # apply play-level tags to every resulting task
        for x in results:
            if self.tags is not None:
                x.tags.extend(self.tags)

        return results
Ejemplo n.º 56
0
    def __init__(self, playbook, ds, basedir, vault_password=None):
        ''' constructor loads from a play datastructure '''
        # playbook       -- owning PlayBook object (supplies defaults, caches)
        # ds             -- the parsed play dict from the playbook YAML
        # basedir        -- directory relative paths are resolved against
        # vault_password -- password for decrypting vaulted vars files, if any

        # reject unknown keys early so typos in playbooks surface clearly
        for x in ds.keys():
            if x not in Play.VALID_KEYS:
                raise errors.AnsibleError(
                    "%s is not a legal parameter in an Ansible Playbook" % x)

        # allow all playbook keys to be set by --extra-vars
        self.vars = ds.get('vars', {})
        self.vars_prompt = ds.get('vars_prompt', {})
        self.playbook = playbook
        self.vars = self._get_vars()
        self.basedir = basedir
        self.roles = ds.get('roles', None)
        self.tags = ds.get('tags', None)
        self.vault_password = vault_password

        # normalize tags to a list (a comma separated string is allowed)
        if self.tags is None:
            self.tags = []
        elif type(self.tags) in [str, unicode]:
            self.tags = self.tags.split(",")
        elif type(self.tags) != list:
            self.tags = []

        # We first load the vars files from the datastructure
        # so we have the default variables to pass into the roles
        self.vars_files = ds.get('vars_files', [])
        if not isinstance(self.vars_files, list):
            raise errors.AnsibleError('vars_files must be a list')
        self._update_vars_files_for_host(None)

        # now we load the roles into the datastructure
        self.included_roles = []
        ds = self._load_roles(self.roles, ds)

        # and finally re-process the vars files as they may have
        # been updated by the included roles
        self.vars_files = ds.get('vars_files', [])
        if not isinstance(self.vars_files, list):
            raise errors.AnsibleError('vars_files must be a list')

        self._update_vars_files_for_host(None)

        # apply any extra_vars specified on the command line now
        if type(self.playbook.extra_vars) == dict:
            self.vars = utils.combine_vars(self.vars, self.playbook.extra_vars)

        # template everything to be efficient, but do not pre-mature template
        # tasks/handlers as they may have inventory scope overrides
        _tasks = ds.pop('tasks', [])
        _handlers = ds.pop('handlers', [])
        ds = template(basedir, ds, self.vars)
        ds['tasks'] = _tasks
        ds['handlers'] = _handlers

        self._ds = ds

        hosts = ds.get('hosts')
        if hosts is None:
            raise errors.AnsibleError('hosts declaration is required')
        elif isinstance(hosts, list):
            hosts = ';'.join(hosts)
        self.serial = int(ds.get('serial', 0))
        self.hosts = hosts
        self.name = ds.get('name', self.hosts)
        self._tasks = ds.get('tasks', [])
        self._handlers = ds.get('handlers', [])
        self.remote_user = ds.get('remote_user',
                                  ds.get('user', self.playbook.remote_user))
        self.remote_port = ds.get('port', self.playbook.remote_port)
        self.sudo = ds.get('sudo', self.playbook.sudo)
        self.sudo_user = ds.get('sudo_user', self.playbook.sudo_user)
        self.transport = ds.get('connection', self.playbook.transport)
        # (removed a dead 'self.remote_port = self.remote_port' no-op here)
        self.any_errors_fatal = utils.boolean(
            ds.get('any_errors_fatal', 'false'))
        self.accelerate = utils.boolean(ds.get('accelerate', 'false'))
        self.accelerate_port = ds.get('accelerate_port', None)
        self.accelerate_ipv6 = ds.get('accelerate_ipv6', False)
        self.max_fail_pct = int(ds.get('max_fail_percentage', 100))
        self.su = ds.get('su', self.playbook.su)
        self.su_user = ds.get('su_user', self.playbook.su_user)

        # gather_facts is not a simple boolean, as None means  that a 'smart'
        # fact gathering mode will be used, so we need to be careful here as
        # calling utils.boolean(None) returns False
        self.gather_facts = ds.get('gather_facts', None)
        if self.gather_facts:
            self.gather_facts = utils.boolean(self.gather_facts)

        # Fail out if user specifies a sudo param with a su param in a given play
        if (ds.get('sudo') or ds.get('sudo_user')) and (ds.get('su')
                                                        or ds.get('su_user')):
            raise errors.AnsibleError(
                'sudo params ("sudo", "sudo_user") and su params '
                '("su", "su_user") cannot be used together')

        # magic variables made available to vars_files/tasks templating
        load_vars = {}
        load_vars['role_names'] = ds.get('role_names', [])
        load_vars['playbook_dir'] = self.basedir
        if self.playbook.inventory.basedir() is not None:
            load_vars['inventory_dir'] = self.playbook.inventory.basedir()

        self._tasks = self._load_tasks(self._ds.get('tasks', []), load_vars)
        self._handlers = self._load_tasks(self._ds.get('handlers', []),
                                          load_vars)

        # apply any missing tags to role tasks
        self._late_merge_role_tags()

        if self.sudo_user != 'root':
            self.sudo = True

        # place holder for the discovered hosts to be used in this play
        self._play_hosts = None