def _write_repositories_file(alpine_repo, alpine_version, local_repo):
    """Write the /etc/apk/repositories file with the specified entries.

    @param alpine_repo: A nested dict of the alpine_repo configuration.
    @param alpine_version: A string of the Alpine version to use.
    @param local_repo: A string containing the base URL of a local repo.
    """
    repo_file = '/etc/apk/repositories'

    alpine_baseurl = alpine_repo.get('base_url', DEFAULT_MIRROR)

    params = {'alpine_baseurl': alpine_baseurl,
              'alpine_version': alpine_version,
              'community_enabled': alpine_repo.get('community_enabled'),
              'testing_enabled': alpine_repo.get('testing_enabled'),
              'local_repo': local_repo}

    tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl")
    template_fn = tfile[1]  # Filepath is second item in tuple
    try:
        util.write_file(template_fn, content=REPOSITORIES_TEMPLATE)

        LOG.debug('Generating Alpine repository configuration file: %s',
                  repo_file)
        templater.render_to_file(template_fn, repo_file, params)
    finally:
        # Clean up the temporary template even when rendering raises;
        # previously a failed render leaked the temp file on every run.
        util.del_file(template_fn)
def generate_resolv_conf(cloud, log, params):
    """Render /etc/resolv.conf from the distro's resolv.conf template.

    Boolean entries under params['options'] are converted into bare
    flags: options set to True are collected into params['flags'],
    options set to False are simply dropped.
    """
    template_fn = cloud.get_template_filename('resolv.conf')
    if not template_fn:
        log.warn("No template found, not rendering /etc/resolv.conf")
        return

    flags = []
    false_flags = []
    if 'options' in params:
        # .items() instead of the Python-2-only .iteritems(), and
        # isinstance() instead of comparing type objects directly.
        for key, val in params['options'].items():
            if isinstance(val, bool):
                if val:
                    flags.append(key)
                else:
                    false_flags.append(key)

    for flag in flags + false_flags:
        del params['options'][flag]

    params['flags'] = flags
    log.debug("Writing resolv.conf from template %s" % template_fn)
    if cloud.distro.name == "aix":
        templater.render_to_file(template_fn, '/etc/resolv.conf', params)
    else:
        # Network Manager likes to overwrite the resolv.conf file, so make
        # sure it is immutable after write
        util.subp(['chattr', '-i', '/etc/resolv.conf'])
        templater.render_to_file(template_fn, '/etc/resolv.conf', params)
        util.subp(['chattr', '+i', '/etc/resolv.conf'])
def write_ntp_config_template(cfg, cloud, path, template=None):
    """Render the distro's ntp template to ``path``.

    Falls back to the distro default pool servers when the config
    supplies neither 'servers' nor 'pools'.

    @param cfg: dict of ntp configuration ('servers', 'pools').
    @param cloud: cloud instance used to locate templates.
    @param path: destination file for the rendered configuration.
    @param template: optional template name override.
    @raises RuntimeError: when no usable template can be located.
    """
    servers = cfg.get('servers', [])
    pools = cfg.get('pools', [])

    if len(servers) == 0 and len(pools) == 0:
        pools = generate_server_names(cloud.distro.name)
        LOG.debug('Adding distro default ntp pool servers: %s',
                  ','.join(pools))

    params = {
        'servers': servers,
        'pools': pools,
    }

    if template is None:
        template = 'ntp.conf.%s' % cloud.distro.name

    template_fn = cloud.get_template_filename(template)
    if not template_fn:
        template_fn = cloud.get_template_filename('ntp.conf')
        if not template_fn:
            # Interpolate path into the message; previously it was passed
            # as a stray second constructor arg, leaving '%s' unexpanded.
            raise RuntimeError("No template found, "
                               "not rendering %s" % path)

    templater.render_to_file(template_fn, path, params)
def handle(name, cfg, cloud, log, _args):
    """Manage /etc/hosts according to the 'manage_etc_hosts' option."""
    manage_hosts = util.get_cfg_option_str(cfg, "manage_etc_hosts", False)
    if util.translate_bool(manage_hosts, addons=['template']):
        hostname, fqdn = util.get_hostname_fqdn(cfg, cloud)
        if not hostname:
            log.warn(("Option 'manage_etc_hosts' was set,"
                      " but no hostname was found"))
            return

        # Render /etc/hosts from the per-osfamily template.
        distro_family = cloud.distro.osfamily
        tpl_fn_name = cloud.get_template_filename("hosts.%s" % distro_family)
        if not tpl_fn_name:
            raise RuntimeError(
                ("No hosts template could be"
                 " found for distro %s") % distro_family)

        tpl_params = {'hostname': hostname, 'fqdn': fqdn}
        templater.render_to_file(tpl_fn_name, '/etc/hosts', tpl_params)
    elif manage_hosts == "localhost":
        hostname, fqdn = util.get_hostname_fqdn(cfg, cloud)
        if not hostname:
            log.warn(("Option 'manage_etc_hosts' was set,"
                      " but no hostname was found"))
            return
        log.debug("Managing localhost in /etc/hosts")
        cloud.distro.update_etc_hosts(hostname, fqdn)
    else:
        log.debug(("Configuration option 'manage_etc_hosts' is not set,"
                   " not managing /etc/hosts in module %s"), name)
def handle(name, cfg, cloud, log, _args):
    """Write or update /etc/hosts based on the 'manage_etc_hosts' option."""
    mode = util.get_cfg_option_str(cfg, "manage_etc_hosts", False)
    use_template = util.translate_bool(mode, addons=['template'])

    # Nothing to do unless the option selects template or localhost mode.
    if not use_template and mode != "localhost":
        log.debug(("Configuration option 'manage_etc_hosts' is not set,"
                   " not managing /etc/hosts in module %s"), name)
        return

    (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
    if not hostname:
        log.warn(("Option 'manage_etc_hosts' was set,"
                  " but no hostname was found"))
        return

    if use_template:
        # Render the full file from the per-osfamily template.
        osfamily = cloud.distro.osfamily
        tpl_fn_name = cloud.get_template_filename("hosts.%s" % (osfamily))
        if not tpl_fn_name:
            raise RuntimeError(("No hosts template could be"
                                " found for distro %s") % (osfamily))
        templater.render_to_file(tpl_fn_name, '/etc/hosts',
                                 {'hostname': hostname, 'fqdn': fqdn})
    else:
        log.debug("Managing localhost in /etc/hosts")
        cloud.distro.update_etc_hosts(hostname, fqdn)
def write_ntp_config_template(
    distro_name,
    service_name=None,
    servers=None,
    pools=None,
    path=None,
    template_fn=None,
    template=None,
):
    """Render a ntp client configuration for the specified client.

    @param distro_name: string.  The distro class name.
    @param service_name: string.  The name of the NTP client service.
    @param servers: A list of strings specifying ntp servers. Defaults to
        empty list.
    @param pools: A list of strings specifying ntp pools. Defaults to an
        empty list.
    @param path: A string to specify where to write the rendered template.
    @param template_fn: A string to specify the template source file.
    @param template: A string specifying the contents of the template. This
        content will be written to a temporary file before being used to
        render the configuration file.

    @raises: ValueError when path is None.
    @raises: ValueError when template_fn is None and template is None.
    """
    if not servers:
        servers = []
    if not pools:
        pools = []

    if (len(servers) == 0 and distro_name == "alpine"
            and service_name == "ntpd"):
        # Alpine's Busybox ntpd only understands "servers" configuration
        # and not "pool" configuration.
        servers = generate_server_names(distro_name)
        LOG.debug("Adding distro default ntp servers: %s", ",".join(servers))
    elif len(servers) == 0 and len(pools) == 0:
        pools = generate_server_names(distro_name)
        LOG.debug(
            "Adding distro default ntp pool servers: %s", ",".join(pools)
        )

    if not path:
        raise ValueError("Invalid value for path parameter")

    if not template_fn and not template:
        raise ValueError("Not template_fn or template provided")

    params = {"servers": servers, "pools": pools}
    if template:
        tfile = temp_utils.mkstemp(prefix="template_name-", suffix=".tmpl")
        template_fn = tfile[1]  # filepath is second item in tuple
        util.write_file(template_fn, content=template)
        try:
            templater.render_to_file(template_fn, path, params)
        finally:
            # Always remove the temporary template, even when rendering
            # raises, so repeated failures do not leak temp files.
            util.del_file(template_fn)
    else:
        templater.render_to_file(template_fn, path, params)
def test_render_to_file(self):
    """render_to_file writes the rendered template to the target path."""
    tmp_dir = self.useFixture(fixtures.TempDir()).path
    src_path = os.path.join(tmp_dir, "src")
    dest_path = os.path.join(tmp_dir, "target")

    with open(src_path, "w") as src_fp:
        src_fp.write(self.jinja_tmpl)

    templater.render_to_file(src_path, dest_path, self.jinja_params)

    with open(dest_path, "r") as dest_fp:
        result = dest_fp.read()
    self.assertEqual(result, self.jinja_expected)
def test_jinja_nonascii_render_to_file(self):
    """Test jinja render_to_file of a filename with non-ascii content."""
    tmpl_fn = self.tmp_path("j-render-to-file.template")
    out_fn = self.tmp_path("j-render-to-file.out")

    # The template is written as UTF-8 bytes to exercise non-ascii paths.
    tmpl_content = self.add_header("jinja", self.jinja_utf8)
    write_file(filename=tmpl_fn, omode="wb",
               content=tmpl_content.encode('utf-8'))

    templater.render_to_file(tmpl_fn, out_fn, {"name": "bob"})

    raw = load_file(out_fn, decode=False)
    self.assertEqual(raw.decode('utf-8'), self.jinja_utf8_rbob)
def generate_sources_list(codename, mirrors, cloud, log):
    """Render /etc/apt/sources.list from the distro's template."""
    template_fn = cloud.get_template_filename(
        "sources.list.%s" % (cloud.distro.name))
    if not template_fn:
        # Fall back to the generic template when there is no
        # distro-specific one.
        template_fn = cloud.get_template_filename("sources.list")
        if not template_fn:
            log.warn("No template found, not rendering /etc/apt/sources.list")
            return

    params = {"codename": codename}
    params.update(mirrors)
    templater.render_to_file(template_fn, "/etc/apt/sources.list", params)
def generate_sources_list(codename, mirrors, cloud, log):
    """Write /etc/apt/sources.list rendered from a distro template."""
    tmpl = cloud.get_template_filename('sources.list.%s'
                                       % (cloud.distro.name))
    if not tmpl:
        tmpl = cloud.get_template_filename('sources.list')
        if not tmpl:
            log.warn("No template found, not rendering /etc/apt/sources.list")
            return

    # Template parameters: the release codename plus every mirror entry.
    params = {'codename': codename}
    for mirror_key in mirrors:
        params[mirror_key] = mirrors[mirror_key]
    templater.render_to_file(tmpl, '/etc/apt/sources.list', params)
def handle(name, cfg, cloud, log, _args):
    """Manage the distro's hosts file per the 'manage_etc_hosts' option.

    A truthy value (or the deprecated 'template') renders the hosts file
    from a per-osfamily template; 'localhost' updates only the localhost
    entry; any other value leaves the file untouched.
    """
    manage_hosts = util.get_cfg_option_str(cfg, "manage_etc_hosts", False)
    # The distro object decides where its hosts file lives.
    hosts_fn = cloud.distro.hosts_fn
    if util.translate_bool(manage_hosts, addons=["template"]):
        if manage_hosts == "template":
            log.warning(
                "DEPRECATED: please use manage_etc_hosts: true instead of"
                " 'template'"
            )
        (hostname, fqdn, _) = util.get_hostname_fqdn(cfg, cloud)
        if not hostname:
            log.warning(
                "Option 'manage_etc_hosts' was set, but no hostname was found"
            )
            return

        # Render from a template file
        tpl_fn_name = cloud.get_template_filename(
            "hosts.%s" % (cloud.distro.osfamily)
        )
        if not tpl_fn_name:
            raise RuntimeError(
                "No hosts template could be found for distro %s"
                % (cloud.distro.osfamily)
            )

        templater.render_to_file(
            tpl_fn_name, hosts_fn, {"hostname": hostname, "fqdn": fqdn}
        )
    elif manage_hosts == "localhost":
        (hostname, fqdn, _) = util.get_hostname_fqdn(cfg, cloud)
        if not hostname:
            log.warning(
                "Option 'manage_etc_hosts' was set, but no hostname was found"
            )
            return

        log.debug("Managing localhost in %s", hosts_fn)
        cloud.distro.update_etc_hosts(hostname, fqdn)
    else:
        log.debug(
            "Configuration option 'manage_etc_hosts' is not set,"
            " not managing %s in module %s",
            hosts_fn,
            name,
        )
def write_ntp_config_template(distro_name, servers=None, pools=None,
                              path=None, template_fn=None, template=None):
    """Render a ntp client configuration for the specified client.

    @param distro_name: string.  The distro class name.
    @param servers: A list of strings specifying ntp servers. Defaults to
        empty list.
    @param pools: A list of strings specifying ntp pools. Defaults to an
        empty list.
    @param path: A string to specify where to write the rendered template.
    @param template_fn: A string to specify the template source file.
    @param template: A string specifying the contents of the template. This
        content will be written to a temporary file before being used to
        render the configuration file.

    @raises: ValueError when path is None.
    @raises: ValueError when template_fn is None and template is None.
    """
    if not servers:
        servers = []
    if not pools:
        pools = []

    if len(servers) == 0 and len(pools) == 0:
        pools = generate_server_names(distro_name)
        LOG.debug(
            'Adding distro default ntp pool servers: %s', ','.join(pools))

    if not path:
        raise ValueError('Invalid value for path parameter')

    if not template_fn and not template:
        raise ValueError('Not template_fn or template provided')

    params = {'servers': servers, 'pools': pools}
    if template:
        tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl")
        template_fn = tfile[1]  # filepath is second item in tuple
        util.write_file(template_fn, content=template)
        try:
            templater.render_to_file(template_fn, path, params)
        finally:
            # Remove the temporary template even when rendering raises,
            # so failed runs do not leak temp files.
            util.del_file(template_fn)
    else:
        templater.render_to_file(template_fn, path, params)
def write_ntp_config_template(distro_name, servers=None, pools=None,
                              path=None, template_fn=None, template=None):
    """Write an ntp client configuration file rendered from a template.

    When neither servers nor pools are given, the distro's default pool
    servers are used.  If ``template`` content is supplied it is staged
    to a temporary file, rendered, and the temporary file removed.

    @raises: ValueError when path is None.
    @raises: ValueError when template_fn is None and template is None.
    """
    servers = servers or []
    pools = pools or []

    if not servers and not pools:
        pools = generate_server_names(distro_name)
        LOG.debug(
            'Adding distro default ntp pool servers: %s', ','.join(pools))

    if not path:
        raise ValueError('Invalid value for path parameter')

    if not template_fn and not template:
        raise ValueError('Not template_fn or template provided')

    params = {'servers': servers, 'pools': pools}

    wrote_tmp = False
    if template:
        # Stage the inline template content into a temp file for rendering.
        tmp_pair = temp_utils.mkstemp(prefix='template_name-',
                                      suffix=".tmpl")
        template_fn = tmp_pair[1]  # filepath is second item in tuple
        util.write_file(template_fn, content=template)
        wrote_tmp = True

    templater.render_to_file(template_fn, path, params)

    if wrote_tmp:
        util.del_file(template_fn)
def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
    """Render a resolv.conf file from a template.

    Boolean entries under params['options'] are converted into bare
    flags: True options are moved into params['flags'], False options
    are removed.  ``params`` is mutated in place.

    @param template_fn: path of the template to render.
    @param params: dict of template parameters.
    @param target_fname: destination path, defaults to /etc/resolv.conf.
    """
    flags = []
    false_flags = []

    if 'options' in params:
        for key, val in params['options'].items():
            if isinstance(val, bool):
                if val:
                    flags.append(key)
                else:
                    false_flags.append(key)

    for flag in flags + false_flags:
        del params['options'][flag]

    if not params.get('options'):
        params['options'] = {}

    params['flags'] = flags
    # Lazy %-style logging arguments (consistent with the sibling
    # resolv.conf helper) instead of eager string interpolation.
    LOG.debug("Writing resolv.conf from template %s", template_fn)
    templater.render_to_file(template_fn, target_fname, params)
def generate_resolv_conf(template_fn, params, target_fname):
    """Render template_fn with params and write the result to target_fname.

    Boolean entries under params['options'] become flags: those set to
    True are moved into params['flags'], those set to False are dropped.
    """
    enabled = []
    disabled = []
    if 'options' in params:
        opts = params['options']
        for opt_name, opt_val in opts.items():
            if not isinstance(opt_val, bool):
                continue
            if opt_val:
                enabled.append(opt_name)
            else:
                disabled.append(opt_name)
        # Boolean options are consumed here; remove them from the dict.
        for opt_name in enabled + disabled:
            del opts[opt_name]

    # Guarantee 'options' is a dict for the template even when empty.
    if not params.get('options'):
        params['options'] = {}

    params['flags'] = enabled
    LOG.debug("Writing resolv.conf from template %s", template_fn)
    templater.render_to_file(template_fn, target_fname, params)
def handle(name, cfg, cloud, log, _args):
    """Manage /etc/hosts per 'manage_etc_hosts', exposing each network
    device's address to the template as a 'dev_<name>' parameter.
    """
    manage_hosts = util.get_cfg_option_str(cfg, "manage_etc_hosts", False)
    if util.translate_bool(manage_hosts, addons=['template']):
        (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
        if not hostname:
            log.warn(("Option 'manage_etc_hosts' was set,"
                      " but no hostname was found"))
            return

        # Render from a template file
        tpl_fn_name = cloud.get_template_filename("hosts.%s"
                                                  % (cloud.distro.osfamily))
        # Fail before rendering: previously this check ran only AFTER
        # render_to_file had already been called with a None template.
        if not tpl_fn_name:
            raise RuntimeError(
                ("No hosts template could be"
                 " found for distro %s") % (cloud.distro.osfamily))

        out_fn = '/etc/hosts'
        params = {'hostname': hostname, 'fqdn': fqdn}
        # Make each interface address available as 'dev_<name>'.
        devs = netinfo.netdev_info()
        for dev, info in devs.items():
            params['dev_' + dev] = info['addr']
        templater.render_to_file(tpl_fn_name, out_fn, params)
    elif manage_hosts == "localhost":
        (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
        if not hostname:
            log.warn(("Option 'manage_etc_hosts' was set,"
                      " but no hostname was found"))
            return
        log.debug("Managing localhost in /etc/hosts")
        cloud.distro.update_etc_hosts(hostname, fqdn)
    else:
        log.debug(("Configuration option 'manage_etc_hosts' is not set,"
                   " not managing /etc/hosts in module %s"), name)
def write_ntp_config_template(cfg, cloud):
    """Render NTP_CONF from the distro's ntp template.

    Uses the distro's default pool servers when the config supplies
    neither 'servers' nor 'pools'.

    @raises RuntimeError: when no ntp template can be found.
    """
    servers = cfg.get('servers', [])
    pools = cfg.get('pools', [])

    if len(servers) == 0 and len(pools) == 0:
        LOG.debug('Adding distro default ntp pool servers')
        pools = generate_server_names(cloud.distro.name)

    params = {
        'servers': servers,
        'pools': pools,
    }

    template_fn = cloud.get_template_filename('ntp.conf.%s'
                                              % (cloud.distro.name))
    if not template_fn:
        template_fn = cloud.get_template_filename('ntp.conf')
        if not template_fn:
            # Interpolate NTP_CONF into the message; previously it was a
            # stray second constructor argument, leaving '%s' unexpanded.
            raise RuntimeError("No template found, "
                               "not rendering %s" % NTP_CONF)

    templater.render_to_file(template_fn, NTP_CONF, params)
def generate_resolv_conf(cloud, log, params):
    """Render /etc/resolv.conf from the distro template.

    Boolean entries in params['options'] are converted to flags: True
    values go into params['flags'], False values are removed.
    """
    template_fn = cloud.get_template_filename('resolv.conf')
    if not template_fn:
        log.warn("No template found, not rendering /etc/resolv.conf")
        return

    flags = []
    false_flags = []
    if 'options' in params:
        # .items() instead of the Python-2-only .iteritems(), and
        # isinstance() instead of comparing type objects directly.
        for key, val in params['options'].items():
            if isinstance(val, bool):
                if val:
                    flags.append(key)
                else:
                    false_flags.append(key)

    for flag in flags + false_flags:
        del params['options'][flag]

    params['flags'] = flags
    log.debug("Writing resolv.conf from template %s" % template_fn)
    templater.render_to_file(template_fn, '/etc/resolv.conf', params)
def _genrepo(cfg, cloud, log): """Generate yum repo files from provided templates.""" # The repo_preserve option is used to disable this feature if util.get_cfg_option_bool(cfg, 'repo_preserve', False): log.info("Not generating yum repo files, per configuration.") return log.debug("Generating default repo files") # get the repo dir from a legacy option (see cc_yum_add_repo.py) # TODO: get it from a more sensible path, or from yum? reposdir = util.get_cfg_option_str(cfg, 'yum_repo_dir', '/etc/yum.repos.d') # This function gets the mirror url from the config, with the region # name interpolated into it. mirror_info = cloud.datasource.get_package_mirror_info() log.debug("mirror_info: %r", mirror_info) if 'regional' not in mirror_info: log.debug('No mirror info found; ignoring.') return # It would be better to get 'name' from the config, but I'm not sure # where to put it in there that might end up being standard params = {'name': 'amzn', 'mirror': mirror_info['regional']} log.debug("Using mirror: %s", params['mirror']) repo_templates = glob(cloud.paths.template_tpl % 'amzn-*.repo') # extract the prefix and suffix from the template filename so we can # extract the filename later (tpl_prefix, tpl_suffix) = cloud.paths.template_tpl.split('%s', 1) for template_fn in repo_templates: out_fn = os.path.join( reposdir, # extract the filename from the template path template_fn[len(tpl_prefix):-len(tpl_suffix)]) templater.render_to_file(template_fn, out_fn, params)
def handle(name, cfg, cloud, log, _args):
    """Configure, optionally install, and run the chef client."""
    # If there isn't a chef key in the configuration don't do anything
    if 'chef' not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'chef' key in configuration"), name)
        return
    chef_cfg = cfg['chef']

    # Ensure the chef directories we use exist
    for d in CHEF_DIRS:
        util.ensure_dir(d)

    # Set the validation key based on the presence of either 'validation_key'
    # or 'validation_cert'. In the case where both exist, 'validation_key'
    # takes precedence
    for key in ('validation_key', 'validation_cert'):
        if key in chef_cfg and chef_cfg[key]:
            util.write_file('/etc/chef/validation.pem', chef_cfg[key])
            break

    # Create the chef config from template
    template_fn = cloud.get_template_filename('chef_client.rb')
    if template_fn:
        iid = str(cloud.datasource.get_instance_id())
        params = {
            'server_url': chef_cfg['server_url'],
            'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', iid),
            'environment': util.get_cfg_option_str(chef_cfg, 'environment',
                                                   '_default'),
            'validation_name': chef_cfg['validation_name']
        }
        templater.render_to_file(template_fn, '/etc/chef/client.rb', params)
    else:
        log.warn("No template found, not rendering to /etc/chef/client.rb")

    # set the firstboot json
    initial_json = {}
    if 'run_list' in chef_cfg:
        initial_json['run_list'] = chef_cfg['run_list']
    if 'initial_attributes' in chef_cfg:
        initial_attributes = chef_cfg['initial_attributes']
        # copy every initial attribute to the top level of firstboot.json
        for k in list(initial_attributes.keys()):
            initial_json[k] = initial_attributes[k]
    util.write_file('/etc/chef/firstboot.json', json.dumps(initial_json))

    # If chef is not installed, we install chef based on 'install_type'
    if (not os.path.isfile('/usr/bin/chef-client') or
            util.get_cfg_option_bool(chef_cfg,
                                     'force_install', default=False)):
        install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
                                               'packages')
        if install_type == "gems":
            # this will install and run the chef-client from gems
            chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
            ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
                                                   RUBY_VERSION_DEFAULT)
            install_chef_from_gems(cloud.distro, ruby_version, chef_version)
            # and finally, run chef-client
            log.debug('Running chef-client')
            util.subp(['/usr/bin/chef-client', '-d', '-i', '1800', '-s',
                       '20'], capture=False)
        elif install_type == 'packages':
            # this will install and run the chef-client from packages
            cloud.distro.install_packages(('chef',))
        elif install_type == 'omnibus':
            # fetch the omnibus installer script and execute it
            url = util.get_cfg_option_str(chef_cfg, "omnibus_url",
                                          OMNIBUS_URL)
            content = url_helper.readurl(url=url, retries=5)
            with util.tempdir() as tmpd:
                # use tmpd over tmpfile to avoid 'Text file busy' on execute
                tmpf = "%s/chef-omnibus-install" % tmpd
                # NOTE(review): 0700 is Python-2 octal syntax; Python 3
                # requires 0o700 — confirm which interpreter runs this.
                util.write_file(tmpf, str(content), mode=0700)
                util.subp([tmpf], capture=False)
        else:
            log.warn("Unknown chef install type %s", install_type)
def handle(name, cfg, cloud, log, _args):
    """Handler method activated by cloud-init."""

    # If there isn't a chef key in the configuration don't do anything
    if 'chef' not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'chef' key in configuration"), name)
        return
    chef_cfg = cfg['chef']

    # Ensure the chef directories we use exist
    chef_dirs = util.get_cfg_option_list(chef_cfg, 'directories')
    if not chef_dirs:
        chef_dirs = list(CHEF_DIRS)
    for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS):
        util.ensure_dir(d)

    # Set the validation key based on the presence of either 'validation_key'
    # or 'validation_cert'. In the case where both exist, 'validation_key'
    # takes precedence
    for key in ('validation_key', 'validation_cert'):
        if key in chef_cfg and chef_cfg[key]:
            util.write_file(CHEF_VALIDATION_PEM_PATH, chef_cfg[key])
            break

    # Create the chef config from template
    template_fn = cloud.get_template_filename('chef_client.rb')
    if template_fn:
        iid = str(cloud.datasource.get_instance_id())
        params = get_template_params(iid, chef_cfg, log)
        # Do a best effort attempt to ensure that the template values that
        # are associated with paths have there parent directory created
        # before they are used by the chef-client itself.
        param_paths = set()
        for (k, v) in params.items():
            if k in CHEF_RB_TPL_PATH_KEYS and v:
                param_paths.add(os.path.dirname(v))
        util.ensure_dirs(param_paths)
        templater.render_to_file(template_fn, CHEF_RB_PATH, params)
    else:
        log.warn("No template found, not rendering to %s", CHEF_RB_PATH)

    # Set the firstboot json
    fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path',
                                          default=CHEF_FB_PATH)
    if not fb_filename:
        log.info("First boot path empty, not writing first boot json file")
    else:
        initial_json = {}
        if 'run_list' in chef_cfg:
            initial_json['run_list'] = chef_cfg['run_list']
        if 'initial_attributes' in chef_cfg:
            initial_attributes = chef_cfg['initial_attributes']
            # copy every initial attribute to the top level of the json
            for k in list(initial_attributes.keys()):
                initial_json[k] = initial_attributes[k]
        util.write_file(fb_filename, json.dumps(initial_json))

    # Try to install chef, if its not already installed...
    force_install = util.get_cfg_option_bool(chef_cfg, 'force_install',
                                             default=False)
    if not is_installed() or force_install:
        run = install_chef(cloud, chef_cfg, log)
    elif is_installed():
        # Already installed: only run if the config explicitly asks for it.
        run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
    else:
        run = False
    if run:
        run_chef(chef_cfg, log)
        post_run_chef(chef_cfg, log)
def handle(name, cfg, cloud, log, _args):
    """Write chef configuration and optionally install/run the client."""
    # If there isn't a chef key in the configuration don't do anything
    if 'chef' not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'chef' key in configuration"), name)
        return
    chef_cfg = cfg['chef']

    # Ensure the chef directories we use exist
    for d in CHEF_DIRS:
        util.ensure_dir(d)

    # Set the validation key based on the presence of either 'validation_key'
    # or 'validation_cert'. In the case where both exist, 'validation_key'
    # takes precedence
    for key in ('validation_key', 'validation_cert'):
        if key in chef_cfg and chef_cfg[key]:
            util.write_file('/etc/chef/validation.pem', chef_cfg[key])
            break

    # Create the chef config from template
    template_fn = cloud.get_template_filename('chef_client.rb')
    if template_fn:
        iid = str(cloud.datasource.get_instance_id())
        params = {
            'server_url': chef_cfg['server_url'],
            'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', iid),
            'environment': util.get_cfg_option_str(chef_cfg, 'environment',
                                                   '_default'),
            'validation_name': chef_cfg['validation_name']
        }
        templater.render_to_file(template_fn, '/etc/chef/client.rb', params)
    else:
        log.warn("No template found, not rendering to /etc/chef/client.rb")

    # set the firstboot json
    initial_json = {}
    if 'run_list' in chef_cfg:
        initial_json['run_list'] = chef_cfg['run_list']
    if 'initial_attributes' in chef_cfg:
        initial_attributes = chef_cfg['initial_attributes']
        # copy every initial attribute to the top level of firstboot.json
        for k in list(initial_attributes.keys()):
            initial_json[k] = initial_attributes[k]
    util.write_file('/etc/chef/firstboot.json', json.dumps(initial_json))

    # If chef is not installed, we install chef based on 'install_type'
    if (not os.path.isfile('/usr/bin/chef-client') or
            util.get_cfg_option_bool(
                chef_cfg, 'force_install', default=False)):
        install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
                                               'packages')
        if install_type == "gems":
            # this will install and run the chef-client from gems
            chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
            ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
                                                   RUBY_VERSION_DEFAULT)
            install_chef_from_gems(cloud.distro, ruby_version, chef_version)
            # and finally, run chef-client
            log.debug('Running chef-client')
            util.subp(['/usr/bin/chef-client', '-d', '-i', '1800', '-s',
                       '20'], capture=False)
        elif install_type == 'packages':
            # this will install and run the chef-client from packages
            cloud.distro.install_packages(('chef', ))
        elif install_type == 'omnibus':
            # fetch the omnibus installer script and execute it
            url = util.get_cfg_option_str(chef_cfg, "omnibus_url",
                                          OMNIBUS_URL)
            content = url_helper.readurl(url=url, retries=5)
            with util.tempdir() as tmpd:
                # use tmpd over tmpfile to avoid 'Text file busy' on execute
                tmpf = "%s/chef-omnibus-install" % tmpd
                # NOTE(review): 0700 is Python-2 octal syntax; Python 3
                # requires 0o700 — confirm which interpreter runs this.
                util.write_file(tmpf, str(content), mode=0700)
                util.subp([tmpf], capture=False)
        else:
            log.warn("Unknown chef install type %s", install_type)
def handle(name, cfg, cloud, log, _args):
    """Handler method activated by cloud-init."""

    # If there isn't a chef key in the configuration don't do anything
    if 'chef' not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'chef' key in configuration"), name)
        return
    chef_cfg = cfg['chef']

    # Ensure the chef directories we use exist
    chef_dirs = util.get_cfg_option_list(chef_cfg, 'directories')
    if not chef_dirs:
        chef_dirs = list(CHEF_DIRS)
    for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS):
        util.ensure_dir(d)

    vkey_path = chef_cfg.get('validation_key', CHEF_VALIDATION_PEM_PATH)
    vcert = chef_cfg.get('validation_cert')
    # special value 'system' means do not overwrite the file
    # but still render the template to contain 'validation_key'
    if vcert and vcert != "system":
        # Re-fold a space-joined single-line PEM body back onto separate
        # lines.  Guarded so a missing cert (None) is never run through
        # the regex rewrite and the literal 'system' sentinel is never
        # mangled; the previous version crashed on None, rewrote
        # 'system', and was missing the closing quote on the END marker
        # (a syntax error).
        vcert = ('-----BEGIN RSA PRIVATE KEY-----\n' +
                 '\n'.join(
                     re.sub(' -----END RSA PRIVATE KEY-----$', '',
                            re.sub('^-----BEGIN RSA PRIVATE KEY----- ', '',
                                   vcert)).split(' ')) +
                 '\n-----END RSA PRIVATE KEY-----')
        util.write_file(vkey_path, vcert)
    elif vcert == "system" and not os.path.isfile(vkey_path):
        log.warn("chef validation_cert provided as 'system', but "
                 "validation_key path '%s' does not exist.",
                 vkey_path)

    # Create the chef config from template
    template_fn = cloud.get_template_filename('chef_client.rb')
    if template_fn:
        iid = str(cloud.datasource.get_instance_id())
        params = get_template_params(iid, chef_cfg, log)
        # Do a best effort attempt to ensure that the template values that
        # are associated with paths have their parent directory created
        # before they are used by the chef-client itself.
        param_paths = set()
        for (k, v) in params.items():
            if k in CHEF_RB_TPL_PATH_KEYS and v:
                param_paths.add(os.path.dirname(v))
        util.ensure_dirs(param_paths)
        templater.render_to_file(template_fn, CHEF_RB_PATH, params)
    else:
        log.warn("No template found, not rendering to %s", CHEF_RB_PATH)

    # Set the firstboot json
    fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path',
                                          default=CHEF_FB_PATH)
    if not fb_filename:
        log.info("First boot path empty, not writing first boot json file")
    else:
        initial_json = {}
        if 'run_list' in chef_cfg:
            initial_json['run_list'] = chef_cfg['run_list']
        if 'initial_attributes' in chef_cfg:
            initial_attributes = chef_cfg['initial_attributes']
            for k in list(initial_attributes.keys()):
                initial_json[k] = initial_attributes[k]
        util.write_file(fb_filename, json.dumps(initial_json))

    # Try to install chef, if its not already installed...
    force_install = util.get_cfg_option_bool(chef_cfg, 'force_install',
                                             default=False)
    if not is_installed() or force_install:
        run = install_chef(cloud, chef_cfg, log)
    elif is_installed():
        run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
    else:
        run = False
    if run:
        run_chef(chef_cfg, log)
        post_run_chef(chef_cfg, log)
def handle(name, cfg, cloud, log, _args):
    """Handler method activated by cloud-init."""

    # If there isn't a chef key in the configuration don't do anything
    if 'chef' not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'chef' key in configuration"), name)
        return
    chef_cfg = cfg['chef']

    # Ensure the chef directories we use exist
    chef_dirs = util.get_cfg_option_list(chef_cfg, 'directories')
    if not chef_dirs:
        chef_dirs = list(CHEF_DIRS)
    for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS):
        util.ensure_dir(d)

    vkey_path = chef_cfg.get('validation_key', CHEF_VALIDATION_PEM_PATH)
    vcert = chef_cfg.get('validation_cert')
    # special value 'system' means do not overwrite the file
    # but still render the template to contain 'validation_key'
    if vcert:
        if vcert != "system":
            util.write_file(vkey_path, vcert)
        elif not os.path.isfile(vkey_path):
            log.warn(
                "chef validation_cert provided as 'system', but "
                "validation_key path '%s' does not exist.",
                vkey_path)

    # Create the chef config from template
    template_fn = cloud.get_template_filename('chef_client.rb')
    if template_fn:
        iid = str(cloud.datasource.get_instance_id())
        params = get_template_params(iid, chef_cfg, log)
        # Do a best effort attempt to ensure that the template values that
        # are associated with paths have there parent directory created
        # before they are used by the chef-client itself.
        param_paths = set()
        for (k, v) in params.items():
            if k in CHEF_RB_TPL_PATH_KEYS and v:
                param_paths.add(os.path.dirname(v))
        util.ensure_dirs(param_paths)
        templater.render_to_file(template_fn, CHEF_RB_PATH, params)
    else:
        log.warn("No template found, not rendering to %s", CHEF_RB_PATH)

    # Set the firstboot json
    fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path',
                                          default=CHEF_FB_PATH)
    if not fb_filename:
        log.info("First boot path empty, not writing first boot json file")
    else:
        initial_json = {}
        if 'run_list' in chef_cfg:
            initial_json['run_list'] = chef_cfg['run_list']
        if 'initial_attributes' in chef_cfg:
            initial_attributes = chef_cfg['initial_attributes']
            # copy every initial attribute to the top level of the json
            for k in list(initial_attributes.keys()):
                initial_json[k] = initial_attributes[k]
        util.write_file(fb_filename, json.dumps(initial_json))

    # Try to install chef, if its not already installed...
    force_install = util.get_cfg_option_bool(chef_cfg, 'force_install',
                                             default=False)
    if not is_installed() or force_install:
        run = install_chef(cloud, chef_cfg, log)
    elif is_installed():
        # Already installed: only run when the config explicitly asks.
        run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
    else:
        run = False
    if run:
        run_chef(chef_cfg, log)
        post_run_chef(chef_cfg, log)
def handle_iscsi(cfg, cloud, log, definition, dev_entry_iscsi):
    """Attach an iSCSI LUN and optionally create LVM/filesystem/mount.

    @param definition: dict describing the device ("device" is an
        'iscsi:host:proto:port:lun:target' string) plus optional lvm,
        filesystem and mount settings.
    @param dev_entry_iscsi: index of this iSCSI entry; entry 0 also
        performs one-time host setup (initiator name, multipath,
        service enable/restart).
    """
    # Handle iSCSI LUN
    device = definition.get("device")
    try:
        (iscsi_host, iscsi_proto, iscsi_port,
         iscsi_lun, iscsi_target) = device.split(":", 5)[1:]
    except Exception as e:
        util.logexc(log, "handle_iscsi: "
                    "expected \"device\" attribute in the format: "
                    "\"iscsi:<iSCSI host>:<protocol>:<port>:<LUN>:"
                    "<iSCSI target name>\": %s" % e)
        return

    if dev_entry_iscsi == 0:
        # First iSCSI entry: perform one-time host configuration.
        (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
        if "initiator_name" in definition:
            initiator_name = definition.get("initiator_name")
        else:
            initiator_name = "iqn.2005-02.com.open-iscsi:%s" % hostname
        util.write_file(ISCSI_INITIATOR_PATH,
                        "InitiatorName=%s" % initiator_name)

        multipath_tmpl_fn = cloud.get_template_filename("multipath.conf")
        if multipath_tmpl_fn:
            templater.render_to_file(multipath_tmpl_fn,
                                     "/etc/multipath.conf", {})
        else:
            log.warn("handle_iscsi: template multipath.conf not found")

        if cloud.distro.osfamily == "redhat":
            iscsi_services = ["iscsi", "iscsid"]
            multipath_services = ["multipathd"]
        elif cloud.distro.osfamily == 'debian':
            iscsi_services = ["open-iscsi", "iscsid"]
            multipath_services = ["multipathd"]
        else:
            util.logexc(log, "handle_iscsi: "
                        "unsupported osfamily \"%s\""
                        % cloud.distro.osfamily)
            return

        for service in iscsi_services:
            _service_wrapper(cloud, log, service, "enable")
            _service_wrapper(cloud, log, service, "restart")
        for service in multipath_services:
            _service_wrapper(cloud, log, service, "enable")
            _service_wrapper(cloud, log, service, "restart")

    blockdev = _iscsi_lun_discover(log, iscsi_host, iscsi_port, iscsi_lun,
                                   iscsi_target)
    if blockdev:
        lvm_group = definition.get("lvm_group")
        lvm_volume = definition.get("lvm_volume")
        fs_type = definition.get("fs_type")
        fs_label = definition.get("fs_label")
        fs_opts = definition.get("fs_opts")
        mount_point = definition.get("mount_point")
        mount_opts = definition.get("mount_opts")
        if not mount_opts:
            mount_opts = 'defaults,_netdev'
        else:
            # network filesystems must carry _netdev so mount waits for net
            if mount_opts.find("_netdev") == -1:
                mount_opts = "%s,_netdev" % (mount_opts)
        fs_freq = definition.get("fs_freq")
        if not fs_freq:
            fs_freq = "1"
        fs_passno = definition.get("fs_passno")
        if not fs_passno:
            fs_passno = "2"

        if lvm_group and lvm_volume:
            # Refuse to reuse an existing VG or LV.
            for vg_name in _list_vg_names():
                if vg_name == lvm_group:
                    util.logexc(log, "handle_iscsi: "
                                "logical volume group '%s' exists already"
                                % lvm_group)
                    return
            for lv_name in _list_lv_names():
                if lv_name == lvm_volume:
                    util.logexc(log, "handle_iscsi: "
                                "logical volume '%s' exists already"
                                % lvm_volume)
                    return
            blockdev = _create_lv(log, blockdev, lvm_group, lvm_volume)

        if blockdev:
            if mount_point and fs_type:
                _create_fs(log, blockdev, fs_type, fs_label, fs_opts)
                _add_fstab_entry(log, blockdev, mount_point, fs_type,
                                 fs_label, mount_opts, fs_freq, fs_passno)
                _mount_fs(log, mount_point)
            else:
                # Typo fix: message previously read "expexted".
                util.logexc(log, "handle_iscsi: "
                            "expected \"mount_point\" "
                            "and \"fs_type\" parameters")