def test_ntp_handler_schema_validation_warns_invalid_key_present(
        self, m_select):
    """Ntp schema validation warns of invalid keys present in ntp config.

    Schema validation is not strict, so ntp config is still be rendered.
    """
    invalid_config = {
        'ntp': {'invalidkey': 1,
                'pools': ['0.mycompany.pool.ntp.org']}}
    for distro in cc_ntp.distros:
        cloud = self._get_cloud(distro)
        client_cfg = self._mock_ntp_client_config(distro=distro)
        conf_path = client_cfg['confpath']
        m_select.return_value = client_cfg
        cc_ntp.handle('cc_ntp', invalid_config, cloud, None, [])
        # The unknown key is reported but rendering still happens.
        self.assertIn(
            "Invalid config:\nntp: Additional properties are not allowed "
            "('invalidkey' was unexpected)",
            self.logs.getvalue())
        rendered = util.read_file_or_url('file://' + conf_path).contents
        self.assertEqual(
            "servers []\npools ['0.mycompany.pool.ntp.org']\n",
            rendered.decode())
def test_ntp_handler_real_distro_templates(self):
    """Test ntp handler renders the shipped distro ntp.conf templates."""
    pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org']
    servers = ['192.168.23.3', '192.168.23.4']
    cfg = {'ntp': {'pools': pools, 'servers': servers}}
    ntp_conf = self.tmp_path('ntp.conf', self.new_root)  # Doesn't exist
    for distro in ('debian', 'ubuntu', 'fedora', 'rhel', 'sles'):
        cloud = self._get_cloud(distro)
        root_dir = dirname(dirname(os.path.realpath(util.__file__)))
        tmpl_file = os.path.join(
            '{0}/templates/ntp.conf.{1}.tmpl'.format(root_dir, distro))
        # Copy the in-tree template into our tmp_dir.
        shutil.copy(
            tmpl_file,
            os.path.join(self.new_root, 'ntp.conf.%s.tmpl' % distro))
        with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
            with mock.patch.object(util, 'which', return_value=[True]):
                cc_ntp.handle('notimportant', cfg, cloud, None, None)
        rendered = util.read_file_or_url('file://' + ntp_conf).contents
        expected_servers = '\n'.join(
            'server {0} iburst'.format(server) for server in servers)
        self.assertIn(
            expected_servers, rendered.decode(),
            'failed to render ntp.conf for distro:{0}'.format(distro))
        expected_pools = '\n'.join(
            'pool {0} iburst'.format(pool) for pool in pools)
        self.assertIn(
            expected_pools, rendered.decode(),
            'failed to render ntp.conf for distro:{0}'.format(distro))
def test_ntp_user_provided_config_template_only(self, m_select, m_install,
                                                m_reload, m_schema):
    """Test custom template for default client"""
    custom = r'\n#MyCustomTemplate'
    user_template = NTP_TEMPLATE + custom
    client = 'chrony'
    cfg = {
        'pools': ['mypool.org'],
        'ntp_client': client,
        'config': {'template': user_template},
    }
    # The merged config handed to schema validation must carry both the
    # distro defaults for chrony and the user supplied template.
    expected_merged_cfg = {
        'check_exe': 'chronyd',
        'confpath': '{tmpdir}/client.conf'.format(tmpdir=self.new_root),
        'template_name': 'client.conf',
        'template': user_template,
        'service_name': 'chrony',
        'packages': ['chrony'],
    }
    tmpdir_patch = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
    for distro in cc_ntp.distros:
        cloud = self._get_cloud(distro)
        client_cfg = self._mock_ntp_client_config(client=client,
                                                  distro=distro)
        conf_path = client_cfg['confpath']
        m_select.return_value = client_cfg
        with mock.patch(tmpdir_patch, self.new_root):
            cc_ntp.handle('notimportant', {'ntp': cfg}, cloud, None, None)
        rendered = util.read_file_or_url('file://' + conf_path).contents
        self.assertEqual("servers []\npools ['mypool.org']\n%s" % custom,
                         rendered.decode())
        m_schema.assert_called_with(expected_merged_cfg)
def validate_cloudconfig_file(config_path, schema, annotate=False):
    """Validate cloudconfig file adheres to a specific jsonschema.

    @param config_path: Path to the yaml cloud-config file to parse.
    @param schema: Dict describing a valid jsonschema to validate against.
    @param annotate: Boolean set True to print original config file with
        error annotations on the offending lines.
    @raises SchemaValidationError containing any of schema_errors
        encountered.
    @raises RuntimeError when config_path does not exist.
    """
    if not os.path.exists(config_path):
        raise RuntimeError(
            'Configfile {0} does not exist'.format(config_path))
    content = read_file_or_url('file://{0}'.format(config_path)).contents
    # Every cloud-config file must begin with the magic header.
    if not content.startswith(CLOUD_CONFIG_HEADER):
        errors = (
            ('header', 'File {0} needs to begin with "{1}"'.format(
                config_path, CLOUD_CONFIG_HEADER.decode())),)
        raise SchemaValidationError(errors)
    try:
        cloudconfig = yaml.safe_load(content)
    except yaml.parser.ParserError as err:
        # Surface yaml syntax problems as schema errors too.
        errors = (
            ('format', 'File {0} is not valid yaml. {1}'.format(
                config_path, str(err))),)
        raise SchemaValidationError(errors)
    try:
        validate_cloudconfig_schema(cloudconfig, schema, strict=True)
    except SchemaValidationError as err:
        if annotate:
            print(annotated_cloudconfig_file(
                cloudconfig, content, err.schema_errors))
        raise
def test_proxy_replaced(self):
    """An existing proxy file is replaced by the configured proxy."""
    util.write_file(self.cfile, "content doesnt matter")
    cc_apt_configure.apply_apt_config(
        {'apt_proxy': "foo"}, self.pfile, self.cfile)
    self.assertTrue(os.path.isfile(self.pfile))
    proxy_contents = str(util.read_file_or_url(self.pfile))
    self.assertTrue(self._search_apt_config(proxy_contents, "http", "foo"))
def test_ntp_handler_schema_validation_warns_of_duplicates(self, m_select):
    """Ntp schema validation warns of duplicates in servers or pools.

    Schema validation is not strict, so ntp config is still be rendered.
    """
    invalid_config = {
        'ntp': {'pools': ['0.mypool.org', '0.mypool.org'],
                'servers': ['10.0.0.1', '10.0.0.1']}}
    for distro in cc_ntp.distros:
        cloud = self._get_cloud(distro)
        client_cfg = self._mock_ntp_client_config(distro=distro)
        conf_path = client_cfg['confpath']
        m_select.return_value = client_cfg
        cc_ntp.handle('cc_ntp', invalid_config, cloud, None, [])
        # Duplicates are logged, but the config is rendered as given.
        self.assertIn(
            "Invalid config:\nntp.pools: ['0.mypool.org', '0.mypool.org']"
            " has non-unique elements\nntp.servers: "
            "['10.0.0.1', '10.0.0.1'] has non-unique elements",
            self.logs.getvalue())
        rendered = util.read_file_or_url('file://' + conf_path).contents
        self.assertEqual(
            "servers ['10.0.0.1', '10.0.0.1']\n"
            "pools ['0.mypool.org', '0.mypool.org']\n",
            rendered.decode())
def test_ntp_the_whole_package(self, m_sysd, m_select):
    """Test enabled config renders template, and restarts service """
    # End-to-end: for every supported distro, handle() should render the
    # mocked client template and then restart the client service.
    cfg = {'ntp': {'enabled': True}}
    for distro in cc_ntp.distros:
        mycloud = self._get_cloud(distro)
        ntpconfig = self._mock_ntp_client_config(distro=distro)
        confpath = ntpconfig['confpath']
        service_name = ntpconfig['service_name']
        m_select.return_value = ntpconfig
        pools = cc_ntp.generate_server_names(mycloud.distro.name)
        # force uses systemd path
        m_sysd.return_value = True
        # Patch the whole util module inside cc_ntp, but pass selected
        # calls through so the handler still behaves normally.
        with mock.patch('cloudinit.config.cc_ntp.util') as m_util:
            # allow use of util.mergemanydict
            m_util.mergemanydict.side_effect = util.mergemanydict
            # default client is present
            m_util.which.return_value = True
            # use the config 'enabled' value
            m_util.is_false.return_value = util.is_false(
                cfg['ntp']['enabled'])
            cc_ntp.handle('notimportant', cfg, mycloud, None, None)
            # The systemd path must reload-or-restart the client service.
            m_util.subp.assert_called_with(
                ['systemctl', 'reload-or-restart', service_name],
                capture=True)
        content = util.read_file_or_url('file://' + confpath).contents
        self.assertEqual("servers []\npools {0}\n".format(pools),
                         content.decode())
def test_ntp_user_provided_config_with_template(self, m_install, m_reload):
    """A fully user-specified ntp_client renders the custom template."""
    custom = r'\n#MyCustomTemplate'
    user_template = NTP_TEMPLATE + custom
    confpath = os.path.join(self.new_root, 'etc/myntp/myntp.conf')
    cfg = {
        'ntp': {
            'pools': ['mypool.org'],
            'ntp_client': 'myntpd',
            'config': {
                'check_exe': 'myntpd',
                'confpath': confpath,
                'packages': ['myntp'],
                'service_name': 'myntp',
                'template': user_template,
            },
        },
    }
    tmpdir_patch = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
    for distro in cc_ntp.distros:
        cloud = self._get_cloud(distro)
        with mock.patch(tmpdir_patch, self.new_root):
            cc_ntp.handle('notimportant', cfg, cloud, None, None)
        rendered = util.read_file_or_url('file://' + confpath).contents
        self.assertEqual("servers []\npools ['mypool.org']\n%s" % custom,
                         rendered.decode())
def get_instance_userdata(api_version='latest',
                          metadata_address='http://169.254.169.254',
                          ssl_details=None, timeout=5, retries=5):
    """Fetch instance user-data from the metadata service.

    @param api_version: Metadata API version path component.
    @param metadata_address: Base url of the metadata service.
    @param ssl_details: Optional ssl details passed to read_file_or_url.
    @param timeout: Per-request timeout in seconds.
    @param retries: Number of retries for transient failures.
    @return: The user-data with '-----...-----' armor lines stripped, or
        an empty string when the resource is absent or fetching failed.
    """
    import re

    ud_url = url_helper.combine_url(metadata_address, api_version)
    ud_url = url_helper.combine_url(ud_url, 'user-data')
    user_data = ''
    try:
        # It is ok for userdata to not exist (thats why we are stopping if
        # NOT_FOUND occurs) and just in that case returning an empty string.
        exception_cb = functools.partial(_skip_retry_on_codes,
                                         SKIP_USERDATA_CODES)
        response = util.read_file_or_url(ud_url, ssl_details=ssl_details,
                                         timeout=timeout, retries=retries,
                                         exception_cb=exception_cb)
        # BUGFIX: re.M was previously passed as re.sub's positional
        # 'count' argument (re.M == 8, silently capping substitutions at
        # 8); it must be supplied via the 'flags' keyword.
        user_data = re.sub(r"-----([^\$]+)-----", r"", response.contents,
                           flags=re.M).strip()
    except url_helper.UrlError as e:
        if e.code not in SKIP_USERDATA_CODES:
            util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
    except Exception:
        util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
    return user_data
def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
                       version=MD_VERSION, paths=None):
    """
    Read the maas datasource at seed_url.
      - header_cb is a method that should return a headers dictionary for
        a given url

    Expected format of seed_url is are the following files:
      * <seed_url>/<version>/meta-data/instance-id
      * <seed_url>/<version>/meta-data/local-hostname
      * <seed_url>/<version>/user-data
    """
    base_url = "%s/%s" % (seed_url, version)
    file_order = [
        'local-hostname',
        'instance-id',
        'public-keys',
        'user-data',
    ]
    files = {
        'local-hostname': "%s/%s" % (base_url, 'meta-data/local-hostname'),
        'instance-id': "%s/%s" % (base_url, 'meta-data/instance-id'),
        'public-keys': "%s/%s" % (base_url, 'meta-data/public-keys'),
        'user-data': "%s/%s" % (base_url, 'user-data'),
    }
    md = {}
    for name in file_order:
        url = files.get(name)
        if not header_cb:
            # Default to a no-op header callback.
            def _cb(url):
                return {}
            header_cb = _cb

        # user-data is optional; don't retry a missing resource.
        if name == 'user-data':
            retries = 0
        else:
            retries = None

        try:
            ssl_details = util.fetch_ssl_details(paths)
            resp = util.read_file_or_url(url, retries=retries,
                                         headers_cb=header_cb,
                                         timeout=timeout,
                                         ssl_details=ssl_details)
            if resp.ok():
                md[name] = str(resp)
            else:
                # FIX: Logger.warn is a deprecated alias; use warning().
                LOG.warning(("Fetching from %s resulted in"
                             " an invalid http code %s"), url, resp.code)
        except url_helper.UrlError as e:
            # 404 simply means the optional file is absent.
            if e.code != 404:
                raise
    return check_seed_contents(md, seed_url)
def _do_include(self, content, append_msg):
    """Fetch and process every '#include'/'#include-once' url in content.

    Each non-comment line of *content* is treated as a url; fetched
    payloads are converted to messages and appended to *append_msg*.
    Urls listed after an '#include-once' marker are cached on disk and
    re-read from that cache on subsequent runs instead of refetched.
    Fetch failures are logged and skipped, not raised.
    """
    # Include a list of urls, one per line
    # also support '#include <url here>'
    # or #include-once '<url here>'
    include_once_on = False
    for line in content.splitlines():
        lc_line = line.lower()
        if lc_line.startswith("#include-once"):
            line = line[len("#include-once"):].lstrip()
            # Every following include will now
            # not be refetched.... but will be
            # re-read from a local urlcache (if it worked)
            include_once_on = True
        elif lc_line.startswith("#include"):
            line = line[len("#include"):].lstrip()
            # Disable the include once if it was on
            # if it wasn't, then this has no effect.
            include_once_on = False
        if line.startswith("#"):
            continue
        include_url = line.strip()
        if not include_url:
            continue
        include_once_fn = None
        content = None
        if include_once_on:
            include_once_fn = self._get_include_once_filename(include_url)
        if include_once_on and os.path.isfile(include_once_fn):
            # Cache hit: reuse the previously fetched payload.
            content = util.load_file(include_once_fn)
        else:
            try:
                resp = util.read_file_or_url(include_url,
                                             ssl_details=self.ssl_details)
                if include_once_on and resp.ok():
                    util.write_file(include_once_fn, resp.contents,
                                    mode=0o600)
                if resp.ok():
                    content = resp.contents
                else:
                    LOG.warning(("Fetching from %s resulted in"
                                 " a invalid http code of %s"),
                                include_url, resp.code)
            except UrlError as urle:
                message = str(urle)
                # Older versions of requests.exceptions.HTTPError may not
                # include the errant url. Append it for clarity in logs.
                if include_url not in message:
                    message += ' for url: {0}'.format(include_url)
                LOG.warning(message)
            except IOError as ioe:
                LOG.warning("Fetching from %s resulted in %s",
                            include_url, ioe)
        if content is not None:
            new_msg = convert_string(content)
            self._process_msg(new_msg, append_msg)
def test_apt_http_proxy_written(self):
    """apt_http_proxy is written to the proxy file, not the config file."""
    cfg = {"apt_http_proxy": "myproxy"}
    cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
    self.assertTrue(os.path.isfile(self.pfile))
    self.assertFalse(os.path.isfile(self.cfile))
    proxy_contents = str(util.read_file_or_url(self.pfile))
    self.assertTrue(
        self._search_apt_config(proxy_contents, "http", "myproxy"))
def test_apt_http_proxy_written(self):
    """apt_http_proxy is written to the proxy file, not the config file."""
    cc_apt_configure.apply_apt_config(
        {'apt_http_proxy': 'myproxy'}, self.pfile, self.cfile)
    self.assertTrue(os.path.isfile(self.pfile))
    self.assertFalse(os.path.isfile(self.cfile))
    proxy_contents = str(util.read_file_or_url(self.pfile))
    self.assertTrue(
        self._search_apt_config(proxy_contents, "http", "myproxy"))
def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
                       version=MD_VERSION, paths=None):
    """
    Read the maas datasource at seed_url.
      - header_cb is a method that should return a headers dictionary for
        a given url

    Expected format of seed_url is are the following files:
      * <seed_url>/<version>/meta-data/instance-id
      * <seed_url>/<version>/meta-data/local-hostname
      * <seed_url>/<version>/user-data
    """
    base_url = "%s/%s" % (seed_url, version)
    file_order = [
        'local-hostname',
        'instance-id',
        'public-keys',
        'user-data',
    ]
    files = {
        'local-hostname': "%s/%s" % (base_url, 'meta-data/local-hostname'),
        'instance-id': "%s/%s" % (base_url, 'meta-data/instance-id'),
        'public-keys': "%s/%s" % (base_url, 'meta-data/public-keys'),
        'user-data': "%s/%s" % (base_url, 'user-data'),
    }
    md = {}
    for name in file_order:
        url = files.get(name)
        if not header_cb:
            # Default to a no-op header callback.
            def _cb(url):
                return {}
            header_cb = _cb

        # user-data is optional; don't retry a missing resource.
        if name == 'user-data':
            retries = 0
        else:
            retries = None

        try:
            ssl_details = util.fetch_ssl_details(paths)
            resp = util.read_file_or_url(url, retries=retries,
                                         headers_cb=header_cb,
                                         timeout=timeout,
                                         ssl_details=ssl_details)
            if resp.ok():
                # Binary fields (e.g. user-data) are kept as raw bytes.
                if name in BINARY_FIELDS:
                    md[name] = resp.contents
                else:
                    md[name] = util.decode_binary(resp.contents)
            else:
                # FIX: Logger.warn is a deprecated alias; use warning().
                LOG.warning(("Fetching from %s resulted in"
                             " an invalid http code %s"), url, resp.code)
        except url_helper.UrlError as e:
            # 404 simply means the optional file is absent.
            if e.code != 404:
                raise
    return check_seed_contents(md, seed_url)
def test_config_written(self):
    """apt_config payload is written verbatim to the config file."""
    payload = "this is my apt config"
    cc_apt_configure.apply_apt_config(
        {"apt_config": payload}, self.pfile, self.cfile)
    self.assertTrue(os.path.isfile(self.cfile))
    self.assertFalse(os.path.isfile(self.pfile))
    self.assertEqual(str(util.read_file_or_url(self.cfile)), payload)
def test_config_written(self):
    """apt_config payload is written verbatim to the config file."""
    payload = 'this is my apt config'
    cc_apt_configure.apply_apt_config(
        {'apt_config': payload}, self.pfile, self.cfile)
    self.assertTrue(os.path.isfile(self.cfile))
    self.assertFalse(os.path.isfile(self.pfile))
    self.assertEqual(str(util.read_file_or_url(self.cfile)), payload)
def test_ntp_handler_real_distro_ntp_templates(self):
    """Test ntp handler renders the shipped distro ntp client templates."""
    pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org']
    servers = ['192.168.23.3', '192.168.23.4']
    for client in ['ntp', 'systemd-timesyncd', 'chrony']:
        for distro in cc_ntp.distros:
            distro_cfg = cc_ntp.distro_ntp_client_configs(distro)
            ntpclient = distro_cfg[client]
            # Strip the leading '/' so the confpath lands under new_root.
            confpath = (
                os.path.join(self.new_root, ntpclient.get('confpath')[1:]))
            template = ntpclient.get('template_name')
            # find sourcetree template file
            root_dir = (
                dirname(dirname(os.path.realpath(util.__file__))) +
                '/templates')
            source_fn = self._get_template_path(template, distro,
                                                basepath=root_dir)
            template_fn = self._get_template_path(template, distro)
            # don't fail if cloud-init doesn't have a template for
            # a distro,client pair
            if not os.path.exists(source_fn):
                continue
            # Create a copy in our tmp_dir
            shutil.copy(source_fn, template_fn)
            cc_ntp.write_ntp_config_template(distro, servers=servers,
                                             pools=pools, path=confpath,
                                             template_fn=template_fn)
            content = util.read_file_or_url('file://' + confpath).contents
            if client in ['ntp', 'chrony']:
                # ntp and chrony templates render 'server'/'pool' lines.
                expected_servers = '\n'.join([
                    'server {0} iburst'.format(srv) for srv in servers])
                # NOTE(review): this print looks like leftover debug
                # output — consider removing it.
                print('distro=%s client=%s' % (distro, client))
                self.assertIn(expected_servers, content.decode('utf-8'),
                              ('failed to render {0} conf'
                               ' for distro:{1}'.format(client, distro)))
                expected_pools = '\n'.join([
                    'pool {0} iburst'.format(pool) for pool in pools])
                self.assertIn(expected_pools, content.decode('utf-8'),
                              ('failed to render {0} conf'
                               ' for distro:{1}'.format(client, distro)))
            elif client == 'systemd-timesyncd':
                # timesyncd renders one combined NTP= line instead.
                expected_content = (
                    "# cloud-init generated file\n" +
                    "# See timesyncd.conf(5) for details.\n\n" +
                    "[Time]\nNTP=%s %s \n" % (" ".join(servers),
                                              " ".join(pools)))
                self.assertEqual(expected_content, content.decode())
def test_write_ntp_config_template_uses_ntp_conf_distro_no_servers(self):
    """write_ntp_config_template reads from $client.conf.distro.tmpl"""
    servers = []
    pools = ['10.0.0.1', '10.0.0.2']
    confpath, template_fn = self._generate_template()
    tmpdir_patch = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
    with mock.patch(tmpdir_patch, self.new_root):
        cc_ntp.write_ntp_config_template('ubuntu',
                                         servers=servers, pools=pools,
                                         path=confpath,
                                         template_fn=template_fn,
                                         template=None)
    rendered = util.read_file_or_url('file://' + confpath).contents
    self.assertEqual("servers []\npools ['10.0.0.1', '10.0.0.2']\n",
                     rendered.decode())
def test_ntp_handler_schema_validation_allows_empty_ntp_config(
        self, m_select):
    """Ntp schema validation allows for an empty ntp: configuration."""
    for valid_empty_config in ({'ntp': {}}, {'ntp': None}):
        for distro in cc_ntp.distros:
            cloud = self._get_cloud(distro)
            client_cfg = self._mock_ntp_client_config(distro=distro)
            conf_path = client_cfg['confpath']
            m_select.return_value = client_cfg
            cc_ntp.handle('cc_ntp', valid_empty_config, cloud, None, [])
            rendered = util.read_file_or_url(
                'file://' + conf_path).contents
            # Empty config falls back to the distro default pool names.
            pools = cc_ntp.generate_server_names(cloud.distro.name)
            self.assertEqual("servers []\npools {0}\n".format(pools),
                             rendered.decode())
            self.assertNotIn('Invalid config:', self.logs.getvalue())
def test_timesyncd_template(self):
    """Test timesycnd template is correct"""
    pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org']
    servers = ['192.168.23.3', '192.168.23.4']
    confpath, template_fn = self._generate_template(
        template=TIMESYNCD_TEMPLATE)
    cc_ntp.write_ntp_config_template('ubuntu',
                                     servers=servers, pools=pools,
                                     path=confpath,
                                     template_fn=template_fn,
                                     template=None)
    rendered = util.read_file_or_url('file://' + confpath).contents
    self.assertEqual(
        "[Time]\nNTP=%s %s \n" % (" ".join(servers), " ".join(pools)),
        rendered.decode())
def test_ntp_handler_mocked_template(self):
    """Test ntp handler renders ubuntu ntp.conf template."""
    pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org']
    servers = ['192.168.23.3', '192.168.23.4']
    cfg = {'ntp': {'pools': pools, 'servers': servers}}
    cloud = self._get_cloud('ubuntu')
    ntp_conf = self.tmp_path('ntp.conf', self.new_root)  # Doesn't exist
    # Provide the ntp.conf.tmpl the handler renders from.
    with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
        stream.write(NTP_TEMPLATE)
    with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
        with mock.patch.object(util, 'which', return_value=None):
            cc_ntp.handle('notimportant', cfg, cloud, None, None)
    rendered = util.read_file_or_url('file://' + ntp_conf).contents
    self.assertEqual('servers {0}\npools {1}\n'.format(servers, pools),
                     rendered.decode())
def handle_part(self, _data, ctype, filename, payload, frequency):
    """Merge an incoming Taupage AMI config MIME part into TAUPAGE_CONFIG.

    Parts without the Taupage AMI mime type are ignored. Keys from the
    new payload override keys already present in the on-disk config.
    """
    if ctype != TAUPAGE_AMI_CONFIG_MIME_TYPE:
        return
    # Lazy %-style logging; renders the same message as before.
    LOG.info("Got Taupage AMI configuration; merging with %s",
             TAUPAGE_CONFIG)

    LOG.debug("Parsing given input...")
    config_new = util.load_yaml(payload)

    LOG.debug("Loading existing configuration...")
    config_yaml = util.read_file_or_url(TAUPAGE_CONFIG)
    config_old = util.load_yaml(config_yaml)

    LOG.debug("Merging configurations...")
    # BUGFIX: dict(a.items() + b.items()) is Python-2-only (dict_items
    # cannot be concatenated). Copy-then-update keeps the same
    # "new values win" semantics on Python 3.
    config_merged = dict(config_old)
    config_merged.update(config_new)

    LOG.debug("Storing merged configuration...")
    config_yaml = util.yaml_dumps(config_merged)
    util.write_file(TAUPAGE_CONFIG, config_yaml, 0o444)
def test_ntp_handler_timesyncd(self, m_select):
    """Test ntp handler configures timesyncd"""
    servers = ['192.168.2.1', '192.168.2.2']
    pools = ['0.mypool.org']
    cfg = {'ntp': {'servers': servers, 'pools': pools}}
    client = 'systemd-timesyncd'
    for distro in cc_ntp.distros:
        cloud = self._get_cloud(distro)
        client_cfg = self._mock_ntp_client_config(distro=distro,
                                                  client=client)
        conf_path = client_cfg['confpath']
        m_select.return_value = client_cfg
        cc_ntp.handle('cc_ntp', cfg, cloud, None, [])
        rendered = util.read_file_or_url('file://' + conf_path).contents
        self.assertEqual(
            "[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n",
            rendered.decode())
def _do_include(self, content, append_msg):
    """Fetch and process every '#include'/'#include-once' url in content.

    Each non-comment line of *content* is treated as a url; fetched
    payloads are converted to messages and appended to *append_msg*.
    Urls listed after an '#include-once' marker are cached on disk and
    re-read from that cache on subsequent runs instead of refetched.
    """
    # Include a list of urls, one per line
    # also support '#include <url here>'
    # or #include-once '<url here>'
    include_once_on = False
    for line in content.splitlines():
        lc_line = line.lower()
        if lc_line.startswith("#include-once"):
            line = line[len("#include-once"):].lstrip()
            # Every following include will now
            # not be refetched.... but will be
            # re-read from a local urlcache (if it worked)
            include_once_on = True
        elif lc_line.startswith("#include"):
            line = line[len("#include"):].lstrip()
            # Disable the include once if it was on
            # if it wasn't, then this has no effect.
            include_once_on = False
        if line.startswith("#"):
            continue
        include_url = line.strip()
        if not include_url:
            continue
        include_once_fn = None
        content = None
        if include_once_on:
            include_once_fn = self._get_include_once_filename(include_url)
        if include_once_on and os.path.isfile(include_once_fn):
            # Cache hit: reuse the previously fetched payload.
            content = util.load_file(include_once_fn)
        else:
            resp = util.read_file_or_url(include_url,
                                         ssl_details=self.ssl_details)
            if include_once_on and resp.ok():
                util.write_file(include_once_fn, resp.contents,
                                mode=0o600)
            if resp.ok():
                content = resp.contents
            else:
                # FIX: Logger.warn is a deprecated alias; use warning()
                # (consistent with the other _do_include variant here).
                LOG.warning(("Fetching from %s resulted in"
                             " a invalid http code of %s"),
                            include_url, resp.code)
        if content is not None:
            new_msg = convert_string(content)
            self._process_msg(new_msg, append_msg)
def _do_include(self, content, append_msg):
    """Fetch and process every '#include'/'#include-once' url in content.

    Each non-comment line of *content* is treated as a url; fetched
    payloads are converted to messages and appended to *append_msg*.
    Urls listed after an '#include-once' marker are cached on disk and
    re-read from that cache on subsequent runs instead of refetched.
    """
    # Include a list of urls, one per line
    # also support '#include <url here>'
    # or #include-once '<url here>'
    include_once_on = False
    for line in content.splitlines():
        lc_line = line.lower()
        if lc_line.startswith("#include-once"):
            line = line[len("#include-once"):].lstrip()
            # Every following include will now
            # not be refetched.... but will be
            # re-read from a local urlcache (if it worked)
            include_once_on = True
        elif lc_line.startswith("#include"):
            line = line[len("#include"):].lstrip()
            # Disable the include once if it was on
            # if it wasn't, then this has no effect.
            include_once_on = False
        if line.startswith("#"):
            continue
        include_url = line.strip()
        if not include_url:
            continue
        include_once_fn = None
        content = None
        if include_once_on:
            include_once_fn = self._get_include_once_filename(include_url)
        if include_once_on and os.path.isfile(include_once_fn):
            # Cache hit: reuse the previously fetched payload.
            content = util.load_file(include_once_fn)
        else:
            resp = util.read_file_or_url(include_url,
                                         ssl_details=self.ssl_details)
            if include_once_on and resp.ok():
                util.write_file(include_once_fn, resp.contents,
                                mode=0o600)
            if resp.ok():
                content = resp.contents
            else:
                LOG.warning(("Fetching from %s resulted in"
                             " a invalid http code of %s"),
                            include_url, resp.code)
        if content is not None:
            new_msg = convert_string(content)
            self._process_msg(new_msg, append_msg)
def test_apt_all_proxy_written(self):
    """All three apt proxy settings land in the proxy file."""
    cfg = {
        "apt_http_proxy": "myproxy_http_proxy",
        "apt_https_proxy": "myproxy_https_proxy",
        "apt_ftp_proxy": "myproxy_ftp_proxy",
    }
    values = {"http": cfg["apt_http_proxy"],
              "https": cfg["apt_https_proxy"],
              "ftp": cfg["apt_ftp_proxy"]}
    cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
    self.assertTrue(os.path.isfile(self.pfile))
    self.assertFalse(os.path.isfile(self.cfile))
    contents = str(util.read_file_or_url(self.pfile))
    # BUGFIX: dict.iteritems() does not exist on Python 3; items() works
    # on both.
    for ptype, pval in values.items():
        self.assertTrue(self._search_apt_config(contents, ptype, pval))
def test_write_ntp_config_template_from_ntp_conf_tmpl_with_servers(self):
    """write_ntp_config_template reads content from ntp.conf.tmpl.

    It reads ntp.conf.tmpl if present and renders the value from servers
    key. When no pools key is defined, template is rendered using an
    empty list for pools.
    """
    distro = 'ubuntu'
    cfg = {'servers': ['192.168.2.1', '192.168.2.2']}
    cloud = self._get_cloud(distro)
    ntp_conf = self.tmp_path("ntp.conf", self.new_root)  # Doesn't exist
    # Provide the generic template the renderer falls back to.
    with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
        stream.write(NTP_TEMPLATE)
    with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
        cc_ntp.write_ntp_config_template(cfg, cloud, ntp_conf)
    rendered = util.read_file_or_url('file://' + ntp_conf).contents
    self.assertEqual(
        "servers ['192.168.2.1', '192.168.2.2']\npools []\n",
        rendered.decode())
def test_ntp_handler_schema_validation_warns_non_string_item_type(
        self, m_sel):
    """Ntp schema validation warns of non-strings in pools or servers.

    Schema validation is not strict, so ntp config is still be rendered.
    """
    invalid_config = {'ntp': {'pools': [123], 'servers': ['valid', None]}}
    for distro in cc_ntp.distros:
        cloud = self._get_cloud(distro)
        client_cfg = self._mock_ntp_client_config(distro=distro)
        conf_path = client_cfg['confpath']
        m_sel.return_value = client_cfg
        cc_ntp.handle('cc_ntp', invalid_config, cloud, None, [])
        self.assertIn(
            "Invalid config:\nntp.pools.0: 123 is not of type 'string'\n"
            "ntp.servers.1: None is not of type 'string'",
            self.logs.getvalue())
        rendered = util.read_file_or_url('file://' + conf_path).contents
        self.assertEqual("servers ['valid', None]\npools [123]\n",
                         rendered.decode())
def test_ntp_handler_mocked_template_snappy(self, m_util):
    """Test ntp handler renders timesycnd.conf template on snappy."""
    pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org']
    servers = ['192.168.23.3', '192.168.23.4']
    cfg = {'ntp': {'pools': pools, 'servers': servers}}
    cloud = self._get_cloud('ubuntu')
    m_util.system_is_snappy.return_value = True
    # Provide the timesyncd template the handler renders on snappy.
    tsyncd_conf = self.tmp_path("timesyncd.conf", self.new_root)
    template = '{0}.tmpl'.format(tsyncd_conf)
    with open(template, 'wb') as stream:
        stream.write(TIMESYNCD_TEMPLATE)
    with mock.patch('cloudinit.config.cc_ntp.TIMESYNCD_CONF', tsyncd_conf):
        cc_ntp.handle('notimportant', cfg, cloud, None, None)
    rendered = util.read_file_or_url('file://' + tsyncd_conf).contents
    self.assertEqual(
        "[Time]\nNTP=%s %s \n" % (" ".join(servers), " ".join(pools)),
        rendered.decode())
def test_ntp_handler_schema_validation_warns_of_non_array_type(
        self, m_select):
    """Ntp schema validation warns of non-array pools or servers types.

    Schema validation is not strict, so ntp config is still be rendered.
    """
    invalid_config = {'ntp': {'pools': 123, 'servers': 'non-array'}}
    for distro in cc_ntp.distros:
        cloud = self._get_cloud(distro)
        client_cfg = self._mock_ntp_client_config(distro=distro)
        conf_path = client_cfg['confpath']
        m_select.return_value = client_cfg
        cc_ntp.handle('cc_ntp', invalid_config, cloud, None, [])
        self.assertIn(
            "Invalid config:\nntp.pools: 123 is not of type 'array'\n"
            "ntp.servers: 'non-array' is not of type 'array'",
            self.logs.getvalue())
        rendered = util.read_file_or_url('file://' + conf_path).contents
        self.assertEqual("servers non-array\npools 123\n",
                         rendered.decode())
def test_write_ntp_config_template_defaults_pools_w_empty_lists(self):
    """write_ntp_config_template defaults pools servers upon empty config.

    When both pools and servers are empty, default NR_POOL_SERVERS get
    configured.
    """
    distro = 'ubuntu'
    default_pools = cc_ntp.generate_server_names(distro)
    confpath, template_fn = self._generate_template()
    tmpdir_patch = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
    with mock.patch(tmpdir_patch, self.new_root):
        cc_ntp.write_ntp_config_template(distro,
                                         servers=[], pools=default_pools,
                                         path=confpath,
                                         template_fn=template_fn,
                                         template=None)
    rendered = util.read_file_or_url('file://' + confpath).contents
    self.assertEqual("servers []\npools {0}\n".format(default_pools),
                     rendered.decode())
def test_write_ntp_config_template_uses_ntp_conf_distro_no_servers(self):
    """write_ntp_config_template reads content from ntp.conf.distro.tmpl.

    It reads ntp.conf.<distro>.tmpl before attempting ntp.conf.tmpl. It
    renders the value from the keys servers and pools. When no servers
    value is present, template is rendered using an empty list.
    """
    distro = 'ubuntu'
    cfg = {'pools': ['10.0.0.1', '10.0.0.2']}
    cloud = self._get_cloud(distro)
    ntp_conf = self.tmp_path('ntp.conf', self.new_root)  # Doesn't exist
    # The generic template must NOT be used when a distro one exists.
    with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
        stream.write(b'NOT READ: ntp.conf.<distro>.tmpl is primary')
    # The distro-specific template takes precedence.
    with open('{0}.{1}.tmpl'.format(ntp_conf, distro), 'wb') as stream:
        stream.write(NTP_TEMPLATE)
    with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
        cc_ntp.write_ntp_config_template(cfg, cloud, ntp_conf)
    rendered = util.read_file_or_url('file://' + ntp_conf).contents
    self.assertEqual("servers []\npools ['10.0.0.1', '10.0.0.2']\n",
                     rendered.decode())
def test_apt_all_proxy_written(self):
    """All three apt proxy settings land in the proxy file."""
    cfg = {
        'apt_http_proxy': 'myproxy_http_proxy',
        'apt_https_proxy': 'myproxy_https_proxy',
        'apt_ftp_proxy': 'myproxy_ftp_proxy'
    }
    values = {
        'http': cfg['apt_http_proxy'],
        'https': cfg['apt_https_proxy'],
        'ftp': cfg['apt_ftp_proxy'],
    }
    cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
    self.assertTrue(os.path.isfile(self.pfile))
    self.assertFalse(os.path.isfile(self.cfile))
    contents = str(util.read_file_or_url(self.pfile))
    # BUGFIX: dict.iteritems() does not exist on Python 3; items() works
    # on both.
    for ptype, pval in values.items():
        self.assertTrue(self._search_apt_config(contents, ptype, pval))
def get_instance_userdata(api_version='latest',
                          metadata_address='http://169.254.169.254',
                          ssl_details=None, timeout=5, retries=5):
    """Fetch instance user-data from the metadata service.

    A missing user-data resource is expected and not an error: retries
    are skipped for those status codes and an empty string is returned.
    """
    ud_url = url_helper.combine_url(metadata_address, api_version)
    ud_url = url_helper.combine_url(ud_url, 'user-data')
    user_data = ''
    try:
        # It is ok for userdata to not exist (thats why we are stopping if
        # NOT_FOUND occurs) and just in that case returning an empty string.
        exception_cb = functools.partial(_skip_retry_on_codes,
                                         SKIP_USERDATA_CODES)
        response = util.read_file_or_url(ud_url,
                                         ssl_details=ssl_details,
                                         timeout=timeout,
                                         retries=retries,
                                         exception_cb=exception_cb)
        user_data = str(response)
    except url_helper.UrlError as e:
        if e.code not in SKIP_USERDATA_CODES:
            util.logexc(LOG, "Failed fetching userdata from url %s",
                        ud_url)
    except Exception:
        util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
    return user_data
def handle_part(self, _data, ctype, filename, payload, frequency):
    """Merge an incoming Taupage AMI config MIME part into TAUPAGE_CONFIG.

    Parts without the Taupage AMI mime type are ignored. The merged
    result is written to a temp file, diffed against the current config
    for the logs, then moved into place. Keys from the new payload
    override keys already present in the on-disk config.
    """
    if ctype != TAUPAGE_AMI_CONFIG_MIME_TYPE:
        return
    # Lazy %-style logging; renders the same message as before.
    LOG.info("Got Taupage AMI configuration; merging with %s",
             TAUPAGE_CONFIG)

    LOG.debug("Parsing given input...")
    config_new = util.load_yaml(payload)

    LOG.debug("Loading existing configuration...")
    config_yaml = util.read_file_or_url(TAUPAGE_CONFIG)
    config_old = util.load_yaml(config_yaml)

    LOG.debug("Merging configurations...")
    # BUGFIX: dict(a.items() + b.items()) is Python-2-only (dict_items
    # cannot be concatenated). Copy-then-update keeps the same
    # "new values win" semantics on Python 3.
    config_merged = dict(config_old)
    config_merged.update(config_new)

    LOG.debug("Storing merged configuration...")
    config_yaml = util.yaml_dumps(config_merged)
    util.write_file(TMP_TAUPAGE_CONFIG, config_yaml, 0o444)

    LOG.debug("Comparing current configuration with the old one...")
    # Best-effort diff for the logs; the return code is irrelevant.
    subprocess.call(['diff', '-u0', TAUPAGE_CONFIG, TMP_TAUPAGE_CONFIG])

    LOG.debug("Moving the new configuration into place...")
    shutil.move(TMP_TAUPAGE_CONFIG, TAUPAGE_CONFIG)
def test_defaults_pools_empty_lists_sles(self):
    """write_ntp_config_template defaults opensuse pools upon empty config.

    When both pools and servers are empty, default NR_POOL_SERVERS get
    configured.
    """
    distro = 'sles'
    default_pools = cc_ntp.generate_server_names(distro)
    confpath, template_fn = self._generate_template()
    cc_ntp.write_ntp_config_template(distro,
                                     servers=[], pools=[],
                                     path=confpath,
                                     template_fn=template_fn,
                                     template=None)
    rendered = util.read_file_or_url('file://' + confpath).contents
    # sles defaults come from the opensuse pool.
    for pool in default_pools:
        self.assertIn('opensuse', pool)
    self.assertEqual("servers []\npools {0}\n".format(default_pools),
                     rendered.decode())
    self.assertIn(
        "Adding distro default ntp pool servers: {0}".format(
            ",".join(default_pools)),
        self.logs.getvalue())
def test_write_ntp_config_template_defaults_pools_empty_lists_sles(self):
    """write_ntp_config_template defaults pools servers upon empty config.

    When both pools and servers are empty, default NR_POOL_SERVERS get
    configured.
    """
    distro = 'sles'
    cloud = self._get_cloud(distro)
    ntp_conf = self.tmp_path('ntp.conf', self.new_root)  # Doesn't exist
    # Provide the generic template the renderer falls back to.
    with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
        stream.write(NTP_TEMPLATE)
    with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
        cc_ntp.write_ntp_config_template({}, cloud, ntp_conf)
    rendered = util.read_file_or_url('file://' + ntp_conf).contents
    default_pools = ["{0}.opensuse.pool.ntp.org".format(x)
                     for x in range(0, cc_ntp.NR_POOL_SERVERS)]
    self.assertEqual("servers []\npools {0}\n".format(default_pools),
                     rendered.decode())
    self.assertIn(
        "Adding distro default ntp pool servers: {0}".format(
            ",".join(default_pools)),
        self.logs.getvalue())
def test_ntp_handler_timesyncd(self, m_ntp_install):
    """Test ntp handler configures timesyncd.

    With ntp installation disabled (install returns False), rendering the
    timesyncd template must emit all configured servers and pools on the
    NTP= line.  Removes a leftover debug print of the template path.
    """
    m_ntp_install.return_value = False
    distro = 'ubuntu'
    cfg = {
        'servers': ['192.168.2.1', '192.168.2.2'],
        'pools': ['0.mypool.org'],
    }
    mycloud = self._get_cloud(distro)
    tsyncd_conf = self.tmp_path("timesyncd.conf", self.new_root)
    # Create timesyncd.conf.tmpl
    template = '{0}.tmpl'.format(tsyncd_conf)
    with open(template, 'wb') as stream:
        stream.write(TIMESYNCD_TEMPLATE)
    with mock.patch('cloudinit.config.cc_ntp.TIMESYNCD_CONF', tsyncd_conf):
        cc_ntp.write_ntp_config_template(cfg, mycloud, tsyncd_conf,
                                         template='timesyncd.conf')
    content = util.read_file_or_url('file://' + tsyncd_conf).contents
    self.assertEqual(
        "[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n",
        content.decode())
def handle(name, cfg, cloud, log, args):
    """Post instance identity data to a configured 'phone_home' URL.

    Config may come from a file given as the single module argument, or
    from the 'phone_home' key of *cfg*.  Collects instance id, hostname
    and host public keys, then POSTs the selected subset to the
    (template-expanded) URL, retrying per the 'tries' setting.

    :param name: module name, used in log messages.
    :param cfg: cloud-config dictionary.
    :param cloud: cloud object providing instance metadata.
    :param log: logger for this module.
    :param args: optional module arguments; args[0] is a config file path.
    """
    if len(args) != 0:
        ph_cfg = util.read_conf(args[0])
    else:
        if 'phone_home' not in cfg:
            log.debug(("Skipping module named %s, "
                       "no 'phone_home' configuration found"), name)
            return
        ph_cfg = cfg['phone_home']

    if 'url' not in ph_cfg:
        log.warn(("Skipping module named %s, "
                  "no 'url' found in 'phone_home' configuration"), name)
        return

    url = ph_cfg['url']
    post_list = ph_cfg.get('post', 'all')
    tries = ph_cfg.get('tries')
    try:
        tries = int(tries)
    except (ValueError, TypeError):
        # Covers both a non-numeric string and an absent (None) setting.
        tries = 10
        util.logexc(log, "Configuration entry 'tries' is not an integer, "
                    "using %s instead", tries)

    if post_list == "all":
        post_list = POST_LIST_ALL

    all_keys = {}
    all_keys['instance_id'] = cloud.get_instance_id()
    all_keys['hostname'] = cloud.get_hostname()

    pubkeys = {
        'pub_key_dsa': '/etc/ssh/ssh_host_dsa_key.pub',
        'pub_key_rsa': '/etc/ssh/ssh_host_rsa_key.pub',
        'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub',
    }
    # Best effort: a missing host key file is logged but does not abort.
    # NOTE: .iteritems() was Python-2-only; .items() works on both.
    for (n, path) in pubkeys.items():
        try:
            all_keys[n] = util.load_file(path)
        except Exception:
            util.logexc(log, "%s: failed to open, can not phone home that "
                        "data!", path)

    submit_keys = {}
    for k in post_list:
        if k in all_keys:
            submit_keys[k] = all_keys[k]
        else:
            submit_keys[k] = None
            log.warn(("Requested key %s from 'post'"
                      " configuration list not available"), k)

    # Get them ready to be posted
    real_submit_keys = {}
    for (k, v) in submit_keys.items():
        if v is None:
            real_submit_keys[k] = 'N/A'
        else:
            real_submit_keys[k] = str(v)

    # Incase the url is parameterized
    url_params = {
        'INSTANCE_ID': all_keys['instance_id'],
    }
    url = templater.render_string(url, url_params)
    try:
        util.read_file_or_url(url, data=real_submit_keys,
                              retries=tries, sec_between=3,
                              ssl_details=util.fetch_ssl_details(cloud.paths))
    except Exception:
        # Phone-home is best effort; log the failure and continue boot.
        util.logexc(log, "Failed to post phone home data to %s in %s tries",
                    url, tries)
def load_tfile(*args, **kwargs):
    """Load a file (or URL) via util.read_file_or_url and return its
    contents decoded to text."""
    response = util.read_file_or_url(*args, **kwargs)
    return util.decode_binary(response.contents)
def test_config_replaced(self):
    """apply_apt_config writes apt_config content over the config file."""
    util.write_file(self.pfile, "content doesnt matter")
    apt_cfg = {"apt_config": "foo"}
    cc_apt_configure.apply_apt_config(apt_cfg, self.pfile, self.cfile)
    self.assertTrue(os.path.isfile(self.cfile))
    self.assertEqual("foo", str(util.read_file_or_url(self.cfile)))
def test_proxy_replaced(self):
    """apply_apt_config writes apt_proxy content over the proxy file."""
    util.write_file(self.cfile, "content doesnt matter")
    apt_cfg = {"apt_proxy": "foo"}
    cc_apt_configure.apply_apt_config(apt_cfg, self.pfile, self.cfile)
    self.assertTrue(os.path.isfile(self.pfile))
    written = str(util.read_file_or_url(self.pfile))
    self.assertTrue(self._search_apt_config(written, "http", "foo"))
def post(self, url, data=None, extra_headers=None):
    """POST *data* to *url*; extra_headers (if given) are merged over
    the default headers without mutating them."""
    merged_headers = self.headers
    if extra_headers is not None:
        merged_headers = self.headers.copy()
        merged_headers.update(extra_headers)
    return util.read_file_or_url(url, data=data, headers=merged_headers)
def get(self, url, secure=False):
    """GET *url*; when *secure*, the extra secure headers are merged
    over the defaults (defaults themselves are left untouched)."""
    request_headers = self.headers
    if secure:
        request_headers = self.headers.copy()
        request_headers.update(self.extra_secure_headers)
    return util.read_file_or_url(url, headers=request_headers)
def load_tfile_or_url(*args, **kwargs):
    """Fetch a file or URL and return its contents decoded to text."""
    raw = util.read_file_or_url(*args, **kwargs).contents
    return util.decode_binary(raw)