def test_validateconfig_schema_of_example(self, schema_id, example):
    """For a given example in a config module we test if it is valid
    according to the unified schema of all config modules.
    """
    schema = get_schema()
    config_load = load(example)
    # cloud-init-schema-v1 is permissive of additionalProperties at the
    # top-level.
    # To validate specific schemas against known documented examples
    # we need to only define the specific module schema and supply
    # strict=True.
    # TODO(Drop the pop/update once full schema is strict)
    schema.pop("allOf")
    schema.update(schema["$defs"][schema_id])
    schema["additionalProperties"] = False
    # Some module examples reference keys defined in multiple schemas
    supplemental_schemas = {
        "cc_ubuntu_advantage": ["cc_power_state_change"],
        "cc_update_hostname": ["cc_set_hostname"],
        "cc_users_groups": ["cc_ssh_import_id"],
        "cc_disk_setup": ["cc_mounts"],
    }
    for supplement_id in supplemental_schemas.get(schema_id, []):
        supplemental_props = dict(
            schema["$defs"][supplement_id]["properties"]
        )
        schema["properties"].update(supplemental_props)
    validate_cloudconfig_schema(config_load, schema, strict=True)
def test_bring_up_all_interfaces_v2(
    self, m_subp, activator, available_calls, expected_call_list,
    available_mocks
):
    network_state = parse_net_config_data(load(V2_CONFIG))
    activator.bring_up_all_interfaces(network_state)
    for call in m_subp.call_args_list:
        assert call in expected_call_list
def test_load(self):
    valid_yaml = "foo: bar"
    expected = {'foo': 'bar'}
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmpf:
        tmpf.write(valid_yaml)
        tmpf.close()
    # Parse the file's contents; passing the path itself to yaml.load
    # would only parse the filename string, not the YAML we just wrote.
    with open(tmpf.name) as stream:
        self.assertEqual(yaml.load(stream.read()), expected)
def test_apply_v2_renames(
    self, m_rename_interfaces, m_device_driver, m_device_devid
):
    m_device_driver.return_value = 'virtio_net'
    m_device_devid.return_value = '0x15d8'
    net.apply_network_config_names(yaml.load(self.V2_CONFIG))
    call = ['52:54:00:12:34:00', 'interface0', 'virtio_net', '0x15d8']
    m_rename_interfaces.assert_called_with([call])
def safeload_yaml_or_dict(data):
    '''
    The meta data could be JSON or YAML. Since YAML is a strict superset of
    JSON, we will unmarshal the data as YAML. If data is None then a new
    dictionary is returned.
    '''
    if not data:
        return {}
    return safeyaml.load(data)
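# Illustrative usage of safeload_yaml_or_dict above (a sketch; the sample
# metadata strings are made up, and the helper plus cloudinit.safeyaml are
# assumed to be importable from this module):
def _demo_safeload_yaml_or_dict():
    # JSON parses fine because YAML accepts JSON documents.
    assert safeload_yaml_or_dict('{"instance-id": "i-1"}') == {
        "instance-id": "i-1"
    }
    # Plain YAML works the same way.
    assert safeload_yaml_or_dict("instance-id: i-1") == {"instance-id": "i-1"}
    # Falsy input returns an empty dict instead of raising.
    assert safeload_yaml_or_dict(None) == {}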
def test_apply_v2_renames_skips_without_setname_or_mac(
    self, config_attr: str
):
    networking = LinuxNetworking()
    netcfg = yaml.load(getattr(self, config_attr))
    with mock.patch.object(
        networking, "_rename_interfaces"
    ) as m_rename_interfaces:
        networking.apply_network_config_names(netcfg)
    m_rename_interfaces.assert_called_with([])
def network_config(self):
    # Pull the network configuration out of the metadata.
    if self.metadata and 'network' in self.metadata:
        data = self._get_encoded_metadata('network')
        if data:
            # Load the YAML-formatted network data into an object
            # and return it.
            net_config = safeyaml.load(data)
            LOG.debug("Loaded network config: %s", net_config)
            return net_config.get('network')
    return None
def load(data):
    '''
    load first attempts to unmarshal the provided data as JSON, and if
    that fails then attempts to unmarshal the data as YAML. If data is
    None then a new dictionary is returned.
    '''
    if not data:
        return {}
    try:
        return json.loads(data)
    except (TypeError, ValueError):
        # Not valid JSON; fall back to YAML, a superset of JSON.
        return safeyaml.load(data)
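# Illustrative check of the JSON-first, YAML-fallback behavior of load()
# above (a sketch; the payloads are made up):
def _demo_load():
    assert load(None) == {}
    # Valid JSON is parsed directly by json.loads.
    assert load('{"a": 1}') == {"a": 1}
    # Invalid JSON falls back to YAML parsing.
    assert load("a: 1\nb: [2, 3]") == {"a": 1, "b": [2, 3]}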
def test_render_output_has_yaml(self, m_is_freebsd, m_subp):
    entry = {
        "yaml": V1,
    }
    network_config = safeyaml.load(entry["yaml"])
    ns = cloudinit.net.network_state.parse_net_config_data(network_config)
    files = self._render_and_read(state=ns)
    assert files == {
        "/etc/resolv.conf": "# dummy resolv.conf\n",
        "/etc/rc.conf": (
            "# dummy rc.conf\n"
            "ifconfig_eno1="
            "'172.20.80.129 netmask 255.255.255.128 mtu 1470'\n"
        ),
    }
def test_render_output_has_yaml(self, mock_subp):
    entry = {
        'yaml': V1,
    }
    network_config = safeyaml.load(entry['yaml'])
    ns = cloudinit.net.network_state.parse_net_config_data(network_config)
    files = self._render_and_read(state=ns)
    assert files == {
        '/etc/resolv.conf': '# dummy resolv.conf\n',
        '/etc/rc.conf': (
            "# dummy rc.conf\n"
            "ifconfig_eno1="
            "'172.20.80.129 netmask 255.255.255.128 mtu 1470'\n"
        ),
    }
def test_photon_network_config_v1_with_duplicates(self):
    expected = """\
    [Match]
    Name=eth0
    [Network]
    DHCP=no
    DNS=1.2.3.4
    Domains=test.com
    [Address]
    Address=192.168.0.102/24"""
    net_cfg = safeyaml.load(V1_NET_CFG_WITH_DUPS)
    expected = self.create_conf_dict(expected.splitlines())
    expected_cfgs = {
        self.nwk_file_path("eth0"): expected,
    }
    self._apply_and_verify(
        self.distro.apply_network_config, net_cfg, expected_cfgs.copy()
    )
def test_apply_renames(
    self,
    m_device_driver,
    m_device_devid,
    config_attr: str,
):
    networking = LinuxNetworking()
    m_device_driver.return_value = "virtio_net"
    m_device_devid.return_value = "0x15d8"
    netcfg = yaml.load(getattr(self, config_attr))
    with mock.patch.object(
        networking, "_rename_interfaces"
    ) as m_rename_interfaces:
        networking.apply_network_config_names(netcfg)
    assert (
        mock.call(
            [["52:54:00:12:34:00", "interface0", "virtio_net", "0x15d8"]]
        )
        == m_rename_interfaces.call_args_list[-1]
    )
def test_apply_v2_renames_raises_runtime_error_on_unknown_version(self):
    with self.assertRaises(RuntimeError):
        net.apply_network_config_names(yaml.load("version: 3"))
def test_apply_v2_renames_skips_without_mac(self, m_rename_interfaces):
    net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_MAC))
    m_rename_interfaces.assert_called_with([])
def validate_cloudconfig_file(config_path, schema, annotate=False):
    """Validate cloudconfig file adheres to a specific jsonschema.

    @param config_path: Path to the yaml cloud-config file to parse, or None
        to default to system userdata from Paths object.
    @param schema: Dict describing a valid jsonschema to validate against.
    @param annotate: Boolean set True to print original config file with
        error annotations on the offending lines.

    @raises SchemaValidationError containing any of schema_errors encountered.
    @raises RuntimeError when config_path does not exist.
    """
    if config_path is None:
        # Use system's raw userdata path
        if os.getuid() != 0:
            raise RuntimeError(
                "Unable to read system userdata as non-root user."
                " Try using sudo"
            )
        paths = read_cfg_paths()
        user_data_file = paths.get_ipath_cur("userdata_raw")
        content = load_file(user_data_file, decode=False)
    else:
        if not os.path.exists(config_path):
            raise RuntimeError(
                "Configfile {0} does not exist".format(config_path)
            )
        content = load_file(config_path, decode=False)
    if not content.startswith(CLOUD_CONFIG_HEADER):
        errors = (
            (
                "format-l1.c1",
                'File {0} needs to begin with "{1}"'.format(
                    config_path, CLOUD_CONFIG_HEADER.decode()
                ),
            ),
        )
        error = SchemaValidationError(errors)
        if annotate:
            print(
                annotated_cloudconfig_file(
                    {}, content, error.schema_errors, {}
                )
            )
        raise error
    try:
        if annotate:
            cloudconfig, marks = safeyaml.load_with_marks(content)
        else:
            cloudconfig = safeyaml.load(content)
            marks = {}
    except yaml.YAMLError as e:
        line = column = 1
        mark = None
        if hasattr(e, "context_mark") and getattr(e, "context_mark"):
            mark = getattr(e, "context_mark")
        elif hasattr(e, "problem_mark") and getattr(e, "problem_mark"):
            mark = getattr(e, "problem_mark")
        if mark:
            line = mark.line + 1
            column = mark.column + 1
        errors = (
            (
                "format-l{line}.c{col}".format(line=line, col=column),
                "File {0} is not valid yaml. {1}".format(
                    config_path, str(e)
                ),
            ),
        )
        error = SchemaValidationError(errors)
        if annotate:
            print(
                annotated_cloudconfig_file(
                    {}, content, error.schema_errors, {}
                )
            )
        raise error from e
    if not isinstance(cloudconfig, dict):
        # Return a meaningful message on empty cloud-config
        if not annotate:
            raise RuntimeError("Cloud-config is not a YAML dict.")
    try:
        validate_cloudconfig_schema(cloudconfig, schema, strict=True)
    except SchemaValidationError as e:
        if annotate:
            print(
                annotated_cloudconfig_file(
                    cloudconfig, content, e.schema_errors, marks
                )
            )
        raise
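# Illustrative caller for validate_cloudconfig_file above (a sketch:
# get_schema() and SchemaValidationError are assumed to come from the same
# schema module, and "/tmp/user-data.yaml" is a hypothetical #cloud-config
# file on disk):
def _demo_validate_cloudconfig_file():
    schema = get_schema()
    try:
        validate_cloudconfig_file("/tmp/user-data.yaml", schema)
    except SchemaValidationError as e:
        # schema_errors holds (position-or-path, message) pairs, e.g.
        # "format-l1.c1" when the file lacks the "#cloud-config" header.
        for path, msg in e.schema_errors:
            print(path, msg)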
def _parse_network_state_from_config(self, config):
    yaml = safeyaml.load(config)
    return network_state.parse_net_config_data(yaml['network'])
def test_apply_v2_renames_raises_runtime_error_on_unknown_version(self):
    networking = LinuxNetworking()
    with pytest.raises(RuntimeError):
        networking.apply_network_config_names(yaml.load("version: 3"))
def handle_args(name, args):
    if not args.directory.endswith("/"):
        args.directory += "/"

    if not os.path.isdir(args.directory):
        os.makedirs(args.directory)

    if args.debug:
        log.setupBasicLogging(level=log.DEBUG)
    else:
        log.setupBasicLogging(level=log.WARN)

    if args.mac:
        known_macs = {}
        for item in args.mac:
            iface_name, iface_mac = item.split(",", 1)
            known_macs[iface_mac] = iface_name
    else:
        known_macs = None

    net_data = args.network_data.read()
    if args.kind == "eni":
        pre_ns = eni.convert_eni_data(net_data)
    elif args.kind == "yaml":
        pre_ns = safeyaml.load(net_data)
        if 'network' in pre_ns:
            pre_ns = pre_ns.get('network')
        if args.debug:
            sys.stderr.write('\n'.join(
                ["Input YAML", safeyaml.dumps(pre_ns), ""]))
    elif args.kind == 'network_data.json':
        pre_ns = openstack.convert_net_json(
            json.loads(net_data), known_macs=known_macs)
    elif args.kind == 'azure-imds':
        pre_ns = azure.parse_network_config(json.loads(net_data))
    elif args.kind == 'vmware-imc':
        config = ovf.Config(ovf.ConfigFile(args.network_data.name))
        pre_ns = ovf.get_network_config_from_conf(config, False)

    ns = network_state.parse_net_config_data(pre_ns)

    if args.debug:
        sys.stderr.write('\n'.join(
            ["", "Internal State", safeyaml.dumps(ns), ""]))

    distro_cls = distros.fetch(args.distro)
    distro = distro_cls(args.distro, {}, None)
    config = {}
    if args.output_kind == "eni":
        r_cls = eni.Renderer
        config = distro.renderer_configs.get('eni')
    elif args.output_kind == "netplan":
        r_cls = netplan.Renderer
        config = distro.renderer_configs.get('netplan')
        # don't run netplan generate/apply
        config['postcmds'] = False
        # trim leading slash
        config['netplan_path'] = config['netplan_path'][1:]
        # enable some netplan features
        config['features'] = ['dhcp-use-domains', 'ipv6-mtu']
    elif args.output_kind == "networkd":
        r_cls = networkd.Renderer
        config = distro.renderer_configs.get('networkd')
    elif args.output_kind == "sysconfig":
        r_cls = sysconfig.Renderer
        config = distro.renderer_configs.get('sysconfig')
    else:
        raise RuntimeError("Invalid output_kind")

    r = r_cls(config=config)
    sys.stderr.write(''.join([
        "Read input format '%s' from '%s'.\n" % (
            args.kind, args.network_data.name),
        "Wrote output format '%s' to '%s'\n" % (
            args.output_kind, args.directory)]) + "\n")
    r.render_network_state(network_state=ns, target=args.directory)
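# Illustrative driver for handle_args above (a sketch, not the real
# "net-convert" CLI wiring; "v2-net.yaml" is a hypothetical file containing
# a "network: {version: 2, ...}" document, and the output directory is
# arbitrary):
import argparse

def _demo_handle_args():
    with open("v2-net.yaml") as network_data:
        args = argparse.Namespace(
            directory="/tmp/net-convert-out",
            debug=False,
            mac=None,
            kind="yaml",
            network_data=network_data,
            distro="ubuntu",
            output_kind="netplan",
        )
        # Parses the YAML into a NetworkState and renders netplan
        # configuration files under args.directory.
        handle_args("net-convert", args)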
def call(self, rootd=None, mocks=None, func="main", args=None, files=None,
         policy_dmi=DI_DEFAULT_POLICY,
         policy_no_dmi=DI_DEFAULT_POLICY_NO_DMI,
         ec2_strict_id=DI_EC2_STRICT_ID_DEFAULT):
    if args is None:
        args = []
    if mocks is None:
        mocks = []
    if files is None:
        files = {}
    if rootd is None:
        rootd = self.tmp_dir()

    unset = '_unset'
    wrap = self.tmp_path(path="_shwrap", dir=rootd)
    populate_dir(rootd, files)

    # DI_DEFAULT_POLICY* are declared always as to not rely
    # on the default in the code.  This is because SRU releases change
    # the value in the code, and thus tests would fail there.
    head = [
        "DI_MAIN=noop",
        "DEBUG_LEVEL=2",
        "DI_LOG=stderr",
        "PATH_ROOT='%s'" % rootd,
        ". " + self.dsid_path,
        'DI_DEFAULT_POLICY="%s"' % policy_dmi,
        'DI_DEFAULT_POLICY_NO_DMI="%s"' % policy_no_dmi,
        'DI_EC2_STRICT_ID_DEFAULT="%s"' % ec2_strict_id,
        "",
    ]

    def write_mock(data):
        ddata = {'out': None, 'err': None, 'ret': 0, 'RET': None}
        ddata.update(data)
        for k in ddata:
            if ddata[k] is None:
                ddata[k] = unset
        return SHELL_MOCK_TMPL % ddata

    mocklines = []
    defaults = [
        {'name': 'detect_virt', 'RET': 'none', 'ret': 1},
        {'name': 'uname', 'out': UNAME_MYSYS},
        {'name': 'blkid', 'out': BLKID_EFI_ROOT},
        {'name': 'ovf_vmware_transport_guestinfo',
         'out': 'No value found', 'ret': 1},
        {'name': 'dmi_decode', 'ret': 1,
         'err': 'No dmidecode program. ERROR.'},
    ]

    written = [d['name'] for d in mocks]
    for data in mocks:
        mocklines.append(write_mock(data))
    for d in defaults:
        if d['name'] not in written:
            mocklines.append(write_mock(d))

    endlines = [func + ' ' + ' '.join(['"%s"' % s for s in args])]

    with open(wrap, "w") as fp:
        fp.write('\n'.join(head + mocklines + endlines) + "\n")

    rc = 0
    try:
        out, err = util.subp(['sh', '-c', '. %s' % wrap], capture=True)
    except util.ProcessExecutionError as e:
        rc = e.exit_code
        out = e.stdout
        err = e.stderr

    cfg = None
    cfg_out = os.path.join(rootd, 'run/cloud-init/cloud.cfg')
    if os.path.exists(cfg_out):
        contents = util.load_file(cfg_out)
        try:
            cfg = safeyaml.load(contents)
        except Exception as e:
            cfg = {"_INVALID_YAML": contents, "_EXCEPTION": str(e)}

    return CallReturn(rc, out, err, cfg, dir2dict(rootd))
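# Illustrative use of the call() helper above from a ds-identify test (a
# sketch; the mocked detect_virt result and the seeded file are made up):
def _demo_call(self):
    ret = self.call(
        files={"etc/cloud/cloud.cfg": "# cloud-init config\n"},
        mocks=[{"name": "detect_virt", "RET": "kvm", "ret": 0}],
    )
    # The returned tuple carries, in order: exit code, stdout, stderr, the
    # parsed run/cloud-init/cloud.cfg (or None), and a snapshot of rootd.
    rc, out, err, cfg, files = ret
    assert isinstance(rc, int)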