def get_hcloud_data():
    """Detect Hetzner Cloud via DMI and return ``(on_hetzner, serial)``.

    Returns (False, None) on any other vendor.  Raises RuntimeError when
    the vendor matches but SMBIOS exposes no serial number.
    """
    if dmi.read_dmi_data('system-manufacturer') != "Hetzner":
        return (False, None)
    serial = dmi.read_dmi_data("system-serial-number")
    if not serial:
        raise RuntimeError("Hetzner Cloud detected, but no serial found")
    LOG.debug("Running on Hetzner Cloud: serial=%s", serial)
    return (True, serial)
def test_container_returns_none(self):
    """In a container read_dmi_data should always return None."""
    key, val = ("system-product-name", "my_product")
    self._create_sysfs_file('product_name', val)
    # Sanity check: outside a container the sysfs value is readable.
    self._m_is_container.return_value = False
    self.assertEqual(val, dmi.read_dmi_data(key))
    # Inside a container the very same key must yield None.
    self._m_is_container.return_value = True
    self.assertIsNone(dmi.read_dmi_data(key))
def platform_reports_gce():
    """Return True when DMI data identifies Google Compute Engine."""
    pname = dmi.read_dmi_data("system-product-name") or "N/A"
    if pname in ("Google Compute Engine", "Google"):
        return True
    # system-product-name is not always guaranteed (LP: #1674861)
    serial = dmi.read_dmi_data("system-serial-number") or "N/A"
    if serial.startswith("GoogleCloud-"):
        return True
    LOG.debug("Not running on google cloud. product-name=%s serial=%s",
              pname, serial)
    return False
def detect_openstack(accept_oracle=False):
    """Return True when a potential OpenStack platform is detected."""
    # Non-Intel cpus don't properly report dmi product names, so treat
    # them as potentially-OpenStack and let later probing decide.
    if not util.is_x86():
        return True
    if dmi.read_dmi_data('system-product-name') in VALID_DMI_PRODUCT_NAMES:
        return True
    if dmi.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS:
        return True
    if accept_oracle and oracle._is_platform_viable():
        return True
    return util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA
def test_dmidecode_not_used_on_arm(self):
    """dmidecode must not be consulted on arches that cannot run it.

    Fix: removed three leftover debug ``print("... =%s", subp)`` calls —
    they were stray debugging noise and also misused print() as if it
    performed %-formatting.
    """
    self.patch_mapping({})
    self._create_sysfs_parent_directory()
    dmi_val = "from-dmidecode"
    dmi_name = "use-dmidecode"
    self._configure_dmidecode_return(dmi_name, dmi_val)
    expected = {"armel": None, "aarch64": dmi_val, "x86_64": dmi_val}
    found = {}
    # we do not run the 'dmi-decode' binary on some arches
    # verify that anything requested that is not in the sysfs dir
    # will return None on those arches.
    with mock.patch("cloudinit.util.os.uname") as m_uname:
        for arch in expected:
            m_uname.return_value = (
                "x-sysname",
                "x-nodename",
                "x-release",
                "x-version",
                arch,
            )
            found[arch] = dmi.read_dmi_data(dmi_name)
    self.assertEqual(expected, found)
def test_sysfs_used_with_key_in_mapping_and_file_on_disk(self):
    """A mapped key present in sysfs must be served from sysfs."""
    self.patch_mapping(
        {"mapped-key": dmi.KernelNames("mapped-value", None)})
    from_sysfs = "sys-used-correctly"
    self._create_sysfs_file("mapped-value", from_sysfs)
    # Prime dmidecode with a different answer; it must never be used.
    self._configure_dmidecode_return("mapped-key", "wrong-wrong-wrong")
    self.assertEqual(from_sysfs, dmi.read_dmi_data("mapped-key"))
def test_empty_string_returned_instead_of_foxfox(self):
    """Uninitialized DMI values (all 0xff bytes) map to empty string."""
    # uninitialized dmi values show as \xff, return empty string
    raw_value = b'\xff' * 32 + b'\n'
    self._create_sysfs_file('product_name', raw_value)
    self.assertEqual("", dmi.read_dmi_data('system-product-name'))
def test_ds_invalid_on_non_vmware_platform(self, m_fn):
    """get_data() must report failure on a non-VMware platform.

    Fix: ``assertEqual(system_type, None)`` replaced with the unittest
    idiom ``assertIsNone`` (identity check, clearer failure message).
    """
    # Outside VMware, dmi reports no product name at all.
    system_type = dmi.read_dmi_data("system-product-name")
    self.assertIsNone(system_type)
    m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
    ds = get_ds(self.tmp)
    ds.vmware_rpctool = "vmware-rpctool"
    ret = ds.get_data()
    self.assertFalse(ret)
def test_dmidecode_used_if_no_sysfs_file_on_disk(self):
    """Fall back to dmidecode when sysfs has no file for the key."""
    self.patch_mapping({})
    self._create_sysfs_parent_directory()
    expected = 'dmidecode-used'
    self._configure_dmidecode_return('use-dmidecode', expected)
    fake_uname = ('x-sysname', 'x-nodename', 'x-release', 'x-version',
                  'x86_64')
    with mock.patch("cloudinit.util.os.uname") as m_uname:
        m_uname.return_value = fake_uname
        self.assertEqual(expected, dmi.read_dmi_data('use-dmidecode'))
def instance_id_matches_system_uuid(instance_id, field='system-uuid'):
    """Return True when *instance_id* equals the DMI *field* value
    (case-insensitive); False when either side is missing/empty."""
    # quickly (local check only) if self.instance_id is still valid
    # we check kernel command line or files.
    if not instance_id:
        return False
    dmi_value = dmi.read_dmi_data(field)
    return bool(dmi_value) and instance_id.lower() == dmi_value.lower()
def _collect_platform_data():
    """Returns a dictionary of platform info from dmi or /sys/hypervisor.

    Keys in the dictionary are as follows:
       uuid: system-uuid from dmi or /sys/hypervisor
       uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi'
       serial: dmi 'system-serial-number' (/sys/.../product_serial)
       asset_tag: 'dmidecode -s chassis-asset-tag'
       vendor: dmi 'system-manufacturer' (/sys/.../sys_vendor)

    On Ec2 instances experimentation is that product_serial is upper case,
    and product_uuid is lower case.  This returns lower case values for
    both.

    Fix: the uuid/serial/asset_tag fields used verbose
    ``if x is None: x = ""`` blocks while vendor already used the inline
    ``or ""`` form — normalized all four to the same idiom.
    """
    data = {}
    try:
        # /sys/hypervisor/uuid is preferred when present (Xen guests).
        uuid = util.load_file("/sys/hypervisor/uuid").strip()
        data["uuid_source"] = "hypervisor"
    except Exception:
        uuid = dmi.read_dmi_data("system-uuid")
        data["uuid_source"] = "dmi"

    # read_dmi_data may return None; coerce to "" before lowercasing.
    data["uuid"] = (uuid or "").lower()
    data["serial"] = (dmi.read_dmi_data("system-serial-number") or "").lower()
    data["asset_tag"] = (dmi.read_dmi_data("chassis-asset-tag") or "").lower()
    data["vendor"] = (dmi.read_dmi_data("system-manufacturer") or "").lower()
    return data
def read_sysinfo():
    """Detect UpCloud via SMBIOS and return ``(on_upcloud, server_uuid)``.

    Fix: the concatenated critical-log message was missing a space
    between "support via" and the URL, rendering as "viahttps://...".
    """
    # UpCloud embeds vendor ID and server UUID in the
    # SMBIOS information
    # Detect if we are on UpCloud and return the UUID
    vendor_name = dmi.read_dmi_data("system-manufacturer")
    if vendor_name != "UpCloud":
        return False, None
    server_uuid = dmi.read_dmi_data("system-uuid")
    if server_uuid:
        LOG.debug("system identified via SMBIOS as UpCloud server: %s",
                  server_uuid)
    else:
        msg = ("system identified via SMBIOS as a UpCloud server, but "
               "did not provide an ID. Please contact support via "
               "https://hub.upcloud.com or via email with [email protected]")
        LOG.critical(msg)
        raise RuntimeError(msg)
    return True, server_uuid
def is_running_in_cloudsigma(self):
    """Use DMI data to decide whether this instance of cloud-init is
    running in the CloudSigma's infrastructure."""
    LOG.debug("determining hypervisor product name via dmi data")
    product = dmi.read_dmi_data("system-product-name")
    if not product:
        LOG.debug("system-product-name not available in dmi data")
        return False
    LOG.debug("detected hypervisor as %s", product)
    return "cloudsigma" in product.lower()
def read_sysinfo():
    """Detect DigitalOcean via SMBIOS; return ``(on_do, droplet_id)``.

    DigitalOcean embeds the vendor ID and the droplet id in the SMBIOS
    information.  Raises RuntimeError when the vendor matches but no
    droplet id is exposed.
    """
    if dmi.read_dmi_data("system-manufacturer") != "DigitalOcean":
        return (False, None)
    droplet_id = dmi.read_dmi_data("system-serial-number")
    if not droplet_id:
        msg = ("system identified via SMBIOS as a DigitalOcean "
               "Droplet, but did not provide an ID. Please file a "
               "support ticket at: "
               "https://cloud.digitalocean.com/support/tickets/new")
        LOG.critical(msg)
        raise RuntimeError(msg)
    LOG.debug(
        "system identified via SMBIOS as DigitalOcean Droplet: %s",
        droplet_id,
    )
    return (True, droplet_id)
def get_smartos_environ(uname_version=None, product_name=None):
    """Return the SmartOS environment type, or None when not on SmartOS.

    :param uname_version: override for ``os.uname()[3]`` (testing hook)
    :param product_name: override for the DMI system-product-name

    Fix: ``os.uname()`` was invoked unconditionally even when
    *uname_version* was supplied; the probe is now only made when needed.
    """
    # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
    # report 'BrandZ virtual linux' as the kernel version
    if uname_version is None:
        uname_version = os.uname()[3]
    if uname_version == "BrandZ virtual linux":
        return SMARTOS_ENV_LX_BRAND

    if product_name is None:
        system_type = dmi.read_dmi_data("system-product-name")
    else:
        system_type = product_name

    if system_type and system_type.startswith("SmartDC"):
        return SMARTOS_ENV_KVM

    return None
def on_scaleway():
    """
    There are three ways to detect if you are on Scaleway:

    * check DMI data: not yet implemented by Scaleway, but the check is
      made to be future-proof.
    * the initrd created the file /var/run/scaleway.
    * "scaleway" is in the kernel cmdline.
    """
    if dmi.read_dmi_data('system-manufacturer') == 'Scaleway':
        return True
    if os.path.exists('/var/run/scaleway'):
        return True
    return 'scaleway' in util.get_cmdline()
def get_cloud_type(self):
    """
    Description:
        Get the type for the cloud back end this instance is running on
        by examining the string returned by reading either:
        CLOUD_INFO_FILE or the dmi data.

    Input:
        None

    Returns:
        One of the following strings:
        'RHEV', 'VSPHERE' or 'UNKNOWN'
    """
    # An explicit cloud-info file, when present, takes precedence.
    if os.path.exists(CLOUD_INFO_FILE):
        try:
            return util.load_file(CLOUD_INFO_FILE).strip().upper()
        except IOError:
            util.logexc(
                LOG,
                "Unable to access cloud info file at %s.",
                CLOUD_INFO_FILE,
            )
            return "UNKNOWN"
    system_name = dmi.read_dmi_data("system-product-name")
    if not system_name:
        return "UNKNOWN"
    name = system_name.upper()
    if name.startswith("RHEV"):
        return "RHEV"
    if name.startswith("VMWARE"):
        return "VSPHERE"
    return "UNKNOWN"
def _get_data(self):
    """Fetch metadata, userdata and vendordata from xenstore.

    Returns True when any of the three was found, False otherwise.

    Fix: ``metadata``/``userdata``/``vendordata`` were only bound inside
    the ``self.xenstore_read`` branch; if ``self.data_access_method`` was
    already truthy on entry, the assignments below raised NameError.
    They are now initialized up front.
    """
    metadata = userdata = vendordata = None
    if not self.data_access_method:
        system_type = dmi.read_dmi_data("system-product-name")
        if system_type is None:
            LOG.debug("No system-product-name found")
            return False
        if "xen" not in system_type.lower():
            LOG.debug("Not a Xen platform")
            return False
    if not self.data_access_method:
        if self.xenstore_read:
            metadata = xenstoredata("metadata", self.xenstore_read)
            userdata = xenstoredata("userdata", self.xenstore_read)
            vendordata = xenstoredata("vendordata", self.xenstore_read)
            if metadata or userdata or vendordata:
                self.data_access_method = True
    if not self.data_access_method:
        LOG.error("Failed to find data on xenstore-data")
        return False
    LOG.info("Using xenstore data for metadata, userdata and vendordata")
    # Access metadata from xenstore vm-data/metadata
    self.metadata = process_metadata(load_json_or_yaml(metadata))
    # Access Userdata from xenstore vm-data/userdata
    self.userdata_raw = userdata
    # Access Vendordata from xenstore vm-data/vendordata
    self.vendordata_raw = vendordata
    if self.metadata or self.userdata_raw or self.vendordata_raw:
        return True
    else:
        return False
def _get_data(self):
    """Locate and load OVF data for this datasource.

    Probes, in order: the local seed directory, the VMware guest
    customization path (vmware-imc raw data or imc config), and finally
    the OVF transports (guestinfo, then ISO).  Populates self.metadata,
    self.userdata_raw, self.vendordata_raw, self.seed and self.cfg and
    returns True on success; returns False when nothing was found or a
    'seedfrom' protocol is unsupported.
    """
    found = []
    md = {}
    ud = ""
    vd = ""
    vmwareImcConfigFilePath = None
    nicspath = None

    defaults = {
        "instance-id": "iid-dsovf",
    }

    (seedfile, contents) = get_ovf_env(self.paths.seed_dir)

    # NOTE(review): read_dmi_data may return None (container/non-x86);
    # the elif below tolerates that via the truthiness check.
    system_type = dmi.read_dmi_data("system-product-name")
    if system_type is None:
        LOG.debug("No system-product-name found")

    if seedfile:
        # Found a seed dir
        seed = os.path.join(self.paths.seed_dir, seedfile)
        (md, ud, cfg) = read_ovf_environment(contents)
        self.environment = contents
        found.append(seed)
    elif system_type and 'vmware' in system_type.lower():
        LOG.debug("VMware Virtualization Platform found")
        if not self.vmware_customization_supported:
            LOG.debug("Skipping the check for "
                      "VMware Customization support")
        else:
            # Locate the deployPkg plugin shipped with VMware/open-vm tools;
            # its presence indicates guest customization may be in progress.
            search_paths = (
                "/usr/lib/vmware-tools", "/usr/lib64/vmware-tools",
                "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools")

            plugin = "libdeployPkgPlugin.so"
            deployPkgPluginPath = None
            for path in search_paths:
                deployPkgPluginPath = search_file(path, plugin)
                if deployPkgPluginPath:
                    LOG.debug("Found the customization plugin at %s",
                              deployPkgPluginPath)
                    break

            if deployPkgPluginPath:
                # When the VM is powered on, the "VMware Tools" daemon
                # copies the customization specification file to
                # /var/run/vmware-imc directory. cloud-init code needs
                # to search for the file in that directory which indicates
                # that required metadata and userdata files are now
                # present.
                max_wait = get_max_wait_from_cfg(self.ds_cfg)
                vmwareImcConfigFilePath = util.log_time(
                    logfunc=LOG.debug,
                    msg="waiting for configuration file",
                    func=wait_for_imc_cfg_file,
                    args=("cust.cfg", max_wait))
            else:
                LOG.debug("Did not find the customization plugin.")

            md_path = None
            if vmwareImcConfigFilePath:
                imcdirpath = os.path.dirname(vmwareImcConfigFilePath)
                cf = ConfigFile(vmwareImcConfigFilePath)
                self._vmware_cust_conf = Config(cf)
                LOG.debug("Found VMware Customization Config File at %s",
                          vmwareImcConfigFilePath)
                try:
                    (md_path, ud_path, nicspath) = collect_imc_file_paths(
                        self._vmware_cust_conf)
                except FileNotFoundError as e:
                    _raise_error_status(
                        "File(s) missing in directory",
                        e,
                        GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
                        vmwareImcConfigFilePath,
                        self._vmware_cust_conf)
            else:
                LOG.debug("Did not find VMware Customization Config File")

            # Honor disable_vmware_customization setting on metadata absent
            if not md_path:
                if util.get_cfg_option_bool(
                        self.sys_cfg,
                        "disable_vmware_customization",
                        True):
                    LOG.debug(
                        "Customization for VMware platform is disabled.")
                    # reset vmwareImcConfigFilePath to None to avoid
                    # customization for VMware platform
                    vmwareImcConfigFilePath = None

            # Raw-data path: a cloud-init style meta-data file was shipped
            # alongside the imc config, so load it directly.
            use_raw_data = bool(vmwareImcConfigFilePath and md_path)
            if use_raw_data:
                set_gc_status(self._vmware_cust_conf, "Started")
                LOG.debug("Start to load cloud-init meta data and user data")
                try:
                    (md, ud, cfg, network) = load_cloudinit_data(
                        md_path, ud_path)
                    if network:
                        self._network_config = network
                    else:
                        self._network_config = (
                            self.distro.generate_fallback_config()
                        )
                except safeyaml.YAMLError as e:
                    _raise_error_status(
                        "Error parsing the cloud-init meta data",
                        e,
                        GuestCustErrorEnum.GUESTCUST_ERROR_WRONG_META_FORMAT,
                        vmwareImcConfigFilePath,
                        self._vmware_cust_conf)
                except Exception as e:
                    _raise_error_status(
                        "Error loading cloud-init configuration",
                        e,
                        GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
                        vmwareImcConfigFilePath,
                        self._vmware_cust_conf)

                self._vmware_cust_found = True
                found.append('vmware-tools')
                util.del_dir(imcdirpath)
                set_customization_status(
                    GuestCustStateEnum.GUESTCUST_STATE_DONE,
                    GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
                set_gc_status(self._vmware_cust_conf, "Successful")
            elif vmwareImcConfigFilePath:
                # Load configuration from vmware_imc
                self._vmware_nics_to_enable = ""
                try:
                    set_gc_status(self._vmware_cust_conf, "Started")
                    (md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf)
                    self._vmware_nics_to_enable = get_nics_to_enable(
                        nicspath)
                    product_marker = self._vmware_cust_conf.marker_id
                    hasmarkerfile = check_marker_exists(
                        product_marker,
                        os.path.join(self.paths.cloud_dir, 'data'))
                    # Marker present means this spec already ran once;
                    # skip the one-time "special" customization steps.
                    special_customization = (
                        product_marker and not hasmarkerfile)
                    customscript = self._vmware_cust_conf.custom_script_name

                    # In case there is a custom script, check whether VMware
                    # Tools configuration allow the custom script to run.
                    if special_customization and customscript:
                        defVal = "false"
                        if self._vmware_cust_conf.default_run_post_script:
                            LOG.debug(
                                "Set default value to true due to"
                                " customization configuration."
                            )
                            defVal = "true"

                        custScriptConfig = get_tools_config(
                            CONFGROUPNAME_GUESTCUSTOMIZATION,
                            GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS,
                            defVal)
                        if custScriptConfig.lower() != "true":
                            # Update the customization status if custom script
                            # is disabled
                            msg = "Custom script is disabled by VM Administrator"
                            LOG.debug(msg)
                            set_customization_status(
                                GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
                                GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED)
                            raise RuntimeError(msg)

                    ccScriptsDir = os.path.join(
                        self.paths.get_cpath("scripts"),
                        "per-instance")
                except Exception as e:
                    _raise_error_status(
                        "Error parsing the customization Config File",
                        e,
                        GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
                        vmwareImcConfigFilePath,
                        self._vmware_cust_conf)

                # Pre-customization script runs before network setup.
                if special_customization:
                    if customscript:
                        try:
                            precust = PreCustomScript(
                                customscript, imcdirpath)
                            precust.execute()
                        except Exception as e:
                            _raise_error_status(
                                "Error executing pre-customization script",
                                e,
                                GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
                                vmwareImcConfigFilePath,
                                self._vmware_cust_conf)

                try:
                    LOG.debug("Preparing the Network configuration")
                    self._network_config = get_network_config_from_conf(
                        self._vmware_cust_conf,
                        True,
                        True,
                        self.distro.osfamily)
                except Exception as e:
                    _raise_error_status(
                        "Error preparing Network Configuration",
                        e,
                        GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED,
                        vmwareImcConfigFilePath,
                        self._vmware_cust_conf)

                if special_customization:
                    LOG.debug("Applying password customization")
                    pwdConfigurator = PasswordConfigurator()
                    adminpwd = self._vmware_cust_conf.admin_password
                    try:
                        resetpwd = self._vmware_cust_conf.reset_password
                        if adminpwd or resetpwd:
                            pwdConfigurator.configure(adminpwd, resetpwd,
                                                      self.distro)
                        else:
                            LOG.debug("Changing password is not needed")
                    except Exception as e:
                        _raise_error_status(
                            "Error applying Password Configuration",
                            e,
                            GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
                            vmwareImcConfigFilePath,
                            self._vmware_cust_conf)

                    # Post-customization script runs after network/password.
                    if customscript:
                        try:
                            postcust = PostCustomScript(customscript,
                                                        imcdirpath,
                                                        ccScriptsDir)
                            postcust.execute()
                        except Exception as e:
                            _raise_error_status(
                                "Error executing post-customization script",
                                e,
                                GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
                                vmwareImcConfigFilePath,
                                self._vmware_cust_conf)

                if product_marker:
                    try:
                        setup_marker_files(
                            product_marker,
                            os.path.join(self.paths.cloud_dir, 'data'))
                    except Exception as e:
                        _raise_error_status(
                            "Error creating marker files",
                            e,
                            GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
                            vmwareImcConfigFilePath,
                            self._vmware_cust_conf)

                self._vmware_cust_found = True
                found.append('vmware-tools')

                # TODO: Need to set the status to DONE only when the
                # customization is done successfully.
                util.del_dir(os.path.dirname(vmwareImcConfigFilePath))
                enable_nics(self._vmware_nics_to_enable)
                set_customization_status(
                    GuestCustStateEnum.GUESTCUST_STATE_DONE,
                    GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
                set_gc_status(self._vmware_cust_conf, "Successful")
    else:
        # No seed and not VMware: probe the OVF transports in order.
        np = [('com.vmware.guestInfo', transport_vmware_guestinfo),
              ('iso', transport_iso9660)]
        name = None
        for name, transfunc in np:
            contents = transfunc()
            if contents:
                break
        if contents:
            (md, ud, cfg) = read_ovf_environment(contents)
            self.environment = contents
            found.append(name)

    # There was no OVF transports found
    if len(found) == 0:
        return False

    if 'seedfrom' in md and md['seedfrom']:
        seedfrom = md['seedfrom']
        seedfound = False
        for proto in self.supported_seed_starts:
            if seedfrom.startswith(proto):
                seedfound = proto
                break
        if not seedfound:
            LOG.debug("Seed from %s not supported by %s",
                      seedfrom, self)
            return False

        (md_seed, ud, vd) = util.read_seeded(seedfrom, timeout=None)
        LOG.debug("Using seeded cache data from %s", seedfrom)

        md = util.mergemanydict([md, md_seed])
        found.append(seedfrom)

    # Now that we have exhausted any other places merge in the defaults
    md = util.mergemanydict([md, defaults])

    self.seed = ",".join(found)
    self.metadata = md
    self.userdata_raw = ud
    self.vendordata_raw = vd
    # NOTE(review): cfg is only bound on paths that parsed an OVF/imc
    # environment; presumably every reachable success path binds it —
    # verify before relying on it.
    self.cfg = cfg
    return True
def test_none_returned_if_dmidecode_not_in_path(self):
    """read_dmi_data yields None when the dmidecode binary is absent."""
    # Make subp.which report every binary as missing.
    not_found = mock.patch.object(subp, 'which', lambda _: False)
    self.patched_funcs.enter_context(not_found)
    self.patch_mapping({})
    self.assertIsNone(dmi.read_dmi_data('expect-fail'))
def _get_subplatform(self):
    """Return 'vmware (<seed>)' or 'ovf (<seed>)' for this datasource.

    Fix: ``dmi.read_dmi_data`` can return None (container, non-x86,
    missing key), so calling ``.lower()`` on its result unguarded raised
    AttributeError; coerce None to "" first.
    """
    system_type = dmi.read_dmi_data("system-product-name") or ""
    if system_type.lower() == 'vmware':
        return 'vmware (%s)' % self.seed
    return 'ovf (%s)' % self.seed
def test_freebsd_uses_kenv(self):
    """On a FreeBSD system, kenv is called."""
    self._m_is_FreeBSD.return_value = True
    dmi_key = "system-product-name"
    dmi_val = "my_product"
    self._configure_kenv_return(dmi_key, dmi_val)
    self.assertEqual(dmi.read_dmi_data(dmi_key), dmi_val)
def test_container_returns_none_on_unknown(self):
    """In a container even bogus keys return None."""
    self._m_is_container.return_value = True
    self._create_sysfs_file('product_name', "should-be-ignored")
    # Both an unknown key and a valid, sysfs-backed key yield None.
    for key in ("bogus", "system-product-name"):
        self.assertIsNone(dmi.read_dmi_data(key))
def test_ds_valid_on_vmware_platform(self):
    """DMI must report the expected product name on a VMware platform."""
    self.assertEqual(
        dmi.read_dmi_data("system-product-name"), PRODUCT_NAME)
def test_sysfs_used_with_key_in_mapping_and_file_on_disk(self):
    """A mapped key present in sysfs must bypass dmidecode entirely."""
    self.patch_mapping({'mapped-key': dmi.kdmi('mapped-value', None)})
    from_sysfs = 'sys-used-correctly'
    self._create_sysfs_file('mapped-value', from_sysfs)
    # Prime dmidecode with a different answer; it must never be used.
    self._configure_dmidecode_return('mapped-key', 'wrong-wrong-wrong')
    self.assertEqual(from_sysfs, dmi.read_dmi_data('mapped-key'))
def _is_platform_viable(self):
    """Return True when the DMI product name marks an Exoscale host.

    Fix: ``dmi.read_dmi_data`` can return None (container, non-x86,
    missing key), making ``.startswith`` raise AttributeError; coerce
    None to "" before the prefix check.
    """
    product_name = dmi.read_dmi_data("system-product-name") or ""
    return product_name.startswith(EXOSCALE_DMI_NAME)
def _is_aliyun():
    """Return True when the DMI product name matches Aliyun's."""
    product = dmi.read_dmi_data("system-product-name")
    return product == ALIYUN_PRODUCT
def _is_platform_viable():
    """Return True when the DMI chassis asset tag identifies this platform."""
    return dmi.read_dmi_data('chassis-asset-tag') == CHASSIS_ASSET_TAG
def _read_system_uuid():
    """Return the lowercased DMI system UUID, or None when unavailable."""
    sys_uuid = dmi.read_dmi_data('system-uuid')
    if sys_uuid is None:
        return None
    return sys_uuid.lower()
def get_sysinfo():
    """Return the manufacturer and serial ("subid") read from DMI."""
    manufacturer = dmi.read_dmi_data("system-manufacturer")
    subid = dmi.read_dmi_data("system-serial-number")
    return {"manufacturer": manufacturer, "subid": subid}