def _collect_platform_data():
    """Returns a dictionary of platform info from dmi or /sys/hypervisor.

    Keys in the dictionary are as follows:
       uuid: system-uuid from dmi or /sys/hypervisor
       uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi'
       serial: dmi 'system-serial-number' (/sys/.../product_serial)

    Experimentation on EC2 instances shows that product_serial is upper
    case, and product_uuid is lower case.  This returns lower case values
    for both.
    """
    data = {}
    try:
        uuid = util.load_file("/sys/hypervisor/uuid").strip()
        data['uuid_source'] = 'hypervisor'
    except Exception:
        uuid = util.read_dmi_data('system-uuid')
        data['uuid_source'] = 'dmi'

    if uuid is None:
        uuid = ''
    data['uuid'] = uuid.lower()

    serial = util.read_dmi_data('system-serial-number')
    if serial is None:
        serial = ''

    data['serial'] = serial.lower()

    return data
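A minimal test sketch for the helper above, assuming this variant lives in cloudinit.sources.DataSourceEc2 (the module path is an assumption); it exercises the DMI fallback by making the /sys/hypervisor read fail:

import unittest
from unittest import mock

from cloudinit import util
from cloudinit.sources import DataSourceEc2 as ec2


class TestCollectPlatformData(unittest.TestCase):
    def test_dmi_used_when_hypervisor_uuid_unreadable(self):
        # load_file raising drives the code into the DMI fallback branch
        with mock.patch.object(util, 'load_file', side_effect=IOError), \
                mock.patch.object(util, 'read_dmi_data',
                                  side_effect=['AB12-UUID', 'SER-99']):
            data = ec2._collect_platform_data()
        self.assertEqual('dmi', data['uuid_source'])
        self.assertEqual('ab12-uuid', data['uuid'])  # values are lower-cased
        self.assertEqual('ser-99', data['serial'])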
def dmi_data():
    sys_uuid = util.read_dmi_data("system-uuid")
    sys_type = util.read_dmi_data("system-product-name")

    if not sys_uuid or not sys_type:
        return None

    return (sys_uuid.lower(), sys_type)
def test_container_returns_none(self):
    """In a container read_dmi_data should always return None."""

    # first verify we get the value if not in container
    self._m_is_container.return_value = False
    key, val = ("system-product-name", "my_product")
    self._create_sysfs_file('product_name', val)
    self.assertEqual(val, util.read_dmi_data(key))

    # then verify in container returns None
    self._m_is_container.return_value = True
    self.assertIsNone(util.read_dmi_data(key))
def platform_reports_gce():
    pname = util.read_dmi_data('system-product-name') or "N/A"
    if pname == "Google Compute Engine":
        return True

    # system-product-name is not always guaranteed (LP: #1674861)
    serial = util.read_dmi_data('system-serial-number') or "N/A"
    if serial.startswith("GoogleCloud-"):
        return True

    LOG.debug("Not running on google cloud. product-name=%s serial=%s",
              pname, serial)
    return False
def detect_openstack(accept_oracle=False):
    """Return True when a potential OpenStack platform is detected."""
    if not util.is_x86():
        return True  # Non-Intel cpus don't properly report dmi product names

    product_name = util.read_dmi_data('system-product-name')
    if product_name in VALID_DMI_PRODUCT_NAMES:
        return True
    elif util.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS:
        return True
    elif accept_oracle and oracle._is_platform_viable():
        return True
    elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA:
        return True
    return False
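For context, representative values for the constants referenced above, as assumed from upstream cloud-init of roughly this vintage (verify against the actual DataSourceOpenStack module before relying on them):

DMI_PRODUCT_NOVA = 'OpenStack Nova'
DMI_PRODUCT_COMPUTE = 'OpenStack Compute'
VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE]
DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud'
VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM]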
def get_cloud_type(self):
    '''
    Description:
        Get the type for the cloud back end this instance is running on
        by examining the string returned by reading the dmi data.

    Input:
        None

    Returns:
        One of the following strings:
        'RHEV', 'VSPHERE' or 'UNKNOWN'

    '''
    uname_arch = os.uname()[4]
    if uname_arch.startswith("arm") or uname_arch == "aarch64":
        # Disabling because dmi data is not available on ARM processors
        LOG.debug("Disabling AltCloud datasource on arm (LP: #1243287)")
        return 'UNKNOWN'

    system_name = util.read_dmi_data("system-product-name")
    if not system_name:
        return 'UNKNOWN'

    sys_name = system_name.upper()

    if sys_name.startswith('RHEV'):
        return 'RHEV'

    if sys_name.startswith('VMWARE'):
        return 'VSPHERE'

    return 'UNKNOWN'
def get_smartos_environ(uname_version=None, product_name=None,
                        uname_arch=None):
    uname = os.uname()
    if uname_arch is None:
        uname_arch = uname[4]

    if uname_arch.startswith("arm") or uname_arch == "aarch64":
        return None

    # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
    # report 'BrandZ virtual linux' as the kernel version
    if uname_version is None:
        uname_version = uname[3]
    if uname_version.lower() == 'brandz virtual linux':
        return SMARTOS_ENV_LX_BRAND

    if product_name is None:
        system_type = util.read_dmi_data("system-product-name")
    else:
        system_type = product_name

    if system_type and 'smartdc' in system_type.lower():
        return SMARTOS_ENV_KVM

    return None
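Because the platform inputs are injectable parameters, both detection branches can be exercised without touching the host. A small sketch, assuming the SMARTOS_ENV_* constants are in scope and the interpreter runs on a non-ARM machine:

# LX-brand zone: detected purely from the kernel version string
assert get_smartos_environ(
    uname_version='BrandZ virtual linux') == SMARTOS_ENV_LX_BRAND
# KVM guest: detected from the DMI product name, case-insensitively
assert get_smartos_environ(
    uname_version='4.15.0', product_name='SmartDC HVM') == SMARTOS_ENV_KVM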
def dmi_data():
    sys_type = util.read_dmi_data("system-product-name")

    if not sys_type:
        return None

    return sys_type
def get_cloud_type(self):
    '''
    Description:
        Get the type for the cloud back end this instance is running on
        by examining the string returned by reading the dmi data.

    Input:
        None

    Returns:
        One of the following strings:
        'RHEV', 'VSPHERE' or 'UNKNOWN'

    '''
    system_name = util.read_dmi_data("system-product-name")
    if not system_name:
        return 'UNKNOWN'

    sys_name = system_name.upper()

    if sys_name.startswith('RHEV'):
        return 'RHEV'

    if sys_name.startswith('VMWARE'):
        return 'VSPHERE'

    return 'UNKNOWN'
def test_dmidecode_used_if_no_sysfs_file_on_disk(self):
    self.patch_mapping({})
    self._create_sysfs_parent_directory()
    expected_dmi_value = 'dmidecode-used'
    self._configure_dmidecode_return('use-dmidecode', expected_dmi_value)
    self.assertEqual(expected_dmi_value,
                     util.read_dmi_data('use-dmidecode'))
def test_dots_returned_instead_of_foxfox(self):
    # uninitialized dmi values show as \xff; read_dmi_data reports
    # such all-0xff values as the empty string
    my_len = 32
    dmi_value = b'\xff' * my_len + b'\n'
    expected = ""
    dmi_key = 'system-product-name'
    sysfs_key = 'product_name'
    self._create_sysfs_file(sysfs_key, dmi_value)
    self.assertEqual(expected, util.read_dmi_data(dmi_key))
def test_dmidecode_used_if_no_sysfs_file_on_disk(self):
    self.patch_mapping({})
    self._create_sysfs_parent_directory()
    expected_dmi_value = 'dmidecode-used'
    self._configure_dmidecode_return('use-dmidecode', expected_dmi_value)
    with mock.patch("cloudinit.util.os.uname") as m_uname:
        m_uname.return_value = ('x-sysname', 'x-nodename',
                                'x-release', 'x-version', 'x86_64')
        self.assertEqual(expected_dmi_value,
                         util.read_dmi_data('use-dmidecode'))
def instance_id_matches_system_uuid(instance_id, field='system-uuid'):
    # quickly (local check only) if self.instance_id is still valid
    # we check kernel command line or files.
    if not instance_id:
        return False

    dmi_value = util.read_dmi_data(field)
    if not dmi_value:
        return False
    return instance_id.lower() == dmi_value.lower()
def instance_id_matches_system_uuid(instance_id, field="system-uuid"):
    # quickly (local check only) if self.instance_id is still valid
    # we check kernel command line or files.
    if not instance_id:
        return False

    dmi_value = util.read_dmi_data(field)
    if not dmi_value:
        return False
    return instance_id.lower() == dmi_value.lower()
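A hypothetical call site for this helper, sketching how a datasource's check_instance_id() might use it for a purely local cache-validity test (illustrative, not lifted from a specific datasource):

def check_instance_id(self, sys_cfg):
    # compare the cached id against the current DMI system-uuid; a
    # mismatch suggests the disk image was cloned onto different
    # hardware, so the cached datasource should be discarded
    return instance_id_matches_system_uuid(self.get_instance_id())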
def _collect_platform_data():
    """Returns a dictionary of platform info from dmi or /sys/hypervisor.

    Keys in the dictionary are as follows:
       uuid: system-uuid from dmi or /sys/hypervisor
       uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi'
       serial: dmi 'system-serial-number' (/sys/.../product_serial)
       asset_tag: 'dmidecode -s chassis-asset-tag'
       vendor: dmi 'system-manufacturer' (/sys/.../sys_vendor)

    Experimentation on EC2 instances shows that product_serial is upper
    case, and product_uuid is lower case.  This returns lower case values
    for both.
    """
    data = {}
    try:
        uuid = util.load_file("/sys/hypervisor/uuid").strip()
        data['uuid_source'] = 'hypervisor'
    except Exception:
        uuid = util.read_dmi_data('system-uuid')
        data['uuid_source'] = 'dmi'

    if uuid is None:
        uuid = ''
    data['uuid'] = uuid.lower()

    serial = util.read_dmi_data('system-serial-number')
    if serial is None:
        serial = ''
    data['serial'] = serial.lower()

    asset_tag = util.read_dmi_data('chassis-asset-tag')
    if asset_tag is None:
        asset_tag = ''
    data['asset_tag'] = asset_tag.lower()

    vendor = util.read_dmi_data('system-manufacturer')
    data['vendor'] = (vendor if vendor else '').lower()

    return data
def is_running_in_cloudsigma(self):
    """
    Uses dmi data to detect if this instance of cloud-init is running
    in CloudSigma's infrastructure.
    """
    LOG.debug("determining hypervisor product name via dmi data")
    sys_product_name = util.read_dmi_data("system-product-name")
    if not sys_product_name:
        LOG.debug("system-product-name not available in dmi data")
        return False
    LOG.debug("detected hypervisor as %s", sys_product_name)
    return 'cloudsigma' in sys_product_name.lower()
def read_sysinfo():
    # DigitalOcean embeds vendor ID and instance/droplet_id in the
    # SMBIOS information

    # Detect if we are on DigitalOcean and return the Droplet's ID
    vendor_name = util.read_dmi_data("system-manufacturer")
    if vendor_name != "DigitalOcean":
        return (False, None)

    droplet_id = util.read_dmi_data("system-serial-number")
    if droplet_id:
        LOG.debug("system identified via SMBIOS as DigitalOcean Droplet: %s",
                  droplet_id)
    else:
        msg = ("system identified via SMBIOS as a DigitalOcean "
               "Droplet, but did not provide an ID. Please file a "
               "support ticket at: "
               "https://cloud.digitalocean.com/support/tickets/new")
        LOG.critical(msg)
        raise RuntimeError(msg)

    return (True, droplet_id)
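A hypothetical consumer of read_sysinfo(), sketching how a _get_data() implementation might gate on the platform check (names are illustrative):

def _get_data(self):
    (is_do, droplet_id) = read_sysinfo()
    if not is_do:
        # not DigitalOcean hardware; let other datasources run
        return False
    LOG.info("Running on DigitalOcean. droplet_id=%s", droplet_id)
    ...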
def _collect_platform_data():
    # returns a dictionary with all lower case values:
    #   uuid: system-uuid from dmi or /sys/hypervisor
    #   uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi'
    #   serial: dmi 'system-serial-number' (/sys/.../product_serial)
    data = {}
    try:
        uuid = util.load_file("/sys/hypervisor/uuid").strip()
        data['uuid_source'] = 'hypervisor'
    except Exception:
        uuid = util.read_dmi_data('system-uuid')
        data['uuid_source'] = 'dmi'

    if uuid is None:
        uuid = ''
    data['uuid'] = uuid.lower()

    serial = util.read_dmi_data('system-serial-number')
    if serial is None:
        serial = ''
    data['serial'] = serial.lower()

    return data
def is_running_in_cloudsigma(self):
    """
    Uses dmi data to detect if this instance of cloud-init is running
    in CloudSigma's infrastructure.
    """
    LOG.debug("determining hypervisor product name via dmi data")
    sys_product_name = util.read_dmi_data("system-product-name")
    if not sys_product_name:
        LOG.debug("system-product-name not available in dmi data")
        return False
    LOG.debug("detected hypervisor as %s", sys_product_name)
    return 'cloudsigma' in sys_product_name.lower()
def test_dmidecode_not_used_on_arm(self):
    self.patch_mapping({})
    self._create_sysfs_parent_directory()
    dmi_val = 'from-dmidecode'
    dmi_name = 'use-dmidecode'
    self._configure_dmidecode_return(dmi_name, dmi_val)

    expected = {'armel': None, 'aarch64': dmi_val, 'x86_64': dmi_val}
    found = {}
    # we do not run the 'dmidecode' binary on some arches
    # verify that anything requested that is not in the sysfs dir
    # will return None on those arches.
    with mock.patch("cloudinit.util.os.uname") as m_uname:
        for arch in expected:
            m_uname.return_value = ('x-sysname', 'x-nodename',
                                    'x-release', 'x-version', arch)
            found[arch] = util.read_dmi_data(dmi_name)
    self.assertEqual(expected, found)
def get_smartos_environ(uname_version=None, product_name=None):
    uname = os.uname()

    # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
    # report 'BrandZ virtual linux' as the kernel version
    if uname_version is None:
        uname_version = uname[3]
    if uname_version == 'BrandZ virtual linux':
        return SMARTOS_ENV_LX_BRAND

    if product_name is None:
        system_type = util.read_dmi_data("system-product-name")
    else:
        system_type = product_name

    if system_type and system_type.startswith('SmartDC'):
        return SMARTOS_ENV_KVM

    return None
def get_smartos_environ(uname_version=None, product_name=None):
    uname = os.uname()

    # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
    # report 'BrandZ virtual linux' as the kernel version
    if uname_version is None:
        uname_version = uname[3]
    if uname_version.lower() == 'brandz virtual linux':
        return SMARTOS_ENV_LX_BRAND

    if product_name is None:
        system_type = util.read_dmi_data("system-product-name")
    else:
        system_type = product_name

    if system_type and 'smartdc' in system_type.lower():
        return SMARTOS_ENV_KVM

    return None
def on_scaleway():
    """
    There are three ways to detect if you are on Scaleway:

    * check DMI data: not yet implemented by Scaleway, but the check is
      made to be future-proof.
    * the initrd created the file /var/run/scaleway.
    * "scaleway" is in the kernel cmdline.
    """
    vendor_name = util.read_dmi_data('system-manufacturer')
    if vendor_name == 'Scaleway':
        return True

    if os.path.exists('/var/run/scaleway'):
        return True

    cmdline = util.get_cmdline()
    if 'scaleway' in cmdline:
        return True

    return False
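A minimal sketch of driving only the kernel-cmdline branch under mocks, assuming on_scaleway() and cloudinit.util are importable in the test's scope:

from unittest import mock

from cloudinit import util

with mock.patch.object(util, 'read_dmi_data', return_value=None), \
        mock.patch.object(
            util, 'get_cmdline',
            return_value='BOOT_IMAGE=/vmlinuz root=/dev/vda scaleway'), \
        mock.patch('os.path.exists', return_value=False):
    # DMI and /var/run/scaleway probes are stubbed out; only the
    # 'scaleway' token in the cmdline can make this True
    assert on_scaleway()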
def is_running_in_cloudsigma(self):
    """
    Uses dmi data to detect if this instance of cloud-init is running
    in CloudSigma's infrastructure.
    """
    uname_arch = os.uname()[4]
    if uname_arch.startswith("arm") or uname_arch == "aarch64":
        # Disabling because dmi data is not available on ARM processors
        LOG.debug("Disabling CloudSigma datasource on arm (LP: #1243287)")
        return False

    LOG.debug("determining hypervisor product name via dmi data")
    sys_product_name = util.read_dmi_data("system-product-name")
    if not sys_product_name:
        LOG.debug("system-product-name not available in dmi data")
        return False
    LOG.debug("detected hypervisor as %s", sys_product_name)
    return 'cloudsigma' in sys_product_name.lower()
def get_cloud_type(self):
    '''
    Description:
        Get the type for the cloud back end this instance is running on
        by examining the string returned by reading either:
            CLOUD_INFO_FILE or
            the dmi data.

    Input:
        None

    Returns:
        One of the following strings:
        'RHEV', 'VSPHERE' or 'UNKNOWN'

    '''
    if os.path.exists(CLOUD_INFO_FILE):
        try:
            cloud_type = util.load_file(CLOUD_INFO_FILE).strip().upper()
        except IOError:
            util.logexc(LOG, 'Unable to access cloud info file at %s.',
                        CLOUD_INFO_FILE)
            return 'UNKNOWN'
        return cloud_type
    system_name = util.read_dmi_data("system-product-name")
    if not system_name:
        return 'UNKNOWN'

    sys_name = system_name.upper()

    if sys_name.startswith('RHEV'):
        return 'RHEV'

    if sys_name.startswith('VMWARE'):
        return 'VSPHERE'

    return 'UNKNOWN'
def test_none_returned_if_dmidecode_not_in_path(self):
    self.patched_funcs.enter_context(
        mock.patch.object(util, 'which', lambda _: False))
    self.patch_mapping({})
    self.assertIsNone(util.read_dmi_data('expect-fail'))
def test_none_returned_if_neither_source_has_data(self):
    self.patch_mapping({})
    self._configure_dmidecode_return("key", "value")
    self.assertIsNone(util.read_dmi_data("expect-fail"))
def _get_data(self): found = [] md = {} ud = "" vmwareImcConfigFilePath = None nicspath = None defaults = { "instance-id": "iid-dsovf", } (seedfile, contents) = get_ovf_env(self.paths.seed_dir) system_type = util.read_dmi_data("system-product-name") if system_type is None: LOG.debug("No system-product-name found") if seedfile: # Found a seed dir seed = os.path.join(self.paths.seed_dir, seedfile) (md, ud, cfg) = read_ovf_environment(contents) self.environment = contents found.append(seed) elif system_type and 'vmware' in system_type.lower(): LOG.debug("VMware Virtualization Platform found") if not self.vmware_customization_supported: LOG.debug("Skipping the check for " "VMware Customization support") elif not util.get_cfg_option_bool( self.sys_cfg, "disable_vmware_customization", True): search_paths = ("/usr/lib/vmware-tools", "/usr/lib64/vmware-tools", "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools") plugin = "libdeployPkgPlugin.so" deployPkgPluginPath = None for path in search_paths: deployPkgPluginPath = search_file(path, plugin) if deployPkgPluginPath: LOG.debug("Found the customization plugin at %s", deployPkgPluginPath) break if deployPkgPluginPath: # When the VM is powered on, the "VMware Tools" daemon # copies the customization specification file to # /var/run/vmware-imc directory. cloud-init code needs # to search for the file in that directory. max_wait = get_max_wait_from_cfg(self.ds_cfg) vmwareImcConfigFilePath = util.log_time( logfunc=LOG.debug, msg="waiting for configuration file", func=wait_for_imc_cfg_file, args=("cust.cfg", max_wait)) else: LOG.debug("Did not find the customization plugin.") if vmwareImcConfigFilePath: LOG.debug("Found VMware Customization Config File at %s", vmwareImcConfigFilePath) nicspath = wait_for_imc_cfg_file(filename="nics.txt", maxwait=10, naplen=5) else: LOG.debug("Did not find VMware Customization Config File") else: LOG.debug("Customization for VMware platform is disabled.") if vmwareImcConfigFilePath: self._vmware_nics_to_enable = "" try: cf = ConfigFile(vmwareImcConfigFilePath) self._vmware_cust_conf = Config(cf) (md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf) self._vmware_nics_to_enable = get_nics_to_enable(nicspath) imcdirpath = os.path.dirname(vmwareImcConfigFilePath) product_marker = self._vmware_cust_conf.marker_id hasmarkerfile = check_marker_exists( product_marker, os.path.join(self.paths.cloud_dir, 'data')) special_customization = product_marker and not hasmarkerfile customscript = self._vmware_cust_conf.custom_script_name custScriptConfig = get_tools_config( CONFGROUPNAME_GUESTCUSTOMIZATION, GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS, "true") if custScriptConfig.lower() == "false": # Update the customization status if the # custom script is disabled if special_customization and customscript: msg = "Custom script is disabled by VM Administrator" LOG.debug(msg) set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED) raise RuntimeError(msg) ccScriptsDir = os.path.join(self.paths.get_cpath("scripts"), "per-instance") except Exception as e: _raise_error_status( "Error parsing the customization Config File", e, GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, vmwareImcConfigFilePath) if special_customization: if customscript: try: precust = PreCustomScript(customscript, imcdirpath) precust.execute() except Exception as e: _raise_error_status( "Error executing pre-customization script", e, GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, 
vmwareImcConfigFilePath) try: LOG.debug("Preparing the Network configuration") self._network_config = get_network_config_from_conf( self._vmware_cust_conf, True, True, self.distro.osfamily) except Exception as e: _raise_error_status( "Error preparing Network Configuration", e, GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED, vmwareImcConfigFilePath) if special_customization: LOG.debug("Applying password customization") pwdConfigurator = PasswordConfigurator() adminpwd = self._vmware_cust_conf.admin_password try: resetpwd = self._vmware_cust_conf.reset_password if adminpwd or resetpwd: pwdConfigurator.configure(adminpwd, resetpwd, self.distro) else: LOG.debug("Changing password is not needed") except Exception as e: _raise_error_status( "Error applying Password Configuration", e, GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, vmwareImcConfigFilePath) if customscript: try: postcust = PostCustomScript(customscript, imcdirpath, ccScriptsDir) postcust.execute() except Exception as e: _raise_error_status( "Error executing post-customization script", e, GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, vmwareImcConfigFilePath) if product_marker: try: setup_marker_files( product_marker, os.path.join(self.paths.cloud_dir, 'data')) except Exception as e: _raise_error_status( "Error creating marker files", e, GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, vmwareImcConfigFilePath) self._vmware_cust_found = True found.append('vmware-tools') # TODO: Need to set the status to DONE only when the # customization is done successfully. util.del_dir(os.path.dirname(vmwareImcConfigFilePath)) enable_nics(self._vmware_nics_to_enable) set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_DONE, GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS) else: np = [('com.vmware.guestInfo', transport_vmware_guestinfo), ('iso', transport_iso9660)] name = None for name, transfunc in np: contents = transfunc() if contents: break if contents: (md, ud, cfg) = read_ovf_environment(contents) self.environment = contents found.append(name) # There was no OVF transports found if len(found) == 0: return False if 'seedfrom' in md and md['seedfrom']: seedfrom = md['seedfrom'] seedfound = False for proto in self.supported_seed_starts: if seedfrom.startswith(proto): seedfound = proto break if not seedfound: LOG.debug("Seed from %s not supported by %s", seedfrom, self) return False (md_seed, ud) = util.read_seeded(seedfrom, timeout=None) LOG.debug("Using seeded cache data from %s", seedfrom) md = util.mergemanydict([md, md_seed]) found.append(seedfrom) # Now that we have exhausted any other places merge in the defaults md = util.mergemanydict([md, defaults]) self.seed = ",".join(found) self.metadata = md self.userdata_raw = ud self.cfg = cfg return True
def test_none_returned_if_neither_source_has_data(self):
    self.patch_mapping({})
    self._configure_dmidecode_return('key', 'value')
    self.assertIsNone(util.read_dmi_data('expect-fail'))
def get_data(self): found = [] md = {} ud = "" vmwareImcConfigFilePath = None nicspath = None defaults = { "instance-id": "iid-dsovf", } (seedfile, contents) = get_ovf_env(self.paths.seed_dir) system_type = util.read_dmi_data("system-product-name") if system_type is None: LOG.debug("No system-product-name found") if seedfile: # Found a seed dir seed = os.path.join(self.paths.seed_dir, seedfile) (md, ud, cfg) = read_ovf_environment(contents) self.environment = contents found.append(seed) elif system_type and 'vmware' in system_type.lower(): LOG.debug("VMware Virtualization Platform found") if not self.vmware_customization_supported: LOG.debug("Skipping the check for " "VMware Customization support") elif not util.get_cfg_option_bool( self.sys_cfg, "disable_vmware_customization", True): deployPkgPluginPath = search_file("/usr/lib/vmware-tools", "libdeployPkgPlugin.so") if not deployPkgPluginPath: deployPkgPluginPath = search_file("/usr/lib/open-vm-tools", "libdeployPkgPlugin.so") if deployPkgPluginPath: # When the VM is powered on, the "VMware Tools" daemon # copies the customization specification file to # /var/run/vmware-imc directory. cloud-init code needs # to search for the file in that directory. max_wait = get_max_wait_from_cfg(self.ds_cfg) vmwareImcConfigFilePath = util.log_time( logfunc=LOG.debug, msg="waiting for configuration file", func=wait_for_imc_cfg_file, args=("cust.cfg", max_wait)) if vmwareImcConfigFilePath: LOG.debug("Found VMware Customization Config File at %s", vmwareImcConfigFilePath) nicspath = wait_for_imc_cfg_file(filename="nics.txt", maxwait=10, naplen=5) else: LOG.debug("Did not find VMware Customization Config File") else: LOG.debug("Customization for VMware platform is disabled.") if vmwareImcConfigFilePath: self._vmware_nics_to_enable = "" try: cf = ConfigFile(vmwareImcConfigFilePath) self._vmware_cust_conf = Config(cf) (md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf) self._vmware_nics_to_enable = get_nics_to_enable(nicspath) markerid = self._vmware_cust_conf.marker_id markerexists = check_marker_exists(markerid) except Exception as e: LOG.debug("Error parsing the customization Config File") LOG.exception(e) set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) raise e finally: util.del_dir(os.path.dirname(vmwareImcConfigFilePath)) try: LOG.debug("Preparing the Network configuration") self._network_config = get_network_config_from_conf( self._vmware_cust_conf, True, True, self.distro.osfamily) except Exception as e: LOG.exception(e) set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED) raise e if markerid and not markerexists: LOG.debug("Applying password customization") pwdConfigurator = PasswordConfigurator() adminpwd = self._vmware_cust_conf.admin_password try: resetpwd = self._vmware_cust_conf.reset_password if adminpwd or resetpwd: pwdConfigurator.configure(adminpwd, resetpwd, self.distro) else: LOG.debug("Changing password is not needed") except Exception as e: LOG.debug("Error applying Password Configuration: %s", e) set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) return False if markerid: LOG.debug("Handle marker creation") try: setup_marker_files(markerid) except Exception as e: LOG.debug("Error creating marker files: %s", e) set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, 
GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) return False self._vmware_cust_found = True found.append('vmware-tools') # TODO: Need to set the status to DONE only when the # customization is done successfully. enable_nics(self._vmware_nics_to_enable) set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_DONE, GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS) else: np = { 'iso': transport_iso9660, 'vmware-guestd': transport_vmware_guestd, } name = None for (name, transfunc) in np.items(): (contents, _dev, _fname) = transfunc() if contents: break if contents: (md, ud, cfg) = read_ovf_environment(contents) self.environment = contents found.append(name) # There was no OVF transports found if len(found) == 0: return False if 'seedfrom' in md and md['seedfrom']: seedfrom = md['seedfrom'] seedfound = False for proto in self.supported_seed_starts: if seedfrom.startswith(proto): seedfound = proto break if not seedfound: LOG.debug("Seed from %s not supported by %s", seedfrom, self) return False (md_seed, ud) = util.read_seeded(seedfrom, timeout=None) LOG.debug("Using seeded cache data from %s", seedfrom) md = util.mergemanydict([md, md_seed]) found.append(seedfrom) # Now that we have exhausted any other places merge in the defaults md = util.mergemanydict([md, defaults]) self.seed = ",".join(found) self.metadata = md self.userdata_raw = ud self.cfg = cfg return True
def on_upcloud():
    return util.read_dmi_data('system-manufacturer') == "UpCloud"
def _get_data(self): found = [] md = {} ud = "" vmwareImcConfigFilePath = None nicspath = None defaults = { "instance-id": "iid-dsovf", } (seedfile, contents) = get_ovf_env(self.paths.seed_dir) system_type = util.read_dmi_data("system-product-name") if system_type is None: LOG.debug("No system-product-name found") if seedfile: # Found a seed dir seed = os.path.join(self.paths.seed_dir, seedfile) (md, ud, cfg) = read_ovf_environment(contents) self.environment = contents found.append(seed) elif system_type and 'vmware' in system_type.lower(): LOG.debug("VMware Virtualization Platform found") if not self.vmware_customization_supported: LOG.debug("Skipping the check for " "VMware Customization support") elif not util.get_cfg_option_bool( self.sys_cfg, "disable_vmware_customization", True): search_paths = ( "/usr/lib/vmware-tools", "/usr/lib64/vmware-tools", "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools") plugin = "libdeployPkgPlugin.so" deployPkgPluginPath = None for path in search_paths: deployPkgPluginPath = search_file(path, plugin) if deployPkgPluginPath: LOG.debug("Found the customization plugin at %s", deployPkgPluginPath) break if deployPkgPluginPath: # When the VM is powered on, the "VMware Tools" daemon # copies the customization specification file to # /var/run/vmware-imc directory. cloud-init code needs # to search for the file in that directory. max_wait = get_max_wait_from_cfg(self.ds_cfg) vmwareImcConfigFilePath = util.log_time( logfunc=LOG.debug, msg="waiting for configuration file", func=wait_for_imc_cfg_file, args=("cust.cfg", max_wait)) else: LOG.debug("Did not find the customization plugin.") if vmwareImcConfigFilePath: LOG.debug("Found VMware Customization Config File at %s", vmwareImcConfigFilePath) nicspath = wait_for_imc_cfg_file( filename="nics.txt", maxwait=10, naplen=5) else: LOG.debug("Did not find VMware Customization Config File") else: LOG.debug("Customization for VMware platform is disabled.") if vmwareImcConfigFilePath: self._vmware_nics_to_enable = "" try: cf = ConfigFile(vmwareImcConfigFilePath) self._vmware_cust_conf = Config(cf) (md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf) self._vmware_nics_to_enable = get_nics_to_enable(nicspath) imcdirpath = os.path.dirname(vmwareImcConfigFilePath) product_marker = self._vmware_cust_conf.marker_id hasmarkerfile = check_marker_exists( product_marker, os.path.join(self.paths.cloud_dir, 'data')) special_customization = product_marker and not hasmarkerfile customscript = self._vmware_cust_conf.custom_script_name except Exception as e: _raise_error_status( "Error parsing the customization Config File", e, GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, vmwareImcConfigFilePath) if special_customization: if customscript: try: precust = PreCustomScript(customscript, imcdirpath) precust.execute() except Exception as e: _raise_error_status( "Error executing pre-customization script", e, GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, vmwareImcConfigFilePath) try: LOG.debug("Preparing the Network configuration") self._network_config = get_network_config_from_conf( self._vmware_cust_conf, True, True, self.distro.osfamily) except Exception as e: _raise_error_status( "Error preparing Network Configuration", e, GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED, vmwareImcConfigFilePath) if special_customization: LOG.debug("Applying password customization") pwdConfigurator = PasswordConfigurator() adminpwd = self._vmware_cust_conf.admin_password try: resetpwd = self._vmware_cust_conf.reset_password if adminpwd or 
resetpwd: pwdConfigurator.configure(adminpwd, resetpwd, self.distro) else: LOG.debug("Changing password is not needed") except Exception as e: _raise_error_status( "Error applying Password Configuration", e, GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, vmwareImcConfigFilePath) if customscript: try: postcust = PostCustomScript(customscript, imcdirpath) postcust.execute() except Exception as e: _raise_error_status( "Error executing post-customization script", e, GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, vmwareImcConfigFilePath) if product_marker: try: setup_marker_files( product_marker, os.path.join(self.paths.cloud_dir, 'data')) except Exception as e: _raise_error_status( "Error creating marker files", e, GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, vmwareImcConfigFilePath) self._vmware_cust_found = True found.append('vmware-tools') # TODO: Need to set the status to DONE only when the # customization is done successfully. util.del_dir(os.path.dirname(vmwareImcConfigFilePath)) enable_nics(self._vmware_nics_to_enable) set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_DONE, GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS) else: np = [('com.vmware.guestInfo', transport_vmware_guestinfo), ('iso', transport_iso9660)] name = None for name, transfunc in np: contents = transfunc() if contents: break if contents: (md, ud, cfg) = read_ovf_environment(contents) self.environment = contents found.append(name) # There was no OVF transports found if len(found) == 0: return False if 'seedfrom' in md and md['seedfrom']: seedfrom = md['seedfrom'] seedfound = False for proto in self.supported_seed_starts: if seedfrom.startswith(proto): seedfound = proto break if not seedfound: LOG.debug("Seed from %s not supported by %s", seedfrom, self) return False (md_seed, ud) = util.read_seeded(seedfrom, timeout=None) LOG.debug("Using seeded cache data from %s", seedfrom) md = util.mergemanydict([md, md_seed]) found.append(seedfrom) # Now that we have exhausted any other places merge in the defaults md = util.mergemanydict([md, defaults]) self.seed = ",".join(found) self.metadata = md self.userdata_raw = ud self.cfg = cfg return True
def crawl_metadata(self):
    """Walk all instance metadata sources returning a dict on success.

    @return: A dictionary of any metadata content for this instance.
    @raise: InvalidMetaDataException when the expected metadata service is
        unavailable, broken or disabled.
    """
    crawled_data = {}
    # azure removes/ejects the cdrom containing the ovf-env.xml
    # file on reboot. So, in order to successfully reboot we
    # need to look in the datadir and consider that valid
    ddir = self.ds_cfg['data_dir']

    candidates = [self.seed_dir]
    if os.path.isfile(REPROVISION_MARKER_FILE):
        candidates.insert(0, "IMDS")
    candidates.extend(list_possible_azure_ds_devs())
    if ddir:
        candidates.append(ddir)

    found = None
    reprovision = False
    for cdev in candidates:
        try:
            if cdev == "IMDS":
                ret = None
                reprovision = True
            elif cdev.startswith("/dev/"):
                if util.is_FreeBSD():
                    ret = util.mount_cb(cdev, load_azure_ds_dir,
                                        mtype="udf", sync=False)
                else:
                    ret = util.mount_cb(cdev, load_azure_ds_dir)
            else:
                ret = load_azure_ds_dir(cdev)
        except NonAzureDataSource:
            continue
        except BrokenAzureDataSource as exc:
            msg = 'BrokenAzureDataSource: %s' % exc
            raise sources.InvalidMetaDataException(msg)
        except util.MountFailedError:
            LOG.warning("%s was not mountable", cdev)
            continue

        perform_reprovision = reprovision or self._should_reprovision(ret)
        if perform_reprovision:
            if util.is_FreeBSD():
                msg = "FreeBSD is not supported for PPS VMs"
                LOG.error(msg)
                raise sources.InvalidMetaDataException(msg)
            ret = self._reprovision()

        imds_md = get_metadata_from_imds(self.fallback_interface, retries=10)
        (md, userdata_raw, cfg, files) = ret
        self.seed = cdev
        crawled_data.update({
            'cfg': cfg,
            'files': files,
            'metadata': util.mergemanydict([md, {'imds': imds_md}]),
            'userdata_raw': userdata_raw})
        found = cdev

        LOG.debug("found datasource in %s", cdev)
        break

    if not found:
        raise sources.InvalidMetaDataException('No Azure metadata found')

    if found == ddir:
        LOG.debug("using files cached in %s", ddir)

    seed = _get_random_seed()
    if seed:
        crawled_data['metadata']['random_seed'] = seed
    crawled_data['metadata']['instance-id'] = util.read_dmi_data(
        'system-uuid')

    if perform_reprovision:
        LOG.info("Reporting ready to Azure after getting ReprovisionData")
        use_cached_ephemeral = (net.is_up(self.fallback_interface) and
                                getattr(self, '_ephemeral_dhcp_ctx', None))
        if use_cached_ephemeral:
            self._report_ready(lease=self._ephemeral_dhcp_ctx.lease)
            self._ephemeral_dhcp_ctx.clean_network()  # Teardown ephemeral
        else:
            with EphemeralDHCPv4() as lease:
                self._report_ready(lease=lease)
    return crawled_data
def _get_data(self): # azure removes/ejects the cdrom containing the ovf-env.xml # file on reboot. So, in order to successfully reboot we # need to look in the datadir and consider that valid asset_tag = util.read_dmi_data('chassis-asset-tag') if asset_tag != AZURE_CHASSIS_ASSET_TAG: LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) return False ddir = self.ds_cfg['data_dir'] candidates = [self.seed_dir] if os.path.isfile(REPROVISION_MARKER_FILE): candidates.insert(0, "IMDS") candidates.extend(list_possible_azure_ds_devs()) if ddir: candidates.append(ddir) found = None reprovision = False for cdev in candidates: try: if cdev == "IMDS": ret = None reprovision = True elif cdev.startswith("/dev/"): if util.is_FreeBSD(): ret = util.mount_cb(cdev, load_azure_ds_dir, mtype="udf", sync=False) else: ret = util.mount_cb(cdev, load_azure_ds_dir) else: ret = load_azure_ds_dir(cdev) except NonAzureDataSource: continue except BrokenAzureDataSource as exc: raise exc except util.MountFailedError: LOG.warning("%s was not mountable", cdev) continue if reprovision or self._should_reprovision(ret): ret = self._reprovision() (md, self.userdata_raw, cfg, files) = ret self.seed = cdev self.metadata = util.mergemanydict([md, DEFAULT_METADATA]) self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG]) found = cdev LOG.debug("found datasource in %s", cdev) break if not found: return False if found == ddir: LOG.debug("using files cached in %s", ddir) # azure / hyper-v provides random data here # TODO. find the seed on FreeBSD platform # now update ds_cfg to reflect contents pass in config if not util.is_FreeBSD(): seed = util.load_file("/sys/firmware/acpi/tables/OEM0", quiet=True, decode=False) if seed: self.metadata['random_seed'] = seed user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) # walinux agent writes files world readable, but expects # the directory to be protected. write_files(ddir, files, dirmode=0o700) self.metadata['instance-id'] = util.read_dmi_data('system-uuid') return True
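The constant compared against above is Azure's fixed chassis asset tag; the value below is reproduced from memory of upstream cloud-init and should be verified against the DataSourceAzure module before being relied upon:

AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'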
def get_data(self): found = [] md = {} ud = "" vmwarePlatformFound = False vmwareImcConfigFilePath = '' defaults = { "instance-id": "iid-dsovf", } (seedfile, contents) = get_ovf_env(self.paths.seed_dir) system_type = util.read_dmi_data("system-product-name") if system_type is None: LOG.debug("No system-product-name found") if seedfile: # Found a seed dir seed = os.path.join(self.paths.seed_dir, seedfile) (md, ud, cfg) = read_ovf_environment(contents) self.environment = contents found.append(seed) elif system_type and 'vmware' in system_type.lower(): LOG.debug("VMware Virtualization Platform found") if not util.get_cfg_option_bool( self.sys_cfg, "disable_vmware_customization", True): deployPkgPluginPath = search_file("/usr/lib/vmware-tools", "libdeployPkgPlugin.so") if not deployPkgPluginPath: deployPkgPluginPath = search_file("/usr/lib/open-vm-tools", "libdeployPkgPlugin.so") if deployPkgPluginPath: # When the VM is powered on, the "VMware Tools" daemon # copies the customization specification file to # /var/run/vmware-imc directory. cloud-init code needs # to search for the file in that directory. vmwareImcConfigFilePath = util.log_time( logfunc=LOG.debug, msg="waiting for configuration file", func=wait_for_imc_cfg_file, args=("/var/run/vmware-imc", "cust.cfg")) if vmwareImcConfigFilePath: LOG.debug("Found VMware DeployPkg Config File at %s" % vmwareImcConfigFilePath) else: LOG.debug("Did not find VMware DeployPkg Config File Path") else: LOG.debug("Customization for VMware platform is disabled.") if vmwareImcConfigFilePath: nics = "" try: cf = ConfigFile(vmwareImcConfigFilePath) conf = Config(cf) (md, ud, cfg) = read_vmware_imc(conf) dirpath = os.path.dirname(vmwareImcConfigFilePath) nics = get_nics_to_enable(dirpath) except Exception as e: LOG.debug("Error parsing the customization Config File") LOG.exception(e) set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) enable_nics(nics) return False finally: util.del_dir(os.path.dirname(vmwareImcConfigFilePath)) try: LOG.debug("Applying the Network customization") nicConfigurator = NicConfigurator(conf.nics) nicConfigurator.configure() except Exception as e: LOG.debug("Error applying the Network Configuration") LOG.exception(e) set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED) enable_nics(nics) return False vmwarePlatformFound = True set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_DONE, GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS) enable_nics(nics) else: np = {'iso': transport_iso9660, 'vmware-guestd': transport_vmware_guestd, } name = None for (name, transfunc) in np.items(): (contents, _dev, _fname) = transfunc() if contents: break if contents: (md, ud, cfg) = read_ovf_environment(contents) self.environment = contents found.append(name) # There was no OVF transports found if len(found) == 0 and not vmwarePlatformFound: return False if 'seedfrom' in md and md['seedfrom']: seedfrom = md['seedfrom'] seedfound = False for proto in self.supported_seed_starts: if seedfrom.startswith(proto): seedfound = proto break if not seedfound: LOG.debug("Seed from %s not supported by %s", seedfrom, self) return False (md_seed, ud) = util.read_seeded(seedfrom, timeout=None) LOG.debug("Using seeded cache data from %s", seedfrom) md = util.mergemanydict([md, md_seed]) found.append(seedfrom) # Now that we have exhausted any other places merge in the defaults md = util.mergemanydict([md, defaults]) 
self.seed = ",".join(found) self.metadata = md self.userdata_raw = ud self.cfg = cfg return True
def _is_platform_viable():
    asset_tag = util.read_dmi_data('chassis-asset-tag')
    return asset_tag == CHASSIS_ASSET_TAG
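The module-level constant used by this probe holds the platform's fixed asset tag; for the Oracle datasource upstream it is assumed to be:

CHASSIS_ASSET_TAG = 'OracleCloud.com'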
def _read_system_uuid():
    sys_uuid = util.read_dmi_data('system-uuid')
    return None if sys_uuid is None else sys_uuid.lower()
def _get_data(self): defaults = { "instance-id": "nocloud", "dsmode": self.dsmode, } found = [] mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "", 'network-config': None} try: # Parse the system serial label from dmi. If not empty, try parsing # like the commandline md = {} serial = util.read_dmi_data('system-serial-number') if serial and load_cmdline_data(md, serial): found.append("dmi") mydata = _merge_new_seed(mydata, {'meta-data': md}) except Exception: util.logexc(LOG, "Unable to parse dmi data") return False try: # Parse the kernel command line, getting data passed in md = {} if load_cmdline_data(md): found.append("cmdline") mydata = _merge_new_seed(mydata, {'meta-data': md}) except Exception: util.logexc(LOG, "Unable to parse command line data") return False # Check to see if the seed dir has data. pp2d_kwargs = {'required': ['user-data', 'meta-data'], 'optional': ['vendor-data', 'network-config']} for path in self.seed_dirs: try: seeded = util.pathprefix2dict(path, **pp2d_kwargs) found.append(path) LOG.debug("Using seeded data from %s", path) mydata = _merge_new_seed(mydata, seeded) break except ValueError: pass # If the datasource config had a 'seedfrom' entry, then that takes # precedence over a 'seedfrom' that was found in a filesystem # but not over external media if self.ds_cfg.get('seedfrom'): found.append("ds_config_seedfrom") mydata['meta-data']["seedfrom"] = self.ds_cfg['seedfrom'] # fields appropriately named can also just come from the datasource # config (ie, 'user-data', 'meta-data', 'vendor-data' there) if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg: mydata = _merge_new_seed(mydata, self.ds_cfg) found.append("ds_config") def _pp2d_callback(mp, data): return util.pathprefix2dict(mp, **data) label = self.ds_cfg.get('fs_label', "cidata") if label is not None: # Query optical drive to get it in blkid cache for 2.6 kernels util.find_devs_with(path="/dev/sr0") util.find_devs_with(path="/dev/sr1") fslist = util.find_devs_with("TYPE=vfat") fslist.extend(util.find_devs_with("TYPE=iso9660")) label_list = util.find_devs_with("LABEL=%s" % label.upper()) label_list.extend(util.find_devs_with("LABEL=%s" % label.lower())) devlist = list(set(fslist) & set(label_list)) devlist.sort(reverse=True) for dev in devlist: try: LOG.debug("Attempting to use data from %s", dev) try: seeded = util.mount_cb(dev, _pp2d_callback, pp2d_kwargs) except ValueError: if dev in label_list: LOG.warning("device %s with label=%s not a" "valid seed.", dev, label) continue mydata = _merge_new_seed(mydata, seeded) LOG.debug("Using data from %s", dev) found.append(dev) break except OSError as e: if e.errno != errno.ENOENT: raise except util.MountFailedError: util.logexc(LOG, "Failed to mount %s when looking for " "data", dev) # There was no indication on kernel cmdline or data # in the seeddir suggesting this handler should be used. 
if len(found) == 0: return False # The special argument "seedfrom" indicates we should # attempt to seed the userdata / metadata from its value # its primary value is in allowing the user to type less # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg if "seedfrom" in mydata['meta-data']: seedfrom = mydata['meta-data']["seedfrom"] seedfound = False for proto in self.supported_seed_starts: if seedfrom.startswith(proto): seedfound = proto break if not seedfound: LOG.debug("Seed from %s not supported by %s", seedfrom, self) return False # This could throw errors, but the user told us to do it # so if errors are raised, let them raise (md_seed, ud) = util.read_seeded(seedfrom, timeout=None) LOG.debug("Using seeded cache data from %s", seedfrom) # Values in the command line override those from the seed mydata['meta-data'] = util.mergemanydict([mydata['meta-data'], md_seed]) mydata['user-data'] = ud found.append(seedfrom) # Now that we have exhausted any other places merge in the defaults mydata['meta-data'] = util.mergemanydict([mydata['meta-data'], defaults]) self.dsmode = self._determine_dsmode( [mydata['meta-data'].get('dsmode')]) if self.dsmode == sources.DSMODE_DISABLED: LOG.debug("%s: not claiming datasource, dsmode=%s", self, self.dsmode) return False self.seed = ",".join(found) self.metadata = mydata['meta-data'] self.userdata_raw = mydata['user-data'] self.vendordata_raw = mydata['vendor-data'] self._network_config = mydata['network-config'] self._network_eni = mydata['meta-data'].get('network-interfaces') return True
def _get_subplatform(self):
    system_type = util.read_dmi_data("system-product-name")
    # read_dmi_data can return None (e.g. in a container); guard before
    # lower-casing to avoid an AttributeError
    if system_type and system_type.lower() == 'vmware':
        return 'vmware (%s)' % self.seed
    return 'ovf (%s)' % self.seed
def on_hetzner():
    return util.read_dmi_data('system-manufacturer') == "Hetzner"
def _get_data(self): defaults = { "instance-id": "nocloud", "dsmode": self.dsmode, } found = [] mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "", 'network-config': None} try: # Parse the system serial label from dmi. If not empty, try parsing # like the commandline md = {} serial = util.read_dmi_data('system-serial-number') if serial and load_cmdline_data(md, serial): found.append("dmi") mydata = _merge_new_seed(mydata, {'meta-data': md}) except Exception: util.logexc(LOG, "Unable to parse dmi data") return False try: # Parse the kernel command line, getting data passed in md = {} if load_cmdline_data(md): found.append("cmdline") mydata = _merge_new_seed(mydata, {'meta-data': md}) except Exception: util.logexc(LOG, "Unable to parse command line data") return False # Check to see if the seed dir has data. pp2d_kwargs = {'required': ['user-data', 'meta-data'], 'optional': ['vendor-data', 'network-config']} for path in self.seed_dirs: try: seeded = util.pathprefix2dict(path, **pp2d_kwargs) found.append(path) LOG.debug("Using seeded data from %s", path) mydata = _merge_new_seed(mydata, seeded) break except ValueError: pass # If the datasource config had a 'seedfrom' entry, then that takes # precedence over a 'seedfrom' that was found in a filesystem # but not over external media if self.ds_cfg.get('seedfrom'): found.append("ds_config_seedfrom") mydata['meta-data']["seedfrom"] = self.ds_cfg['seedfrom'] # fields appropriately named can also just come from the datasource # config (ie, 'user-data', 'meta-data', 'vendor-data' there) if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg: mydata = _merge_new_seed(mydata, self.ds_cfg) found.append("ds_config") def _pp2d_callback(mp, data): return util.pathprefix2dict(mp, **data) label = self.ds_cfg.get('fs_label', "cidata") if label is not None: for dev in self._get_devices(label): try: LOG.debug("Attempting to use data from %s", dev) try: seeded = util.mount_cb(dev, _pp2d_callback, pp2d_kwargs) except ValueError: LOG.warning("device %s with label=%s not a" "valid seed.", dev, label) continue mydata = _merge_new_seed(mydata, seeded) LOG.debug("Using data from %s", dev) found.append(dev) break except OSError as e: if e.errno != errno.ENOENT: raise except util.MountFailedError: util.logexc(LOG, "Failed to mount %s when looking for " "data", dev) # There was no indication on kernel cmdline or data # in the seeddir suggesting this handler should be used. 
if len(found) == 0: return False # The special argument "seedfrom" indicates we should # attempt to seed the userdata / metadata from its value # its primary value is in allowing the user to type less # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg if "seedfrom" in mydata['meta-data']: seedfrom = mydata['meta-data']["seedfrom"] seedfound = False for proto in self.supported_seed_starts: if seedfrom.startswith(proto): seedfound = proto break if not seedfound: LOG.debug("Seed from %s not supported by %s", seedfrom, self) return False # This could throw errors, but the user told us to do it # so if errors are raised, let them raise (md_seed, ud) = util.read_seeded(seedfrom, timeout=None) LOG.debug("Using seeded cache data from %s", seedfrom) # Values in the command line override those from the seed mydata['meta-data'] = util.mergemanydict([mydata['meta-data'], md_seed]) mydata['user-data'] = ud found.append(seedfrom) # Now that we have exhausted any other places merge in the defaults mydata['meta-data'] = util.mergemanydict([mydata['meta-data'], defaults]) self.dsmode = self._determine_dsmode( [mydata['meta-data'].get('dsmode')]) if self.dsmode == sources.DSMODE_DISABLED: LOG.debug("%s: not claiming datasource, dsmode=%s", self, self.dsmode) return False self.seed = ",".join(found) self.metadata = mydata['meta-data'] self.userdata_raw = mydata['user-data'] self.vendordata_raw = mydata['vendor-data'] self._network_config = mydata['network-config'] self._network_eni = mydata['meta-data'].get('network-interfaces') return True
def test_none_returned_if_dmidecode_not_in_path(self):
    self.patched_funcs.enter_context(
        mock.patch.object(util, "which", lambda _: False))
    self.patch_mapping({})
    self.assertIsNone(util.read_dmi_data("expect-fail"))
def get_data(self): found = [] md = {} ud = "" vmwarePlatformFound = False vmwareImcConfigFilePath = '' defaults = { "instance-id": "iid-dsovf", } (seedfile, contents) = get_ovf_env(self.paths.seed_dir) system_type = util.read_dmi_data("system-product-name") if system_type is None: LOG.debug("No system-product-name found") if seedfile: # Found a seed dir seed = os.path.join(self.paths.seed_dir, seedfile) (md, ud, cfg) = read_ovf_environment(contents) self.environment = contents found.append(seed) elif system_type and 'vmware' in system_type.lower(): LOG.debug("VMware Virtualization Platform found") if not util.get_cfg_option_bool( self.sys_cfg, "disable_vmware_customization", True): deployPkgPluginPath = search_file("/usr/lib/vmware-tools", "libdeployPkgPlugin.so") if not deployPkgPluginPath: deployPkgPluginPath = search_file("/usr/lib/open-vm-tools", "libdeployPkgPlugin.so") if deployPkgPluginPath: # When the VM is powered on, the "VMware Tools" daemon # copies the customization specification file to # /var/run/vmware-imc directory. cloud-init code needs # to search for the file in that directory. vmwareImcConfigFilePath = util.log_time( logfunc=LOG.debug, msg="waiting for configuration file", func=wait_for_imc_cfg_file, args=("/var/run/vmware-imc", "cust.cfg")) if vmwareImcConfigFilePath: LOG.debug("Found VMware DeployPkg Config File at %s" % vmwareImcConfigFilePath) else: LOG.debug("Did not find VMware DeployPkg Config File Path") else: LOG.debug("Customization for VMware platform is disabled.") if vmwareImcConfigFilePath: nics = "" try: cf = ConfigFile(vmwareImcConfigFilePath) conf = Config(cf) (md, ud, cfg) = read_vmware_imc(conf) dirpath = os.path.dirname(vmwareImcConfigFilePath) nics = get_nics_to_enable(dirpath) except Exception as e: LOG.debug("Error parsing the customization Config File") LOG.exception(e) set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) enable_nics(nics) return False finally: util.del_dir(os.path.dirname(vmwareImcConfigFilePath)) try: LOG.debug("Applying the Network customization") nicConfigurator = NicConfigurator(conf.nics) nicConfigurator.configure() except Exception as e: LOG.debug("Error applying the Network Configuration") LOG.exception(e) set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED) enable_nics(nics) return False vmwarePlatformFound = True set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_DONE, GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS) enable_nics(nics) else: np = { 'iso': transport_iso9660, 'vmware-guestd': transport_vmware_guestd, } name = None for (name, transfunc) in np.items(): (contents, _dev, _fname) = transfunc() if contents: break if contents: (md, ud, cfg) = read_ovf_environment(contents) self.environment = contents found.append(name) # There was no OVF transports found if len(found) == 0 and not vmwarePlatformFound: return False if 'seedfrom' in md and md['seedfrom']: seedfrom = md['seedfrom'] seedfound = False for proto in self.supported_seed_starts: if seedfrom.startswith(proto): seedfound = proto break if not seedfound: LOG.debug("Seed from %s not supported by %s", seedfrom, self) return False (md_seed, ud) = util.read_seeded(seedfrom, timeout=None) LOG.debug("Using seeded cache data from %s", seedfrom) md = util.mergemanydict([md, md_seed]) found.append(seedfrom) # Now that we have exhausted any other places merge in the defaults md = util.mergemanydict([md, defaults]) 
self.seed = ",".join(found) self.metadata = md self.userdata_raw = ud self.cfg = cfg return True
def _is_aliyun():
    return util.read_dmi_data('system-product-name') == ALIYUN_PRODUCT
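The constant referenced above is assumed (from upstream cloud-init's AliYun datasource) to be the fixed ECS product name:

ALIYUN_PRODUCT = 'Alibaba Cloud ECS'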
def _is_platform_viable(self):
    # read_dmi_data may return None; treat that as "not Exoscale"
    # instead of raising AttributeError on .startswith()
    product_name = util.read_dmi_data('system-product-name')
    return bool(product_name) and product_name.startswith(EXOSCALE_DMI_NAME)
def get_data(self):
    # azure removes/ejects the cdrom containing the ovf-env.xml
    # file on reboot. So, in order to successfully reboot we
    # need to look in the datadir and consider that valid
    ddir = self.ds_cfg['data_dir']

    candidates = [self.seed_dir]
    candidates.extend(list_possible_azure_ds_devs())
    if ddir:
        candidates.append(ddir)

    found = None

    for cdev in candidates:
        try:
            if cdev.startswith("/dev/"):
                ret = util.mount_cb(cdev, load_azure_ds_dir)
            else:
                ret = load_azure_ds_dir(cdev)
        except NonAzureDataSource:
            continue
        except BrokenAzureDataSource as exc:
            raise exc
        except util.MountFailedError:
            LOG.warning("%s was not mountable", cdev)
            continue

        (md, self.userdata_raw, cfg, files) = ret
        self.seed = cdev
        self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
        self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
        found = cdev

        LOG.debug("found datasource in %s", cdev)
        break

    if not found:
        return False

    if found == ddir:
        LOG.debug("using files cached in %s", ddir)

    # azure / hyper-v provides random data here
    seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
                          quiet=True, decode=False)
    if seed:
        self.metadata['random_seed'] = seed

    # now update ds_cfg to reflect contents pass in config
    user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
    self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])

    # walinux agent writes files world readable, but expects
    # the directory to be protected.
    write_files(ddir, files, dirmode=0o700)

    if self.ds_cfg['agent_command'] == '__builtin__':
        metadata_func = get_metadata_from_fabric
    else:
        metadata_func = self.get_metadata_from_agent
    try:
        fabric_data = metadata_func()
    except Exception as exc:
        LOG.info("Error communicating with Azure fabric; assume we aren't"
                 " on Azure.", exc_info=True)
        return False

    self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
    self.metadata.update(fabric_data)

    found_ephemeral = find_fabric_formatted_ephemeral_disk()
    if found_ephemeral:
        self.ds_cfg['disk_aliases']['ephemeral0'] = found_ephemeral
        LOG.debug("using detected ephemeral0 of %s", found_ephemeral)

    cc_modules_override = support_new_ephemeral(self.sys_cfg)
    if cc_modules_override:
        self.cfg['cloud_config_modules'] = cc_modules_override

    return True
def get_data(self):
    # azure removes/ejects the cdrom containing the ovf-env.xml
    # file on reboot. So, in order to successfully reboot we
    # need to look in the datadir and consider that valid
    ddir = self.ds_cfg['data_dir']

    candidates = [self.seed_dir]
    candidates.extend(list_possible_azure_ds_devs())
    if ddir:
        candidates.append(ddir)

    found = None

    for cdev in candidates:
        try:
            if cdev.startswith("/dev/"):
                ret = util.mount_cb(cdev, load_azure_ds_dir)
            else:
                ret = load_azure_ds_dir(cdev)
        except NonAzureDataSource:
            continue
        except BrokenAzureDataSource as exc:
            raise exc
        except util.MountFailedError:
            LOG.warning("%s was not mountable", cdev)
            continue

        (md, self.userdata_raw, cfg, files) = ret
        self.seed = cdev
        self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
        self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
        found = cdev

        LOG.debug("found datasource in %s", cdev)
        break

    if not found:
        return False

    if found == ddir:
        LOG.debug("using files cached in %s", ddir)

    # azure / hyper-v provides random data here
    seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
                          quiet=True, decode=False)
    if seed:
        self.metadata['random_seed'] = seed

    # now update ds_cfg to reflect contents pass in config
    user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
    self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])

    # walinux agent writes files world readable, but expects
    # the directory to be protected.
    write_files(ddir, files, dirmode=0o700)

    if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN:
        metadata_func = partial(
            get_metadata_from_fabric,
            fallback_lease_file=self.dhclient_lease_file)
    else:
        metadata_func = self.get_metadata_from_agent
    try:
        fabric_data = metadata_func()
    except Exception as exc:
        LOG.info(
            "Error communicating with Azure fabric; assume we aren't"
            " on Azure.", exc_info=True)
        return False

    self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
    self.metadata.update(fabric_data)

    return True
def test_sysfs_used_with_key_in_mapping_and_file_on_disk(self):
    self.patch_mapping({'mapped-key': 'mapped-value'})
    expected_dmi_value = 'sys-used-correctly'
    self._create_sysfs_file('mapped-value', expected_dmi_value)
    self._configure_dmidecode_return('mapped-key', 'wrong-wrong-wrong')
    self.assertEqual(expected_dmi_value, util.read_dmi_data('mapped-key'))
def test_container_returns_none_on_unknown(self):
    """In a container even bogus keys return None."""
    self._m_is_container.return_value = True
    self._create_sysfs_file('product_name', "should-be-ignored")
    self.assertIsNone(util.read_dmi_data("bogus"))
    self.assertIsNone(util.read_dmi_data("system-product-name"))
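Taken together, these tests pin down a resolution order for read_dmi_data: container check first, then the sysfs mapping, then a dmidecode fallback that is skipped on some ARM arches, with all-0xff values reported as the empty string. A condensed, self-contained sketch of that order (an illustration under those assumptions, not the cloudinit.util implementation; the mapping shown is a subset and the ARM guard is simplified):

import os
import subprocess

# subset of the dmidecode-key -> /sys/class/dmi/id/* mapping (assumed)
KEY_TO_SYSFS = {
    'system-product-name': 'product_name',
    'system-serial-number': 'product_serial',
    'system-uuid': 'product_uuid',
}


def read_dmi_data_sketch(key, in_container=False):
    if in_container:
        return None  # DMI describes the host, not the container
    sysfs_name = KEY_TO_SYSFS.get(key)
    if sysfs_name:
        path = '/sys/class/dmi/id/' + sysfs_name
        if os.path.isfile(path):
            with open(path, 'rb') as f:
                raw = f.read().strip()
            # uninitialized fields read back as all 0xff: report empty
            if raw and raw == b'\xff' * len(raw):
                return ''
            return raw.decode('utf-8', 'replace')
    # fall back to the dmidecode binary, skipped on 32-bit ARM
    if os.uname()[4].startswith('arm'):
        return None
    try:
        out = subprocess.check_output(['dmidecode', '--string', key])
    except (OSError, subprocess.CalledProcessError):
        return None
    return out.strip().decode('utf-8', 'replace')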