def test_series_without_available_series(self):
    """A driver that declares no 'series' list matches any series."""
    node = factory.make_Node()
    matched = {"name": "somedriver"}
    self.patch(
        third_party_drivers, "match_aliases_to_driver"
    ).return_value = matched
    self.assertEqual(
        matched, get_third_party_driver(node, series="precise")
    )
def get_node_preseed_context(request, node, osystem="", release=""):
    """Build the node-dependent context used to render preseed templates.

    :param request: The HTTP request, used to build absolute URLs.
    :param node: See `get_preseed_filenames`.
    :param osystem: See `get_preseed_filenames`.
    :param release: See `get_preseed_filenames`.
    :return: The context dictionary.
    :rtype: dict.
    """
    # URL and POST payload used to disable netboot once the install is done.
    disable_pxe_url = request.build_absolute_uri(
        reverse("metadata-node-by-id", args=["latest", node.system_id])
    )
    disable_pxe_data = urlencode({"op": "netboot_off"})
    driver = get_third_party_driver(node, series=release)
    context = {
        "third_party_drivers": Config.objects.get_config(
            "enable_third_party_drivers"
        ),
        "driver": driver,
        "driver_package": driver.get("package", ""),
        "node": node,
        "preseed_data": compose_preseed(
            request, get_preseed_type_for(node), node
        ),
        "node_disable_pxe_url": disable_pxe_url,
        "node_disable_pxe_data": disable_pxe_data,
        "license_key": node.get_effective_license_key(),
    }
    return context
def get_node_preseed_context(
        request, node, osystem='', release=''):
    """Build the node-dependent context for rendering preseed templates.

    :param request: The HTTP request, used to build absolute URLs.
    :param node: See `get_preseed_filenames`.
    :param osystem: See `get_preseed_filenames`.
    :param release: See `get_preseed_filenames`.
    :return: The context dictionary.
    :rtype: dict.
    """
    # Absolute URL plus POST body used to turn netboot off for this node.
    pxe_off_url = request.build_absolute_uri(
        reverse('metadata-node-by-id', args=['latest', node.system_id]))
    pxe_off_data = urlencode({'op': 'netboot_off'})
    third_party = get_third_party_driver(node)
    return {
        'third_party_drivers': Config.objects.get_config(
            'enable_third_party_drivers'),
        'driver': third_party,
        'driver_package': third_party.get('package', ''),
        'node': node,
        'preseed_data': compose_preseed(
            request, get_preseed_type_for(node), node),
        'node_disable_pxe_url': pxe_off_url,
        'node_disable_pxe_data': pxe_off_data,
        'license_key': node.get_effective_license_key(),
    }
def test_finds_match(self):
    """The matched driver is returned as an independent copy."""
    node = factory.make_Node()
    matcher = self.patch(third_party_drivers, "match_aliases_to_driver")
    original = dict(comment="hooray")
    matcher.return_value = original
    result = get_third_party_driver(node)
    self.assertEqual(original, result)
    # Mutating the source dict must not change the returned copy.
    original["comment"] = "boo"
    self.assertEqual("hooray", result["comment"])
def test_finds_match(self):
    """The matched driver is returned as an independent copy."""
    node = factory.make_Node()
    match_mock = self.patch(
        third_party_drivers, 'match_aliases_to_driver')
    source = dict(comment='hooray')
    match_mock.return_value = source
    found = get_third_party_driver(node)
    self.assertEqual(source, found)
    # Changing the source afterwards must leave the returned copy intact.
    source['comment'] = 'boo'
    self.assertEqual('hooray', found['comment'])
def get_node_preseed_context(node, osystem='', release='',
                             rack_controller=None, default_region_ip=None):
    """Build the node-dependent context for rendering preseed templates.

    :param node: See `get_preseed_filenames`.
    :param osystem: See `get_preseed_filenames`.
    :param release: See `get_preseed_filenames`.
    :return: The context dictionary.
    :rtype: dict.
    """
    if rack_controller is None:
        rack_controller = node.get_boot_rack_controller()
    # URL and POST parameters used to turn off PXE booting once the
    # install of the node is finished.
    netboot_off_url = absolute_reverse(
        'metadata-node-by-id', default_region_ip=default_region_ip,
        args=['latest', node.system_id], base_url=rack_controller.url)
    netboot_off_data = urlencode({'op': 'netboot_off'})
    driver = get_third_party_driver(node)
    return {
        'third_party_drivers': Config.objects.get_config(
            'enable_third_party_drivers'),
        'driver': driver,
        'driver_package': driver.get('package', ''),
        'node': node,
        'preseed_data': compose_preseed(
            get_preseed_type_for(node), node,
            default_region_ip=default_region_ip),
        'node_disable_pxe_url': netboot_off_url,
        'node_disable_pxe_data': netboot_off_data,
        'license_key': node.get_effective_license_key(),
    }
def get_config(
        system_id, local_ip, remote_ip, arch=None, subarch=None, mac=None,
        bios_boot_method=None):
    """Get the booting configuration for a machine.

    :param system_id: `system_id` of the rack controller making the request.
    :param local_ip: IP on the rack controller the machine booted against.
    :param remote_ip: IP the request originated from (used to derive the
        region-facing source address).
    :param arch: Architecture requested by the boot loader, if any.
    :param subarch: Sub-architecture requested by the boot loader, if any.
    :param mac: MAC address of the booting interface, if known.
    :param bios_boot_method: Boot method reported by the machine's firmware.

    Returns a structure suitable for returning in the response for
    :py:class:`~provisioningserver.rpc.region.GetBootConfig`.

    Raises BootConfigNoResponse when booting machine should fail to next file.
    """
    rack_controller = RackController.objects.get(system_id=system_id)
    region_ip = None
    if remote_ip is not None:
        region_ip = get_source_address(remote_ip)
    machine = get_node_from_mac_string(mac)

    # Fail with no response early so no extra work is performed.
    if machine is None and arch is None and mac is not None:
        # Request was pxelinux.cfg/01-<mac> for a machine MAAS does not know
        # about. So attempt fall back to pxelinux.cfg/default-<arch>-<subarch>
        # for arch detection.
        raise BootConfigNoResponse()

    if machine is not None:
        # Update the last interface, last access cluster IP address, and
        # the last used BIOS boot method. Only saving the fields that have
        # changed on the machine.
        update_fields = []
        if (machine.boot_interface is None or
                machine.boot_interface.mac_address != mac):
            machine.boot_interface = PhysicalInterface.objects.get(
                mac_address=mac)
            update_fields.append("boot_interface")
        if (machine.boot_cluster_ip is None or
                machine.boot_cluster_ip != local_ip):
            machine.boot_cluster_ip = local_ip
            update_fields.append("boot_cluster_ip")
        if machine.bios_boot_method != bios_boot_method:
            machine.bios_boot_method = bios_boot_method
            update_fields.append("bios_boot_method")
        if len(update_fields) > 0:
            machine.save(update_fields=update_fields)

        # Update the VLAN of the boot interface to be the same VLAN for the
        # interface on the rack controller that the machine communicated with,
        # unless the VLAN is being relayed.
        rack_interface = rack_controller.interface_set.filter(
            ip_addresses__ip=local_ip).first()
        if (rack_interface is not None and
                machine.boot_interface.vlan != rack_interface.vlan):
            # Rack controller and machine is not on the same VLAN, with DHCP
            # relay this is possible. Lets ensure that the VLAN on the
            # interface is setup to relay through the identified VLAN.
            if not VLAN.objects.filter(
                    id=machine.boot_interface.vlan_id,
                    relay_vlan=rack_interface.vlan).exists():
                # DHCP relay is not being performed for that VLAN. Set the VLAN
                # to the VLAN of the rack controller.
                machine.boot_interface.vlan = rack_interface.vlan
                machine.boot_interface.save()

        arch, subarch = machine.split_arch()
        preseed_url = compose_preseed_url(
            machine, rack_controller, default_region_ip=region_ip)
        hostname = machine.hostname
        domain = machine.domain.name
        purpose = machine.get_boot_purpose()

        # Log the request into the event log for that machine.
        if (machine.status == NODE_STATUS.ENTERING_RESCUE_MODE and
                purpose == 'commissioning'):
            event_log_pxe_request(machine, 'rescue')
        else:
            event_log_pxe_request(machine, purpose)

        # Get the correct operating system and series based on the purpose
        # of the booting machine.
        if purpose == "commissioning":
            osystem = Config.objects.get_config('commissioning_osystem')
            series = Config.objects.get_config('commissioning_distro_series')
        else:
            osystem = machine.get_osystem()
            series = machine.get_distro_series()
            if purpose == "xinstall" and osystem != "ubuntu":
                # Use only the commissioning osystem and series, for operating
                # systems other than Ubuntu. As Ubuntu supports HWE kernels,
                # and needs to use that kernel to perform the installation.
                osystem = Config.objects.get_config('commissioning_osystem')
                series = Config.objects.get_config(
                    'commissioning_distro_series')

        # Pre MAAS-1.9 the subarchitecture defined any kernel the machine
        # needed to be able to boot. This could be a hardware enablement
        # kernel(e.g hwe-t) or something like highbank. With MAAS-1.9 any
        # hardware enablement kernel must be specifed in the hwe_kernel field,
        # any other kernel, such as highbank, is still specifed as a
        # subarchitecture. Since Ubuntu does not support architecture specific
        # hardware enablement kernels(i.e a highbank hwe-t kernel on precise)
        # we give precedence to any kernel defined in the subarchitecture field
        if subarch == "generic" and machine.hwe_kernel:
            subarch = machine.hwe_kernel
        elif(subarch == "generic" and
                purpose == "commissioning" and
                machine.min_hwe_kernel):
            # Fall back to "no-such-kernel" so the boot fails visibly rather
            # than booting an unsupported kernel.
            try:
                subarch = validate_hwe_kernel(
                    None, machine.min_hwe_kernel, machine.architecture,
                    osystem, series)
            except ValidationError:
                subarch = "no-such-kernel"

        # We don't care if the kernel opts is from the global setting or a tag,
        # just get the options
        _, effective_kernel_opts = machine.get_effective_kernel_options()

        # Add any extra options from a third party driver.
        use_driver = Config.objects.get_config('enable_third_party_drivers')
        if use_driver:
            driver = get_third_party_driver(machine)
            driver_kernel_opts = driver.get('kernel_opts', '')
            combined_opts = ('%s %s' % (
                '' if effective_kernel_opts is None else effective_kernel_opts,
                driver_kernel_opts)).strip()
            if len(combined_opts):
                extra_kernel_opts = combined_opts
            else:
                extra_kernel_opts = None
        else:
            extra_kernel_opts = effective_kernel_opts

        kparams = BootResource.objects.get_kparams_for_node(machine)
        extra_kernel_opts = merge_kparams_with_extra(
            kparams, extra_kernel_opts)
    else:
        # Unknown machine: boot it into the enlistment environment.
        purpose = "commissioning"  # enlistment
        preseed_url = compose_enlistment_preseed_url(
            rack_controller, default_region_ip=region_ip)
        hostname = 'maas-enlist'
        domain = 'local'
        osystem = Config.objects.get_config('commissioning_osystem')
        series = Config.objects.get_config('commissioning_distro_series')
        min_hwe_kernel = Config.objects.get_config('default_min_hwe_kernel')

        # When no architecture is defined for the enlisting machine select
        # the best boot resource for the operating system and series. If
        # none exists fallback to the default architecture. LP #1181334
        if arch is None:
            resource = (
                BootResource.objects.get_default_commissioning_resource(
                    osystem, series))
            if resource is None:
                arch = DEFAULT_ARCH
            else:
                arch, _ = resource.split_arch()
        # The subarch defines what kernel is booted. With MAAS 2.1 this changed
        # from hwe-<letter> to hwe-<version> or ga-<version>. Validation
        # converts between the two formats to make sure a bootable subarch is
        # selected.
        if subarch is None:
            min_hwe_kernel = validate_hwe_kernel(
                None, min_hwe_kernel, '%s/generic' % arch, osystem, series)
        else:
            min_hwe_kernel = validate_hwe_kernel(
                None, min_hwe_kernel, '%s/%s' % (arch, subarch),
                osystem, series)
        # If no hwe_kernel was found set the subarch to the default, 'generic.'
        if min_hwe_kernel is None:
            subarch = 'generic'
        else:
            subarch = min_hwe_kernel
        # Global kernel options for enlistment.
        extra_kernel_opts = Config.objects.get_config("kernel_opts")

    # Set the final boot purpose.
    if machine is None and arch == DEFAULT_ARCH:
        # If the machine is enlisting and the arch is the default arch (i386),
        # use the dedicated enlistment template which performs architecture
        # detection.
        boot_purpose = "enlist"
    elif purpose == 'poweroff':
        # In order to power the machine off, we need to get it booted in the
        # commissioning environment and issue a `poweroff` command.
        boot_purpose = 'commissioning'
    else:
        boot_purpose = purpose

    # Get the service address to the region for that given rack controller.
    server_host = get_maas_facing_server_host(
        rack_controller=rack_controller, default_region_ip=region_ip)

    kernel, initrd, boot_dtb = get_boot_filenames(
        arch, subarch, osystem, series)

    # Return the params to the rack controller. Include the system_id only
    # if the machine was known.
    params = {
        "arch": arch,
        "subarch": subarch,
        "osystem": osystem,
        "release": series,
        "kernel": kernel,
        "initrd": initrd,
        "boot_dtb": boot_dtb,
        "purpose": boot_purpose,
        "hostname": hostname,
        "domain": domain,
        "preseed_url": preseed_url,
        "fs_host": local_ip,
        "log_host": server_host,
        "extra_opts": '' if extra_kernel_opts is None else extra_kernel_opts,
        # As of MAAS 2.4 only HTTP boot is supported. This ensures MAAS 2.3
        # rack controllers use HTTP boot as well.
        "http_boot": True,
    }
    if machine is not None:
        params["system_id"] = machine.system_id
    return params
def dehydrate(self, obj, data, for_list=False):
    """Add extra fields to `data`.

    :param obj: The node being serialized.
    :param data: Partially-populated dict that is mutated in place and
        returned.
    :param for_list: When True, only the fields needed for the listing
        pages are added; the expensive detail-only fields are skipped.
    """
    data["fqdn"] = obj.fqdn
    data["actions"] = list(compile_node_actions(obj, self.user).keys())
    data["node_type_display"] = obj.get_node_type_display()
    data["link_type"] = NODE_TYPE_TO_LINK_TYPE[obj.node_type]
    data["tags"] = [tag.name for tag in obj.tags.all()]
    if obj.node_type == NODE_TYPE.MACHINE or (
        obj.is_controller and not for_list
    ):
        # Disk count and storage amount is shown on the machine listing
        # page and the machine and controllers details page.
        blockdevices = self.get_blockdevices_for(obj)
        physical_blockdevices = [
            blockdevice
            for blockdevice in blockdevices
            if isinstance(blockdevice, PhysicalBlockDevice)
        ]
        data["physical_disk_count"] = len(physical_blockdevices)
        # Total physical storage in GB (decimal, rounded to one place).
        data["storage"] = round(
            sum(blockdevice.size for blockdevice in physical_blockdevices)
            / (1000 ** 3),
            1,
        )
        data["storage_tags"] = self.get_all_storage_tags(blockdevices)
        # Bucket the cached script results by result type so the health
        # statuses can be computed below.
        commissioning_script_results = []
        testing_script_results = []
        log_results = set()
        for hw_type in self._script_results.get(obj.id, {}).values():
            for script_result in hw_type:
                if (
                    script_result.script_set.result_type
                    == RESULT_TYPE.INSTALLATION
                ):
                    # Don't include installation results in the health
                    # status.
                    continue
                elif script_result.status == SCRIPT_STATUS.ABORTED:
                    # LP: #1724235 - Ignore aborted scripts.
                    continue
                elif (
                    script_result.script_set.result_type
                    == RESULT_TYPE.COMMISSIONING
                ):
                    commissioning_script_results.append(script_result)
                    if (
                        script_result.name in script_output_nsmap
                        and script_result.status == SCRIPT_STATUS.PASSED
                    ):
                        log_results.add(script_result.name)
                elif (
                    script_result.script_set.result_type
                    == RESULT_TYPE.TESTING
                ):
                    testing_script_results.append(script_result)
        data["commissioning_status"] = self.dehydrate_test_statuses(
            commissioning_script_results
        )
        data["testing_status"] = self.dehydrate_test_statuses(
            testing_script_results
        )
        # True only when every expected log-producing script passed.
        data["has_logs"] = (
            log_results.difference(script_output_nsmap.keys()) == set()
        )
    else:
        blockdevices = []
    if obj.node_type != NODE_TYPE.DEVICE:
        # These values are not defined on a device.
        data["architecture"] = obj.architecture
        data["osystem"] = obj.osystem
        data["distro_series"] = obj.distro_series
        data["memory"] = obj.display_memory()
        data["status"] = obj.display_status()
        data["description"] = obj.description
        data["status_code"] = obj.status
    if for_list:
        # Optional annotations that may be pre-computed on the queryset;
        # only include them when present.
        for attr in ("numa_nodes_count", "sriov_support"):
            value = getattr(obj, attr, None)
            if value is not None:
                data[attr] = value
    # Filters are only available on machines and devices.
    if not obj.is_controller:
        # For filters
        subnets = self.get_all_subnets(obj)
        data["subnets"] = [subnet.cidr for subnet in subnets]
        data["fabrics"] = self.get_all_fabric_names(obj, subnets)
        data["spaces"] = self.get_all_space_names(subnets)
        data["extra_macs"] = [
            "%s" % mac_address for mac_address in obj.get_extra_macs()
        ]
        data["link_speeds"] = sorted(
            set(
                [
                    interface.link_speed
                    for interface in obj.interface_set.all()
                    if interface.link_speed > 0
                ]
            )
        )
    if not for_list:
        data["on_network"] = obj.on_network()
        if obj.node_type != NODE_TYPE.DEVICE:
            data["numa_nodes"] = [
                self.dehydrate_numanode(numa_node)
                for numa_node in obj.numanode_set.all().order_by("index")
            ]
            # XXX lamont 2017-02-15 Much of this should be split out into
            # individual methods, rather than having this huge block of
            # dense code here.
            # Status of the commissioning, testing, and logs tabs
            data["metadata"] = {
                metadata.key: metadata.value
                for metadata in obj.nodemetadata_set.all()
            }
            # Network
            data["interfaces"] = [
                self.dehydrate_interface(interface, obj)
                for interface in obj.interface_set.all().order_by("name")
            ]
            data["dhcp_on"] = self.get_providing_dhcp(obj)
            data["hwe_kernel"] = make_hwe_kernel_ui_text(obj.hwe_kernel)
            data["power_type"] = obj.power_type
            data["power_parameters"] = self.dehydrate_power_parameters(
                obj.power_parameters
            )
            data["power_bmc_node_count"] = (
                obj.bmc.node_set.count() if (obj.bmc is not None) else 0
            )
            # Storage
            data["disks"] = sorted(
                chain(
                    (
                        self.dehydrate_blockdevice(blockdevice, obj)
                        for blockdevice in blockdevices
                    ),
                    (
                        self.dehydrate_volume_group(volume_group)
                        for volume_group in VolumeGroup.objects.filter_by_node(
                            obj
                        )
                    ),
                    (
                        self.dehydrate_cache_set(cache_set)
                        for cache_set in CacheSet.objects.get_cache_sets_for_node(
                            obj
                        )
                    ),
                ),
                key=itemgetter("name"),
            )
            data["supported_filesystems"] = [
                {"key": key, "ui": ui}
                for key, ui in FILESYSTEM_FORMAT_TYPE_CHOICES
            ]
            data["storage_layout_issues"] = obj.storage_layout_issues()
            data["special_filesystems"] = [
                self.dehydrate_filesystem(filesystem)
                for filesystem in obj.get_effective_special_filesystems()
            ]
            # NOTE(review): `physical_blockdevices` is only bound in the
            # machine/controller branch above — presumably this detail path
            # is only reached for those node types; confirm against callers.
            data["grouped_storages"] = self.get_grouped_storages(
                physical_blockdevices
            )
            (
                layout_bd,
                detected_layout,
            ) = get_applied_storage_layout_for_node(obj)
            data["detected_storage_layout"] = detected_layout
            # The UI knows that a partition is in use when it has a mounted
            # partition. VMware ESXi does not directly mount the partitions
            # used. As MAAS can't model that inject a place holder so the
            # UI knows that these partitions are in use.
            if detected_layout == "vmfs6":
                for disk in data["disks"]:
                    if disk["id"] == layout_bd.id:
                        for partition in disk["partitions"]:
                            if partition["name"].endswith("-part3"):
                                # Partition 3 is for the default datastore.
                                # This partition may be modified by the
                                # user.
                                continue
                            partition[
                                "used_for"
                            ] = "VMware ESXi OS partition"
                            partition["filesystem"] = {
                                "id": -1,
                                "label": "RESERVED",
                                "mount_point": "RESERVED",
                                "mount_options": None,
                                "fstype": None,
                                "is_format_fstype": False,
                            }
            # Events
            data["events"] = self.dehydrate_events(obj)
            # Machine logs
            data["installation_status"] = self.dehydrate_script_set_status(
                obj.current_installation_script_set
            )
            # Third party drivers
            if Config.objects.get_config("enable_third_party_drivers"):
                # Pull modaliases from the cache
                modaliases = []
                for script_result in commissioning_script_results:
                    if script_result.name == LIST_MODALIASES_OUTPUT_NAME:
                        if script_result.status == SCRIPT_STATUS.PASSED:
                            # STDOUT is deferred in the cache so load it.
                            script_result = (
                                ScriptResult.objects.filter(
                                    id=script_result.id
                                )
                                .only("id", "status", "stdout")
                                .first()
                            )
                            modaliases = script_result.stdout.decode(
                                "utf-8"
                            ).splitlines()
                driver = get_third_party_driver(
                    obj,
                    detected_aliases=modaliases,
                    series=obj.distro_series,
                )
                if "module" in driver and "comment" in driver:
                    data["third_party_driver"] = {
                        "module": driver["module"],
                        "comment": driver["comment"],
                    }
    return data
def test_not_matching_series(self):
    """No driver is returned when the series is not in the driver's list."""
    node = factory.make_Node()
    matched = {"series": ["xenial", "bionic"], "name": "somedriver"}
    self.patch(
        third_party_drivers, "match_aliases_to_driver"
    ).return_value = matched
    # 'precise' is absent from the driver's supported series.
    self.assertEqual({}, get_third_party_driver(node, series="precise"))
def test_finds_no_match(self):
    """An empty dict is returned when no driver matches the node."""
    node = factory.make_Node()
    self.patch(
        third_party_drivers, "match_aliases_to_driver"
    ).return_value = None
    self.assertEqual({}, get_third_party_driver(node))
def get_config(
        system_id, local_ip, remote_ip, arch=None, subarch=None, mac=None,
        bios_boot_method=None):
    """Get the booting configuration for a machine.

    :param system_id: `system_id` of the rack controller making the request.
    :param local_ip: IP on the rack controller the machine booted against.
    :param remote_ip: IP the request originated from (used to derive the
        region-facing source address).
    :param arch: Architecture requested by the boot loader, if any.
    :param subarch: Sub-architecture requested by the boot loader, if any.
    :param mac: MAC address of the booting interface, if known.
    :param bios_boot_method: Boot method reported by the machine's firmware.

    Returns a structure suitable for returning in the response for
    :py:class:`~provisioningserver.rpc.region.GetBootConfig`.

    Raises BootConfigNoResponse when booting machine should fail to next file.
    """
    rack_controller = RackController.objects.get(system_id=system_id)
    region_ip = None
    if remote_ip is not None:
        region_ip = get_source_address(remote_ip)
    machine = get_node_from_mac_string(mac)

    # Get the service address to the region for that given rack controller.
    server_host = get_maas_facing_server_host(
        rack_controller=rack_controller, default_region_ip=region_ip)

    # Fail with no response early so no extra work is performed.
    if machine is None and arch is None and mac is not None:
        # Request was pxelinux.cfg/01-<mac> for a machine MAAS does not know
        # about. So attempt fall back to pxelinux.cfg/default-<arch>-<subarch>
        # for arch detection.
        raise BootConfigNoResponse()

    # Fetch every config value needed below in a single round trip.
    configs = Config.objects.get_configs([
        'commissioning_osystem',
        'commissioning_distro_series',
        'enable_third_party_drivers',
        'default_min_hwe_kernel',
        'default_osystem',
        'default_distro_series',
        'kernel_opts',
        'use_rack_proxy',
        'maas_internal_domain',
    ])

    if machine is not None:
        # Update the last interface, last access cluster IP address, and
        # the last used BIOS boot method.
        if (machine.boot_interface is None or
                machine.boot_interface.mac_address != mac):
            machine.boot_interface = PhysicalInterface.objects.get(
                mac_address=mac)
        if (machine.boot_cluster_ip is None or
                machine.boot_cluster_ip != local_ip):
            machine.boot_cluster_ip = local_ip
        if machine.bios_boot_method != bios_boot_method:
            machine.bios_boot_method = bios_boot_method

        # Reset the machine's status_expires whenever the boot_config is called
        # on a known machine. This allows a machine to take up to the maximum
        # timeout status to POST.
        machine.reset_status_expires()

        # Does nothing if the machine hasn't changed.
        machine.save()

        # Update the VLAN of the boot interface to be the same VLAN for the
        # interface on the rack controller that the machine communicated with,
        # unless the VLAN is being relayed.
        rack_interface = rack_controller.interface_set.filter(
            ip_addresses__ip=local_ip).select_related('vlan').first()
        if (rack_interface is not None and
                machine.boot_interface.vlan_id != rack_interface.vlan_id):
            # Rack controller and machine is not on the same VLAN, with DHCP
            # relay this is possible. Lets ensure that the VLAN on the
            # interface is setup to relay through the identified VLAN.
            if not VLAN.objects.filter(
                    id=machine.boot_interface.vlan_id,
                    relay_vlan=rack_interface.vlan_id).exists():
                # DHCP relay is not being performed for that VLAN. Set the VLAN
                # to the VLAN of the rack controller.
                machine.boot_interface.vlan = rack_interface.vlan
                machine.boot_interface.save()

        arch, subarch = machine.split_arch()
        if configs['use_rack_proxy']:
            preseed_url = compose_preseed_url(
                machine, base_url=get_base_url_for_local_ip(
                    local_ip, configs['maas_internal_domain']))
        else:
            preseed_url = compose_preseed_url(
                machine, base_url=rack_controller.url,
                default_region_ip=region_ip)
        hostname = machine.hostname
        domain = machine.domain.name
        purpose = machine.get_boot_purpose()

        # Early out if the machine is booting local.
        if purpose == 'local':
            return {
                "system_id": machine.system_id,
                "arch": arch,
                "subarch": subarch,
                "osystem": machine.osystem,
                "release": machine.distro_series,
                "kernel": '',
                "initrd": '',
                "boot_dtb": '',
                "purpose": purpose,
                "hostname": hostname,
                "domain": domain,
                "preseed_url": preseed_url,
                "fs_host": local_ip,
                "log_host": server_host,
                "extra_opts": '',
                "http_boot": True,
            }

        # Log the request into the event log for that machine.
        if (machine.status in [
                NODE_STATUS.ENTERING_RESCUE_MODE,
                NODE_STATUS.RESCUE_MODE] and
                purpose == 'commissioning'):
            event_log_pxe_request(machine, 'rescue')
        else:
            event_log_pxe_request(machine, purpose)

        osystem, series, subarch = get_boot_config_for_machine(
            machine, configs, purpose)

        # We don't care if the kernel opts is from the global setting or a tag,
        # just get the options
        _, effective_kernel_opts = machine.get_effective_kernel_options(
            default_kernel_opts=configs['kernel_opts'])

        # Add any extra options from a third party driver.
        use_driver = configs['enable_third_party_drivers']
        if use_driver:
            driver = get_third_party_driver(machine)
            driver_kernel_opts = driver.get('kernel_opts', '')
            combined_opts = ('%s %s' % (
                '' if effective_kernel_opts is None else effective_kernel_opts,
                driver_kernel_opts)).strip()
            if len(combined_opts):
                extra_kernel_opts = combined_opts
            else:
                extra_kernel_opts = None
        else:
            extra_kernel_opts = effective_kernel_opts

        kparams = BootResource.objects.get_kparams_for_node(
            machine, default_osystem=configs['default_osystem'],
            default_distro_series=configs['default_distro_series'])
        extra_kernel_opts = merge_kparams_with_extra(
            kparams, extra_kernel_opts)
    else:
        # Unknown machine: boot it into the enlistment environment.
        purpose = "commissioning"  # enlistment
        if configs['use_rack_proxy']:
            preseed_url = compose_enlistment_preseed_url(
                base_url=get_base_url_for_local_ip(
                    local_ip, configs['maas_internal_domain']))
        else:
            preseed_url = compose_enlistment_preseed_url(
                rack_controller=rack_controller, default_region_ip=region_ip)
        hostname = 'maas-enlist'
        domain = 'local'
        osystem = configs['commissioning_osystem']
        series = configs['commissioning_distro_series']
        min_hwe_kernel = configs['default_min_hwe_kernel']

        # When no architecture is defined for the enlisting machine select
        # the best boot resource for the operating system and series. If
        # none exists fallback to the default architecture. LP #1181334
        if arch is None:
            resource = (
                BootResource.objects.get_default_commissioning_resource(
                    osystem, series))
            if resource is None:
                arch = DEFAULT_ARCH
            else:
                arch, _ = resource.split_arch()
        # The subarch defines what kernel is booted. With MAAS 2.1 this changed
        # from hwe-<letter> to hwe-<version> or ga-<version>. Validation
        # converts between the two formats to make sure a bootable subarch is
        # selected.
        if subarch is None:
            min_hwe_kernel = validate_hwe_kernel(
                None, min_hwe_kernel, '%s/generic' % arch, osystem, series)
        else:
            min_hwe_kernel = validate_hwe_kernel(
                None, min_hwe_kernel, '%s/%s' % (arch, subarch),
                osystem, series)
        # If no hwe_kernel was found set the subarch to the default, 'generic.'
        if min_hwe_kernel is None:
            subarch = 'generic'
        else:
            subarch = min_hwe_kernel
        # Global kernel options for enlistment.
        extra_kernel_opts = configs["kernel_opts"]

    # Set the final boot purpose.
    if machine is None and arch == DEFAULT_ARCH:
        # If the machine is enlisting and the arch is the default arch (i386),
        # use the dedicated enlistment template which performs architecture
        # detection.
        boot_purpose = "enlist"
    elif purpose == 'poweroff':
        # In order to power the machine off, we need to get it booted in the
        # commissioning environment and issue a `poweroff` command.
        boot_purpose = 'commissioning'
    else:
        boot_purpose = purpose

    kernel, initrd, boot_dtb = get_boot_filenames(
        arch, subarch, osystem, series,
        commissioning_osystem=configs['commissioning_osystem'],
        commissioning_distro_series=configs['commissioning_distro_series'])

    # Return the params to the rack controller. Include the system_id only
    # if the machine was known.
    params = {
        "arch": arch,
        "subarch": subarch,
        "osystem": osystem,
        "release": series,
        "kernel": kernel,
        "initrd": initrd,
        "boot_dtb": boot_dtb,
        "purpose": boot_purpose,
        "hostname": hostname,
        "domain": domain,
        "preseed_url": preseed_url,
        "fs_host": local_ip,
        "log_host": server_host,
        "extra_opts": '' if extra_kernel_opts is None else extra_kernel_opts,
        # As of MAAS 2.4 only HTTP boot is supported. This ensures MAAS 2.3
        # rack controllers use HTTP boot as well.
        "http_boot": True,
    }
    if machine is not None:
        params["system_id"] = machine.system_id
    return params
def get_config(
    system_id,
    local_ip,
    remote_ip,
    arch=None,
    subarch=None,
    mac=None,
    hardware_uuid=None,
    bios_boot_method=None,
):
    """Get the booting configuration for a machine.

    :param system_id: `system_id` of the rack controller making the request.
    :param local_ip: IP on the rack controller the machine booted against.
    :param remote_ip: IP the request originated from (used to derive the
        region-facing source address).
    :param arch: Architecture requested by the boot loader, if any.
    :param subarch: Sub-architecture requested by the boot loader, if any.
    :param mac: MAC address of the booting interface, if known.
    :param hardware_uuid: Hardware UUID reported by the boot loader, if any.
    :param bios_boot_method: Boot method reported by the machine's firmware.

    Returns a structure suitable for returning in the response for
    :py:class:`~provisioningserver.rpc.region.GetBootConfig`.

    Raises BootConfigNoResponse when booting machine should fail to next file.
    """
    rack_controller = RackController.objects.get(system_id=system_id)
    region_ip = None
    if remote_ip is not None:
        region_ip = get_source_address(remote_ip)
    machine = get_node_from_mac_or_hardware_uuid(mac, hardware_uuid)

    # Fail with no response early so no extra work is performed.
    if machine is None and arch is None and (mac or hardware_uuid):
        # PXELinux requests boot configuration in the following order:
        # 1. pxelinux.cfg/<hardware uuid>
        # 2. pxelinux.cfg/01-<mac>
        # 3. pxelinux.cfg/default-<arch>-<subarch>
        # If mac and/or hardware_uuid was given but no Node was found fail the
        # request so PXELinux will move onto the next request.
        raise BootConfigNoResponse()

    # Get all required configuration objects in a single query.
    configs = Config.objects.get_configs([
        "commissioning_osystem",
        "commissioning_distro_series",
        "enable_third_party_drivers",
        "default_min_hwe_kernel",
        "default_osystem",
        "default_distro_series",
        "kernel_opts",
        "use_rack_proxy",
        "maas_internal_domain",
        "remote_syslog",
        "maas_syslog_port",
    ])

    # Compute the syslog server.
    log_host, log_port = (
        local_ip,
        (configs["maas_syslog_port"]
         if configs["maas_syslog_port"]
         else RSYSLOG_PORT),
    )
    if configs["remote_syslog"]:
        log_host, log_port = splithost(configs["remote_syslog"])
        if log_port is None:
            # Fallback to default UDP syslog port.
            log_port = 514

    if machine is not None:
        # Update the last interface, last access cluster IP address, and
        # the last used BIOS boot method.
        if machine.boot_cluster_ip != local_ip:
            machine.boot_cluster_ip = local_ip
        if machine.bios_boot_method != bios_boot_method:
            machine.bios_boot_method = bios_boot_method

        try:
            machine.boot_interface = machine.interface_set.get(
                type=INTERFACE_TYPE.PHYSICAL, mac_address=mac)
        except ObjectDoesNotExist:
            # MAC is unknown or wasn't sent. Determine the boot_interface
            # using the boot_cluster_ip.
            subnet = Subnet.objects.get_best_subnet_for_ip(local_ip)
            boot_vlan = getattr(machine.boot_interface, "vlan", None)
            if subnet and subnet.vlan != boot_vlan:
                # This might choose the wrong interface, but we don't
                # have enough information to decide which interface is
                # the boot one.
                machine.boot_interface = machine.interface_set.filter(
                    vlan=subnet.vlan).first()
        else:
            # Update the VLAN of the boot interface to be the same VLAN for
            # the interface on the rack controller that the machine
            # communicated with, unless the VLAN is being relayed.
            rack_interface = (rack_controller.interface_set.filter(
                ip_addresses__ip=local_ip).select_related("vlan").first())
            if (rack_interface is not None and
                    machine.boot_interface.vlan_id != rack_interface.vlan_id):
                # Rack controller and machine is not on the same VLAN, with
                # DHCP relay this is possible. Lets ensure that the VLAN on
                # the interface is setup to relay through the identified VLAN.
                if not VLAN.objects.filter(
                    id=machine.boot_interface.vlan_id,
                    relay_vlan=rack_interface.vlan_id,
                ).exists():
                    # DHCP relay is not being performed for that VLAN. Set the
                    # VLAN to the VLAN of the rack controller.
                    machine.boot_interface.vlan = rack_interface.vlan
                    machine.boot_interface.save()

        # Reset the machine's status_expires whenever the boot_config is
        # called on a known machine. This allows a machine to take up to the
        # maximum timeout status to POST.
        machine.reset_status_expires()

        # Does nothing if the machine hasn't changed.
        machine.save()

        arch, subarch = machine.split_arch()
        if configs["use_rack_proxy"]:
            preseed_url = compose_preseed_url(
                machine,
                base_url=get_base_url_for_local_ip(
                    local_ip, configs["maas_internal_domain"]),
            )
        else:
            preseed_url = compose_preseed_url(
                machine,
                base_url=rack_controller.url,
                default_region_ip=region_ip,
            )
        hostname = machine.hostname
        domain = machine.domain.name
        purpose = machine.get_boot_purpose()

        # Ephemeral deployments will have 'local' boot
        # purpose on power cycles. Set purpose back to
        # 'xinstall' so that the system can be re-deployed.
        if purpose == "local" and machine.ephemeral_deployment:
            purpose = "xinstall"

        # Early out if the machine is booting local.
        if purpose == "local":
            if machine.is_device:
                # Log that we are setting to local boot for a device.
                maaslog.warning(
                    "Device %s with MAC address %s is PXE booting; "
                    "instructing the device to boot locally."
                    % (machine.hostname, mac))
                # Set the purpose to 'local-device' so we can log a message
                # on the rack.
                purpose = "local-device"
            return {
                "system_id": machine.system_id,
                "arch": arch,
                "subarch": subarch,
                "osystem": machine.osystem,
                "release": machine.distro_series,
                "kernel": "",
                "initrd": "",
                "boot_dtb": "",
                "purpose": purpose,
                "hostname": hostname,
                "domain": domain,
                "preseed_url": preseed_url,
                "fs_host": local_ip,
                "log_host": log_host,
                "log_port": log_port,
                "extra_opts": "",
                "http_boot": True,
            }

        # Log the request into the event log for that machine.
        if (machine.status in [NODE_STATUS.ENTERING_RESCUE_MODE,
                               NODE_STATUS.RESCUE_MODE] and
                purpose == "commissioning"):
            event_log_pxe_request(machine, "rescue")
        else:
            event_log_pxe_request(machine, purpose)

        osystem, series, subarch = get_boot_config_for_machine(
            machine, configs, purpose)

        # We don't care if the kernel opts is from the global setting or a
        # tag, just get the options
        _, effective_kernel_opts = machine.get_effective_kernel_options(
            default_kernel_opts=configs["kernel_opts"])

        # Add any extra options from a third party driver.
        use_driver = configs["enable_third_party_drivers"]
        if use_driver:
            driver = get_third_party_driver(machine)
            driver_kernel_opts = driver.get("kernel_opts", "")
            combined_opts = ("%s %s" % (
                "" if effective_kernel_opts is None else effective_kernel_opts,
                driver_kernel_opts,
            )).strip()
            if len(combined_opts):
                extra_kernel_opts = combined_opts
            else:
                extra_kernel_opts = None
        else:
            extra_kernel_opts = effective_kernel_opts

        kparams = BootResource.objects.get_kparams_for_node(
            machine,
            default_osystem=configs["default_osystem"],
            default_distro_series=configs["default_distro_series"],
        )
        extra_kernel_opts = merge_kparams_with_extra(
            kparams, extra_kernel_opts)
    else:
        # Unknown machine: boot it into the enlistment environment.
        purpose = "commissioning"  # enlistment
        if configs["use_rack_proxy"]:
            preseed_url = compose_enlistment_preseed_url(
                base_url=get_base_url_for_local_ip(
                    local_ip, configs["maas_internal_domain"]))
        else:
            preseed_url = compose_enlistment_preseed_url(
                rack_controller=rack_controller, default_region_ip=region_ip)
        hostname = "maas-enlist"
        domain = "local"
        osystem = configs["commissioning_osystem"]
        series = configs["commissioning_distro_series"]
        min_hwe_kernel = configs["default_min_hwe_kernel"]

        # When no architecture is defined for the enlisting machine select
        # the best boot resource for the operating system and series. If
        # none exists fallback to the default architecture. LP #1181334
        if arch is None:
            resource = BootResource.objects.get_default_commissioning_resource(
                osystem, series)
            if resource is None:
                arch = DEFAULT_ARCH
            else:
                arch, _ = resource.split_arch()

        # The subarch defines what kernel is booted. With MAAS 2.1 this
        # changed from hwe-<letter> to hwe-<version> or ga-<version>.
        # Validation converts between the two formats to make sure a bootable
        # subarch is selected.
        if subarch is None:
            min_hwe_kernel = validate_hwe_kernel(
                None, min_hwe_kernel, "%s/generic" % arch, osystem, series)
        else:
            min_hwe_kernel = validate_hwe_kernel(
                None,
                min_hwe_kernel,
                "%s/%s" % (arch, subarch),
                osystem,
                series,
            )
        # If no hwe_kernel was found set the subarch to the default, 'generic.'
        if min_hwe_kernel is None:
            subarch = "generic"
        else:
            subarch = min_hwe_kernel
        # Global kernel options for enlistment.
        extra_kernel_opts = configs["kernel_opts"]

    boot_purpose = get_final_boot_purpose(machine, arch, purpose)

    kernel, initrd, boot_dtb = get_boot_filenames(
        arch,
        subarch,
        osystem,
        series,
        commissioning_osystem=configs["commissioning_osystem"],
        commissioning_distro_series=configs["commissioning_distro_series"],
    )

    # Return the params to the rack controller. Include the system_id only
    # if the machine was known.
    params = {
        "arch": arch,
        "subarch": subarch,
        "osystem": osystem,
        "release": series,
        "kernel": kernel,
        "initrd": initrd,
        "boot_dtb": boot_dtb,
        "purpose": boot_purpose,
        "hostname": hostname,
        "domain": domain,
        "preseed_url": preseed_url,
        "fs_host": local_ip,
        "log_host": log_host,
        "log_port": log_port,
        "extra_opts": "" if extra_kernel_opts is None else extra_kernel_opts,
        # As of MAAS 2.4 only HTTP boot is supported. This ensures MAAS 2.3
        # rack controllers use HTTP boot as well.
        "http_boot": True,
    }
    if machine is not None:
        params["system_id"] = machine.system_id
    return params
def dehydrate(self, obj, data, for_list=False):
    """Add extra fields to `data`.

    Augments the base serialisation of a node with display strings,
    networking/storage summaries and commissioning/testing script-result
    rollups for the websocket UI.

    :param obj: The node being serialised.
    :param data: The partially-built serialisation dict; mutated in place.
    :param for_list: When True, skip the expensive detail-only fields
        (interfaces, disks, events, installation status, driver info).
    :return: The same `data` dict with the extra keys added.
    """
    data["fqdn"] = obj.fqdn
    # Only the actions this user is permitted to perform on this node.
    data["actions"] = list(compile_node_actions(obj, self.user).keys())
    data["node_type_display"] = obj.get_node_type_display()
    data["link_type"] = NODE_TYPE_TO_LINK_TYPE[obj.node_type]
    # MAC addresses other than the boot interface's, rendered as strings.
    data["extra_macs"] = [
        "%s" % mac_address for mac_address in obj.get_extra_macs()
    ]
    subnets = self.get_all_subnets(obj)
    data["subnets"] = [subnet.cidr for subnet in subnets]
    data["fabrics"] = self.get_all_fabric_names(obj, subnets)
    data["spaces"] = self.get_all_space_names(subnets)
    data["tags"] = [tag.name for tag in obj.tags.all()]
    data["metadata"] = {
        metadata.key: metadata.value
        for metadata in obj.nodemetadata_set.all()
    }
    if obj.node_type != NODE_TYPE.DEVICE:
        # Machine/controller-only summary fields; devices carry none of
        # these.
        data["architecture"] = obj.architecture
        data["memory"] = obj.display_memory()
        data["status"] = obj.display_status()
        data["status_code"] = obj.status
        boot_interface = obj.get_boot_interface()
        if boot_interface is not None:
            data["pxe_mac"] = "%s" % boot_interface.mac_address
            data["pxe_mac_vendor"] = obj.get_pxe_mac_vendor()
        else:
            data["pxe_mac"] = data["pxe_mac_vendor"] = ""
        blockdevices = self.get_blockdevices_for(obj)
        physical_blockdevices = [
            blockdevice for blockdevice in blockdevices
            if isinstance(blockdevice, PhysicalBlockDevice)
        ]
        data["physical_disk_count"] = len(physical_blockdevices)
        # Total physical storage, decimal gigabytes (1000**3), one
        # decimal place.
        data["storage"] = "%3.1f" % (
            sum(blockdevice.size for blockdevice in physical_blockdevices)
            / (1000 ** 3))
        data["storage_tags"] = self.get_all_storage_tags(blockdevices)
        data["grouped_storages"] = self.get_grouped_storages(
            physical_blockdevices)
        data["osystem"] = obj.get_osystem(default=self.default_osystem)
        data["distro_series"] = obj.get_distro_series(
            default=self.default_distro_series)
        data["dhcp_on"] = self.get_providing_dhcp(obj)
    if obj.node_type != NODE_TYPE.DEVICE:
        # Roll up commissioning/testing script results into counts and
        # aggregate statuses; installation results and aborted runs are
        # excluded from the health status.
        commissioning_script_results = []
        testing_script_results = []
        log_results = set()
        for hw_type in self._script_results.get(obj.id, {}).values():
            for script_result in hw_type:
                if (script_result.script_set.result_type ==
                        RESULT_TYPE.INSTALLATION):
                    # Don't include installation results in the health
                    # status.
                    continue
                elif script_result.status == SCRIPT_STATUS.ABORTED:
                    # LP: #1724235 - Ignore aborted scripts.
                    continue
                elif (script_result.script_set.result_type ==
                        RESULT_TYPE.COMMISSIONING):
                    commissioning_script_results.append(script_result)
                    if (script_result.name in script_output_nsmap and
                            script_result.status == SCRIPT_STATUS.PASSED):
                        log_results.add(script_result.name)
                elif (script_result.script_set.result_type ==
                        RESULT_TYPE.TESTING):
                    testing_script_results.append(script_result)
        data["commissioning_script_count"] = len(
            commissioning_script_results)
        data["commissioning_status"] = get_status_from_qs(
            commissioning_script_results)
        data["commissioning_status_tooltip"] = (
            self.dehydrate_hardware_status_tooltip(
                commissioning_script_results).replace(
                'test', 'commissioning script'))
        data["testing_script_count"] = len(testing_script_results)
        data["testing_status"] = get_status_from_qs(testing_script_results)
        data["testing_status_tooltip"] = (
            self.dehydrate_hardware_status_tooltip(testing_script_results))
        # NOTE(review): log_results only ever holds names taken from
        # script_output_nsmap, so this difference is empty by
        # construction and "has_logs" is always True here — confirm the
        # intended check was not the reverse difference
        # (nsmap keys minus log_results).
        data["has_logs"] = (log_results.difference(
            script_output_nsmap.keys()) == set())
    if not for_list:
        # Detail-view-only fields below; skipped for list serialisation.
        data["on_network"] = obj.on_network()
        if obj.node_type != NODE_TYPE.DEVICE:
            # XXX lamont 2017-02-15 Much of this should be split out into
            # individual methods, rather than having this huge block of
            # dense code here.
            # Network
            data["interfaces"] = [
                self.dehydrate_interface(interface, obj)
                for interface in obj.interface_set.all().order_by('name')
            ]
            data["hwe_kernel"] = make_hwe_kernel_ui_text(obj.hwe_kernel)
            data["power_type"] = obj.power_type
            data["power_parameters"] = self.dehydrate_power_parameters(
                obj.power_parameters)
            data["power_bmc_node_count"] = obj.bmc.node_set.count() if (
                obj.bmc is not None) else 0
            # Storage: block devices, volume groups and cache sets are
            # merged into one list sorted by name.
            data["disks"] = sorted(chain(
                (self.dehydrate_blockdevice(blockdevice, obj)
                 for blockdevice in blockdevices),
                (self.dehydrate_volume_group(volume_group)
                 for volume_group
                 in VolumeGroup.objects.filter_by_node(obj)),
                (self.dehydrate_cache_set(cache_set)
                 for cache_set
                 in CacheSet.objects.get_cache_sets_for_node(obj)),
            ), key=itemgetter("name"))
            data["supported_filesystems"] = [{
                'key': key, 'ui': ui
            } for key, ui in FILESYSTEM_FORMAT_TYPE_CHOICES]
            data["storage_layout_issues"] = obj.storage_layout_issues()
            data["special_filesystems"] = [
                self.dehydrate_filesystem(filesystem)
                for filesystem in obj.special_filesystems.order_by("id")
            ]
            # Events
            data["events"] = self.dehydrate_events(obj)
            # Machine logs
            data["installation_status"] = self.dehydrate_script_set_status(
                obj.current_installation_script_set)
            # Third party drivers
            if Config.objects.get_config('enable_third_party_drivers'):
                driver = get_third_party_driver(obj)
                if "module" in driver and "comment" in driver:
                    data["third_party_driver"] = {
                        "module": driver["module"],
                        "comment": driver["comment"],
                    }
    return data
def dehydrate(self, obj, data, for_list=False):
    """Add extra fields to `data`.

    Augments the base serialisation of a node with display strings,
    networking/storage summaries and, for the detail view, full
    commissioning/testing/installation script-set results.

    :param obj: The node being serialised.
    :param data: The partially-built serialisation dict; mutated in place
        (and rebound once via `dehydrate_summary_output`).
    :param for_list: When True, skip the expensive detail-only fields
        (interfaces, disks, events, script results, driver info).
    :return: The `data` dict with the extra keys added.
    """
    data["fqdn"] = obj.fqdn
    # Only the actions this user is permitted to perform on this node.
    data["actions"] = list(compile_node_actions(obj, self.user).keys())
    data["node_type_display"] = obj.get_node_type_display()
    # MAC addresses other than the boot interface's, rendered as strings.
    data["extra_macs"] = [
        "%s" % mac_address for mac_address in obj.get_extra_macs()
    ]
    subnets = self.get_all_subnets(obj)
    data["subnets"] = [subnet.cidr for subnet in subnets]
    data["fabrics"] = self.get_all_fabric_names(obj, subnets)
    data["spaces"] = self.get_all_space_names(subnets)
    data["tags"] = [
        tag.name for tag in obj.tags.all()
    ]
    if obj.node_type != NODE_TYPE.DEVICE:
        # Machine/controller-only summary fields; devices carry none of
        # these.
        data["memory"] = obj.display_memory()
        data["status"] = obj.display_status()
        data["status_code"] = obj.status
        boot_interface = obj.get_boot_interface()
        if boot_interface is not None:
            data["pxe_mac"] = "%s" % boot_interface.mac_address
            data["pxe_mac_vendor"] = obj.get_pxe_mac_vendor()
        else:
            data["pxe_mac"] = data["pxe_mac_vendor"] = ""
        blockdevices = self.get_blockdevices_for(obj)
        physical_blockdevices = [
            blockdevice for blockdevice in blockdevices
            if isinstance(blockdevice, PhysicalBlockDevice)
        ]
        data["physical_disk_count"] = len(physical_blockdevices)
        # Total physical storage, decimal gigabytes (1000**3), one
        # decimal place.
        data["storage"] = "%3.1f" % (
            sum(
                blockdevice.size
                for blockdevice in physical_blockdevices
            ) / (1000 ** 3))
        data["storage_tags"] = self.get_all_storage_tags(blockdevices)
        data["osystem"] = obj.get_osystem(
            default=self.default_osystem)
        data["distro_series"] = obj.get_distro_series(
            default=self.default_distro_series)
        data["dhcp_on"] = self.get_providing_dhcp(obj)
    if not for_list:
        # Detail-view-only fields below; skipped for list serialisation.
        data["on_network"] = obj.on_network()
        if obj.node_type != NODE_TYPE.DEVICE:
            # XXX lamont 2017-02-15 Much of this should be split out into
            # individual methods, rather than having this huge block of
            # dense code here.
            # Network
            data["interfaces"] = [
                self.dehydrate_interface(interface, obj)
                for interface in obj.interface_set.all().order_by('name')
            ]
            data["hwe_kernel"] = make_hwe_kernel_ui_text(obj.hwe_kernel)
            data["power_type"] = obj.power_type
            data["power_parameters"] = self.dehydrate_power_parameters(
                obj.power_parameters)
            data["power_bmc_node_count"] = obj.bmc.node_set.count() if (
                obj.bmc is not None) else 0
            # Storage: block devices, volume groups and cache sets are
            # merged into one list sorted by name.
            data["disks"] = sorted(chain(
                (self.dehydrate_blockdevice(blockdevice, obj)
                 for blockdevice in blockdevices),
                (self.dehydrate_volume_group(volume_group)
                 for volume_group
                 in VolumeGroup.objects.filter_by_node(obj)),
                (self.dehydrate_cache_set(cache_set)
                 for cache_set
                 in CacheSet.objects.get_cache_sets_for_node(obj)),
            ), key=itemgetter("name"))
            data["supported_filesystems"] = [
                {'key': key, 'ui': ui}
                for key, ui in FILESYSTEM_FORMAT_TYPE_CHOICES
            ]
            data["storage_layout_issues"] = obj.storage_layout_issues()
            data["special_filesystems"] = [
                self.dehydrate_filesystem(filesystem)
                for filesystem in obj.special_filesystems.order_by("id")
            ]
            # Events
            data["events"] = self.dehydrate_events(obj)
            # Machine output
            data = self.dehydrate_summary_output(obj, data)
            data["commissioning_results"] = self.dehydrate_script_set(
                obj.current_commissioning_script_set)
            data["commissioning_script_set_status"] = (
                self.dehydrate_script_set_status(
                    obj.current_commissioning_script_set))
            data["testing_results"] = self.dehydrate_script_set(
                obj.current_testing_script_set)
            data["testing_script_set_status"] = (
                self.dehydrate_script_set_status(
                    obj.current_testing_script_set))
            data["installation_results"] = self.dehydrate_script_set(
                obj.current_installation_script_set)
            data["installation_script_set_status"] = (
                self.dehydrate_script_set_status(
                    obj.current_installation_script_set))
            # Third party drivers
            if Config.objects.get_config('enable_third_party_drivers'):
                driver = get_third_party_driver(obj)
                if "module" in driver and "comment" in driver:
                    data["third_party_driver"] = {
                        "module": driver["module"],
                        "comment": driver["comment"],
                    }
    return data