Ejemplo n.º 1
0
def _process_lxd_environment(node, data):
    """Process the environment results from the `LXD_OUTPUT_NAME` script."""
    # Translating the kernel architecture into the Debian naming is how the
    # architecture gets set on controllers.
    node.architecture = kernel_to_debian_architecture(
        data["kernel_architecture"])

    # While a machine is commissioning the OS is always the ephemeral
    # environment, so the OS details below are only trustworthy for
    # controllers (which run the machine-resources binary directly on the
    # running machine) and LXD Pods (whose data comes from LXD on the
    # running machine).
    if (node.is_controller or node.is_pod) and all(
            data.get(key) for key in ("os_name", "os_version")):
        # This is how the hostname gets set on controllers and stays in
        # sync on Pods.
        node.hostname = data["server_name"]

        # MAAS always stores OS information in lower case.
        node.osystem = data["os_name"].lower()
        node.distro_series = data["os_version"].lower()
        if node.osystem == "ubuntu":
            # LXD reports the numeric OS version (e.g. 20.04) while MAAS
            # stores Ubuntu releases by codename (e.g. focal).
            release = get_release(node.distro_series)
            if release:
                node.distro_series = release["series"]
Ejemplo n.º 2
0
    def discover(self, pod_id, context):
        """Discover all Pod host resources."""
        client = yield self.get_client(pod_id, context)
        if not client.has_api_extension("virtual-machines"):
            raise LXDError(
                "Please upgrade your LXD host to 3.19+ for virtual machine support."
            )
        resources = yield deferToThread(lambda: client.resources)

        # Every port on every network card contributes one MAC address.
        mac_addresses = [
            port["address"]
            for card in resources["network"]["cards"]
            for port in card["ports"]
        ]

        # After the region creates the Pod object it will sync LXD
        # commissioning data for all hardware information.
        return DiscoveredPod(
            architectures=[
                kernel_to_debian_architecture(arch)
                for arch in client.host_info["environment"]["architectures"]
            ],
            name=client.host_info["environment"]["server_name"],
            mac_addresses=mac_addresses,
            capabilities=[
                Capabilities.COMPOSABLE,
                Capabilities.DYNAMIC_LOCAL_STORAGE,
                Capabilities.OVER_COMMIT,
                Capabilities.STORAGE_POOLS,
            ],
        )
Ejemplo n.º 3
0
    def _get_discovered_machine(self,
                                client,
                                machine,
                                storage_pools,
                                request=None):
        """Get the discovered machine.

        Translates an LXD virtual machine into a `DiscoveredMachine`,
        including its block devices, interfaces, memory/CPU limits and
        power state.

        :param client: pylxd client for the host/project owning `machine`.
        :param machine: LXD virtual machine object to translate.
        :param storage_pools: discovered storage pools; not read inside this
            method, kept for interface compatibility with callers.
        :param request: optional compose request; when given, tags from its
            block devices are copied onto the matching discovered devices.
        :return: a `DiscoveredMachine` describing `machine`.
        """
        # Check the power state first.
        state = machine.status_code
        try:
            power_state = LXD_VM_POWER_STATE[state]
        except KeyError:
            maaslog.error(
                f"{machine.name}: Unknown power status code: {state}")
            power_state = "unknown"

        def _get_discovered_block_device(name, device, requested_device=None):
            # Tags can only come from the compose request; discovery alone
            # has no tag information.
            tags = requested_device.tags if requested_device else []
            # When LXD creates a QEMU disk the serial is always lxd_{device
            # name}. The device name is commonly "root" for the first disk. The
            # model and serial must be correctly defined here otherwise MAAS
            # will delete the disk created during composition which results in
            # losing the storage pool link. Without the storage pool link MAAS
            # can't determine how much of the storage pool has been used.
            serial = f"lxd_{name}"
            source = device.get("source")
            if source:
                # Custom volume: its size is stored in the volume config.
                pool = client.storage_pools.get(device["pool"])
                volume = pool.volumes.get("custom", source)
                size = volume.config.get("size")
            else:
                size = device.get("size")
            # Default disk size is 10GB in LXD
            size = convert_lxd_byte_suffixes(size or "10GB")
            return DiscoveredMachineBlockDevice(
                model="QEMU HARDDISK",
                serial=serial,
                id_path=f"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_{serial}",
                size=size,
                tags=tags,
                storage_pool=device.get("pool"),
            )

        expanded_config = machine.expanded_config
        # Map interface name -> MAC from config keys ending in "hwaddr"
        # (e.g. "volatile.eth0.hwaddr"); the name is the second dotted part.
        iface_to_mac = {
            key.split(".")[1]: value
            for key, value in expanded_config.items() if key.endswith("hwaddr")
        }

        def _get_discovered_interface(name, device, boot):
            if "network" in device:
                # Try finding the nictype from the networks.
                # XXX: This should work for "bridge" networks,
                #      but will most likely produce weird results for the
                #      other types.
                network = client.networks.get(device["network"])
                attach_type = network.type
                attach_name = network.name
            else:
                attach_name = device["parent"]
                nictype = device["nictype"]
                attach_type = (InterfaceAttachType.BRIDGE
                               if nictype == "bridged" else nictype)
            mac = device.get("hwaddr")
            if mac is None:
                # No explicit MAC on the device; fall back to the MAC
                # recorded in the expanded config.
                mac = iface_to_mac.get(name)
            return DiscoveredMachineInterface(
                mac_address=mac,
                vid=int(device.get("vlan", 0)),
                boot=boot,
                attach_type=attach_type,
                attach_name=attach_name,
            )

        extra_block_devices = 0
        block_devices = []
        interfaces = []
        for name, device in machine.expanded_devices.items():
            if device["type"] == "disk":
                requested_device = None
                if request:
                    # for composed VMs, the root disk is always the first
                    # one. Adjust the index so that it matches the requested
                    # device
                    if name == "root":
                        index = 0
                    else:
                        extra_block_devices += 1
                        index = extra_block_devices
                    requested_device = request.block_devices[index]
                block_devices.append(
                    _get_discovered_block_device(
                        name, device, requested_device=requested_device))
            elif device["type"] == "nic":
                # Only the first discovered NIC is marked as the boot
                # interface ("not interfaces" is True only while empty).
                interfaces.append(
                    _get_discovered_interface(name, device, not interfaces))

        # LXD uses different suffixes to store memory so make
        # sure we convert to MiB, which is what MAAS uses.
        memory = expanded_config.get("limits.memory")
        if memory is not None:
            memory = convert_lxd_byte_suffixes(memory, divisor=1024**2)
        else:
            memory = 1024
        hugepages_backed = _get_bool(
            expanded_config.get("limits.memory.hugepages"))
        cores, pinned_cores = _parse_cpu_cores(
            expanded_config.get("limits.cpu"))
        return DiscoveredMachine(
            hostname=machine.name,
            architecture=kernel_to_debian_architecture(machine.architecture),
            # 1 core and 1GiB of memory (we need it in MiB) is default for
            # LXD if not specified.
            cores=cores,
            memory=memory,
            cpu_speed=0,
            interfaces=interfaces,
            block_devices=block_devices,
            power_state=power_state,
            power_parameters={
                "instance_name": machine.name,
                "project": client.project,
            },
            tags=[],
            hugepages_backed=hugepages_backed,
            pinned_cores=pinned_cores,
            # LXD VMs use only UEFI.
            bios_boot_method="uefi",
        )
Ejemplo n.º 4
0
    def _discover(self, client: Client, pod_id: int, context: dict):
        """Gather host, network, storage and VM details into a `DiscoveredPod`.

        :param client: pylxd client for the LXD host.
        :param pod_id: identifier of the Pod being discovered.
        :param context: connection parameters, reused to open per-project
            clients for VM discovery.
        :return: a `DiscoveredPod`; empty if `client` is not trusted.
        :raises LXDPodError: if the host has no storage pools.
        """
        self._check_required_extensions(client)

        if not client.trusted:
            # return empty information as the client is not authenticated and
            # gathering other info requires auth.
            return DiscoveredPod()

        self._ensure_project(client)

        environment = client.host_info["environment"]
        # After the region creates the Pod object it will sync LXD commissioning
        # data for all hardware information.
        discovered_pod = DiscoveredPod(
            # client.host_info["environment"]["architectures"] reports all the
            # architectures the host CPU supports, not the architectures LXD
            # supports. On x86_64 LXD reports [x86_64, i686] however LXD does
            # not currently support VMs on i686. The LXD API currently does not
            # have a way to query which architectures are usable for VMs. The
            # safest bet is to just use the kernel_architecture.
            architectures=[
                kernel_to_debian_architecture(
                    environment["kernel_architecture"])
            ],
            name=environment["server_name"],
            version=environment["server_version"],
            capabilities=[
                Capabilities.COMPOSABLE,
                Capabilities.DYNAMIC_LOCAL_STORAGE,
                Capabilities.OVER_COMMIT,
                Capabilities.STORAGE_POOLS,
            ],
        )

        # Discover networks. "unknown" interfaces are considered too to match
        # ethernets in containers.
        networks_state = [
            net.state() for net in client.networks.all()
            if net.type in ("unknown", "physical")
        ]
        # De-duplicate MACs through a set; interfaces without a MAC are
        # skipped.
        discovered_pod.mac_addresses = list(
            {state.hwaddr
             for state in networks_state if state.hwaddr})

        # Discover storage pools.
        storage_pools = client.storage_pools.all()
        if not storage_pools:
            raise LXDPodError(
                "No storage pools exists.  Please create a storage pool in LXD."
            )
        pools = []
        local_storage = 0
        for storage_pool in storage_pools:
            discovered_storage_pool = self._get_discovered_pod_storage_pool(
                storage_pool)
            local_storage += discovered_storage_pool.storage
            pools.append(discovered_storage_pool)
        discovered_pod.storage_pools = pools
        discovered_pod.local_storage = local_storage

        # Discover VMs.
        host_cpu_speed = lxd_cpu_speed(client.resources)
        projects = [project.name for project in client.projects.all()]
        machines = []
        for project in projects:
            # Open a scoped client per project so VMs in every project are
            # discovered; the context manager closes it afterwards.
            with self._get_client(pod_id, context,
                                  project=project) as project_cli:
                for virtual_machine in project_cli.virtual_machines.all():
                    discovered_machine = self._get_discovered_machine(
                        project_cli,
                        virtual_machine,
                        storage_pools=discovered_pod.storage_pools,
                    )
                    discovered_machine.cpu_speed = host_cpu_speed
                    machines.append(discovered_machine)
        discovered_pod.machines = machines

        return discovered_pod
Ejemplo n.º 5
0
 def test_kernel_to_debian_architecture(self):
     # Verify the scenario's kernel architecture maps to the expected
     # Debian architecture (self.kernel / self.debian come from fixtures).
     self.assertEqual(self.debian,
                      kernel_to_debian_architecture(self.kernel))
Ejemplo n.º 6
0
    def test_get_discovered_machine(self):
        """Discovery of one VM covers bridge/macvlan/SR-IOV NICs, VLANs,
        explicit vs. config-derived MACs, and the root disk."""
        driver = lxd_module.LXDPodDriver()
        Client = self.patch(lxd_module, "Client")
        client = Client.return_value
        mock_machine = Mock()
        mock_machine.name = factory.make_name("machine")
        mock_machine.architecture = "x86_64"
        expanded_config = {
            "limits.cpu": "2",
            "limits.memory": "1024MiB",
            "volatile.eth0.hwaddr": "00:16:3e:78:be:04",
            "volatile.eth1.hwaddr": "00:16:3e:f9:fc:cb",
            "volatile.eth2.hwaddr": "00:16:3e:f9:fc:cc",
        }
        expanded_devices = {
            "eth0": {
                "name": "eth0",
                "network": "lxdbr0",
                "type": "nic",
            },
            "eth1": {
                "name": "eth1",
                "nictype": "bridged",
                "parent": "br1",
                "type": "nic",
            },
            "eth2": {
                "name": "eth2",
                "nictype": "macvlan",
                "parent": "eno2",
                "type": "nic",
            },
            # SR-IOV devices created by MAAS have an explicit MAC set on
            # the device, so that it knows what the MAC will be.
            "eth3": {
                "name": "eth3",
                "hwaddr": "00:16:3e:f9:fc:dd",
                "nictype": "sriov",
                "parent": "eno3",
                "type": "nic",
            },
            "eth4": {
                "name": "eth4",
                "hwaddr": "00:16:3e:f9:fc:ee",
                "nictype": "sriov",
                "parent": "eno3",
                "vlan": "33",
                "type": "nic",
            },
            # An interface not created by MAAS, thus lacking an explicit
            # MAC.
            "eth5": {
                "name": "eth5",
                "nictype": "sriov",
                "parent": "eno3",
                "vlan": "44",
                "type": "nic",
            },
            "root": {
                "path": "/",
                "pool": "default",
                "type": "disk",
                "size": "20GB",
            },
        }
        mock_machine.expanded_config = expanded_config
        mock_machine.expanded_devices = expanded_devices
        mock_machine.status_code = 102
        # Fake storage pool with ~192GiB used of ~285GiB total.
        mock_storage_pool = Mock()
        mock_storage_pool.name = "default"
        mock_storage_pool_resources = Mock()
        mock_storage_pool_resources.space = {
            "used": 207111192576,
            "total": 306027577344,
        }
        mock_storage_pool.resources.get.return_value = (
            mock_storage_pool_resources)
        mock_machine.storage_pools.get.return_value = mock_storage_pool
        # eth0 is defined via "network", so the driver looks the network up.
        mock_network = Mock()
        mock_network.type = "bridge"
        mock_network.name = "lxdbr0"
        client.networks.get.return_value = mock_network
        discovered_machine = yield ensureDeferred(
            driver.get_discovered_machine(client, mock_machine,
                                          [mock_storage_pool]))

        self.assertEqual(mock_machine.name, discovered_machine.hostname)
        self.assertEqual("uefi", discovered_machine.bios_boot_method)

        self.assertEqual(
            kernel_to_debian_architecture(mock_machine.architecture),
            discovered_machine.architecture,
        )
        self.assertEqual(
            lxd_module.LXD_VM_POWER_STATE[mock_machine.status_code],
            discovered_machine.power_state,
        )
        self.assertEqual(2, discovered_machine.cores)
        self.assertEqual(1024, discovered_machine.memory)
        self.assertEqual(
            mock_machine.name,
            discovered_machine.power_parameters["instance_name"],
        )
        self.assertEqual(
            discovered_machine.block_devices[0],
            DiscoveredMachineBlockDevice(
                model="QEMU HARDDISK",
                serial="lxd_root",
                id_path="/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_lxd_root",
                size=20 * 1000**3,
                block_size=512,
                tags=[],
                type="physical",
                storage_pool=expanded_devices["root"]["pool"],
                iscsi_target=None,
            ),
        )
        # Only the first NIC is the boot interface.
        self.assertEqual(
            discovered_machine.interfaces[0],
            DiscoveredMachineInterface(
                mac_address=expanded_config["volatile.eth0.hwaddr"],
                vid=0,
                tags=[],
                boot=True,
                attach_type=InterfaceAttachType.BRIDGE,
                attach_name="lxdbr0",
            ),
        )
        self.assertEqual(
            discovered_machine.interfaces[1],
            DiscoveredMachineInterface(
                mac_address=expanded_config["volatile.eth1.hwaddr"],
                vid=0,
                tags=[],
                boot=False,
                attach_type=InterfaceAttachType.BRIDGE,
                attach_name="br1",
            ),
        )
        self.assertEqual(
            discovered_machine.interfaces[2],
            DiscoveredMachineInterface(
                mac_address=expanded_config["volatile.eth2.hwaddr"],
                vid=0,
                tags=[],
                boot=False,
                attach_type=InterfaceAttachType.MACVLAN,
                attach_name="eno2",
            ),
        )
        self.assertEqual(
            discovered_machine.interfaces[3],
            DiscoveredMachineInterface(
                mac_address=expanded_devices["eth3"]["hwaddr"],
                vid=0,
                tags=[],
                boot=False,
                attach_type=InterfaceAttachType.SRIOV,
                attach_name="eno3",
            ),
        )
        self.assertEqual(
            discovered_machine.interfaces[4],
            DiscoveredMachineInterface(
                mac_address=expanded_devices["eth4"]["hwaddr"],
                vid=33,
                tags=[],
                boot=False,
                attach_type=InterfaceAttachType.SRIOV,
                attach_name="eno3",
            ),
        )
        # eth5 has no device MAC and no config entry, so the MAC is None.
        self.assertEqual(
            discovered_machine.interfaces[5],
            DiscoveredMachineInterface(
                mac_address=None,
                vid=44,
                tags=[],
                boot=False,
                attach_type=InterfaceAttachType.SRIOV,
                attach_name="eno3",
            ),
        )
        self.assertItemsEqual([], discovered_machine.tags)
        self.assertFalse(discovered_machine.hugepages_backed)
        self.assertEqual(discovered_machine.pinned_cores, [])
Ejemplo n.º 7
0
    async def discover(self, pod_id, context):
        """Discover all Pod host resources.

        :param pod_id: identifier of the Pod to discover.
        :param context: power/connection parameters for the Pod.
        :return: a fully populated `DiscoveredPod`.
        :raises LXDPodError: if the LXD host lacks VM support or has no
            storage pools configured.
        """
        # Connect to the Pod and make sure it is valid.
        client = await self.get_client(pod_id, context)
        if not client.has_api_extension("virtual-machines"):
            raise LXDPodError(
                "Please upgrade your LXD host to 3.19+ for virtual machine support."
            )
        resources = await deferToThread(lambda: client.resources)

        # Every port on every network card contributes one MAC address.
        mac_addresses = []
        for card in resources["network"]["cards"]:
            for port in card["ports"]:
                mac_addresses.append(port["address"])

        # After the region creates the Pod object it will sync LXD commissioning
        # data for all hardware information.
        discovered_pod = DiscoveredPod(
            # client.host_info["environment"]["architectures"] reports all the
            # architectures the host CPU supports, not the architectures LXD
            # supports. On x86_64 LXD reports [x86_64, i686] however LXD does
            # not currently support VMs on i686. The LXD API currently does not
            # have a way to query which architectures are usable for VMs. The
            # safest bet is to just use the kernel_architecture.
            architectures=[
                kernel_to_debian_architecture(
                    client.host_info["environment"]["kernel_architecture"])
            ],
            name=client.host_info["environment"]["server_name"],
            mac_addresses=mac_addresses,
            capabilities=[
                Capabilities.COMPOSABLE,
                Capabilities.DYNAMIC_LOCAL_STORAGE,
                Capabilities.OVER_COMMIT,
                Capabilities.STORAGE_POOLS,
            ],
        )

        # Check that we have at least one storage pool.
        # If not, user should be warned that they need to create one.
        # The same listing is reused below; previously it was fetched from
        # the LXD API twice.
        storage_pools = await deferToThread(client.storage_pools.all)
        if not storage_pools:
            raise LXDPodError(
                "No storage pools exists.  Please create a storage pool in LXD."
            )

        # Discover Storage Pools.
        pools = []
        local_storage = 0
        for storage_pool in storage_pools:
            discovered_storage_pool = self.get_discovered_pod_storage_pool(
                storage_pool)
            local_storage += discovered_storage_pool.storage
            pools.append(discovered_storage_pool)
        discovered_pod.storage_pools = pools
        discovered_pod.local_storage = local_storage

        # Discover VMs.
        machines = []
        virtual_machines = await deferToThread(client.virtual_machines.all)
        for virtual_machine in virtual_machines:
            discovered_machine = await self.get_discovered_machine(
                client,
                virtual_machine,
                storage_pools=discovered_pod.storage_pools,
            )
            discovered_machine.cpu_speed = lxd_cpu_speed(resources)
            machines.append(discovered_machine)
        discovered_pod.machines = machines

        # Return the DiscoveredPod.
        return discovered_pod
Ejemplo n.º 8
0
    async def get_discovered_machine(self,
                                     client,
                                     machine,
                                     storage_pools,
                                     request=None):
        """Get the discovered machine.

        Translates an LXD virtual machine into a `DiscoveredMachine`,
        including block devices, interfaces, memory/CPU limits and the
        power state.

        :param client: pylxd client for the host owning `machine`.
        :param machine: LXD virtual machine object to translate.
        :param storage_pools: discovered storage pools; not read inside this
            method, kept for interface compatibility with callers.
        :param request: optional compose request; when given, tags from its
            first block device are applied to discovered disks.
        :return: a `DiscoveredMachine` describing `machine`.
        """
        # Check the power state first.
        state = machine.status_code
        try:
            power_state = LXD_VM_POWER_STATE[state]
        except KeyError:
            maaslog.error(
                f"{machine.name}: Unknown power status code: {state}")
            power_state = "unknown"

        expanded_config = machine.expanded_config
        expanded_devices = machine.expanded_devices

        # Discover block devices.
        block_devices = []
        # NOTE(review): `idx` is never used in the loop body; iteration is
        # effectively over device names only.
        for idx, device in enumerate(expanded_devices):
            # Block device.
            # When request is provided map the tags from the request block
            # devices to the discovered block devices. This ensures that
            # composed machine has the requested tags on the block device.

            tags = []
            if (request is not None
                    and expanded_devices[device]["type"] == "disk"):
                # NOTE(review): tags always come from block_devices[0], even
                # for disks beyond the first — confirm this is intended.
                tags = request.block_devices[0].tags

            device_info = expanded_devices[device]
            if device_info["type"] == "disk":
                # When LXD creates a QEMU disk the serial is always
                # lxd_{device name}. The device_name is defined by
                # the LXD profile or when adding a device. This is
                # commonly "root" for the first disk. The model and
                # serial must be correctly defined here otherwise
                # MAAS will delete the disk created during composition
                # which results in losing the storage pool link. Without
                # the storage pool link MAAS can't determine how much
                # of the storage pool has been used.
                serial = f"lxd_{device}"
                # Default disk size is 10GB.
                size = convert_lxd_byte_suffixes(
                    device_info.get("size", "10GB"))
                storage_pool = device_info.get("pool")
                block_devices.append(
                    DiscoveredMachineBlockDevice(
                        model="QEMU HARDDISK",
                        serial=serial,
                        id_path=
                        f"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_{serial}",
                        size=size,
                        tags=tags,
                        storage_pool=storage_pool,
                    ))

        # Discover interfaces.
        interfaces = []
        boot = True
        # Map interface name -> MAC from config keys ending in "hwaddr"
        # (e.g. "volatile.eth0.hwaddr").
        config_mac_address = {}
        for configuration in expanded_config:
            if configuration.endswith("hwaddr"):
                mac = expanded_config[configuration]
                name = configuration.split(".")[1]
                config_mac_address[name] = mac
        for name, device in expanded_devices.items():
            if device["type"] != "nic":
                continue
            device = expanded_devices[name]
            if "network" in device:
                # Try finding the nictype from the networks.
                # XXX: This should work for "bridge" networks,
                #      but will most likely produce weird results for the
                #      other types.
                network = await deferToThread(client.networks.get,
                                              device["network"])
                attach_type = network.type
                attach_name = network.name
            else:
                attach_name = device["parent"]
                nictype = device["nictype"]
                attach_type = (InterfaceAttachType.BRIDGE
                               if nictype == "bridged" else nictype)
            mac = device.get("hwaddr")
            if mac is None:
                # No explicit MAC on the device; fall back to the MAC
                # recorded in the expanded config (may still be None).
                mac = config_mac_address.get(name)

            interfaces.append(
                DiscoveredMachineInterface(
                    mac_address=mac,
                    vid=int(device.get("vlan", get_vid_from_ifname(name))),
                    boot=boot,
                    attach_type=attach_type,
                    attach_name=attach_name,
                ))
            # Only the first NIC is the boot interface.
            boot = False

        # LXD uses different suffixes to store memory so make
        # sure we convert to MiB, which is what MAAS uses.
        memory = expanded_config.get("limits.memory")
        if memory is not None:
            memory = convert_lxd_byte_suffixes(memory, divisor=1024**2)
        else:
            memory = 1024
        hugepages_backed = _get_bool(
            expanded_config.get("limits.memory.hugepages"))
        cores, pinned_cores = _parse_cpu_cores(
            expanded_config.get("limits.cpu"))
        return DiscoveredMachine(
            hostname=machine.name,
            architecture=kernel_to_debian_architecture(machine.architecture),
            # 1 core and 1GiB of memory (we need it in MiB) is default for
            # LXD if not specified.
            cores=cores,
            memory=memory,
            cpu_speed=0,
            interfaces=interfaces,
            block_devices=block_devices,
            power_state=power_state,
            power_parameters={"instance_name": machine.name},
            tags=[],
            hugepages_backed=hugepages_backed,
            pinned_cores=pinned_cores,
            # LXD VMs use only UEFI.
            bios_boot_method="uefi",
        )
Ejemplo n.º 9
0
    def test_get_discovered_machine(self):
        """Discovery of one VM translates NICs, the root disk, limits and
        the power state correctly.

        Uses `assertEqual` throughout; `assertEquals` is a deprecated alias
        in `unittest` and emits a DeprecationWarning.
        """
        driver = lxd_module.LXDPodDriver()
        Client = self.patch(lxd_module, "Client")
        client = Client.return_value
        mock_machine = Mock()
        mock_machine.name = factory.make_name("machine")
        mock_machine.architecture = "x86_64"
        expanded_config = {
            "limits.cpu": "2",
            "limits.memory": "1024MiB",
            "volatile.eth0.hwaddr": "00:16:3e:78:be:04",
            "volatile.eth1.hwaddr": "00:16:3e:f9:fc:cb",
        }
        expanded_devices = {
            "eth0": {
                "name": "eth0",
                "nictype": "bridged",
                "parent": "lxdbr0",
                "type": "nic",
            },
            "eth1": {
                "name": "eth1",
                "nictype": "bridged",
                "parent": "virbr1",
                "type": "nic",
            },
            "root": {
                "path": "/",
                "pool": "default",
                "type": "disk",
                "size": "20GB",
            },
        }
        mock_machine.expanded_config = expanded_config
        mock_machine.expanded_devices = expanded_devices
        mock_machine.status_code = 102
        # Fake storage pool with ~192GiB used of ~285GiB total.
        mock_storage_pool = Mock()
        mock_storage_pool.name = "default"
        mock_storage_pool_resources = Mock()
        mock_storage_pool_resources.space = {
            "used": 207111192576,
            "total": 306027577344,
        }
        mock_storage_pool.resources.get.return_value = (
            mock_storage_pool_resources
        )
        mock_machine.storage_pools.get.return_value = mock_storage_pool
        discovered_machine = yield driver.get_discovered_machine(
            client, mock_machine, [mock_storage_pool]
        )

        self.assertEqual(mock_machine.name, discovered_machine.hostname)

        self.assertEqual(
            kernel_to_debian_architecture(mock_machine.architecture),
            discovered_machine.architecture,
        )
        self.assertEqual(
            lxd_module.LXD_VM_POWER_STATE[mock_machine.status_code],
            discovered_machine.power_state,
        )
        self.assertEqual(2, discovered_machine.cores)
        self.assertEqual(1024, discovered_machine.memory)
        self.assertEqual(
            mock_machine.name,
            discovered_machine.power_parameters["instance_name"],
        )
        self.assertThat(
            discovered_machine.block_devices[0],
            MatchesStructure.byEquality(
                model="QEMU HARDDISK",
                serial="lxd_root",
                id_path="/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_lxd_root",
                size=20 * 1000 ** 3,
                block_size=512,
                tags=[],
                type="physical",
                storage_pool=expanded_devices["root"]["pool"],
                iscsi_target=None,
            ),
        )
        # Only the first NIC is the boot interface.
        self.assertThat(
            discovered_machine.interfaces[0],
            MatchesStructure.byEquality(
                mac_address=expanded_config["volatile.eth0.hwaddr"],
                vid=0,
                tags=[],
                boot=True,
                attach_type=expanded_devices["eth0"]["nictype"],
                attach_name="eth0",
            ),
        )
        self.assertThat(
            discovered_machine.interfaces[1],
            MatchesStructure.byEquality(
                mac_address=expanded_config["volatile.eth1.hwaddr"],
                vid=0,
                tags=[],
                boot=False,
                attach_type=expanded_devices["eth1"]["nictype"],
                attach_name="eth1",
            ),
        )
        self.assertItemsEqual([], discovered_machine.tags)
Ejemplo n.º 10
0
    def discover(self, pod_id: int, context: dict):
        """Discover all Pod host resources.

        :param pod_id: identifier of the Pod to discover.
        :param context: power/connection parameters for the Pod, reused to
            open per-project clients.
        :return: a fully populated `DiscoveredPod`.
        :raises LXDPodError: if the LXD host lacks VM support or has no
            storage pools configured.
        """
        # Connect to the Pod and make sure it is valid.
        client = self._get_client(pod_id, context)
        if not client.has_api_extension("virtual-machines"):
            raise LXDPodError(
                "Please upgrade your LXD host to 3.19+ for virtual machine support."
            )

        self._ensure_project(client)

        # get MACs for host interfaces. "unknown" interfaces are considered too
        # to match ethernets in containers
        networks_state = [
            net.state() for net in client.networks.all()
            if net.type in ("unknown", "physical")
        ]
        # De-duplicate MACs through a set; interfaces without a MAC are
        # skipped.
        mac_addresses = list(
            {state.hwaddr
             for state in networks_state if state.hwaddr})

        environment = client.host_info["environment"]
        # After the region creates the Pod object it will sync LXD commissioning
        # data for all hardware information.
        discovered_pod = DiscoveredPod(
            # client.host_info["environment"]["architectures"] reports all the
            # architectures the host CPU supports, not the architectures LXD
            # supports. On x86_64 LXD reports [x86_64, i686] however LXD does
            # not currently support VMs on i686. The LXD API currently does not
            # have a way to query which architectures are usable for VMs. The
            # safest bet is to just use the kernel_architecture.
            architectures=[
                kernel_to_debian_architecture(
                    environment["kernel_architecture"])
            ],
            name=environment["server_name"],
            version=environment["server_version"],
            mac_addresses=mac_addresses,
            capabilities=[
                Capabilities.COMPOSABLE,
                Capabilities.DYNAMIC_LOCAL_STORAGE,
                Capabilities.OVER_COMMIT,
                Capabilities.STORAGE_POOLS,
            ],
        )

        # Check that we have at least one storage pool.
        # If not, user should be warned that they need to create one.
        storage_pools = client.storage_pools.all()
        if not storage_pools:
            raise LXDPodError(
                "No storage pools exists.  Please create a storage pool in LXD."
            )

        # Discover Storage Pools.
        pools = []
        local_storage = 0
        for storage_pool in storage_pools:
            discovered_storage_pool = self._get_discovered_pod_storage_pool(
                storage_pool)
            local_storage += discovered_storage_pool.storage
            pools.append(discovered_storage_pool)
        discovered_pod.storage_pools = pools
        discovered_pod.local_storage = local_storage

        host_cpu_speed = lxd_cpu_speed(client.resources)

        # Discover VMs.
        projects = [project.name for project in client.projects.all()]
        machines = []
        for project in projects:
            # NOTE(review): the per-project client is not used as a context
            # manager here; confirm whether it needs explicit closing.
            project_cli = self._get_client(pod_id, context, project=project)
            for virtual_machine in project_cli.virtual_machines.all():
                discovered_machine = self._get_discovered_machine(
                    project_cli,
                    virtual_machine,
                    storage_pools=discovered_pod.storage_pools,
                )
                discovered_machine.cpu_speed = host_cpu_speed
                machines.append(discovered_machine)
        discovered_pod.machines = machines

        # Return the DiscoveredPod.
        return discovered_pod