Example 1
File: host.py Project: PFZheng/nova
    def update_status(self):
        """Update the current state of the cluster."""
        capacity, freespace = _get_ds_capacity_and_freespace(self._session,
                                                             self._cluster)

        # Get cpu, memory stats from the cluster
        stats = vm_util.get_stats_from_cluster(self._session, self._cluster)
        about_info = self._session._call_method(vim_util, "get_about_info")
        data = {}
        data["vcpus"] = stats['cpu']['vcpus']
        data["cpu_info"] = {"vendor": stats['cpu']['vendor'],
                            "model": stats['cpu']['model'],
                            "topology": {"cores": stats['cpu']['cores'],
                                         "threads": stats['cpu']['vcpus']}}
        data["disk_total"] = capacity / units.Gi
        data["disk_available"] = freespace / units.Gi
        data["disk_used"] = data["disk_total"] - data["disk_available"]
        data["host_memory_total"] = stats['mem']['total']
        data["host_memory_free"] = stats['mem']['free']
        data["hypervisor_type"] = about_info.name
        data["hypervisor_version"] = utils.convert_version_to_int(
                str(about_info.version))
        data["hypervisor_hostname"] = self._host_name
        data["supported_instances"] = [('i686', 'vmware', 'hvm'),
                                       ('x86_64', 'vmware', 'hvm')]

        self._stats = data
        return data
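
The helper _get_ds_capacity_and_freespace() is not shown on this page; the sketch below is a plausible reconstruction, inferred only from how the call sites use its return value (datastore capacity and free space in bytes) and from the DatastoreNotFound handling visible in Example 4 below. The actual helper may differ between the forks listed here.

# Plausible sketch only: reconstructed from the call sites above, not taken
# from this page. Assumes a Nova source tree where ds_util.get_datastore()
# selects a usable datastore for the given cluster.
from nova import exception
from nova.virt.vmwareapi import ds_util


def _get_ds_capacity_and_freespace(session, cluster=None,
                                   datastore_regex=None):
    try:
        ds = ds_util.get_datastore(session, cluster, datastore_regex)
        return ds.capacity, ds.freespace
    except exception.DatastoreNotFound:
        return 0, 0
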
Example 2
    def update_status(self):
        """Update the current state of the cluster."""
        capacity, freespace = _get_ds_capacity_and_freespace(self._session,
            self._cluster, self._datastore_regex)

        # Get cpu, memory stats from the cluster
        stats = vm_util.get_stats_from_cluster(self._session, self._cluster)
        about_info = self._session._call_method(vim_util, "get_about_info")
        data = {}
        data["vcpus"] = stats['vcpus']
        data["disk_total"] = capacity / units.Gi
        data["disk_available"] = freespace / units.Gi
        data["disk_used"] = data["disk_total"] - data["disk_available"]
        data["host_memory_total"] = stats['mem']['total']
        data["host_memory_free"] = stats['mem']['free']
        data["hypervisor_type"] = about_info.name
        data["hypervisor_version"] = utils.convert_version_to_int(
                str(about_info.version))
        data["hypervisor_hostname"] = self._host_name
        data["supported_instances"] = [
            (arch.I686, hv_type.VMWARE, vm_mode.HVM),
            (arch.X86_64, hv_type.VMWARE, vm_mode.HVM)]

        self._stats = data
        return data
Example 3
    def update_status(self):
        """Update the current state of the cluster."""
        capacity, freespace = _get_ds_capacity_and_freespace(self._session,
                                                             self._cluster)

        # Get cpu, memory stats from the cluster
        stats = vm_util.get_stats_from_cluster(self._session, self._cluster)
        about_info = self._session._call_method(vim_util, "get_about_info")
        data = {}
        data["vcpus"] = stats['cpu']['vcpus']
        data["cpu_info"] = {"vendor": stats['cpu']['vendor'],
                            "model": stats['cpu']['model'],
                            "topology": {"cores": stats['cpu']['cores'],
                                         "threads": stats['cpu']['vcpus']}}
        data["disk_total"] = capacity / units.Gi
        data["disk_available"] = freespace / units.Gi
        data["disk_used"] = data["disk_total"] - data["disk_available"]
        data["host_memory_total"] = stats['mem']['total']
        data["host_memory_free"] = stats['mem']['free']
        data["hypervisor_type"] = about_info.name
        data["hypervisor_version"] = utils.convert_version_to_int(
                str(about_info.version))
        data["hypervisor_hostname"] = self._host_name
        data["supported_instances"] = [
            (arch.I686, hvtype.VMWARE, vm_mode.HVM),
            (arch.X86_64, hvtype.VMWARE, vm_mode.HVM)]

        self._stats = data
        return data
Example 4
    def update_status(self):
        """Update the current state of the cluster."""
        # Get the datastore in the cluster
        try:
            ds = vm_util.get_datastore_ref_and_name(self._session,
                                                    self._cluster)
        except exception.DatastoreNotFound:
            ds = (None, None, 0, 0)

        # Get cpu, memory stats from the cluster
        stats = vm_util.get_stats_from_cluster(self._session, self._cluster)
        about_info = self._session._call_method(vim_util, "get_about_info")
        data = {}
        data["vcpus"] = stats['cpu']['vcpus']
        data["cpu_info"] = {"vendor": stats['cpu']['vendor'],
                            "model": stats['cpu']['model'],
                            "topology": {"cores": stats['cpu']['cores'],
                                         "threads": stats['cpu']['vcpus']}}
        data["disk_total"] = ds[2] / (1024 * 1024 * 1024)
        data["disk_available"] = ds[3] / (1024 * 1024 * 1024)
        data["disk_used"] = data["disk_total"] - data["disk_available"]
        data["host_memory_total"] = stats['mem']['total']
        data["host_memory_free"] = stats['mem']['free']
        data["hypervisor_type"] = about_info.name
        data["hypervisor_version"] = about_info.version
        data["hypervisor_hostname"] = self._host_name
        data["supported_instances"] = [('i686', 'vmware', 'hvm'),
                                       ('x86_64', 'vmware', 'hvm')]

        self._stats = data
        return data
Example 5
    def update_status(self):
        """Update the current state of the cluster."""
        # Get cpu, memory stats, disk from the cluster
        stats = vm_util.get_stats_from_cluster(self._session, self._cluster,
                                               self._datastore_regex)
        about_info = self._session._call_method(vim_util, "get_about_info")
        data = {}
        data["vcpus"] = stats['cpu']['vcpus']
        data["cpu_info"] = {"vendor": stats['cpu']['vendor'],
                            "model": stats['cpu']['model'],
                            "topology": {"cores": stats['cpu']['cores'],
                                         "threads": stats['cpu']['vcpus']}}
        data["disk_total"] = stats['disk']['total']
        data["disk_available"] = stats['disk']['free']
        data["disk_used"] = data["disk_total"] - data["disk_available"]
        data["host_memory_total"] = stats['mem']['total']
        data["host_memory_free"] = stats['mem']['free']
        data["hypervisor_type"] = about_info.name
        data["hypervisor_version"] = utils.convert_version_to_int(
                str(about_info.version))
        data["hypervisor_hostname"] = self._host_name
        data["supported_instances"] = [('i686', 'vmware', 'hvm'),
                                       ('x86_64', 'vmware', 'hvm')]

        self._stats = data
        return data
Example 6
    def update_status(self):
        """Update the current state of the cluster."""
        # Get the datastore in the cluster
        try:
            ds = vm_util.get_datastore_ref_and_name(self._session, self._cluster)
        except exception.DatastoreNotFound:
            ds = (None, None, 0, 0)

        # Get cpu, memory stats from the cluster
        stats = vm_util.get_stats_from_cluster(self._session, self._cluster)
        about_info = self._session._call_method(vim_util, "get_about_info")
        data = {}
        data["vcpus"] = stats["cpu"]["vcpus"]
        data["cpu_info"] = {
            "vendor": stats["cpu"]["vendor"],
            "model": stats["cpu"]["model"],
            "topology": {"cores": stats["cpu"]["cores"], "threads": stats["cpu"]["vcpus"]},
        }
        data["disk_total"] = ds[2] / units.Gi
        data["disk_available"] = ds[3] / units.Gi
        data["disk_used"] = data["disk_total"] - data["disk_available"]
        data["host_memory_total"] = stats["mem"]["total"]
        data["host_memory_free"] = stats["mem"]["free"]
        data["hypervisor_type"] = about_info.name
        data["hypervisor_version"] = utils.convert_version_to_int(str(about_info.version))
        data["hypervisor_hostname"] = self._host_name
        data["supported_instances"] = [("i686", "vmware", "hvm"), ("x86_64", "vmware", "hvm")]

        self._stats = data
        return data
Example 7
    def test_get_stats_from_cluster(self):
        ManagedObjectRefs = [
            fake.ManagedObjectReference("host1", "HostSystem"),
            fake.ManagedObjectReference("host2", "HostSystem")
        ]
        hosts = fake._convert_to_array_of_mor(ManagedObjectRefs)
        respool = fake.ManagedObjectReference("resgroup-11", "ResourcePool")
        prop_dict = {'host': hosts, 'resourcePool': respool}

        hardware = fake.DataObject()
        hardware.numCpuCores = 8
        hardware.numCpuThreads = 16
        hardware.vendor = "Intel"
        hardware.cpuModel = "Intel(R) Xeon(R)"

        runtime_host_1 = fake.DataObject()
        runtime_host_1.connectionState = "connected"
        runtime_host_2 = fake.DataObject()
        runtime_host_2.connectionState = "disconnected"

        prop_list_host_1 = [
            fake.Prop(name="hardware_summary", val=hardware),
            fake.Prop(name="runtime_summary", val=runtime_host_1)
        ]
        prop_list_host_2 = [
            fake.Prop(name="hardware_summary", val=hardware),
            fake.Prop(name="runtime_summary", val=runtime_host_2)
        ]
        fake_objects = fake.FakeRetrieveResult()
        fake_objects.add_object(
            fake.ObjectContent("prop_list_host1", prop_list_host_1))
        fake_objects.add_object(
            fake.ObjectContent("prop_list_host1", prop_list_host_2))

        respool_resource_usage = fake.DataObject()
        respool_resource_usage.maxUsage = 5368709120
        respool_resource_usage.overallUsage = 2147483648
        session = fake_session()

        def fake_call_method(*args):
            if "get_dynamic_properties" in args:
                return prop_dict
            elif "get_properties_for_a_collection_of_objects" in args:
                return fake_objects
            else:
                return respool_resource_usage

        with mock.patch.object(fake_session, '_call_method', fake_call_method):
            result = vm_util.get_stats_from_cluster(session, "cluster1")
            cpu_info = {}
            mem_info = {}
            cpu_info['vcpus'] = 16
            cpu_info['cores'] = 8
            cpu_info['vendor'] = ["Intel"]
            cpu_info['model'] = ["Intel(R) Xeon(R)"]
            mem_info['total'] = 5120
            mem_info['free'] = 3072
            expected_stats = {'cpu': cpu_info, 'mem': mem_info}
            self.assertEqual(expected_stats, result)
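
The expected memory values asserted above follow from a bytes-to-MB conversion of the fake resource pool usage; the exact conversion inside vm_util.get_stats_from_cluster() is assumed here, based only on the asserted numbers.

# Standalone illustration of the asserted memory figures; the variable names
# are illustrative and do not come from the test above.
max_usage_bytes = 5368709120       # respool maxUsage: 5 GiB
overall_usage_bytes = 2147483648   # respool overallUsage: 2 GiB

mem_total_mb = max_usage_bytes // (1024 * 1024)
mem_free_mb = (max_usage_bytes - overall_usage_bytes) // (1024 * 1024)

assert mem_total_mb == 5120
assert mem_free_mb == 3072
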
Example 8
    def test_get_stats_from_cluster(self):
        ManagedObjectRefs = [
            fake.ManagedObjectReference("host1", "HostSystem"),
            fake.ManagedObjectReference("host2", "HostSystem"),
        ]
        hosts = fake._convert_to_array_of_mor(ManagedObjectRefs)
        respool = fake.ManagedObjectReference("resgroup-11", "ResourcePool")
        prop_dict = {"host": hosts, "resourcePool": respool}

        hardware = fake.DataObject()
        hardware.numCpuCores = 8
        hardware.numCpuThreads = 16
        hardware.vendor = "Intel"
        hardware.cpuModel = "Intel(R) Xeon(R)"

        runtime_host_1 = fake.DataObject()
        runtime_host_1.connectionState = "connected"
        runtime_host_2 = fake.DataObject()
        runtime_host_2.connectionState = "disconnected"

        prop_list_host_1 = [
            fake.Prop(name="hardware_summary", val=hardware),
            fake.Prop(name="runtime_summary", val=runtime_host_1),
        ]
        prop_list_host_2 = [
            fake.Prop(name="hardware_summary", val=hardware),
            fake.Prop(name="runtime_summary", val=runtime_host_2),
        ]
        fake_objects = fake.FakeRetrieveResult()
        fake_objects.add_object(fake.ObjectContent("prop_list_host1", prop_list_host_1))
        fake_objects.add_object(fake.ObjectContent("prop_list_host1", prop_list_host_2))

        respool_resource_usage = fake.DataObject()
        respool_resource_usage.maxUsage = 5368709120
        respool_resource_usage.overallUsage = 2147483648
        session = fake_session()

        def fake_call_method(*args):
            if "get_dynamic_properties" in args:
                return prop_dict
            elif "get_properties_for_a_collection_of_objects" in args:
                return fake_objects
            else:
                return respool_resource_usage

        with mock.patch.object(fake_session, "_call_method", fake_call_method):
            result = vm_util.get_stats_from_cluster(session, "cluster1")
            cpu_info = {}
            mem_info = {}
            cpu_info["vcpus"] = 16
            cpu_info["cores"] = 8
            cpu_info["vendor"] = ["Intel"]
            cpu_info["model"] = ["Intel(R) Xeon(R)"]
            mem_info["total"] = 5120
            mem_info["free"] = 3072
            expected_stats = {"cpu": cpu_info, "mem": mem_info}
            self.assertEqual(expected_stats, result)
Example 9
    def update_status(self):
        """Update the current state of the cluster."""
        data = {}
        try:
            capacity, freespace = _get_ds_capacity_and_freespace(
                self._session, self._cluster, self._datastore_regex)

            # Get cpu, memory stats from the cluster
            stats = vm_util.get_stats_from_cluster(self._session,
                                                   self._cluster)
            about_info = self._session._call_method(vim_util, "get_about_info")
        except (vexc.VimConnectionException, vexc.VimAttributeException) as ex:
            # VimAttributeException is thrown when vpxd service is down
            LOG.warning(
                _LW("Failed to connect with %(node)s. "
                    "Error: %(error)s"), {
                        'node': self._host_name,
                        'error': ex
                    })
            self._set_host_enabled(False)
            return data

        data["vcpus"] = stats['vcpus']
        data["disk_total"] = capacity / units.Gi
        data["disk_available"] = freespace / units.Gi
        data["disk_used"] = data["disk_total"] - data["disk_available"]
        data["host_memory_total"] = stats['mem']['total']
        data["host_memory_free"] = stats['mem']['free']
        data["hypervisor_type"] = about_info.name
        data["hypervisor_version"] = versionutils.convert_version_to_int(
            str(about_info.version))
        data["hypervisor_hostname"] = self._host_name
        data["supported_instances"] = [
            (obj_fields.Architecture.I686, obj_fields.HVType.VMWARE,
             obj_fields.VMMode.HVM),
            (obj_fields.Architecture.X86_64, obj_fields.HVType.VMWARE,
             obj_fields.VMMode.HVM)
        ]

        self._stats = data
        if self._auto_service_disabled:
            self._set_host_enabled(True)
        return data
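
hypervisor_version is packed into a single comparable integer via convert_version_to_int(); a small illustration, assuming the base-1000 per-component packing oslo_utils uses for dotted version strings.

# Illustration only: under oslo_utils' base-1000 packing of each dotted
# component, a vCenter "About" version such as "6.5.0" is expected to yield
# 6 * 1000 * 1000 + 5 * 1000 + 0 = 6005000.
from oslo_utils import versionutils

print(versionutils.convert_version_to_int("6.5.0"))  # expected: 6005000
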
Example 10
File: host.py Project: amadev/nova
    def update_status(self):
        """Update the current state of the cluster."""
        data = {}
        try:
            capacity, freespace = _get_ds_capacity_and_freespace(self._session,
                self._cluster, self._datastore_regex)

            # Get cpu, memory stats from the cluster
            stats = vm_util.get_stats_from_cluster(self._session,
                                                   self._cluster)
            about_info = self._session._call_method(vim_util, "get_about_info")
        except (vexc.VimConnectionException, vexc.VimAttributeException) as ex:
            # VimAttributeException is thrown when vpxd service is down
            LOG.warning(_LW("Failed to connect with %(node)s. "
                            "Error: %(error)s"),
                        {'node': self._host_name, 'error': ex})
            self._set_host_enabled(False)
            return data

        data["vcpus"] = stats['vcpus']
        data["disk_total"] = capacity / units.Gi
        data["disk_available"] = freespace / units.Gi
        data["disk_used"] = data["disk_total"] - data["disk_available"]
        data["host_memory_total"] = stats['mem']['total']
        data["host_memory_free"] = stats['mem']['free']
        data["hypervisor_type"] = about_info.name
        data["hypervisor_version"] = versionutils.convert_version_to_int(
                str(about_info.version))
        data["hypervisor_hostname"] = self._host_name
        data["supported_instances"] = [
            (obj_fields.Architecture.I686,
             obj_fields.HVType.VMWARE,
             obj_fields.VMMode.HVM),
            (obj_fields.Architecture.X86_64,
             obj_fields.HVType.VMWARE,
             obj_fields.VMMode.HVM)]

        self._stats = data
        if self._auto_service_disabled:
            self._set_host_enabled(True)
        return data
Example 11
    def get_inventory(self, nodename):
        """Return a dict, keyed by resource class, of inventory information
        for the supplied node.
        """
        stats = vm_util.get_stats_from_cluster(
            self._session, self._cluster_ref)
        datastores = ds_util.get_available_datastores(
            self._session, self._cluster_ref, self._datastore_regex)
        total_disk_capacity = sum([ds.capacity for ds in datastores])
        max_free_space = max([ds.freespace for ds in datastores])
        reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb(
            CONF.reserved_host_disk_mb)
        result = {
            obj_fields.ResourceClass.VCPU: {
                'total': stats['cpu']['vcpus'],
                'reserved': CONF.reserved_host_cpus,
                'min_unit': 1,
                'max_unit': stats['cpu']['max_vcpus_per_host'],
                'step_size': 1,
            },
            obj_fields.ResourceClass.MEMORY_MB: {
                'total': stats['mem']['total'],
                'reserved': CONF.reserved_host_memory_mb,
                'min_unit': 1,
                'max_unit': stats['mem']['max_mem_mb_per_host'],
                'step_size': 1,
            },
            obj_fields.ResourceClass.DISK_GB: {
                'total': total_disk_capacity // units.Gi,
                'reserved': reserved_disk_gb,
                'min_unit': 1,
                'max_unit': max_free_space // units.Gi,
                'step_size': 1,
            },
        }
        return result
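
For reference, the returned dict maps placement resource classes to inventory records. An illustrative result for a hypothetical small cluster is shown below; the numbers are made up, and the string keys are what the obj_fields.ResourceClass constants resolve to.

# Hypothetical example of the structure returned by get_inventory() above;
# all figures are invented for illustration.
example_inventory = {
    'VCPU': {'total': 32, 'reserved': 0,
             'min_unit': 1, 'max_unit': 16, 'step_size': 1},
    'MEMORY_MB': {'total': 131072, 'reserved': 512,
                  'min_unit': 1, 'max_unit': 65536, 'step_size': 1},
    'DISK_GB': {'total': 2048, 'reserved': 1,
                'min_unit': 1, 'max_unit': 1024, 'step_size': 1},
}
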
Example 12
    def get_inventory(self, nodename):
        """Return a dict, keyed by resource class, of inventory information
        for the supplied node.
        """
        stats = vm_util.get_stats_from_cluster(
            self._session, self._cluster_ref)
        datastores = ds_util.get_available_datastores(
            self._session, self._cluster_ref, self._datastore_regex)
        total_disk_capacity = sum([ds.capacity for ds in datastores])
        max_free_space = max([ds.freespace for ds in datastores])
        reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb(
            CONF.reserved_host_disk_mb)
        result = {
            fields.ResourceClass.VCPU: {
                'total': stats['cpu']['vcpus'],
                'reserved': CONF.reserved_host_cpus,
                'min_unit': 1,
                'max_unit': stats['cpu']['max_vcpus_per_host'],
                'step_size': 1,
            },
            fields.ResourceClass.MEMORY_MB: {
                'total': stats['mem']['total'],
                'reserved': CONF.reserved_host_memory_mb,
                'min_unit': 1,
                'max_unit': stats['mem']['max_mem_mb_per_host'],
                'step_size': 1,
            },
            fields.ResourceClass.DISK_GB: {
                'total': total_disk_capacity // units.Gi,
                'reserved': reserved_disk_gb,
                'min_unit': 1,
                'max_unit': max_free_space // units.Gi,
                'step_size': 1,
            },
        }
        return result
Example 13
    def _test_get_stats_from_cluster(self, connection_state="connected",
                                     maintenance_mode=False):
        ManagedObjectRefs = [fake.ManagedObjectReference("host1",
                                                         "HostSystem"),
                             fake.ManagedObjectReference("host2",
                                                         "HostSystem")]
        hosts = fake._convert_to_array_of_mor(ManagedObjectRefs)
        respool = fake.ManagedObjectReference("resgroup-11", "ResourcePool")
        prop_dict = {'host': hosts, 'resourcePool': respool}

        hardware = fake.DataObject()
        hardware.numCpuCores = 8
        hardware.numCpuThreads = 16
        hardware.vendor = "Intel"
        hardware.cpuModel = "Intel(R) Xeon(R)"

        runtime_host_1 = fake.DataObject()
        runtime_host_1.connectionState = "connected"
        runtime_host_1.inMaintenanceMode = False

        runtime_host_2 = fake.DataObject()
        runtime_host_2.connectionState = connection_state
        runtime_host_2.inMaintenanceMode = maintenance_mode

        prop_list_host_1 = [fake.Prop(name="hardware_summary", val=hardware),
                            fake.Prop(name="runtime_summary",
                                      val=runtime_host_1)]
        prop_list_host_2 = [fake.Prop(name="hardware_summary", val=hardware),
                            fake.Prop(name="runtime_summary",
                                      val=runtime_host_2)]

        fake_objects = fake.FakeRetrieveResult()
        fake_objects.add_object(fake.ObjectContent("prop_list_host1",
                                                   prop_list_host_1))
        fake_objects.add_object(fake.ObjectContent("prop_list_host1",
                                                   prop_list_host_2))

        respool_resource_usage = fake.DataObject()
        respool_resource_usage.maxUsage = 5368709120
        respool_resource_usage.overallUsage = 2147483648

        def fake_call_method(*args):
            if "get_dynamic_properties" in args:
                return prop_dict
            elif "get_properties_for_a_collection_of_objects" in args:
                return fake_objects
            else:
                return respool_resource_usage

        session = fake.FakeSession()
        with mock.patch.object(session, '_call_method', fake_call_method):
            result = vm_util.get_stats_from_cluster(session, "cluster1")
            cpu_info = {}
            mem_info = {}
            if connection_state == "connected" and not maintenance_mode:
                cpu_info['vcpus'] = 32
                cpu_info['cores'] = 16
                cpu_info['vendor'] = ["Intel", "Intel"]
                cpu_info['model'] = ["Intel(R) Xeon(R)",
                                     "Intel(R) Xeon(R)"]
            else:
                cpu_info['vcpus'] = 16
                cpu_info['cores'] = 8
                cpu_info['vendor'] = ["Intel"]
                cpu_info['model'] = ["Intel(R) Xeon(R)"]
            mem_info['total'] = 5120
            mem_info['free'] = 3072
            expected_stats = {'cpu': cpu_info, 'mem': mem_info}
            self.assertEqual(expected_stats, result)
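
This variant is a parametrized helper rather than a test itself; hypothetical wrappers (names invented here, not taken from the project) would exercise its branches along these lines:

    # Hypothetical wrapper tests, names invented for illustration only.
    def test_get_stats_from_cluster_hosts_connected(self):
        self._test_get_stats_from_cluster()

    def test_get_stats_from_cluster_host_disconnected(self):
        self._test_get_stats_from_cluster(connection_state="disconnected")

    def test_get_stats_from_cluster_host_in_maintenance(self):
        self._test_get_stats_from_cluster(maintenance_mode=True)
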
Example 14
    def update_provider_tree(self, provider_tree, nodename, allocations=None):
        """Update a ProviderTree object with current resource provider,
        inventory information and CPU traits.

        :param nova.compute.provider_tree.ProviderTree provider_tree:
            A nova.compute.provider_tree.ProviderTree object representing all
            the providers in the tree associated with the compute node, and any
            sharing providers (those with the ``MISC_SHARES_VIA_AGGREGATE``
            trait) associated via aggregate with any of those providers (but
            not *their* tree- or aggregate-associated providers), as currently
            known by placement.
        :param nodename:
            String name of the compute node (i.e.
            ComputeNode.hypervisor_hostname) for which the caller is requesting
            updated provider information.
        :param allocations:
            Dict of allocation data of the form:
              { $CONSUMER_UUID: {
                    # The shape of each "allocations" dict below is identical
                    # to the return from GET /allocations/{consumer_uuid}
                    "allocations": {
                        $RP_UUID: {
                            "generation": $RP_GEN,
                            "resources": {
                                $RESOURCE_CLASS: $AMOUNT,
                                ...
                            },
                        },
                        ...
                    },
                    "project_id": $PROJ_ID,
                    "user_id": $USER_ID,
                    "consumer_generation": $CONSUMER_GEN,
                },
                ...
              }
            If None, and the method determines that any inventory needs to be
            moved (from one provider to another and/or to a different resource
            class), the ReshapeNeeded exception must be raised. Otherwise, this
            dict must be edited in place to indicate the desired final state of
            allocations.
        :raises ReshapeNeeded: If allocations is None and any inventory needs
            to be moved from one provider to another and/or to a different
            resource class. At this time the VMware driver does not reshape.
        :raises: ReshapeFailed if the requested tree reshape fails for
            whatever reason.
        """
        # NOTE(cdent): This is a side-effecty method, we are changing the
        # provider tree in place (on purpose).
        inv = provider_tree.data(nodename).inventory
        ratios = self._get_allocation_ratios(inv)
        stats = vm_util.get_stats_from_cluster(self._session,
                                               self._cluster_ref)
        datastores = ds_util.get_available_datastores(self._session,
                                                      self._cluster_ref,
                                                      self._datastore_regex)
        total_disk_capacity = sum([ds.capacity for ds in datastores])
        max_free_space = max([ds.freespace for ds in datastores])
        reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb(
            CONF.reserved_host_disk_mb)
        result = {
            orc.VCPU: {
                'total': stats['cpu']['vcpus'],
                'reserved': CONF.reserved_host_cpus,
                'min_unit': 1,
                'max_unit': stats['cpu']['max_vcpus_per_host'],
                'step_size': 1,
                'allocation_ratio': ratios[orc.VCPU],
            },
            orc.MEMORY_MB: {
                'total': stats['mem']['total'],
                'reserved': CONF.reserved_host_memory_mb,
                'min_unit': 1,
                'max_unit': stats['mem']['max_mem_mb_per_host'],
                'step_size': 1,
                'allocation_ratio': ratios[orc.MEMORY_MB],
            },
        }

        # If a sharing DISK_GB provider exists in the provider tree, then our
        # storage is shared, and we should not report the DISK_GB inventory in
        # the compute node provider.
        # TODO(cdent): We don't do this yet, in part because of the issues
        # in bug #1784020, but also because we can represent all datastores
        # as shared providers and should do once update_provider_tree is
        # working well.
        if provider_tree.has_sharing_provider(orc.DISK_GB):
            LOG.debug('Ignoring sharing provider - see bug #1784020')
        result[orc.DISK_GB] = {
            'total': total_disk_capacity // units.Gi,
            'reserved': reserved_disk_gb,
            'min_unit': 1,
            'max_unit': max_free_space // units.Gi,
            'step_size': 1,
            'allocation_ratio': ratios[orc.DISK_GB],
        }

        provider_tree.update_inventory(nodename, result)
Example 15
File: driver.py Project: mahak/nova
    def update_provider_tree(self, provider_tree, nodename, allocations=None):
        """Update a ProviderTree object with current resource provider,
        inventory information and CPU traits.

        :param nova.compute.provider_tree.ProviderTree provider_tree:
            A nova.compute.provider_tree.ProviderTree object representing all
            the providers in the tree associated with the compute node, and any
            sharing providers (those with the ``MISC_SHARES_VIA_AGGREGATE``
            trait) associated via aggregate with any of those providers (but
            not *their* tree- or aggregate-associated providers), as currently
            known by placement.
        :param nodename:
            String name of the compute node (i.e.
            ComputeNode.hypervisor_hostname) for which the caller is requesting
            updated provider information.
        :param allocations:
            Dict of allocation data of the form:
              { $CONSUMER_UUID: {
                    # The shape of each "allocations" dict below is identical
                    # to the return from GET /allocations/{consumer_uuid}
                    "allocations": {
                        $RP_UUID: {
                            "generation": $RP_GEN,
                            "resources": {
                                $RESOURCE_CLASS: $AMOUNT,
                                ...
                            },
                        },
                        ...
                    },
                    "project_id": $PROJ_ID,
                    "user_id": $USER_ID,
                    "consumer_generation": $CONSUMER_GEN,
                },
                ...
              }
            If None, and the method determines that any inventory needs to be
            moved (from one provider to another and/or to a different resource
            class), the ReshapeNeeded exception must be raised. Otherwise, this
            dict must be edited in place to indicate the desired final state of
            allocations.
        :raises ReshapeNeeded: If allocations is None and any inventory needs
            to be moved from one provider to another and/or to a different
            resource class. At this time the VMware driver does not reshape.
        :raises: ReshapeFailed if the requested tree reshape fails for
            whatever reason.
        """
        # NOTE(cdent): This is a side-effecty method, we are changing the
        # provider tree in place (on purpose).
        inv = provider_tree.data(nodename).inventory
        ratios = self._get_allocation_ratios(inv)
        stats = vm_util.get_stats_from_cluster(self._session,
                                               self._cluster_ref)
        datastores = ds_util.get_available_datastores(self._session,
                                                      self._cluster_ref,
                                                      self._datastore_regex)
        total_disk_capacity = sum([ds.capacity for ds in datastores])
        max_free_space = max([ds.freespace for ds in datastores])
        reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb(
            CONF.reserved_host_disk_mb)
        result = {
            orc.VCPU: {
                'total': stats['cpu']['vcpus'],
                'reserved': CONF.reserved_host_cpus,
                'min_unit': 1,
                'max_unit': stats['cpu']['max_vcpus_per_host'],
                'step_size': 1,
                'allocation_ratio': ratios[orc.VCPU],
            },
            orc.MEMORY_MB: {
                'total': stats['mem']['total'],
                'reserved': CONF.reserved_host_memory_mb,
                'min_unit': 1,
                'max_unit': stats['mem']['max_mem_mb_per_host'],
                'step_size': 1,
                'allocation_ratio': ratios[orc.MEMORY_MB],
            },
        }

        # If a sharing DISK_GB provider exists in the provider tree, then our
        # storage is shared, and we should not report the DISK_GB inventory in
        # the compute node provider.
        # TODO(cdent): We don't do this yet, in part because of the issues
        # in bug #1784020, but also because we can represent all datastores
        # as shared providers and should do once update_provider_tree is
        # working well.
        if provider_tree.has_sharing_provider(orc.DISK_GB):
            LOG.debug('Ignoring sharing provider - see bug #1784020')
        result[orc.DISK_GB] = {
            'total': total_disk_capacity // units.Gi,
            'reserved': reserved_disk_gb,
            'min_unit': 1,
            'max_unit': max_free_space // units.Gi,
            'step_size': 1,
            'allocation_ratio': ratios[orc.DISK_GB],
        }

        provider_tree.update_inventory(nodename, result)
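
The DISK_GB figures above use floor division by units.Gi, so partial gibibytes are dropped; a quick standalone illustration follows (the capacity value is made up).

# oslo_utils' units.Gi is 1024 ** 3; floor division reports whole GiB only.
from oslo_utils import units

total_disk_capacity = 2 * units.Ti + 512 * units.Gi + 123456789  # hypothetical
print(total_disk_capacity // units.Gi)  # 2560 (the partial GiB is dropped)
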