Example 1
    def test_resize_instance_with_disk_config_disabled(self):
        self.resize_called = False

        def resize_mock(*args, **kwargs):
            self.assertNotIn('os-disk-config:disk_config', kwargs)
            self.assertNotIn('auto_disk_config', kwargs)
            self.resize_called = True

        self.stubs.Set(compute_api.API, 'resize', resize_mock)

        image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        image_href = 'http://localhost/v3/os-images/%s' % image_uuid
        body = {
            'resize': {
                "flavor_ref": "2",
                'os-disk-config:disk_config': 'MANUAL',
            },
        }

        req = fakes.HTTPRequestV3.blank(
            '/v3/servers/%s/action' % AUTO_INSTANCE_UUID)
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.no_disk_config_controller._action_resize(
            req,
            AUTO_INSTANCE_UUID,
            body)
        self.assertTrue(self.resize_called)
Example 2
    def test_rebuild_instance_with_disk_config_disabled(self):
        info = dict(image_href_in_call=None)

        def rebuild(self2, context, instance, image_href, *args, **kwargs):
            self.assertNotIn('os-disk-config:disk_config', kwargs)
            self.assertNotIn('auto_disk_config', kwargs)
            info['image_href_in_call'] = image_href

        self.stubs.Set(compute_api.API, 'rebuild', rebuild)
        image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        body = {
            'rebuild': {
                'image_ref': image_uuid,
                'os-disk-config:disk_config': 'MANUAL',
            },
        }

        req = fakes.HTTPRequestV3.blank(
            '/v3/servers/%s/action' % AUTO_INSTANCE_UUID)
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        res = self.no_disk_config_controller._action_rebuild(
            req,
            AUTO_INSTANCE_UUID,
            body).obj
        self.assertEqual(info['image_href_in_call'], image_uuid)
Example 3
    def test_invalid_metadata_items_on_update_item(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        data = {"metadata": {}}
        for num in range(FLAGS.quota_metadata_items + 1):
            data['metadata']['key%i' % num] = "blah"
        req = fakes.HTTPRequest.blank(self.url)
        req.method = 'PUT'
        req.body = jsonutils.dumps(data)
        req.headers["content-type"] = "application/json"

        # test for long key
        data = {"metadata": {"a" * 260: "value1"}}
        req.body = jsonutils.dumps(data)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update_all, req, self.uuid, data)

        # test for long value
        data = {"metadata": {"key": "v" * 260}}
        req.body = jsonutils.dumps(data)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update_all, req, self.uuid, data)

        # test for empty key.
        data = {"metadata": {"": "value1"}}
        req.body = jsonutils.dumps(data)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update_all, req, self.uuid, data)
Example 4
    def test_create_server_detect_from_image(self):
        """If user doesn't pass in diskConfig for server, use image metadata
        to specify AUTO or MANUAL.
        """
        req = fakes.HTTPRequest.blank('/fake/servers')
        req.method = 'POST'
        req.content_type = 'application/json'
        body = {'server': {
                  'name': 'server_test',
                  'imageRef': 'a440c04b-79fa-479c-bed1-0b816eaec379',
                  'flavorRef': '1',
               }}

        req.body = jsonutils.dumps(body)
        res = req.get_response(self.app)
        server_dict = jsonutils.loads(res.body)['server']
        self.assertDiskConfig(server_dict, 'MANUAL')

        req = fakes.HTTPRequest.blank('/fake/servers')
        req.method = 'POST'
        req.content_type = 'application/json'
        body = {'server': {
                  'name': 'server_test',
                  'imageRef': '70a599e0-31e7-49b7-b260-868f441e862b',
                  'flavorRef': '1',
               }}

        req.body = jsonutils.dumps(body)
        res = req.get_response(self.app)
        server_dict = jsonutils.loads(res.body)['server']
        self.assertDiskConfig(server_dict, 'AUTO')
Example 5
    def _update_available_resource(self, context, resources):
        """
        We have already get the info needed from FusionCompute, so nothing
        to do here.
        :param context:
        :param resources:
        :return:
        """

        resources['free_ram_mb'] = (resources['memory_mb'] -
                                    resources['memory_mb_used'])
        resources['free_disk_gb'] = (resources['local_gb'] -
                                     resources['local_gb_used'])

        resources['current_workload'] = 0
        resources['pci_stats'] = jsonutils.dumps([])

        metrics = self._get_host_metrics(context, self.nodename)
        resources['metrics'] = jsonutils.dumps(metrics)

        # Reset values for extended resources
        self.ext_resources_handler.reset_resources(resources, self.driver)

        self._report_final_resource_view(resources)
        self._sync_compute_node(context, resources)
Example 6
    def authorize_console(self, context, token, console_type, host, port,
                          internal_access_path, instance_uuid):

        token_dict = {'token': token,
                      'instance_uuid': instance_uuid,
                      'console_type': console_type,
                      'host': host,
                      'port': port,
                      'internal_access_path': internal_access_path,
                      'last_activity_at': time.time()}
        data = jsonutils.dumps(token_dict)

        # Log a warning if the token is not cached successfully, because the
        # failure will make the console for the instance unusable.
        if not self.mc.set(token.encode('UTF-8'),
                           data, CONF.console_token_ttl):
            LOG.warning(_LW("Token: %(token)s failed to save into memcached."),
                            {'token': token})
        tokens = self._get_tokens_for_instance(instance_uuid)
        # Remove the expired tokens from cache.
        for tok in tokens:
            token_str = self.mc.get(tok.encode('UTF-8'))
            if not token_str:
                tokens.remove(tok)
        tokens.append(token)
        if not self.mc.set(instance_uuid.encode('UTF-8'),
                           jsonutils.dumps(tokens)):
            LOG.warning(_LW("Instance: %(instance_uuid)s failed to save "
                            "into memcached"),
                        {'instance_uuid': instance_uuid})

        LOG.audit(_("Received Token: %(token)s, %(token_dict)s"),
                  {'token': token, 'token_dict': token_dict})
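A minimal lookup-side sketch of how the cached blob is meant to round-trip. The check_token name and signature are assumptions, not taken from the snippet above, and it relies on the same self.mc client and jsonutils import as the surrounding class; only the dumps/loads symmetry is the point.

    def check_token(self, context, token):
        # The value stored by authorize_console() is a JSON string, so it is
        # decoded with jsonutils.loads before any validation happens.
        token_str = self.mc.get(token.encode('UTF-8'))
        if not token_str:
            return None
        return jsonutils.loads(token_str)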
Example 7
    def _request(self, method, path, data=None, headers=None, **kwargs):
        """
        send request msg
        :param method:
        :param path:
        :param data:
        :param headers:
        :param kwargs:
        :return:
        """

        url = self._to_url(path)

        if not data:
            data = jsonutils.dumps({})
        elif isinstance(data, dict) or isinstance(data, list):
            data = jsonutils.dumps(data)

        try:
            data_for_log = copy.deepcopy(jsonutils.loads(data))
            utils.drop_password_key(data_for_log)
            LOG.debug(_('request: %s %s %s'), method, url,
                jsonutils.dumps(data_for_log))
        except Exception:
            LOG.debug(_('request: %s %s'), method, url)

        rsp = requests.request(method, url, data=data, headers=headers,
                               verify=False, **kwargs)
        return rsp
Example 8
    def _update_usage_from_instance(self, resources, instance):
        """Update usage for a single instance."""

        uuid = instance['uuid']
        is_new_instance = uuid not in self.tracked_instances
        is_deleted_instance = instance['vm_state'] == vm_states.DELETED

        if is_new_instance:
            self.tracked_instances[uuid] = obj_base.obj_to_primitive(instance)
            sign = 1

        if is_deleted_instance:
            self.tracked_instances.pop(uuid)
            sign = -1

        self.stats.update_stats_for_instance(instance)

        if self.pci_tracker:
            self.pci_tracker.update_pci_for_instance(instance)

        # if it's a new or deleted instance:
        if is_new_instance or is_deleted_instance:
            # new instance, update compute node resource usage:
            self._update_usage(resources, instance, sign=sign)

        resources['current_workload'] = self.stats.calculate_workload()
        resources['stats'] = jsonutils.dumps(self.stats)
        if self.pci_tracker:
            resources['pci_stats'] = jsonutils.dumps(self.pci_tracker.stats)
        else:
            resources['pci_stats'] = jsonutils.dumps([])
Example 9
    def get_available_resource(self, nodename):
        """Retrieve resource information.

        This method is called when nova-compute launches, and
        as part of a periodic task that records the results in the DB.

        :returns: dictionary describing resources

        """
        host_stats = self.get_host_stats(refresh=True)

        # Updating host information
        dic = {'vcpus': host_stats["vcpus"],
               'memory_mb': host_stats['host_memory_total'],
               'local_gb': host_stats['disk_total'],
               'vcpus_used': 0,
               'memory_mb_used': host_stats['host_memory_total'] -
                                 host_stats['host_memory_free'],
               'local_gb_used': host_stats['disk_used'],
               'hypervisor_type': host_stats['hypervisor_type'],
               'hypervisor_version': host_stats['hypervisor_version'],
               'hypervisor_hostname': host_stats['hypervisor_hostname'],
               'cpu_info': jsonutils.dumps(host_stats['cpu_info']),
               'supported_instances': jsonutils.dumps(
                   host_stats['supported_instances']),
               }

        return dic
Example 10
    def test_rebuild_instance_with_disk_config(self):
        info = dict(image_href_in_call=None)

        def rebuild(self2, context, instance, image_href, *args, **kwargs):
            self.assertIn('auto_disk_config', kwargs)
            self.assertFalse(kwargs['auto_disk_config'])
            info['image_href_in_call'] = image_href

        self.stubs.Set(compute_api.API, 'rebuild', rebuild)

        image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        image_href = 'http://localhost/v3/os-images/%s' % image_uuid
        body = {
            'rebuild': {
                'image_ref': image_uuid,
                'os-disk-config:disk_config': 'MANUAL',
            },
        }

        req = fakes.HTTPRequestV3.blank(
            '/v3/servers/%s/action' % AUTO_INSTANCE_UUID)
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        res = req.get_response(self.app)
        self.assertEqual(info['image_href_in_call'], image_uuid)
Example 11
    def _node_resource(self, node):
        vcpus_used = 0
        memory_mb_used = 0
        local_gb_used = 0

        vcpus = node['cpus']
        memory_mb = node['memory_mb']
        local_gb = node['local_gb']
        if node['instance_uuid']:
            vcpus_used = node['cpus']
            memory_mb_used = node['memory_mb']
            local_gb_used = node['local_gb']

        dic = {'vcpus': vcpus,
               'memory_mb': memory_mb,
               'local_gb': local_gb,
               'vcpus_used': vcpus_used,
               'memory_mb_used': memory_mb_used,
               'local_gb_used': local_gb_used,
               'hypervisor_type': self.get_hypervisor_type(),
               'hypervisor_version': self.get_hypervisor_version(),
               'hypervisor_hostname': str(node['uuid']),
               'cpu_info': 'baremetal cpu',
               'supported_instances':
                        jsonutils.dumps(self.supported_instances),
               'stats': jsonutils.dumps(self.extra_specs)
               }
        return dic
Example 12
    def test_snapshot_create_force(self):
        snapshot = {"volume_id": 12,
                "force": True,
                "display_name": "Snapshot Test Name",
                "display_description": "Snapshot Test Desc"}
        body = dict(snapshot=snapshot)
        req = webob.Request.blank('/v2/fake/os-snapshots')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'

        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)

        resp_dict = jsonutils.loads(resp.body)
        self.assertTrue('snapshot' in resp_dict)
        self.assertEqual(resp_dict['snapshot']['displayName'],
                        snapshot['display_name'])
        self.assertEqual(resp_dict['snapshot']['displayDescription'],
                        snapshot['display_description'])

        # Test invalid force parameter
        snapshot = {"volume_id": 12,
                "force": '**&&^^%%$$##@@'}
        body = dict(snapshot=snapshot)
        req = webob.Request.blank('/v2/fake/os-snapshots')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'

        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 400)
Example 13
    def test_invalid_metadata_items_on_update_item(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        data = {"metadata": {}}
        for num in range(CONF.quota_metadata_items + 1):
            data['metadata']['key%i' % num] = "blah"
        req = self._get_request()
        req.method = 'PUT'
        req.body = jsonutils.dumps(data)
        req.headers["content-type"] = "application/json"

        # test for long key
        data = {"metadata": {"a" * 260: "value1"}}
        req.body = jsonutils.dumps(data)
        self.assertRaises(self.validation_ex_large,
                          self.controller.update_all, req, self.uuid,
                          body=data)

        # test for long value
        data = {"metadata": {"key": "v" * 260}}
        req.body = jsonutils.dumps(data)
        self.assertRaises(self.validation_ex_large,
                          self.controller.update_all, req, self.uuid,
                          body=data)

        # test for empty key.
        data = {"metadata": {"": "value1"}}
        req.body = jsonutils.dumps(data)
        self.assertRaises(self.validation_ex,
                          self.controller.update_all, req, self.uuid,
                          body=data)
Example 14
 def host_call_plugin(self, _1, _2, plugin, method, _5):
     if (plugin, method) == ("agent", "version"):
         return as_json(returncode="0", message="1.0")
     elif (plugin, method) == ("agent", "key_init"):
         return as_json(returncode="D0", message="1")
     elif (plugin, method) == ("agent", "password"):
         return as_json(returncode="0", message="success")
     elif (plugin, method) == ("agent", "resetnetwork"):
         return as_json(returncode="0", message="success")
     elif (plugin, method) == ("glance", "upload_vhd"):
         return ""
     elif (plugin, method) == ("kernel", "copy_vdi"):
         return ""
     elif (plugin, method) == ("kernel", "create_kernel_ramdisk"):
         return ""
     elif (plugin, method) == ("kernel", "remove_kernel_ramdisk"):
         return ""
     elif (plugin, method) == ("migration", "move_vhds_into_sr"):
         return ""
     elif (plugin, method) == ("migration", "transfer_vhd"):
         return ""
     elif (plugin, method) == ("xenhost", "host_data"):
         return jsonutils.dumps({"host_memory": {"total": 10, "overhead": 20, "free": 30, "free-computed": 40}})
     elif plugin == "xenhost" and method in ["host_reboot", "host_startup", "host_shutdown"]:
         return jsonutils.dumps({"power_action": method[5:]})
     elif (plugin, method) == ("xenhost", "set_host_enabled"):
         enabled = "enabled" if _5.get("enabled") == "true" else "disabled"
         return jsonutils.dumps({"status": enabled})
     elif (plugin, method) == ("xenhost", "host_uptime"):
         return jsonutils.dumps({"uptime": "fake uptime"})
     else:
         raise Exception("No simulation in host_call_plugin for %s,%s" % (plugin, method))
Example 15
    def get_available_resource(self, nodename):
        (total_mem_mb,
         free_mem_mb,
         used_mem_mb) = self._get_host_memory_info()

        (total_hdd_gb,
         free_hdd_gb,
         used_hdd_gb) = self._get_local_hdd_info_gb()

        # TODO(alexpilotti): add CPU info
        cpu_info = []
        vcpus = utils.get_cpu_count()

        dic = {'vcpus': vcpus,
               'memory_mb': total_mem_mb,
               'memory_mb_used': used_mem_mb,
               'local_gb': total_hdd_gb,
               'local_gb_used': used_hdd_gb,
               'hypervisor_type': "vix",
               'hypervisor_version': self._get_hypervisor_version(),
               'hypervisor_hostname': platform.node(),
               'vcpus_used': 0,
               'cpu_info': jsonutils.dumps(cpu_info),
               'supported_instances': jsonutils.dumps([('i686', 'vix', 'hvm'),
                                                       ('x86_64', 'vix',
                                                        'hvm')])
               }

        return dic
Example 16
    def test_update_instance_with_disk_config(self):
        self.update_called = False

        def update(self2, update_dict):
            self.assertIn("auto_disk_config", update_dict)
            self.assertFalse(update_dict["auto_disk_config"])
            self.update_called = True

        def cache_db_instance(*arg, **kwargs):
            pass

        def save(self2, context, expected_task_state=None):
            pass

        self.stubs.Set(nova.objects.instance.Instance, "save", save)
        self.stubs.Set(nova.api.openstack.wsgi.Request, "cache_db_instance", cache_db_instance)
        self.stubs.Set(nova.objects.instance.Instance, "update", update)

        image_uuid = "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6"
        image_href = "http://localhost/v3/os-images/%s" % image_uuid
        body = {"server": {"name": "update_test", "os-disk-config:disk_config": "MANUAL"}}

        req = fakes.HTTPRequestV3.blank("/v3/servers/%s" % AUTO_INSTANCE_UUID)
        req.method = "PUT"
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        res = req.get_response(self.app)
        self.assertTrue(self.update_called)
Example 17
    def authorize_console(self, context, token, console_type, host, port,
                          internal_access_path, instance_uuid=None):

        token_dict = {'token': token,
                      'instance_uuid': instance_uuid,
                      'console_type': console_type,
                      'host': host,
                      'port': port,
                      'internal_access_path': internal_access_path,
                      'last_activity_at': time.time()}
        data = jsonutils.dumps(token_dict)
        self.mc.set(token.encode('UTF-8'), data, CONF.console_token_ttl)
        if instance_uuid is not None:
            tokens = self._get_tokens_for_instance(instance_uuid)
            # Remove the expired tokens from cache.
            for tok in tokens:
                token_str = self.mc.get(tok.encode('UTF-8'))
                if not token_str:
                    tokens.remove(tok)
            tokens.append(token)
            self.mc.set(instance_uuid.encode('UTF-8'),
                        jsonutils.dumps(tokens))

        LOG.audit(_("Received Token: %(token)s, %(token_dict)s"),
                  {'token': token, 'token_dict': token_dict})
Example 18
    def test_create_server_detect_from_image(self):
        """If user doesn't pass in diskConfig for server, use image metadata
        to specify AUTO or MANUAL.
        """
        req = fakes.HTTPRequestV3.blank("/v3/servers")
        req.method = "POST"
        req.content_type = "application/json"
        body = {
            "server": {"name": "server_test", "image_ref": "a440c04b-79fa-479c-bed1-0b816eaec379", "flavor_ref": "1"}
        }

        req.body = jsonutils.dumps(body)
        res = req.get_response(self.app)
        server_dict = jsonutils.loads(res.body)["server"]
        self.assertDiskConfig(server_dict, "MANUAL")

        req = fakes.HTTPRequestV3.blank("/v3/servers")
        req.method = "POST"
        req.content_type = "application/json"
        body = {
            "server": {"name": "server_test", "image_ref": "70a599e0-31e7-49b7-b260-868f441e862b", "flavor_ref": "1"}
        }

        req.body = jsonutils.dumps(body)
        res = req.get_response(self.app)
        server_dict = jsonutils.loads(res.body)["server"]
        self.assertDiskConfig(server_dict, "AUTO")
Example 19
    def get_available_resource(self):
        """Retrieve resource info.

        This method is called when nova-compute launches, and
        as part of a periodic task.

        :returns: dictionary describing resources

        """
        LOG.debug(_("get_available_resource called"))

        (total_mem_mb, free_mem_mb, used_mem_mb) = self._get_memory_info()

        (total_hdd_gb, free_hdd_gb, used_hdd_gb) = self._get_local_hdd_info_gb()

        cpu_info = self._get_cpu_info()
        cpu_topology = cpu_info["topology"]
        vcpus = cpu_topology["sockets"] * cpu_topology["cores"] * cpu_topology["threads"]

        dic = {
            "vcpus": vcpus,
            "memory_mb": total_mem_mb,
            "memory_mb_used": used_mem_mb,
            "local_gb": total_hdd_gb,
            "local_gb_used": used_hdd_gb,
            "hypervisor_type": "hyperv",
            "hypervisor_version": self._get_hypervisor_version(),
            "hypervisor_hostname": platform.node(),
            "vcpus_used": 0,
            "cpu_info": jsonutils.dumps(cpu_info),
            "supported_instances": jsonutils.dumps([("i686", "hyperv", "hvm"), ("x86_64", "hyperv", "hvm")]),
        }

        return dic
Example 20
    def _node_resource(self, node):
        # TODO(deva): refactor this to match ironic node datastruct
        vcpus_used = 0
        memory_mb_used = 0
        local_gb_used = 0

        vcpus = int(node.properties.get('cpus', 0))
        memory_mb = int(node.properties.get('memory_mb', 0))
        local_gb = int(node.properties.get('local_gb', 0))
        cpu_arch = str(node.properties.get('cpu_arch', 'NotFound'))
        nodes_extra_specs = self.extra_specs
        nodes_extra_specs['cpu_arch'] = cpu_arch

        if node.instance_uuid:
            vcpus_used = vcpus
            memory_mb_used = memory_mb
            local_gb_used = local_gb

        dic = {'vcpus': vcpus,
               'memory_mb': memory_mb,
               'local_gb': local_gb,
               'vcpus_used': vcpus_used,
               'memory_mb_used': memory_mb_used,
               'local_gb_used': local_gb_used,
               'hypervisor_type': self.get_hypervisor_type(),
               'hypervisor_version': self.get_hypervisor_version(),
               'hypervisor_hostname': str(node.uuid),
               'cpu_info': 'baremetal cpu',
               'supported_instances': jsonutils.dumps(
                                     _get_nodes_supported_instances(cpu_arch)),
               'stats': jsonutils.dumps(nodes_extra_specs)
               }
        return dic
Example 21
    def update_available_resource(self, context):
        """Override in-memory calculations of compute node resource usage based
        on data audited from the hypervisor layer.

        Add in resource claims in progress to account for operations that have
        declared a need for resources, but not necessarily retrieved them from
        the hypervisor layer yet.
        """
        LOG.audit(_("Auditing locally available compute resources"))
        resources = self.driver.get_available_resource(self.nodename)

        if not resources:
            # The virt driver does not support this function
            LOG.audit(_("Virt driver does not support " "'get_available_resource'  Compute tracking is disabled."))
            self.compute_node = None
            return
        resources["host_ip"] = CONF.my_ip

        self._verify_resources(resources)

        self._report_hypervisor_resource_view(resources)

        if "pci_passthrough_devices" in resources:
            if not self.pci_tracker:
                self.pci_tracker = pci_manager.PciDevTracker()
            self.pci_tracker.set_hvdevs(jsonutils.loads(resources.pop("pci_passthrough_devices")))

        # Grab all instances assigned to this node:
        instances = objects.InstanceList.get_by_host_and_node(context, self.host, self.nodename)

        # Now calculate usage based on instance utilization:
        self._update_usage_from_instances(resources, instances)

        # Grab all in-progress migrations:
        capi = self.conductor_api
        migrations = capi.migration_get_in_progress_by_host_and_node(context, self.host, self.nodename)

        self._update_usage_from_migrations(context, resources, migrations)

        # Detect and account for orphaned instances that may exist on the
        # hypervisor, but are not in the DB:
        orphans = self._find_orphaned_instances()
        self._update_usage_from_orphans(resources, orphans)

        # NOTE(yjiang5): Because pci device tracker status is not cleared in
        # this periodic task, and also because the resource tracker is not
        # notified when instances are deleted, we need remove all usages
        # from deleted instances.
        if self.pci_tracker:
            self.pci_tracker.clean_usage(instances, migrations, orphans)
            resources["pci_stats"] = jsonutils.dumps(self.pci_tracker.stats)
        else:
            resources["pci_stats"] = jsonutils.dumps([])

        self._report_final_resource_view(resources)

        metrics = self._get_host_metrics(context, self.nodename)
        resources["metrics"] = jsonutils.dumps(metrics)
        self._sync_compute_node(context, resources)
Example 22
    def _update_usage_from_migration(self, context, instance, image_meta, resources, migration):
        """Update usage for a single migration.  The record may
        represent an incoming or outbound migration.
        """
        uuid = migration["instance_uuid"]
        LOG.audit(_("Updating from migration %s") % uuid)

        incoming = migration["dest_compute"] == self.host and migration["dest_node"] == self.nodename
        outbound = migration["source_compute"] == self.host and migration["source_node"] == self.nodename
        same_node = incoming and outbound
        instance = objects.Instance.get_by_uuid(context, uuid, expected_attrs=["system_metadata"])
        record = self.tracked_instances.get(uuid, None)
        itype = None
        numa_topology = None
        core_bind = None

        if same_node:
            # same node resize. record usage for whichever instance type the
            # instance is *not* in:
            if instance["instance_type_id"] == migration["old_instance_type_id"]:
                itype = self._get_instance_type(context, instance, "new_", migration["new_instance_type_id"])
                numa_topology = instance["system_metadata"].get("new_numa_topo")
            else:
                # instance record already has new flavor, hold space for a
                # possible revert to the old instance type:
                itype = self._get_instance_type(context, instance, "old_", migration["old_instance_type_id"])
                numa_topology = instance["system_metadata"].get("old_numa_topo")

        elif incoming and not record:
            # instance has not yet migrated here:
            itype = self._get_instance_type(context, instance, "new_", migration["new_instance_type_id"])
            numa_topology = instance["system_metadata"].get("new_numa_topo")

        elif outbound and not record:
            # instance migrated, but record usage for a possible revert:
            itype = self._get_instance_type(context, instance, "old_", migration["old_instance_type_id"])
            numa_topology = instance["system_metadata"].get("old_numa_topo")

        if image_meta is None:
            image_meta = utils.get_image_from_system_metadata(instance["system_metadata"])

        if itype:
            host_topology = resources.get("numa_topology")
            if host_topology:
                host_topology = objects.NUMATopology.obj_from_db_obj(host_topology)
            if numa_topology:
                numa_topology = jsonutils.loads(numa_topology)
            usage = self._get_usage_dict(itype, numa_topology=numa_topology)

            if self.pci_tracker:
                if same_node or not outbound:
                    self.pci_tracker.update_pci_for_migration(context, instance)

            self._update_usage(context, resources, usage)
            if self.pci_tracker:
                resources["pci_stats"] = jsonutils.dumps(self.pci_tracker.stats)
            else:
                resources["pci_stats"] = jsonutils.dumps([])
            self.tracked_migrations[uuid] = (migration, itype)
Example 23
    def _update_usage_from_migration(self, context, instance, image_meta,
                                     resources, migration):
        """Update usage for a single migration.  The record may
        represent an incoming or outbound migration.
        """
        uuid = migration['instance_uuid']
        LOG.audit(_("Updating from migration %s") % uuid)

        incoming = (migration['dest_compute'] == self.host and
                    migration['dest_node'] == self.nodename)
        outbound = (migration['source_compute'] == self.host and
                    migration['source_node'] == self.nodename)
        same_node = (incoming and outbound)

        record = self.tracked_instances.get(uuid, None)
        itype = None

        if same_node:
            # same node resize. record usage for whichever instance type the
            # instance is *not* in:
            if (instance['instance_type_id'] ==
                    migration['old_instance_type_id']):
                itype = self._get_instance_type(context, instance, 'new_',
                        migration['new_instance_type_id'])
            else:
                # instance record already has new flavor, hold space for a
                # possible revert to the old instance type:
                itype = self._get_instance_type(context, instance, 'old_',
                        migration['old_instance_type_id'])

        elif incoming and not record:
            # instance has not yet migrated here:
            itype = self._get_instance_type(context, instance, 'new_',
                    migration['new_instance_type_id'])

        elif outbound and not record:
            # instance migrated, but record usage for a possible revert:
            itype = self._get_instance_type(context, instance, 'old_',
                    migration['old_instance_type_id'])

        if image_meta is None:
            image_meta = utils.get_image_from_system_metadata(
                    instance['system_metadata'])

        if itype:
            numa_topology = (
                    hardware.VirtNUMAInstanceTopology.get_constraints(
                        itype, image_meta))
            usage = self._get_usage_dict(
                        itype, numa_topology=numa_topology)
            if self.pci_tracker:
                self.pci_tracker.update_pci_for_migration(context, instance)
            self._update_usage(context, resources, usage)
            if self.pci_tracker:
                resources['pci_stats'] = jsonutils.dumps(
                        self.pci_tracker.stats)
            else:
                resources['pci_stats'] = jsonutils.dumps([])
            self.tracked_migrations[uuid] = (migration, itype)
Example 24
def generate_data():
    def _generate_stats(id_num):
        stats = {}
        i = 0
        while i < CONF.num_stat:
            key = 'key%d' % i
            stats[key] = id_num + i
            i = i + 1
        return stats

    print "Starting prepare data in DB"
    ctx = context.get_admin_context()
    for i in range(CONF.num_comp):
        if i * 100.0 % CONF.num_comp == 0:
            sys.stdout.write("prepared %d%% data\r" % (i * 100.0 / CONF.num_comp))
            sys.stdout.flush()
        svc_values = {
            'host': 'host-%d' % i,
            'binary': 'novadbtest',
            'topic': 'novadbtest',
            'report_count': 0,
        }
        # create the service record if it does not already exist
        service_ref = jsonutils.to_primitive(
            db.service_get_by_host_and_topic(ctx,
                                             svc_values['host'],
                                             svc_values['topic']))
        if not service_ref:
            service_ref = jsonutils.to_primitive(
                               db.service_create(ctx, svc_values))
        LOG.info('Service record created for id %d', service_ref['id'])
        # create/update compute node record
        comp_values = {
            'service_id': service_ref['id'],
            'vcpus': i,
            'memory_mb': i,
            'local_gb': i,
            'vcpus_used': i,
            'memory_mb_used': i,
            'local_gb_used': i,
            'hypervisor_type': 'qemu',
            'hypervisor_version': 1,
            'hypervisor_hostname': 'test',
            'free_ram_mb': i,
            'free_disk_gb': i,
            'current_workload': i,
            'running_vms': i,
            'disk_available_least': i,
            }
        comp_values['cpu_info'] = jsonutils.dumps(_generate_stats(i))
        if hasattr(ComputeNode, 'metrics'):
            comp_values['metrics'] = jsonutils.dumps(_generate_stats(i))
        if CONF.join_stats:
            comp_values['stats'] = _generate_stats(i)
        compute_ref = jsonutils.to_primitive(
                        db.compute_node_create(ctx, comp_values))
        LOG.info('Compute node record created for id %d', compute_ref['id'])
    print "Finish preparing data in DB"
    def _update_available_resource(self, context, resources):

        # initialise the compute node object, creating it
        # if it does not already exist.
        self._init_compute_node(context, resources)

        # if we could not init the compute node the tracker will be
        # disabled and we should quit now
        if self.disabled:
            return

        if 'pci_passthrough_devices' in resources:
            if not self.pci_tracker:
                n_id = self.compute_node['id'] if self.compute_node else None
                self.pci_tracker = pci_manager.PciDevTracker(context,
                                                             node_id=n_id)
            self.pci_tracker.set_hvdevs(jsonutils.loads(resources.pop(
                'pci_passthrough_devices')))

        # Grab all instances assigned to this node:
        instances = objects.InstanceList.get_by_host_and_node(
            context, self.host, self.nodename,
            expected_attrs=['system_metadata',
                            'numa_topology'])

        # Now calculate usage based on instance utilization:
        self._update_usage_from_instances(context, resources, instances)

        # Grab all in-progress migrations:
        capi = self.conductor_api
        migrations = capi.migration_get_in_progress_by_host_and_node(context,
                self.host, self.nodename)

        self._update_usage_from_migrations(context, resources, migrations)

        # Detect and account for orphaned instances that may exist on the
        # hypervisor, but are not in the DB:
        orphans = self._find_orphaned_instances()
        self._update_usage_from_orphans(context, resources, orphans)

        # NOTE(yjiang5): Because pci device tracker status is not cleared in
        # this periodic task, and also because the resource tracker is not
        # notified when instances are deleted, we need remove all usages
        # from deleted instances.
        if self.pci_tracker:
            self.pci_tracker.clean_usage(instances, migrations, orphans)
            resources['pci_stats'] = jsonutils.dumps(self.pci_tracker.stats)
        else:
            resources['pci_stats'] = jsonutils.dumps([])

        self._report_final_resource_view(resources)

        metrics = self._get_host_metrics(context, self.nodename)
        resources['metrics'] = jsonutils.dumps(metrics)
        self._update(context, resources)
        LOG.info(_LI('Compute_service record updated for %(host)s:%(node)s'),
                     {'host': self.host, 'node': self.nodename})
Example 26
    def _update_available_resource(self, context, resources):
        if 'pci_passthrough_devices' in resources:
            if not self.pci_tracker:
                self.compute_node = self._get_compute_node_ref(context)
                node_id = (self.compute_node['id'] if self.compute_node else
                           None)
                self.pci_tracker = pci_manager.HuaweiPciDevTracker(
                    ctxt=context, node_id=node_id)
            self.pci_tracker.set_hvdevs(jsonutils.loads(resources.pop(
                'pci_passthrough_devices')))

        # In VMware clusters, the node name may include reserved characters
        # such as ( or ), so we escape it.
        node_name = h_utils.regex_escape(self.nodename)

        # Grab all instances assigned to this node:
        instances = objects.InstanceList.get_by_host_and_node(
            context, self.host, node_name,
            expected_attrs=['system_metadata',
                            'numa_topology'])

        # find instances that are live-migrating to this host
        func = self._find_isntances_to_host_in_livemig_by_db
        for inst in func(context):
            instances.objects.append(inst)

        # Now calculate usage based on instance utilization:
        self._update_usage_from_instances(context, resources, instances)

        # Grab all in-progress migrations:
        capi = self.conductor_api
        migrations = capi.migration_get_in_progress_by_host_and_node(context,
                self.host, self.nodename)

        self._update_usage_from_migrations(context, resources, migrations)

        # Detect and account for orphaned instances that may exist on the
        # hypervisor, but are not in the DB:
        orphans = self._find_orphaned_instances()
        self._update_usage_from_orphans(context, resources, orphans)

        # NOTE(): Because pci device tracker status is not cleared in
        # this periodic task, and also because the resource tracker is not
        # notified when instances are deleted, we need remove all usages
        # from deleted instances.
        if self.pci_tracker:
            self.pci_tracker.clean_usage(instances, migrations, orphans)
            resources['pci_stats'] = jsonutils.dumps(self.pci_tracker.stats)
        else:
            resources['pci_stats'] = jsonutils.dumps([])

        self._report_final_resource_view(resources)

        metrics = self._get_host_metrics(context, self.nodename)
        resources['metrics'] = jsonutils.dumps(metrics)
        self._sync_compute_node(context, resources)
Example 27
    def get_available_resource(self, nodename):
        host_stats = self._get_host_stats(nodename)

        return {'vcpus': host_stats['vcpus'],
                'memory_mb': host_stats['host_memory_total'],
                'local_gb': host_stats['disk_total'],
                'vcpus_used': 0,
                'memory_mb_used': (host_stats['host_memory_total'] -
                                   host_stats['host_memory_free']),
                'local_gb_used': host_stats['disk_used'],
                'hypervisor_type': host_stats['hypervisor_type'],
                'hypervisor_version': host_stats['hypervisor_version'],
                'hypervisor_hostname': host_stats['hypervisor_hostname'],
                'cpu_info': jsonutils.dumps(host_stats['cpu_info']),
                'supported_instances': jsonutils.dumps(
                    host_stats['supported_instances']),
                'numa_topology': None}
Example 28
 def test_get_by_instance_legacy(self):
     fakesysmeta = {
         "pci_requests": jsonutils.dumps([fake_legacy_pci_requests[0]]),
         "new_pci_requests": jsonutils.dumps([fake_legacy_pci_requests[1]]),
     }
     instance = objects.Instance(uuid="fake-uuid", system_metadata=fakesysmeta)
     requests = objects.InstancePCIRequests.get_by_instance(self.context, instance)
     self.assertEqual(2, len(requests.requests))
     self.assertEqual("alias_1", requests.requests[0].alias_name)
     self.assertFalse(requests.requests[0].is_new)
     self.assertEqual("alias_2", requests.requests[1].alias_name)
     self.assertTrue(requests.requests[1].is_new)
Example 29
    def _update_usage_from_migration(self, context, instance, resources,
                                     migration):
        """Update usage for a single migration.  The record may
        represent an incoming or outbound migration.
        """
        uuid = migration['instance_uuid']
        LOG.audit(_("Updating from migration %s") % uuid)

        incoming = (migration['dest_compute'] == self.host and
                    migration['dest_node'] == self.nodename)
        outbound = (migration['source_compute'] == self.host and
                    migration['source_node'] == self.nodename)
        same_node = (incoming and outbound)

        record = self.tracked_instances.get(uuid, None)
        itype = None

        if same_node:
            # same node resize. record usage for whichever instance type the
            # instance is *not* in:
            if (instance['instance_type_id'] ==
                    migration['old_instance_type_id']):
                itype = self._get_instance_type(context, instance, 'new_',
                        migration['new_instance_type_id'])
            else:
                # instance record already has new flavor, hold space for a
                # possible revert to the old instance type:
                itype = self._get_instance_type(context, instance, 'old_',
                        migration['old_instance_type_id'])

        elif incoming and not record:
            # instance has not yet migrated here:
            itype = self._get_instance_type(context, instance, 'new_',
                    migration['new_instance_type_id'])

        elif outbound and not record:
            # instance migrated, but record usage for a possible revert:
            itype = self._get_instance_type(context, instance, 'old_',
                    migration['old_instance_type_id'])

        if itype:
            self.stats.update_stats_for_migration(itype)
            if self.pci_tracker:
                self.pci_tracker.update_pci_for_migration(instance)
            self._update_usage(resources, itype)
            resources['stats'] = self.stats
            if self.pci_tracker:
                resources['pci_stats'] = jsonutils.dumps(
                        self.pci_tracker.stats)
            else:
                resources['pci_stats'] = jsonutils.dumps({})
            self.tracked_migrations[uuid] = (migration, itype)
Example 30
    def __call__(self, target, creds, enforcer):
        """Check http: rules by calling to a remote server.

        This example implementation simply verifies that the response
        is exactly 'True'.
        """

        url = ('http:' + self.match) % target
        data = {'target': jsonutils.dumps(target),
                'credentials': jsonutils.dumps(creds)}
        post_data = urlparse.urlencode(data)
        f = urlrequest.urlopen(url, post_data)
        return f.read() == "True"
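For context, a small standalone sketch of what the POST body above contains. The target and creds values are made-up examples, and the oslo_serialization import path is an assumption (older trees exposed the same module as nova.openstack.common.jsonutils):

    from six.moves.urllib import parse as urlparse

    from oslo_serialization import jsonutils

    data = {'target': jsonutils.dumps({'project_id': 'fake_project'}),
            'credentials': jsonutils.dumps({'roles': ['admin']})}
    # Both fields are JSON strings, then form-encoded into the request body.
    post_data = urlparse.urlencode(data)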
Example 31
 def test_instance_object_none_info_cache(self):
     inst = fake_instance.fake_instance_obj('fake-context',
                                            expected_attrs=['info_cache'])
     self.assertIsNone(inst.info_cache)
     result = compute_utils.get_nw_info_for_instance(inst)
     self.assertEqual(jsonutils.dumps([]), result.json())
Example 32
 def serialize(self, data, content_type):
     return jsonutils.dumps(data)
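A short sketch of why these wrappers call jsonutils.dumps rather than the stdlib json module: jsonutils serializes through to_primitive(), so values such as datetimes come out as strings instead of raising TypeError. The oslo_serialization import path is an assumption; older Nova trees used nova.openstack.common.jsonutils with the same behaviour.

    import datetime

    from oslo_serialization import jsonutils

    payload = {'created_at': datetime.datetime(2015, 1, 1, 12, 0, 0),
               'stats': {'vcpus_used': 2}}
    encoded = jsonutils.dumps(payload)   # the datetime becomes a time string
    decoded = jsonutils.loads(encoded)
    assert decoded['stats']['vcpus_used'] == 2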
Example 33
 dict(id=1,
      local_gb=10,
      memory_mb=1024,
      vcpus=1,
      vcpus_used=0,
      local_gb_used=0,
      memory_mb_used=0,
      updated_at=None,
      cpu_info='baremetal cpu',
      service=dict(host='host1', disabled=False),
      hypervisor_hostname='node1uuid',
      host_ip='127.0.0.1',
      hypervisor_version=1,
      hypervisor_type='ironic',
      stats=jsonutils.dumps(
          dict(ironic_driver="nova.virt.ironic.driver.IronicDriver",
               cpu_arch='i386')),
      supported_instances='[["i386", "baremetal", "baremetal"]]',
      free_disk_gb=10,
      free_ram_mb=1024),
 dict(id=2,
      local_gb=20,
      memory_mb=2048,
      vcpus=1,
      vcpus_used=0,
      local_gb_used=0,
      memory_mb_used=0,
      updated_at=None,
      cpu_info='baremetal cpu',
      service=dict(host='host2', disabled=True),
      hypervisor_hostname='node2uuid',
Example 34
 def default(self, data):
     return jsonutils.dumps(data)
Example 35
    def _prepare_handoff_dest(self,
                              end_point,
                              dest_token,
                              instance,
                              dest_vmname=None):
        # information of current VM at source
        if dest_vmname:
            instance_name = dest_vmname
        else:
            instance_name = instance['display_name'] + "-handoff"
        flavor_memory = instance['memory_mb']
        flavor_cpu = instance['vcpus']
        requested_basevm_id = instance['system_metadata'][
            'image_base_sha256_uuid']
        original_overlay_url = \
            instance.get("metadata", dict()).get("overlay_url", None)

        # find matching base VM
        image_list = self._get_server_info(end_point, dest_token, "images")
        basevm_uuid = None
        for image_item in image_list:
            properties = image_item.get("metadata", None)
            if properties is None or len(properties) == 0:
                continue
            if properties.get(CloudletAPI.PROPERTY_KEY_CLOUDLET_TYPE) != \
                    CloudletAPI.IMAGE_TYPE_BASE_DISK:
                continue
            base_sha256_uuid = properties.get(
                CloudletAPI.PROPERTY_KEY_BASE_UUID)
            if base_sha256_uuid == requested_basevm_id:
                basevm_uuid = image_item['id']
                break
        if basevm_uuid is None:
            msg = "Cannot find matching Base VM with (%s) at (%s)" %\
                (str(requested_basevm_id), end_point.netloc)
            raise HandoffError(msg)

        # Find matching flavor.
        def find_matching_flavor(flavor_list, cpu_count, memory_mb):
            for flavor in flavor_list:
                vcpu = int(flavor['vcpus'])
                ram_mb = int(flavor['ram'])
                if vcpu == cpu_count and ram_mb == memory_mb:
                    flavor_ref = flavor['links'][0]['href']
                    flavor_id = flavor['id']
                    return flavor_ref, flavor_id
            return None, None

        flavor_list = self._get_server_info(end_point, dest_token, "flavors")
        flavor_ref, flavor_id = find_matching_flavor(flavor_list, flavor_cpu,
                                                     flavor_memory)
        if flavor_ref is None or flavor_id is None:
            msg = "Cannot find matching flavor with cpu=%d, memory=%d at %s" %\
                (flavor_cpu, flavor_memory, end_point.netloc)
            raise HandoffError(msg)

        # generate request
        meta_data = {
            "handoff_info": instance_name,
            "overlay_url": original_overlay_url
        }

        s = {
            "server": {
                "name": instance_name,
                "imageRef": str(basevm_uuid),
                "flavorRef": flavor_id,
                "metadata": meta_data,
                "min_count": "1",
                "max_count": "1",
                "key_name": None,
            }
        }
        params = jsonutils.dumps(s)
        headers = {
            "X-Auth-Token": dest_token,
            "Content-type": "application/json"
        }
        conn = httplib.HTTPConnection(end_point[1])
        conn.request("POST", "%s/servers" % end_point[2], params, headers)
        LOG.info("request handoff to %s" % (end_point.netloc))
        response = conn.getresponse()
        data = response.read()
        dd = jsonutils.loads(data)
        conn.close()

        return dd
Example 36
def _inject_metadata_into_fs(metadata, fs):
    LOG.debug(_("Inject metadata fs=%(fs)s metadata=%(metadata)s") % locals())
    metadata = dict([(m['key'], m['value']) for m in metadata])
    _inject_file_into_fs(fs, 'meta.js', jsonutils.dumps(metadata))
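Illustrative input and output for the flattening above, using made-up metadata values (the jsonutils import path is an assumption):

    from oslo_serialization import jsonutils

    metadata = [{'key': 'role', 'value': 'web'},
                {'key': 'tier', 'value': '1'}]
    meta_js = jsonutils.dumps(dict((m['key'], m['value']) for m in metadata))
    # meta_js == '{"role": "web", "tier": "1"}'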
Example 37
 def _plugin_xenhost_set_host_enabled(self, method, args):
     enabled = 'enabled' if args.get('enabled') == 'true' else 'disabled'
     return jsonutils.dumps({"status": enabled})
Example 38
 def _set_pci_request(self, claim):
     request = [{'count': 1,
                 'spec': [{'vendor_id': 'v', 'product_id': 'p'}],
                 }]
     claim.instance.update(
         system_metadata={'new_pci_requests': jsonutils.dumps(request)})
Example 39
 def json_comparator(self, expected, obj_val):
     # json-ify an object field for comparison with its db str
     # equivalent
     self.assertEqual(expected, jsonutils.dumps(obj_val))
Example 40
 def _make_request(self, url, body):
     req = webob.Request.blank('/v3' + url)
     req.method = 'POST'
     req.body = jsonutils.dumps(body)
     req.content_type = 'application/json'
     return req.get_response(self.app)
Example 41
def _inject_metadata_into_fs(metadata, fs):
    LOG.debug(_("Inject metadata fs=%(fs)s metadata=%(metadata)s"),
              {'fs': fs, 'metadata': metadata})
    _inject_file_into_fs(fs, 'meta.js', jsonutils.dumps(metadata))
Example 42
 def _encode_body(self, body):
     return jsonutils.dumps(body)
Example 43
    def __call__(self, req):
        request_id = context.generate_request_id()
        signature = req.params.get('Signature')
        if not signature:
            msg = _("Signature not provided")
            return faults.ec2_error_response(request_id,
                                             "AuthFailure",
                                             msg,
                                             status=400)
        access = req.params.get('AWSAccessKeyId')
        if not access:
            msg = _("Access key not provided")
            return faults.ec2_error_response(request_id,
                                             "AuthFailure",
                                             msg,
                                             status=400)

        # Make a copy of args for authentication and signature verification.
        auth_params = dict(req.params)
        # Not part of authentication args
        auth_params.pop('Signature')

        cred_dict = {
            'access': access,
            'signature': signature,
            'host': req.host,
            'verb': req.method,
            'path': req.path,
            'params': auth_params,
        }
        if "ec2" in CONF.keystone_ec2_url:
            creds = {'ec2Credentials': cred_dict}
        else:
            creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}}
        creds_json = jsonutils.dumps(creds)
        headers = {'Content-Type': 'application/json'}

        o = urlparse.urlparse(CONF.keystone_ec2_url)
        if o.scheme == "http":
            conn = httplib.HTTPConnection(o.netloc)
        else:
            conn = httplib.HTTPSConnection(o.netloc)
        conn.request('POST', o.path, body=creds_json, headers=headers)
        response = conn.getresponse()
        data = response.read()
        if response.status != 200:
            if response.status == 401:
                msg = response.reason
            else:
                msg = _("Failure communicating with keystone")
            return faults.ec2_error_response(request_id,
                                             "AuthFailure",
                                             msg,
                                             status=response.status)
        result = jsonutils.loads(data)
        conn.close()

        try:
            token_id = result['access']['token']['id']
            user_id = result['access']['user']['id']
            project_id = result['access']['token']['tenant']['id']
            user_name = result['access']['user'].get('name')
            project_name = result['access']['token']['tenant'].get('name')
            roles = [
                role['name'] for role in result['access']['user']['roles']
            ]
        except (AttributeError, KeyError) as e:
            LOG.exception(_("Keystone failure: %s") % e)
            msg = _("Failure communicating with keystone")
            return faults.ec2_error_response(request_id,
                                             "AuthFailure",
                                             msg,
                                             status=400)

        remote_address = req.remote_addr
        if CONF.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)

        catalog = result['access']['serviceCatalog']
        ctxt = context.RequestContext(user_id,
                                      project_id,
                                      user_name=user_name,
                                      project_name=project_name,
                                      roles=roles,
                                      auth_token=token_id,
                                      remote_address=remote_address,
                                      service_catalog=catalog)

        req.environ['nova.context'] = ctxt

        return self.application
Example 44
    def _node_resource(self, node):
        """Helper method to create resource dict from node stats."""
        vcpus = int(node.properties.get('cpus', 0))
        memory_mb = int(node.properties.get('memory_mb', 0))
        local_gb = int(node.properties.get('local_gb', 0))
        raw_cpu_arch = node.properties.get('cpu_arch', None)
        try:
            cpu_arch = arch.canonicalize(raw_cpu_arch)
        except exception.InvalidArchitectureName:
            cpu_arch = None
        if not cpu_arch:
            LOG.warn(_LW("cpu_arch not defined for node '%s'"), node.uuid)

        nodes_extra_specs = {}

        # NOTE(deva): In Havana and Icehouse, the flavor was required to link
        # to an arch-specific deploy kernel and ramdisk pair, and so the flavor
        # also had to have extra_specs['cpu_arch'], which was matched against
        # the ironic node.properties['cpu_arch'].
        # With Juno, the deploy image(s) may be referenced directly by the
        # node.driver_info, and a flavor no longer needs to contain any of
        # these three extra specs, though the cpu_arch may still be used
        # in a heterogeneous environment, if so desired.
        # NOTE(dprince): we use the raw cpu_arch here because extra_specs
        # filters aren't canonicalized
        nodes_extra_specs['cpu_arch'] = raw_cpu_arch

        # NOTE(gilliard): To assist with more precise scheduling, if the
        # node.properties contains a key 'capabilities', we expect the value
        # to be of the form "k1:v1,k2:v2,etc.." which we add directly as
        # key/value pairs into the node_extra_specs to be used by the
        # ComputeCapabilitiesFilter
        capabilities = node.properties.get('capabilities')
        if capabilities:
            for capability in str(capabilities).split(','):
                parts = capability.split(':')
                if len(parts) == 2 and parts[0] and parts[1]:
                    nodes_extra_specs[parts[0]] = parts[1]
                else:
                    LOG.warn(
                        _LW("Ignoring malformed capability '%s'. "
                            "Format should be 'key:val'."), capability)

        vcpus_used = 0
        memory_mb_used = 0
        local_gb_used = 0

        if node.instance_uuid:
            # Node has an instance, so report all resources as unavailable
            vcpus_used = vcpus
            memory_mb_used = memory_mb
            local_gb_used = local_gb
        elif self._node_resources_unavailable(node):
            # The node's current state is such that it should not present any
            # of its resources to Nova
            vcpus = 0
            memory_mb = 0
            local_gb = 0

        dic = {
            'node': str(node.uuid),
            'hypervisor_hostname': str(node.uuid),
            'hypervisor_type': self._get_hypervisor_type(),
            'hypervisor_version': self._get_hypervisor_version(),
            'cpu_info': 'baremetal cpu',
            'vcpus': vcpus,
            'vcpus_used': vcpus_used,
            'local_gb': local_gb,
            'local_gb_used': local_gb_used,
            'disk_total': local_gb,
            'disk_used': local_gb_used,
            'disk_available': local_gb - local_gb_used,
            'memory_mb': memory_mb,
            'memory_mb_used': memory_mb_used,
            'host_memory_total': memory_mb,
            'host_memory_free': memory_mb - memory_mb_used,
            'supported_instances':
                jsonutils.dumps(_get_nodes_supported_instances(cpu_arch)),
            'stats': jsonutils.dumps(nodes_extra_specs),
            'host': CONF.host,
        }
        dic.update(nodes_extra_specs)
        return dic
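
The capability parsing in the loop above can be exercised on its own; a
standalone sketch (hypothetical capability names) that mirrors it:

def parse_capabilities(capabilities):
    extra_specs = {}
    for capability in str(capabilities).split(','):
        parts = capability.split(':')
        if len(parts) == 2 and parts[0] and parts[1]:
            extra_specs[parts[0]] = parts[1]
    return extra_specs

assert parse_capabilities('boot_mode:uefi,raid:1') == {
    'boot_mode': 'uefi', 'raid': '1'}
assert parse_capabilities('malformed') == {}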
Esempio n. 45
0
    def _plugin_poweraction(self, method, args):
        return jsonutils.dumps({"power_action": method[5:]})
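
The method[5:] slice presumably strips a "host_" prefix from the plugin
method name (matching the host_reboot/host_shutdown names used in a later
example), so the fake returns just the bare action:

assert "host_reboot"[5:] == "reboot"
assert "host_shutdown"[5:] == "shutdown"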
Esempio n. 46
0
def _json_dumps(properties, attr):
    prop = properties[attr]
    if not isinstance(prop, six.string_types):
        properties[attr] = jsonutils.dumps(prop)
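
A hedged usage sketch of the same idea, using a hypothetical standalone
equivalent (standard json module and str in place of jsonutils and
six.string_types): only non-string values are re-encoded, values that are
already strings are left alone.

import json

def ensure_json_string(properties, attr):
    prop = properties[attr]
    if not isinstance(prop, str):
        properties[attr] = json.dumps(prop)

properties = {'kernel_id': 'abc', 'mappings': [{'device': 'root'}]}
ensure_json_string(properties, 'mappings')
ensure_json_string(properties, 'kernel_id')
assert properties['kernel_id'] == 'abc'
assert json.loads(properties['mappings']) == [{'device': 'root'}]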
Esempio n. 47
0
    def host_power_action(self, _host, action):
        """Reboots or shuts down the host."""
        args = {"action": jsonutils.dumps(action)}
        methods = {"reboot": "host_reboot", "shutdown": "host_shutdown"}
        response = call_xenhost(self._session, methods[action], args)
        return response.get("power_action", response)
Esempio n. 48
0
    def json(self):
        return jsonutils.dumps(self)
Esempio n. 49
0
def _inject_metadata_into_fs(metadata, fs):
    metadata = dict([(m['key'], m['value']) for m in metadata])
    _inject_file_into_fs(fs, 'meta.js', jsonutils.dumps(metadata))
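
The key/value list is first flattened into a dict before being serialized
into meta.js. An illustration of just that transformation (fabricated
metadata, no filesystem access, standard json module):

import json

metadata = [{'key': 'role', 'value': 'web'}, {'key': 'tier', 'value': '1'}]
as_dict = dict([(m['key'], m['value']) for m in metadata])
assert json.loads(json.dumps(as_dict)) == {'role': 'web', 'tier': '1'}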
Esempio n. 50
0
def _json_dumps(properties, attr):
    prop = properties[attr]
    if not isinstance(prop, basestring):
        properties[attr] = jsonutils.dumps(prop)
Esempio n. 51
0
    def _convert_connector_to_db_format(self, updates):
        connector = updates.pop('connector', None)
        if connector is not None:
            updates['connector'] = jsonutils.dumps(connector)
Esempio n. 52
0
    def to_json(self):
        return jsonutils.dumps(self._to_dict())
Esempio n. 53
0
    def _plugin_xenhost_host_uptime(self, method, args):
        return jsonutils.dumps({"uptime": "fake uptime"})
Esempio n. 54
0
    def _get_create_request_json(self, body_dict):
        req = webob.Request.blank('/v2/fake/os-create-server-ext')
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        req.body = jsonutils.dumps(body_dict)
        return req
Esempio n. 55
0
    def _request_data(self, verb, path):
        """Get data describing a limit request verb/path."""
        return jsonutils.dumps({"verb": verb, "path": path})
Esempio n. 56
0
    def __call__(self, req):
        context = req.environ['nova.context']
        request_id = context.request_id
        api_request = req.environ['ec2.request']
        result = None
        try:
            result = api_request.invoke(context)
        except exception.InstanceNotFound as ex:
            LOG.info(_('InstanceNotFound raised: %s'),
                     unicode(ex),
                     context=context)
            ec2_id = ec2utils.id_to_ec2_inst_id(ex.kwargs['instance_id'])
            message = ex.message % {'instance_id': ec2_id}
            return ec2_error(req, request_id, type(ex).__name__, message)
        except exception.VolumeNotFound as ex:
            LOG.info(_('VolumeNotFound raised: %s'),
                     unicode(ex),
                     context=context)
            ec2_id = ec2utils.id_to_ec2_vol_id(ex.kwargs['volume_id'])
            message = ex.message % {'volume_id': ec2_id}
            return ec2_error(req, request_id, type(ex).__name__, message)
        except exception.SnapshotNotFound as ex:
            LOG.info(_('SnapshotNotFound raised: %s'),
                     unicode(ex),
                     context=context)
            ec2_id = ec2utils.id_to_ec2_snap_id(ex.kwargs['snapshot_id'])
            message = ex.message % {'snapshot_id': ec2_id}
            return ec2_error(req, request_id, type(ex).__name__, message)
        except exception.NotFound as ex:
            LOG.info(_('NotFound raised: %s'), unicode(ex), context=context)
            return ec2_error(req, request_id, type(ex).__name__, unicode(ex))
        except exception.EC2APIError as ex:
            if ex.code:
                return ec2_error(req, request_id, ex.code, unicode(ex))
            else:
                return ec2_error(req, request_id,
                                 type(ex).__name__, unicode(ex))
        except exception.KeyPairExists as ex:
            LOG.debug(_('KeyPairExists raised: %s'),
                      unicode(ex),
                      context=context)
            code = 'InvalidKeyPair.Duplicate'
            return ec2_error(req, request_id, code, unicode(ex))
        except exception.InvalidKeypair as ex:
            LOG.debug(_('InvalidKeypair raised: %s'),
                      unicode(ex),
                      context=context)
            code = 'InvalidKeyPair.Format'
            return ec2_error(req, request_id, code, unicode(ex))
        except exception.InvalidParameterValue as ex:
            LOG.debug(_('InvalidParameterValue raised: %s'),
                      unicode(ex),
                      context=context)
            return ec2_error(req, request_id, type(ex).__name__, unicode(ex))
        except exception.InvalidPortRange as ex:
            LOG.debug(_('InvalidPortRange raised: %s'),
                      unicode(ex),
                      context=context)
            return ec2_error(req, request_id, type(ex).__name__, unicode(ex))
        except exception.NotAuthorized as ex:
            LOG.info(_('NotAuthorized raised: %s'),
                     unicode(ex),
                     context=context)
            return ec2_error(req, request_id, type(ex).__name__, unicode(ex))
        except exception.InvalidRequest as ex:
            LOG.debug(_('InvalidRequest raised: %s'),
                      unicode(ex),
                      context=context)
            return ec2_error(req, request_id, type(ex).__name__, unicode(ex))
        except exception.QuotaError as ex:
            LOG.debug(_('QuotaError raised: %s'), unicode(ex), context=context)
            return ec2_error(req, request_id, type(ex).__name__, unicode(ex))
        except exception.InvalidInstanceIDMalformed as ex:
            LOG.debug(_('Invalid id: bogus (expecting "i-..."): %s'),
                      unicode(ex),
                      context=context)
            return ec2_error(req, request_id, type(ex).__name__, unicode(ex))
        except Exception as ex:
            env = req.environ.copy()
            for k in env.keys():
                if not isinstance(env[k], basestring):
                    env.pop(k)

            LOG.exception(_('Unexpected error raised: %s'), unicode(ex))
            LOG.error(_('Environment: %s') % jsonutils.dumps(env))
            return ec2_error(
                req, request_id, 'UnknownError',
                _('An unknown error has occurred. '
                  'Please try your request again.'))
        else:
            resp = webob.Response()
            resp.status = 200
            resp.headers['Content-Type'] = 'text/xml'
            resp.body = str(result)
            return resp
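
One detail worth isolating is the EC2APIError branch: the explicit fault
code wins when the exception carries one, otherwise the exception class name
is used. A standalone sketch with a hypothetical exception class:

class FakeEC2APIError(Exception):
    def __init__(self, message, code=None):
        super(FakeEC2APIError, self).__init__(message)
        self.code = code

def fault_code(ex):
    return ex.code if getattr(ex, 'code', None) else type(ex).__name__

assert fault_code(FakeEC2APIError('boom')) == 'FakeEC2APIError'
assert fault_code(FakeEC2APIError('boom', 'IncorrectState')) == 'IncorrectState'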
Esempio n. 57
0
    def _convert_stats_to_db_format(self, updates):
        stats = updates.pop('stats', None)
        if stats is not None:
            updates['stats'] = jsonutils.dumps(stats)
Esempio n. 58
0
    def test_instance_dict_none_info_cache(self):
        inst = fake_instance.fake_db_instance(info_cache=None)
        self.assertIsNone(inst['info_cache'])
        result = compute_utils.get_nw_info_for_instance(inst)
        self.assertEqual(jsonutils.dumps([]), result.json())
Esempio n. 59
0
    def update_available_resource(self, context):
        """Override in-memory calculations of compute node resource usage based
        on data audited from the hypervisor layer.

        Add in resource claims in progress to account for operations that have
        declared a need for resources, but not necessarily retrieved them from
        the hypervisor layer yet.
        """
        LOG.audit(_("Auditing locally available compute resources"))
        resources = self.driver.get_available_resource(self.nodename)

        if not resources:
            # The virt driver does not support this function
            LOG.audit(
                _("Virt driver does not support "
                  "'get_available_resource'. Compute tracking is disabled."))
            self.compute_node = None
            return
        resources['host_ip'] = CONF.my_ip

        self._verify_resources(resources)

        self._report_hypervisor_resource_view(resources)

        if 'pci_passthrough_devices' in resources:
            if not self.pci_tracker:
                self.pci_tracker = pci_manager.PciDevTracker()
            self.pci_tracker.set_hvdevs(
                jsonutils.loads(resources.pop('pci_passthrough_devices')))

        # Grab all instances assigned to this node:
        instances = instance_obj.InstanceList.get_by_host_and_node(
            context, self.host, self.nodename)

        # Now calculate usage based on instance utilization:
        self._update_usage_from_instances(resources, instances)

        # Grab all in-progress migrations:
        capi = self.conductor_api
        migrations = capi.migration_get_in_progress_by_host_and_node(
            context, self.host, self.nodename)

        self._update_usage_from_migrations(context, resources, migrations)

        # Detect and account for orphaned instances that may exist on the
        # hypervisor, but are not in the DB:
        orphans = self._find_orphaned_instances()
        self._update_usage_from_orphans(resources, orphans)

        # NOTE(yjiang5): Because the PCI device tracker status is not cleared
        # in this periodic task, and because the resource tracker is not
        # notified when instances are deleted, we need to remove all usages
        # from deleted instances here.
        if self.pci_tracker:
            self.pci_tracker.clean_usage(instances, migrations, orphans)
            resources['pci_stats'] = jsonutils.dumps(self.pci_tracker.stats)
        else:
            resources['pci_stats'] = jsonutils.dumps({})

        self._report_final_resource_view(resources)

        metrics = self._get_host_metrics(context, self.nodename)
        resources['metrics'] = jsonutils.dumps(metrics)
        self._sync_compute_node(context, resources)
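
By the time _sync_compute_node runs, the resources dict carries
JSON-encoded pci_stats and metrics fields alongside the plain counters. A
rough, self-contained illustration (fabricated values, standard json module
in place of jsonutils):

import json

resources = {'vcpus': 8, 'memory_mb': 16384, 'local_gb': 100,
             'host_ip': '10.0.0.1'}
resources['pci_stats'] = json.dumps({})  # no PCI tracker in this sketch
resources['metrics'] = json.dumps(
    [{'name': 'cpu.user.percent', 'value': 0.04}])
assert json.loads(resources['pci_stats']) == {}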