Code example #1
File: test_api.py Project: alanmeadows/cobalt
    def test_bless_instance(self):
        instance_uuid = utils.create_instance(self.context)

        num_instance_before = len(db.instance_get_all(self.context))

        blessed_instance = self.cobalt_api.bless_instance(self.context, instance_uuid)

        self.assertEquals(vm_states.BUILDING, blessed_instance['vm_state'])
        # Ensure that we have a 2nd instance in the database that is a "clone"
        # of our original instance.
        instances = db.instance_get_all(self.context)
        self.assertTrue(len(instances) == (num_instance_before + 1),
                        "There should be one new instance after blessing.")

        # The virtual machine should be marked that it is now blessed.
        metadata = db.instance_metadata_get(self.context, blessed_instance['uuid'])
        self.assertTrue(metadata.has_key('blessed_from'),
                        "The instance should have blessed_from metadata after being blessed.")
        self.assertTrue(metadata['blessed_from'] == '%s' % instance_uuid,
            "The instance should have blessed_from metadata set to the original instance uuid. " \
          + "(value=%s)" % (metadata['blessed_from']))

        system_metadata = db.instance_system_metadata_get(self.context, blessed_instance['uuid'])
        self.assertTrue(system_metadata.has_key('blessed_from'),
            "The instance should have blessed_from system_metadata after being blessed.")
        self.assertTrue(system_metadata['blessed_from'] == '%s' % instance_uuid,
            "The instance should have blessed_from system_metadata set to the original instance uuid. "\
            + "(value=%s)" % (system_metadata['blessed_from']))

        db_blessed_instance = db.instance_get_by_uuid(self.context,
                                                      blessed_instance['uuid'])
        self.assertTrue(db_blessed_instance['info_cache'])
        self.assertIsNotNone(db_blessed_instance['info_cache']['network_info'])
Code example #2
File: test_compute.py Project: superstack/nova
    def test_run_kill_vm(self):
        """Detect when a vm is terminated behind the scenes"""
        self.stubs = stubout.StubOutForTesting()
        self.stubs.Set(compute_manager.ComputeManager,
                '_report_driver_status', nop_report_driver_status)

        instance_id = self._create_instance()

        self.compute.run_instance(self.context, instance_id)

        instances = db.instance_get_all(context.get_admin_context())
        LOG.info(_("Running instances: %s"), instances)
        self.assertEqual(len(instances), 1)

        instance_name = instances[0].name
        self.compute.driver.test_remove_vm(instance_name)

        # Force the compute manager to do its periodic poll
        error_list = self.compute.periodic_tasks(context.get_admin_context())
        self.assertFalse(error_list)

        instances = db.instance_get_all(context.get_admin_context())
        LOG.info(_("After force-killing instances: %s"), instances)
        self.assertEqual(len(instances), 1)
        self.assertEqual(power_state.SHUTOFF, instances[0]['state'])
Code example #3
File: fakes.py Project: A7Zulu/nova
def mox_host_manager_db_calls(mock, context):
    mock.StubOutWithMock(db, 'compute_node_get_all')
    mock.StubOutWithMock(db, 'instance_get_all')

    db.compute_node_get_all(mox.IgnoreArg()).AndReturn(COMPUTE_NODES)
    db.instance_get_all(mox.IgnoreArg(),
            columns_to_join=['instance_type']).AndReturn(INSTANCES)
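
This fakes.py helper only records the DB calls a scheduler test expects; the test still has to switch mox into replay mode, exercise the HostManager, and verify afterwards. A minimal sketch of that flow, assuming the helper and the COMPUTE_NODES/INSTANCES fixtures above plus a standard nova import layout (the function name and HostManager construction are illustrative, not taken from any one project):

import mox

from nova import context
from nova.scheduler import host_manager


def example_host_manager_flow():
    # Illustrative only: record the expected db.* calls, replay, run, verify.
    m = mox.Mox()
    ctxt = context.get_admin_context()
    mox_host_manager_db_calls(m, ctxt)   # records compute_node_get_all / instance_get_all
    m.ReplayAll()                        # switch the recorded stubs into replay mode
    states = host_manager.HostManager().get_all_host_states(ctxt, 'compute')
    m.VerifyAll()                        # fail if any expected call was not made
    m.UnsetStubs()
    return states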
Code example #4
File: test_conductor.py Project: jdurgin/nova
 def test_instance_get_all(self):
     self.mox.StubOutWithMock(db, "instance_get_all")
     self.mox.StubOutWithMock(db, "instance_get_all_by_filters")
     db.instance_get_all(self.context)
     db.instance_get_all_by_filters(self.context, {"name": "fake-inst"}, "updated_at", "asc")
     self.mox.ReplayAll()
     self.conductor.instance_get_all(self.context)
     self.conductor.instance_get_all_by_filters(self.context, {"name": "fake-inst"}, "updated_at", "asc")
Code example #5
 def index(self, req):
     context = req.environ['nova.context'].elevated()
     instances = db.instance_get_all(context)
     builder = self._get_builder(req)
     servers = [builder.build(inst, True)['server']
             for inst in instances]
     return dict(servers=servers)
Code example #6
File: test_conductor.py Project: gminator/nova
 def test_instance_get_all(self):
     self.mox.StubOutWithMock(db, 'instance_get_all')
     self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
     db.instance_get_all(self.context)
     db.instance_get_all_by_filters(self.context, {'name': 'fake-inst'},
                                    'updated_at', 'asc')
     self.mox.ReplayAll()
     self.conductor.instance_get_all(self.context)
     self.conductor.instance_get_all_by_filters(self.context,
                                                {'name': 'fake-inst'},
                                                'updated_at', 'asc')
Code example #7
File: test_api.py Project: alanmeadows/cobalt
    def test_bless_instance_twice(self):

        instance_uuid = utils.create_instance(self.context)

        num_instance_before = len(db.instance_get_all(self.context))
        self.cobalt_api.bless_instance(self.context, instance_uuid)
        self.cobalt_api.bless_instance(self.context, instance_uuid)

        instances = db.instance_get_all(self.context)
        self.assertTrue(len(instances) == num_instance_before + 2,
                        "There should be 2 more instances because we blessed twice.")
Code example #8
File: test_compute.py Project: cp16net/reddwarf
    def test_run_terminate(self):
        """Make sure it is possible to  run and terminate instance"""
        instance_id = self._create_instance()

        self.compute.run_instance(self.context, instance_id)

        instances = db.instance_get_all(context.get_admin_context())
        LOG.info(_("Running instances: %s"), instances)
        self.assertEqual(len(instances), 1)

        self.compute.terminate_instance(self.context, instance_id)

        instances = db.instance_get_all(context.get_admin_context())
        LOG.info(_("After terminating instances: %s"), instances)
        self.assertEqual(len(instances), 0)
Code example #9
def list_vms(host=None):
    """
      make a list of vms and expand out their fixed_ip and floating ips sensibly
    """
    flags.parse_args([])
    my_instances  = []
    if host is None:
        instances = db.instance_get_all(context.get_admin_context())
    else:
        instances = db.instance_get_all_by_host(
                      context.get_admin_context(), host)

    for instance in instances:
        my_inst = dict(instance)
        for (k, v) in my_inst.items():
            try:
                json.dumps(v)
            except TypeError:
                my_inst[k] = str(v)

        ec2_id = db.get_ec2_instance_id_by_uuid(context.get_admin_context(), instance.uuid)
        ec2_id = 'i-' + hex(int(ec2_id)).replace('0x', '').zfill(8)
        my_inst['ec2_id'] = ec2_id
        fixed_ips = []
        try:
            fixed_ips = db.fixed_ip_get_by_instance(context.get_admin_context(), instance.uuid)
        except Exception:
            pass
        my_inst['fixed_ips'] = [ip.address for ip in fixed_ips]
        my_inst['floating_ips'] = []
        for ip in fixed_ips:
            my_inst['floating_ips'].extend([ f_ip.address for f_ip in db.floating_ip_get_by_fixed_address(context.get_admin_context(), ip.address)])

        my_instances.append(my_inst)

    return my_instances
Code example #10
File: instance_manager.py Project: SriniNa/hadev
    def get_all_instances(self, context, instance_uuid=None):
        """Returns the states of all ACTIVE instances known to the db,
        optionally skipping the instance with the given uuid.
        """

        # Pull all instances from the db:
        instances = db.instance_get_all(context)
        instance_states = []
        if instance_uuid is None:
            LOG.info(_("None instance_uuid"))

        for instance in instances:
            vm_state = instance.get('vm_state')

            if vm_state != vm_states.ACTIVE:
                continue
            if instance_uuid is not None and instance['uuid'] == instance_uuid:
                LOG.info(_("found matching uuid instance"))
                continue

            instance_state = self.instance_state_cls(instance)
            instance_states.append(instance_state)

        return instance_states
Code example #11
File: imagecache.py Project: bhuvan/nova
    def _list_running_instances(self, context):
        """List running instances (on all compute nodes)."""
        self.used_images = {}
        self.image_popularity = {}
        self.instance_names = set()

        instances = db.instance_get_all(context)
        for instance in instances:
            self.instance_names.add(instance['name'])

            resize_states = [task_states.RESIZE_PREP,
                             task_states.RESIZE_MIGRATING,
                             task_states.RESIZE_MIGRATED,
                             task_states.RESIZE_FINISH]
            if (instance['task_state'] in resize_states or
                    instance['vm_state'] == vm_states.RESIZED):
                self.instance_names.add(instance['name'] + '_resize')

            image_ref_str = str(instance['image_ref'])
            local, remote, insts = self.used_images.get(image_ref_str,
                                                        (0, 0, []))
            if instance['host'] == FLAGS.host:
                local += 1
            else:
                remote += 1
            insts.append(instance['name'])
            self.used_images[image_ref_str] = (local, remote, insts)

            self.image_popularity.setdefault(image_ref_str, 0)
            self.image_popularity[image_ref_str] += 1
Code example #12
File: test_manager.py Project: xww/nova-old
    def test_monitor_normal_instance(self):
        fake_instance = {'uuid': 'fake_uuid',
                         'host': 'fake_compute_host'}

        instances = [fake_instance]

        self.mox.StubOutWithMock(db, 'instance_get_all')
        self.mox.StubOutWithMock(self.manager, '_is_instance_abnormal')
        self.mox.StubOutWithMock(self.manager, '_is_instance_ha')

        #instance normal
        db.instance_get_all(self.context).AndReturn(instances)
        self.manager._is_instance_abnormal(self.context,
                                           fake_instance).AndReturn(False)
        self.mox.ReplayAll()
        self.manager.monitor_instance(self.context)
Code example #13
 def test_boot_servers_with_affinity_overquota(self):
     # Tests that we check server group member quotas and cleanup created
     # resources when we fail with OverQuota.
     self.flags(quota_server_group_members=1)
     # make sure we start with 0 servers
     servers = self.api.get_servers(detail=False)
     self.assertEqual(0, len(servers))
     created_group = self.api.post_server_groups(self.affinity)
     ex = self.assertRaises(client.OpenStackApiException,
                            self._boot_servers_to_group,
                            created_group)
     self.assertEqual(403, ex.response.status_code)
     # _boot_servers_to_group creates 2 instances in the group in order, not
     # multiple servers in a single request. Since our quota is 1, the first
     # server create would pass, the second should fail, and we should be
      # left with 1 server and its 1 block device mapping.
     servers = self.api.get_servers(detail=False)
     self.assertEqual(1, len(servers))
     ctxt = context.get_admin_context()
     servers = db.instance_get_all(ctxt)
     self.assertEqual(1, len(servers))
     ctxt_mgr = db_api.get_context_manager(ctxt)
     with ctxt_mgr.reader.using(ctxt):
         bdms = db_api._block_device_mapping_get_query(ctxt).all()
     self.assertEqual(1, len(bdms))
     self.assertEqual(servers[0]['uuid'], bdms[0]['instance_uuid'])
Code example #14
File: manage.py Project: RibeiroAna/nova
    def list(self, host=None):
        """Lists all fixed ips (optionally by host)."""
        ctxt = context.get_admin_context()

        try:
            if host is None:
                fixed_ips = db.fixed_ip_get_all(ctxt)
            else:
                fixed_ips = db.fixed_ip_get_by_host(ctxt, host)

        except exception.NotFound as ex:
            print(_("error: %s") % ex)
            return(2)

        instances = db.instance_get_all(context.get_admin_context())
        instances_by_uuid = {}
        for instance in instances:
            instances_by_uuid[instance['uuid']] = instance

        print("%-18s\t%-15s\t%-15s\t%s" % (_('network'),
                                              _('IP address'),
                                              _('hostname'),
                                              _('host')))

        all_networks = {}
        try:
            # use network_get_all to retrieve all existing networks
            # this is to ensure that IPs associated with deleted networks
            # will not throw exceptions.
            for network in db.network_get_all(context.get_admin_context()):
                all_networks[network.id] = network
        except exception.NoNetworksFound:
            # do not have any networks, so even if there are IPs, these
            # IPs should have been deleted ones, so return.
            print(_('No fixed IP found.'))
            return

        has_ip = False
        for fixed_ip in fixed_ips:
            hostname = None
            host = None
            network = all_networks.get(fixed_ip['network_id'])
            if network:
                has_ip = True
                if fixed_ip.get('instance_uuid'):
                    instance = instances_by_uuid.get(fixed_ip['instance_uuid'])
                    if instance:
                        hostname = instance['hostname']
                        host = instance['host']
                    else:
                        print(_('WARNING: fixed ip %s allocated to missing'
                                ' instance') % str(fixed_ip['address']))
                print("%-18s\t%-15s\t%-15s\t%s" % (
                        network['cidr'],
                        fixed_ip['address'],
                        hostname, host))

        if not has_ip:
            print(_('No fixed IP found.'))
Code example #15
File: test_gridcentric.py Project: jingsu/openstack
    def test_bless_instance(self):
        instance_id = utils.create_instance(self.context)

        num_instance_before = len(db.instance_get_all(self.context))
        self.gridcentric.bless_instance(self.context, instance_id)

        # Ensure that we have a 2nd instance in the database that is a "clone"
        # of our original instance.
        instances = db.instance_get_all(self.context)
        self.assertTrue(len(instances) == (num_instance_before + 1),
                        "There should be one new instances after blessing.")

        # The virtual machine should be marked that it is now blessed.
        metadata = db.instance_metadata_get(self.context, instance_id + 1)
        self.assertTrue(metadata.has_key('blessed'),
                        "The instance should have a bless metadata after being blessed.")
        self.assertTrue(metadata['blessed'] == '1',
            "The instance should have the bless metadata set to true after being blessed. " \
          + "(value=%s)" % (metadata['blessed']))
Code example #16
File: api.py Project: andrewbogott/novawikiplugins
    def update(self, req, id, body):
        """Add new filesystem."""
        name = id
        try:
            entry = body['fs_entry']
            size = entry['size']
            scope = entry['scope']
        except (TypeError, KeyError):
            raise webob.exc.HTTPUnprocessableEntity()
        if scope not in ['project', 'instance', 'global']:
            LOG.error(_("scope must be one of project, instance, or global"))
            raise webob.exc.HTTPUnprocessableEntity()

        context = req.environ['nova.context']
        project = context.project_id

        try:
            if self.has_db_support:
                sharedfs_db.filesystem_add(context, name, scope, project)

            self.fs_driver.create_fs(name, project, size)
        except exception.NotAuthorized:
            msg = _("Filesystem creation requires admin permissions.")
            raise webob.exc.HTTPForbidden(msg)

        if self.has_db_support:
            # Attach global or project-wide shares immediately.
            instance_list = []
            if scope == 'global':
                instance_list = db.instance_get_all(context)
            elif scope == 'project':
                instance_list = db.instance_get_all_by_project(context,
                                                               project)

            for instance in instance_list:
                try:
                    fixed_ips = db.fixed_ip_get_by_instance(context,
                                                            instance.id)
                    for ip in fixed_ips:
                        LOG.debug(_("attaching %(ip)s to filesystem %(fs)s.")
                                  % {'ip': ip['address'], 'fs': name})
                        try:
                            self.fs_driver.attach(name, ip['address'])
                        except exception.NotAuthorized:
                            LOG.warning(_("Insufficient permissions to attach"
                                       " %(instance)s to filesystem %(fs)s.") %
                                       {'instance': instance.name, 'fs': name})
                except exception.FixedIpNotFound:
                    LOG.warning(_("Unable to get IP address for %s.")
                              % instance.id)

        return _translate_fs_entry_view({'name': name,
                                         'size': size,
                                         'scope': scope,
                                         'project': project})
Code example #17
File: test_host_manager.py Project: bhuvan/nova
    def test_get_all_host_states(self):
        self.flags(reserved_host_memory_mb=512,
                reserved_host_disk_mb=1024)

        context = 'fake_context'
        topic = 'compute'

        self.mox.StubOutWithMock(db, 'compute_node_get_all')
        self.mox.StubOutWithMock(host_manager.LOG, 'warn')
        self.mox.StubOutWithMock(db, 'instance_get_all')

        db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
        # Invalid service
        host_manager.LOG.warn("No service for compute ID 5")
        db.instance_get_all(context,
                columns_to_join=['instance_type']).AndReturn(
                        fakes.INSTANCES)

        self.mox.ReplayAll()
        host_states = self.host_manager.get_all_host_states(context, topic)

        self.assertEqual(len(host_states), 4)
        # Check that .service is set properly
        for i in xrange(4):
            compute_node = fakes.COMPUTE_NODES[i]
            host = compute_node['service']['host']
            self.assertEqual(host_states[host].service,
                    compute_node['service'])
        self.assertEqual(host_states['host1'].free_ram_mb, 0)
        # 511GB
        self.assertEqual(host_states['host1'].free_disk_mb, 523264)
        self.assertEqual(host_states['host2'].free_ram_mb, 512)
        # 1023GB
        self.assertEqual(host_states['host2'].free_disk_mb, 1047552)
        self.assertEqual(host_states['host3'].free_ram_mb, 2560)
        # 3071GB
        self.assertEqual(host_states['host3'].free_disk_mb, 3144704)
        self.assertEqual(host_states['host4'].free_ram_mb, 7680)
        # 8191GB
        self.assertEqual(host_states['host4'].free_disk_mb, 8387584)
Code example #18
File: api.py Project: andrewbogott/novawikiplugins
    def delete(self, req, id):
        """Delete the filesystem identified by id."""
        name = id

        if self.has_db_support:
            # Unattach global or project-wide shares immediately.
            context = req.environ['nova.context']
            fs_entry = sharedfs_db.filesystem_get(context, name)
            if not fs_entry:
                msg = _("Filesystem %s not found.") % name
                raise webob.exc.HTTPNotFound(msg)
            scope = fs_entry.scope
            project = fs_entry.project_id
            instance_list = []
            if scope == 'global':
                instance_list = db.instance_get_all(context)
            elif scope == 'project':
                instance_list = db.instance_get_all_by_project(context,
                                                               project)

            for instance in instance_list:
                try:
                    fixed_ips = db.fixed_ip_get_by_instance(context,
                                                            instance.id)
                    for ip in fixed_ips:
                        LOG.debug(_("unattaching %(ip)s from fs %(fs)s.") %
                                  {'ip': ip['address'], 'fs': name})
                        try:
                            self.fs_driver.unattach(name, ip['address'])
                        except exception.NotAuthorized:
                            LOG.warning(_("Insufficient permission to unattach"
                                     " %(instance)s from filesystem %(fs)s.") %
                                          {'instance': instance.name,
                                           'fs': name})
                except exception.FixedIpNotFound:
                    LOG.warning(_("Unable to get IP address for %s.")
                              % instance.id)

            sharedfs_db.filesystem_delete(context, name)

        try:
            self.fs_driver.delete_fs(name, project)
        except exception.NotAuthorized:
            msg = _("Filesystem deletion requires admin permissions.")
            raise webob.exc.HTTPForbidden(msg)
        except exception.NotFound:
            msg = _("Filesystem %s does not exist.") % name
            raise webob.exc.HTTPNotFound(msg)

        return webob.Response(status_int=202)
Code example #19
    def get_all_host_states(self, context, topic):
        """Returns a dict of all the hosts the HostManager
        knows about. Also, each of the consumable resources in HostState
        are pre-populated and adjusted based on data in the db.

        For example:
        {'192.168.1.100': HostState(), ...}

        Note: this can be very slow with a lot of instances.
        InstanceType table isn't required since a copy is stored
        with the instance (in case the InstanceType changed since the
        instance was created)."""

        if topic != 'compute':
            raise NotImplementedError(_(
                "host_manager only implemented for 'compute'"))

        host_state_map = {}

        # Make a compute node dict with the bare essential metrics.
        compute_nodes = db.compute_node_get_all(context)
        for compute in compute_nodes:
            service = compute['service']
            if not service:
                LOG.warn(_("No service for compute ID %s") % compute['id'])
                continue
            host = service['host']
            capabilities = self.service_states.get(host, None)
            host_state = self.host_state_cls(host, topic,
                    capabilities=capabilities,
                    service=dict(service.iteritems()))
            """ start add by DOCOMO """
            # pass context to access DB
            host_state.update_from_compute_node(compute, context=context)
            """ end add by DOCOMO """
            host_state_map[host] = host_state

        # "Consume" resources from the host the instance resides on.
        instances = db.instance_get_all(context)
        for instance in instances:
            host = instance['host']
            if not host:
                continue
            host_state = host_state_map.get(host, None)
            if not host_state:
                continue
            host_state.consume_from_instance(instance)
        return host_state_map
Code example #20
File: manage.py Project: comstud/nova
    def list(self, host=None):
        """Show a list of all instances."""

        print (
            "%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s"
            "  %-10s %-10s %-10s %-5s"
            % (
                _("instance"),
                _("node"),
                _("type"),
                _("state"),
                _("launched"),
                _("image"),
                _("kernel"),
                _("ramdisk"),
                _("project"),
                _("user"),
                _("zone"),
                _("index"),
            )
        )

        if host is None:
            instances = db.instance_get_all(context.get_admin_context())
        else:
            instances = db.instance_get_all_by_host(context.get_admin_context(), host)

        for instance in instances:
            instance_type = flavors.extract_instance_type(instance)
            print (
                "%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s"
                " %-10s %-10s %-10s %-5d"
                % (
                    instance["display_name"],
                    instance["host"],
                    instance_type["name"],
                    instance["vm_state"],
                    instance["launched_at"],
                    instance["image_ref"],
                    instance["kernel_id"],
                    instance["ramdisk_id"],
                    instance["project_id"],
                    instance["user_id"],
                    instance["availability_zone"],
                    instance["launch_index"],
                )
            )
Code example #21
File: manager.py Project: xww/nova-old
    def monitor_instance(self, ctxt):
        """
        check and update instance status,
        if instance is ha and failure, notify the ha module.
        """
        instances_failure_info = []

        instances = db.instance_get_all(ctxt)
        for instance in instances:
            instance_failure_info = self._get_instance_failure_info(instance)
            if not instance_failure_info:
                instance_failure_info = dict(uuid=instance['uuid'],
                                        failure_times=0,
                                        last_failure_time=timeutils.utcnow())

            if self._is_instance_abnormal(ctxt, instance):
                if (instance_failure_info['failure_times'] != 0 and
                    self.is_time_valid(
                        instance_failure_info['last_failure_time'],
                        timeutils.utcnow(),
                        (self.instance_recover_time *
                         instance_failure_info['failure_times']))):
                    LOG.info(_('instance %s is still in recovering...')
                             % instance['uuid'])
                    LOG.debug(_('its last failure time is %s')
                             % instance_failure_info['last_failure_time'])
                    instances_failure_info.append(instance_failure_info)
                    continue
                if self._is_instance_ha(ctxt, instance):
                    self._notify_ha_instance_failure(ctxt, instance,
                                    instance_failure_info['failure_times'])
                else:
                    self._notify_common_instance_failure(ctxt, instance)

                instance_failure_info['failure_times'] += 1
                instance_failure_info['last_failure_time'] = timeutils.utcnow()
                instances_failure_info.append(instance_failure_info)
                LOG.debug(_('instance %(uuid)s failed %(times)s times, '
                            'the latest failure time is %(failure_time)s') %
                          {'uuid': instance['uuid'],
                           'times': instance_failure_info['failure_times'],
                           'failure_time':
                           instance_failure_info['last_failure_time']})

        self.instances_failure_info = instances_failure_info
        self._report_failure_instances_info()
Code example #22
    def _list_running_instances(self, context):
        """List running instances (on all compute nodes)."""
        self.used_images = {}
        self.image_popularity = {}

        instances = db.instance_get_all(context)
        for instance in instances:
            image_ref_str = str(instance['image_ref'])
            local, remote, insts = self.used_images.get(image_ref_str,
                                                        (0, 0, []))
            if instance['host'] == FLAGS.host:
                local += 1
            else:
                remote += 1
            insts.append(instance['name'])
            self.used_images[image_ref_str] = (local, remote, insts)

            self.image_popularity.setdefault(image_ref_str, 0)
            self.image_popularity[image_ref_str] += 1
Code example #23
File: test_manager.py Project: alanmeadows/cobalt
    def test_discard_a_blessed_instance(self):
        self.vmsconn.set_return_val("discard", None)
        blessed_uuid = utils.create_blessed_instance(self.context, source_uuid="UNITTEST_DISCARD")

        pre_discard_time = datetime.utcnow()
        self.cobalt.discard_instance(self.context, instance_uuid=blessed_uuid)

        try:
            db.instance_get(self.context, blessed_uuid)
            self.fail("The blessed instance should no longer exists after being discarded.")
        except exception.InstanceNotFound:
            # This ensures that the instance has been marked as deleted in the database. Now assert
            # that the rest of its attributes have been marked.
            self.context.read_deleted = 'yes'
            instances = db.instance_get_all(self.context)

            self.assertEquals(1, len(instances))
            discarded_instance = instances[0]

            self.assertTrue(pre_discard_time <= discarded_instance['terminated_at'])
            self.assertEquals(vm_states.DELETED, discarded_instance['vm_state'])
Code example #24
File: manage.py Project: AnyBucket/nova
    def list(self, host=None):
        """Show a list of all instances."""

        print ("%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s"
               "  %-10s %-10s %-10s %-5s" % (_('instance'),
                                             _('node'),
                                             _('type'),
                                             _('state'),
                                             _('launched'),
                                             _('image'),
                                             _('kernel'),
                                             _('ramdisk'),
                                             _('project'),
                                             _('user'),
                                             _('zone'),
                                             _('index')))

        if host is None:
            instances = db.instance_get_all(context.get_admin_context())
        else:
            instances = db.instance_get_all_by_host(
                           context.get_admin_context(), host)

        for instance in instances:
            instance_type = instance_types.extract_instance_type(instance)
            print ("%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s"
                   " %-10s %-10s %-10s %-5d" % (instance['display_name'],
                                                instance['host'],
                                                instance_type['name'],
                                                instance['vm_state'],
                                                instance['launched_at'],
                                                instance['image_ref'],
                                                instance['kernel_id'],
                                                instance['ramdisk_id'],
                                                instance['project_id'],
                                                instance['user_id'],
                                                instance['availability_zone'],
                                                instance['launch_index']))
Code example #25
 def get_all(cls, context, expected_attrs=None):
     """Returns all instances on all nodes."""
     db_instances = db.instance_get_all(
         context, columns_to_join=_expected_cols(expected_attrs))
     return _make_instance_list(context, cls(), db_instances,
                                expected_attrs)
Code example #26
    def test_get_all_host_states(self):
        self.flags(reserved_host_memory_mb=512,
                reserved_host_disk_mb=1024)

        context = 'fake_context'
        topic = 'compute'

        self.mox.StubOutWithMock(db, 'compute_node_get_all')
        self.mox.StubOutWithMock(baremetal_host_manager.LOG, 'warn')
        self.mox.StubOutWithMock(db, 'instance_get_all')
        self.stubs.Set(timeutils, 'utcnow', lambda: 31337)

        def _fake_bm_node_get_all(context, service_host=None):
            if service_host == 'host1':
                return BAREMETAL_NODES_1
            elif service_host == 'host2':
                return BAREMETAL_NODES_2
            elif service_host == 'host3':
                return BAREMETAL_NODES_3
            elif service_host == 'host4':
                return BAREMETAL_NODES_4
            else:
                return {}

        def _fake_bm_node_get_by_instance_uuid(context, instance_uuid):
            return None

        self.stubs.Set(bmdb, 'bm_node_get_all', _fake_bm_node_get_all)
        self.stubs.Set(bmdb, 'bm_node_get_by_instance_uuid',
                _fake_bm_node_get_by_instance_uuid)

        db.compute_node_get_all(context).AndReturn(BAREMETAL_COMPUTE_NODES)

        # Invalid service
        baremetal_host_manager.LOG.warn("No service for compute ID 5")
        db.instance_get_all(context,
                columns_to_join=['instance_type']).\
                AndReturn(BAREMETAL_INSTANCES)
        self.mox.ReplayAll()

        host1_compute_capabs = dict(free_memory=1234, host_memory=5678,
                timestamp=1)
        host2_compute_capabs = dict(free_memory=1234, host_memory=5678,
                timestamp=1,
                instance_type_extra_specs={'baremetal_driver': 'test'})
        self.baremetal_host_manager.update_service_capabilities('compute',
                'host1', host1_compute_capabs)
        self.baremetal_host_manager.update_service_capabilities('compute',
                'host2', host2_compute_capabs)
        self.baremetal_host_manager.update_service_capabilities('compute',
                'host3', host2_compute_capabs)
        self.baremetal_host_manager.update_service_capabilities('compute',
                'host4', host2_compute_capabs)

        host_states = self.baremetal_host_manager.get_all_host_states(context,
                topic)

        num_bm_nodes = len(BAREMETAL_COMPUTE_NODES)

        # does not contain the broken entry
        self.assertEqual(len(host_states), num_bm_nodes - 1)
        self.assertIn('host1', host_states)
        self.assertIn('host2', host_states)
        self.assertIn('host3', host_states)
        self.assertIn('host4', host_states)

        # check returned value
        # host1 : subtract total ram of BAREMETAL_INSTANCES
        # from BAREMETAL_COMPUTE_NODES
        # host1 : total vcpu of BAREMETAL_INSTANCES
        self.assertEqual(host_states['host1'].free_ram_mb +\
                FLAGS.reserved_host_memory_mb, 8704)
        self.assertEqual(host_states['host1'].vcpus_used, 5)

        # host2 : subtract BAREMETAL_INSTANCES from BAREMETAL_NODES_2
        # host2 : total vcpu of BAREMETAL_INSTANCES
        self.assertEqual(host_states['host2'].free_ram_mb, 8192)
        self.assertEqual(host_states['host2'].vcpus_total, 3)

        # host3 : subtract BAREMETAL_INSTANCES from BAREMETAL_NODES_3
        # host3 : total vcpu of BAREMETAL_INSTANCES
        self.assertEqual(host_states['host3'].free_ram_mb, 2048)
        self.assertEqual(host_states['host3'].vcpus_total, 4)

        # host4 : subtract BAREMETAL_INSTANCES from BAREMETAL_NODES_4
        # host4 : total vcpu of BAREMETAL_INSTANCES
        self.assertEqual(host_states['host4'].free_ram_mb, 8192)
        self.assertEqual(host_states['host4'].vcpus_total, 5)

        self.mox.VerifyAll()
Code example #27
File: db_client.py Project: rafoul/pytest
 def clear_instances(self, context):
     instances = nova_db.instance_get_all(context)
     return instances
Code example #28
File: fakes.py Project: renuka-apte/nova
def mox_host_manager_db_calls(mock, context):
    mock.StubOutWithMock(db, "compute_node_get_all")
    mock.StubOutWithMock(db, "instance_get_all")

    db.compute_node_get_all(mox.IgnoreArg()).AndReturn(COMPUTE_NODES)
    db.instance_get_all(mox.IgnoreArg()).AndReturn(INSTANCES)
Code example #29
File: fakes.py Project: xtoddx/nova
def mox_host_manager_db_calls(mock, context):
    mock.StubOutWithMock(db, 'compute_node_get_all')
    mock.StubOutWithMock(db, 'instance_get_all')

    db.compute_node_get_all(mox.IgnoreArg()).AndReturn(COMPUTE_NODES)
    db.instance_get_all(mox.IgnoreArg()).AndReturn(INSTANCES)
Code example #30
File: zone_manager.py Project: BillTheBest/nova
 def _instance_get_all(self, context):
     """Broken out for testing."""
     return db.instance_get_all(context)
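
Breaking the single db.instance_get_all call out into _instance_get_all gives tests a seam to override without touching the database. A hedged sketch of how a test might use that seam (the FakeZoneManager subclass name and the canned rows are assumptions for illustration):

class FakeZoneManager(zone_manager.ZoneManager):
    def _instance_get_all(self, context):
        # Return canned instance rows instead of querying the real database.
        return [{'host': 'host1', 'vm_state': 'active'},
                {'host': 'host2', 'vm_state': 'building'}]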
Code example #31
File: fakes.py Project: jakedahn/nova
def mox_host_manager_db_calls(mox, context):
    mox.StubOutWithMock(db, 'compute_node_get_all')
    mox.StubOutWithMock(db, 'instance_get_all')

    db.compute_node_get_all(context).AndReturn(COMPUTE_NODES)
    db.instance_get_all(context).AndReturn(INSTANCES)
Code example #32
File: instance.py Project: amatuerone/nova
 def get_all(cls, context, expected_attrs=None):
     """Returns all instances on all nodes."""
     db_instances = db.instance_get_all(
             context, columns_to_join=_expected_cols(expected_attrs))
     return _make_instance_list(context, cls(), db_instances,
                                expected_attrs)
Code example #33
File: zone_manager.py Project: Razique/nova
 def _instance_get_all(self, context):
     """Broken out for testing."""
     return db.instance_get_all(context)
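
Taken together, these examples follow the same basic pattern: obtain an admin (or elevated) RequestContext, pass it to db.instance_get_all, and optionally narrow the query with columns_to_join or fall back to db.instance_get_all_by_host for a single host. A minimal sketch of that pattern, assuming a standard nova import layout (the counting logic and the 'active' literal are illustrative only):

from nova import context
from nova import db


def count_active_instances(host=None):
    # Use an admin context so the query is not scoped to a single project.
    ctxt = context.get_admin_context()
    if host is None:
        instances = db.instance_get_all(ctxt)
    else:
        instances = db.instance_get_all_by_host(ctxt, host)
    # Each returned row can be treated like a dict of instance columns.
    return len([inst for inst in instances if inst['vm_state'] == 'active'])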