Example #1
def _return_servers_objs(context,
                         search_opts=None,
                         limit=None,
                         marker=None,
                         want_objects=False,
                         expected_attrs=None,
                         sort_keys=None,
                         sort_dirs=None):
    db_insts = fake_instance_get_all_by_filters()(None,
                                                  limit=limit,
                                                  marker=marker)
    expected = [
        'metadata', 'system_metadata', 'flavor', 'info_cache',
        'security_groups'
    ]
    return base.obj_make_list(context,
                              objects.InstanceList(),
                              objects.Instance,
                              db_insts,
                              expected_attrs=expected)
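Example #1 hydrates an InstanceList from raw DB records with base.obj_make_list(); the double call on fake_instance_get_all_by_filters() works because that fake presumably returns the actual stub function. For contrast, a minimal sketch of building and consuming a list directly (assumes a Nova development environment where nova.objects is importable; the UUID is a placeholder):

from nova import objects

objects.register_all()  # versioned objects must be registered before use

inst = objects.Instance(uuid='11111111-1111-4111-8111-111111111111',
                        host='host-1')
inst_list = objects.InstanceList(objects=[inst])

# InstanceList behaves like a sequence: len(), indexing and iteration work.
assert len(inst_list) == 1
assert inst_list[0].host == 'host-1'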
Example #2
    def test_get_all_host_states_after_delete_all(self, mock_get_by_host):
        mock_get_by_host.return_value = objects.InstanceList()
        context = 'fake_context'

        self.mox.StubOutWithMock(objects.ServiceList, 'get_by_binary')
        self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all')
        # all nodes active for first call
        objects.ServiceList.get_by_binary(
            context, 'nova-compute').AndReturn(fakes.SERVICES)
        objects.ComputeNodeList.get_all(context).AndReturn(fakes.COMPUTE_NODES)
        # remove all nodes for second call
        objects.ServiceList.get_by_binary(
            context, 'nova-compute').AndReturn(fakes.SERVICES)
        objects.ComputeNodeList.get_all(context).AndReturn([])
        self.mox.ReplayAll()

        self.host_manager.get_all_host_states(context)
        self.host_manager.get_all_host_states(context)
        host_states_map = self.host_manager.host_state_map
        self.assertEqual(len(host_states_map), 0)
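This test drives its fakes through the legacy mox library (Nova has since moved entirely to mock, which already supplies mock_get_by_host here). A rough mock-only equivalent of the stubbing above; the patch targets are an assumption:

    @mock.patch.object(objects.ComputeNodeList, 'get_all')
    @mock.patch.object(objects.ServiceList, 'get_by_binary')
    def test_get_all_host_states_after_delete_all(
            self, mock_get_by_binary, mock_get_all, mock_get_by_host):
        mock_get_by_host.return_value = objects.InstanceList()
        mock_get_by_binary.return_value = fakes.SERVICES
        # all nodes active for the first call, all removed for the second
        mock_get_all.side_effect = [fakes.COMPUTE_NODES, []]

        self.host_manager.get_all_host_states('fake_context')
        self.host_manager.get_all_host_states('fake_context')
        self.assertEqual(len(self.host_manager.host_state_map), 0)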
Example #3
def fake_compute_get_all(*args, **kwargs):
    inst_list = [
        fakes.stub_instance_obj(
            None, 1, uuid=UUID1, host="host-1", node="node-1",
            reservation_id="r-1", launch_index=0,
            kernel_id=UUID4, ramdisk_id=UUID5,
            display_name="hostname-1",
            root_device_name="/dev/vda",
            user_data="userdata",
            services=fake_services("host-1")),
        fakes.stub_instance_obj(
            None, 2, uuid=UUID2, host="host-2", node="node-2",
            reservation_id="r-2", launch_index=1,
            kernel_id=UUID4, ramdisk_id=UUID5,
            display_name="hostname-2",
            root_device_name="/dev/vda",
            user_data="userdata",
            services=fake_services("host-2")),
    ]
    return objects.InstanceList(objects=inst_list)
Example #4
    def test_recreate_instance_info(self, mock_get_by_host):
        host_name = 'fake_host'
        inst1 = fake_instance.fake_instance_obj('fake_context',
                                                uuid='aaa',
                                                host=host_name)
        inst2 = fake_instance.fake_instance_obj('fake_context',
                                                uuid='bbb',
                                                host=host_name)
        orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
        new_inst_list = objects.InstanceList(objects=[inst1, inst2])
        mock_get_by_host.return_value = new_inst_list
        self.host_manager._instance_info = {
            host_name: {
                'instances': orig_inst_dict,
                'updated': True,
            }
        }
        self.host_manager._recreate_instance_info('fake_context', host_name)
        new_info = self.host_manager._instance_info[host_name]
        self.assertEqual(len(new_info['instances']), len(new_inst_list))
        self.assertFalse(new_info['updated'])
Example #5
    def test_init_instance_info(self, mock_spawn, mock_get_all,
                                mock_get_by_filters):
        mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
        cn1 = objects.ComputeNode(host='host1')
        cn2 = objects.ComputeNode(host='host2')
        inst1 = objects.Instance(host='host1', uuid='uuid1')
        inst2 = objects.Instance(host='host1', uuid='uuid2')
        inst3 = objects.Instance(host='host2', uuid='uuid3')
        mock_get_all.return_value = objects.ComputeNodeList(objects=[cn1, cn2])
        mock_get_by_filters.return_value = objects.InstanceList(
                objects=[inst1, inst2, inst3])
        hm = self.host_manager
        hm._instance_info = {}
        hm._init_instance_info()
        self.assertEqual(len(hm._instance_info), 2)
        fake_info = hm._instance_info['host1']
        self.assertIn('uuid1', fake_info['instances'])
        self.assertIn('uuid2', fake_info['instances'])
        self.assertNotIn('uuid3', fake_info['instances'])
        exp_filters = {'deleted': False, 'host': [u'host1', u'host2']}
        mock_get_by_filters.assert_called_once_with(mock.ANY, exp_filters)
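The three mock arguments imply stacked mock.patch decorators that this excerpt omits. A plausible reconstruction, with the caveat that the exact patch targets are an assumption (decorators apply bottom-up, so the lowest one supplies the first mock argument):

    @mock.patch('nova.objects.InstanceList.get_by_filters')
    @mock.patch('nova.objects.ComputeNodeList.get_all')
    @mock.patch('nova.utils.spawn_n')
    def test_init_instance_info(self, mock_spawn, mock_get_all,
                                mock_get_by_filters):
        ...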
Example #6
    def build_instances(self, ctxt, build_inst_kwargs):
        """Pick a cell (possibly ourselves) to build new instance(s) and
        forward the request accordingly.
        """
        # Target is ourselves first.
        filter_properties = build_inst_kwargs.get('filter_properties')
        if (filter_properties is not None and not isinstance(
                filter_properties['instance_type'], objects.Flavor)):
            # NOTE(danms): Handle pre-1.30 build_instances() call. Remove me
            # when we bump the RPC API version to 2.0.
            flavor = objects.Flavor(**filter_properties['instance_type'])
            build_inst_kwargs['filter_properties'] = dict(filter_properties,
                                                          instance_type=flavor)
        instances = build_inst_kwargs['instances']
        if not isinstance(instances[0], objects.Instance):
            # NOTE(danms): Handle pre-1.32 build_instances() call. Remove me
            # when we bump the RPC API version to 2.0
            build_inst_kwargs['instances'] = instance_obj._make_instance_list(
                ctxt, objects.InstanceList(), instances,
                ['system_metadata', 'metadata'])
        our_cell = self.state_manager.get_my_state()
        self.msg_runner.build_instances(ctxt, our_cell, build_inst_kwargs)
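The first compat branch converts a legacy instance_type dict into a Flavor object simply by splatting it into the constructor. The same conversion in isolation (a minimal sketch; assumes nova.objects is importable and the field values are placeholders):

from nova import objects

legacy = {'flavorid': '1', 'name': 'm1.tiny', 'memory_mb': 512,
          'vcpus': 1, 'root_gb': 1, 'ephemeral_gb': 0}
flavor = objects.Flavor(**legacy)
assert isinstance(flavor, objects.Flavor)
assert flavor.memory_mb == 512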
Example #7
    def test_update_instance_info(self):
        host_name = 'fake_host'
        inst1 = fake_instance.fake_instance_obj('fake_context', uuid='aaa',
                                                host=host_name)
        inst2 = fake_instance.fake_instance_obj('fake_context', uuid='bbb',
                                                host=host_name)
        orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
        self.host_manager._instance_info = {
                host_name: {
                    'instances': orig_inst_dict,
                    'updated': False,
                }}
        inst3 = fake_instance.fake_instance_obj('fake_context', uuid='ccc',
                                                host=host_name)
        inst4 = fake_instance.fake_instance_obj('fake_context', uuid='ddd',
                                                host=host_name)
        update = objects.InstanceList(objects=[inst3, inst4])
        self.host_manager.update_instance_info('fake_context', host_name,
                                               update)
        new_info = self.host_manager._instance_info[host_name]
        self.assertEqual(len(new_info['instances']), 4)
        self.assertTrue(new_info['updated'])
Example #8
    def test_sync_power_states_instance_not_found(self):
        db_instance = fake_instance.fake_db_instance()
        ctxt = context.get_admin_context()
        instance_list = instance_obj._make_instance_list(
            ctxt, objects.InstanceList(), [db_instance], None)
        instance = instance_list[0]

        self.mox.StubOutWithMock(objects.InstanceList, 'get_by_host')
        self.mox.StubOutWithMock(self.compute.driver, 'get_num_instances')
        self.mox.StubOutWithMock(vm_utils, 'lookup')
        self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')

        objects.InstanceList.get_by_host(
            ctxt, self.compute.host, use_slave=True).AndReturn(instance_list)
        self.compute.driver.get_num_instances().AndReturn(1)
        vm_utils.lookup(self.compute.driver._session, instance['name'],
                        False).AndReturn(None)
        self.compute._sync_instance_power_state(ctxt, instance,
                                                power_state.NOSTATE)

        self.mox.ReplayAll()

        self.compute._sync_power_states(ctxt)
Example #9
def get_instance_objects_sorted(ctx, filters, limit, marker, expected_attrs,
                                sort_keys, sort_dirs):
    """Same as above, but return an InstanceList."""
    query_cell_subset = CONF.api.instance_list_per_project_cells
    # NOTE(danms): Replicated in part from instance_get_all_by_sort_filters(),
    # where if we're not admin we're restricted to our context's project
    if query_cell_subset and not ctx.is_admin:
        # We are not admin, and configured to only query the subset of cells
        # we could possibly have instances in.
        cell_mappings = objects.CellMappingList.get_by_project_id(
            ctx, ctx.project_id)
    else:
        # Either we are admin, or configured to always hit all cells,
        # so don't limit the list to a subset.
        context.load_cells()
        cell_mappings = context.CELLS

    batch_size = get_instance_list_cells_batch_size(limit, cell_mappings)

    columns_to_join = instance_obj._expected_cols(expected_attrs)
    instance_generator = get_instances_sorted(ctx,
                                              filters,
                                              limit,
                                              marker,
                                              columns_to_join,
                                              sort_keys,
                                              sort_dirs,
                                              cell_mappings=cell_mappings,
                                              batch_size=batch_size)

    if 'fault' in expected_attrs:
        # We join fault above, so we need to make sure we don't ask
        # make_instance_list to do it again for us
        expected_attrs = copy.copy(expected_attrs)
        expected_attrs.remove('fault')
    return instance_obj._make_instance_list(ctx, objects.InstanceList(),
                                            instance_generator, expected_attrs)
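A hypothetical call of the helper above, assuming an admin RequestContext named ctx is in scope (every argument value below is a placeholder):

instances = get_instance_objects_sorted(
    ctx, filters={'deleted': False}, limit=100, marker=None,
    expected_attrs=['metadata', 'fault'],
    sort_keys=['created_at'], sort_dirs=['desc'])
# The generator already joined 'fault', so _make_instance_list is not
# asked to load it a second time.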
Example #10
    def test_update_instance_info_unknown_host(self):
        self.host_manager._recreate_instance_info = mock.MagicMock()
        host_name = 'fake_host'
        inst1 = fake_instance.fake_instance_obj('fake_context', uuid='aaa',
                                                host=host_name)
        inst2 = fake_instance.fake_instance_obj('fake_context', uuid='bbb',
                                                host=host_name)
        orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
        self.host_manager._instance_info = {
                host_name: {
                    'instances': orig_inst_dict,
                    'updated': False,
                }}
        bad_host = 'bad_host'
        inst3 = fake_instance.fake_instance_obj('fake_context', uuid='ccc',
                                                host=bad_host)
        inst_list3 = objects.InstanceList(objects=[inst3])
        self.host_manager.update_instance_info('fake_context', bad_host,
                                               inst_list3)
        new_info = self.host_manager._instance_info[host_name]
        self.host_manager._recreate_instance_info.assert_called_once_with(
                'fake_context', bad_host)
        self.assertEqual(len(new_info['instances']), len(orig_inst_dict))
        self.assertFalse(new_info['updated'])
Example #11
    def test_get_all_host_states_not_updated(self, mock_get_by_host,
                                             mock_get_all_comp,
                                             mock_get_svc_by_binary):
        mock_get_all_comp.return_value = fakes.COMPUTE_NODES
        mock_get_svc_by_binary.return_value = fakes.SERVICES
        context = 'fake_context'
        hm = self.host_manager
        inst1 = objects.Instance(uuid='uuid1')
        cn1 = objects.ComputeNode(host='host1')
        hm._instance_info = {
            'host1': {
                'instances': {
                    'uuid1': inst1
                },
                'updated': False
            }
        }
        host_state = host_manager.HostState('host1', cn1)
        self.assertFalse(host_state.instances)
        mock_get_by_host.return_value = objects.InstanceList(objects=[inst1])
        hm._add_instance_info(context, cn1, host_state)
        mock_get_by_host.assert_called_once_with(context, cn1.host)
        self.assertTrue(host_state.instances)
        self.assertEqual(host_state.instances['uuid1'], inst1)
Example #12
def get_all(*args, **kwargs):
    fields = instance_obj.INSTANCE_DEFAULT_FIELDS
    return instance_obj._make_instance_list(args[1],
                                            objects.InstanceList(),
                                            instances, fields)
Example #13
def fake_compute_get_all(*args, **kwargs):
    inst_list = [
        fakes.stub_instance_obj(None, 1, uuid=UUID1, nw_cache=NW_CACHE),
        fakes.stub_instance_obj(None, 2, uuid=UUID2, nw_cache=NW_CACHE),
    ]
    return objects.InstanceList(objects=inst_list)
Example #14
def fake_compute_get_all(*args, **kwargs):
    db_list = [fakes.stub_instance(1), fakes.stub_instance(2)]
    fields = instance_obj.INSTANCE_DEFAULT_FIELDS
    return instance_obj._make_instance_list(args[1], objects.InstanceList(),
                                            db_list, fields)
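Examples #13 and #14 show the two idioms from this page side by side: #13 wraps ready-made Instance stubs via the objects= keyword, while #14 hydrates raw DB dicts through instance_obj._make_instance_list(), which also controls which fields get loaded (INSTANCE_DEFAULT_FIELDS here).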
Example #15
File: servers.py Project: Drooids/nova
    def _get_servers(self, req, is_detail):
        """Returns a list of servers, based on any search options specified."""

        search_opts = {}
        search_opts.update(req.GET)

        context = req.environ['nova.context']
        remove_invalid_options(context, search_opts,
                               self._get_server_search_options())

        # Verify search by 'status' contains a valid status.
        # Convert it to filter by vm_state or task_state for compute_api.
        search_opts.pop('status', None)
        if 'status' in req.GET.keys():
            statuses = req.GET.getall('status')
            states = common.task_and_vm_state_from_status(statuses)
            vm_state, task_state = states
            if not vm_state and not task_state:
                return {'servers': []}
            search_opts['vm_state'] = vm_state
            # When we search by vm state, task state will return 'default'.
            # So we don't need task_state search_opt.
            if 'default' not in task_state:
                search_opts['task_state'] = task_state

        if 'changes-since' in search_opts:
            try:
                parsed = timeutils.parse_isotime(search_opts['changes-since'])
            except ValueError:
                msg = _('Invalid changes-since value')
                raise exc.HTTPBadRequest(explanation=msg)
            search_opts['changes-since'] = parsed

        # By default, compute's get_all() will return deleted instances.
        # If an admin hasn't specified a 'deleted' search option, we need
        # to filter out deleted instances by setting the filter ourselves.
        # ... Unless 'changes-since' is specified, because 'changes-since'
        # should return recently deleted instances according to the API spec.

        if 'deleted' not in search_opts:
            if 'changes-since' not in search_opts:
                # No 'changes-since', so we only want non-deleted servers
                search_opts['deleted'] = False

        if search_opts.get("vm_state") == ['deleted']:
            if context.is_admin:
                search_opts['deleted'] = True
            else:
                msg = _("Only administrators may list deleted instances")
                raise exc.HTTPForbidden(explanation=msg)

        # If all tenants is passed with 0 or false as the value
        # then remove it from the search options. Nothing passed as
        # the value for all_tenants is considered to enable the feature
        all_tenants = search_opts.get('all_tenants')
        if all_tenants:
            try:
                if not strutils.bool_from_string(all_tenants, True):
                    del search_opts['all_tenants']
            except ValueError as err:
                raise exception.InvalidInput(six.text_type(err))

        if 'all_tenants' in search_opts:
            policy.enforce(context, 'compute:get_all_tenants', {
                'project_id': context.project_id,
                'user_id': context.user_id
            })
            del search_opts['all_tenants']
        else:
            if context.project_id:
                search_opts['project_id'] = context.project_id
            else:
                search_opts['user_id'] = context.user_id

        limit, marker = common.get_limit_and_marker(req)
        # Sorting by multiple keys and directions is conditionally enabled
        sort_keys, sort_dirs = None, None
        if self.ext_mgr.is_loaded('os-server-sort-keys'):
            sort_keys, sort_dirs = common.get_sort_params(req.params)
        try:
            instance_list = self.compute_api.get_all(context,
                                                     search_opts=search_opts,
                                                     limit=limit,
                                                     marker=marker,
                                                     want_objects=True,
                                                     sort_keys=sort_keys,
                                                     sort_dirs=sort_dirs)
        except exception.MarkerNotFound:
            msg = _('marker [%s] not found') % marker
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.FlavorNotFound:
            LOG.debug("Flavor '%s' could not be found", search_opts['flavor'])
            instance_list = objects.InstanceList()

        if is_detail:
            instance_list.fill_faults()
            response = self._view_builder.detail(req, instance_list)
        else:
            response = self._view_builder.index(req, instance_list)
        req.cache_db_instances(instance_list)
        return response
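Note the FlavorNotFound fallback: returning an empty InstanceList keeps the view builders working without special-casing. A minimal sketch of that fallback object (assumes nova.objects is importable):

from nova import objects

instance_list = objects.InstanceList(objects=[])
assert len(instance_list) == 0  # still iterable, so the view builders cope

Example #18 further down also assigns instance_list._context before calling fill_faults(), apparently because fill_faults() performs its DB lookup through the list's stored context.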
Example #16
    def _get_servers(self, req, is_detail):
        """Returns a list of servers, based on any search options specified."""

        search_opts = {}
        search_opts.update(req.GET)

        context = req.environ['nova.context']
        remove_invalid_options(context, search_opts,
                               self._get_server_search_options())

        # Verify search by 'status' contains a valid status.
        # Convert it to filter by vm_state or task_state for compute_api.
        search_opts.pop('status', None)
        if 'status' in req.GET.keys():
            statuses = req.GET.getall('status')
            states = common.task_and_vm_state_from_status(statuses)
            vm_state, task_state = states
            if not vm_state and not task_state:
                return {'servers': []}
            search_opts['vm_state'] = vm_state
            # When we search by vm state, task state will return 'default'.
            # So we don't need task_state search_opt.
            if 'default' not in task_state:
                search_opts['task_state'] = task_state

        if 'changes-since' in search_opts:
            try:
                parsed = timeutils.parse_isotime(search_opts['changes-since'])
            except ValueError:
                msg = _('Invalid changes-since value')
                raise exc.HTTPBadRequest(explanation=msg)
            search_opts['changes-since'] = parsed

        # By default, compute's get_all() will return deleted instances.
        # If an admin hasn't specified a 'deleted' search option, we need
        # to filter out deleted instances by setting the filter ourselves.
        # ... Unless 'changes-since' is specified, because 'changes-since'
        # should return recently deleted instances according to the API spec.

        if 'deleted' not in search_opts:
            if 'changes-since' not in search_opts:
                # No 'changes-since', so we only want non-deleted servers
                search_opts['deleted'] = False

        if search_opts.get("vm_state") == ['deleted']:
            if context.is_admin:
                search_opts['deleted'] = True
            else:
                msg = _("Only administrators may list deleted instances")
                raise exc.HTTPForbidden(explanation=msg)

        # If tenant_id is passed as a search parameter this should
        # imply that all_tenants is also enabled unless explicitly
        # disabled. Note that the tenant_id parameter is filtered out
        # by remove_invalid_options above unless the requestor is an
        # admin.

        # TODO(gmann): 'all_tenants' flag should not be required while
        # searching with 'tenant_id'. Ref bug# 1185290
        # +microversions to achieve above mentioned behavior by
        # uncommenting below code.

        # if 'tenant_id' in search_opts and 'all_tenants' not in search_opts:
        # We do not need to add the all_tenants flag if the tenant
        # id associated with the token is the tenant id
        # specified. This is done so a request that does not need
        # the all_tenants flag does not fail because of lack of
        # policy permission for compute:get_all_tenants when it
        # doesn't actually need it.
        # if context.project_id != search_opts.get('tenant_id'):
        #    search_opts['all_tenants'] = 1

        # If all tenants is passed with 0 or false as the value
        # then remove it from the search options. Nothing passed as
        # the value for all_tenants is considered to enable the feature
        all_tenants = search_opts.get('all_tenants')
        if all_tenants:
            try:
                if not strutils.bool_from_string(all_tenants, True):
                    del search_opts['all_tenants']
            except ValueError as err:
                raise exception.InvalidInput(six.text_type(err))

        if 'all_tenants' in search_opts:
            policy.enforce(context, 'compute:get_all_tenants', {
                'project_id': context.project_id,
                'user_id': context.user_id
            })
            del search_opts['all_tenants']
        else:
            if context.project_id:
                search_opts['project_id'] = context.project_id
            else:
                search_opts['user_id'] = context.user_id

        limit, marker = common.get_limit_and_marker(req)
        sort_keys, sort_dirs = common.get_sort_params(req.params)
        try:
            instance_list = self.compute_api.get_all(
                context,
                search_opts=search_opts,
                limit=limit,
                marker=marker,
                want_objects=True,
                expected_attrs=['pci_devices'],
                sort_keys=sort_keys,
                sort_dirs=sort_dirs)
        except exception.MarkerNotFound:
            msg = _('marker [%s] not found') % marker
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.FlavorNotFound:
            LOG.debug("Flavor '%s' could not be found", search_opts['flavor'])
            instance_list = objects.InstanceList()

        if is_detail:
            instance_list.fill_faults()
            response = self._view_builder.detail(req, instance_list)
        else:
            response = self._view_builder.index(req, instance_list)
        req.cache_db_instances(instance_list)
        return response
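Compared with Example #15, this variant always applies the sort parameters (there is no os-server-sort-keys extension check) and pre-loads pci_devices through expected_attrs, sparing each instance a lazy load later.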
Example #17
    def setUp(self):
        super(FlavorExtraSpecsPolicyTest, self).setUp()
        self.controller = flavors_extraspecs.FlavorExtraSpecsController()
        self.flavor_ctrl = flavors.FlavorsController()
        self.fm_ctrl = flavor_manage.FlavorManageController()
        self.server_ctrl = servers.ServersController()
        self.req = fakes.HTTPRequest.blank('')
        self.server_ctrl._view_builder._add_security_grps = mock.MagicMock()
        self.server_ctrl._view_builder._get_metadata = mock.MagicMock()
        self.server_ctrl._view_builder._get_addresses = mock.MagicMock()
        self.server_ctrl._view_builder._get_host_id = mock.MagicMock()
        self.server_ctrl._view_builder._get_fault = mock.MagicMock()
        self.server_ctrl._view_builder._add_host_status = mock.MagicMock()

        self.instance = fake_instance.fake_instance_obj(
            self.project_member_context,
            id=1,
            uuid=uuids.fake_id,
            project_id=self.project_id,
            vm_state=vm_states.ACTIVE)

        self.mock_get = self.useFixture(
            fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
        self.mock_get.return_value = self.instance

        fakes.stub_out_secgroup_api(self,
                                    security_groups=[{
                                        'name': 'default'
                                    }])
        self.mock_get_all = self.useFixture(
            fixtures.MockPatchObject(self.server_ctrl.compute_api,
                                     'get_all')).mock
        self.mock_get_all.return_value = objects.InstanceList(
            objects=[self.instance])

        def get_flavor_extra_specs(context, flavor_id):
            return fake_flavor.fake_flavor_obj(
                self.project_member_context,
                id=1,
                uuid=uuids.fake_id,
                project_id=self.project_id,
                is_public=False,
                extra_specs={'hw:cpu_policy': 'shared'},
                expected_attrs='extra_specs')

        self.stub_out('nova.api.openstack.common.get_flavor',
                      get_flavor_extra_specs)

        # Check that all are able to get flavor extra specs.
        self.all_authorized_contexts = [
            self.legacy_admin_context, self.system_admin_context,
            self.project_admin_context, self.project_member_context,
            self.project_reader_context, self.project_foo_context,
            self.system_member_context, self.system_reader_context,
            self.system_foo_context, self.other_project_member_context,
            self.other_project_reader_context
        ]
        self.all_unauthorized_contexts = []
        # Check that all system scoped are able to get flavor extra specs.
        self.all_system_authorized_contexts = [
            self.legacy_admin_context, self.system_admin_context,
            self.project_admin_context, self.project_member_context,
            self.project_reader_context, self.project_foo_context,
            self.system_member_context, self.system_reader_context,
            self.system_foo_context, self.other_project_member_context,
            self.other_project_reader_context
        ]
        self.all_system_unauthorized_contexts = []

        # Check that admin is able to create, update and delete flavor
        # extra specs.
        self.admin_authorized_contexts = [
            self.legacy_admin_context, self.system_admin_context,
            self.project_admin_context
        ]
        # Check that non-admin is not able to create, update and
        # delete flavor extra specs.
        self.admin_unauthorized_contexts = [
            self.system_member_context, self.system_reader_context,
            self.system_foo_context, self.project_member_context,
            self.project_reader_context, self.project_foo_context,
            self.other_project_member_context,
            self.other_project_reader_context
        ]
Example #18
File: servers.py Project: xagent003/nova
    def _get_servers(self, req, is_detail):
        """Returns a list of servers, based on any search options specified."""

        search_opts = {}
        search_opts.update(req.GET)

        context = req.environ['nova.context']
        remove_invalid_options(context, search_opts,
                self._get_server_search_options(req))

        # Verify search by 'status' contains a valid status.
        # Convert it to filter by vm_state or task_state for compute_api.
        search_opts.pop('status', None)
        if 'status' in req.GET.keys():
            statuses = req.GET.getall('status')
            states = common.task_and_vm_state_from_status(statuses)
            vm_state, task_state = states
            if not vm_state and not task_state:
                return {'servers': []}
            search_opts['vm_state'] = vm_state
            # When we search by vm state, task state will return 'default'.
            # So we don't need task_state search_opt.
            if 'default' not in task_state:
                search_opts['task_state'] = task_state

        if 'changes-since' in search_opts:
            try:
                parsed = timeutils.parse_isotime(search_opts['changes-since'])
            except ValueError:
                msg = _('Invalid changes-since value')
                raise exc.HTTPBadRequest(explanation=msg)
            search_opts['changes-since'] = parsed

        # By default, compute's get_all() will return deleted instances.
        # If an admin hasn't specified a 'deleted' search option, we need
        # to filter out deleted instances by setting the filter ourselves.
        # ... Unless 'changes-since' is specified, because 'changes-since'
        # should return recently deleted instances according to the API spec.

        if 'deleted' not in search_opts:
            if 'changes-since' not in search_opts:
                # No 'changes-since', so we only want non-deleted servers
                search_opts['deleted'] = False
        else:
            # Convert deleted filter value to a valid boolean.
            # Return non-deleted servers if an invalid value
            # is passed with deleted filter.
            search_opts['deleted'] = strutils.bool_from_string(
                search_opts['deleted'], default=False)

        if search_opts.get("vm_state") == ['deleted']:
            if context.is_admin:
                search_opts['deleted'] = True
            else:
                msg = _("Only administrators may list deleted instances")
                raise exc.HTTPForbidden(explanation=msg)

        if api_version_request.is_supported(req, min_version='2.26'):
            for tag_filter in TAG_SEARCH_FILTERS:
                if tag_filter in search_opts:
                    search_opts[tag_filter] = search_opts[
                        tag_filter].split(',')

        # If tenant_id is passed as a search parameter this should
        # imply that all_tenants is also enabled unless explicitly
        # disabled. Note that the tenant_id parameter is filtered out
        # by remove_invalid_options above unless the requestor is an
        # admin.

        # TODO(gmann): 'all_tenants' flag should not be required while
        # searching with 'tenant_id'. Ref bug# 1185290
        # +microversions to achieve above mentioned behavior by
        # uncommenting below code.

        # if 'tenant_id' in search_opts and 'all_tenants' not in search_opts:
            # We do not need to add the all_tenants flag if the tenant
            # id associated with the token is the tenant id
            # specified. This is done so a request that does not need
            # the all_tenants flag does not fail because of lack of
            # policy permission for compute:get_all_tenants when it
            # doesn't actually need it.
            # if context.project_id != search_opts.get('tenant_id'):
            #    search_opts['all_tenants'] = 1

        all_tenants = common.is_all_tenants(search_opts)
        # use the boolean from here on out so remove the entry from search_opts
        # if it's present
        search_opts.pop('all_tenants', None)

        elevated = None
        if all_tenants:
            if is_detail:
                context.can(server_policies.SERVERS % 'detail:get_all_tenants')
            else:
                context.can(server_policies.SERVERS % 'index:get_all_tenants')
            elevated = context.elevated()
        else:
            if context.project_id:
                search_opts['project_id'] = context.project_id
            else:
                search_opts['user_id'] = context.user_id

        limit, marker = common.get_limit_and_marker(req)
        sort_keys, sort_dirs = common.get_sort_params(req.params)

        expected_attrs = ['pci_devices']
        if is_detail:
            if api_version_request.is_supported(req, '2.26'):
                expected_attrs.append("tags")

            # merge our expected attrs with what the view builder needs for
            # showing details
            expected_attrs = self._view_builder.get_show_expected_attrs(
                                                                expected_attrs)

        try:
            instance_list = self.compute_api.get_all(elevated or context,
                    search_opts=search_opts, limit=limit, marker=marker,
                    expected_attrs=expected_attrs,
                    sort_keys=sort_keys, sort_dirs=sort_dirs)
        except exception.MarkerNotFound:
            msg = _('marker [%s] not found') % marker
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.FlavorNotFound:
            LOG.debug("Flavor '%s' could not be found",
                      search_opts['flavor'])
            instance_list = objects.InstanceList()

        if is_detail:
            instance_list._context = context
            instance_list.fill_faults()
            response = self._view_builder.detail(req, instance_list)
        else:
            response = self._view_builder.index(req, instance_list)
        req.cache_db_instances(instance_list)
        return response
Example #19
    def _fake_instance_get_by_host_and_node(self, context, host, nodename,
                                            expected_attrs=None):
        return objects.InstanceList(
            objects=[i for i in self._instances.values() if i['host'] == host])
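The comprehension above rebuilds a per-host InstanceList from a dict of stored instances; Instance supports both attribute access and the dict-style i['host'] used here via its dict-compat mixin. The same pattern in isolation (a minimal sketch; assumes nova.objects is importable, with placeholder hosts and UUIDs):

from nova import objects

_instances = {
    'u1': objects.Instance(uuid='u1', host='host-a'),
    'u2': objects.Instance(uuid='u2', host='host-b'),
}
on_host_a = objects.InstanceList(
    objects=[i for i in _instances.values() if i.host == 'host-a'])
assert len(on_host_a) == 1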