def check_policy(context, action):
    target = {
        'project_id': context.project_id,
        'user_id': context.user_id,
    }
    _action = 'network:%s' % action
    policy.enforce(context, _action, target)
def update(self, req, id, body):
    """Update server then pass on to version-specific controller."""
    ctxt = req.environ['nova.context']
    update_dict = {}

    if 'name' in body['server']:
        update_dict['display_name'] = body['server']['name']

    # TODO(oomichi): The following host_id validation code can be removed
    # when setting "'additionalProperties': False" in base_update schema.
    if 'host_id' in body['server']:
        msg = _("host_id cannot be updated.")
        raise exc.HTTPBadRequest(explanation=msg)

    if list(self.update_extension_manager):
        self.update_extension_manager.map(self._update_extension_point,
                                          body['server'], update_dict)

    instance = common.get_instance(self.compute_api, ctxt, id,
                                   want_objects=True,
                                   expected_attrs=['pci_devices'])

    try:
        # NOTE(mikal): this try block needs to stay because save() still
        # might throw an exception.
        req.cache_db_instance(instance)
        policy.enforce(ctxt, 'compute:update', instance)
        instance.update(update_dict)
        instance.save()
        return self._view_builder.show(req, instance)
    except exception.NotFound:
        msg = _("Instance could not be found")
        raise exc.HTTPNotFound(explanation=msg)
def check_policy(context, action):
    target = {
        'project_id': context.project_id,
        'user_id': context.user_id,
    }
    _action = 'network:{0!s}'.format(action)
    policy.enforce(context, _action, target)
def update(self, req, id, body):
    """Update server then pass on to version-specific controller."""
    if not self.is_valid_body(body, 'server'):
        raise exc.HTTPBadRequest(_("The request body is invalid"))

    ctxt = req.environ['nova.context']
    update_dict = {}

    if 'name' in body['server']:
        name = body['server']['name']
        self._validate_server_name(name)
        update_dict['display_name'] = name.strip()

    if 'host_id' in body['server']:
        msg = _("host_id cannot be updated.")
        raise exc.HTTPBadRequest(explanation=msg)

    if list(self.update_extension_manager):
        self.update_extension_manager.map(self._update_extension_point,
                                          body['server'], update_dict)

    try:
        instance = self.compute_api.get(ctxt, id, want_objects=True,
                                        expected_attrs=['pci_devices'])
        req.cache_db_instance(instance)
        policy.enforce(ctxt, 'compute:update', instance)
        instance.update(update_dict)
        instance.save()
    except exception.NotFound:
        msg = _("Instance could not be found")
        raise exc.HTTPNotFound(explanation=msg)

    return self._view_builder.show(req, instance)
def update(self, req, id, body):
    """Update server then pass on to version-specific controller."""
    if not self.is_valid_body(body, 'server'):
        raise exc.HTTPBadRequest(_("The request body is invalid"))

    ctxt = req.environ['nova.context']
    update_dict = {}

    if 'name' in body['server']:
        name = body['server']['name']
        self._validate_server_name(name)
        update_dict['display_name'] = name.strip()

    if 'host_id' in body['server']:
        msg = _("host_id cannot be updated.")
        raise exc.HTTPBadRequest(explanation=msg)

    if list(self.update_extension_manager):
        self.update_extension_manager.map(self._update_extension_point,
                                          body['server'], update_dict)

    instance = common.get_instance(self.compute_api, ctxt, id,
                                   want_objects=True,
                                   expected_attrs=['pci_devices'])

    try:
        # NOTE(mikal): this try block needs to stay because save() still
        # might throw an exception.
        req.cache_db_instance(instance)
        policy.enforce(ctxt, 'compute:update', instance)
        instance.update(update_dict)
        instance.save()
        return self._view_builder.show(req, instance)
    except exception.NotFound:
        msg = _("Instance could not be found")
        raise exc.HTTPNotFound(explanation=msg)
def test_templatized_enforcement(self):
    target_mine = {'project_id': 'fake'}
    target_not_mine = {'project_id': 'another'}
    action = "example:my_file"
    policy.enforce(self.context, action, target_mine)
    self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                      self.context, action, target_not_mine)
def _action_create_image(self, req, id, body):
    """Snapshot a server instance."""
    context = req.environ['nova.context']
    entity = body.get("createImage", {})

    image_name = entity.get("name")
    if not image_name:
        msg = _("createImage entity requires name attribute")
        raise exc.HTTPBadRequest(explanation=msg)

    props = {}
    metadata = entity.get('metadata', {})
    common.check_img_metadata_properties_quota(context, metadata)
    try:
        props.update(metadata)
    except ValueError:
        msg = _("Invalid metadata")
        raise exc.HTTPBadRequest(explanation=msg)

    instance = self._get_server(context, req, id)

    bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
        context, instance.uuid)

    try:
        if self.compute_api.is_volume_backed_instance(context, instance,
                                                      bdms):
            policy.enforce(context,
                           'compute:snapshot_volume_backed',
                           {'project_id': context.project_id,
                            'user_id': context.user_id})
            image = self.compute_api.snapshot_volume_backed(
                context, instance, image_name, extra_properties=props)
        else:
            image = self.compute_api.snapshot(context, instance, image_name,
                                              extra_properties=props)
    except exception.InstanceInvalidState as state_error:
        common.raise_http_conflict_for_instance_invalid_state(
            state_error, 'createImage', id)
    except exception.Invalid as err:
        raise exc.HTTPBadRequest(explanation=err.format_message())

    # build location of newly-created image entity
    image_id = str(image['id'])
    url_prefix = self._view_builder._update_glance_link_prefix(
        req.application_url)
    image_ref = common.url_join(url_prefix, context.project_id,
                                'images', image_id)

    resp = webob.Response(status_int=202)
    resp.headers['Location'] = image_ref
    return resp
def update(self, req, id, body):
    """Update server then pass on to version-specific controller."""
    ctxt = req.environ['nova.context']
    update_dict = {}

    if 'name' in body['server']:
        update_dict['display_name'] = body['server']['name']

    if list(self.update_extension_manager):
        self.update_extension_manager.map(self._update_extension_point,
                                          body['server'], update_dict)

    instance = common.get_instance(self.compute_api, ctxt, id,
                                   want_objects=True,
                                   expected_attrs=['pci_devices'])

    try:
        # NOTE(mikal): this try block needs to stay because save() still
        # might throw an exception.
        req.cache_db_instance(instance)
        policy.enforce(ctxt, 'compute:update', instance)
        instance.update(update_dict)
        instance.save()
        return self._view_builder.show(req, instance)
    except exception.InstanceNotFound:
        msg = _("Instance could not be found")
        raise exc.HTTPNotFound(explanation=msg)
def _action_create_image(self, req, id, body):
    """Snapshot a server instance."""
    context = req.environ['nova.context']
    entity = body.get("createImage", {})

    image_name = entity.get("name")
    if not image_name:
        msg = _("createImage entity requires name attribute")
        raise exc.HTTPBadRequest(explanation=msg)

    props = {}
    metadata = entity.get('metadata', {})
    common.check_img_metadata_properties_quota(context, metadata)
    try:
        props.update(metadata)
    except ValueError:
        msg = _("Invalid metadata")
        raise exc.HTTPBadRequest(explanation=msg)

    instance = self._get_server(context, req, id)

    bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
        context, instance.uuid)

    try:
        if self.compute_api.is_volume_backed_instance(context, instance,
                                                      bdms):
            policy.enforce(context,
                           'compute:snapshot_volume_backed',
                           {'project_id': context.project_id,
                            'user_id': context.user_id})
            image = self.compute_api.snapshot_volume_backed(
                context, instance, image_name, extra_properties=props)
        else:
            image = self.compute_api.snapshot(context, instance, image_name,
                                              extra_properties=props)
    except exception.InstanceInvalidState as state_error:
        common.raise_http_conflict_for_instance_invalid_state(
            state_error, 'createImage', id)
    except exception.Invalid as err:
        raise exc.HTTPBadRequest(explanation=err.format_message())

    # build location of newly-created image entity
    image_id = str(image['id'])
    url_prefix = self._view_builder._update_glance_link_prefix(
        req.application_url)
    image_ref = os.path.join(url_prefix, context.project_id,
                             'images', image_id)

    resp = webob.Response(status_int=202)
    resp.headers['Location'] = image_ref
    return resp
def test_admin_or_owner_rules(self):
    for rule in self.admin_or_owner_rules:
        self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                          self.non_admin_context, rule, self.target)
        policy.enforce(self.non_admin_context, rule,
                       {'project_id': 'fake', 'user_id': 'fake'})
def test_ignore_case_role_check(self):
    lowercase_action = "example:lowercase_admin"
    uppercase_action = "example:uppercase_admin"
    # NOTE(dprince) we mix case in the Admin role here to ensure
    # case is ignored
    admin_context = context.RequestContext("admin", "fake",
                                           roles=["AdMiN"])
    policy.enforce(admin_context, lowercase_action, self.target)
    policy.enforce(admin_context, uppercase_action, self.target)
def test_check_policy(self):
    self.mox.StubOutWithMock(policy, 'enforce')
    target = {
        'project_id': self.context.project_id,
        'user_id': self.context.user_id,
    }
    policy.enforce(self.context, 'network:get_all', target)
    self.mox.ReplayAll()
    api.check_policy(self.context, 'get_all')
def test_modified_policy_reloads(self):
    action = "example:test"
    with open(self.tmpfilename, "w") as policyfile:
        policyfile.write("""{"example:test": []}""")
    policy.enforce(self.context, action, self.target)
    with open(self.tmpfilename, "w") as policyfile:
        policyfile.write("""{"example:test": ["false:false"]}""")
    # NOTE(vish): reset stored policy cache so we don't have to sleep(1)
    policy._POLICY_CACHE = {}
    self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                      self.context, action, self.target)
def update(self, req, id, body):
    """Update server then pass on to version-specific controller."""
    if not self.is_valid_body(body, 'server'):
        raise exc.HTTPUnprocessableEntity()

    ctxt = req.environ['nova.context']
    update_dict = {}

    if 'name' in body['server']:
        name = body['server']['name']
        self._validate_server_name(name)
        update_dict['display_name'] = name.strip()

    if 'accessIPv4' in body['server']:
        access_ipv4 = body['server']['accessIPv4']
        if access_ipv4:
            self._validate_access_ipv4(access_ipv4)
        update_dict['access_ip_v4'] = (
            access_ipv4 and access_ipv4.strip() or None)

    if 'accessIPv6' in body['server']:
        access_ipv6 = body['server']['accessIPv6']
        if access_ipv6:
            self._validate_access_ipv6(access_ipv6)
        update_dict['access_ip_v6'] = (
            access_ipv6 and access_ipv6.strip() or None)

    if 'auto_disk_config' in body['server']:
        auto_disk_config = strutils.bool_from_string(
            body['server']['auto_disk_config'])
        update_dict['auto_disk_config'] = auto_disk_config

    if 'hostId' in body['server']:
        msg = _("HostId cannot be updated.")
        raise exc.HTTPBadRequest(explanation=msg)

    if 'personality' in body['server']:
        msg = _("Personality cannot be updated.")
        raise exc.HTTPBadRequest(explanation=msg)

    instance = self._get_server(ctxt, req, id)

    try:
        policy.enforce(ctxt, 'compute:update', instance)
        instance.update(update_dict)
        # Note instance.save can throw a NotFound exception
        instance.save()
    except exception.NotFound:
        msg = _("Instance could not be found")
        raise exc.HTTPNotFound(explanation=msg)

    return self._view_builder.show(req, instance)
def test_enforce_http_true(self, req_mock):
    req_mock.post('http://www.example.com/', text='True')
    action = "example:get_http"
    target = {}
    result = policy.enforce(self.context, action, target)
    self.assertTrue(result)
def test_modified_policy_reloads(self):
    with utils.tempdir() as tmpdir:
        tmpfilename = os.path.join(tmpdir, 'policy')
        self.flags(policy_file=tmpfilename)
        action = "example:test"
        with open(tmpfilename, "w") as policyfile:
            policyfile.write("""{"example:test": []}""")
        policy.enforce(self.context, action, self.target)
        with open(tmpfilename, "w") as policyfile:
            policyfile.write("""{"example:test": ["false:false"]}""")
        # NOTE(vish): reset stored policy cache so we don't have to
        # sleep(1)
        policy._POLICY_CACHE = {}
        self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                          self.context, action, self.target)
def test_enforce_http_true(self, req_mock):
    req_mock.post('http://www.example.com/', text='True')
    action = "example:get_http"
    target = {}
    result = policy.enforce(self.context, action, target)
    self.assertEqual(result, True)
def test_enforce_http_true(self):
    def fakeurlopen(url, post_data):
        return StringIO.StringIO("True")

    self.stubs.Set(urllib2, 'urlopen', fakeurlopen)
    action = "example:get_http"
    target = {}
    result = policy.enforce(self.context, action, target)
    self.assertEqual(result, True)
def authorized(self, ctxt):
    """Return whether or not the context is authorized for this filter
    based on policy.

    The policy action is "cells_scheduler_filter:<name>" where <name>
    is the name of the filter class.
    """
    name = 'cells_scheduler_filter:' + self.__class__.__name__
    target = {'project_id': ctxt.project_id,
              'user_id': ctxt.user_id}
    return policy.enforce(ctxt, name, target, do_raise=False)
def test_modified_policy_reloads(self):
    with utils.tempdir() as tmpdir:
        tmpfilename = os.path.join(tmpdir, "policy")
        self.flags(policy_file=tmpfilename, group="oslo_policy")

        # NOTE(uni): context construction invokes policy check to
        # determine is_admin or not. As a side-effect, policy reset is
        # needed here to flush existing policy cache.
        policy.reset()

        action = "example:test"
        with open(tmpfilename, "w") as policyfile:
            policyfile.write('{"example:test": ""}')
        policy.enforce(self.context, action, self.target)

        with open(tmpfilename, "w") as policyfile:
            policyfile.write('{"example:test": "!"}')
        policy._ENFORCER.load_rules(True)
        self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                          self.context, action, self.target)
def test_modified_policy_reloads(self):
    with utils.tempdir() as tmpdir:
        tmpfilename = os.path.join(tmpdir, 'policy')
        self.flags(policy_file=tmpfilename)

        # NOTE(uni): context construction invokes policy check to
        # determine is_admin or not. As a side-effect, policy reset is
        # needed here to flush existing policy cache.
        policy.reset()

        action = "example:test"
        with open(tmpfilename, "w") as policyfile:
            policyfile.write('{"example:test": ""}')
        policy.enforce(self.context, action, self.target)

        with open(tmpfilename, "w") as policyfile:
            policyfile.write('{"example:test": "!"}')
        policy._ENFORCER.load_rules(True)
        self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                          self.context, action, self.target)
def test_modified_policy_reloads(self):
    with utils.tempdir() as tmpdir:
        tmpfilename = os.path.join(tmpdir, 'policy')
        self.flags(policy_file=tmpfilename)

        # NOTE(uni): context construction invokes policy check to
        # determine is_admin or not. As a side-effect, policy reset is
        # needed here to flush existing policy cache.
        policy.reset()

        action = "example:test"
        with open(tmpfilename, "w") as policyfile:
            policyfile.write("""{"example:test": ""}""")
        policy.enforce(self.context, action, self.target)

        with open(tmpfilename, "w") as policyfile:
            policyfile.write("""{"example:test": "!"}""")
        # NOTE(vish): reset stored policy cache so we don't have to
        # sleep(1)
        policy._POLICY_CACHE = {}
        self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                          self.context, action, self.target)
def test_enforce_good_action(self):
    action = "example:allowed"
    result = policy.enforce(self.context, action, self.target)
    self.assertEqual(result, True)
def test_enforce_bad_action_noraise(self):
    action = "example:denied"
    result = policy.enforce(self.context, action, self.target, False)
    self.assertEqual(result, False)
def _get_servers(self, req, is_detail):
    """Returns a list of servers, based on any search options specified."""
    search_opts = {}
    search_opts.update(req.GET)

    context = req.environ['nova.context']
    remove_invalid_options(context, search_opts,
                           self._get_server_search_options())

    # Verify search by 'status' contains a valid status.
    # Convert it to filter by vm_state or task_state for compute_api.
    search_opts.pop('status', None)
    if 'status' in req.GET.keys():
        statuses = req.GET.getall('status')
        states = common.task_and_vm_state_from_status(statuses)
        vm_state, task_state = states
        if not vm_state and not task_state:
            return {'servers': []}

        search_opts['vm_state'] = vm_state
        # When we search by vm state, task state will return 'default'.
        # So we don't need task_state search_opt.
        if 'default' not in task_state:
            search_opts['task_state'] = task_state

    if 'changes-since' in search_opts:
        try:
            parsed = timeutils.parse_isotime(search_opts['changes-since'])
        except ValueError:
            msg = _('Invalid changes-since value')
            raise exc.HTTPBadRequest(explanation=msg)
        search_opts['changes-since'] = parsed

    # By default, compute's get_all() will return deleted instances.
    # If an admin hasn't specified a 'deleted' search option, we need
    # to filter out deleted instances by setting the filter ourselves.
    # ... Unless 'changes-since' is specified, because 'changes-since'
    # should return recently deleted images according to the API spec.
    if 'deleted' not in search_opts:
        if 'changes-since' not in search_opts:
            # No 'changes-since', so we only want non-deleted servers
            search_opts['deleted'] = False

    if search_opts.get("vm_state") == ['deleted']:
        if context.is_admin:
            search_opts['deleted'] = True
        else:
            msg = _("Only administrators may list deleted instances")
            raise exc.HTTPForbidden(explanation=msg)

    # If all tenants is passed with 0 or false as the value
    # then remove it from the search options. Nothing passed as
    # the value for all_tenants is considered to enable the feature
    all_tenants = search_opts.get('all_tenants')
    if all_tenants:
        try:
            if not strutils.bool_from_string(all_tenants, True):
                del search_opts['all_tenants']
        except ValueError as err:
            raise exception.InvalidInput(six.text_type(err))

    if 'all_tenants' in search_opts:
        policy.enforce(context, 'compute:get_all_tenants',
                       {'project_id': context.project_id,
                        'user_id': context.user_id})
        del search_opts['all_tenants']
    else:
        if context.project_id:
            search_opts['project_id'] = context.project_id
        else:
            search_opts['user_id'] = context.user_id

    limit, marker = common.get_limit_and_marker(req)
    # Sorting by multiple keys and directions is conditionally enabled
    sort_keys, sort_dirs = None, None
    if self.ext_mgr.is_loaded('os-server-sort-keys'):
        sort_keys, sort_dirs = common.get_sort_params(req.params)
    try:
        instance_list = self.compute_api.get_all(context,
                                                 search_opts=search_opts,
                                                 limit=limit,
                                                 marker=marker,
                                                 want_objects=True,
                                                 sort_keys=sort_keys,
                                                 sort_dirs=sort_dirs)
    except exception.MarkerNotFound:
        msg = _('marker [%s] not found') % marker
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.FlavorNotFound:
        LOG.debug("Flavor '%s' could not be found", search_opts['flavor'])
        instance_list = objects.InstanceList()

    if is_detail:
        instance_list.fill_faults()
        response = self._view_builder.detail(req, instance_list)
    else:
        response = self._view_builder.index(req, instance_list)
    req.cache_db_instances(instance_list)
    return response
def test_enforce_http_true(self, mock_urlrequest):
    mock_urlrequest.return_value = StringIO("True")
    action = "example:get_http"
    target = {}
    result = policy.enforce(self.context, action, target)
    self.assertEqual(result, True)
def test_enforce_http_true(self, mock_urlrequest):
    action = "example:get_http"
    target = {}
    result = policy.enforce(self.context, action, target)
    self.assertEqual(result, True)
def _get_servers(self, req, is_detail):
    """Returns a list of servers, based on any search options specified."""
    search_opts = {}
    search_opts.update(req.GET)

    context = req.environ['nova.context']
    remove_invalid_options(context, search_opts,
                           self._get_server_search_options())

    # Verify search by 'status' contains a valid status.
    # Convert it to filter by vm_state or task_state for compute_api.
    status = search_opts.pop('status', None)
    if status is not None:
        vm_state, task_state = common.task_and_vm_state_from_status(status)
        if not vm_state and not task_state:
            return {'servers': []}
        search_opts['vm_state'] = vm_state
        # When we search by vm state, task state will return 'default'.
        # So we don't need task_state search_opt.
        if 'default' not in task_state:
            search_opts['task_state'] = task_state

    if 'changes_since' in search_opts:
        try:
            parsed = timeutils.parse_isotime(search_opts['changes_since'])
        except ValueError:
            msg = _('Invalid changes_since value')
            raise exc.HTTPBadRequest(explanation=msg)
        search_opts['changes_since'] = parsed

    # By default, compute's get_all() will return deleted instances.
    # If an admin hasn't specified a 'deleted' search option, we need
    # to filter out deleted instances by setting the filter ourselves.
    # ... Unless 'changes_since' is specified, because 'changes_since'
    # should return recently deleted images according to the API spec.
    if 'deleted' not in search_opts:
        if 'changes_since' not in search_opts:
            # No 'changes_since', so we only want non-deleted servers
            search_opts['deleted'] = False

    if 'changes_since' in search_opts:
        search_opts['changes-since'] = search_opts.pop('changes_since')

    if search_opts.get("vm_state") == ['deleted']:
        if context.is_admin:
            search_opts['deleted'] = True
        else:
            msg = _("Only administrators may list deleted instances")
            raise exc.HTTPBadRequest(explanation=msg)

    # If tenant_id is passed as a search parameter this should
    # imply that all_tenants is also enabled unless explicitly
    # disabled. Note that the tenant_id parameter is filtered out
    # by remove_invalid_options above unless the requestor is an
    # admin.
    if 'tenant_id' in search_opts and 'all_tenants' not in search_opts:
        # We do not need to add the all_tenants flag if the tenant
        # id associated with the token is the tenant id
        # specified. This is done so a request that does not need
        # the all_tenants flag does not fail because of lack of
        # policy permission for compute:get_all_tenants when it
        # doesn't actually need it.
        if context.project_id != search_opts.get('tenant_id'):
            search_opts['all_tenants'] = 1

    # If all tenants is passed with 0 or false as the value
    # then remove it from the search options. Nothing passed as
    # the value for all_tenants is considered to enable the feature
    all_tenants = search_opts.get('all_tenants')
    if all_tenants:
        try:
            if not strutils.bool_from_string(all_tenants, True):
                del search_opts['all_tenants']
        except ValueError as err:
            raise exception.InvalidInput(str(err))

    if 'all_tenants' in search_opts:
        policy.enforce(context, 'compute:get_all_tenants',
                       {'project_id': context.project_id,
                        'user_id': context.user_id})
        del search_opts['all_tenants']
    else:
        if context.project_id:
            search_opts['project_id'] = context.project_id
        else:
            search_opts['user_id'] = context.user_id

    limit, marker = common.get_limit_and_marker(req)
    try:
        instance_list = self.compute_api.get_all(context,
                                                 search_opts=search_opts,
                                                 limit=limit,
                                                 marker=marker,
                                                 want_objects=True,
                                                 expected_attrs=['pci_devices'])
    except exception.MarkerNotFound:
        msg = _('marker [%s] not found') % marker
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.FlavorNotFound:
        log_msg = _("Flavor '%s' could not be found ")
        LOG.debug(log_msg, search_opts['flavor'])
        instance_list = []

    if is_detail:
        instance_list.fill_faults()
        response = self._view_builder.detail(req, instance_list)
    else:
        response = self._view_builder.index(req, instance_list)
    req.cache_db_instances(instance_list)
    return response
def test_enforce_good_action(self):
    action = "example:allowed"
    policy.enforce(self.context, action, self.target)
def test_non_admin_only_rules(self):
    for rule in self.non_admin_only_rules:
        self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                          self.admin_context, rule, self.target)
        policy.enforce(self.non_admin_context, rule, self.target)
def test_early_OR_enforcement(self):
    action = "example:early_or_success"
    policy.enforce(self.context, action, self.target)
def test_not_found_policy_calls_default(self):
    policy.enforce(self.context, "example:noexist", {})
def test_allow_all_rules(self):
    for rule in self.allow_all_rules:
        policy.enforce(self.non_admin_context, rule, self.target)
def launch_instance(self, context, instance_uuid, params={}):
    pid = context.project_id
    uid = context.user_id

    instance = self.get(context, instance_uuid)

    if not self._is_instance_blessed(context, instance):
        # The instance is not blessed. We can't launch new instances from it.
        raise exception.NovaException(
            _(("Instance %s is not a live image. " +
               "Please create a live image to launch from it.") %
              instance_uuid))

    # Set up security groups to be added - we are passed in names, but need ID's
    security_groups = params.pop('security_groups', None)
    if security_groups is None or len(security_groups) == 0:
        security_groups = ['default']
    self.compute_api._check_requested_secgroups(context, security_groups)

    num_instances = params.pop('num_instances', 1)
    try:
        i_list = range(num_instances)
        if len(i_list) == 0:
            raise exception.NovaException(
                _('num_instances must be at least 1'))
    except TypeError:
        raise exception.NovaException(
            _('num_instances must be an integer'))

    reservations = self._acquire_addition_reservation(
        context, instance, num_instances)

    try:
        launch_instances = []
        # We are handling num_instances in this (odd) way because this is how
        # standard nova handles it.
        availability_zone, forced_host, forced_node = \
            self.compute_api._handle_availability_zone(
                context, params.get('availability_zone'))
        filter_properties = {
            'scheduler_hints': params.pop('scheduler_hints', {})
        }
        if forced_host:
            policy.enforce(context, 'compute:create:forced', {})
            filter_properties['force_hosts'] = [forced_host]

        for i in xrange(num_instances):
            instance_params = params.copy()
            # Create a new launched instance.
            launch_instances.append(self._copy_instance(
                context, instance,
                instance_params.get('name', "%s-%s" %
                                    (instance['display_name'], "clone")),
                launch=True,
                new_user_data=instance_params.pop('user_data', None),
                security_groups=security_groups,
                key_name=instance_params.pop('key_name', None),
                launch_index=i,
                # Note this is after groking by handle_az above
                availability_zone=availability_zone))

        request_spec = self._create_request_spec(context, launch_instances,
                                                 security_groups)
        hosts = self.scheduler_rpcapi.select_hosts(context, request_spec,
                                                   filter_properties)

        for host, launch_instance in zip(hosts, launch_instances):
            self._cast_cobalt_message('launch_instance', context,
                                      launch_instance, host,
                                      {"params": params})

        self._commit_reservation(context, reservations)
    except:
        ei = sys.exc_info()
        self._rollback_reservation(context, reservations)
        raise ei[0], ei[1], ei[2]

    return self.get(context, launch_instances[0]['uuid'])
def _get_servers(self, req, is_detail):
    """Returns a list of servers, based on any search options specified."""
    search_opts = {}
    search_opts.update(req.GET)

    context = req.environ['nova.context']
    remove_invalid_options(context, search_opts,
                           self._get_server_search_options())

    # Verify search by 'status' contains a valid status.
    # Convert it to filter by vm_state or task_state for compute_api.
    search_opts.pop('status', None)
    if 'status' in req.GET.keys():
        statuses = req.GET.getall('status')
        states = common.task_and_vm_state_from_status(statuses)
        vm_state, task_state = states
        if not vm_state and not task_state:
            return {'servers': []}

        search_opts['vm_state'] = vm_state
        # When we search by vm state, task state will return 'default'.
        # So we don't need task_state search_opt.
        if 'default' not in task_state:
            search_opts['task_state'] = task_state

    if 'changes-since' in search_opts:
        try:
            parsed = timeutils.parse_isotime(search_opts['changes-since'])
        except ValueError:
            msg = _('Invalid changes-since value')
            raise exc.HTTPBadRequest(explanation=msg)
        search_opts['changes-since'] = parsed

    # By default, compute's get_all() will return deleted instances.
    # If an admin hasn't specified a 'deleted' search option, we need
    # to filter out deleted instances by setting the filter ourselves.
    # ... Unless 'changes-since' is specified, because 'changes-since'
    # should return recently deleted images according to the API spec.
    if 'deleted' not in search_opts:
        if 'changes-since' not in search_opts:
            # No 'changes-since', so we only want non-deleted servers
            search_opts['deleted'] = False

    if search_opts.get("vm_state") == ['deleted']:
        if context.is_admin:
            search_opts['deleted'] = True
        else:
            msg = _("Only administrators may list deleted instances")
            raise exc.HTTPForbidden(explanation=msg)

    # If tenant_id is passed as a search parameter this should
    # imply that all_tenants is also enabled unless explicitly
    # disabled. Note that the tenant_id parameter is filtered out
    # by remove_invalid_options above unless the requestor is an
    # admin.
    # TODO(gmann): 'all_tenants' flag should not be required while
    # searching with 'tenant_id'. Ref bug# 1185290
    # +microversions to achieve above mentioned behavior by
    # uncommenting below code.
    # if 'tenant_id' in search_opts and 'all_tenants' not in search_opts:
    #     # We do not need to add the all_tenants flag if the tenant
    #     # id associated with the token is the tenant id
    #     # specified. This is done so a request that does not need
    #     # the all_tenants flag does not fail because of lack of
    #     # policy permission for compute:get_all_tenants when it
    #     # doesn't actually need it.
    #     if context.project_id != search_opts.get('tenant_id'):
    #         search_opts['all_tenants'] = 1

    # If all tenants is passed with 0 or false as the value
    # then remove it from the search options. Nothing passed as
    # the value for all_tenants is considered to enable the feature
    all_tenants = search_opts.get('all_tenants')
    if all_tenants:
        try:
            if not strutils.bool_from_string(all_tenants, True):
                del search_opts['all_tenants']
        except ValueError as err:
            raise exception.InvalidInput(six.text_type(err))

    if 'all_tenants' in search_opts:
        policy.enforce(context, 'compute:get_all_tenants',
                       {'project_id': context.project_id,
                        'user_id': context.user_id})
        del search_opts['all_tenants']
    else:
        if context.project_id:
            search_opts['project_id'] = context.project_id
        else:
            search_opts['user_id'] = context.user_id

    limit, marker = common.get_limit_and_marker(req)
    sort_keys, sort_dirs = common.get_sort_params(req.params)
    try:
        instance_list = self.compute_api.get_all(
            context, search_opts=search_opts, limit=limit, marker=marker,
            want_objects=True, expected_attrs=['pci_devices'],
            sort_keys=sort_keys, sort_dirs=sort_dirs)
    except exception.MarkerNotFound:
        msg = _('marker [%s] not found') % marker
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.FlavorNotFound:
        LOG.debug("Flavor '%s' could not be found ",
                  search_opts['flavor'])
        instance_list = objects.InstanceList()

    if is_detail:
        instance_list.fill_faults()
        response = self._view_builder.detail(req, instance_list)
    else:
        response = self._view_builder.index(req, instance_list)

    req.cache_db_instances(instance_list)
    return response
def launch_instance(self, context, instance_uuid, params={}):
    pid = context.project_id
    uid = context.user_id

    instance = self.get(context, instance_uuid)

    if not self._is_instance_blessed(context, instance_uuid):
        # The instance is not blessed. We can't launch new instances from it.
        raise exception.NovaException(
            _(("Instance %s is not a live image. " +
               "Please create a live image to launch from it.") %
              instance_uuid))

    # Set up security groups to be added - we are passed in names, but need ID's
    security_group_names = params.pop('security_groups', None)
    if security_group_names is not None:
        security_groups = [self.db.security_group_get_by_name(
            context, context.project_id, sg) for sg in security_group_names]
    else:
        security_groups = None

    num_instances = params.pop('num_instances', 1)
    try:
        i_list = range(num_instances)
        if len(i_list) == 0:
            raise exception.NovaException(
                _('num_instances must be at least 1'))
    except TypeError:
        raise exception.NovaException(
            _('num_instances must be an integer'))

    reservations = self._acquire_addition_reservation(context, instance,
                                                      num_instances)

    try:
        launch_instances = []
        # We are handling num_instances in this (odd) way because this is how
        # standard nova handles it.
        availability_zone, forced_host, forced_node = \
            self.compute_api._handle_availability_zone(
                params.get('availability_zone'))
        filter_properties = {
            'scheduler_hints': params.pop('scheduler_hints', {})
        }
        if forced_host:
            policy.enforce(context, 'compute:create:forced', {})
            filter_properties['force_hosts'] = [forced_host]

        for i in xrange(num_instances):
            instance_params = params.copy()
            # Create a new launched instance.
            launch_instances.append(self._copy_instance(
                context, instance_uuid,
                instance_params.get('name', "%s-%s" %
                                    (instance['display_name'], "clone")),
                launch=True,
                new_user_data=instance_params.pop('user_data', None),
                security_groups=security_groups,
                key_name=instance_params.pop('key_name', None),
                launch_index=i,
                # Note this is after groking by handle_az above
                availability_zone=availability_zone))

        request_spec = self._create_request_spec(context, launch_instances)
        hosts = self.scheduler_rpcapi.select_hosts(context, request_spec,
                                                   filter_properties)

        for host, launch_instance in zip(hosts, launch_instances):
            self._cast_cobalt_message('launch_instance', context,
                                      launch_instance['uuid'], host,
                                      {"params": params})

        self._commit_reservation(context, reservations)
    except:
        ei = sys.exc_info()
        self._rollback_reservation(context, reservations)
        raise ei[0], ei[1], ei[2]

    return self.get(context, launch_instances[0]['uuid'])