def get_data(self):
    """Return the domain rows to display.

    Lists all domains when allowed; if the request is scoped to one
    domain, shows only that domain; otherwise degrades to fetching the
    single effective domain or an "insufficient privilege" notice.
    """
    domains = []
    # Effective domain of the request (may be falsy when unscoped).
    domain_id = api.keystone.get_effective_domain_id(self.request)
    if policy.check((("identity", "identity:list_domains"),), self.request):
        try:
            if domain_id:
                # Scoped to a single domain: show only that one.
                domain = api.keystone.domain_get(self.request, domain_id)
                domains.append(domain)
            else:
                domains = api.keystone.domain_list(self.request)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve domain list.'))
    elif policy.check((("identity", "identity:get_domain"),), self.request):
        # Fallback: only allowed to look at a single domain.
        try:
            domain = api.keystone.domain_get(self.request, domain_id)
            domains.append(domain)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve domain information.'))
    else:
        msg = _("Insufficient privilege level to view domain information.")
        messages.info(self.request, msg)
    return domains
def get_data(self):
    """Return the domain rows to display.

    Lists every domain when permitted and no domain context is active;
    otherwise falls back to the single domain of the current operation.
    """
    domains = []
    # An active domain context pins the admin session to one domain,
    # in which case the full listing is suppressed.
    domain_context = self.request.session.get('domain_context')
    if policy.check((("identity", "identity:list_domains"),),
                    self.request) and not domain_context:
        try:
            domains = api.keystone.domain_list(self.request)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve domain list.'))
    elif policy.check((("identity", "identity:get_domain"),),
                      self.request):
        try:
            domain_id = identity.get_domain_id_for_operation(self.request)
            domain = api.keystone.domain_get(self.request, domain_id)
            domains.append(domain)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve domain information.'))
    else:
        msg = _("Insufficient privilege level to view domain information.")
        messages.info(self.request, msg)
    return domains
def get_data(self):
    """Return the users to list, scoped to the effective domain."""
    users = []
    filters = self.get_filters()
    if policy.check((("identity", "identity:list_users"),), self.request):
        # Admin path: list users in the effective domain, with filters.
        domain_context = api.keystone.get_effective_domain_id(self.request)
        try:
            users = api.keystone.user_list(self.request,
                                           domain=domain_context,
                                           filters=filters)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve user list.'))
    elif policy.check((("identity", "identity:get_user"),), self.request):
        # Non-admin fallback: show only the logged-in user.
        try:
            user = api.keystone.user_get(self.request, self.request.user.id)
            users.append(user)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve user information.'))
    else:
        msg = _("Insufficient privilege level to view user information.")
        messages.info(self.request, msg)
    if api.keystone.VERSIONS.active >= 3:
        # Keystone v3: resolve domain ids to names for display.
        domain_lookup = api.keystone.domain_lookup(self.request)
        for u in users:
            u.domain_name = domain_lookup.get(u.domain_id)
    return users
def get_data(self):
    """Return one page of projects.

    Admins (per the sdscontroller policy) see all projects in the
    current domain context; other users see only their own projects.
    Sets ``self._more`` for the table's pagination control.
    """
    tenants = []
    marker = self.request.GET.get(
        project_tables.TenantsTable._meta.pagination_param, None)
    domain_context = self.request.session.get('domain_context', None)
    self._more = False
    if policy.check((("sdscontroller", "sdscontroller:list_projects"),),
                    self.request):
        try:
            tenants, self._more = api.keystone.tenant_list(
                self.request,
                domain=domain_context,
                paginate=True,
                marker=marker)
        except Exception:
            exceptions.handle(self.request,
                              _("Unable to retrieve project list."))
    elif policy.check((("sdscontroller",
                        "sdscontroller:list_user_projects"),),
                      self.request):
        try:
            # admin=False restricts the listing to the user's projects.
            tenants, self._more = api.keystone.tenant_list(
                self.request,
                user=self.request.user.id,
                paginate=True,
                marker=marker,
                admin=False)
        except Exception:
            exceptions.handle(self.request,
                              _("Unable to retrieve project information."))
    else:
        msg = _("Insufficient privilege level to view project information.")
        messages.info(self.request, msg)
    return tenants
def _test_get_router_ha_permission_with_policy_check(self, ha_enabled):
    """Verify that the l3-ha feature permission tracks extension support
    when the policy check itself succeeds."""
    self.mox.StubOutWithMock(policy, "check")
    policy_rule = (("network", "create_router:ha"),)
    policy.check(policy_rule, self.request).AndReturn(True)
    api.neutron.is_extension_supported(
        self.request, "l3-ha").AndReturn(ha_enabled)
    self.mox.ReplayAll()

    result = api.neutron.get_feature_permission(
        self.request, "l3-ha", "create")
    self.assertEqual(ha_enabled, result)
def get_context(request, context=None):
    """Returns common context data for network topology views.

    Populates feature-permission flags (launch instance, create
    network/router), quota-exceeded flags, and launch-dialog toggles.
    Each "allowed" flag combines service availability, deployment
    settings and policy checks.
    """
    if context is None:
        context = {}
    network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
    context['launch_instance_allowed'] = policy.check(
        (("compute", "compute:create"),), request)
    context['instance_quota_exceeded'] = _quota_exceeded(request, 'instances')
    context['create_network_allowed'] = policy.check(
        (("network", "create_network"),), request)
    context['network_quota_exceeded'] = _quota_exceeded(request, 'networks')
    # Router creation can be disabled deployment-wide via settings.
    context['create_router_allowed'] = (
        network_config.get('enable_router', True) and
        policy.check((("network", "create_router"),), request))
    context['router_quota_exceeded'] = _quota_exceeded(request, 'routers')
    context['console_type'] = getattr(settings, 'CONSOLE_TYPE', 'AUTO')
    # Angular ("ng") vs. legacy launch-instance dialogs, both gated on
    # the compute service being enabled.
    context['show_ng_launch'] = (
        base.is_service_enabled(request, 'compute') and
        getattr(settings, 'LAUNCH_INSTANCE_NG_ENABLED', True))
    context['show_legacy_launch'] = (
        base.is_service_enabled(request, 'compute') and
        getattr(settings, 'LAUNCH_INSTANCE_LEGACY_ENABLED', False))
    return context
def get_data(self):
    """Return the projects to list (unpaginated).

    Admins see all projects in the current domain context; other users
    see only the projects they belong to.
    """
    tenants = []
    domain_context = self.request.session.get('domain_context', None)
    if policy.check((("identity", "identity:list_projects"),),
                    self.request):
        try:
            tenants, _more = keystone_api.tenant_list(
                self.request,
                domain=domain_context,
                paginate=False)
        except Exception:
            exceptions.handle(self.request,
                              _("Unable to retrieve project list."))
    elif policy.check((("identity", "identity:list_user_projects"),),
                      self.request):
        try:
            # admin=False restricts the listing to the user's projects.
            tenants, _more = keystone_api.tenant_list(
                self.request,
                user=self.request.user.id,
                paginate=False,
                admin=False)
        except Exception:
            exceptions.handle(self.request,
                              _("Unable to retrieve project information."))
    else:
        msg = _("Insufficient privilege level to view project information.")
        messages.info(self.request, msg)
    return tenants
def keystoneclient(request, admin=False):
    """Returns a client connected to the Keystone backend.

    Several forms of authentication are supported:

        * Username + password -> Unscoped authentication
        * Username + password + tenant id -> Scoped authentication
        * Unscoped token -> Unscoped authentication
        * Unscoped token + tenant id -> Scoped authentication
        * Scoped token -> Scoped authentication

    Available services and data from the backend will vary depending
    on whether the authentication was scoped or unscoped.

    Lazy authentication if an ``endpoint`` parameter is provided.

    Calls requiring the admin endpoint should have ``admin=True`` passed
    in as a keyword argument.

    The client is cached so that subsequent API calls during the same
    request/response cycle don't have to be re-authenticated.
    """
    user = request.user
    if admin:
        # The admin endpoint requires one of the privileged roles.
        if (not policy.check((("identity", "admin_required"),), request) and
                not policy.check((("identity", "project_admin_required"),),
                                 request) and
                not policy.check((("identity", "support_required"),),
                                 request)):
            raise exceptions.NotAuthorized
        endpoint_type = 'adminURL'
    else:
        endpoint_type = getattr(settings,
                                'OPENSTACK_ENDPOINT_TYPE',
                                'internalURL')
    api_version = VERSIONS.get_active_version()

    # Take care of client connection caching/fetching a new client.
    # Admin vs. non-admin clients are cached separately for token matching.
    cache_attr = "_keystoneclient_admin" if admin \
        else backend.KEYSTONE_CLIENT_ATTR
    if (hasattr(request, cache_attr) and
            (not user.token.id or
             getattr(request, cache_attr).auth_token == user.token.id)):
        conn = getattr(request, cache_attr)
    else:
        endpoint = _get_endpoint_url(request, endpoint_type)
        insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
        cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
        # Fix: use lazy %-style logging arguments (the original used
        # eager "%" interpolation, which formats even when DEBUG logging
        # is disabled); this also matches the style of the sibling
        # keystoneclient variant in this file.
        LOG.debug("Creating a new keystoneclient connection to %s.",
                  endpoint)
        remote_addr = request.environ.get('REMOTE_ADDR', '')
        conn = api_version['client'].Client(token=user.token.id,
                                            endpoint=endpoint,
                                            original_ip=remote_addr,
                                            insecure=insecure,
                                            cacert=cacert,
                                            auth_url=endpoint,
                                            debug=settings.DEBUG)
        setattr(request, cache_attr, conn)
    return conn
def get_data(self):
    """Return one page of projects, honouring FILTER_DATA_FIRST.

    Admins see projects of the operation's domain; other users see
    only their own projects. Sets ``self._more`` for pagination and
    ``self._needs_filter_first`` when a search term is mandatory.
    """
    tenants = []
    marker = self.request.GET.get(
        project_tables.TenantsTable._meta.pagination_param, None)
    self._more = False
    filters = self.get_filters()
    self._needs_filter_first = False
    if policy.check((("identity", "identity:list_projects"),),
                    self.request):
        # If filter_first is set and if there are not other filters
        # selected, then search criteria must be provided and
        # return an empty list
        filter_first = getattr(settings, 'FILTER_DATA_FIRST', {})
        if filter_first.get('identity.projects', False) and len(
                filters) == 0:
            self._needs_filter_first = True
            self._more = False
            return tenants
        domain_id = identity.get_domain_id_for_operation(self.request)
        try:
            tenants, self._more = api.keystone.tenant_list(
                self.request,
                domain=domain_id,
                paginate=True,
                filters=filters,
                marker=marker)
        except Exception:
            exceptions.handle(self.request,
                              _("Unable to retrieve project list."))
    elif policy.check((("identity", "identity:list_user_projects"),),
                      self.request):
        try:
            # admin=False restricts the listing to the user's projects.
            tenants, self._more = api.keystone.tenant_list(
                self.request,
                user=self.request.user.id,
                paginate=True,
                marker=marker,
                filters=filters,
                admin=False)
        except Exception:
            exceptions.handle(self.request,
                              _("Unable to retrieve project information."))
    else:
        msg = _("Insufficient privilege level to view project information.")
        messages.info(self.request, msg)
    if api.keystone.VERSIONS.active >= 3:
        # Keystone v3: resolve domain ids to names for display.
        domain_lookup = api.keystone.domain_lookup(self.request)
        for t in tenants:
            t.domain_name = domain_lookup.get(t.domain_id)
    return tenants
def allowed(self, request, volume=None):
    """Enable the action only when the user may attach or detach the
    volume and the volume is in a state where that makes sense."""
    if not volume:
        return False
    target = {"project_id": getattr(volume,
                                    "os-vol-tenant-attr:tenant_id",
                                    None)}
    can_attach = policy.check((("compute", "compute:attach_volume"),),
                              request, target)
    can_detach = policy.check((("compute", "compute:detach_volume"),),
                              request, target)
    if not (can_attach or can_detach):
        return False
    return volume.status in ("available", "in-use")
def __init__(self, request, *args, **kwargs):
    """Hide the image-source and visibility controls the current user
    is not allowed to use."""
    super(CreateImageForm, self).__init__(request, *args, **kwargs)
    upload_allowed = (
        settings.HORIZON_IMAGES_ALLOW_UPLOAD and
        policy.check((("image", "upload_image"),), request))
    if not upload_allowed:
        self._hide_file_source_type()
    if not policy.check((("image", "set_image_location"),), request):
        self._hide_url_source_type()
    if not policy.check((("image", "publicize_image"),), request):
        self._hide_is_public()
    self.fields['disk_format'].choices = IMAGE_FORMAT_CHOICES
def _test_get_dvr_permission_with_policy_check(self, policy_check_allowed,
                                               operation):
    """Verify the dvr feature permission for both the create and get
    operations, with the policy check stubbed to a fixed outcome."""
    self.mox.StubOutWithMock(policy, "check")
    if operation == "create":
        policy_rule = (("network", "create_router:distributed"),)
    elif operation == "get":
        policy_rule = (("network", "get_router:distributed"),)
    policy.check(policy_rule, self.request).AndReturn(policy_check_allowed)
    # The extension check only happens once the policy check passes.
    if policy_check_allowed:
        api.neutron.is_extension_supported(
            self.request, "dvr").AndReturn(policy_check_allowed)
    self.mox.ReplayAll()

    result = api.neutron.get_feature_permission(
        self.request, "dvr", operation)
    self.assertEqual(policy_check_allowed, result)
def _test_get_router_ha_permission_with_policy_check(self, ha_enabled):
    """Verify the l3-ha feature permission against a stubbed extension
    listing from the neutron client."""
    self.mox.StubOutWithMock(policy, "check")
    policy_rule = (("network", "create_router:ha"),)
    policy.check(policy_rule, self.request).AndReturn(True)
    neutronclient = self.stub_neutronclient()
    # An empty extension list simulates l3-ha being unsupported.
    extensions = self.api_extensions.list() if ha_enabled else {}
    neutronclient.list_extensions().AndReturn({"extensions": extensions})
    self.mox.ReplayAll()

    result = api.neutron.get_feature_permission(
        self.request, "l3-ha", "create")
    self.assertEqual(ha_enabled, result)
def get_context_data(self, **kwargs):
    """Assemble the user-detail context, resolving the domain name."""
    context = super(DetailView, self).get_context_data(**kwargs)
    user = self.get_data()
    tenant = self.get_tenant(user.project_id)
    table = project_tables.UsersTable(self.request)
    domain_id = getattr(user, "domain_id", None)
    domain_name = ''
    if api.keystone.VERSIONS.active >= 3:
        try:
            if policy.check((("identity", "identity:get_domain"),),
                            self.request):
                domain = api.keystone.domain_get(
                    self.request, domain_id)
                domain_name = domain.name
            else:
                # Without get_domain rights, fall back to the name of
                # the default domain.
                domain = api.keystone.get_default_domain(self.request)
                domain_name = domain.get('name')
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve project domain.'))
    context["description"] = getattr(user, "description", _("None"))
    context["user"] = user
    if tenant:
        context["tenant_name"] = tenant.name
    context["domain_id"] = domain_id
    context["domain_name"] = domain_name
    context["url"] = self.get_redirect_url()
    context["actions"] = table.render_row_actions(user)
    return context
def _test_get_dvr_permission_with_policy_check(self, policy_check_allowed,
                                               operation):
    """Verify the dvr feature permission using a stubbed neutron client
    extension listing."""
    self.mox.StubOutWithMock(policy, 'check')
    if operation == "create":
        policy_rule = (("network", "create_router:distributed"),)
    elif operation == "get":
        policy_rule = (("network", "get_router:distributed"),)
    policy.check(policy_rule, self.request).AndReturn(policy_check_allowed)
    # The neutron client is only consulted once the policy check passes.
    if policy_check_allowed:
        neutronclient = self.stub_neutronclient()
        neutronclient.list_extensions().AndReturn(
            {'extensions': self.api_extensions.list()})
    self.mox.ReplayAll()

    result = api.neutron.get_feature_permission(
        self.request, 'dvr', operation)
    self.assertEqual(policy_check_allowed, result)
def get_data(self):
    """Return the roles to list, honouring FILTER_DATA_FIRST."""
    roles = []
    filters = self.get_filters()
    self._needs_filter_first = False
    if policy.check((("identity", "identity:list_roles"),), self.request):
        # If filter_first is set and if there are not other filters
        # selected, then search criteria must be provided
        # and return an empty list
        filter_first = getattr(settings, 'FILTER_DATA_FIRST', {})
        if filter_first.get('identity.roles', False) and len(filters) == 0:
            self._needs_filter_first = True
            return roles
        try:
            roles = api.keystone.role_list(self.request, filters=filters)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve roles list.'))
    else:
        msg = _("Insufficient privilege level to view role information.")
        messages.info(self.request, msg)
    return roles
def allowed(self, request, security_group=None):
    """Always allowed with neutron; with nova-network, defer to the
    compute security-groups policy."""
    policy_target = self.get_policy_target(request, security_group)
    if api.base.is_service_enabled(request, "network"):
        return True
    rules = (("compute", "compute_extension:security_groups"),)
    return policy.check(rules, request, policy_target)
def get_initial(self):
    """Seed the update form with the user's current attributes."""
    user = self.get_object()
    domain_id = getattr(user, "domain_id", None)
    domain_name = ''
    # Retrieve the domain name where the project belongs
    if api.keystone.VERSIONS.active >= 3:
        try:
            if policy.check((("identity", "identity:get_domain"),),
                            self.request):
                domain = api.keystone.domain_get(self.request, domain_id)
                domain_name = domain.name
            else:
                # Without get_domain rights, use the default domain name.
                domain = api.keystone.get_default_domain(self.request)
                domain_name = domain.get('name')
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve project domain.'))
    data = {'domain_id': domain_id,
            'domain_name': domain_name,
            'id': user.id,
            'name': user.name,
            'project': user.project_id,
            'email': getattr(user, 'email', None),
            'description': getattr(user, 'description', None)}
    if api.keystone.VERSIONS.active >= 3:
        # Deployments may expose extra per-user attributes via settings.
        for key in getattr(settings, 'USER_TABLE_EXTRA_INFO', {}):
            data[key] = getattr(user, key, None)
    return data
def get_context_data(self, **kwargs):
    """Assemble the user-detail context, including configured extra
    attributes and the resolved domain name."""
    context = super(DetailView, self).get_context_data(**kwargs)
    user = self.get_data()
    tenant = self.get_tenant(user.project_id)
    table = project_tables.UsersTable(self.request)
    domain_id = getattr(user, "domain_id", None)
    domain_name = ""
    if api.keystone.VERSIONS.active >= 3:
        try:
            if policy.check((("identity", "identity:get_domain"),),
                            self.request):
                domain = api.keystone.domain_get(self.request, domain_id)
                domain_name = domain.name
            else:
                # Without get_domain rights, use the default domain name.
                domain = api.keystone.get_default_domain(self.request)
                domain_name = domain.get("name")
        except Exception:
            exceptions.handle(self.request,
                              _("Unable to retrieve project domain."))
    context["description"] = getattr(user, "description", _("None"))
    # USER_TABLE_EXTRA_INFO maps attribute name -> display label.
    extra_info = getattr(settings, "USER_TABLE_EXTRA_INFO", {})
    context["extras"] = dict(
        (display_key, getattr(user, key, ""))
        for key, display_key in extra_info.items())
    context["user"] = user
    if tenant:
        context["tenant_name"] = tenant.name
    context["domain_id"] = domain_id
    context["domain_name"] = domain_name
    context["url"] = self.get_redirect_url()
    context["actions"] = table.render_row_actions(user)
    return context
def get_dvr_permission(request, operation):
    """Check if "distributed" field can be displayed.

    :param request: Request Object
    :param operation: Operation type. The valid value is "get" or "create"
    :returns: True only when distributed routers are enabled in settings,
        the policy allows the operation, and the "dvr" extension is
        supported by neutron.
    :raises ValueError: if ``operation`` is neither "get" nor "create".
    """
    network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
    if not network_config.get('enable_distributed_router', False):
        return False
    policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
    if operation not in ("get", "create"):
        raise ValueError(_("The 'operation' parameter for get_dvr_permission "
                           "is invalid. It should be 'get' or 'create'."))
    role = (("network", "%s_router:distributed" % operation),)
    # Only consult the policy engine when a check function is configured;
    # otherwise everyone is assumed to have permission.
    if policy_check:
        has_permission = policy.check(role, request)
    else:
        has_permission = True
    if not has_permission:
        return False
    try:
        return is_extension_supported(request, 'dvr')
    except Exception:
        # Best-effort: treat a failed extension lookup as "not supported".
        msg = _('Failed to check Neutron "dvr" extension is not supported')
        LOG.info(msg)
        return False
def handle(self, request, data): meta = create_image_metadata(data) # Add image source file or URL to metadata if (api.glance.get_image_upload_mode() != 'off' and policy.check((("image", "upload_image"),), request) and data.get('image_file', None)): meta['data'] = data['image_file'] elif data.get('is_copying'): meta['copy_from'] = data['image_url'] else: meta['location'] = data['image_url'] try: image = api.glance.image_create(request, **meta) messages.info(request, _('Your image %s has been queued for creation.') % meta['name']) return image except Exception as e: msg = _('Unable to create new image') # TODO(nikunj2512): Fix this once it is fixed in glance client if hasattr(e, 'code') and e.code == 400: if "Invalid disk format" in e.details: msg = _('Unable to create new image: Invalid disk format ' '%s for image.') % meta['disk_format'] elif "Image name too long" in e.details: msg = _('Unable to create new image: Image name too long.') elif "not supported" in e.details: msg = _('Unable to create new image: URL scheme not ' 'supported.') exceptions.handle(request, msg) return False
def get_data(self):
    """Return the groups of the operation's domain, honouring
    FILTER_DATA_FIRST."""
    groups = []
    filters = self.get_filters()
    self._needs_filter_first = False
    if policy.check((("identity", "identity:list_groups"),), self.request):
        # If filter_first is set and if there are not other filters
        # selected, then search criteria must be provided and
        # return an empty list
        filter_first = getattr(settings, 'FILTER_DATA_FIRST', {})
        if filter_first.get('identity.groups', False) \
                and len(filters) == 0:
            self._needs_filter_first = True
            return groups
        domain_id = identity.get_domain_id_for_operation(self.request)
        try:
            groups = api.keystone.group_list(self.request,
                                             domain=domain_id,
                                             filters=filters)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve group list.'))
    else:
        msg = _("Insufficient privilege level to view group information.")
        messages.info(self.request, msg)
    return groups
def allowed(self, request, project, cell):
    """Permit editing unless the user is toggling 'enabled' on their
    own project; also requires keystone edit support and policy."""
    policy_rule = (("identity", "identity:update_project"),)
    # Disabling one's own project would lock the user out.
    toggling_own_project = (cell.column.name == 'enabled' and
                            request.user.project_id == cell.datum.id)
    if toggling_own_project:
        return False
    return (api.keystone.keystone_can_edit_project() and
            policy.check(policy_rule, request))
def test_check_identity_rule_not_found_true(self):
    """An unknown identity rule falls back to the default check
    (admin_required), which passes for this request."""
    policy_backend.reset()
    result = policy.check((("identity", "i_dont_exist"),),
                          request=self.request)
    self.assertTrue(result)
def get_initial(self):
    """Seed the update form with the user's current attributes."""
    user = self.get_object()
    domain_id = getattr(user, "domain_id", None)
    domain_name = ""
    # Retrieve the domain name where the project belongs
    if api.keystone.VERSIONS.active >= 3:
        try:
            if policy.check((("identity", "identity:get_domain"),),
                            self.request):
                domain = api.keystone.domain_get(self.request, domain_id)
                domain_name = domain.name
            else:
                # Without get_domain rights, use the default domain name.
                domain = api.keystone.get_default_domain(self.request)
                domain_name = domain.get("name")
        except Exception:
            exceptions.handle(self.request,
                              _("Unable to retrieve project domain."))
    data = {
        "domain_id": domain_id,
        "domain_name": domain_name,
        "id": user.id,
        "name": user.name,
        "project": user.project_id,
        "email": getattr(user, "email", None),
        "description": getattr(user, "description", None),
    }
    if api.keystone.VERSIONS.active >= 3:
        # Deployments may expose extra per-user attributes via settings.
        for key in getattr(settings, "USER_TABLE_EXTRA_INFO", {}):
            data[key] = getattr(user, key, None)
    return data
def get_volume_types_data(self):
    """Return volume types, annotated with encryption info when the
    user may view volume-type encryption."""
    try:
        volume_types = \
            api.cinder.volume_type_list_with_qos_associations(self.request)
    except Exception:
        volume_types = []
        exceptions.handle(self.request,
                          _("Unable to retrieve volume types"))

    encryption_allowed = policy.check(
        (("volume", "volume_extension:volume_type_encryption"),),
        self.request)
    if encryption_allowed:
        # Gather volume type encryption information
        try:
            vol_type_enc_list = api.cinder.volume_encryption_type_list(
                self.request)
        except Exception:
            vol_type_enc_list = []
            msg = _(
                'Unable to retrieve volume type encryption information.')
            exceptions.handle(self.request, msg)
        # Index encryption records by their volume type for O(1) lookup.
        vol_type_enc_dict = OrderedDict([(e.volume_type_id, e)
                                         for e in vol_type_enc_list])
        for volume_type in volume_types:
            vol_type_enc = vol_type_enc_dict.get(volume_type.id, None)
            if vol_type_enc is not None:
                volume_type.encryption = vol_type_enc
                volume_type.encryption.name = volume_type.name
            else:
                volume_type.encryption = None
    return volume_types
def handle(self, request, data): meta = create_image_metadata(data) # Add image source file or URL to metadata if ( settings.HORIZON_IMAGES_ALLOW_UPLOAD and policy.check((("image", "upload_image"),), request) and data.get("image_file", None) ): meta["data"] = self.files["image_file"] elif data["is_copying"]: meta["copy_from"] = data["image_url"] else: meta["location"] = data["image_url"] try: image = api.glance.image_create(request, **meta) messages.success(request, _("Your image %s has been queued for creation.") % meta["name"]) return image except Exception as e: msg = _("Unable to create new image") # TODO(nikunj2512): Fix this once it is fixed in glance client if hasattr(e, "code") and e.code == 400: if "Invalid disk format" in e.details: msg = _("Unable to create new image: Invalid disk format " "%s for image.") % meta["disk_format"] elif "Image name too long" in e.details: msg = _("Unable to create new image: Image name too long.") exceptions.handle(request, msg) return False
def allowed(self, request): return policy.check( (("orchestration", "stacks:template"), ("orchestration", "stacks:lookup"), ("orchestration", "stacks:show"), ("orchestration", "events:index"),), request)
def get_rows(self):
    """Render project rows as a tree rooted at the user's own project.

    Falls back to the default flat rendering when hierarchy data
    (parent_id / resolved parents) is unavailable.
    """
    projects = self.filtered_data
    if (projects and hasattr(projects[0], 'parent_id') and
            project_identity.VERSIONS.active >= 3):
        self.set_immediate_parent(projects)
        if policy.check((("identity", "identity:get_project"),),
                        self.request):
            self.set_closer_parent(projects, self.request)
    if not projects or not hasattr(projects[0], 'parent'):
        # No hierarchy information: use the stock table rendering.
        return super(TenantsTable, self).get_rows()
    # Link each project to its children found in the same listing.
    for project in projects:
        project.immediate_subprojects = []
        for child in projects:
            if child.parent and child.parent.id == project.id:
                project.immediate_subprojects.append(child)
    # NOTE(review): if the user's own project is not in `projects`,
    # `root_projects` stays unbound and the loop below raises
    # NameError — presumably the caller guarantees it is present;
    # confirm.
    for project in projects:
        if project.id == self.request.user.project_id:
            root_projects = [project]
    rows = []
    # Depth-first walk: emit a row, then queue its children (sorted by
    # name) at the front of the work list.
    while root_projects:
        p = root_projects[0]
        row = self._meta.row_class(self, p)
        if self.get_object_id(p) == self.current_item_id:
            self.selected = True
            row.classes.append('current_selected')
        rows.append(row)
        root_projects.remove(p)
        root_projects[0:0] = sorted(p.immediate_subprojects,
                                    key=lambda project: project.name)
    return rows
def test_check_identity_rule_not_found_false(self):
    """An unknown identity rule falls back to the default check
    (admin_required), which fails for this request."""
    policy.reset()
    result = policy.check((("identity", "i_dont_exist"),),
                          request=self.request)
    self.assertFalse(result)
def allowed(self, request):
    """Show only when the CloudFormation DescribeStackEvents policy
    rule passes."""
    rule = (("orchestration", "cloudformation:DescribeStackEvents"),)
    return policy.check(rule, request)
def allowed(self, request):
    """Show only when the murano list_deployments policy rule passes."""
    rule = (("murano", "list_deployments"),)
    return policy.check(rule, request)
def get_data(self, request, context, *args, **kwargs):
    """Populate context["cluster"] with hypervisor/instance statistics.

    Admins get cluster-wide stats from nova; ordinary users get a
    synthetic "cluster" built from their tenant quota.
    """
    context["cluster"] = {}
    # Up/down counters for hypervisors and coarse instance-state counters.
    hypervisor_stats_count = {}
    hypervisor_stats_count['down'] = 0
    hypervisor_stats_count['up'] = 0
    instance_stats_count = {}
    instance_stats_count['down'] = 0
    instance_stats_count['up'] = 0
    instance_stats_count['active'] = 0
    instance_stats_count['running'] = 0
    instance_stats_count['idle'] = 0
    instance_stats_count['free'] = 0
    if policy.check((('identity', 'admin_required'),), self.request):
        # TODO get region extra or description
        context["cluster"]["clusters_list"] = \
            request.user.available_services_regions
        context["cluster"]["current_cluster"] = request.user.services_region
        # NOTE(review): hypervisor_stats is fetched twice here and the
        # first result (`stats`) is unused — looks redundant; confirm
        # before removing.
        stats = api.nova.hypervisor_stats(self.request)
        context["cluster"]["hypervisor_stats"] = \
            api.nova.hypervisor_stats(self.request)
        hypervisor_list = api.nova.hypervisor_list(self.request)
        for hypervisor in hypervisor_list:
            if hypervisor.state == 'up':
                hypervisor_stats_count['up'] += 1
            if hypervisor.state == 'down':
                hypervisor_stats_count['down'] += 1
        # TODO check data and other api
        instance_list = api.nova.server_list(self.request,
                                             all_tenants=True)[0]
        for instance in instance_list:
            if instance.status == 'ACTIVE':
                instance_stats_count['active'] += 1
                instance_stats_count['up'] += 1
            if instance.status == 'RUNNING':
                instance_stats_count['running'] += 1
                instance_stats_count['up'] += 1
            if instance.status == 'SUSPENDED':
                instance_stats_count['idle'] += 1
                instance_stats_count['up'] += 1
            if instance.status == 'CRASHED':
                instance_stats_count['free'] += 1
                instance_stats_count['up'] += 1
            if instance.status == 'SHUTOFF':
                instance_stats_count['down'] += 1
    elif policy.check((('identity', 'admin_or_owner'),), self.request):
        # Non-admin: synthesize "hypervisor stats" from the tenant quota.
        # doing
        tenant_id = self.request.user.token.project.get('id')
        # request.user.tenant_id
        tenant_quota = api.nova.tenant_quota_get(self.request, tenant_id)
        one_cluster = {
            'vcpus': tenant_quota.get('cores').limit,
            'memory_mb': tenant_quota.get('ram').limit / 1024.0,
            'local_gb_used': 0,
            'local_gb': 0
        }
        # vms all count: ttenant_quota.get('instances').limit
        # from django.utils import timezone
        # now = timezone.now()
        # import datetime
        # now = datetime.date.today()
        # start_day = datetime.date(now.year, now.month, 1)
        # start = datetime.datetime(start_day.year, start_day.month,
        #                           start_day.day, 0, 0, 0, 0)
        # end = datetime.datetime(now.year, now.month, now.day,
        #                         23, 59, 59, 0)
        # api.nova.usage_get(self.request,
        #                    self.request.user.token.project.get('id'),
        #                    start, end)
        context["cluster"]["hypervisor_stats"] = one_cluster
    context["cluster"]["hypervisor_stats_count"] = hypervisor_stats_count
    context["cluster"]["instance_stats_count"] = instance_stats_count
    return context
def allowed(self, request, fip=None):
    """Show only when the delete_floatingip policy rule passes."""
    rule = (("network", "delete_floatingip"),)
    return policy.check(rule, request)
def keystoneclient(request, admin=False):
    """Returns a client connected to the Keystone backend.

    Several forms of authentication are supported:

        * Username + password -> Unscoped authentication
        * Username + password + tenant id -> Scoped authentication
        * Unscoped token -> Unscoped authentication
        * Unscoped token + tenant id -> Scoped authentication
        * Scoped token -> Scoped authentication

    Available services and data from the backend will vary depending
    on whether the authentication was scoped or unscoped.

    Lazy authentication if an ``endpoint`` parameter is provided.

    Calls requiring the admin endpoint should have ``admin=True`` passed
    in as a keyword argument.

    The client is cached so that subsequent API calls during the same
    request/response cycle don't have to be re-authenticated.
    """
    client_version = VERSIONS.get_active_version()
    user = request.user
    token_id = user.token.id
    if is_multi_domain_enabled():
        # Cloud Admin, Domain Admin or Mixed Domain Admin
        if is_domain_admin(request):
            # Prefer a domain-scoped token stored in the session, if any.
            domain_token = request.session.get('domain_token')
            if domain_token:
                token_id = getattr(domain_token, 'auth_token', None)
    if admin:
        if not policy.check((("identity", "admin_required"),), request):
            raise exceptions.NotAuthorized
        endpoint_type = 'adminURL'
    else:
        endpoint_type = getattr(settings,
                                'OPENSTACK_ENDPOINT_TYPE',
                                'publicURL')

    # Take care of client connection caching/fetching a new client.
    # Admin vs. non-admin clients are cached separately for token matching.
    cache_attr = "_keystoneclient_admin" if admin \
        else backend.KEYSTONE_CLIENT_ATTR
    if (hasattr(request, cache_attr) and
            (not user.token.id or
             getattr(request, cache_attr).auth_token == user.token.id)):
        conn = getattr(request, cache_attr)
    else:
        endpoint = _get_endpoint_url(request, endpoint_type)
        verify = not getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
        cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
        # NOTE(review): when verification is enabled but no CA cert is
        # configured, this leaves ``verify`` as None — confirm that the
        # session layer treats None as "verify with default CAs" and
        # not as disabled verification.
        verify = verify and cacert
        LOG.debug("Creating a new keystoneclient connection to %s.",
                  endpoint)
        remote_addr = request.environ.get('REMOTE_ADDR', '')
        token_auth = token_endpoint.Token(endpoint=endpoint,
                                          token=token_id)
        keystone_session = session.Session(auth=token_auth,
                                           original_ip=remote_addr,
                                           verify=verify)
        conn = client_version['client'].Client(session=keystone_session,
                                               debug=settings.DEBUG)
        setattr(request, cache_attr, conn)
    return conn
def test_check_nova_context_is_admin_true(self):
    """The compute context_is_admin rule passes for this request."""
    policy.reset()
    result = policy.check((("compute", "context_is_admin"),),
                          request=self.request)
    self.assertTrue(result)
def test_scope_not_found(self):
    """A rule in an unknown policy scope is allowed by default."""
    policy.reset()
    result = policy.check((("dummy", "default"),),
                          request=self.request)
    self.assertTrue(result)
def _can_access_kibana(self):
    """Check the deployment-configured Kibana policy rule."""
    scope = getattr(settings, 'KIBANA_POLICY_SCOPE')
    rule = getattr(settings, 'KIBANA_POLICY_RULE')
    return policy.check(((scope, rule),), self.request)
def test_check_admin_required_true(self):
    """The identity admin_required rule passes for this request."""
    policy.reset()
    result = policy.check((("identity", "admin_required"),),
                          request=self.request)
    self.assertTrue(result)
def is_domain_admin(request):
    """Return whether the requester is an admin of the matching domain."""
    rule = (("identity", "admin_and_matching_domain_id"),)
    return policy.check(rule, request)
def allowed(self, request, project, cell):
    """Permit editing when keystone supports it and policy allows it."""
    if not api.keystone.keystone_can_edit_project():
        return False
    return policy.check((("identity", "identity:update_project"),),
                        request)
def allowed(self, request):
    """Show only when the consistency-group listing policy passes."""
    rule = (("volume", "consistencygroup:get_all"),)
    return policy.check(rule, request)
def allowed(self, request):
    """Show only when both CloudFormation stack policies pass."""
    required_rules = (
        ("orchestration", "cloudformation:DescribeStacks"),
        ("orchestration", "cloudformation:ListStackResources"),
    )
    return policy.check(required_rules, request)
def is_cloud_admin(request):
    """Return whether the requester satisfies the cloud_admin rule."""
    rule = (("identity", "cloud_admin"),)
    return policy.check(rule, request)
def allowed(self, request, user, cell):
    """Permit editing when keystone supports it and policy allows it."""
    if not api.keystone.keystone_can_edit_user():
        return False
    return policy.check((("identity", "identity:update_user"),),
                        request)
def allowed(self, request, volume_type, cell):
    """Permit editing only when the volume-type management policy passes.

    Fix: ``policy.check`` takes an iterable of ``(scope, rule)`` pairs.
    The original passed a single flat two-string tuple, which the policy
    engine would iterate as two bare strings instead of one rule. The
    scope for cinder policies used elsewhere in this file is "volume".
    """
    return policy.check(
        (("volume", "volume_extension:types_manage"),), request)
def _get_networks(self, request):
    """Collect tenant and (optionally) external networks for the topology view.

    Returns a list of dicts (name, id, subnets, status, ...) sorted so
    external networks come first.
    """
    # Get neutron data
    # if we didn't specify tenant_id, all networks shown as admin user.
    # so it is need to specify the networks. However there is no need to
    # specify tenant_id for subnet. The subnet which belongs to the public
    # network is needed to draw subnet information on public network.
    try:
        # NOTE(amotoki):
        # To support auto allocated network in the network topology view,
        # we need to handle the auto allocated network which haven't been
        # created yet. The current network topology logic cannot not handle
        # fake network ID properly, so we temporarily exclude
        # pre-auto-allocated-network from the network topology view.
        # It would be nice if someone is interested in supporting it.
        neutron_networks = api.neutron.network_list_for_tenant(
            request, request.user.tenant_id,
            include_pre_auto_allocate=False)
    except Exception:
        # Best-effort: an unreachable neutron just yields an empty topology.
        neutron_networks = []
    networks = []
    for network in neutron_networks:
        # Per-network policy check so the UI can hide the delete-subnet
        # control; the target scopes the check to the network's tenant.
        allow_delete_subnet = policy.check(
            (("network", "delete_subnet"), ),
            request,
            target={
                'network:tenant_id': getattr(network, 'tenant_id', None)
            })
        obj = {
            'name': network.name_or_id,
            'id': network.id,
            'subnets': [{
                'id': subnet.id,
                'cidr': subnet.cidr
            } for subnet in network.subnets],
            'status': self.trans.network[network.status],
            'allow_delete_subnet': allow_delete_subnet,
            'original_status': network.status,
            'router:external': network['router:external']
        }
        self.add_resource_url('horizon:project:networks:subnets:detail',
                              obj['subnets'])
        networks.append(obj)

    # Add public networks to the networks list
    if self.is_router_enabled:
        try:
            neutron_public_networks = api.neutron.network_list(
                request, **{'router:external': True})
        except Exception:
            neutron_public_networks = []
        my_network_ids = [net['id'] for net in networks]
        for publicnet in neutron_public_networks:
            # Skip external networks already collected above.
            if publicnet.id in my_network_ids:
                continue
            try:
                subnets = [{
                    'id': subnet.id,
                    'cidr': subnet.cidr
                } for subnet in publicnet.subnets]
                self.add_resource_url(
                    'horizon:project:networks:subnets:detail',
                    subnets)
            except Exception:
                subnets = []
            networks.append({
                'name': publicnet.name_or_id,
                'id': publicnet.id,
                'subnets': subnets,
                'status': self.trans.network[publicnet.status],
                'original_status': publicnet.status,
                'router:external': publicnet['router:external']
            })
    self.add_resource_url('horizon:project:networks:detail', networks)
    # External networks sort first (True before False with reverse=True).
    return sorted(networks,
                  key=lambda x: x.get('router:external'),
                  reverse=True)
def get_data(self):
    """Return one page of images for the admin images table.

    Handles the policy gate, the optional filter-first mode, and
    marker-based pagination in both directions; annotates each image
    with its owning project's name.
    """
    images = []
    if not policy.check((("image", "get_images"), ), self.request):
        msg = _("Insufficient privilege level to retrieve image list.")
        messages.info(self.request, msg)
        return images
    filters = self.get_filters()
    filter_first = getattr(settings, 'FILTER_DATA_FIRST', {})
    # In filter-first mode, show nothing until the user supplies a
    # filter beyond the defaults.
    if filter_first.get('admin.images', False) and \
            len(filters) == len(self.DEFAULT_FILTERS):
        self._prev = False
        self._more = False
        self._needs_filter_first = True
        return images
    self._needs_filter_first = False
    # A prev-marker in the querystring means we are paging backwards;
    # it takes precedence over the forward pagination marker.
    prev_marker = self.request.GET.get(
        project_tables.AdminImagesTable._meta.prev_pagination_param,
        None)
    if prev_marker is not None:
        marker = prev_marker
    else:
        marker = self.request.GET.get(
            project_tables.AdminImagesTable._meta.pagination_param,
            None)
    limit = self.request.GET.get(
        project_tables.AdminImagesTable._meta.limit_param, None)
    reversed_order = prev_marker is not None
    try:
        images, self._more, self._prev = \
            api.glance.image_list_detailed(self.request,
                                           marker=marker,
                                           limit=limit,
                                           paginate=True,
                                           filters=filters,
                                           sort_dir='asc',
                                           sort_key='name',
                                           reversed_order=reversed_order)
        self._limit = limit
    except Exception:
        # On failure fall back to an empty, unpaginated view.
        self._prev = False
        self._more = False
        self._limit = None
        msg = _('Unable to retrieve image list.')
        exceptions.handle(self.request, msg)
    if images:
        try:
            tenants, more = api.keystone.tenant_list(self.request)
        except Exception:
            tenants = []
            msg = _('Unable to retrieve project list.')
            exceptions.handle(self.request, msg)
        # Map owner id -> project name for display.
        tenant_dict = dict([(t.id, t.name) for t in tenants])
        for image in images:
            image.tenant_name = tenant_dict.get(image.owner)
    return images
def __init__(self, request, *args, **kwargs):
    """Build the membership form for a project.

    Looks up the default support role, loads the assignable users
    (restricted to the requester's children unless the requester has
    the admin or support role), and pre-selects current members.

    Fixes applied: deprecated ``LOG.warn`` replaced with
    ``LOG.warning``; a block of commented-out role-list code removed.
    """
    super(UpdateProjectMembersAction, self).__init__(request,
                                                    *args,
                                                    **kwargs)
    err_msg = _('Unable to retrieve user list. Please try again later.')
    # Use the domain_id from the project
    domain_id = self.initial.get("domain_id", None)
    project_id = ''
    if 'project_id' in self.initial:
        project_id = self.initial['project_id']

    # Get the default role; it is necessary to add members to a project.
    try:
        default_role = api.keystone.get_default_support_role(self.request)
        if default_role is None:
            default = getattr(settings,
                              "OPENSTACK_KEYSTONE_DEFAULT_SUPPORT_ROLE",
                              None)
            msg = (_('Could not find default role "%s" in Keystone')
                   % default)
            raise exceptions.NotFound(msg)
    except Exception:
        # LOG.warn is deprecated in the stdlib logging API.
        LOG.warning(traceback.format_exc())
        exceptions.handle(self.request,
                          err_msg,
                          redirect=reverse(INDEX_URL))
    default_role_name = self.get_default_role_field_name()
    self.fields[default_role_name] = forms.CharField(required=False)
    self.fields[default_role_name].initial = default_role.id

    # Get list of available users; only list users holding the support
    # role. Non-admin/support requesters only see their own children.
    all_users = []
    try:
        if policy.check((("identity", "admin_required"),), request) or \
                policy.check((("identity", "support_required"),), request):
            all_users = api.keystone.user_list(
                request,
                domain=domain_id,
                filters={'default_role_id': default_role.id})
        else:
            parent_id = self.request.user.id
            all_users = api.keystone.user_list(
                self.request,
                domain=domain_id,
                filters={
                    "parent_id": parent_id,
                    'default_role_id': default_role.id
                },
            )
    except Exception:
        exceptions.handle(request, err_msg)
    users_list = [(user.id, user.name) for user in all_users]

    # Only the default support role is offered for assignment.
    role_list = [default_role]
    for role in role_list:
        field_name = self.get_member_field_name(role.id)
        label = role.name
        self.fields[field_name] = forms.MultipleChoiceField(required=False,
                                                            label=label)
        self.fields[field_name].choices = users_list
        self.fields[field_name].initial = []

    # Figure out users & roles: pre-select the project's current members.
    if project_id:
        try:
            users_roles = api.keystone.get_project_users_roles(request,
                                                               project_id)
        except Exception:
            exceptions.handle(request,
                              err_msg,
                              redirect=reverse(INDEX_URL))
        for user_id in users_roles:
            roles_ids = users_roles[user_id]
            for role_id in roles_ids:
                if role_id == default_role.id:
                    field_name = self.get_member_field_name(role_id)
                    self.fields[field_name].initial.append(user_id)
def test_check_admin_required_false(self):
    """The admin_required identity rule fails for this request."""
    policy_backend.reset()
    admin_ok = policy.check((("identity", "admin_required"), ),
                            request=self.request)
    self.assertFalse(admin_ok)
def is_domain_admin(request):
    """Return True when the request passes the domain-admin policy rule."""
    # TODO(btully): check this to verify that domain id is in scope vs target
    domain_admin_rule = (("identity", "admin_and_matching_domain_id"), )
    return policy.check(domain_admin_rule, request)
def allowed(self, request, fip):
    """Only an unassociated floating IP may be updated, subject to policy."""
    if fip.port_id:
        return False
    return policy.check((("network", "update_floatingip"), ), request)
def test_check_nova_context_is_admin_false(self):
    """A non-admin request context fails nova's context_is_admin rule."""
    policy_backend.reset()
    allowed = policy.check((("compute", "context_is_admin"), ),
                           request=self.request)
    self.assertFalse(allowed)
def allowed(self, request, project, cell):
    """Allow editing the cell except for the current project's own
    'enabled' flag, and only when Keystone and policy permit it."""
    # A user must not disable the project their own token is scoped to.
    is_own_enabled_cell = (
        cell.column.name == 'enabled' and
        request.user.token.project['id'] == cell.datum.id)
    if is_own_enabled_cell:
        return False
    return (api.keystone.keystone_can_edit_project() and
            policy.check((("identity", "identity:update_project"), ),
                         request))
def __init__(self, request, *args, **kwargs):
    """Configure the image-creation form based on policy and settings.

    Hides source-type controls that policy/settings forbid, errors out
    when no creation method remains, and populates the disk-format,
    kernel and ramdisk choices.

    Fix applied: the kernel (aki) and ramdisk (ari) choice lists were
    built with two verbatim-duplicated blocks; they now share
    ``_populate_image_choices``.
    """
    super(CreateImageForm, self).__init__(request, *args, **kwargs)
    if (api.glance.get_image_upload_mode() == 'off' or not policy.check(
            (("image", "upload_image"), ), request)):
        self._hide_file_source_type()
    if not policy.check((("image", "set_image_location"), ), request):
        self._hide_url_source_type()

    # GlanceV2 feature removals
    if api.glance.VERSIONS.active >= 2:
        # NOTE: GlanceV2 doesn't support copy-from feature, sorry!
        self._hide_is_copying()
        if not settings.IMAGES_ALLOW_LOCATION:
            self._hide_url_source_type()
            if (api.glance.get_image_upload_mode() == 'off' or
                    not policy.check(
                        (("image", "upload_image"), ), request)):
                # Neither setting a location nor uploading image data is
                # allowed, so throw an error.
                msg = _('The current Horizon settings indicate no valid '
                        'image creation methods are available. Providing '
                        'an image location and/or uploading from the '
                        'local file system must be allowed to support '
                        'image creation.')
                messages.error(request, msg)
                raise ValidationError(msg)

    if not policy.check((("image", "publicize_image"), ), request):
        self._hide_is_public()

    self.fields['disk_format'].choices = \
        api.glance.get_image_formats(request)

    self._populate_image_choices(request, 'aki', 'kernel')
    self._populate_image_choices(request, 'ari', 'ramdisk')

def _populate_image_choices(self, request, disk_format, field_name):
    """Fill a form field's choices with images of the given disk format.

    Deletes the field entirely when no matching image exists (matching
    the original per-field behavior).
    """
    try:
        found_images = api.glance.image_list_detailed(
            request, filters={'disk_format': disk_format})[0]
    except Exception:
        found_images = []
        msg = _('Unable to retrieve image list.')
        messages.error(request, msg)
    if found_images:
        choices = [('', _("Choose an image"))]
        for image in found_images:
            choices.append((image.id, image))
        self.fields[field_name].choices = choices
    else:
        del self.fields[field_name]
def __init__(self, request, *args, **kwargs):
    """Hide the 'shared' control when policy forbids toggling it."""
    super(UpdateNetwork, self).__init__(request, *args, **kwargs)
    shared_rule = (("network", "update_network:shared"), )
    if not policy.check(shared_rule, request):
        self.fields['shared'].widget = forms.HiddenInput()