def get_disabled_quotas(request):
    """Return the set of quota field names that cannot be managed.

    A field is disabled when its backing service (cinder, neutron, nova)
    is unavailable or the relevant API extension is missing.
    """
    # We no longer support nova network, so we always disable
    # network related nova quota fields.
    disabled_quotas = set()
    # Cinder
    if not cinder.is_volume_service_enabled(request):
        disabled_quotas.update(CINDER_QUOTA_FIELDS)
    # Neutron
    if not base.is_service_enabled(request, 'network'):
        disabled_quotas.update(NEUTRON_QUOTA_FIELDS)
    else:
        if not neutron.is_extension_supported(request, 'security-group'):
            disabled_quotas.update(['security_group', 'security_group_rule'])
        if not neutron.is_router_enabled(request):
            disabled_quotas.update(['router', 'floatingip'])
        try:
            if not neutron.is_quotas_extension_supported(request):
                disabled_quotas.update(NEUTRON_QUOTA_FIELDS)
        except Exception:
            LOG.exception("There was an error checking if the Neutron "
                          "quotas extension is enabled.")
    # Nova
    if not (base.is_service_enabled(request, 'compute') and
            nova.can_set_quotas()):
        disabled_quotas.update(NOVA_QUOTA_FIELDS)
    # There appear to be no glance quota fields currently
    return disabled_quotas
def get_disabled_quotas(request): disabled_quotas = [] # Cinder if not base.is_service_enabled(request, 'volume'): disabled_quotas.extend(CINDER_QUOTA_FIELDS) # Neutron if not base.is_service_enabled(request, 'network'): disabled_quotas.extend(NEUTRON_QUOTA_FIELDS) else: # Remove the nova network quotas disabled_quotas.extend(['floating_ips', 'fixed_ips']) if neutron.is_extension_supported(request, 'security-group'): # If Neutron security group is supported, disable Nova quotas disabled_quotas.extend(['security_groups', 'security_group_rules']) else: # If Nova security group is used, disable Neutron quotas disabled_quotas.extend(['security_group', 'security_group_rule']) try: if not neutron.is_quotas_extension_supported(request): disabled_quotas.extend(NEUTRON_QUOTA_FIELDS) except Exception: LOG.exception("There was an error checking if the Neutron " "quotas extension is enabled.") return disabled_quotas
def get_context(request, context=None):
    """Returns common context data for network topology views."""
    if context is None:
        context = {}
    network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
    # Launch-instance availability: policy check plus quota headroom.
    context['launch_instance_allowed'] = policy.check(
        (("compute", "compute:create"),), request)
    context['instance_quota_exceeded'] = _quota_exceeded(request,
                                                         'instances')
    context['create_network_allowed'] = policy.check(
        (("network", "create_network"),), request)
    context['network_quota_exceeded'] = _quota_exceeded(request, 'networks')
    # Router creation can be disabled deployment-wide via settings.
    context['create_router_allowed'] = (
        network_config.get('enable_router', True) and
        policy.check((("network", "create_router"),), request))
    context['router_quota_exceeded'] = _quota_exceeded(request, 'routers')
    context['console_type'] = getattr(settings, 'CONSOLE_TYPE', 'AUTO')
    # Both the Angular and legacy launch dialogs have independent toggles.
    context['show_ng_launch'] = (
        base.is_service_enabled(request, 'compute') and
        getattr(settings, 'LAUNCH_INSTANCE_NG_ENABLED', True))
    context['show_legacy_launch'] = (
        base.is_service_enabled(request, 'compute') and
        getattr(settings, 'LAUNCH_INSTANCE_LEGACY_ENABLED', False))
    return context
def test_quota_sets_defaults_get_when_service_is_disabled(self, client):
    """A disabled network service makes DefaultQuotaSets return 501."""
    filters = {'user': {'tenant_id': 'tenant'}}
    request = self.mock_rest_request(**{'GET': dict(filters)})
    # mox expectation: report the network service as disabled.
    base.is_service_enabled(request, 'network').AndReturn(False)
    self.mox.ReplayAll()
    response = neutron.DefaultQuotaSets().get(request)
    self.assertStatusCode(response, 501)
    self.assertEqual(response.content.decode('utf-8'),
                     '"Service Neutron is disabled."')
    # The quota API must not be hit when the service is disabled.
    client.tenant_quota_get.assert_not_called()
def __init__(self, request):
    """Wire up floating-IP and security-group managers for the request.

    Neutron backends are preferred when the network service is enabled,
    with Nova as the fallback. Either attribute remains None when no
    suitable backend exists.
    """
    has_neutron = base.is_service_enabled(request, 'network')
    has_nova = base.is_service_enabled(request, 'compute')
    self.secgroups = None
    self.floating_ips = None
    if has_neutron:
        self.floating_ips = neutron.FloatingIpManager(request)
    elif has_nova:
        self.floating_ips = nova.FloatingIpManager(request)
    # Security groups come from Neutron only when its extension exists.
    if has_neutron and neutron.is_extension_supported(request,
                                                      'security-group'):
        self.secgroups = neutron.SecurityGroupManager(request)
    elif has_nova:
        self.secgroups = nova.SecurityGroupManager(request)
def allowed(self, request):
    """Allow only when Neutron is enabled and exposes the agent
    extension; report (and deny) on any lookup failure."""
    try:
        if not base.is_service_enabled(request, 'network'):
            return False
        return neutron.is_extension_supported(request, 'agent')
    except Exception:
        exceptions.handle(request,
                          _('Unable to get network agents info.'))
    return False
def __init__(self, request, context, *args, **kwargs):
    """Build the boot-source choices for the launch-instance form.

    Volume-backed choices are offered only when the volume service is
    enabled; "boot from image (creates a new volume)" additionally
    requires the nova BlockDeviceMappingV2Boot extension.
    """
    self._init_images_cache()
    self.request = request
    self.context = context
    super(SetInstanceDetailsAction, self).__init__(
        request, context, *args, **kwargs)
    source_type_choices = [
        ('', _("Select source")),
        ("image_id", _("Boot from image")),
        ("instance_snapshot_id", _("Boot from snapshot")),
    ]
    if base.is_service_enabled(request, 'volume'):
        source_type_choices.append(("volume_id", _("Boot from volume")))
        try:
            if api.nova.extension_supported("BlockDeviceMappingV2Boot",
                                            request):
                source_type_choices.append(
                    ("volume_image_id",
                     _("Boot from image (creates a new volume)")))
        except Exception:
            exceptions.handle(request, _('Unable to retrieve extensions '
                                         'information.'))
        source_type_choices.append(
            ("volume_snapshot_id",
             _("Boot from volume snapshot (creates a new volume)")))
    self.fields['source_type'].choices = source_type_choices
    # Item 5 @ https://blueprints.launchpad.net/jio/+spec/dashboard-ui-t1
    # Pre-fill instance name in the following format:
    # <TenantName>-MyServer<number>
    self._init_instance_name(request)
def tenant_limit_usages(request):
    """Return merged nova + cinder absolute limits for the tenant.

    A failure in either service is reported to the user; limits from the
    other service are still returned.
    """
    # TODO(licostan): This method shall be removed from Quota module.
    # ProjectUsage/BaseUsage maybe used instead on volume/image dashboards.
    limits = {}
    try:
        if base.is_service_enabled(request, 'compute'):
            limits.update(nova.tenant_absolute_limits(request,
                                                      reserved=True))
    except Exception:
        msg = _("Unable to retrieve compute limit information.")
        exceptions.handle(request, msg)
    if cinder.is_volume_service_enabled(request):
        try:
            limits.update(cinder.tenant_absolute_limits(request))
        except cinder.cinder_exception.ClientException:
            msg = _("Unable to retrieve volume limit information.")
            exceptions.handle(request, msg)
    # TODO(amotoki): Support neutron quota details extensions
    # which returns limit/usage/reserved per resource.
    # Note that the data format is different from nova/cinder limit API.
    # https://developer.openstack.org/
    # api-ref/network/v2/#quotas-details-extension-quota-details
    return limits
def __init__(self, request, *args, **kwargs):
    """Build the network (and per-network subnet) choice fields.

    With Neutron: one switchable net field plus one switched subnet
    field per network. With Nova-network: a single net field.
    """
    super(Create, self).__init__(request, *args, **kwargs)
    self.neutron_enabled = base.is_service_enabled(request, 'network')
    net_choices = network.network_list(request)
    if self.neutron_enabled:
        self.fields['neutron_net_id'] = forms.ChoiceField(
            choices=[(' ', ' ')] + [(choice.id, choice.name_or_id)
                                    for choice in net_choices],
            label=_("Neutron Net"),
            widget=forms.Select(
                attrs={'class': 'switchable', 'data-slug': 'net'}))
        for net in net_choices:
            # For each network create a switched choice field with
            # its subnet choices.
            subnet_field_name = 'subnet-choices-%s' % net.id
            subnet_field = forms.ChoiceField(
                choices=(),
                label=_("Neutron Subnet"),
                widget=forms.Select(attrs={
                    'class': 'switched',
                    'data-switch-on': 'net',
                    'data-net-%s' % net.id: _("Neutron Subnet")
                }))
            self.fields[subnet_field_name] = subnet_field
            subnet_choices = neutron.subnet_list(
                request, network_id=net.id)
            self.fields[subnet_field_name].choices = [
                (' ', ' ')] + [(choice.id, choice.name_or_id)
                               for choice in subnet_choices]
    else:
        self.fields['nova_net_id'] = forms.ChoiceField(
            choices=[(' ', ' ')] + [(choice.id, choice.name_or_id)
                                    for choice in net_choices],
            label=_("Nova Net"),
            widget=forms.Select(
                attrs={'class': 'switched', 'data-slug': 'net'}))
def _update_project_quota(self, request, data, project_id):
    """Push nova/cinder/neutron quota values from ``data`` to the
    backends.

    Returns True on success, False after reporting any failure.
    """
    # update the project quota
    nova_data = dict(
        [(key, data[key]) for key in quotas.NOVA_QUOTA_FIELDS])
    try:
        nova.tenant_quota_update(request, project_id, **nova_data)
        if base.is_service_enabled(request, 'volume'):
            cinder_data = dict([(key, data[key]) for key
                                in quotas.CINDER_QUOTA_FIELDS])
            cinder.tenant_quota_update(request, project_id,
                                       **cinder_data)
        if api.base.is_service_enabled(request, 'network') and \
                api.neutron.is_quotas_extension_supported(request):
            neutron_data = {}
            disabled_quotas = quotas.get_disabled_quotas(request)
            # Only send fields the deployment can actually manage.
            for key in quotas.NEUTRON_QUOTA_FIELDS:
                if key not in disabled_quotas:
                    neutron_data[key] = data[key]
            api.neutron.tenant_quota_update(request, project_id,
                                            **neutron_data)
        return True
    except Exception:
        exceptions.handle(request, _('Modified project information and '
                                     'members, but unable to modify '
                                     'project quotas.'))
        return False
def _update_project_quota(self, request, data, project_id):
    """Update nova/cinder/neutron project quotas, honouring the
    ENABLED_QUOTA_GROUPS setting.

    Each backend group ('nova'/'cinder'/'neutron') can be switched off
    wholesale via settings; groups default to enabled when absent.
    """
    # Update the project quota.
    enabled_quota_groups = getattr(settings, 'ENABLED_QUOTA_GROUPS', {})
    nova_data = dict(
        [(key, data[key]) for key in quotas.NOVA_QUOTA_FIELDS])
    try:
        if enabled_quota_groups.get('nova', True):
            nova.tenant_quota_update(request, project_id, **nova_data)
        if enabled_quota_groups.get('cinder', True):
            if base.is_service_enabled(request, 'volume'):
                cinder_data = dict([(key, data[key]) for key
                                    in quotas.CINDER_QUOTA_FIELDS])
                cinder.tenant_quota_update(request, project_id,
                                           **cinder_data)
        if enabled_quota_groups.get('neutron', True):
            if api.base.is_service_enabled(request, 'network') and \
                    api.neutron.is_quotas_extension_supported(request):
                neutron_data = {}
                disabled_quotas = quotas.get_disabled_quotas(request)
                # Only send fields the deployment can actually manage.
                for key in quotas.NEUTRON_QUOTA_FIELDS:
                    if key not in disabled_quotas:
                        neutron_data[key] = data[key]
                api.neutron.tenant_quota_update(request, project_id,
                                                **neutron_data)
    except Exception:
        exceptions.handle(request, _('Unable to set project quotas.'))
def allowed(self, request, image=None):
    """Enable the action only for active, launchable image formats when
    the volume service is available."""
    if not image:
        return False
    if image.container_format in NOT_LAUNCHABLE_FORMATS:
        return False
    if not base.is_service_enabled(request, 'volume'):
        return False
    return image.status == "active"
def _update_project_quota(self, request, data, project_id):
    """Update nova/cinder/neutron quotas for the project in the user's
    current services region.

    Failures are reported to the user via ``exceptions.handle``.
    """
    # Update the project quota.
    nova_data = dict(
        [(key, data[key]) for key in NOVA_QUOTA_FIELDS])
    data['region_id'] = request.user.services_region
    try:
        nova.tenant_quota_update(request, project_id,
                                 region=data['region_id'],
                                 **nova_data)
        if base.is_service_enabled(request, 'volume'):
            cinder_data = dict([(key, data[key]) for key
                                in quotas.CINDER_QUOTA_FIELDS])
            cinder.tenant_quota_update(request, project_id,
                                       region=data['region_id'],
                                       **cinder_data)
        if api.base.is_service_enabled(request, 'network') and \
                api.neutron.is_quotas_extension_supported(request):
            neutron_data = {}
            for key in NEUTRON_QUOTA_FIELDS:
                neutron_data[key] = data[key]
            api.neutron.tenant_quota_update(request, project_id,
                                            region=data['region_id'],
                                            **neutron_data)
    except Exception:
        # Bug fix: this used ``raise exceptions.handle(...)``, but
        # exceptions.handle returns None when it does not re-raise, so
        # the ``raise`` itself blew up with a TypeError. Call the
        # handler directly, matching the sibling quota handlers.
        exceptions.handle(request, _('Unable to set project quotas.'))
def tenant_limit_usages(request):
    """Return nova absolute limits plus cinder limits, augmented with
    computed volume/snapshot usage totals."""
    # TODO(licostan): This method shall be removed from Quota module.
    # ProjectUsage/BaseUsage maybe used instead on volume/image dashboards.
    limits = {}
    try:
        limits.update(nova.tenant_absolute_limits(request))
    except Exception:
        msg = _("Unable to retrieve compute limit information.")
        exceptions.handle(request, msg)
    if base.is_service_enabled(request, 'volume'):
        try:
            limits.update(cinder.tenant_absolute_limits(request))
            volumes = cinder.volume_list(request)
            snapshots = cinder.volume_snapshot_list(request)
            # Derive usage figures the limits API does not report.
            total_size = sum([getattr(volume, 'size', 0) for volume
                              in volumes])
            limits['gigabytesUsed'] = total_size
            limits['volumesUsed'] = len(volumes)
            limits['snapshotsUsed'] = len(snapshots)
        except Exception:
            msg = _("Unable to retrieve volume limit information.")
            exceptions.handle(request, msg)
    return limits
def get_share_networks_data(self):
    """Return all tenants' share networks with network/subnet ids
    resolved to display names (Neutron or Nova-network)."""
    try:
        share_networks = manila.share_network_list(
            self.request, detailed=True,
            search_opts={'all_tenants': True})
        if base.is_service_enabled(self.request, 'network'):
            neutron_net_names = dict((net.id, net.name) for net in
                                     neutron.network_list(self.request))
            neutron_subnet_names = dict((net.id, net.name) for net in
                                        neutron.subnet_list(self.request))
            for sn in share_networks:
                # Fall back to the raw id, then "-" when unset.
                sn.neutron_net = neutron_net_names.get(
                    sn.neutron_net_id) or sn.neutron_net_id or "-"
                sn.neutron_subnet = neutron_subnet_names.get(
                    sn.neutron_subnet_id) or sn.neutron_subnet_id or "-"
        else:
            nova_net_names = dict(
                [(net.id, net.label) for net in
                 network.network_list(self.request)])
            for sn in share_networks:
                sn.nova_net = nova_net_names.get(
                    sn.nova_net_id) or sn.nova_net_id or "-"
    except Exception:
        share_networks = []
        exceptions.handle(self.request,
                          _("Unable to retrieve share networks"))
    utils.set_project_name_to_objects(self.request, share_networks)
    return share_networks
def __init__(self, request):
    """Select the node wrapper class: Ironic when the bare-metal
    service is enabled, the legacy BareMetal wrapper otherwise."""
    self.node_class = (IronicNode
                       if base.is_service_enabled(request, 'baremetal')
                       else BareMetalNode)
def __init__(self, request, context, *args, **kwargs):
    """Build the boot-source choices for the launch-instance form.

    Volume-backed choices are offered only when the volume service is
    enabled; "boot from image (creates a new volume)" additionally
    requires the nova BlockDeviceMappingV2Boot extension.
    """
    self._init_images_cache()
    self.request = request
    self.context = context
    super(SetInstanceDetailsAction, self).__init__(
        request, context, *args, **kwargs)
    source_type_choices = [
        ('', _("Select source")),
        ("image_id", _("Boot from image")),
        ("instance_snapshot_id", _("Boot from snapshot")),
    ]
    if base.is_service_enabled(request, 'volume'):
        source_type_choices.append(("volume_id", _("Boot from volume")))
        try:
            if api.nova.extension_supported("BlockDeviceMappingV2Boot",
                                            request):
                source_type_choices.append(
                    ("volume_image_id",
                     _("Boot from image (creates a new volume)")))
        except Exception:
            exceptions.handle(request, _('Unable to retrieve extensions '
                                         'information.'))
        source_type_choices.append(
            ("volume_snapshot_id",
             _("Boot from volume snapshot (creates a new volume)")))
    self.fields['source_type'].choices = source_type_choices
def get_help_text(self, extra_context=None):
    """Assemble quota usage, flavor and image metadata for the help
    panel rendered beside the launch form."""
    extra = {} if extra_context is None else dict(extra_context)
    try:
        extra['usages'] = quotas.tenant_quota_usages(
            self.request, targets=('instances', 'cores', 'ram',
                                   'volumes', 'gigabytes'))
        extra['usages_json'] = json.dumps(extra['usages'])
        extra['cinder_enabled'] = \
            base.is_service_enabled(self.request, 'volume')
        flavors = json.dumps([f._info for f in
                              instance_utils.flavor_list(self.request)])
        extra['flavors'] = flavors
        images = image_utils.get_available_images(
            self.request, self.initial['project_id'],
            self._images_cache)
        if images is not None:
            # Only the fields the client-side helper needs.
            attrs = [{'id': i.id,
                      'min_disk': getattr(i, 'min_disk', 0),
                      'min_ram': getattr(i, 'min_ram', 0),
                      'size': functions.bytes_to_gigabytes(i.size)}
                     for i in images]
            extra['images'] = json.dumps(attrs)
    except Exception:
        exceptions.handle(self.request,
                          _("Unable to retrieve quota information."))
    return super(SetInstanceDetailsAction, self).get_help_text(extra)
def get_context_data(self, **kwargs):
    """Add role, nodes, flavor/image and (when metering is enabled)
    Ceilometer meter configuration to the role detail context."""
    context = super(DetailView, self).get_context_data(**kwargs)
    redirect = reverse(INDEX_URL)
    plan = api.tuskar.Plan.get_the_plan(self.request)
    stack = self.get_stack()
    role = self.get_role(redirect)
    context['role'] = role
    if stack:
        context['nodes'] = self._get_nodes(stack, role)
    else:
        context['nodes'] = []
    context['flavor'] = role.flavor(plan)
    context['image'] = role.image(plan)
    if stack:
        if api_base.is_service_enabled(self.request, 'metering'):
            # Meter configuration in the following format:
            # (meter label, url part, barchart (True/False))
            context['meter_conf'] = (
                (_('System Load'),
                 metering_utils.url_part('hardware.cpu.load.1min',
                                         False),
                 None),
                (_('CPU Utilization'),
                 metering_utils.url_part('hardware.system_stats.cpu.util',
                                         True),
                 '100'),
                (_('Swap Utilization'),
                 metering_utils.url_part('hardware.memory.swap.util',
                                         True),
                 '100'),
            )
    return context
def __init__(self, request, context, *args, **kwargs):
    """Build the boot-source choices and hide the device-name field
    when the hypervisor cannot honour a custom mount point."""
    self._init_images_cache()
    self.request = request
    self.context = context
    super(SetInstanceDetailsAction, self).__init__(request, context,
                                                   *args, **kwargs)
    # Hide the device field if the hypervisor doesn't support it.
    if not nova.can_set_mount_point():
        self.fields["device_name"].widget = forms.widgets.HiddenInput()
    source_type_choices = [
        ("", _("Select source")),
        ("image_id", _("Boot from image")),
        ("instance_snapshot_id", _("Boot from snapshot")),
    ]
    if base.is_service_enabled(request, "volume"):
        source_type_choices.append(("volume_id", _("Boot from volume")))
        try:
            if api.nova.extension_supported("BlockDeviceMappingV2Boot",
                                            request):
                source_type_choices.append(
                    ("volume_image_id",
                     _("Boot from image (creates a new volume)")))
        except Exception:
            exceptions.handle(request, _("Unable to retrieve extensions "
                                         "information."))
        source_type_choices.append(
            ("volume_snapshot_id",
             _("Boot from volume snapshot (creates a new volume)")))
    self.fields["source_type"].choices = source_type_choices
def populate_volume_snapshot_id_choices(self, request, context):
    """Return form choices of volume snapshots in the 'available'
    state, with a placeholder entry first."""
    snapshots = []
    try:
        # Accept either cinder service type name.
        if base.is_service_enabled(request, "volume") or \
                base.is_service_enabled(request, "volumev2"):
            available = api.cinder.VOLUME_STATE_AVAILABLE
            snapshots = [
                self._get_volume_display_name(s) for s in
                cinder.volume_snapshot_list(
                    self.request, search_opts=dict(status=available))
            ]
    except Exception:
        exceptions.handle(self.request,
                          _("Unable to retrieve list of volume "
                            "snapshots."))
    if snapshots:
        snapshots.insert(0, ("", _("Select Volume Snapshot")))
    else:
        snapshots.insert(0, ("", _("No volume snapshots available")))
    return snapshots
def allowed(self, request, volume=None):
    """Allow the action only for an available volume when the volume
    service is enabled and the user's policy list contains the
    "Create Volume" permission."""
    if volume and base.is_service_enabled(request, 'volume'):
        # Bug fix: ``.get(...)`` returns None when the user has no
        # policy entry, and ``"Create Volume" in None`` raises a
        # TypeError. Guard the lookup before the membership test.
        policies = request.session['user_policies'].get(
            request.user.openstackname)
        if policies and "Create Volume" in policies:
            return volume.status == "available"
    return False
def __init__(self, request):
    """Bind network list/get helpers to Neutron when available,
    otherwise to the Nova-network fallbacks."""
    super(NetworkClient, self).__init__(request)
    if base.is_service_enabled(request, 'network'):
        backend_list = neutron.network_list
        backend_get = neutron.network_get
    else:
        backend_list = _nova_network_list
        backend_get = _nova_network_get
    self.network_list = backend_list
    self.network_get = backend_get
def populate_volume_id_choices(self, request, context):
    """Return form choices of bootable volumes in the 'available'
    state, with a placeholder entry first."""
    volumes = []
    try:
        # Accept either cinder service type name.
        if (base.is_service_enabled(request, 'volume') or
                base.is_service_enabled(request, 'volumev2')):
            available = api.cinder.VOLUME_STATE_AVAILABLE
            volumes = [self._get_volume_display_name(v) for v in
                       cinder.volume_list(self.request,
                                          search_opts=dict(
                                              status=available,
                                              bootable=1))]
    except Exception:
        exceptions.handle(self.request,
                          _('Unable to retrieve list of volumes.'))
    if volumes:
        volumes.insert(0, ("", _("Select Volume")))
    else:
        volumes.insert(0, ("", _("No volumes available")))
    return volumes
def get_context_data(self, request):
    """Assemble node-detail context: heat role/stack, PXE deploy images
    and (for deployed nodes with metering) the Ceilometer meter
    configuration."""
    node = self.tab_group.kwargs['node']
    context = {'node': node}
    try:
        resource = api.heat.Resource.get_by_node(self.request, node)
    except LookupError:
        # Node is not part of any stack; role/stack stay unset.
        pass
    else:
        context['role'] = resource.role
        context['stack'] = resource.stack
    context['kernel_image'] = api.node.image_get(
        request, node.driver_info['pxe_deploy_kernel']
    )
    context['ramdisk_image'] = api.node.image_get(
        request, node.driver_info['pxe_deploy_ramdisk']
    )
    if node.instance_uuid:
        if api_base.is_service_enabled(self.request, 'metering'):
            # Meter configuration in the following format:
            # (meter label, url part, y_max)
            context['meter_conf'] = (
                (_('System Load'),
                 metering_utils.url_part('hardware.cpu.load.1min',
                                         False),
                 None),
                (_('CPU Utilization'),
                 metering_utils.url_part('hardware.system_stats.cpu.util',
                                         True),
                 '100'),
                (_('Swap Utilization'),
                 metering_utils.url_part('hardware.memory.swap.util',
                                         True),
                 '100'),
                (_('Current'),
                 metering_utils.url_part('hardware.ipmi.current', False),
                 None),
                (_('Network IO'),
                 metering_utils.url_part('network-io', False),
                 None),
                (_('Disk IO'),
                 metering_utils.url_part('disk-io', False),
                 None),
                (_('Temperature'),
                 metering_utils.url_part('hardware.ipmi.temperature',
                                         False),
                 None),
                (_('Fan Speed'),
                 metering_utils.url_part('hardware.ipmi.fan', False),
                 None),
                (_('Voltage'),
                 metering_utils.url_part('hardware.ipmi.voltage', False),
                 None),
            )
    return context
def tenant_quota_usages(request):
    """Build a QuotaUsage combining tenant quota limits with live usage
    tallies (instances, cores, ram, floating IPs and, when cinder is
    enabled, volumes/gigabytes).

    An optional ``cloud`` GET/POST parameter restricts instance-derived
    tallies to servers belonging to that cloud.
    """
    # The cloud filter may arrive via GET or POST; GET takes priority.
    cloud = None
    if 'cloud' in request.GET:
        cloud = request.GET['cloud']
    elif 'cloud' in request.POST:
        cloud = request.POST['cloud']
    # Get our quotas and construct our usage object.
    disabled_quotas = []
    if not is_service_enabled(request, 'volume'):
        disabled_quotas.extend(['volumes', 'gigabytes'])
    usages = QuotaUsage()
    for quota in get_tenant_quota_data(request, disabled_quotas):
        usages.add_quota(quota)
    # Get our usages.
    floating_ips = nova.tenant_floating_ip_list(request)
    flavors = dict([(f.id, f) for f in nova.flavor_list(request)])
    instances = nova.server_list(request)
    if cloud is not None:
        instances = [instance for instance in instances
                     if get_cloud(instance) == cloud]
    # Fetch deleted flavors if necessary. A set deduplicates the ids,
    # replacing the redundant in-loop membership check.
    missing_flavors = set(instance.flavor['id'] for instance in instances
                          if instance.flavor['id'] not in flavors)
    for missing in missing_flavors:
        try:
            flavors[missing] = nova.flavor_get(request, missing)
        except Exception:
            # Bug fix: was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt.
            flavors[missing] = {}
            exceptions.handle(request, ignore=True)
    usages.tally('instances', len(instances))
    usages.tally('floating_ips', len(floating_ips))
    if 'volumes' not in disabled_quotas:
        volumes = cinder.volume_list(request)
        usages.tally('gigabytes', sum([int(v.size) for v in volumes]))
        usages.tally('volumes', len(volumes))
    # Sum our usage based on the flavors of the instances.
    for flavor in [flavors[instance.flavor['id']]
                   for instance in instances]:
        usages.tally('cores', getattr(flavor, 'vcpus', None))
        usages.tally('ram', getattr(flavor, 'ram', None))
    # Initialise the tally if no instances have been launched yet.
    if not instances:
        usages.tally('cores', 0)
        usages.tally('ram', 0)
    return usages
def handle(self, request, data):
    """Create the project, grant users their selected roles and set the
    initial nova/cinder quotas.

    Returns False only when project creation itself fails; member and
    quota failures are reported but do not abort the workflow.
    """
    # create the project
    domain_context = self.request.session.get('domain_context', None)
    try:
        desc = data['description']
        self.object = api.keystone.tenant_create(request,
                                                 name=data['name'],
                                                 description=desc,
                                                 enabled=data['enabled'],
                                                 domain=domain_context)
    except Exception:
        exceptions.handle(request, ignore=True)
        return False
    project_id = self.object.id
    # update project members
    users_to_add = 0
    try:
        available_roles = api.keystone.role_list(request)
        member_step = self.get_step(PROJECT_USER_MEMBER_SLUG)
        # count how many users are to be added
        for role in available_roles:
            field_name = member_step.get_member_field_name(role.id)
            role_list = data[field_name]
            users_to_add += len(role_list)
        # add new users to project
        for role in available_roles:
            field_name = member_step.get_member_field_name(role.id)
            role_list = data[field_name]
            users_added = 0
            for user in role_list:
                api.keystone.add_tenant_user_role(request,
                                                  project=project_id,
                                                  user=user,
                                                  role=role.id)
                users_added += 1
            users_to_add -= users_added
    except Exception:
        # users_to_add now holds the number of grants NOT applied.
        exceptions.handle(request, _('Failed to add %s project members '
                                     'and set project quotas.')
                          % users_to_add)
    # Update the project quota.
    nova_data = dict([(key, data[key]) for key in NOVA_QUOTA_FIELDS])
    try:
        nova.tenant_quota_update(request, project_id, **nova_data)
        if is_service_enabled(request, 'volume'):
            cinder_data = dict([(key, data[key]) for key
                                in CINDER_QUOTA_FIELDS])
            cinder.tenant_quota_update(request, project_id,
                                       **cinder_data)
    except Exception:
        exceptions.handle(request, _('Unable to set project quotas.'))
    return True
def servers_update_addresses(request, servers, all_tenants=False):
    """Refresh the servers' networking information from Neutron.

    This is a no-op when the network service is disabled. Use it when
    up-to-date addresses are required and Nova's networking info cache
    is not fresh enough.
    """
    if base.is_service_enabled(request, 'network'):
        neutron.servers_update_addresses(request, servers, all_tenants)
def get_manila_limits(self):
    """Merge Manila share limits into ``self.limits``; a no-op when the
    share service is disabled."""
    if base.is_service_enabled(self.request, 'share'):
        try:
            self.limits.update(
                manila.tenant_absolute_limits(self.request))
        except Exception:
            horizon.exceptions.handle(
                self.request,
                _("Unable to retrieve share limit information."))
def get_quota_data(request, method_name):
    """Call ``method_name`` on nova — and on cinder when the volume
    service is enabled — and merge the results into one QuotaSet."""
    tenant_id = request.user.tenant_id
    quota_sources = [getattr(nova, method_name)(request, tenant_id)]
    if is_service_enabled(request, 'volume'):
        quota_sources.append(
            getattr(cinder, method_name)(request, tenant_id))
    merged = QuotaSet()
    for quota in itertools.chain.from_iterable(quota_sources):
        merged[quota.name] = quota.limit
    return merged
def handle(self, request, data):
    """Push Manila default quota values from the form data.

    Returns True on success (including when the share service is
    disabled), False after reporting a failure.
    """
    try:
        if base.is_service_enabled(request, 'share'):
            manila_data = {key: data[key]
                           for key in api_manila.MANILA_QUOTA_FIELDS}
            api_manila.default_quota_update(request, **manila_data)
        return True
    except Exception:
        exceptions.handle(request, _('Unable to update default quotas.'))
        return False
def get_volume_snapshots_data(self):
    """Return the tenant's volume snapshots, or an empty list when
    cinder is unavailable or the listing fails."""
    if not base.is_service_enabled(self.request, 'volume'):
        return []
    try:
        return api.cinder.volume_snapshot_list(self.request)
    except Exception:
        exceptions.handle(self.request, _("Unable to retrieve "
                                          "volume snapshots."))
        return []
def populate_volume_id_choices(self, request, context):
    """Return form choices of bootable volumes in the 'available'
    state, with a placeholder entry first."""
    volumes = []
    try:
        # Accept either cinder service type name.
        if (base.is_service_enabled(request, 'volume') or
                base.is_service_enabled(request, 'volumev2')):
            available = api.cinder.VOLUME_STATE_AVAILABLE
            volumes = [
                self._get_volume_display_name(v) for v in
                cinder.volume_list(
                    self.request,
                    search_opts=dict(status=available, bootable=1))
            ]
    except Exception:
        exceptions.handle(self.request,
                          _('Unable to retrieve list of volumes.'))
    if volumes:
        volumes.insert(0, ("", _("Select Volume")))
    else:
        volumes.insert(0, ("", _("No volumes available")))
    return volumes
def handle(self, request, data):
    """Create the project, grant users their selected roles and set the
    initial nova/cinder quotas.

    Returns False only when project creation itself fails; member and
    quota failures are reported but do not abort the workflow.
    """
    # create the project
    domain_context = self.request.session.get('domain_context', None)
    try:
        desc = data['description']
        self.object = api.keystone.tenant_create(request,
                                                 name=data['name'],
                                                 description=desc,
                                                 enabled=data['enabled'],
                                                 domain=domain_context)
    except Exception:
        # Bug fix: was a bare ``except:``, which also trapped
        # SystemExit/KeyboardInterrupt.
        exceptions.handle(request, ignore=True)
        return False
    project_id = self.object.id
    # update project members
    users_to_add = 0
    try:
        available_roles = api.keystone.role_list(request)
        # count how many users are to be added
        for role in available_roles:
            role_list = data["role_" + role.id]
            users_to_add += len(role_list)
        # add new users to project
        for role in available_roles:
            role_list = data["role_" + role.id]
            users_added = 0
            for user in role_list:
                api.keystone.add_tenant_user_role(request,
                                                  project=project_id,
                                                  user=user,
                                                  role=role.id)
                users_added += 1
            users_to_add -= users_added
    except Exception:
        # Bug fix: interpolate AFTER translation. The original used
        # _('... %s ...' % users_to_add), which builds the message
        # before the catalog lookup, so the msgid never matches and the
        # string is never translated.
        exceptions.handle(
            request,
            _('Failed to add %s project members '
              'and set project quotas.') % users_to_add)
    # Update the project quota.
    nova_data = dict([(key, data[key]) for key in NOVA_QUOTA_FIELDS])
    try:
        nova.tenant_quota_update(request, project_id, **nova_data)
        if is_service_enabled(request, 'volume'):
            cinder_data = dict([(key, data[key]) for key
                                in CINDER_QUOTA_FIELDS])
            cinder.tenant_quota_update(request, project_id,
                                       **cinder_data)
    except Exception:
        exceptions.handle(request, _('Unable to set project quotas.'))
    return True
def get_disabled_quotas(request): disabled_quotas = set([]) # Cinder if not cinder.is_volume_service_enabled(request): disabled_quotas.update(CINDER_QUOTA_FIELDS) # Neutron if not base.is_service_enabled(request, 'network'): disabled_quotas.update(NEUTRON_QUOTA_FIELDS) else: # Remove the nova network quotas disabled_quotas.update(['floating_ips', 'fixed_ips']) if neutron.is_extension_supported(request, 'security-group'): # If Neutron security group is supported, disable Nova quotas disabled_quotas.update(['security_groups', 'security_group_rules']) else: # If Nova security group is used, disable Neutron quotas disabled_quotas.update(['security_group', 'security_group_rule']) if not neutron.is_router_enabled(request): disabled_quotas.update(['router', 'floatingip']) try: if not neutron.is_quotas_extension_supported(request): disabled_quotas.update(NEUTRON_QUOTA_FIELDS) except Exception: LOG.exception("There was an error checking if the Neutron " "quotas extension is enabled.") # Nova if not (base.is_service_enabled(request, 'compute') and nova.can_set_quotas()): disabled_quotas.update(NOVA_QUOTA_FIELDS) # The 'missing' quota fields are all nova (this is hardcoded in # dashboards.admin.defaults.workflows) disabled_quotas.update(MISSING_QUOTA_FIELDS) # There appear to be no glance quota fields currently return disabled_quotas
def get_context(request, context=None):
    """Returns common context data for network topology views."""
    if context is None:
        context = {}
    context['launch_instance_allowed'] = policy.check(
        (("compute", "os_compute_api:servers:create"), ), request)
    context['instance_quota_exceeded'] = _quota_exceeded(request,
                                                         'instances')
    context['create_network_allowed'] = policy.check(
        (("network", "create_network"), ), request)
    context['network_quota_exceeded'] = _quota_exceeded(request,
                                                        'network')
    # Router creation needs both the deployment setting and policy.
    context['create_router_allowed'] = (setting_utils.get_dict_config(
        'OPENSTACK_NEUTRON_NETWORK', 'enable_router') and policy.check(
        (("network", "create_router"), ), request))
    context['router_quota_exceeded'] = _quota_exceeded(request, 'router')
    context['console_type'] = settings.CONSOLE_TYPE
    # Both the Angular and legacy launch dialogs have independent toggles.
    context['show_ng_launch'] = (
        base.is_service_enabled(request, 'compute')
        and settings.LAUNCH_INSTANCE_NG_ENABLED)
    context['show_legacy_launch'] = (base.is_service_enabled(
        request, 'compute') and settings.LAUNCH_INSTANCE_LEGACY_ENABLED)
    return context
def __init__(self, request, context, *args, **kwargs):
    """Launch-form setup restricted to volume-backed boot sources,
    with special handling for pay-as-you-go ('payg') billing accounts
    (plan-based flavor selection instead of the flavor field)."""
    self._init_images_cache()
    self.request = request
    self.context = context
    super(SetInstanceDetailsAction, self).__init__(
        request, context, *args, **kwargs)
    # Hide the device field if the hypervisor doesn't support it.
    if not nova.can_set_mount_point():
        self.fields['device_name'].widget = forms.widgets.HiddenInput()
    source_type_choices = [
        ('', _("Select source")),
        # ("image_id", _("Boot from image")),
        # ("instance_snapshot_id", _("Boot from snapshot")),
    ]
    if base.is_service_enabled(request, 'volume'):
        source_type_choices.append(("volume_id", _("Boot from volume")))
        try:
            if api.nova.extension_supported("BlockDeviceMappingV2Boot",
                                            request):
                source_type_choices.append(
                    ("volume_image_id",
                     _("Boot from image (creates a new volume)")))
        except Exception:
            exceptions.handle(request, _('Unable to retrieve extensions '
                                         'information.'))
        source_type_choices.append(
            ("volume_snapshot_id",
             _("Boot from volume snapshot (creates a new volume)")))
    self.fields['source_type'].choices = source_type_choices
    #
    # Astute: handle pay-as-you-go accounts
    #
    type_map = get_project_type_mapping(request,
                                        self.initial['project_id'])
    if type_map and type_map['billing_type']['code'] == 'payg':
        # payg: the flavor comes from an unassociated plan instead.
        del self.fields['flavor']
        plans_choices = []
        for item in get_project_plan_mappings(request,
                                              self.initial['project_id'],
                                              unassociated=True):
            # Choice value encodes "<plan id>::<flavor id>".
            plans_choices.append((str(item['id']) + '::' +
                                  str(item['flavor_id']),
                                  item['plan_name']))
        self.fields['plan_flavor'].choices = plans_choices
        self.fields['count'].widget = forms.widgets.HiddenInput()
        self.fields['count'].value = 1
    else:
        if type_map:
            plan_maps = get_project_plan_mappings(
                request, self.initial['project_id'])
            if len(plan_maps) < 1:
                self.fields['flavor'].choices = []
        del self.fields['plan_flavor']
def get_data(self, request, share_net_id):
    """Fetch one share network and annotate it with human-readable
    network/subnet names from Neutron (or Nova-network)."""
    share_net = manila.share_network_get(request, share_net_id)
    if base.is_service_enabled(request, 'network'):
        net = neutron.network_get(request, share_net.neutron_net_id)
        share_net.neutron_net = net.name_or_id
        subnet = neutron.subnet_get(request, share_net.neutron_subnet_id)
        share_net.neutron_subnet = subnet.name_or_id
    else:
        nova_net = network.network_get(request, share_net.nova_net_id)
        share_net.nova_net = nova_net.name_or_id
    return share_net
def __init__(self, request, context, *args, **kwargs):
    """Launch-form setup: volume-only boot sources, plus hard-coded
    memory/vcpu choices that replace the flavor selector."""
    self._init_images_cache()
    self.request = request
    self.context = context
    super(SetInstanceDetailsAction, self).__init__(
        request, context, *args, **kwargs)
    # Hide the device field if the hypervisor doesn't support it.
    if not nova.can_set_mount_point():
        self.fields['device_name'].widget = forms.widgets.HiddenInput()
        self.fields['volume_size'].widget = forms.widgets.HiddenInput()
    source_type_choices = [
        ('', _("Select source")),
        #("image_id", _("Boot from image")),
        #("instance_snapshot_id", _("Boot from snapshot")),
    ]
    if base.is_service_enabled(request, 'volume'):
        source_type_choices.append(("volume_id", _("Boot from volume")))
        try:
            if api.nova.extension_supported("BlockDeviceMappingV2Boot",
                                            request):
                source_type_choices.append(
                    ("volume_image_id",
                     _("Boot from image (creates a new volume)")))
        except Exception:
            exceptions.handle(
                request, _('Unable to retrieve extensions '
                           'information.'))
        source_type_choices.append(
            ("volume_snapshot_id",
             _("Boot from volume snapshot (creates a new volume)")))
    self.fields['source_type'].choices = source_type_choices
    # add by zhihao.ding 2015/7/16 for kill_flavor start
    self.fields['memory_mb'].choices = [
        ('512', '512MB'),
        ('1024', '1GB'),
        ('2048', '2GB'),
        ('4096', '4GB'),
        ('8192', '8GB'),
        ('16384', '16GB'),
        ('32768', '32GB'),
        ('65536', '64GB'),
    ]
    self.fields['vcpus'].choices = [('1', '1'), ('2', '2'),
                                    ('4', '4'), ('8', '8'),
                                    ('16', '16'), ('24', '24')]
    self.fields['memory_mb'].initial = '4096'
    self.fields['vcpus'].initial = '2'
    self.fields['availability_zone'].widget = forms.widgets.HiddenInput()
    flavors = instance_utils.flavor_field_data(self.request, False)
    # Default to the first flavor in the (non-empty) flavor list.
    self.flavor = str(flavors[0][0])
def get_quotas_data(self):
    """Return default quota items, excluding disabled volume quotas.

    Returns an empty list (and surfaces an error message to the user)
    if quota data cannot be retrieved.
    """
    request = self.tab_group.request
    disabled_quotas = []
    # Skip Cinder quota fields when the volume service is unavailable.
    if not is_service_enabled(self.request, 'volume'):
        disabled_quotas.extend(['volumes', 'gigabytes'])
    try:
        quota_set = quotas.get_default_quota_data(request, disabled_quotas)
        data = quota_set.items
    except Exception:
        # Fix: was a bare 'except:', which would also swallow
        # SystemExit/KeyboardInterrupt; those should propagate.
        data = []
        exceptions.handle(self.request, _('Unable to get quota info.'))
    return data
def test_quotas_sets_defaults_get_when_service_is_enabled(self, client):
    """GET of default quota sets succeeds when networking is enabled."""
    filters = {'user': {'tenant_id': 'tenant'}}
    request = self.mock_rest_request(**{'GET': dict(filters)})
    # mox expectation: the service-enabled check reports neutron on.
    base.is_service_enabled(request, 'network').AndReturn(True)
    # Two quotas returned: one with a known display name, one generic.
    client.tenant_quota_get.return_value = [
        base.Quota("network", 100),
        base.Quota("q2", 101)]
    self.mox.ReplayAll()
    response = neutron.DefaultQuotaSets().get(request)
    self.assertStatusCode(response, 200)
    # 'q2' has no registered display name, so it is title-cased.
    self.assertItemsCollectionEqual(response, [
        {'limit': 100, 'display_name': 'Networks', 'name': 'network'},
        {'limit': 101, 'display_name': 'Q2', 'name': 'q2'}])
    client.tenant_quota_get.assert_called_once_with(
        request, request.user.tenant_id)
def get_disabled_quotas(request):
    """List quota field names that should not be shown or edited.

    A field is disabled when its backing service is unavailable, is
    superseded by another service, or does not expose a quota API.
    """
    disabled = []

    # Cinder: without the volume service none of its quotas apply.
    if not base.is_service_enabled(request, 'volume'):
        disabled.extend(CINDER_QUOTA_FIELDS)

    # Neutron
    if base.is_service_enabled(request, 'network'):
        # Neutron supersedes the nova-network quota fields.
        disabled += ['floating_ips', 'fixed_ips']
        if neutron.is_extension_supported(request, 'security-group'):
            # Neutron security groups replace the Nova ones.
            disabled += ['security_groups', 'security_group_rules']
        else:
            # Nova security groups in use; hide the Neutron variants.
            disabled += ['security_group', 'security_group_rule']
        try:
            if not neutron.is_quotas_extension_supported(request):
                disabled += NEUTRON_QUOTA_FIELDS
        except Exception:
            LOG.exception("There was an error checking if the Neutron "
                          "quotas extension is enabled.")
    else:
        disabled.extend(NEUTRON_QUOTA_FIELDS)

    return disabled
def __init__(self, request):
    """Select Neutron or Nova backends for floating IPs and secgroups."""
    if base.is_service_enabled(request, 'network'):
        self.floating_ips = neutron.FloatingIpManager(request)
        # Only consult the security-group extension when Neutron is on.
        if neutron.is_extension_supported(request, 'security-group'):
            self.secgroups = neutron.SecurityGroupManager(request)
        else:
            self.secgroups = nova.SecurityGroupManager(request)
    else:
        self.floating_ips = nova.FloatingIpManager(request)
        self.secgroups = nova.SecurityGroupManager(request)
def populate_groups_choices(self, request, context):
    """Return (value, label) choices for the security-group selector.

    Neutron groups are keyed by id; nova-network requires the groups
    to be listed by name.
    """
    try:
        groups = api.network.security_group_list(request)
        use_ids = base.is_service_enabled(request, 'network')
        choices = [(sg.id if use_ids else sg.name, sg.name)
                   for sg in groups]
    except Exception:
        choices = []
        exceptions.handle(request,
                          _('Unable to retrieve list of security groups'))
    return choices
def _get_tenant_compute_usages(request, usages, disabled_quotas, tenant_id):
    """Tally compute usage (instances, cores, ram) into *usages*.

    Each field is only tallied when its quota is enabled; the whole
    step is skipped when all compute quotas are disabled or the compute
    service itself is not enabled.
    """
    enabled_compute_quotas = NOVA_COMPUTE_QUOTA_FIELDS - disabled_quotas
    if not enabled_compute_quotas:
        return

    # Unlike the other services it can be the case that nova is enabled but
    # doesn't support quotas, in which case we still want to get usage info,
    # so don't rely on '"instances" in disabled_quotas' as elsewhere
    if not base.is_service_enabled(request, 'compute'):
        return

    if tenant_id:
        instances, has_more = nova.server_list(
            request, search_opts={'tenant_id': tenant_id})
    else:
        instances, has_more = nova.server_list(request)

    _add_usage_if_quota_enabled(usages, 'instances', len(instances),
                                disabled_quotas)

    # cores/ram need per-instance flavor data; skip if both disabled.
    if {'cores', 'ram'} - disabled_quotas:
        # Fetch deleted flavors if necessary.
        flavors = dict([(f.id, f) for f in nova.flavor_list(request)])
        missing_flavors = [
            instance.flavor['id'] for instance in instances
            if instance.flavor['id'] not in flavors
        ]
        for missing in missing_flavors:
            # missing_flavors may contain duplicates; fetch each id once.
            if missing not in flavors:
                try:
                    flavors[missing] = nova.flavor_get(request, missing)
                except Exception:
                    # Placeholder dict: getattr() below then yields None.
                    flavors[missing] = {}
                    exceptions.handle(request, ignore=True)
        # Sum our usage based on the flavors of the instances.
        for flavor in [
            flavors[instance.flavor['id']] for instance in instances
        ]:
            _add_usage_if_quota_enabled(usages, 'cores',
                                        getattr(flavor, 'vcpus', None),
                                        disabled_quotas)
            _add_usage_if_quota_enabled(usages, 'ram',
                                        getattr(flavor, 'ram', None),
                                        disabled_quotas)
        # Initialize the tally if no instances have been launched yet
        if len(instances) == 0:
            _add_usage_if_quota_enabled(usages, 'cores', 0, disabled_quotas)
            _add_usage_if_quota_enabled(usages, 'ram', 0, disabled_quotas)
def handle(self, request, data):
    """Create the project, then apply Manila quota values.

    Quota failures are reported to the user but do not abort project
    creation; always returns True so the workflow completes.
    """
    try:
        super(ManilaCreateProject, self).handle(request, data)
        if base.is_service_enabled(request, 'share'):
            # dict comprehension instead of dict([(k, v) ...]) (C404).
            manila_data = {key: data[key] for key in MANILA_QUOTA_FIELDS}
            manila.tenant_quota_update(request, self.object.id,
                                       **manila_data)
    except Exception:
        horizon.exceptions.handle(request,
                                  _('Unable to set project quotas.'))
    return True
def handle(self, request, data):
    """Update default quotas, including Manila fields when available.

    Errors are surfaced to the user; always returns True so the form
    completes.
    """
    try:
        super(ManilaUpdateDefaultQuotas, self).handle(request, data)
        if base.is_service_enabled(request, 'share'):
            # dict comprehension instead of dict([(k, v) ...]) (C404).
            manila_data = {key: data[key] for key in MANILA_QUOTA_FIELDS}
            manila.default_quota_update(request, **manila_data)
    except Exception:
        horizon.exceptions.handle(request,
                                  _('Unable to update default quotas.'))
    return True
def __init__(self, request):
    """Wire up floating-IP/security-group managers (Neutron only).

    Without Neutron (or without the security-group extension) the
    corresponding manager is left as None.
    """
    # TODO(amotoki): neutron check needs to be dropped.
    # The network API wrapper can depend on neutron.
    if base.is_service_enabled(request, 'network'):
        self.floating_ips = neutron.FloatingIpManager(request)
        sg_supported = neutron.is_extension_supported(request,
                                                      'security-group')
        self.secgroups = (neutron.SecurityGroupManager(request)
                          if sg_supported else None)
    else:
        self.floating_ips = None
        self.secgroups = None
def handle(self, request, data):
    """Update the default (quota-class) quotas for Nova and Cinder.

    Errors are surfaced via ``exceptions.handle``; always returns True
    so the form completes.
    """
    # `fixed_ips` update for quota class is not supported by novaclient
    # dict comprehension instead of dict([(k, v) ...]) (C404).
    nova_data = {key: data[key] for key in ALL_NOVA_QUOTA_FIELDS
                 if key != 'fixed_ips'}
    try:
        nova.default_quota_update(request, **nova_data)
        if base.is_service_enabled(request, 'volume'):
            cinder_data = {key: data[key]
                           for key in quotas.CINDER_QUOTA_FIELDS}
            cinder.default_quota_update(request, **cinder_data)
    except Exception:
        exceptions.handle(request, _('Unable to update default quotas.'))
    return True
def handle(self, request, data):
    """Update Manila default quotas when the share service is enabled.

    Returns True on success (or when the share service is disabled),
    False on error.
    """
    try:
        if base.is_service_enabled(request, 'share'):
            # Share group quotas are removed here because default
            # quota update for share groups is not possible yet, see
            # LP #1871252
            allowed_updates = (api_manila.MANILA_QUOTA_FIELDS -
                               {'share_groups', 'share_group_snapshots'})
            # dict comprehension replaces the manual build loop.
            manila_data = {key: data[key] for key in allowed_updates}
            api_manila.default_quota_update(request, **manila_data)
        return True
    except Exception:
        exceptions.handle(request,
                          _('Unable to update default quotas.'))
        return False
def __init__(self, request):
    """Pick Neutron or Nova backends for floating IPs and secgroups."""
    use_neutron = base.is_service_enabled(request, 'network')
    fip_cls = (neutron.FloatingIpManager if use_neutron
               else nova.FloatingIpManager)
    self.floating_ips = fip_cls(request)
    # Not all neutron plugins support security groups, so deployers can
    # opt out via the enable_security_group configuration parameter.
    sg_enabled = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK',
                         {}).get('enable_security_group', True)
    sg_cls = (neutron.SecurityGroupManager
              if use_neutron and sg_enabled
              else nova.SecurityGroupManager)
    self.secgroups = sg_cls(request)
def tenant_quota_usages(request):
    """Build a QuotaUsage object with the tenant's limits and usage.

    Quota fields for disabled services (currently Cinder) are excluded
    from both the limits and the usage tallies.
    """
    # Get our quotas and construct our usage object.
    disabled_quotas = []
    if not is_service_enabled(request, 'volume'):
        disabled_quotas.extend(['volumes', 'gigabytes'])

    usages = QuotaUsage()
    for quota in get_tenant_quota_data(request, disabled_quotas):
        usages.add_quota(quota)

    # Get our usages.
    floating_ips = network.tenant_floating_ip_list(request)
    flavors = dict([(f.id, f) for f in nova.flavor_list(request)])
    instances = nova.server_list(request)

    # Fetch deleted flavors if necessary.
    missing_flavors = [instance.flavor['id'] for instance in instances
                       if instance.flavor['id'] not in flavors]
    for missing in missing_flavors:
        # missing_flavors may contain duplicates; fetch each id once.
        if missing not in flavors:
            try:
                flavors[missing] = nova.flavor_get(request, missing)
            except Exception:
                # Fix: was a bare 'except:', which would also swallow
                # SystemExit/KeyboardInterrupt.
                flavors[missing] = {}
                exceptions.handle(request, ignore=True)

    usages.tally('instances', len(instances))
    usages.tally('floating_ips', len(floating_ips))

    if 'volumes' not in disabled_quotas:
        volumes = cinder.volume_list(request)
        usages.tally('gigabytes', sum([int(v.size) for v in volumes]))
        usages.tally('volumes', len(volumes))

    # Sum our usage based on the flavors of the instances.
    for flavor in [flavors[instance.flavor['id']]
                   for instance in instances]:
        usages.tally('cores', getattr(flavor, 'vcpus', None))
        usages.tally('ram', getattr(flavor, 'ram', None))

    # Initialise the tally if no instances have been launched yet
    if len(instances) == 0:
        usages.tally('cores', 0)
        usages.tally('ram', 0)

    return usages
def get_quotas_data(self):
    """Return default quota items, hiding nova-network ones under Neutron.

    There is no API to get the default system quotas in Neutron
    (cf. LP#1204956), so the network-related rows are dropped when the
    network service is enabled to avoid confusion.
    """
    request = self.tab_group.request
    try:
        data = quotas.get_default_quota_data(request).items
        if base.is_service_enabled(self.request, 'network'):
            hidden = ('floating_ips', 'fixed_ips')
            data = [quota for quota in data if quota.name not in hidden]
    except Exception:
        data = []
        exceptions.handle(self.request, _('Unable to get quota info.'))
    return data
def populate_volume_snapshot_id_choices(self, request, context):
    """Build choices of available volume snapshots for the boot source."""
    snapshots = []
    try:
        if base.is_service_enabled(request, 'volume'):
            available = api.cinder.VOLUME_STATE_AVAILABLE
            listing = cinder.volume_snapshot_list(
                self.request, search_opts=dict(status=available))
            snapshots = [self._get_volume_display_name(s) for s in listing]
    except Exception:
        exceptions.handle(self.request,
                          _('Unable to retrieve list of volume '
                            'snapshots.'))
    # Prepend either a selection prompt or an "empty" placeholder.
    label = (_("Select Volume Snapshot") if snapshots
             else _("No volume snapshots available"))
    snapshots.insert(0, ("", label))
    return snapshots
def handle(self, request, data):
    """Update the project, then apply Manila quota values.

    Quota failures are reported but do not undo the project update;
    always returns True so the workflow completes.
    """
    try:
        super(ManilaUpdateProject, self).handle(request, data)
        if base.is_service_enabled(request, 'share'):
            # dict comprehension instead of dict([(k, v) ...]) (C404).
            manila_data = {key: data[key] for key in MANILA_QUOTA_FIELDS}
            manila.tenant_quota_update(request, data['project_id'],
                                       **manila_data)
    except Exception:
        horizon.exceptions.handle(
            request,
            _('Modified project information and '
              'members, but unable to modify '
              'project quotas.'))
    return True
def _get_tenant_compute_usages(request, usages, disabled_quotas, tenant_id):
    """Tally compute usage (instances, cores, ram) into *usages*."""
    # Unlike the other services it can be the case that nova is enabled but
    # doesn't support quotas, in which case we still want to get usage info,
    # so don't rely on '"instances" in disabled_quotas' as elsewhere
    if not base.is_service_enabled(request, 'compute'):
        return

    if tenant_id:
        # determine if the user has permission to view across projects
        # there are cases where an administrator wants to check the quotas
        # on a project they are not scoped to
        all_tenants = policy.check((("compute", "compute:get_all_tenants"), ),
                                   request)
        instances, has_more = nova.server_list(
            request, search_opts={'tenant_id': tenant_id},
            all_tenants=all_tenants)
    else:
        instances, has_more = nova.server_list(request)

    # Fetch deleted flavors if necessary.
    flavors = dict([(f.id, f) for f in nova.flavor_list(request)])
    missing_flavors = [
        instance.flavor['id'] for instance in instances
        if instance.flavor['id'] not in flavors
    ]
    for missing in missing_flavors:
        # missing_flavors may contain duplicates; fetch each id once.
        if missing not in flavors:
            try:
                flavors[missing] = nova.flavor_get(request, missing)
            except Exception:
                # Placeholder dict: getattr() below then yields None.
                flavors[missing] = {}
                exceptions.handle(request, ignore=True)

    usages.tally('instances', len(instances))

    # Sum our usage based on the flavors of the instances.
    for flavor in [flavors[instance.flavor['id']]
                   for instance in instances]:
        usages.tally('cores', getattr(flavor, 'vcpus', None))
        usages.tally('ram', getattr(flavor, 'ram', None))

    # Initialize the tally if no instances have been launched yet
    if len(instances) == 0:
        usages.tally('cores', 0)
        usages.tally('ram', 0)
def handle(self, request, data):
    """Update default quotas for Nova and Cinder independently.

    Tracks per-service failure so a partial success still reports
    which service was updated. Returns False only when nothing was
    updated successfully.
    """
    # `fixed_ips` update for quota class is not supported by novaclient
    nova_data = dict([(key, data[key]) for key in ALL_NOVA_QUOTA_FIELDS
                      if key != 'fixed_ips'])

    is_error_nova = False
    is_error_cinder = False

    is_volume_service_enabled = base.is_service_enabled(request, 'volume')

    # Update the default quotas for nova.
    try:
        nova.default_quota_update(request, **nova_data)
    except Exception:
        is_error_nova = True

    # Update the default quotas for cinder.
    try:
        if is_volume_service_enabled:
            cinder_data = dict([(key, data[key]) for key
                                in quotas.CINDER_QUOTA_FIELDS])
            cinder.default_quota_update(request, **cinder_data)
        else:
            LOG.debug('Unable to update Cinder default quotas'
                      ' because the Cinder volume service is disabled.')
    except Exception:
        is_error_cinder = True

    # Analyze errors (if any) to determine what success and error messages
    # to display to the user.
    if is_error_nova and not is_error_cinder:
        if is_volume_service_enabled:
            # Cinder succeeded: partial success; report the Nova failure.
            self.success_message = _('Default quotas updated for Cinder.')
            exceptions.handle(request, _('Unable to update default quotas'
                                         ' for Nova.'))
        else:
            # Nova failed and Cinder was skipped: nothing was updated.
            return False
    elif is_error_cinder and not is_error_nova:
        # Nova succeeded: partial success; report the Cinder failure.
        self.success_message = _('Default quotas updated for Nova.')
        exceptions.handle(request,
                          _('Unable to update default quotas for Cinder.'))
    elif is_error_nova and is_error_cinder:
        return False
    return True
def servers_update_addresses(request, servers, all_tenants=False):
    """Refresh servers' networking information from Neutron if enabled.

    Should be used when up to date networking information is required,
    and Nova's networking info caching mechanism is not fast enough.
    """
    # NOTE(amotoki): This check is still needed because 'instances' panel
    # calls this method. We dropped security group and floating IP support
    # through Nova API (due to novaclient 8.0.0 drops their supports),
    # but we can still support 'Instances' panel with nova-network.
    # TODO(amotoki): Nova networkinfo info caching mechanism is now fast
    # enough as they are updated by Neutron via Nova event callback
    # mechasm, so servers_update_addresses is no longer needed.
    # We can reduce API calls by dropping it.
    if base.is_service_enabled(request, 'network'):
        neutron.servers_update_addresses(request, servers, all_tenants)
def __init__(self, request, context, *args, **kwargs):
    """Build the instance-details form with lease-aware customizations."""
    self._init_images_cache()
    self.request = request
    self.context = context
    super(SetInstanceDetailsAction, self).__init__(request, context,
                                                   *args, **kwargs)

    # NOTE(review): projectname appears unused below — confirm intent.
    projectname = str(request.user.tenant_name).lower()
    controllername = api.nova.get_controllername(request)
    LOG.info("Aman Tenant ID : %s" % request.user.tenant_id)
    LOG.info("Aman Controller Name : %s" % controllername)
    # Projects that fail lease verification are pinned to a read-only
    # one-day lease. '== False' is deliberate here: a non-bool return
    # from lease_project_verify would behave differently with 'not'.
    if api.lease.lease_project_verify(request.user.tenant_id,
                                      controllername) == False:
        self.fields['lease_days'].initial = "1"
        self.fields['lease_days'].widget.attrs['readonly'] = True

    # Hide the device field if the hypervisor doesn't support it.
    if not nova.can_set_mount_point():
        self.fields['device_name'].widget = forms.widgets.HiddenInput()

    source_type_choices = [
        ('', _("Select source")),
        ("image_id", _("Boot from image")),
        ("instance_snapshot_id", _("Boot from snapshot")),
    ]
    if base.is_service_enabled(request, 'volume'):
        source_type_choices.append(("volume_id", _("Boot from volume")))
        try:
            # Booting a new volume from an image needs BDM v2 support.
            if api.nova.extension_supported("BlockDeviceMappingV2Boot",
                                            request):
                source_type_choices.append(
                    ("volume_image_id",
                     _("Boot from image (creates a new volume)")))
        except Exception:
            exceptions.handle(request, _('Unable to retrieve extensions '
                                         'information.'))
        source_type_choices.append(
            ("volume_snapshot_id",
             _("Boot from volume snapshot (creates a new volume)")))
    self.fields['source_type'].choices = source_type_choices
def get_volume_snapshots_data(self):
    """Return volume snapshots with their parent volume attached.

    Yields an empty list when the volume service is disabled or when
    retrieval fails (the error is surfaced to the user).
    """
    if not base.is_service_enabled(self.request, 'volume'):
        return []
    try:
        snapshots = api.cinder.volume_snapshot_list(self.request)
        volumes = {v.id: v for v in api.cinder.volume_list(self.request)}
    except Exception:
        snapshots = []
        volumes = {}
        exceptions.handle(self.request, _("Unable to retrieve "
                                          "volume snapshots."))
    for snapshot in snapshots:
        # Attach the parent volume (or None if it no longer exists).
        setattr(snapshot, '_volume', volumes.get(snapshot.volume_id))
    return snapshots