Example no. 1
 def _delete_sg_rule(self, plugin_context, sg_rule_id):
     try:
         self._delete_resource(self._core_plugin, plugin_context,
                               'security_group_rule', sg_rule_id)
     except ext_sg.SecurityGroupRuleNotFound:
         LOG.warning(_LW('Security Group Rule %s already deleted'),
                     sg_rule_id)
Example no. 2
 def _delete_address_scope(self, plugin_context, address_scope_id):
     try:
         self._delete_resource(self._core_plugin, plugin_context,
                               'address_scope', address_scope_id)
     except address_scope.AddressScopeNotFound:
         LOG.warning(_LW('Address Scope %s already deleted'),
                     address_scope_id)
Example no. 3
 def _create_sg_rule(self, plugin_context, attrs):
     try:
         return self._create_resource(self._core_plugin, plugin_context,
                                      'security_group_rule', attrs)
     except ext_sg.SecurityGroupRuleExists as ex:
         LOG.warning(_LW('Security Group Rule already exists: %s'),
                     ex.message)
         return
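The create-side wrappers invert the same idempotency idea: an "already exists" exception is logged and the call returns None, so callers must tolerate a missing result. A minimal standalone sketch of that shape (ResourceExists and create_resource are illustrative names, not the project's API):

 import logging

 LOG = logging.getLogger(__name__)


 class ResourceExists(Exception):
     """Stand-in for a plugin-specific 'already exists' exception."""


 def create_resource(create_fn, attrs):
     """Create a resource, downgrading 'already exists' to a warning.

     Returns the created resource, or None if it already existed.
     """
     try:
         return create_fn(attrs)
     except ResourceExists as exc:
         LOG.warning('Resource already exists: %s', exc)
         return None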
Example no. 4
    def _validate_pre_launch_executor_results(self,
                                              network_handler,
                                              device_data,
                                              image_name,
                                              image_id_result,
                                              provider_metadata_result,
                                              server_grp_id_result=None):
        interfaces = device_data.pop('interfaces', None)
        if not interfaces:
            LOG.error(_LE('Failed to get interfaces for device creation.'))
            return None, None, None

        image_id = image_id_result.get('result', None)
        if not image_id:
            LOG.error(_LE('Failed to get image id for device creation.'))
            self._delete_interfaces(device_data,
                                    interfaces,
                                    network_handler=network_handler)
            return None, None, None

        if server_grp_id_result and not server_grp_id_result.get('result'):
            LOG.error(
                _LE('Validation failed for Nova anti-affinity '
                    'server group.'))
            return None, None, None

        provider_metadata = provider_metadata_result.get('result', None)
        if not provider_metadata:
            LOG.warning(
                _LW('Failed to get provider metadata for'
                    ' device creation.'))
            provider_metadata = {}

        return interfaces, image_id, provider_metadata
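Because the happy path and every failure path return a uniform three-tuple, callers can unpack unconditionally and bail on the first falsy element. A hypothetical, runnable illustration (the validate stub only mimics the return contract of the method above):

 def validate(ok):
     """Stub mimicking the validator's three-tuple return contract."""
     if not ok:
         return None, None, None
     return ['eth0'], 'image-123', {'vendor': 'acme'}


 interfaces, image_id, provider_metadata = validate(ok=True)
 if not interfaces:
     raise SystemExit('validation failed, aborting device launch')
 print(interfaces, image_id, provider_metadata)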
Example no. 5
 def delete(self, stack_id):
     try:
         self.stacks.delete(stack_id)
     except heat_exc.HTTPNotFound:
         LOG.warning(
             _LW("Stack %(stack)s created by service chain driver "
                 "is not found at cleanup"), {'stack': stack_id})
Example no. 6
 def get_server(self, server_id):
     try:
         return self.client.servers.get(server_id)
     except nova_exceptions.NotFound:
         LOG.warning(_LW("Nova returned NotFound for server: %s"),
                     server_id)
     except Exception as e:
         LOG.exception(e)
Example no. 7
 def _delete_qos_rule(self, plugin_context, rule_id, qos_policy_id):
     try:
         self._delete_resource_qos(self._qos_plugin,
                                   plugin_context,
                                   'policy_bandwidth_limit_rule',
                                   rule_id, qos_policy_id)
     except n_exc.QosRuleNotFound:
         LOG.warning(_LW('QoS Rule %s already deleted'), rule_id)
Example no. 8
 def _remove_router_interface(self, plugin_context, router_id,
                              interface_info):
     # To detach a router interface, either a port ID or a subnet ID is
     # required.
     try:
         self._l3_plugin.remove_router_interface(plugin_context, router_id,
                                                 interface_info)
     except l3.RouterInterfaceNotFoundForSubnet:
         LOG.warning(_LW('Router interface already deleted for subnet %s'),
                     interface_info)
         return
Example no. 9
 def get_gateway_detail(self, session, nf_id):
     svc_gw = nfp_db_model.ServiceGatewayDetails
     try:
         with session.begin(subtransactions=True):
             return self._get_gw_info_dict(
                 session.query(svc_gw).filter(
                     svc_gw.network_function_id == nf_id).one())
     except exc.NoResultFound:
         LOG.warning(
             _LW("Gateway detail doesn't exist for Network Function"
                 " %s "), nf_id)
         raise
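The lookup relies on SQLAlchemy's Query.one(), which raises NoResultFound on zero matches (and MultipleResultsFound on more than one); the method logs and re-raises so callers still see the failure. A self-contained illustration against an in-memory database (the Gateway model is a hypothetical stand-in for ServiceGatewayDetails; SQLAlchemy 1.4+ assumed):

 import sqlalchemy as sa
 from sqlalchemy.orm import declarative_base, exc, sessionmaker

 Base = declarative_base()


 class Gateway(Base):
     __tablename__ = 'gateways'
     id = sa.Column(sa.Integer, primary_key=True)
     network_function_id = sa.Column(sa.String(36))


 engine = sa.create_engine('sqlite://')
 Base.metadata.create_all(engine)
 session = sessionmaker(bind=engine)()

 try:
     # .one() raises NoResultFound because no row matches the filter.
     session.query(Gateway).filter(
         Gateway.network_function_id == 'missing-nf').one()
 except exc.NoResultFound:
     print("Gateway detail doesn't exist for Network Function missing-nf")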
Example no. 10
 def _get_keystone_client(self):
     LOG.debug("Getting keystone client")
     auth = ksc_auth.load_from_conf_options(cfg.CONF, AUTH_GROUP)
     LOG.debug("Got auth: %s", auth)
     if not auth:
         LOG.warning(_LW('No auth_plugin configured in %s'), AUTH_GROUP)
     session = ksc_session.Session.load_from_conf_options(cfg.CONF,
                                                          AUTH_GROUP,
                                                          auth=auth)
     LOG.debug("Got session: %s", session)
     self.keystone = ksc_client.Client(session=session)
     LOG.debug("Got keystone client: %s", self.keystone)
     self.gbp = gbp_client.Client(session=session)
     LOG.debug("Got gbp client: %s", self.gbp)
Example no. 11
 def _create_implicit_l3_policy(self, context):
     tenant_id = context.current['tenant_id']
     filter = {'tenant_id': [tenant_id],
               'name': [self._default_l3p_name]}
     l3ps = self._get_l3_policies(context._plugin_context, filter)
     l3p = l3ps and l3ps[0]
     if not l3p:
         attrs = {'tenant_id': tenant_id,
                  'name': self._default_l3p_name,
                  'description': _("Implicitly created L3 policy"),
                  'ip_version': self._default_ip_version,
                  'ip_pool': self._default_ip_pool,
                  'shared': context.current.get('shared', False),
                  'subnet_prefix_length':
                  self._default_subnet_prefix_length}
         if self._proxy_group_enabled:
             attrs['proxy_ip_pool'] = (
                 self._default_proxy_ip_pool)
             attrs['proxy_subnet_prefix_length'] = (
                 self._default_proxy_subnet_prefix_length)
         try:
             l3p = self._create_l3_policy(context._plugin_context, attrs)
             self._mark_l3_policy_owned(context._plugin_context.session,
                                        l3p['id'])
         except exc.DefaultL3PolicyAlreadyExists:
             with excutils.save_and_reraise_exception(
                     reraise=False) as ctxt:
                 LOG.debug("Possible concurrent creation of default L3 "
                           "policy for tenant %s", tenant_id)
                 l3ps = self._get_l3_policies(context._plugin_context,
                                              filter)
                 l3p = l3ps and l3ps[0]
                 if not l3p:
                     LOG.warning(_LW(
                         "Caught DefaultL3PolicyAlreadyExists, "
                         "but default L3 policy not concurrently "
                         "created for tenant %s"), tenant_id)
                     ctxt.reraise = True
         except exc.OverlappingIPPoolsInSameTenantNotAllowed:
             with excutils.save_and_reraise_exception():
                 LOG.info(_LI("Caught "
                              "OverlappingIPPoolsinSameTenantNotAllowed "
                              "during creation of default L3 policy for "
                              "tenant %s"), tenant_id)
     context.current['l3_policy_id'] = l3p['id']
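The DefaultL3PolicyAlreadyExists branch above leans on oslo.utils' save_and_reraise_exception(reraise=False): the handler inspects the situation and flips ctxt.reraise back to True only when it cannot recover, propagating the original exception with its traceback intact. A minimal illustration (fetch_default is a hypothetical recovery lookup):

 from oslo_utils import excutils


 def create_or_fetch(fetch_default):
     try:
         raise RuntimeError('default policy already exists')
     except RuntimeError:
         with excutils.save_and_reraise_exception(reraise=False) as ctxt:
             existing = fetch_default()
             if not existing:
                 # No concurrent creation found: surface the original error.
                 ctxt.reraise = True
     return existing


 print(create_or_fetch(lambda: {'id': 'l3p-1'}))  # swallowed; prints dict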
Example no. 12
 def process_create_l3_policy(self, session, data, result):
     data = data['l3_policy']
     if cfg.CONF.resource_mapping.use_subnetpools:
         # With subnetpools, proxy ips are allocated from regular ip_pool
         # (since neutron does not allow mixed subnetpools on same
         # network). But proxy_subnet_prefix_length is still used to
         # determine size of proxy ip pool.
         gp_db.GroupPolicyDbPlugin.validate_subnet_prefix_length(
             data['ip_version'], data['proxy_subnet_prefix_length'],
             data['ip_pool'])
         if data['proxy_ip_pool']:
             LOG.warning(_LW("Since use_subnetpools setting is turned on, "
                             "proxy_ip_pool %s will be ignored. "
                             "Proxy subnets will be allocated from same "
                             "subnetpool as group subnets"),
                         data['proxy_ip_pool'])
     else:
         gp_db.GroupPolicyDbPlugin.validate_ip_pool(
             data['proxy_ip_pool'], data['ip_version'])
         gp_db.GroupPolicyDbPlugin.validate_subnet_prefix_length(
             data['ip_version'], data['proxy_subnet_prefix_length'],
             data['proxy_ip_pool'])
Example no. 13
    def _get_resource(self, context, resource_name, resource_id, fields=None):
        session = context.session
        deployers = {}
        with session.begin(subtransactions=True):
            resource = getattr(super(NodeCompositionPlugin, self),
                               'get_' + resource_name)(context, resource_id)
            if resource_name == 'servicechain_instance':
                if len(resource['servicechain_specs']) > 1:
                    raise exc.OneSpecPerInstanceAllowed()
                try:
                    deployers = self._get_scheduled_drivers(
                        context, resource, 'get')
                except Exception:
                    LOG.warning(_LW("Failed to get node driver"))

        # Invoke drivers only if status attributes are requested
        if not fields or STATUS_SET.intersection(set(fields)):
            _resource = self._get_resource_status(context, resource_name,
                                                  deployers)
            if _resource:
                updated_status = _resource['status']
                updated_status_details = _resource['status_details']
                if resource['status'] != updated_status or (
                        resource['status_details'] != updated_status_details):
                    new_status = {
                        resource_name: {
                            'status': updated_status,
                            'status_details': updated_status_details
                        }
                    }
                    session = context.session
                    with session.begin(subtransactions=True):
                        getattr(super(NodeCompositionPlugin, self),
                                'update_' + resource_name)(context,
                                                           resource['id'],
                                                           new_status)
                    resource['status'] = updated_status
                    resource['status_details'] = updated_status_details
        return self._fields(resource, fields)
Example no. 14
 def _delete_policy_target_group(self, plugin_context, ptg_id):
     try:
         self._delete_resource(self._group_policy_plugin, plugin_context,
                               'policy_target_group', ptg_id)
     except gp_ext.PolicyTargetGroupNotFound:
         LOG.warning(_LW("Policy Target Group %s already deleted"), ptg_id)
Example no. 15
 def _delete_policy_target(self, plugin_context, pt_id):
     try:
         self._delete_resource(self._group_policy_plugin, plugin_context,
                               'policy_target', pt_id, False)
     except gp_ext.PolicyTargetNotFound:
         LOG.warning(_LW('Policy Target %s already deleted'), pt_id)
Example no. 16
 def _delete_servicechain_spec(self, plugin_context, scs_id):
     try:
         self._delete_resource(self._servicechain_plugin, plugin_context,
                               'servicechain_spec', scs_id)
     except sc_ext.ServiceChainSpecNotFound:
         LOG.warning(_LW("servicechain spec %s already deleted"), scs_id)
Example no. 17
 def _delete_servicechain_instance(self, plugin_context, sci_id):
     try:
         self._delete_resource(self._servicechain_plugin, plugin_context,
                               'servicechain_instance', sci_id, False)
     except sc_ext.ServiceChainInstanceNotFound:
         LOG.warning(_LW("servicechain %s already deleted"), sci_id)
Example no. 18
 def _delete_external_policy(self, plugin_context, ep_id):
     try:
         self._delete_resource(self._group_policy_plugin, plugin_context,
                               'external_policy', ep_id, False)
     except gp_ext.ExternalPolicyNotFound:
         LOG.warning(_LW('External Policy %s already deleted'), ep_id)
Example no. 19
 def _delete_router(self, plugin_context, router_id):
     try:
         self._delete_resource(self._l3_plugin, plugin_context, 'router',
                               router_id)
     except l3.RouterNotFound:
         LOG.warning(_LW('Router %s already deleted'), router_id)
Example no. 20
 def _delete_port(self, plugin_context, port_id):
     try:
         self._delete_resource(self._core_plugin,
                               plugin_context, 'port', port_id)
     except n_exc.PortNotFound:
         LOG.warning(_LW('Port %s already deleted'), port_id)
Example no. 21
    def _get_gbp_details(self, context, request, host):
        with context.session.begin(subtransactions=True):
            device = request.get('device')

            core_plugin = self._core_plugin
            port_id = core_plugin._device_to_port_id(context, device)
            port_context = core_plugin.get_bound_port_context(
                context, port_id, host)
            if not port_context:
                LOG.warning(
                    _LW("Device %(device)s requested by agent "
                        "%(agent_id)s not found in database"), {
                            'device': port_id,
                            'agent_id': request.get('agent_id')
                        })
                return {'device': request.get('device')}
            port = port_context.current

            # NOTE(ivar): removed the PROXY_PORT_PREFIX hack.
            # This was needed to support network services without hotplug.

            epg = self._get_port_epg(context, port)

            details = {
                'device': request.get('device'),
                'enable_dhcp_optimization': self._is_dhcp_optimized(
                    context, port),
                'enable_metadata_optimization': self._is_metadata_optimized(
                    context, port),
                'port_id': port_id,
                'mac_address': port['mac_address'],
                'app_profile_name': epg.app_profile_name,
                'tenant_id': port['tenant_id'],
                'host': host,
                # TODO(ivar): scope names, possibly through AIM or the
                # name mapper
                'ptg_tenant': epg.tenant_name,
                'endpoint_group_name': epg.name,
                'promiscuous_mode': self._is_port_promiscuous(context, port),
                'extra_ips': [],
                'floating_ip': [],
                'ip_mapping': [],
                # Put per mac-address extra info
                'extra_details': {}
            }

            # Set VM name if needed.
            if port['device_owner'].startswith(
                    'compute:') and port['device_id']:
                vm = nclient.NovaClient().get_server(port['device_id'])
                details['vm-name'] = vm.name if vm else port['device_id']
            mtu = self._get_port_mtu(context, port)
            if mtu:
                details['interface_mtu'] = mtu

            # NOTE(ivar): having these methods cleanly separated actually makes
            # things less efficient by requiring lots of calls duplication.
            # we could alleviate this by passing down a cache that stores
            # commonly requested objects (like EPGs). 'details' itself could
            # be used for such caching.
            details['_cache'] = {}
            vrf = self._get_port_vrf(context, port, details)
            details['l3_policy_id'] = '%s %s' % (vrf.tenant_name, vrf.name)
            self._add_subnet_details(context, port, details)
            self._add_allowed_address_pairs_details(context, port, details)
            self._add_vrf_details(context, details['l3_policy_id'], details)
            self._add_nat_details(context, port, host, details)
            self._add_extra_details(context, port, details)
            self._add_segmentation_label_details(context, port, details)
            self._set_dhcp_lease_time(details)
            details.pop('_cache', None)

        LOG.debug("Details for port %s : %s", port['id'], details)
        return details
Example no. 22
    def plug_services(self, context, deployment):
        if deployment:
            provider = deployment[0]['context'].provider
            management = deployment[0]['context'].management
            # Sorted from provider (N) to consumer (0)
            # TODO(ivar): validate that the number of interfaces per service,
            # per service type, is as expected
            self._sort_deployment(deployment)
            for part in deployment:
                info = part['plumbing_info']
                if not info:
                    continue
                part_context = part['context']
                # Management PT can be created immediately
                self._create_service_target(context, part_context,
                                            info.get('management', []),
                                            management, 'management')
                # Create proper PTs based on the service type
                jump_ptg = None
                LOG.info(_LI("Plumbing service of type '%s'"),
                         info['plumbing_type'])
                if info['plumbing_type'] == common.PLUMBING_TYPE_ENDPOINT:
                    # No stitching needed, only provider side PT is created.
                    # overriding PT name in order to keep port security up
                    # for this kind of service.
                    node = part_context.current_node
                    instance = part_context.instance
                    for provider_info in info.get('provider', []):
                        provider_info['name'] = (
                            "tscp_endpoint_service_%s_%s" %
                            (node['id'][:5], instance['id'][:5]))
                    self._create_service_target(context, part_context,
                                                info.get('provider', []),
                                                provider, 'provider')

                elif info['plumbing_type'] == common.PLUMBING_TYPE_GATEWAY:
                    # L3 stitching needed, provider and consumer side PTs are
                    # created. One proxy_gateway is needed in consumer side
                    jump_ptg = self._create_l3_jump_group(
                        context, provider, part['context'].current_position)
                    # On provider side, this service is the default gateway
                    info['provider'][0]['group_default_gateway'] = True
                    self._create_service_target(context, part_context,
                                                info['provider'], provider,
                                                'provider')
                    # On consumer side, this service is the proxy gateway
                    info['consumer'][0]['proxy_gateway'] = True
                    self._create_service_target(context, part_context,
                                                info['consumer'], jump_ptg,
                                                'consumer')
                elif info['plumbing_type'] == common.PLUMBING_TYPE_TRANSPARENT:
                    # L2 stitching needed, provider and consumer side PTs are
                    # created
                    self._create_service_target(context, part_context,
                                                info.get('provider', []),
                                                provider, 'provider')
                    jump_ptg = self._create_l2_jump_group(
                        context, provider, part['context'].current_position)
                    self._create_service_target(context, part_context,
                                                info['consumer'], jump_ptg,
                                                'consumer')
                else:
                    LOG.warning(_LW("Unsupported plumbing type %s"),
                                info['plumbing_type'])
                # Replace current "provider" with jump ptg if needed
                provider = jump_ptg or provider
Example no. 23
 def _delete_subnetpool(self, plugin_context, subnetpool_id):
     try:
         self._delete_resource(self._core_plugin, plugin_context,
                               'subnetpool', subnetpool_id)
     except n_exc.SubnetpoolNotFound:
         LOG.warning(_LW('Subnetpool %s already deleted'), subnetpool_id)
Example no. 24
 def _delete_fip(self, plugin_context, fip_id):
     try:
         self._delete_resource(self._l3_plugin, plugin_context,
                               'floatingip', fip_id)
     except l3.FloatingIPNotFound:
         LOG.warning(_LW('Floating IP %s Already deleted'), fip_id)
Example no. 25
 def _delete_l3_policy(self, plugin_context, l3p_id):
     try:
         self._delete_resource(self._group_policy_plugin,
                               plugin_context, 'l3_policy', l3p_id, False)
     except gp_ext.L3PolicyNotFound:
         LOG.warning(_LW('L3 Policy %s already deleted'), l3p_id)
Example no. 26
 def _delete_external_segment(self, plugin_context, es_id):
     try:
         self._delete_resource(self._group_policy_plugin, plugin_context,
                               'external_segment', es_id, False)
     except gp_ext.ExternalSegmentNotFound:
         LOG.warning(_LW('External Segment %s already deleted'), es_id)
Example no. 27
 def _delete_qos_policy(self, plugin_context, qos_policy_id):
     try:
         self._delete_resource(self._qos_plugin,
                               plugin_context, 'policy', qos_policy_id)
     except n_exc.QosPolicyNotFound:
         LOG.warning(_LW('QoS Policy %s already deleted'), qos_policy_id)
Example no. 28
 def _delete_network(self, plugin_context, network_id):
     try:
         self._delete_resource(self._core_plugin, plugin_context,
                               'network', network_id)
     except n_exc.NetworkNotFound:
         LOG.warning(_LW('Network %s already deleted'), network_id)
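Nearly every delete wrapper in this section is an instance of a single idempotent-cleanup idiom: delegate to a generic delete helper and downgrade the resource-specific NotFound exception to a warning, so repeated or racing cleanups never fail. A standalone sketch of that shared shape (safe_delete and its arguments are illustrative, not the project's API):

 import logging

 LOG = logging.getLogger(__name__)


 def safe_delete(delete_fn, resource_id, not_found_exc, label):
     """Call delete_fn(resource_id), treating the resource-specific
     'not found' exception as success so cleanup stays idempotent."""
     try:
         delete_fn(resource_id)
     except not_found_exc:
         LOG.warning('%s %s already deleted', label, resource_id)


 # Hypothetical usage mirroring _delete_port above:
 # safe_delete(ports.delete, port_id, n_exc.PortNotFound, 'Port')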