Example #1
    def create_filter(self, tc_dict):

        if tc_dict['protocol'] == 'vxlan':
            self._create_vxlan_filter(tc_dict)
            return

        raise exceptions.BadRequest(msg='Protocol not supported')
Example #2
 def _add_interface_by_subnet(self, context, router, subnet_id, owner):
     LOG.debug(
         "_add_interface_by_subnet(): router=%(router)s, "
         "subnet_id=%(subnet_id)s, owner=%(owner)s", {
             'router': router,
             'subnet_id': subnet_id,
             'owner': owner
         })
     subnet = self._core_plugin._get_subnet(context, subnet_id)
     if not subnet['gateway_ip']:
         msg = _('Subnet for router interface must have a gateway IP')
         raise n_exc.BadRequest(resource='router', msg=msg)
     self._check_for_dup_router_subnets(context, router,
                                        subnet['network_id'], [subnet])
     fixed_ip = {
         'ip_address': subnet['gateway_ip'],
         'subnet_id': subnet['id']
     }
     # TODO(jerryz): move this out of transaction.
     setattr(context, 'GUARD_TRANSACTION', False)
     return (self._core_plugin.create_port(
         context, {
             'port': {
                 'tenant_id': subnet['tenant_id'],
                 'network_id': subnet['network_id'],
                 'fixed_ips': [fixed_ip],
                 'mac_address': utils.get_mac(self, context),
                 'admin_state_up': True,
                 'device_id': router.id,
                 'device_owner': owner,
                 'name': ''
             }
         }), [subnet], True)
Example #3
 def delete(self, context, lb):
     service_client = self.core_plugin.nsxlib.load_balancer.service
     lb_binding = nsx_db.get_nsx_lbaas_loadbalancer_binding(
         context.session, lb.id)
     if lb_binding:
         lb_service_id = lb_binding['lb_service_id']
         lb_service = service_client.get(lb_service_id)
         vs_list = lb_service.get('virtual_server_ids')
         if not vs_list:
             try:
                 service_client.delete(lb_service_id)
             except nsxlib_exc.ManagerError:
                 self.lbv2_driver.pool.failed_completion(context,
                                                         lb,
                                                         delete=True)
                 msg = (_('Failed to delete lb service %(lbs)s from nsx') %
                        {
                            'lbs': lb_service_id
                        })
                 raise n_exc.BadRequest(resource='lbaas-lb', msg=msg)
         nsx_db.delete_nsx_lbaas_loadbalancer_binding(
             context.session, lb.id)
     self.lbv2_driver.load_balancer.successful_completion(context,
                                                          lb,
                                                          delete=True)
Example #4
    def get_auto_allocated_topology(self, context, tenant_id, fields=None):
        """Return tenant's network associated to auto-allocated topology.

        The topology will be provisioned upon return, if network is missing.
        """
        fields = fields or []
        tenant_id = self._validate(context, tenant_id)
        if CHECK_REQUIREMENTS in fields:
            # for dry-run requests, simply validates that subsequent
            # requests can be fulfilled based on a set of requirements
            # such as existence of default networks, pools, etc.
            return self._check_requirements(context, tenant_id)
        elif fields:
            raise n_exc.BadRequest(resource='auto_allocate',
                                   msg=_("Unrecognized field"))

        # Check for an existent topology
        network_id = self._get_auto_allocated_network(context, tenant_id)
        if network_id:
            return self._response(network_id, tenant_id, fields=fields)
        # See if we indeed have an external network to connect to, otherwise
        # we will fail fast
        default_external_network = self._get_default_external_network(context)

        # If we reach this point, then we got some work to do!
        network_id = self._build_topology(context, tenant_id,
                                          default_external_network)
        return self._response(network_id, tenant_id, fields=fields)
Example #5
 def _get_vpp_router(self, context, router_id):
     try:
         router = self._get_by_id(context, Router, router_id)
     except Exception:
         raise n_exc.BadRequest("L3 Router not found for router_id: %s",
                                router_id)
     return router
Example #6
 def get_vsd_subnets(self, context, filters=None, fields=None):
     if 'vsd_zone_id' not in filters:
         msg = _('vsd_zone_id is a required filter parameter for this API.')
         raise n_exc.BadRequest(resource='vsd-subnets', msg=msg)
     l3subs = self.vsdclient.get_domain_subnet_by_zone_id(
         filters['vsd_zone_id'][0])
     vsd_to_os = {
         'ID': 'id',
         'name': 'name',
         self._calc_cidr: 'cidr',
         self._calc_ipv6_cidr: 'ipv6_cidr',
         'gateway': 'gateway',
         'IPv6Gateway': 'ipv6_gateway',
         'IPType': 'ip_version',
         functools.partial(self._return_val, filters['vsd_zone_id'][0]):
             'vsd_zone_id'
     }
     return self._trans_vsd_to_os(l3subs, vsd_to_os, filters, fields)
Example #7
    def _add_csnat_router_interface_port(
            self, context, router, network_id, subnet_id, do_pop=True):
        """Add SNAT interface to the specified router and subnet."""
        port_data = {'tenant_id': '',
                     'network_id': network_id,
                     'fixed_ips': [{'subnet_id': subnet_id}],
                     'device_id': router.id,
                     'device_owner': const.DEVICE_OWNER_ROUTER_SNAT,
                     'admin_state_up': True,
                     'name': ''}
        snat_port = p_utils.create_port(self._core_plugin, context,
                                        {'port': port_data})
        if not snat_port:
            msg = _("Unable to create the SNAT Interface Port")
            raise n_exc.BadRequest(resource='router', msg=msg)

        with p_utils.delete_port_on_error(
            self.l3plugin._core_plugin, context.elevated(), snat_port['id']):
            l3_obj.RouterPort(
                context,
                port_id=snat_port['id'],
                router_id=router.id,
                port_type=const.DEVICE_OWNER_ROUTER_SNAT
            ).create()

            if do_pop:
                return self.l3plugin._populate_mtu_and_subnets_for_ports(
                    context, [snat_port])
            return snat_port
Example #8
    def delete(self, context, policy):
        vs_client = self.core_plugin.nsxlib.load_balancer.virtual_server
        rule_client = self.core_plugin.nsxlib.load_balancer.rule
        binding = nsx_db.get_nsx_lbaas_l7policy_binding(
            context.session, policy.id)
        if binding:
            vs_id = binding['lb_vs_id']
            rule_id = binding['lb_rule_id']
            try:
                # Update virtual server to remove lb rule
                vs_client.remove_rule(vs_id, rule_id)
                rule_client.delete(rule_id)
            except nsxlib_exc.ResourceNotFound:
                LOG.warning('LB rule %(rule)s is not found on NSX',
                            {'rule': rule_id})
            except nsxlib_exc.ManagerError:
                self.lbv2_driver.l7policy.failed_completion(context, policy)
                msg = (_('Failed to delete lb rule: %(rule)s') % {
                    'rule': rule_id
                })
                raise n_exc.BadRequest(resource='lbaas-l7policy-delete',
                                       msg=msg)
            nsx_db.delete_nsx_lbaas_l7policy_binding(context.session,
                                                     policy.id)

        self.lbv2_driver.l7policy.successful_completion(context,
                                                        policy,
                                                        delete=True)
Example #9
    def update(self, context, old_policy, new_policy):
        rule_client = self.core_plugin.nsxlib.load_balancer.rule
        binding = nsx_db.get_nsx_lbaas_l7policy_binding(
            context.session, old_policy.id)
        if not binding:
            self.lbv2_driver.l7rule.failed_completion(context, new_policy)
            msg = _('Cannot find nsx lbaas binding for policy '
                    '%(policy_id)s') % {
                        'policy_id': old_policy.id
                    }
            raise n_exc.BadRequest(resource='lbaas-l7policy-update', msg=msg)

        vs_id = binding['lb_vs_id']
        lb_rule_id = binding['lb_rule_id']
        rule_body = lb_utils.convert_l7policy_to_lb_rule(context, new_policy)
        try:
            rule_client.update(lb_rule_id, **rule_body)
            if new_policy.position != old_policy.position:
                self._update_policy_position(vs_id, lb_rule_id,
                                             new_policy.position)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.lbv2_driver.l7policy.failed_completion(
                    context, new_policy)
                LOG.error('Failed to update L7policy %(policy)s: '
                          '%(err)s', {
                              'policy': old_policy.id,
                              'err': e
                          })

        self.lbv2_driver.l7policy.successful_completion(context, new_policy)
Example #10
    def create(self, context, lb):
        edge_id = lb_common.get_lbaas_edge_id(context, self.core_plugin, lb.id,
                                              lb.vip_address, lb.vip_subnet_id,
                                              lb.tenant_id)

        if not edge_id:
            msg = _('Failed to allocate Edge on subnet %(sub)s for '
                    'loadbalancer %(lb)s') % {
                        'sub': lb.vip_subnet_id,
                        'lb': lb.id
                    }
            raise n_exc.BadRequest(resource='edge-lbaas', msg=msg)

        try:
            lb_common.enable_edge_acceleration(self.vcns, edge_id)

            edge_fw_rule_id = lb_common.add_vip_fw_rule(
                self.vcns, edge_id, lb.id, lb.vip_address)

            # set LB default rule
            lb_common.set_lb_firewall_default_rule(self.vcns, edge_id,
                                                   'accept')

            nsxv_db.add_nsxv_lbaas_loadbalancer_binding(
                context.session, lb.id, edge_id, edge_fw_rule_id,
                lb.vip_address)
            self.lbv2_driver.load_balancer.successful_completion(context, lb)

        except nsxv_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                self.lbv2_driver.load_balancer.failed_completion(context, lb)
                LOG.error('Failed to create pool %s', lb.id)
Example #11
    def create(self, context, lb):
        edge_id = lb_common.get_lbaas_edge_id_for_subnet(
            context, self.core_plugin, lb.vip_subnet_id, lb.tenant_id)

        if not edge_id:
            msg = _('No suitable Edge found for subnet %s') % lb.vip_subnet_id
            raise n_exc.BadRequest(resource='edge-lbaas', msg=msg)

        try:
            if not nsxv_db.get_nsxv_lbaas_loadbalancer_binding_by_edge(
                    context.session, edge_id):
                lb_common.enable_edge_acceleration(self.vcns, edge_id)

            lb_common.add_vip_as_secondary_ip(self.vcns, edge_id,
                                              lb.vip_address)
            edge_fw_rule_id = lb_common.add_vip_fw_rule(
                self.vcns, edge_id, lb.id, lb.vip_address)

            nsxv_db.add_nsxv_lbaas_loadbalancer_binding(
                context.session, lb.id, edge_id, edge_fw_rule_id,
                lb.vip_address)
            self.lbv2_driver.load_balancer.successful_completion(context, lb)

        except nsxv_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                self.lbv2_driver.load_balancer.failed_completion(context, lb)
                LOG.error(_LE('Failed to create pool %s'), lb.id)
Example #12
    def stats(self, context, lb):
        stats = {
            'bytes_in': 0,
            'bytes_out': 0,
            'active_connections': 0,
            'total_connections': 0
        }

        binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding(
            context.session, lb.id)

        try:
            lb_stats = self.vcns.get_loadbalancer_statistics(
                binding['edge_id'])

        except nsxv_exc.VcnsApiException:
            msg = (_('Failed to read load balancer statistics, edge: %s') %
                   binding['edge_id'])
            raise n_exc.BadRequest(resource='edge-lbaas', msg=msg)

        pools_stats = lb_stats[1].get('pool', [])
        for pool_stats in pools_stats:
            stats['bytes_in'] += pool_stats.get('bytesIn', 0)
            stats['bytes_out'] += pool_stats.get('bytesOut', 0)
            stats['active_connections'] += pool_stats.get('curSessions', 0)
            stats['total_connections'] += pool_stats.get('totalSessions', 0)

        return stats
Example #13
    def delete_network_segment_range(self, context, id):
        """Check segment reference on network segment range deletion."""
        with db_api.CONTEXT_WRITER.using(context):
            network_segment_range = self._get_network_segment_range(
                context, id)
            range_data = network_segment_range.to_dict()

            if range_data['default']:
                # TODO(kailun): To use
                #  range_exc.NetworkSegmentRangeDefaultReadOnly when the
                #  neutron-lib patch https://review.openstack.org/640777 is
                #  merged and released.
                message = _("Network Segment Range %s is a "
                            "default segment range which could not be "
                            "updated or deleted.") % id
                raise lib_exc.BadRequest(resource=range_def.RESOURCE_NAME,
                                         msg=message)

            if self._is_network_segment_range_referenced(context, range_data):
                # TODO(kailun): To use
                #  range_exc.NetworkSegmentRangeReferencedByProject when the
                #  neutron-lib patch https://review.openstack.org/640777 is
                #  merged and released.
                message = _("Network Segment Range %s is referenced by "
                            "one or more tenant networks.") % id
                raise lib_exc.InUse(resource=range_def.RESOURCE_NAME,
                                    msg=message)
            network_segment_range.delete()

        self.type_manager.update_network_segment_range_allocations(
            network_segment_range['network_type'])
Example #14
    def create_agent(self, context, agent):
        """Create agent.

        This operation is not allowed via the REST API.
        @raise exceptions.BadRequest:
        """
        raise exceptions.BadRequest()
Example #15
    def _prepare_netpartitions(self):
        # prepare shared netpartition
        shared_netpart_name = constants.SHARED_INFRASTRUCTURE
        self._validate_create_net_partition(n_ctx.get_admin_context(),
                                            shared_netpart_name)
        # prepare default netpartition
        default_netpart_name = cfg.CONF.RESTPROXY.default_net_partition_name
        l3template = cfg.CONF.RESTPROXY.default_l3domain_template
        l2template = cfg.CONF.RESTPROXY.default_l2domain_template
        l3isolated = cfg.CONF.RESTPROXY.default_isolated_zone
        l3shared = cfg.CONF.RESTPROXY.default_shared_zone

        # if templates are not provided, create default templates
        if l2template or l3template or l3isolated or l3shared:
            if (not l2template or not l3template or not l3isolated
                    or not l3shared):
                msg = 'Configuration of default net-partition not complete'
                raise n_exc.BadRequest(resource='net_partition', msg=msg)
            '''NetPartition and templates already created. Just sync the
            neutron DB. They must all be in VSD. If not, it's an error.
            '''
            self._link_default_netpartition(default_netpart_name, l2template,
                                            l3template, l3isolated, l3shared)
        else:
            default_netpart = self._validate_create_net_partition(
                n_ctx.get_admin_context(), default_netpart_name)
            self._default_np_id = default_netpart['id']
Example #16
 def update(self, context, old_pool, new_pool):
     pool_client = self.core_plugin.nsxlib.load_balancer.pool
     pool_name = None
     tags = None
     lb_algorithm = None
     if new_pool.name != old_pool.name:
         pool_name = utils.get_name_and_uuid(new_pool.name or 'pool',
                                             new_pool.id)
         tags = self._get_pool_tags(context, new_pool)
     if new_pool.lb_algorithm != old_pool.lb_algorithm:
         lb_algorithm = lb_const.LB_POOL_ALGORITHM_MAP.get(
             new_pool.lb_algorithm)
     binding = nsx_db.get_nsx_lbaas_pool_binding(context.session,
                                                 old_pool.loadbalancer_id,
                                                 old_pool.id)
     if not binding:
         msg = (_('Cannot find pool %(pool)s binding on NSX db '
                  'mapping') % {'pool': old_pool.id})
         raise n_exc.BadRequest(resource='lbaas-pool', msg=msg)
     try:
         lb_pool_id = binding['lb_pool_id']
         kwargs = self._get_pool_kwargs(pool_name, tags, lb_algorithm)
         pool_client.update(lb_pool_id, **kwargs)
         self.lbv2_driver.pool.successful_completion(context, new_pool)
     except Exception as e:
         with excutils.save_and_reraise_exception():
             self.lbv2_driver.pool.failed_completion(context, new_pool)
             LOG.error(
                 'Failed to update pool %(pool)s with '
                 'error %(error)s', {
                     'pool': old_pool.id,
                     'error': e
                 })
Example #17
 def _validate_delete_net_partition(self, context, id, net_partition_name):
     if net_partition_name == constants.SHARED_INFRASTRUCTURE:
         msg = _("Can't delete net_partition {}").format(net_partition_name)
         raise n_exc.BadRequest(resource='net_partition', msg=msg)
     ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_entid(
         context.session, id)
     ent_l2dom_mapping = nuagedb.get_ent_l2dom_mapping_by_entid(
         context.session, id)
     if ent_rtr_mapping:
         msg = (_("One or more router still attached to "
                  "net_partition %s.") % net_partition_name)
         raise n_exc.BadRequest(resource='net_partition', msg=msg)
     if ent_l2dom_mapping:
         msg = (_("One or more L2 Domain Subnet present in the "
                  "net_partition %s.") % net_partition_name)
         raise n_exc.BadRequest(resource='net_partition', msg=msg)
Example #18
    def create_nuage_redirect_target(self, context, nuage_redirect_target):
        redirect_target = nuage_redirect_target['nuage_redirect_target']
        has_subnet_id = is_attr_set(redirect_target.get('subnet_id'))
        has_router_id = is_attr_set(redirect_target.get('router_id'))

        if not has_subnet_id and not has_router_id:
            msg = _('subnet_id or router_id should be specified')
            raise n_exc.BadRequest(resource='subnets', msg=msg)

        subnet_mapping = nuagedb.get_subnet_l2dom_by_id(
            context.session, redirect_target.get('subnet_id')) or {}
        router_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(
            context.session, redirect_target.get('router_id')) or {}
        if not subnet_mapping and not router_mapping:
            raise ext_rtarget.RedirectTargetNoDomainOrL2Domain()
        if has_subnet_id:
            subnet = self.core_plugin.get_subnet(context,
                                                 redirect_target['subnet_id'])
            if subnet:
                redirect_target['external_id'] = \
                    cms_id_helper.get_vsd_external_id(subnet['network_id'])
        try:
            nuage_redirect_target = self.vsdclient\
                .create_nuage_redirect_target(
                    redirect_target,
                    l2dom_id=subnet_mapping.get('nuage_subnet_id'),
                    domain_id=router_mapping.get('nuage_router_id'))
        except Exception as e:
            if getattr(e, "vsd_code", None) == '7016':
                msg = _("A Nuage redirect target with name '%s' already "
                        "exists") % redirect_target['name']
                raise nuage_exc.NuageBadRequest(msg=msg)
            raise
        return self._make_redirect_target_dict(nuage_redirect_target,
                                               context=context)
Example #19
    def get_vsd_zones(self, context, filters=None, fields=None):
        if 'vsd_domain_id' not in filters:
            msg = _('vsd_domain_id is a required filter parameter for this '
                    'API.')
            raise n_exc.BadRequest(resource='vsd-zones', msg=msg)
        try:
            vsd_zones = self.vsdclient.get_zone_by_domainid(
                filters['vsd_domain_id'][0])
        except RESTProxyError as e:
            if e.code == 404:
                return []
            else:
                raise e

        vsd_zones = [
            self._update_dict(zone, 'vsd_domain_id',
                              filters['vsd_domain_id'][0])
            for zone in vsd_zones
        ]
        vsd_to_os = {
            'zone_id': 'id',
            'zone_name': 'name',
            'vsd_domain_id': 'vsd_domain_id'
        }
        return self._trans_vsd_to_os(vsd_zones, vsd_to_os, filters, fields)
Example #20
 def _check_router_match(self, context, fip_obj, router_id, pf_dict):
     internal_port_id = pf_dict['internal_port_id']
     if fip_obj.router_id and fip_obj.router_id != router_id:
         objs = pf.PortForwarding.get_objects(
             context,
             floatingip_id=fip_obj.id,
             internal_ip_address=pf_dict['internal_ip_address'],
             internal_port=pf_dict['internal_port'])
         if objs:
             message = _("Floating IP %(floatingip_id)s with params: "
                         "internal_ip_address: %(internal_ip_address)s, "
                         "internal_port: %(internal_port)s "
                         "already exists") % {
                             'floatingip_id': fip_obj.id,
                             'internal_ip_address':
                             pf_dict['internal_ip_address'],
                             'internal_port': pf_dict['internal_port']
                         }
         else:
             message = _("The Floating IP %(floatingip_id)s had been set "
                         "on router %(router_id)s, the internal Neutron "
                         "port %(internal_port_id)s can not reach it") % {
                             'floatingip_id': fip_obj.id,
                             'router_id': fip_obj.router_id,
                             'internal_port_id': internal_port_id
                         }
         raise lib_exc.BadRequest(resource=apidef.RESOURCE_NAME,
                                  msg=message)
Example #21
    def _validate_router_migration(self, context, router_db, router_res):
        """Allow transition only when admin_state_up=False"""
        original_distributed_state = router_db.extra_attributes.distributed
        requested_distributed_state = router_res.get('distributed', None)

        distributed_changed = (
            requested_distributed_state is not None and
            requested_distributed_state != original_distributed_state)
        if not distributed_changed:
            return False
        if router_db.admin_state_up:
            msg = _("Cannot change the 'distributed' attribute of active "
                    "routers. Please set router admin_state_up to False "
                    "prior to upgrade")
            raise n_exc.BadRequest(resource='router', msg=msg)

        # Notify advanced services of the imminent state transition
        # for the router.
        try:
            kwargs = {'context': context, 'router': router_db}
            registry.notify(
                resources.ROUTER, events.BEFORE_UPDATE, self, **kwargs)
        except exceptions.CallbackFailure as e:
            # NOTE(armax): preserve old check's behavior
            if len(e.errors) == 1:
                raise e.errors[0].error
            raise l3_exc.RouterInUse(router_id=router_db['id'], reason=e)
        return True
Example #22
    def _link_default_netpartition(self, netpart_name, l2template, l3template,
                                   l3isolated, l3shared):
        params = {
            'name': netpart_name,
            'l3template': l3template,
            'l2template': l2template
        }
        (np_id, l3dom_tid,
         l2dom_tid) = self.vsdclient.link_default_netpartition(params)
        # verify that the provided zones have been created already
        shared_match, isolated_match = self.vsdclient.validate_zone_create(
            l3dom_tid, l3isolated, l3shared)
        if not shared_match or not isolated_match:
            msg = ('Default zone names must be provided for '
                   'default net-partition')
            raise n_exc.BadRequest(resource='net_partition', msg=msg)

        # basic verifications passed. add default netpartition to the DB
        session = lib_db_api.get_writer_session()
        netpartition = nuagedb.get_net_partition_by_name(session, netpart_name)

        with session.begin():
            if netpartition:
                nuagedb.delete_net_partition(session, netpartition)
            nuagedb.add_net_partition(session, np_id, l3dom_tid, l2dom_tid,
                                      netpart_name, l3isolated, l3shared)
        self._default_np_id = np_id
Example #23
    def create_wan_tc_class(self, context, wtc_class):

        wtc_class_db = models.WanTcClass(
            id=uuidutils.generate_uuid(),
            direction=wtc_class['direction'],
            class_ext_id=self.get_last_class_ext_id(context))

        if 'parent' in wtc_class and wtc_class['parent'] != '':
            parent = wtc_class['parent']
            parent_class = self.get_class_by_id(context, parent)
            if not parent_class:
                raise exceptions.BadRequest(msg='invalid parent id')
            wtc_class_db.parent = parent
            wtc_class_db.parent_class_ext_id = parent_class['class_ext_id']
        else:
            wtc_class_db.parent = 'root'
            wtc_class_db.parent_class_ext_id = 1

        with context.session.begin(subtransactions=True):

            if 'min' in wtc_class:
                wtc_class_db.min = wtc_class['min']
            if 'max' in wtc_class:
                wtc_class_db.max = wtc_class['max']

            context.session.add(wtc_class_db)
        class_dict = self._class_to_dict(wtc_class_db)
        class_dict['parent_class_ext_id'] = wtc_class_db.parent_class_ext_id
        return class_dict
Example #24
    def create(self, context, hm, completor):
        lb_id = hm['pool']['loadbalancer_id']
        pool_id = hm['pool']['id']
        pool_client = self.core_plugin.nsxlib.load_balancer.pool
        monitor_client = self.core_plugin.nsxlib.load_balancer.monitor
        monitor_name = utils.get_name_and_uuid(hm['name'] or 'monitor',
                                               hm['id'])
        tags = lb_utils.get_tags(self.core_plugin, hm['id'],
                                 lb_const.LB_HM_TYPE, hm['tenant_id'],
                                 context.project_name)
        monitor_body = self._build_monitor_args(hm)

        try:
            lb_monitor = monitor_client.create(display_name=monitor_name,
                                               tags=tags,
                                               **monitor_body)
        except nsxlib_exc.ManagerError:
            with excutils.save_and_reraise_exception():
                completor(success=False)

        binding = nsx_db.get_nsx_lbaas_pool_binding(context.session, lb_id,
                                                    pool_id)
        if binding:
            lb_pool_id = binding['lb_pool_id']
            try:
                pool_client.add_monitor_to_pool(lb_pool_id, lb_monitor['id'])
            except nsxlib_exc.ManagerError:
                completor(success=False)
                msg = _('Failed to attach monitor %(monitor)s to pool '
                        '%(pool)s') % {
                            'monitor': lb_monitor['id'],
                            'pool': lb_pool_id
                        }
                raise n_exc.BadRequest(resource='lbaas-hm', msg=msg)
            nsx_db.add_nsx_lbaas_monitor_binding(context.session, lb_id,
                                                 pool_id, hm['id'],
                                                 lb_monitor['id'], lb_pool_id)
        else:
            completor(success=False)
            msg = _('Failed to attach monitor %(monitor)s to pool '
                    '%(pool)s: NSX pool was not found on the DB') % {
                        'monitor': hm['id'],
                        'pool': pool_id
                    }
            raise n_exc.BadRequest(resource='lbaas-hm', msg=msg)

        completor(success=True)
Example #25
    def create_nuage_external_security_group(self, context,
                                             nuage_external_security_group):
        external_sg = nuage_external_security_group[
            'nuage_external_security_group']
        subnet_id = external_sg.get('subnet_id')
        router_id = external_sg.get('router_id')

        if not subnet_id and not router_id:
            msg = _("Either router_id or subnet_id must be specified")
            raise n_exc.BadRequest(resource='nuage_external_security_group',
                                   msg=msg)

        l2dom_id = None
        l3dom_id = None
        external_id = None
        if subnet_id:
            subnet_mapping = nuagedb.get_subnet_l2dom_by_id(
                context.session, subnet_id)
            if subnet_mapping and self._is_l2(subnet_mapping):
                l2dom_id = subnet_mapping['nuage_subnet_id']
                external_id = subnet_id
            if not l2dom_id:
                msg = _("VSD L2Domain not found for subnet %s") % subnet_id
                raise n_exc.BadRequest(
                    resource='nuage_external_security_group', msg=msg)
        elif router_id:
            nuage_router = self.vsdclient.get_router_by_external(router_id)
            if nuage_router:
                l3dom_id = nuage_router['ID']
                external_id = router_id
            if not l3dom_id:
                msg = _("VSD domain not found for router %s") % router_id
                raise n_exc.BadRequest(
                    resource='nuage_external_security_group', msg=msg)
        params = {
            'l2dom_id': l2dom_id,
            'l3dom_id': l3dom_id,
            'name': external_sg.get('name'),
            'description': external_sg.get('description'),
            'extended_community': external_sg.get('extended_community_id'),
            'externalID': external_id

        }
        ext_sg_resp = (
            self.vsdclient.create_nuage_external_security_group(params))
        return self._make_external_security_group_dict(ext_sg_resp[0],
                                                       context=context)
Example #26
    def delete(self, context, rule):
        lb_id = rule.policy.listener.loadbalancer_id
        vs_client = self.core_plugin.nsxlib.load_balancer.virtual_server
        rule_client = self.core_plugin.nsxlib.load_balancer.rule

        binding = nsx_db.get_nsx_lbaas_l7rule_binding(
            context.session, lb_id, rule.l7policy_id, rule.id)
        if binding:
            vs_id = binding['lb_vs_id']
            rule_id = binding['lb_rule_id']
            try:
                rule_client.delete(rule_id)
            except nsx_exc.NsxResourceNotFound:
                msg = (_("LB rule cannot be found on nsx: %(rule)s") %
                       {'rule': rule_id})
                raise n_exc.BadRequest(resource='lbaas-l7rule-delete',
                                       msg=msg)
            except nsxlib_exc.ManagerError:
                self.lbv2_driver.l7rule.failed_completion(context,
                                                          rule)
                msg = (_('Failed to delete lb rule: %(rule)s') %
                       {'rule': rule.id})
                raise n_exc.BadRequest(resource='lbaas-l7rule-delete',
                                       msg=msg)
            try:
                lb_vs = vs_client.get(vs_id)
                if 'rule_ids' in lb_vs and rule_id in lb_vs['rule_ids']:
                    lb_vs['rule_ids'].remove(rule_id)
                vs_client.update(vs_id, lb_vs)
            except nsx_exc.NsxResourceNotFound:
                msg = (_("virtual server cannot be found on nsx: %(vs)s") %
                       {'vs': vs_id})
                raise n_exc.BadRequest(resource='lbaas-l7rule-delete',
                                       msg=msg)
            except nsxlib_exc.ManagerError:
                self.lbv2_driver.l7rule.failed_completion(context,
                                                          rule)
                msg = (_('Failed to update rule %(rule)s on virtual server '
                         '%(vs)s') % {'rule': rule_id, 'vs': vs_id})
                raise n_exc.BadRequest(resource='lbaas-l7rule-delete',
                                       msg=msg)

            nsx_db.delete_nsx_lbaas_l7rule_binding(
                context.session, lb_id, rule.l7policy_id, rule.id)

        self.lbv2_driver.l7rule.successful_completion(context, rule,
                                                      delete=True)
Example #27
    def get_vsd_domains(self, context, filters=None, fields=None):

        l3domains = []
        l2domains = []

        if 'vsd_organisation_id' in filters:
            # get domains by enterprise id
            l3domains.extend(
                self.vsdclient.get_routers_by_netpart(
                    filters['vsd_organisation_id'][0]))
            l2domains.extend(
                self.vsdclient.get_subnet_by_netpart(
                    filters['vsd_organisation_id'][0]))
        elif 'os_router_ids' in filters:
            # get domain by Openstack router id
            for os_id in filters['os_router_ids']:
                l3_domain = self.vsdclient.get_l3domain_by_external_id(os_id)
                if l3_domain:
                    l3domains.append(l3_domain)
        else:
            msg = _('vsd_organisation_id or os_router_ids is a required filter'
                    ' parameter for this API')
            raise n_exc.BadRequest(resource='vsd-domains', msg=msg)

        # add type to the domains (used by horizon linkedselect)
        for domain in l3domains:
            domain.update({'type': 'L3'})
        for domain in l2domains:
            domain.update({'type': 'L2'})

        vsd_to_os = {
            'ID': 'id',
            'name': 'name',
            'type': 'type',

            # L2
            'net_partition_id': 'net_partition_id',
            'dhcp_managed': 'dhcp_managed',
            'IPType': 'ip_version',
            'ipv4_cidr': 'cidr',
            'IPv6Address': 'ipv6_cidr',
            'ipv4_gateway': 'gateway',
            'IPv6Gateway': 'ipv6_gateway',
            'enableDHCPv4': 'enable_dhcpv4',
            'enableDHCPv6': 'enable_dhcpv6',

            # L3
            'parentID': 'net_partition_id',
            'routeDistinguisher': 'rd',
            'routeTarget': 'rt',
            'backHaulVNID': 'backhaul_vnid',
            'backHaulRouteDistinguisher': 'backhaul_rd',
            'backHaulRouteTarget': 'backhaul_rt',
            'templateID': 'router_template_id',
            'tunnelType': 'tunnel_type',
            'ECMPCount': 'ecmp_count'
        }
        return self._trans_vsd_to_os(l3domains + l2domains, vsd_to_os, filters,
                                     fields)
Example #28
    def create(self, context, hm, completor):
        lb_id = hm['pool']['loadbalancer_id']
        lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding(
            context.session, lb_id)
        edge_id = lb_binding['edge_id']
        pool_id = hm['pool']['id']
        pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding(
            context.session, lb_id, pool_id)
        if not pool_binding:
            completor(success=False)
            msg = _('Failed to create health monitor on edge: %s. '
                    'Binding not found') % edge_id
            LOG.error(msg)
            raise n_exc.BadRequest(resource='edge-lbaas', msg=msg)

        edge_pool_id = pool_binding['edge_pool_id']

        hm_binding = nsxv_db.get_nsxv_lbaas_monitor_binding(
            context.session, lb_id, pool_id, hm['id'], edge_id)
        edge_mon_id = None

        if hm_binding:
            edge_mon_id = hm_binding['edge_mon_id']
        else:
            edge_monitor = self._convert_lbaas_monitor(hm)
            try:
                with locking.LockManager.get_lock(edge_id):
                    h = self.vcns.create_health_monitor(edge_id,
                                                        edge_monitor)[0]
                    edge_mon_id = lb_common.extract_resource_id(h['location'])

                nsxv_db.add_nsxv_lbaas_monitor_binding(context.session, lb_id,
                                                       pool_id, hm['id'],
                                                       edge_id, edge_mon_id)

            except nsxv_exc.VcnsApiException:
                with excutils.save_and_reraise_exception():
                    completor(success=False)
                    LOG.error('Failed to create health monitor on edge: %s',
                              edge_id)

        try:
            # Associate monitor with Edge pool
            with locking.LockManager.get_lock(edge_id):
                edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1]
                if edge_pool.get('monitorId'):
                    edge_pool['monitorId'].append(edge_mon_id)
                else:
                    edge_pool['monitorId'] = [edge_mon_id]

                self.vcns.update_pool(edge_id, edge_pool_id, edge_pool)

        except nsxv_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                completor(success=False)
                LOG.error('Failed to create health monitor on edge: %s',
                          edge_id)

        completor(success=True)
Example #29
    def create(self, context, member, completor):
        lb_id = self._get_pool_lb_id(member)
        lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding(
            context.session, lb_id)
        edge_id = lb_binding['edge_id']

        pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding(
            context.session, lb_id, member['pool_id'])
        if not pool_binding:
            completor(success=False)
            msg = _('Failed to create member on edge: %s. '
                    'Binding not found') % edge_id
            LOG.error(msg)
            raise n_exc.BadRequest(resource='edge-lbaas', msg=msg)

        edge_pool_id = pool_binding['edge_pool_id']
        old_lb = lb_common.is_lb_on_router_edge(context, self.core_plugin,
                                                edge_id)
        with locking.LockManager.get_lock(edge_id):
            if not cfg.CONF.nsxv.use_routers_as_lbaas_platform and not old_lb:
                # Verify that Edge appliance is connected to the member's
                # subnet (only if this is a dedicated loadbalancer edge)
                if not lb_common.get_lb_interface(context, self.core_plugin,
                                                  lb_id, member['subnet_id']):
                    lb_common.create_lb_interface(context, self.core_plugin,
                                                  lb_id, member['subnet_id'],
                                                  member['tenant_id'])

            edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1]
            edge_member = {
                'ipAddress': member['address'],
                'weight': member['weight'],
                'port': member['protocol_port'],
                'monitorPort': member['protocol_port'],
                'name': lb_common.get_member_id(member['id']),
                'condition':
                'enabled' if member['admin_state_up'] else 'disabled'
            }

            if edge_pool.get('member'):
                edge_pool['member'].append(edge_member)
            else:
                edge_pool['member'] = [edge_member]

            try:
                self.vcns.update_pool(edge_id, edge_pool_id, edge_pool)
                completor(success=True)

                if old_lb:
                    member_ips = self._get_pool_member_ips(
                        member['pool'], 'add', member['address'])
                    lb_common.update_pool_fw_rule(
                        self.vcns, member['pool_id'], edge_id,
                        self._get_lbaas_fw_section_id(), member_ips)

            except nsxv_exc.VcnsApiException:
                with excutils.save_and_reraise_exception():
                    completor(success=False)
                    LOG.error('Failed to create member on edge: %s', edge_id)
Example #30
def policy_to_edge_and_rule_id(context, policy_id):
    # get the nsx application rule id and edge id
    binding = nsxv_db.get_nsxv_lbaas_l7policy_binding(context.session,
                                                      policy_id)
    if not binding:
        msg = _('No suitable Edge found for policy %s') % policy_id
        raise n_exc.BadRequest(resource='edge-lbaas', msg=msg)
    return binding['edge_id'], binding['edge_app_rule_id']
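
A note on the shared idiom: every example above builds a (usually translated) message string and raises BadRequest with resource and msg keyword arguments, letting neutron-lib render the final error. Below is a minimal, self-contained sketch of that pattern, assuming neutron_lib.exceptions.BadRequest accepts those keyword arguments as shown in the examples; the helper name and the "required filter" scenario are illustrative only and do not come from any of the projects quoted above.

# Minimal sketch of the shared idiom (assumption: neutron-lib's BadRequest
# accepts resource/msg keyword arguments, as used in the examples above;
# get_required_filter is a hypothetical helper, not an upstream API).
from neutron_lib import exceptions as n_exc


def get_required_filter(filters, key, resource):
    """Return the first value of a required filter, else raise BadRequest."""
    if not filters or key not in filters:
        msg = '%s is a required filter parameter for this API.' % key
        raise n_exc.BadRequest(resource=resource, msg=msg)
    return filters[key][0]

Examples #6 and #19 are essentially this check inlined for the vsd_zone_id and vsd_domain_id filters.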