Example No. 1
def get_nsx_security_group_id(session, cluster, neutron_id):
    """Return the NSX sec profile uuid for a given neutron sec group.

    First, look up the Neutron database. If not found, execute
    a query on NSX platform as the mapping might be missing.
    NOTE: Security groups are called 'security profiles' on the NSX backend.
    """
    nsx_id = nsx_db.get_nsx_security_group_id(session, neutron_id)
    if not nsx_id:
        # Find security profile on backend.
        # This is a rather expensive query, but it won't be executed
        # more than once for each security group in Neutron's lifetime
        nsx_sec_profiles = secgrouplib.query_security_profiles(
            cluster, '*',
            filters={'tag': neutron_id,
                     'tag_scope': 'q_sec_group_id'})
        # Only one result expected
        # NOTE(salv-orlando): Not handling the case where more than one
        # security profile is found with the same Neutron security group tag
        if not nsx_sec_profiles:
            LOG.warning(_LW("Unable to find NSX security profile for Neutron "
                            "security group %s"), neutron_id)
            return
        elif len(nsx_sec_profiles) > 1:
            LOG.warning(_LW("Multiple NSX security profiles found for Neutron "
                            "security group %s"), neutron_id)
        nsx_sec_profile = nsx_sec_profiles[0]
        nsx_id = nsx_sec_profile['uuid']
        with session.begin(subtransactions=True):
            # Create DB mapping
            nsx_db.add_neutron_nsx_security_group_mapping(
                session, neutron_id, nsx_id)
    return nsx_id
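The helper above follows a lookup-then-cache pattern: check the Neutron DB first, fall back to the (expensive) backend query, then persist the mapping. A minimal, self-contained sketch of the same idea, using a plain dict and a callable as hypothetical stand-ins for the Neutron DB and the NSX query:

def resolve_backend_id(db_mappings, backend_query, neutron_id):
    """Return the backend id for neutron_id, caching the mapping."""
    backend_id = db_mappings.get(neutron_id)
    if backend_id is None:
        results = backend_query(neutron_id)   # expensive call, done at most once
        if not results:
            return None
        backend_id = results[0]
        db_mappings[neutron_id] = backend_id  # cache so later calls skip the query
    return backend_id

def fake_query(nid):                          # pretend backend lookup
    return ['nsx-' + nid]

cache = {}
assert resolve_backend_id(cache, fake_query, 'sg-1') == 'nsx-sg-1'
assert cache == {'sg-1': 'nsx-sg-1'}          # a second call now hits the cache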
Example No. 2
def delete_old_dhcp_edge(context, old_edge_id, bindings):
    LOG.info(_LI("Deleting the old DHCP edge: %s"), old_edge_id)
    # Use one of the router-ids in the bindings as the name for the deletion
    dhcp_names = [binding['router_id'] for binding in bindings]
    dhcp_name = dhcp_names[0]
    with locking.LockManager.get_lock(old_edge_id):
        # Delete from NSXv backend
        # (using the first dhcp name as the "router name")
        # Note: if we do not delete the edge but only free it, it will be
        # immediately reused as the new one, so it is better to delete it.
        try:
            nsxv.delete_edge(old_edge_id)
        except Exception as e:
            LOG.warning(_LW("Failed to delete the old edge %(id)s: %(e)s"), {
                'id': old_edge_id,
                'e': e
            })
            # Continue the process anyway
            # The edge may have been already deleted at the backend

        try:
            # Remove bindings from Neutron DB
            nsxv_db.delete_nsxv_router_binding(context.session, dhcp_name)
            nsxv_db.clean_edge_vnic_binding(context.session, old_edge_id)
        except Exception as e:
            LOG.warning(
                _LW("Failed to delete the old edge %(id)s from the "
                    "DB: %(e)s"), {
                        'id': old_edge_id,
                        'e': e
                    })
Example No. 3
def delete_resource_by_values(resource, skip_not_found=True, **kwargs):
    resources_get = client.get_resource(resource)
    matched_num = 0
    for res in resources_get['results']:
        if utils.dict_match(kwargs, res):
            LOG.debug("Deleting %s from resource %s", res, resource)
            delete_resource = resource + "/" + str(res['id'])
            client.delete_resource(delete_resource)
            matched_num = matched_num + 1
    if matched_num == 0:
        if skip_not_found:
            LOG.warning(_LW("No resource in %(res)s matched for values: "
                            "%(values)s"), {'res': resource,
                                            'values': kwargs})
        else:
            err_msg = (_("No resource in %(res)s matched for values: "
                         "%(values)s") % {'res': resource,
                                          'values': kwargs})
            raise nsx_exc.ResourceNotFound(
                manager=client._get_nsx_managers_from_conf(),
                operation=err_msg)
    elif matched_num > 1:
        LOG.warning(_LW("%(num)s resources in %(res)s matched for values: "
                        "%(values)s"), {'num': matched_num,
                                        'res': resource,
                                        'values': kwargs})
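utils.dict_match is assumed here to behave as "every key/value pair of the filter dict appears in the candidate resource"; a minimal stand-in with that semantics:

def dict_match(criteria, candidate):
    """True if every key/value pair in criteria is present in candidate."""
    return all(candidate.get(k) == v for k, v in criteria.items())

print(dict_match({'display_name': 'web'},
                 {'id': '1', 'display_name': 'web'}))   # True
print(dict_match({'display_name': 'db'},
                 {'id': '1', 'display_name': 'web'}))   # False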
Example No. 4
    def _init_nested_groups(self, requested_size):
        # Construct the groups dict -
        # {0: <groups-1>,.., n-1: <groups-n>}
        size = requested_size
        nested_groups = {
            self._get_nested_group_index_from_name(nsgroup): nsgroup['id']
            for nsgroup in firewall.list_nsgroups()
            if utils.is_internal_resource(nsgroup)}

        if nested_groups:
            size = max(requested_size, max(nested_groups) + 1)
            if size > requested_size:
                LOG.warning(_LW("Lowering the value of "
                                "nsx_v3:number_of_nested_groups isn't "
                                "supported, '%s' nested-groups will be used."),
                            size)

        absent_groups = set(range(size)) - set(nested_groups.keys())
        if absent_groups:
            LOG.warning(
                _LW("Found %(num_present)s Nested Groups, "
                    "creating %(num_absent)s more."),
                {'num_present': len(nested_groups),
                 'num_absent': len(absent_groups)})
            for i in absent_groups:
                cont = self._create_nested_group(i)
                nested_groups[i] = cont['id']

        return nested_groups
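The sizing logic boils down to set arithmetic over the desired index range; a quick illustration with made-up backend values:

nested_groups = {0: 'a', 2: 'c', 5: 'f'}              # indexes found on the backend
requested_size = 4
size = max(requested_size, max(nested_groups) + 1)    # 6 - never shrink below what exists
absent_groups = set(range(size)) - set(nested_groups)
print(size, sorted(absent_groups))                    # 6 [1, 3, 4]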
Example No. 5
    def _redirect_params(self, conn, headers, allow_release_conn=False):
        """Process redirect response, create new connection if necessary.

        Args:
            conn: connection that returned the redirect response
            headers: response headers of the redirect response
            allow_release_conn: if redirecting to a different server,
                release existing connection back to connection pool.

        Returns: Return tuple(conn, url) where conn is a connection object
            to the redirect target and url is the path of the API request
        """

        url = None
        for name, value in headers:
            if name.lower() == "location":
                url = value
                break
        if not url:
            LOG.warning(_LW("[%d] Received redirect status without location "
                            "header field"), self._rid())
            return (conn, None)
        # Accept location with the following format:
        # 1. /path, redirect to same node
        # 2. scheme://hostname:[port]/path where scheme is https or http
        # 3. Reject others, e.g. relative paths, unsupported scheme,
        #    unspecified host
        result = urlparse.urlparse(url)
        if not result.scheme and not result.hostname and result.path:
            if result.path[0] == "/":
                if result.query:
                    url = "%s?%s" % (result.path, result.query)
                else:
                    url = result.path
                return (conn, url)      # case 1
            else:
                LOG.warning(_LW("[%(rid)d] Received invalid redirect "
                                "location: '%(url)s'"),
                            {'rid': self._rid(), 'url': url})
                return (conn, None)     # case 3
        elif result.scheme not in ["http", "https"] or not result.hostname:
            LOG.warning(_LW("[%(rid)d] Received malformed redirect "
                            "location: %(url)s"),
                        {'rid': self._rid(), 'url': url})
            return (conn, None)         # case 3
        # case 2, redirect location includes a scheme
        # so setup a new connection and authenticate
        if allow_release_conn:
            self._api_client.release_connection(conn)
        conn_params = (result.hostname, result.port, result.scheme == "https")
        conn = self._api_client.acquire_redirect_connection(conn_params, True,
                                                            self._headers)
        if result.query:
            url = "%s?%s" % (result.path, result.query)
        else:
            url = result.path
        return (conn, url)
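The three redirect cases can be reproduced with the standard library alone; this sketch (Python 3 urllib.parse, no connection handling) mirrors the checks above:

from urllib import parse as urlparse

def classify_location(url):
    result = urlparse.urlparse(url)
    if not result.scheme and not result.hostname and result.path:
        # case 1 vs. case 3 (relative path)
        return 'same-node' if result.path.startswith('/') else 'invalid'
    if result.scheme not in ('http', 'https') or not result.hostname:
        return 'invalid'                                   # case 3
    return 'new-connection'                                # case 2

print(classify_location('/ws.v1/lswitch?fields=uuid'))         # same-node
print(classify_location('https://10.0.0.2:443/ws.v1/login'))   # new-connection
print(classify_location('ftp://host/path'))                    # invalid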
Example No. 6
def validate_nsxv_config_options():
    if (cfg.CONF.nsxv.manager_uri is None or cfg.CONF.nsxv.user is None
            or cfg.CONF.nsxv.password is None):
        error = _("manager_uri, user, and password must be configured!")
        raise nsx_exc.NsxPluginException(err_msg=error)
    if cfg.CONF.nsxv.dvs_id is None:
        LOG.warning(_LW("dvs_id must be configured to support VLANs!"))
    if cfg.CONF.nsxv.vdn_scope_id is None:
        LOG.warning(_LW("vdn_scope_id must be configured to support VXLANs!"))
    if cfg.CONF.nsxv.use_dvs_features and not dvs_utils.dvs_is_enabled():
        error = _("dvs host/vcenter credentials must be defined to use "
                  "dvs features")
        raise nsx_exc.NsxPluginException(err_msg=error)
Example No. 7
def validate_nsxv_config_options():
    if (cfg.CONF.nsxv.manager_uri is None or
        cfg.CONF.nsxv.user is None or
        cfg.CONF.nsxv.password is None):
        error = _("manager_uri, user, and password must be configured!")
        raise nsx_exc.NsxPluginException(err_msg=error)
    if cfg.CONF.nsxv.dvs_id is None:
        LOG.warning(_LW("dvs_id must be configured to support VLANs!"))
    if cfg.CONF.nsxv.vdn_scope_id is None:
        LOG.warning(_LW("vdn_scope_id must be configured to support VXLANs!"))
    if cfg.CONF.nsxv.use_dvs_features and not dvs_utils.dvs_is_enabled():
        error = _("dvs host/vcenter credentials must be defined to use "
                  "dvs features")
        raise nsx_exc.NsxPluginException(err_msg=error)
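For context, a rough sketch of how such options might be registered with oslo.config before this validation runs; the option names come from the checks above, while the defaults and the exact group layout are assumptions:

from oslo_config import cfg

nsxv_opts = [
    cfg.StrOpt('manager_uri'),
    cfg.StrOpt('user'),
    cfg.StrOpt('password', secret=True),
    cfg.StrOpt('dvs_id'),
    cfg.StrOpt('vdn_scope_id'),
    cfg.BoolOpt('use_dvs_features', default=False),
]
cfg.CONF.register_opts(nsxv_opts, group='nsxv')

print(cfg.CONF.nsxv.manager_uri)   # None until set in nsx.ini, so validation would fail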
Example No. 8
    def remove_nsgroup(self, nsgroup_id):
        for group in self._suggest_nested_group(nsgroup_id):
            try:
                firewall.remove_nsgroup_member(
                    group, firewall.NSGROUP, nsgroup_id, verify=True)
                break
            except firewall.NSGroupMemberNotFound:
                LOG.warning(_LW("NSGroup %(nsgroup)s was expected to be found "
                                "in group %(group_id)s, but wasn't. "
                                "Looking in the next group..."),
                            {'nsgroup': nsgroup_id, 'group_id': group})
                continue
        else:
            LOG.warning(_LW("NSGroup %s was marked for removal, but its "
                            "reference is missing."), nsgroup_id)
Example No. 9
    def _get_bw_values_from_rule(self, bw_rule):
        """Translate the neutron bandwidth_limit_rule values into the
        values expected by the NSX-v3 QoS switch profile,
        and validate that those are legal
        """
        if bw_rule:
            shaping_enabled = True

            # validate the max_kbps - it must be at least 1Mbps for the
            # switch profile configuration to succeed.
            if (bw_rule.max_kbps < MAX_KBPS_MIN_VALUE):
                # Since failing the action from the notification callback
                # is not possible, just log the warning and use the
                # minimal value.
                LOG.warning(_LW("Invalid input for max_kbps. "
                                "The minimal legal value is 1024"))
                bw_rule.max_kbps = MAX_KBPS_MIN_VALUE

            # 'None' value means we will keep the old value
            burst_size = peak_bandwidth = average_bandwidth = None

            # translate kbps -> bytes (1 kbit = 1024 bits = 128 bytes)
            burst_size = int(bw_rule.max_burst_kbps) * 128

            # translate kbps -> Mbps
            peak_bandwidth = int(float(bw_rule.max_kbps) / 1024)
            # neutron QoS does not support this parameter
            average_bandwidth = peak_bandwidth
        else:
            shaping_enabled = False
            burst_size = None
            peak_bandwidth = None
            average_bandwidth = None

        return shaping_enabled, burst_size, peak_bandwidth, average_bandwidth
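The two unit conversions, worked through with concrete numbers (the code above treats 1 kbit as 1024 bits, so 1 kbit of burst is 128 bytes):

max_burst_kbps = 2048
burst_size_bytes = int(max_burst_kbps) * 128          # 2048 kbit -> 262144 bytes
print(burst_size_bytes)                               # 262144

max_kbps = 10240
peak_bandwidth_mbps = int(float(max_kbps) / 1024)     # 10240 kbps -> 10 Mbps
print(peak_bandwidth_mbps)                            # 10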
Example No. 10
    def acquire_connection(self, auto_login=True, headers=None, rid=-1):
        '''Check out an available HTTPConnection instance.

        Blocks until a connection is available.
        :param auto_login: automatically log in before returning the conn
        :param headers: headers to pass on to the login attempt
        :param rid: request id passed in from request eventlet.
        :returns: An available HTTPConnection instance or None if no
                 api_providers are configured.
        '''
        if not self._api_providers:
            LOG.warning(_LW("[%d] no API providers currently available."), rid)
            return None
        if self._conn_pool.empty():
            LOG.debug("[%d] Waiting to acquire API client connection.", rid)
        priority, conn = self._conn_pool.get()
        now = time.time()
        if getattr(conn, 'last_used', now) < now - cfg.CONF.conn_idle_timeout:
            LOG.info(_LI("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f "
                         "seconds; reconnecting."),
                     {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn),
                      'sec': now - conn.last_used})
            conn = self._create_connection(*self._conn_params(conn))

        conn.last_used = now
        conn.priority = priority  # stash current priority for release
        qsize = self._conn_pool.qsize()
        LOG.debug("[%(rid)d] Acquired connection %(conn)s. %(qsize)d "
                  "connection(s) available.",
                  {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn),
                   'qsize': qsize})
        if auto_login and self.auth_cookie(conn) is None:
            self._wait_for_login(conn, headers)
        return conn
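The idle-timeout test above is a plain timestamp comparison; a standalone sketch with a hypothetical 180-second conn_idle_timeout:

import time

conn_idle_timeout = 180                # stand-in for cfg.CONF.conn_idle_timeout
last_used = time.time() - 300          # pretend the connection sat idle for 5 minutes
now = time.time()
if last_used < now - conn_idle_timeout:
    print("connection idle for %0.2f seconds; reconnecting" % (now - last_used))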
Example No. 11
    def _synchronize_lrouters(self, ctx, lr_uuids, scan_missing=False):
        if not lr_uuids and not scan_missing:
            return
        # TODO(salvatore-orlando): Deal with the case where the tag
        # has been tampered with
        neutron_router_mappings = {}
        for lr_uuid in lr_uuids:
            lrouter = (self._nsx_cache[lr_uuid].get('data')
                       or self._nsx_cache[lr_uuid].get('data_bk'))
            tags = self._get_tag_dict(lrouter['tags'])
            neutron_router_id = tags.get('q_router_id')
            if neutron_router_id:
                neutron_router_mappings[neutron_router_id] = (
                    self._nsx_cache[lr_uuid])
            else:
                LOG.warning(
                    _LW("Unable to find Neutron router id for "
                        "NSX logical router: %s"), lr_uuid)
        # Fetch neutron routers from database
        filters = ({} if scan_missing else {
            'id': neutron_router_mappings.keys()
        })
        routers = self._plugin._get_collection(ctx,
                                               l3_db.Router,
                                               self._plugin._make_router_dict,
                                               filters=filters)
        for router in routers:
            lrouter = neutron_router_mappings.get(router['id'])
            self.synchronize_router(ctx, router, lrouter
                                    and lrouter.get('data'))
Example No. 12
def get_nsx_router_id(session, cluster, neutron_router_id):
    """Return the NSX router uuid for a given neutron router.

    First, look up the Neutron database. If not found, execute
    a query on NSX platform as the mapping might be missing.
    """
    if not neutron_router_id:
        return
    nsx_router_id = nsx_db.get_nsx_router_id(session, neutron_router_id)
    if not nsx_router_id:
        # Find logical router from backend.
        # This is a rather expensive query, but it won't be executed
        # more than once for each router in Neutron's lifetime
        nsx_routers = routerlib.query_lrouters(cluster,
                                               '*',
                                               filters={
                                                   'tag': neutron_router_id,
                                                   'tag_scope': 'q_router_id'
                                               })
        # Only one result expected
        # NOTE(salv-orlando): Not handling the case where more than one
        # router is found with the same neutron router tag
        if not nsx_routers:
            LOG.warning(_LW("Unable to find NSX router for Neutron router %s"),
                        neutron_router_id)
            return
        nsx_router = nsx_routers[0]
        nsx_router_id = nsx_router['uuid']
        with session.begin(subtransactions=True):
            # Create DB mapping
            nsx_db.add_neutron_nsx_router_mapping(session, neutron_router_id,
                                                  nsx_router_id)
    return nsx_router_id
Example No. 13
    def api_providers(self):
        """Parse api_providers from response.

        Returns: api_providers in [(host, port, is_ssl), ...] format
        """
        def _provider_from_listen_addr(addr):
            # (pssl|ptcp):<ip>:<port> => (host, port, is_ssl)
            parts = addr.split(':')
            return (parts[1], int(parts[2]), parts[0] == 'pssl')

        try:
            if self.successful():
                ret = []
                body = jsonutils.loads(self.value.body)
                for node in body.get('results', []):
                    for role in node.get('roles', []):
                        if role.get('role') == 'api_provider':
                            addr = role.get('listen_addr')
                            if addr:
                                ret.append(_provider_from_listen_addr(addr))
                return ret
        except Exception as e:
            LOG.warning(_LW("[%(rid)d] Failed to parse API provider: %(e)s"),
                        {'rid': self._rid(), 'e': e})
            # intentionally fall through
        return None
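The (pssl|ptcp):<ip>:<port> parsing step in isolation, run against a sample control-cluster response body (all values invented for the example):

import json

body = ('{"results": [{"roles": [{"role": "api_provider", '
        '"listen_addr": "pssl:192.168.1.10:443"}]}]}')

def _provider_from_listen_addr(addr):
    # (pssl|ptcp):<ip>:<port> => (host, port, is_ssl)
    parts = addr.split(':')
    return (parts[1], int(parts[2]), parts[0] == 'pssl')

for node in json.loads(body).get('results', []):
    for role in node.get('roles', []):
        if role.get('role') == 'api_provider' and role.get('listen_addr'):
            print(_provider_from_listen_addr(role['listen_addr']))
# ('192.168.1.10', 443, True)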
Example No. 14
    def _check_invalid_security_groups_specified(self,
                                                 context,
                                                 port,
                                                 only_warn=False):
        """Check if the lists of security groups are valid

        When only_warn is True we do not raise an exception here, because
        doing so may cause nova boot to fail.
        Instead we will later remove provider security groups from the regular
        security groups list of the port.
        Since all the provider security groups of the tenant will be on this
        list anyway, the result will be the same.
        """
        if validators.is_attr_set(port.get(ext_sg.SECURITYGROUPS)):
            for sg in port.get(ext_sg.SECURITYGROUPS, []):
                # make sure the user doesn't add a provider secgrp to the
                # regular security groups list
                if self._is_provider_security_group(context, sg):
                    if only_warn:
                        LOG.warning(
                            _LW("Ignored provider security group %(sg)s in "
                                "security groups list for port %(id)s"), {
                                    'sg': sg,
                                    'id': port['id']
                                })
                    else:
                        raise provider_sg.SecurityGroupIsProvider(id=sg)

        if validators.is_attr_set(port.get(
                provider_sg.PROVIDER_SECURITYGROUPS)):

            # also check that all the specified provider security groups exist
            for sg in port.get(provider_sg.PROVIDER_SECURITYGROUPS, []):
                self._check_provider_security_group_exists(context, sg)
Example No. 15
    def synchronize_network(self,
                            context,
                            neutron_network_data,
                            lswitches=None):
        """Synchronize a Neutron network with its NSX counterpart.

        This routine synchronizes a set of switches when a Neutron
        network is mapped to multiple lswitches.
        """
        if not lswitches:
            # Try to get logical switches from nsx
            try:
                lswitches = nsx_utils.fetch_nsx_switches(
                    context.session, self._cluster, neutron_network_data['id'])
            except exceptions.NetworkNotFound:
                # TODO(salv-orlando): We should be catching
                # api_exc.ResourceNotFound here
                # The logical switch was not found
                LOG.warning(
                    _LW("Logical switch for neutron network %s not "
                        "found on NSX."), neutron_network_data['id'])
                lswitches = []
            else:
                for lswitch in lswitches:
                    self._nsx_cache.update_lswitch(lswitch)
        # By default assume things go wrong
        status = constants.NET_STATUS_ERROR
        # In most cases lswitches will contain a single element
        for ls in lswitches:
            if not ls:
                # Logical switch was deleted
                break
            ls_status = ls['_relations']['LogicalSwitchStatus']
            if not ls_status['fabric_status']:
                status = constants.NET_STATUS_DOWN
                break
        else:
            # No switch was down or missing. Set status to ACTIVE unless
            # there were no switches in the first place!
            if lswitches:
                status = constants.NET_STATUS_ACTIVE
        # Update db object
        if status == neutron_network_data['status']:
            # do nothing
            return

        with context.session.begin(subtransactions=True):
            try:
                network = self._plugin._get_network(context,
                                                    neutron_network_data['id'])
            except exceptions.NetworkNotFound:
                pass
            else:
                network.status = status
                LOG.debug(
                    "Updating status for neutron resource %(q_id)s to:"
                    " %(status)s", {
                        'q_id': neutron_network_data['id'],
                        'status': status
                    })
Example No. 16
def get_nsx_router_id(session, cluster, neutron_router_id):
    """Return the NSX router uuid for a given neutron router.

    First, look up the Neutron database. If not found, execute
    a query on NSX platform as the mapping might be missing.
    """
    if not neutron_router_id:
        return
    nsx_router_id = nsx_db.get_nsx_router_id(
        session, neutron_router_id)
    if not nsx_router_id:
        # Find logical router from backend.
        # This is a rather expensive query, but it won't be executed
        # more than once for each router in Neutron's lifetime
        nsx_routers = routerlib.query_lrouters(
            cluster, '*',
            filters={'tag': neutron_router_id,
                     'tag_scope': 'q_router_id'})
        # Only one result expected
        # NOTE(salv-orlando): Not handling the case where more than one
        # router is found with the same neutron router tag
        if not nsx_routers:
            LOG.warning(_LW("Unable to find NSX router for Neutron router %s"),
                        neutron_router_id)
            return
        nsx_router = nsx_routers[0]
        nsx_router_id = nsx_router['uuid']
        with session.begin(subtransactions=True):
            # Create DB mapping
            nsx_db.add_neutron_nsx_router_mapping(
                session,
                neutron_router_id,
                nsx_router_id)
    return nsx_router_id
Example No. 17
    def _setup_nsx_dhcp_metadata(self):
        self._check_services_requirements()
        nsx_svc.register_dhcp_opts(cfg)
        nsx_svc.register_metadata_opts(cfg)
        lsnmanager.register_lsn_opts(cfg)
        lsn_manager = lsnmanager.PersistentLsnManager(self.safe_reference)
        self.lsn_manager = lsn_manager
        if cfg.CONF.NSX.agent_mode == config.AgentModes.AGENTLESS:
            notifier = nsx_svc.DhcpAgentNotifyAPI(self.safe_reference,
                                                  lsn_manager)
            self.agent_notifiers[const.AGENT_TYPE_DHCP] = notifier
            # In agentless mode, ports whose owner is DHCP need to
            # be special cased, so add DHCP to the list of special
            # port owners
            if const.DEVICE_OWNER_DHCP not in self.port_special_owners:
                self.port_special_owners.append(const.DEVICE_OWNER_DHCP)
        elif cfg.CONF.NSX.agent_mode == config.AgentModes.COMBINED:
            # This becomes ineffective, as all new network creations
            # are handled by Logical Services Nodes in NSX
            cfg.CONF.set_override('network_auto_schedule', False)
            LOG.warning(_LW('network_auto_schedule has been disabled'))
            notifier = combined.DhcpAgentNotifyAPI(self.safe_reference,
                                                   lsn_manager)
            self.supported_extension_aliases.append(lsn.EXT_ALIAS)
            # Add the capability to migrate dhcp and metadata services over
            self.migration_manager = (
                migration.MigrationManager(
                    self.safe_reference, lsn_manager, notifier))
        return notifier
Example No. 18
    def _dvs_create_network(self, context, network):
        net_data = network['network']
        if net_data['admin_state_up'] is False:
            LOG.warning(
                _LW("Networks with admin_state_up=False are not yet "
                    "supported by this plugin. Ignoring setting for "
                    "network %s"), net_data.get('name', '<unknown>'))
        net_data['id'] = str(uuid.uuid4())
        vlan_tag = 0
        if net_data.get(pnet.NETWORK_TYPE) == c_utils.NetworkTypes.VLAN:
            vlan_tag = net_data.get(pnet.SEGMENTATION_ID, 0)

        net_id = None
        if net_data.get(pnet.NETWORK_TYPE) == c_utils.NetworkTypes.PORTGROUP:
            net_id = net_data.get(pnet.PHYSICAL_NETWORK)
            dvpg_moref = self._dvs._net_id_to_moref(net_id)
            pg_info = self._dvs.get_portgroup_info(dvpg_moref)
            if pg_info.get('name') != net_data.get('name'):
                err_msg = (_("Portgroup name %(dvpg)s must match network "
                             "name %(network)s") % {
                                 'dvpg': pg_info.get('name'),
                                 'network': net_data.get('name')
                             })
                raise n_exc.InvalidInput(error_message=err_msg)
            dvs_id = dvpg_moref.value
        else:
            dvs_id = self._dvs_get_id(net_data)
            self._dvs.add_port_group(dvs_id, vlan_tag)

        try:
            with context.session.begin(subtransactions=True):
                new_net = super(NsxDvsV2,
                                self).create_network(context, network)
                # Process port security extension
                self._process_network_port_security_create(
                    context, net_data, new_net)

                nsx_db.add_network_binding(context.session, new_net['id'],
                                           net_data.get(pnet.NETWORK_TYPE),
                                           net_id or 'dvs', vlan_tag)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Failed to create network'))
                if (net_data.get(pnet.NETWORK_TYPE) !=
                        c_utils.NetworkTypes.PORTGROUP):
                    self._dvs.delete_port_group(dvs_id)

        new_net[pnet.NETWORK_TYPE] = net_data.get(pnet.NETWORK_TYPE)
        new_net[pnet.PHYSICAL_NETWORK] = net_id or 'dvs'
        new_net[pnet.SEGMENTATION_ID] = vlan_tag

        # this extra lookup is necessary to get the
        # latest db model for the extension functions
        net_model = self._get_network(context, net_data['id'])
        self._apply_dict_extend_functions('networks', new_net, net_model)

        self.handle_network_dhcp_access(context,
                                        new_net,
                                        action='create_network')
        return new_net
Example No. 19
def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id):
    """Get port by neutron tag.

    Returns the NSX UUID of the logical port with tag q_port_id equal to
    neutron_port_id or None if the port is not found.
    """
    uri = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE,
                                 parent_resource_id=lswitch_uuid,
                                 fields='uuid',
                                 filters={'tag': neutron_port_id,
                                          'tag_scope': 'q_port_id'})
    LOG.debug("Looking for port with q_port_id tag '%(neutron_port_id)s' "
              "on: '%(lswitch_uuid)s'",
              {'neutron_port_id': neutron_port_id,
               'lswitch_uuid': lswitch_uuid})
    res = nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
    num_results = len(res["results"])
    if num_results >= 1:
        if num_results > 1:
            LOG.warning(_LW("Found '%(num_ports)d' ports with "
                            "q_port_id tag: '%(neutron_port_id)s'. "
                            "Only 1 was expected."),
                        {'num_ports': num_results,
                         'neutron_port_id': neutron_port_id})
        return res["results"][0]
Example No. 20
    def check_edge_jobs(self, edge_id):
        retries = max(cfg.CONF.nsxv.retries, 1)
        delay = 0.5
        for attempt in range(1, retries + 1):
            if attempt != 1:
                time.sleep(delay)
                delay = min(2 * delay, 60)
            h, jobs = self.vcns.get_edge_jobs(edge_id)
            if jobs['edgeJob'] == []:
                return
            job_number = len(jobs['edgeJob'])
            # Assume a job times out after 20 minutes and takes about
            # 1 minute to complete.
            if job_number < 20:
                LOG.warning(_LW("NSXv: %(num)s jobs still running on edge "
                                "%(edge_id)s."),
                            {'num': job_number,
                             'edge_id': edge_id})
            else:
                LOG.error(_LE("NSXv: %(num)s jobs still running on edge "
                              "%(edge_id)s. Too many jobs may lead to job "
                              "time out at the backend"),
                          {'num': job_number,
                           'edge_id': edge_id})
        LOG.error(_LE('NSXv: jobs are still running!'))
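The retry pacing above is a capped exponential backoff; the sleep intervals it produces can be previewed without touching an edge at all (seven attempts shown here):

delay = 0.5
sleeps = []
for attempt in range(1, 8):
    if attempt != 1:
        sleeps.append(delay)
        delay = min(2 * delay, 60)
print(sleeps)   # [0.5, 1.0, 2.0, 4.0, 8.0, 16.0]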
Example No. 21
    def _synchronize_lrouters(self, ctx, lr_uuids, scan_missing=False):
        if not lr_uuids and not scan_missing:
            return
        # TODO(salvatore-orlando): Deal with the case where the tag
        # has been tampered with
        neutron_router_mappings = {}
        for lr_uuid in lr_uuids:
            lrouter = (self._nsx_cache[lr_uuid].get('data') or
                       self._nsx_cache[lr_uuid].get('data_bk'))
            tags = self._get_tag_dict(lrouter['tags'])
            neutron_router_id = tags.get('q_router_id')
            if neutron_router_id:
                neutron_router_mappings[neutron_router_id] = (
                    self._nsx_cache[lr_uuid])
            else:
                LOG.warning(_LW("Unable to find Neutron router id for "
                                "NSX logical router: %s"), lr_uuid)
        # Fetch neutron routers from database
        filters = ({} if scan_missing else
                   {'id': neutron_router_mappings.keys()})
        routers = self._plugin._get_collection(
            ctx, l3_db.Router, self._plugin._make_router_dict,
            filters=filters)
        for router in routers:
            lrouter = neutron_router_mappings.get(router['id'])
            self.synchronize_router(
                ctx, router, lrouter and lrouter.get('data'))
Example No. 22
    def delete_edge(self, context, router_id, edge_id, dist=False):
        try:
            nsxv_db.delete_nsxv_router_binding(context.session, router_id)
            if not dist:
                nsxv_db.clean_edge_vnic_binding(context.session, edge_id)
        except sa_exc.NoResultFound:
            LOG.warning(_LW("Router Binding for %s not found"), router_id)

        if edge_id:
            try:
                self.vcns.delete_edge(edge_id)
                return True
            except exceptions.ResourceNotFound:
                return True
            except exceptions.VcnsApiException as e:
                LOG.exception(
                    _LE("VCNS: Failed to delete %(edge_id)s:\n"
                        "%(response)s"), {
                            'edge_id': edge_id,
                            'response': e.response
                        })
                return False
            except Exception:
                LOG.exception(_LE("VCNS: Failed to delete %s"), edge_id)
                return False
Example No. 23
    def lsn_port_get_by_mac(self, context, network_id, mac,
                            raise_on_err=True):
        """Retrieve LSN and LSN port given network and mac address."""
        lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err)
        if lsn_id:
            try:
                lsn_port_id = lsn_api.lsn_port_by_mac_get(
                    self.cluster, lsn_id, mac)
            except (n_exc.NotFound, api_exc.NsxApiException):
                if raise_on_err:
                    LOG.error(_LE('Unable to find Logical Service Node Port '
                                  'for LSN %(lsn_id)s and mac address '
                                  '%(mac)s'),
                              {'lsn_id': lsn_id, 'mac': mac})
                    raise p_exc.LsnPortNotFound(lsn_id=lsn_id,
                                                entity='MAC',
                                                entity_id=mac)
                else:
                    LOG.warning(_LW('Unable to find Logical Service Node '
                                    'Port for LSN %(lsn_id)s and mac address '
                                    '%(mac)s'),
                                {'lsn_id': lsn_id, 'mac': mac})
                return (lsn_id, None)
            else:
                return (lsn_id, lsn_port_id)
        else:
            return (None, None)
Example No. 24
    def lsn_delete(self, context, lsn_id):
        """Delete a LSN given its id."""
        try:
            lsn_api.lsn_delete(self.cluster, lsn_id)
        except (n_exc.NotFound, api_exc.NsxApiException):
            LOG.warning(_LW('Unable to delete Logical Service Node %s'),
                        lsn_id)
Example No. 25
    def check_edge_jobs(self, edge_id):
        retries = max(cfg.CONF.nsxv.retries, 1)
        delay = 0.5
        for attempt in range(1, retries + 1):
            if attempt != 1:
                time.sleep(delay)
                delay = min(2 * delay, 60)
            h, jobs = self.vcns.get_edge_jobs(edge_id)
            if jobs['edgeJob'] == []:
                return
            job_number = len(jobs['edgeJob'])
            # Assume a job times out after 20 minutes and takes about
            # 1 minute to complete.
            if job_number < 20:
                LOG.warning(
                    _LW("NSXv: %(num)s jobs still running on edge "
                        "%(edge_id)s."), {
                            'num': job_number,
                            'edge_id': edge_id
                        })
            else:
                LOG.error(
                    _LE("NSXv: %(num)s jobs still running on edge "
                        "%(edge_id)s. Too many jobs may lead to job "
                        "time out at the backend"), {
                            'num': job_number,
                            'edge_id': edge_id
                        })
        LOG.error(_LE('NSXv: jobs are still running!'))
Example No. 26
    def _proxy(self, proxy_for, uri, *args, **kwargs):
        # proxy the http request call to an available endpoint
        with self.endpoint_connection() as conn_data:
            conn = conn_data.connection
            endpoint = conn_data.endpoint

            # http conn must support requests style interface
            do_request = getattr(conn, proxy_for)

            if not uri.startswith('/'):
                uri = "/%s" % uri
            url = "%s%s" % (endpoint.provider.url, uri)
            try:
                LOG.debug("API cluster proxy %s %s to %s",
                          proxy_for.upper(), uri, url)
                # call the actual connection method to do the
                # http request/response over the wire
                response = do_request(url, *args, **kwargs)
                endpoint.set_state(EndpointState.UP)

                return response
            except Exception as e:
                LOG.warning(_LW("Request failed due to: %s"), e)
                if not self._http_provider.is_connection_exception(e):
                    # only trap and retry connection errors
                    raise e
                endpoint.set_state(EndpointState.DOWN)
                LOG.debug("Connection to %s failed, checking additional "
                          "endpoints", url)
                # retry until exhausting endpoints
                return self._proxy(proxy_for, uri, *args, **kwargs)
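The method above retries by recursing into itself after marking the endpoint DOWN; an equivalent iterative sketch of the same "retry connection errors only, until the endpoints are exhausted" idea, with stand-in objects and no NSX involved:

class FakeConnectionError(Exception):
    pass

def do_request(endpoint):
    if endpoint != '10.0.0.2':
        raise FakeConnectionError('connection refused')
    return '200 OK'

def proxy(endpoints):
    for endpoint in endpoints:                  # retry until endpoints are exhausted
        try:
            return do_request(endpoint)
        except FakeConnectionError as e:
            print('endpoint %s marked DOWN: %s' % (endpoint, e))
    raise RuntimeError('no live endpoints left')

print(proxy(['10.0.0.1', '10.0.0.2']))          # falls through to the second endpoint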
Example No. 27
def get_nsx_switch_ids(session, cluster, neutron_network_id):
    """Return the NSX switch id for a given neutron network.

    First lookup for mappings in Neutron database. If no mapping is
    found, query the NSX backend and add the mappings.
    """
    nsx_switch_ids = nsx_db.get_nsx_switch_ids(session, neutron_network_id)
    if not nsx_switch_ids:
        # Find logical switches from backend.
        # This is a rather expensive query, but it won't be executed
        # more than once for each network in Neutron's lifetime
        nsx_switches = switchlib.get_lswitches(cluster, neutron_network_id)
        if not nsx_switches:
            LOG.warning(
                _LW("Unable to find NSX switches for Neutron network "
                    "%s"), neutron_network_id)
            return
        nsx_switch_ids = []
        with session.begin(subtransactions=True):
            for nsx_switch in nsx_switches:
                nsx_switch_id = nsx_switch['uuid']
                nsx_switch_ids.append(nsx_switch_id)
                # Create DB mapping
                nsx_db.add_neutron_nsx_network_mapping(session,
                                                       neutron_network_id,
                                                       nsx_switch_id)
    return nsx_switch_ids
Example No. 28
    def lsn_port_get(self, context, network_id, subnet_id, raise_on_err=True):
        """Retrieve LSN and LSN port for the network and the subnet."""
        lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err)
        if lsn_id:
            try:
                lsn_port_id = lsn_api.lsn_port_by_subnet_get(
                    self.cluster, lsn_id, subnet_id)
            except (n_exc.NotFound, api_exc.NsxApiException):
                if raise_on_err:
                    LOG.error(_LE('Unable to find Logical Service Node Port '
                                  'for LSN %(lsn_id)s and subnet '
                                  '%(subnet_id)s'),
                              {'lsn_id': lsn_id, 'subnet_id': subnet_id})
                    raise p_exc.LsnPortNotFound(lsn_id=lsn_id,
                                                entity='subnet',
                                                entity_id=subnet_id)
                else:
                    LOG.warning(_LW('Unable to find Logical Service Node Port '
                                    'for LSN %(lsn_id)s and subnet '
                                    '%(subnet_id)s'),
                                {'lsn_id': lsn_id, 'subnet_id': subnet_id})
                return (lsn_id, None)
            else:
                return (lsn_id, lsn_port_id)
        else:
            return (None, None)
Example No. 29
    def release_connection(self,
                           http_conn,
                           bad_state=False,
                           service_unavail=False,
                           rid=-1):
        '''Mark HTTPConnection instance as available for check-out.

        :param http_conn: An HTTPConnection instance obtained from this
            instance.
        :param bad_state: True if http_conn is known to be in a bad state
                (e.g. connection fault.)
        :param service_unavail: True if http_conn returned 503 response.
        :param rid: request id passed in from request eventlet.
        '''
        conn_params = self._conn_params(http_conn)
        if self._conn_params(http_conn) not in self._api_providers:
            LOG.debug(
                "[%(rid)d] Released connection %(conn)s is not an "
                "API provider for the cluster", {
                    'rid': rid,
                    'conn': api_client.ctrl_conn_to_str(http_conn)
                })
            return
        elif hasattr(http_conn, "no_release"):
            return

        priority = http_conn.priority
        if bad_state:
            # Reconnect to provider.
            LOG.warning(
                _LW("[%(rid)d] Connection returned in bad state, "
                    "reconnecting to %(conn)s"), {
                        'rid': rid,
                        'conn': api_client.ctrl_conn_to_str(http_conn)
                    })
            http_conn = self._create_connection(*self._conn_params(http_conn))
        elif service_unavail:
            # http_conn returned a service unavailable response; put other
            # connections to the same controller at the end of the priority queue
            conns = []
            while not self._conn_pool.empty():
                priority, conn = self._conn_pool.get()
                if self._conn_params(conn) == conn_params:
                    priority = self._next_conn_priority
                    self._next_conn_priority += 1
                conns.append((priority, conn))
            for priority, conn in conns:
                self._conn_pool.put((priority, conn))
            # put http_conn at end of queue also
            priority = self._next_conn_priority
            self._next_conn_priority += 1

        self._conn_pool.put((priority, http_conn))
        LOG.debug(
            "[%(rid)d] Released connection %(conn)s. %(qsize)d "
            "connection(s) available.", {
                'rid': rid,
                'conn': api_client.ctrl_conn_to_str(http_conn),
                'qsize': self._conn_pool.qsize()
            })
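The "push every connection to the same controller to the back of the queue" step relies on an ever-increasing priority counter; a tiny illustration with the standard-library PriorityQueue:

from queue import PriorityQueue

pool = PriorityQueue()
for priority, name in [(1, 'conn-a'), (2, 'conn-b'), (3, 'conn-c')]:
    pool.put((priority, name))

next_priority = 4
demoted = 'conn-a'                       # e.g. its controller just returned 503
items = []
while not pool.empty():
    priority, name = pool.get()
    if name == demoted:
        priority, next_priority = next_priority, next_priority + 1
    items.append((priority, name))
for item in items:
    pool.put(item)

print([pool.get() for _ in range(3)])
# [(2, 'conn-b'), (3, 'conn-c'), (4, 'conn-a')]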
Example No. 30
    def _setup_nsx_dhcp_metadata(self):
        self._check_services_requirements()
        nsx_svc.register_dhcp_opts(cfg)
        nsx_svc.register_metadata_opts(cfg)
        lsnmanager.register_lsn_opts(cfg)
        lsn_manager = lsnmanager.PersistentLsnManager(self.safe_reference)
        self.lsn_manager = lsn_manager
        if cfg.CONF.NSX.agent_mode == config.AgentModes.AGENTLESS:
            notifier = nsx_svc.DhcpAgentNotifyAPI(self.safe_reference,
                                                  lsn_manager)
            self.agent_notifiers[const.AGENT_TYPE_DHCP] = notifier
            # In agentless mode, ports whose owner is DHCP need to
            # be special cased, so add DHCP to the list of special
            # port owners
            if const.DEVICE_OWNER_DHCP not in self.port_special_owners:
                self.port_special_owners.append(const.DEVICE_OWNER_DHCP)
        elif cfg.CONF.NSX.agent_mode == config.AgentModes.COMBINED:
            # This becomes ineffective, as all new network creations
            # are handled by Logical Services Nodes in NSX
            cfg.CONF.set_override('network_auto_schedule', False)
            LOG.warning(_LW('network_auto_schedule has been disabled'))
            notifier = combined.DhcpAgentNotifyAPI(self.safe_reference,
                                                   lsn_manager)
            self.supported_extension_aliases.append(lsn.EXT_ALIAS)
            # Add the capability to migrate dhcp and metadata services over
            self.migration_manager = (migration.MigrationManager(
                self.safe_reference, lsn_manager, notifier))
        return notifier
Example No. 31
def get_nsx_switch_ids(session, cluster, neutron_network_id):
    """Return the NSX switch id for a given neutron network.

    First lookup for mappings in Neutron database. If no mapping is
    found, query the NSX backend and add the mappings.
    """
    nsx_switch_ids = nsx_db.get_nsx_switch_ids(
        session, neutron_network_id)
    if not nsx_switch_ids:
        # Find logical switches from backend.
        # This is a rather expensive query, but it won't be executed
        # more than once for each network in Neutron's lifetime
        nsx_switches = switchlib.get_lswitches(cluster, neutron_network_id)
        if not nsx_switches:
            LOG.warning(_LW("Unable to find NSX switches for Neutron network "
                            "%s"), neutron_network_id)
            return
        nsx_switch_ids = []
        with session.begin(subtransactions=True):
            for nsx_switch in nsx_switches:
                nsx_switch_id = nsx_switch['uuid']
                nsx_switch_ids.append(nsx_switch_id)
                # Create DB mapping
                nsx_db.add_neutron_nsx_network_mapping(
                    session,
                    neutron_network_id,
                    nsx_switch_id)
    return nsx_switch_ids
Example No. 32
def handle_router_metadata_access(plugin, context, router_id, interface=None):
    # For instances created in a DHCP-disabled network but connected to
    # a router.
    # The parameter "interface" is only used as a Boolean flag to indicate
    # whether to add (True) or delete (False) an internal metadata network.
    plugin_cfg = getattr(cfg.CONF, plugin.cfg_group)
    if plugin_cfg.metadata_mode != config.MetadataModes.DIRECT:
        LOG.debug("Metadata access network is disabled")
        return
    if not cfg.CONF.allow_overlapping_ips:
        LOG.warning(
            _LW("Overlapping IPs must be enabled in order to setup "
                "the metadata access network"))
        return
    ctx_elevated = context.elevated()
    on_demand = getattr(plugin_cfg, 'metadata_on_demand', False)
    try:
        if interface:
            # Add interface case
            filters = {
                'device_id': [router_id],
                'device_owner': const.ROUTER_INTERFACE_OWNERS,
                'fixed_ips': {
                    'ip_address': [METADATA_GATEWAY_IP]
                }
            }
            # Retrieve metadata ports by calling database plugin
            ports = db_base_plugin_v2.NeutronDbPluginV2.get_ports(
                plugin, ctx_elevated, filters=filters)
            if not ports and (not on_demand
                              or _find_dhcp_disabled_subnet_by_router(
                                  plugin, ctx_elevated, router_id)):
                _create_metadata_access_network(plugin, ctx_elevated,
                                                router_id)
        else:
            # Remove interface case
            filters = {
                'device_id': [router_id],
                'device_owner': const.ROUTER_INTERFACE_OWNERS
            }
            # Retrieve router interface ports by calling database plugin
            ports = db_base_plugin_v2.NeutronDbPluginV2.get_ports(
                plugin, ctx_elevated, filters=filters)
            if len(ports) == 1 or (on_demand
                                   and not _find_dhcp_disabled_subnet_by_port(
                                       plugin, ctx_elevated, ports)):
                # Delete the internal metadata network if the router port
                # is the last port left or no DHCP-disabled subnets remain
                # attached to the router.
                _destroy_metadata_access_network(plugin, ctx_elevated,
                                                 router_id, ports)
    # TODO(salvatore-orlando): A better exception handling in the
    # NSX plugin would allow us to improve error handling here
    except (ntn_exc.NeutronException, nsx_exc.NsxPluginException,
            api_exc.NsxApiException):
        # Any exception here should be regarded as non-fatal
        LOG.exception(
            _LE("An error occurred while operating on the "
                "metadata access network for router:'%s'"), router_id)
Example No. 33
def get_nsx_device_statuses(cluster, tenant_id):
    try:
        status_dict = l2gwlib.get_gateway_devices_status(
            cluster, tenant_id)
        return dict((nsx_device_id,
                     networkgw_db.STATUS_ACTIVE if connected
                     else networkgw_db.STATUS_DOWN) for
                    (nsx_device_id, connected) in six.iteritems(status_dict))
    except api_exc.NsxApiException:
        # Do not make a NSX API exception fatal
        if tenant_id:
            LOG.warning(_LW("Unable to retrieve operational status for "
                            "gateway devices belonging to tenant: %s"),
                        tenant_id)
        else:
            LOG.warning(_LW("Unable to retrieve operational status for "
                            "gateway devices"))
Example No. 34
def get_nsx_device_statuses(cluster, tenant_id):
    try:
        status_dict = l2gwlib.get_gateway_devices_status(cluster, tenant_id)
        return dict(
            (nsx_device_id,
             networkgw_db.STATUS_ACTIVE if connected
             else networkgw_db.STATUS_DOWN)
            for (nsx_device_id, connected) in six.iteritems(status_dict))
    except api_exc.NsxApiException:
        # Do not make a NSX API exception fatal
        if tenant_id:
            LOG.warning(
                _LW("Unable to retrieve operational status for "
                    "gateway devices belonging to tenant: %s"), tenant_id)
        else:
            LOG.warning(
                _LW("Unable to retrieve operational status for "
                    "gateway devices"))
Example No. 35
def migrate_compute_ports_vms(resource, event, trigger, **kwargs):
    """Update the VM ports on the backend after migrating nsx-v -> nsx-v3

    After using api_replay to migrate the neutron data from NSX-V to NSX-T
    we need to update the VM ports to use OpaqueNetwork instead of
    DistributedVirtualPortgroup
    """
    # Connect to the DVS manager, using the configuration parameters
    try:
        dvs_mng = dvs.DvsManager()
    except Exception as e:
        LOG.error(
            _LE("Cannot connect to the DVS: Please update the [dvs] "
                "section in the nsx.ini file: %s"), e)
        return

    # Go over all the compute ports from the plugin
    admin_cxt = neutron_context.get_admin_context()
    port_filters = {'device_owner': ['compute:None']}
    with PortsPlugin() as plugin:
        neutron_ports = plugin.get_ports(admin_cxt, filters=port_filters)

    for port in neutron_ports:
        device_id = port.get('device_id')

        # get the vm moref & spec from the DVS
        vm_moref = dvs_mng.get_vm_moref_obj(device_id)
        vm_spec = dvs_mng.get_vm_spec(vm_moref)

        # Go over the VM interfaces and check if it should be updated
        update_spec = False
        for prop in vm_spec.propSet:
            if (prop.name == 'network'
                    and hasattr(prop.val, 'ManagedObjectReference')):
                for net in prop.val.ManagedObjectReference:
                    if net._type == 'DistributedVirtualPortgroup':
                        update_spec = True

        if not update_spec:
            LOG.info(_LI("No need to update the spec of vm %s"), device_id)
            continue

        # find the old interface by its mac and delete it
        device = get_vm_network_device(dvs_mng, vm_moref, port['mac_address'])
        if device is None:
            LOG.warning(_LW("No device with MAC address %s exists on the VM"),
                        port['mac_address'])
            continue
        device_type = device.__class__.__name__

        LOG.info(_LI("Detaching old interface from VM %s"), device_id)
        dvs_mng.detach_vm_interface(vm_moref, device)

        # add the new interface as OpaqueNetwork
        LOG.info(_LI("Attaching new interface to VM %s"), device_id)
        nsx_net_id = get_network_nsx_id(admin_cxt.session, port['network_id'])
        dvs_mng.attach_vm_interface(vm_moref, port['id'], port['mac_address'],
                                    nsx_net_id, device_type)
Example No. 36
    def _check_ikepolicy_ipsecpolicy_allowed(self, ikepolicy, ipsecpolicy):
        """Check whether ikepolicy and ipsecpolicy are allowed on vshield edge.

        Some IPsec VPN configurations and features are configured by default or
        not supported on vshield edge.

        """
        # Check validation of IKEPolicy.
        if ikepolicy['ike_version'] != 'v1':
            msg = _("Unsupported ike_version: %s! Only 'v1' ike version is "
                    "supported on vshield Edge!") % ikepolicy['ike_version']
            LOG.warning(msg)
            raise vcns_exc.VcnsBadRequest(resource='ikepolicy', msg=msg)

        # In VSE, Phase 1 and Phase 2 share the same encryption_algorithm
        # and authentication algorithm settings. At present, just log the
        # discrepancy and use the ipsecpolicy values for the configuration.
        if (ikepolicy['auth_algorithm'] != ipsecpolicy['auth_algorithm']
                or ikepolicy['encryption_algorithm'] !=
                ipsecpolicy['encryption_algorithm']
                or ikepolicy['pfs'] != ipsecpolicy['pfs']):
            LOG.warning(
                _LW("IKEPolicy and IPsecPolicy should have consistent "
                    "auth_algorithm, encryption_algorithm and pfs for VSE!"))

        # Check whether encryption_algorithm is allowed.
        encryption_algorithm = ENCRYPTION_ALGORITHM_MAP.get(
            ipsecpolicy.get('encryption_algorithm'), None)
        if not encryption_algorithm:
            msg = _("Unsupported encryption_algorithm: %s! '3des', "
                    "'aes-128' and 'aes-256' are supported on VSE right now."
                    ) % ipsecpolicy['encryption_algorithm']
            LOG.warning(msg)
            raise vcns_exc.VcnsBadRequest(resource='ipsecpolicy', msg=msg)

        # Check whether pfs is allowed.
        if not PFS_MAP.get(ipsecpolicy['pfs']):
            msg = _("Unsupported pfs: %s! 'group2' and 'group5' "
                    "are supported on VSE right now.") % ipsecpolicy['pfs']
            LOG.warning(msg)
            raise vcns_exc.VcnsBadRequest(resource='ipsecpolicy', msg=msg)

        # Check whether transform protocol is allowed.
        if ipsecpolicy['transform_protocol'] not in TRANSFORM_PROTOCOL_ALLOWED:
            msg = _("Unsupported transform protocol: %s! 'esp' is supported "
                    "by default on VSE right now."
                    ) % ipsecpolicy['transform_protocol']
            LOG.warning(msg)
            raise vcns_exc.VcnsBadRequest(resource='ipsecpolicy', msg=msg)

        # Check whether encapsulation mode is allowed.
        if ipsecpolicy['encapsulation_mode'] not in ENCAPSULATION_MODE_ALLOWED:
            msg = _("Unsupported encapsulation mode: %s! 'tunnel' is "
                    "supported by default on VSE right now."
                    ) % ipsecpolicy['encapsulation_mode']
            LOG.warning(msg)
            raise vcns_exc.VcnsBadRequest(resource='ipsecpolicy', msg=msg)
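Most of the checks above follow the same "look the value up in an allow-map, reject if missing" pattern; a generic sketch of that pattern (the map contents here are invented for illustration, not the real VSE mappings):

ENCRYPTION_ALGORITHM_MAP = {'3des': '3des', 'aes-128': 'aes', 'aes-256': 'aes256'}

def validate_choice(value, allowed, what):
    mapped = allowed.get(value)
    if not mapped:
        raise ValueError("Unsupported %s: %s! Supported values: %s"
                         % (what, value, ', '.join(sorted(allowed))))
    return mapped

print(validate_choice('aes-128', ENCRYPTION_ALGORITHM_MAP,
                      'encryption_algorithm'))   # aes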
Example No. 37
    def delete_ipsec_config(self, edge_id):
        try:
            self.vcns.delete_ipsec_config(edge_id)
        except vcns_exc.ResourceNotFound:
            LOG.warning(_LW("IPsec config not found on edge: %s"), edge_id)
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Failed to delete ipsec vpn configuration "
                                  "with edge_id: %s"), edge_id)
Example No. 38
    def delete_router(self, context, router_id):
        self.edge_manager.delete_lrouter(context, router_id, dist=True)

        # This should address cases where the binding remains due to breakage
        if nsxv_db.get_vdr_dhcp_binding_by_vdr(context.session, router_id):
            LOG.warning(
                _LW("DHCP binding wasn't cleaned up for router %s. "
                    "Cleaning up the entry"), router_id)
            nsxv_db.delete_vdr_dhcp_binding(context.session, router_id)
Example No. 39
    def synchronize_network(self, context, neutron_network_data,
                            lswitches=None):
        """Synchronize a Neutron network with its NSX counterpart.

        This routine synchronizes a set of switches when a Neutron
        network is mapped to multiple lswitches.
        """
        if not lswitches:
            # Try to get logical switches from nsx
            try:
                lswitches = nsx_utils.fetch_nsx_switches(
                    context.session, self._cluster,
                    neutron_network_data['id'])
            except exceptions.NetworkNotFound:
                # TODO(salv-orlando): We should be catching
                # api_exc.ResourceNotFound here
                # The logical switch was not found
                LOG.warning(_LW("Logical switch for neutron network %s not "
                                "found on NSX."), neutron_network_data['id'])
                lswitches = []
            else:
                for lswitch in lswitches:
                    self._nsx_cache.update_lswitch(lswitch)
        # By default assume things go wrong
        status = constants.NET_STATUS_ERROR
        # In most cases lswitches will contain a single element
        for ls in lswitches:
            if not ls:
                # Logical switch was deleted
                break
            ls_status = ls['_relations']['LogicalSwitchStatus']
            if not ls_status['fabric_status']:
                status = constants.NET_STATUS_DOWN
                break
        else:
            # No switch was down or missing. Set status to ACTIVE unless
            # there were no switches in the first place!
            if lswitches:
                status = constants.NET_STATUS_ACTIVE
        # Update db object
        if status == neutron_network_data['status']:
            # do nothing
            return

        with context.session.begin(subtransactions=True):
            try:
                network = self._plugin._get_network(context,
                                                    neutron_network_data['id'])
            except exceptions.NetworkNotFound:
                pass
            else:
                network.status = status
                LOG.debug("Updating status for neutron resource %(q_id)s to:"
                          " %(status)s",
                          {'q_id': neutron_network_data['id'],
                           'status': status})
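
A hedged usage sketch for the method above; resync_all_networks, plugin and synchronizer are assumed names used here for illustration and are not part of the plugin itself.

def resync_all_networks(context, plugin, synchronizer):
    # Walk every Neutron network and realign its status with the NSX backend.
    for net in plugin.get_networks(context):
        # lswitches=None forces synchronize_network() to query NSX itself.
        synchronizer.synchronize_network(context, net, lswitches=None)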
Exemplo n.º 40
0
def delete_security_profile(cluster, spid):
    path = "/ws.v1/security-profile/%s" % spid

    try:
        nsxlib.do_request(HTTP_DELETE, path, cluster=cluster)
    except exceptions.NotFound:
        with excutils.save_and_reraise_exception():
            # This is not necessarily an error condition
            LOG.warning(_LW("Unable to find security profile %s on NSX "
                            "backend"), spid)
Exemplo n.º 41
0
def find_version(headers):
    """Retrieve NSX controller version from response headers."""
    for (header_name, header_value) in (headers or ()):
        try:
            if header_name == 'server':
                return Version(header_value.split('/')[1])
        except IndexError:
            LOG.warning(
                _LW("Unable to fetch NSX version from response "
                    "headers: %s"), headers)
Exemplo n.º 43
0
    def _validate(self, endpoint):
        try:
            with endpoint.pool.item() as conn:
                self._http_provider.validate_connection(self, endpoint, conn)
                endpoint.set_state(EndpointState.UP)
                LOG.debug("Validated API cluster endpoint: %s", endpoint)
        except Exception as e:
            endpoint.set_state(EndpointState.DOWN)
            LOG.warning(_LW("Failed to validate API cluster endpoint "
                            "'%(ep)s' due to: %(err)s"),
                        {'ep': endpoint, 'err': e})
Exemplo n.º 44
0
    def lsn_port_dispose(self, context, network_id, mac_address):
        """Delete a LSN port given the network and the mac address."""
        lsn_id, lsn_port_id = self.lsn_port_get_by_mac(
            context, network_id, mac_address, raise_on_err=False)
        if lsn_port_id:
            self.lsn_port_delete(context, lsn_id, lsn_port_id)
            if mac_address == const.METADATA_MAC:
                try:
                    lswitch_port_id = switch_api.get_port_by_neutron_tag(
                        self.cluster, network_id,
                        const.METADATA_PORT_ID)['uuid']
                    switch_api.delete_port(
                        self.cluster, network_id, lswitch_port_id)
                except (n_exc.PortNotFoundOnNetwork,
                        api_exc.NsxApiException):
                    LOG.warning(_LW("Metadata port not found while attempting "
                                    "to delete it from network %s"),
                                network_id)
        else:
            LOG.warning(_LW("Unable to find Logical Services Node "
                            "Port with MAC %s"), mac_address)
Exemplo n.º 45
0
def handle_router_metadata_access(plugin, context, router_id, interface=None):
    # For instances created in a DHCP-disabled network but connected to
    # a router.
    # The parameter "interface" is only used as a Boolean flag to indicate
    # whether to add (True) or delete (False) an internal metadata network.
    plugin_cfg = getattr(cfg.CONF, plugin.cfg_group)
    if plugin_cfg.metadata_mode != config.MetadataModes.DIRECT:
        LOG.debug("Metadata access network is disabled")
        return
    if not cfg.CONF.allow_overlapping_ips:
        LOG.warning(_LW("Overlapping IPs must be enabled in order to setup "
                        "the metadata access network"))
        return
    ctx_elevated = context.elevated()
    on_demand = getattr(plugin_cfg, 'metadata_on_demand', False)
    try:
        if interface:
            # Add interface case
            filters = {'device_id': [router_id],
                       'device_owner': const.ROUTER_INTERFACE_OWNERS,
                       'fixed_ips': {'ip_address': [METADATA_GATEWAY_IP]}}
            # Retrieve metadata ports by calling database plugin
            ports = db_base_plugin_v2.NeutronDbPluginV2.get_ports(
                plugin, ctx_elevated, filters=filters)
            if not ports and (not on_demand or
                _find_dhcp_disabled_subnet_by_router(
                    plugin, ctx_elevated, router_id)):
                _create_metadata_access_network(
                    plugin, ctx_elevated, router_id)
        else:
            # Remove interface case
            filters = {'device_id': [router_id],
                       'device_owner': const.ROUTER_INTERFACE_OWNERS}
            # Retrieve router interface ports by calling database plugin
            ports = db_base_plugin_v2.NeutronDbPluginV2.get_ports(
                plugin, ctx_elevated, filters=filters)
            if len(ports) == 1 or (on_demand and not
                _find_dhcp_disabled_subnet_by_port(
                    plugin, ctx_elevated, ports)):
                # Delete the internal metadata network if the router port
                # is the last port left or no more DHCP-disabled subnet
                # attached to the router.
                _destroy_metadata_access_network(
                    plugin, ctx_elevated, router_id, ports)
    # TODO(salvatore-orlando): A better exception handling in the
    # NSX plugin would allow us to improve error handling here
    except (ntn_exc.NeutronException, nsx_exc.NsxPluginException,
            api_exc.NsxApiException):
        # Any exception here should be regarded as non-fatal
        LOG.exception(_LE("An error occurred while operating on the "
                          "metadata access network for router:'%s'"),
                      router_id)
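
Hypothetical call sites, assuming the plugin invokes this helper from its router-interface handlers with plugin, context and router_id already in scope; only the value of interface differs between the two paths.

# After attaching a subnet to the router: may create the metadata network.
handle_router_metadata_access(plugin, context, router_id, interface=True)

# After detaching an interface: may tear the metadata network down.
handle_router_metadata_access(plugin, context, router_id, interface=False)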
Exemplo n.º 46
0
    def remove_router_link_port(self, tier1_uuid, tier0_uuid):
        try:
            tier1_link_port = (
                self._router_port_client.get_tier1_link_port(tier1_uuid))
        except nsx_exc.ResourceNotFound:
            LOG.warning(_LW("Logical router link port for tier1 router: %s "
                            "not found at the backend"), tier1_uuid)
            return
        tier1_link_port_id = tier1_link_port['id']
        tier0_link_port_id = (
            tier1_link_port['linked_logical_router_port_id'].get('target_id'))
        self._router_port_client.delete(tier1_link_port_id)
        self._router_port_client.delete(tier0_link_port_id)
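
For reference, the fields consumed above imply a backend payload shaped roughly like the assumed example below, simplified to the keys actually read.

# Assumed shape -- only 'id' and the linked-port reference are used above.
tier1_link_port = {
    'id': 'a1b2c3d4-tier1-link-port',             # made-up identifier
    'linked_logical_router_port_id': {
        'target_id': 'e5f6a7b8-tier0-link-port',  # made-up identifier
    },
}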
Exemplo n.º 47
0
    def synchronize_router(self, context, neutron_router_data,
                           lrouter=None):
        """Synchronize a neutron router with its NSX counterpart."""
        if not lrouter:
            # Try to get router from nsx
            try:
                # This query will return the logical router status too
                nsx_router_id = nsx_utils.get_nsx_router_id(
                    context.session, self._cluster, neutron_router_data['id'])
                if nsx_router_id:
                    lrouter = routerlib.get_lrouter(
                        self._cluster, nsx_router_id)
            except exceptions.NotFound:
                # NOTE(salv-orlando): We should be catching
                # api_exc.ResourceNotFound here
                # The logical router was not found
                LOG.warning(_LW("Logical router for neutron router %s not "
                                "found on NSX."), neutron_router_data['id'])
            if lrouter:
                # Update the cache
                self._nsx_cache.update_lrouter(lrouter)

        # Note(salv-orlando): It might be worth adding a check to verify that
        # the neutron resource tag in the nsx entity matches a Neutron id.
        # By default assume things go wrong
        status = constants.NET_STATUS_ERROR
        if lrouter:
            lr_status = (lrouter['_relations']
                         ['LogicalRouterStatus']
                         ['fabric_status'])
            status = (lr_status and
                      constants.NET_STATUS_ACTIVE
                      or constants.NET_STATUS_DOWN)
        # Update db object
        if status == neutron_router_data['status']:
            # do nothing
            return

        with context.session.begin(subtransactions=True):
            try:
                router = self._plugin._get_router(context,
                                                  neutron_router_data['id'])
            except l3.RouterNotFound:
                pass
            else:
                router.status = status
                LOG.debug("Updating status for neutron resource %(q_id)s to:"
                          " %(status)s",
                          {'q_id': neutron_router_data['id'],
                           'status': status})
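
The 'lr_status and ACTIVE or DOWN' expression is the old and/or spelling of a ternary; it works here only because NET_STATUS_ACTIVE is a truthy string. An equivalent, more explicit form, purely illustrative and not part of the plugin:

def _derive_router_status(lrouter):
    if not lrouter:
        return constants.NET_STATUS_ERROR
    fabric_up = lrouter['_relations']['LogicalRouterStatus']['fabric_status']
    return (constants.NET_STATUS_ACTIVE if fabric_up
            else constants.NET_STATUS_DOWN)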
Exemplo n.º 48
0
    def release_connection(self, http_conn, bad_state=False,
                           service_unavail=False, rid=-1):
        '''Mark HTTPConnection instance as available for check-out.

        :param http_conn: An HTTPConnection instance obtained from this
            instance.
        :param bad_state: True if http_conn is known to be in a bad state
                (e.g. connection fault.)
        :param service_unavail: True if http_conn returned 503 response.
        :param rid: request id passed in from request eventlet.
        '''
        conn_params = self._conn_params(http_conn)
        if self._conn_params(http_conn) not in self._api_providers:
            LOG.debug("[%(rid)d] Released connection %(conn)s is not an "
                      "API provider for the cluster",
                      {'rid': rid,
                       'conn': api_client.ctrl_conn_to_str(http_conn)})
            return
        elif hasattr(http_conn, "no_release"):
            return

        priority = http_conn.priority
        if bad_state:
            # Reconnect to provider.
            LOG.warning(_LW("[%(rid)d] Connection returned in bad state, "
                            "reconnecting to %(conn)s"),
                        {'rid': rid,
                         'conn': api_client.ctrl_conn_to_str(http_conn)})
            http_conn = self._create_connection(*self._conn_params(http_conn))
        elif service_unavail:
            # http_conn returned a service unavailable response; put other
            # connections to the same controller at the end of the priority
            # queue.
            conns = []
            while not self._conn_pool.empty():
                priority, conn = self._conn_pool.get()
                if self._conn_params(conn) == conn_params:
                    priority = self._next_conn_priority
                    self._next_conn_priority += 1
                conns.append((priority, conn))
            for priority, conn in conns:
                self._conn_pool.put((priority, conn))
            # put http_conn at end of queue also
            priority = self._next_conn_priority
            self._next_conn_priority += 1

        self._conn_pool.put((priority, http_conn))
        LOG.debug("[%(rid)d] Released connection %(conn)s. %(qsize)d "
                  "connection(s) available.",
                  {'rid': rid, 'conn': api_client.ctrl_conn_to_str(http_conn),
                   'qsize': self._conn_pool.qsize()})
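
A sketch of the check-out/check-in discipline a caller is expected to follow; client is assumed to be the API client owning this pool, acquire_connection() is assumed to be its companion check-out method, and issue_request() is a hypothetical helper.

import socket

def do_request(client, rid=-1):
    conn = client.acquire_connection(rid=rid)
    try:
        issue_request(conn)
    except socket.error:
        # Transport fault: hand the connection back flagged so it is rebuilt.
        client.release_connection(conn, bad_state=True, rid=rid)
        raise
    else:
        client.release_connection(conn, rid=rid)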
Exemplo n.º 49
0
def get_nsx_switch_and_port_id(session, cluster, neutron_port_id):
    """Return the NSX switch and port uuids for a given neutron port.

    First, look up the Neutron database. If not found, execute
    a query on NSX platform as the mapping might be missing because
    the port was created before upgrading to grizzly.

    This routine also retrieves the identifier of the logical switch in
    the backend where the port is plugged. Prior to Icehouse this
    information was not available in the Neutron database. To deal
    with pre-existing records, this routine queries the backend to
    retrieve the correct switch identifier.

    As of the Icehouse release it is no longer possible to assume that
    the backend logical switch identifier is equal to the neutron
    network identifier.
    """
    nsx_switch_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id(
        session, neutron_port_id)
    if not nsx_switch_id:
        # Find logical switch for port from backend
        # This is a rather expensive query, but it won't be executed
        # more than once for each port in Neutron's lifetime
        nsx_ports = switchlib.query_lswitch_lports(
            cluster, '*', relations='LogicalSwitchConfig',
            filters={'tag': neutron_port_id,
                     'tag_scope': 'q_port_id'})
        # Only one result expected
        # NOTE(salv-orlando): Not handling the case where more than one
        # port is found with the same neutron port tag
        if not nsx_ports:
            LOG.warning(_LW("Unable to find NSX port for Neutron port %s"),
                        neutron_port_id)
            # This method is supposed to return a tuple
            return None, None
        nsx_port = nsx_ports[0]
        nsx_switch_id = (nsx_port['_relations']
                         ['LogicalSwitchConfig']['uuid'])
        if nsx_port_id:
            # Mapping already exists. Delete before recreating
            nsx_db.delete_neutron_nsx_port_mapping(
                session, neutron_port_id)
        else:
            nsx_port_id = nsx_port['uuid']
        # (re)Create DB mapping
        nsx_db.add_neutron_nsx_port_mapping(
            session, neutron_port_id,
            nsx_switch_id, nsx_port_id)
    return nsx_switch_id, nsx_port_id
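
A hedged usage sketch: resolving the pair and deleting the backend port. The switchlib.delete_port() call and its argument order are assumptions here, not confirmed by the snippet above.

def delete_backend_port(session, cluster, neutron_port_id):
    nsx_switch_id, nsx_port_id = get_nsx_switch_and_port_id(
        session, cluster, neutron_port_id)
    if not nsx_port_id:
        # No counterpart on the NSX backend; nothing to delete.
        return
    # Assumed signature: delete_port(cluster, lswitch_id, lport_id).
    switchlib.delete_port(cluster, nsx_switch_id, nsx_port_id)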