Example no. 1
def get_ip_availability(**kwargs):
    LOG.debug("Begin querying %s" % kwargs)
    used_ips = get_used_ips(neutron_db_api.get_session(), **kwargs)
    unused_ips = get_unused_ips(neutron_db_api.get_session(), used_ips,
                                **kwargs)
    LOG.debug("End querying")
    return dict(used=used_ips, unused=unused_ips)
Example no. 2
    def _get_agent_fdb(self, segment, port, agent_host):
        if not agent_host:
            return

        network_id = port['network_id']

        session = db_api.get_session()
        agent_active_ports = l2pop_db.get_agent_network_active_port_count(
            session, agent_host, network_id)

        agent = l2pop_db.get_agent_by_host(db_api.get_session(), agent_host)
        if not self._validate_segment(segment, port['id'], agent):
            return

        agent_ip = l2pop_db.get_agent_ip(agent)
        other_fdb_entries = self._get_fdb_entries_template(
            segment, agent_ip, port['network_id'])
        if agent_active_ports == 0:
            # Agent is removing its last activated port in this network,
            # other agents need to be notified to delete their flooding entry.
            other_fdb_entries[network_id]['ports'][agent_ip].append(
                const.FLOODING_ENTRY)
        # Notify other agents to remove fdb rules for current port
        if port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE:
            fdb_entries = self._get_port_fdb_entries(port)
            other_fdb_entries[network_id]['ports'][agent_ip] += fdb_entries

        return other_fdb_entries
Example no. 3
    def update_port_state_with_notifier(self, rpc_context, **kwargs):
        port_id = kwargs.get('port_id')
        network_id = kwargs.get('network_id')
        network_type = kwargs.get('network_type')
        segmentation_id = kwargs.get('segmentation_id')
        physical_network = kwargs.get('physical_network')

        # 1 update segment
        session = db_api.get_session()
        with session.begin(subtransactions=True):
            try:
                query = (session.query(models_ml2.NetworkSegment).
                         filter_by(network_id=network_id))
                query = query.filter_by(physical_network=physical_network)
                query = query.filter_by(is_dynamic=True)
                record = query.one()
                record.segmentation_id = segmentation_id
            except sa_exc.NoResultFound:
                pass

        # 2 change port state
        plugin = manager.NeutronManager.get_plugin()
        plugin.update_port_status(
            rpc_context,
            port_id,
            constants.PORT_STATUS_ACTIVE
        )

        # 3 search db for port_id
        session = db_api.get_session()
        port = None
        with session.begin(subtransactions=True):
            try:
                port_db = (session.query(models_v2.Port).
                           enable_eagerloads(False).
                           filter(models_v2.Port.id.startswith(port_id)).
                           one())
                port = plugin._make_port_dict(port_db)
            except sa_exc.NoResultFound:
                LOG.error(_LE("Can't find port with port_id %s"),
                          port_id)
            except sa_exc.MultipleResultsFound:
                LOG.error(_LE("Multiple ports have port_id starting with %s"),
                          port_id)
        # 4 send notifier
        if port is not None:
            LOG.debug("notifier port_update %(net_type)s, %(seg_id)s, "
                      "%(physnet)s",
                      {'net_type': network_type,
                       'seg_id': segmentation_id,
                       'physnet': physical_network})
            plugin.notifier.port_update(
                rpc_context, port,
                network_type,
                segmentation_id,
                physical_network
            )

        return {}
Example no. 4
def _test_and_create_object(id):
    try:
        session = db_api.get_session()
        with session.begin():
            session.query(models.DFLockedObjects).filter_by(
                object_uuid=id).one()
    except orm_exc.NoResultFound:
        try:
            session = db_api.get_session()
            with session.begin():
                _create_db_row(session, oid=id)
        except db_exc.DBDuplicateEntry:
            # the lock is concurrently created.
            pass
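The snippet above is a common oslo.db check-then-create idiom. Below is a minimal generic sketch of the same pattern, assuming a hypothetical MyModel with a unique name column; the helper and model names are made up, and only the exception handling mirrors the example.

from oslo_db import exception as db_exc
from sqlalchemy.orm import exc as orm_exc


def get_or_create(session, name):
    # Hypothetical helper: look the row up first, create it on
    # NoResultFound, and treat DBDuplicateEntry as "another worker
    # created it concurrently", exactly as in the example above.
    try:
        with session.begin():
            return session.query(MyModel).filter_by(name=name).one()
    except orm_exc.NoResultFound:
        try:
            with session.begin():
                row = MyModel(name=name)
                session.add(row)
                return row
        except db_exc.DBDuplicateEntry:
            # Lost the race; the row exists now, so read it back.
            with session.begin():
                return session.query(MyModel).filter_by(name=name).one()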
Example no. 5
 def test_create_network_with_default_n1kv_network_profile_id(self):
     """Test network create without passing network profile id."""
     with self.network() as network:
         db_session = db.get_session()
         np = n1kv_db_v2.get_network_profile(
             db_session, network['network'][n1kv.PROFILE_ID])
         self.assertEqual(np['name'], 'default_network_profile')
Example no. 6
 def _delete_data(self):
     session = db_api.get_session()
     with session.begin():
         query = session.query(test_quota.MehModel).filter_by(
             tenant_id=self.tenant_id)
         for item in query:
             session.delete(item)
Example no. 7
    def _sync_vxlan_allocations(self):
        """
        Synchronize vxlan_allocations table with configured tunnel ranges.
        """

        # determine current configured allocatable vnis
        vxlan_vnis = set()
        for tun_min, tun_max in self.vxlan_vni_ranges:
            if tun_max + 1 - tun_min > MAX_VXLAN_VNI:
                LOG.error(_("Skipping unreasonable VXLAN VNI range "
                            "%(tun_min)s:%(tun_max)s"),
                          {'tun_min': tun_min, 'tun_max': tun_max})
            else:
                vxlan_vnis |= set(xrange(tun_min, tun_max + 1))

        session = db_api.get_session()
        with session.begin(subtransactions=True):
            # remove from table unallocated tunnels not currently allocatable
            allocs = session.query(VxlanAllocation)
            for alloc in allocs:
                try:
                    # see if tunnel is allocatable
                    vxlan_vnis.remove(alloc.vxlan_vni)
                except KeyError:
                    # it's not allocatable, so check if it's allocated
                    if not alloc.allocated:
                        # it's not, so remove it from table
                        LOG.debug(_("Removing tunnel %s from pool"),
                                  alloc.vxlan_vni)
                        session.delete(alloc)

            # add missing allocatable tunnels to table
            for vxlan_vni in sorted(vxlan_vnis):
                alloc = VxlanAllocation(vxlan_vni=vxlan_vni)
                session.add(alloc)
Example no. 8
 def test_create_port_with_default_n1kv_policy_profile_id(self):
     """Test port create without passing policy profile id."""
     with self.port() as port:
         db_session = db.get_session()
         pp = n1kv_db_v2.get_policy_profile(
             db_session, port['port'][n1kv.PROFILE_ID])
         self.assertEqual(pp['name'], 'service_profile')
Example no. 9
 def update_ip_owner(self, ip_owner_info):
     ports_to_update = set()
     port_id = ip_owner_info.get('port')
     ipv4 = ip_owner_info.get('ip_address_v4')
     ipv6 = ip_owner_info.get('ip_address_v6')
     network_id = ip_owner_info.get('network_id')
     if not port_id or (not ipv4 and not ipv6):
         return ports_to_update
     LOG.debug("Got IP owner update: %s", ip_owner_info)
     core_plugin = self._get_plugin()
     # REVISIT: just use SQLAlchemy session and models_v2.Port?
     port = core_plugin.get_port(nctx.get_admin_context(), port_id)
     if not port:
         LOG.debug("Ignoring update for non-existent port: %s", port_id)
         return ports_to_update
     ports_to_update.add(port_id)
     for ipa in [ipv4, ipv6]:
         if not ipa:
             continue
         try:
             session = db_api.get_session()
             with session.begin(subtransactions=True):
                 old_owner = self.ha_ip_handler.get_port_for_ha_ipaddress(
                     ipa, network_id or port['network_id'], session=session)
                 self.ha_ip_handler.set_port_id_for_ha_ipaddress(port_id,
                                                                 ipa,
                                                                 session)
                 if old_owner and old_owner['port_id'] != port_id:
                     self.ha_ip_handler.delete_port_id_for_ha_ipaddress(
                         old_owner['port_id'], ipa, session=session)
                     ports_to_update.add(old_owner['port_id'])
         except db_exc.DBReferenceError as dbe:
             LOG.debug("Ignoring FK error for port %s: %s", port_id, dbe)
     return ports_to_update
Example no. 10
def get_port_from_device(device):
    """Get port from database."""
    LOG.debug(_("get_port_from_device() called"))
    session = db.get_session()
    sg_binding_port = sg_db.SecurityGroupPortBinding.port_id

    query = session.query(models_v2.Port,
                          sg_db.SecurityGroupPortBinding.security_group_id)
    query = query.outerjoin(sg_db.SecurityGroupPortBinding,
                            models_v2.Port.id == sg_binding_port)
    query = query.filter(models_v2.Port.id.startswith(device))
    port_and_sgs = query.all()
    if not port_and_sgs:
        return
    port = port_and_sgs[0][0]
    plugin = manager.NeutronManager.get_plugin()
    port_dict = plugin._make_port_dict(port)
    port_dict['security_groups'] = []
    for port_in_db, sg_id in port_and_sgs:
        if sg_id:
            port_dict['security_groups'].append(sg_id)
    port_dict['security_group_rules'] = []
    port_dict['security_group_source_groups'] = []
    port_dict['fixed_ips'] = [ip['ip_address']
                              for ip in port['fixed_ips']]
    return port_dict
Example no. 11
def get_port_and_sgs(port_id):
    """Get port from database with security group info."""

    LOG.debug(_("get_port_and_sgs() called for port_id %s"), port_id)
    session = db_api.get_session()
    sg_binding_port = sg_db.SecurityGroupPortBinding.port_id

    with session.begin(subtransactions=True):
        query = session.query(models_v2.Port,
                              sg_db.SecurityGroupPortBinding.security_group_id)
        query = query.outerjoin(sg_db.SecurityGroupPortBinding,
                                models_v2.Port.id == sg_binding_port)
        query = query.filter(models_v2.Port.id.startswith(port_id))
        port_and_sgs = query.all()
        if not port_and_sgs:
            return
        port = port_and_sgs[0][0]
        plugin = manager.NeutronManager.get_plugin()
        port_dict = plugin._make_port_dict(port)
        port_dict['security_groups'] = [
            sg_id for port_, sg_id in port_and_sgs if sg_id]
        port_dict['security_group_rules'] = []
        port_dict['security_group_source_groups'] = []
        port_dict['fixed_ips'] = [ip['ip_address']
                                  for ip in port['fixed_ips']]
        return port_dict
Example no. 12
def sync_network_states(network_vlan_ranges):
    """Synchronize network_states table with current configured VLAN ranges."""

    session = db.get_session()
    with session.begin():
        # get existing allocations for all physical networks
        allocations = dict()
        entries = session.query(mlnx_models_v2.SegmentationIdAllocation).all()
        for entry in entries:
            allocations.setdefault(entry.physical_network, set()).add(entry)

        # process vlan ranges for each configured physical network
        for physical_network, vlan_ranges in network_vlan_ranges.iteritems():
            # determine current configured allocatable vlans for this
            # physical network
            vlan_ids = set()
            for vlan_range in vlan_ranges:
                vlan_ids |= set(moves.xrange(vlan_range[0], vlan_range[1] + 1))

            # remove from table unallocated vlans not currently allocatable
            _remove_non_allocatable_vlans(session, allocations, physical_network, vlan_ids)

            # add missing allocatable vlans to table
            _add_missing_allocatable_vlans(session, physical_network, vlan_ids)

        # remove from table unallocated vlans for any unconfigured physical
        # networks
        _remove_unconfigured_vlans(session, allocations)
Example no. 13
 def sanitize_policy_profile_table(self):
     """Clear policy profiles from stale VSM."""
     db_session = db.get_session()
     hosts = config.get_vsm_hosts()
     vsm_info = db_session.query(
         n1kv_models.PolicyProfile.vsm_ip).distinct()
     if vsm_info is None or hosts is None:
         return
     vsm_ips = [vsm_ip[0] for vsm_ip in vsm_info if vsm_ip[0] not in hosts]
     for vsm_ip in vsm_ips:
         pprofiles = n1kv_db.get_policy_profiles_by_host(vsm_ip, db_session)
         for pprofile in pprofiles:
             # Do not delete profile if it is in use and if it
             # is the only VSM to have it configured
             pp_in_use = n1kv_db.policy_profile_in_use(pprofile['id'],
                                                       db_session)
             num_vsm_using_pp = db_session.query(
                 n1kv_models.PolicyProfile).filter_by(
                 id=pprofile['id']).count()
             if (not pp_in_use) or (num_vsm_using_pp > 1):
                 db_session.delete(pprofile)
                 db_session.flush()
             else:
                 LOG.warning(_LW('Cannot delete policy profile %s '
                                 'as it is in use.'), pprofile['id'])
Example no. 14
 def setUp(self):
     super(VlanAllocationsTest, self).setUp()
     db.configure_db()
     self.session = db.get_session()
     self.net_p = _create_test_network_profile_if_not_there(self.session)
     n1kv_db_v2.sync_vlan_allocations(self.session, self.net_p)
     self.addCleanup(db.clear_db)
Example no. 15
def remove_reserved_binding(vlan_id, switch_ip, instance_id,
                            port_id):
    """Removes reserved binding.

    This overloads port bindings to support reserved Switch binding
    used to maintain the state of a switch so it can be viewed by
    all other neutron processes. There's also the case of
    a reserved port binding to keep switch information on a given
    interface.
    The values of these arguments are as follows:
    :param vlan_id: 0
    :param switch_ip: ip address of the switch
    :param instance_id: fixed string RESERVED_NEXUS_SWITCH_DEVICE_ID_R1
    :                   or RESERVED_NEXUS_PORT_DEVICE_ID_R1
    :param port_id: switch-state of ACTIVE, RESTORE_S1, RESTORE_S2, INACTIVE
    :               port-expected port_id
    """
    if not port_id:
        LOG.warning(_LW("remove_reserved_binding called with no state"))
        return
    LOG.debug("remove_reserved_binding called")
    session = db.get_session()
    binding = _lookup_one_nexus_binding(session=session,
                                        vlan_id=vlan_id,
                                        switch_ip=switch_ip,
                                        instance_id=instance_id,
                                        port_id=port_id)
    for bind in binding:
        session.delete(bind)
    session.flush()
    return binding
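A hedged usage sketch for the helper above, following the conventions its docstring describes (vlan_id fixed at 0, instance_id one of the reserved device ids, port_id carrying the switch state). The switch IP and the module holding the constant are made up.

# Sketch only: drop the reserved switch-state binding for one switch.
bindings = remove_reserved_binding(
    vlan_id=0,
    switch_ip='192.0.2.10',
    instance_id=const.RESERVED_NEXUS_SWITCH_DEVICE_ID_R1,  # assumed location
    port_id='INACTIVE')  # one of the switch states named in the docstring
if not bindings:
    LOG.debug("no reserved binding to remove for switch %s", '192.0.2.10')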
Example no. 16
 def get_device_details(self, rpc_context, **kwargs):
     """Agent requests device details."""
     agent_id = kwargs.get("agent_id")
     device = kwargs.get("device")
     LOG.debug(_("Device %(device)s details requested from %(agent_id)s"), {"device": device, "agent_id": agent_id})
     port = self.get_port_from_device(device)
     if port:
         binding = db.get_network_binding(db_api.get_session(), port["network_id"])
         (network_type, segmentation_id) = constants.interpret_vlan_id(binding.vlan_id)
         entry = {
             "device": device,
             "network_type": network_type,
             "physical_network": binding.physical_network,
             "segmentation_id": segmentation_id,
             "network_id": port["network_id"],
             "port_id": port["id"],
             "admin_state_up": port["admin_state_up"],
         }
         if cfg.CONF.AGENT.rpc_support_old_agents:
             entry["vlan_id"] = binding.vlan_id
         new_status = q_const.PORT_STATUS_ACTIVE if port["admin_state_up"] else q_const.PORT_STATUS_DOWN
         if port["status"] != new_status:
             db.set_port_status(port["id"], new_status)
     else:
         entry = {"device": device}
         LOG.debug(_("%s can not be found in database"), device)
     return entry
Example no. 17
 def get_device_details(self, rpc_context, **kwargs):
     """Agent requests device details."""
     agent_id = kwargs.get('agent_id')
     device = kwargs.get('device')
     LOG.debug("Device %(device)s details requested from %(agent_id)s",
               {'device': device, 'agent_id': agent_id})
     plugin = manager.NeutronManager.get_plugin()
     port = plugin.get_port_from_device(device)
     if port:
         binding = db.get_network_binding(db_api.get_session(),
                                          port['network_id'])
         entry = {'device': device,
                  'physical_network': binding.physical_network,
                  'network_type': binding.network_type,
                  'segmentation_id': binding.segmentation_id,
                  'network_id': port['network_id'],
                  'port_mac': port['mac_address'],
                  'port_id': port['id'],
                  'admin_state_up': port['admin_state_up']}
         if cfg.CONF.AGENT.rpc_support_old_agents:
             entry['vlan_id'] = binding.segmentation_id
         new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up']
                       else q_const.PORT_STATUS_DOWN)
         if port['status'] != new_status:
             db.set_port_status(port['id'], new_status)
     else:
         entry = {'device': device}
         LOG.debug("%s can not be found in database", device)
     return entry
Example no. 18
    def update_switchports(cls, switchports, session=None):
        if not session:
            session = db_api.get_session()

        with session.begin(subtransactions=True):

            hardware_id = cls._validate_hardware_id(switchports)
            originals = list(db.filter_switchports(
                hardware_id=hardware_id, session=session))

            # If the given switchports match what exists in the db,
            # we don't have to do anything.
            equal = db.compare_switchports(
                originals, switchports, session=session)

            if equal:
                LOG.info(("No switchports update required "
                          "for hardware_id %s" % (hardware_id)))
                return originals
            else:
                LOG.info(("Updating switchports for "
                          "hardware_id %s" % (hardware_id)))
                # TODO(morgbara) this is a little ham-fisted
                cls.delete_switchports(hardware_id, session=session)
                return cls.create_switchports(switchports, session=session)
Example no. 19
    def sync_allocations(self):

        # determine current configured allocatable vnis
        vxlan_vnis = set()
        for tun_min, tun_max in self.tunnel_ranges:
            vxlan_vnis |= set(moves.range(tun_min, tun_max + 1))

        session = db_api.get_session()
        with session.begin(subtransactions=True):
            # remove from table unallocated tunnels not currently allocatable
            # fetch results as list via all() because we'll be iterating
            # through them twice
            allocs = session.query(VxlanAllocation).with_lockmode("update").all()
            # collect all vnis present in db
            existing_vnis = set(alloc.vxlan_vni for alloc in allocs)
            # collect those vnis that needs to be deleted from db
            vnis_to_remove = [
                alloc.vxlan_vni for alloc in allocs if (alloc.vxlan_vni not in vxlan_vnis and not alloc.allocated)
            ]
            # Immediately delete vnis in chunks. This leaves no work for
            # flush at the end of transaction
            bulk_size = 100
            chunked_vnis = (vnis_to_remove[i : i + bulk_size] for i in range(0, len(vnis_to_remove), bulk_size))
            for vni_list in chunked_vnis:
                if vni_list:
                    session.query(VxlanAllocation).filter(VxlanAllocation.vxlan_vni.in_(vni_list)).delete(
                        synchronize_session=False
                    )
            # collect vnis that need to be added
            vnis = list(vxlan_vnis - existing_vnis)
            chunked_vnis = (vnis[i : i + bulk_size] for i in range(0, len(vnis), bulk_size))
            for vni_list in chunked_vnis:
                bulk = [{"vxlan_vni": vni, "allocated": False} for vni in vni_list]
                session.execute(VxlanAllocation.__table__.insert(), bulk)
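The slicing used twice above can be factored into a small generator; this is only a sketch of the pattern, not part of the driver.

def _chunks(items, bulk_size=100):
    # Yield successive bulk_size-sized slices of a list, mirroring the
    # generator expressions used for vnis_to_remove and vnis above.
    for i in range(0, len(items), bulk_size):
        yield items[i:i + bulk_size]


# e.g. the stale-VNI deletion loop could then read:
# for vni_list in _chunks(vnis_to_remove):
#     session.query(VxlanAllocation).filter(
#         VxlanAllocation.vxlan_vni.in_(vni_list)).delete(
#             synchronize_session=False)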
Example no. 20
def get_network_binding(session, network_id):
    session = session or db.get_session()
    try:
        binding = session.query(ovs_models_v2.NetworkBinding).filter_by(network_id=network_id).one()
        return binding
    except exc.NoResultFound:
        return
Example no. 21
    def _sync_gre_allocations(self):
        """Synchronize gre_allocations table with configured tunnel ranges."""

        # determine current configured allocatable gres
        gre_ids = set()
        for gre_id_range in self.gre_id_ranges:
            tun_min, tun_max = gre_id_range
            if tun_max + 1 - tun_min > 1000000:
                LOG.error(_("Skipping unreasonable gre ID range "
                            "%(tun_min)s:%(tun_max)s"),
                          {'tun_min': tun_min, 'tun_max': tun_max})
            else:
                gre_ids |= set(xrange(tun_min, tun_max + 1))

        session = db_api.get_session()
        with session.begin(subtransactions=True):
            # remove from table unallocated tunnels not currently allocatable
            allocs = (session.query(GreAllocation).all())
            for alloc in allocs:
                try:
                    # see if tunnel is allocatable
                    gre_ids.remove(alloc.gre_id)
                except KeyError:
                    # it's not allocatable, so check if it's allocated
                    if not alloc.allocated:
                        # it's not, so remove it from table
                        LOG.debug(_("Removing tunnel %s from pool"),
                                  alloc.gre_id)
                        session.delete(alloc)

            # add missing allocatable tunnels to table
            for gre_id in sorted(gre_ids):
                alloc = GreAllocation(gre_id=gre_id)
                session.add(alloc)
Example no. 22
def get_port(port_id):
    session = db.get_session()
    try:
        port = session.query(models_v2.Port).filter_by(id=port_id).one()
    except exc.NoResultFound:
        port = None
    return port
Example no. 23
 def _get_mock_port_operation_context():
     current = {'status': 'DOWN',
                'binding:host_id': '',
                'allowed_address_pairs': [],
                'device_owner': 'fake_owner',
                'binding:profile': {},
                'fixed_ips': [{
                    'subnet_id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839'}],
                'id': '83d56c48-e9b8-4dcf-b3a7-0813bb3bd940',
                'security_groups': [SECURITY_GROUP],
                'device_id': 'fake_device',
                'name': '',
                'admin_state_up': True,
                'network_id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
                'tenant_id': 'test-tenant',
                'binding:vif_details': {},
                'binding:vnic_type': 'normal',
                'binding:vif_type': 'unbound',
                'mac_address': '12:34:56:78:21:b6'}
     context = mock.Mock(current=current)
     context._plugin.get_security_group = mock.Mock(
         return_value=SECURITY_GROUP)
     context._plugin.get_port = mock.Mock(return_value=current)
     context._plugin_context.session = neutron_db_api.get_session()
     context._network_context = mock.Mock(
         _network=OpenDaylightMechanismDriverTestCase.
         _get_mock_network_operation_context().current)
     return context
Example no. 24
def update_port_ext(port_id, commit=None, hardware_id=None,
                    trunked=None, session=None):
    if not session:
        session = db_api.get_session()

    updated = False

    with session.begin(subtransactions=True):
        port = (session.query(models.PortExt).
                get(port_id))

        if commit is not None:
            port.commit = commit
            updated = True

        if hardware_id is not None:
            port.hardware_id = hardware_id
            updated = True

        if trunked is not None:
            port.trunked = trunked
            updated = True

        if updated:
            session.add(port)
            session.flush()

        return port
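Because update_port_ext falls back to db_api.get_session() only when no session is passed, a caller can either let it run standalone or enlist it in an outer transaction; a sketch with a made-up port id.

# Standalone: the helper opens its own session and transaction.
update_port_ext('11111111-2222-3333-4444-555555555555', commit=True)

# Enlisted: reuse an outer session so the update commits atomically with
# other work (session.begin(subtransactions=True) nests cleanly).
session = db_api.get_session()
with session.begin(subtransactions=True):
    port = update_port_ext('11111111-2222-3333-4444-555555555555',
                           trunked=False, session=session)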
Example no. 25
    def _sync_create_ports(self, combined_res_info, vsm_ip):
        """
        Sync ports by creating missing ones on VSM.

        :param combined_res_info: tuple containing VSM and neutron information
        :param vsm_ip: string representing the IP address of the VSM
        """
        (vsm_vmn_dict, neutron_ports) = combined_res_info
        vsm_port_uuids = set()
        for (k, v) in vsm_vmn_dict.items():
            port_dict = v['properties']
            port_ids = set(port_dict['portId'].split(','))
            vsm_port_uuids = vsm_port_uuids.union(port_ids)
        for port in neutron_ports:
            if port['id'] not in vsm_port_uuids:
                # create these ports on VSM
                network_uuid = port['network_id']
                binding = n1kv_db.get_policy_binding(port['id'])
                policy_profile_id = binding.profile_id
                policy_profile = n1kv_db.get_policy_profile_by_uuid(
                    db.get_session(), policy_profile_id)
                vmnetwork_name = "%s%s_%s" % (n1kv_const.VM_NETWORK_PREFIX,
                                              policy_profile_id,
                                              network_uuid)
                try:
                    self.n1kvclient.create_n1kv_port(port, vmnetwork_name,
                                                     policy_profile,
                                                     vsm_ip=vsm_ip)
                except n1kv_exc.VSMError as e:
                    LOG.warning(_LW('Sync Exception: Port creation on VSM '
                                'failed: %s'), e.message)
Example no. 26
 def _verify_get_nsx_switch_and_port_id(self, exp_ls_uuid, exp_lp_uuid):
     # The nvplib and db calls are mocked, therefore the cluster
     # and the neutron_port_id parameters can be set to None
     ls_uuid, lp_uuid = nsx_utils.get_nsx_switch_and_port_id(
         db_api.get_session(), None, None)
     self.assertEqual(exp_ls_uuid, ls_uuid)
     self.assertEqual(exp_lp_uuid, lp_uuid)
Example no. 27
def filter_port_ext(session=None, **kwargs):
    if not session:
        session = db_api.get_session()

    with session.begin(subtransactions=True):
        return (session.query(models.PortExt).
                filter_by(**kwargs))
Example no. 28
def add_tunnel_endpoint(ip, max_retries=10):
    """Return the endpoint of the given IP address or generate a new one."""

    # NOTE(rpodolyaka): generation of a new tunnel endpoint must be put into a
    #                   repeatedly executed transactional block to ensure it
    #                   doesn't conflict with any other concurrently executed
    #                   DB transactions in spite of the specified transactions
    #                   isolation level value
    for i in xrange(max_retries):
        LOG.debug(_("Adding a tunnel endpoint for %s"), ip)
        try:
            session = db.get_session()
            with session.begin(subtransactions=True):
                tunnel = (
                    session.query(ovs_models_v2.TunnelEndpoint).filter_by(ip_address=ip).with_lockmode("update").first()
                )

                if tunnel is None:
                    tunnel_id = _generate_tunnel_id(session)
                    tunnel = ovs_models_v2.TunnelEndpoint(ip, tunnel_id)
                    session.add(tunnel)

                return tunnel
        except db_exc.DBDuplicateEntry:
            # a concurrent transaction has been committed, try again
            LOG.debug(
                _(
                    "Adding a tunnel endpoint failed due to a concurrent"
                    "transaction had been commited (%s attempts left)"
                ),
                max_retries - (i + 1),
            )

    raise q_exc.NeutronException(message="Unable to generate a new tunnel id")
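A usage sketch for add_tunnel_endpoint; the address is made up, and only the behaviour visible in the code above (internal retries, NeutronException once the retries are exhausted) is assumed.

# Sketch: register this agent's tunnel IP; races with other agents are
# retried inside the helper.
try:
    tunnel = add_tunnel_endpoint('198.51.100.7')
    LOG.debug("tunnel endpoint registered: %s", tunnel)
except q_exc.NeutronException:
    LOG.error("could not allocate a tunnel id for %s", '198.51.100.7')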
Example no. 29
def sync_tunnel_allocations(tunnel_id_ranges):
    """Synchronize tunnel_allocations table with configured tunnel ranges."""

    # determine current configured allocatable tunnels
    tunnel_ids = set()
    for tunnel_id_range in tunnel_id_ranges:
        tun_min, tun_max = tunnel_id_range
        if tun_max + 1 - tun_min > 1000000:
            LOG.error(
                _("Skipping unreasonable tunnel ID range " "%(tun_min)s:%(tun_max)s"),
                {"tun_min": tun_min, "tun_max": tun_max},
            )
        else:
            tunnel_ids |= set(xrange(tun_min, tun_max + 1))

    session = db.get_session()
    with session.begin():
        # remove from table unallocated tunnels not currently allocatable
        allocs = session.query(ovs_models_v2.TunnelAllocation).all()
        for alloc in allocs:
            try:
                # see if tunnel is allocatable
                tunnel_ids.remove(alloc.tunnel_id)
            except KeyError:
                # it's not allocatable, so check if it's allocated
                if not alloc.allocated:
                    # it's not, so remove it from table
                    LOG.debug(_("Removing tunnel %s from pool"), alloc.tunnel_id)
                    session.delete(alloc)

        # add missing allocatable tunnels to table
        for tunnel_id in sorted(tunnel_ids):
            alloc = ovs_models_v2.TunnelAllocation(tunnel_id)
            session.add(alloc)
Example no. 30
 def test_populate_policy_profile_delete(self):
     # Patch the Client class with the TestClient class
     with mock.patch(n1kv_client.__name__ + ".Client",
                     new=fake_client.TestClient):
         # Patch the _get_total_profiles() method to return a custom value
         with mock.patch(fake_client.__name__ +
                         '.TestClient._get_total_profiles') as obj_inst:
             # Return 3 policy profiles
             obj_inst.return_value = 3
             plugin = manager.NeutronManager.get_plugin()
             plugin._populate_policy_profiles()
             db_session = db.get_session()
             profile = n1kv_db_v2.get_policy_profile(
                 db_session, '00000000-0000-0000-0000-000000000001')
             # Verify that DB contains only 3 policy profiles
             self.assertEqual('pp-1', profile['name'])
             profile = n1kv_db_v2.get_policy_profile(
                 db_session, '00000000-0000-0000-0000-000000000002')
             self.assertEqual('pp-2', profile['name'])
             profile = n1kv_db_v2.get_policy_profile(
                 db_session, '00000000-0000-0000-0000-000000000003')
             self.assertEqual('pp-3', profile['name'])
             self.assertRaises(c_exc.PolicyProfileIdNotFound,
                               n1kv_db_v2.get_policy_profile,
                               db_session,
                               '00000000-0000-0000-0000-000000000004')
             # Return 2 policy profiles
             obj_inst.return_value = 2
             plugin._populate_policy_profiles()
             # Verify that the third policy profile is deleted
             self.assertRaises(c_exc.PolicyProfileIdNotFound,
                               n1kv_db_v2.get_policy_profile,
                               db_session,
                               '00000000-0000-0000-0000-000000000003')
Example no. 31
 def setUp(self):
     super(CiscoNexusDbTest, self).setUp()
     db.configure_db()
     self.session = db.get_session()
     self.addCleanup(db.clear_db)
Example no. 32
 def setUp(self):
     super(DbTestCase, self).setUp()
     self.db_session = neutron_db_api.get_session()
     self.addCleanup(self._db_cleanup)
Example no. 33
def delete_all_n1kv_credentials():
    session = db.get_session()
    session.query(network_models_v2.Credential).filter_by(type='n1kv').delete()
Example no. 34
 def all_list(self):
     session = db.get_session()
     return session.query(ryu_models_v2.TunnelKey).all()
Example no. 35
 def _verify_get_nsx_router_id(self, exp_lr_uuid):
     # The nvplib and db calls are mocked, therefore the cluster
     # and the neutron_router_id parameters can be set to None
     lr_uuid = nsx_utils.get_nsx_router_id(db_api.get_session(), None, None)
     self.assertEqual(exp_lr_uuid, lr_uuid)
Example no. 36
def get_all_qoss(tenant_id):
    """Lists all the qos to tenant associations."""
    LOG.debug(_("get_all_qoss() called"))
    session = db.get_session()
    return (session.query(network_models_v2.QoS).
            filter_by(tenant_id=tenant_id).all())
Example no. 37
def get_ovs_vlans():
    session = db.get_session()
    bindings = (session.query(ovs_models_v2.VlanAllocation.vlan_id).
                filter_by(allocated=True))
    return [binding.vlan_id for binding in bindings]
Example no. 38
def is_provider_network(network_id):
    """Return True if network_id is in the provider network table."""
    session = db.get_session()
    if session.query(network_models_v2.ProviderNetwork).filter_by(
            network_id=network_id).first():
        return True
Example no. 39
def get_all_credentials():
    """Lists all the creds for a tenant."""
    session = db.get_session()
    return (session.query(network_models_v2.Credential).all())
Example no. 40
def get_all_n1kv_credentials():
    session = db.get_session()
    return (session.query(network_models_v2.Credential).
            filter_by(type='n1kv'))
Example no. 41
    def _update_port_up(self, context):
        port = context.current
        agent_host = context.host
        port_infos = self._get_port_infos(context, port, agent_host)
        if not port_infos:
            return
        agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos

        network_id = port['network_id']

        session = db_api.get_session()
        agent_active_ports = self.get_agent_network_active_port_count(
            session, agent_host, network_id)

        other_fdb_entries = {
            network_id: {
                'segment_id': segment['segmentation_id'],
                'network_type': segment['network_type'],
                'ports': {
                    agent_ip: []
                }
            }
        }

        if agent_active_ports == 1 or (self.get_agent_uptime(agent) <
                                       cfg.CONF.l2pop.agent_boot_time):
            # First port activated on current agent in this network,
            # we have to provide it with the whole list of fdb entries
            agent_fdb_entries = {
                network_id: {
                    'segment_id': segment['segmentation_id'],
                    'network_type': segment['network_type'],
                    'ports': {}
                }
            }
            ports = agent_fdb_entries[network_id]['ports']

            nondvr_network_ports = self.get_nondvr_network_ports(
                session, network_id)
            for network_port in nondvr_network_ports:
                binding, agent = network_port
                if agent.host == agent_host:
                    continue

                ip = self.get_agent_ip(agent)
                if not ip:
                    LOG.debug(
                        _("Unable to retrieve the agent ip, check "
                          "the agent %(agent_host)s configuration."),
                        {'agent_host': agent.host})
                    continue

                agent_ports = ports.get(ip, [const.FLOODING_ENTRY])
                agent_ports += self._get_port_fdb_entries(binding.port)
                ports[ip] = agent_ports

            if cfg.CONF.l2pop.cascaded_gateway == 'no_gateway':
                remote_ports = self.get_remote_ports(session, network_id)
            else:
                remote_ports = {}
            for binding in remote_ports:
                profile = binding['profile']
                ip = self.get_host_ip_from_binding_profile_str(profile)
                if not ip:
                    LOG.debug(
                        _("Unable to retrieve the agent ip, check "
                          "the agent %(agent_host)s configuration."),
                        {'agent_host': agent.host})
                    continue

                agent_ports = ports.get(ip, [const.FLOODING_ENTRY])
                agent_ports += self._get_port_fdb_entries(binding.port)
                ports[ip] = agent_ports
            dvr_network_ports = self.get_dvr_network_ports(session, network_id)
            for network_port in dvr_network_ports:
                binding, agent = network_port
                if agent.host == agent_host:
                    continue

                ip = self.get_agent_ip(agent)
                if not ip:
                    LOG.debug(
                        "Unable to retrieve the agent ip, check "
                        "the agent %(agent_host)s configuration.",
                        {'agent_host': agent.host})
                    continue

                agent_ports = ports.get(ip, [const.FLOODING_ENTRY])
                ports[ip] = agent_ports

            # And notify other agents to add flooding entry
            other_fdb_entries[network_id]['ports'][agent_ip].append(
                const.FLOODING_ENTRY)

            if ports.keys():
                self.L2populationAgentNotify.add_fdb_entries(
                    self.rpc_ctx, agent_fdb_entries, agent_host)

        # Notify other agents to add fdb rule for current port
        if port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE:
            other_fdb_entries[network_id]['ports'][agent_ip] += (
                port_fdb_entries)

        self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx,
                                                     other_fdb_entries)
Example no. 42
def get_port_from_device_mac(device_mac):
    """Get port from database."""
    LOG.debug(_("Get_port_from_device_mac() called"))
    session = db.get_session()
    qry = session.query(models_v2.Port).filter_by(mac_address=device_mac)
    return qry.first()
Example no. 43
 def test__add_allocation_existing_allocated_is_kept(self):
     session = db_api.get_session()
     _add_allocation(session, gre_id=1, allocated=True)
     self.driver._add_allocation(session, {2})
     _get_allocation(session, 1)
Example no. 44
 def test__add_allocation_existing_not_allocated_is_removed(self):
     session = db_api.get_session()
     _add_allocation(session, gre_id=1)
     self.driver._add_allocation(session, {2})
     with testtools.ExpectedException(sa_exc.NoResultFound):
         _get_allocation(session, 1)
Example no. 45
    def _update_port_up(self, context):
        port_context = context.current
        port_infos = self._get_port_infos(context, port_context)
        if not port_infos:
            return
        agent, agent_ip, segment, port_fdb_entries = port_infos

        agent_host = port_context['binding:host_id']
        network_id = port_context['network_id']

        session = db_api.get_session()
        agent_active_ports = self.get_agent_network_active_port_count(
            session, agent_host, network_id)

        other_fdb_entries = {
            network_id: {
                'segment_id': segment['segmentation_id'],
                'network_type': segment['network_type'],
                'ports': {
                    agent_ip: []
                }
            }
        }

        if agent_active_ports == 1 or (self.get_agent_uptime(agent) <
                                       cfg.CONF.l2pop.agent_boot_time):
            # First port activated on current agent in this network,
            # we have to provide it with the whole list of fdb entries
            agent_fdb_entries = {
                network_id: {
                    'segment_id': segment['segmentation_id'],
                    'network_type': segment['network_type'],
                    'ports': {}
                }
            }
            ports = agent_fdb_entries[network_id]['ports']

            network_ports = self.get_network_ports(session, network_id)
            for network_port in network_ports:
                binding, agent = network_port
                if agent.host == agent_host:
                    continue

                ip = self.get_agent_ip(agent)
                if not ip:
                    LOG.debug(
                        _("Unable to retrieve the agent ip, check "
                          "the agent %(agent_host)s configuration."),
                        {'agent_host': agent.host})
                    continue

                agent_ports = ports.get(ip, [const.FLOODING_ENTRY])
                agent_ports += self._get_port_fdb_entries(binding.port)
                ports[ip] = agent_ports

            # And notify other agents to add flooding entry
            other_fdb_entries[network_id]['ports'][agent_ip].append(
                const.FLOODING_ENTRY)

            if ports.keys():
                l2pop_rpc.L2populationAgentNotify.add_fdb_entries(
                    self.rpc_ctx, agent_fdb_entries, agent_host)

        # Notify other agents to add fdb rule for current port
        other_fdb_entries[network_id]['ports'][agent_ip] += port_fdb_entries

        l2pop_rpc.L2populationAgentNotify.add_fdb_entries(
            self.rpc_ctx, other_fdb_entries)
Example no. 46
 def test__add_allocation_not_existing(self):
     session = db_api.get_session()
     _add_allocation(session, gre_id=1)
     self.driver._add_allocation(session, {1, 2})
     _get_allocation(session, 2)
Example no. 47
    def get_device_details(self, rpc_context, **kwargs):
        """Agent requests device details."""
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        LOG.debug(
            _("Device %(device)s details requested by agent "
              "%(agent_id)s"), {
                  'device': device,
                  'agent_id': agent_id
              })
        port_id = self._device_to_port_id(device)

        session = db_api.get_session()
        with session.begin(subtransactions=True):
            port = db.get_port(session, port_id)
            if not port:
                LOG.warning(
                    _("Device %(device)s requested by agent "
                      "%(agent_id)s not found in database"), {
                          'device': device,
                          'agent_id': agent_id
                      })
                return {'device': device}

            segments = db.get_network_segments(session, port.network_id)
            if not segments:
                LOG.warning(
                    _("Device %(device)s requested by agent "
                      "%(agent_id)s has network %(network_id)s with "
                      "no segments"), {
                          'device': device,
                          'agent_id': agent_id,
                          'network_id': port.network_id
                      })
                return {'device': device}

            binding = db.ensure_port_binding(session, port.id)
            if not binding.segment:
                LOG.warning(
                    _("Device %(device)s requested by agent "
                      "%(agent_id)s on network %(network_id)s not "
                      "bound, vif_type: %(vif_type)s"), {
                          'device': device,
                          'agent_id': agent_id,
                          'network_id': port.network_id,
                          'vif_type': binding.vif_type
                      })
                return {'device': device}

            segment = self._find_segment(segments, binding.segment)
            if not segment:
                LOG.warning(
                    _("Device %(device)s requested by agent "
                      "%(agent_id)s on network %(network_id)s "
                      "invalid segment, vif_type: %(vif_type)s"), {
                          'device': device,
                          'agent_id': agent_id,
                          'network_id': port.network_id,
                          'vif_type': binding.vif_type
                      })
                return {'device': device}

            new_status = (q_const.PORT_STATUS_BUILD
                          if port.admin_state_up else q_const.PORT_STATUS_DOWN)
            if port.status != new_status:
                port.status = new_status
            entry = {
                'device': device,
                'network_id': port.network_id,
                'port_id': port.id,
                'admin_state_up': port.admin_state_up,
                'network_type': segment[api.NETWORK_TYPE],
                'segmentation_id': segment[api.SEGMENTATION_ID],
                'physical_network': segment[api.PHYSICAL_NETWORK]
            }
            LOG.debug(_("Returning: %s"), entry)
            return entry
Example no. 48
 def setUp(self):
     super(TestL3GwModeMixin, self).setUp()
     plugin = __name__ + '.' + TestDbIntPlugin.__name__
     self.setup_coreplugin(plugin)
     self.target_object = TestDbIntPlugin()
     # Patch the context
     ctx_patcher = mock.patch('neutron.context', autospec=True)
     mock_context = ctx_patcher.start()
     self.context = mock_context.get_admin_context()
     # This ensures that calls to elevated() also work in unit tests
     self.context.elevated.return_value = self.context
     self.context.session = db_api.get_session()
     # Create sample data for tests
     self.ext_net_id = _uuid()
     self.int_net_id = _uuid()
     self.int_sub_id = _uuid()
     self.tenant_id = 'the_tenant'
     self.network = models_v2.Network(
         id=self.ext_net_id,
         tenant_id=self.tenant_id,
         admin_state_up=True,
         status=constants.NET_STATUS_ACTIVE)
     self.net_ext = external_net_db.ExternalNetwork(
         network_id=self.ext_net_id)
     self.context.session.add(self.network)
     # The following is to avoid complaints from SQLite on
     # foreign key violations
     self.context.session.flush()
     self.context.session.add(self.net_ext)
     self.router = l3_db.Router(
         id=_uuid(),
         name=None,
         tenant_id=self.tenant_id,
         admin_state_up=True,
         status=constants.NET_STATUS_ACTIVE,
         enable_snat=True,
         gw_port_id=None)
     self.context.session.add(self.router)
     self.context.session.flush()
     self.router_gw_port = models_v2.Port(
         id=FAKE_GW_PORT_ID,
         tenant_id=self.tenant_id,
         device_id=self.router.id,
         device_owner=l3_db.DEVICE_OWNER_ROUTER_GW,
         admin_state_up=True,
         status=constants.PORT_STATUS_ACTIVE,
         mac_address=FAKE_GW_PORT_MAC,
         network_id=self.ext_net_id)
     self.router.gw_port_id = self.router_gw_port.id
     self.context.session.add(self.router)
     self.context.session.add(self.router_gw_port)
     self.context.session.flush()
     self.fip_ext_port = models_v2.Port(
         id=FAKE_FIP_EXT_PORT_ID,
         tenant_id=self.tenant_id,
         admin_state_up=True,
         device_id=self.router.id,
         device_owner=l3_db.DEVICE_OWNER_FLOATINGIP,
         status=constants.PORT_STATUS_ACTIVE,
         mac_address=FAKE_FIP_EXT_PORT_MAC,
         network_id=self.ext_net_id)
     self.context.session.add(self.fip_ext_port)
     self.context.session.flush()
     self.int_net = models_v2.Network(
         id=self.int_net_id,
         tenant_id=self.tenant_id,
         admin_state_up=True,
         status=constants.NET_STATUS_ACTIVE)
     self.int_sub = models_v2.Subnet(
         id=self.int_sub_id,
         tenant_id=self.tenant_id,
         ip_version=4,
         cidr='3.3.3.0/24',
         gateway_ip='3.3.3.1',
         network_id=self.int_net_id)
     self.router_port = models_v2.Port(
         id=FAKE_ROUTER_PORT_ID,
         tenant_id=self.tenant_id,
         admin_state_up=True,
         device_id=self.router.id,
         device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
         status=constants.PORT_STATUS_ACTIVE,
         mac_address=FAKE_ROUTER_PORT_MAC,
         network_id=self.int_net_id)
     self.router_port_ip_info = models_v2.IPAllocation(
         port_id=self.router_port.id,
         network_id=self.int_net.id,
         subnet_id=self.int_sub_id,
         ip_address='3.3.3.1')
     self.context.session.add(self.int_net)
     self.context.session.add(self.int_sub)
     self.context.session.add(self.router_port)
     self.context.session.add(self.router_port_ip_info)
     self.context.session.flush()
     self.fip_int_port = models_v2.Port(
         id=FAKE_FIP_INT_PORT_ID,
         tenant_id=self.tenant_id,
         admin_state_up=True,
         device_id='something',
         device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX + 'nova',
         status=constants.PORT_STATUS_ACTIVE,
         mac_address=FAKE_FIP_INT_PORT_MAC,
         network_id=self.int_net_id)
     self.fip_int_ip_info = models_v2.IPAllocation(
         port_id=self.fip_int_port.id,
         network_id=self.int_net.id,
         subnet_id=self.int_sub_id,
         ip_address='3.3.3.3')
     self.fip = l3_db.FloatingIP(
         id=_uuid(),
         floating_ip_address='1.1.1.2',
         floating_network_id=self.ext_net_id,
         floating_port_id=FAKE_FIP_EXT_PORT_ID,
         fixed_port_id=None,
         fixed_ip_address=None,
         router_id=None)
     self.context.session.add(self.fip_int_port)
     self.context.session.add(self.fip_int_ip_info)
     self.context.session.add(self.fip)
     self.context.session.flush()
     self.fip_request = {'port_id': FAKE_FIP_INT_PORT_ID,
                         'tenant_id': self.tenant_id}
Example no. 49
 def setUp(self):
     cfg.CONF.set_override('network_vlan_ranges', ['physnet1:1000:2999'],
                           group='OVS')
     super(NetworkBindingsTest, self).setUp(plugin=PLUGIN_NAME)
     db.configure_db()
     self.session = db.get_session()
Example no. 50
 def setUp(self):
     super(VlanAllocationsTest, self).setUp()
     db.configure_db()
     ovs_db_v2.sync_vlan_allocations(VLAN_RANGES)
     self.session = db.get_session()
     self.addCleanup(db.clear_db)
Example no. 51
 def setUp(self):
     super(DnsSearchDomainDBTestCase, self).setUp()
     self.session = db.get_session()
Example no. 52
 def setUp(self):
     super(TunnelAllocationsTest, self).setUp()
     db.configure_db()
     ovs_db_v2.sync_tunnel_allocations(TUNNEL_RANGES)
     self.session = db.get_session()
     self.addCleanup(db.clear_db)
Example no. 53
 def setUp(self):
     super(ProfileBindingsTest, self).setUp()
     self.session = db.get_session()
Example no. 54
def get_port_from_device_mac(device_mac):
    LOG.debug("get_port_from_device_mac() called for mac %s", device_mac)
    session = db_api.get_session()
    qry = session.query(models_v2.Port).filter_by(mac_address=device_mac)
    return qry.first()
Example no. 55
 def setUp(self):
     super(NetworkBindingsTest, self).setUp()
     self.session = db.get_session()
Example no. 56
 def setUp(self):
     super(PolicyProfileTests, self).setUp()
     self.session = db.get_session()
Example no. 57
def network_all_tenant_list():
    session = db.get_session()
    return session.query(models_v2.Network).all()
Example no. 58
 def setUp(self):
     super(NetworkProfileTests, self).setUp()
     self.session = db.get_session()
Example no. 59
 def setUp(self):
     super(TunnelTypeMultiRangeTestMixin, self).setUp()
     self.driver = self.DRIVER_CLASS()
     self.driver.tunnel_ranges = self.TUNNEL_MULTI_RANGES
     self.driver.sync_allocations()
     self.session = db.get_session()
Example no. 60
    def _forward_worker(self, thread_id):
        LOG.debug('forward worker begun')

        session = neutron_db_api.get_session()
        etcd_election = EtcdElection(self.etcd_client, 'forward_worker',
                                     self.election_key_space, thread_id,
                                     wait_until_elected=True,
                                     recovery_time=3)
        while True:
            try:
                etcd_election.wait_until_elected()

                def work(k, v):
                    LOG.debug('forward worker updating etcd key %s', k)
                    if self.do_etcd_update(k, v):
                        return True
                    else:
                        # something went bad; breathe, in case we end
                        # up in a tight loop
                        time.sleep(1)
                        return False

                LOG.debug('forward worker reading journal')
                # TODO(najoy): Limit the journal read entries processed
                # by a worker thread to a finite number, say 50.
                # This will ensure that one thread does not run forever.
                # The re-election process will wake up one of the sleeping
                # threads after the specified recovery_time of 3 seconds
                # and will get a chance to split the available work
                while db.journal_read(session, work):
                    pass
                LOG.debug('forward worker has emptied journal')

                # work queue is now empty.
                LOG.debug("ML2_VPP(%s): worker thread pausing"
                          % self.__class__.__name__)
                # Wait to be kicked, or (in case of emergency) run every
                # few seconds in case another thread or process dumped
                # work and failed to process it
                try:
                    with eventlet.Timeout(PARANOIA_TIME):
                        # Wait for kick
                        dummy = self.db_q_ev.wait()
                        # Clear the event - we will now process till
                        # we've run out of things in the backlog
                        # so any trigger lost in this gap is harmless
                        self.db_q_ev.reset()
                        LOG.debug("ML2_VPP(%s): worker thread kicked: %s"
                                  % (self.__class__.__name__, str(dummy)))
                except eventlet.Timeout:
                    LOG.debug("ML2_VPP(%s): worker thread suspicious of "
                              "a long pause"
                              % self.__class__.__name__)
                    pass
                LOG.debug("ML2_VPP(%s): worker thread active"
                          % self.__class__.__name__)
            except Exception as e:
                # TODO(ijw): log exception properly
                LOG.error("problems in forward worker: %s", e)
                LOG.error(traceback.format_exc())
                # never quit
                pass