def _test_and_create_object(uuid):
    """Ensure a DFLockedObjects row exists for *uuid*, resetting stale locks.

    If the row exists and its lock has been held longer than the configured
    ``distributed_lock_ttl``, the lock is forcibly released.  If no row
    exists, one is created; a concurrent creation by another worker
    (DBDuplicateEntry) is tolerated silently.
    """
    try:
        session = db_api.get_writer_session()
        with session.begin():
            row = session.query(
                models.DFLockedObjects).filter_by(object_uuid=uuid).one()
            # test ttl
            if row.lock and timeutils.is_older_than(
                    row.created_at, cfg.CONF.df.distributed_lock_ttl):
                # reset the lock if it is timeout
                LOG.warning(
                    'The lock for object %(id)s is reset '
                    'due to timeout.', {'id': uuid})
                _lock_free_update(session, uuid, lock_state=True,
                                  session_id=row.session_id)
    except orm_exc.NoResultFound:
        try:
            session = db_api.get_writer_session()
            with session.begin():
                _create_db_row(session, oid=uuid)
        except db_exc.DBDuplicateEntry:
            # the lock is concurrently created.
            pass
def create_tp_operation(host, network_id):
    """Persist and return a new KaloomTPOperation row for (host, network_id)."""
    session = db_api.get_writer_session()
    operation = kaloom_models.KaloomTPOperation(
        host=host, network_id=network_id)
    session.add(operation)
    session.flush()
    return operation
def release_local_vlan(net_info):
    """Free the local VLAN allocation for a network on a vCenter cluster.

    The allocation row is cleared only when no ports remain on the network
    (``network_port_count == 0``); otherwise the release is skipped and a
    message is logged.  A missing row means another controller already
    de-allocated the network.
    """
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        res_keys = ['vcenter_id', 'cluster_id', 'network_id']
        res = dict((k, net_info[k]) for k in res_keys)
        try:
            query = session.query(models.ClusterVNIAllocations)
            # Row-lock the allocation so concurrent releases serialize.
            allocation = (query.filter(
                models.ClusterVNIAllocations.vcenter_id == res['vcenter_id'],
                models.ClusterVNIAllocations.cluster_id == res['cluster_id'],
                models.ClusterVNIAllocations.network_id == res['network_id']
            ).with_lockmode('update').one())
            if allocation.network_port_count == 0:
                allocation.update({
                    'network_id': None,
                    'allocated': False,
                    'network_port_count': 0
                })
                LOG.info(_LI("Released lvid for network: %s."), res)
            else:
                LOG.info(
                    _LI("Unable to release local vlan for network_id %s "
                        "because ports are available on network."),
                    res['network_id'])
        except sa_exc.NoResultFound:
            # Nothing to do, may be another controller cleared the record
            # We will just log and return.
            LOG.error(
                _LE("Network %(network)s is already de-allocated for "
                    "cluster %(cluster)s."), {
                        'network': net_info['network_id'],
                        'cluster': net_info['cluster_id']
                    })
def _initialize_lvids_for_cluster(port_info):
    """Ensure VNI allocation rows exist for the port's (vcenter, cluster).

    Locks all allocation rows to serialize concurrent initialization, then
    generates the allocation rows only if none exist yet for the pair.

    :returns: True on success, False if any exception occurred.
    """
    vcenter = port_info['vcenter_id']
    cluster = port_info['cluster_id']
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        try:
            # Lock the whole allocations table so only one controller
            # initializes the rows for this cluster at a time.
            (session.query(
                models.ClusterVNIAllocations).with_lockmode('update')).all()
            query = session.query(models.ClusterVNIAllocations)
            existing_allocations = query.filter(
                models.ClusterVNIAllocations.vcenter_id == vcenter,
                models.ClusterVNIAllocations.cluster_id == cluster).all()
            if not existing_allocations:
                _generate_vcenter_cluster_allocations(
                    session, vcenter, cluster)
            return True
        except Exception:
            LOG.exception(
                _LE("Exception while initializing VNI "
                    "allocations for clusters %(cluster)s of "
                    "vCenter %(vcenter)s."), {
                        'cluster': cluster,
                        'vcenter': vcenter
                    })
            return False
def update_ip_owner(self, ip_owner_info):
    """Record a port as the owner of the given HA IPv4/IPv6 addresses.

    Transfers ownership from any previous owner port and returns the set
    of port ids whose ownership records changed (the new owner, plus any
    displaced previous owners).
    """
    ports_to_update = set()
    port_id = ip_owner_info.get('port')
    ipv4 = ip_owner_info.get('ip_address_v4')
    ipv6 = ip_owner_info.get('ip_address_v6')
    network_id = ip_owner_info.get('network_id')
    # Nothing to do without a port and at least one address.
    if not port_id or (not ipv4 and not ipv6):
        return ports_to_update
    LOG.debug("Got IP owner update: %s", ip_owner_info)
    # REVISIT: Just use SQLAlchemy session and models_v2.Port?
    port = self.plugin.get_port(n_context.get_admin_context(), port_id)
    if not port:
        LOG.debug("Ignoring update for non-existent port: %s", port_id)
        return ports_to_update
    ports_to_update.add(port_id)
    for ipa in [ipv4, ipv6]:
        if not ipa:
            continue
        try:
            # REVISIT: Why isn't this a single transaction at the
            # top-level, so that the port itself is guaranteed to
            # still exist.
            session = db_api.get_writer_session()
            with session.begin(subtransactions=True):
                old_owner = self.get_port_for_ha_ipaddress(
                    ipa, network_id or port['network_id'],
                    session=session)
                self.set_port_id_for_ha_ipaddress(port_id, ipa, session)
                if old_owner and old_owner['port_id'] != port_id:
                    self.delete_port_id_for_ha_ipaddress(
                        old_owner['port_id'], ipa, session=session)
                    ports_to_update.add(old_owner['port_id'])
        except db_exc.DBReferenceError as dbe:
            # Port may have been deleted concurrently; best-effort skip.
            LOG.debug("Ignoring FK error for port %s: %s", port_id, dbe)
    return ports_to_update
def check_to_reclaim_local_vlan(port_info):
    """Decrement the port count of a network's local VLAN allocation.

    :returns: the lvid that can now be released when the port count
        reaches zero, or -1 when the VLAN is still in use or no
        allocation row exists.
    """
    lvid = -1
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        res_keys = ['vcenter_id', 'cluster_id', 'network_id']
        res = dict((k, port_info[k]) for k in res_keys)
        try:
            query = session.query(models.ClusterVNIAllocations)
            # Row-lock the allocation so concurrent updates serialize.
            allocation = (query.filter(
                models.ClusterVNIAllocations.vcenter_id == res['vcenter_id'],
                models.ClusterVNIAllocations.cluster_id == res['cluster_id'],
                models.ClusterVNIAllocations.network_id == res['network_id']
            ).with_lockmode('update').one())
            count = allocation.network_port_count
            if count >= 1:
                count -= 1
                allocation.update({'network_port_count': count})
                LOG.debug(
                    "Decremented the allocated port count for network "
                    "%s.", res)
            # No ports left on the network: caller may release this lvid.
            if count == 0:
                lvid = allocation.lvid
                LOG.info(_LI("lvid can be released for network: %s."), res)
        except sa_exc.NoResultFound:
            # Nothing to do, may be another controller cleared the record
            # We will just log and return back status as False.
            LOG.debug(
                "Network %(network)s is already de-allocated for "
                "cluster %(cluster)s.", {
                    'network': port_info['network_id'],
                    'cluster': port_info['cluster_id']
                })
    return lvid
def tearDown(self):
    """Drop the Kaloom mapping tables created for this test."""
    super(TestKaloomVlanPool, self).tearDown()
    engine = db_api.get_writer_session().connection().engine
    for table in (KaloomKnidMapping.__table__,
                  KaloomVlanHostMapping.__table__,
                  KaloomVlanReservation.__table__):
        table.drop(bind=engine)
def delete_floatingip(self, context, floatingip_id):
    """Delete a floating IP in the Neutron DB and the Contrail backend.

    The current fip is saved first; if the back-end delete fails, the
    Neutron record is re-created from that copy so DB and backend stay
    consistent, and the original error is re-raised.
    """
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        try:
            # Keep a copy of the fip so it can be restored on failure.
            old_fip = super(
                OpenContrailRouterHandler, self).get_floatingip(
                context, floatingip_id)
            super(OpenContrailRouterHandler,
                  self).delete_floatingip(context, floatingip_id)
        except Exception as e:
            LOG.error("Failed to delete floating ip %(id)s: "
                      "%(err)s", {"id": floatingip_id, "err": e})
            raise
        try:
            self.driver.delete_floatingip(context, floatingip_id)
        except Exception as e:
            LOG.error("Failed to delete floating ip %(id)s: "
                      "%(err)s", {"id": floatingip_id, "err": e})
            try:
                # Undo the DB delete so Neutron and the backend agree.
                with session.begin(subtransactions=True):
                    super(OpenContrailRouterHandler,
                          self).create_floatingip(context,
                                                  {'floatingip': old_fip},
                                                  old_fip['status'])
            except Exception as e:
                LOG.error("Failed to undelete floating ip %(id)s: "
                          "%(err)s", {"id": floatingip_id, "err": e})
                raise
            raise
def create_floatingip(self, context, floatingip,
                      initial_status=const.FLOATINGIP_STATUS_ACTIVE):
    """Create a floating IP in the Neutron DB and the Contrail backend.

    If the back-end create fails, the just-created Neutron record is
    deleted again so DB and backend stay consistent, and the error is
    re-raised.
    """
    fip = floatingip['floatingip']
    # A fip not associated with a port starts out DOWN.
    if fip.get('port_id') is None:
        initial_status = const.FLOATINGIP_STATUS_DOWN
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        try:
            fip_dict = super(
                OpenContrailRouterHandler, self).create_floatingip(
                context, floatingip, initial_status)
        except Exception as e:
            LOG.error("Failed to create floating ip %(fip)s: "
                      "%(err)s", {"fip": fip, "err": e})
            raise
        try:
            self.driver.create_floatingip(context,
                                          {'floatingip': fip_dict})
            return fip_dict
        except Exception as e:
            LOG.error("Failed to create floating ip %(fip)s: "
                      "%(err)s", {"fip": fip, "err": e})
            # Roll back the DB record so Neutron and the backend agree.
            with session.begin(subtransactions=True):
                super(OpenContrailRouterHandler, self).delete_floatingip(
                    context, fip_dict['id'])
            raise
def test_sg_delete(self):
    """Deleting an SG journals its rule deletions before the SG itself."""
    with mock.patch.object(journal, 'record') as record:
        context = self._get_mock_operation_context(odl_const.ODL_SG)
        res_id = context[odl_const.ODL_SG]['id']
        plugin_context_mock = mock.Mock()
        plugin_context_mock.session = neutron_db_api.get_writer_session()
        # One fake rule attached to the fake security group.
        rule = mock.Mock()
        rule.id = SG_RULE_FAKE_ID
        rule.security_group_id = SG_FAKE_ID
        sg = mock.Mock()
        sg.id = SG_FAKE_ID
        sg.security_group_rules = [rule]
        kwargs = {
            'security_group': sg,
            'security_group_rule_ids': [SG_RULE_FAKE_ID]
        }
        self.mech.sync_from_callback_precommit(
            plugin_context_mock, odl_const.ODL_DELETE,
            callback._RESOURCE_MAPPING[odl_const.ODL_SG], res_id,
            context, **kwargs)
        # Expect: rule delete recorded first (depending on the SG),
        # then the SG delete with an emptied rule list.
        record.assert_has_calls([
            mock.call(mock.ANY, 'security_group_rule', SG_RULE_FAKE_ID,
                      'delete', [SG_FAKE_ID]),
            mock.call(
                mock.ANY, 'security_group', SG_FAKE_ID, 'delete', {
                    'description': 'test-description',
                    'project_id': 'test-tenant',
                    'security_group_rules': [],
                    'tenant_id': 'test-tenant',
                    'id': SG_FAKE_ID,
                    'name': 'test_sg'
                })
        ])
def add_router_interface(self, context, router_id, interface_info):
    """Add Router Interface callback handler for OpenContrail.

    Invokes back-end driver to add router interface in OpenContrail.
    If the back-end call fails, the DB-side interface is removed again
    (best effort) and the original exception is re-raised.
    """
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        new_router = super(
            OpenContrailRouterHandler, self).add_router_interface(
            context, router_id, interface_info)
        try:
            # The driver call does not take subnet_id; pass the rest of
            # the new interface data through.
            interface_info = dict(new_router)
            del interface_info['subnet_id']
            self.driver.add_router_interface(context, router_id,
                                             interface_info)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error("Failed to add interface to router %(id)s: "
                          "%(err)s", {"id": router_id, "err": e})
                try:
                    # Best-effort rollback of the DB-side interface.
                    self.remove_router_interface(context, router_id,
                                                 interface_info)
                except Exception:
                    LOG.exception("Failed to delete interface of router %s",
                                  router_id)
    return new_router
def remove_router_interface(self, context, router_id, interface_info):
    """Remove a router interface inside a writer transaction."""
    db_session = db_api.get_writer_session()
    with db_session.begin(subtransactions=True):
        return super(OpenDaylightL3RouterPlugin,
                     self).remove_router_interface(context, router_id,
                                                   interface_info)
def delete_endpoint_by_host_or_ip(self, host, ip):
    """Delete tunnel endpoints matching either *host* or *ip*."""
    LOG.debug("delete_endpoint_by_host_or_ip() called for "
              "host %(host)s or %(ip)s", {'host': host, 'ip': ip})
    session = db_api.get_writer_session()
    matches = or_(self.endpoint_model.host == host,
                  self.endpoint_model.ip_address == ip)
    session.query(self.endpoint_model).filter(matches).delete()
def setUp(self):
    """Configure ML2 + odl-router_v2 and stub out background threads."""
    cfg.CONF.set_override("core_plugin",
                          'neutron.plugins.ml2.plugin.Ml2Plugin')
    cfg.CONF.set_override('mechanism_drivers',
                          ['logger', 'opendaylight_v2'], 'ml2')
    self.useFixture(odl_base.OpenDaylightRestClientFixture())
    cfg.CONF.set_override("service_plugins", ['odl-router_v2'])
    core_plugin = cfg.CONF.core_plugin
    service_plugins = {'l3_plugin_name': 'odl-router_v2'}
    # Prevent the journal/maintenance threads and journal recording
    # from running during the tests.
    mock.patch.object(journal.OpenDaylightJournalThread,
                      'start_odl_sync_thread').start()
    self.mock_mt_thread = mock.patch.object(
        maintenance.MaintenanceThread, 'start').start()
    mock.patch.object(mech_driver_v2.OpenDaylightMechanismDriver,
                      '_record_in_journal').start()
    mock.patch.object(mech_driver_v2.OpenDaylightMechanismDriver,
                      'sync_from_callback_precommit').start()
    mock.patch.object(mech_driver_v2.OpenDaylightMechanismDriver,
                      'sync_from_callback_postcommit').start()
    self.useFixture(odl_base.OpenDaylightFeaturesFixture())
    super(OpenDaylightL3TestCase, self).setUp(
        plugin=core_plugin, service_plugins=service_plugins)
    self.db_session = neutron_db_api.get_writer_session()
    self.plugin = directory.get_plugin()
    # Treat every network as external so router/FIP tests don't need a
    # real external-network fixture.
    self.plugin._network_is_external = mock.Mock(return_value=True)
    self.driver = directory.get_plugin(constants.L3)
    self.thread = journal.OpenDaylightJournalThread()
    self.driver.get_floatingip = mock.Mock(
        return_value={'router_id': ROUTER_ID,
                      'floating_network_id': NETWORK_ID})
def _delete_data(self):
    """Remove every MehModel row belonging to this test's tenant."""
    session = db_api.get_writer_session()
    with session.begin():
        rows = session.query(test_quota.MehModel).filter_by(
            tenant_id=self.tenant_id)
        for row in rows:
            session.delete(row)
def create_knid_mapping(kaloom_knid, network_id):
    """Persist and return a new KNID-to-network mapping row."""
    session = db_api.get_writer_session()
    knid_mapping = kaloom_models.KaloomKnidMapping(
        kaloom_knid=kaloom_knid, network_id=network_id)
    session.add(knid_mapping)
    session.flush()
    return knid_mapping
def update_router(self, context, router_id, router):
    """Update a router and journal the change for ODL in one transaction."""
    db_session = db_api.get_writer_session()
    with db_session.begin(subtransactions=True):
        updated = super(OpenDaylightL3RouterPlugin, self).update_router(
            context, router_id, router)
        journal.record(context, odl_const.ODL_ROUTER, router_id,
                       odl_const.ODL_UPDATE, updated)
    return updated
def create_vlan_reservation(host, vlan_id, network_id):
    """Persist and return a VLAN reservation for (host, vlan_id, network)."""
    session = db_api.get_writer_session()
    reservation = kaloom_models.KaloomVlanReservation(
        host=host, vlan_id=vlan_id, network_id=network_id)
    session.add(reservation)
    session.flush()
    return reservation
def _update_data(self):
    """Rewrite the 'meh' field of every row owned by this test's tenant."""
    session = db_api.get_writer_session()
    with session.begin():
        rows = session.query(test_quota.MehModel).filter_by(
            tenant_id=self.tenant_id)
        for row in rows:
            row['meh'] = 'meh-%s' % row['meh']
            session.add(row)
def session(self):
    # TODO(akamyshnikova): checking for session attribute won't be needed
    # when reader and writer will be used
    parent = super(Context, self)
    if hasattr(parent, 'session'):
        return parent.session
    if self._session is None:
        self._session = db_api.get_writer_session()
    return self._session
def fake_synchronize_read(self, read_time, routers, vfabric_routers):
    """Test double: hold the L3 sync read lock for *read_time* seconds."""
    db_session = db_api.get_writer_session()
    with db_session.begin(subtransactions=True):
        kaloom_db.get_Lock(db_session, kconst.L3_LOCK_NAME, read=False,
                           caller_msg='l3_sync_read')
        time.sleep(read_time)
        LOG.info(LOG_MESSAGE_READ)
def setUp(self):
    """Prepare DB session, ODL REST client fixture and the cleanup hook."""
    super(RecoveryTestCase, self).setUp()
    self.db_session = neutron_db_api.get_writer_session()
    client_fixture = base.OpenDaylightRestClientGlobalFixture(
        recovery._CLIENT)
    self.useFixture(client_fixture)
    self._CLIENT = recovery._CLIENT.get_client()
    self.addCleanup(self._db_cleanup)
def delete_router(self, context, router_id):
    """Delete a router and record the deletion in the ODL journal."""
    db_session = db_api.get_writer_session()
    router_dict = self.get_router(context, router_id)
    dependency_list = [router_dict['gw_port_id']]
    with db_session.begin(subtransactions=True):
        super(OpenDaylightL3RouterPlugin, self).delete_router(context,
                                                              router_id)
        journal.record(context, odl_const.ODL_ROUTER, router_id,
                       odl_const.ODL_DELETE, dependency_list)
def _call_operation_object(self, operation, object_type):
    """Drive one (operation, object_type) pair through the mech driver.

    SG and SG-rule operations go through sync_from_callback_precommit
    with a mocked plugin context; every other object type calls the
    corresponding ``<operation>_<object_type>_precommit`` method
    directly with the mock operation context.
    """
    context = self._get_mock_operation_context(object_type)
    if object_type in [odl_const.ODL_SG, odl_const.ODL_SG_RULE]:
        plugin_context_mock = mock.Mock()
        plugin_context_mock.session = neutron_db_api.get_writer_session()
        # Map the singular object type back to its resource mapping entry.
        res_type = [
            rt for rt in callback._RESOURCE_MAPPING.values()
            if rt.singular == object_type
        ][0]
        res_id = context[object_type]['id']
        # Delete callbacks get no resource context.
        context_ = (copy.deepcopy(context)
                    if operation != odl_const.ODL_DELETE else None)
        if (object_type == odl_const.ODL_SG and
                operation in [odl_const.ODL_CREATE, odl_const.ODL_DELETE]):
            # TODO(yamahata): remove this work around once
            # https://review.openstack.org/#/c/281693/
            # is merged.
            if operation == odl_const.ODL_CREATE:
                sg = securitygroup.SecurityGroup(
                    id=res_id, name=context_[object_type]['name'],
                    tenant_id=context_[object_type]['tenant_id'],
                    description=context_[object_type]['description'])
                plugin_context_mock.session.add(sg)
                sg_dict = dict(sg)
                sg_dict['security_group_rules'] = []
                self.mech.sync_from_callback_precommit(
                    plugin_context_mock, operation, res_type, res_id,
                    context_, security_group=sg_dict)
            if operation == odl_const.ODL_DELETE:
                self.mech.sync_from_callback_precommit(
                    plugin_context_mock, operation, res_type, res_id,
                    context_, security_group={
                        'security_group_rules': {
                            'id': SG_RULE_FAKE_ID
                        }
                    },
                    security_group_rule_ids=[SG_RULE_FAKE_ID])
        else:
            self.mech.sync_from_callback_precommit(plugin_context_mock,
                                                   operation, res_type,
                                                   res_id, context_)
    else:
        method = getattr(self.mech,
                         '%s_%s_precommit' % (operation, object_type))
        method(context)
    self.db_session.flush()
def _release_lock(oid, sid):
    """Release the distributed lock held on object *oid* by session *sid*."""
    # NOTE(nick-ma-z): we disallow subtransactions because the
    # retry logic will bust any parent transactions
    db_session = db_api.get_writer_session()
    log_ctx = {'oid': oid, 'sid': sid}
    with db_session.begin():
        LOG.debug("Try to release lock for object %(oid)s in "
                  "session %(sid)s.", log_ctx)
        _lock_free_update(db_session, oid, lock_state=True, session_id=sid)
        LOG.debug("Lock is released for object %(oid)s in "
                  "session %(sid)s.", log_ctx)
def _add_data(self, tenant_id=None):
    """Insert two MehModel rows for *tenant_id* (default: the test tenant)."""
    session = db_api.get_writer_session()
    with session.begin():
        owner = tenant_id or self.tenant_id
        for _ in range(2):
            session.add(test_quota.MehModel(
                meh='meh_%s' % uuidutils.generate_uuid(),
                tenant_id=owner))
def setUp(self):
    """Create the Kaloom mapping tables and initialize the OVS mech driver.

    The VLAN range parser is patched so the driver initializes with a
    fixed (1, 4094) range instead of reading configuration.
    """
    super(KaloomMechanismDriverTestCase, self).setUp()
    engine = db_api.get_writer_session().connection().engine
    KaloomKnidMapping.__table__.create(bind=engine)
    KaloomVlanHostMapping.__table__.create(bind=engine)
    with patch.object(KaloomVlanPool, '_parse_network_vlan_ranges',
                      return_value=(1, 4094)):
        self.driver = KaloomOVSMechanismDriver()
        self.driver.initialize()
def delete_floatingip(self, context, floatingip_id):
    """Delete a floating IP and journal the deletion for ODL."""
    db_session = db_api.get_writer_session()
    fip = self.get_floatingip(context, floatingip_id)
    dependency_list = [fip['router_id'], fip['floating_network_id']]
    with db_session.begin(subtransactions=True):
        super(OpenDaylightL3RouterPlugin, self).delete_floatingip(
            context, floatingip_id)
        journal.record(context, odl_const.ODL_FLOATINGIP, floatingip_id,
                       odl_const.ODL_DELETE, dependency_list)
def delete_knid_mapping(network_id):
    """Delete the KNID mapping for *network_id*, tolerating missing rows."""
    session = db_api.get_writer_session()
    query = session.query(kaloom_models.KaloomKnidMapping).filter_by(
        network_id=network_id)
    try:
        session.delete(query.one())
        session.flush()
    except (sa_exc.NoResultFound, sa_exc.StaleDataError):
        # no record was found, do nothing
        # ignore concurrent deletion
        pass
def _add_endpoint(self, ip, host, **kwargs):
    """Create (or fetch the already-existing) tunnel endpoint for *ip*."""
    LOG.debug("_add_endpoint() called for ip %s", ip)
    session = db_api.get_writer_session()
    endpoint = self.endpoint_model(ip_address=ip, host=host, **kwargs)
    try:
        endpoint.save(session)
    except db_exc.DBDuplicateEntry:
        query = session.query(self.endpoint_model)
        endpoint = query.filter_by(ip_address=ip).one()
        LOG.warning(_LW("Endpoint with ip %s already exists"), ip)
    return endpoint
def _add_endpoint(self, ip, host, **kwargs):
    """Persist a tunnel endpoint row, returning the existing one on conflict."""
    LOG.debug("_add_endpoint() called for ip %s", ip)
    session = db_api.get_writer_session()
    try:
        endpoint = self.endpoint_model(ip_address=ip, host=host, **kwargs)
        endpoint.save(session)
    except db_exc.DBDuplicateEntry:
        endpoint = session.query(self.endpoint_model).filter_by(
            ip_address=ip).one()
        LOG.warning("Endpoint with ip %s already exists", ip)
    return endpoint
def set_port_id_for_ha_ipaddress(self, port_id, ipaddress, session=None):
    """Stores a Neutron Port Id as owner of HA IP Addr (idempotent API)."""
    session = session or db_api.get_writer_session()
    try:
        with session.begin(subtransactions=True):
            # Return the existing association untouched, if any.
            obj = self._get_ha_ipaddress(port_id, ipaddress, session)
            if obj:
                return obj
            else:
                obj = HAIPAddressToPortAssociation(
                    port_id=port_id, ha_ip_address=ipaddress)
                session.add(obj)
                return obj
    except db_exc.DBDuplicateEntry:
        # Lost a race with a concurrent insert of the same tuple;
        # treat as idempotent success (falls through returning None).
        LOG.debug('Duplicate IP ownership entry for tuple %s',
                  (port_id, ipaddress))
def delete_port_id_for_ha_ipaddress(self, port_id, ipaddress,
                                    session=None):
    """Delete the HA IP ownership row for (port_id, ipaddress).

    :returns: the number of rows deleted by the bulk delete.
    """
    session = session or db_api.get_writer_session()
    with session.begin(subtransactions=True):
        try:
            # REVISIT: Can this query be baked? The
            # sqlalchemy.ext.baked.Result class does not have a
            # delete() method, and adding delete() to the baked
            # query before executing it seems to result in the
            # params() not being evaluated.
            return session.query(
                HAIPAddressToPortAssociation).filter_by(
                    port_id=port_id,
                    ha_ip_address=ipaddress).delete()
        except orm.exc.NoResultFound:
            return
def setUp(self):
    """Build the shared L3 gw-mode fixture: networks, router, ports, fip.

    Creates an external network with a router (and gateway port), an
    internal network/subnet with a router interface port, a compute port
    on the internal network and a floating IP on the external network,
    all through a mocked admin context backed by a writer session.
    """
    super(TestL3GwModeMixin, self).setUp()
    plugin = __name__ + '.' + TestDbIntPlugin.__name__
    self.setup_coreplugin(plugin)
    self.target_object = TestDbIntPlugin()
    # Patch the context
    ctx_patcher = mock.patch('neutron_lib.context', autospec=True)
    mock_context = ctx_patcher.start()
    self.context = mock_context.get_admin_context()
    # This ensure also calls to elevated work in unit tests
    self.context.elevated.return_value = self.context
    self.context.session = db_api.get_writer_session()
    # Create sample data for tests
    self.ext_net_id = _uuid()
    self.int_net_id = _uuid()
    self.int_sub_id = _uuid()
    self.tenant_id = 'the_tenant'
    # External network marked external via the ExternalNetwork object.
    self.network = net_obj.Network(
        self.context,
        id=self.ext_net_id,
        project_id=self.tenant_id,
        admin_state_up=True,
        status=constants.NET_STATUS_ACTIVE)
    self.net_ext = net_obj.ExternalNetwork(
        self.context, network_id=self.ext_net_id)
    self.network.create()
    self.net_ext.create()
    # Router starts without a gateway port; it is attached below.
    self.router = l3_models.Router(
        id=_uuid(),
        name=None,
        tenant_id=self.tenant_id,
        admin_state_up=True,
        status=constants.NET_STATUS_ACTIVE,
        enable_snat=True,
        gw_port_id=None)
    self.context.session.add(self.router)
    self.context.session.flush()
    self.router_gw_port = port_obj.Port(
        self.context,
        id=FAKE_GW_PORT_ID,
        project_id=self.tenant_id,
        device_id=self.router.id,
        device_owner=l3_db.DEVICE_OWNER_ROUTER_GW,
        admin_state_up=True,
        status=constants.PORT_STATUS_ACTIVE,
        mac_address=netaddr.EUI(FAKE_GW_PORT_MAC),
        network_id=self.ext_net_id)
    self.router_gw_port.create()
    self.router.gw_port_id = self.router_gw_port.id
    self.context.session.add(self.router)
    self.context.session.flush()
    self.fip_ext_port = port_obj.Port(
        self.context,
        id=FAKE_FIP_EXT_PORT_ID,
        project_id=self.tenant_id,
        admin_state_up=True,
        device_id=self.router.id,
        device_owner=l3_db.DEVICE_OWNER_FLOATINGIP,
        status=constants.PORT_STATUS_ACTIVE,
        mac_address=netaddr.EUI(FAKE_FIP_EXT_PORT_MAC),
        network_id=self.ext_net_id)
    self.fip_ext_port.create()
    self.context.session.flush()
    # Internal network, subnet and the router's interface port on it.
    self.int_net = net_obj.Network(
        self.context,
        id=self.int_net_id,
        project_id=self.tenant_id,
        admin_state_up=True,
        status=constants.NET_STATUS_ACTIVE)
    self.int_sub = subnet_obj.Subnet(
        self.context,
        id=self.int_sub_id,
        project_id=self.tenant_id,
        ip_version=4,
        cidr=utils.AuthenticIPNetwork('3.3.3.0/24'),
        gateway_ip=netaddr.IPAddress('3.3.3.1'),
        network_id=self.int_net_id)
    self.router_port = port_obj.Port(
        self.context,
        id=FAKE_ROUTER_PORT_ID,
        project_id=self.tenant_id,
        admin_state_up=True,
        device_id=self.router.id,
        device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
        status=constants.PORT_STATUS_ACTIVE,
        mac_address=netaddr.EUI(FAKE_ROUTER_PORT_MAC),
        network_id=self.int_net_id)
    self.router_port_ip_info = port_obj.IPAllocation(
        self.context,
        port_id=self.router_port.id,
        network_id=self.int_net.id,
        subnet_id=self.int_sub_id,
        ip_address='3.3.3.1')
    self.int_net.create()
    self.int_sub.create()
    self.router_port.create()
    self.router_port_ip_info.create()
    self.context.session.flush()
    # Compute port on the internal network that the fip will target.
    self.fip_int_port = port_obj.Port(
        self.context,
        id=FAKE_FIP_INT_PORT_ID,
        project_id=self.tenant_id,
        admin_state_up=True,
        device_id='something',
        device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX + 'nova',
        status=constants.PORT_STATUS_ACTIVE,
        mac_address=netaddr.EUI(FAKE_FIP_INT_PORT_MAC),
        network_id=self.int_net_id)
    self.fip_int_ip_info = port_obj.IPAllocation(
        self.context,
        port_id=self.fip_int_port.id,
        network_id=self.int_net.id,
        subnet_id=self.int_sub_id,
        ip_address='3.3.3.3')
    # Floating IP initially unassociated (fixed_port_id=None).
    self.fip = l3_obj.FloatingIP(
        self.context,
        id=_uuid(),
        floating_ip_address=netaddr.IPAddress('1.1.1.2'),
        floating_network_id=self.ext_net_id,
        floating_port_id=FAKE_FIP_EXT_PORT_ID,
        fixed_port_id=None,
        fixed_ip_address=None,
        router_id=None)
    self.fip_int_port.create()
    self.fip_int_ip_info.create()
    self.fip.create()
    self.context.session.flush()
    self.context.session.expire_all()
    self.fip_request = {'port_id': FAKE_FIP_INT_PORT_ID,
                        'tenant_id': self.tenant_id}
def delete_endpoint(self, ip):
    """Delete the tunnel endpoint row whose address is *ip*."""
    LOG.debug("delete_endpoint() called for ip %s", ip)
    session = db_api.get_writer_session()
    query = session.query(self.endpoint_model)
    query.filter_by(ip_address=ip).delete()
def session(self):
    """Return the cached DB session, creating a writer session on first use."""
    cached = self._session
    if cached is None:
        cached = db_api.get_writer_session()
        self._session = cached
    return cached