def _test_and_create_object(uuid):
    """Ensure a lock row exists for *uuid*, resetting it if its TTL expired."""
    try:
        session = db_api.get_writer_session()
        with session.begin():
            row = session.query(models.DFLockedObjects).filter_by(
                object_uuid=uuid).one()
            # A held lock older than the configured TTL is stale.
            stale = row.lock and timeutils.is_older_than(
                row.created_at, cfg.CONF.df.distributed_lock_ttl)
            if stale:
                LOG.warning('The lock for object %(id)s is reset '
                            'due to timeout.', {'id': uuid})
                _lock_free_update(session, uuid, lock_state=True,
                                  session_id=row.session_id)
    except orm_exc.NoResultFound:
        # No row yet -- create one; a concurrent creator may win the race.
        try:
            session = db_api.get_writer_session()
            with session.begin():
                _create_db_row(session, oid=uuid)
        except db_exc.DBDuplicateEntry:
            # the lock is concurrently created.
            pass
def delete_endpoint_by_host_or_ip(self, host, ip):
    """Delete tunnel endpoints matching either *host* or *ip*."""
    LOG.debug("delete_endpoint_by_host_or_ip() called for "
              "host %(host)s or %(ip)s", {'host': host, 'ip': ip})
    writer = db_api.get_writer_session()
    match = or_(self.endpoint_model.host == host,
                self.endpoint_model.ip_address == ip)
    writer.query(self.endpoint_model).filter(match).delete()
def _link_default_netpartition(self, netpart_name, l2template, l3template,
                               l3isolated, l3shared):
    """Link the default net-partition on the VSD and persist it in the DB."""
    params = {'name': netpart_name,
              'l3template': l3template,
              'l2template': l2template}
    np_id, l3dom_tid, l2dom_tid = (
        self.vsdclient.link_default_netpartition(params))
    # The isolated/shared zones must already exist on the VSD side.
    shared_match, isolated_match = self.vsdclient.validate_zone_create(
        l3dom_tid, l3isolated, l3shared)
    if not shared_match or not isolated_match:
        msg = ('Default zone names must be provided for '
               'default net-partiton')
        raise n_exc.BadRequest(resource='net_partition', msg=msg)
    # Verification passed: replace any stale DB record with a fresh one.
    session = lib_db_api.get_writer_session()
    netpartition = nuagedb.get_net_partition_by_name(session, netpart_name)
    with session.begin():
        if netpartition:
            nuagedb.delete_net_partition(session, netpartition)
        nuagedb.add_net_partition(session, np_id, l3dom_tid, l2dom_tid,
                                  netpart_name, l3isolated, l3shared)
    self._default_np_id = np_id
def _initialize_lvids_for_cluster(port_info):
    """Seed VNI allocation rows for a cluster; return True on success."""
    vcenter = port_info['vcenter_id']
    cluster = port_info['cluster_id']
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        try:
            # Row-lock the whole table to serialize concurrent initializers.
            session.query(
                models.ClusterVNIAllocations).with_lockmode('update').all()
            existing = session.query(models.ClusterVNIAllocations).filter(
                models.ClusterVNIAllocations.vcenter_id == vcenter,
                models.ClusterVNIAllocations.cluster_id == cluster).all()
            if not existing:
                _generate_vcenter_cluster_allocations(
                    session, vcenter, cluster)
            return True
        except Exception:
            LOG.exception(_LE("Exception while initializing VNI "
                              "allocations for clusters %(cluster)s of "
                              "vCenter %(vcenter)s."),
                          {'cluster': cluster, 'vcenter': vcenter})
            return False
def release_local_vlan(net_info):
    """Free the lvid held by a network once no ports remain on it."""
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        res = {key: net_info[key]
               for key in ('vcenter_id', 'cluster_id', 'network_id')}
        try:
            allocation = session.query(models.ClusterVNIAllocations).filter(
                models.ClusterVNIAllocations.vcenter_id == res['vcenter_id'],
                models.ClusterVNIAllocations.cluster_id == res['cluster_id'],
                models.ClusterVNIAllocations.network_id == res['network_id']
            ).with_lockmode('update').one()
            if allocation.network_port_count == 0:
                # Last port gone: return the row to the free pool.
                allocation.update({'network_id': None,
                                   'allocated': False,
                                   'network_port_count': 0})
                LOG.info(_LI("Released lvid for network: %s."), res)
            else:
                LOG.info(_LI("Unable to release local vlan for network_id %s "
                             "because ports are available on network."),
                         res['network_id'])
        except sa_exc.NoResultFound:
            # Another controller may have already cleared the record.
            LOG.error(_LE("Network %(network)s is already de-allocated for "
                          "cluster %(cluster)s."),
                      {'network': net_info['network_id'],
                       'cluster': net_info['cluster_id']})
def check_to_reclaim_local_vlan(port_info):
    """Decrement a network's port count; return its lvid if it hit 0, else -1."""
    lvid = -1
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        res = {key: port_info[key]
               for key in ('vcenter_id', 'cluster_id', 'network_id')}
        try:
            allocation = session.query(models.ClusterVNIAllocations).filter(
                models.ClusterVNIAllocations.vcenter_id == res['vcenter_id'],
                models.ClusterVNIAllocations.cluster_id == res['cluster_id'],
                models.ClusterVNIAllocations.network_id == res['network_id']
            ).with_lockmode('update').one()
            count = allocation.network_port_count
            if count >= 1:
                count -= 1
                allocation.update({'network_port_count': count})
                LOG.debug("Decremented the allocated port count for network "
                          "%s.", res)
                # Reaching zero means the lvid is reclaimable.
                if count == 0:
                    lvid = allocation.lvid
                    LOG.info(_LI("lvid can be released for network: %s."),
                             res)
        except sa_exc.NoResultFound:
            # Record already cleared, possibly by another controller.
            LOG.debug("Network %(network)s is already de-allocated for "
                      "cluster %(cluster)s.",
                      {'network': port_info['network_id'],
                       'cluster': port_info['cluster_id']})
    return lvid
def sync_router_interfaces(self, routers):
    """Sync vFabric router--network links for each router under a per-router lock."""
    for router in routers:
        # An exclusive (x) lock on the router serializes this sync against
        # concurrent router operations on the same router, while operations
        # on different routers proceed in parallel.
        db_session = db_api.get_writer_session()
        with db_session.begin(subtransactions=True):
            try:
                caller_msg = 'l3_sync_interface on router id=%s name=%s' % (
                    router['id'], router['name'])
                kaloom_db.get_Lock(db_session, router['id'], read=False,
                                   caller_msg=caller_msg)
            except Exception as e:
                # Either the router was deleted (nothing to sync) or the
                # lock timed out; skip this router.
                LOG.warning("sync_router_interfaces failed to lock router, err:%s", e)
                continue
            grouped = self.get_router_interfaces(router)
            for nw_name in grouped.keys():
                try:
                    if not self.driver.router_l2node_link_exists(
                            router['id'], router['name'], nw_name):
                        for ri in grouped[nw_name]:
                            try:
                                self.driver.add_router_interface(self, ri)
                            except Exception as e:
                                LOG.error("sync_router_interfaces failed to add_router_interface msg:%s", e)
                except Exception as e:
                    LOG.error("sync_router_interfaces failed to check link existence:%s--%s, msg:%s", router['name'], nw_name, e)
def create_router(self, context, router):
    """Create a new router entry in DB, and create it in vFabric."""
    # Shared (S) lock: router creations may run concurrently with one
    # another but not with an l3_sync, which takes the exclusive lock.
    db_session = db_api.get_writer_session()
    with db_session.begin(subtransactions=True):
        caller_msg = 'create_router %s' % router['router']['name']
        kaloom_db.get_Lock(db_session, kconst.L3_LOCK_NAME, read=True,
                           caller_msg=caller_msg)
        new_router = super(KaloomL3ServicePlugin, self).create_router(
            context, router)
        try:
            # Mirror the router into the vFabric, then record its id for
            # later per-router x/s locking.
            self.driver.create_router(context, new_router)
            kaloom_db.create_entry_for_Lock(new_router['id'])
            return new_router
        except Exception:
            with excutils.save_and_reraise_exception():
                # Roll back the Neutron DB entry on vFabric failure.
                super(KaloomL3ServicePlugin, self).delete_router(
                    context, new_router['id'])
def setUp(self):
    """Wire up plugins, extension middleware and a writer session for tests."""
    service_plugins = {
        'router':
            'neutron.tests.unit.extensions.test_l3.TestL3NatServicePlugin'}
    l3_plugin = test_l3.TestL3NatServicePlugin()
    sec_plugin = test_securitygroup.SecurityGroupTestPlugin()
    ext_mgr = extensions.PluginAwareExtensionManager(
        EXTENSIONS_PATH, {'router': l3_plugin, 'sec': sec_plugin})
    super(TestRevisionNumberMaintenance, self).setUp(
        plugin=PLUGIN_CLASS, service_plugins=service_plugins)
    app = config.load_paste_app('extensions_test_app')
    self.ext_api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
    self.session = db_api.get_writer_session()
    revision_plugin.RevisionPlugin()
    self.net = self._make_network(self.fmt, 'net1', True)['network']
    # Default INCONSISTENCIES_OLDER_THAN to -1 so tests need not wait for
    # the timeout before validating database inconsistencies.
    self.older_than_mock = mock.patch(
        'neutron.db.ovn_revision_numbers_db.INCONSISTENCIES_OLDER_THAN', -1)
    self.older_than_mock.start()
    self.addCleanup(self.older_than_mock.stop)
    self.ctx = context.get_admin_context()
def release_local_vlan(net_info):
    """Release a network's local VLAN row when its port count is zero."""
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        wanted = ('vcenter_id', 'cluster_id', 'network_id')
        res = {k: net_info[k] for k in wanted}
        try:
            row = session.query(models.ClusterVNIAllocations).filter(
                models.ClusterVNIAllocations.vcenter_id == res['vcenter_id'],
                models.ClusterVNIAllocations.cluster_id == res['cluster_id'],
                models.ClusterVNIAllocations.network_id == res['network_id']
            ).with_lockmode('update').one()
            if row.network_port_count != 0:
                LOG.info(_LI("Unable to release local vlan for network_id %s "
                             "because ports are available on network."),
                         res['network_id'])
            else:
                # No ports left: clear the row back to the free pool.
                row.update({'network_id': None,
                            'allocated': False,
                            'network_port_count': 0})
                LOG.info(_LI("Released lvid for network: %s."), res)
        except sa_exc.NoResultFound:
            # Nothing to do, may be another controller cleared the record.
            LOG.error(_LE("Network %(network)s is already de-allocated for "
                          "cluster %(cluster)s."),
                      {'network': net_info['network_id'],
                       'cluster': net_info['cluster_id']})
def synchronize(self):
    """Synchronize the Neutron router DB with the Kaloom vFabric.

    Walks the Neutron DB and ensures every router created there also
    exists in the Kaloom vFabric, then adds interfaces as well.  Stranded
    vFabric routers get deleted.  Relies on the idempotency of Kaloom
    vFabric configuration, so the same commands can be repeated safely.
    """
    # Exclusive (x) lock: the sync must not interleave with router
    # operations, which may otherwise run in parallel with one another.
    db_session = db_api.get_writer_session()
    with db_session.begin(subtransactions=True):
        try:
            kaloom_db.get_Lock(db_session, kconst.L3_LOCK_NAME, read=False,
                               caller_msg='l3_sync_read')
            routers = directory.get_plugin(
                plugin_constants.L3).get_routers(nctx.get_admin_context())
            vfabric_routers = self.driver.get_routers()
        except Exception as e:
            LOG.warning(e)
            return
        LOG.info('Syncing Neutron Router DB <-> vFabric')
        self.sync_routers(routers, vfabric_routers)
        self.sync_router_interfaces(routers)
def _delete_data(self):
    """Remove every MehModel row belonging to this tenant."""
    session = db_api.get_writer_session()
    with session.begin():
        rows = session.query(test_quota.MehModel).filter_by(
            tenant_id=self.tenant_id)
        for row in rows:
            session.delete(row)
def disassociate_floatingips(self, context, port_id, do_notify=True):
    """Disassociate all floating IPs on *port_id*, marking each DOWN."""
    session = db_api.get_writer_session()
    try:
        with session.begin(subtransactions=True):
            fip_dicts = self.get_floatingips(
                context, filters={'port_id': [port_id]})
            router_ids = super(OpenContrailRouterHandler,
                               self).disassociate_floatingips(
                context, port_id, do_notify)
            for fip in fip_dicts:
                # Refresh each floating IP, flip its status to DOWN and
                # push the change to the back-end driver.
                fip = self.get_floatingip(context, fip['id'])
                fip['status'] = const.FLOATINGIP_STATUS_DOWN
                self.update_floatingip_status(context, fip['id'],
                                              fip['status'])
                self.driver.update_floatingip(context, fip['id'],
                                              {'floatingip': fip})
            return router_ids
    except Exception as e:
        LOG.error("Failed to disassociate floating ips on port %(id)s: "
                  "%(err)s", {"id": port_id, "err": e})
        raise
def setUp(self):
    """Create a test network and grab a writer session."""
    super(TestRevisionNumber, self).setUp()
    res = self._create_network(fmt=self.fmt, name='net',
                               admin_state_up=True)
    self.net = self.deserialize(self.fmt, res)['network']
    self.session = db_api.get_writer_session()
def add_router_interface(self, context, router_id, interface_info):
    """Add Router Interface callback handler for OpenContrail.

    Invokes back-end driver to add router interface in OpenContrail.
    """
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        new_router = super(OpenContrailRouterHandler,
                           self).add_router_interface(
            context, router_id, interface_info)
        try:
            # The driver expects the interface dict without 'subnet_id'.
            interface_info = dict(new_router)
            del interface_info['subnet_id']
            self.driver.add_router_interface(context, router_id,
                                             interface_info)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error("Failed to add interface to router %(id)s: "
                          "%(err)s", {"id": router_id, "err": e})
                try:
                    # Best-effort rollback of the Neutron-side interface.
                    self.remove_router_interface(context, router_id,
                                                 interface_info)
                except Exception:
                    LOG.exception("Failed to delete interface of router %s",
                                  router_id)
    return new_router
def create_port(tenant_id, net_id, device_id, port_id, network_ctx,
                device_owner='compute', host='ubuntu1',
                dynamic_segment=None):
    """Create a port via the Neutron DB helper and persist its binding levels."""
    session = db_api.get_writer_session()
    ndb = db_lib.NeutronNets()
    ndb.set_ipam_backend()
    port_ctx = get_port_context(tenant_id, net_id, device_id, network_ctx,
                                port_id=port_id, device_owner=device_owner,
                                host=host, session=session,
                                dynamic_segment=dynamic_segment)
    ndb.create_port(port_ctx, {'port': port_ctx.current})
    # Mirror the context's binding levels into the DB.
    for level in port_ctx._binding_levels:
        session.add(ml2_models.PortBindingLevel(**level.__dict__))
    session.flush()
    return port_ctx
def delete_endpoint_by_host_or_ip(self, host, ip):
    """Remove every endpoint row whose host or ip_address matches."""
    LOG.debug("delete_endpoint_by_host_or_ip() called for "
              "host %(host)s or %(ip)s", {'host': host, 'ip': ip})
    session = lib_db_api.get_writer_session()
    predicate = or_(self.endpoint_model.host == host,
                    self.endpoint_model.ip_address == ip)
    session.query(self.endpoint_model).filter(predicate).delete()
def bind_port_to_host(port_id, host, network_ctx):
    """Record one PortBindingLevel per network segment for the port on *host*."""
    session = db_api.get_writer_session()
    for level, segment in enumerate(network_ctx.network_segments):
        fake = FakePortBindingLevel(port_id, level, 'vendor-1',
                                    segment['id'], host)
        session.add(ml2_models.PortBindingLevel(**fake.__dict__))
    session.flush()
def check_to_reclaim_local_vlan(port_info):
    """Drop one port from a network's count; return the lvid if now unused."""
    lvid = -1
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        wanted = ('vcenter_id', 'cluster_id', 'network_id')
        res = {k: port_info[k] for k in wanted}
        try:
            row = session.query(models.ClusterVNIAllocations).filter(
                models.ClusterVNIAllocations.vcenter_id == res['vcenter_id'],
                models.ClusterVNIAllocations.cluster_id == res['cluster_id'],
                models.ClusterVNIAllocations.network_id == res['network_id']
            ).with_lockmode('update').one()
            remaining = row.network_port_count
            if remaining >= 1:
                remaining -= 1
                row.update({'network_port_count': remaining})
                LOG.debug("Decremented the allocated port count for network "
                          "%s.", res)
                # Zero remaining ports: the lvid may be released.
                if remaining == 0:
                    lvid = row.lvid
                    LOG.info(_LI("lvid can be released for network: %s."),
                             res)
        except sa_exc.NoResultFound:
            # Nothing to do, may be another controller cleared the record.
            LOG.debug("Network %(network)s is already de-allocated for "
                      "cluster %(cluster)s.",
                      {'network': port_info['network_id'],
                       'cluster': port_info['cluster_id']})
    return lvid
def bump_revision(resource, resource_type):
    """Persist *resource*'s revision number unless a newer one is stored."""
    session = db_api.get_writer_session()
    revision_number = utils.get_revision_number(resource, resource_type)
    with session.begin():
        _ensure_revision_row_exist(session, resource, resource_type)
        std_attr_id = _get_standard_attr_id(
            session, resource['id'], resource_type)
        row = session.merge(models.OVNRevisionNumbers(
            standard_attr_id=std_attr_id, resource_uuid=resource['id']))
        if revision_number < row.revision_number:
            # Never move the stored revision number backwards.
            LOG.debug(
                'Skip bumping the revision number for %(res_uuid)s (type: '
                '%(res_type)s) to %(rev_num)d. A higher version is already '
                'registered in the database (%(new_rev)d)',
                {'res_type': resource_type, 'res_uuid': resource['id'],
                 'rev_num': revision_number,
                 'new_rev': row.revision_number})
            return
        row.revision_number = revision_number
        session.merge(row)
        LOG.info('Successfully bumped revision number for resource '
                 '%(res_uuid)s (type: %(res_type)s) to %(rev_num)d',
                 {'res_uuid': resource['id'], 'res_type': resource_type,
                  'rev_num': revision_number})
def delete_revision(resource_id):
    """Drop the OVN revision-number row for *resource_id*, if present."""
    session = db_api.get_writer_session()
    with session.begin():
        row = session.query(models.OVNRevisionNumbers).filter_by(
            resource_uuid=resource_id).one_or_none()
        if row is not None:
            session.delete(row)
def _delete_data(self):
    """Delete all MehModel rows owned by this project."""
    session = db_api.get_writer_session()
    with session.begin():
        for row in session.query(test_quota.MehModel).filter_by(
                project_id=self.project_id):
            session.delete(row)
def unbind_port_from_host(port_id, host):
    """Delete every PortBindingLevel row for (*port_id*, *host*)."""
    session = db_api.get_writer_session()
    model = ml2_models.PortBindingLevel
    rows = session.query(model).filter(model.port_id == port_id,
                                       model.host == host)
    for row in rows:
        session.delete(row)
    session.flush()
def _update_data(self):
    """Prefix the 'meh' value of every tenant-owned MehModel row."""
    session = db_api.get_writer_session()
    with session.begin():
        rows = session.query(test_quota.MehModel).filter_by(
            tenant_id=self.tenant_id)
        for row in rows:
            row['meh'] = 'meh-%s' % row['meh']
            session.add(row)
def _update_data(self):
    """Prefix the 'meh' value of every project-owned MehModel row."""
    session = db_api.get_writer_session()
    with session.begin():
        for row in session.query(test_quota.MehModel).filter_by(
                project_id=self.project_id):
            row['meh'] = 'meh-%s' % row['meh']
            session.add(row)
def add_node(node_uuid=None):
    """Insert an OVNHashRing row for this host; return the node uuid used."""
    if node_uuid is None:
        node_uuid = uuidutils.generate_uuid()
    session = db_api.get_writer_session()
    with session.begin():
        session.add(models.OVNHashRing(node_uuid=node_uuid,
                                       hostname=CONF.host))
    return node_uuid
def session(self):
    # TODO(akamyshnikova): checking for session attribute won't be needed
    # when reader and writer will be used
    if hasattr(super(Context, self), 'session'):
        return super(Context, self).session
    # Otherwise lazily create and cache a writer session.
    if self._session is None:
        self._session = db_api.get_writer_session()
    return self._session
def delete_networks_for_tenant(tenant_id):
    """Delete all of a tenant's networks, removing their ports first."""
    session = db_api.get_writer_session()
    with session.begin():
        owned = session.query(models_v2.Network).filter(
            models_v2.Network.project_id == tenant_id).all()
        for net in owned:
            # Ports must go before their network.
            delete_ports_on_network(net.id)
            session.delete(net)
def _add_data(self, tenant_id=None):
    """Insert two MehModel rows for *tenant_id* (default: the test tenant)."""
    session = db_api.get_writer_session()
    with session.begin():
        tenant_id = tenant_id or self.tenant_id
        for _ in range(2):
            session.add(test_quota.MehModel(
                meh='meh_%s' % uuidutils.generate_uuid(),
                tenant_id=tenant_id))
def _add_data(self, project_id=None):
    """Insert two MehModel rows for *project_id* (default: the test project)."""
    session = db_api.get_writer_session()
    with session.begin():
        project_id = project_id or self.project_id
        for _ in range(2):
            session.add(test_quota.MehModel(
                meh='meh_%s' % uuidutils.generate_uuid(),
                project_id=project_id))
def setUp(self):
    """Build a network/port pair and the periodic-maintenance helper."""
    super(TestDBInconsistenciesPeriodics, self).setUp()
    self.net = self._make_network(
        self.fmt, name='net1', admin_state_up=True)['network']
    self.port = self._make_port(
        self.fmt, self.net['id'], name='port1')['port']
    self.fake_ovn_client = mock.Mock()
    self.periodic = maintenance.DBInconsistenciesPeriodics(
        self.fake_ovn_client)
    self.session = db_api.get_writer_session()
def delete_segments_for_tenant(tenant_id):
    """Delete the network segments of every network owned by *tenant_id*."""
    session = db_api.get_writer_session()
    with session.begin():
        owned = session.query(models_v2.Network).filter(
            models_v2.Network.project_id == tenant_id).all()
        for net in owned:
            session.query(segment_models.NetworkSegment).filter(
                segment_models.NetworkSegment.network_id == net.id).delete()
def add_router_interface(self, context, router_id, interface_info):
    """Add a subnet of a network to an existing router."""
    router = self.get_router(context, router_id)
    # Exclusive (x) lock on this router: serializes against
    # l3_sync_interface on the same router, while operations on other
    # routers can proceed in parallel.
    db_session = db_api.get_writer_session()
    with db_session.begin(subtransactions=True):
        caller_msg = 'add_router_interface on router id=%s name=%s' % (
            router_id, router['name'])
        kaloom_db.get_Lock(db_session, router_id, read=False,
                           caller_msg=caller_msg)
        new_router_ifc = super(KaloomL3ServicePlugin,
                               self).add_router_interface(
            context, router_id, interface_info)
        core = directory.get_plugin()
        # The interface may be specified by subnet-id or by port-id;
        # resolve the subnet and the interface IP accordingly.
        add_by_port, add_by_sub = self._validate_interface_info(
            interface_info)
        if add_by_sub:
            subnet = core.get_subnet(context, interface_info['subnet_id'])
            port = core.get_port(context, new_router_ifc['port_id'])
            # A port may carry several (ip_address, subnet_id) pairs.
            ip_address = self._get_subnet_ip(port['fixed_ips'],
                                             interface_info['subnet_id'])
        elif add_by_port:
            port = core.get_port(context, interface_info['port_id'])
            ip_address = port['fixed_ips'][0]['ip_address']
            subnet = core.get_subnet(context,
                                     port['fixed_ips'][0]['subnet_id'])
        network_id = subnet['network_id']
        try:
            nw_name = utils._kaloom_nw_name(self.prefix, network_id)
        except n_exc.NetworkNotFound as e:
            LOG.warning('Nothing to do in add_router_interface as no such network=%s, msg:%s', network_id, e)
            return new_router_ifc
        # Package all the info needed for vFabric programming.
        router_info = copy.deepcopy(new_router_ifc)
        router_info['nw_name'] = nw_name
        router_info['ip_address'] = ip_address
        router_info['name'] = router['name']
        router_info['cidr'] = subnet['cidr']
        router_info['gip'] = subnet['gateway_ip']
        router_info['ip_version'] = subnet['ip_version']
        try:
            self.driver.add_router_interface(context, router_info)
            self._update_port_up(context, port['id'])
            return new_router_ifc
        except Exception:
            with excutils.save_and_reraise_exception():
                # Roll back the Neutron-side interface on vFabric failure.
                super(KaloomL3ServicePlugin, self).remove_router_interface(
                    context, router_id, interface_info)
def _add_endpoint(self, ip, host, **kwargs):
    """Create (or fetch the pre-existing) tunnel endpoint for *ip*."""
    LOG.debug("_add_endpoint() called for ip %s", ip)
    session = db_api.get_writer_session()
    try:
        endpoint = self.endpoint_model(ip_address=ip, host=host, **kwargs)
        endpoint.save(session)
    except db_exc.DBDuplicateEntry:
        # Lost the race: return the row some other worker inserted.
        endpoint = session.query(self.endpoint_model).filter_by(
            ip_address=ip).one()
        LOG.warning("Endpoint with ip %s already exists", ip)
    return endpoint
def _create_ovsvapp_mitigated_cluster(self, ovsvapp_mitigated_cluster):
    """Create mitigated_cluster helper method."""
    session = db_api.get_writer_session()
    payload = ovsvapp_mitigated_cluster['ovsvapp_mitigated_cluster']
    row = {key: payload[key]
           for key in ('vcenter_id', 'cluster_id',
                       'threshold_reached', 'being_mitigated')}
    session.execute(ovsvapp_models.OVSvAppClusters.__table__.insert(), row)
    return payload
def _add_endpoint(self, ip, host, **kwargs):
    """Persist a tunnel endpoint; on duplicate, return the existing row."""
    LOG.debug("_add_endpoint() called for ip %s", ip)
    session = db_api.get_writer_session()
    endpoint = self.endpoint_model(ip_address=ip, host=host, **kwargs)
    try:
        endpoint.save(session)
    except db_exc.DBDuplicateEntry:
        # Someone else inserted the same ip first -- use their row.
        endpoint = session.query(
            self.endpoint_model).filter_by(ip_address=ip).one()
        LOG.warning("Endpoint with ip %s already exists", ip)
    return endpoint
def session(self):
    # TODO(akamyshnikova): checking for session attribute won't be needed
    # when reader and writer will be used
    if hasattr(super(Context, self), 'session'):
        LOG.debug('context.session is used with and without new '
                  'enginefacade. Please update the code to use new '
                  'enginefacede consistently.')
        return super(Context, self).session
    # Lazily create and cache a writer session otherwise.
    if self._session is None:
        self._session = db_api.get_writer_session()
    return self._session
def get_agent_by_host(agent_host):
    """Return a L2 agent on the host."""
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        agent = session.query(agents_db.Agent).filter(
            agents_db.Agent.host == agent_host,
            agents_db.Agent.agent_type == constants.AGENT_TYPE_DVS,
            agents_db.Agent.admin_state_up.is_(True)).first()
        # Only an alive agent counts.
        if agent and agent.is_active:
            return agent
    return None
def synchronize(self):
    """Reconcile the local VLAN allocation table with the EOS pools."""
    LOG.info(_LI('Syncing VLANs with EOS'))
    try:
        self._rpc.check_vlan_type_driver_commands()
        vlan_pool = self._rpc.get_vlan_allocation()
    except arista_exc.AristaRpcError:
        LOG.warning(EOS_UNREACHABLE_MSG)
        self._force_sync = True
        return
    self._assigned_vlans = {
        'default': self._parse_vlan_ranges(vlan_pool['assignedVlans'],
                                           return_as_ranges=True),
    }
    assigned_vlans = self._parse_vlan_ranges(vlan_pool['assignedVlans'])
    available_vlans = frozenset(
        self._parse_vlan_ranges(vlan_pool['availableVlans']))
    used_vlans = frozenset(
        self._parse_vlan_ranges(vlan_pool['allocatedVlans']))
    self._force_sync = False
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        rows = session.query(
            vlanallocation.VlanAllocation).with_lockmode('update')
        for row in rows:
            # Non-default physnets are not managed by EOS: drop them.
            if row.physical_network != 'default':
                session.delete(row)
            try:
                assigned_vlans.remove(row.vlan_id)
            except KeyError:
                # VLAN no longer assigned by EOS.
                session.delete(row)
                continue
            # Reconcile the 'allocated' flag with EOS's view.
            if row.allocated and row.vlan_id in available_vlans:
                row.update({"allocated": False})
            elif not row.allocated and row.vlan_id in used_vlans:
                row.update({"allocated": True})
        # Add rows for newly assigned VLANs that had no row yet.
        for vlan_id in sorted(assigned_vlans):
            session.add(vlanallocation.VlanAllocation(
                physical_network='default', vlan_id=vlan_id,
                allocated=vlan_id in used_vlans))
def _create_ovsvapp_mitigated_cluster(self, ovsvapp_mitigated_cluster):
    """Create mitigated_cluster helper method."""
    session = db_api.get_writer_session()
    entry = ovsvapp_mitigated_cluster['ovsvapp_mitigated_cluster']
    values = {'vcenter_id': entry['vcenter_id'],
              'cluster_id': entry['cluster_id'],
              'threshold_reached': entry['threshold_reached'],
              'being_mitigated': entry['being_mitigated']}
    session.execute(ovsvapp_models.OVSvAppClusters.__table__.insert(),
                    values)
    return entry
def release_cluster_lock(vcenter_id, cluster_id):
    """Clear the mitigation flags on a cluster row, if it exists."""
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        try:
            row = session.query(models.OVSvAppClusters).filter(
                models.OVSvAppClusters.vcenter_id == vcenter_id,
                models.OVSvAppClusters.cluster_id == cluster_id
            ).with_lockmode('update').one()
            row.update({'being_mitigated': False,
                        'threshold_reached': False})
        except sa_exc.NoResultFound:
            LOG.error(_LE("Cannot update the row for cluster %s."),
                      cluster_id)
def synchronize(self):
    """Sync the VLAN allocation table against the EOS-provided pools."""
    LOG.info(_LI('Syncing VLANs with EOS'))
    try:
        self._rpc.register_with_eos()
        vlan_pool = self._rpc.get_vlan_allocation()
    except arista_exc.AristaRpcError:
        LOG.warning(EOS_UNREACHABLE_MSG)
        self._force_sync = True
        return
    self._assigned_vlans = {
        'default': self._parse_vlan_ranges(vlan_pool['assignedVlans'],
                                           return_as_ranges=True),
    }
    assigned_vlans = self._parse_vlan_ranges(vlan_pool['assignedVlans'])
    available_vlans = frozenset(
        self._parse_vlan_ranges(vlan_pool['availableVlans']))
    used_vlans = frozenset(
        self._parse_vlan_ranges(vlan_pool['allocatedVlans']))
    self._force_sync = False
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        existing = session.query(
            vlanallocation.VlanAllocation).with_lockmode('update')
        for alloc in existing:
            # Only the 'default' physnet is EOS-managed; drop the rest.
            if alloc.physical_network != 'default':
                session.delete(alloc)
            try:
                assigned_vlans.remove(alloc.vlan_id)
            except KeyError:
                # The VLAN is no longer assigned by EOS.
                session.delete(alloc)
                continue
            # Bring the 'allocated' flag in line with EOS's view.
            if alloc.allocated and alloc.vlan_id in available_vlans:
                alloc.update({"allocated": False})
            elif not alloc.allocated and alloc.vlan_id in used_vlans:
                alloc.update({"allocated": True})
        # Any assigned VLANs left over need fresh rows.
        for vlan_id in sorted(assigned_vlans):
            session.add(vlanallocation.VlanAllocation(
                physical_network='default', vlan_id=vlan_id,
                allocated=vlan_id in used_vlans))
def remove_switch_binding(port_id, switch_id, intf_id):
    """Remove one (switch_id, intf_id) link from a port's binding profile."""
    session = db_api.get_writer_session()
    with session.begin():
        binding = session.query(ml2_models.PortBinding).filter(
            ml2_models.PortBinding.port_id == port_id).first()
        profile = json.loads(binding.profile)
        links = profile['local_link_information']
        # Drop only the first matching link.
        for idx, link in enumerate(links):
            if link['switch_id'] == switch_id and link['port_id'] == intf_id:
                links.pop(idx)
                break
        binding.profile = json.dumps(profile)
        # With no links left, remove the whole binding.
        if len(links) == 0:
            delete_port_binding(port_id, binding.host)
def set_cluster_threshold(vcenter_id, cluster_id):
    """Mark a cluster's threshold as reached and clear its mitigation flag."""
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        try:
            row = session.query(models.OVSvAppClusters).filter(
                models.OVSvAppClusters.vcenter_id == vcenter_id,
                models.OVSvAppClusters.cluster_id == cluster_id
            ).with_lockmode('update').one()
            LOG.info(_LI("Cluster row found for %s."), row)
            if not row.threshold_reached:
                row.update({'being_mitigated': False,
                            'threshold_reached': True})
        except sa_exc.NoResultFound:
            LOG.error(_LE("Cluster row not found for %s."), cluster_id)
def reset_cluster_threshold(vcenter_id, cluster_id):
    """Clear threshold/mitigation flags, inserting the row if missing."""
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        try:
            row = session.query(models.OVSvAppClusters).filter(
                models.OVSvAppClusters.vcenter_id == vcenter_id,
                models.OVSvAppClusters.cluster_id == cluster_id
            ).with_lockmode('update').one()
            if row.threshold_reached:
                row.update({'being_mitigated': False,
                            'threshold_reached': False})
        except sa_exc.NoResultFound:
            # First agent in this cluster: seed a fresh row.
            LOG.error(_LE("Cluster row not found for %s."), cluster_id)
            session.execute(models.OVSvAppClusters.__table__.insert(),
                            {'vcenter_id': vcenter_id,
                             'cluster_id': cluster_id})
def get_local_vlan(port_info, assign=True):
    """Obtain (and optionally assign) a local VLAN id for a port's network."""
    lvid = None
    session = db_api.get_writer_session()
    res = {key: port_info[key]
           for key in ('vcenter_id', 'cluster_id', 'network_id')}
    with session.begin(subtransactions=True):
        try:
            if not assign:
                return _try_to_obtain_local_vlan(session, port_info, assign)
            # Lock all the rows in the table corresponding to the vCenter
            # and cluster.
            cluster_rows = session.query(
                models.ClusterVNIAllocations).filter(
                (models.ClusterVNIAllocations.vcenter_id ==
                 res['vcenter_id']),
                (models.ClusterVNIAllocations.cluster_id ==
                 res['cluster_id'])).with_lockmode('update').all()
            if cluster_rows:
                return _try_to_obtain_local_vlan(session, port_info, assign)
            LOG.info(_LI("Local VLAN rows not provisioned for the "
                         "cluster %(cluster)s of vCenter %(vcenter)s. "
                         "Going to provision."),
                     {'cluster': res['cluster_id'],
                      'vcenter': res['vcenter_id']})
        except Exception:
            LOG.exception(_LE("Error retrieving a local vlan for network "
                              "%(network)s for %(port)s."),
                          {'network': port_info['network_id'],
                           'port': port_info['port_id']})
            return
    # Provision allocation rows for the cluster, then retry once.
    if _initialize_lvids_for_cluster(res):
        with session.begin(subtransactions=True):
            lvid = _try_to_obtain_local_vlan(session, port_info, assign)
    else:
        LOG.error(_LE("Local VLAN rows not provisioned for the "
                      "cluster %(cluster)s of vCenter %(vcenter)s."),
                  {'cluster': res['cluster_id'],
                   'vcenter': res['vcenter_id']})
    return lvid
def create_ports(ports):
    """Bulk-insert ports plus their bindings and binding levels."""
    session = db_api.get_writer_session()
    with session.begin():
        for port in ports:
            binding_levels = port.pop('binding_levels', [])
            binding = port.pop('binding', {})
            session.add(models_v2.Port(**port))
            if binding:
                binding['port_id'] = port['id']
                if binding['vif_type'] == 'distributed':
                    # A distributed binding becomes one ACTIVE row per host.
                    distributed = binding.copy()
                    distributed['status'] = 'ACTIVE'
                    for host in binding['host']:
                        distributed['host'] = host
                        session.add(ml2_models.DistributedPortBinding(
                            **distributed))
                else:
                    session.add(ml2_models.PortBinding(**binding))
            for level in binding_levels:
                level['port_id'] = port['id']
                session.add(ml2_models.PortBindingLevel(**level))
def delete_port_binding(port_id, host):
    """Delete all binding rows for (*port_id*, *host*), one at a time."""
    session = db_api.get_writer_session()
    with session.begin():
        # We cannot do any bulk deletes here because every delete bumps the
        # revision number of the Port
        for model in (ml2_models.PortBindingLevel,
                      ml2_models.PortBinding,
                      ml2_models.DistributedPortBinding):
            rows = session.query(model).filter(model.port_id == port_id,
                                               model.host == host)
            for row in rows:
                session.delete(row)
def get_stale_local_vlans_for_network(network_id):
    """Return stale ``(vcenter_id, cluster_id, lvid)`` tuples for a network.

    Returns ``None`` when no allocation rows remain (i.e. port deletion
    already cleaned them up) or when the lookup fails.
    """
    session = db_api.get_writer_session()
    vcenter_clusters = None
    with session.begin(subtransactions=True):
        try:
            rows = session.query(models.ClusterVNIAllocations).filter(
                models.ClusterVNIAllocations.network_id == network_id
            ).all()
            if rows:
                vcenter_clusters = [(row.vcenter_id, row.cluster_id,
                                     row.lvid) for row in rows]
                LOG.info(_LI("Found stale allocations for network "
                             "%s."), network_id)
        except Exception:
            # Nothing to do, port-deletions have properly cleaned up
            # the records. We will just log and return back empty list.
            LOG.debug("Network %s is already cleaned up from "
                      "VNI allocations table.", network_id)
    return vcenter_clusters
def update_and_get_cluster_lock(vcenter_id, cluster_id):
    """Try to acquire the maintenance/mitigation lock for a cluster.

    Returns SUCCESS when this caller now holds the lock, RETRY when
    another mitigation is already in flight, and GIVE_UP when the
    cluster has hit its failure threshold and needs operator attention.
    A missing row means this is the first fault for the cluster, so a
    locked row is inserted.
    """
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        try:
            query = session.query(models.OVSvAppClusters)
            # NOTE: with_for_update() replaces with_lockmode('update'),
            # which was deprecated in SQLAlchemy 0.9 and removed in 1.4;
            # the row-locking behavior is identical.
            cluster_row = (query.filter(
                models.OVSvAppClusters.vcenter_id == vcenter_id,
                models.OVSvAppClusters.cluster_id == cluster_id
            ).with_for_update().one())
            if not cluster_row.threshold_reached:
                if not cluster_row.being_mitigated:
                    cluster_row.update({'being_mitigated': True})
                    LOG.info(_LI("Blocked the cluster %s for maintenance."),
                             cluster_id)
                    return SUCCESS
                else:
                    LOG.info(_LI("Cluster %s is under maintenance. "
                                 "Will retry later"), cluster_id)
                    return RETRY
            else:
                LOG.warning(_LW("Cluster %(id)s in vCenter %(vc)s needs "
                                "attention. "
                                "Not able to put hosts to maintenance!"),
                            {'id': cluster_id, 'vc': vcenter_id})
                return GIVE_UP
        except sa_exc.NoResultFound:
            # First fault case in this cluster_id.
            cluster_row = {'vcenter_id': vcenter_id,
                           'cluster_id': cluster_id,
                           'being_mitigated': True}
            session.execute(models.OVSvAppClusters.__table__.insert(),
                            cluster_row)
            LOG.info(_LI("Blocked the cluster %s for maintenance."),
                     cluster_id)
            return SUCCESS
def _initialize_lvids_for_cluster(port_info):
    """Provision the local-VLAN allocation rows for a (vCenter, cluster).

    Generates the rows only if none exist yet for that pair.  Returns
    True on success (including when rows already existed), False when
    provisioning failed.
    """
    vcenter = port_info['vcenter_id']
    cluster = port_info['cluster_id']
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        try:
            # Lock every row of the allocations table so concurrent
            # provisioning attempts serialize.
            # NOTE: with_for_update() replaces with_lockmode('update'),
            # which was deprecated in SQLAlchemy 0.9 and removed in 1.4;
            # the locking behavior is identical.
            (session.query(models.ClusterVNIAllocations).
             with_for_update()).all()
            query = session.query(models.ClusterVNIAllocations)
            existing_allocations = query.filter(
                models.ClusterVNIAllocations.vcenter_id == vcenter,
                models.ClusterVNIAllocations.cluster_id == cluster
            ).all()
            if not existing_allocations:
                _generate_vcenter_cluster_allocations(
                    session, vcenter, cluster)
            return True
        except Exception:
            LOG.exception(_LE("Exception while initializing VNI "
                              "allocations for clusters %(cluster)s of "
                              "vCenter %(vcenter)s."),
                          {'cluster': cluster, 'vcenter': vcenter})
            return False
def delete_ports_for_tenant(tenant_id):
    """Bulk-delete every port owned by the given project/tenant."""
    session = db_api.get_writer_session()
    with session.begin():
        query = session.query(models_v2.Port)
        query.filter(models_v2.Port.project_id == tenant_id).delete()
def delete_port(port_id):
    """Bulk-delete the port with the given id."""
    session = db_api.get_writer_session()
    with session.begin():
        query = session.query(models_v2.Port)
        query.filter(models_v2.Port.id == port_id).delete()
def delete_segments_for_network(network_id):
    """Bulk-delete every segment belonging to the given network."""
    session = db_api.get_writer_session()
    with session.begin():
        query = session.query(segment_models.NetworkSegment)
        query.filter(
            segment_models.NetworkSegment.network_id == network_id).delete()
def delete_ports_on_network(network_id):
    """Bulk-delete every port attached to the given network."""
    session = db_api.get_writer_session()
    with session.begin():
        query = session.query(models_v2.Port)
        query.filter(models_v2.Port.network_id == network_id).delete()
def delete_ports_for_instance(instance_id):
    """Bulk-delete every port whose device is the given instance."""
    session = db_api.get_writer_session()
    with session.begin():
        query = session.query(models_v2.Port)
        query.filter(models_v2.Port.device_id == instance_id).delete()
def delete_endpoint(self, ip):
    """Delete the tunnel endpoint registered for *ip*, if any."""
    LOG.debug("delete_endpoint() called for ip %s", ip)
    session = db_api.get_writer_session()
    query = session.query(self.endpoint_model)
    query.filter_by(ip_address=ip).delete()
def create_trunks(trunks):
    """Persist a batch of trunk rows in a single transaction."""
    session = db_api.get_writer_session()
    with session.begin():
        session.add_all([t_models.Trunk(**trunk) for trunk in trunks])
def create_subports(subports):
    """Persist a batch of subport rows in a single transaction."""
    session = db_api.get_writer_session()
    with session.begin():
        session.add_all([t_models.SubPort(**sp) for sp in subports])