def get_nsxv_spoofguard_policy_network_mappings(session, filters=None,
                                                like_filters=None):
    """Return spoofguard-policy/network mapping rows matching the filters.

    :param session: DB session to use; when None, a fresh reader session
        is opened.
    :param filters: dict of exact-match column filters, or None.
    :param like_filters: dict of SQL LIKE column filters, or None.
    :returns: list of NsxvSpoofGuardPolicyNetworkMapping rows.
    """
    # Bug fix: the passed-in session was previously shadowed
    # unconditionally by a new reader session, silently ignoring the
    # caller's transaction context. Honor it when supplied.
    session = session or db.get_reader_session()
    query = session.query(nsxv_models.NsxvSpoofGuardPolicyNetworkMapping)
    return nsx_db._apply_filters_to_query(
        query, nsxv_models.NsxvSpoofGuardPolicyNetworkMapping,
        filters, like_filters).all()
def _get_agent_fdb(self, context, segment, port, agent_host):
    """Build the FDB entries other agents must remove for this port.

    Returns a dict keyed by network id, mapping the departing agent's
    tunnel IP to the port's entries (plus a flooding entry when this was
    the agent's last active port on the network), or None when there is
    nothing to withdraw.
    """
    if not agent_host:
        # Port was never bound to a host; nothing to withdraw.
        return
    network_id = port['network_id']
    session = db_api.get_reader_session()
    agent_active_ports = l2pop_db.get_agent_network_active_port_count(
        session, agent_host, network_id)
    agent = l2pop_db.get_agent_by_host(session, agent_host)
    # NOTE(review): unlike other variants of this method, `agent` is not
    # checked for None before use — presumably _validate_segment
    # tolerates a missing agent; confirm.
    if not self._validate_segment(segment, port['id'], agent):
        return
    agent_ip = l2pop_db.get_agent_ip(agent)
    other_fdb_entries = self._get_fdb_entries_template(
        segment, agent_ip, port['network_id'])
    if agent_active_ports == 0:
        # Agent is removing its last activated port in this network,
        # other agents needs to be notified to delete their flooding entry.
        other_fdb_entries[network_id]['ports'][agent_ip].append(
            const.FLOODING_ENTRY)
    # Notify other agents to remove fdb rules for current port
    if (port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE and
            not l3_hamode_db.is_ha_router_port(
                context, port['device_owner'], port['device_id'])):
        fdb_entries = self._get_port_fdb_entries(port)
        other_fdb_entries[network_id]['ports'][agent_ip] += fdb_entries
    return other_fdb_entries
def _fixed_ips_changed(self, context, orig, port, diff_ips):
    """Notify agents that the fixed IP set of a bound port changed."""
    old_ips, new_ips = diff_ips
    # DVR interfaces live on the current host; other ports report the
    # host they were originally bound to.
    if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
        host = context.host
    else:
        host = context.original_host
    if not host:
        return
    agent_ip = l2pop_db.get_agent_ip_by_host(
        db_api.get_reader_session(), host)
    mac = port['mac_address']
    before = [l2pop_rpc.PortInfo(mac_address=mac, ip_address=ip)
              for ip in old_ips]
    after = [l2pop_rpc.PortInfo(mac_address=mac, ip_address=ip)
             for ip in new_ips]
    upd_fdb_entries = {port['network_id']: {agent_ip: {}}}
    entries = upd_fdb_entries[port['network_id']][agent_ip]
    if before:
        entries['before'] = before
    if after:
        entries['after'] = after
    self.L2populationAgentNotify.update_fdb_entries(
        self.rpc_ctx, {'chg_ip': upd_fdb_entries})
    return True
def get_networks():
    """Return (id, name) tuples for every network.

    :returns: list of (id, name) row tuples; empty list when there are
        no networks.
    """
    db_session = db_api.get_reader_session()
    # Query.all() never raises NoResultFound (only Query.one() does), so
    # the previous try/except around this call was unreachable dead
    # code; .all() already yields [] when there are no rows.
    return db_session.query(Network.id, Network.name).all()
def get_knid_for_network(network_id):
    """Return the Kaloom KNID mapped to *network_id*, or None."""
    session = db_api.get_reader_session()
    query = session.query(kaloom_models.KaloomKnidMapping).filter_by(
        network_id=network_id)
    try:
        mapping = query.one()
    except sa_exc.NoResultFound:
        return None
    return mapping.kaloom_knid
def setUp(self):
    """Prepare a RouteTargetTypeDriver with pre-synced RT allocations."""
    super(RouteTargetTypeTest, self).setUp()
    self.driver = type_route_target.RouteTargetTypeDriver()
    # Restrict the driver to the test's route-target number ranges
    # before syncing, so only those allocations are created.
    self.driver.rt_nn_ranges = RT_NN_RANGES
    self.driver._sync_route_target_allocations()
    self.session = db.get_reader_session()
    self.context = context.get_admin_context()
def get_mac_for_port(port_id):
    """Return the MAC address of the port with *port_id*, or None."""
    session = db_api.get_reader_session()
    query = session.query(Port).filter_by(id=port_id)
    try:
        return query.one().mac_address
    except sa_exc.NoResultFound:
        return None
def _fixed_ips_changed(self, context, orig, port, diff_ips):
    """Send an update_fdb_entries RPC reflecting a port's changed IPs.

    diff_ips is a (removed_ips, added_ips) pair; both sets are reported
    under the owning agent's tunnel IP as 'before'/'after' entries.
    Returns True after the notification is sent.
    """
    orig_ips, port_ips = diff_ips
    # DVR interfaces live on the current host; other ports report the
    # host they were originally bound to.
    if (port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE):
        agent_host = context.host
    else:
        agent_host = context.original_host
    if not agent_host:
        return
    agent_ip = l2pop_db.get_agent_ip_by_host(db_api.get_reader_session(),
                                             agent_host)
    orig_mac_ip = [l2pop_rpc.PortInfo(mac_address=port['mac_address'],
                                      ip_address=ip)
                   for ip in orig_ips]
    port_mac_ip = [l2pop_rpc.PortInfo(mac_address=port['mac_address'],
                                      ip_address=ip)
                   for ip in port_ips]
    upd_fdb_entries = {port['network_id']: {agent_ip: {}}}
    ports = upd_fdb_entries[port['network_id']][agent_ip]
    if orig_mac_ip:
        ports['before'] = orig_mac_ip
    if port_mac_ip:
        ports['after'] = port_mac_ip
    self.L2populationAgentNotify.update_fdb_entries(
        self.rpc_ctx, {'chg_ip': upd_fdb_entries})
    return True
def get_vlan_mapping_for_network_and_host(network_id, host):
    """Return the VLAN/host mapping row for (network_id, host), or None."""
    session = db_api.get_reader_session()
    query = session.query(kaloom_models.KaloomVlanHostMapping).filter_by(
        host=host, network_id=network_id)
    try:
        return query.one()
    except sa_exc.NoResultFound:
        return None
def get_trunk_port_by_trunk_id(trunk_id):
    """Return the parent port of the trunk with *trunk_id*, or None."""
    session = db.get_reader_session()
    with session.begin():
        trunk = session.query(trunk_models.Trunk).filter_by(
            id=trunk_id).first()
        if trunk is None:
            return None
        return trunk.port
def _verify_get_nsx_switch_and_port_id(self, exp_ls_uuid, exp_lp_uuid):
    """Assert get_nsx_switch_and_port_id returns the expected UUID pair."""
    # The nsxlib and db calls are mocked, therefore the cluster
    # and the neutron_port_id parameters can be set to None
    ls_uuid, lp_uuid = nsx_utils.get_nsx_switch_and_port_id(
        db_api.get_reader_session(), None, None)
    self.assertEqual(exp_ls_uuid, ls_uuid)
    self.assertEqual(exp_lp_uuid, lp_uuid)
def get_omni_resource(openstack_id):
    """Return the stored omni resource for *openstack_id*, or None."""
    session = db_api.get_reader_session()
    row = session.query(omni_resources.OmniResources).filter_by(
        openstack_id=openstack_id).first()
    return row.omni_resource if row else None
def get_revision_row(self, resource_uuid):
    """Return the OVN revision-number row for *resource_uuid*, or None."""
    session = db_api.get_reader_session()
    try:
        with session.begin():
            query = session.query(models.OVNRevisionNumbers)
            return query.filter_by(resource_uuid=resource_uuid).one()
    except exc.NoResultFound:
        return None
def get_segment_for_network(network_id):
    """Return the first (index 0) segment of *network_id*, or None."""
    db_session = db_api.get_reader_session()
    query = db_session.query(NetworkSegment).filter_by(
        network_id=network_id, segment_index=0)
    try:
        return query.one()
    except sa_exc.NoResultFound:
        return None
def get_all_portbindings():
    """Returns a list of all ports bindings."""
    session = db.get_reader_session()
    with session.begin():
        bindings = session.query(ml2_models.PortBinding).all()
        return {binding.port_id: _make_port_dict(binding)
                for binding in bindings}
def get_instances(tenant):
    """Returns set of all instance ids that may be relevant on CVX."""
    session = db.get_reader_session()
    with session.begin():
        port_model = models_v2.Port
        query = session.query(port_model.device_id).filter(
            port_model.tenant_id == tenant).distinct()
        # Each row is a one-tuple; unpack it in the comprehension.
        return {device_id for (device_id,) in query}
def setUp(self):
    """Wire up the ODL BGPVPN driver with the journal thread mocked out."""
    super(OpendaylightBgpvpnDriverTestCase, self).setUp()
    self.db_session = neutron_db_api.get_reader_session()
    self.driver = driverv2.OpenDaylightBgpvpnDriver(service_plugin=None)
    self.context = self._get_mock_context()
    # Prevent the background sync thread from starting during tests.
    self.mock_sync_thread = mock.patch.object(
        journal.OpendaylightJournalThread, 'start_odl_sync_thread').start()
    self.thread = journal.OpendaylightJournalThread()
def get_network_segments_by_port_id(port_id):
    """Return the network segments bound to *port_id*, ordered by level."""
    session = db.get_reader_session()
    with session.begin():
        rows = session.query(
            segment_models.NetworkSegment,
            ml2_models.PortBindingLevel).join(
                ml2_models.PortBindingLevel).filter_by(
                    port_id=port_id).order_by(
                        ml2_models.PortBindingLevel.level).all()
        # Keep only the NetworkSegment half of each joined row.
        return [segment for segment, _level in rows]
def get_trunk_port_by_subport_id(subport_id):
    """Returns trunk parent port based on sub port id."""
    session = db.get_reader_session()
    with session.begin():
        subport = session.query(trunk_models.SubPort).filter_by(
            port_id=subport_id).first()
        if subport is not None:
            return get_trunk_port_by_trunk_id(subport.trunk_id)
def get_instance_ports(tenant_id, manage_fabric=True, managed_physnets=None):
    """Returns all instance ports for a given tenant.

    :param tenant_id: tenant whose ports are collected.
    :param manage_fabric: when False, segments without a physical
        network are excluded.
    :param managed_physnets: optional list of physnet names to restrict
        to. NOTE(review): this list is mutated in place (None is
        appended) — visible to the caller.
    :returns: dict keyed by device (VM) id, each value an EOS-style VM
        representation containing its ports.
    """
    session = db.get_reader_session()
    with session.begin():
        # hack for pep8 E711: comparison to None should be
        # 'if cond is not None'
        none = None
        port_model = models_v2.Port
        binding_level_model = ml2_models.PortBindingLevel
        segment_model = segment_models.NetworkSegment
        # Only fully-bound ports: host, device and network must all be
        # set.
        all_ports = (session.query(
            port_model, binding_level_model, segment_model).join(
                binding_level_model).join(
                    segment_model).filter(port_model.tenant_id == tenant_id,
                                          binding_level_model.host != none,
                                          port_model.device_id != none,
                                          port_model.network_id != none))
        if not manage_fabric:
            all_ports = all_ports.filter(
                segment_model.physical_network != none)
        if managed_physnets is not None:
            # None is appended so fabric (physnet-less) segments still
            # match the IN filter.
            managed_physnets.append(None)
            all_ports = all_ports.filter(
                segment_model.physical_network.in_(managed_physnets))

        def eos_port_representation(port):
            # EOS-facing port dict; hosts is the set of binding hosts.
            return {
                u'portId': port.id,
                u'deviceId': port.device_id,
                u'hosts': set([bl.host for bl in port.binding_levels]),
                u'networkId': port.network_id
            }

        ports = {}
        for port in all_ports:
            if not utils.supported_device_owner(port.Port.device_owner):
                continue
            ports[port.Port.id] = eos_port_representation(port.Port)
        vm_dict = dict()

        def eos_vm_representation(port):
            # Group ports under their owning VM (device) id.
            return {
                u'vmId': port['deviceId'],
                u'baremetal_instance': False,
                u'ports': {
                    port['portId']: port
                }
            }

        for port in ports.values():
            deviceId = port['deviceId']
            if deviceId in vm_dict:
                vm_dict[deviceId]['ports'][port['portId']] = port
            else:
                vm_dict[deviceId] = eos_vm_representation(port)
        return vm_dict
def _verify_get_nsx_switch_ids(self, exp_ls_uuids):
    """Assert get_nsx_switch_ids returns exactly exp_ls_uuids.

    exp_ls_uuids is consumed (emptied) by this check.
    """
    # The nsxlib and db calls are mocked, therefore the cluster
    # and the neutron_router_id parameters can be set to None
    ls_uuids = nsx_utils.get_nsx_switch_ids(
        db_api.get_reader_session(), None, None)
    for ls_uuid in ls_uuids or []:
        self.assertIn(ls_uuid, exp_ls_uuids)
        exp_ls_uuids.remove(ls_uuid)
    # Anything left over was expected but not returned.
    self.assertFalse(exp_ls_uuids)
def _get_ha_ipaddress(self, port_id, ipaddress, session=None):
    """Return the HA-IP/port association for (port_id, ipaddress), or None."""
    if session is None:
        session = db_api.get_reader_session()
    # Baked query: compiled once and cached for repeated execution.
    query = BAKERY(lambda s: s.query(HAIPAddressToPortAssociation))
    query += lambda q: q.filter_by(
        port_id=sa.bindparam('port_id'),
        ha_ip_address=sa.bindparam('ipaddress'))
    return query(session).params(
        port_id=port_id, ipaddress=ipaddress).first()
def _get_ha_ipaddress(self, port_id, ipaddress, session=None):
    """Look up the HA IP address association row for a port/IP pair.

    :param port_id: Neutron port UUID.
    :param ipaddress: HA IP address string.
    :param session: optional DB session; a reader session is opened
        when omitted.
    :returns: first matching HAIPAddressToPortAssociation row or None.
    """
    session = session or db_api.get_reader_session()
    # Baked query: compiled once and cached for repeated execution.
    query = BAKERY(lambda s: s.query(
        HAIPAddressToPortAssociation))
    query += lambda q: q.filter_by(
        port_id=sa.bindparam('port_id'),
        ha_ip_address=sa.bindparam('ipaddress'))
    return query(session).params(
        port_id=port_id, ipaddress=ipaddress).first()
def tenant_provisioned(tid):
    """Returns true if any networks or ports exist for a tenant."""
    session = db.get_reader_session()
    with session.begin():
        # Short-circuit: the port query only runs when no networks exist.
        has_any = (
            session.query(models_v2.Network).filter_by(
                tenant_id=tid).count() or
            session.query(models_v2.Port).filter_by(
                tenant_id=tid).count())
        return bool(has_any)
def get_all_baremetal_ports():
    """Returns a list of all ports that belong to baremetal hosts."""
    session = db.get_reader_session()
    with session.begin():
        query = session.query(ml2_models.PortBinding).filter_by(
            vnic_type='baremetal')
        return {binding.port_id: _make_port_dict(binding)
                for binding in query.all()}
def _get_network_info_for_port(self, port_id):
    """Return MAC, CIDR-qualified IP and gateway IP for *port_id*."""
    session = db_api.get_reader_session()
    mac_address, ip_address, cidr, gateway_ip = get_network_info_for_port(
        session, port_id)
    # Append the prefix length ('/nn') taken from the subnet CIDR.
    prefix = cidr[cidr.index('/'):]
    return {
        'mac_address': mac_address,
        'ip_address': ip_address + prefix,
        'gateway_ip': gateway_ip
    }
def agent_restarted(self, context):
    """Heuristically decide whether the calling agent just restarted.

    Older agents do not include 'agent_restarted' in their RPC
    messages; for those, fall back to comparing the agent's uptime
    against the deprecated agent_boot_time option.

    :param context: RPC context carrying the agent's host.
    :returns: True when the agent is considered freshly restarted.
    """
    agent_host = context.host
    session = db_api.get_reader_session()
    agent = l2pop_db.get_agent_by_host(session, agent_host)
    if l2pop_db.get_agent_uptime(agent) < cfg.CONF.l2pop.agent_boot_time:
        # Fix: the original message ran two words together
        # ("'agent_restarted'information").
        LOG.warning(
            _LW("Agent on host '%s' did not supply "
                "'agent_restarted' information in RPC message, "
                "determined it restarted based on deprecated "
                "'agent_boot_time' config option."), agent_host)
        return True
    return False
def is_ha_router_port(context, device_owner, router_id):
    """Return True when device_owner/router_id identify an HA router port."""
    session = db_api.get_reader_session()
    if device_owner == constants.DEVICE_OWNER_HA_REPLICATED_INT:
        return True
    if device_owner != constants.DEVICE_OWNER_ROUTER_SNAT:
        return False
    # SNAT ports are HA only when the router's HA attribute is set.
    query = session.query(l3_attrs.RouterExtraAttributes).filter_by(
        ha=True)
    query = query.filter(
        l3_attrs.RouterExtraAttributes.router_id == router_id)
    return bool(query.limit(1).count())
def get_tenants():
    """Returns list of all project/tenant ids that may be relevant on CVX."""
    session = db.get_reader_session()
    project_ids = set()
    with session.begin():
        # Collect distinct project ids from both networks and ports.
        for model in (models_v2.Network, models_v2.Port):
            project_ids.update(
                pid for (pid,) in session.query(model.project_id).distinct())
    return project_ids
def get_ha_ipaddresses_for_port(self, port_id, session=None):
    """Returns the HA IP Addressses associated with a Port."""
    if session is None:
        session = db_api.get_reader_session()
    # Baked query: compiled once and cached for repeated execution.
    query = BAKERY(lambda s: s.query(HAIPAddressToPortAssociation))
    query += lambda q: q.filter_by(port_id=sa.bindparam('port_id'))
    rows = query(session).params(port_id=port_id).all()
    # REVISIT: Do the sorting in the UT?
    return sorted(row['ha_ip_address'] for row in rows)
def get_ha_ipaddresses_for_port(self, port_id, session=None):
    """Returns the HA IP Addressses associated with a Port.

    :param port_id: Neutron port UUID.
    :param session: optional DB session; a reader session is opened
        when omitted.
    :returns: sorted list of HA IP address strings for the port.
    """
    session = session or db_api.get_reader_session()
    # Baked query: compiled once and cached for repeated execution.
    query = BAKERY(lambda s: s.query(
        HAIPAddressToPortAssociation))
    query += lambda q: q.filter_by(
        port_id=sa.bindparam('port_id'))
    objs = query(session).params(
        port_id=port_id).all()
    # REVISIT: Do the sorting in the UT?
    return sorted([x['ha_ip_address'] for x in objs])
def get_stale_vlan_mappings(creating_seconds, deleting_seconds):
    """Return VLAN/host mappings stuck in CREATING or DELETING state.

    A mapping is stale when it has stayed in CREATING longer than
    *creating_seconds* or in DELETING longer than *deleting_seconds*.

    :returns: list of KaloomVlanHostMapping rows (possibly empty).
    """
    db_session = db_api.get_reader_session()
    now = datetime.utcnow()
    old_creating_date = now - timedelta(seconds=creating_seconds)
    old_deleting_date = now - timedelta(seconds=deleting_seconds)
    mapping = kaloom_models.KaloomVlanHostMapping
    # Query.all() never raises NoResultFound (only Query.one() does), so
    # the previous try/except returning None was unreachable dead code;
    # .all() already returns [] when nothing matches.
    return db_session.query(mapping).filter(
        or_(and_(mapping.state == "CREATING",
                 mapping.timestamp <= old_creating_date),
            and_(mapping.state == "DELETING",
                 mapping.timestamp <= old_deleting_date))).all()
def delete_port_postcommit(self, context):
    """Withdraw FDB entries for a deleted port, HA-router aware."""
    port = context.current
    host = context.host
    fdb_entries = self._get_agent_fdb(
        context, context.bottom_bound_segment, port, host)
    if fdb_entries and port['device_owner'] in l2pop_db.HA_ROUTER_PORTS:
        # For HA router ports, replace the single-agent entries with
        # the entries of every agent hosting an HA replica.
        network_id = port['network_id']
        fdb_entries[network_id]['ports'] = self._get_ha_port_agents_fdb(
            db_api.get_reader_session(), network_id, port['device_id'])
    self.L2populationAgentNotify.remove_fdb_entries(self.rpc_ctx,
                                                    fdb_entries)
def update_port_up(self, context):
    """Propagate FDB additions when a port transitions to ACTIVE.

    Sends the full FDB table to the agent that activated its first port
    on the network (or that just restarted), and notifies all other
    agents about the new port's entries.
    """
    port = context.current
    agent_host = context.host
    session = db_api.get_reader_session()
    port_context = context._plugin_context
    agent = l2pop_db.get_agent_by_host(session, agent_host)
    if not agent:
        LOG.warning(_LW("Unable to retrieve active L2 agent on host %s"),
                    agent_host)
        return
    network_id = port['network_id']
    agent_active_ports = l2pop_db.get_agent_network_active_port_count(
        session, agent_host, network_id)
    agent_ip = l2pop_db.get_agent_ip(agent)
    segment = context.bottom_bound_segment
    if not self._validate_segment(segment, port['id'], agent):
        return
    other_fdb_entries = self._get_fdb_entries_template(
        segment, agent_ip, network_id)
    other_fdb_ports = other_fdb_entries[network_id]['ports']
    # The uptime check treats a recently-booted agent like one with a
    # first activated port, so it receives the full table again.
    if agent_active_ports == 1 or (l2pop_db.get_agent_uptime(agent) <
                                   cfg.CONF.l2pop.agent_boot_time):
        # First port activated on current agent in this network,
        # we have to provide it with the whole list of fdb entries
        agent_fdb_entries = self._create_agent_fdb(session,
                                                   agent,
                                                   segment,
                                                   network_id)
        # And notify other agents to add flooding entry
        other_fdb_ports[agent_ip].append(const.FLOODING_ENTRY)
        if agent_fdb_entries[network_id]['ports'].keys():
            self.L2populationAgentNotify.add_fdb_entries(
                self.rpc_ctx, agent_fdb_entries, agent_host)
    # Notify other agents to add fdb rule for current port
    if (port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE and
            not l3_hamode_db.is_ha_router_port(
                port_context, port['device_owner'], port['device_id'])):
        other_fdb_ports[agent_ip] += self._get_port_fdb_entries(port)
    self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx,
                                                 other_fdb_entries)
def delete_port_postcommit(self, context):
    """Notify agents to remove FDB entries for a deleted port.

    For HA router ports the per-agent entries are replaced with the
    aggregate entries of all agents hosting the HA replicas, so the
    withdrawal covers every location of the router port.
    """
    port = context.current
    agent_host = context.host
    plugin_context = context._plugin_context
    fdb_entries = self._get_agent_fdb(
        plugin_context, context.bottom_bound_segment, port, agent_host)
    if fdb_entries and l3_hamode_db.is_ha_router_port(
            context, port['device_owner'], port['device_id']):
        session = db_api.get_reader_session()
        network_id = port['network_id']
        other_fdb_ports = self._get_ha_port_agents_fdb(
            session, network_id, port['device_id'])
        fdb_entries[network_id]['ports'] = other_fdb_ports
    self.L2populationAgentNotify.remove_fdb_entries(self.rpc_ctx,
                                                    fdb_entries)
def get_port_for_ha_ipaddress(self, ipaddress, network_id, session=None):
    """Returns the Neutron Port ID for the HA IP Addresss."""
    if session is None:
        session = db_api.get_reader_session()
    # Baked query: compiled once and cached for repeated execution.
    query = BAKERY(lambda s: s.query(HAIPAddressToPortAssociation))
    query += lambda q: q.join(
        models_v2.Port,
        models_v2.Port.id == HAIPAddressToPortAssociation.port_id)
    query += lambda q: q.filter(
        HAIPAddressToPortAssociation.ha_ip_address ==
        sa.bindparam('ipaddress'))
    query += lambda q: q.filter(
        models_v2.Port.network_id == sa.bindparam('network_id'))
    return query(session).params(
        ipaddress=ipaddress, network_id=network_id).first()
def delete(self, context):
    """Delete this node instance's heat stacks and wait out the VIP port.

    For each stack, capture the loadbalancer VIP port (if any) before
    deletion, then poll until heat has removed the port; if it is still
    present after the retry budget, request a DB retry so a fresh
    session observes the deletion.
    """
    stack_ids = self._get_node_instance_stacks(context.plugin_session,
                                               context.current_node['id'],
                                               context.instance['id'])
    heatclient = self._get_heat_client(context.plugin_context)
    for stack in stack_ids:
        vip_port_id = None
        try:
            # Bug fix: the original queried stack_ids[0] on every
            # iteration, so only the first stack's VIP port was ever
            # inspected; use the stack currently being deleted.
            rstr = heatclient.client.resources.get(stack.stack_id,
                                                   'loadbalancer')
            vip_port_id = rstr.attributes['vip_port_id']
        except heat_exc.HTTPNotFound:
            # stack not found, so no need to process any further
            pass
        heatclient.delete(stack.stack_id)
        if vip_port_id:
            for attempt in range(DELETE_VIP_PORT_RETRIES):
                # We intentionally get a new session so as to be
                # able to read the updated DB
                session = db_api.get_reader_session()
                vip_port = session.query(ndb.Port).filter_by(
                    id=vip_port_id).all()
                if vip_port:
                    # heat stack delete is not finished yet, so try again
                    LOG.debug(("VIP port %s is not yet deleted"), vip_port)
                    LOG.debug(("Retry attempt; %s"), attempt + 1)
                    # Stack delete will at least take some minimal amount
                    # of time, hence we wait a little bit.
                    time.sleep(STACK_ACTION_WAIT_TIME)
                else:
                    # we force a retry so that a new session can be
                    # used that will correctly reflect the VIP port as
                    # deleted and hence allow the subsequent policy driver
                    # to delete the VIP subnet
                    raise db_exc.RetryRequest(Exception)
    self._delete_node_instance_stack_in_db(context.plugin_session,
                                           context.current_node['id'],
                                           context.instance['id'])
def _get_agent_fdb(self, context, segment, port, agent_host):
    """Build the FDB entries other agents must remove for this port.

    Returns None when the port is unbound, the agent is unknown, or the
    segment fails validation; otherwise a dict keyed by network id with
    the entries to withdraw (plus a flooding entry when this was the
    agent's last active port on the network).
    """
    if not agent_host:
        # Port was never bound to a host; nothing to withdraw.
        return
    network_id = port['network_id']
    session = db_api.get_reader_session()
    agent_active_ports = l2pop_db.get_agent_network_active_port_count(
        session, agent_host, network_id)
    agent = l2pop_db.get_agent_by_host(session, agent_host)
    if not agent:
        LOG.warning(_LW("Unable to retrieve active L2 agent on host %s"),
                    agent_host)
        return
    if not self._validate_segment(segment, port['id'], agent):
        return
    agent_ip = l2pop_db.get_agent_ip(agent)
    other_fdb_entries = self._get_fdb_entries_template(
        segment, agent_ip, port['network_id'])
    if agent_active_ports == 0:
        # Agent is removing its last activated port in this network,
        # other agents needs to be notified to delete their flooding entry.
        other_fdb_entries[network_id]['ports'][agent_ip].append(
            const.FLOODING_ENTRY)
    # Notify other agents to remove fdb rules for current port
    if (port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE and
            not l3_hamode_db.is_ha_router_port(context,
                                               port['device_owner'],
                                               port['device_id'])):
        fdb_entries = self._get_port_fdb_entries(port)
        other_fdb_entries[network_id]['ports'][agent_ip] += fdb_entries
    return other_fdb_entries
def test_pt_lifecycle(self):
    """Exercise create/show/update/delete of segmentation_labels on PTs."""
    ptg = self.create_policy_target_group()['policy_target_group']
    # Default: a PT created without labels reports an empty list.
    pt = self.create_policy_target(
        policy_target_group_id=ptg['id'])['policy_target']
    self.assertEqual([], pt['segmentation_labels'])
    pt = self.show_policy_target(
        pt['id'], expected_res_status=200)['policy_target']
    self.assertEqual([], pt['segmentation_labels'])
    self.delete_policy_target(pt['id'], expected_res_status=204)
    # An explicit empty list behaves like the default.
    labels = []
    pt = self.create_policy_target(
        policy_target_group_id=ptg['id'],
        segmentation_labels=labels)['policy_target']
    self.assertItemsEqual(labels, pt['segmentation_labels'])
    pt = self.show_policy_target(
        pt['id'], expected_res_status=200)['policy_target']
    self.assertItemsEqual([], pt['segmentation_labels'])
    self.delete_policy_target(pt['id'], expected_res_status=204)
    # Non-empty labels round-trip through create/show.
    labels = ['red', 'blue']
    pt = self.create_policy_target(
        policy_target_group_id=ptg['id'],
        segmentation_labels=labels)['policy_target']
    self.assertItemsEqual(labels, pt['segmentation_labels'])
    pt = self.show_policy_target(
        pt['id'], expected_res_status=200)['policy_target']
    self.assertItemsEqual(labels, pt['segmentation_labels'])
    # Updates replace the label set entirely.
    labels = ['green', 'black', 'red']
    pt = self.update_policy_target(
        pt['id'], segmentation_labels=labels,
        expected_res_status=200)['policy_target']
    self.assertItemsEqual(labels, pt['segmentation_labels'])
    pt = self.show_policy_target(
        pt['id'], expected_res_status=200)['policy_target']
    self.assertItemsEqual(labels, pt['segmentation_labels'])
    labels = []
    pt = self.update_policy_target(
        pt['id'], segmentation_labels=labels,
        expected_res_status=200)['policy_target']
    self.assertItemsEqual(labels, pt['segmentation_labels'])
    pt = self.show_policy_target(
        pt['id'], expected_res_status=200)['policy_target']
    self.assertItemsEqual(labels, pt['segmentation_labels'])
    labels = ['black']
    pt = self.update_policy_target(
        pt['id'], segmentation_labels=labels,
        expected_res_status=200)['policy_target']
    self.assertItemsEqual(labels, pt['segmentation_labels'])
    pt = self.show_policy_target(
        pt['id'], expected_res_status=200)['policy_target']
    self.assertItemsEqual(labels, pt['segmentation_labels'])
    self.delete_policy_target(pt['id'], expected_res_status=204)
    # Deleting the PT must also remove its label rows from the DB.
    session = db_api.get_reader_session()
    rows = (session.query(db.ApicSegmentationLabelDB).filter_by(
        policy_target_id=pt['id']).all())
    self.assertEqual([], rows)
def _get_endpoints(self):
    """Return a query over all tunnel endpoints of this driver's model."""
    LOG.debug("_get_endpoints() called")
    reader = db_api.get_reader_session()
    return reader.query(self.endpoint_model)
def get_endpoint_by_ip(self, ip):
    """Return the endpoint row with the given tunnel IP, or None."""
    LOG.debug("get_endpoint_by_ip() called for ip %s", ip)
    session = db_api.get_reader_session()
    query = session.query(self.endpoint_model).filter_by(ip_address=ip)
    return query.first()
def get_endpoint_by_host(self, host):
    """Return the endpoint row for the given host, or None."""
    LOG.debug("get_endpoint_by_host() called for host %s", host)
    session = db_api.get_reader_session()
    query = session.query(self.endpoint_model).filter_by(host=host)
    return query.first()
def test_l3p_lifecycle(self):
    """Exercise create/show/update/delete of allowed_vm_names on L3Ps."""
    l3p = self.create_l3_policy(name='myl3')['l3_policy']
    # Default: an L3P created without the attribute reports [].
    self.assertEqual([], l3p['allowed_vm_names'])
    l3p = self.show_l3_policy(
        l3p['id'], expected_res_status=200)['l3_policy']
    self.assertEqual([], l3p['allowed_vm_names'])
    self.delete_l3_policy(l3p['id'], tenant_id=l3p['tenant_id'],
                          expected_res_status=204)
    # An explicit empty list behaves like the default.
    allowed_vm_names = []
    l3p = self.create_l3_policy(
        name='myl3', allowed_vm_names=allowed_vm_names)['l3_policy']
    self.assertItemsEqual(allowed_vm_names, l3p['allowed_vm_names'])
    l3p = self.show_l3_policy(
        l3p['id'], expected_res_status=200)['l3_policy']
    self.assertItemsEqual([], l3p['allowed_vm_names'])
    self.delete_l3_policy(l3p['id'], tenant_id=l3p['tenant_id'],
                          expected_res_status=204)
    # Non-empty name patterns round-trip through create/show.
    allowed_vm_names = ['safe_vm*', '^secure_vm*']
    l3p = self.create_l3_policy(
        name='myl3', allowed_vm_names=allowed_vm_names)['l3_policy']
    self.assertItemsEqual(allowed_vm_names, l3p['allowed_vm_names'])
    l3p = self.show_l3_policy(
        l3p['id'], expected_res_status=200)['l3_policy']
    self.assertItemsEqual(allowed_vm_names, l3p['allowed_vm_names'])
    # Updates replace the list entirely.
    allowed_vm_names = ['good_vm*', '^ok_vm*', 'safe_vm*']
    l3p = self.update_l3_policy(
        l3p['id'], allowed_vm_names=allowed_vm_names,
        expected_res_status=200)['l3_policy']
    self.assertItemsEqual(allowed_vm_names, l3p['allowed_vm_names'])
    l3p = self.show_l3_policy(
        l3p['id'], expected_res_status=200)['l3_policy']
    self.assertItemsEqual(allowed_vm_names, l3p['allowed_vm_names'])
    allowed_vm_names = []
    l3p = self.update_l3_policy(
        l3p['id'], allowed_vm_names=allowed_vm_names,
        expected_res_status=200)['l3_policy']
    self.assertItemsEqual(allowed_vm_names, l3p['allowed_vm_names'])
    l3p = self.show_l3_policy(
        l3p['id'], expected_res_status=200)['l3_policy']
    self.assertItemsEqual(allowed_vm_names, l3p['allowed_vm_names'])
    allowed_vm_names = ['^ok_vm*']
    l3p = self.update_l3_policy(
        l3p['id'], allowed_vm_names=allowed_vm_names,
        expected_res_status=200)['l3_policy']
    self.assertItemsEqual(allowed_vm_names, l3p['allowed_vm_names'])
    l3p = self.show_l3_policy(
        l3p['id'], expected_res_status=200)['l3_policy']
    self.assertItemsEqual(allowed_vm_names, l3p['allowed_vm_names'])
    self.delete_l3_policy(l3p['id'], tenant_id=l3p['tenant_id'],
                          expected_res_status=204)
    # Deleting the L3P must also purge its allowed-VM-name rows.
    session = db_api.get_reader_session()
    rows = (session.query(db.ApicAllowedVMNameDB).filter_by(
        l3_policy_id=l3p['id']).all())
    self.assertEqual([], rows)
def get_ha_port_associations(self):
    """Return every HA-IP/port association row."""
    session = db_api.get_reader_session()
    # Baked query: compiled once and cached for repeated execution.
    baked = BAKERY(lambda s: s.query(HAIPAddressToPortAssociation))
    return baked(session).all()