def test_bd_l3out_vrf_in_tenant(self):
    """Verify BD l3out_names tracking when the L3Out's VRF lives in the
    same (non-common) tenant.

    Only BDs whose vrf_name matches the connected VRF should get the
    L3Out added to (and later removed from) their l3out_names list.
    """
    self.mgr.create(self.ctx, a_res.Tenant(name='dept1'))
    vrf = a_res.VRF(tenant_name='dept1', name='default')
    # bd1 uses the VRF being connected; bd2 points at an unrelated VRF
    # ('foo') and must remain untouched throughout.
    bd1_dept1 = a_res.BridgeDomain(tenant_name='dept1', name='bd1',
                                   limit_ip_learn_to_subnets=True,
                                   vrf_name='default')
    bd2_dept1 = a_res.BridgeDomain(tenant_name='dept1', name='bd2',
                                   limit_ip_learn_to_subnets=True,
                                   vrf_name='foo')
    for o in [vrf, bd1_dept1, bd2_dept1]:
        self.mgr.create(self.ctx, o)
    l3out = a_res.L3Outside(tenant_name='dept1', name='o1')
    ext_net = a_res.ExternalNetwork(
        tenant_name='dept1', l3out_name='o1', name='inet1')
    self.ns.create_l3outside(self.ctx, l3out)
    self.ns.create_external_network(self.ctx, ext_net)
    self.mgr.update(self.ctx, l3out, vrf_name='default')
    # Before connect: neither BD references the L3Out.
    self._verify(present=[bd1_dept1, bd2_dept1])
    self.ns.connect_vrf(self.ctx, ext_net, vrf)
    # Only the BD on the connected VRF picks up the L3Out name.
    bd1_dept1.l3out_names = ['o1']
    self._verify(present=[bd1_dept1, bd2_dept1])
    self.ns.disconnect_vrf(self.ctx, ext_net, vrf)
    # Disconnect restores the original (empty) l3out_names.
    bd1_dept1.l3out_names = []
    self._verify(present=[bd1_dept1, bd2_dept1])
def _check_connect_vrfs(self, stage):
    """Verify the expected AIM state at each stage of the connect-VRF
    test sequence.

    :param stage: one of 'stage1'..'stage4' marking the point in the
        connect/disconnect sequence driven by test_connect_vrfs; any
        other value fails the test.
    """
    objs = self._get_vrf_1_ext_net_1_objects()
    l3out = objs['l3out']
    ext_net = objs['ext_net']
    nat_bd = objs['nat_bd']
    # Exclude the NAT BD: it is verified separately via objs['nat_bd'],
    # whose vrf_name is mutated per stage below.
    l3out_objs = [o for o in self._get_l3out_objects()
                  if not isinstance(o, a_res.BridgeDomain)]
    bd1 = a_res.BridgeDomain(tenant_name='t1', name='bd1',
                             vrf_name='vrf1',
                             limit_ip_learn_to_subnets=True,
                             l3out_names=['o1'])
    bd2 = a_res.BridgeDomain(tenant_name='t1', name='bd2',
                             vrf_name='vrf2',
                             limit_ip_learn_to_subnets=True,
                             l3out_names=['o1'])
    # list() around objs.values() keeps this working on Python 3, where
    # dict.values() returns a view that cannot be concatenated with a
    # list using '+'.
    if stage == 'stage1':
        self._verify(present=list(objs.values()) + l3out_objs + [bd1])
    elif stage == 'stage2' or stage == 'stage3':
        l3out.vrf_name = 'vrf2'
        ext_net.provided_contract_names = ['EXT-o1', 'p1_vrf2', 'p2_vrf2']
        ext_net.consumed_contract_names = ['EXT-o1', 'c1_vrf2', 'c2_vrf2']
        self._verify(present=list(objs.values()) + l3out_objs +
                     [bd1, bd2])
    elif stage == 'stage4':
        # All VRFs disconnected: bd2 drops the L3Out and the NAT BD
        # falls back to its own NAT VRF.
        bd2.l3out_names = []
        nat_bd.vrf_name = 'EXT-o1'
        l3out.vrf_name = 'vrf2'
        ext_net.provided_contract_names = ['EXT-o1']
        ext_net.consumed_contract_names = ['EXT-o1']
        self._verify(present=list(objs.values()) + l3out_objs +
                     [bd1, bd2])
    else:
        self.assertFalse(True, 'Unknown test stage %s' % stage)
def test_connect_vrfs(self):
    """End-to-end connect/disconnect of two VRFs to one external network.

    Drives the four stages checked by _check_connect_vrfs: connect vrf1
    (stage1, also verified idempotent), connect vrf2 (stage2),
    disconnect vrf1 (stage3), disconnect vrf2 (stage4).
    """
    l3out = a_res.L3Outside(tenant_name='t1', name='o1',
                            display_name='OUT')
    ext_net = a_res.ExternalNetwork(
        tenant_name='t1', l3out_name='o1', name='inet1',
        display_name='INET1')
    self.mgr.create(self.ctx, a_res.Tenant(name=self.vrf1_tenant_name))
    self.mgr.create(self.ctx, a_res.Tenant(name=self.vrf2_tenant_name))
    self.ns.create_l3outside(self.ctx, l3out)
    self.ns.create_external_network(self.ctx, ext_net)
    self.ns.update_external_cidrs(self.ctx, ext_net,
                                  ['20.20.20.0/24', '50.50.0.0/16'])
    # connect vrf_1
    vrf1 = a_res.VRF(tenant_name=self.vrf1_tenant_name, name='vrf1',
                     display_name='VRF1')
    if self.vrf1_tenant_name != self.bd1_tenant_name:
        # NOTE(review): tenant name is hard-coded to 'dept1' rather
        # than self.bd1_tenant_name -- presumably the subclasses that
        # make these names differ use 'dept1'; confirm against the
        # test subclasses.
        self.mgr.create(self.ctx, a_res.Tenant(name='dept1'))
    if self.fix_l3out_vrf:
        self.mgr.update(self.ctx, l3out, vrf_name=vrf1.name)
    bd1 = a_res.BridgeDomain(tenant_name=self.bd1_tenant_name,
                             name='bd1',
                             limit_ip_learn_to_subnets=True,
                             vrf_name='vrf1')
    self.mgr.create(self.ctx, vrf1)
    self.mgr.create(self.ctx, bd1)
    ext_net.provided_contract_names = ['p1_vrf1', 'p2_vrf1']
    ext_net.consumed_contract_names = ['c1_vrf1', 'c2_vrf1']
    self.ns.connect_vrf(self.ctx, ext_net, vrf1)
    # read_vrfs must report the VRF we just connected.
    connected_vrfs = self.ns.read_vrfs(self.ctx, ext_net)
    self.assertEqual(vrf1, connected_vrfs[0])
    self._check_connect_vrfs('stage1')
    # connect vrf_1 again - should be no-op
    self.ns.connect_vrf(self.ctx, ext_net, vrf1)
    self._check_connect_vrfs('stage1')
    # connect vrf_2
    vrf2 = a_res.VRF(tenant_name=self.vrf2_tenant_name, name='vrf2',
                     display_name='VRF2')
    bd2 = a_res.BridgeDomain(tenant_name=self.vrf2_tenant_name,
                             name='bd2',
                             limit_ip_learn_to_subnets=True,
                             vrf_name='vrf2')
    self.mgr.create(self.ctx, vrf2)
    self.mgr.create(self.ctx, bd2)
    ext_net.provided_contract_names = ['p1_vrf2', 'p2_vrf2']
    ext_net.consumed_contract_names = ['c1_vrf2', 'c2_vrf2']
    if self.fix_l3out_vrf:
        self.mgr.update(self.ctx, l3out, vrf_name=vrf2.name)
    self.ns.connect_vrf(self.ctx, ext_net, vrf2)
    self._check_connect_vrfs('stage2')
    # disconnect vrf_1
    self.ns.disconnect_vrf(self.ctx, ext_net, vrf1)
    self._check_connect_vrfs('stage3')
    # disconnect vrf_2
    self.ns.disconnect_vrf(self.ctx, ext_net, vrf2)
    self._check_connect_vrfs('stage4')
def test_squash_operations(self):
    """push_aim_resources squashes pushes that target the same objects.

    Re-pushing an identical payload is idempotent; pushing an updated
    version of an already-queued object replaces it in place, while
    genuinely new payloads append new backlog entries.
    """
    # Craft some objects and push them
    aim_converter = converter.AimToAciModelConverter()
    tn = a_res.Tenant(name='tn1', display_name='foo')
    bd = a_res.BridgeDomain(tenant_name='tn1', name='bd1',
                            display_name='bar')
    vrf = a_res.VRF(tenant_name='tn1', name='vrf1', display_name='pippo')
    self.manager.push_aim_resources({
        'create': [tn, bd],
        'delete': aim_converter.convert([vrf])
    })
    self.assertEqual(1, len(self.manager.object_backlog.queue))
    old = self.manager.object_backlog.queue[0]
    # Idempotent
    self.manager.push_aim_resources({
        'create': [tn, bd],
        'delete': aim_converter.convert([vrf])
    })
    self.assertEqual(1, len(self.manager.object_backlog.queue))
    curr = self.manager.object_backlog.queue[0]
    self.assertEqual(old, curr)
    # Now replace something
    bd2 = a_res.BridgeDomain(tenant_name='tn1', name='bd2',
                             display_name='bar')
    bd = copy.deepcopy(bd)
    bd.display_name = 'foobar'
    self.manager.push_aim_resources({'create': [bd2, bd], 'delete': []})
    # The updated bd was squashed into the first entry; only bd2 lands
    # in a new (second) backlog entry.
    self.assertEqual(2, len(self.manager.object_backlog.queue))
    self.assertEqual({
        'create': [bd2],
        'delete': []
    }, self.manager.object_backlog.queue[1])
    self.assertEqual(
        'foobar',
        self.manager.object_backlog.queue[0]['create'][1].display_name)
    # Add something completely different
    vrf2 = a_res.VRF(tenant_name='tn1', name='vrf2',
                     display_name='pippo')
    self.manager.push_aim_resources({
        'create': [vrf2],
        'delete': aim_converter.convert([bd])
    })
    self.assertEqual(
        {
            'create': [vrf2],
            'delete': aim_converter.convert([bd])
        }, self.manager.object_backlog.queue[2])
def _get_vrf_1_ext_net_1_objects(self, connected=True):
    """Return the expected AIM objects for ext net 'inet1' on L3Out 'o1'.

    When *connected* is True the external network carries the vrf1
    contracts in addition to the NAT contract 'EXT-o1'.
    """
    provided = ['EXT-o1', 'p1_vrf1', 'p2_vrf1'] if connected \
        else ['EXT-o1']
    consumed = ['EXT-o1', 'c1_vrf1', 'c2_vrf1'] if connected \
        else ['EXT-o1']
    expected = {}
    expected['l3out'] = a_res.L3Outside(
        tenant_name='t1', name='o1', display_name='OUT',
        vrf_name='vrf1')
    expected['ext_net'] = a_res.ExternalNetwork(
        tenant_name='t1', l3out_name='o1', name='inet1',
        display_name='INET1',
        provided_contract_names=provided,
        consumed_contract_names=consumed)
    expected['nat_bd'] = a_res.BridgeDomain(
        tenant_name='t1', name='EXT-o1', display_name='EXT-OUT',
        vrf_name='EXT-o1', limit_ip_learn_to_subnets=True,
        l3out_names=['o1'])
    expected['ext_sub_1'] = a_res.ExternalSubnet(
        tenant_name='t1', l3out_name='o1',
        external_network_name='inet1', cidr='20.20.20.0/24')
    expected['ext_sub_2'] = a_res.ExternalSubnet(
        tenant_name='t1', l3out_name='o1',
        external_network_name='inet1', cidr='50.50.0.0/16')
    return expected
def test_no_tree_update_on_event(self):
    """The K8s watcher only flags a tree for update on real changes."""
    bd = resource.BridgeDomain(tenant_name='t1', name='bd1')
    bd_db_obj = self.ctx.store.make_db_obj(bd)
    # Mimic a real API event: add the kind/apiVersion fields the K8s
    # API includes but the stored object does not carry.
    bd_db_obj.update({
        'kind': bd_db_obj.kind,
        'apiVersion': bd_db_obj.api_version
    })
    ev = {'type': 'ADDED', 'object': bd_db_obj}
    watcher = k8s_watcher.K8sWatcher()
    # First ADDED event affects tenant t1's tree.
    self.assertEqual(set(['tn-t1']), watcher._process_event(ev))
    # no-change event
    self.assertEqual(set(), watcher._process_event(ev))
    # no real change
    ev['type'] = 'MODIFIED'
    self.assertEqual(set(), watcher._process_event(ev))
    # change to irrelevant attribute
    ev['object']['spec']['someAttr'] = 'someValue'
    self.assertEqual(set(), watcher._process_event(ev))
    # delete
    ev['type'] = 'DELETED'
    self.assertEqual(set(['tn-t1']), watcher._process_event(ev))
def extend_network_dict(self, session, base_model, result):
    """Add APIC distinguished names and sync state to a network dict.

    Mutates *result* in place, setting the BD/EPG DNs and the merged
    synchronization state of the two AIM resources.
    """
    LOG.debug("APIC AIM MD extending dict for network: %s", result)

    tenant_id = result['tenant_id']
    tenant_aname = self.name_mapper.tenant(session, tenant_id)
    LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
              {'id': tenant_id, 'aname': tenant_aname})

    net_id = result['id']
    net_name = result['name']
    aname = self.name_mapper.network(session, net_id, net_name)
    LOG.debug("Mapped network_id %(id)s with name %(name)s to %(aname)s",
              {'id': net_id, 'name': net_name, 'aname': aname})

    bd = aim_resource.BridgeDomain(tenant_name=tenant_aname, name=aname)
    epg = aim_resource.EndpointGroup(tenant_name=tenant_aname,
                                     app_profile_name=AP_NAME,
                                     name=aname)
    aim_ctx = aim_context.AimContext(session)
    # Start from "synced" and let each resource's status degrade it.
    sync_state = cisco_apic.SYNC_SYNCED
    for res in (bd, epg):
        sync_state = self._merge_status(aim_ctx, sync_state, res)
    result[cisco_apic.DIST_NAMES] = {cisco_apic.BD: bd.dn,
                                     cisco_apic.EPG: epg.dn}
    result[cisco_apic.SYNC_STATE] = sync_state
def delete_network_precommit(self, context):
    """Remove the AIM EPG and BD backing a deleted Neutron network."""
    LOG.debug("APIC AIM MD deleting network: %s", context.current)
    session = context._plugin_context.session
    current = context.current

    tenant_id = current['tenant_id']
    tenant_aname = self.name_mapper.tenant(session, tenant_id)
    LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
              {'id': tenant_id, 'aname': tenant_aname})

    net_id = current['id']
    net_name = current['name']
    aname = self.name_mapper.network(session, net_id, net_name)
    LOG.debug("Mapped network_id %(id)s with name %(name)s to %(aname)s",
              {'id': net_id, 'name': net_name, 'aname': aname})

    aim_ctx = aim_context.AimContext(session)
    # Delete the EPG first, then its BD, then drop the name mapping.
    epg = aim_resource.EndpointGroup(tenant_name=tenant_aname,
                                     app_profile_name=AP_NAME,
                                     name=aname)
    self.aim.delete(aim_ctx, epg)
    bd = aim_resource.BridgeDomain(tenant_name=tenant_aname, name=aname)
    self.aim.delete(aim_ctx, bd)
    self.name_mapper.delete_apic_name(session, net_id)
def update_network_precommit(self, context):
    """Propagate a network rename to the AIM BD/EPG display names."""
    LOG.debug("APIC AIM MD updating network: %s", context.current)
    # Only the display name is derived from the network name; nothing
    # to do unless the name actually changed.
    if context.current['name'] == context.original['name']:
        return
    session = context._plugin_context.session

    tenant_id = context.current['tenant_id']
    tenant_aname = self.name_mapper.tenant(session, tenant_id)
    LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
              {'id': tenant_id, 'aname': tenant_aname})

    net_id = context.current['id']
    net_name = context.current['name']
    aname = self.name_mapper.network(session, net_id, net_name)
    LOG.debug(
        "Mapped network_id %(id)s with name %(name)s to "
        "%(aname)s",
        {'id': net_id, 'name': net_name, 'aname': aname})

    dname = aim_utils.sanitize_display_name(context.current['name'])
    aim_ctx = aim_context.AimContext(session)
    bd = aim_resource.BridgeDomain(tenant_name=tenant_aname, name=aname)
    bd = self.aim.update(aim_ctx, bd, display_name=dname)
    epg = aim_resource.EndpointGroup(tenant_name=tenant_aname,
                                     app_profile_name=AP_NAME,
                                     name=aname)
    epg = self.aim.update(aim_ctx, epg, display_name=dname)
def test_cleanup_state(self, tree_type=tree_manager.CONFIG_TREE):
    """cleanup_state must refuse to drop a tenant tree while resources
    and faults remain, and succeed once the tenant is fully removed."""
    tree_mgr = tree_manager.HashTreeManager()
    aim_mgr = aim_manager.AimManager()
    aim_mgr.create(self.ctx, resource.Tenant(name='t1'))
    bd1 = resource.BridgeDomain(tenant_name='t1', name='bd1',
                                display_name='somestuff',
                                vrf_name='vrf')
    bd1_fault = aim_status.AciFault(
        fault_code='901',
        external_identifier='uni/tn-t1/BD-bd1/fault-901',
        description='failure901')
    aim_mgr.create(self.ctx, bd1)
    aim_mgr.set_fault(self.ctx, bd1, bd1_fault)
    # NOTE(review): this call omits the context argument passed in the
    # successful cleanup_state call below, so the assertRaises may be
    # satisfied by a TypeError (wrong arity) rather than the intended
    # "tenant not empty" failure -- confirm the expected signature.
    self.assertRaises(Exception, self.universe.cleanup_state, 'tn-t1')
    trees = tree_mgr.find(self.ctx, tree=tree_type)
    # tenant still there, trees not empty.
    self.assertEqual(1, len(trees))
    aim_mgr.clear_fault(self.ctx, bd1_fault)
    aim_mgr.delete(self.ctx, resource.Tenant(name='t1'), cascade=True)
    self.universe.cleanup_state(self.ctx, 'tn-t1')
    trees = tree_mgr.find(self.ctx, tree=tree_type)
    self.assertEqual(0, len(trees))
def _get_example_aim_bd(cls, **kwargs):
    """Build a BridgeDomain with canonical test defaults.

    Keyword arguments override (or add) attributes on the returned
    example object.
    """
    bd = resource.BridgeDomain(tenant_name='test-tenant',
                               vrf_name='default',
                               name='test',
                               enable_arp_flood=False,
                               enable_routing=True,
                               limit_ip_learn_to_subnets=False,
                               l2_unknown_unicast_mode='proxy',
                               ep_move_detect_mode='')
    # Overrides are written straight into the instance dict so tests
    # can inject arbitrary attributes, bypassing the constructor.
    vars(bd).update(kwargs)
    return bd
def test_get_resources_for_delete(self):
    """get_resources_for_delete maps hash-tree keys back to ACI objects.

    Also covers monitored objects: deleting a monitored object only
    removes its ownership tag, while deleting an RS-node of a monitored
    object returns the RS object itself.
    """
    objs = [
        {'fvBD': {'attributes': {
            'dn': 'uni/tn-t1/BD-test'}}},
        {'vzRsSubjFiltAtt': {'attributes': {
            'dn': 'uni/tn-t1/brc-c/subj-s/rssubjFiltAtt-f'}}},
        {'vzRsFiltAtt': {'attributes': {
            'dn': 'uni/tn-t1/brc-c/subj-s/intmnl/rsfiltAtt-g'}}},
        {'vzRsFiltAtt': {'attributes': {
            'dn': 'uni/tn-t1/brc-c/subj-s/outtmnl/rsfiltAtt-h'}}}]
    keys = [('fvTenant|t1', 'fvBD|test'),
            ('fvTenant|t1', 'vzBrCP|c', 'vzSubj|s', 'vzRsSubjFiltAtt|f'),
            ('fvTenant|t1', 'vzBrCP|c', 'vzSubj|s', 'vzInTerm|intmnl',
             'vzRsFiltAtt|g'),
            ('fvTenant|t1', 'vzBrCP|c', 'vzSubj|s', 'vzOutTerm|outtmnl',
             'vzRsFiltAtt|h')]
    result = self.universe.get_resources_for_delete(keys)
    self.assertEqual(utils.deep_sort(objs), utils.deep_sort(result))
    # Create a pending monitored object
    tn1 = resource.Tenant(name='tn1', monitored=True)
    monitored_bd = resource.BridgeDomain(
        tenant_name='tn1', name='monitoredBD', monitored=True)
    self.universe.manager.create(self.ctx, tn1)
    self.universe.manager.set_resource_sync_pending(self.ctx, tn1)
    self.universe.manager.create(self.ctx, monitored_bd)
    self.universe.manager.set_resource_sync_pending(self.ctx,
                                                    monitored_bd)
    self.universe.multiverse = []
    result = self.universe.get_resources_for_delete(
        [('fvTenant|tn1', 'fvBD|monitoredBD')])
    self.assertEqual(1, len(result))
    result = result[0]
    # list() around keys() keeps this working on Python 3, where dict
    # views are not indexable (and matches the list(result.values())
    # usage below).
    self.assertEqual('tagInst', list(result.keys())[0])
    self.assertEqual('uni/tn-tn1/BD-monitoredBD/tag-openstack_aid',
                     list(result.values())[0]['attributes']['dn'])
    # Delete an RS-node of a monitored object
    self.universe.manager.create(self.ctx, resource.L3Outside(
        tenant_name='tn1', name='out', monitored=True))
    ext_net = self.universe.manager.create(
        self.ctx, resource.ExternalNetwork(
            tenant_name='tn1', l3out_name='out', name='inet',
            provided_contract_names=['p1'], monitored=True))
    self.universe.manager.set_resource_sync_synced(self.ctx, ext_net)
    result = self.universe.get_resources_for_delete(
        [('fvTenant|tn1', 'l3extOut|out', 'l3extInstP|inet',
          'fvRsProv|p1')])
    self.assertEqual(1, len(result))
    result = result[0]
    self.assertEqual('fvRsProv', list(result.keys())[0])
    self.assertEqual('uni/tn-tn1/out-out/instP-inet/rsprov-p1',
                     list(result.values())[0]['attributes']['dn'])
def _get_bd(self, bd_name, tenant_name, should_exist=True):
    """Fetch a BridgeDomain from AIM and assert its (non-)existence.

    Returns the fetched BD (or None when absent).
    """
    aim_ctx = aim_context.AimContext(db_api.get_session())
    lookup = aim_resource.BridgeDomain(tenant_name=tenant_name,
                                       name=bd_name)
    bd = self.aim_mgr.get(aim_ctx, lookup)
    if should_exist:
        self.assertIsNotNone(bd)
    else:
        self.assertIsNone(bd)
    return bd
def test_squash_operations_no_key(self):
    """Pushes with disjoint keys ('create'-only vs 'delete'-only) are
    queued as separate backlog entries, not squashed."""
    aim_converter = converter.AimToAciModelConverter()
    tenant = a_res.Tenant(name='tn1', display_name='foo')
    bridge = a_res.BridgeDomain(tenant_name='tn1', name='bd1',
                                display_name='bar')
    ctx_vrf = a_res.VRF(tenant_name='tn1', name='vrf1',
                        display_name='pippo')
    self.manager.push_aim_resources({'create': [tenant, bridge]})
    self.manager.push_aim_resources(
        {'delete': aim_converter.convert([ctx_vrf])})
    self.assertEqual(2, len(self.manager.object_backlog.queue))
def _get_nat_bd(self, ctx, l3out):
    """Return the NAT BridgeDomain resource associated with *l3out*.

    Both the name and display name are prefixed with 'EXT-' and scoped
    when the L3Out lives in the common tenant.
    """
    display = self._display_name(l3out)
    tenant = l3out.tenant_name
    nat_bd_name = self._scope_name_if_common(tenant,
                                             'EXT-%s' % l3out.name)
    nat_bd_display = self._scope_name_if_common(
        tenant, aim_utils.sanitize_display_name('EXT-%s' % display))
    return resource.BridgeDomain(tenant_name=tenant,
                                 name=nat_bd_name,
                                 display_name=nat_bd_display,
                                 limit_ip_learn_to_subnets=True,
                                 l3out_names=[l3out.name])
def _get_l3out_objects(self, l3out_name=None, l3out_display_name=None,
                       nat_vrf_name=None, vmm_domains=None,
                       phys_domains=None):
    """Return the AIM objects expected to accompany a NAT-enabled L3Out.

    All names are derived from the 'EXT-' prefix plus the L3Out name
    (defaults: 'o1'/'OUT').  The NAT VRF is appended only when
    nat_vrf_name is None (i.e. the default NAT VRF is in use); vmm/phys
    domain lists default to the test-class attributes when not given.
    """
    name = 'EXT-%s' % (l3out_name or 'o1')
    d_name = 'EXT-%s' % (l3out_display_name or 'OUT')
    nat_vrf = a_res.VRF(tenant_name='t1', name=name, display_name=d_name)
    if vmm_domains is not None:
        vmm_doms = vmm_domains
    else:
        vmm_doms = self.vmm_domains
    if phys_domains is not None:
        phys_doms = phys_domains
    else:
        phys_doms = self.phys_domains
    return ([
        a_res.Filter(tenant_name='t1', name=name, display_name=d_name),
        a_res.FilterEntry(tenant_name='t1', filter_name=name,
                          name='Any', display_name='Any'),
        a_res.Contract(tenant_name='t1', name=name, display_name=d_name),
        a_res.ContractSubject(tenant_name='t1', contract_name=name,
                              name='Allow', display_name='Allow',
                              bi_filters=[name]),
        a_res.BridgeDomain(tenant_name='t1', name=name,
                           display_name=d_name,
                           vrf_name=nat_vrf_name or name,
                           limit_ip_learn_to_subnets=True,
                           l3out_names=[l3out_name or 'o1']),
        a_res.ApplicationProfile(tenant_name='t1', name='myapp',
                                 display_name='myapp'),
        a_res.EndpointGroup(tenant_name='t1', app_profile_name='myapp',
                            name=name, display_name=d_name,
                            bd_name=name,
                            provided_contract_names=[name],
                            consumed_contract_names=[name],
                            # NOTE(ivar): Need to keep both VMM
                            # representations since a GET on the EPG
                            # will also return the domain name list
                            # for backward compatibility
                            openstack_vmm_domain_names=[
                                dom['name'] for dom in vmm_doms
                                if dom['type'] == 'OpenStack'],
                            physical_domain_names=[
                                dom['name'] for dom in phys_doms],
                            vmm_domains=vmm_doms,
                            physical_domains=phys_doms)] +
        ([nat_vrf] if nat_vrf_name is None else []))
def _aim_bridge_domain(self, session, tenant_id, network_id,
                       network_name):
    """Construct (without persisting) the AIM BD for a Neutron network."""
    # TODO(Sumit): Use _aim_resource_by_name
    tenant_name = self._aim_tenant_name(session, tenant_id)
    bd_name = self.name_mapper.network(session, network_id, network_name)
    display_name = self.aim_display_name(network_name)
    LOG.info(_LI("Mapped network_id %(id)s with name %(name)s to "
                 "%(apic_name)s"),
             {'id': network_id, 'name': network_name,
              'apic_name': bd_name})
    return aim_resource.BridgeDomain(tenant_name=str(tenant_name),
                                     name=str(bd_name),
                                     display_name=display_name)
def test_set_unset_bd_l3out(self):
    """set_bd_l3out is idempotent and unset_bd_l3out clears the link."""
    l3out = a_res.L3Outside(tenant_name='t1', name='o1',
                            display_name='OUT')
    bd = a_res.BridgeDomain(tenant_name=self.bd1_tenant_name,
                            name='bd1',
                            limit_ip_learn_to_subnets=True,
                            vrf_name='vrf1')
    self.mgr.create(self.ctx, bd)
    # Setting twice must yield the same state (idempotency).
    for _ in range(2):
        self.ns.set_bd_l3out(self.ctx, bd, l3out)
        self._check_bd_l3out(bd, l3out)
    self.ns.unset_bd_l3out(self.ctx, bd, l3out)
    refreshed = self.mgr.get(self.ctx, bd)
    self.assertEqual([], refreshed.l3out_names)
def test_connect_vrf_multiple(self):
    """One VRF connected to external networks on two different L3Outs,
    then disconnected from each in turn."""
    l3out1 = a_res.L3Outside(tenant_name='t1', name='o1',
                             display_name='OUT')
    ext_net1 = a_res.ExternalNetwork(
        tenant_name='t1', l3out_name='o1', name='inet1',
        display_name='INET1')
    self.mgr.create(self.ctx, a_res.Tenant(name=self.vrf1_tenant_name))
    self.ns.create_l3outside(self.ctx, l3out1)
    self.ns.create_external_network(self.ctx, ext_net1)
    self.ns.update_external_cidrs(self.ctx, ext_net1,
                                  ['20.20.20.0/24', '50.50.0.0/16'])
    # Second L3Out in a different tenant with a catch-all CIDR.
    l3out2 = a_res.L3Outside(tenant_name='t2', name='o2',
                             display_name='OUT2')
    ext_net2 = a_res.ExternalNetwork(
        tenant_name='t2', l3out_name='o2', name='inet2',
        display_name='INET2')
    self.ns.create_l3outside(self.ctx, l3out2)
    self.ns.create_external_network(self.ctx, ext_net2)
    self.ns.update_external_cidrs(self.ctx, ext_net2, ['0.0.0.0/0'])
    vrf1 = a_res.VRF(tenant_name=self.vrf1_tenant_name, name='vrf1',
                     display_name='VRF1')
    bd1 = a_res.BridgeDomain(tenant_name=self.vrf1_tenant_name,
                             name='bd1',
                             limit_ip_learn_to_subnets=True,
                             vrf_name='vrf1')
    self.mgr.create(self.ctx, vrf1)
    self.mgr.create(self.ctx, bd1)
    if self.fix_l3out_vrf:
        self.mgr.update(self.ctx, l3out1, vrf_name=vrf1.name)
        self.mgr.update(self.ctx, l3out2, vrf_name=vrf1.name)
    ext_net1.provided_contract_names = ['p1_vrf1', 'p2_vrf1']
    ext_net1.consumed_contract_names = ['c1_vrf1', 'c2_vrf1']
    ext_net2.provided_contract_names = ['p3_vrf1', 'p4_vrf1']
    ext_net2.consumed_contract_names = ['c3_vrf1', 'c4_vrf1']
    # Connect the same VRF to both external networks, then disconnect
    # one at a time, verifying intermediate states.
    self.ns.connect_vrf(self.ctx, ext_net1, vrf1)
    self.ns.connect_vrf(self.ctx, ext_net2, vrf1)
    self._check_connect_vrf_multiple('stage1')
    self.ns.disconnect_vrf(self.ctx, ext_net1, vrf1)
    self._check_connect_vrf_multiple('stage2')
    self.ns.disconnect_vrf(self.ctx, ext_net2, vrf1)
    self._check_connect_vrf_multiple('stage3')
def create_network_precommit(self, context):
    """Create the AIM BD and EPG backing a newly created network.

    The BD is placed on the shared "unrouted" VRF with routing
    disabled and ARP flooding enabled; the EPG is bound to that BD.
    """
    LOG.debug("APIC AIM MD creating network: %s", context.current)
    session = context._plugin_context.session
    current = context.current

    tenant_id = current['tenant_id']
    tenant_aname = self.name_mapper.tenant(session, tenant_id)
    LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
              {'id': tenant_id, 'aname': tenant_aname})

    net_id = current['id']
    net_name = current['name']
    aname = self.name_mapper.network(session, net_id, net_name)
    LOG.debug("Mapped network_id %(id)s with name %(name)s to %(aname)s",
              {'id': net_id, 'name': net_name, 'aname': aname})

    dname = aim_utils.sanitize_display_name(net_name)
    aim_ctx = aim_context.AimContext(session)
    vrf = self._get_unrouted_vrf(aim_ctx)
    self.aim.create(
        aim_ctx,
        aim_resource.BridgeDomain(tenant_name=tenant_aname,
                                  name=aname,
                                  display_name=dname,
                                  vrf_name=vrf.name,
                                  enable_arp_flood=True,
                                  enable_routing=False,
                                  limit_ip_learn_to_subnets=True))
    self.aim.create(
        aim_ctx,
        aim_resource.EndpointGroup(tenant_name=tenant_aname,
                                   app_profile_name=AP_NAME,
                                   name=aname,
                                   display_name=dname,
                                   bd_name=aname))
def _get_vrf_1_ext_net_2_objects(self, connected=True):
    """Return the expected AIM objects for ext net 'inet2' on L3Out 'o2'.

    When *connected* is True the external network carries the vrf1
    contracts in addition to the NAT contract 'EXT-o2'.
    """
    provided = ['EXT-o2', 'p3_vrf1', 'p4_vrf1'] if connected \
        else ['EXT-o2']
    consumed = ['EXT-o2', 'c3_vrf1', 'c4_vrf1'] if connected \
        else ['EXT-o2']
    expected = {}
    expected['l3out'] = a_res.L3Outside(
        tenant_name='t2', name='o2', display_name='OUT2',
        vrf_name='vrf1')
    expected['ext_net'] = a_res.ExternalNetwork(
        tenant_name='t2', l3out_name='o2', name='inet2',
        display_name='INET2',
        provided_contract_names=provided,
        consumed_contract_names=consumed)
    expected['nat_bd'] = a_res.BridgeDomain(
        tenant_name='t2', name='EXT-o2', display_name='EXT-OUT2',
        vrf_name='EXT-o2', limit_ip_learn_to_subnets=True,
        l3out_names=['o2'])
    expected['ext_sub_1'] = a_res.ExternalSubnet(
        tenant_name='t2', l3out_name='o2',
        external_network_name='inet2', cidr='0.0.0.0/0')
    return expected
def test_get_aim_resources(self, tree_type=tree_manager.CONFIG_TREE):
    """get_resources resolves hash-tree diff keys back to AIM objects.

    Builds resources (and faults) across two tenants, diffs each tree
    against an empty tree to obtain all keys, and checks that
    get_resources returns the round-tripped resources (CONFIG /
    MONITORED trees) or the faults (OPERATIONAL tree).
    """
    tree_mgr = tree_manager.HashTreeManager()
    aim_mgr = aim_manager.AimManager()
    t1 = resource.Tenant(name='t1')
    t2 = resource.Tenant(name='t2')
    t1_fault = aim_status.AciFault(
        fault_code='101',
        external_identifier='uni/tn-t1/fault-101',
        description='failure101')
    t2_fault = aim_status.AciFault(
        fault_code='102',
        external_identifier='uni/tn-t2/fault-102',
        description='failure102')
    # Create Resources on a couple of tenants
    bd1 = resource.BridgeDomain(
        tenant_name='t1', name='bd1', display_name='somestuff',
        vrf_name='vrf')
    bd1_fault = aim_status.AciFault(
        fault_code='901',
        external_identifier='uni/tn-t1/BD-bd1/fault-901',
        description='failure901')
    bd1_fault2 = aim_status.AciFault(
        fault_code='902',
        external_identifier='uni/tn-t1/BD-bd1/fault-902',
        description='failure902')
    bd2 = resource.BridgeDomain(
        tenant_name='t2', name='bd1', display_name='somestuff',
        vrf_name='vrf2')
    # Service-graph resources, each with an associated fault.
    dc1 = aim_service_graph.DeviceCluster(
        tenant_name='t1', name='clus1', devices=[{'name': '1'}])
    dc1_fault = aim_status.AciFault(
        fault_code='901',
        external_identifier='uni/tn-t1/lDevVip-clus1/fault-901',
        description='failure901')
    sg1 = aim_service_graph.ServiceGraph(
        tenant_name='t1', name='gr1',
        linear_chain_nodes=[{'name': 'N1',
                             'device_cluster_name': 'cl1'}])
    sg1_fault = aim_status.AciFault(
        fault_code='901',
        external_identifier='uni/tn-t1/AbsGraph-gr1/fault-901',
        description='failure901')
    srp1 = aim_service_graph.ServiceRedirectPolicy(
        tenant_name='t1', name='srp1',
        destinations=[{'ip': '1.1.1.1', 'mac': 'aa:bb:cc:dd:ee:ff'}])
    srp1_fault = aim_status.AciFault(
        fault_code='901',
        external_identifier=('uni/tn-t1/svcCont/svcRedirectPol-srp1'
                             '/fault-901'),
        description='failure901')
    dc_ctx1 = aim_service_graph.DeviceClusterContext(
        tenant_name='t1', contract_name='contract1',
        service_graph_name='graph1', node_name='N1',
        device_cluster_name='cluster1',
        device_cluster_tenant_name='common',
        bridge_domain_name='svc_bd',
        service_redirect_policy_name='srp1')
    dc_ctx1_fault = aim_status.AciFault(
        fault_code='901',
        external_identifier=('uni/tn-t1/ldevCtx-c-contract1-'
                             'g-graph1-n-N1/fault-901'),
        description='failure901')
    # For the monitored tree, every resource must be flagged monitored
    # so it lands in that tree.
    if tree_type == tree_manager.MONITORED_TREE:
        bd1.monitored = True
        bd2.monitored = True
        t1.monitored = True
        t2.monitored = True
        dc1.monitored = True
        sg1.monitored = True
        srp1.monitored = True
        dc_ctx1.monitored = True
    aim_mgr.create(self.ctx, t1)
    aim_mgr.create(self.ctx, t2)
    aim_mgr.create(self.ctx, bd1)
    aim_mgr.set_fault(self.ctx, t1, t1_fault)
    aim_mgr.set_fault(self.ctx, t2, t2_fault)
    aim_mgr.set_fault(self.ctx, bd1, bd1_fault)
    aim_mgr.set_fault(self.ctx, bd1, bd1_fault2)
    aim_mgr.create(self.ctx, bd2)
    aim_mgr.set_resource_sync_synced(self.ctx, t1)
    aim_mgr.set_resource_sync_synced(self.ctx, t2)
    aim_mgr.set_resource_sync_synced(self.ctx, bd2)
    aim_mgr.set_resource_sync_synced(self.ctx, bd1)
    aim_mgr.create(self.ctx, dc1)
    aim_mgr.create(self.ctx, sg1)
    aim_mgr.create(self.ctx, srp1)
    aim_mgr.create(self.ctx, dc_ctx1)
    aim_mgr.set_fault(self.ctx, dc1, dc1_fault)
    aim_mgr.set_fault(self.ctx, sg1, sg1_fault)
    aim_mgr.set_fault(self.ctx, srp1, srp1_fault)
    aim_mgr.set_fault(self.ctx, dc_ctx1, dc_ctx1_fault)
    aim_mgr.set_resource_sync_synced(self.ctx, dc1)
    aim_mgr.set_resource_sync_synced(self.ctx, sg1)
    aim_mgr.set_resource_sync_synced(self.ctx, srp1)
    aim_mgr.set_resource_sync_synced(self.ctx, dc_ctx1)
    # Two trees exist
    trees = tree_mgr.find(self.ctx, tree=tree_type)
    self.assertEqual(2, len(trees))
    # Calculate the different with empty trees to retrieve missing keys
    diff_tn_1 = trees[0].diff(tree.StructuredHashTree())
    diff_tn_2 = trees[1].diff(tree.StructuredHashTree())
    self.universe.get_relevant_state_for_read = mock.Mock(
        return_value=[{'tn-t1': trees[0], 'tn-t2': trees[1]}])
    result = self.universe.get_resources(diff_tn_1.get('add', []) +
                                         diff_tn_1.get('remove', []) +
                                         diff_tn_2.get('add', []) +
                                         diff_tn_2.get('remove', []))
    # Round-trip through the converters to obtain the canonical form
    # get_resources is expected to return.
    converted = converter.AciToAimModelConverter().convert(
        converter.AimToAciModelConverter().convert(
            [bd1, bd2, dc1, sg1, srp1, dc_ctx1, t1, t2]))
    if tree_type == tree_manager.MONITORED_TREE:
        for x in converted:
            x.monitored = True
    if tree_type in [tree_manager.CONFIG_TREE,
                     tree_manager.MONITORED_TREE]:
        self.assertEqual(len(converted), len(result))
        for item in converted:
            self.assertTrue(item in result)
    elif tree_type == tree_manager.OPERATIONAL_TREE:
        # Operational tree contains only the 8 faults set above.
        self.assertEqual(8, len(result))
        self.assertTrue(bd1_fault in result)
        self.assertTrue(bd1_fault2 in result)
        self.assertTrue(dc1_fault in result)
        self.assertTrue(sg1_fault in result)
        self.assertTrue(srp1_fault in result)
        self.assertTrue(dc_ctx1_fault in result)
def _get_network_bd(self, mapping):
    """Return the (unpersisted) AIM BridgeDomain named by *mapping*."""
    tenant = mapping.bd_tenant_name
    bd_name = mapping.bd_name
    return aim_resource.BridgeDomain(tenant_name=tenant, name=bd_name)
def test_bd_l3out_vrf_in_common(self):
    """Verify BD l3out_names tracking when the VRF lives in the common
    tenant.

    With a 'common' L3Out, every BD (in any tenant) that resolves to
    the common 'default' VRF is updated; BDs that resolve to a local
    VRF of the same name (dept2) or a different VRF (bd2_dept3) are
    not.  With a tenant-local L3Out, only that tenant's BDs are
    updated.
    """
    self.mgr.create(self.ctx, a_res.Tenant(name='common'))
    self.mgr.create(self.ctx, a_res.Tenant(name='dept1'))
    self.mgr.create(self.ctx, a_res.Tenant(name='dept2'))
    self.mgr.create(self.ctx, a_res.Tenant(name='dept3'))
    vrf = a_res.VRF(tenant_name='common', name='default')
    bd1_dept1 = a_res.BridgeDomain(tenant_name='dept1', name='bd1',
                                   limit_ip_learn_to_subnets=True,
                                   vrf_name='default')
    bd2_dept1 = a_res.BridgeDomain(tenant_name='dept1', name='bd2',
                                   limit_ip_learn_to_subnets=True,
                                   vrf_name='default')
    # dept2 has its own local 'default' VRF, which shadows the common
    # one, so bd1_dept2 must never be linked to the L3Out.
    bd1_dept2 = a_res.BridgeDomain(tenant_name='dept2', name='bd1',
                                   limit_ip_learn_to_subnets=True,
                                   vrf_name='default')
    vrf_dept2 = a_res.VRF(tenant_name='dept2', name='default')
    bd1_dept3 = a_res.BridgeDomain(tenant_name='dept3', name='bd1',
                                   limit_ip_learn_to_subnets=True,
                                   vrf_name='default')
    # bd2_dept3 points at a different VRF ('foo') -> never linked.
    bd2_dept3 = a_res.BridgeDomain(tenant_name='dept3', name='bd2',
                                   limit_ip_learn_to_subnets=True,
                                   vrf_name='foo')
    for o in [vrf, bd1_dept1, bd2_dept1, bd1_dept2, vrf_dept2,
              bd1_dept3, bd2_dept3]:
        self.mgr.create(self.ctx, o)
    # test with 'common' l3out
    l3out = a_res.L3Outside(tenant_name='common', name='o1')
    ext_net = a_res.ExternalNetwork(
        tenant_name='common', l3out_name='o1', name='inet1')
    self.ns.create_l3outside(self.ctx, l3out)
    self.ns.create_external_network(self.ctx, ext_net)
    self.mgr.update(self.ctx, l3out, vrf_name='default')
    self._verify(present=[bd1_dept1, bd2_dept1, bd1_dept2, bd1_dept3,
                          bd2_dept3])
    self.ns.connect_vrf(self.ctx, ext_net, vrf)
    # All BDs resolving to the common VRF get the L3Out.
    bd1_dept1.l3out_names = ['o1']
    bd2_dept1.l3out_names = ['o1']
    bd1_dept3.l3out_names = ['o1']
    self._verify(present=[bd1_dept1, bd2_dept1, bd1_dept2, bd1_dept3,
                          bd2_dept3])
    self.ns.disconnect_vrf(self.ctx, ext_net, vrf)
    bd1_dept1.l3out_names = []
    bd2_dept1.l3out_names = []
    bd1_dept3.l3out_names = []
    self._verify(present=[bd1_dept1, bd2_dept1, bd1_dept2, bd1_dept3,
                          bd2_dept3])
    # test with l3out in specific tenant
    l3out.tenant_name = 'dept1'
    ext_net.tenant_name = 'dept1'
    self.ns.create_l3outside(self.ctx, l3out)
    self.ns.create_external_network(self.ctx, ext_net)
    self.mgr.update(self.ctx, l3out, vrf_name='default')
    self.ns.connect_vrf(self.ctx, ext_net, vrf)
    # Only dept1's BDs are linked for a dept1-local L3Out.
    bd1_dept1.l3out_names = ['o1']
    bd2_dept1.l3out_names = ['o1']
    self._verify(present=[bd1_dept1, bd2_dept1, bd1_dept2, bd1_dept3,
                          bd2_dept3])
    self.ns.disconnect_vrf(self.ctx, ext_net, vrf)
    bd1_dept1.l3out_names = []
    bd2_dept1.l3out_names = []
    self._verify(present=[bd1_dept1, bd2_dept1, bd1_dept2, bd1_dept3,
                          bd2_dept3])
def test_track_universe_actions(self):
    """_track_universe_actions counts retries per root and eventually
    requests a reset, then a purge, for persistently failing actions.

    Note: list() wraps the dict .values() indexing so the assertions
    work on Python 3, where dict views are not subscriptable.
    """
    # When AIM is the current state, created objects are in ACI form,
    # deleted objects are in AIM form
    reset_limit = self.universe.reset_retry_limit
    purge_limit = self.universe.purge_retry_limit
    old_backoff_time = self.universe.max_backoff_time
    self.assertEqual(2 * self.universe.max_create_retry, reset_limit)
    self.assertEqual(2 * reset_limit, purge_limit)
    self.assertTrue(self.universe.max_create_retry > 0)
    # Disable backoff so every call counts as a retry attempt.
    self.universe.max_backoff_time = 0
    actions = {
        'create': [
            resource.BridgeDomain(tenant_name='t1', name='b'),
            resource.BridgeDomain(tenant_name='t1', name='b'),
        ],
        'delete': []
    }
    reset, purge, skip = self.universe._track_universe_actions(
        actions, 'tn-t1')
    self.assertEqual(False, reset)
    self.assertEqual([], purge)
    # 1 root
    self.assertEqual(1, len(self.universe._sync_log['tn-t1']['create']))
    actions = {
        'create': [
            resource.VRF(tenant_name='t2', name='c'),
        ],
        'delete': []
    }
    reset, purge, skip = self.universe._track_universe_actions(
        actions, 'tn-t2')
    # 2 roots
    self.assertEqual(1, len(self.universe._sync_log['tn-t2']['create']))
    # BD counted only once
    self.assertEqual(
        0,
        list(self.universe._sync_log['tn-t1']['create'].values())[0]
        ['retries'])
    ctrl = resource.VMMController(domain_type='OpenStack',
                                  domain_name='os', name='ctrl')
    actions = {
        'create': [],
        'delete': [self._get_example_aci_object('vmmCtrlrP', ctrl.dn)]
    }
    reset, purge, skip = self.universe._track_universe_actions(
        actions, 'vmmp-OpenStack')
    self.assertEqual(False, reset)
    self.assertEqual([], purge)
    reset, purge, skip = self.universe._track_universe_actions(
        {
            'create': [],
            'delete': []
        }, 'tn-t2')
    # Tenant t2 is off the hook
    self.assertTrue(
        'tn-t2' not in self.universe._sync_log['tn-t2']['create'])
    self.assertTrue(
        'tn-t2' not in self.universe._sync_log['tn-t2']['delete'])
    actions = {
        'create': [
            resource.BridgeDomain(tenant_name='t1', name='b'),
        ],
        'delete': []
    }
    reset, purge, skip = self.universe._track_universe_actions(
        actions, 'tn-t1')
    # BD count increased
    self.assertEqual(
        1,
        list(self.universe._sync_log['tn-t1']['create'].values())[0]
        ['retries'])
    self.assertEqual(
        0,
        list(self.universe._sync_log[ctrl.root]['delete'].values())[0]
        ['retries'])
    # Retry the above until t1 needs reset
    for _ in range(reset_limit - 1):
        reset, purge, skip = self.universe._track_universe_actions(
            actions, 'tn-t1')
        self.assertEqual(False, reset)
        self.assertEqual([], purge)
    reset, purge, skip = self.universe._track_universe_actions(
        actions, 'tn-t1')
    self.assertEqual(True, reset)
    self.assertEqual([], purge)
    # with the next run, reset is not required for t1 anymore, but purge
    # countdown starts
    reset, purge, skip = self.universe._track_universe_actions(
        actions, 'tn-t1')
    self.assertEqual([], purge)
    for _ in range(purge_limit - reset_limit - 2):
        reset, purge, skip = self.universe._track_universe_actions(
            actions, 'tn-t1')
        self.assertEqual(False, reset)
        self.assertEqual([], purge)
    reset, purge, skip = self.universe._track_universe_actions(
        actions, 'tn-t1')
    self.assertEqual(False, reset)
    self.assertEqual(1, len(purge))
    self.assertEqual('create', purge[0][0])
    self.assertEqual('uni/tn-t1/BD-b', purge[0][1].dn)
    # Restore the backoff so later tests are unaffected.
    self.universe.max_backoff_time = old_backoff_time