def extend_router_dict(self, session, base_model, result):
    """Extend a router dict with AIM distinguished names and sync state.

    Adds the DNs of the router's backing Contract and ContractSubject
    under DIST_NAMES, and the merged AIM sync status under SYNC_STATE.
    """
    LOG.debug("APIC AIM MD extending dict for router: %s", result)

    tenant_id = result['tenant_id']
    tenant_aname = self.name_mapper.tenant(session, tenant_id)
    LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
              {'id': tenant_id, 'aname': tenant_aname})

    router_id = result['id']
    router_name = result['name']
    aname = self.name_mapper.router(session, router_id, router_name)
    LOG.debug("Mapped router_id %(id)s with name %(name)s to "
              "%(aname)s",
              {'id': router_id, 'name': router_name, 'aname': aname})

    contract = aim_resource.Contract(tenant_name=tenant_aname, name=aname)
    subject = aim_resource.ContractSubject(tenant_name=tenant_aname,
                                           contract_name=aname,
                                           name=ROUTER_SUBJECT_NAME)

    aim_ctx = aim_context.AimContext(session)
    # Merge the status of both backing resources into one sync state.
    sync_state = cisco_apic.SYNC_SYNCED
    for res in (contract, subject):
        sync_state = self._merge_status(aim_ctx, sync_state, res)

    result[cisco_apic.DIST_NAMES] = {
        cisco_apic_l3.CONTRACT: contract.dn,
        cisco_apic_l3.CONTRACT_SUBJECT: subject.dn}
    result[cisco_apic.SYNC_STATE] = sync_state
def test_update_1(self):
    """A ContractSubject update produces the expected hash-tree nodes."""
    htree = tree.StructuredHashTree()
    subject = resource.ContractSubject(
        tenant_name='t1', contract_name='c1', name='s1',
        in_filters=['i1'], out_filters=['o1'], bi_filters=['f1'])
    self.maker.update(htree, [subject])

    # Expected nodes all hang off the same subject path.
    base = ('fvTenant|t1', 'vzBrCP|c1', 'vzSubj|s1')
    expected = tree.StructuredHashTree()
    expected = expected.add(base, nameAlias='')
    expected = expected.add(
        base + ('vzInTerm|intmnl', 'vzRsFiltAtt|i1'), tnVzFilterName='i1')
    expected = expected.add(
        base + ('vzOutTerm|outtmnl', 'vzRsFiltAtt|o1'),
        tnVzFilterName='o1')
    expected = expected.add(
        base + ('vzRsSubjFiltAtt|f1',), tnVzFilterName='f1')
    expected = expected.add(base + ('vzInTerm|intmnl',))
    expected = expected.add(base + ('vzOutTerm|outtmnl',))
    self.assertEqual(expected, htree,
                     'differences: %s' % expected.diff(htree))
def delete_router(self, context, current):
    """Delete the AIM Contract/ContractSubject backing a router.

    Also removes the router's APIC name mapping.
    """
    LOG.debug("APIC AIM MD deleting router: %s", current)
    session = context.session

    tenant_id = current['tenant_id']
    tenant_aname = self.name_mapper.tenant(session, tenant_id)
    LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
              {'id': tenant_id, 'aname': tenant_aname})

    router_id = current['id']
    router_name = current['name']
    aname = self.name_mapper.router(session, router_id, router_name)
    LOG.debug("Mapped router_id %(id)s with name %(name)s to "
              "%(aname)s",
              {'id': router_id, 'name': router_name, 'aname': aname})

    aim_ctx = aim_context.AimContext(session)
    # Delete the subject before its parent contract.
    self.aim.delete(
        aim_ctx,
        aim_resource.ContractSubject(tenant_name=tenant_aname,
                                     contract_name=aname,
                                     name=ROUTER_SUBJECT_NAME))
    self.aim.delete(
        aim_ctx,
        aim_resource.Contract(tenant_name=tenant_aname, name=aname))
    self.name_mapper.delete_apic_name(session, router_id)
def update_router(self, context, current, original):
    """Propagate a router rename to the AIM display names."""
    LOG.debug("APIC AIM MD updating router: %s", current)
    if current['name'] == original['name']:
        # Only the display names depend on the router name, so there
        # is nothing to update otherwise.
        return

    session = context.session
    tenant_id = current['tenant_id']
    tenant_aname = self.name_mapper.tenant(session, tenant_id)
    LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
              {'id': tenant_id, 'aname': tenant_aname})

    router_id = current['id']
    router_name = current['name']
    aname = self.name_mapper.router(session, router_id, router_name)
    LOG.debug("Mapped router_id %(id)s with name %(name)s to "
              "%(aname)s",
              {'id': router_id, 'name': router_name, 'aname': aname})

    dname = aim_utils.sanitize_display_name(router_name)
    aim_ctx = aim_context.AimContext(session)

    contract = aim_resource.Contract(tenant_name=tenant_aname, name=aname)
    contract = self.aim.update(aim_ctx, contract, display_name=dname)

    subject = aim_resource.ContractSubject(tenant_name=tenant_aname,
                                           contract_name=aname,
                                           name=ROUTER_SUBJECT_NAME)
    subject = self.aim.update(aim_ctx, subject, display_name=dname)
def _get_subject(self, subject_name, contract_name, tenant_name,
                 should_exist=True):
    """Fetch a ContractSubject from AIM and assert its (non)existence.

    Returns the subject found by the AIM manager (None when absent).
    """
    aim_ctx = aim_context.AimContext(db_api.get_session())
    lookup = aim_resource.ContractSubject(tenant_name=tenant_name,
                                          contract_name=contract_name,
                                          name=subject_name)
    subject = self.aim_mgr.get(aim_ctx, lookup)
    if should_exist:
        self.assertIsNotNone(subject)
    else:
        self.assertIsNone(subject)
    return subject
def _get_l3out_objects(self, l3out_name=None, l3out_display_name=None,
                       nat_vrf_name=None, vmm_domains=None,
                       phys_domains=None):
    """Build the list of AIM resources expected for an external l3out.

    The NAT VRF is appended only when no explicit nat_vrf_name was given.
    """
    name = 'EXT-%s' % (l3out_name or 'o1')
    d_name = 'EXT-%s' % (l3out_display_name or 'OUT')
    nat_vrf = a_res.VRF(tenant_name='t1', name=name, display_name=d_name)
    # Fall back to the fixture's domains when none are supplied.
    vmm_doms = self.vmm_domains if vmm_domains is None else vmm_domains
    phys_doms = (self.phys_domains if phys_domains is None
                 else phys_domains)

    objects = [
        a_res.Filter(tenant_name='t1', name=name, display_name=d_name),
        a_res.FilterEntry(tenant_name='t1', filter_name=name,
                          name='Any', display_name='Any'),
        a_res.Contract(tenant_name='t1', name=name, display_name=d_name),
        a_res.ContractSubject(tenant_name='t1', contract_name=name,
                              name='Allow', display_name='Allow',
                              bi_filters=[name]),
        a_res.BridgeDomain(tenant_name='t1', name=name,
                           display_name=d_name,
                           vrf_name=nat_vrf_name or name,
                           limit_ip_learn_to_subnets=True,
                           l3out_names=[l3out_name or 'o1']),
        a_res.ApplicationProfile(tenant_name='t1', name='myapp',
                                 display_name='myapp'),
        a_res.EndpointGroup(
            tenant_name='t1', app_profile_name='myapp', name=name,
            display_name=d_name, bd_name=name,
            provided_contract_names=[name],
            consumed_contract_names=[name],
            # NOTE(ivar): Need to keep both VMM representations since
            # a GET on the EPG will also return the domain name list
            # for backward compatibility
            openstack_vmm_domain_names=[dom['name'] for dom in vmm_doms
                                        if dom['type'] == 'OpenStack'],
            physical_domain_names=[dom['name'] for dom in phys_doms],
            vmm_domains=vmm_doms, physical_domains=phys_doms)]
    if nat_vrf_name is None:
        objects.append(nat_vrf)
    return objects
def create_router(self, context, current):
    """Create the AIM Contract/ContractSubject backing a new router.

    Also writes the resulting DNs and merged sync state back into the
    router dict.
    """
    LOG.debug("APIC AIM MD creating router: %s", current)
    session = context.session

    tenant_id = current['tenant_id']
    tenant_aname = self.name_mapper.tenant(session, tenant_id)
    LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
              {'id': tenant_id, 'aname': tenant_aname})

    router_id = current['id']
    router_name = current['name']
    aname = self.name_mapper.router(session, router_id, router_name)
    LOG.debug("Mapped router_id %(id)s with name %(name)s to "
              "%(aname)s",
              {'id': router_id, 'name': router_name, 'aname': aname})

    dname = aim_utils.sanitize_display_name(router_name)
    aim_ctx = aim_context.AimContext(session)

    contract = aim_resource.Contract(tenant_name=tenant_aname,
                                     name=aname, display_name=dname)
    self.aim.create(aim_ctx, contract)

    subject = aim_resource.ContractSubject(
        tenant_name=tenant_aname, contract_name=aname,
        name=ROUTER_SUBJECT_NAME, display_name=dname,
        bi_filters=[ANY_FILTER_NAME])
    self.aim.create(aim_ctx, subject)

    # REVISIT(rkukura): Consider having L3 plugin extend router
    # dict again after calling this function.
    sync_state = cisco_apic.SYNC_SYNCED
    for res in (contract, subject):
        sync_state = self._merge_status(aim_ctx, sync_state, res)
    current[cisco_apic.DIST_NAMES] = {
        cisco_apic_l3.CONTRACT: contract.dn,
        cisco_apic_l3.CONTRACT_SUBJECT: subject.dn}
    current[cisco_apic.SYNC_STATE] = sync_state
def test_subject_related_objects(self):
    """Subject in/out filter objects land under the right terminals."""
    self.mgr.create(self.ctx, aim_res.Tenant(name='common'))
    self.mgr.create(
        self.ctx, aim_res.Contract(tenant_name='common', name='c-name'))
    subj = self.mgr.create(
        self.ctx,
        aim_res.ContractSubject(tenant_name='common',
                                contract_name='c-name',
                                name='s-name', monitored=False))
    subj_flt = self.mgr.create(
        self.ctx,
        aim_res.ContractSubjOutFilter(tenant_name='common',
                                      contract_name='c-name',
                                      contract_subject_name='s-name',
                                      filter_name='pr_1',
                                      monitored=False))
    subj_flt1 = self.mgr.create(
        self.ctx,
        aim_res.ContractSubjInFilter(tenant_name='common',
                                     contract_name='c-name',
                                     contract_subject_name='s-name',
                                     filter_name='pr_1',
                                     monitored=False))

    base = ("fvTenant|common", "vzBrCP|c-name", "vzSubj|s-name")
    cfg_tree = self.tt_mgr.get(self.ctx, 'tn-common',
                               tree=tree_manager.CONFIG_TREE)
    # verify pr_1 and its reverse are in the tree
    pr_1 = cfg_tree.find(
        base + ("vzOutTerm|outtmnl", "vzRsFiltAtt|pr_1"))
    rev_pr_1 = cfg_tree.find(
        base + ("vzInTerm|intmnl", "vzRsFiltAtt|pr_1"))
    self.assertIsNotNone(pr_1)
    self.assertIsNotNone(rev_pr_1)

    # Updating the in-filter must keep its tree node present.
    self.mgr.update(self.ctx, subj_flt1, action='deny')
    cfg_tree = self.tt_mgr.get(self.ctx, 'tn-common',
                               tree=tree_manager.CONFIG_TREE)
    rev_pr_1 = cfg_tree.find(
        base + ("vzInTerm|intmnl", "vzRsFiltAtt|pr_1"))
    self.assertIsNotNone(rev_pr_1)
def test_subject_related_objects(self):
    """Subject filters and reverses track in/out filter list updates."""
    self.mgr.create(self.ctx, aim_res.Tenant(name='common'))
    self.mgr.create(self.ctx, aim_res.Contract(tenant_name='common',
                                               name='c-name'))
    initial = ['pr_1', 'reverse-pr_1', 'pr_2', 'reverse-pr_2']
    subj = self.mgr.create(
        self.ctx,
        aim_res.ContractSubject(tenant_name='common',
                                contract_name='c-name', name='s-name',
                                monitored=False, bi_filters=[],
                                in_filters=list(initial),
                                out_filters=list(initial)))

    out_base = ("fvTenant|common", "vzBrCP|c-name", "vzSubj|s-name",
                "vzOutTerm|outtmnl")
    cfg_tree = self.tt_mgr.get(self.ctx, 'tn-common',
                               tree=tree_manager.CONFIG_TREE)
    # verify pr_1 and its reverse are in the tree
    pr_1 = cfg_tree.find(out_base + ("vzRsFiltAtt|pr_1",))
    rev_pr_1 = cfg_tree.find(out_base + ("vzRsFiltAtt|reverse-pr_1",))
    self.assertIsNotNone(pr_1)
    self.assertIsNotNone(rev_pr_1)

    # Removing pr_1/reverse-pr_1 from both directions must drop their
    # tree nodes.
    self.mgr.update(self.ctx, subj,
                    out_filters=['pr_2', 'reverse-pr_2'],
                    in_filters=['pr_2', 'reverse-pr_2'])
    cfg_tree = self.tt_mgr.get(self.ctx, 'tn-common',
                               tree=tree_manager.CONFIG_TREE)
    pr_1 = cfg_tree.find(out_base + ("vzRsFiltAtt|pr_1",))
    rev_pr_1 = cfg_tree.find(out_base + ("vzRsFiltAtt|reverse-pr_1",))
    self.assertIsNone(pr_1)
    self.assertIsNone(rev_pr_1)
def _get_nat_objects(self, ctx, l3out):
    """Build the NAT-related AIM resources for an external l3out.

    Returns the filter, filter entry, contract, subject, subject-filter,
    bridge domain, application profile and EPG, wired together.
    """
    sani = aim_utils.sanitize_display_name
    scope = self._scope_name_if_common
    d_name = self._display_name(l3out)

    fltr = resource.Filter(
        tenant_name=l3out.tenant_name,
        name=scope(l3out.tenant_name, 'EXT-%s' % l3out.name),
        display_name=sani(scope(l3out.tenant_name, 'EXT-%s' % d_name)))
    entry = resource.FilterEntry(tenant_name=fltr.tenant_name,
                                 filter_name=fltr.name, name='Any',
                                 display_name='Any')

    contract = self._get_nat_contract(ctx, l3out)
    subject = resource.ContractSubject(tenant_name=contract.tenant_name,
                                       contract_name=contract.name,
                                       name='Allow',
                                       display_name='Allow')
    subject_filter = resource.ContractSubjFilter(
        tenant_name=contract.tenant_name, contract_name=contract.name,
        contract_subject_name='Allow', filter_name=fltr.name)

    bd = self._get_nat_bd(ctx, l3out)
    bd.vrf_name = l3out.vrf_name

    ap, epg = self._get_nat_ap_epg(ctx, l3out)
    # Use the fixture-provided domains when present, otherwise discover
    # them from AIM.
    vm_doms = getattr(
        self, 'vmm_domains',
        [{'type': d.type, 'name': d.name}
         for d in self.mgr.find(ctx, resource.VMMDomain)])
    phy_doms = getattr(
        self, 'physical_domains',
        [{'name': d.name}
         for d in self.mgr.find(ctx, resource.PhysicalDomain)])
    epg.bd_name = bd.name
    epg.provided_contract_names = [contract.name]
    epg.consumed_contract_names = [contract.name]
    epg.vmm_domains = vm_doms
    epg.physical_domains = phy_doms
    return [fltr, entry, contract, subject, subject_filter, bd, ap, epg]
def _aim_contract_subject(self, aim_contract, in_filters=None,
                          out_filters=None, bi_filters=None):
    """Return a new (unpersisted) AIM ContractSubject for a Contract.

    Since we create one ContractSubject per Contract, the
    ContractSubject is given the Contract's name. Filter lists default
    to empty.
    """
    # TODO(Sumit): Use _aim_resource_by_name
    return aim_resource.ContractSubject(
        tenant_name=aim_contract.tenant_name,
        contract_name=aim_contract.name,
        name=aim_contract.name,
        display_name=self.aim_display_name(aim_contract.name),
        in_filters=in_filters or [],
        out_filters=out_filters or [],
        bi_filters=bi_filters or [])
def test_push_resources(self):
    """Exercise universe.push_resources for create/delete of ACI objects.

    Covers: fault creation/removal on an EPG, faults attached via
    subject-to-filter relation DNs, and that a managed EPG is not
    deleted by a push (unless this is the monitor universe).
    """
    aim_mgr = aim_manager.AimManager()
    aim_mgr.create(self.ctx, resource.Tenant(name='t1'))
    # ACI-form objects to push, plus the AIM-form fault used for delete.
    ap = self._get_example_aci_app_profile(dn='uni/tn-t1/ap-a1')
    ap_aim = resource.ApplicationProfile(tenant_name='t1', name='a1')
    epg = self._get_example_aci_epg(
        dn='uni/tn-t1/ap-a1/epg-test')
    fault = self._get_example_aci_fault(
        dn='uni/tn-t1/ap-a1/epg-test/fault-951')
    faul_aim = aim_status.AciFault(
        fault_code='951',
        external_identifier='uni/tn-t1/ap-a1/epg-test/fault-951')
    self.universe.push_resources(self.ctx, {'create': [ap, epg, fault],
                                            'delete': []})
    res = aim_mgr.get(self.ctx, resource.EndpointGroup(
        tenant_name='t1', app_profile_name='a1', name='test'))
    # The pushed fault must be reflected in the EPG's AIM status.
    status = aim_mgr.get_status(self.ctx, res)
    self.assertEqual(1, len(status.faults))
    self.assertEqual('951', status.faults[0].fault_code)
    # Unset fault
    self.universe.push_resources(self.ctx, {'create': [],
                                            'delete': [faul_aim]})
    status = aim_mgr.get_status(self.ctx, res)
    self.assertEqual(0, len(status.faults))
    # create subject, and faults for subject-to-filter relation
    filter_objs = [
        {'vzBrCP': {'attributes': {'dn': 'uni/tn-t1/brc-c'}}},
        {'vzSubj': {'attributes': {'dn': 'uni/tn-t1/brc-c/subj-s2'}}},
        self._get_example_aci_fault(
            dn='uni/tn-t1/brc-c/subj-s2/intmnl/rsfiltAtt-f/fault-F1111',
            code='F1111'),
        self._get_example_aci_fault(
            dn='uni/tn-t1/brc-c/subj-s2/outtmnl/rsfiltAtt-g/fault-F1112',
            code='F1112'),
        self._get_example_aci_fault(
            dn='uni/tn-t1/brc-c/subj-s2/rssubjFiltAtt-h/fault-F1113',
            code='F1113')]
    self.universe.push_resources(self.ctx, {'create': filter_objs,
                                            'delete': []})
    subj = resource.ContractSubject(tenant_name='t1', contract_name='c',
                                    name='s2')
    # Only the in/out terminal faults (F1111/F1112) attach to the
    # subject here.
    status = aim_mgr.get_status(self.ctx, subj)
    self.assertEqual(2, len(status.faults))
    self.assertEqual(['F1111', 'F1112'],
                     [f.fault_code for f in status.faults])
    # delete filter faults
    self.universe.push_resources(self.ctx, {'create': [],
                                            'delete': status.faults})
    status = aim_mgr.get_status(self.ctx, subj)
    self.assertEqual(0, len(status.faults))
    # Managed epg
    managed_epg = resource.EndpointGroup(
        tenant_name='t1', app_profile_name='a1', name='managed')
    aim_mgr.create(self.ctx, managed_epg)
    # EPG cannot be deleted since is managed
    self.universe.push_resources(
        self.ctx, {'create': [], 'delete': [ap_aim, managed_epg]})
    res = aim_mgr.get(self.ctx, managed_epg)
    if self.monitor_universe:
        # Monitor universe may delete managed objects; clean up here.
        self.assertIsNotNone(res)
        aim_mgr.delete(self.ctx, managed_epg)
    else:
        self.assertIsNone(res)
def test_push_aim_resources(self):
    """Exercise AciTenantManager push of creates, deletes and failures.

    Verifies: per-object POST transactions for created resources,
    DELETE calls for deleted ones (with security-group children
    collapsed into their top-level parent), mixed create+delete
    ordering, and that an APIC error on push is swallowed.
    """
    # Create some AIM resources
    bd1 = self._get_example_aim_bd()
    bd2 = self._get_example_aim_bd(name='test2')
    bda1 = self._get_example_aci_bd()
    bda2 = self._get_example_aci_bd(dn='uni/tn-test-tenant/BD-test2',
                                    descr='test2')
    subj1 = a_res.ContractSubject(tenant_name='test-tenant',
                                  contract_name='c', name='s',
                                  in_filters=['i1', 'i2'],
                                  out_filters=['o1', 'o2'])
    self.manager.push_aim_resources({'create': [bd1, bd2, subj1]})
    self.manager._push_aim_resources()
    # Verify expected calls
    transactions = self._objects_transaction_create([bd1, bd2, subj1],
                                                    top_send=True)
    exp_calls = [
        mock.call(mock.ANY, transactions[0].get_top_level_roots()[0][1],
                  'test-tenant', 'test'),
        mock.call(mock.ANY, transactions[1].get_top_level_roots()[0][1],
                  'test-tenant', 'test2'),
        mock.call(mock.ANY, transactions[2].get_top_level_roots()[0][1],
                  'test-tenant', 'c', 's')]
    self._check_call_list(exp_calls,
                          self.manager.aci_session.post_body_dict)

    # Delete AIM resources
    self.manager.aci_session.post_body_dict.reset_mock()
    f1 = {'vzRsFiltAtt__In': {'attributes': {
        'dn': 'uni/tn-test-tenant/brc-c/subj-s/intmnl/rsfiltAtt-i1'}}}
    f2 = {'vzRsFiltAtt__Out': {'attributes': {
        'dn': 'uni/tn-test-tenant/brc-c/subj-s/outtmnl/rsfiltAtt-o1'}}}
    # We should only send the delete request of 2 SGs below to APIC since
    # all others are just children of first SG
    sg1 = {'hostprotPol': {'attributes': {
        'dn': 'uni/tn-test-tenant/pol-sg'}}}
    sg2 = {'hostprotPol': {'attributes': {
        'dn': 'uni/tn-test-tenant/pol-sg2'}}}
    sg_subj = {'hostprotSubj': {'attributes': {
        'dn': 'uni/tn-test-tenant/pol-sg/subj-default'}}}
    sg_rule1 = {'hostprotRule': {'attributes': {
        'dn': 'uni/tn-test-tenant/pol-sg/subj-default/rule-r1'}}}
    sg_rule2 = {'hostprotRule': {'attributes': {
        'dn': 'uni/tn-test-tenant/pol-sg/subj-default/rule-r2'}}}
    self.manager.push_aim_resources(
        {'delete': [bda1, bda2, f1, f2, sg_rule2, sg_rule1, sg_subj,
                    sg2, sg1]})
    self.manager._push_aim_resources()
    # Verify expected calls, add deleted status.
    # NOTE: dict.values() is a view on Python 3 and is not
    # subscriptable, so wrap it in list() before indexing.
    exp_calls = [
        mock.call('/mo/' + list(bda1.values())[0]['attributes']['dn'] +
                  '.json'),
        mock.call('/mo/' + list(bda2.values())[0]['attributes']['dn'] +
                  '.json'),
        mock.call('/mo/' + list(f1.values())[0]['attributes']['dn'] +
                  '.json'),
        mock.call('/mo/' + list(f2.values())[0]['attributes']['dn'] +
                  '.json'),
        mock.call('/mo/' + list(sg1.values())[0]['attributes']['dn'] +
                  '.json'),
        mock.call('/mo/' + list(sg2.values())[0]['attributes']['dn'] +
                  '.json')]
    self._check_call_list(exp_calls, self.manager.aci_session.DELETE)

    # Create AND delete aim resources
    self.manager.aci_session.post_body_dict.reset_mock()
    self.manager.push_aim_resources(
        collections.OrderedDict([('create', [bd1]), ('delete', [bda2])]))
    self.manager._push_aim_resources()
    transactions = self._objects_transaction_create([bd1])
    exp_calls = [
        mock.call(mock.ANY, transactions[0].get_top_level_roots()[0][1],
                  'test-tenant', 'test')]
    self._check_call_list(exp_calls,
                          self.manager.aci_session.post_body_dict)

    # Failure in pushing object
    self.manager.aci_session.DELETE = mock.Mock(
        side_effect=apic_client.cexc.ApicResponseNotOk(
            request='my_request', status=400, reason='bad request',
            err_text='bad request text', err_code=400))
    # No exception is externally raised
    self.manager.push_aim_resources({'delete': [bda1, bda2]})
    self.manager._push_aim_resources()
def _map_port_chain(self, plugin_context, pc, flowcs, ppgs):
    """Map an SFC port chain onto AIM service-graph objects.

    For every flow classifier, creates a Contract/ContractSubject
    bound to the chain's ServiceGraph; the DeviceCluster* objects for
    the chain's PPGs are created once per provider tenant.
    """
    # Create one DeviceClusterContext per PPG
    p_ctx = plugin_context
    aim_ctx = aim_context.AimContext(p_ctx.session)
    # For each flow classifier, there are as many DeviceClusterContext as
    # the number of nodes in the chain.
    p_tenants = set()
    for flc in flowcs:
        # The provider group's tenant scopes everything created below.
        p_tenant = self._get_flowc_provider_group(plugin_context,
                                                  flc).tenant_name
        sg = self._get_pc_service_graph(p_ctx.session, pc, p_tenant)
        contract = self._get_flc_contract(p_ctx.session, flc, p_tenant)
        subject = aim_resource.ContractSubject(
            tenant_name=contract.tenant_name,
            contract_name=contract.name, name=sg.name,
            service_graph_name=sg.name,
            bi_filters=[self.aim_mech._any_filter_name])
        self.aim.create(aim_ctx, contract)
        self.aim.create(aim_ctx, subject)
        self._map_flow_classifier(p_ctx, flc, p_tenant)
        # Map device clusters for each flow tenant
        if p_tenant not in p_tenants:
            for ppg in ppgs:
                dc = self._get_ppg_device_cluster(p_ctx.session, ppg,
                                                  p_tenant)
                self._map_port_pair_group(plugin_context, ppg, p_tenant)
                dcc = aim_sg.DeviceClusterContext(
                    tenant_name=sg.tenant_name, contract_name="any",
                    service_graph_name=sg.name, node_name=dc.name,
                    display_name=dc.display_name,
                    device_cluster_name=dc.name,
                    device_cluster_tenant_name=dc.tenant_name)
                dcc = self.aim.create(aim_ctx, dcc)
                # Create device context interfaces.
                left_bd, right_bd = self._get_ppg_left_right_bds(p_ctx,
                                                                 ppg)
                # Provider side egresses via the right BD, consumer
                # side ingresses via the left BD.
                for conn_name, direction, bd in [
                        ('provider', EGRESS, right_bd),
                        ('consumer', INGRESS, left_bd)]:
                    dci = aim_sg.DeviceClusterInterface(
                        tenant_name=dc.tenant_name,
                        device_cluster_name=dc.name, name=direction)
                    pbr = self._get_ppg_service_redirect_policy(
                        p_ctx.session, ppg, direction, p_tenant)
                    dcic = aim_sg.DeviceClusterInterfaceContext(
                        tenant_name=dcc.tenant_name,
                        contract_name=dcc.contract_name,
                        service_graph_name=dcc.service_graph_name,
                        node_name=dcc.node_name,
                        connector_name=conn_name,
                        display_name=dcc.display_name,
                        bridge_domain_dn=bd.dn,
                        device_cluster_interface_dn=dci.dn,
                        service_redirect_policy_dn=pbr.dn)
                    self.aim.create(aim_ctx, dcic)
                sg.linear_chain_nodes.append(
                    {'name': dc.name, 'device_cluster_name': dc.name,
                     'device_cluster_tenant_name': dc.tenant_name})
                # Unsync left-right EPGs
                for epg in self._get_ppg_left_right_epgs(p_ctx, ppg):
                    self.aim.update(aim_ctx, epg, sync=False)
            # Create only once per tenant
            self.aim.create(aim_ctx, sg)
            p_tenants.add(p_tenant)