def _delete_port_chain_mapping(self, plugin_context, pc, flowcs, ppgs):
    """Tear down the AIM mapping of a port chain.

    For every flow classifier of the chain, this removes (per provider
    tenant) the port-pair-group mappings, the flow-classifier mapping,
    the contract, the service graph and the per-node
    DeviceClusterContexts. It then re-enables AIM syncing on the EPG of
    every left/right network that is no longer used by any other chain.

    :param plugin_context: plugin request context; its ``session`` is
        used for all AIM operations.
    :param pc: port chain dict being unmapped (``id``,
        ``port_pair_groups`` are read).
    :param flowcs: flow classifier dicts associated with the chain.
    :param ppgs: port pair group dicts composing the chain.
    """
    p_ctx = plugin_context
    session = p_ctx.session
    aim_ctx = aim_context.AimContext(session)
    # (tenant, ppg_id) pairs already unmapped: a PPG is deleted only
    # once per tenant even when multiple flow classifiers share it.
    deleted_ppgs = set()
    for flc in flowcs:
        tenant = self._get_flowc_provider_group(plugin_context,
                                                flc).tenant_name
        for ppg in ppgs:
            key = (tenant, ppg['id'])
            if key not in deleted_ppgs:
                self._delete_port_pair_group_mapping(p_ctx, ppg, tenant)
                deleted_ppgs.add(key)
        self._delete_flow_classifier_mapping(p_ctx, flc, tenant)
        contract = self._get_flc_contract(p_ctx.session, flc, tenant)
        sg = self._get_pc_service_graph(p_ctx.session, pc, tenant)
        # cascade=True removes the children of contract and graph too.
        self.aim.delete(aim_ctx, contract, cascade=True)
        self.aim.delete(aim_ctx, sg, cascade=True)
        # One DeviceClusterContext exists per chain node (PPG).
        for ppg_id in pc['port_pair_groups']:
            ppg_aid = self.name_mapper.port_pair_group(session, ppg_id)
            dcc = aim_sg.DeviceClusterContext(
                tenant_name=tenant, contract_name="any",
                service_graph_name=sg.name, node_name=ppg_aid)
            self.aim.delete(aim_ctx, dcc, cascade=True)
    processed_networks = set()
    # deleted ppgs contains all the ppgs' ID
    # NOTE(review): deleted_ppgs actually holds (tenant, ppg_id) tuples,
    # while group_id below is a bare ID, so the membership test a few
    # lines down can never match one of those tuples — confirm intended.
    processed_ppgs = deleted_ppgs
    for ppg in ppgs:
        for net_id in self._get_ppg_left_right_network_ids(p_ctx, ppg):
            if net_id in processed_networks:
                continue
            processed_networks.add(net_id)
            # See if there are more chains on these networks
            for group_id in self._get_group_ids_by_network_id(
                    p_ctx, net_id):
                if group_id in processed_ppgs:
                    # Nothing to do
                    continue
                processed_ppgs.add(group_id)
                for chain in self._get_chains_by_ppg_id(p_ctx,
                                                        group_id):
                    if chain['id'] != pc['id']:
                        # This network is in use by some chain, cannot
                        # re-activate EPG
                        break
                else:
                    # No chain associated to this group ID
                    continue
                break
            else:
                # No chain associated to all the groups of this network
                epg = self.aim_mech._get_epg_by_network_id(
                    p_ctx.session, net_id)
                self.aim.update(aim_ctx, epg, sync=True)
def test_push_resources_service_graph(self):
    """Push service-graph ACI objects and their faults through the universe.

    For each service-graph related type (DeviceCluster, ServiceGraph,
    ServiceRedirectPolicy, DeviceClusterContext): push the ACI object
    plus its fault instances, verify the AIM object and fault statuses
    exist, then push a delete of the faults and verify the status is
    clean again.

    Fix: the DeviceCluster case was exercised twice — once mid-method
    and again in the grouped run at the end; the redundant first call
    was removed.
    """
    aim_mgr = aim_manager.AimManager()
    aim_mgr.create(self.ctx, resource.Tenant(name='t1'))

    def create_delete_object(aim_obj, aci_obj, aci_faults):
        # create object and faults
        to_create = [aci_obj]
        to_create.extend(aci_faults)
        self.universe.push_resources(self.ctx, {'create': to_create,
                                                'delete': []})
        self.assertIsNotNone(aim_mgr.get(self.ctx, aim_obj))
        status = aim_mgr.get_status(self.ctx, aim_obj)
        self.assertEqual(len(aci_faults), len(status.faults))
        # Fault codes must match regardless of ordering.
        self.assertEqual(sorted([f['faultInst']['attributes']['code']
                                 for f in aci_faults]),
                         sorted([f.fault_code for f in status.faults]))
        # delete filter faults
        self.universe.push_resources(self.ctx, {'create': [],
                                                'delete': status.faults})
        status = aim_mgr.get_status(self.ctx, aim_obj)
        self.assertEqual(0, len(status.faults))

    # Objects with alt_resource
    dc1_aci = {'vnsLDevVip':
               {'attributes': {'dn': 'uni/tn-t1/lDevVip-cl2'}}}
    dc1_fault_objs = [
        self._get_example_aci_fault(
            dn='uni/tn-t1/lDevVip-cl2/fault-F1110', code='F1110'),
        self._get_example_aci_fault(
            dn='uni/tn-t1/lDevVip-cl2/lIf-interface/fault-F1111',
            code='F1111'),
        self._get_example_aci_fault(
            dn='uni/tn-t1/lDevVip-cl2/cDev-n2/cIf-[interface]/fault-F1112',
            code='F1112')]
    dc1 = aim_service_graph.DeviceCluster(tenant_name='t1', name='cl2')

    sg1_aci = {'vnsAbsGraph':
               {'attributes': {'dn': 'uni/tn-t1/AbsGraph-gr2'}}}
    sg1_fault_objs = [
        self._get_example_aci_fault(
            dn='uni/tn-t1/AbsGraph-gr2/fault-F1110', code='F1110'),
        self._get_example_aci_fault(
            dn='uni/tn-t1/AbsGraph-gr2/AbsConnection-C1/fault-F1111',
            code='F1111'),
        self._get_example_aci_fault(
            dn='uni/tn-t1/AbsGraph-gr2/AbsNode-N1/fault-F1112',
            code='F1112')]
    sg1 = aim_service_graph.ServiceGraph(tenant_name='t1', name='gr2')

    srp1_aci = {'vnsSvcRedirectPol':
                {'attributes':
                 {'dn': 'uni/tn-t1/svcCont/svcRedirectPol-r2'}}}
    srp1_fault_objs = [
        self._get_example_aci_fault(
            dn='uni/tn-t1/svcCont/svcRedirectPol-r2/fault-F1111',
            code='F1111'),
        self._get_example_aci_fault(
            dn=('uni/tn-t1/svcCont/svcRedirectPol-r2/'
                'RedirectDest_ip-[10.6.1.1]/fault-F1112'),
            code='F1112')]
    srp1 = aim_service_graph.ServiceRedirectPolicy(tenant_name='t1',
                                                   name='r2')

    dcc1_aci = {'vnsLDevCtx':
                {'attributes':
                 {'dn': 'uni/tn-t1/ldevCtx-c-c1-g-g1-n-N1'}}}
    dcc1_fault_objs = [
        self._get_example_aci_fault(
            dn='uni/tn-t1/ldevCtx-c-c1-g-g1-n-N1/fault-F1111',
            code='F1111'),
        self._get_example_aci_fault(
            dn=('uni/tn-t1/ldevCtx-c-c1-g-g1-n-N1/lIfCtx-c-consumer/'
                'fault-F1112'),
            code='F1112')]
    dcc1 = aim_service_graph.DeviceClusterContext(
        tenant_name='t1', contract_name='c1',
        service_graph_name='g1', node_name='N1')

    # Single create/delete cycle per resource type.
    create_delete_object(dc1, dc1_aci, dc1_fault_objs)
    create_delete_object(sg1, sg1_aci, sg1_fault_objs)
    create_delete_object(srp1, srp1_aci, srp1_fault_objs)
    create_delete_object(dcc1, dcc1_aci, dcc1_fault_objs)
def test_get_aim_resources(self, tree_type=tree_manager.CONFIG_TREE):
    """Verify universe.get_resources returns the expected AIM objects.

    Builds two tenants populated with bridge domains and service-graph
    resources (each with faults), computes the hash-tree diff against
    empty trees, and checks that get_resources yields all resources
    (CONFIG/MONITORED trees) or all faults (OPERATIONAL tree).

    Fix: the OPERATIONAL branch asserted 8 results but only checked 6
    faults for membership; t1_fault and t2_fault are now asserted too.

    :param tree_type: which tree flavor to exercise (config, monitored
        or operational).
    """
    tree_mgr = tree_manager.HashTreeManager()
    aim_mgr = aim_manager.AimManager()
    t1 = resource.Tenant(name='t1')
    t2 = resource.Tenant(name='t2')
    t1_fault = aim_status.AciFault(
        fault_code='101', external_identifier='uni/tn-t1/fault-101',
        description='failure101')
    t2_fault = aim_status.AciFault(
        fault_code='102', external_identifier='uni/tn-t2/fault-102',
        description='failure102')
    # Create Resources on a couple of tenants
    bd1 = resource.BridgeDomain(
        tenant_name='t1', name='bd1', display_name='somestuff',
        vrf_name='vrf')
    bd1_fault = aim_status.AciFault(
        fault_code='901',
        external_identifier='uni/tn-t1/BD-bd1/fault-901',
        description='failure901')
    bd1_fault2 = aim_status.AciFault(
        fault_code='902',
        external_identifier='uni/tn-t1/BD-bd1/fault-902',
        description='failure902')
    bd2 = resource.BridgeDomain(
        tenant_name='t2', name='bd1', display_name='somestuff',
        vrf_name='vrf2')
    dc1 = aim_service_graph.DeviceCluster(
        tenant_name='t1', name='clus1', devices=[{'name': '1'}])
    dc1_fault = aim_status.AciFault(
        fault_code='901',
        external_identifier='uni/tn-t1/lDevVip-clus1/fault-901',
        description='failure901')
    sg1 = aim_service_graph.ServiceGraph(
        tenant_name='t1', name='gr1',
        linear_chain_nodes=[{'name': 'N1',
                             'device_cluster_name': 'cl1'}])
    sg1_fault = aim_status.AciFault(
        fault_code='901',
        external_identifier='uni/tn-t1/AbsGraph-gr1/fault-901',
        description='failure901')
    srp1 = aim_service_graph.ServiceRedirectPolicy(
        tenant_name='t1', name='srp1',
        destinations=[{'ip': '1.1.1.1', 'mac': 'aa:bb:cc:dd:ee:ff'}])
    srp1_fault = aim_status.AciFault(
        fault_code='901',
        external_identifier=('uni/tn-t1/svcCont/svcRedirectPol-srp1'
                             '/fault-901'),
        description='failure901')
    dc_ctx1 = aim_service_graph.DeviceClusterContext(
        tenant_name='t1', contract_name='contract1',
        service_graph_name='graph1', node_name='N1',
        device_cluster_name='cluster1',
        device_cluster_tenant_name='common',
        bridge_domain_name='svc_bd',
        service_redirect_policy_name='srp1')
    dc_ctx1_fault = aim_status.AciFault(
        fault_code='901',
        external_identifier=('uni/tn-t1/ldevCtx-c-contract1-'
                             'g-graph1-n-N1/fault-901'),
        description='failure901')
    if tree_type == tree_manager.MONITORED_TREE:
        bd1.monitored = True
        bd2.monitored = True
        t1.monitored = True
        t2.monitored = True
        dc1.monitored = True
        sg1.monitored = True
        srp1.monitored = True
        dc_ctx1.monitored = True
    aim_mgr.create(self.ctx, t1)
    aim_mgr.create(self.ctx, t2)
    aim_mgr.create(self.ctx, bd1)
    aim_mgr.set_fault(self.ctx, t1, t1_fault)
    aim_mgr.set_fault(self.ctx, t2, t2_fault)
    aim_mgr.set_fault(self.ctx, bd1, bd1_fault)
    aim_mgr.set_fault(self.ctx, bd1, bd1_fault2)
    aim_mgr.create(self.ctx, bd2)
    aim_mgr.set_resource_sync_synced(self.ctx, t1)
    aim_mgr.set_resource_sync_synced(self.ctx, t2)
    aim_mgr.set_resource_sync_synced(self.ctx, bd2)
    aim_mgr.set_resource_sync_synced(self.ctx, bd1)
    aim_mgr.create(self.ctx, dc1)
    aim_mgr.create(self.ctx, sg1)
    aim_mgr.create(self.ctx, srp1)
    aim_mgr.create(self.ctx, dc_ctx1)
    aim_mgr.set_fault(self.ctx, dc1, dc1_fault)
    aim_mgr.set_fault(self.ctx, sg1, sg1_fault)
    aim_mgr.set_fault(self.ctx, srp1, srp1_fault)
    aim_mgr.set_fault(self.ctx, dc_ctx1, dc_ctx1_fault)
    aim_mgr.set_resource_sync_synced(self.ctx, dc1)
    aim_mgr.set_resource_sync_synced(self.ctx, sg1)
    aim_mgr.set_resource_sync_synced(self.ctx, srp1)
    aim_mgr.set_resource_sync_synced(self.ctx, dc_ctx1)
    # Two trees exist
    trees = tree_mgr.find(self.ctx, tree=tree_type)
    self.assertEqual(2, len(trees))
    # Calculate the difference with empty trees to retrieve missing keys
    diff_tn_1 = trees[0].diff(tree.StructuredHashTree())
    diff_tn_2 = trees[1].diff(tree.StructuredHashTree())
    self.universe.get_relevant_state_for_read = mock.Mock(
        return_value=[{'tn-t1': trees[0], 'tn-t2': trees[1]}])
    result = self.universe.get_resources(
        diff_tn_1.get('add', []) + diff_tn_1.get('remove', []) +
        diff_tn_2.get('add', []) + diff_tn_2.get('remove', []))
    # Round-trip the expected resources through the converters so the
    # comparison matches what get_resources emits.
    converted = converter.AciToAimModelConverter().convert(
        converter.AimToAciModelConverter().convert(
            [bd1, bd2, dc1, sg1, srp1, dc_ctx1, t1, t2]))
    if tree_type == tree_manager.MONITORED_TREE:
        for x in converted:
            x.monitored = True
    if tree_type in [tree_manager.CONFIG_TREE,
                     tree_manager.MONITORED_TREE]:
        self.assertEqual(len(converted), len(result))
        for item in converted:
            self.assertIn(item, result)
    elif tree_type == tree_manager.OPERATIONAL_TREE:
        # 8 faults total: 2 tenant faults + 2 on bd1 + one each on
        # dc1, sg1, srp1 and dc_ctx1.
        self.assertEqual(8, len(result))
        self.assertIn(t1_fault, result)
        self.assertIn(t2_fault, result)
        self.assertIn(bd1_fault, result)
        self.assertIn(bd1_fault2, result)
        self.assertIn(dc1_fault, result)
        self.assertIn(sg1_fault, result)
        self.assertIn(srp1_fault, result)
        self.assertIn(dc_ctx1_fault, result)
def _map_port_chain(self, plugin_context, pc, flowcs, ppgs):
    """Create the AIM objects that realize a port chain.

    For each flow classifier: creates the contract and a subject bound
    to the chain's service graph, maps the flow classifier and — once
    per provider tenant — maps every port pair group, creating one
    DeviceClusterContext per PPG with its provider/consumer interface
    contexts, appending each device cluster to the graph's linear
    chain, and finally creating the service graph itself.

    :param plugin_context: plugin request context; its ``session`` is
        used for all AIM operations.
    :param pc: port chain dict being mapped.
    :param flowcs: flow classifier dicts associated with the chain.
    :param ppgs: port pair group dicts composing the chain.
    """
    # Create one DeviceClusterContext per PPG
    p_ctx = plugin_context
    aim_ctx = aim_context.AimContext(p_ctx.session)
    # For each flow classifier, there are as many DeviceClusterContext as
    # the number of nodes in the chain.
    p_tenants = set()
    for flc in flowcs:
        p_tenant = self._get_flowc_provider_group(plugin_context,
                                                  flc).tenant_name
        sg = self._get_pc_service_graph(p_ctx.session, pc, p_tenant)
        contract = self._get_flc_contract(p_ctx.session, flc, p_tenant)
        # Subject named after the graph, redirecting traffic through it.
        subject = aim_resource.ContractSubject(
            tenant_name=contract.tenant_name, contract_name=contract.name,
            name=sg.name, service_graph_name=sg.name,
            bi_filters=[self.aim_mech._any_filter_name])
        self.aim.create(aim_ctx, contract)
        self.aim.create(aim_ctx, subject)
        self._map_flow_classifier(p_ctx, flc, p_tenant)
        # Map device clusters for each flow tenant
        if p_tenant not in p_tenants:
            for ppg in ppgs:
                dc = self._get_ppg_device_cluster(p_ctx.session, ppg,
                                                  p_tenant)
                self._map_port_pair_group(plugin_context, ppg, p_tenant)
                dcc = aim_sg.DeviceClusterContext(
                    tenant_name=sg.tenant_name, contract_name="any",
                    service_graph_name=sg.name, node_name=dc.name,
                    display_name=dc.display_name,
                    device_cluster_name=dc.name,
                    device_cluster_tenant_name=dc.tenant_name)
                # Re-bind dcc to the created object so its attributes
                # (e.g. dn-derived fields) are usable below.
                dcc = self.aim.create(aim_ctx, dcc)
                # Create device context interfaces.
                left_bd, right_bd = self._get_ppg_left_right_bds(
                    p_ctx, ppg)
                # Provider side egresses via the right BD, consumer
                # side ingresses via the left BD.
                for conn_name, direction, bd in [
                        ('provider', EGRESS, right_bd),
                        ('consumer', INGRESS, left_bd)]:
                    dci = aim_sg.DeviceClusterInterface(
                        tenant_name=dc.tenant_name,
                        device_cluster_name=dc.name, name=direction)
                    pbr = self._get_ppg_service_redirect_policy(
                        p_ctx.session, ppg, direction, p_tenant)
                    dcic = aim_sg.DeviceClusterInterfaceContext(
                        tenant_name=dcc.tenant_name,
                        contract_name=dcc.contract_name,
                        service_graph_name=dcc.service_graph_name,
                        node_name=dcc.node_name,
                        connector_name=conn_name,
                        display_name=dcc.display_name,
                        bridge_domain_dn=bd.dn,
                        device_cluster_interface_dn=dci.dn,
                        service_redirect_policy_dn=pbr.dn)
                    self.aim.create(aim_ctx, dcic)
                sg.linear_chain_nodes.append({
                    'name': dc.name, 'device_cluster_name': dc.name,
                    'device_cluster_tenant_name': dc.tenant_name})
                # Unsync left-right EPGs
                for epg in self._get_ppg_left_right_epgs(p_ctx, ppg):
                    self.aim.update(aim_ctx, epg, sync=False)
            # Create only once per tenant
            self.aim.create(aim_ctx, sg)
            p_tenants.add(p_tenant)