def SetUpConfig(cntlr_ip, apic_ip, agg_name, avail_zone_name, az_comp_node_name,
                heat_stack_name, heat_temp_file):
    """ Test Step using Heat, setup the Test Config.

    Creates a nova host-aggregate and availability zone, adds the given
    compute node to it, launches the Heat stack, then adds an SSH filter
    to the service EPGs.

    Returns 1 on success, 0 on any failure (after best-effort cleanup).
    """
    gbpheat = gbpHeat(cntlr_ip)
    gbpnova = gbpNova(cntlr_ip)
    print ("\nSetupCfg: Create Aggregate & Availability Zone to be executed\n")
    # BUG FIX: this is a plain function, not a method -- the original used
    # self.gbpnova / self.agg_id / self.nova_agg / self.nova_az / self.gbpheat,
    # every one of which would raise NameError here. Use the locals instead.
    agg_id = gbpnova.avail_zone('api', 'create', agg_name,
                                avail_zone_name=avail_zone_name)
    if agg_id == 0:
        print ("\n ABORTING THE TESTSUITE RUN,nova host aggregate creation Failed")
        return 0
    print (" Agg %s" % (agg_id))
    if gbpnova.avail_zone('api', 'addhost', agg_id,
                          hostname=az_comp_node_name) == 0:
        print ("\n ABORTING THE TESTSUITE RUN, availability zone creation Failed")
        # Cleaning up the aggregate we just created.
        # NOTE(review): original passed self.nova_agg/self.nova_az here;
        # agg_id/avail_zone_name are the values created above -- confirm
        # avail_zone() accepts the aggregate id for 'delete'.
        gbpnova.avail_zone('api', 'delete', agg_id,
                           avail_zone_name=avail_zone_name)
        return 0
    sleep(3)
    if gbpheat.cfg_all_cli(1, heat_stack_name, heat_temp=heat_temp_file) == 0:
        print ("\n ABORTING THE TESTSUITE RUN, HEAT STACK CREATE of %s Failed" % (heat_stack_name))
        CleanUp(agg_id, az_comp_node_name, heat_stack_name)
        return 0
    sleep(5)  # Sleep 5s assuming that all objects areated in APIC
    print ("\n ADDING SSH-Filter to Svc_epg created for every dhcp_agent")
    svc_epg_list = ['demo_bd']
    create_add_filter(apic_ip, svc_epg_list)
    return 1
def __init__(self):
    """ Iniatizing the test-cfg variables & classes """
    # All values come from the module-level ``conf`` dict (test-bed config).
    # Mandatory keys raise KeyError if absent; optional keys fall back to
    # lab defaults via ``conf.get(...) or <default>`` (note: this also
    # replaces falsy values such as '', not only missing keys).
    self.apicsystemID = conf['apic_system_id']
    self.nova_agg = conf['nova_agg_name']
    self.nova_az = conf['nova_az_name']
    self.comp_node = conf['az_comp_node']
    self.network_node = conf['network_node']
    # Controller and keystone access credentials
    self.cntlr_ip = conf['controller_ip']
    self.cntlr_user = conf.get('controller_user') or 'root'
    self.cntlr_passwd = conf.get('controller_password') or 'noir0123'
    self.key_ip = conf.get('keystone_ip') or self.cntlr_ip
    self.key_user = conf.get('keystone_user') or 'admin'
    self.key_passwd = conf.get('keystone_password') or 'noir0123'
    # ACI fabric nodes
    self.apic_ip = conf['apic_ip']
    self.leaf1_ip = conf['leaf1_ip']
    self.leaf2_ip = conf['leaf2_ip']
    self.spine_ip = conf['spine_ip']
    self.apic_passwd = conf.get('apic_passwd')
    # Heat template / test-run knobs
    self.heat_temp_test = conf['main_setup_heat_temp']
    self.num_hosts = conf['num_comp_nodes']
    self.heat_stack_name = conf['heat_dp_stack_name']
    self.pausetodebug = conf['pausetodebug']
    self.test_parameters = conf['test_parameters']
    self.plugin = conf['plugin-type']
    # Nova / Heat CLI helpers bound to the controller
    self.gbpnova = gbpNova(self.cntlr_ip,
                           cntrlr_uname=self.cntlr_user,
                           cntrlr_passwd=self.cntlr_passwd,
                           keystone_user=self.key_user,
                           keystone_password=self.key_passwd)
    self.gbpheat = gbpHeat(self.cntlr_ip,
                           cntrlr_uname=self.cntlr_user,
                           cntrlr_passwd=self.cntlr_passwd)
    if self.plugin:  # Incase of MergedPlugin
        if self.apic_passwd:
            self.gbpaci = gbpApic(self.apic_ip, mode='aim',
                                  password=self.apic_passwd)
        else:
            self.gbpaci = gbpApic(self.apic_ip, mode='aim')
    else:
        self.gbpaci = gbpApic(self.apic_ip,
                              password=self.apic_passwd,
                              apicsystemID=self.apicsystemID)
    # Names of the VMs launched by the DP heat stack
    self.vmlist = ['VM1', 'VM2', 'VM3', 'VM4',
                   'VM5', 'VM6', 'VM7', 'VM8',
                   'VM9', 'VM10', 'VM11', 'VM12'
                   ]
    # Below L2Ps needed for APIC Verification
    self.L2plist = ['demo_same_ptg_l2p_l3p_bd',
                    'demo_diff_ptg_same_l2p_l3p_bd',
                    'demo_diff_ptg_l2p_same_l3p_bd_1',
                    'demo_diff_ptg_l2p_same_l3p_bd_2',
                    'demo_srvr_bd',
                    'demo_clnt_bd'
                    ]
def __init__(self, params):
    """Capture the testsuite settings from *params* and build the
    ACI / Heat / Nova helper clients used by the suite."""
    controller = params['cntlr_ip']
    self.heat_stack_name = 'gbpapic1'
    self.heat_temp_test = params['heat_temp_file']
    self.leaf_ip = params['leaf1_ip']
    self.apic_ip = params['apic_ip']
    self.network_node = params['network_node']
    self.az_comp_node = params['az_comp_node']
    self.nova_agg = params['nova_agg']
    self.nova_az = params['nova_az']
    self.debug = params['pausetodebug']
    # Helper clients
    self.gbpaci = Gbp_Aci()
    self.gbpheat = gbpHeat(controller)
    self.gbpnova = gbpNova(controller)
class super_hdr(object):
    # Name of the config file is static
    # Class-level, shared test-bed settings read once (at import time) from
    # the module-level ``conf`` dict; every DP testsuite header inherits them.
    nova_az = conf['nova_az_name']
    cntlr_ip = conf['controller_ip']
    cntlr_user = conf.get('controller_user') or 'root'
    cntlr_passwd = conf.get('controller_password') or 'noir0123'
    apic_ip = conf['apic_ip']
    apic_passwd = conf['apic_passwd']
    num_host = conf['num_comp_nodes']
    heat_temp = conf['main_setup_heat_temp']
    stack_name = conf['heat_dp_stack_name']
    vm_image = conf['vm_image']
    sshkeyname = conf['key_name']
    network_node = conf['network_node']
    apicsystemID = conf['apic_system_id']
    pausetodebug = conf['pausetodebug']
    plugin = conf['plugin-type']
    # Shared CLI helper clients (constructed at class-definition time).
    # NOTE(review): ``rcfile`` must already exist at module level -- confirm.
    gbpcfg = gbpCfgCli(cntlr_ip,
                       cntrlr_username=cntlr_user,
                       cntrlr_passwd=cntlr_passwd,
                       rcfile=rcfile)
    gbpnova = gbpNova(cntlr_ip)
    gbpheat = gbpHeat(cntlr_ip)

    def vm_create(self, ptgs, vmlist):
        """ Create VMs """
        # ptg_id should be a dict with keys as 'data' & 'mgmt'
        # vm_list: list of dicts
        # For each VM: create one policy target per PTG, store element [1]
        # of the returned tuple under the PTG key in the vm dict, then boot
        # the VM via nova CLI on its 'data' port in the requested AZ.
        for vm in vmlist:
            for key, val in ptgs.iteritems():
                port = self.gbpcfg.gbp_policy_cfg_all(
                    1, 'target', 'vm_%s' % (key),
                    policy_target_group='%s' % (val))
                if port != 0:
                    vm[key] = port[1]
                else:
                    # Abort the whole suite if any PT could not be created
                    raise TestSuiteAbort("Policy Targets creation Failed")
            print vm
            # removed mgmt_nic
            # A zero return from vm_create_cli signals failure to the caller
            if self.gbpnova.vm_create_cli(vm['name'], self.vm_image,
                                          vm['data'],
                                          avail_zone=vm['az']) == 0:
                return 0
def __init__(self, params):
    """Pull the VPC testsuite settings out of *params* and set up the
    ACI / Heat / Nova helper clients."""
    controller = params['cntlr_ip']
    self.heat_stack_name = 'gbpvpc5'
    self.heat_temp_test = params['heat_temp_file']
    self.apic_ip = params['apic_ip']
    self.az_comp_node = params['az_comp_node']
    self.nova_agg = params['nova_agg']
    self.nova_az = params['nova_az']
    self.network_node = params['network_node']
    # Leaf1 ports towards the two compute nodes of the VPC pair
    self.leaf1_port_comp_node1 = params['leaf1_port1']  # Leaf1 <-> Comp-node1
    self.leaf1_port_comp_node2 = params['leaf1_port2']  # Leaf1 <-> Comp-node2
    self.node_id = params['leaf1_node_id']
    # Helper clients
    self.gbpaci = Gbp_Aci()
    self.gbpheat = gbpHeat(controller)
    self.gbpnova = gbpNova(controller)
def __init__(self, params):
    """Capture the leaf-testsuite settings from *params* and build the
    CLI / ACI / Heat / Nova helper objects."""
    controller = params['cntlr_ip']
    # Copy the plain settings straight off the params dict
    for attr, key in (('heat_temp_test', 'heat_temp_file'),
                      ('leaf_ip', 'leaf1_ip'),
                      ('apic_ip', 'apic_ip'),
                      ('network_node', 'network_node'),
                      ('az_comp_node', 'az_comp_node'),
                      ('nova_agg', 'nova_agg'),
                      ('nova_az', 'nova_az'),
                      ('comp_nodes', 'comp_node_ips')):
        setattr(self, attr, params[key])
    self.heat_stack_name = 'gbpleaf5'
    # Helper clients
    self.gbpcfg = gbpCfgCli()
    self.gbpaci = Gbp_Aci()
    self.gbpheat = gbpHeat(controller)
    self.gbpnova = gbpNova(controller)
def __init__(self, params):
    """Store the single-link leaf testsuite settings from *params* and
    construct the ACI / Heat / Nova helper clients."""
    controller = params['cntlr_ip']
    self.heat_stack_name = 'gbpleaf1'
    self.heat_temp_test = params['heat_temp_file']
    self.leaf_ip = params['leaf1_ip']
    self.apic_ip = params['apic_ip']
    self.network_node = params['network_node']
    self.az_comp_node = params['az_comp_node']
    self.nova_agg = params['nova_agg']
    self.nova_az = params['nova_az']
    # This connects Leaf to Comp-node1
    self.leaf_port_comp_node1 = params['leaf1_port1']
    self.node_id = params['leaf1_node_id']
    # Helper clients
    self.gbpaci = Gbp_Aci()
    self.gbpheat = gbpHeat(controller)
    self.gbpnova = gbpNova(controller)
def __init__(self, params):
    """Build the suite config from *params*, immediately run the setup and
    traffic-verification steps (cleaning up on failure), then cache the
    heat stack's object UUIDs and the PRS-to-protocol map for the tests.
    """
    self.gbpcfg = gbpCfgCli()
    self.gbpverify = Gbp_Verify()
    self.gbpdeftraff = Gbp_def_traff()
    self.gbpaci = Gbp_Aci()
    self.heat_stack_name = 'gbpapic3'
    self.cntlr_ip = params['cntlr_ip']
    self.heat_temp_test = params['heat_temp_file']
    self.gbpheat = gbpHeat(self.cntlr_ip)
    self.gbpnova = gbpNova(self.cntlr_ip)
    self.leaf_ip = params['leaf1_ip']
    self.apic_ip = params['apic_ip']
    self.network_node = params['network_node']
    self.az_comp_node = params['az_comp_node']
    self.nova_agg = params['nova_agg']
    self.nova_az = params['nova_az']
    # BUG FIX: original read ``param['pausetodebug']`` -- NameError, the
    # argument is named ``params``.
    self.debug = params['pausetodebug']
    self.test_name = 'DISCONN_APIC_UPDATECFG_RECONN_APIC'
    self._log.info(
        "\nSteps of the TESTCASE_GBP_INTG_APIC_2_DISCONN_APIC_UPDATECFG_RECONN_APIC to be executed\n"
    )
    # Run the setup/traffic steps up-front; clean up the config on failure
    if self.test_step_SetUpConfig() != 1:
        self._log.info("Test Failed at Step_1 == SetUpConfig")
        self.test_CleanUp()
    if self.test_step_VerifyTraffic() != 1:
        self._log.info("Test Failed at Step_2 == TestVerifyTraffic")
        self.test_CleanUp()
    # Map heat stack outputs to the PTG / policy-rule-set UUIDs under test
    objs_uuid = self.gbpverify.get_uuid_from_stack(self.heat_temp_test,
                                                   self.heat_stack_name)
    self.ptg_1 = objs_uuid['server_ptg_id']
    self.ptg_2 = objs_uuid['client_ptg_id']
    self.test_1_prs = objs_uuid['demo_ruleset_icmp_id']
    self.test_2_prs = objs_uuid['demo_ruleset_tcp_id']
    self.test_3_prs = objs_uuid['demo_ruleset_udp_id']
    self.test_4_prs = objs_uuid['demo_ruleset_icmp_tcp_id']
    self.test_5_prs = objs_uuid['demo_ruleset_icmp_udp_id']
    self.test_6_prs = objs_uuid['demo_ruleset_tcp_udp_id']
    # Protocols allowed by each policy-rule-set, keyed by PRS uuid
    self.prs_proto = {self.test_1_prs: ['icmp'],
                      self.test_2_prs: ['tcp'],
                      self.test_3_prs: ['udp'],
                      self.test_4_prs: ['icmp', 'tcp'],
                      self.test_5_prs: ['icmp', 'udp'],
                      self.test_6_prs: ['tcp', 'udp']}
def __init__(self, params):
    """Build the suite config from *params*, immediately run the setup and
    traffic-verification steps (cleaning up on failure), then cache the
    heat stack's object UUIDs and the PRS-to-protocol map for the tests.
    """
    self.gbpcfg = gbpCfgCli()
    self.gbpverify = Gbp_Verify()
    self.gbpdeftraff = Gbp_def_traff()
    self.gbpaci = Gbp_Aci()
    self.heat_stack_name = 'gbpapic3'
    self.cntlr_ip = params['cntlr_ip']
    self.heat_temp_test = params['heat_temp_file']
    self.gbpheat = gbpHeat(self.cntlr_ip)
    self.gbpnova = gbpNova(self.cntlr_ip)
    self.leaf_ip = params['leaf1_ip']
    self.apic_ip = params['apic_ip']
    self.network_node = params['network_node']
    self.az_comp_node = params['az_comp_node']
    self.nova_agg = params['nova_agg']
    self.nova_az = params['nova_az']
    # BUG FIX: original read ``param['pausetodebug']`` -- NameError, the
    # argument is named ``params``.
    self.debug = params['pausetodebug']
    self.test_name = 'DISCONN_APIC_UPDATECFG_RECONN_APIC'
    self._log.info("\nSteps of the TESTCASE_GBP_INTG_APIC_2_DISCONN_APIC_UPDATECFG_RECONN_APIC to be executed\n")
    # Run the setup/traffic steps up-front; clean up the config on failure
    if self.test_step_SetUpConfig() != 1:
        self._log.info("Test Failed at Step_1 == SetUpConfig")
        self.test_CleanUp()
    if self.test_step_VerifyTraffic() != 1:
        self._log.info("Test Failed at Step_2 == TestVerifyTraffic")
        self.test_CleanUp()
    # Map heat stack outputs to the PTG / policy-rule-set UUIDs under test
    objs_uuid = self.gbpverify.get_uuid_from_stack(self.heat_temp_test,
                                                   self.heat_stack_name)
    self.ptg_1 = objs_uuid['server_ptg_id']
    self.ptg_2 = objs_uuid['client_ptg_id']
    self.test_1_prs = objs_uuid['demo_ruleset_icmp_id']
    self.test_2_prs = objs_uuid['demo_ruleset_tcp_id']
    self.test_3_prs = objs_uuid['demo_ruleset_udp_id']
    self.test_4_prs = objs_uuid['demo_ruleset_icmp_tcp_id']
    self.test_5_prs = objs_uuid['demo_ruleset_icmp_udp_id']
    self.test_6_prs = objs_uuid['demo_ruleset_tcp_udp_id']
    # Protocols allowed by each policy-rule-set, keyed by PRS uuid
    self.prs_proto = {self.test_1_prs: ['icmp'],
                      self.test_2_prs: ['tcp'],
                      self.test_3_prs: ['udp'],
                      self.test_4_prs: ['icmp', 'tcp'],
                      self.test_5_prs: ['icmp', 'udp'],
                      self.test_6_prs: ['tcp', 'udp']}
def main(): usage = "usage: %prog [options]" parser = optparse.OptionParser(usage=usage) parser.add_option("-n", "--nattype", help="Mandatory Arg: Type of NAT"\ " Valid strings: dnat or snat or edgenat", dest='nattype') parser.add_option("-f", "--ptnepg", help="Flag to enable Per Tenant NAT-EPG"\ " Valid string: <yes>", default=False, dest='pertenantnatepg') parser.add_option("-i", "--integ", help="integrated ACI Tests. "\ "Valid strings: borderleaf or leaf or spine or agent", default=False, dest='integ') (options, args) = parser.parse_args() if not options.nattype: print ("Mandatory: Please provide the NAT-Type, "\ "Valid strings <dnat> or <snat> or <edgenat>") sys.exit(1) def runinteg(node): if node == 'borderleaf': print "////// Run DP-Test Post Reload of BorderLeaf //////" reboot = 'POST_RELOAD_BORDERLEAF' testbed_cfg.reloadAci() if node == 'leaf': print "////// Run DP-Test Post Reload of Non-BorderLeaf //////" reboot = 'POST_RELOAD_NONBORDERLEAF' testbed_cfg.reloadAci(nodetype='leaf') if node == 'spine': print "////// Run DP-Test Post Reload of Spine //////" reboot = 'POST_RELOAD_SPINE' testbed_cfg.reloadAci(nodetype='leaf') print " **** Sleeping for Spine toboot up ****" sleep(430) if node == 'agent': print "////// Run DP-Test Post Reload of Agent //////" reboot = 'AGENT_RESTART' testbed_cfg.restartAgent() print " **** Sleeping for 5s after Agent Restart ****" sleep(5) def preExistcfg(controller,nat_type='',revert=False,restart=True): if not revert: if nat_type == 'edgenat': preExistingL3Out(controller, '/etc/neutron/neutron.conf', edgenat=True ) else: preExistingL3Out(controller, '/etc/neutron/neutron.conf' ) else: preExistingL3Out(controller, '/etc/neutron/neutron.conf', revert=True, restart=restart ) preexist = True #Going forward for all releases Pre-Existing L3Out nat_type = options.nattype if options.pertenantnatepg == 'yes': options.pertenantnatepg = True # Setup the PreExitingL3Out Config in neutron conf preExistcfg(cntlr_ip) # Build the Test Config 
to be used for all NAT DataPath Testcases testbed_cfg = nat_dp_main_config() gbpheat = gbpHeat(cntlr_ip) #Instantiated to fetch gbp-objects if nat_type == 'dnat': # RUN ONLY DNAT DP TESTs # TestSetup Configuration print 'Setting up global config for all DNAT DP Testing' if options.pertenantnatepg: print 'Test for PER_TENANT_NAT_EPG FOR DNAT' targetVmFips = testbed_cfg.setup( nat_type, do_config=0, pertntnatEpg=True ) else: targetVmFips = testbed_cfg.setup( nat_type, do_config=0 ) # Fetch gbp objects via heat output objs_uuid = gbpheat.get_uuid_from_stack( dnat_heat_temp, heat_stack_name ) objs_uuid['external_gw'] = extgw objs_uuid['ostack_controller'] = cntlr_ip objs_uuid['ipsofextgw'] = ips_of_extgw objs_uuid['network_node'] = network_node objs_uuid['pausetodebug'] = pausetodebug objs_uuid['routefordest'] = routefordest # Verify the config setup on the ACI print 'Sleeping for the EP learning on ACI Fab' sleep(30) """ #JISHNU: commented out for now 07/25/17 if options.pertenantnatepg: if not testbed_cfg.verifySetup(nat_type, pertntnatEpg=True): testbed_cfg.cleanup() preExistcfg(cntlr_ip,revert=True) print \ 'DNAT-PerTntNatEpg TestSuite Execution Failed' sys.exit(1) else: if not testbed_cfg.verifySetup(nat_type): testbed_cfg.cleanup() preExistcfg(cntlr_ip,revert=True) print \ 'DNAT TestSuite Execution Failed' sys.exit(1) """ # Note: Please always maintain the below order of DNAT Test Execution # Since the DNAT_VM_to_VM has the final blind cleanup, which helps to # avoid the heat stack-delete failure coming from nat_dp_main_config # Execution of DNAT DP Tests from ExtRtr to VMs from testcases.testcases_dp_nat.testsuite_dnat_extgw_to_vm \ import DNAT_ExtGw_to_VMs test_dnat_extgw_to_vm = DNAT_ExtGw_to_VMs(objs_uuid, targetVmFips) test_dnat_extgw_to_vm.test_runner(preexist) # If integ=True, then ONLY repeat run of ExtRtr-VM Tests # no need for VM-to-VM, will enable if needed later if options.integ: runinteg(options.integ) if not testbed_cfg.verifySetup(nat_type): 
testbed_cfg.cleanup() #Revert Back the L3Out Config preExistcfg(cntlr_ip,revert=True) print \ 'DNAT-Integ TestSuite Execution Failed after Reload %s'\ %(options.integ) sys.exit(1) test_dnat_extgw_to_vm.test_runner(preexist) print "\nDNAT-Integ TestSuite executed Successfully\n" # Execution of DNAT DP Test from VM to ExtGW and VM-to-VM from testcases.testcases_dp_nat.testsuite_dnat_vm_to_vm \ import DNAT_VMs_to_VMs test_dnat_vm_to_allvms = DNAT_VMs_to_VMs(objs_uuid, targetVmFips) test_dnat_vm_to_allvms.test_runner(preexist) # Cleanup testbed_cfg.cleanup() print "\nDNAT TestSuite executed Successfully\n" if nat_type == 'snat': # RUN ONLY SNAT DP TESTs # TestSetup Configuration print 'Setting up global config for SNAT DP Testing' testbed_cfg.setup('snat', do_config=0) # Fetch gbp objects via heat output objs_uuid = gbpheat.get_uuid_from_stack( testbed_cfg.snat_heat_temp, testbed_cfg.heat_stack_name ) objs_uuid['external_gw'] = testbed_cfg.extgw objs_uuid['ostack_controller'] = testbed_cfg.cntlr_ip objs_uuid['ipsofextgw'] = testbed_cfg.ips_of_extgw objs_uuid['network_node'] = testbed_cfg.network_node objs_uuid['pausetodebug'] = testbed_cfg.pausetodebug # Verify the config setup on the ACI print 'Sleeping for the EP learning on ACI Fab' sleep(30) if not testbed_cfg.verifySetup(nat_type): testbed_cfg.cleanup() preExistcfg(cntlr_ip,revert=True) print 'SNAT TestSuite Execution Failed due to Setup Issue' sys.exit(1) # Execution of SNAT DP Tests from testcases.testcases_dp_nat.testsuite_snat_vm_to_extgw \ import SNAT_VMs_to_ExtGw test_snat_allvms_to_extgw = SNAT_VMs_to_ExtGw(objs_uuid) test_snat_allvms_to_extgw.test_runner(preexist) if options.integ: #Only Run ExtRtr-VM Tests, no need for VM-to-VM, will enable #if needed later runinteg(options.integ) if testbed_cfg.verifySetup(nat_type): testbed_cfg.cleanup() preExistcfg(cntlr_ip,revert=True) print \ 'SNAT-Integ TestSuite Execution Failed after Reload %s'\ %(options.integ) sys.exit(1) 
test_dnat_extgw_to_vm.test_runner(preexist) # Cleanup after the SNAT Testsuite is run testbed_cfg.cleanup() print "\nSNAT TestSuite executed Successfully\n"
dnat_heat_temp = conf['dnat_heat_temp'] #SNAT: Prexisting L3Out in Common, Ext-Seg being tenant-specific #will result in tenant-specific NAT-EPG where SNAT EPs will get #learned. Apparently thats will cause inconsistency and we should #not support such config. Discussed with Amit snat_heat_temp = conf['snat_heat_temp'] num_hosts = conf['num_comp_nodes'] heat_stack_name = conf['heat_dp_nat_stack_name'] ips_of_extgw = [conf['extrtr_ip1'], conf['extrtr_ip2'], extgw] routefordest = re.search( '\\b(\d+.\d+.\d+.)\d+' '',conf['extrtr_ip2'], re.I).group(1)+'0/24' pausetodebug = conf['pausetodebug'] neutronconffile = conf['neutronconffile'] gbpnova = gbpNova(cntlr_ip) gbpheat = gbpHeat(cntlr_ip) gbpaci = gbpApic(apic_ip, apicsystemID=apicsystemID) gbpcrud = GBPCrud(cntlr_ip) hostpoolcidrL3OutA = '55.55.55.1/24' hostpoolcidrL3OutB = '66.66.66.1/24' #Instead of defining the below static labels/vars #could have sourced the heat.yaml file and read it #But since this TestConfig defined in yaml file #WILL NOT change hence being lazy as a mule targetvm_list = ['Web-Server', 'Web-Client-1', 'Web-Client-2', 'App-Server'] L3plist = ['DCL3P1','DCL3P2'] #Note: change the order of list will affect the below dict Epglist = ['APPPTG','WEBSRVRPTG','WEBCLNTPTG'] L2plist = ['APPL2P','WEBSRVRL2P','WEBCLNTL2P'] EpgL2p = dict(zip(Epglist,L2plist))
def main(): usage = "usage: %prog [options]" parser = optparse.OptionParser(usage=usage) parser.add_option("-n", "--nattype", help="Mandatory Arg: Type of NAT"\ " Valid strings: dnat or snat or edgenat", dest='nattype') parser.add_option("-f", "--ptnepg", help="Flag to enable Per Tenant NAT-EPG"\ " Valid string: <yes>", default=False, dest='pertenantnatepg') parser.add_option("-i", "--integ", help="integrated ACI Tests. "\ "Valid strings: borderleaf or leaf or spine or agent", default=False, dest='integ') (options, args) = parser.parse_args() if not options.nattype: print ("Mandatory: Please provide the NAT-Type, "\ "Valid strings <dnat> or <snat> or <edgenat>") sys.exit(1) def runinteg(node): if node == 'borderleaf': print "////// Run DP-Test Post Reload of BorderLeaf //////" reboot = 'POST_RELOAD_BORDERLEAF' testbed_cfg.reloadAci() if node == 'leaf': print "////// Run DP-Test Post Reload of Non-BorderLeaf //////" reboot = 'POST_RELOAD_NONBORDERLEAF' testbed_cfg.reloadAci(nodetype='leaf') if node == 'spine': print "////// Run DP-Test Post Reload of Spine //////" reboot = 'POST_RELOAD_SPINE' testbed_cfg.reloadAci(nodetype='leaf') print " **** Sleeping for Spine toboot up ****" sleep(430) if node == 'agent': print "////// Run DP-Test Post Reload of Agent //////" reboot = 'AGENT_RESTART' testbed_cfg.restartAgent() print " **** Sleeping for 5s after Agent Restart ****" sleep(5) def preExistcfg(controller, nat_type='', revert=False, restart=True): if not revert: if nat_type == 'edgenat': preExistingL3Out(controller, '/etc/neutron/neutron.conf', edgenat=True) else: preExistingL3Out(controller, '/etc/neutron/neutron.conf') else: preExistingL3Out(controller, '/etc/neutron/neutron.conf', revert=True, restart=restart) preexist = True #Going forward for all releases Pre-Existing L3Out nat_type = options.nattype if options.pertenantnatepg == 'yes': options.pertenantnatepg = True # Setup the PreExitingL3Out Config in neutron conf preExistcfg(cntlr_ip) # Build the Test Config 
to be used for all NAT DataPath Testcases testbed_cfg = nat_dp_main_config() gbpheat = gbpHeat(cntlr_ip) #Instantiated to fetch gbp-objects if nat_type == 'dnat': # RUN ONLY DNAT DP TESTs # TestSetup Configuration print 'Setting up global config for all DNAT DP Testing' if options.pertenantnatepg: print 'Test for PER_TENANT_NAT_EPG FOR DNAT' targetVmFips = testbed_cfg.setup(nat_type, do_config=0, pertntnatEpg=True) else: targetVmFips = testbed_cfg.setup(nat_type, do_config=0) # Fetch gbp objects via heat output objs_uuid = gbpheat.get_uuid_from_stack(dnat_heat_temp, heat_stack_name) objs_uuid['external_gw'] = extgw objs_uuid['ostack_controller'] = cntlr_ip objs_uuid['ipsofextgw'] = ips_of_extgw objs_uuid['network_node'] = network_node objs_uuid['pausetodebug'] = pausetodebug objs_uuid['routefordest'] = routefordest # Verify the config setup on the ACI print 'Sleeping for the EP learning on ACI Fab' sleep(30) """ #JISHNU: commented out for now 07/25/17 if options.pertenantnatepg: if not testbed_cfg.verifySetup(nat_type, pertntnatEpg=True): testbed_cfg.cleanup() preExistcfg(cntlr_ip,revert=True) print \ 'DNAT-PerTntNatEpg TestSuite Execution Failed' sys.exit(1) else: if not testbed_cfg.verifySetup(nat_type): testbed_cfg.cleanup() preExistcfg(cntlr_ip,revert=True) print \ 'DNAT TestSuite Execution Failed' sys.exit(1) """ # Note: Please always maintain the below order of DNAT Test Execution # Since the DNAT_VM_to_VM has the final blind cleanup, which helps to # avoid the heat stack-delete failure coming from nat_dp_main_config # Execution of DNAT DP Tests from ExtRtr to VMs from testcases.testcases_dp_nat.testsuite_dnat_extgw_to_vm \ import DNAT_ExtGw_to_VMs test_dnat_extgw_to_vm = DNAT_ExtGw_to_VMs(objs_uuid, targetVmFips) test_dnat_extgw_to_vm.test_runner(preexist) # If integ=True, then ONLY repeat run of ExtRtr-VM Tests # no need for VM-to-VM, will enable if needed later if options.integ: runinteg(options.integ) if not testbed_cfg.verifySetup(nat_type): 
testbed_cfg.cleanup() #Revert Back the L3Out Config preExistcfg(cntlr_ip, revert=True) print \ 'DNAT-Integ TestSuite Execution Failed after Reload %s'\ %(options.integ) sys.exit(1) test_dnat_extgw_to_vm.test_runner(preexist) print "\nDNAT-Integ TestSuite executed Successfully\n" # Execution of DNAT DP Test from VM to ExtGW and VM-to-VM from testcases.testcases_dp_nat.testsuite_dnat_vm_to_vm \ import DNAT_VMs_to_VMs test_dnat_vm_to_allvms = DNAT_VMs_to_VMs(objs_uuid, targetVmFips) test_dnat_vm_to_allvms.test_runner(preexist) # Cleanup testbed_cfg.cleanup() print "\nDNAT TestSuite executed Successfully\n" if nat_type == 'snat': # RUN ONLY SNAT DP TESTs # TestSetup Configuration print 'Setting up global config for SNAT DP Testing' testbed_cfg.setup('snat', do_config=0) # Fetch gbp objects via heat output objs_uuid = gbpheat.get_uuid_from_stack(testbed_cfg.snat_heat_temp, testbed_cfg.heat_stack_name) objs_uuid['external_gw'] = testbed_cfg.extgw objs_uuid['ostack_controller'] = testbed_cfg.cntlr_ip objs_uuid['ipsofextgw'] = testbed_cfg.ips_of_extgw objs_uuid['network_node'] = testbed_cfg.network_node objs_uuid['pausetodebug'] = testbed_cfg.pausetodebug # Verify the config setup on the ACI print 'Sleeping for the EP learning on ACI Fab' sleep(30) if not testbed_cfg.verifySetup(nat_type): testbed_cfg.cleanup() preExistcfg(cntlr_ip, revert=True) print 'SNAT TestSuite Execution Failed due to Setup Issue' sys.exit(1) # Execution of SNAT DP Tests from testcases.testcases_dp_nat.testsuite_snat_vm_to_extgw \ import SNAT_VMs_to_ExtGw test_snat_allvms_to_extgw = SNAT_VMs_to_ExtGw(objs_uuid) test_snat_allvms_to_extgw.test_runner(preexist) if options.integ: #Only Run ExtRtr-VM Tests, no need for VM-to-VM, will enable #if needed later runinteg(options.integ) if testbed_cfg.verifySetup(nat_type): testbed_cfg.cleanup() preExistcfg(cntlr_ip, revert=True) print \ 'SNAT-Integ TestSuite Execution Failed after Reload %s'\ %(options.integ) sys.exit(1) 
test_dnat_extgw_to_vm.test_runner(preexist) # Cleanup after the SNAT Testsuite is run testbed_cfg.cleanup() print "\nSNAT TestSuite executed Successfully\n"
def main():
    """Entry point for the GBP data-path testsuites.

    Builds the global test-bed config, fetches the heat stack's object
    UUIDs and runs every header/testsuite combination; with --integ the
    suites are re-run after reloading the requested ACI node.
    """
    usage = "usage: %prog [options]"
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("-i", "--integ",
                      help="integrated ACI Tests. "
                      "Valid strings: borderleaf or leaf or spine",
                      default=False, dest='integ')
    (options, args) = parser.parse_args()
    # Testsuite setup/cleanup headers and the per-combination testsuites
    from testcases.testcases_dp.testsuites_setup_cleanup import \
        super_hdr, header1, header2, header3, header4
    from testcases.testcases_dp.testsuite_same_ptg_l2p_l3p import \
        test_same_ptg_same_l2p_same_l3p
    from testcases.testcases_dp.testsuite_diff_ptg_same_l2p_l3p import \
        test_diff_ptg_same_l2p_l3p
    from testcases.testcases_dp.testsuite_diff_ptg_diff_l2p_same_l3p import \
        test_diff_ptg_diff_l2p_same_l3p
    from testcases.testcases_dp.testsuite_diff_ptg_diff_l2p_diff_l3p import \
        test_diff_ptg_diff_l2p_diff_l3p
    # Build the Test Config to be used for all DataPath Testcases
    print "Setting up global config for all DP Testing"
    testbed_cfg = gbp_main_config()
    testbed_cfg.setup()
    # Fetch gbp objects via heat output
    gbpheat = gbpHeat(super_hdr.cntlr_ip,
                      cntrlr_uname=super_hdr.cntlr_user,
                      cntrlr_passwd=super_hdr.cntlr_passwd)
    objs_uuid = gbpheat.get_uuid_from_stack(
        super_hdr.heat_temp, super_hdr.stack_name)
    '''
    #JISHNU: Keeping it commented out until AID fix
    # Verify the configuration on ACI
    print "Verification .. sleep 30s, allowing DP learning"
    sleep(30)
    _iter = 0
    while True:
        verify = testbed_cfg.verifySetup()
        if verify:
            break
        if _iter > 2:
            testbed_cfg.cleanup()
            sys.exit(1)
        print "Sleeping for 20s more for next iteration of Verify"
        sleep(20)
        _iter += 1
    '''
    sleep(20)  #JISHNU: Rremove it after the above verify is Uncommented
    # Map each header (suite-specific setup/cleanup) to its testsuite class
    header_to_suite_map = {'header1': [header1, test_same_ptg_same_l2p_same_l3p],
                           'header2': [header2, test_diff_ptg_same_l2p_l3p],
                           'header3': [header3, test_diff_ptg_diff_l2p_same_l3p],
                           'header4': [header4, test_diff_ptg_diff_l2p_diff_l3p]}

    def run(reboot=''):
        # Run every testsuite for each placement combination, tagging logs
        # with the test parameters (plus the reboot tag on integ re-runs).
        for val in header_to_suite_map.itervalues():
            # Initialize Testsuite specific config setup/cleanup class
            header = val[0]()
            # Build the Testsuite specific setup/config
            # header.setup()
            # Initialize Testsuite class to run its testcases
            testsuite = val[1](objs_uuid)
            # now run the loop of test-combos(NOTE: The below forloop is now part of Harness & cfgable)
            for location in ['same_host', 'diff_host_diff_leaf']:
                if reboot:
                    log_string = "%s_%s_%s_%s_%s" % (
                        testbed_cfg.test_parameters['bd_type'],
                        testbed_cfg.test_parameters['ip_version'],
                        testbed_cfg.test_parameters['vpc_type'],
                        location, reboot)
                else:
                    log_string = "%s_%s_%s_%s" % (
                        testbed_cfg.test_parameters['bd_type'],
                        testbed_cfg.test_parameters['ip_version'],
                        testbed_cfg.test_parameters['vpc_type'],
                        location)
                testsuite.test_runner(log_string, location)

    run()  #Run the Test without any Integration Test, this will ALWAYS RUN
    # Options to Run ACI Integration Tests:
    if options.integ:
        #With VPC if any of the Leafs reboot, then traffic should be
        #able to flow through the other Leaf. Only in case of Spine
        #we should sleep 7 mins before we send traffic
        #TODO: Add online status check for the leafs/spines post reload
        if options.integ == 'borderleaf':
            print "////// Run DP-Test Post Reload of BorderLeaf //////"
            reboot = 'POST_RELOAD_BORDERLEAF'
            testbed_cfg.reloadAci()
        if options.integ == 'leaf':
            print "////// Run DP-Test Post Reload of Non-BorderLeaf //////"
            reboot = 'POST_RELOAD_NONBORDERLEAF'
            testbed_cfg.reloadAci(nodetype='leaf')
        if options.integ == 'spine':
            print "////// Run DP-Test Post Reload of Spine //////"
            reboot = 'POST_RELOAD_SPINE'
            testbed_cfg.reloadAci(nodetype='spine')
            print " **** Sleeping for Spine toboot up ****"
            sleep(430)
        # After Reboot of ACI node, verifyCfg and send traffic
        sleep(60)
        if not testbed_cfg.verifySetup():
            print "Verification of Test Config Failed, %s" % (reboot)
            testbed_cfg.cleanup()
            sys.exit(1)
        run(reboot=reboot)
    testbed_cfg.cleanup()
    print "\nDataPath TestSuite executed Successfully\n"
# Keystone / fabric access settings read from the module-level ``conf``
# dict. ``conf.get(...) or <default>`` falls back to lab defaults for
# missing (or falsy) optional keys.
key_passwd = conf.get('keystone_password') or 'noir0123'
apic_ip = conf['apic_ip']
leaf1_ip = conf['leaf1_ip']
leaf2_ip = conf['leaf2_ip']
spine_ip = conf['spine_ip']
apic_passwd = conf.get('apic_passwd')
heat_temp_test = conf['main_setup_heat_temp']
num_hosts = conf['num_comp_nodes']
heat_stack_name = conf['heat_dp_stack_name']
pausetodebug = conf['pausetodebug']
test_parameters = conf['test_parameters']
plugin = conf['plugin-type']
# Optional list of services that run containerized on this test-bed
CONTAINERIZED_SERVICES = conf.get('containerized_services', [])
# Nova / Heat CLI helpers bound to the controller
gbpnova = gbpNova(cntlr_ip, cntrlr_uname=cntlr_user, cntrlr_passwd=cntlr_passwd,
                  keystone_user=key_user, keystone_password=key_passwd)
gbpheat = gbpHeat(cntlr_ip, cntrlr_uname=cntlr_user,
                  cntrlr_passwd=cntlr_passwd)
if plugin:  #Incase of MergedPlugin
    if apic_passwd:
        gbpaci = gbpApic(apic_ip, mode='aim', password=apic_passwd)
    else:
        gbpaci = gbpApic(apic_ip, mode='aim')
else:
    gbpaci = gbpApic(apic_ip, password=apic_passwd,
                     apicsystemID=APICSYSTEM_ID)
# Names of the VMs launched by the DP heat stack
vmlist = ['VM1', 'VM2', 'VM3', 'VM4',
          'VM5', 'VM6', 'VM7', 'VM8',
          'VM9', 'VM10', 'VM11', 'VM12'
          ]
#Below L2Ps needed for APIC Verification