def CASE1(self, main):
    """
    Clean up the test environment and set up: install/start ONOS,
    remove any stale Mininet, then start a fresh Mininet topology
    pointing at every active ONOS controller.
    """
    main.testSetUp.ONOSSetUp(main.Cluster, True,
                             cellName=main.cellName, killRemoveMax=False)
    try:
        from tests.dependencies.utils import Utils
    except ImportError:
        main.log.error("Utils not found exiting the test")
        # Consistency fix: every other case in this suite uses
        # cleanAndExit() so logs are collected before bailing out;
        # the original called main.exit() here.
        main.cleanAndExit()
    try:
        main.Utils
    except (NameError, AttributeError):
        # Lazily create the shared Utils helper on first use.
        main.Utils = Utils()
    main.Utils.mininetCleanup(main.Mininet1)
    main.step("Starting up Mininet from command.")
    # Build the mn command line: requested topology plus one
    # "--controller remote" argument per active ONOS node.
    mnCmd = " mn " + " --topo " + main.topologyType + "," + main.topologySwitchCount
    for ctrl in main.Cluster.active():
        mnCmd += " --controller remote,ip=" + ctrl.ipAddress
    stepResult = main.Mininet1.startNet(mnCmd=mnCmd)
    utilities.assert_equals(expect=main.TRUE,
                            actual=stepResult,
                            onpass="******",
                            onfail="Mininet was NOT set up correctly.")
def cleanup(main, physical=False):
    """
    Stop the ONOS cluster, tear down Mininet (skipped when the test
    runs against a physical network) and copy the karaf logs.
    """
    try:
        from tests.dependencies.utils import Utils
    except ImportError:
        main.log.error("Utils not found exiting the test")
        main.cleanAndExit()
    # Create the shared Utils helper once and cache it on main.
    if not hasattr(main, "utils"):
        main.utils = Utils()
    if not physical:
        main.utils.mininetCleanup(main.Mininet1)
    main.utils.copyKarafLog("CASE%d" % main.CurrentTestCaseNumber,
                            before=True,
                            includeCaseDesc=False)
    # Shut down every ONOS node that is still active.
    for node in main.Cluster.active():
        main.ONOSbench.onosStop(node.ipAddress)
def CASE19(self, main):
    """
    Copy the karaf.log files after each testcase cycle
    """
    try:
        from tests.dependencies.utils import Utils
    except ImportError:
        main.log.error("Utils not found exiting the test")
        main.cleanAndExit()
    # Create the shared Utils helper on first use.
    if not hasattr(main, "Utils"):
        main.Utils = Utils()
    # Label the log copy with the current test cycle number.
    label = "cycle" + str(main.cycle)
    main.Utils.copyKarafLog(label)
def CASE14(self, main):
    """
    Stop mininet
    """
    try:
        from tests.dependencies.utils import Utils
    except ImportError:
        main.log.error("Utils not found exiting the test")
        main.cleanAndExit()
    # Create the shared Utils helper on first use.
    if not hasattr(main, "Utils"):
        main.Utils = Utils()
    main.Utils.mininetCleanIntro()
    cleanupOk = main.Utils.mininetCleanup(main.LincOE, timeout=180)
    # Exit if topology did not load properly
    if not cleanupOk:
        main.cleanAndExit()
def CASE3(self, main):
    """
    cleanup mininet.
    """
    try:
        from tests.dependencies.utils import Utils
    except ImportError:
        main.log.error("Utils not found exiting the test")
        main.cleanAndExit()
    # Create the shared Utils helper on first use.
    if not hasattr(main, "Utils"):
        main.Utils = Utils()
    # Scale the shutdown timeout with the current topology size;
    # before any scale is configured, fall back to one scale unit.
    scaleFactor = 1 if main.currScale is None else int(main.currScale)
    exitTimeout = main.basicMNTime + scaleFactor * main.stopNetTime
    main.Utils.mininetCleanup(main.Mininet1, exitTimeout=exitTimeout)
def CASE10(self, main):
    """
    Stop mininet and remove scapy host
    """
    try:
        from tests.dependencies.utils import Utils
    except ImportError:
        main.log.error("Utils not found exiting the test")
        main.cleanAndExit()
    # Create the shared Utils helper on first use.
    if not hasattr(main, "Utils"):
        main.Utils = Utils()
    main.log.report("Stop Mininet and Scapy")
    main.case("Stop Mininet and Scapy")
    main.caseExplanation = ("Stopping the current mininet topology "
                            "to start up fresh")
    main.step("Stopping and Removing Scapy Host Components")
    scapyResult = main.TRUE
    # Stop the scapy process on every host; note the short-circuit on
    # scapyResult mirrors the original behavior (stop attempts cease
    # once one host fails).
    for host in main.scapyHosts:
        scapyResult = scapyResult and host.stopScapy()
        main.log.info("Stopped Scapy Host: {0}".format(host.name))
    # Then drop the corresponding TestON host components.
    for host in main.scapyHosts:
        scapyResult = scapyResult and main.Scapy.removeHostComponent(host.name)
        main.log.info("Removed Scapy Host Component: {0}".format(host.name))
    main.scapyHosts = []
    utilities.assert_equals(expect=main.TRUE,
                            actual=scapyResult,
                            onpass="******",
                            onfail="Failed to stop mininet and scapy")
    mininetResult = main.Utils.mininetCleanup(main.Mininet1)
    # Exit if topology did not load properly
    if not (mininetResult and scapyResult):
        main.cleanAndExit()
def CASE1(self, main):
    """
    Clean up the test environment and set up: install ONOS, tune the
    topology-metrics app, push the topology file to the Mininet VM and
    start a fresh Mininet network.
    """
    import time
    try:
        from tests.dependencies.utils import Utils
    except ImportError:
        main.log.error("Utils not found exiting the test")
        main.cleanAndExit()
    # Create the shared Utils helper on first use.
    if not hasattr(main, "Utils"):
        main.Utils = Utils()
    main.maxNumBatch = 0
    main.testSetUp.ONOSSetUp(main.Cluster, True,
                             cellName=main.cellName, killRemoveMax=False)
    main.log.info("Configure apps")
    # Disable event batching so each topology event is reported alone.
    for option in ("maxEvents 1", "maxBatchMs 0", "maxIdleMs 0"):
        main.Cluster.active(0).CLI.setCfg(main.defaultTopoCfg, option)
    main.Cluster.command("logSet",
                         args=["DEBUG", "org.onosproject.metrics.topology"],
                         specificDriver=2)
    time.sleep(1)
    main.log.info("Copy topology file to Mininet")
    main.ONOSbench.copyMininetFile(main.topoName, main.dependencyPath,
                                   main.Mininet1.user_name,
                                   main.Mininet1.ip_address)
    main.Utils.mininetCleanup(main.Mininet1)
    time.sleep(main.MNSleep)
    main.log.info("Start new mininet topology")
    main.Mininet1.startNet()
    main.log.info("Assign switch to controller to ONOS node 1")
    time.sleep(2)
def cleanup(main):
    """
    Stop Onos-cluster.
    Stops Mininet
    Copies ONOS log
    """
    try:
        from tests.dependencies.utils import Utils
    except ImportError:
        main.log.error("Utils not found exiting the test")
        main.cleanAndExit()
    # Create the shared Utils helper on first use.
    if not hasattr(main, "utils"):
        main.utils = Utils()
    main.utils.mininetCleanup(main.Mininet1)
    main.utils.copyKarafLog(main.cfgName)
    # Shut down every ONOS node that is still active.
    for node in main.Cluster.active():
        main.ONOSbench.onosStop(node.ipAddress)
def CASE20(self, main):
    """
    host1 send arping package and measure latency

    There are only 1 levels of latency measurements to this test:
    1 ) ARPING-to-device measurement: Measurement the time from host1
        send apring package to onos processing the host event
    """
    import time
    import json
    import numpy
    try:
        from tests.dependencies.utils import Utils
    except ImportError:
        main.log.error("Utils not found exiting the test")
        main.cleanAndExit()
    try:
        main.Utils
    except (NameError, AttributeError):
        main.Utils = Utils()
    # Host adding measurement
    assertion = main.TRUE
    main.log.report('Latency of adding one host to ONOS')
    main.log.report('First ' + str(main.iterIgnore) + ' iterations ignored' +
                    ' for jvm warmup time')
    main.log.report('Total iterations of test: ' + str(main.numlter))
    addingHostTime = []
    for i in range(0, int(main.numlter)):
        main.log.info('Clean up data file')
        with open(main.tsharkResultPath, "w") as dbFile:
            dbFile.write("")
        main.log.info('Starting tshark capture')
        main.ONOSbench.tsharkGrep(main.tsharkPacketIn, main.tsharkResultPath)
        time.sleep(main.measurementSleep)
        main.log.info('host 1 arping...')
        main.Mininet1.arping(srcHost='h1', dstHost='10.0.0.2')
        time.sleep(main.measurementSleep)
        main.log.info('Stopping all Tshark processes')
        main.ONOSbench.tsharkStop()
        time.sleep(main.measurementSleep)
        # BUG FIX: initialise per iteration so a failed capture cannot
        # raise NameError below (previously unset on the first
        # iteration when tshark returned no usable line).
        tsharkResultTime = 0
        # Get tshark output; the 'with' block closes the file, the old
        # explicit close() inside it was redundant.
        with open(main.tsharkResultPath, "r") as resultFile:
            resultText = resultFile.readline()
            main.log.info('Capture result:' + resultText)
            resultText = resultText.split(' ')
            if len(resultText) > 1:
                # tshark reports seconds; convert to milliseconds.
                tsharkResultTime = float(resultText[1]) * 1000.0
            else:
                main.log.error('Tshark output file for packet_in' +
                               ' returned unexpected results')
        # Compare the host event timestamps from each node and keep
        # the latest (largest) one.
        temp = 0
        for ctrl in main.Cluster.active():
            metricsResult = json.loads(ctrl.CLI.topologyEventsMetrics())
            metricsResult = metricsResult.get(main.hostTimestampKey).get("value")
            main.log.info("ONOS topology event matrics timestemp: {}".format(
                str(metricsResult)))
            if temp < metricsResult:
                temp = metricsResult
        metricsResult = temp
        addingHostTime.append(float(metricsResult) - tsharkResultTime)
        main.log.info("Result of this iteration: {}".format(
            str(float(metricsResult) - tsharkResultTime)))
        # Remove the host so the next arping re-triggers a host event.
        gethost = main.Cluster.active(0).REST.hosts()
        HosttoRemove = []
        HosttoRemove.append(json.loads(gethost[1:len(gethost) - 1]).get('id'))
        main.Cluster.active(0).CLI.removeHost(HosttoRemove)
    main.log.info("Result List: {}".format(addingHostTime))
    # calculate average latency from each nodes
    averageResult = numpy.average(addingHostTime)
    main.log.info("Average Latency: {}".format(averageResult))
    # calculate std
    stdResult = numpy.std(addingHostTime)
    main.log.info("std: {}".format(stdResult))
    # write to DB file
    main.log.info("Writing results to DS file")
    with open(main.dbFileName, "a") as dbFile:
        temp = "'" + main.commit + "',"
        temp += "'" + main.nic + "',"
        # Scale number
        temp += str(main.Cluster.numCtrls)
        temp += ",'" + "baremetal1" + "'"
        # average latency
        temp += "," + str(averageResult)
        # std of latency
        temp += "," + str(stdResult)
        temp += "\n"
        dbFile.write(temp)
    assertion = main.TRUE
    utilities.assert_equals(expect=main.TRUE,
                            actual=assertion,
                            onpass='******',
                            onfail='Host latency test failed')
    main.Utils.mininetCleanup(main.Mininet1)
def CASE20(self, main):
    """
    Push intents in batches from all active ONOS nodes until either
    main.maxIntents is reached or verification of intents/flows fails,
    then record the totals in the DB file.
    """
    import time  # BUG FIX: time.sleep is used below but was never imported here
    try:
        from tests.dependencies.utils import Utils
    except ImportError:
        main.log.error("Utils not found exiting the test")
        main.cleanAndExit()
    try:
        main.Utils
    except (NameError, AttributeError):
        main.Utils = Utils()
    if main.reroute:
        main.minIntents = int(main.params['NULL']['REROUTE']['min_intents'])
        main.maxIntents = int(main.params['NULL']['REROUTE']['max_intents'])
        main.checkInterval = int(main.params['NULL']['REROUTE']['check_interval'])
        main.batchSize = int(main.params['NULL']['REROUTE']['batch_size'])
    else:
        main.minIntents = int(main.params['NULL']['PUSH']['min_intents'])
        main.maxIntents = int(main.params['NULL']['PUSH']['max_intents'])
        main.checkInterval = int(main.params['NULL']['PUSH']['check_interval'])
        main.batchSize = int(main.params['NULL']['PUSH']['batch_size'])
    # check if the case needs to be skipped
    if main.setupSkipped:
        main.setupSkipped = False
        main.skipCase()
    # the index where the next intents will be installed
    offset = 0
    # each node pushes an equal share of every batch
    main.batchSize = int(int(main.batchSize) / main.Cluster.numCtrls)
    # BUG FIX: use floor division — '/' yields a float on Python 3 and
    # range(limit) would raise TypeError.
    limit = main.maxIntents // main.batchSize
    # total intents installed
    totalIntents = 0
    intentsState = None
    offtmp = 0
    main.step("Pushing intents")
    stepResult = main.TRUE
    if main.Cluster.numCtrls > 1:
        # if more than one onos node, check more frequently
        main.checkInterval = main.checkInterval / 4
        # make sure the checkInterval is divisible by batchSize
        main.checkInterval = int(int(main.checkInterval / main.batchSize) *
                                 main.batchSize)
    flowTemp = 0
    intentVerifyTemp = 0
    totalFlows = 0
    # ROBUSTNESS: ensure defined even if verification never runs before
    # the DB write at the end.
    verifyTotalIntents = 0
    for i in range(limit):
        # Threads pool
        pool = []
        for j in range(main.Cluster.numCtrls):
            if main.Cluster.numCtrls > 1:
                time.sleep(1)
            offtmp = offset + main.maxIntents * j
            # Push intents by using threads
            t = main.Thread(target=main.Cluster.active(j).CLI.pushTestIntents,
                            threadID=main.threadID,
                            name="Push-Test-Intents",
                            args=[main.switchType + main.ingress,
                                  main.switchType + main.egress,
                                  main.batchSize],
                            kwargs={"offset": offtmp,
                                    "options": "-i",
                                    "timeout": main.timeout,
                                    "background": False,
                                    "noExit": True})
            pool.append(t)
            t.start()
            main.threadID = main.threadID + 1
        for t in pool:
            t.join()
            stepResult = stepResult and t.result
        offset = offset + main.batchSize
        totalIntents = main.batchSize * main.Cluster.numCtrls + totalIntents
        if totalIntents >= main.minIntents and totalIntents % main.checkInterval == 0:
            # reached the minimum and a check interval: verify intents and flows
            time.sleep(main.verifySleep * main.Cluster.numCtrls)
            main.log.info("Verify Intents states")
            # k is a control variable for verify retry attempts
            k = 1
            while k <= main.verifyAttempts:
                # check intents by using the CLI driver
                time.sleep(5)
                intentsState = main.Cluster.active(0).CLI.checkIntentSummary(
                    timeout=600, noExit=True)
                if intentsState:
                    verifyTotalIntents = main.Cluster.active(0).CLI.getTotalIntentsNum(
                        timeout=600, noExit=True)
                    if intentVerifyTemp < verifyTotalIntents:
                        intentVerifyTemp = verifyTotalIntents
                    else:
                        # count did not grow: treat as a failed verification
                        verifyTotalIntents = intentVerifyTemp
                        intentsState = False
                    main.log.info("Total Installed Intents: {}".format(
                        verifyTotalIntents))
                    break
                k = k + 1
            k = 1
            flowVerify = True
            while k <= main.verifyAttempts:
                time.sleep(5)
                totalFlows = main.Cluster.active(0).CLI.getTotalFlowsNum(
                    timeout=600, noExit=True)
                expectFlows = totalIntents * 7 + main.defaultFlows
                if totalFlows == expectFlows:
                    main.log.info("Total Flows Added: {}".format(totalFlows))
                    break
                else:
                    main.log.info("Some Flows are not added, retry...")
                    main.log.info("Total Flows Added: {} Expect Flows: {}".format(
                        totalFlows, expectFlows))
                    flowVerify = False
                k += 1
            if flowTemp < totalFlows:
                flowTemp = totalFlows
            else:
                totalFlows = flowTemp
            if not intentsState or not flowVerify:
                # If some intents are not installed, grep the previous
                # flows list, and finish this test case
                main.log.warn("Intents or flows are not installed")
                verifyTotalIntents = main.Cluster.active(0).CLI.getTotalIntentsNum(
                    timeout=600, noExit=True)
                if intentVerifyTemp < verifyTotalIntents:
                    intentVerifyTemp = verifyTotalIntents
                else:
                    verifyTotalIntents = intentVerifyTemp
                if flowTemp < totalFlows:
                    flowTemp = totalFlows
                else:
                    totalFlows = flowTemp
                main.log.info("Total Intents: {}".format(verifyTotalIntents))
                break
    utilities.assert_equals(expect=main.TRUE,
                            actual=intentsState,
                            onpass="******",
                            onfail="Failed to push and verify intents")
    main.log.info("Total Intents Installed before crash: {}".format(totalIntents))
    main.log.info("Total Flows ADDED before crash: {}".format(totalFlows))
    main.Utils.mininetCleanup(main.Mininet1)
    main.log.info("Writing results to DS file")
    with open(main.dbFileName, "a") as dbFile:
        # Scale number
        temp = "'" + main.commit + "',"
        temp += "'" + main.nic + "',"
        temp += str(main.Cluster.numCtrls)
        temp += ",'" + "baremetal1" + "'"
        # how many intents we installed before crash
        temp += "," + str(verifyTotalIntents)
        # how many flows we installed before crash
        temp += "," + str(totalFlows)
        # other columns in database, but we didn't use in this test
        temp += "," + "0,0,0,0,0,0,0,0"
        temp += "\n"
        dbFile.write(temp)