Code example #1
def doProtocol(prots,pn,fclaim=None,maxproc=2):
    cl = prots[pn]
    cl.sort(reverse=True)   # start with secrecy claims (smaller scenarios than auth, which improves the initial tool.minTime behaviour)
    for c in cl:
        if fclaim is None or c.startswith(fclaim):
            Report.replog("** Claim: %s,%s ***" % (pn,c), False)
            pl = getProtocols(pn,c)
            for p in pl:
                # Apply filtering
                if TestConfig.filter(pn,c,p.toolname,maxproc):
                    # Get timeout value from config
                    timeout = TestConfig.getTimeout(pn,c,p.toolname,maxproc)
                    verify(p,c,maxproc=maxproc,timeout=timeout)
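A minimal, hypothetical driver for the function above (the protocol name and claim prefix are invented; gatherClaims is the collector used in code example #24):

# Sketch only: run all "sec*" claims of one protocol at maxproc=2.
prots = gatherClaims()            # e.g. {"nspk": ["secrecy1", "auth1"], ...}
doProtocol(prots, "nspk", fclaim="sec", maxproc=2)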
Code example #2
    def __init__(self, topologyConfigFilePath, testConfigFilePath):
        print("Topology config path is ", topologyConfigFilePath)
        self.nameToHostMap = tc.loadCFG(cfgfileName=topologyConfigFilePath)
        print("Test case config path is ", testConfigFilePath)
        self.testCfgFile = open(testConfigFilePath)
        obj = json.load(fp=self.testCfgFile)
        self.testCases = tc.TestConfigs.from_dict(obj)
Code example #3
def report(obj="", addnewline=True):
    """
    Output for the tests.

    Emulates print by adding newline.
    Currently always writes to the default log file as well.
    """
    global toStdOut
    global toFile
    global defaultFile

    txt = "%s" % (obj)
    if addnewline:
        txt += "\n"
    if toStdOut:
        sys.stdout.write(txt)
    if toFile is not None:
        toFile.write(txt)
        toFile.flush()  # Always immediately flush to see errors early
    """
    Always write to log
    """
    if defaultFile == None:
        filename = "test-%s.log" % (TestConfig.machineName())
        defaultFile = open(filename, "w")
    defaultFile.write(txt)
    defaultFile.flush()
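A hypothetical usage sketch of the report() helper above (the file name is a placeholder; toStdOut, toFile, and defaultFile are the module globals it consults):

# Sketch only: mirror output to stdout and an explicit report file;
# report() also lands in the default machine log regardless.
toStdOut = True
toFile = open("results.txt", "w")   # placeholder path
report("starting test run")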
Code example #4
def getStrideDeploymentPairs(nameToHostMap, maxPortcountInSwitch, testCaseName, loadFactor, testDuration, testStartDelay):
    # For each src-dest pair:
    #     log in to src
    #     for each of the flows:
    #         build the corresponding command string and deploy
    srcList, destList = l2StridePatternTestPairCreator(nameToHostMap, maxPortcountInSwitch)
    deploymentPairList = []

    if len(srcList) != len(destList):
        logger.error("srcList and destList differ in length; printing them and exiting")
        logger.error(srcList)
        logger.error(destList)
        exit(1)
    else:
        for i in range(0, len(srcList)):
            flowArrivalTimesByflowType = calculateFlowArrivalTimes(loadFactor, testDuration)
            for j in range(0, len(ConfigConst.FLOW_TYPE_IDENTIFIER_BY_FLOW_VOLUME_IN_KB)):
                # Convert the flow volume (KB) into a packet count.
                flowsizeAsPacketCount = math.ceil((ConfigConst.FLOW_TYPE_IDENTIFIER_BY_FLOW_VOLUME_IN_KB[j] * 1024) / ConfigConst.PACKET_SIZE)
                for k in range(0, len(flowArrivalTimesByflowType[j])):
                    flowArrivalTime = flowArrivalTimesByflowType[j][k]
                    newDeploymentPair = tc.IPerfDeplymentPair(srcList[i], destList[i], srcList[i].getNextIPerf3ClientPort(),
                                                              destList[i].getNextIPerf3ServerPort(), testCaseName=testCaseName,
                                                              srcHostName=srcList[i].hostName, destHostName=destList[i].hostName,
                                                              startTime=flowArrivalTime + float(testStartDelay), flowSizeinPackets=flowsizeAsPacketCount)
                    deploymentPairList.append(newDeploymentPair)
    return deploymentPairList
Code example #5
    def cleanUp(self):
        """
        Some AVISPA tools (cl-atse,ofmc) might remains in memory, ugly, and it might destroy
        the other tests.
        """
        """
        TODO potentially very unsafe, if you put Avispa in say /usr/bin.
        The assumption here is that AvispaPath will include "avispa"
        somewhere" to limit the damage.
        """
        avpath = TestConfig.getAvispaPath()
        cmd = "pkill -9 -f %s" % (avpath)
        commands.getoutput(cmd)
        """
        Second, some seem to leave a mess of files (Sat-MC?)
        """
        def extclean(path):
            cmd = "rm -f %stmp*.sate" % path
            commands.getoutput(cmd)
            cmd = "rm -f %stmp*.res" % path
            commands.getoutput(cmd)
            cmd = "rm -f %stmp*dimacs*" % path
            commands.getoutput(cmd)

        extclean("")
        try:
            avispabase = os.environ["AVISPA_PACKAGE"]
            extclean("%s/testsuite/results/" % avispabase)
        except:
            pass
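A hedged sketch of an extra guard consistent with the damage-limiting assumption stated in the docstring above (this check is not in the original code):

# Sketch only: refuse to pkill when the configured path does not
# mention "avispa", as the TODO above assumes it will.
avpath = TestConfig.getAvispaPath()
assert "avispa" in avpath.lower(), "refusing to pkill on suspicious path"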
Code example #6
    def __init__(self, name, prefix="", postfix=""):

        self.name = name
        self.protocols = []
        self.extension = ""
        self.protgen = None
        self.commentPrefix = prefix
        self.commentPostfix = postfix
        self.minTime = TestConfig.getMinTime(self.name)
Code example #7
def buildOneDeploymentPair(nameToHostMap, srcName, dstName, testCaseName, testStartDelay, flowsizeInpacket, trafficClass, rateInKBPS):
    src = nameToHostMap.get(srcName)
    dst = nameToHostMap.get(dstName)
    newDeploymentPair = tc.IPerfDeplymentPair(src, dst, src.getNextIPerf3ClientPort(),
                                              dst.getNextIPerf3ServerPort(), testCaseName=testCaseName,
                                              srcHostName=src.hostName, destHostName=dst.hostName,
                                              startTime=float(testStartDelay), flowSizeinPackets=flowsizeInpacket,
                                              trafficClass=trafficClass, bitrate=rateInKBPS)
    return newDeploymentPair
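A hypothetical call (host names, class, and rate are placeholders in the style of the topology configs used elsewhere in these examples):

# Sketch only: one 100-packet flow from h0p0l0 to h1p0l1.
pair = buildOneDeploymentPair(nameToHostMap, "h0p0l0", "h1p0l1",
                              testCaseName="oneFlow", testStartDelay=5,
                              flowsizeInpacket=100, trafficClass=1, rateInKBPS=500)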
Code example #8
    def __init__(self, topologyConfigFilePath, resultFolder, clientPortStart, serverPortStart, testStartDelay):
        '''
        :param topologyConfigFilePath: path to the topology config file
        :param resultFolder: folder where results are written
        :param clientPortStart: first port handed out to iperf3 clients
        :param serverPortStart: first port handed out to iperf3 servers
        :param testStartDelay: delay period before starting the tests; needed to absorb the delay in starting ssh sessions
        '''
        self.testStartDelay = testStartDelay
        self.serverPortStart = serverPortStart
        self.clientPortStart = clientPortStart
        print("Topology  config path is ", topologyConfigFilePath)
        self.nameToHostMap = tc.loadCFG(topologyConfigFilePath,self.clientPortStart, self.serverPortStart )
        self.resultFolder = resultFolder
Code example #9
def run_etwprof(args):
    cmd = [os.path.join(TestConfig._testbin_folder_path, "etwprof.exe")]
    cmd.extend(args)

    try:
        return subprocess.run(
            cmd,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            timeout=TestConfig.get_process_timeout()).returncode
    except subprocess.TimeoutExpired:
        raise RuntimeError(
            f"Timeout of {TestConfig.get_process_timeout()} seconds expired for etwprof!"
        )
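A hypothetical call site (the argument list is illustrative only; etwprof's real command-line surface is not shown in this excerpt):

# Sketch only: run etwprof with a placeholder argument and check the exit code.
rc = run_etwprof(["--help"])   # placeholder argument
if rc != 0:
    print(f"etwprof exited with {rc}")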
Code example #10
    def verify(self, prot, args, timeout=-1):
        self.level = None

        # A missing or negative timeout means the run is untimed.
        timed = False
        if timeout and timeout >= 0:
            timed = True

        while True:

            # Ask for the next analysis level; None means no levels remain.
            self.level = TestConfig.ta4NextLevel(self.level, timed)
            if self.level is None:
                return INCONCLUSIVE
            else:
                res = Avispa.verify(self, prot, args, timeout)
                # Subtract the time spent at this level from the remaining budget.
                if timeout > self.duration:
                    timeout = timeout - self.duration
                if res != INCONCLUSIVE:
                    return res
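For reference, a toy stand-in for TestConfig.ta4NextLevel consistent with how the loop above uses it (the real level values and escalation policy live in TestConfig; everything below is invented):

# Sketch only: walk through a fixed list of analysis levels, one per call,
# returning None when the list is exhausted. The level lists are made up.
def ta4NextLevel(level, timed):
    levels = [1, 2, 3] if timed else [1, 2]   # hypothetical escalation order
    if level is None:
        return levels[0]
    i = levels.index(level)
    return levels[i + 1] if i + 1 < len(levels) else None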
Code example #11
def usage():
    (runmin,runmax,runstep) = TestConfig.getMaxProc()
    print """
------------------------------------------------------------------------
performanceTest.py

Security protocol analyzers performance test scripts.
By Cas Cremers & Pascal Lafourcade

--protocol=P        Filter on protocol P
--claims=S          Filter on claim prefix S
--tools=T1[,T2]     Filter on tools T1..TN

-o,--output=F       Send output to file F

--run-max=I         Maximum number of runs I [%i]
--run-step=I         Step size for runs I [%i]

--output-if         Write any generated IF language constructs to a
                    file.
------------------------------------------------------------------------
""" % (runmax,runstep)
Code example #12
    def preprocess(self):
        """
        Maybe preprocess self.prot and self.args

        Hook to convert hlpsl to if :)
        """
        if self.makeIF:
            self.hlpsl = self.prot

            fh = tempfile.NamedTemporaryFile(suffix=".hlpsl")
            fname = fh.name
            fh.write(self.hlpsl)
            fh.flush()

            cmd = "hlpsl2if --nowarnings --stdout %s" % (fname)
            self.prot = pexpect.run(cmd)
            fh.close()
            if "ERROR" in self.prot:
                print "ERROR for hlpsl2if"
                self.ewrite("hlpsl", self.hlpsl)
                self.ewrite("output", self.prot)
                raise Exception, "Tool error: report in files"

            # Normalize line endings: some parsers trip over DOS-style (CRLF) endings.
            self.prot = "".join("%s\n" % l for l in self.prot.splitlines())

            # If needed, write out
            if TestConfig.inGlobalOptions('--output-if'):
                ifname = "%s_%s.if" % (self.protgen.name, self.claim)
                fh = open(ifname, 'w')
                fh.write(self.prot)
                fh.close()
Code example #13
def getStrideDeploymentPairs(nameToHostMap, maxPortcountInSwitch, testCaseName, loadFactor, testDuration, testStartDelay):
    # For each src-dest pair:
    #     log in to src
    #     for each of the flows:
    #         build the corresponding command string and deploy
    srcList, destList = l2StridePatternTestPairCreator(nameToHostMap, maxPortcountInSwitch)
    deploymentPairList = []

    if len(srcList) != len(destList):
        logger.error("srcList and destList differ in length; printing them and exiting")
        logger.error(srcList)
        logger.error(destList)
        exit(1)
    else:
        flowArrivalTimesByflowType = calculateFlowArrivalTimes(loadFactor, testDuration)
        for i in range(0, len(ConfigConst.FLOW_TYPE_IDENTIFIER_BY_FLOW_VOLUME_IN_KB)):
            totalFlowForThisFlowType = len(flowArrivalTimesByflowType[i])
            # Convert the flow volume (KB) into a packet count.
            flowsizeAsPacketCount = math.ceil((ConfigConst.FLOW_TYPE_IDENTIFIER_BY_FLOW_VOLUME_IN_KB[i] * 1024) / ConfigConst.PACKET_SIZE)
            for j in range(0, totalFlowForThisFlowType):
                # Spread the flows of this type round-robin over the src/dest pairs.
                k = j % len(srcList)
                newDeploymentPair = tc.IPerfDeplymentPair(srcList[k], destList[k], srcList[k].getNextIPerf3ClientPort(),
                                  destList[k].getNextIPerf3ServerPort(), testCaseName=testCaseName,
                                  srcHostName=srcList[k].hostName, destHostName=destList[k].hostName,
                                  startTime=flowArrivalTimesByflowType[i][j] + float(testStartDelay), flowSizeinPackets=flowsizeAsPacketCount,
                                  trafficClass=ConfigConst.FLOW_TYPE_TRAFFIC_CLASS[i], bitrate=ConfigConst.FLOW_TYPE_BITRATE[i])
                deploymentPairList.append(newDeploymentPair)
        # Add the QoS test flows, with the duration scaled by the load factor.
        testDurationScaled = loadFactor * 2 * testDuration
        qosDeploymentPair = getQoSTestDeploymentDeploymentPairs(nameToHostMap, testCaseName, testDuration=testDurationScaled, testStartDelay=testStartDelay)
        deploymentPairList.extend(qosDeploymentPair)
    return deploymentPairList
Code example #14
File: TestAll.py Project: bhavanaananda/DataStage
    suite.addTest(TestSubmitDataset.getTestSuite(select=select))
    suite.addTest(TestSubmitDatasetHandler.getTestSuite(select=select))
    suite.addTest(TestDirectoryListingHandler.getTestSuite(select=select))
    suite.addTest(TestMetadataMerging.getTestSuite(select=select))
    suite.addTest(TestGetDatasetMetadataHandler.getTestSuite(select=select))
    suite.addTest(TestHttpSession.getTestSuite(select=select))
    return suite

from MiscLib import TestUtils
import junitxml

if __name__ == "__main__":
    print "============================================================"
    print "This test suite needs to run under a Linux operating system"
    print "Edit TestConfig.py to specify hostname and other parameters"
    print "Create test accounts on target system to match TestConfig.py"
    print "============================================================"
    TestConfig.setDatasetsBaseDir(".")
    
    
    if len(sys.argv) >= 2 and sys.argv[1] == "xml":
        with open('xmlresults.xml', 'w') as report:
            result = junitxml.JUnitXmlResult(report)
            result.startTestRun()
            getTestSuite().run(result)
            result.stopTestRun()
    else:
        TestUtils.runTests("TestAll", getTestSuite, sys.argv)

# End.
Code example #15
import unittest
import subprocess
import json
import os
import util
import time
from TestConfig import *

test_env = os.getenv('test_env', 'aiaas')
env_setup = TestConfig()
config = env_setup.setEnvironment(test_env)

cli = os.path.abspath('./pb-cli/index.js')


class TestPBRemove(unittest.TestCase):
    @classmethod
    def setUpClass(self):
        self.util = util.TestUtil()
        self.util.announce_test_block('pb remove')
        self.hostname = config["hostname"]
        print self.hostname

    def setUp(self):
        self.util.create_and_compile()

    def test_remove_aiml_file(self):
        self.util.it('removes an aiml file from the bot.')

        bot_files = self.util.get_file_list()
Code example #16
import sys
import subprocess
import Configs
import Pid
import TestConfig

TestConfig.permissions_check()

# While zfs destroy is not as taxing as zfs receive, I think that while this
# (potentially large) number of destroys is occurring, we should not allow the
# user to run any other perf tests.
# If we want to test the effect of multiple destroys on receive speed, we can
# work that into the multithread test.

Pid.create_pid_file()

runs_directory = Configs.test_filesystem_path + '/runs'

subprocess.check_call(['zfs', 'destroy', '-r', runs_directory])

subprocess.check_call(
    ['zfs', 'create', Configs.test_filesystem_path + '/runs'])

Pid.destroy_pid_file()
Code example #17
# System dependencies
import sys
import time
import getopt

# Other modules needed for testing
import Protocol
import Test
import Scenario
import TestConfig
import Error
import Report

# The imported protocol generators
protocols = TestConfig.getProtocols()
if "nspk" in protocols:
    import nspk_spdl
    import nspk_hlpsl
    import nspk_pi
    import nspk_spl
if "avispa_tls" in protocols:
    import avispa_tls_spdl
    import avispa_tls_hlpsl
    import avispa_tls_pi
if "eke" in protocols:
    import eke_spdl
    import eke_hlpsl
    import eke_pi
    import eke_spl
Code example #18
File: CleanupRuns.py Project: Phizzal/zfs-tests
import sys
import subprocess
import Configs
import Pid
import TestConfig

TestConfig.permissions_check()

# While zfs destroy is not as taxing as zfs receive, I think that while this
# (potentially large) number of destroys is occurring, we should not allow the
# user to run any other perf tests.
# If we want to test the effect of multiple destroys on receive speed, we can
# work that into the multithread test.

Pid.create_pid_file()

runs_directory = Configs.test_filesystem_path + '/runs'

subprocess.check_call(['zfs', 'destroy', '-r', runs_directory])

subprocess.check_call(['zfs', 'create', 
    Configs.test_filesystem_path + '/runs'])

Pid.destroy_pid_file()

Code example #19
File: RunTests.py Project: Donpedro13/etwprof
        print(case.full_name)

    def on_case_end(case):
        diff = perf_counter() - case_start

        if not case.has_failures():
            StylishPrinter.print_green("[       OK ] ")
            print(f"{case.full_name} ({diff * 1000:.0f} ms)")
        else:
            for f in case.failures:
                print(f)

            StylishPrinter.print_red("[  FAILED  ] ")
            print(f"{case.full_name} ({diff * 1000:.0f} ms)")

    TestConfig.set_testbin_folder_path(testbin_folder_path)

    runner = test_framework.TestRunner()
    runner.on_start = on_start
    runner.on_end = on_end
    runner.on_suite_start = on_suite_start
    runner.on_suite_end = on_suite_end
    runner.on_case_start = on_case_start
    runner.on_case_end = on_case_end

    runner.run(filter)


def fail(error_msg):
    StylishPrinter.print_red(f"{error_msg}\n")
    sys.exit(-1)
Code example #20
File: test_pb_get.py Project: pandorabots/pb-cli
import unittest
import subprocess
import json
import os
import util
import time
from TestConfig import *
test_env = os.getenv('test_env', 'aiaas')
env_setup = TestConfig()
config = env_setup.setEnvironment(test_env)

cli = os.path.abspath('./pb-cli/index.js')

class TestPBGet(unittest.TestCase):
    @classmethod
    def setUpClass(self):
        self.util = util.TestUtil()
        self.util.announce_test_block('pb get')
        self.hostname = config["hostname"]
        print self.hostname

    def setUp(self):
        self.util.create_bot()

    def test_get_file_list(self):
        self.util.it('returns a list of the bot\'s files.')
        self.util.get_ready_to_compile()
        result = subprocess.Popen([
            cli, 'get',
            '--app_id', config['appId'],
Code example #21
File: TestAll.py Project: gklyne/admiral-jiscmrd
    suite.addTest(TestSubmitDataset.getTestSuite(select=select))
    suite.addTest(TestSubmitDatasetHandler.getTestSuite(select=select))
    suite.addTest(TestDirectoryListingHandler.getTestSuite(select=select))
    suite.addTest(TestMetadataMerging.getTestSuite(select=select))
    suite.addTest(TestGetDatasetMetadataHandler.getTestSuite(select=select))
    suite.addTest(TestHttpSession.getTestSuite(select=select))
    return suite


from MiscLib import TestUtils
import junitxml

if __name__ == "__main__":
    print "============================================================"
    print "This test suite needs to run under a Linux operating system"
    print "Edit TestConfig.py to specify hostname and other parameters"
    print "Create test accounts on target system to match TestConfig.py"
    print "============================================================"
    TestConfig.setDatasetsBaseDir(".")

    if len(sys.argv) >= 2 and sys.argv[1] == "xml":
        with open('xmlresults.xml', 'w') as report:
            result = junitxml.JUnitXmlResult(report)
            result.startTestRun()
            getTestSuite().run(result)
            result.stopTestRun()
    else:
        TestUtils.runTests("TestAll", getTestSuite, sys.argv)

# End.
Code example #22
import argparse
import time

import Common
import Configs
import MonitorThread
import Pid
import ReceiveThread
import Results
import TestConfig
import ZfsApi

parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action="store_true",
        help="The script will periodically print stats about TXGs and "
        "receive speed")
parser.add_argument('-t', '--threads', type=int, default=4,
        choices=xrange(1,32),
        help="The number of concurrent receives to perform")
args = parser.parse_args()

# Use TestConfig to ensure this computer is set up properly
TestConfig.check_all()
# This test case will use the test send file, check that it will work
TestConfig.check_testfile()

Pid.create_pid_file()

# Establish where this test will be writing its output
current_min = time.strftime("%Y%m%d%H%M%S")
zfs_receive_path = Configs.test_filesystem_path + '/runs/' + current_min

start_txg = ZfsApi.get_current_txg(Configs.main_pool)

results_collector = Results.ResultsCollector(zfs_receive_path)
results_collector.gather_start_results()

if args.verbose:
Code example #23
File: AllTests.py Project: bhavanaananda/DataStage
logger  =  logging.getLogger("AllTests")


# Code to run unit tests from all library test modules
def getTestSuite(select="all"):
    suite = unittest.TestSuite()
    suite.addTest(TestAll.getTestSuite(select=select))
    return suite


if __name__ == "__main__":
    print "============================================================"
    print "This test suite needs to run under a Linux operating system"
    print "Edit TestConfig.py to specify hostname and other parameters"
    print "Create test accounts on target system to match TestConfig.py"
    print "============================================================"
    #print repr( commands.getstatusoutput('ls ../../'))
    TestConfig.setDatasetsBaseDir("../../SubmitDatasetHandler/tests")
    
        
    if len(sys.argv) >= 2 and sys.argv[1] == "xml":
        with open('xmlresults.xml', 'w') as report:
            result = junitxml.JUnitXmlResult(report)
            result.startTestRun()
            getTestSuite().run(result)
            result.stopTestRun()
    else:
        TestUtils.runTests("AllTests", getTestSuite, sys.argv)

# End.
Code example #24
def main():
    try:
        opts, args = getopt.getopt(sys.argv[1:], "ho:", ["help",
            "protocol=","claims=","run-max=","run-step=",
            "output=","output-if","tools="])
    except getopt.GetoptError:
        print "Could not parse arguments."
        usage()
        sys.exit(1)
    if args != []:
        print "Superflous arguments %s not recognized." % args
        usage()
        sys.exit(1)

    # Init
    Report.replog("Scanning main")
    prots = gatherClaims()
    Report.replog(repr(prots)+"\n")

    # Parse options
    (runmin,runmax,runstep) = TestConfig.getMaxProc()
    filter = None
    fclaim = None
    TestConfig.setGlobalOptions()   # Init global dict
    for o,a in opts:
        if o in ("-h","--help"):
            usage()
            sys.exit()
        elif o == "--protocol":
            filter = a
        elif o == "--claims":
            fclaim = a
        elif o == "--tools":
            TestConfig.setGlobalOption("tools", a.split(","))
        elif o == "--run-max":
            runmax = int(a)
            if runmax < runmin:
                runmin = runmax
        elif o == "--run-step":
            runstep = int(a)
            if runstep <= 0:
                assert False, "Run step must be greater than 0"
        elif o in ["-o","--output"]:
            Report.setFile(a)
        elif o == "--output-if":
            TestConfig.addGlobalOption(o)
        else:
            uo = "Unhandled option '%s'" % o
            assert False, uo

    proc = runmin
    while proc <= runmax:
        to = TestConfig.getTimeout(maxproc=proc)
        Report.separator()
        Report.replog("Testing performance for MaxProc(%i) from range (%i,%i,%i)" % (proc,runmin,runmax,runstep ))
        Report.replog("Using timeout of %i seconds" % (to))
        Report.replog("Current time and date: %s" % (time.asctime()))
        Report.separator()

        if filter in prots.keys():
            doProtocol(prots,filter,fclaim,maxproc=proc)
        else:
            for pn in prots.keys():
                doProtocol(prots,pn,maxproc=proc)
        proc += runstep
Code example #25
File: AllTests.py Project: mdeguzis/python-datastage
import junitxml
logger = logging.getLogger("AllTests")


# Code to run unit tests from all library test modules
def getTestSuite(select="all"):
    suite = unittest.TestSuite()
    suite.addTest(TestAll.getTestSuite(select=select))
    return suite


if __name__ == "__main__":
    print "============================================================"
    print "This test suite needs to run under a Linux operating system"
    print "Edit TestConfig.py to specify hostname and other parameters"
    print "Create test accounts on target system to match TestConfig.py"
    print "============================================================"
    #print repr( commands.getstatusoutput('ls ../../'))
    TestConfig.setDatasetsBaseDir("../../SubmitDatasetHandler/tests")

    if len(sys.argv) >= 2 and sys.argv[1] == "xml":
        with open('xmlresults.xml', 'w') as report:
            result = junitxml.JUnitXmlResult(report)
            result.startTestRun()
            getTestSuite().run(result)
            result.stopTestRun()
    else:
        TestUtils.runTests("AllTests", getTestSuite, sys.argv)

# End.
Code example #26
import argparse
import time

import Configs
import Pid
import Results
import TestConfig
import ZfsApi

parser = argparse.ArgumentParser()
parser.add_argument(
    "-v",
    "--verbose",
    action="store_true",
    help="The script will periodically print stats about TXGs and " "receive speed",
)
parser.add_argument(
    "-t", "--threads", type=int, default=4, choices=xrange(1, 32), help="The number of concurrent receives to perform"
)
args = parser.parse_args()

# Use TestConfig to ensure this computer is set up properly
TestConfig.check_all()
# This test case will use the test send file, check that it will work
TestConfig.check_testfile()

Pid.create_pid_file()

# Establish where this test will be writing its output
current_min = time.strftime("%Y%m%d%H%M%S")
zfs_receive_path = Configs.test_filesystem_path + "/runs/" + current_min

start_txg = ZfsApi.get_current_txg(Configs.main_pool)

results_collector = Results.ResultsCollector(zfs_receive_path)
results_collector.gather_start_results()

if args.verbose:
Code example #27
    def verify(self, prot, args, timeout=-1):
        """
        Input is a protocol, extra args, and timeout, output should be a result
        """

        self.prot = prot
        self.args = args

        self.preprocess()

        self.duration = 0

        fh = tempfile.NamedTemporaryFile(suffix=self.extension)
        fname = fh.name
        fh.write(self.prot)
        fh.flush()
        """ Make a timer file """
        fhtime = tempfile.NamedTemporaryFile(suffix=".timer")
        fhtimename = fhtime.name
        timer = "time --quiet -f \"\\n"
        timer += "@@%U\\n"
        timer += "@@%S\\n"
        timer += "@@%M\\n"
        timer += "@@%E\\n"
        timer += "\" --output " + fhtimename + " --append "

        subtool = self.toolcommand(fname)
        cmd = timer + subtool
        self.command = cmd

        def onTimeout(d):
            # Called by pexpect when the timeout expires: record the fact.
            self.timeoutflag = True
            return True

        self.starttime = time.time()
        self.timeoutflag = False

        if timeout != -1:
            self.output = pexpect.run(cmd,
                                      timeout=timeout,
                                      events={pexpect.TIMEOUT: onTimeout})
        else:
            self.output = pexpect.run(cmd)
        """ Compute real used time (user+sys) """
        timeres = []
        log = []
        for l in fhtime.readlines():
            cl = l.strip()
            log.append(cl)
            if cl.startswith("@@"):
                timeres.append(cl[2:])
        if len(timeres) >= 4:
            try:
                timeUser = float(timeres[0])  # seconds
            except ValueError:
                wrongexitstring = "Command exited with non-zero status"
                if timeres[0].startswith(wrongexitstring):
                    self.reportError(timeres[0])
                    raise Error.WrongExit
                print cmd
                print timeres
                assert False, "Could not parse time from '%s' of %s." % (
                    timeres[0], self.name)
                sys.exit(1)
            timeSys = float(timeres[1])  # seconds..
            timeMem = timeres[2]  # In KiloBytes
            timeClock = parseClock(
                timeres[3])  # hours:minutes:seconds.xx, converted to seconds
            """ Old computation """
            #self.duration = time.time() - self.starttime
            measurement = timeUser + timeSys  # time for measurements

            self.duration = measurement

            self.durationWall = timeClock  # time for comparing
            self.memory = timeMem  # max memory usage
            """ Record minimal time for this tool """
            if TestConfig.getMinTime(self.name) == None:
                changed = False
                if self.minTime == None:
                    changed = True
                else:
                    if measurement < self.minTime:
                        changed = True
                if changed == True:
                    self.minTime = measurement
                    rst = "For tool %s, minimal time detected on %s: %gs" \
                            % (self.name,TestConfig.machineName(),self.minTime)
                    Report.replog(rst)

        else:
            # Missing time output can only be caused by the subprocess being
            # killed externally on timeout.
            if not self.timeoutflag:
                # TODO abort for other reason?
                assert False, "Timeout flag not set, but could not parse time output from\n%s" \
                        % (log)
            self.timeoutflag = True

        fhtime.close()

        if self.timeoutflag:
            self.result = TIMEOUT
            self.duration = timeout
            self.durationWall = timeout
            self.memory = 0
        else:
            self.result = self.analyse()

        self.cleanUp()
        self.detectError()
        return self.result
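A hedged sketch of the parseClock helper assumed above (its real definition is not shown in this excerpt): GNU time's %E field prints elapsed wall-clock time as [hours:]minutes:seconds.xx, which this converts to seconds.

# Sketch only: convert "h:mm:ss.xx" or "m:ss.xx" to seconds.
def parseClock(s):
    seconds = 0.0
    for part in s.strip().split(":"):
        seconds = seconds * 60 + float(part)
    return seconds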