Example 1
def start(config=myConfig, test_selection='Ganga.test.*', logger=myLogger):
    """Configure the Ganga/PyTF test loader and run the selected tests.

    Returns the exit code from runTests.main(), or -9999 if the runner fails.
    """
    import os
    #rtconfig = getConfig('TestingFramework')
    my_full_path = os.path.abspath(os.path.dirname(__file__))
    #sys.stdout = UnbufferedStdout(sys.stdout)
    #sys.stderr = UnbufferedStdout(sys.stderr)

    # configure the Ganga TestLoader

    # enable XML reporting in release mode
    pytf_reporting_opts = ""
    if config['ReleaseTesting']:
        pytf_reporting_opts = "--report-xml --report-outputdir=%(ReportsOutputDir)s --runid=%(RunID)s" % config

    # make sure the log output directory exists
    if not os.path.exists(config['LogOutputDir']):
        os.makedirs(config['LogOutputDir'])
        
    # loader path
    global gangaReleaseTopDir
    pytf_loader_path = os.path.join(gangaReleaseTopDir,'python','GangaTest','Framework','loader.py')
    
    # loader args
    pytf_loader_args = []
    pytf_loader_args.append('--loader-args=%s' % config['Config'])
    #pytf_loader_args.append('--loader-args=%s/python' % gangaReleaseTopDir)
    pytf_loader_args.append('--loader-args=%s' % gangaReleaseTopDir)
    pytf_loader_args.append('--loader-args=%s' % int(config['ReleaseTesting']))
    # output dir
    pytf_loader_args.append('--loader-args=%s' % config['LogOutputDir'])
    # unit testing: on/off
    pytf_loader_args.append('--loader-args=%s' % int(config['SearchLocalTests']))
    # system testing: on/off
    pytf_loader_args.append('--loader-args=%s' % int(config['SearchReleaseTests']))
    # report path (reports output dir + run id)
    pytf_loader_args.append('--loader-args=%s' % os.path.join(config['ReportsOutputDir'], config['RunID']))
    # schema version (if any) to test
    pytf_loader_args.append('--loader-args=%s' % config['SchemaTesting'])
    
    #print("PYTF path %s config: %s" % (pytf_loader_path, pytf_loader_args))
    import sys
    sys.path.append(os.getenv('PYTF_TOP_DIR','').split(':')[0])
    sys.path.append(os.path.join(os.getenv('PYTF_TOP_DIR','').split(':')[0],'pytf'))
    import runTests
    # assemble the full PyTF command line
    runner_args = []
    runner_args.extend(pytf_reporting_opts.split())
    runner_args.append('--loader-path=%s' % pytf_loader_path)
    runner_args.extend(pytf_loader_args)
    runner_args.append(test_selection)

    try:
        rc = runTests.main(logger, runner_args)
    except Exception:
        # any failure inside the runner is mapped to a sentinel error code
        rc = -9999
    return rc
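
A minimal usage sketch for start() follows. The myConfig/myLogger defaults, gangaReleaseTopDir and the PYTF_TOP_DIR environment variable are module-level state not shown here, so the config keys below simply mirror the lookups made inside start(); every value is an illustrative placeholder, not a real Ganga setting.

# hypothetical invocation of start(); all values are illustrative placeholders
import logging

demo_config = {
    'ReleaseTesting': False,                        # skip XML reporting
    'LogOutputDir': '/tmp/ganga-test-logs',
    'Config': 'localhost.ini',
    'SearchLocalTests': True,                       # unit tests on
    'SearchReleaseTests': False,                    # system tests off
    'ReportsOutputDir': '/tmp/ganga-test-reports',
    'RunID': 'latest',
    'SchemaTesting': '',
}

rc = start(config=demo_config,
           test_selection='Ganga.test.*',
           logger=logging.getLogger('GangaTest'))
print('test runner exit code:', rc)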
Example 2
def run(self):
    # run the test suite from within the tests/ directory
    os.chdir("tests")
    sys.path.insert(0, '')
    import runTests
    # run everything by default, or only the comma-separated tests requested
    runTests.main([] if self.tests is None else self.tests.split(','))
Example 3
    def run(self):
        os.chdir("tests")
        sys.path.insert(0, "")
        import runTests

        runTests.main([])
Example 4
def run(self):
    os.chdir("tests")
    sys.path.insert(0, '')
    import runTests
    runTests.main([])
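
The run() bodies in the three preceding examples read like custom test commands in a setup.py. A minimal sketch of how such a command might be wired up with setuptools is shown below; only the run() body is taken from the examples, while the class name, option name and setup() metadata are assumptions for illustration.

# hypothetical setup.py wiring for a custom "test" command; only run() mirrors
# the examples above, everything else is illustrative
import os
import sys
from setuptools import Command, setup


class RunTestsCommand(Command):
    description = "run the project's test suite via runTests.main()"
    user_options = [("tests=", "t", "comma-separated list of tests to run")]

    def initialize_options(self):
        self.tests = None

    def finalize_options(self):
        pass

    def run(self):
        os.chdir("tests")
        sys.path.insert(0, '')
        import runTests
        runTests.main([] if self.tests is None else self.tests.split(','))


setup(
    name="example-package",      # placeholder metadata
    version="0.0.0",
    cmdclass={"test": RunTestsCommand},
)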
Example 5
def main():
    # init stuff
    started = time.time()
    utils.init_logging(constants.FN_QUICK_TEST_EVENTS, logger, "XT Quick-Test")

    print("---- Quick-Test ----")

    args = parse_args()

    if args.tests:
        args.reset_workspace = False
    else:
        args.tests = [
            "config", "search-scale", "show-controller", "run-index",
            "storage-provider", "hp-client", "search-provider", "run",
            "feature", "cancel", "storage", "option", "action", "tag", "demo"
        ]

    print("Tests requested:", args.tests)

    store, config = quick_prep(args.reset_workspace)
    stats = {}

    # if "config" in args.tests:
    #     count = config_tests.main()
    #     stats["config_tests"] = count

    # not yet ready for quick-test (takes 10+ mins by itself)
    # if "search-scale" in args.tests:
    #     # search scaling tests (low level)
    #     count = searchScaleTests.main()
    #     stats["search_scale"] = count

    if "show-controller" in args.tests:
        count = show_controller_tests()
        stats["show_controller_tests"] = count

    if "run-index" in args.tests:
        # run index tests (low level)
        count = runIndexTests.main()
        stats["run_index"] = count

    if "storage-provider" in args.tests:
        # provider tests (low level)
        count = storageProviderTests.main()
        stats["asure_storage_provider"] = count

    if "hp-client" in args.tests:
        # client-side HP processing/search
        count = hp_client_tests.main()
        stats["hp_client"] = count

    if "search-provider" in args.tests:
        # provider tests (low level)
        count = hpSearchTests.main(philly=args.philly)
        stats["hp_search_provider"] = count

    if "plot" in args.tests:
        # plot command testing
        count = plotCommands.main()
        stats["plot"] = count

    if "run" in args.tests:
        # run tests (lots of combinations)
        count = runTests.main(philly=args.philly)
        stats["run"] = count

    if "feature" in args.tests:
        count = feature_tests()
        stats["feature_tests"] = count

    if "cancel" in args.tests:
        count = cancel_tests(philly=args.philly)
        stats["cancel_tests"] = count

    if "storage" in args.tests:
        count = storage_tests()
        stats["storage_tests"] = count

    if "option" in args.tests:
        count = option_tests(config, store)
        stats["option_tests"] = count

    if "action" in args.tests:
        count = action_tests(philly=args.philly)
        stats["action_tests"] = count

    if "tag" in args.tests:
        count = tagTests.main(philly=args.philly)
        stats["tagTests"] = count

    if "demo" in args.tests:
        count = run_xt_demo(philly=args.philly, basic_mode=1)
        count = run_xt_demo(philly=args.philly, basic_mode=0)
        stats["xt_demo"] = count

    # print summary of results
    total_count = 0
    print("\nquick-test summary:")

    for name, count in stats.items():
        print("  {}: {}".format(name, count))
        total_count += count

    print("  total tests: {}".format(total_count))

    cleanup()

    elapsed = time.time() - started
    print("\n*** quick-test PASSED: (test count={}, elapsed: {:.2f} mins) ***".
          format(total_count, elapsed / 60))
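
main() relies on a parse_args() helper that is not shown. A minimal sketch consistent with the attributes used above (args.tests, args.reset_workspace, args.philly) might look like the following; the flag names, help strings and defaults are assumptions, not taken from the real XT quick-test driver.

# hypothetical parse_args(); flag names and defaults are assumed
import argparse

def parse_args():
    parser = argparse.ArgumentParser(description="XT quick-test driver")
    parser.add_argument("--tests", nargs="*", default=None,
                        help="subset of test groups to run (default: the full quick-test list)")
    parser.add_argument("--philly", action="store_true",
                        help="also exercise the Philly backend where supported")
    parser.add_argument("--no-reset-workspace", dest="reset_workspace",
                        action="store_false", default=True,
                        help="keep the existing quick-test workspace")
    return parser.parse_args()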