Beispiel #1
0
class Runner(object):
    def __init__(self, argParser, verbose = 2):
        """Prepare the unittest machinery and reporting state for a run.

        :param argParser: parsed command-line arguments wrapper; kept for
            later option lookups (e.g. ``--sysconf``).
        :param verbose: verbosity level forwarded to ``TextTestRunner``.
        """
        # Command-line options drive test discovery and configuration.
        self.argParser = argParser
        # unittest plumbing: the loader builds suites, the runner executes them.
        self.testLoader = unittest.TestLoader()
        self.runner = unittest.TextTestRunner(verbosity = verbose)
        # Collected test cases and the xunit-style report they feed into.
        self.tests = []
        self.report = XUnitTestResult()
        # Populated later, during an actual run.
        self.suite = None
        self.result = None
        
    def run(self):
        """Entry point: gather test cases, apply system config, execute them.

        Order matters: the system configuration must be installed before
        ``_run_case`` starts executing tests.  ``_get_testcase`` is not
        visible in this chunk; presumably it populates ``self.tests`` —
        TODO confirm against the full source.
        """
        self._get_testcase()
        self._get_sys_conf()
        self._run_case()
    
    def _get_sys_conf(self):
        """Parse the ``--sysconf`` file, when given, and install it globally."""
        sysconf = self.argParser.options.sysconf
        if not sysconf:
            # No system configuration supplied on the command line.
            return
        log.debug('sys conf is: %s' % sysconf)
        parsed = SysConfig()
        parsed.parse_from_config(sysconf)
        self._update_ini_conf(parsed)
    
    def _update_ini_conf(self, conf):
        """Publish the parsed server configuration for the whole process.

        :param conf: parsed system configuration (see ``_get_sys_conf``);
            its ``servers`` attribute becomes the shared ini configuration
            on ``TestInputSingleton``.
        """
        log.debug('_update_ini_conf ')
        TestInputSingleton.ini_config = conf.servers

    def writeReport(self, case):
        """Append *case* to the xunit report and flush it to the log directory.

        :param case: object whose instance attributes describe one test
            result; all of them are forwarded to ``report.add_test``.
        """
        timestamp = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
        report_dir = self._create_log_dir()
        self.report.add_test(**case.__dict__)
        report_path = "{0}{2}report-{1}".format(report_dir, timestamp, os.sep)
        self.report.write(report_path)
        
    def _create_log_dir(self):
        """Ensure a timestamped log directory exists under ``$SNTF_HOME/logs``
        and return its path.

        NOTE(review): ``Path`` appears to be an alias of ``os.path`` (it is
        used for ``join``/``exists``) — confirm against the module imports.
        ``os.getenv('SNTF_HOME')`` returns None when the variable is unset,
        which would make the join fail; presumably the environment is always
        configured before a run — TODO confirm.
        """
        dir_name = '-'.join(['SNTF', time.strftime('%Y-%m-%d-%H-%M-%S')])
        log_path = Path.join(os.getenv('SNTF_HOME'), 'logs', dir_name)
        if not Path.exists(log_path):
            os.makedirs(log_path)
        return log_path
    
    def _run_case(self):
        for case in self.tests:
            # Update the test params for each test
            self._update_test_params(case = case)
            
            try:
                #load test case
                self.suite = self.testLoader.loadTestsFromName(case.name)
            except AttributeError, e:
                print "Test {0} was not found: {1}".format(case.name, e)
            except SyntaxError, e:
                print "SyntaxError in {0}: {1}".format(case.name, e)
            else:
Beispiel #2
0
 def __init__(self, argParser, verbose = 2):
     """Initialise unittest loader/runner and xunit reporting state.

     :param argParser: parsed command-line arguments wrapper
     :param verbose: verbosity passed through to ``TextTestRunner``
     """
     self.suite = None                # last loaded suite (set during a run)
     self.testLoader = unittest.TestLoader()
     self.runner = unittest.TextTestRunner(verbosity = verbose)
     self.tests = []                  # test cases queued for execution
     self.report = XUnitTestResult()  # xunit-style result aggregator
     self.result = None               # outcome of the most recent run
     self.argParser = argParser
Beispiel #3
0
def runtests(names, options, arg_i, arg_p, runtime_test_params):
    """Run every named test case and collect results into an xunit report.

    :param names: test names, each optionally followed by ``,k=v,...`` params.
    :param options: parsed command-line options (``loglevel`` is used here).
    :param arg_i: the original ``-i`` argument, echoed for reproducibility.
    :param arg_p: the original ``-p`` argument, echoed for reproducibility.
    :param runtime_test_params: run-time params; they override the per-test
        params parsed from each entry in *names*.
    :return: tuple ``(results, xunit, report_path_prefix)``.
    """
    log.info("\nNumber of tests to be executed: " + str(len(names)))
    BEFORE_SUITE = "suite_setUp"
    AFTER_SUITE = "suite_tearDown"
    xunit = XUnitTestResult()
    # Create root logs directory
    abs_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    # Create testrunner logs subdirectory
    str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
    root_log_dir = os.path.join(
        abs_path, "logs{0}testrunner-{1}".format(os.sep, str_time))
    if not os.path.exists(root_log_dir):
        os.makedirs(root_log_dir)

    results = []
    case_number = 1

    if "GROUP" in runtime_test_params:
        print(("Only cases in GROUPs '{0}' will be executed".format(
            runtime_test_params["GROUP"])))
    if "EXCLUDE_GROUP" in runtime_test_params:
        print(("Cases from GROUPs '{0}' will be excluded".format(
            runtime_test_params["EXCLUDE_GROUP"])))

    if TestInputSingleton.input.param("get-delays", False):
        # start measure_sched_delays on all servers
        sd = SchedDelays(TestInputSingleton.input.servers)
        sd.start_measure_sched_delays()

    if TestInputSingleton.input.param("hanging_threads", False):
        print("--> hanging_threads: start monitoring...")
        from hanging_threads import start_monitoring
        hanging_threads_frozen_time = int(
            TestInputSingleton.input.param("hanging_threads", 120))
        hanging_threads_test_interval = int(
            TestInputSingleton.input.param("test_interval", 1000))
        monitoring_thread = start_monitoring(
            seconds_frozen=hanging_threads_frozen_time,
            test_interval=hanging_threads_test_interval)

    for name in names:
        start_time = time.time()
        # split "testname,k1=v1,k2=v2" into alternating key/value tokens
        argument_split = [
            a.strip() for a in re.split("[,]?([^,=]+)=", name)[1:]
        ]
        params = dict(list(zip(argument_split[::2], argument_split[1::2])))

        # Note that if ALL is specified at runtime then tests which have no
        # groups are still run - just being explicit on this

        if "GROUP" in runtime_test_params and "ALL" not in runtime_test_params[
                "GROUP"].split(";"):
            if 'GROUP' not in params:  # params is the .conf file parameters.
                # this test is not in any groups so we do not run it
                print((
                    "test '{0}' skipped, a group was requested and this is not any groups"
                    .format(name)))
                continue

            # there is a group for this test case, if that group is not specified at run time then do not run it
            elif not set(runtime_test_params["GROUP"].split(";")).issubset(
                    set(params["GROUP"].split(";"))):
                print(("test '{0}' skipped, is not in the requested group".
                       format(name)))
                continue
            else:
                pass  # the test was in requested group, will run it

        elif "EXCLUDE_GROUP" in runtime_test_params:
            if 'GROUP' in params and \
                set(runtime_test_params["EXCLUDE_GROUP"].split(";")).issubset(set(params["GROUP"].split(";"))):
                print(
                    ("test '{0}' skipped, is in an excluded group".format(name)
                     ))
                continue

        # Create Log Directory
        logs_folder = os.path.join(root_log_dir, "test_%s" % case_number)
        os.mkdir(logs_folder)
        test_log_file = os.path.join(logs_folder, "test.log")
        log_config_filename = r'{0}'.format(
            os.path.join(logs_folder, "test.logging.conf"))
        create_log_file(log_config_filename, test_log_file, options.loglevel)
        logging.config.fileConfig(log_config_filename)
        print(("Logs will be stored at {0}".format(logs_folder)))
        print(("\n.{3}testrunner -i {0} -p {1} -t {2}\n"\
              .format(arg_i or "", arg_p or "", name, os.sep)))
        name = name.split(",")[0]

        # Update the test params for each test
        TestInputSingleton.input.test_params = params
        TestInputSingleton.input.test_params.update(runtime_test_params)
        TestInputSingleton.input.test_params["case_number"] = case_number
        TestInputSingleton.input.test_params["logs_folder"] = logs_folder
        print("Test Input params:")
        print((TestInputSingleton.input.test_params))
        if "get-coredumps" in TestInputSingleton.input.test_params:
            if TestInputSingleton.input.param("get-coredumps", True):
                clear_old_core_dumps(TestInputSingleton.input, logs_folder)
        if case_number == 1:
            # run the module-level suite_setUp once, before the first case
            before_suite_name = "%s.%s" % (name[:name.rfind('.')],
                                           BEFORE_SUITE)
            try:
                print(("Run before suite setup for %s" % name))
                suite = unittest.TestLoader().loadTestsFromName(
                    before_suite_name)
                print(("-->before_suite_name:{},suite: {}".format(
                    before_suite_name, suite)))
                result = unittest.TextTestRunner(verbosity=2).run(suite)
                print(("-->result: {}".format(result)))
                if "get-coredumps" in TestInputSingleton.input.test_params:
                    if TestInputSingleton.input.param("get-coredumps", True):
                        if get_core_dumps(TestInputSingleton.input,
                                          logs_folder):
                            result = unittest.TextTestRunner(
                                verbosity=2)._makeResult()
                            result.errors = [
                                (name, "Failing test : new core dump(s) "
                                 "were found and collected."
                                 " Check testrunner logs folder.")
                            ]
                            log.info(
                                "FAIL: New core dump(s) was found and collected"
                            )
            except AttributeError as ex:
                # a missing suite_setUp is tolerated; the traceback is still
                # printed so genuine load errors are visible
                traceback.print_exc()
                pass
        try:
            suite = unittest.TestLoader().loadTestsFromName(name)
        except AttributeError as e:
            print(("Test {0} was not found: {1}".format(name, e)))
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, str(e))]
        except SyntaxError as e:
            print(("SyntaxError in {0}: {1}".format(name, e)))
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, str(e))]
        else:
            test_timeout = TestInputSingleton.input.param("test_timeout", None)
            # BUG FIX: ``args`` was ``(suite)`` — the bare suite, not a tuple.
            # A TestSuite is iterable, so Thread-style arg unpacking would
            # splat its tests as separate positional arguments.  Pass a
            # 1-tuple so run() receives the whole suite.
            t = StoppableThreadWithResult(
                target=unittest.TextTestRunner(verbosity=2).run,
                name="test_thread",
                args=(suite, ))
            t.start()
            result = t.join(timeout=test_timeout)
            if "get-coredumps" in TestInputSingleton.input.test_params:
                if TestInputSingleton.input.param("get-coredumps", True):
                    if get_core_dumps(TestInputSingleton.input, logs_folder):
                        result = unittest.TextTestRunner(
                            verbosity=2)._makeResult()
                        result.errors = [(name,
                                          "Failing test : new core dump(s) "
                                          "were found and collected."
                                          " Check testrunner logs folder.")]
                        log.info(
                            "FAIL: New core dump(s) was found and collected")
            if not result:
                # join() timed out: the test thread is still running
                for t in threading.enumerate():
                    if t != threading.current_thread():
                        # BUG FIX: ``t._Thread__stop()`` is a Python 2 private
                        # API; on Python 3 it raised AttributeError and
                        # aborted the timeout handling.  Keep it best-effort.
                        try:
                            t._Thread__stop()
                        except AttributeError:
                            pass
                result = unittest.TextTestRunner(verbosity=2)._makeResult()
                case_number += 1000
                print("========TEST WAS STOPPED DUE TO  TIMEOUT=========")
                result.errors = [(name, "Test was stopped due to timeout")]
        time_taken = time.time() - start_time

        # Concat params to test name
        # To make tests more readable
        params = ''
        if TestInputSingleton.input.test_params:
            for key, value in list(
                    TestInputSingleton.input.test_params.items()):
                if key and value:
                    params += "," + str(key) + ":" + str(value)

        if result.failures or result.errors:
            # Immediately get the server logs, if
            # the test has failed or has errors
            if "get-logs" in TestInputSingleton.input.test_params:
                get_server_logs(TestInputSingleton.input, logs_folder)

            if "get-logs-cluster-run" in TestInputSingleton.input.test_params:
                if TestInputSingleton.input.param("get-logs-cluster-run",
                                                  True):
                    # Generate path to ns_server directory
                    ns_server_path = os.path.normpath(abs_path + os.sep +
                                                      os.pardir + os.sep +
                                                      "ns_server")
                    get_logs_cluster_run(TestInputSingleton.input, logs_folder,
                                         ns_server_path)

            if "get-cbcollect-info" in TestInputSingleton.input.test_params:
                if TestInputSingleton.input.param("get-cbcollect-info", True):
                    get_cbcollect_info(TestInputSingleton.input, logs_folder)

            if "get-couch-dbinfo" in TestInputSingleton.input.test_params and \
                TestInputSingleton.input.param("get-couch-dbinfo", True):
                get_couch_dbinfo(TestInputSingleton.input, logs_folder)

            errors = []
            # only the first failure and the first error make it into xunit
            for failure in result.failures:
                test_case, failure_string = failure
                errors.append(failure_string)
                break
            for error in result.errors:
                test_case, error_string = error
                errors.append(error_string)
                break
            xunit.add_test(name=name,
                           status='fail',
                           time=time_taken,
                           errorType='membase.error',
                           errorMessage=str(errors),
                           params=params)
            results.append({"result": "fail", "name": name})
        else:
            xunit.add_test(name=name, time=time_taken, params=params)
            results.append({
                "result": "pass",
                "name": name,
                "time": time_taken
            })
        xunit.write("{0}{2}report-{1}".format(os.path.dirname(logs_folder),
                                              str_time, os.sep))
        xunit.print_summary()
        print(("testrunner logs, diags and results are available under {0}".
               format(logs_folder)))
        case_number += 1
        if (result.failures or result.errors) and \
                TestInputSingleton.input.param("stop-on-failure", False):
            print("test fails, all of the following tests will be skipped!!!")
            break

    # NOTE(review): if *names* is empty, ``name`` and ``logs_folder`` below
    # are unbound (NameError) — callers appear to always pass at least one.
    after_suite_name = "%s.%s" % (name[:name.rfind('.')], AFTER_SUITE)
    try:
        print(("Run after suite setup for %s" % name))
        suite = unittest.TestLoader().loadTestsFromName(after_suite_name)
        result = unittest.TextTestRunner(verbosity=2).run(suite)
    except AttributeError as ex:
        # no suite_tearDown defined for this module - nothing to do
        pass
    if "makefile" in TestInputSingleton.input.test_params:
        # print out fail for those tests which failed and do sys.exit() error code
        fail_count = 0
        for result in results:
            if result["result"] == "fail":
                print((result["name"], " fail "))
                fail_count += 1
            else:
                print((result["name"], " pass"))
        if fail_count > 0:
            sys.exit(1)

    print("During the test, Remote Connections: %s, Disconnections: %s" %
          (RemoteMachineShellConnection.connections,
           RemoteMachineShellConnection.disconnections))

    if TestInputSingleton.input.param("get-delays", False):
        sd.stop_measure_sched_delay()
        sd.fetch_logs()

    # terminate any non main thread - these were causing hangs
    for t in threading.enumerate():
        # BUG FIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
        if t.name != 'MainThread' and t.is_alive():
            print(('Thread', t,
                   'was not properly terminated, will be terminated now.'))
            if hasattr(t, 'shutdown'):
                print("Shutting down the thread...")
                t.shutdown(True)
            else:
                print("Stopping the thread...")
                try:
                    t._stop()
                except Exception as e:
                    pass

    return results, xunit, "{0}{2}report-{1}".format(
        os.path.dirname(logs_folder), str_time, os.sep)
Beispiel #4
0
def main():
    """Parse arguments, run each named test in sequence, write xunit reports.

    Exits with status 1 (when the ``makefile`` test param is set) if any
    test failed.
    """
    watcher()

    names, test_params, arg_i, arg_p, options = parse_args(sys.argv)
    # get params from command line
    TestInputSingleton.input = TestInputParser.get_test_input(sys.argv)
    # ensure command line params get higher priority
    test_params.update(TestInputSingleton.input.test_params)
    TestInputSingleton.input.test_params = test_params
    print("Global Test input params:")
    pprint(TestInputSingleton.input.test_params)

    xunit = XUnitTestResult()

    # Create root logs directory
    if not os.path.exists("logs"):
        os.makedirs("logs")

    # Create testrunner logs subdirectory
    str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
    logs_folder = "logs/testrunner-" + str_time
    logs_folder_abspath = os.path.abspath(logs_folder)
    if not os.path.exists(logs_folder):
        os.makedirs(logs_folder)

    results = []
    case_number = 1
    if "GROUP" in test_params:
        print("Only cases in GROUPs '{0}' will be executed".format(test_params["GROUP"]))
    if "EXCLUDE_GROUP" in test_params:
        print("Cases from GROUPs '{0}' will be excluded".format(test_params["EXCLUDE_GROUP"]))

    for name in names:
        start_time = time.time()
        argument_split = [a.strip() for a in re.split("[,]?([^,=]+)=", name)[1:]]
        params = dict(list(zip(argument_split[::2], argument_split[1::2])))

        # BUG FIX: ``("GROUP" or "EXCLUDE_GROUP") in test_params`` always
        # reduced to ``"GROUP" in test_params``, so an EXCLUDE_GROUP-only run
        # never filtered anything; the old skip expression could also
        # KeyError when only one of the two keys was present.  Check both
        # keys and guard each dictionary access.
        if "GROUP" in test_params or "EXCLUDE_GROUP" in test_params:
            # determine if the test relates to the specified group(s)
            # (groups can be separated by ';')
            group_mismatch = "GROUP" in test_params and not (
                "GROUP" in params and
                set(test_params["GROUP"].split(";")) & set(params["GROUP"].split(";")))
            excluded = "EXCLUDE_GROUP" in test_params and bool(
                "EXCLUDE_GROUP" in params and
                set(test_params["EXCLUDE_GROUP"].split(";")) & set(params["EXCLUDE_GROUP"].split(";")))
            if group_mismatch or excluded:
                print("test '{0}' was skipped".format(name))
                continue

        log_config_filename = ""
        # reduce the number of chars in the file name, if there are many
        # (255 bytes filename limit)
        if len(name) > 240:
            name = os.path.join(name[:220] + time.strftime("%y-%b-%d_%H-%M-%S", time.localtime()))
        if params:
            log_name = os.path.join(logs_folder_abspath, name + ".log")
            log_config_filename = os.path.join(logs_folder_abspath, name + ".logging.conf")
        else:
            dotnames = name.split('.')
            log_name = os.path.join(logs_folder_abspath, dotnames[-1] + ".log")
            log_config_filename = os.path.join(logs_folder_abspath, dotnames[-1] + ".logging.conf")
        create_log_file(log_config_filename, log_name, options.loglevel)
        logging.config.fileConfig(log_config_filename)
        print("\n./testrunner -i {0} {1} -t {2}\n"\
              .format(arg_i or "", arg_p or "", name))
        name = name.split(",")[0]

        # Update the test params for each test
        TestInputSingleton.input.test_params = params
        TestInputSingleton.input.test_params.update(test_params)
        TestInputSingleton.input.test_params["case_number"] = case_number
        print("Test Input params:")
        pprint(TestInputSingleton.input.test_params)
        suite = unittest.TestLoader().loadTestsFromName(name)
        result = unittest.TextTestRunner(verbosity=2).run(suite)
        time_taken = time.time() - start_time

        # Concat params to test name
        # To make tests more readable
        params = ''
        if TestInputSingleton.input.test_params:
            for key, value in list(TestInputSingleton.input.test_params.items()):
                if key and value:
                    params += "," + str(key) + ":" + str(value)

        if result.failures or result.errors:
            # Immediately get the server logs, if
            # the test has failed or has errors
            if "get-logs" in TestInputSingleton.input.test_params:
                get_server_logs(TestInputSingleton.input, logs_folder)

            if "get-cbcollect-info" in TestInputSingleton.input.test_params:
                if TestInputSingleton.input.param("get-cbcollect-info", True):
                    get_cbcollect_info(TestInputSingleton.input, logs_folder)

            errors = []
            # only the first failure and the first error make it into xunit
            for failure in result.failures:
                test_case, failure_string = failure
                errors.append(failure_string)
                break
            for error in result.errors:
                test_case, error_string = error
                errors.append(error_string)
                break
            xunit.add_test(name=name, status='fail', time=time_taken,
                           errorType='membase.error', errorMessage=str(errors),
                           params=params)
            results.append({"result": "fail", "name": name})
        else:
            xunit.add_test(name=name, time=time_taken, params=params)
            results.append({"result": "pass", "name": name, "time": time_taken})
        xunit.write("{0}/report-{1}".format(logs_folder, str_time))
        xunit.print_summary()
        print("testrunner logs, diags and results are available under {0}".format(logs_folder))
        case_number += 1
        if (result.failures or result.errors) and \
                TestInputSingleton.input.param("stop-on-failure", False):
            print("test fails, all of the following tests will be skipped!!!")
            break

    if "makefile" in TestInputSingleton.input.test_params:
        # print out fail for those tests which failed and do sys.exit() error code
        fail_count = 0
        for result in results:
            if result["result"] == "fail":
                print(result["name"], " fail ")
                fail_count += 1
            else:
                print(result["name"], " pass")
        if fail_count > 0:
            sys.exit(1)
Beispiel #5
0
def merge_reports(filespath):
    """Merge xunit XML report files into one summary report per testsuite.

    :param filespath: a list of glob patterns, or a comma-separated string
        of glob patterns, selecting the report files to merge.
    :return: dict mapping testsuite name to its merged details; as a side
        effect one merged xunit report per suite is written under a fresh
        ``logs/testrunner-<timestamp>/merged_summary`` directory.
    """
    log.info("Merging of report files from " + str(filespath))

    testsuites = {}
    if not isinstance(filespath, list):
        filespaths = filespath.split(",")
    else:
        filespaths = filespath
    for filepath in filespaths:
        xml_files = glob.glob(filepath)
        # BUG FIX: the original guard used ``filespath.find("*")`` as a
        # boolean, which is truthy (-1) when there is NO wildcard and falsy
        # (0) when the pattern starts with one.  Test for the wildcard itself.
        if not isinstance(filespath, list) and "*" in filespath:
            # oldest first, so later runs overwrite earlier results for the
            # same (filtered) test name below
            xml_files.sort(key=os.path.getmtime)
        for xml_file in xml_files:
            log.info("-- " + xml_file + " --")
            doc = xml.dom.minidom.parse(xml_file)
            testsuitelem = doc.getElementsByTagName("testsuite")
            for ts in testsuitelem:
                tsname = ts.getAttribute("name")
                tserrors = ts.getAttribute("errors")
                tsfailures = ts.getAttribute("failures")
                tsskips = ts.getAttribute("skips")
                tstime = ts.getAttribute("time")
                tstests = ts.getAttribute("tests")
                tests = {}
                testsuite = {}
                # fill testsuite details; when this suite was already seen in
                # an earlier file, keep accumulating into its tests dict
                if tsname in testsuites:
                    testsuite = testsuites[tsname]
                    tests = testsuite['tests']
                else:
                    testsuite['name'] = tsname
                testsuite['errors'] = tserrors
                testsuite['failures'] = tsfailures
                testsuite['skips'] = tsskips
                testsuite['time'] = tstime
                testsuite['testcount'] = tstests
                testcaseelem = ts.getElementsByTagName("testcase")
                # fill test case details
                for tc in testcaseelem:
                    testcase = {}
                    tcname = tc.getAttribute("name")
                    tctime = tc.getAttribute("time")
                    tcerror = tc.getElementsByTagName("error")

                    tcname_filtered = filter_fields(tcname)
                    if compare_with_sort(tests, tcname_filtered):
                        # already recorded: reuse the entry, refresh its name
                        testcase = tests[tcname_filtered]
                        testcase['name'] = tcname
                    else:
                        testcase['name'] = tcname
                    testcase['time'] = tctime
                    testcase['error'] = ""
                    if tcerror:
                        testcase['error'] = str(tcerror[0].firstChild.nodeValue)

                    tests[tcname_filtered] = testcase
                testsuite['tests'] = tests
                testsuites[tsname] = testsuite

    log.info("\nNumber of TestSuites=" + str(len(testsuites)))
    tsindex = 0
    for tskey in testsuites:
        tsindex = tsindex + 1
        log.info("\nTestSuite#" + str(tsindex) + ") " + str(tskey) +
                 ", Number of Tests=" + str(len(testsuites[tskey]['tests'])))
        pass_count = 0
        fail_count = 0
        tests = testsuites[tskey]['tests']
        xunit = XUnitTestResult()
        for testname in tests:
            testcase = tests[testname]
            tname = testcase['name']
            ttime = testcase['time']
            inttime = float(ttime)
            terrors = testcase['error']
            tparams = ""
            # a "," in the recorded name separates the bare test name from
            # its concatenated parameters
            if "," in tname:
                tparams = tname[tname.find(","):]
                tname = tname[:tname.find(",")]

            if terrors:
                fail_count = fail_count + 1
                xunit.add_test(name=tname, status='fail', time=inttime,
                               errorType='membase.error',
                               errorMessage=str(terrors), params=tparams)
            else:
                pass_count = pass_count + 1
                xunit.add_test(name=tname, time=inttime, params=tparams)

        str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
        abs_path = os.path.dirname(os.path.abspath(sys.argv[0]))
        root_log_dir = os.path.join(
            abs_path, "logs{0}testrunner-{1}".format(os.sep, str_time))
        if not os.path.exists(root_log_dir):
            os.makedirs(root_log_dir)
        logs_folder = os.path.join(root_log_dir, "merged_summary")
        try:
            os.mkdir(logs_folder)
        except OSError:
            # directory may already exist from a previous suite iteration
            pass
        output_filepath = "{0}{2}mergedreport-{1}".format(
            logs_folder, str_time, os.sep).strip()

        xunit.write(output_filepath)
        xunit.print_summary()
        # BUG FIX: the log line used ``tsname`` — the last suite parsed in
        # the loop above — not the suite just written; use ``tskey``.
        log.info("Summary file is at " + output_filepath + "-" + tskey + ".xml")
    return testsuites
Beispiel #6
0
def main():
    """Command-line entry point (Python 2 variant).

    Parses arguments, installs global test params, selects the client mode
    and runs each named test case with per-case log directories.

    NOTE(review): this snippet uses Python 2 syntax (``print`` statements,
    ``except E, e``, ``e.message``) while sibling blocks in this file are
    Python 3 — they cannot both run under one interpreter.  The per-test
    loop also appears truncated after the load-error handlers; presumably
    the original continues with an ``else:`` branch that executes the
    loaded suite — TODO confirm against the upstream source.
    """

    names, runtime_test_params, arg_i, arg_p, options = parse_args(sys.argv)
    # get params from command line
    TestInputSingleton.input = TestInputParser.get_test_input(sys.argv)
    # ensure command line params get higher priority
    runtime_test_params.update(TestInputSingleton.input.test_params)
    TestInputSingleton.input.test_params = runtime_test_params
    print "Global Test input params:"
    pprint(TestInputSingleton.input.test_params)
    import mode
    # --mode selects the client transport; java is the fallback here
    if options.mode == "rest":
        mode.rest = True
    elif options.mode == "cli":
        mode.cli = True
    else:
        mode.java = True
    xunit = XUnitTestResult()
    # Create root logs directory
    abs_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    # Create testrunner logs subdirectory
    str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
    root_log_dir = os.path.join(abs_path, "logs{0}testrunner-{1}".format(os.sep, str_time))
    if not os.path.exists(root_log_dir):
        os.makedirs(root_log_dir)

    results = []
    case_number = 1
    if "GROUP" in runtime_test_params:
        print "Only cases in GROUPs '{0}' will be executed".format(runtime_test_params["GROUP"])
    if "EXCLUDE_GROUP" in runtime_test_params:
        print "Cases from GROUPs '{0}' will be excluded".format(runtime_test_params["EXCLUDE_GROUP"])

    for name in names:
        start_time = time.time()
        # split "testname,k1=v1,k2=v2" into the name and its parameters
        argument_split = [a.strip() for a in re.split("[,]?([^,=]+)=", name)[1:]]
        params = dict(zip(argument_split[::2], argument_split[1::2]))

        # Note that if ALL is specified at runtime then tests which have no groups are still run - just being
        # explicit on this

        if "GROUP" in runtime_test_params and "ALL" not in runtime_test_params["GROUP"].split(";"):
            if 'GROUP' not in params:         # params is the .conf file parameters.
                # this test is not in any groups so we do not run it
                print "test '{0}' skipped, a group was requested and this is not any groups".format(name)
                continue

            # there is a group for this test case, if that group is not specified at run time then do not run it
            elif len( set(runtime_test_params["GROUP"].split(";")) & set(params["GROUP"].split(";")) ) == 0:
                print "test '{0}' skipped, is not in the requested group".format(name)
                continue
            else:
                pass # the test was in requested group, will run it

        elif "EXCLUDE_GROUP" in runtime_test_params:
            if 'GROUP' in params and \
                    len(set(runtime_test_params["EXCLUDE_GROUP"].split(";")) & set(params["GROUP"].split(";"))) > 0:
                print "test '{0}' skipped, is in an excluded group".format(name)
                continue

        # Create Log Directory
        logs_folder = os.path.join(root_log_dir, "test_%s" % case_number)
        os.mkdir(logs_folder)
        test_log_file = os.path.join(logs_folder, "test.log")
        log_config_filename = r'{0}'.format(os.path.join(logs_folder, "test.logging.conf"))
        create_log_file(log_config_filename, test_log_file, options.loglevel)
        logging.config.fileConfig(log_config_filename)
        print "Logs will be stored at {0}".format(logs_folder)
        print "\n.{3}testrunner -i {0} {1} -t {2}\n"\
              .format(arg_i or "", arg_p or "", name, os.sep)
        name = name.split(",")[0]

        # Update the test params for each test
        TestInputSingleton.input.test_params = params
        TestInputSingleton.input.test_params.update(runtime_test_params)
        TestInputSingleton.input.test_params["case_number"] = case_number
        TestInputSingleton.input.test_params["logs_folder"] = logs_folder
        print "Test Input params:"
        print(TestInputSingleton.input.test_params)
        if "get-coredumps" in TestInputSingleton.input.test_params:
            if TestInputSingleton.input.param("get-coredumps", True):
                clear_old_core_dumps(TestInputSingleton.input, logs_folder)
        try:
            suite = unittest.TestLoader().loadTestsFromName(name)
        except AttributeError, e:
            # NOTE(review): ``e.message`` is deprecated even on Py2; str(e)
            # is the portable spelling used by the Py3 sibling blocks.
            print "Test {0} was not found: {1}".format(name, e)
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, e.message)]
        except SyntaxError, e:
            print "SyntaxError in {0}: {1}".format(name, e)
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, e.message)]
Beispiel #7
0
def main():
    """Entry point for the test runner.

    Parses the test names and parameters from the command line, then for each
    test: applies GROUP/EXCLUDE_GROUP filtering, creates a per-test log
    directory, merges runtime params into ``TestInputSingleton``, runs the
    suite via ``unittest``, optionally reruns a failed test once with debug
    logging, and appends the outcome to an XUnit report written after every
    test.  When the "makefile" param is present, exits with status 1 if any
    test failed, else 0.
    """
    names, runtime_test_params, arg_i, arg_p, options = parse_args(sys.argv)
    # get params from command line
    TestInputSingleton.input = TestInputParser.get_test_input(sys.argv)
    # ensure command line params get higher priority
    runtime_test_params.update(TestInputSingleton.input.test_params)
    TestInputSingleton.input.test_params = runtime_test_params

    print("Global Test input params:")
    pprint(TestInputSingleton.input.test_params)
    # 'mode' is a module used as a mutable flag holder for the transport mode
    import mode
    if options.mode == "java":
        mode.java = True
    elif options.mode == "cli":
        mode.cli = True
    else:
        mode.rest = True
    xunit = XUnitTestResult()
    # Create root logs directory
    abs_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    # Create testrunner logs subdirectory
    str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
    root_log_dir = os.path.join(abs_path,
                                "logs%stestrunner-%s" % (os.sep, str_time))
    if not os.path.exists(root_log_dir):
        os.makedirs(root_log_dir)

    results = []
    case_number = 1
    if "GROUP" in runtime_test_params:
        print("Only cases in GROUPs '%s' will be executed"
              % runtime_test_params["GROUP"])
    if "EXCLUDE_GROUP" in runtime_test_params:
        print("Cases from GROUPs '%s' will be excluded"
              % runtime_test_params["EXCLUDE_GROUP"])

    for name in names:
        start_time = time.time()

        # Reset SDK/Shell connection counters
        RemoteMachineShellConnection.connections = 0
        RemoteMachineShellConnection.disconnections = 0
        SDKClient.sdk_connections = 0
        SDKClient.sdk_disconnections = 0

        # Split "testname,key1=val1,key2=val2" into the per-test param dict
        argument_split = [a.strip()
                          for a in re.split("[,]?([^,=]+)=", name)[1:]]
        params = dict(zip(argument_split[::2], argument_split[1::2]))

        # Note that if ALL is specified at runtime then tests
        # which have no groups are still run - just being explicit on this

        if "GROUP" in runtime_test_params \
                and "ALL" not in runtime_test_params["GROUP"].split(";"):
            # Params is the .conf file parameters.
            if 'GROUP' not in params:
                # this test is not in any groups so we do not run it
                print("Test '%s' skipped, group requested but test has no group"
                      % name)
                continue
            else:
                skip_test = False
                tc_groups = params["GROUP"].split(";")
                for run_group in runtime_test_params["GROUP"].split(";"):
                    if run_group not in tc_groups:
                        skip_test = True
                        break
                if skip_test:
                    print("Test '{0}' skipped, GROUP not satisfied"
                          .format(name))
                    continue
        if "EXCLUDE_GROUP" in runtime_test_params:
            if 'GROUP' in params and \
                    len(set(runtime_test_params["EXCLUDE_GROUP"].split(";"))
                        & set(params["GROUP"].split(";"))) > 0:
                print("Test '%s' skipped, is in an excluded group" % name)
                continue

        # Create Log Directory
        logs_folder = os.path.join(root_log_dir, "test_%s" % case_number)
        os.mkdir(logs_folder)
        test_log_file = os.path.join(logs_folder, "test.log")
        log_config_filename = r'{0}'.format(os.path.join(logs_folder,
                                                         "test.logging.conf"))
        create_log_file(log_config_filename, test_log_file, options.loglevel)
        logging.config.fileConfig(log_config_filename)
        print("Logs will be stored at %s" % logs_folder)
        print("\nguides/gradlew --refresh-dependencies testrunner -P jython=/opt/jython/bin/jython -P 'args=-i {0} {1} -t {2}'\n"
              .format(arg_i or "", arg_p or "", name))
        name = name.split(",")[0]

        # Update the test params for each test
        TestInputSingleton.input.test_params = params
        TestInputSingleton.input.test_params.update(runtime_test_params)
        TestInputSingleton.input.test_params["case_number"] = case_number
        TestInputSingleton.input.test_params["logs_folder"] = logs_folder
        if "rerun" not in TestInputSingleton.input.test_params:
            TestInputSingleton.input.test_params["rerun"] = False
        print("Test Input params:\n%s"
              % TestInputSingleton.input.test_params)
        try:
            suite = unittest.TestLoader().loadTestsFromName(name)
        except AttributeError as e:
            print("Test %s was not found: %s" % (name, e))
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            # str(e) instead of e.message: .message was removed in Python 3
            result.errors = [(name, str(e))]
        except SyntaxError as e:
            print("SyntaxError in %s: %s" % (name, e))
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, str(e))]
        else:
            result = unittest.TextTestRunner(verbosity=2).run(suite)
            if TestInputSingleton.input.param("rerun") \
                    and (result.failures or result.errors):
                print("#"*60, "\n",
                      "## \tTest Failed: Rerunning it one more time",
                      "\n", "#"*60)
                print("####### Running test with trace logs enabled #######")
                TestInputSingleton.input.test_params["log_level"] = "debug"
                result = unittest.TextTestRunner(verbosity=2).run(suite)
            # test_timeout = TestInputSingleton.input.param("test_timeout",
            #                                               None)
            # t = StoppableThreadWithResult(
            #    target=unittest.TextTestRunner(verbosity=2).run,
            #    name="test_thread",
            #    args=(suite))
            # t.start()
            # result = t.join(timeout=test_timeout)
            if not result:
                # Timeout path (only reachable when the threaded runner
                # above is re-enabled): stop stray threads, record an error
                for t in threading.enumerate():
                    if t != threading.current_thread():
                        t._Thread__stop()
                result = unittest.TextTestRunner(verbosity=2)._makeResult()
                case_number += 1000
                print("========TEST WAS STOPPED DUE TO  TIMEOUT=========")
                result.errors = [(name, "Test was stopped due to timeout")]
        time_taken = time.time() - start_time
        connection_status_msg = \
            "During the test,\n" \
            "Remote Connections: %s, Disconnections: %s\n" \
            "SDK Connections: %s, Disconnections: %s" \
            % (RemoteMachineShellConnection.connections,
               RemoteMachineShellConnection.disconnections,
               SDKClient.sdk_connections, SDKClient.sdk_disconnections)

        if RemoteMachineShellConnection.connections \
                != RemoteMachineShellConnection.disconnections:
            connection_status_msg += \
                "\n!!!!!! CRITICAL :: Shell disconnection mismatch !!!!!"
        if SDKClient.sdk_connections != SDKClient.sdk_disconnections:
            connection_status_msg += \
                "\n!!!!!! CRITICAL :: SDK disconnection mismatch !!!!!"
        print(connection_status_msg)
        # Concat params to test name
        # To make tests more readable
        params = ''
        if TestInputSingleton.input.test_params:
            for key, value in TestInputSingleton.input.test_params.items():
                if key and value:
                    params += "," + str(key) + "=" + str(value)

        if result.failures or result.errors:
            # Only the first failure and the first error are reported
            errors = []
            for failure in result.failures:
                test_case, failure_string = failure
                errors.append(failure_string)
                break
            for error in result.errors:
                test_case, error_string = error
                errors.append(error_string)
                break
            xunit.add_test(name=name, status='fail', time=time_taken,
                           errorType='membase.error', errorMessage=str(errors),
                           params=params)
            results.append({"result": "fail", "name": name})
        else:
            xunit.add_test(name=name, time=time_taken, params=params)
            results.append({"result": "pass",
                            "name": name,
                            "time": time_taken})
        xunit.write("%s%sreport-%s"
                    % (os.path.dirname(logs_folder), os.sep, str_time))
        xunit.print_summary()
        print("testrunner logs, diags and results are available under %s"
              % logs_folder)
        case_number += 1
        if (result.failures or result.errors) and \
                TestInputSingleton.input.param("stop-on-failure", False):
            print("Test fails, all of the following tests will be skipped!!!")
            break

    if "makefile" in TestInputSingleton.input.test_params:
        # Print fail for those tests which failed and do sys.exit() error code
        fail_count = 0
        for result in results:
            if result["result"] == "fail":
                test_run_result = result["name"] + " fail"
                fail_count += 1
            else:
                test_run_result = result["name"] + " pass"
            print(test_run_result)
        if fail_count > 0:
            # BUG FIX: was System.exit(1) (Java idiom) -> NameError in Python
            sys.exit(1)
    # BUG FIX: was System.exit(0)
    sys.exit(0)
Beispiel #8
0
def runtests(names, options, arg_i, arg_p, runtime_test_params):
    """Run the given list of test names under unittest.

    For each test: applies GROUP/EXCLUDE_GROUP filtering, creates a per-test
    log directory under logs/testrunner-<timestamp>, merges runtime params
    into TestInputSingleton, and (for the first case) runs the module-level
    ``suite_setUp`` before loading the test itself.

    NOTE(review): this snippet appears truncated -- after the final ``except``
    clauses there is no success path, and the ``suite``/``result`` values
    built here are never consumed within the visible code.
    """
    log.info("\nNumber of tests to be executed: " + str(len(names)))
    # Module-level fixture names looked up next to each test's module
    BEFORE_SUITE = "suite_setUp"
    AFTER_SUITE = "suite_tearDown"
    xunit = XUnitTestResult()
    # Create root logs directory
    abs_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    # Create testrunner logs subdirectory
    str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
    root_log_dir = os.path.join(abs_path, "logs{0}testrunner-{1}".format(os.sep, str_time))
    if not os.path.exists(root_log_dir):
        os.makedirs(root_log_dir)

    results = []
    case_number = 1

    if "GROUP" in runtime_test_params:
        print "Only cases in GROUPs '{0}' will be executed".format(runtime_test_params["GROUP"])
    if "EXCLUDE_GROUP" in runtime_test_params:
        print "Cases from GROUPs '{0}' will be excluded".format(runtime_test_params["EXCLUDE_GROUP"])

    if TestInputSingleton.input.param("get-delays", False):
        # start measure_sched_delays on all servers
        sd = SchedDelays(TestInputSingleton.input.servers)
        sd.start_measure_sched_delays()

    for name in names:
        start_time = time.time()
        # Split "testname,key1=val1,key2=val2" into the per-test param dict
        argument_split = [a.strip() for a in re.split("[,]?([^,=]+)=", name)[1:]]
        params = dict(zip(argument_split[::2], argument_split[1::2]))

        # Note that if ALL is specified at runtime then tests which have no groups are still run - just being
        # explicit on this

        if "GROUP" in runtime_test_params and "ALL" not in runtime_test_params["GROUP"].split(";"):
            if 'GROUP' not in params:         # params is the .conf file parameters.
                # this test is not in any groups so we do not run it
                print "test '{0}' skipped, a group was requested and this is not any groups".format(name)
                continue

            # there is a group for this test case, if that group is not specified at run time then do not run it
            elif len( set(runtime_test_params["GROUP"].split(";")) & set(params["GROUP"].split(";")) ) == 0:
                print "test '{0}' skipped, is not in the requested group".format(name)
                continue
            else:
                pass # the test was in requested group, will run it

        elif "EXCLUDE_GROUP" in runtime_test_params:
            if 'GROUP' in params and \
                len(set(runtime_test_params["EXCLUDE_GROUP"].split(";")) & set(params["GROUP"].split(";"))) > 0:
                    print "test '{0}' skipped, is in an excluded group".format(name)
                    continue

        # Create Log Directory
        logs_folder = os.path.join(root_log_dir, "test_%s" % case_number)
        os.mkdir(logs_folder)
        test_log_file = os.path.join(logs_folder, "test.log")
        log_config_filename = r'{0}'.format(os.path.join(logs_folder, "test.logging.conf"))
        create_log_file(log_config_filename, test_log_file, options.loglevel)
        logging.config.fileConfig(log_config_filename)
        print "Logs will be stored at {0}".format(logs_folder)
        print "\n.{3}testrunner -i {0} -p {1} -t {2}\n"\
              .format(arg_i or "", arg_p or "", name, os.sep)
        name = name.split(",")[0]

        # Update the test params for each test
        TestInputSingleton.input.test_params = params
        TestInputSingleton.input.test_params.update(runtime_test_params)
        TestInputSingleton.input.test_params["case_number"] = case_number
        TestInputSingleton.input.test_params["logs_folder"] = logs_folder
        print "Test Input params:"
        print(TestInputSingleton.input.test_params)
        if "get-coredumps" in TestInputSingleton.input.test_params:
            if TestInputSingleton.input.param("get-coredumps", True):
                clear_old_core_dumps(TestInputSingleton.input, logs_folder)
        if case_number == 1:
            # First case only: run the module's suite_setUp fixture
            before_suite_name = "%s.%s" % (name[:name.rfind('.')], BEFORE_SUITE)
            try:
                print "Run before suite setup for %s" % name
                suite = unittest.TestLoader().loadTestsFromName(before_suite_name)
                result = unittest.TextTestRunner(verbosity=2).run(suite)
                if "get-coredumps" in TestInputSingleton.input.test_params:
                    if TestInputSingleton.input.param("get-coredumps", True):
                        if get_core_dumps(TestInputSingleton.input, logs_folder):
                            # A fresh core dump means the setup crashed a server:
                            # force a failing result for this case
                            result = unittest.TextTestRunner(verbosity=2)._makeResult()
                            result.errors = [(name, "Failing test : new core dump(s) "
                                             "were found and collected."
                                             " Check testrunner logs folder.")]
                            log.info("FAIL: New core dump(s) was found and collected")
            except AttributeError as ex:
                # Module has no suite_setUp -- nothing to run
                pass
        try:
            suite = unittest.TestLoader().loadTestsFromName(name)
        except AttributeError, e:
            print "Test {0} was not found: {1}".format(name, e)
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, e.message)]
        except SyntaxError, e:
            print "SyntaxError in {0}: {1}".format(name, e)
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, e.message)]
Beispiel #9
0
def main():
    watcher()

    names, test_params, arg_i, arg_p, options = parse_args(sys.argv)
    # get params from command line
    TestInputSingleton.input = TestInputParser.get_test_input(sys.argv)
    # ensure command line params get higher priority
    test_params.update(TestInputSingleton.input.test_params)
    TestInputSingleton.input.test_params = test_params
    print "Global Test input params:"
    pprint(TestInputSingleton.input.test_params)

    xunit = XUnitTestResult()

    # Create root logs directory
    if not os.path.exists("logs"):
        os.makedirs("logs")

    # Create testrunner logs subdirectory
    str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
    logs_folder = "logs/testrunner-" + str_time
    logs_folder_abspath = os.path.abspath(logs_folder)
    if not os.path.exists(logs_folder):
        os.makedirs(logs_folder)

    results = []
    case_number = 1
    if "GROUP" in test_params:
        print "Only cases in GROUPs '{0}' will be executed".format(test_params["GROUP"])
    if "EXCLUDE_GROUP" in test_params:
        print "Cases from GROUPs '{0}' will be excluded".format(test_params["EXCLUDE_GROUP"])

    for name in names:
        start_time = time.time()
        argument_split = [a.strip() for a in re.split("[,]?([^,=]+)=", name)[1:]]
        params = dict(zip(argument_split[::2], argument_split[1::2]))

        if ("GROUP" or "EXCLUDE_GROUP") in test_params:
            #determine if the test relates to the specified group(can be separated by ';')
            if not ("GROUP" in params and len(set(test_params["GROUP"].split(";")) & set(params["GROUP"].split(";")))) or \
                    "EXCLUDE_GROUP" in params and len(set(test_params["EXCLUDE_GROUP"].split(";")) & set(params["EXCLUDE_GROUP"].split(";"))):
                print "test '{0}' was skipped".format(name)
                continue

        log_config_filename = ""
        #reduce the number of chars in the file name, if there are many(255 bytes filename limit)
        if len(name) > 240:
            name = os.path.join(name[:220] + time.strftime("%y-%b-%d_%H-%M-%S", time.localtime()))
        if params:
            log_name = os.path.join(logs_folder_abspath, name + ".log")
            log_config_filename = os.path.join(logs_folder_abspath, name + ".logging.conf")
        else:
            dotnames = name.split('.')
            log_name = os.path.join(logs_folder_abspath, dotnames[-1] + ".log")
            log_config_filename = os.path.join(logs_folder_abspath, dotnames[-1] + ".logging.conf")
        create_log_file(log_config_filename, log_name, options.loglevel)
        logging.config.fileConfig(log_config_filename)
        print "\n./testrunner -i {0} {1} -t {2}\n"\
              .format(arg_i or "", arg_p or "", name)
        name = name.split(",")[0]

        # Update the test params for each test
        TestInputSingleton.input.test_params = params
        TestInputSingleton.input.test_params.update(test_params)
        TestInputSingleton.input.test_params["case_number"] = case_number
        print "Test Input params:"
        pprint(TestInputSingleton.input.test_params)
        suite = unittest.TestLoader().loadTestsFromName(name)
        result = unittest.TextTestRunner(verbosity=2).run(suite)
        time_taken = time.time() - start_time

        # Concat params to test name
        # To make tests more readable
        params = ''
        if TestInputSingleton.input.test_params:
            for key, value in TestInputSingleton.input.test_params.items():
                if key and value:
                    params += "," + str(key) + ":" + str(value)

        if result.failures or result.errors:
            # Immediately get the server logs, if
            # the test has failed or has errors
            if "get-logs" in TestInputSingleton.input.test_params:
                get_server_logs(TestInputSingleton.input, logs_folder)

            if "get-cbcollect-info" in TestInputSingleton.input.test_params:
                if TestInputSingleton.input.param("get-cbcollect-info", True):
                    get_cbcollect_info(TestInputSingleton.input, logs_folder)

            errors = []
            for failure in result.failures:
                test_case, failure_string = failure
                errors.append(failure_string)
                break
            for error in result.errors:
                test_case, error_string = error
                errors.append(error_string)
                break
            xunit.add_test(name=name, status='fail', time=time_taken,
                           errorType='membase.error', errorMessage=str(errors),
                           params=params)
            results.append({"result": "fail", "name": name})
        else:
            xunit.add_test(name=name, time=time_taken, params=params)
            results.append({"result": "pass", "name": name, "time": time_taken})
        xunit.write("{0}/report-{1}".format(logs_folder, str_time))
        xunit.print_summary()
        print "testrunner logs, diags and results are available under {0}".format(logs_folder)
        case_number += 1
        if (result.failures or result.errors) and \
                TestInputSingleton.input.param("stop-on-failure", False):
            print "test fails, all of the following tests will be skipped!!!"
            break

    if "makefile" in TestInputSingleton.input.test_params:
        # print out fail for those tests which failed and do sys.exit() error code
        fail_count = 0
        for result in results:
            if result["result"] == "fail":
                print result["name"], " fail "
                fail_count += 1
            else:
                print result["name"], " pass"
        if fail_count > 0:
            sys.exit(1)
Beispiel #10
0
              break
        if should_be_excluded:
           logger.info("skipping {0}".format(exc))
           continue
        logger.info("running {0}".format(found))
        more_js = find_files("./*.js")
        files = []
        files.extend(base_files)
        files.append(found)
        files.extend(more_js)
        files = sort_js_files(files)
        logger.info("files matched : {0}".format(files))
        merged_filename = "merged-js-files.txt"
        if append_files(files, merged_filename, "127.0.0.1:5984", options.node):
            logger.info("merged all files into one file {0}".format(merged_filename))
        xunit = XUnitTestResult()
        command = ["../../src/couchdb/priv/couchjs", "-H", "merged-js-files.txt"]
        couchjs_test_runner = Popen(command, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
        run_test_thread = Thread(name="run-test",target=start_couchjs_with_timeout,args=(couchjs_test_runner,xunit))
        run_test_thread.start()
        start = time.time()
        status = "ok"
        while (time.time() - start) < timeout:
            if not run_test_thread.isAlive():
                status = "ok"
                break
            else:
                time.sleep(2)


        if status != "ok":