Пример #1
0
def start_load(argv):
    """Start a load-generation worker against the first configured server.

    The worker runs for 24 hours, after which "stop" is posted on the
    shared queue to ask it to shut down.
    """
    stop_queue = Queue(10)
    test_input = TestInputParser.get_test_input(argv)
    run_seconds = 24 * 60 * 60
    load_info = {
        'server_info': [test_input.servers[0]],
        'memcached_info': {
            'bucket_name': "default",
            'bucket_port': "11210",
            'bucket_password': "",
        },
        'operation_info': {
            'operation_distribution': {'set': 10},
            'valuesize_distribution': {20: 30, 30: 5, 25: 5},
            'create_percent': 25,
            'threads': 6,
        },
        'limit_info': {
            'max_items': 0,
            'operation_count': 0,
            'time': time.time() + run_seconds,
            'max_size': 0,
        },
    }
    worker = Thread(target=loadrunner,
                    args=(stop_queue, test_input.servers, load_info))
    worker.start()
    time.sleep(run_seconds)
    stop_queue.put("stop")
Пример #2
0
def start_backup(argv):
    """Spawn a backup worker thread, let it run for 24 hours, then stop it."""
    stop_queue = Queue(10)
    test_input = TestInputParser.get_test_input(argv)
    worker = Thread(target=backup, args=(stop_queue, test_input.servers))
    worker.start()
    run_seconds = 24 * 60 * 60
    time.sleep(run_seconds)
    stop_queue.put("stop")
def main():
    """Collect core dumps from every server passed via the -i parameter.

    Each node is handled by a daemon thread that is polled every 15 seconds
    and abandoned after a 20-minute timeout so that one hung node cannot
    block the whole run.  Raises Exception if a collector thread is still
    alive after the final 120-second join.
    """
    test_input = None
    try:
        (opts, args) = getopt.getopt(sys.argv[1:], 'hi:p', [])
        for o, a in opts:
            if o == "-h":
                usage()

        # Renamed from `input` so the builtin is not shadowed.
        test_input = TestInputParser.get_test_input(sys.argv)
        if not test_input.servers:
            usage("ERROR: no servers specified. Please use the -i parameter.")
    except IndexError:
        usage()
    except getopt.GetoptError as error:
        usage("ERROR: " + str(error))

    file_path = test_input.param("path", ".")
    remotes = (Getcoredumps(server, file_path) for server in test_input.servers)
    remote_threads = [Thread(target=remote.run) for remote in remotes]

    for remote_thread in remote_threads:
        remote_thread.daemon = True
        remote_thread.start()
        run_time = 0
        # Thread.isAlive() was removed in Python 3.9; is_alive() is the
        # portable spelling.
        while remote_thread.is_alive() and run_time < 1200:
            time.sleep(15)
            run_time += 15
            print("Waiting for another 15 seconds (time-out after 20 min)")
        if run_time == 1200:
            print("collect core dumps hung on this node. Jumping to next node")
        print("collect core dumps info done")

    for remote_thread in remote_threads:
        remote_thread.join(120)
        if remote_thread.is_alive():
            raise Exception("collect core dumps hung on remote node")
Пример #4
0
def start_combo(argv):
    """Run the combined workload in a background thread for 24 hours."""
    stop_queue = Queue(10)
    test_input = TestInputParser.get_test_input(argv)
    combo_thread = Thread(target=combo, args=(stop_queue, test_input))
    combo_thread.start()
    time.sleep(60 * 60 * 24)
    stop_queue.put("stop")
Пример #5
0
def main():
    """Parse CLI arguments, merge runtime params, and dispatch the run."""
    log.info("TestRunner: parsing args...")
    names, runtime_test_params, arg_i, arg_p, options = parse_args(sys.argv)
    log.info("TestRunner: start...")
    # Command-line parameters take precedence over anything parsed from
    # the config, so merge them in last.
    TestInputSingleton.input = TestInputParser.get_test_input(sys.argv)
    runtime_test_params.update(TestInputSingleton.input.test_params)
    TestInputSingleton.input.test_params = runtime_test_params
    log.info("Global Test input params:")
    pprint(TestInputSingleton.input.test_params)

    if not names:
        log.warning(
            "Warning: No tests got selected. Please double check the .conf file and other "
            "options!")
    elif options.merge:
        merge_reports(options.merge)
    elif options.rerun:
        results = reruntests(options.rerun, names, options, arg_i, arg_p,
                             runtime_test_params)
    else:
        results, _, _ = runtests(names, options, arg_i, arg_p,
                                 runtime_test_params)

    log.info("TestRunner: end...")
Пример #6
0
def main():
    """Validate command-line options and require at least one server.

    Prints usage (which is expected to exit) on -h, on getopt errors, or
    when no servers were supplied with -i.
    """
    try:
        (opts, args) = getopt.getopt(sys.argv[1:], 'hi:p', [])
        for o, a in opts:
            if o == "-h":
                usage()

        input = TestInputParser.get_test_input(sys.argv)
        if not input.servers:
            usage("ERROR: no servers specified. Please use the -i parameter.")
    except IndexError:
        usage()
    # BUG FIX: "except E, name" is Python 2-only syntax and a SyntaxError
    # on Python 3; "except E as name" works on 2.6+ and 3.
    except getopt.GetoptError as error:
        usage("ERROR: " + str(error))
Пример #7
0
def main():
    """Validate command-line options and require at least one server.

    Prints usage (which is expected to exit) on -h, on getopt errors, or
    when no servers were supplied with -i.
    """
    try:
        (opts, args) = getopt.getopt(sys.argv[1:], 'hi:p', [])
        for o, a in opts:
            if o == "-h":
                usage()

        input = TestInputParser.get_test_input(sys.argv)
        if not input.servers:
            usage("ERROR: no servers specified. Please use the -i parameter.")
    except IndexError:
        usage()
    # BUG FIX: "except E, name" is Python 2-only syntax and a SyntaxError
    # on Python 3; "except E as name" works on 2.6+ and 3.
    except getopt.GetoptError as error:
        usage("ERROR: " + str(error))
Пример #8
0
def main():
    """Aggregate per-client perf stats, or run setUp when requested."""
    _, test_params, _, _, _ = parse_args(sys.argv)
    TestInputSingleton.input = TestInputParser.get_test_input(sys.argv)
    test_params.update(TestInputSingleton.input.test_params)
    TestInputSingleton.input.test_params = test_params

    obj = EPerfMasterWrapper()
    obj.input = TestInputSingleton.input

    # Run setUp with load_phase=0, index_phase=0 and access_phase=0
    if "setUp" in sys.argv:
        obj.setUp()
        return

    num_clients = obj.parami("num_clients", 10) * obj.parami("num_buckets", 1)
    for phase in ("load", "reload", "loop", "warmup", "index"):
        obj.aggregate_all_stats(num_clients, phase)
Пример #9
0
def main():
    """Aggregate per-client perf stats, or run setUp when requested."""
    _, test_params, _, _, _ = parse_args(sys.argv)
    TestInputSingleton.input = TestInputParser.get_test_input(sys.argv)
    test_params.update(TestInputSingleton.input.test_params)
    TestInputSingleton.input.test_params = test_params

    obj = EPerfMasterWrapper()
    obj.input = TestInputSingleton.input

    # Run setUp with load_phase=0, index_phase=0 and access_phase=0
    if "setUp" in sys.argv:
        obj.setUp()
        return

    num_clients = obj.parami("num_clients", 10) * obj.parami(
        "num_buckets", 1)
    obj.log = logger.Logger.get_logger()
    for phase in ("load", "reload", "loop", "warmup", "index"):
        obj.aggregate_all_stats(num_clients, phase)
Пример #10
0
def main():
    watcher()

    names, test_params, arg_i, arg_p, options = parse_args(sys.argv)
    # get params from command line
    TestInputSingleton.input = TestInputParser.get_test_input(sys.argv)
    # ensure command line params get higher priority
    test_params.update(TestInputSingleton.input.test_params)
    TestInputSingleton.input.test_params = test_params
    print "Global Test input params:"
    pprint(TestInputSingleton.input.test_params)

    xunit = XUnitTestResult()

    # Create root logs directory
    if not os.path.exists("logs"):
        os.makedirs("logs")

    # Create testrunner logs subdirectory
    str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
    logs_folder = "logs/testrunner-" + str_time
    logs_folder_abspath = os.path.abspath(logs_folder)
    if not os.path.exists(logs_folder):
        os.makedirs(logs_folder)

    results = []
    case_number = 1
    if "GROUP" in test_params:
        print "Only cases in GROUPs '{0}' will be executed".format(test_params["GROUP"])
    if "EXCLUDE_GROUP" in test_params:
        print "Cases from GROUPs '{0}' will be excluded".format(test_params["EXCLUDE_GROUP"])

    for name in names:
        start_time = time.time()
        argument_split = [a.strip() for a in re.split("[,]?([^,=]+)=", name)[1:]]
        params = dict(zip(argument_split[::2], argument_split[1::2]))

        if ("GROUP" or "EXCLUDE_GROUP") in test_params:
            #determine if the test relates to the specified group(can be separated by ';')
            if not ("GROUP" in params and len(set(test_params["GROUP"].split(";")) & set(params["GROUP"].split(";")))) or \
                    "EXCLUDE_GROUP" in params and len(set(test_params["EXCLUDE_GROUP"].split(";")) & set(params["EXCLUDE_GROUP"].split(";"))):
                print "test '{0}' was skipped".format(name)
                continue

        log_config_filename = ""
        #reduce the number of chars in the file name, if there are many(255 bytes filename limit)
        if len(name) > 240:
            name = os.path.join(name[:220] + time.strftime("%y-%b-%d_%H-%M-%S", time.localtime()))
        if params:
            log_name = os.path.join(logs_folder_abspath, name + ".log")
            log_config_filename = os.path.join(logs_folder_abspath, name + ".logging.conf")
        else:
            dotnames = name.split('.')
            log_name = os.path.join(logs_folder_abspath, dotnames[-1] + ".log")
            log_config_filename = os.path.join(logs_folder_abspath, dotnames[-1] + ".logging.conf")
        create_log_file(log_config_filename, log_name, options.loglevel)
        logging.config.fileConfig(log_config_filename)
        print "\n./testrunner -i {0} {1} -t {2}\n"\
              .format(arg_i or "", arg_p or "", name)
        name = name.split(",")[0]

        # Update the test params for each test
        TestInputSingleton.input.test_params = params
        TestInputSingleton.input.test_params.update(test_params)
        TestInputSingleton.input.test_params["case_number"] = case_number
        print "Test Input params:"
        pprint(TestInputSingleton.input.test_params)
        suite = unittest.TestLoader().loadTestsFromName(name)
        result = unittest.TextTestRunner(verbosity=2).run(suite)
        time_taken = time.time() - start_time

        # Concat params to test name
        # To make tests more readable
        params = ''
        if TestInputSingleton.input.test_params:
            for key, value in TestInputSingleton.input.test_params.items():
                if key and value:
                    params += "," + str(key) + ":" + str(value)

        if result.failures or result.errors:
            # Immediately get the server logs, if
            # the test has failed or has errors
            if "get-logs" in TestInputSingleton.input.test_params:
                get_server_logs(TestInputSingleton.input, logs_folder)

            if "get-cbcollect-info" in TestInputSingleton.input.test_params:
                if TestInputSingleton.input.param("get-cbcollect-info", True):
                    get_cbcollect_info(TestInputSingleton.input, logs_folder)

            errors = []
            for failure in result.failures:
                test_case, failure_string = failure
                errors.append(failure_string)
                break
            for error in result.errors:
                test_case, error_string = error
                errors.append(error_string)
                break
            xunit.add_test(name=name, status='fail', time=time_taken,
                           errorType='membase.error', errorMessage=str(errors),
                           params=params)
            results.append({"result": "fail", "name": name})
        else:
            xunit.add_test(name=name, time=time_taken, params=params)
            results.append({"result": "pass", "name": name, "time": time_taken})
        xunit.write("{0}/report-{1}".format(logs_folder, str_time))
        xunit.print_summary()
        print "testrunner logs, diags and results are available under {0}".format(logs_folder)
        case_number += 1
        if (result.failures or result.errors) and \
                TestInputSingleton.input.param("stop-on-failure", False):
            print "test fails, all of the following tests will be skipped!!!"
            break

    if "makefile" in TestInputSingleton.input.test_params:
        # print out fail for those tests which failed and do sys.exit() error code
        fail_count = 0
        for result in results:
            if result["result"] == "fail":
                print result["name"], " fail "
                fail_count += 1
            else:
                print result["name"], " pass"
        if fail_count > 0:
            sys.exit(1)
Пример #11
0
def main():
    """Run the selected tests sequentially with per-test log folders, group
    filtering, optional core-dump / sched-delay collection, per-test
    timeouts, and an XUnit report.

    Exits with status 1 in "makefile" mode when any test failed.
    """

    BEFORE_SUITE = "suite_setUp"
    AFTER_SUITE = "suite_tearDown"
    names, runtime_test_params, arg_i, arg_p, options = parse_args(sys.argv)
    # get params from command line
    TestInputSingleton.input = TestInputParser.get_test_input(sys.argv)
    # ensure command line params get higher priority
    runtime_test_params.update(TestInputSingleton.input.test_params)
    TestInputSingleton.input.test_params = runtime_test_params
    print("Global Test input params:")
    pprint(TestInputSingleton.input.test_params)

    xunit = XUnitTestResult()
    # Create root logs directory
    abs_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    # Create testrunner logs subdirectory
    str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
    root_log_dir = os.path.join(
        abs_path, "logs{0}testrunner-{1}".format(os.sep, str_time))
    if not os.path.exists(root_log_dir):
        os.makedirs(root_log_dir)

    results = []
    case_number = 1
    if "GROUP" in runtime_test_params:
        print("Only cases in GROUPs '{0}' will be executed".format(
            runtime_test_params["GROUP"]))
    if "EXCLUDE_GROUP" in runtime_test_params:
        print("Cases from GROUPs '{0}' will be excluded".format(
            runtime_test_params["EXCLUDE_GROUP"]))

    if TestInputSingleton.input.param("get-delays", False):
        # start measure_sched_delays on all servers
        sd = SchedDelays(TestInputSingleton.input.servers)
        sd.start_measure_sched_delays()

    if TestInputSingleton.input.param("hanging_threads", False):
        print("--> hanging_threads: start monitoring...")
        from hanging_threads import start_monitoring
        hanging_threads_frozen_time = int(
            TestInputSingleton.input.param("hanging_threads", 120))
        hanging_threads_test_interval = int(
            TestInputSingleton.input.param("test_interval", 1000))
        monitoring_thread = start_monitoring(
            seconds_frozen=hanging_threads_frozen_time,
            test_interval=hanging_threads_test_interval)

    for name in names:
        start_time = time.time()
        argument_split = [
            a.strip() for a in re.split("[,]?([^,=]+)=", name)[1:]
        ]
        params = dict(list(zip(argument_split[::2], argument_split[1::2])))

        # Note that if ALL is specified at runtime then tests which have no groups are still run - just being
        # explicit on this

        if "GROUP" in runtime_test_params and "ALL" not in runtime_test_params[
                "GROUP"].split(";"):
            if 'GROUP' not in params:  # params is the .conf file parameters.
                # this test is not in any groups so we do not run it
                print(
                    "test '{0}' skipped, a group was requested and this is not any groups"
                    .format(name))
                continue

            # there is a group for this test case, if that group is not specified at run time then do not run it
            elif len(
                    set(runtime_test_params["GROUP"].split(";"))
                    & set(params["GROUP"].split(";"))) == 0:
                print(
                    "test '{0}' skipped, is not in the requested group".format(
                        name))
                continue
            else:
                pass  # the test was in requested group, will run it

        elif "EXCLUDE_GROUP" in runtime_test_params:
            if 'GROUP' in params and \
                len(set(runtime_test_params["EXCLUDE_GROUP"].split(";")) & set(params["GROUP"].split(";"))) > 0:
                print(
                    "test '{0}' skipped, is in an excluded group".format(name))
                continue

        # Create Log Directory
        logs_folder = os.path.join(root_log_dir, "test_%s" % case_number)
        os.mkdir(logs_folder)
        test_log_file = os.path.join(logs_folder, "test.log")
        log_config_filename = r'{0}'.format(
            os.path.join(logs_folder, "test.logging.conf"))
        create_log_file(log_config_filename, test_log_file, options.loglevel)
        logging.config.fileConfig(log_config_filename)
        print("Logs will be stored at {0}".format(logs_folder))
        print("\n.{3}testrunner -i {0} -p {1} -t {2}\n"\
              .format(arg_i or "", arg_p or "", name, os.sep))
        name = name.split(",")[0]

        # Update the test params for each test
        TestInputSingleton.input.test_params = params
        TestInputSingleton.input.test_params.update(runtime_test_params)
        TestInputSingleton.input.test_params["case_number"] = case_number
        TestInputSingleton.input.test_params["logs_folder"] = logs_folder
        print("Test Input params:")
        print((TestInputSingleton.input.test_params))
        if "get-coredumps" in TestInputSingleton.input.test_params:
            if TestInputSingleton.input.param("get-coredumps", True):
                clear_old_core_dumps(TestInputSingleton.input, logs_folder)
        if case_number == 1:
            before_suite_name = "%s.%s" % (name[:name.rfind('.')],
                                           BEFORE_SUITE)
            try:
                print("Run before suite setup for %s" % name)
                suite = unittest.TestLoader().loadTestsFromName(
                    before_suite_name)
                print("-->before_suite_name:{},suite: {}".format(
                    before_suite_name, suite))
                result = unittest.TextTestRunner(verbosity=2).run(suite)
                print("-->result: {}".format(result))
                if "get-coredumps" in TestInputSingleton.input.test_params:
                    if TestInputSingleton.input.param("get-coredumps", True):
                        if get_core_dumps(TestInputSingleton.input,
                                          logs_folder):
                            result = unittest.TextTestRunner(
                                verbosity=2)._makeResult()
                            result.errors = [
                                (name, "Failing test : new core dump(s) "
                                 "were found and collected."
                                 " Check testrunner logs folder.")
                            ]
                            print(
                                "FAIL: New core dump(s) was found and collected"
                            )
            except AttributeError as ex:
                traceback.print_exc()
                pass
        try:
            suite = unittest.TestLoader().loadTestsFromName(name)
        except AttributeError as e:
            print("Test {0} was not found: {1}".format(name, e))
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, str(e))]
        except SyntaxError as e:
            print("SyntaxError in {0}: {1}".format(name, e))
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, str(e))]
        else:
            test_timeout = TestInputSingleton.input.param("test_timeout", None)
            # BUG FIX: args must be a tuple; `args=(suite)` is just `suite`
            # itself, so the runner would have been invoked with the suite
            # unpacked into individual positional arguments.
            t = StoppableThreadWithResult(
                target=unittest.TextTestRunner(verbosity=2).run,
                name="test_thread",
                args=(suite,))
            t.start()
            result = t.join(timeout=test_timeout)
            if "get-coredumps" in TestInputSingleton.input.test_params:
                if TestInputSingleton.input.param("get-coredumps", True):
                    if get_core_dumps(TestInputSingleton.input, logs_folder):
                        result = unittest.TextTestRunner(
                            verbosity=2)._makeResult()
                        result.errors = [(name,
                                          "Failing test : new core dump(s) "
                                          "were found and collected."
                                          " Check testrunner logs folder.")]
                        print("FAIL: New core dump(s) was found and collected")
            if not result:
                # Test timed out: best-effort stop of leftover threads.
                for t in threading.enumerate():
                    if t != threading.current_thread():
                        # BUG FIX: `t._Thread__stop()` is the Python 2
                        # mangled name and raises AttributeError on Python 3;
                        # use the same guarded `_stop()` as the cleanup loop
                        # at the end of this function.
                        try:
                            t._stop()
                        except Exception:
                            pass
                result = unittest.TextTestRunner(verbosity=2)._makeResult()
                case_number += 1000
                print("========TEST WAS STOPPED DUE TO  TIMEOUT=========")
                result.errors = [(name, "Test was stopped due to timeout")]
        time_taken = time.time() - start_time

        # Concat params to test name
        # To make tests more readable
        params = ''
        if TestInputSingleton.input.test_params:
            for key, value in list(
                    TestInputSingleton.input.test_params.items()):
                if key and value:
                    params += "," + str(key) + ":" + str(value)

        if result.failures or result.errors:
            # Immediately get the server logs, if
            # the test has failed or has errors
            if "get-logs" in TestInputSingleton.input.test_params:
                get_server_logs(TestInputSingleton.input, logs_folder)

            if "get-logs-cluster-run" in TestInputSingleton.input.test_params:
                if TestInputSingleton.input.param("get-logs-cluster-run",
                                                  True):
                    # Generate path to ns_server directory
                    ns_server_path = os.path.normpath(abs_path + os.sep +
                                                      os.pardir + os.sep +
                                                      "ns_server")
                    get_logs_cluster_run(TestInputSingleton.input, logs_folder,
                                         ns_server_path)

            if "get-cbcollect-info" in TestInputSingleton.input.test_params:
                if TestInputSingleton.input.param("get-cbcollect-info", True):
                    get_cbcollect_info(TestInputSingleton.input, logs_folder)

            if "get-couch-dbinfo" in TestInputSingleton.input.test_params and \
                TestInputSingleton.input.param("get-couch-dbinfo", True):
                get_couch_dbinfo(TestInputSingleton.input, logs_folder)

            errors = []
            # Only the first failure/error message is reported to xunit.
            for failure in result.failures:
                test_case, failure_string = failure
                errors.append(failure_string)
                break
            for error in result.errors:
                test_case, error_string = error
                errors.append(error_string)
                break
            xunit.add_test(name=name,
                           status='fail',
                           time=time_taken,
                           errorType='membase.error',
                           errorMessage=str(errors),
                           params=params)
            results.append({"result": "fail", "name": name})
        else:
            xunit.add_test(name=name, time=time_taken, params=params)
            results.append({
                "result": "pass",
                "name": name,
                "time": time_taken
            })
        xunit.write("{0}{2}report-{1}".format(os.path.dirname(logs_folder),
                                              str_time, os.sep))
        xunit.print_summary()
        print("testrunner logs, diags and results are available under {0}".
              format(logs_folder))
        case_number += 1
        if (result.failures or result.errors) and \
                TestInputSingleton.input.param("stop-on-failure", False):
            print("test fails, all of the following tests will be skipped!!!")
            break

    # NOTE: if `names` is empty, `name` is unbound here and this raises
    # NameError (pre-existing behavior, kept unchanged).
    after_suite_name = "%s.%s" % (name[:name.rfind('.')], AFTER_SUITE)
    try:
        print("Run after suite setup for %s" % name)
        suite = unittest.TestLoader().loadTestsFromName(after_suite_name)
        result = unittest.TextTestRunner(verbosity=2).run(suite)
    except AttributeError as ex:
        pass
    if "makefile" in TestInputSingleton.input.test_params:
        # print out fail for those tests which failed and do sys.exit() error code
        fail_count = 0
        for result in results:
            if result["result"] == "fail":
                print(result["name"], " fail ")
                fail_count += 1
            else:
                print(result["name"], " pass")
        if fail_count > 0:
            sys.exit(1)

    if TestInputSingleton.input.param("get-delays", False):
        sd.stop_measure_sched_delay()
        sd.fetch_logs()

    # terminate any non main thread - these were causing hangs
    for t in threading.enumerate():
        # Thread.isAlive() was removed in Python 3.9; use is_alive().
        if t.name != 'MainThread' and t.is_alive():
            print('Thread', t,
                  'was not properly terminated, will be terminated now.')
            if hasattr(t, 'shutdown'):
                print("Shutting down the thread...")
                t.shutdown(True)
            else:
                print("Stopping the thread...")
                try:
                    t._stop()
                except Exception as e:
                    pass
Пример #12
0
def main():
    """Python 2 variant of the sequential test driver: group filtering,
    per-test log folders, optional sched-delay and core-dump collection.

    NOTE(review): this excerpt appears truncated -- it ends inside the
    per-test loop right after the suite is loaded, before the test is run
    or reported (compare the Python 3 sibling runner above).
    """

    BEFORE_SUITE = "suite_setUp"
    AFTER_SUITE = "suite_tearDown"
    names, runtime_test_params, arg_i, arg_p, options = parse_args(sys.argv)
    # get params from command line
    TestInputSingleton.input = TestInputParser.get_test_input(sys.argv)
    # ensure command line params get higher priority
    runtime_test_params.update(TestInputSingleton.input.test_params)
    TestInputSingleton.input.test_params = runtime_test_params
    print "Global Test input params:"
    pprint(TestInputSingleton.input.test_params)

    xunit = XUnitTestResult()
    # Create root logs directory
    abs_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    # Create testrunner logs subdirectory
    str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
    root_log_dir = os.path.join(
        abs_path, "logs{0}testrunner-{1}".format(os.sep, str_time))
    if not os.path.exists(root_log_dir):
        os.makedirs(root_log_dir)

    results = []
    case_number = 1
    if "GROUP" in runtime_test_params:
        print "Only cases in GROUPs '{0}' will be executed".format(
            runtime_test_params["GROUP"])
    if "EXCLUDE_GROUP" in runtime_test_params:
        print "Cases from GROUPs '{0}' will be excluded".format(
            runtime_test_params["EXCLUDE_GROUP"])

    # Optional scheduler-delay measurement on all servers for the whole run.
    if TestInputSingleton.input.param("get-delays", False):
        # start measure_sched_delays on all servers
        sd = SchedDelays(TestInputSingleton.input.servers)
        sd.start_measure_sched_delays()

    for name in names:
        start_time = time.time()
        # Split "module.Class.test,k1=v1,k2=v2" into alternating key/value
        # tokens, then pair them up into the per-test params dict.
        argument_split = [
            a.strip() for a in re.split("[,]?([^,=]+)=", name)[1:]
        ]
        params = dict(zip(argument_split[::2], argument_split[1::2]))

        # Note that if ALL is specified at runtime then tests which have no groups are still run - just being
        # explicit on this

        if "GROUP" in runtime_test_params and "ALL" not in runtime_test_params[
                "GROUP"].split(";"):
            if 'GROUP' not in params:  # params is the .conf file parameters.
                # this test is not in any groups so we do not run it
                print "test '{0}' skipped, a group was requested and this is not any groups".format(
                    name)
                continue

            # there is a group for this test case, if that group is not specified at run time then do not run it
            elif len(
                    set(runtime_test_params["GROUP"].split(";"))
                    & set(params["GROUP"].split(";"))) == 0:
                print "test '{0}' skipped, is not in the requested group".format(
                    name)
                continue
            else:
                pass  # the test was in requested group, will run it

        elif "EXCLUDE_GROUP" in runtime_test_params:
            if 'GROUP' in params and \
                len(set(runtime_test_params["EXCLUDE_GROUP"].split(";")) & set(params["GROUP"].split(";"))) > 0:
                print "test '{0}' skipped, is in an excluded group".format(
                    name)
                continue

        # Create Log Directory
        logs_folder = os.path.join(root_log_dir, "test_%s" % case_number)
        os.mkdir(logs_folder)
        test_log_file = os.path.join(logs_folder, "test.log")
        log_config_filename = r'{0}'.format(
            os.path.join(logs_folder, "test.logging.conf"))
        create_log_file(log_config_filename, test_log_file, options.loglevel)
        logging.config.fileConfig(log_config_filename)
        print "Logs will be stored at {0}".format(logs_folder)
        print "\n.{3}testrunner -i {0} {1} -t {2}\n"\
              .format(arg_i or "", arg_p or "", name, os.sep)
        # Drop the inline parameters; keep only the dotted test name.
        name = name.split(",")[0]

        # Update the test params for each test
        TestInputSingleton.input.test_params = params
        TestInputSingleton.input.test_params.update(runtime_test_params)
        TestInputSingleton.input.test_params["case_number"] = case_number
        TestInputSingleton.input.test_params["logs_folder"] = logs_folder
        print "Test Input params:"
        print(TestInputSingleton.input.test_params)
        if "get-coredumps" in TestInputSingleton.input.test_params:
            if TestInputSingleton.input.param("get-coredumps", True):
                clear_old_core_dumps(TestInputSingleton.input, logs_folder)
        # Run the module-level suite_setUp once, before the first test.
        if case_number == 1:
            before_suite_name = "%s.%s" % (name[:name.rfind('.')],
                                           BEFORE_SUITE)
            try:
                print "Run before suite setup for %s" % name
                suite = unittest.TestLoader().loadTestsFromName(
                    before_suite_name)
                result = unittest.TextTestRunner(verbosity=2).run(suite)
                if "get-coredumps" in TestInputSingleton.input.test_params:
                    if TestInputSingleton.input.param("get-coredumps", True):
                        if get_core_dumps(TestInputSingleton.input,
                                          logs_folder):
                            # A fresh core dump means the node crashed:
                            # overwrite the result with a synthetic failure.
                            result = unittest.TextTestRunner(
                                verbosity=2)._makeResult()
                            result.errors = [
                                (name, "Failing test : new core dump(s) "
                                 "were found and collected."
                                 " Check testrunner logs folder.")
                            ]
                            print(
                                "FAIL: New core dump(s) was found and collected"
                            )
            except AttributeError as ex:
                pass
        try:
            suite = unittest.TestLoader().loadTestsFromName(name)
        # NOTE(review): "except E, name" is Python 2-only syntax (SyntaxError
        # on Python 3), and `e.message` is likewise Python 2-only.
        except AttributeError, e:
            print "Test {0} was not found: {1}".format(name, e)
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, e.message)]
        except SyntaxError, e:
            print "SyntaxError in {0}: {1}".format(name, e)
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, e.message)]
Пример #13
0
import base64

sys.path.append('.')
sys.path.append('lib')

from TestInput import TestInputParser

def create_headers(username, password):
    """Build the HTTP headers for a Basic-auth request to the REST /diag endpoint.

    :param username: REST username.
    :param password: REST password.
    :return: dict of headers with a ready-to-use ``Authorization`` value.
    """
    # Python 3's base64 API works on bytes, not str: the original
    # ``base64.encodebytes('%s:%s' % ...)`` raises TypeError and would also
    # leave a trailing newline inside the header value. Encode the
    # credentials, base64 them, and decode back to a clean ASCII token.
    credentials = '%s:%s' % (username, password)
    authorization = base64.b64encode(credentials.encode('utf-8')).decode('ascii')
    return {'Content-Type': 'application/x-www-form-urlencoded',
            'Authorization': 'Basic %s' % authorization,
            'Accept': '*/*'}


if __name__ == "__main__":
    input = TestInputParser.get_test_input(sys.argv)
    for serverInfo in input.servers:
        print("grabbing diags from ".format(serverInfo.ip))
        diag_url = "http://{0}:{1}/diag".format(serverInfo.ip, serverInfo.port)
        print(diag_url)
        try:
            req = urllib.request.Request(diag_url)
            req.headers = create_headers(input.membase_settings.rest_username,
                                         input.membase_settings.rest_password)
            filename = "{0}-{1}-diag.txt".format(serverInfo.ip, serverInfo.port)
            page = urllib.request.urlopen(req)
            with open(filename, 'wb') as output:
                os.write(1, "downloading {0} ...".format(serverInfo.ip))
                while True:
                    buffer = page.read(65536)
                    if not buffer:
Пример #14
0
def main():
    """Testrunner entry point: parse CLI args, filter test cases by
    GROUP/EXCLUDE_GROUP, and run each named case with its own log folder.

    Fix applied: the test-loading ``except`` clauses used the Python-2-only
    ``except Cls, e`` syntax and the Python-2-only ``e.message`` attribute;
    both are replaced with ``except Cls as e`` / ``str(e)``, which behave the
    same on Python 2.6+ and are required on Python 3 (the rest of this
    function already uses Python-3 ``print()`` calls).
    """
    names, runtime_test_params, arg_i, arg_p, options = parse_args(sys.argv)
    # get params from command line
    TestInputSingleton.input = TestInputParser.get_test_input(sys.argv)
    # ensure command line params get higher priority
    runtime_test_params.update(TestInputSingleton.input.test_params)
    TestInputSingleton.input.test_params = runtime_test_params

    print("Global Test input params:")
    pprint(TestInputSingleton.input.test_params)
    import mode
    if options.mode == "rest":
        mode.rest = True
    elif options.mode == "cli":
        mode.cli = True
    else:
        mode.java = True
    xunit = XUnitTestResult()
    # Create root logs directory
    abs_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    # Create testrunner logs subdirectory
    str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
    root_log_dir = os.path.join(abs_path,
                                "logs%stestrunner-%s" % (os.sep, str_time))
    if not os.path.exists(root_log_dir):
        os.makedirs(root_log_dir)

    results = []
    case_number = 1
    if "GROUP" in runtime_test_params:
        print("Only cases in GROUPs '%s' will be executed" %
              runtime_test_params["GROUP"])
    if "EXCLUDE_GROUP" in runtime_test_params:
        print("Cases from GROUPs '%s' will be excluded" %
              runtime_test_params["EXCLUDE_GROUP"])

    for name in names:
        start_time = time.time()

        # Reset SDK/Shell connection counters
        RemoteMachineShellConnection.connections = 0
        RemoteMachineShellConnection.disconnections = 0
        SDKClient.sdk_connections = 0
        SDKClient.sdk_disconnections = 0

        argument_split = [
            a.strip() for a in re.split("[,]?([^,=]+)=", name)[1:]
        ]
        params = dict(zip(argument_split[::2], argument_split[1::2]))

        # Note that if ALL is specified at runtime then tests
        # which have no groups are still run - just being explicit on this

        if "GROUP" in runtime_test_params \
                and "ALL" not in runtime_test_params["GROUP"].split(";"):
            # Params is the .conf file parameters.
            if 'GROUP' not in params:
                # this test is not in any groups so we do not run it
                print(
                    "Test '%s' skipped, group requested but test has no group"
                    % name)
                continue
            else:
                skip_test = False
                tc_groups = params["GROUP"].split(";")
                for run_group in runtime_test_params["GROUP"].split(";"):
                    if run_group not in tc_groups:
                        skip_test = True
                        break
                if skip_test:
                    print(
                        "Test '{0}' skipped, GROUP not satisfied".format(name))
                    continue
        if "EXCLUDE_GROUP" in runtime_test_params:
            if 'GROUP' in params and \
                    len(set(runtime_test_params["EXCLUDE_GROUP"].split(";"))
                        & set(params["GROUP"].split(";"))) > 0:
                print("Test '%s' skipped, is in an excluded group" % name)
                continue

        # Create Log Directory
        logs_folder = os.path.join(root_log_dir, "test_%s" % case_number)
        os.mkdir(logs_folder)
        test_log_file = os.path.join(logs_folder, "test.log")
        log_config_filename = r'{0}'.format(
            os.path.join(logs_folder, "test.logging.conf"))
        create_log_file(log_config_filename, test_log_file, options.loglevel)
        logging.config.fileConfig(log_config_filename)
        print("Logs will be stored at %s" % logs_folder)
        print(
            "\nguides/gradlew --refresh-dependencies testrunner -P jython=/opt/jython/bin/jython -P 'args=-i {0} {1} -t {2}'\n"
            .format(arg_i or "", arg_p or "", name))
        name = name.split(",")[0]

        # Update the test params for each test
        TestInputSingleton.input.test_params = params
        TestInputSingleton.input.test_params.update(runtime_test_params)
        TestInputSingleton.input.test_params["case_number"] = case_number
        TestInputSingleton.input.test_params["logs_folder"] = logs_folder
        if "rerun" not in TestInputSingleton.input.test_params:
            TestInputSingleton.input.test_params["rerun"] = False
        print("Test Input params:\n%s" % TestInputSingleton.input.test_params)
        if "get-coredumps" in TestInputSingleton.input.test_params:
            if TestInputSingleton.input.param("get-coredumps", True):
                clear_old_core_dumps(TestInputSingleton.input, logs_folder)
        try:
            suite = unittest.TestLoader().loadTestsFromName(name)
        except AttributeError as e:
            print("Test %s was not found: %s" % (name, e))
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, str(e))]
        except SyntaxError as e:
            print("SyntaxError in %s: %s" % (name, e))
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, str(e))]
Пример #15
0
def main():
    """Testrunner entry point (Jython flavor).

    Parses CLI args, filters cases by GROUP/EXCLUDE_GROUP, runs each case
    with a dedicated log folder, optionally reruns failures with debug
    logging, verifies shell/SDK connection balance, and writes an xunit
    report per case.

    NOTE(review): ``e.message``, ``t._Thread__stop()`` and ``System.exit``
    are Python-2 / Jython-only constructs — this function appears to target
    Jython 2.x (the gradle command below passes a jython path); confirm
    before running on CPython 3.
    """
    names, runtime_test_params, arg_i, arg_p, options = parse_args(sys.argv)
    # get params from command line
    TestInputSingleton.input = TestInputParser.get_test_input(sys.argv)
    # ensure command line params get higher priority
    runtime_test_params.update(TestInputSingleton.input.test_params)
    TestInputSingleton.input.test_params = runtime_test_params

    print("Global Test input params:")
    pprint(TestInputSingleton.input.test_params)
    # Select the driver mode flag on the shared `mode` module; REST is the
    # fallback when neither "java" nor "cli" is requested.
    import mode
    if options.mode == "java":
        mode.java = True
    elif options.mode == "cli":
        mode.cli = True
    else:
        mode.rest = True
    xunit = XUnitTestResult()
    # Create root logs directory
    abs_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    # Create testrunner logs subdirectory
    str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
    root_log_dir = os.path.join(abs_path,
                                "logs%stestrunner-%s" % (os.sep, str_time))
    if not os.path.exists(root_log_dir):
        os.makedirs(root_log_dir)

    results = []
    case_number = 1
    if "GROUP" in runtime_test_params:
        print("Only cases in GROUPs '%s' will be executed"
              % runtime_test_params["GROUP"])
    if "EXCLUDE_GROUP" in runtime_test_params:
        print("Cases from GROUPs '%s' will be excluded"
              % runtime_test_params["EXCLUDE_GROUP"])

    for name in names:
        start_time = time.time()

        # Reset SDK/Shell connection counters
        RemoteMachineShellConnection.connections = 0
        RemoteMachineShellConnection.disconnections = 0
        SDKClient.sdk_connections = 0
        SDKClient.sdk_disconnections = 0

        # Split "module.Class.test,k1=v1,k2=v2" into the per-case params dict.
        argument_split = [a.strip()
                          for a in re.split("[,]?([^,=]+)=", name)[1:]]
        params = dict(zip(argument_split[::2], argument_split[1::2]))

        # Note that if ALL is specified at runtime then tests
        # which have no groups are still run - just being explicit on this

        if "GROUP" in runtime_test_params \
                and "ALL" not in runtime_test_params["GROUP"].split(";"):
            # Params is the .conf file parameters.
            if 'GROUP' not in params:
                # this test is not in any groups so we do not run it
                print("Test '%s' skipped, group requested but test has no group"
                      % name)
                continue
            else:
                # Require EVERY requested group to be present on the case.
                skip_test = False
                tc_groups = params["GROUP"].split(";")
                for run_group in runtime_test_params["GROUP"].split(";"):
                    if run_group not in tc_groups:
                        skip_test = True
                        break
                if skip_test:
                    print("Test '{0}' skipped, GROUP not satisfied"
                          .format(name))
                    continue
        if "EXCLUDE_GROUP" in runtime_test_params:
            if 'GROUP' in params and \
                    len(set(runtime_test_params["EXCLUDE_GROUP"].split(";"))
                        & set(params["GROUP"].split(";"))) > 0:
                print("Test '%s' skipped, is in an excluded group" % name)
                continue

        # Create Log Directory
        logs_folder = os.path.join(root_log_dir, "test_%s" % case_number)
        os.mkdir(logs_folder)
        test_log_file = os.path.join(logs_folder, "test.log")
        log_config_filename = r'{0}'.format(os.path.join(logs_folder,
                                                         "test.logging.conf"))
        create_log_file(log_config_filename, test_log_file, options.loglevel)
        logging.config.fileConfig(log_config_filename)
        print("Logs will be stored at %s" % logs_folder)
        print("\nguides/gradlew --refresh-dependencies testrunner -P jython=/opt/jython/bin/jython -P 'args=-i {0} {1} -t {2}'\n"
              .format(arg_i or "", arg_p or "", name))
        name = name.split(",")[0]

        # Update the test params for each test
        TestInputSingleton.input.test_params = params
        TestInputSingleton.input.test_params.update(runtime_test_params)
        TestInputSingleton.input.test_params["case_number"] = case_number
        TestInputSingleton.input.test_params["logs_folder"] = logs_folder
        if "rerun" not in TestInputSingleton.input.test_params:
            TestInputSingleton.input.test_params["rerun"] = False
        print("Test Input params:\n%s"
              % TestInputSingleton.input.test_params)
        try:
            suite = unittest.TestLoader().loadTestsFromName(name)
        except AttributeError as e:
            # e.message is Python-2-only; fine under Jython 2.x.
            print("Test %s was not found: %s" % (name, e))
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, e.message)]
        except SyntaxError as e:
            print("SyntaxError in %s: %s" % (name, e))
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, e.message)]
        else:
            result = unittest.TextTestRunner(verbosity=2).run(suite)
            # Optional single retry of a failed case with debug logging.
            if TestInputSingleton.input.param("rerun") \
                    and (result.failures or result.errors):
                print("#"*60, "\n",
                      "## \tTest Failed: Rerunning it one more time",
                      "\n", "#"*60)
                print("####### Running test with trace logs enabled #######")
                TestInputSingleton.input.test_params["log_level"] = "debug"
                result = unittest.TextTestRunner(verbosity=2).run(suite)
            # test_timeout = TestInputSingleton.input.param("test_timeout",
            #                                               None)
            # t = StoppableThreadWithResult(
            #    target=unittest.TextTestRunner(verbosity=2).run,
            #    name="test_thread",
            #    args=(suite))
            # t.start()
            # result = t.join(timeout=test_timeout)
            if not result:
                # _Thread__stop is a private Python-2 Thread API used to
                # forcibly kill leftover worker threads after a timeout.
                for t in threading.enumerate():
                    if t != threading.current_thread():
                        t._Thread__stop()
                result = unittest.TextTestRunner(verbosity=2)._makeResult()
                case_number += 1000
                print("========TEST WAS STOPPED DUE TO  TIMEOUT=========")
                result.errors = [(name, "Test was stopped due to timeout")]
        time_taken = time.time() - start_time
        connection_status_msg = \
            "During the test,\n" \
            "Remote Connections: %s, Disconnections: %s\n" \
            "SDK Connections: %s, Disconnections: %s" \
            % (RemoteMachineShellConnection.connections,
               RemoteMachineShellConnection.disconnections,
               SDKClient.sdk_connections, SDKClient.sdk_disconnections)

        if RemoteMachineShellConnection.connections \
                != RemoteMachineShellConnection.disconnections:
            connection_status_msg += \
                "\n!!!!!! CRITICAL :: Shell disconnection mismatch !!!!!"
        if SDKClient.sdk_connections != SDKClient.sdk_disconnections:
            connection_status_msg += \
                "\n!!!!!! CRITICAL :: SDK disconnection mismatch !!!!!"
        print(connection_status_msg)
        # Concat params to test name
        # To make tests more readable
        params = ''
        if TestInputSingleton.input.test_params:
            for key, value in TestInputSingleton.input.test_params.items():
                if key and value:
                    params += "," + str(key) + "=" + str(value)

        if result.failures or result.errors:
            # Only the first failure and the first error are reported.
            errors = []
            for failure in result.failures:
                test_case, failure_string = failure
                errors.append(failure_string)
                break
            for error in result.errors:
                test_case, error_string = error
                errors.append(error_string)
                break
            xunit.add_test(name=name, status='fail', time=time_taken,
                           errorType='membase.error', errorMessage=str(errors),
                           params=params)
            results.append({"result": "fail", "name": name})
        else:
            xunit.add_test(name=name, time=time_taken, params=params)
            results.append({"result": "pass",
                            "name": name,
                            "time": time_taken})
        xunit.write("%s%sreport-%s"
                    % (os.path.dirname(logs_folder), os.sep, str_time))
        xunit.print_summary()
        print("testrunner logs, diags and results are available under %s"
              % logs_folder)
        case_number += 1
        if (result.failures or result.errors) and \
                TestInputSingleton.input.param("stop-on-failure", False):
            print("Test fails, all of the following tests will be skipped!!!")
            break

    if "makefile" in TestInputSingleton.input.test_params:
        # Print fail for those tests which failed and do sys.exit() error code
        fail_count = 0
        for result in results:
            if result["result"] == "fail":
                test_run_result = result["name"] + " fail"
                fail_count += 1
            else:
                test_run_result = result["name"] + " pass"
            print(test_run_result)
        if fail_count > 0:
            System.exit(1)
    # NOTE(review): System.exit implies java.lang.System (Jython);
    # on CPython this would be sys.exit — confirm the target runtime.
    System.exit(0)
Пример #16
0
def main():
    """Testrunner entry point: run each named case with its own log file and
    collect results into an xunit report.

    Fix applied to the group filter: the original guard
    ``("GROUP" or "EXCLUDE_GROUP") in test_params`` always evaluates as
    ``"GROUP" in test_params`` (the string ``"GROUP"`` is truthy), so a run
    that specified only EXCLUDE_GROUP never filtered anything. The rewritten
    filter checks both keys independently, no longer raises KeyError when
    only EXCLUDE_GROUP is given, and matches exclusions against the test's
    own GROUP key (consistent with the other runner variants in this file,
    which intersect EXCLUDE_GROUP with ``params["GROUP"]``).
    """
    watcher()

    names, test_params, arg_i, arg_p, options = parse_args(sys.argv)
    # get params from command line
    TestInputSingleton.input = TestInputParser.get_test_input(sys.argv)
    # ensure command line params get higher priority
    test_params.update(TestInputSingleton.input.test_params)
    TestInputSingleton.input.test_params = test_params
    print("Global Test input params:")
    pprint(TestInputSingleton.input.test_params)

    xunit = XUnitTestResult()

    # Create root logs directory
    if not os.path.exists("logs"):
        os.makedirs("logs")

    # Create testrunner logs subdirectory
    str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
    logs_folder = "logs/testrunner-" + str_time
    logs_folder_abspath = os.path.abspath(logs_folder)
    if not os.path.exists(logs_folder):
        os.makedirs(logs_folder)

    results = []
    case_number = 1
    if "GROUP" in test_params:
        print("Only cases in GROUPs '{0}' will be executed".format(test_params["GROUP"]))
    if "EXCLUDE_GROUP" in test_params:
        print("Cases from GROUPs '{0}' will be excluded".format(test_params["EXCLUDE_GROUP"]))

    for name in names:
        start_time = time.time()
        argument_split = [a.strip() for a in re.split("[,]?([^,=]+)=", name)[1:]]
        params = dict(list(zip(argument_split[::2], argument_split[1::2])))

        if "GROUP" in test_params or "EXCLUDE_GROUP" in test_params:
            # Groups may be ';'-separated on both sides. Run the test only if
            # it shares at least one group with GROUP (when requested) and
            # shares none with EXCLUDE_GROUP.
            test_groups = set(params["GROUP"].split(";")) if "GROUP" in params \
                else set()
            group_requested = "GROUP" in test_params
            in_requested_group = group_requested and bool(
                set(test_params["GROUP"].split(";")) & test_groups)
            in_excluded_group = "EXCLUDE_GROUP" in test_params and bool(
                set(test_params["EXCLUDE_GROUP"].split(";")) & test_groups)
            if (group_requested and not in_requested_group) or in_excluded_group:
                print("test '{0}' was skipped".format(name))
                continue

        log_config_filename = ""
        # reduce the number of chars in the file name, if there are many
        # (255 bytes filename limit)
        if len(name) > 240:
            name = os.path.join(name[:220] + time.strftime("%y-%b-%d_%H-%M-%S", time.localtime()))
        if params:
            log_name = os.path.join(logs_folder_abspath, name + ".log")
            log_config_filename = os.path.join(logs_folder_abspath, name + ".logging.conf")
        else:
            dotnames = name.split('.')
            log_name = os.path.join(logs_folder_abspath, dotnames[-1] + ".log")
            log_config_filename = os.path.join(logs_folder_abspath, dotnames[-1] + ".logging.conf")
        create_log_file(log_config_filename, log_name, options.loglevel)
        logging.config.fileConfig(log_config_filename)
        print("\n./testrunner -i {0} {1} -t {2}\n"\
              .format(arg_i or "", arg_p or "", name))
        name = name.split(",")[0]

        # Update the test params for each test
        TestInputSingleton.input.test_params = params
        TestInputSingleton.input.test_params.update(test_params)
        TestInputSingleton.input.test_params["case_number"] = case_number
        print("Test Input params:")
        pprint(TestInputSingleton.input.test_params)
        suite = unittest.TestLoader().loadTestsFromName(name)
        result = unittest.TextTestRunner(verbosity=2).run(suite)
        time_taken = time.time() - start_time

        # Concat params to test name
        # To make tests more readable
        params = ''
        if TestInputSingleton.input.test_params:
            for key, value in list(TestInputSingleton.input.test_params.items()):
                if key and value:
                    params += "," + str(key) + ":" + str(value)

        if result.failures or result.errors:
            # Immediately get the server logs, if
            # the test has failed or has errors
            if "get-logs" in TestInputSingleton.input.test_params:
                get_server_logs(TestInputSingleton.input, logs_folder)

            if "get-cbcollect-info" in TestInputSingleton.input.test_params:
                if TestInputSingleton.input.param("get-cbcollect-info", True):
                    get_cbcollect_info(TestInputSingleton.input, logs_folder)

            # Only the first failure and first error are attached to the report.
            errors = []
            for failure in result.failures:
                test_case, failure_string = failure
                errors.append(failure_string)
                break
            for error in result.errors:
                test_case, error_string = error
                errors.append(error_string)
                break
            xunit.add_test(name=name, status='fail', time=time_taken,
                           errorType='membase.error', errorMessage=str(errors),
                           params=params)
            results.append({"result": "fail", "name": name})
        else:
            xunit.add_test(name=name, time=time_taken, params=params)
            results.append({"result": "pass", "name": name, "time": time_taken})
        xunit.write("{0}/report-{1}".format(logs_folder, str_time))
        xunit.print_summary()
        print("testrunner logs, diags and results are available under {0}".format(logs_folder))
        case_number += 1
        if (result.failures or result.errors) and \
                TestInputSingleton.input.param("stop-on-failure", False):
            print("test fails, all of the following tests will be skipped!!!")
            break

    if "makefile" in TestInputSingleton.input.test_params:
        # print out fail for those tests which failed and do sys.exit() error code
        fail_count = 0
        for result in results:
            if result["result"] == "fail":
                print(result["name"], " fail ")
                fail_count += 1
            else:
                print(result["name"], " pass")
        if fail_count > 0:
            sys.exit(1)
Пример #17
0
def main():
    """Python-2 testrunner entry point.

    Runs ``suite_setUp`` once before the first case, executes each named
    case with its own log folder, and optionally collects core dumps and
    scheduler-delay measurements. Uses Python-2-only syntax
    (``print`` statements, ``except Cls, e``, ``e.message``).
    """

    BEFORE_SUITE = "suite_setUp"
    AFTER_SUITE = "suite_tearDown"
    names, runtime_test_params, arg_i, arg_p, options = parse_args(sys.argv)
    # get params from command line
    TestInputSingleton.input = TestInputParser.get_test_input(sys.argv)
    # ensure command line params get higher priority
    runtime_test_params.update(TestInputSingleton.input.test_params)
    TestInputSingleton.input.test_params = runtime_test_params
    print "Global Test input params:"
    pprint(TestInputSingleton.input.test_params)

    xunit = XUnitTestResult()
    # Create root logs directory
    abs_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    # Create testrunner logs subdirectory
    str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
    root_log_dir = os.path.join(abs_path, "logs{0}testrunner-{1}".format(os.sep, str_time))
    if not os.path.exists(root_log_dir):
        os.makedirs(root_log_dir)

    results = []
    case_number = 1
    if "GROUP" in runtime_test_params:
        print "Only cases in GROUPs '{0}' will be executed".format(runtime_test_params["GROUP"])
    if "EXCLUDE_GROUP" in runtime_test_params:
        print "Cases from GROUPs '{0}' will be excluded".format(runtime_test_params["EXCLUDE_GROUP"])

    if TestInputSingleton.input.param("get-delays", False):
        # start measure_sched_delays on all servers
        sd = SchedDelays(TestInputSingleton.input.servers)
        sd.start_measure_sched_delays()

    for name in names:
        start_time = time.time()
        # Split "module.Class.test,k1=v1,k2=v2" into the per-case params dict.
        argument_split = [a.strip() for a in re.split("[,]?([^,=]+)=", name)[1:]]
        params = dict(zip(argument_split[::2], argument_split[1::2]))



        # Note that if ALL is specified at runtime then tests which have no groups are still run - just being
        # explicit on this

        if "GROUP" in runtime_test_params and "ALL" not in runtime_test_params["GROUP"].split(";"):
            if 'GROUP' not in params:         # params is the .conf file parameters.
                # this test is not in any groups so we do not run it
                print "test '{0}' skipped, a group was requested and this is not any groups".format(name)
                continue

            # there is a group for this test case, if that group is not specified at run time then do not run it
            elif len( set(runtime_test_params["GROUP"].split(";")) & set(params["GROUP"].split(";")) ) == 0:
                print "test '{0}' skipped, is not in the requested group".format(name)
                continue
            else:
                pass # the test was in requested group, will run it

        elif "EXCLUDE_GROUP" in runtime_test_params:
            if 'GROUP' in params and \
                len(set(runtime_test_params["EXCLUDE_GROUP"].split(";")) & set(params["GROUP"].split(";"))) > 0:
                    print "test '{0}' skipped, is in an excluded group".format(name)
                    continue



        # Create Log Directory
        logs_folder = os.path.join(root_log_dir, "test_%s" % case_number)
        os.mkdir(logs_folder)
        test_log_file = os.path.join(logs_folder, "test.log")
        log_config_filename = r'{0}'.format(os.path.join(logs_folder, "test.logging.conf"))
        create_log_file(log_config_filename, test_log_file, options.loglevel)
        logging.config.fileConfig(log_config_filename)
        print "Logs will be stored at {0}".format(logs_folder)
        print "\n.{3}testrunner -i {0} -p {1} -t {2}\n"\
              .format(arg_i or "", arg_p or "", name, os.sep)
        name = name.split(",")[0]

        # Update the test params for each test
        TestInputSingleton.input.test_params = params
        TestInputSingleton.input.test_params.update(runtime_test_params)
        TestInputSingleton.input.test_params["case_number"] = case_number
        TestInputSingleton.input.test_params["logs_folder"] = logs_folder
        print "Test Input params:"
        print(TestInputSingleton.input.test_params)
        if "get-coredumps" in TestInputSingleton.input.test_params:
            if TestInputSingleton.input.param("get-coredumps", True):
                clear_old_core_dumps(TestInputSingleton.input, logs_folder)
        if case_number == 1:
            # Run suite_setUp once before the first case; if new core dumps
            # appear during it, fail the run explicitly.
            before_suite_name = "%s.%s" % (name[:name.rfind('.')], BEFORE_SUITE)
            try:
                print "Run before suite setup for %s" % name
                suite = unittest.TestLoader().loadTestsFromName(before_suite_name)
                result = unittest.TextTestRunner(verbosity=2).run(suite)
                if "get-coredumps" in TestInputSingleton.input.test_params:
                    if TestInputSingleton.input.param("get-coredumps", True):
                        if get_core_dumps(TestInputSingleton.input, logs_folder):
                            result = unittest.TextTestRunner(verbosity=2)._makeResult()
                            result.errors = [(name, "Failing test : new core dump(s) "
                                             "were found and collected."
                                             " Check testrunner logs folder.")]
                            print("FAIL: New core dump(s) was found and collected")
            except AttributeError as ex:
                # Module has no suite_setUp; silently continue to the test.
                pass
        try:
            suite = unittest.TestLoader().loadTestsFromName(name)
        except AttributeError, e:
            # Python-2 'except Cls, e' syntax; e.message is Python-2-only.
            print "Test {0} was not found: {1}".format(name, e)
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, e.message)]
        except SyntaxError, e:
            print "SyntaxError in {0}: {1}".format(name, e)
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, e.message)]
Пример #18
0
import sys
import os


sys.path.append('.')
sys.path.append('lib')
from remote.remote_util import RemoteMachineShellConnection

from TestInput import TestInputParser

if __name__ == "__main__":
    input = TestInputParser.get_test_input(sys.argv)
    remote = RemoteMachineShellConnection(input.servers[0])
    server_type = 'membase'
    if remote.is_couchbase_installed():
        server_type = 'couchbase'
    for serverInfo in input.servers:
        try:
            remote = RemoteMachineShellConnection(serverInfo)
            info = remote.extract_remote_info()
            if info.type.lower() != 'windows':
                core_files = []
                print "looking for erl_crash files under /opt/{0}/var/lib/{0}/".format(server_type)
                core_files.extend(remote.file_starts_with("/opt/{0}/var/lib/{0}/".format(server_type), "erl_crash"))
                print "looking for core* files under /opt/{0}/var/lib/{0}/".format(server_type)
                core_files.extend(remote.file_starts_with("/opt/{0}/var/lib/{0}/".format(server_type), "core"))
                print "looking for core* files under /tmp/"
                core_files.extend(remote.file_starts_with("/tmp/", "core"))
                i = 0
                for core_file in core_files:
                    if core_file.find('erl_crash.dump') != -1:
Пример #19
0
def main():
    """Python-2 testrunner entry point (earlier variant).

    Filters cases by GROUP/EXCLUDE_GROUP, runs ``suite_setUp`` before the
    first case, then executes each named case with its own log folder.
    Uses Python-2-only syntax (``print`` statements, ``except Cls, e``,
    ``e.message``).
    """
    BEFORE_SUITE = "suite_setUp"
    AFTER_SUITE = "suite_tearDown"
    names, test_params, arg_i, arg_p, options = parse_args(sys.argv)
    # get params from command line
    TestInputSingleton.input = TestInputParser.get_test_input(sys.argv)
    # ensure command line params get higher priority
    test_params.update(TestInputSingleton.input.test_params)
    TestInputSingleton.input.test_params = test_params
    print "Global Test input params:"
    pprint(TestInputSingleton.input.test_params)

    xunit = XUnitTestResult()
    # Create root logs directory
    abs_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    # Create testrunner logs subdirectory
    str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
    root_log_dir = os.path.join(abs_path, "logs{0}testrunner-{1}".format(os.sep, str_time))
    if not os.path.exists(root_log_dir):
        os.makedirs(root_log_dir)

    results = []
    case_number = 1
    if "GROUP" in test_params:
        print "Only cases in GROUPs '{0}' will be executed".format(test_params["GROUP"])
    if "EXCLUDE_GROUP" in test_params:
        print "Cases from GROUPs '{0}' will be excluded".format(test_params["EXCLUDE_GROUP"])

    if TestInputSingleton.input.param("get-delays", False):
        # start measure_sched_delays on all servers
        sd = SchedDelays(TestInputSingleton.input.servers)
        sd.start_measure_sched_delays()

    for name in names:
        start_time = time.time()
        # Split "module.Class.test,k1=v1,k2=v2" into the per-case params dict.
        argument_split = [a.strip() for a in re.split("[,]?([^,=]+)=", name)[1:]]
        params = dict(zip(argument_split[::2], argument_split[1::2]))

        # NOTE(review): ("GROUP" or "EXCLUDE_GROUP") evaluates to "GROUP",
        # so an EXCLUDE_GROUP-only run never filters here — same defect as
        # the later variant of this runner; kept byte-identical in this pass.
        if ("GROUP" or "EXCLUDE_GROUP") in test_params:
            # determine if the test relates to the specified group(can be separated by ';')
            if not ("GROUP" in params and len(set(test_params["GROUP"].split(";")) & set(params["GROUP"].split(";"))) or "ALL" in test_params["GROUP"].split(";")) or\
                    "EXCLUDE_GROUP" in test_params and len(set(test_params["EXCLUDE_GROUP"].split(";")) & set(params["GROUP"].split(";"))):
                print "test '{0}' was skipped".format(name)
                continue

        # Create Log Directory
        logs_folder = os.path.join(root_log_dir, "test_%s" % case_number)
        os.mkdir(logs_folder)
        test_log_file = os.path.join(logs_folder, "test.log")
        log_config_filename = r'{0}'.format(os.path.join(logs_folder, "test.logging.conf"))
        create_log_file(log_config_filename, test_log_file, options.loglevel)
        logging.config.fileConfig(log_config_filename)
        print "Logs will be stored at {0}".format(logs_folder)
        print "\n.{3}testrunner -i {0} {1} -t {2}\n"\
              .format(arg_i or "", arg_p or "", name, os.sep)
        name = name.split(",")[0]

        # Update the test params for each test
        TestInputSingleton.input.test_params = params
        TestInputSingleton.input.test_params.update(test_params)
        TestInputSingleton.input.test_params["case_number"] = case_number
        TestInputSingleton.input.test_params["logs_folder"] = logs_folder
        print "Test Input params:"
        print(TestInputSingleton.input.test_params)
        if case_number == 1:
            # Run suite_setUp once before the first case.
            before_suite_name = "%s.%s" % (name[:name.rfind('.')], BEFORE_SUITE)
            try:
                print "Run before suite setup for %s" % name
                suite = unittest.TestLoader().loadTestsFromName(before_suite_name)
                result = unittest.TextTestRunner(verbosity=2).run(suite)
            except AttributeError as ex:
                # Module has no suite_setUp; silently continue to the test.
                pass
        try:
            suite = unittest.TestLoader().loadTestsFromName(name)
        except AttributeError, e:
            # Python-2 'except Cls, e' syntax; e.message is Python-2-only.
            print "Test {0} was not found: {1}".format(name, e)
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, e.message)]
        except SyntaxError, e:
            print "SyntaxError in {0}: {1}".format(name, e)
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, e.message)]
Пример #20
0
    statsCollector.stop()
    signal.alarm(30)
    sys.exit(0)


if __name__ == "__main__":

    _cb_exc, _freq = parse_args(sys.argv)

    # remove freq from argv to be backward compatible
    if "-f" in sys.argv:
        sys.argv.remove("-f")
    elif "--freq" in sys.argv:
        sys.argv.remove("--freq")

    TestInputSingleton.input = TestInputParser.get_test_input(sys.argv)

    if not TestInputSingleton.input.servers:
        print "[release_stats_collector error] empty server list, please check .ini file"
        sys.exit(-2)

    if not os.path.exists(_cb_exc):
        print "[release_stats_collector error] cannot find cbstats executable in \"{0}\""\
            .format(_cb_exc)
        sys.exit(-3)

    print "[Servers]"
    for server in TestInputSingleton.input.servers:
        print server.ip
    print
    print "Dumping stats for release criteria tests:"
Пример #21
0
# testrunner, which runs the full
# unittest.TestCase/setUp/testFoo/tearDown lifecycle.
#
import sys

sys.path.append("lib")
sys.path.append("pytests")
sys.path.append(".")

import TestInput

from TestInput import TestInputParser, TestInputSingleton

import performance.eperf as eperf

TestInputSingleton.input = TestInputParser.get_test_input(sys.argv)

class EPerfMasterWrapper(eperf.EPerfMaster):
    """Thin wrapper around ``eperf.EPerfMaster``.

    The constructor is deliberately a no-op: it skips the base class's
    initialization so the caller can inject the attributes it needs
    (e.g. ``input``) directly on the instance.
    """

    def __init__(self):
        # Intentionally do not call super().__init__().
        pass

obj = EPerfMasterWrapper()
obj.input = TestInputSingleton.input

# Run setUp with load_phase=0 and access_phase=0
if "setUp" in sys.argv:
    obj.setUp()
    what = obj.param("test", "test_ept_read_1")
    meth = getattr(obj, what)
    meth()
else: