Beispiel #1
0
def main():
    """Run the unit-test and functional-test suites.

    Any extra command-line arguments are forwarded to both runners.

    return (int): 0 on success, 1 if either suite raised
    FrameworkException.

    """
    extra_args = sys.argv[1:]
    runners = (
        "./cmstestsuite/RunUnitTests.py",
        "./cmstestsuite/RunFunctionalTests.py",
    )
    try:
        for runner in runners:
            sh([runner] + extra_args)
    except FrameworkException:
        return 1
    return 0
Beispiel #2
0
    def initialize_aws(self):
        """Create an admin.

        The username will be admin_<suffix>, where <suffix> will be the first
        integer (from 1) for which an admin with that name doesn't yet exist.

        return (str): the suffix.

        """
        logger.info("Creating admin...")
        # NOTE(review): the "******" literals below look like credentials
        # redacted from the original source; as written, "%"-formatting a
        # plain "******" string would raise TypeError ("not all arguments
        # converted") — confirm the real format strings upstream.
        self.admin_info["password"] = "******"

        suffix = "1"
        # Keep bumping the numeric suffix until AddAdmin.py succeeds;
        # it raises TestException when the username is already taken.
        while True:
            self.admin_info["username"] = "******" % suffix
            logger.info("Trying %(username)s" % self.admin_info)
            try:
                sh([sys.executable, "cmscontrib/AddAdmin.py",
                    "%(username)s" % self.admin_info,
                    "-p", "%(password)s" % self.admin_info],
                   ignore_failure=False)
            except TestException:
                # Name collision: try the next integer suffix.
                suffix = str(int(suffix) + 1)
            else:
                break

        return suffix
Beispiel #3
0
def setup_cms():
    """Prepare a fresh CMS instance for testing.

    Creates the PostgreSQL database, clones and checks out the code,
    writes the CMS configuration, builds the sources and creates the
    database tables.

    """
    info("Creating database called %(DB_NAME)s accessible by %(DB_USER)s." %
         CONFIG)
    sh("sudo -u postgres createdb %(DB_NAME)s -O %(DB_USER)s" % CONFIG)

    info("Checking out code.")
    sh(["git", "clone", CONFIG["GIT_ORIGIN"], CONFIG["TEST_DIR"]])
    os.chdir("%(TEST_DIR)s" % CONFIG)
    sh(["git", "checkout", CONFIG["GIT_REVISION"]])

    info("Configuring CMS.")
    # Build the SQLAlchemy connection URL once, then hand over the whole
    # configuration mapping.
    db_url = ('"postgresql+psycopg2://'
              '%(DB_USER)s:%(DB_PASSWORD)s@'
              '%(DB_HOST)s/%(DB_NAME)s"' % CONFIG)
    configure_cms({
        "database": db_url,
        "keep_sandbox": "false",
        "contest_listen_address": '["127.0.0.1"]',
        "admin_listen_address": '"127.0.0.1"',
        "min_submission_interval": '0',
    })

    info("Setting environment.")
    os.environ["PYTHONPATH"] = "%(TEST_DIR)s" % CONFIG

    info("Building cms.")
    sh("./setup.py build")

    info("Creating tables.")
    sh("python cms/db/SQLAlchemyAll.py")
    def initialize_aws(self, rand):
        """Create an admin and logs in

        rand (int): some random bit to add to the admin username.

        """
        logger.info("Creating admin...")
        # NOTE(review): "******" appears to be a redaction artifact from the
        # original source; a plain "******" % rand would raise TypeError —
        # confirm the real format string upstream.
        self.admin_info["username"] = "******" % rand
        self.admin_info["password"] = "******"
        # Create the admin through the command-line helper script.
        sh([sys.executable, "cmscontrib/AddAdmin.py",
            "%(username)s" % self.admin_info,
            "-p", "%(password)s" % self.admin_info])
Beispiel #5
0
def run_unittests(test_list):
    """Run all needed unit tests.

    test_list ([(string, string)]): tests to run, each given as
                                    (path, filename.py).

    return ((bool, string)): whether every test passed, and a
                             human-readable report.
    """
    logger.info("Running unit tests...")

    total = len(test_list)
    failed = []

    for idx, (path, filename) in enumerate(test_list, start=1):
        logger.info("Running test %d/%d: %s.%s", idx, total, path, filename)
        target = os.path.join(path, filename)
        cmdline = profiling_cmdline(
            coverage_cmdline([target]),
            target.replace("/", "_"))
        try:
            sh(cmdline)
        except TestException:
            logger.info("  (FAILED: %s)", filename)
            failed.append((path, filename))

    # Assemble the report from parts and join once at the end.
    report = ["\n\n"]
    if failed:
        report.append("------ TESTS FAILED: ------\n")
    else:
        report.append(
            "================== ALL TESTS PASSED! ==================\n")
    report.append(" Executed: %d\n" % total)
    report.append("   Failed: %d\n" % len(failed))
    report.append("\n")
    for path, filename in failed:
        report.append(" %s.%s\n" % (path, filename))

    if failed:
        # Persist the failures so --retry-failed can pick them up later.
        with io.open(FAILED_UNITTEST_FILENAME, "wt",
                     encoding="utf-8") as failed_file:
            for path, filename in failed:
                failed_file.write("%s %s\n" % (path, filename))
        report.append("\n")
        report.append(
            "Failed tests stored in %s.\n" % FAILED_UNITTEST_FILENAME)
        report.append("Run again with --retry-failed (or -r) to retry.\n")

    return len(failed) == 0, "".join(report)
Beispiel #6
0
def run_unittests(test_list):
    """Run every unit test in test_list.

    test_list ([(string, string)]): tests as (path, filename.py) pairs.

    return ((bool, string)): success flag and a textual summary.
    """
    logger.info("Running unit tests...")

    n_tests = len(test_list)
    failures = []

    for i, (path, filename) in enumerate(test_list):
        logger.info("Running test %d/%d: %s.%s",
                    i + 1, n_tests, path, filename)
        target = os.path.join(path, filename)
        cmdline = coverage_cmdline([target])
        cmdline = profiling_cmdline(cmdline, target.replace("/", "_"))
        try:
            sh(cmdline)
        except TestException:
            logger.info("  (FAILED: %s)", filename)
            failures.append((path, filename))

    # Pick the banner first, then append the counters and the failures.
    if failures:
        header = "------ TESTS FAILED: ------\n"
    else:
        header = "================== ALL TESTS PASSED! ==================\n"
    results = "\n\n" + header
    results += " Executed: %d\n" % n_tests
    results += "   Failed: %d\n" % len(failures)
    results += "\n"
    results += "".join(" %s.%s\n" % (p, f) for p, f in failures)

    if failures:
        # Record the failures so a later --retry-failed run can reuse them.
        with io.open(FAILED_UNITTEST_FILENAME,
                     "wt", encoding="utf-8") as failed_file:
            failed_file.writelines(
                "%s %s\n" % (p, f) for p, f in failures)
        results += "\n"
        results += "Failed tests stored in %s.\n" % FAILED_UNITTEST_FILENAME
        results += "Run again with --retry-failed (or -r) to retry.\n"

    return len(failures) == 0, results
    def initialize_aws(self, rand):
        """Create an admin and logs in

        rand (int): some random bit to add to the admin username.

        """
        logger.info("Creating admin...")
        # NOTE(review): "******" looks like a redacted literal from the
        # original source; "******" % rand would raise TypeError as
        # written — confirm the real format string upstream.
        self.admin_info["username"] = "******" % rand
        self.admin_info["password"] = "******"
        # Create the admin via the command-line helper script.
        sh([
            sys.executable, "cmscontrib/AddAdmin.py",
            "%(username)s" % self.admin_info, "-p",
            "%(password)s" % self.admin_info
        ])
Beispiel #8
0
def main():
    """Run the requested test suites, dumping the CMS log on failure.

    return (int): 0 on success, 1 if a suite raised TestException.

    """
    test_suite = get_test_suite()
    try:
        # An empty suite selection means "run everything".
        if test_suite == UNITTESTS or len(test_suite) == 0:
            sh(["./cmstestsuite/RunUnitTests.py"] + sys.argv[1:])
        if test_suite == FUNCTIONALTESTS or len(test_suite) == 0:
            sh(["./cmstestsuite/RunFunctionalTests.py"] + sys.argv[1:])
    except TestException:
        if os.path.exists("./log/cms/last.log"):
            print("\n\n===== START OF LOG DUMP =====\n\n")
            # Use a context manager so the log-file handle is closed;
            # the previous bare open(...).read() leaked it.
            with open("./log/cms/last.log") as log_file:
                print(log_file.read())
            print("\n\n===== END OF LOG DUMP =====\n\n")
        return 1
    return 0
Beispiel #9
0
def setup_cms():
    """Prepare the test CMS: database, checkout, configuration, tables."""
    info("Creating database called %(DB_NAME)s accessible by %(DB_USER)s." %
         CONFIG)
    sh("sudo -u postgres createdb %(DB_NAME)s -O %(DB_USER)s" % CONFIG)

    info("Checking out code.")
    sh("git clone %(GIT_ORIGIN)s %(TEST_DIR)s" % CONFIG)
    os.chdir("%(TEST_DIR)s/cms" % CONFIG)
    sh("git checkout %(GIT_REVISION)s" % CONFIG)

    info("Configuring CMS.")
    # Assemble the SQLAlchemy connection URL separately for readability.
    database_url = ('"postgresql+psycopg2://'
                    '%(DB_USER)s:%(DB_PASSWORD)s@'
                    '%(DB_HOST)s/%(DB_NAME)s"' % CONFIG)
    configure_cms({
        "database": database_url,
        "keep_sandbox": "false",
        "contest_listen_address": '["127.0.0.1"]',
        "admin_listen_address": '"127.0.0.1"',
        "min_submission_interval": '0',
    })

    info("Setting environment.")
    os.environ["PYTHONPATH"] = "%(TEST_DIR)s/cms" % CONFIG

    info("Creating tables.")
    sh("python db/SQLAlchemyAll.py")
Beispiel #10
0
def main():
    """Run the requested test suites, dumping the CMS log on failure.

    return (int): 0 on success, 1 if a suite raised FrameworkException.

    """
    test_suite = get_test_suite()
    try:
        # An empty suite selection means "run everything".
        if test_suite == UNITTESTS or len(test_suite) == 0:
            sh(["./cmstestsuite/RunUnitTests.py"] + sys.argv[1:])
        if test_suite == FUNCTIONALTESTS or len(test_suite) == 0:
            sh(["./cmstestsuite/RunFunctionalTests.py"] + sys.argv[1:])
    except FrameworkException:
        if os.path.exists("./log/cms/last.log"):
            print("\n\n===== START OF LOG DUMP =====\n\n")
            # Use a context manager so the log-file handle is closed;
            # the previous bare open(...).read() leaked it.
            with open("./log/cms/last.log") as log_file:
                print(log_file.read())
            print("\n\n===== END OF LOG DUMP =====\n\n")
        return 1
    return 0
Beispiel #11
0
def main():
    """Run the selected test suites; on failure, dump the CMS log.

    return (int): 0 if every selected suite passed, 1 otherwise.

    """
    suite = get_test_suite()
    run_all = len(suite) == 0
    try:
        if run_all or suite == UNITTESTS:
            sh(["./cmstestsuite/RunUnitTests.py"] + sys.argv[1:])
        if run_all or suite == FUNCTIONALTESTS:
            sh(["./cmstestsuite/RunFunctionalTests.py"] + sys.argv[1:])
    except TestException:
        log_path = "./log/cms/last.log"
        if os.path.exists(log_path):
            print("\n\n===== START OF LOG DUMP =====\n\n")
            with io.open(log_path, "rt", encoding="utf-8") as log_file:
                print(log_file.read())
            print("\n\n===== END OF LOG DUMP =====\n\n")
        return 1
    return 0
Beispiel #12
0
def run_unittests(test_list):
    """Run all needed unit tests.

    test_list ([(string, string)]): tests as (path, filename.py) pairs.

    return ((bool, string)): success flag and a textual report.
    """
    info("Running unit tests...")

    total = len(test_list)
    failures = []

    for idx, (path, filename) in enumerate(test_list, start=1):
        info("Running test %d/%d: %s.%s" % (idx, total, path, filename))
        try:
            # Run under coverage so results can be combined later.
            sh('python-coverage run -p --source=cms %s' %
               os.path.join(path, filename))
        except FrameworkException:
            info("  (FAILED: %s)" % filename)
            failures.append((path, filename))

    if failures:
        results = "\n\n" + "------ TESTS FAILED: ------\n"
    else:
        results = ("\n\n"
                   "================== ALL TESTS PASSED! ==================\n")
    results += " Executed: %d\n" % total
    results += "   Failed: %d\n" % len(failures)
    results += "\n"
    for path, filename in failures:
        results += " %s.%s\n" % (path, filename)

    if failures:
        # Persist failures for a later --retry-failed run.
        with open(FAILED_UNITTEST_FILENAME, "w") as failed_file:
            failed_file.writelines(
                "%s %s\n" % (p, f) for p, f in failures)
        results += "\n"
        results += "Failed tests stored in %s.\n" % FAILED_UNITTEST_FILENAME
        results += "Run again with --retry-failed (or -r) to retry.\n"

    return len(failures) == 0, results
Beispiel #13
0
def run_unittests(test_list):
    """Run all needed unit tests.

    test_list ([(string, string)]): tests to execute, each given as
                                    (path, filename.py).

    return ((bool, string)): True iff everything passed, plus a report.
    """
    info("Running unit tests...")

    num_tests = len(test_list)
    failed_tests = []

    for i, (path, filename) in enumerate(test_list):
        info("Running test %d/%d: %s.%s" %
             (i + 1, num_tests, path, filename))
        try:
            # Run each test under coverage for later aggregation.
            sh('python-coverage run -p --source=cms %s' %
               os.path.join(path, filename))
        except FrameworkException:
            info("  (FAILED: %s)" % filename)
            failed_tests.append((path, filename))

    # Collect the report lines and join them once.
    lines = ["\n\n"]
    if not failed_tests:
        lines.append(
            "================== ALL TESTS PASSED! ==================\n")
    else:
        lines.append("------ TESTS FAILED: ------\n")
    lines.append(" Executed: %d\n" % num_tests)
    lines.append("   Failed: %d\n" % len(failed_tests))
    lines.append("\n")
    lines.extend(" %s.%s\n" % (p, f) for p, f in failed_tests)

    if failed_tests:
        # Save the failing tests so --retry-failed can rerun them.
        with open(FAILED_UNITTEST_FILENAME, "w") as failed_file:
            for p, f in failed_tests:
                failed_file.write("%s %s\n" % (p, f))
        lines.append("\n")
        lines.append(
            "Failed tests stored in %s.\n" % FAILED_UNITTEST_FILENAME)
        lines.append("Run again with --retry-failed (or -r) to retry.\n")

    return len(failed_tests) == 0, "".join(lines)
Beispiel #14
0
def main():
    """Entry point for the CMS test-suite runner.

    Parses options, selects the tests to run, prepares the environment,
    runs the test cases and prints a report.

    return (int|None): 0 when there is nothing to run, 1 on unusable
    configuration; otherwise falls off the end (implicitly None).

    """
    parser = ArgumentParser(description="Runs the CMS test suite.")
    parser.add_argument("regex", metavar="regex",
        type=str, nargs='*',
        help="a regex to match to run a subset of tests")
    parser.add_argument("-l", "--languages",
        type=str, action="store", default="",
        help="a comma-separated list of languages to test")
    parser.add_argument("-r", "--retry-failed", action="store_true",
        help="only run failed tests from the previous run (stored in %s)" %
        FAILED_TEST_FILENAME)
    parser.add_argument("-v", "--verbose", action="count",
        help="print debug information (use multiple times for more)")
    args = parser.parse_args()

    CONFIG["VERBOSITY"] = args.verbose

    start_time = datetime.datetime.now()

    # Pre-process our command-line arguments to figure out which tests to run.
    regexes = [re.compile(s) for s in args.regex]
    if args.languages:
        languages = frozenset(args.languages.split(','))
    else:
        languages = frozenset()
    if args.retry_failed:
        test_list = load_failed_tests()
    else:
        test_list = get_all_tests()
    test_list = filter_testcases(test_list, regexes, languages)

    if not test_list:
        info("There are no tests to run! (was your filter too restrictive?)")
        return 0

    if args.retry_failed:
        info("Re-running %d failed tests from last run." % len(test_list))

    # Load config from cms.conf.
    try:
        # Close the /dev/null sink deterministically; the previous code
        # passed a bare open() handle that was never closed.
        with open(os.devnull, "w") as devnull:
            git_root = subprocess.check_output(
                "git rev-parse --show-toplevel", shell=True,
                stderr=devnull).strip()
    except subprocess.CalledProcessError:
        git_root = None
    CONFIG["TEST_DIR"] = git_root
    CONFIG["CONFIG_PATH"] = "%s/examples/cms.conf" % CONFIG["TEST_DIR"]
    if CONFIG["TEST_DIR"] is None:
        CONFIG["CONFIG_PATH"] = "/usr/local/etc/cms.conf"
    cms_config = get_cms_config()

    if not config_is_usable(cms_config):
        return 1

    if CONFIG["TEST_DIR"] is not None:
        # Set up our expected environment.
        os.chdir("%(TEST_DIR)s" % CONFIG)
        os.environ["PYTHONPATH"] = "%(TEST_DIR)s" % CONFIG

        # Clear out any old coverage data.
        info("Clearing old coverage data.")
        sh("python-coverage erase")

    # Fire us up!
    start_generic_services()
    contest_id = create_contest()
    user_id = create_a_user(contest_id)

    # Run all of our test cases.
    test_results = run_testcases(contest_id, user_id, test_list)

    # And good night!
    shutdown_services()
    combine_coverage()

    # print() as a function keeps this valid on Python 3; the old
    # statement form was Python-2-only, unlike the rest of the file.
    print(test_results)

    end_time = datetime.datetime.now()
    print(time_difference(start_time, end_time))
Beispiel #15
0
        "git rev-parse --show-toplevel", shell=True).strip()
    CONFIG["GIT_REVISION"] = args.revision

    if not args.keep_working:

        def _cleanup():
            try:
                # Clean up tree.
                info("Cleaning up test directory %(TEST_DIR)s" % CONFIG)
                shutil.rmtree("%(TEST_DIR)s" % CONFIG)
            except:
                pass

        atexit.register(_cleanup)

    info("Testing `%(GIT_REVISION)s' in %(TEST_DIR)s" % CONFIG)

    reinitialize_everything = True

    if reinitialize_everything:
        drop_old_data()
        setup_cms()
    else:
        os.chdir("%(TEST_DIR)s/cms" % CONFIG)
        os.environ["PYTHONPATH"] = "%(TEST_DIR)s/cms" % CONFIG
        read_cms_config()

    # Now run the tests from the checkout.
    exec_cmd = " ".join(["./cmstestsuite/RunTests.py"] + args.arguments)
    sh(exec_cmd)
Beispiel #16
0
def combine_coverage():
    """Combine coverage reports from different programs."""
    # No-op unless coverage collection was requested in the config.
    if not CONFIG.get('COVERAGE', False):
        return
    logger.info("Combining coverage results.")
    sh([sys.executable, "-m", "coverage", "combine"])
Beispiel #17
0
def setup_cms():
    """Set up a fresh CMS checkout for testing.

    Creates the database, clones the repository (with submodules),
    configures and builds CMS, fixes isolate's permission bits, wires the
    log directory into the checkout, and creates the database tables.

    """
    info("Creating database called %(DB_NAME)s accessible by %(DB_USER)s." %
         CONFIG)
    sh("sudo -u postgres createdb %(DB_NAME)s -O %(DB_USER)s" % CONFIG)

    info("Checking out code.")
    sh(["git", "clone", "--recursive", CONFIG["GIT_ORIGIN"],
        CONFIG["TEST_DIR"]])
    os.chdir("%(TEST_DIR)s" % CONFIG)
    sh(["git", "checkout", CONFIG["GIT_REVISION"]])

    info("Configuring CMS.")
    # Build the SQLAlchemy connection URL once, for readability.
    db_url = ('"postgresql+psycopg2://'
              '%(DB_USER)s:%(DB_PASSWORD)s@'
              '%(DB_HOST)s/%(DB_NAME)s"' % CONFIG)
    configure_cms({
        "database": db_url,
        "keep_sandbox": "false",
        "contest_listen_address": '["127.0.0.1"]',
        "admin_listen_address": '"127.0.0.1"',
        "min_submission_interval": '0',
    })

    info("Setting environment.")
    os.environ["PYTHONPATH"] = "%(TEST_DIR)s" % CONFIG

    info("Building cms.")
    sh("./setup.py build")
    # Add permission bits to isolate (setuid-root is required to sandbox).
    sh("sudo chown root:root isolate/isolate")
    sh("sudo chmod 4755 isolate/isolate")

    # Ensure our logs get preserved. Point them into the checkout instead of
    # the tempdir that we blow away.
    sh(["mkdir", "-p", "%(GIT_ORIGIN)s/log" % CONFIG])
    sh(["ln", "-s", "%(GIT_ORIGIN)s/log" % CONFIG, "log"])

    info("Creating tables.")
    sh("python scripts/cmsInitDB")
Beispiel #18
0
def clear_coverage():
    """Clear existing coverage reports."""
    # Only meaningful when coverage collection is enabled.
    if not CONFIG.get('COVERAGE', False):
        return
    logging.info("Clearing old coverage data.")
    sh([sys.executable, "-m", "coverage", "erase"])
Beispiel #19
0
                shutil.rmtree("%(TEST_DIR)s" % CONFIG)
            except:
                pass
        atexit.register(_cleanup)

    info("Testing `%(GIT_REVISION)s' in %(TEST_DIR)s" % CONFIG)

    reinitialize_everything = True

    if reinitialize_everything:
        drop_old_data()
        setup_cms()
    else:
        os.chdir("%(TEST_DIR)s" % CONFIG)
        os.environ["PYTHONPATH"] = "%(TEST_DIR)s" % CONFIG
        read_cms_config()

    # Now run the tests from the checkout.
    sh(["./cmstestsuite/RunTests.py"] + args.arguments)

    # We export the contest, import it again and re-run the tests on the
    # existing contest. Hard-coded contest indicies should be correct, as we
    # own the database.
    sh(["./cmscontrib/ContestExporter.py", "-c", "1"])
    sh(["./cmscontrib/ContestImporter.py", "dump_testcontest1.tar.gz"])
    sh(["./cmstestsuite/RunTests.py", "-c", "2"] + args.arguments)

    # Export coverage results.
    sh("python -m coverage xml --include 'cms*'")
    shutil.copyfile("coverage.xml", "%(GIT_ORIGIN)s/coverage.xml" % CONFIG)
Beispiel #20
0
def drop_old_data():
    """Delete the old test database and purge the old source checkout."""
    info("Dropping any old databases called %(DB_NAME)s." % CONFIG)
    # ignore_failure: the database may simply not exist on a first run.
    sh("sudo -u postgres dropdb %(DB_NAME)s" % CONFIG, ignore_failure=True)

    info("Purging old checkout from %(TEST_DIR)s." % CONFIG)
    shutil.rmtree("%(TEST_DIR)s" % CONFIG)
Beispiel #21
0
    CONFIG["GIT_ORIGIN"] = subprocess.check_output("git rev-parse --show-toplevel", shell=True).strip()
    CONFIG["GIT_REVISION"] = args.revision

    if not args.keep_working:

        def _cleanup():
            try:
                # Clean up tree.
                info("Cleaning up test directory %(TEST_DIR)s" % CONFIG)
                shutil.rmtree("%(TEST_DIR)s" % CONFIG)
            except:
                pass

        atexit.register(_cleanup)

    info("Testing `%(GIT_REVISION)s' in %(TEST_DIR)s" % CONFIG)

    reinitialize_everything = True

    if reinitialize_everything:
        drop_old_data()
        setup_cms()
    else:
        os.chdir("%(TEST_DIR)s/cms" % CONFIG)
        os.environ["PYTHONPATH"] = "%(TEST_DIR)s/cms" % CONFIG
        read_cms_config()

    # Now run the tests from the checkout.
    exec_cmd = " ".join(["./cmstestsuite/RunTests.py"] + args.arguments)
    sh(exec_cmd)
Beispiel #22
0
def main():
    """Entry point for the CMS unittest runner.

    Parses command-line options, locates the git checkout, prepares the
    environment and coverage data, then runs the unit tests.

    return (int): 0 if every test passed, 1 otherwise.

    """
    parser = ArgumentParser(description="Runs the CMS unittest suite.")
    parser.add_argument(
        "-n",
        "--dry-run",
        action="store_true",
        help="show what tests would be run, but do not run them")
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        help="print debug information (use multiple times for more)")
    parser.add_argument(
        "-r",
        "--retry-failed",
        action="store_true",
        help="only run failed tests from the previous run (stored in %s)" %
        FAILED_UNITTEST_FILENAME)

    # Unused parameters, accepted for interface compatibility with the
    # functional test runner.
    parser.add_argument("regex",
                        action="store",
                        type=utf8_decoder,
                        nargs='*',
                        metavar="regex",
                        help="unused")
    parser.add_argument("-l",
                        "--languages",
                        action="store",
                        type=utf8_decoder,
                        default="",
                        help="unused")
    parser.add_argument("-c",
                        "--contest",
                        action="store",
                        type=utf8_decoder,
                        help="unused")

    args = parser.parse_args()

    CONFIG["VERBOSITY"] = args.verbose
    CONFIG["COVERAGE"] = True

    start_time = datetime.datetime.now()

    try:
        # Use a context manager for the /dev/null sink: the previous code
        # passed a bare io.open() handle that was never closed.
        with io.open(os.devnull, "wb") as devnull:
            git_root = subprocess.check_output(
                "git rev-parse --show-toplevel", shell=True,
                stderr=devnull).strip()
    except subprocess.CalledProcessError:
        print("Please run the unit tests from the git repository.")
        return 1

    if args.retry_failed:
        test_list = load_failed_tests()
    else:
        test_list = get_all_tests()

    if args.dry_run:
        for t in test_list:
            print(t[0].name, t[1])
        return 0

    if args.retry_failed:
        info("Re-running %d failed tests from last run." % len(test_list))

    # Load config from cms.conf.
    CONFIG["TEST_DIR"] = git_root
    CONFIG["CONFIG_PATH"] = "%s/config/cms.conf" % CONFIG["TEST_DIR"]
    if CONFIG["TEST_DIR"] is None:
        CONFIG["CONFIG_PATH"] = "/usr/local/etc/cms.conf"

    if CONFIG["TEST_DIR"] is not None:
        # Set up our expected environment.
        os.chdir("%(TEST_DIR)s" % CONFIG)
        os.environ["PYTHONPATH"] = "%(TEST_DIR)s" % CONFIG

        # Clear out any old coverage data.
        info("Clearing old coverage data.")
        sh("python -m coverage erase")

    # Run all of our test cases.
    passed, test_results = run_unittests(test_list)

    combine_coverage()

    print(test_results)

    end_time = datetime.datetime.now()
    print(time_difference(start_time, end_time))

    if passed:
        return 0
    else:
        return 1
Beispiel #23
0
def main():
    """Entry point for the CMS functional test runner.

    Parses command-line options, selects the tests to run, prepares the
    environment, starts the services and a contest, runs the test cases
    and reports the result.

    return (int): 0 if every selected test passed, 1 otherwise.

    """
    parser = ArgumentParser(description="Runs the CMS functional test suite.")
    parser.add_argument("regex",
                        action="store",
                        type=utf8_decoder,
                        nargs='*',
                        metavar="regex",
                        help="a regex to match to run a subset of tests")
    parser.add_argument("-l",
                        "--languages",
                        action="store",
                        type=utf8_decoder,
                        default="",
                        help="a comma-separated list of languages to test")
    parser.add_argument("-c",
                        "--contest",
                        action="store",
                        type=utf8_decoder,
                        help="use an existing contest (and the tasks in it)")
    parser.add_argument(
        "-r",
        "--retry-failed",
        action="store_true",
        help="only run failed tests from the previous run (stored in %s)" %
        FAILED_TEST_FILENAME)
    parser.add_argument(
        "-n",
        "--dry-run",
        action="store_true",
        help="show what tests would be run, but do not run them")
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        help="print debug information (use multiple times for more)")
    args = parser.parse_args()

    CONFIG["VERBOSITY"] = args.verbose

    start_time = datetime.datetime.now()

    # Pre-process our command-line arguments to figure out which tests to run.
    regexes = [re.compile(s) for s in args.regex]
    if args.languages:
        languages = frozenset(args.languages.split(','))
    else:
        languages = frozenset()
    if args.retry_failed:
        test_list = load_failed_tests()
    else:
        test_list = get_all_tests()
    test_list = filter_testcases(test_list, regexes, languages)

    if not test_list:
        info("There are no tests to run! (was your filter too restrictive?)")
        return 0

    if args.dry_run:
        for t in test_list:
            print(t[0].name, t[1])
        return 0

    if args.retry_failed:
        info("Re-running %d failed tests from last run." % len(test_list))

    # Load config from cms.conf.
    try:
        # Use a context manager for the /dev/null sink: the previous code
        # passed a bare io.open() handle that was never closed.
        with io.open(os.devnull, "wb") as devnull:
            git_root = subprocess.check_output(
                "git rev-parse --show-toplevel", shell=True,
                stderr=devnull).strip()
    except subprocess.CalledProcessError:
        git_root = None
    CONFIG["TEST_DIR"] = git_root
    CONFIG["CONFIG_PATH"] = "%s/config/cms.conf" % CONFIG["TEST_DIR"]
    if CONFIG["TEST_DIR"] is None:
        CONFIG["CONFIG_PATH"] = "/usr/local/etc/cms.conf"
    cms_config = get_cms_config()

    if not config_is_usable(cms_config):
        return 1

    if CONFIG["TEST_DIR"] is not None:
        # Set up our expected environment.
        os.chdir("%(TEST_DIR)s" % CONFIG)
        os.environ["PYTHONPATH"] = "%(TEST_DIR)s" % CONFIG

        # Clear out any old coverage data.
        info("Clearing old coverage data.")
        sh("python-coverage erase")

    # Fire us up!
    start_generic_services()
    if args.contest is None:
        contest_id = create_contest()
    else:
        contest_id = int(args.contest)
    user_id = create_or_get_user(contest_id)

    start_contest(contest_id)

    # Run all of our test cases.
    passed, test_results = run_testcases(contest_id, user_id, test_list)

    # And good night!
    shutdown_services()
    combine_coverage()

    print(test_results)

    end_time = datetime.datetime.now()
    print(time_difference(start_time, end_time))

    if passed:
        return 0
    else:
        return 1
Beispiel #24
0
def drop_old_data():
    """Delete the old test database and purge the old source checkout."""
    info("Dropping any old databases called %(DB_NAME)s." % CONFIG)
    # ignore_failure: the database may simply not exist on a first run.
    sh("sudo -u postgres dropdb %(DB_NAME)s" % CONFIG, ignore_failure=True)

    info("Purging old checkout from %(TEST_DIR)s." % CONFIG)
    shutil.rmtree("%(TEST_DIR)s" % CONFIG)
Beispiel #25
0
def main():
    """Entry point for the CMS unittest runner.

    Parses command-line options, locates the git checkout, prepares the
    environment and coverage data, then runs the unit tests.

    return (int): 0 if every test passed, 1 otherwise.

    """
    parser = ArgumentParser(description="Runs the CMS unittest suite.")
    parser.add_argument(
        "-n", "--dry-run", action="store_true",
        help="show what tests would be run, but do not run them")
    parser.add_argument(
        "-v", "--verbose", action="count",
        help="print debug information (use multiple times for more)")
    parser.add_argument(
        "-r", "--retry-failed", action="store_true",
        help="only run failed tests from the previous run (stored in %s)" %
        FAILED_UNITTEST_FILENAME)

    # Unused parameters, accepted for interface compatibility with the
    # functional test runner.
    parser.add_argument(
        "regex", action="store", type=utf8_decoder, nargs='*', metavar="regex",
        help="unused")
    parser.add_argument(
        "-l", "--languages", action="store", type=utf8_decoder, default="",
        help="unused")
    parser.add_argument(
        "-c", "--contest", action="store", type=utf8_decoder,
        help="unused")

    args = parser.parse_args()

    CONFIG["VERBOSITY"] = args.verbose

    start_time = datetime.datetime.now()

    try:
        # Use a context manager for the /dev/null sink: the previous code
        # passed a bare io.open() handle that was never closed.
        with io.open(os.devnull, "wb") as devnull:
            git_root = subprocess.check_output(
                "git rev-parse --show-toplevel", shell=True,
                stderr=devnull).strip()
    except subprocess.CalledProcessError:
        print("Please run the unit tests from the git repository.")
        return 1

    if args.retry_failed:
        test_list = load_failed_tests()
    else:
        test_list = get_all_tests()

    if args.dry_run:
        for t in test_list:
            print(t[0].name, t[1])
        return 0

    if args.retry_failed:
        info("Re-running %d failed tests from last run." % len(test_list))

    # Load config from cms.conf.
    CONFIG["TEST_DIR"] = git_root
    CONFIG["CONFIG_PATH"] = "%s/config/cms.conf" % CONFIG["TEST_DIR"]
    if CONFIG["TEST_DIR"] is None:
        CONFIG["CONFIG_PATH"] = "/usr/local/etc/cms.conf"

    if CONFIG["TEST_DIR"] is not None:
        # Set up our expected environment.
        os.chdir("%(TEST_DIR)s" % CONFIG)
        os.environ["PYTHONPATH"] = "%(TEST_DIR)s" % CONFIG

        # Clear out any old coverage data.
        info("Clearing old coverage data.")
        sh("python -m coverage erase")

    # Run all of our test cases.
    passed, test_results = run_unittests(test_list)

    combine_coverage()

    print(test_results)

    end_time = datetime.datetime.now()
    print(time_difference(start_time, end_time))

    if passed:
        return 0
    else:
        return 1
Beispiel #26
0
def clear_coverage():
    """Clear existing coverage reports."""
    # Early exit when coverage collection is disabled in the config.
    if not CONFIG.get('COVERAGE', False):
        return
    logging.info("Clearing old coverage data.")
    sh([sys.executable, "-m", "coverage", "erase"])
Beispiel #27
0
def combine_coverage():
    """Combine coverage reports from different programs."""
    # Early exit when coverage collection is disabled in the config.
    if not CONFIG.get('COVERAGE', False):
        return
    logger.info("Combining coverage results.")
    sh([sys.executable, "-m", "coverage", "combine"])
Beispiel #28
0
            except:
                pass

        atexit.register(_cleanup)

    info("Testing `%(GIT_REVISION)s' in %(TEST_DIR)s" % CONFIG)

    reinitialize_everything = True

    if reinitialize_everything:
        drop_old_data()
        setup_cms()
    else:
        os.chdir("%(TEST_DIR)s" % CONFIG)
        os.environ["PYTHONPATH"] = "%(TEST_DIR)s" % CONFIG
        read_cms_config()

    # Now run the tests from the checkout.
    sh(["./cmstestsuite/RunTests.py"] + args.arguments)

    # We export the contest, import it again and re-run the tests on the
    # existing contest. Hard-coded contest indicies should be correct, as we
    # own the database.
    sh(["./cmscontrib/ContestExporter.py", "-c", "1"])
    sh(["./cmscontrib/ContestImporter.py", "dump_testcontest1.tar.gz"])
    sh(["./cmstestsuite/RunTests.py", "-c", "2"] + args.arguments)

    # Export coverage results.
    sh("python -m coverage xml --include 'cms*'")
    shutil.copyfile("coverage.xml", "%(GIT_ORIGIN)s/coverage.xml" % CONFIG)
def main():
    """Run the CMS functional test suite.

    Command-line options select the tests to run (by regex, by language,
    or only the failures recorded by the previous run), which are then
    submitted through a TestRunner.

    return (int): 0 if all selected tests passed (or nothing ran),
        1 otherwise.

    """
    parser = ArgumentParser(description="Runs the CMS functional test suite.")
    parser.add_argument("regex",
                        action="store",
                        type=utf8_decoder,
                        nargs='*',
                        metavar="regex",
                        help="a regex to match to run a subset of tests")
    parser.add_argument("-l",
                        "--languages",
                        action="store",
                        type=utf8_decoder,
                        default="",
                        help="a comma-separated list of languages to test")
    parser.add_argument("-c",
                        "--contest",
                        action="store",
                        type=utf8_decoder,
                        help="use an existing contest (and the tasks in it)")
    parser.add_argument(
        "-r",
        "--retry-failed",
        action="store_true",
        help="only run failed tests from the previous run (stored in %s)" %
        FAILED_TEST_FILENAME)
    parser.add_argument(
        "-n",
        "--dry-run",
        action="store_true",
        help="show what tests would be run, but do not run them")
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        help="print debug information (use multiple times for more)")
    args = parser.parse_args()

    CONFIG["VERBOSITY"] = args.verbose
    CONFIG["COVERAGE"] = True

    # Pre-process our command-line arguments to figure out which tests to run.
    regexes = [re.compile(s) for s in args.regex]
    if args.languages:
        languages = frozenset(args.languages.split(','))
    else:
        languages = frozenset()
    if args.retry_failed:
        test_list = load_failed_tests()
    else:
        test_list = ALL_TESTS
    test_list = filter_tests(test_list, regexes, languages)

    if not test_list:
        logger.info(
            "There are no tests to run! (was your filter too restrictive?)")
        return 0

    # Count (and, in dry-run mode, list) every test case that would run,
    # including the user-test variants.
    tests = 0
    for test in test_list:
        for language in test.languages:
            if args.dry_run:
                logger.info("Test %s in %s.", test.name, language)
            tests += 1
        if test.user_tests:
            for language in test.languages:
                if args.dry_run:
                    logger.info("Test %s in %s (for usertest).", test.name,
                                language)
                tests += 1
    if args.dry_run:
        return 0

    if args.retry_failed:
        logger.info("Re-running %s failed tests from last run.",
                    len(test_list))

    # Clear out any old coverage data.
    # Use the module logger (not the logging root) and pass sh() an
    # argument list, consistent with the rest of the file.
    logger.info("Clearing old coverage data.")
    sh([sys.executable, "-m", "coverage", "erase"])

    # Startup the test runner.
    runner = TestRunner(test_list, contest_id=args.contest, workers=4)

    # Submit and wait for all tests to complete.
    runner.submit_tests()
    failures = runner.wait_for_evaluation()
    # Record the failures so --retry-failed can pick them up next time.
    write_test_case_list([(test, lang) for test, lang, _ in failures],
                         FAILED_TEST_FILENAME)

    # And good night!
    runner.shutdown()
    runner.log_elapsed_time()
    combine_coverage()

    logger.info("Executed: %s", tests)
    logger.info("Failed: %s", len(failures))
    if not failures:
        logger.info("All tests passed!")
        return 0
    else:
        logger.error("Some test failed!")
        logger.info("Run again with --retry-failed (or -r) to retry.")
        logger.info("Failed tests:")
        for test, lang, msg in failures:
            logger.info("%s (%s): %s\n", test.name, lang, msg)
        return 1
Beispiel #30
0
def setup_cms():
    """Check out, configure and build a fresh CMS installation.

    Creates the test database, clones the repository at the configured
    revision into CONFIG["TEST_DIR"], writes a configuration pointing at
    the test database, builds the sandbox, redirects logs into the
    original checkout, and initializes the database schema.

    Relies on CONFIG being fully populated and on sudo being available
    for the postgres/isolate steps.
    """
    info("Creating database called %(DB_NAME)s accessible by %(DB_USER)s." %
         CONFIG)
    sh("sudo -u postgres createdb %(DB_NAME)s -O %(DB_USER)s" % CONFIG)

    info("Checking out code.")
    sh([
        "git", "clone", "--recursive", CONFIG["GIT_ORIGIN"], CONFIG["TEST_DIR"]
    ])
    os.chdir("%(TEST_DIR)s" % CONFIG)
    sh(["git", "checkout", CONFIG["GIT_REVISION"]])

    info("Configuring CMS.")
    configure_cms({
        "database":
        '"postgresql+psycopg2://'
        '%(DB_USER)s:%(DB_PASSWORD)s@'
        '%(DB_HOST)s/%(DB_NAME)s"' % CONFIG,
        "keep_sandbox":
        "false",
        "contest_listen_address":
        '["127.0.0.1"]',
        "admin_listen_address":
        '"127.0.0.1"',
        "min_submission_interval":
        '0',
    })

    info("Setting environment.")
    os.environ["PYTHONPATH"] = "%(TEST_DIR)s" % CONFIG

    info("Building cms.")
    sh("./setup.py build")
    # Add permission bits to isolate.
    sh("sudo chown root:root isolate/isolate")
    sh("sudo chmod 4755 isolate/isolate")

    # Ensure our logs get preserved. Point them into the checkout instead of
    # the tempdir that we blow away.
    sh(["mkdir", "-p", "%(GIT_ORIGIN)s/log" % CONFIG])
    sh(["ln", "-s", "%(GIT_ORIGIN)s/log" % CONFIG, "log"])

    info("Creating tables.")
    # Use the running interpreter (as the other coverage/sh calls in this
    # file do) rather than whatever "python" resolves to on PATH, which
    # could be a different Python version.
    sh([sys.executable, "scripts/cmsInitDB"])
Beispiel #31
0
def main():
    """Run the CMS unittest suite.

    Parses command-line options, locates the git checkout (tests must be
    run from inside it), selects the unit tests to run, executes them and
    prints a summary with the elapsed time.

    return (int): 0 if all tests passed, 1 otherwise.

    """
    parser = argparse.ArgumentParser(
        description="Runs the CMS unittest suite.")
    parser.add_argument("regex",
                        action="store",
                        type=utf8_decoder,
                        nargs='*',
                        help="a regex to match to run a subset of tests")
    parser.add_argument(
        "-n",
        "--dry-run",
        action="store_true",
        help="show what tests would be run, but do not run them")
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="print debug information (use multiple times for more)")
    parser.add_argument(
        "-r",
        "--retry-failed",
        action="store_true",
        help="only run failed tests from the previous run (stored in %s)" %
        FAILED_UNITTEST_FILENAME)

    # Unused parameters.
    parser.add_argument("-l",
                        "--languages",
                        action="store",
                        type=utf8_decoder,
                        default="",
                        help="unused")
    parser.add_argument("-c",
                        "--contest",
                        action="store",
                        type=utf8_decoder,
                        help="unused")

    args = parser.parse_args()

    CONFIG["VERBOSITY"] = args.verbose
    CONFIG["COVERAGE"] = True

    start_time = datetime.datetime.now()

    # Resolve the repository root; failure means we are not inside a git
    # checkout. DEVNULL discards git's stderr without leaking a file
    # handle (the original opened os.devnull and never closed it).
    try:
        git_root = subprocess.check_output(
            "git rev-parse --show-toplevel",
            shell=True,
            stderr=subprocess.DEVNULL).decode('utf8').strip()
    except subprocess.CalledProcessError:
        print("Please run the unit tests from the git repository.")
        return 1

    if args.retry_failed:
        test_list = load_failed_tests()
    else:
        test_list = get_all_tests()

    if args.regex:
        # Require at least one regex to match to include it in the list.
        filter_regexps = [re.compile(regex) for regex in args.regex]

        def test_match(t):
            return any(r.search(t) is not None for r in filter_regexps)

        test_list = [t for t in test_list if test_match(' '.join(t))]

    if args.dry_run:
        for t in test_list:
            print(t[0], t[1])
        return 0

    if args.retry_failed:
        logger.info("Re-running %d failed tests from last run.",
                    len(test_list))

    # Load config from cms.conf.
    # NOTE(review): git_root is always a str here (check_output raises on
    # failure), so the "is None" fallback below looks unreachable — kept
    # for safety; confirm before removing.
    CONFIG["TEST_DIR"] = git_root
    CONFIG["CONFIG_PATH"] = "%s/config/cms.conf" % CONFIG["TEST_DIR"]
    if CONFIG["TEST_DIR"] is None:
        CONFIG["CONFIG_PATH"] = "/usr/local/etc/cms.conf"

    if CONFIG["TEST_DIR"] is not None:
        # Set up our expected environment.
        os.chdir("%(TEST_DIR)s" % CONFIG)
        os.environ["PYTHONPATH"] = "%(TEST_DIR)s" % CONFIG

        # Clear out any old coverage data.
        logger.info("Clearing old coverage data.")
        sh([sys.executable, "-m", "coverage", "erase"])

    # Run all of our test cases.
    passed, test_results = run_unittests(test_list)

    combine_coverage()

    print(test_results)

    end_time = datetime.datetime.now()
    print("Time elapsed: %s" % (end_time - start_time))

    if passed:
        return 0
    else:
        return 1
Beispiel #32
0
def main():
    """Run the CMS functional test suite.

    Command-line options select the tests to run (by regex, by language,
    or only the failures recorded by the previous run), which are then
    submitted through a TestRunner.

    return (int): 0 if all selected tests passed (or nothing ran),
        1 otherwise.

    """
    parser = ArgumentParser(description="Runs the CMS functional test suite.")
    parser.add_argument(
        "regex", action="store", type=utf8_decoder, nargs='*', metavar="regex",
        help="a regex to match to run a subset of tests")
    parser.add_argument(
        "-l", "--languages", action="store", type=utf8_decoder, default="",
        help="a comma-separated list of languages to test")
    parser.add_argument(
        "-c", "--contest", action="store", type=utf8_decoder,
        help="use an existing contest (and the tasks in it)")
    parser.add_argument(
        "-r", "--retry-failed", action="store_true",
        help="only run failed tests from the previous run (stored in %s)" %
        FAILED_TEST_FILENAME)
    parser.add_argument(
        "-n", "--dry-run", action="store_true",
        help="show what tests would be run, but do not run them")
    parser.add_argument(
        "-v", "--verbose", action="count",
        help="print debug information (use multiple times for more)")
    args = parser.parse_args()

    CONFIG["VERBOSITY"] = args.verbose
    CONFIG["COVERAGE"] = True

    # Pre-process our command-line arguments to figure out which tests to run.
    regexes = [re.compile(s) for s in args.regex]
    if args.languages:
        languages = frozenset(args.languages.split(','))
    else:
        languages = frozenset()
    if args.retry_failed:
        test_list = load_failed_tests()
    else:
        test_list = ALL_TESTS
    test_list = filter_tests(test_list, regexes, languages)

    if not test_list:
        logger.info(
            "There are no tests to run! (was your filter too restrictive?)")
        return 0

    # Count (and, in dry-run mode, list) every test case that would run,
    # including the user-test variants.
    tests = 0
    for test in test_list:
        for language in test.languages:
            if args.dry_run:
                logger.info("Test %s in %s.", test.name, language)
            tests += 1
        if test.user_tests:
            for language in test.languages:
                if args.dry_run:
                    logger.info("Test %s in %s (for usertest).",
                                test.name, language)
                tests += 1
    if args.dry_run:
        return 0

    if args.retry_failed:
        logger.info(
            "Re-running %s failed tests from last run.", len(test_list))

    # Clear out any old coverage data.
    # Use the module logger (not the logging root) and pass sh() an
    # argument list, consistent with the rest of the file.
    logger.info("Clearing old coverage data.")
    sh([sys.executable, "-m", "coverage", "erase"])

    # Startup the test runner.
    runner = TestRunner(test_list, contest_id=args.contest, workers=4)

    # Submit and wait for all tests to complete.
    runner.submit_tests()
    failures = runner.wait_for_evaluation()
    # Record the failures so --retry-failed can pick them up next time.
    write_test_case_list(
        [(test, lang) for test, lang, _ in failures],
        FAILED_TEST_FILENAME)

    # And good night!
    runner.shutdown()
    runner.log_elapsed_time()
    combine_coverage()

    logger.info("Executed: %s", tests)
    logger.info("Failed: %s", len(failures))
    if not failures:
        logger.info("All tests passed!")
        return 0
    else:
        logger.error("Some test failed!")
        logger.info("Run again with --retry-failed (or -r) to retry.")
        logger.info("Failed tests:")
        for test, lang, msg in failures:
            logger.info("%s (%s): %s\n", test.name, lang, msg)
        return 1
Beispiel #33
0
    if not args.keep_working:
        def _cleanup():
            try:
                # Clean up tree.
                info("Cleaning up test directory %(TEST_DIR)s" % CONFIG)
                shutil.rmtree("%(TEST_DIR)s" % CONFIG)
            except:
                pass
        atexit.register(_cleanup)

    info("Testing `%(GIT_REVISION)s' in %(TEST_DIR)s" % CONFIG)

    reinitialize_everything = True

    if reinitialize_everything:
        drop_old_data()
        setup_cms()
    else:
        os.chdir("%(TEST_DIR)s/cms" % CONFIG)
        os.environ["PYTHONPATH"] = "%(TEST_DIR)s/cms" % CONFIG
        read_cms_config()

    # Now run the tests from the checkout.
    exec_cmd = ["./cmstestsuite/RunTests.py"] + args.arguments
    sh(exec_cmd)

    # Export coverage results.
    sh("python -m coverage xml --include 'cms*'")
    shutil.copyfile("coverage.xml", "%(GIT_ORIGIN)s/coverage.xml" % CONFIG)