Example #1
def _cleanup():
    try:
        # Clean up tree.
        info("Cleaning up test directory %(TEST_DIR)s" % CONFIG)
        shutil.rmtree("%(TEST_DIR)s" % CONFIG)
    except:
        pass
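
All of these snippets format log messages and paths with Python's mapping-based "%" interpolation against a module-level CONFIG dictionary, and rely on helpers such as info() from the CMS test framework. A minimal, self-contained sketch of that pattern, with an assumed CONFIG shape (the real keys and values come from the test suite's configuration):

import shutil

# Assumed CONFIG shape; the real test suite fills these values in from its
# configuration and command-line options.
CONFIG = {"TEST_DIR": "/tmp/cms-test", "DB_NAME": "cmstestdb"}

def info(message):
    # Stand-in for the framework's info() logger.
    print("[info] %s" % message)

# "%(KEY)s" % CONFIG looks the key up in the mapping, so the same template
# works for both log messages and filesystem paths.
info("Cleaning up test directory %(TEST_DIR)s" % CONFIG)
shutil.rmtree("%(TEST_DIR)s" % CONFIG, ignore_errors=True)
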
Example #2
def get_task_id(contest_id, user_id, task_module):
    # Create a task in the contest if we haven't already.
    if task_module not in task_id_map:
        # add the task itself.
        task_id = add_task(
            contest_id=contest_id,
            **task_module.task_info)

        # add the task's test data.
        data_path = os.path.join(
            os.path.dirname(task_module.__file__),
            "data")
        for input_file, output_file, public in task_module.test_cases:
            ipath = os.path.join(data_path, input_file)
            opath = os.path.join(data_path, output_file)
            add_testcase(task_id, ipath, opath, public)

        task_id_map[task_module] = task_id

        info("Creating task %s as id %d" % (
            task_module.task_info['name'], task_id))

        # We need to restart ScoringService to ensure it has picked up the
        # new task.
        restart_service("ScoringService", contest=contest_id)
    else:
        task_id = task_id_map[task_module]

    return task_id
Example #3
def create_or_get_user(contest_id):
    global num_users
    num_users += 1

    def enumerify(x):
        if 11 <= x <= 13:
            return 'th'
        return {1: 'st', 2: 'nd', 3: 'rd'}.get(x % 10, 'th')

    username = "******" % num_users

    # Find a user that may already exist (from a previous contest).
    users = get_users(contest_id)
    user_create_args = {
        "username": username,
        "password": "******",
        "first_name": "Ms. Test",
        "last_name": "Wabbit the %d%s" % (num_users, enumerify(num_users)),
    }
    if username in users:
        user_id = users[username]['id']
        add_existing_user(contest_id, user_id, **user_create_args)
        info("Using existing user with id %d." % user_id)
    else:
        user_id = add_user(contest_id, **user_create_args)
        info("Created user with id %d." % user_id)
    return user_id
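
The nested enumerify helper turns a number into its English ordinal suffix, special-casing 11 through 13; it is what produces last names like "Wabbit the 2nd". The same logic in isolation:

def enumerify(x):
    # 11, 12 and 13 take "th" even though they end in 1, 2 and 3.
    if 11 <= x <= 13:
        return 'th'
    return {1: 'st', 2: 'nd', 3: 'rd'}.get(x % 10, 'th')

# Prints 1st, 2nd, 3rd, 4th, 11th, 12th, 13th, 21st, 22nd, 23rd.
for n in (1, 2, 3, 4, 11, 12, 13, 21, 22, 23):
    print("%d%s" % (n, enumerify(n)))
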
Example #4
def _cleanup():
    try:
        # Clean up tree.
        info("Cleaning up test directory %(TEST_DIR)s" % CONFIG)
        shutil.rmtree("%(TEST_DIR)s" % CONFIG)
    except:
        pass
Example #5
def setup_cms():
    info("Creating database called %(DB_NAME)s accessible by %(DB_USER)s." % CONFIG)
    sh("sudo -u postgres createdb %(DB_NAME)s -O %(DB_USER)s" % CONFIG)

    info("Checking out code.")
    sh("git clone %(GIT_ORIGIN)s %(TEST_DIR)s" % CONFIG)
    os.chdir("%(TEST_DIR)s/cms" % CONFIG)
    sh("git checkout %(GIT_REVISION)s" % CONFIG)

    info("Configuring CMS.")
    configure_cms(
        {
            "database": '"postgresql+psycopg2://' "%(DB_USER)s:%(DB_PASSWORD)s@" '%(DB_HOST)s/%(DB_NAME)s"' % CONFIG,
            "keep_sandbox": "false",
            "contest_listen_address": '["127.0.0.1"]',
            "admin_listen_address": '"127.0.0.1"',
            "min_submission_interval": "0",
        }
    )

    info("Setting environment.")
    os.environ["PYTHONPATH"] = "%(TEST_DIR)s/cms" % CONFIG

    info("Creating tables.")
    sh("python db/SQLAlchemyAll.py")
Example #6
def setup_cms():
    info("Creating database called %(DB_NAME)s accessible by %(DB_USER)s." %
         CONFIG)
    sh("sudo -u postgres createdb %(DB_NAME)s -O %(DB_USER)s" % CONFIG)

    info("Checking out code.")
    sh("git clone %(GIT_ORIGIN)s %(TEST_DIR)s" % CONFIG)
    os.chdir("%(TEST_DIR)s/cms" % CONFIG)
    sh("git checkout %(GIT_REVISION)s" % CONFIG)

    info("Configuring CMS.")
    configure_cms(
        {
            "database": '"postgresql+psycopg2://' \
                '%(DB_USER)s:%(DB_PASSWORD)s@' \
                '%(DB_HOST)s/%(DB_NAME)s"' % CONFIG,
            "keep_sandbox": "false",
            "contest_listen_address": '["127.0.0.1"]',
            "admin_listen_address": '"127.0.0.1"',
            "min_submission_interval": '0',
        })

    info("Setting environment.")
    os.environ["PYTHONPATH"] = "%(TEST_DIR)s/cms" % CONFIG

    info("Creating tables.")
    sh("python db/SQLAlchemyAll.py")
Example #7
def create_or_get_user(contest_id):
    global num_users
    num_users += 1

    def enumerify(x):
        if 11 <= x <= 13:
            return 'th'
        return {1: 'st', 2: 'nd', 3: 'rd'}.get(x % 10, 'th')

    username = "******" % num_users

    # Find a user that may already exist (from a previous contest).
    users = get_users(contest_id)
    user_create_args = {
        "username": username,
        "password": "******",
        "first_name": "Ms. Test",
        "last_name": "Wabbit the %d%s" % (num_users, enumerify(num_users)),
    }
    if username in users:
        user_id = users[username]['id']
        add_existing_user(contest_id, user_id, **user_create_args)
        info("Using existing user with id %d." % user_id)
    else:
        user_id = add_user(contest_id, **user_create_args)
        info("Created user with id %d." % user_id)
    return user_id
Example #8
def create_a_user(contest_id):
    global num_users
    num_users += 1

    def enumerify(x):
        if 11 <= x <= 13:
            return 'th'
        return {1: 'st', 2: 'nd', 3: 'rd'}.get(x % 10, 'th')

    info("Creating user.")
    user_id = add_user(contest_id=contest_id,
                       username="******" % num_users,
                       password="******",
                       first_name="Ms. Test",
                       last_name="Wabbit the %d%s" %
                       (num_users, enumerify(num_users)))
    return user_id
Example #9
def create_a_user(contest_id):
    global num_users
    num_users += 1

    def enumerify(x):
        if 11 <= x <= 13:
            return 'th'
        return {1: 'st', 2: 'nd', 3: 'rd'}.get(x % 10, 'th')

    info("Creating user.")
    user_id = add_user(
        contest_id=contest_id,
        username="******" % num_users,
        password="******",
        first_name="Ms. Test",
        last_name="Wabbit the %d%s" % (num_users,
                                       enumerify(num_users)))
    return user_id
Example #10
def create_contest():
    start_time = datetime.datetime.utcnow()
    stop_time = start_time + datetime.timedelta(1, 0, 0)
    contest_id = add_contest(
        name="testcontest1",
        description="A test contest #1.",
        start=start_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
        stop=stop_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
        timezone=get_system_timezone(),
        token_initial="100",
        token_max="100",
        token_total="100",
        token_min_interval="0",
        token_gen_time="0",
        token_gen_number="0",
    )

    info("Created contest %d." % contest_id)

    return contest_id
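
The contest window here is one day: datetime.timedelta(1, 0, 0) means (days, seconds, microseconds), and both endpoints are serialized with the "%Y-%m-%d %H:%M:%S.%f" format that these snippets pass to add_contest. Just the time handling, runnable on its own:

import datetime

# One-day contest window: timedelta(days, seconds, microseconds).
start_time = datetime.datetime.utcnow()
stop_time = start_time + datetime.timedelta(1, 0, 0)

# The same serialization the snippets pass to add_contest.
print(start_time.strftime("%Y-%m-%d %H:%M:%S.%f"))
print(stop_time.strftime("%Y-%m-%d %H:%M:%S.%f"))
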
Example #11
def create_contest():
    start_time = datetime.datetime.utcnow()
    stop_time = start_time + datetime.timedelta(1, 0, 0)
    contest_id = add_contest(
        name="testcontest1",
        description="A test contest #1.",
        start=start_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
        stop=stop_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
        timezone=get_system_timezone(),
        token_initial="100",
        token_max="100",
        token_total="100",
        token_min_interval="0",
        token_gen_time="0",
        token_gen_number="0",
        )

    info("Created contest %d." % contest_id)

    return contest_id
Example #12
def get_task_id(contest_id, user_id, task_module):
    # Create a task in the contest if we haven't already.
    if task_module not in task_id_map:
        # add the task itself.
        task_id = add_task(
            contest_id=contest_id,
            token_initial="100",
            token_max="100",
            token_total="100",
            token_min_interval="0",
            token_gen_time="0",
            token_gen_number="0",
            max_submission_number="100",
            max_user_test_number="100",
            min_submission_interval=None,
            min_user_test_interval=None,
            **task_module.task_info)

        # add the task's test data.
        data_path = os.path.join(
            os.path.dirname(task_module.__file__),
            "data")
        for num, (input_file, output_file, public) \
                in enumerate(task_module.test_cases):
            ipath = os.path.join(data_path, input_file)
            opath = os.path.join(data_path, output_file)
            add_testcase(task_id, str(num), ipath, opath, public)

        task_id_map[task_module] = task_id

        info("Creating task %s as id %d" % (
            task_module.task_info['name'], task_id))

        # We need to restart ScoringService to ensure it has picked up the
        # new task.
        restart_service("ScoringService", contest=contest_id)
    else:
        task_id = task_id_map[task_module]

    return task_id
Example #13
def create_contest():
    start_time = datetime.datetime.utcnow()
    stop_time = start_time + datetime.timedelta(1, 0, 0)
    contest_id = add_contest(
        name="testcontest" + str(rand),
        description="A test contest #%s." % rand,
        languages=LANGUAGES,
        start=start_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
        stop=stop_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
        timezone=get_system_timezone(),
        token_mode="finite",
        token_max_number="100",
        token_min_interval="0",
        token_gen_initial="100",
        token_gen_number="0",
        token_gen_interval="1",
        token_gen_max="100",
    )

    info("Created contest %d." % contest_id)

    return contest_id
Example #14
def create_contest():
    info("Creating contest.")
    start_time = datetime.datetime.now()
    end_time = start_time + datetime.timedelta(1, 0, 0)
    contest_id = add_contest(
        name="testcontest1",
        description="A test contest #1.",
        start=start_time.strftime("%d/%m/%Y %H:%M:%S"),
        end=end_time.strftime("%d/%m/%Y %H:%M:%S"),
        token_initial="100",
        #token_max="",
        #token_total="",
        #token_min_interval="",
        #token_gen_time="",
        #token_gen_number="",
        )

    start_service("ScoringService", contest=contest_id)
    start_service("EvaluationService", contest=contest_id)
    start_server("ContestWebServer", contest=contest_id)

    return contest_id
Example #15
def create_contest():
    info("Creating contest.")
    start_time = datetime.datetime.utcnow()
    stop_time = start_time + datetime.timedelta(1, 0, 0)
    contest_id = add_contest(
        name="testcontest1",
        description="A test contest #1.",
        start=start_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
        stop=stop_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
        timezone=get_system_timezone(),
        token_initial="100",
        token_max="100",
        token_total="100",
        token_min_interval="0",
        token_gen_time="0",
        token_gen_number="0",
        )

    start_service("ScoringService", contest=contest_id)
    start_service("EvaluationService", contest=contest_id)
    start_server("ContestWebServer", contest=contest_id)

    return contest_id
Example #16
def create_contest():
    info("Creating contest.")
    start_time = datetime.datetime.utcnow()
    stop_time = start_time + datetime.timedelta(1, 0, 0)
    contest_id = add_contest(
        name="testcontest1",
        description="A test contest #1.",
        start=start_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
        stop=stop_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
        timezone=get_system_timezone(),
        token_initial="100",
        token_max="100",
        token_total="100",
        token_min_interval="0",
        token_gen_time="0",
        token_gen_number="0",
    )

    start_service("ScoringService", contest=contest_id)
    start_service("EvaluationService", contest=contest_id)
    start_server("ContestWebServer", contest=contest_id)

    return contest_id
Example #17
def get_task_id(contest_id, user_id, task_module):
    # Create a task in the contest if we haven't already.
    if task_module not in task_id_map:
        # add the task itself.
        task_id = add_task(contest_id=contest_id,
                           token_initial="100",
                           token_max="100",
                           token_total="100",
                           token_min_interval="0",
                           token_gen_time="0",
                           token_gen_number="0",
                           max_submission_number="100",
                           max_usertest_number="100",
                           min_submission_interval="0",
                           min_usertest_interval="0",
                           **task_module.task_info)

        # add the task's test data.
        data_path = os.path.join(os.path.dirname(task_module.__file__), "data")
        for input_file, output_file, public in task_module.test_cases:
            ipath = os.path.join(data_path, input_file)
            opath = os.path.join(data_path, output_file)
            add_testcase(task_id, ipath, opath, public)

        task_id_map[task_module] = task_id

        info("Creating task %s as id %d" %
             (task_module.task_info['name'], task_id))

        # We need to restart ScoringService to ensure it has picked up the
        # new task.
        restart_service("ScoringService", contest=contest_id)
    else:
        task_id = task_id_map[task_module]

    return task_id
Example #18
def run_testcases(contest_id, user_id, test_list):
    """Run all test cases specified by the Tests module.

    contest_id and user_id must specify an already-created contest and user
    under which the tests are submitted.

    test_list should be a list of 2-tuples, each representing a test. The first
    element of each tuple is a Test object, and the second is the language for
    which it should be executed.
    """
    info("Running test cases ...")

    failures = []
    num_tests_to_execute = len(test_list)

    # For all tests...
    for i, (test, lang) in enumerate(test_list):
        # This installs the task into the contest if we haven't already.
        task_id = get_task_id(contest_id, user_id, test.task_module)

        info("Running test %d/%d: %s (%s)" % (
            i + 1, num_tests_to_execute,
            test.name, lang))

        try:
            test.run(contest_id, task_id, user_id, lang)
        except TestFailure as f:
            info("  (FAILED: %s)" % f.message)

            # Add this case to our list of failures, if we haven't already.
            failures.append((test, lang, f.message))

    results = "\n\n"
    if not failures:
        results += "================== ALL TESTS PASSED! ==================\n"
    else:
        results += "------ TESTS FAILED: ------\n"

    results += " Executed: %d\n" % num_tests_to_execute
    results += "   Failed: %d\n" % len(failures)
    results += "\n"

    for test, lang, msg in failures:
        results += " %s (%s): %s\n" % (test.name, lang, msg)

    if failures:
        write_test_case_list(
            [(test, lang) for test, lang, _ in failures],
            FAILED_TEST_FILENAME)
        results += "\n"
        results += "Failed tests stored in %s.\n" % FAILED_TEST_FILENAME
        results += "Run again with --retry-failed (or -r) to retry.\n"

    return results
Example #19
def run_testcases(contest_id, user_id, test_list):
    """Run all test cases specified by the Tests module.

    contest_id and user_id must specify an already-created contest and user
    under which the tests are submitted.

    test_list should be a list of 2-tuples, each representing a test. The first
    element of each tuple is a Test object, and the second is the language for
    which it should be executed.
    """
    info("Running test cases ...")

    failures = []
    num_tests_to_execute = len(test_list)

    # For all tests...
    for i, (test, lang) in enumerate(test_list):
        # This installs the task into the contest if we haven't already.
        task_id = get_task_id(contest_id, user_id, test.task_module)

        info("Running test %d/%d: %s (%s)" % (
            i + 1, num_tests_to_execute,
            test.name, lang))

        try:
            test.run(contest_id, task_id, user_id, lang)
        except TestFailure as f:
            info("  (FAILED: %s)" % f.message)

            # Add this case to our list of failures, if we haven't already.
            failures.append((test, lang, f.message))

    results = "\n\n"
    if not failures:
        results += "================== ALL TESTS PASSED! ==================\n"
    else:
        results += "------ TESTS FAILED: ------\n"

    results += " Executed: %d\n" % num_tests_to_execute
    results += "   Failed: %d\n" % len(failures)
    results += "\n"

    for test, lang, msg in failures:
        results += " %s (%s): %s\n" % (test.name, lang, msg)

    if failures:
        write_test_case_list(
            [(test, lang) for test, lang, _ in failures],
            FAILED_TEST_FILENAME)
        results += "\n"
        results += "Failed tests stored in %s.\n" % FAILED_TEST_FILENAME
        results += "Run again with --retry-failed (or -r) to retry.\n"

    return len(failures) == 0, results
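
Unlike Example #18, this variant returns a (passed, results) pair so the caller can turn the outcome into a process exit code. A small self-contained sketch of that calling convention, where run_suite stands in for run_testcases or run_unittests:

import sys

def run_suite():
    # Stand-in for run_testcases/run_unittests, which return (passed, report).
    failures = []
    report = "\n Executed: 0\n   Failed: %d\n" % len(failures)
    return len(failures) == 0, report

passed, report = run_suite()
print(report)
sys.exit(0 if passed else 1)
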
Example #20
def run_unittests(test_list):
    """Run all needed unit tests.

    test_list ([(string, string)]): a list of tests to run, given as
                                    (path, filename.py) pairs.
    return ((bool, string)): whether all tests passed, and a printable
                             summary of the results.
    """
    info("Running unit tests...")

    failures = []
    num_tests_to_execute = len(test_list)

    # For all tests...
    for i, (path, filename) in enumerate(test_list):
        info("Running test %d/%d: %s.%s" % (
            i + 1, num_tests_to_execute,
            path, filename))
        try:
            sh('python -m coverage run -p --source=cms %s' %
               os.path.join(path, filename))
        except FrameworkException:
            info("  (FAILED: %s)" % filename)

            # Add this case to our list of failures, if we haven't already.
            failures.append((path, filename))

    results = "\n\n"
    if not failures:
        results += "================== ALL TESTS PASSED! ==================\n"
    else:
        results += "------ TESTS FAILED: ------\n"

    results += " Executed: %d\n" % num_tests_to_execute
    results += "   Failed: %d\n" % len(failures)
    results += "\n"

    for path, filename in failures:
        results += " %s.%s\n" % (path, filename)

    if failures:
        with io.open(FAILED_UNITTEST_FILENAME,
                     "wt", encoding="utf-8") as failed_filename:
            for path, filename in failures:
                failed_filename.write("%s %s\n" % (path, filename))
        results += "\n"
        results += "Failed tests stored in %s.\n" % FAILED_UNITTEST_FILENAME
        results += "Run again with --retry-failed (or -r) to retry.\n"

    return len(failures) == 0, results
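
Each test file is run under coverage with -p (parallel mode), which writes a separate .coverage.* data file per process; combine_coverage() is expected to merge them afterwards. Assuming the standard coverage.py command line, the equivalent calls would look roughly like this (the test path is a placeholder):

import subprocess

# Run one test file under coverage in parallel mode (placeholder path),
# then merge the per-process data files and print a report.
subprocess.check_call(
    ["python", "-m", "coverage", "run", "-p", "--source=cms",
     "cmstestsuite/unit_tests/SomeTest.py"])
subprocess.check_call(["python", "-m", "coverage", "combine"])
subprocess.check_call(["python", "-m", "coverage", "report"])
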
Example #21
def run_unittests(test_list):
    """Run all needed unit tests.

    test_list ([(string, string)]): a list of tests to run, given as
                                    (path, filename.py) pairs.
    return ((bool, string)): whether all tests passed, and a printable
                             summary of the results.
    """
    info("Running unit tests...")

    failures = []
    num_tests_to_execute = len(test_list)

    # For all tests...
    for i, (path, filename) in enumerate(test_list):
        info("Running test %d/%d: %s.%s" % (
            i + 1, num_tests_to_execute,
            path, filename))
        try:
            sh('python-coverage run -p --source=cms %s' %
               os.path.join(path, filename))
        except FrameworkException:
            info("  (FAILED: %s)" % filename)

            # Add this case to our list of failures, if we haven't already.
            failures.append((path, filename))

    results = "\n\n"
    if not failures:
        results += "================== ALL TESTS PASSED! ==================\n"
    else:
        results += "------ TESTS FAILED: ------\n"

    results += " Executed: %d\n" % num_tests_to_execute
    results += "   Failed: %d\n" % len(failures)
    results += "\n"

    for path, filename in failures:
        results += " %s.%s\n" % (path, filename)

    if failures:
        with io.open(FAILED_UNITTEST_FILENAME,
                     "wt", encoding="utf-8") as failed_filename:
            for path, filename in failures:
                failed_filename.write("%s %s\n" % (path, filename))
        results += "\n"
        results += "Failed tests stored in %s.\n" % FAILED_UNITTEST_FILENAME
        results += "Run again with --retry-failed (or -r) to retry.\n"

    return len(failures) == 0, results
Example #22
def setup_cms():
    info("Creating database called %(DB_NAME)s accessible by %(DB_USER)s." %
         CONFIG)
    sh("sudo -u postgres createdb %(DB_NAME)s -O %(DB_USER)s" % CONFIG)

    info("Checking out code.")
    sh(["git", "clone", "--recursive", CONFIG["GIT_ORIGIN"],
        CONFIG["TEST_DIR"]])
    os.chdir("%(TEST_DIR)s" % CONFIG)
    sh(["git", "checkout", CONFIG["GIT_REVISION"]])

    info("Configuring CMS.")
    configure_cms(
        {"database": '"postgresql+psycopg2://'
         '%(DB_USER)s:%(DB_PASSWORD)s@'
         '%(DB_HOST)s/%(DB_NAME)s"' % CONFIG,
         "keep_sandbox": "false",
         "contest_listen_address": '["127.0.0.1"]',
         "admin_listen_address": '"127.0.0.1"',
         "min_submission_interval": '0',
         })

    info("Setting environment.")
    os.environ["PYTHONPATH"] = "%(TEST_DIR)s" % CONFIG

    info("Building cms.")
    sh("./setup.py build")
    # Add permission bits to isolate.
    sh("sudo chown root:root isolate/isolate")
    sh("sudo chmod 4755 isolate/isolate")

    # Ensure our logs get preserved. Point them into the checkout instead of
    # the tempdir that we blow away.
    sh(["mkdir", "-p", "%(GIT_ORIGIN)s/log" % CONFIG])
    sh(["ln", "-s", "%(GIT_ORIGIN)s/log" % CONFIG, "log"])

    info("Creating tables.")
    sh("python scripts/cmsInitDB")
Example #23
def main():
    parser = ArgumentParser(description="Runs the CMS functional test suite.")
    parser.add_argument("regex",
                        action="store",
                        type=utf8_decoder,
                        nargs='*',
                        metavar="regex",
                        help="a regex to match to run a subset of tests")
    parser.add_argument("-l",
                        "--languages",
                        action="store",
                        type=utf8_decoder,
                        default="",
                        help="a comma-separated list of languages to test")
    parser.add_argument("-c",
                        "--contest",
                        action="store",
                        type=utf8_decoder,
                        help="use an existing contest (and the tasks in it)")
    parser.add_argument(
        "-r",
        "--retry-failed",
        action="store_true",
        help="only run failed tests from the previous run (stored in %s)" %
        FAILED_TEST_FILENAME)
    parser.add_argument(
        "-n",
        "--dry-run",
        action="store_true",
        help="show what tests would be run, but do not run them")
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        help="print debug information (use multiple times for more)")
    args = parser.parse_args()

    CONFIG["VERBOSITY"] = args.verbose

    start_time = datetime.datetime.now()

    # Pre-process our command-line arguments to figure out which tests to run.
    regexes = [re.compile(s) for s in args.regex]
    if args.languages:
        languages = frozenset(args.languages.split(','))
    else:
        languages = frozenset()
    if args.retry_failed:
        test_list = load_failed_tests()
    else:
        test_list = get_all_tests()
    test_list = filter_testcases(test_list, regexes, languages)

    if not test_list:
        info("There are no tests to run! (was your filter too restrictive?)")
        return 0

    if args.dry_run:
        for t in test_list:
            print(t[0].name, t[1])
        return 0

    if args.retry_failed:
        info("Re-running %d failed tests from last run." % len(test_list))

    # Load config from cms.conf.
    try:
        git_root = subprocess.check_output("git rev-parse --show-toplevel",
                                           shell=True,
                                           stderr=io.open(os.devnull,
                                                          "wb")).strip()
    except subprocess.CalledProcessError:
        git_root = None
    CONFIG["TEST_DIR"] = git_root
    CONFIG["CONFIG_PATH"] = "%s/config/cms.conf" % CONFIG["TEST_DIR"]
    if CONFIG["TEST_DIR"] is None:
        CONFIG["CONFIG_PATH"] = "/usr/local/etc/cms.conf"
    cms_config = get_cms_config()

    if not config_is_usable(cms_config):
        return 1

    if CONFIG["TEST_DIR"] is not None:
        # Set up our expected environment.
        os.chdir("%(TEST_DIR)s" % CONFIG)
        os.environ["PYTHONPATH"] = "%(TEST_DIR)s" % CONFIG

        # Clear out any old coverage data.
        info("Clearing old coverage data.")
        sh("python-coverage erase")

    # Fire us up!
    start_generic_services()
    if args.contest is None:
        contest_id = create_contest()
    else:
        contest_id = int(args.contest)
    user_id = create_or_get_user(contest_id)

    start_contest(contest_id)

    # Run all of our test cases.
    passed, test_results = run_testcases(contest_id, user_id, test_list)

    # And good night!
    shutdown_services()
    combine_coverage()

    print(test_results)

    end_time = datetime.datetime.now()
    print(time_difference(start_time, end_time))

    if passed:
        return 0
    else:
        return 1
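
main() locates the repository root with git rev-parse --show-toplevel, silencing stderr and falling back to the installed /usr/local/etc/cms.conf when the command fails. The detection step in isolation; note that check_output() returns bytes on Python 3, so the result may need decoding before it is spliced into paths:

import io
import os
import subprocess

try:
    # --show-toplevel prints the absolute path of the working tree's root.
    git_root = subprocess.check_output(
        "git rev-parse --show-toplevel", shell=True,
        stderr=io.open(os.devnull, "wb")).strip()
except subprocess.CalledProcessError:
    git_root = None

if git_root is None:
    print("not inside a git checkout")
else:
    # Decode the bytes before using the path in string formatting.
    print(git_root.decode("utf-8"))
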
Example #24
def get_task_id(contest_id, user_id, task_module):
    name = task_module.task_info['name']

    # Have we done this before? Pull it out of our cache if so.
    if name in task_id_map:
        # Ensure we don't have multiple modules with the same task name.
        assert task_id_map[name][1] == task_module

        return task_id_map[name][0]

    task_create_args = {
        "token_mode": "finite",
        "token_max_number": "100",
        "token_min_interval": "0",
        "token_gen_initial": "100",
        "token_gen_number": "0",
        "token_gen_interval": "1",
        "token_gen_max": "100",
        "max_submission_number": "100",
        "max_user_test_number": "100",
        "min_submission_interval": None,
        "min_user_test_interval": None,
    }
    task_create_args.update(task_module.task_info)

    # Find if the task already exists in the contest.
    tasks = get_tasks(contest_id)
    if name in tasks:
        # Then just use the existing one.
        task = tasks[name]
        task_id = task['id']
        task_id_map[name] = (task_id, task_module)
        add_existing_task(contest_id, task_id, **task_create_args)
        return task_id

    # Otherwise, we need to add the task ourselves.
    task_id = add_task(contest_id, **task_create_args)

    # add any managers
    code_path = os.path.join(os.path.dirname(task_module.__file__), "code")
    if hasattr(task_module, 'managers'):
        for manager in task_module.managers:
            mpath = os.path.join(code_path, manager)
            add_manager(task_id, mpath)

    # add the task's test data.
    data_path = os.path.join(os.path.dirname(task_module.__file__), "data")
    for num, (input_file, output_file, public) \
            in enumerate(task_module.test_cases):
        ipath = os.path.join(data_path, input_file)
        opath = os.path.join(data_path, output_file)
        add_testcase(task_id, num, ipath, opath, public)

    task_id_map[name] = (task_id, task_module)

    info("Created task %s as id %d" % (name, task_id))

    # We need to restart ProxyService to ensure it reinitializes,
    # picking up the new task and sending it to RWS.
    restart_service("ProxyService", contest=contest_id)

    return task_id
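
In this variant the cache is keyed by task name and stores a (task_id, task_module) pair, so reusing a task name from a different module trips the assertion. A small self-contained illustration of that caching pattern, with TaskModule standing in for the real task modules:

task_id_map = {}
next_id = [0]

class TaskModule(object):
    # Stand-in for the real task modules, which expose a task_info dict.
    def __init__(self, name):
        self.task_info = {"name": name}

def get_task_id(task_module):
    name = task_module.task_info["name"]
    if name in task_id_map:
        # Two different modules must not share one task name.
        assert task_id_map[name][1] == task_module
        return task_id_map[name][0]
    next_id[0] += 1
    task_id_map[name] = (next_id[0], task_module)
    return next_id[0]

batch = TaskModule("batch")
print(get_task_id(batch), get_task_id(batch))  # the same id both times
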
Example #25
def setup_cms():
    info("Creating database called %(DB_NAME)s accessible by %(DB_USER)s." %
         CONFIG)
    sh("sudo -u postgres createdb %(DB_NAME)s -O %(DB_USER)s" % CONFIG)

    info("Checking out code.")
    sh([
        "git", "clone", "--recursive", CONFIG["GIT_ORIGIN"], CONFIG["TEST_DIR"]
    ])
    os.chdir("%(TEST_DIR)s" % CONFIG)
    sh(["git", "checkout", CONFIG["GIT_REVISION"]])

    info("Configuring CMS.")
    configure_cms({
        "database":
        '"postgresql+psycopg2://'
        '%(DB_USER)s:%(DB_PASSWORD)s@'
        '%(DB_HOST)s/%(DB_NAME)s"' % CONFIG,
        "keep_sandbox":
        "false",
        "contest_listen_address":
        '["127.0.0.1"]',
        "admin_listen_address":
        '"127.0.0.1"',
        "min_submission_interval":
        '0',
    })

    info("Setting environment.")
    os.environ["PYTHONPATH"] = "%(TEST_DIR)s" % CONFIG

    info("Building cms.")
    sh("./setup.py build")
    # Add permission bits to isolate.
    sh("sudo chown root:root isolate/isolate")
    sh("sudo chmod 4755 isolate/isolate")

    # Ensure our logs get preserved. Point them into the checkout instead of
    # the tempdir that we blow away.
    sh(["mkdir", "-p", "%(GIT_ORIGIN)s/log" % CONFIG])
    sh(["ln", "-s", "%(GIT_ORIGIN)s/log" % CONFIG, "log"])

    info("Creating tables.")
    sh("python scripts/cmsInitDB")
Example #26
def drop_old_data():
    info("Dropping any old databases called %(DB_NAME)s." % CONFIG)
    sh("sudo -u postgres dropdb %(DB_NAME)s" % CONFIG, ignore_failure=True)

    info("Purging old checkout from %(TEST_DIR)s." % CONFIG)
    shutil.rmtree("%(TEST_DIR)s" % CONFIG)
Example #27
    else:
        CONFIG["GIT_REVISION"] = args.revision

    if not args.keep_working:

        def _cleanup():
            try:
                # Clean up tree.
                info("Cleaning up test directory %(TEST_DIR)s" % CONFIG)
                shutil.rmtree("%(TEST_DIR)s" % CONFIG)
            except:
                pass

        atexit.register(_cleanup)

    info("Testing `%(GIT_REVISION)s' in %(TEST_DIR)s" % CONFIG)

    reinitialize_everything = True

    if reinitialize_everything:
        drop_old_data()
        setup_cms()
    else:
        os.chdir("%(TEST_DIR)s" % CONFIG)
        os.environ["PYTHONPATH"] = "%(TEST_DIR)s" % CONFIG
        read_cms_config()

    # Now run the tests from the checkout.
    sh(["./cmstestsuite/RunTests.py"] + args.arguments)

    # We export the contest, import it again and re-run the tests on the
Example #28
def main():
    parser = ArgumentParser(description="Runs the CMS unittest suite.")
    parser.add_argument(
        "-n", "--dry-run", action="store_true",
        help="show what tests would be run, but do not run them")
    parser.add_argument(
        "-v", "--verbose", action="count",
        help="print debug information (use multiple times for more)")
    parser.add_argument(
        "-r", "--retry-failed", action="store_true",
        help="only run failed tests from the previous run (stored in %s)" %
        FAILED_UNITTEST_FILENAME)

    # Unused parameters.
    parser.add_argument(
        "regex", action="store", type=utf8_decoder, nargs='*', metavar="regex",
        help="unused")
    parser.add_argument(
        "-l", "--languages", action="store", type=utf8_decoder, default="",
        help="unused")
    parser.add_argument(
        "-c", "--contest", action="store", type=utf8_decoder,
        help="unused")

    args = parser.parse_args()

    CONFIG["VERBOSITY"] = args.verbose

    start_time = datetime.datetime.now()

    try:
        git_root = subprocess.check_output(
            "git rev-parse --show-toplevel", shell=True,
            stderr=io.open(os.devnull, "wb")).strip()
    except subprocess.CalledProcessError:
        print("Please run the unit tests from the git repository.")
        return 1

    if args.retry_failed:
        test_list = load_failed_tests()
    else:
        test_list = get_all_tests()

    if args.dry_run:
        for t in test_list:
            print(t[0].name, t[1])
        return 0

    if args.retry_failed:
        info("Re-running %d failed tests from last run." % len(test_list))

    # Load config from cms.conf.
    CONFIG["TEST_DIR"] = git_root
    CONFIG["CONFIG_PATH"] = "%s/config/cms.conf" % CONFIG["TEST_DIR"]
    if CONFIG["TEST_DIR"] is None:
        CONFIG["CONFIG_PATH"] = "/usr/local/etc/cms.conf"

    if CONFIG["TEST_DIR"] is not None:
        # Set up our expected environment.
        os.chdir("%(TEST_DIR)s" % CONFIG)
        os.environ["PYTHONPATH"] = "%(TEST_DIR)s" % CONFIG

        # Clear out any old coverage data.
        info("Clearing old coverage data.")
        sh("python -m coverage erase")

    # Run all of our test cases.
    passed, test_results = run_unittests(test_list)

    combine_coverage()

    print(test_results)

    end_time = datetime.datetime.now()
    print(time_difference(start_time, end_time))

    if passed:
        return 0
    else:
        return 1
Example #29
        CONFIG["GIT_REVISION"] = \
            subprocess.check_output("git rev-parse HEAD", shell=True).strip()
    else:
        CONFIG["GIT_REVISION"] = args.revision

    if not args.keep_working:
        def _cleanup():
            try:
                # Clean up tree.
                info("Cleaning up test directory %(TEST_DIR)s" % CONFIG)
                shutil.rmtree("%(TEST_DIR)s" % CONFIG)
            except:
                pass
        atexit.register(_cleanup)

    info("Testing `%(GIT_REVISION)s' in %(TEST_DIR)s" % CONFIG)

    reinitialize_everything = True

    if reinitialize_everything:
        drop_old_data()
        setup_cms()
    else:
        os.chdir("%(TEST_DIR)s" % CONFIG)
        os.environ["PYTHONPATH"] = "%(TEST_DIR)s" % CONFIG
        read_cms_config()

    # Now run the tests from the checkout.
    sh(["./cmstestsuite/RunTests.py"] + args.arguments)

    # We export the contest, import it again and re-run the tests on the
Example #30
def main():
    parser = ArgumentParser(description="Runs the CMS unittest suite.")
    parser.add_argument(
        "-n",
        "--dry-run",
        action="store_true",
        help="show what tests would be run, but do not run them")
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        help="print debug information (use multiple times for more)")
    parser.add_argument(
        "-r",
        "--retry-failed",
        action="store_true",
        help="only run failed tests from the previous run (stored in %s)" %
        FAILED_UNITTEST_FILENAME)

    # Unused parameters.
    parser.add_argument("regex",
                        action="store",
                        type=utf8_decoder,
                        nargs='*',
                        metavar="regex",
                        help="unused")
    parser.add_argument("-l",
                        "--languages",
                        action="store",
                        type=utf8_decoder,
                        default="",
                        help="unused")
    parser.add_argument("-c",
                        "--contest",
                        action="store",
                        type=utf8_decoder,
                        help="unused")

    args = parser.parse_args()

    CONFIG["VERBOSITY"] = args.verbose
    CONFIG["COVERAGE"] = True

    start_time = datetime.datetime.now()

    try:
        git_root = subprocess.check_output("git rev-parse --show-toplevel",
                                           shell=True,
                                           stderr=io.open(os.devnull,
                                                          "wb")).strip()
    except subprocess.CalledProcessError:
        print("Please run the unit tests from the git repository.")
        return 1

    if args.retry_failed:
        test_list = load_failed_tests()
    else:
        test_list = get_all_tests()

    if args.dry_run:
        for t in test_list:
            print(t[0].name, t[1])
        return 0

    if args.retry_failed:
        info("Re-running %d failed tests from last run." % len(test_list))

    # Load config from cms.conf.
    CONFIG["TEST_DIR"] = git_root
    CONFIG["CONFIG_PATH"] = "%s/config/cms.conf" % CONFIG["TEST_DIR"]
    if CONFIG["TEST_DIR"] is None:
        CONFIG["CONFIG_PATH"] = "/usr/local/etc/cms.conf"

    if CONFIG["TEST_DIR"] is not None:
        # Set up our expected environment.
        os.chdir("%(TEST_DIR)s" % CONFIG)
        os.environ["PYTHONPATH"] = "%(TEST_DIR)s" % CONFIG

        # Clear out any old coverage data.
        info("Clearing old coverage data.")
        sh("python -m coverage erase")

    # Run all of our test cases.
    passed, test_results = run_unittests(test_list)

    combine_coverage()

    print(test_results)

    end_time = datetime.datetime.now()
    print(time_difference(start_time, end_time))

    if passed:
        return 0
    else:
        return 1
Example #31
def drop_old_data():
    info("Dropping any old databases called %(DB_NAME)s." % CONFIG)
    sh("sudo -u postgres dropdb %(DB_NAME)s" % CONFIG, ignore_failure=True)

    info("Purging old checkout from %(TEST_DIR)s." % CONFIG)
    shutil.rmtree("%(TEST_DIR)s" % CONFIG)
Example #32
def main():
    parser = ArgumentParser(description="Runs the CMS test suite.")
    parser.add_argument("regex", metavar="regex",
        type=str, nargs='*',
        help="a regex to match to run a subset of tests")
    parser.add_argument("-l", "--languages",
        type=str, action="store", default="",
        help="a comma-separated list of languages to test")
    parser.add_argument("-r", "--retry-failed", action="store_true",
        help="only run failed tests from the previous run (stored in %s)" %
        FAILED_TEST_FILENAME)
    parser.add_argument("-v", "--verbose", action="count",
        help="print debug information (use multiple times for more)")
    args = parser.parse_args()

    CONFIG["VERBOSITY"] = args.verbose

    start_time = datetime.datetime.now()

    # Pre-process our command-line arguments to figure out which tests to run.
    regexes = [re.compile(s) for s in args.regex]
    if args.languages:
        languages = frozenset(args.languages.split(','))
    else:
        languages = frozenset()
    if args.retry_failed:
        test_list = load_failed_tests()
    else:
        test_list = get_all_tests()
    test_list = filter_testcases(test_list, regexes, languages)

    if not test_list:
        info("There are no tests to run! (was your filter too restrictive?)")
        return 0

    if args.retry_failed:
        info("Re-running %d failed tests from last run." % len(test_list))

    # Load config from cms.conf.
    try:
        git_root = subprocess.check_output(
            "git rev-parse --show-toplevel", shell=True,
            stderr=open(os.devnull, "w")).strip()
    except subprocess.CalledProcessError:
        git_root = None
    CONFIG["TEST_DIR"] = git_root
    CONFIG["CONFIG_PATH"] = "%s/examples/cms.conf" % CONFIG["TEST_DIR"]
    if CONFIG["TEST_DIR"] is None:
        CONFIG["CONFIG_PATH"] = "/usr/local/etc/cms.conf"
    cms_config = get_cms_config()

    if not config_is_usable(cms_config):
        return 1

    if CONFIG["TEST_DIR"] is not None:
        # Set up our expected environment.
        os.chdir("%(TEST_DIR)s" % CONFIG)
        os.environ["PYTHONPATH"] = "%(TEST_DIR)s" % CONFIG

        # Clear out any old coverage data.
        info("Clearing old coverage data.")
        sh("python-coverage erase")

    # Fire us up!
    start_generic_services()
    contest_id = create_contest()
    user_id = create_a_user(contest_id)

    # Run all of our test cases.
    test_results = run_testcases(contest_id, user_id, test_list)

    # And good night!
    shutdown_services()
    combine_coverage()

    print(test_results)

    end_time = datetime.datetime.now()
    print(time_difference(start_time, end_time))
Example #33
def get_task_id(contest_id, user_id, task_module):
    name = task_module.task_info['name']

    # Have we done this before? Pull it out of our cache if so.
    if name in task_id_map:
        # Ensure we don't have multiple modules with the same task name.
        assert task_id_map[name][1] == task_module

        return task_id_map[name][0]

    task_create_args = {
        "token_mode": "finite",
        "token_max_number": "100",
        "token_min_interval": "0",
        "token_gen_initial": "100",
        "token_gen_number": "0",
        "token_gen_interval": "1",
        "token_gen_max": "100",
        "max_submission_number": "100",
        "max_user_test_number": "100",
        "min_submission_interval": None,
        "min_user_test_interval": None,
    }
    task_create_args.update(task_module.task_info)

    # Find if the task already exists in the contest.
    tasks = get_tasks(contest_id)
    if name in tasks:
        # Then just use the existing one.
        task = tasks[name]
        task_id = task['id']
        task_id_map[name] = (task_id, task_module)
        add_existing_task(contest_id, task_id, **task_create_args)
        return task_id

    # Otherwise, we need to add the task ourselves.
    task_id = add_task(contest_id, **task_create_args)

    # add any managers
    code_path = os.path.join(
        os.path.dirname(task_module.__file__),
        "code")
    if hasattr(task_module, 'managers'):
        for manager in task_module.managers:
            mpath = os.path.join(code_path, manager)
            add_manager(task_id, mpath)

    # add the task's test data.
    data_path = os.path.join(
        os.path.dirname(task_module.__file__),
        "data")
    for num, (input_file, output_file, public) \
            in enumerate(task_module.test_cases):
        ipath = os.path.join(data_path, input_file)
        opath = os.path.join(data_path, output_file)
        add_testcase(task_id, num, ipath, opath, public)

    task_id_map[name] = (task_id, task_module)

    info("Created task %s as id %d" % (name, task_id))

    # We need to restart ProxyService to ensure it reinitializes,
    # picking up the new task and sending it to RWS.
    restart_service("ProxyService", contest=contest_id)

    return task_id