Example no. 1
def test_counter_chained(language: str, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "counter", language, tmp_path,
                         "chained.yaml", "solution")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["correct"] * 3
    assert len(updates.find_all("start-testcase")) == 4
Example no. 2
def test_programmed_evaluator_lotto(lang: str, tmp_path: Path, pytestconfig):
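    # Lotto exercise with a programmed Python evaluator: a single testcase that should pass.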
    conf = configuration(pytestconfig, "lotto", lang, tmp_path,
                         "one-programmed-python.tson", "correct")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert len(updates.find_all("start-testcase")) == 1
    assert updates.find_status_enum() == ["correct"]
Example no. 3
def test_io_function_exercise_haskell_io(language: str, tmp_path: Path,
                                         pytestconfig):
    conf = configuration(pytestconfig, "echo-function", language, tmp_path,
                         "one.tson", "correct_io")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["correct"]
Example no. 4
def test_missing_key_types_detected(lang: str, tmp_path: Path, pytestconfig):
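    # A plan with missing key types should be rejected before any testcase starts.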
    conf = configuration(pytestconfig, "objects", lang, tmp_path,
                         "missing_key_types.yaml", "solution")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert len(updates.find_all("start-testcase")) == 0
    assert updates.find_status_enum() == ["internal error"]
Example no. 5
def test_simple_programmed_eval_wrong(language: str, tmp_path: Path,
                                      pytestconfig):
    conf = configuration(pytestconfig, "echo", language, tmp_path,
                         "one-programmed-wrong.tson", "correct")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["wrong"]
Example no. 6
def test_programmed_evaluation(language: str, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "echo-function", language, tmp_path,
                         "programmed.tson", "correct")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["correct"] * 4
    assert len(updates.find_all("append-message")) == 4
Example no. 7
def test_named_parameters_not_supported(language, tmp_path: Path,
                                        pytestconfig):
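    # Named arguments are not supported, so the run should end with an internal error.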
    conf = configuration(pytestconfig, "echo-function", language, tmp_path,
                         "one-named.tson", "correct")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["internal error"]
Example no. 8
def test_language_evaluator_exception(lang: str, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "division", lang, tmp_path, "plan.json",
                         "wrong")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["wrong"]
    assert len(updates.find_all("append-message")) == 1
Example no. 9
def test_special_numbers(language, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "", language, tmp_path)
    plan = Plan()
    bundle = create_bundle(conf, sys.stdout, plan)
    type_map = bundle.lang_config.type_support_map()

    # Create a list of special number values for the numeric types we want to test.
    types = []
    for t, n in itertools.product([
            BasicNumericTypes.RATIONAL, AdvancedNumericTypes.DOUBLE_PRECISION,
            AdvancedNumericTypes.SINGLE_PRECISION
    ], [
            SpecialNumbers.NOT_A_NUMBER, SpecialNumbers.POS_INFINITY,
            SpecialNumbers.NEG_INFINITY
    ]):
        if type_map[t] == TypeSupport.SUPPORTED:
            types.append(NumberType(type=t, data=n))

    # Run the encode templates.
    results = run_encoder(bundle, tmp_path, types)

    assert len(results) == len(types)

    for result, expected in zip(results, types):
        actual = as_basic_type(parse_value(result))
        expected = as_basic_type(expected)
        assert expected.type == actual.type
        py_expected = to_python_comparable(expected)
        py_actual = to_python_comparable(actual)
        assert py_expected == py_actual
Example no. 10
def test_objects(language: str, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "objects", language, tmp_path,
                         "plan.tson", "correct")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["correct"] * 2
    assert len(updates.find_all("start-testcase")) == 3
Example no. 11
def test_too_much_output(tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "echo", "python", tmp_path, "two.tson",
                         "output_limit", {"output_limit": 1000})
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    # Four statuses: two for the tests, one escalation and one judgement.
    assert updates.find_status_enum() == ["output limit exceeded"] * 4
    assert len(updates.find_all("close-test")) == 2
Example no. 12
def test_heterogeneous_arguments_are_detected(lang: str, tmp_path: Path,
                                              pytestconfig):
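    # Heterogeneous argument types in the plan should be rejected before any testcase starts.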
    conf = configuration(pytestconfig, "isbn", lang, tmp_path, "full.tson",
                         "solution")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert len(updates.find_all("start-testcase")) == 0
    assert updates.find_status_enum() == ["internal error"]
Example no. 13
def test_javascript_async(exercise: str, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, exercise, "javascript", tmp_path,
                         "one.tson", "correct-async")
    workdir = Path(conf.resources).parent / "workdir"
    if workdir.exists():
        shutil.copytree(workdir, tmp_path, dirs_exist_ok=True)
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["correct"]
Example no. 14
def test_escape(language, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "", language, tmp_path)
    plan = Plan()
    bundle = create_bundle(conf, sys.stdout, plan)
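    # Serialise strings that contain quote characters.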
    assert_serialisation(
        bundle, tmp_path, StringType(type=BasicStringTypes.TEXT,
                                     data='"hallo"'))
    assert_serialisation(bundle, tmp_path,
                         StringType(type=AdvancedStringTypes.CHAR, data="'"))
Example no. 15
def test_program_params(lang: str, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "sum", lang, tmp_path, "short.tson",
                         "correct")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["correct"] * 4
    assert len(updates.find_all("start-testcase")) == 3
    assert len(updates.find_all("start-test")) == 4
Example no. 16
def test_io_function_file_exercise(language: str, tmp_path: Path,
                                   pytestconfig):
    conf = configuration(pytestconfig, "echo-function-file", language,
                         tmp_path, "one.tson", "correct")
    shutil.copytree(Path(conf.resources).parent / "workdir",
                    tmp_path,
                    dirs_exist_ok=True)
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["correct"]
Example no. 17
def test_hide_expected_wrong(language: str, tmp_path: Path, pytestconfig):
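    # With the expected value hidden, exactly one started test should have an empty expected field.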
    conf = configuration(pytestconfig, "echo", language, tmp_path,
                         "one-hide-expected.tson", "wrong")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["wrong"]
    assert len([x for x in updates.find_all("start-test")
                if not x["expected"]]) == 1
Example no. 18
def evaluator_config(tmp_path: Path, pytestconfig, options=None) -> EvaluatorConfig:
    if options is None:
        options = dict()
    conf = configuration(pytestconfig, "", "python", tmp_path)
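    # Build a minimal bundle around an empty plan to back the evaluator configuration.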
    plan = Plan()
    bundle = create_bundle(conf, sys.stdout, plan)
    return EvaluatorConfig(
        bundle=bundle,
        options=options,
        context_dir=tmp_path
    )
Example no. 19
def test_javascript_exception_missing_message(tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig,
                         "js-exceptions",
                         "javascript",
                         tmp_path,
                         "plan.yaml",
                         "wrong-message",
                         backward_compatibility_plan=True)
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["wrong"]
    assert len(updates.find_all("append-message")) == 1
Example no. 20
def test_batch_compilation_no_fallback(language: str, tmp_path: Path,
                                       pytestconfig):
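    # With fallback compilation disabled, a run error should mark every testcase as wrong.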
    config_ = {"options": {"allow_fallback": False}}
    conf = configuration(pytestconfig, "echo", language, tmp_path, "two.tson",
                         "run-error", config_)
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert len(updates.find_all("start-testcase")) == 2
    # One wrong status for each stderr and stdout channel.
    assert len(updates.find_status_enum()) >= 4
    # There could be more wrong statuses: some languages might modify the exit code.
    assert all(s == "wrong" for s in updates.find_status_enum())
Example no. 21
def test_crashing_assignment_with_before(lang: str, tmp_path: Path,
                                         pytestconfig):
    conf = configuration(pytestconfig, "isbn", lang, tmp_path,
                         f"one-with-crashing-assignment-{lang}.tson",
                         "solution")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    # Only the assignment testcase was started.
    assert len(updates.find_all("start-testcase")) == 1
    assert updates.find_status_enum() == ["wrong"]
    # Assert the exception is included.
    assert updates.find_next("start-test")["channel"] == "exception"
Example no. 22
def test_assignment_and_use_in_expression_list(lang: str, tmp_path: Path,
                                               pytestconfig):
    conf = configuration(pytestconfig, "isbn-list", lang, tmp_path,
                         "one-with-assignment.tson", "solution")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    # Assert that the empty context testcase is not shown, while the assignment
    # and expression testcases are shown.
    assert len(updates.find_all("start-testcase")) == 2
    # Assert that only one test was executed.
    assert updates.find_status_enum() == ["correct"]
    assert len(updates.find_all("start-test")) == 1
Example no. 23
def test_full_echo(lang: str, tmp_path: Path, pytestconfig):
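    # Run the full echo suite with parallel execution enabled: all 50 testcases should pass.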
    config_ = {"options": {"parallel": True}}
    conf = configuration(pytestconfig,
                         "echo",
                         lang,
                         tmp_path,
                         "full.tson",
                         "correct",
                         options=config_)
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert len(updates.find_all("start-testcase")) == 50
    assert updates.find_status_enum() == ["correct"] * 50
Example no. 24
def test_basic_types(language, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "", language, tmp_path)
    plan = Plan()
    bundle = create_bundle(conf, sys.stdout, plan)
    type_map = bundle.lang_config.type_support_map()

    # Create a list of basic types we want to test.
    types = []
    if type_map[BasicNumericTypes.INTEGER] != TypeSupport.UNSUPPORTED:
        types.append(NumberType(type=BasicNumericTypes.INTEGER, data=5))
    if type_map[BasicNumericTypes.RATIONAL] != TypeSupport.UNSUPPORTED:
        types.append(NumberType(type=BasicNumericTypes.RATIONAL, data=5.5))
    if type_map[BasicStringTypes.TEXT] != TypeSupport.UNSUPPORTED:
        types.append(StringType(type=BasicStringTypes.TEXT, data="hallo"))
    if type_map[BasicBooleanTypes.BOOLEAN] != TypeSupport.UNSUPPORTED:
        types.append(BooleanType(type=BasicBooleanTypes.BOOLEAN, data=True))
    if type_map[BasicSequenceTypes.SEQUENCE] != TypeSupport.UNSUPPORTED:
        types.append(
            SequenceType(
                type=BasicSequenceTypes.SEQUENCE,
                data=[NumberType(type=BasicNumericTypes.INTEGER, data=20)]))
    if type_map[BasicSequenceTypes.SET] != TypeSupport.UNSUPPORTED:
        types.append(
            SequenceType(
                type=BasicSequenceTypes.SET,
                data=[NumberType(type=BasicNumericTypes.INTEGER, data=20)]))
    if type_map[BasicObjectTypes.MAP] != TypeSupport.UNSUPPORTED:
        types.append(
            ObjectType(type=BasicObjectTypes.MAP,
                       data=[
                           ObjectKeyValuePair(
                               key=StringType(type=BasicStringTypes.TEXT,
                                              data="data"),
                               value=NumberType(type=BasicNumericTypes.INTEGER,
                                                data=5))
                       ]))
    if type_map[BasicNothingTypes.NOTHING] != TypeSupport.UNSUPPORTED:
        types.append(NothingType())

    # Run the encode templates.
    results = run_encoder(bundle, tmp_path, types)

    assert len(results) == len(types)

    for result, expected in zip(results, types):
        actual = as_basic_type(parse_value(result))
        assert expected.type == actual.type
        py_expected = to_python_comparable(expected)
        py_actual = to_python_comparable(actual)
        assert py_expected == py_actual
Example no. 25
def test_batch_compilation_fallback(language: str, tmp_path: Path,
                                    pytestconfig, mocker):
    lang_class = LANGUAGES[language]
    class_instance = lang_class()
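    # Wrap the compilation callback so its invocations can be counted.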
    mocker.patch.object(lang_class,
                        'compilation',
                        wraps=class_instance.compilation)
    conf = configuration(pytestconfig, "echo", language, tmp_path, "two.tson",
                         "comp-error")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert len(updates.find_all("start-testcase")) == 2
    assert updates.find_status_enum() == ["compilation error"] * 2
    assert class_instance.compilation.call_count == 3
Example no. 26
def test_context_compilation(language: str, tmp_path: Path, pytestconfig,
                             mocker):
    config_ = {"options": {"mode": "context"}}
    # Mock the compilation callback to ensure we call it for every context.
    lang_class = LANGUAGES[language]
    class_instance = lang_class()
    mocker.patch.object(lang_class,
                        'compilation',
                        wraps=class_instance.compilation)
    conf = configuration(pytestconfig, "echo", language, tmp_path, "two.tson",
                         "correct", config_)
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert len(updates.find_all("start-testcase")) == 2
    assert updates.find_status_enum() == ["correct"] * 2
    assert class_instance.compilation.call_count == 2
Example no. 27
def test_io_function_display_multiline_exercise(language: str, tmp_path: Path,
                                                pytestconfig):
    conf = configuration(pytestconfig, "echo-function", language, tmp_path,
                         "one-display-multiline.tson", "correct")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["correct"]
    start_test = updates.find_all('start-test')
    close_test = updates.find_all('close-test')
    assert 1 == len(start_test)
    assert 1 == len(close_test)
    assert "return (String)" == start_test[0].get("channel", '')
    expected = start_test[0].get("expected", '')
    actual = close_test[0].get("generated", '')
    quote = quotes[language]
    assert expected[0] != quote and expected[-1] != quote
    assert actual[0] != quote and actual[-1] != quote
Example no. 28
def test_timeout(language_and_time: Tuple[str, int], tmp_path: Path,
                 pytestconfig):
    config_ = {
        "time_limit": language_and_time[1]  # seconds
    }
    conf = configuration(pytestconfig, "echo", language_and_time[0], tmp_path,
                         "full.tson", "correct", config_)
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert len(updates.find_all("start-testcase")) == 50
    status = updates.find_status_enum()
    correct = [x for x in status if x == "correct"]
    exceeded = [x for x in status if x == "time limit exceeded"]
    wrong = [x for x in status if x == "wrong"]
    # We should have at least one good result.
    assert len(correct) >= 1
    assert len(wrong) <= 2
    assert len(exceeded) >= 1
    # One status for every testcase, plus one escalation, plus one judgement close.
    assert len(wrong + correct + exceeded) == 50 + 1 + 1
Example no. 29
def test_advanced_types(language, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "", language, tmp_path)
    plan = Plan()
    bundle = create_bundle(conf, sys.stdout, plan)
    type_map = bundle.lang_config.type_support_map()

    # Create a list of advanced types we want to test.
    types = []
    # The only advanced type that needs to be tested here is undefined.
    if type_map[AdvancedNothingTypes.UNDEFINED] == TypeSupport.SUPPORTED:
        types.append(NothingType(type=AdvancedNothingTypes.UNDEFINED))

    # Run the encode templates.
    results = run_encoder(bundle, tmp_path, types)

    assert len(results) == len(types)

    for result, expected in zip(results, types):
        actual = parse_value(result)
        assert expected.type == actual.type
        py_expected = to_python_comparable(expected)
        py_actual = to_python_comparable(actual)
        assert py_expected == py_actual
Example no. 30
def test_language_evaluator_exception(lang: str, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "division", lang, tmp_path, "plan.json",
                         "correct")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["correct"]