Example #1
def get_rate(tests_results: str, task_index: int) -> float:
    tasks = TASK.tasks()
    tests_results = unpack_tests_results(tests_results, tasks)
    if task_index >= len(tasks) or task_index >= len(tests_results):
        log_and_raise_error(
            f'Task index {task_index} is out of range for the tasks list', log)
    return tests_results[task_index]
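For reference, get_rate relies on unpack_tests_results (from task_scoring.py) to turn the serialized results string into one rate per task. Below is a minimal self-contained sketch of that behavior, assuming a '[r_1, r_2, ...]' serialization; the format and the helper name with the _sketch suffix are assumptions, not the repo's actual code:

from typing import List

def unpack_tests_results_sketch(tests_results: str, tasks: List[str]) -> List[float]:
    # Assumption: rates are serialized as '[r_1, r_2, ...]', one value per task,
    # with -1 standing for an incorrect code snapshot.
    rates = [float(r) for r in tests_results.strip('[] ').split(',')]
    assert len(rates) == len(tasks), 'expected one rate per task'
    return rates

unpack_tests_results_sketch('[-1, 0.5, 1.0]', ['a', 'b', 'c'])  # [-1.0, 0.5, 1.0]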
Example #2
def run_tests(path: str) -> str:
    log.info(f'Start running tests on path {path}')
    output_directory = get_output_directory(
        path, consts.RUNNING_TESTS_OUTPUT_DIRECTORY)

    files = get_all_file_system_items(path, ct_file_condition)
    str_len_files = str(len(files))
    log.info(f'Found {str_len_files} files to run tests on')

    files = filter_already_tested_files(files, output_directory)
    str_len_files = str(len(files))
    log.info(
        f'Found {str_len_files} files to run tests on after filtering out already tested ones'
    )

    tasks = TASK.tasks()
    in_and_out_files_dict = create_in_and_out_dict(tasks)

    for i, file in enumerate(files):
        file_log_info = f'file: {str(i + 1)}/{str_len_files}'
        log.info(f'Start running tests on {file_log_info}, {file}')
        data = pd.read_csv(file, encoding=consts.ISO_ENCODING)
        language, data = __check_tasks_on_correct_fragments(
            data, tasks, in_and_out_files_dict, file_log_info)
        log.info(f'Finish running tests on {file_log_info}, {file}')
        write_based_on_language(output_directory, file, data, language)

    return output_directory
Example #3
def __get_task_by_ct_file(file: str) -> Optional[TASK]:
    task_key = get_name_from_path(get_parent_folder(file),
                                  with_extension=False)
    try:
        return TASK(task_key)
    except ValueError:
        log.info(f'Unexpected task for the file {file}')
        return None
Example #4
def run_tests(path: str) -> str:
    """
    Run tests on all code snapshots in the data for the task.
    Note: the enum class TASK (see consts.py file)  must have the task key.
    It also must match the name of the folder with test files in the resources/tasks_tests.

    For example, if your task has key [my_key], you should add a new value into TASK const with value [my_key]
    and add a new folder [my_key] with input and output files for tests in the resources/tasks_tests folder.

    The test result is an array containing values for all tasks from the TASK enum class.
    If the code snapshot is incorrect, then the value -1 is specified.
    To deserialize this array of ratings, use the function unpack_tests_results from task_scoring.py.
    To get the rate only for the current task use the calculate_current_task_rate function from plots/scoring_solutions_plots.py

    For more details see
    https://github.com/JetBrains-Research/task-tracker-post-processing/wiki/Data-processing:-find-tests-results-for-the-tasks
    """
    log.info(f'Start running tests on path {path}')
    output_directory = get_output_directory(
        path, consts.RUNNING_TESTS_OUTPUT_DIRECTORY)

    files = get_all_file_system_items(path, tt_file_condition)
    str_len_files = str(len(files))
    log.info(f'Found {str_len_files} files to run tests on')

    files = filter_already_tested_files(files, output_directory)
    str_len_files = str(len(files))
    log.info(
        f'Found {str_len_files} files to run tests on after filtering out already tested ones'
    )

    tasks = TASK.tasks()
    in_and_out_files_dict = create_in_and_out_dict(tasks)

    for i, file in enumerate(files):
        file_log_info = f'file: {str(i + 1)}/{str_len_files}'
        log.info(f'Start running tests on {file_log_info}, {file}')
        current_task = __get_task_by_ct_file(file)
        if not current_task:
            # Skip files whose tasks are not in the TASK enum class
            continue
        data = pd.read_csv(file, encoding=consts.ISO_ENCODING)
        language, data = __check_tasks_on_correct_fragments(
            data,
            tasks,
            in_and_out_files_dict,
            file_log_info,
            current_task=current_task)
        log.info(f'Finish running tests on {file_log_info}, {file}')
        output_directory_with_user_folder = os.path.join(
            output_directory, __get_user_folder_name_from_path(file))
        write_based_on_language(output_directory_with_user_folder, file, data,
                                language)

    return output_directory
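To make the result format from the docstring concrete, here is a hedged illustration; the serialized form below is an assumption, and the authoritative parsing lives in unpack_tests_results in task_scoring.py:

# Hypothetical serialized result for one snapshot, one value per task in TASK.tasks():
tests_results = '[-1, 0.5, 1.0]'
# After unpack_tests_results(tests_results, TASK.tasks()):
#   -1        -> the snapshot for that task is incorrect
#   0.0..1.0  -> the fraction of passed tests for that task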
Example #5
def run_test_task(task: TASK, expected_pairs: Dict[SOLUTION, Tuple[int, int]], language: LANGUAGE) -> None:
    remove_compiled_files()
    in_and_out_files_dict = create_in_and_out_dict(TASK.tasks())
    for s in SOLUTION:
        code = get_source_code(task, language, s.value)
        expected_pair = expected_pairs[s.value]
        expected_rate = expected_pair[1] / expected_pair[0]
        actual_rate = get_actual_rate(task, language, code, in_and_out_files_dict)
        assert expected_rate == actual_rate, \
            f'Actual rate for task {task}, language {language}, solution {s} is wrong, code:\n{code}. ' \
            f'Expected rate = {expected_rate}. Actual rate = {actual_rate}'
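Each expected pair appears to be (number of tests, number of passed tests), since the expected rate is computed as expected_pair[1] / expected_pair[0]. A hypothetical entry, with the SOLUTION member name invented for illustration:

# Hypothetical: SOLUTION.PARTIAL is an assumed enum member; 10 tests, 7 passed.
expected_pairs = {SOLUTION.PARTIAL.value: (10, 7)}  # expected rate: 7 / 10 = 0.7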
Example #6
def is_incorrect_fragment(tests_results: str) -> bool:
    return TEST_RESULT.INCORRECT_CODE.value in unpack_tests_results(
        tests_results, TASK.tasks())
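Assuming TEST_RESULT.INCORRECT_CODE.value is the -1 sentinel described in the run_tests docstring, a fragment counts as incorrect as soon as any task's rate carries it. Hedged usage, reusing the hypothetical serialization from above:

is_incorrect_fragment('[-1, 0.5, 1.0]')   # True: one snapshot is marked incorrect
is_incorrect_fragment('[0.0, 0.5, 1.0]')  # False: every snapshot at least ran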
Example #7
def test_fragments(self, fragment_file: str) -> None:
    in_and_out_files_dict = create_in_and_out_dict(TASK.tasks())
    language = get_language_by_extension(get_extension_from_file(fragment_file))
    check_tasks(TASK.tasks(), get_content_from_file(fragment_file), in_and_out_files_dict, language, False)
Example #8
def str_to_task(self, task: str) -> TASK:
    try:
        return TASK(task)
    except ValueError:
        log_and_raise_error(f'Task value has to be one of: {TASK.tasks_values()}', self._log)
Example #9
def calculate_current_task_rate(df: pd.DataFrame) -> pd.DataFrame:
    file_name = df[FILE_NAME].unique()[0]
    current_task = TASK(get_name_from_path(file_name, False))
    return df[TESTS_RESULTS].apply(lambda x: unpack_tests_results(x, TASK.tasks())[TASK.tasks().index(current_task)])
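A hedged usage sketch for calculate_current_task_rate: the task key is recovered from the file name (without extension), and the task's index in TASK.tasks() selects one value from each unpacked results array. FILE_NAME and TESTS_RESULTS are the repo's column constants; the path and the task key 'pies' below are hypothetical:

# Illustrative only: assumes 'pies' is a member of the TASK enum.
df = pd.DataFrame({
    FILE_NAME: ['data/pies.csv'] * 2,  # file name minus extension names the task
    TESTS_RESULTS: ['[-1, 0.5, 1.0]', '[1.0, 0.5, -1]'],
})
rates = calculate_current_task_rate(df)  # one rate per row for the 'pies' task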