Example #1
def separate_files(files):
    try:
        ct_files, at_file = __separate_ati_and_tt_files(files)
    except ValueError:
        return VALUE_ERROR, None
    return ct_files, at_file


def are_pairs_equal(first_pair: tuple, second_pair: tuple) -> bool:
    return first_pair[0] == second_pair[0] and first_pair[1] == second_pair[1]


def run_test(case: dict) -> bool:
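    # Separate the case's files and compare the (ct_files, at_file) result with the expected pair.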
    ct_files, at_file = separate_files(case[TEST_DATA.FILES.value])
    return are_pairs_equal(case[TEST_DATA.RESULT.value], (ct_files, at_file))


@pytest.mark.skipif(to_skip(current_module_level=TEST_LEVEL.PROCESSING),
                    reason=TEST_LEVEL.PROCESSING.value)
class TestFilterFiles:
    @staticmethod
    @pytest.fixture(scope="function",
                    params=[
                        two_ati_case, two_same_files_case, normal_case,
                        without_ati_case
                    ])
    def param_filter_files_test(request) -> dict:
        return request.param

    def test_filter_files(self, param_filter_files_test: dict):
        case = param_filter_files_test
        assert run_test(case)
Example #2
def get_chain_without_loops(chain: List[Tuple[Code, CodeInfo]]) -> List[Tuple[Code, CodeInfo]]:
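    # Note: 'user' is presumably defined in the part of the original test module omitted from this snippet.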
    return __remove_loops(chain, user)


def compare_chains(chain_without_loops: List[Tuple[Code, CodeInfo]],
                   expected_chain: List[Tuple[Code, CodeInfo]]) -> bool:
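    # Two chains are equal when they have the same length and matching pretty-printed code at every position.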
    if len(chain_without_loops) != len(expected_chain):
        return False
    for first, second in zip(chain_without_loops, expected_chain):
        if first[0].get_pretty_string() != second[0].get_pretty_string():
            return False
    return True


@pytest.mark.skipif(to_skip(current_module_level=TEST_LEVEL.SOLUTION_SPACE), reason=TEST_LEVEL.SOLUTION_SPACE.value)
class TestRemoveLoops:

    @staticmethod
    @pytest.fixture(scope="function",
                    params=[
                        # [source_1, source_2, source_3, source_4] -> [source_1, source_2, source_3, source_4]
                        (get_code_chain_from_sources(0, 4), get_code_chain_from_sources(0, 4)),
                        # [source_1, source_2, source_3, source_1, source_3] -> [source_1, source_3]
                        (get_code_chain_with_one_loop(), get_code_chain_from_sources(0, 1) + get_code_chain_from_sources(2, 3)),
                        # [source_1, source_2, source_3, source_1] -> [source_1]
                        (get_code_chain_with_same_start_and_end(), get_code_chain_from_sources(0, 1)),
                        # [source_1, source_2, source_3, source_4, source_5, source_3, source_1] -> [source_1]
                        (get_code_chain_with_nested_loop(), get_code_chain_from_sources(0, 1)),
                        # [source_1, source_2, source_3, source_1, source_4, source_5, source_4] -> [source_1, source_4]
                        (get_code_chain_with_several_loops(), get_code_chain_from_sources(0, 1) + get_code_chain_from_sources(3, 4)),
Example #3
import os
from typing import List

import pytest

from src.test.test_config import to_skip, TEST_LEVEL
from src.main.util.consts import TEST_DATA_PATH, CLI_PATH
from src.main.util.consts import RUNNING_TESTS_OUTPUT_DIRECTORY
from src.main.util.file_util import get_parent_folder, remove_directory
from src.main.cli.configs import PREPROCESSING_PARAMS, PREPROCESSING_LEVEL


BASE_FOLDER_NAME = 'preprocessing'
PREPROCESSING_OUTPUT_PREFIX = 'preprocessing_output'
DATA_PATH = os.path.join(TEST_DATA_PATH, 'cli', BASE_FOLDER_NAME)


@pytest.mark.skipif(to_skip(current_module_level=TEST_LEVEL.CLI), reason=TEST_LEVEL.CLI.value)
class TestPreprocessingCli:

    @staticmethod
    def __get_args(params: List[str], data_path: str = DATA_PATH) -> List[str]:
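        # Build the command line for running the preprocessing CLI on the given data path.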
        return ['python3', os.path.join(CLI_PATH, 'preprocessing.py'), data_path] + params

    @staticmethod
    @pytest.fixture(scope="function",
                    params=[
                        [],
                        [PREPROCESSING_PARAMS.LEVEL.value, str(PREPROCESSING_LEVEL.min_value())],
                        [PREPROCESSING_PARAMS.LEVEL.value, str(PREPROCESSING_LEVEL.max_value())]
                    ])
    def param_data_preprocessing(request) -> List[str]:
        return request.param
Example #4
    '        print("NO")\n' \
    'if __name__ == "__main__":\n    main()'

sources_without_empty = [
    source_0, source_1, source_2, source_3, source_4, source_5, source_6
]
sources_with_empty = sources_without_empty + [empty_source]
different_sources = [source_0, source_1, source_2, source_3, source_4]
equal_sources = [source_0, source_5, source_6]


def get_asts_from_sources(sources: List[str]) -> List[ast.AST]:
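    # Parse each source string into its AST.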
    return [ast.parse(source) for source in sources]


@pytest.mark.skipif(to_skip(current_module_level=TEST_LEVEL.CANONICALIZATION),
                    reason=TEST_LEVEL.CANONICALIZATION.value)
class TestASTsComparison:
    @pytest.mark.parametrize('ast', get_asts_from_sources(sources_with_empty))
    def test_same_ast(self, ast: ast.AST) -> None:
        assert are_asts_equal(ast, ast)

    @pytest.mark.parametrize('not_empty_ast',
                             get_asts_from_sources(sources_without_empty))
    def test_empty_ast(self, not_empty_ast: ast.AST) -> None:
        empty_ast_1 = ast.parse('')
        empty_ast_2 = ast.parse('')
        # Check that different empty asts are equal
        assert are_asts_equal(empty_ast_1, empty_ast_2)

        assert not are_asts_equal(empty_ast_1, not_empty_ast)
Example #5
import os

import pytest

from src.test.plots.util import TO_OPEN_PLOTS
from src.main.util.consts import TEST_DATA_PATH
from src.test.test_config import to_skip, TEST_LEVEL
from src.main.plots.util.consts import STATISTICS_KEY, CHART_TYPE
from src.main.plots.profile_statistics_plots import plot_profile_statistics
from src.main.statistics_gathering.statistics_gathering import get_profile_statistics

DATA_PATH = os.path.join(TEST_DATA_PATH, 'plots/profile_statistics_plots/')
STATISTICS_PATH = os.path.join(TEST_DATA_PATH, 'plots/data.csv')


# Just to check no errors are raised during plot creation
@pytest.mark.skipif(to_skip(current_module_level=TEST_LEVEL.PLOTS),
                    reason=TEST_LEVEL.PLOTS.value)
class TestProfileStatisticsPlots:
    def test_plot_creation(self) -> None:
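        # Build every chart type for the age and experience statistics; the test only checks that plotting raises no errors.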
        result_path = get_profile_statistics(DATA_PATH)
        age_statistics = [
            os.path.join(result_path, 'age.pickle'), STATISTICS_KEY.AGE
        ]
        experience_statistics = [
            os.path.join(result_path, 'programExperience.pickle'),
            STATISTICS_KEY.EXPERIENCE
        ]
        for statistics, column in [age_statistics, experience_statistics]:
            for type in CHART_TYPE:
                plot_profile_statistics(statistics,
                                        column,
Example #6
                                                                             EXPERIENCE.FROM_FOUR_TO_SIX_YEARS.value,
                                                                             DEFAULT_VALUE.EXPERIENCE.value]})

VALID_DFS = [VALID_DF, VALID_DF_WITH_DEFAULT]


VALID_DF_ALL_DEFAULT = pd.DataFrame({CODE_TRACKER_COLUMN.EXPERIENCE.value: [DEFAULT_VALUE.EXPERIENCE.value,
                                                                            DEFAULT_VALUE.EXPERIENCE.value,
                                                                            DEFAULT_VALUE.EXPERIENCE.value]})

VALID_EMPTY_DF = pd.DataFrame({CODE_TRACKER_COLUMN.EXPERIENCE.value: []})

DEFAULT_DFS = [VALID_DF_ALL_DEFAULT, VALID_EMPTY_DF]


@pytest.mark.skipif(to_skip(current_module_level=TEST_LEVEL.PREPROCESSING), reason=TEST_LEVEL.PREPROCESSING.value)
class TestExperienceColumnFilling:

    @pytest.mark.parametrize('invalid_df', INVALID_DFS)
    def test_invalid_dfs(self, invalid_df: pd.DataFrame) -> None:
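        # Filling the experience column of an invalid dataframe should mark the file as invalid for preprocessing.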
        column_value = fill_column(invalid_df, CODE_TRACKER_COLUMN.EXPERIENCE,
                                   CODE_TRACKER_COLUMN.EXPERIENCE.fits_restrictions, DEFAULT_VALUE.EXPERIENCE)
        assert INVALID_FILE_FOR_PREPROCESSING == column_value

    @pytest.mark.parametrize('valid_df', VALID_DFS)
    def test_valid_dfs(self, valid_df: pd.DataFrame) -> None:
        column_value = fill_column(valid_df, CODE_TRACKER_COLUMN.EXPERIENCE,
                                   CODE_TRACKER_COLUMN.EXPERIENCE.fits_restrictions, DEFAULT_VALUE.EXPERIENCE)
        assert EXPERIENCE.FROM_FOUR_TO_SIX_YEARS.value == column_value

    @pytest.mark.parametrize('default_df', DEFAULT_DFS)
Example #7
           [consts.TASK.PIES.value] * PIES_COUNT_3


def get_df() -> pd.DataFrame:
    return pd.DataFrame({consts.CODE_TRACKER_COLUMN.CHOSEN_TASK.value: __get_chosen_tasks()})


def crop_first_pies(df: pd.DataFrame, n: int = PIES_COUNT_1) -> pd.DataFrame:
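    # Drop the first n rows of the dataframe.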
    return df[n:]


def crop_last_pies(df: pd.DataFrame, n: int = PIES_COUNT_3) -> pd.DataFrame:
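    # Drop the last n rows of the dataframe.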
    return df[:-n]


@pytest.mark.skipif(to_skip(current_module_level=TEST_LEVEL.SPLITTING), reason=TEST_LEVEL.SPLITTING.value)
class TestStartIndexFinding:

    @staticmethod
    def find_and_check_start_indices(df: pd.DataFrame, expected_indices: List[int]) -> None:
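        # Find the start indices of the PIES task in the dataframe and compare them with the expected indices.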
        actual_indices = find_task_start_indices(df, consts.TASK.PIES)
        assert expected_indices == actual_indices

    # Finding start indices in:
    #
    #   chosenTask
    # 0     pies
    # 1     pies
    # 2     is_zero
    # 3     is_zero
    # 4     pies
Example #8
kotlin_actual_pairs = {
    SOLUTION.FULL.value: (8, 8),
    SOLUTION.PARTIAL.value: (8, 3),
    SOLUTION.WRONG.value: (8, 0),
    SOLUTION.ERROR.value: (-1, 1)
}

cpp_actual_pairs = {
    SOLUTION.FULL.value: (8, 8),
    SOLUTION.PARTIAL.value: (8, 3),
    SOLUTION.WRONG.value: (8, 0),
    SOLUTION.ERROR.value: (-1, 1)
}


@pytest.mark.skipif(to_skip(current_module_level=TEST_LEVEL.TEST_SCORING),
                    reason=TEST_LEVEL.TEST_SCORING.value)
class TestPiesTests:
    task = TASK.PIES

    @staticmethod
    @pytest.fixture(
        scope="function",
        params=[
            (python_actual_pairs, LANGUAGE.PYTHON),
            (java_actual_pairs, LANGUAGE.JAVA),
            # (kotlin_actual_pairs, LANGUAGE.KOTLIN),
            (cpp_actual_pairs, LANGUAGE.CPP)
        ],
        ids=[
            'test_python',