Example #1
def get_submission_file_path(submission_dir: Path, main_file_name=None):
    """
    Find the student's main submission file and determine its language from the file's extension.
    Also checks that all of the student's files are readable.

    :param submission_dir: Directory with the student's submission
    :param main_file_name: Name of the main file from the rubric.
    :return: path to the student's main submission file.
    """
    submission_files = []
    if not submission_dir.is_dir():
        raise GspackFailure(f"Not a directory: {submission_dir}")

    for f in os.listdir(submission_dir):
        platform = determine_platform(submission_dir / f)
        if platform is not None:
            try:
                # we don't do anything with the output because at this point we just want to check
                # that the file is readable.
                with open(submission_dir / f, 'r') as f2:
                    _ = f2.read()
            except Exception as e:
                raise GspackFailure(
                    f"Gradescope is unable to read your file: \n {str(e)} \n" +
                    f"This might happen if your file is damaged or improperly encoded (not in UTF-8)"
                )
            submission_files.append(submission_dir / f)

    if len(submission_files) == 0:
        raise GspackFailure(
            "No student solution files found. Check that you submitted either .m files or .py files."
        )

    main_file = None
    if main_file_name is not None:
        # Checks whether one and only one file matches the main file's name from the rubric
        # in the submission's directory
        for file in submission_files:
            if file.stem == main_file_name:
                if main_file is not None:
                    raise GspackFailure(
                        f"More than one file matches the main file's name" +
                        f" ({main_file_name}): {main_file} and {file}")
                main_file = file

        if main_file is None:
            raise UserFailure(
                f"File with the name {main_file_name} is not found. Check that you named your "
                + f"main file properly.")
    else:
        # If no `main_file_name` is provided then we only check that there is
        # exactly one file in the submission directory
        if len(submission_files) > 1:
            raise GspackFailure(
                "You should have submitted one file, but you submitted many: \n "
                + "\n".join([str(f) for f in submission_files]))
        main_file = submission_files[0]
    return main_file
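A minimal usage sketch (the submission path and main file name are hypothetical; assumes the gspack exception types used above are importable):

# Hypothetical usage: locate the main file of a student's submission.
from pathlib import Path

submission_dir = Path("/autograder/submission")  # hypothetical location
try:
    main_file = get_submission_file_path(submission_dir, main_file_name="hw1")
    print(f"Main file: {main_file}")
except (UserFailure, GspackFailure) as e:
    print(f"Grading aborted: {e}")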
Example #2
    def execute(self, file_path: Path, platform=None):
        """
        Executes the script file.

        :param file_path: path to the script file
        :param platform: language (platform) of this file
        :return: tuple: (platform, dictionary with values from the script's namespace).
        """
        if not os.path.exists(file_path):
            raise UserFailure(f"File does not exist: {file_path}")
        if platform is None:
            platform = determine_platform(file_path)
        if platform is None:
            raise UserFailure(
                f"Can't recognize the language platform for the file {file_path}"
            )
        my_dir = os.getcwd()
        os.chdir(file_path.parent)
        try:
            if platform == "matlab":
                output = self.execute_matlab(file_path)
            elif platform == "jupyter":
                output = self.execute_jupyter(file_path)
            elif platform == "python":
                output = self.execute_python(file_path)
            else:
                raise GspackFailure(f"Unrecognized platform: {platform}")
        finally:
            os.chdir(my_dir)
        if self.verbose:
            print(f"Found and executed successfully: \n-> {file_path}")
        return platform, output
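A short sketch of a call to this method; `runner` stands in for an instance of the surrounding executor class and is a hypothetical name:

# Hypothetical usage: execute a submission and inspect its namespace.
from pathlib import Path

platform, variables = runner.execute(Path("solution.py"))
print(platform)           # e.g. "python"
print(sorted(variables))  # names pulled from the script's namespace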
Example #3
def execute_matlab(file_path: Path, matlab_config: dict):
    """
    Executes a MATLAB solution script and returns variables from its namespace.

    :param file_path: Path to the script
    :param matlab_config: Dictionary with additional parameters:

        - `variables_to_take`: list of variables' names which should be pulled from MATLAB's namespace

    :return: Dictionary "name" - "value" for variables listed in matlab_config["variables_to_take"]
    """
    try:
        # Launch the MATLAB engine
        eng = matlab.engine.start_matlab()
    except Exception as e:
        raise GspackFailure(
            f"MATLAB Engine failed to start with the following error: \n {e}.")
    try:
        # Execute the MATLAB script by calling it as an engine method
        # (equivalent to eval(f"eng.{file_path.stem}(nargout=0)"), but without eval).
        getattr(eng, file_path.stem)(nargout=0)
    except Exception as e:
        eng.quit()
        raise UserFailure(
            f"Exception occurred while executing your code: \n {str(e)}")
    try:
        # Pull variables from the MATLAB workspace `eng.workspace` into the `workspace` dict.
        # The list of variables' names must be given explicitly because the MATLAB engine
        # and all its components die once the execution leaves this scope,
        # so `return eng.workspace` would not work.
        workspace = {}
        variables_to_take = matlab_config.get("variables_to_take", ())
        for name in variables_to_take:
            item = get_from_workspace(eng.workspace, name)
            if item is not None:
                workspace[name] = matlab2python(item)
        return workspace
    except Exception as e:
        raise GspackFailure(
            f"Failure while exporting data from MATLAB environment: \n {str(e)}"
        )
    finally:
        # Shut the engine down whether or not the export succeeded.
        eng.quit()
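A sketch of a direct call, assuming the MATLAB Engine for Python is installed and solution.m defines the requested variables (the file name and variable names are hypothetical):

# Hypothetical usage: pull two variables out of a MATLAB script's workspace.
from pathlib import Path

matlab_config = {"variables_to_take": ["A", "err"]}
workspace = execute_matlab(Path("solution.m"), matlab_config)
print(workspace.get("A"), workspace.get("err"))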
Example #4
    def fetch_values_for_tests(self, variables: dict):
        """
        Walks through the rubric's test suite and saves the corresponding values from `variables` to the rubric.

        :param variables: Dictionary of the variables.
        :return: None if successful, otherwise raises an error
        """
        if self.test_suite is None:
            raise GspackFailure(
                "Rubric was not initialized properly: test_suite is None.")
        self.test_suite_values = {}
        for test in self.test_suite:
            test_value = variables.get(test["variable_name"], None)
            if test_value is None:
                raise UserFailure(
                    f"{test['test_name']}: variable {test['variable_name']} is set to be checked"
                    f" but it is not defined after the solution finishes its execution."
                )
            self.test_suite_values[test["variable_name"]] = test_value
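A sketch, assuming `rubric` is an initialized Rubric whose test_suite entries name the variables below (the values dictionary is hypothetical):

# Hypothetical usage: attach the true values to the rubric's tests.
variables = {"x_star": 0.739085, "iterations": 23}  # from executing the solution
rubric.fetch_values_for_tests(variables)
print(rubric.test_suite_values)  # only variables named in test_suite are saved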
Example #5
def reduce_type(a):
    """
    Attempts to simplify the type of a: brings all numbers and single-element matrices to Python floats,
    and all lists, sets, and NumPy arrays of any type to NumPy arrays of floats, if possible.

    Meant to make 3, 3.0 + 1e-16, and np.array([3], dtype=np.double) all reduce to just 3.0.

    :param a: variable whose type needs to be simplified.
    :return: a with a possibly converted type.
    """
    if isinstance(a, numbers.Number):
        return float(a)
    elif isinstance(a, np.ndarray) and a.flatten().shape == (1,):
        return float(a.flatten()[0])
    elif isinstance(a, (np.ndarray, list, set)):
        try:
            res = np.array(a, dtype=float)
        except Exception as e:
            raise GspackFailure(
                f"Conversion error to numpy array: {e}. \n Object: {a}")
        return res
    else:
        return a
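A short sketch of the normalization this function performs, following the docstring above:

# Expected behavior of reduce_type on a few inputs:
import numpy as np

print(reduce_type(3))                # 3.0 (Python float)
print(reduce_type(np.array([3.0])))  # 3.0 (single-element array -> float)
print(reduce_type([1, 2, 3]))        # array([1., 2., 3.])
print(reduce_type("text"))           # 'text' (left unchanged)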
Example #6
def create_archive(archive_path: Path,
                   rubric: Rubric,
                   platform: str,
                   verbose=False):
    """
    Creates a Gradescope autograder archive (autograder.zip).

    :param archive_path: path where archive should be saved.
    :param rubric: rubric with attached true values.
    :param platform: which language was used for writing the solution file
    :param verbose: whether to print detailed outputs.
    :return: True on success; saves the archive to archive_path or raises an error.
    """
    if verbose:
        print("Generating the archive:")
    program_dir = Path(os.path.dirname(__file__))
    archive_dir = archive_path.parent.absolute()
    try:
        # This function puts all the files for the archive into a newly created DIST_DIR
        # and deletes that directory once it's packed into autograder.zip.
        os.mkdir(archive_dir / DIST_DIR)

        # Copy necessary files (setup.sh, run_autograder). See AUTOGRADER_ARCHIVE_FILES.
        for file in AUTOGRADER_ARCHIVE_FILES:
            full_path = program_dir / TEMPLATES_DIR / file
            if not os.path.exists(full_path):
                raise GspackFailure(
                    f"-> {file}: the file {full_path} does not exist." +
                    f" It's likely a gspack installation bug." +
                    f" You should contact authors" +
                    f" or post the issue on the project's Github page.")
            shutil.copyfile(full_path, archive_dir / DIST_DIR / file)
            if verbose:
                print(f"-> {file}: OK")

        # Generate requirements file. It's either in 'requirements' variable or it can be generated via pipreqs.
        if verbose:
            print("Looking for package requirements for your solution:")
        if rubric.requirements is not None:
            with open(archive_dir / DIST_DIR / REQUIREMENTS_FILE, "w") as f:
                f.write("\n".join(rubric.requirements))
            if verbose:
                print("-> saved from 'requirements' variable, OK.")
        else:
            # pipreqs package scans the solution and generates the list of (non-standard) Python packages used.
            if platform == "python":
                generate_reqs_output = generate_requirements(
                    archive_dir,
                    output_path=archive_dir / DIST_DIR / REQUIREMENTS_FILE)
                if not generate_reqs_output[0].startswith(
                        b"INFO: Successfully saved"):
                    raise GspackFailure(
                        "Extra package requirements identification FAILED. "
                        "Make sure all solution files in the solution's "
                        "directory (including subdirectories) "
                        "can be executed without errors, and there are no "
                        "irrelevant Python files in the solution directory.")
                if verbose:
                    print(f"-> Generated via pipreqs: OK")
            elif platform == "jupyter":
                if verbose:
                    print(
                        "-> Not provided, assumed no extra packages are needed"
                    )
            elif platform == "matlab":
                print(
                    "-> If you need extra MATLAB toolboxes for your solution contact your department "
                    +
                    "to make sure they're added to the department's MATLAB distribution for Gradescope."
                )
            else:
                pass

        # This file will contain all the information that setup.sh needs during
        # the Docker initialization process.
        config = {}

        if "matlab" in rubric.supported_platforms:
            # Add MATLAB support
            if rubric.matlab_credentials is None:
                raise UserFailure(
                    "MATLAB support is requested but no matlab_credentials path is provided"
                )
            if verbose:
                print("Adding MATLAB support...")

            # Check that all the necessary files are in the credentials folder
            matlab_folder_path = Path(
                rubric.matlab_credentials).expanduser().absolute()
            if not matlab_folder_path.exists() or not matlab_folder_path.is_dir():
                raise UserFailure(
                    f"matlab_credentials: the directory {matlab_folder_path} does not exist"
                    f" or it's not a directory.")

            # Move all necessary files to the archive's directory
            for file in MATLAB_FILES:
                if not (matlab_folder_path / file).exists():
                    raise UserFailure(
                        f"-> {file}: File {(matlab_folder_path / file).absolute()} does not exist."
                    )
                shutil.copyfile(matlab_folder_path / file,
                                archive_dir / DIST_DIR / file)
                if verbose:
                    print(f"-> {file}: OK")

            # Getting prefix and suffix commands for the run_autograder script, if any.
            # These _prefix and _suffix normally contain all the commands
            # which need to be executed before and after the main
            # grading script kicks in. For instance, opening and closing a SSH tunnels to
            # MATLAB license servers, if used.
            run_autograder_prefix = ""
            run_autograder_suffix = ""
            if (matlab_folder_path / PROXY_SETTINGS).exists():
                with open(matlab_folder_path / PROXY_SETTINGS,
                          "r") as proxy_settings_file:
                    proxy_settings = json.load(proxy_settings_file)
                    if proxy_settings.get('open_tunnel') is not None:
                        run_autograder_prefix += proxy_settings['open_tunnel']
                    if proxy_settings.get('close_tunnel') is not None:
                        run_autograder_suffix += proxy_settings['close_tunnel']

            # Create run_autograder file given the prefix and suffix
            with open(archive_dir / DIST_DIR / RUN_AUTOGRADER_FILE,
                      'w') as run_autograder_dest:
                run_autograder_dest.write("#!/usr/bin/env bash \n")
                if run_autograder_prefix:
                    run_autograder_dest.write(run_autograder_prefix + "\n")
                with open(program_dir / TEMPLATES_DIR / RUN_AUTOGRADER_FILE,
                          'r') as run_autograder_src:
                    run_autograder_dest.write(run_autograder_src.read() + "\n")
                if run_autograder_suffix:
                    run_autograder_dest.write(run_autograder_suffix)
            config["matlab_support"] = 1
            if verbose:
                print("MATLAB support added successfully.", end='\n')
        else:
            config["matlab_support"] = 0
            # Create run_autograder file only adding bash prefix to the template
            with open(archive_dir / DIST_DIR / RUN_AUTOGRADER_FILE,
                      'w') as run_autograder_dest:
                run_autograder_dest.write("#!/usr/bin/env bash \n")
                with open(program_dir / TEMPLATES_DIR / RUN_AUTOGRADER_FILE,
                          'r') as run_autograder_src:
                    run_autograder_dest.write(run_autograder_src.read() + "\n")

        # Add Jupyter Notebooks support.
        if "jupyter" in rubric.supported_platforms:
            config["jupyter_support"] = 1
        else:
            config["jupyter_support"] = 0

        # save the config.json file
        with open(archive_dir / DIST_DIR / CONFIG_JSON, 'w') as f:
            json.dump(config, f)

        # Check and add extra files from the extra_files list.
        if verbose and rubric.extra_files:
            print("Found extra files list:")

        for extra_file in (rubric.extra_files or []):
            if not (archive_dir / extra_file).exists():
                raise UserFailure(
                    f"{extra_file}: can't find {archive_dir / extra_file}")
            shutil.copyfile(archive_dir / extra_file,
                            archive_dir / DIST_DIR / extra_file)
            if verbose:
                print(f"-> {extra_file}: OK")

        # save the rubric and true values from the rubric to the archive's folder.
        rubric.save_to(archive_dir / DIST_DIR)

        # Zip all files in the DIST directory
        with ZipFile(archive_dir / AUTOGRADER_ZIP, 'w') as zip_archive:
            for dist_file in os.listdir(archive_dir / DIST_DIR):
                zip_archive.write(archive_dir / DIST_DIR / dist_file,
                                  arcname=dist_file)
        return True
    finally:
        # Delete the temporary dist directory
        if os.path.exists(archive_dir / DIST_DIR):
            shutil.rmtree(archive_dir / DIST_DIR)
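A minimal usage sketch (the path is hypothetical; `rubric` is assumed to be an initialized Rubric with true values attached):

# Hypothetical usage: build autograder.zip next to the solution.
from pathlib import Path

create_archive(Path("./hw1/autograder.zip"),
               rubric=rubric,
               platform="python",
               verbose=True)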
Example #7
def get_grades(rubric: Rubric, platform: str, solution: dict):
    """
    Grades the results of a student's solution.

    :param rubric: An initialized instance of Rubric with `test_suite_values` attached
    :param platform: Solution's platform. Does not affect grades, only used for getting
                    language-specific hints.
    :param solution: variables from student's submission
    :return: results -- dictionary obeying Gradescope formatting for results.json if everything goes okay,
                    otherwise raises an error.
    """

    # Prepare an empty dictionary for results
    results = {
        "output": "",
        "score": 0,
        "tests": [],
        "extra_data": {
            "success": True,
            "pretest": False
        }
    }

    total_score = 0

    if rubric.test_suite is None:
        raise GspackFailure(
            "Rubric is not initialized properly: test_suite is None")
    if rubric.test_suite_values is None:
        raise GspackFailure(
            "Rubric's values are not attached. Call .fetch_values_for_tests() beforehand."
        )

    # Iterate over tests in the rubric and compare their values from `rubric.test_suite_values`
    # with the ones from the student submission
    for i, test in enumerate(rubric.test_suite):
        true_answer = rubric.test_suite_values[test["variable_name"]]
        test_result = {
            "name": f"{i + 1}. {test['test_name']}",
            "score": 0,
            "visibility": "visible"
        }

        if test.get("description", None) is not None:
            test_result["name"] += f": {test['description']}"

        results["tests"].append(test_result)

        # Get student's answer and simplify its type, if possible
        answer = solution.get(test["variable_name"], None)
        if answer is None:
            test_result["output"] = (
                f"Variable {test['variable_name']} is not defined in your solution file. "
                + get_hint(test, "hint_not_defined", platform))
            continue

        try:
            reduced_answer = reduce_type(answer)
        except GspackFailure:
            # This is a student's failure.
            test_result["output"] = (
                f"Variable {test['variable_name']} has an unrecognized type. ")
            continue

        # The error from this one is not captured here because
        # if it happens then it's an error which is a result of a bug in gspack which happened when
        # the rubric was created, so the whole process should be aborted.
        reduced_true_answer = reduce_type(true_answer)

        # Check whether types match
        same_type = type(reduced_answer) == type(reduced_true_answer)
        both_numeric = (type(reduced_answer) in (float, int)
                        and type(reduced_true_answer) in (float, int))
        if not (same_type or both_numeric):
            test_result["output"] = (
                f"Wrong answer type: the type of your variable {test['variable_name']}"
                f" is {print_reduced_type(reduced_answer)}, "
                f"but it should be a {print_reduced_type(reduced_true_answer)}. ")
            test_result["output"] += get_hint(test, "hint_wrong_type",
                                              platform)

            continue

        if type(reduced_answer) in (np.ndarray, float):
            # Check whether the dimensions match when the answers are arrays
            if (type(reduced_answer) is np.ndarray
                    and type(reduced_true_answer) is np.ndarray):
                if reduced_answer.shape != reduced_true_answer.shape:
                    test_result["output"] = (
                        f"Wrong dimensions: the shape of your variable" +
                        f" {test['variable_name']} is {reduced_answer.shape}, "
                        + f"but it should be {reduced_true_answer.shape}. ")
                    test_result["output"] += get_hint(test, "hint_wrong_size",
                                                      platform)
                    continue

                if reduced_answer.dtype != reduced_true_answer.dtype:
                    test_result["output"] = (
                        f"Wrong data type of the array: the data type" +
                        f" of your array {test['variable_name']} is {reduced_answer.dtype}, "
                        + f"but it should be {reduced_true_answer.dtype}. ")
                    test_result["output"] += get_hint(test, "hint_wrong_type",
                                                      platform)
                    continue
            # Check whether there are NaNs in the answer
            if np.isnan(reduced_answer).any():
                test_result["output"] = (
                    f"Your variable {test['variable_name']} contains NaNs. ")
                test_result["output"] += get_hint(test, "hint_nans", platform)
                continue

            # Check if the answers are close enough
            rtol = float(test.get("rtol", None) or 1e-5)
            atol = float(test.get("atol", None) or 1e-8)
            if not np.allclose(
                    reduced_answer, reduced_true_answer, rtol=rtol, atol=atol):
                test_result["output"] = (
                    "Your answer is not within tolerance of the right answer. ")
                test_result["output"] += get_hint(test, "hint_tolerance",
                                                  platform)
                continue

        # Strings are compared case-insensitively, ignoring leading/trailing whitespace.
        elif type(reduced_answer) == str:
            if reduced_answer.lower().strip() != reduced_true_answer.lower().strip():
                test_result["output"] = (
                    "Your answer does not match the right answer. ")
                continue

        test_result["output"] = "Correct."
        test_result["score"] = test["score"]
        total_score += test["score"]
    results["score"] = round(total_score, 2)
    return results
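A sketch of grading a submission's namespace, assuming `rubric` already has values attached via fetch_values_for_tests (the solution dictionary is hypothetical):

# Hypothetical usage: grade a submission and inspect the per-test output.
results = get_grades(rubric, "python", solution={"x_star": 0.739085})
print(results["score"])
for test in results["tests"]:
    print(test["name"], test["score"], test.get("output", ""))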
Example #8
    def check_rubric_correctness(rubric: dict,
                                 verbose=False,
                                 solution_platform=None,
                                 **kwargs):
        """
        Checks that the rubric in the dictionary is correct. Raises UserFailure if it's not.

        :param rubric: Dictionary that contains arguments for the rubric
        :param verbose: Whether to print logs
        :param solution_platform: the language (platform) in which the solution is implemented
        :param kwargs: For passing along irrelevant variables.
        :return: True if the rubric is correct, otherwise raises a UserFailure
        """

        # Check the correctness of the test suite
        test_suite = rubric.get('test_suite', None)
        if test_suite is None:
            raise UserFailure(
                "No test_suite variable defined in the solution file.")
        if not isinstance(test_suite, list):
            raise UserFailure(
                f"test_suite is defined as {type(test_suite)} but it should be a list."
            )

        # Assign individual test's scores, if not assigned, using total_score,
        # or make sure total_score is consistent with individual scores, if both are set.
        score_per_test = None
        total_score = rubric.get('total_score', None)
        if total_score is not None:
            try:
                total_score = float(total_score)
            except Exception:
                raise UserFailure(
                    "Total score should be a number. Check the type of the total_score variable."
                )
            score_per_test = total_score / len(test_suite)

        if verbose:
            print("Found the test suite configuration:")

        # Recovers individual scores based on `total_score`, if needed.
        actual_total_score = 0
        for test in test_suite:
            if (score_per_test is None) ^ (test.get('score', None) is None):
                if test.get('score', None) is None:
                    test['score'] = score_per_test
            else:
                if (test.get('score', None) is None) and (score_per_test is None):
                    raise UserFailure(
                        f"{test['test_name']}: score is missing and total_score is not defined."
                        +
                        " You need to either define scores for each test or define total_score."
                    )
                else:
                    try:
                        score_from_rubric = float(test['score'])
                    except Exception:
                        raise UserFailure(
                            f"Score for {test['test_name']} ({test['variable_name']}) is not a number."
                        )

                    if (score_per_test is not None
                            and abs(score_from_rubric - score_per_test) > 1e-2):
                        raise UserFailure(
                            f"{test['test_name']}: score for this test is not consistent with total_score:"
                            f" {score_from_rubric:.2f} vs {score_per_test:.2f} ({total_score}/{len(test_suite)})."
                            f" You need to either define one global score to assign points evenly,"
                            f" or define all tests' scores manually. If you do both, make sure they're consistent."
                        )

            try:
                _ = float(test['rtol']) if test.get('rtol', None) else None
                _ = float(test['atol']) if test.get('atol', None) else None
            except Exception:
                raise UserFailure(
                    f"Tolerances for test {test['test_name']}: rtol and atol should be float numbers"
                )

            actual_total_score += float(test['score'])
            if verbose:
                print(f"-> {test['test_name']}: OK")

        if verbose:
            print(f"The total number of points is {actual_total_score:.0f}.")

        # Check the number of attempts
        number_of_attempts = rubric.get('number_of_attempts', None)
        if number_of_attempts is not None:
            try:
                number_of_attempts = int(number_of_attempts)
            except Exception:
                raise UserFailure("number_of_attempts should be int.")
            if verbose:
                print(f"Number of attempts: {number_of_attempts}")
        else:
            if verbose:
                print(f"Number of attempts: unlimited.")

        # Check the list of supported platforms.
        supported_platforms = rubric.get("supported_platforms", None)
        if supported_platforms is not None:
            if not isinstance(supported_platforms, list):
                raise UserFailure(
                    "supported_platforms should be a list of strings")
            for platform in supported_platforms:
                if platform not in all_supported_platforms.keys():
                    raise UserFailure(
                        f"Unrecognized platform: {platform}." +
                        f" Options are: {', '.join(all_supported_platforms.keys())}"
                    )
        else:
            # If no list of supported platforms is provided, it's assumed that the only
            # supported platform is the one in which the solution file is implemented.
            if solution_platform is not None:
                supported_platforms = [solution_platform]
            else:
                raise GspackFailure(
                    "Neither supported_platforms nor solution's platform is provided."
                )

        rubric["supported_platforms"] = supported_platforms
        if verbose:
            print(f"Supported platforms: {', '.join(supported_platforms)}")

        # Check the main file's name
        main_file_name = rubric.get("main_file_name", None)
        if main_file_name is not None:
            if verbose:
                print(
                    f"Main file's name: {main_file_name} "
                    f"[{';'.join(chain(*[all_supported_platforms[platform] for platform in supported_platforms]))}]"
                )

        # Check the list of extra files
        extra_files = rubric.get("extra_files", None)
        if extra_files is not None:
            if not isinstance(extra_files, list):
                raise UserFailure(
                    "extra_files should be a list of file names"
                    " located in the same directory as the solution")

        # Check the list of requirements
        requirements = rubric.get("requirements", None)
        if requirements is not None:
            if not isinstance(requirements, list):
                raise UserFailure(
                    "requirements variables should be a list of package names")

        return True
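A sketch of a rubric dictionary that passes these checks (all names and numbers are hypothetical; per-test scores are derived evenly from total_score):

# Hypothetical rubric that satisfies check_rubric_correctness:
rubric = {
    "test_suite": [
        {"test_name": "Root", "variable_name": "x_star", "rtol": 1e-4},
        {"test_name": "Count", "variable_name": "iterations"},
    ],
    "total_score": 10,  # 5 points per test, assigned evenly
    "supported_platforms": ["python"],
    "main_file_name": "hw1",
}
check_rubric_correctness(rubric, verbose=True)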