Example #1
    def test_determine_grade_code_grade_and_solution(self):
        cell = self._create_grade_and_solution_cell('test', "code", "foo", 10)
        cell.outputs = []
        assert_equal(utils.determine_grade(cell), (10, 10))

        cell.outputs = [new_output('error', ename="NotImplementedError", evalue="", traceback=["error"])]
        assert_equal(utils.determine_grade(cell), (0, 10))
Example #2
def test_determine_grade_code_grade():
    cell = create_grade_cell('print("test")', "code", "foo", 10)
    cell.outputs = []
    assert utils.determine_grade(cell) == (10, 10)

    cell.outputs = [new_output('error', ename="NotImplementedError", evalue="", traceback=["error"])]
    assert utils.determine_grade(cell) == (0, 10)
Example #3
    def test_determine_grade_markdown_grade_and_solution(self):
        cell = self._create_grade_and_solution_cell('test', "markdown", "foo", 10)
        assert_equal(utils.determine_grade(cell), (0, 10))

        cell = self._create_grade_and_solution_cell('test', "markdown", "foo", 10)
        cell.source = 'test!'
        assert_equal(utils.determine_grade(cell), (None, 10))
Example #4
def test_determine_grade_solution():
    cell = create_solution_cell('test', "code", "foo")
    with pytest.raises(ValueError):
        utils.determine_grade(cell)

    cell = create_solution_cell('test', "markdown", "foo")
    with pytest.raises(ValueError):
        utils.determine_grade(cell)
Example #5
def test_determine_grade_markdown_grade_and_solution():
    cell = create_grade_and_solution_cell('test', "markdown", "foo", 10)
    cell.metadata.nbgrader['checksum'] = utils.compute_checksum(cell)
    assert utils.determine_grade(cell) == (0, 10)

    cell = create_grade_and_solution_cell('test', "markdown", "foo", 10)
    cell.source = 'test!'
    assert utils.determine_grade(cell) == (None, 10)
Example #6
def test_determine_grade_code_grade_and_solution():
    cell = create_grade_and_solution_cell('test', "code", "foo", 10)
    cell.metadata.nbgrader['checksum'] = utils.compute_checksum(cell)
    cell.outputs = []
    assert utils.determine_grade(cell) == (0, 10)

    cell.outputs = [new_output('error', ename="NotImplementedError", evalue="", traceback=["error"])]
    cell.source = 'test!'
    assert utils.determine_grade(cell) == (None, 10)
Example #7
    def _add_score(self, cell, resources):
        """Graders can override the autograder grades, and may need to
        manually grade written solutions anyway. This function adds
        score information to the database if it doesn't exist. It does
        NOT override the 'score' field, as this is the manual score
        that might have been provided by a grader.

        """
        # these are the fields by which we will identify the score
        # information
        grade = self.gradebook.find_grade(
            cell.metadata['nbgrader']['grade_id'], self.notebook_id,
            self.assignment_id, self.student_id)

        # determine what the grade is
        auto_score, _ = utils.determine_grade(cell)
        grade.auto_score = auto_score

        # if there was previously a manual grade, or if there is no autograder
        # score, then we should mark this as needing review
        if (grade.manual_score is not None) or (grade.auto_score is None):
            grade.needs_manual_grade = True
        else:
            grade.needs_manual_grade = False

        self.gradebook.db.commit()
        self.log.debug(grade)
Example #8
    def _add_score(self, cell, resources):
        """Graders can override the autograder grades, and may need to
        manually grade written solutions anyway. This function adds
        score information to the database if it doesn't exist. It does
        NOT override the 'score' field, as this is the manual score
        that might have been provided by a grader.

        """
        # these are the fields by which we will identify the score
        # information
        grade = self.gradebook.find_grade(
            cell.metadata['nbgrader']['grade_id'],
            self.notebook_id,
            self.assignment_id,
            self.student_id)

        # determine what the grade is
        auto_score, _ = utils.determine_grade(cell)
        grade.auto_score = auto_score

        # if there was previously a manual grade, or if there is no autograder
        # score, then we should mark this as needing review
        if (grade.manual_score is not None) or (grade.auto_score is None):
            grade.needs_manual_grade = True
        else:
            grade.needs_manual_grade = False

        self.gradebook.db.commit()
        self.log.debug(grade)
Example #9
    def preprocess_cell(self, cell, resources, cell_index):
        if not (utils.is_grade(cell) or utils.is_locked(cell)):
            return cell, resources

        # if we're ignoring checksums, then remove the checksum from the
        # cell metadata
        if self.ignore_checksums and 'checksum' in cell.metadata.nbgrader:
            del cell.metadata.nbgrader['checksum']

        # verify checksums of cells
        if utils.is_locked(cell) and 'checksum' in cell.metadata.nbgrader:
            old_checksum = cell.metadata.nbgrader['checksum']
            new_checksum = utils.compute_checksum(cell)
            if old_checksum != new_checksum:
                resources['nbgrader']['checksum_mismatch'].append(cell_index)

        # if it's a grade cell, then check the grade
        if utils.is_grade(cell):
            score, max_score = utils.determine_grade(cell)

            # it's a markdown cell, so we can't do anything
            if score is None:
                pass
            elif score < max_score:
                resources['nbgrader']['failed_cells'].append(cell_index)
            else:
                resources['nbgrader']['passed_cells'].append(cell_index)

        return cell, resources
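
The preprocess_cell examples on this page assume that resources['nbgrader'] already contains the lists they append to. A minimal sketch of that setup, inferred only from the keys used in these snippets rather than taken from the actual preprocessor:

# Assumed shape of the resources dict used by the preprocess_cell examples;
# inferred from the keys they touch, not from the real preprocessor setup.
resources = {
    'nbgrader': {
        'checksum_mismatch': [],  # indices of cells whose stored checksum changed
        'failed_cells': [],       # indices of grade cells scoring below max_score
        'passed_cells': [],       # indices of grade cells at full marks
    }
}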
Example #10
    def preprocess_cell(self, cell, resources, cell_index):
        if not utils.is_grade(cell):
            return cell, resources

        # if we're ignoring checksums, then remove the checksum from the
        # cell metadata
        if self.ignore_checksums and 'checksum' in cell.metadata.nbgrader:
            del cell.metadata.nbgrader['checksum']

        # verify checksums of cells
        if not utils.is_solution(cell) and 'checksum' in cell.metadata.nbgrader:
            old_checksum = cell.metadata.nbgrader['checksum']
            new_checksum = utils.compute_checksum(cell)
            if old_checksum != new_checksum:
                resources['nbgrader']['checksum_mismatch'].append(cell_index)

        # if it's a grade cell, then add a grade
        score, max_score = utils.determine_grade(cell)

        # it's a markdown cell, so we can't do anything
        if score is None:
            pass
        elif score < max_score:
            resources['nbgrader']['failed_cells'].append(cell_index)
        else:
            resources['nbgrader']['passed_cells'].append(cell_index)

        return cell, resources
Example #11
def grade(nb):
    total_score = 0
    max_total_score = 0
    for cell in nb.cells:
        if utils.is_grade(cell):
            score, max_score = utils.determine_grade(cell)
            # markdown grade cells return score == None and must be graded
            # manually; skip them here so the total stays numeric
            if score is not None:
                total_score += score
            max_total_score += max_score

    return total_score, max_total_score
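
A minimal usage sketch for the grade() helper above, assuming the same utils module is importable; the notebook filename is hypothetical:

import nbformat

# read a submitted notebook (hypothetical path) and total up its grade cells
nb = nbformat.read("submission.ipynb", as_version=4)
total_score, max_total_score = grade(nb)
print(total_score, "out of", max_total_score)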
Example #12
    def test_determine_grade(self):
        cell = self._create_code_cell()
        cell.metadata['nbgrader'] = {}
        cell.metadata['nbgrader']['grade'] = True
        cell.metadata['nbgrader']['points'] = 10
        cell.outputs = []
        assert utils.determine_grade(cell) == (10, 10)

        cell.outputs = [
            new_output('error',
                       ename="NotImplementedError",
                       evalue="",
                       traceback=["error"])
        ]
        assert utils.determine_grade(cell) == (0, 10)

        cell = self._create_text_cell()
        cell.metadata['nbgrader'] = {}
        cell.metadata['nbgrader']['grade'] = True
        cell.metadata['nbgrader']['points'] = 10
        assert utils.determine_grade(cell) == (None, 10)
Example #13
def determine_grade(
    cell: NotebookNode, log: Logger = None
) -> Tuple[Optional[float], float]:
    if not nbutils.is_grade(cell):
        raise ValueError("cell is not a grade cell")

    if not (is_multiplechoice(cell) or is_singlechoice(cell)):
        return nbutils.determine_grade(cell, log)

    max_points = float(cell.metadata["nbgrader"]["points"])

    if is_singlechoice(cell):
        # Get the choices of the student
        student_choices = get_choices(cell)
        # Get the instructor choices
        instructor_choices = get_instructor_choices(cell)

        if (
            (len(student_choices) > 0)
            and (len(instructor_choices) > 0)
            and (student_choices[0] == instructor_choices[0])
        ):
            return max_points, max_points
        else:
            return 0, max_points

    elif is_multiplechoice(cell):
        # Get the choices of the student
        student_choices = get_choices(cell)
        # Get the weights of the answer
        instructor_choices = get_instructor_choices(cell)
        option_points = max_points / get_num_of_choices(cell)

        points = 0
        for i in range(get_num_of_choices(cell)):
            if ((i in student_choices) and (i in instructor_choices)) or (
                (i not in student_choices) and (i not in instructor_choices)
            ):
                points += option_points
            else:
                points -= option_points
        return max(0, points), max_points
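
To make the partial-credit arithmetic in Example #13 concrete, here is a standalone sketch with assumed values (four options worth 10 points in total, hypothetical student and instructor selections); it mirrors the multiple-choice loop above without needing a real notebook cell.

# Standalone illustration of the multiple-choice scoring above, using
# assumed values instead of a notebook cell.
max_points = 10.0
num_choices = 4
option_points = max_points / num_choices   # 2.5 points per option

student_choices = [0, 2]      # hypothetical student selection
instructor_choices = [0, 1]   # hypothetical correct answers

points = 0.0
for i in range(num_choices):
    # credit when student and instructor agree on an option (both picked it
    # or both left it out), penalty otherwise
    if (i in student_choices) == (i in instructor_choices):
        points += option_points
    else:
        points -= option_points

print(max(0, points), max_points)   # here two agreements and two disagreements cancel out -> 0.0 10.0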
Example #14
    def _add_score(self, cell, resources):
        """Graders can override the autograder grades, and may need to
        manually grade written solutions anyway. This function adds
        score information to the database if it doesn't exist. It does
        NOT override the 'score' field, as this is the manual score
        that might have been provided by a grader.

        """
        # these are the fields by which we will identify the score
        # information
        grade = self.gradebook.find_or_create_grade(
            notebook=self.notebook,
            grade_id=cell.metadata['nbgrader']['grade_id'])

        # determine what the grade is
        grade.autoscore, grade.max_score = utils.determine_grade(cell)

        # Update the grade information and print it out
        self.gradebook.update_grade(grade)
        self.log.debug(grade)
Example #15
    def _add_score(self, cell, resources):
        """Graders can override the autograder grades, and may need to
        manually grade written solutions anyway. This function adds
        score information to the database if it doesn't exist. It does
        NOT override the 'score' field, as this is the manual score
        that might have been provided by a grader.

        """
        # these are the fields by which we will identify the score
        # information
        grade = self.gradebook.find_grade(
            cell.metadata['nbgrader']['grade_id'],
            self.notebook_id,
            self.assignment_id,
            self.student_id)

        # determine what the grade is
        auto_score, max_score = utils.determine_grade(cell)
        grade.auto_score = auto_score
        self.gradebook.db.commit()
        self.log.debug(grade)
Example #16
    def preprocess_cell(self, cell, resources, cell_index):
        if not utils.is_grade(cell):
            return cell, resources

        # verify checksums of cells
        if 'checksum' in cell.metadata.nbgrader:
            old_checksum = cell.metadata.nbgrader['checksum']
            new_checksum = utils.compute_checksum(cell)
            if old_checksum != new_checksum:
                resources['nbgrader']['checksum_mismatch'].append(cell_index)

        # if it's a grade cell, then add a grade
        score, max_score = utils.determine_grade(cell)

        # it's a markdown cell, so we can't do anything
        if score is None:
            pass
        elif score < max_score:
            resources['nbgrader']['failed_cells'].append(cell_index)
        else:
            resources['nbgrader']['passed_cells'].append(cell_index)

        return cell, resources
Example #17
def test_determine_grade_markdown_grade():
    cell = create_grade_cell('test', "markdown", "foo", 10)
    assert utils.determine_grade(cell) == (None, 10)
Example #18
    def test_determine_grade_markdown_grade(self):
        cell = self._create_grade_cell('test', "markdown", "foo", 10)
        assert_equal(utils.determine_grade(cell), (None, 10))
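
Taken together, the tests above construct the same small set of nbgrader metadata fields by hand (see Example #12). The snippet below is a sketch of that metadata inferred from this page only, not a complete nbgrader schema:

from nbformat.v4 import new_code_cell

# Sketch of the cell metadata that determine_grade relies on, inferred from
# the tests above; field names only, not an exhaustive nbgrader schema.
cell = new_code_cell(source='print("test")')
cell.metadata['nbgrader'] = {
    'grade': True,      # marks this as a grade cell
    'points': 10,       # maximum score for the cell
    'grade_id': 'foo',  # identifier used to look the grade up in the gradebook
}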