def test_compute_checksum_cell_type():
    """The checksum must change when only the cell type changes."""
    # Grade cells: identical contents, code vs. markdown.
    code_version = create_grade_cell("hello", "code", "foo", 1)
    markdown_version = create_grade_cell("hello", "markdown", "foo", 1)
    assert utils.compute_checksum(code_version) != utils.compute_checksum(markdown_version)

    # Solution cells: same comparison.
    code_version = create_solution_cell("hello", "code", "foo")
    markdown_version = create_solution_cell("hello", "markdown", "foo")
    assert utils.compute_checksum(code_version) != utils.compute_checksum(markdown_version)
def test_compute_checksum_identical():
    """Two cells built from the same arguments hash to the same checksum."""
    # Identical grade cells.
    first = create_grade_cell("hello", "code", "foo", 1)
    second = create_grade_cell("hello", "code", "foo", 1)
    assert utils.compute_checksum(first) == utils.compute_checksum(second)

    # Identical solution cells.
    first = create_solution_cell("hello", "code", "foo")
    second = create_solution_cell("hello", "code", "foo")
    assert utils.compute_checksum(first) == utils.compute_checksum(second)
def test_checksum_grade_cell_type(self, preprocessor):
    """The stored checksum reflects the grade cell's cell type."""
    code_cell = create_grade_cell("", "code", "foo", 1)
    code_cell = preprocessor.preprocess_cell(code_cell, {}, 0)[0]
    markdown_cell = create_grade_cell("", "markdown", "foo", 1)
    markdown_cell = preprocessor.preprocess_cell(markdown_cell, {}, 0)[0]

    # Each stored checksum agrees with a fresh computation...
    assert code_cell.metadata.nbgrader["checksum"] == compute_checksum(code_cell)
    assert markdown_cell.metadata.nbgrader["checksum"] == compute_checksum(markdown_cell)
    # ...and the two differ because the cell types differ.
    assert code_cell.metadata.nbgrader["checksum"] != markdown_cell.metadata.nbgrader["checksum"]
def test_checksum_grade_source(self, preprocessor):
    """The stored checksum reflects the grade cell's source text."""
    first = preprocessor.preprocess_cell(create_grade_cell("a", "code", "foo", 1), {}, 0)[0]
    second = preprocessor.preprocess_cell(create_grade_cell("b", "code", "foo", 1), {}, 0)[0]

    # Stored checksums agree with fresh computations...
    assert first.metadata.nbgrader["checksum"] == compute_checksum(first)
    assert second.metadata.nbgrader["checksum"] == compute_checksum(second)
    # ...and differ because the sources differ.
    assert first.metadata.nbgrader["checksum"] != second.metadata.nbgrader["checksum"]
def test_compute_checksum_grade_id():
    """Changing the grade id changes the checksum."""
    # With the default grade flag.
    first = create_grade_cell("hello", "code", "foo", 1)
    second = create_grade_cell("hello", "code", "bar", 1)
    assert utils.compute_checksum(first) != utils.compute_checksum(second)

    # Still differs when the grade flag is switched off.
    first = create_grade_cell("hello", "code", "foo", 1)
    second = create_grade_cell("hello", "code", "bar", 1)
    first.metadata.nbgrader["grade"] = False
    second.metadata.nbgrader["grade"] = False
    assert utils.compute_checksum(first) != utils.compute_checksum(second)
def test_compute_checksum_points():
    """The point value affects the checksum only while the cell is a grade cell."""
    # Grade flag on: different point values give different checksums.
    two_points = create_grade_cell("hello", "code", "foo", 2)
    one_point = create_grade_cell("hello", "code", "foo", 1)
    assert utils.compute_checksum(two_points) != utils.compute_checksum(one_point)

    # Grade flag off: point values are ignored, so checksums match.
    two_points = create_grade_cell("hello", "code", "foo", 2)
    one_point = create_grade_cell("hello", "code", "foo", 1)
    two_points.metadata.nbgrader["grade"] = False
    one_point.metadata.nbgrader["grade"] = False
    assert utils.compute_checksum(two_points) == utils.compute_checksum(one_point)
def test_checksum_grade_cell_type(self, preprocessor):
    """A grade cell's cell type is baked into its stored checksum."""
    processed = []
    for cell_type in ("code", "markdown"):
        raw = create_grade_cell("", cell_type, "foo", 1)
        processed.append(preprocessor.preprocess_cell(raw, {}, 0)[0])

    # Stored checksums must match recomputation for each cell...
    for cell in processed:
        assert cell.metadata.nbgrader["checksum"] == compute_checksum(cell)
    # ...and must distinguish code from markdown.
    assert (processed[0].metadata.nbgrader["checksum"]
            != processed[1].metadata.nbgrader["checksum"])
def test_checksum_grade_and_solution(self, preprocessor):
    """The solution flag participates in the checksum of a grade cell."""
    plain = create_grade_cell("", "markdown", "foo", 1)
    plain = preprocessor.preprocess_cell(plain, {}, 0)[0]

    combined = create_grade_cell("", "markdown", "foo", 1)
    combined.metadata.nbgrader["solution"] = True
    combined = preprocessor.preprocess_cell(combined, {}, 0)[0]

    # Both stored checksums are internally consistent...
    assert plain.metadata.nbgrader["checksum"] == compute_checksum(plain)
    assert combined.metadata.nbgrader["checksum"] == compute_checksum(combined)
    # ...but toggling the solution flag changes the value.
    assert plain.metadata.nbgrader["checksum"] != combined.metadata.nbgrader["checksum"]
def test_duplicate_grade_cell(self, preprocessor):
    """When two cells share a grade id, the first one loses its metadata."""
    duplicate_a = create_grade_cell("hello", "code", "foo", 2)
    duplicate_b = create_grade_cell("goodbye", "code", "foo", 2)
    nb = new_notebook()
    nb.cells.extend([duplicate_a, duplicate_b])

    nb, resources = preprocessor.preprocess(nb, {})

    # Earlier duplicate is stripped; the later one keeps its metadata.
    assert nb.cells[0].metadata.nbgrader == {}
    assert nb.cells[1].metadata.nbgrader != {}
def test_checksum_grade_source(self, preprocessor):
    """Different sources must yield different stored checksums."""
    processed = []
    for source in ("a", "b"):
        raw = create_grade_cell(source, "code", "foo", 1)
        processed.append(preprocessor.preprocess_cell(raw, {}, 0)[0])

    # Stored values agree with direct recomputation...
    for cell in processed:
        assert cell.metadata.nbgrader["checksum"] == compute_checksum(cell)
    # ...and the two sources hash differently.
    assert (processed[0].metadata.nbgrader["checksum"]
            != processed[1].metadata.nbgrader["checksum"])
def test_checksum_grade_and_solution(self, preprocessor):
    """Grade cells that double as solution cells get distinct checksums."""
    grade_only = create_grade_cell("", "markdown", "foo", 1)
    grade_only = preprocessor.preprocess_cell(grade_only, {}, 0)[0]

    grade_and_solution = create_grade_cell("", "markdown", "foo", 1)
    grade_and_solution.metadata.nbgrader["solution"] = True
    grade_and_solution = preprocessor.preprocess_cell(grade_and_solution, {}, 0)[0]

    # Each stored checksum is reproducible...
    assert grade_only.metadata.nbgrader["checksum"] == compute_checksum(grade_only)
    assert grade_and_solution.metadata.nbgrader["checksum"] == compute_checksum(grade_and_solution)
    # ...and the solution flag makes them differ.
    assert (grade_only.metadata.nbgrader["checksum"]
            != grade_and_solution.metadata.nbgrader["checksum"])
def test_determine_grade_code_grade():
    """A code grade cell earns full marks without errors, zero with them."""
    cell = create_grade_cell('print("test")', "code", "foo", 10)

    # No outputs at all: full credit.
    cell.outputs = []
    assert utils.determine_grade(cell) == (10, 10)

    # An error output: zero credit, max score unchanged.
    cell.outputs = [new_output('error', ename="NotImplementedError",
                               evalue="", traceback=["error"])]
    assert utils.determine_grade(cell) == (0, 10)
def test_nonexistant_grade_id(self, preprocessors, resources):
    """Cells whose grade flag is off never acquire a grade_id, whatever the id."""
    # Blank grade id on a non-grade cell.
    cell = create_grade_cell("", "code", "", 1)
    cell.metadata.nbgrader["grade"] = False
    nb = new_notebook()
    nb.cells.append(cell)
    nb, resources = preprocessors[0].preprocess(nb, resources)
    nb, resources = preprocessors[1].preprocess(nb, resources)
    assert "grade_id" not in cell.metadata.nbgrader

    # Non-blank grade id on a non-grade cell behaves the same.
    cell = create_grade_cell("", "code", "foo", 1)
    cell.metadata.nbgrader["grade"] = False
    nb = new_notebook()
    nb.cells.append(cell)
    nb, resources = preprocessors[0].preprocess(nb, resources)
    nb, resources = preprocessors[1].preprocess(nb, resources)
    assert "grade_id" not in cell.metadata.nbgrader
def test_invalid_grade_cell_id(self, preprocessor):
    """Malformed grade cell ids raise RuntimeError; a well-formed id passes."""
    resources = dict(grade_ids=[])

    # Empty, whitespace-containing, and quote-containing ids are rejected.
    for bad_id in ("", "a b", "a\"b"):
        cell = create_grade_cell("", "code", bad_id, 1)
        with pytest.raises(RuntimeError):
            preprocessor.preprocess_cell(cell, resources, 0)

    # Letters, digits, hyphens and underscores are allowed.
    cell = create_solution_cell("", "code", "abc-ABC_0")
    preprocessor.preprocess_cell(cell, resources, 0)
def test_non_nbgrader_cell_blank_grade_id(self, preprocessor):
    """Blank ids on cells with their nbgrader flags off are dropped, not rejected."""
    # Grade cell with the grade flag disabled.
    resources = dict(grade_ids=[])
    cell = create_grade_cell("", "code", "", 1)
    cell.metadata.nbgrader['grade'] = False
    processed, _ = preprocessor.preprocess_cell(cell, resources, 0)
    assert 'grade_id' not in processed.metadata.nbgrader

    # Solution cell with the solution flag disabled.
    resources = dict(grade_ids=[])
    cell = create_solution_cell("", "code", "")
    cell.metadata.nbgrader['solution'] = False
    processed, _ = preprocessor.preprocess_cell(cell, resources, 0)
    assert 'grade_id' not in processed.metadata.nbgrader
def test_overwrite_grade_source(self, preprocessors, resources):
    """A student edit to a grade cell's source is reverted to the original."""
    cell = create_grade_cell("hello", "code", "foo", 1)
    cell.metadata.nbgrader["checksum"] = compute_checksum(cell)
    nb = new_notebook()
    nb.cells.append(cell)

    # First pass records the canonical source.
    nb, resources = preprocessors[0].preprocess(nb, resources)

    # Simulate a student modification, then run the overwrite pass.
    cell.source = "hello!"
    nb, resources = preprocessors[1].preprocess(nb, resources)
    assert cell.source == "hello"
def test_overwrite_points(self, preprocessors, resources):
    """A student edit to a grade cell's point value is reverted."""
    cell = create_grade_cell("hello", "code", "foo", 1)
    cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
    nb = new_notebook()
    nb.cells.append(cell)

    # First pass records the canonical point value (1).
    nb, resources = preprocessors[0].preprocess(nb, resources)

    # Simulate tampering with the points, then run the overwrite pass.
    cell.metadata.nbgrader["points"] = 2
    nb, resources = preprocessors[1].preprocess(nb, resources)
    assert cell.metadata.nbgrader["points"] == 1
def test_overwrite_grade_source(self, preprocessors, resources):
    """Modified grade cell source must be restored from the saved version."""
    original_source = "hello"
    cell = create_grade_cell(original_source, "code", "foo", 1)
    cell.metadata.nbgrader['checksum'] = compute_checksum(cell)

    nb = new_notebook()
    nb.cells.append(cell)
    nb, resources = preprocessors[0].preprocess(nb, resources)

    # Tamper with the source; the second pass should undo it.
    cell.source = "hello!"
    nb, resources = preprocessors[1].preprocess(nb, resources)
    assert cell.source == original_source
def test_compute_checksum_source():
    """Any source change — even pure whitespace — changes the checksum."""
    # (source A, source B, cell type) pairs that must hash differently.
    cases = [
        ("print('hello')", "print( 'hello' )", "code"),      # whitespace only
        ("print('hello')", "print( 'hello!' )", "code"),     # content change
        ("print('hello')", "print( 'hello' )", "markdown"),  # markdown cells too
    ]

    for source_a, source_b, cell_type in cases:
        first = create_grade_cell(source_a, cell_type, "foo", 1)
        second = create_grade_cell(source_b, cell_type, "foo", 1)
        assert utils.compute_checksum(first) != utils.compute_checksum(second)

    for source_a, source_b, cell_type in cases:
        first = create_solution_cell(source_a, cell_type, "foo")
        second = create_solution_cell(source_b, cell_type, "foo")
        assert utils.compute_checksum(first) != utils.compute_checksum(second)
def test_save_markdown_grade_cell(self, preprocessor, resources):
    """A markdown grade cell is persisted with its score, source, and checksum."""
    cell = create_grade_cell("hello", "markdown", "foo", 1)
    nb = new_notebook()
    nb.cells.append(cell)
    nb, resources = preprocessor.preprocess(nb, resources)

    # Look the cell up in the gradebook and verify the stored fields.
    saved = preprocessor.gradebook.find_grade_cell("foo", "test", "ps0")
    assert saved.max_score == 1
    assert saved.source == "hello"
    assert saved.checksum == cell.metadata.nbgrader["checksum"]
    assert saved.cell_type == "markdown"
def test_save_correct_code(self, preprocessors, gradebook, resources):
    """A passing code cell is saved with full score and no comment."""
    cell = create_grade_cell("hello", "code", "foo", 1)
    nb = new_notebook()
    nb.cells.append(cell)

    # Record the assignment, register a submission, then autograde + save.
    preprocessors[0].preprocess(nb, resources)
    gradebook.add_submission("ps0", "bar")
    preprocessors[1].preprocess(nb, resources)
    preprocessors[2].preprocess(nb, resources)

    metadata = cell.metadata.nbgrader
    assert metadata['score'] == 1
    assert metadata['points'] == 1
    assert 'comment' not in metadata
def test_save_correct_code(self, preprocessors, gradebook, resources):
    """A passing code cell with a recorded checksum is saved at full score."""
    cell = create_grade_cell("hello", "code", "foo", 1)
    cell.metadata.nbgrader['checksum'] = compute_checksum(cell)

    nb = new_notebook()
    nb.cells.append(cell)

    # Save the assignment, register the submission, autograde, and save grades.
    preprocessors[0].preprocess(nb, resources)
    gradebook.add_submission("ps0", "bar")
    preprocessors[1].preprocess(nb, resources)
    preprocessors[2].preprocess(nb, resources)

    metadata = cell.metadata.nbgrader
    assert metadata['score'] == 1
    assert metadata['points'] == 1
    assert 'comment' not in metadata
def test_save_incorrect_code(self, preprocessors, gradebook, resources):
    """A code cell that raised an error is saved with a zero score."""
    cell = create_grade_cell("hello", "code", "foo", 1)
    # Attach an error output so autograding treats the cell as failed.
    cell.outputs = [new_output('error', ename="NotImplementedError",
                               evalue="", traceback=["error"])]

    nb = new_notebook()
    nb.cells.append(cell)
    preprocessors[0].preprocess(nb, resources)
    gradebook.add_submission("ps0", "bar")
    preprocessors[1].preprocess(nb, resources)
    preprocessors[2].preprocess(nb, resources)

    metadata = cell.metadata.nbgrader
    assert metadata['score'] == 0
    assert metadata['points'] == 1
    assert 'comment' not in metadata
def test_grade_correct_code(self, preprocessors, gradebook, resources):
    """Is a passing code cell correctly graded?

    A grade cell with no error outputs should earn full auto credit,
    leave no manual score, and need no manual grading.
    """
    cell = create_grade_cell("hello", "code", "foo", 1)
    nb = new_notebook()
    nb.cells.append(cell)

    # Save the assignment, register a submission, then autograde.
    preprocessors[0].preprocess(nb, resources)
    gradebook.add_submission("ps0", "bar")
    preprocessors[1].preprocess(nb, resources)

    grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar")
    assert grade_cell.score == 1
    assert grade_cell.max_score == 1
    assert grade_cell.auto_score == 1
    # Fixed: identity comparison with None (`is None`), not equality (PEP 8 E711).
    assert grade_cell.manual_score is None
    assert not grade_cell.needs_manual_grade
def test_save_incorrect_code(self, preprocessors, gradebook, resources):
    """A failing code cell (with checksum) is saved with a zero score."""
    cell = create_grade_cell("hello", "code", "foo", 1)
    cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
    # An error output marks the cell as failed for autograding.
    cell.outputs = [new_output('error', ename="NotImplementedError",
                               evalue="", traceback=["error"])]

    nb = new_notebook()
    nb.cells.append(cell)
    preprocessors[0].preprocess(nb, resources)
    gradebook.add_submission("ps0", "bar")
    preprocessors[1].preprocess(nb, resources)
    preprocessors[2].preprocess(nb, resources)

    metadata = cell.metadata.nbgrader
    assert metadata['score'] == 0
    assert metadata['points'] == 1
    assert 'comment' not in metadata
def test_grade_incorrect_code(self, preprocessors, gradebook, resources):
    """Is a failing code cell correctly graded?

    A grade cell whose outputs contain an error should receive an auto
    score of zero, no manual score, and need no manual grading.
    """
    cell = create_grade_cell("hello", "code", "foo", 1)
    # Attach an error output so autograding marks the cell as failed.
    cell.outputs = [new_output('error', ename="NotImplementedError",
                               evalue="", traceback=["error"])]

    nb = new_notebook()
    nb.cells.append(cell)
    preprocessors[0].preprocess(nb, resources)
    gradebook.add_submission("ps0", "bar")
    preprocessors[1].preprocess(nb, resources)

    grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar")
    assert grade_cell.score == 0
    assert grade_cell.max_score == 1
    assert grade_cell.auto_score == 0
    # Fixed: identity comparison with None (`is None`), not equality (PEP 8 E711).
    assert grade_cell.manual_score is None
    assert not grade_cell.needs_manual_grade
def test_grade_correct_code(self, preprocessors, gradebook, resources):
    """Is a passing code cell correctly graded?

    Same as the no-checksum variant, but the cell carries its checksum
    so the source-matching path is exercised as well.
    """
    cell = create_grade_cell("hello", "code", "foo", 1)
    cell.metadata.nbgrader['checksum'] = compute_checksum(cell)

    nb = new_notebook()
    nb.cells.append(cell)
    preprocessors[0].preprocess(nb, resources)
    gradebook.add_submission("ps0", "bar")
    preprocessors[1].preprocess(nb, resources)

    grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar")
    assert grade_cell.score == 1
    assert grade_cell.max_score == 1
    assert grade_cell.auto_score == 1
    # Fixed: identity comparison with None (`is None`), not equality (PEP 8 E711).
    assert grade_cell.manual_score is None
    assert not grade_cell.needs_manual_grade
def test_save_code_grade_cell(self, preprocessor, resources):
    """A code grade cell is persisted as both a grade cell and a locked source cell."""
    cell = create_grade_cell("hello", "code", "foo", 1)
    cell.metadata.nbgrader['checksum'] = compute_checksum(cell)

    nb = new_notebook()
    nb.cells.append(cell)
    nb, resources = preprocessor.preprocess(nb, resources)
    gb = preprocessor.gradebook

    # The grade-cell record carries the score and cell type.
    grade_record = gb.find_grade_cell("foo", "test", "ps0")
    assert grade_record.max_score == 1
    assert grade_record.cell_type == "code"

    # The source-cell record carries the source, checksum, and lock flag.
    source_record = gb.find_source_cell("foo", "test", "ps0")
    assert source_record.source == "hello"
    assert source_record.checksum == cell.metadata.nbgrader["checksum"]
    assert source_record.cell_type == "code"
    assert source_record.locked
def test_grade_incorrect_code(self, preprocessors, gradebook, resources):
    """Is a failing code cell correctly graded?

    Variant with the checksum recorded on the cell; an error output
    must still produce an auto score of zero with no manual score.
    """
    cell = create_grade_cell("hello", "code", "foo", 1)
    cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
    # Attach an error output so autograding marks the cell as failed.
    cell.outputs = [
        new_output('error', ename="NotImplementedError", evalue="", traceback=["error"])
    ]

    nb = new_notebook()
    nb.cells.append(cell)
    preprocessors[0].preprocess(nb, resources)
    gradebook.add_submission("ps0", "bar")
    preprocessors[1].preprocess(nb, resources)

    grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar")
    assert grade_cell.score == 0
    assert grade_cell.max_score == 1
    assert grade_cell.auto_score == 0
    # Fixed: identity comparison with None (`is None`), not equality (PEP 8 E711).
    assert grade_cell.manual_score is None
    assert not grade_cell.needs_manual_grade
def test_determine_grade_markdown_grade():
    """Markdown grade cells cannot be autograded: score is None, max is kept."""
    markdown_cell = create_grade_cell('test', "markdown", "foo", 10)
    assert utils.determine_grade(markdown_cell) == (None, 10)
def test_compute_checksum_grade_cell():
    """Toggling the grade flag changes the checksum of an otherwise identical cell."""
    graded = create_grade_cell("hello", "code", "foo", 1)
    ungraded = create_grade_cell("hello", "code", "foo", 1)
    ungraded.metadata.nbgrader["grade"] = False
    assert utils.compute_checksum(graded) != utils.compute_checksum(ungraded)