def test_autograder(self):
    """
    Check that the correct zipfile is created by gs_generator.py
    """
    # create the zipfile
    generate_command = [
        "generate",
        "-t", TEST_FILES_PATH + "tests",
        "-o", TEST_FILES_PATH,
        "-r", TEST_FILES_PATH + "requirements.txt",
        TEST_FILES_PATH + "data/test-df.csv",
    ]

    # temporarily move any environment.yml in the working directory out of
    # the way so that otter generate does not pick it up
    if "environment.yml" in os.listdir(os.getcwd()):
        os.rename("environment.yml", "environment_temp_rename.yml")

    run_otter(generate_command)

    with self.unzip_to_temp(TEST_FILES_PATH + "autograder.zip", delete=True) as unzipped_dir:
        self.assertDirsEqual(unzipped_dir, TEST_FILES_PATH + "autograder-correct")

    # restore the environment.yml moved aside above
    if "environment_temp_rename.yml" in os.listdir(os.getcwd()):
        os.rename("environment_temp_rename.yml", "environment.yml")
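# ``unzip_to_temp`` is a helper defined elsewhere in this suite. For
# reference, a minimal sketch of what such a context manager might look like
# (the implementation below is an assumption, not otter's actual helper):
#
#     import contextlib, os, shutil, tempfile, zipfile
#
#     @contextlib.contextmanager
#     def unzip_to_temp(zip_path, delete=False):
#         tmp_dir = tempfile.mkdtemp()
#         with zipfile.ZipFile(zip_path) as zf:
#             zf.extractall(tmp_dir)      # unpack the zip into a temp dir
#         try:
#             yield tmp_dir               # hand the directory to the caller
#         finally:
#             shutil.rmtree(tmp_dir)      # always clean up the temp dir
#             if delete:
#                 os.remove(zip_path)     # optionally delete the zip as well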
def test_gradescope_example(self, mocked_client):
    """
    Checks that otter assign filters and outputs correctly and creates a
    correct .zip file along with PDFs; also exercises the Gradescope
    integration.
    """
    mocked_client.return_value = "token"

    run_gradescope_args = [
        "assign",
        "--no-run-tests",
        TEST_FILES_PATH + "generate-gradescope.ipynb",
        TEST_FILES_PATH + "output",
    ]

    run_otter(run_gradescope_args)

    self.assertDirsEqual(
        TEST_FILES_PATH + "output",
        TEST_FILES_PATH + "gs-correct",
        ignore_ext=[".pdf", ".zip"],
    )

    # check the Gradescope autograder zip file
    self.check_gradescope_zipfile(
        TEST_FILES_PATH + "output/autograder/autograder.zip",
        TEST_FILES_PATH + "gs-autograder-correct",
    )
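# ``check_gradescope_zipfile`` is another helper from this suite whose
# definition is not shown here. Presumably it unzips the generated autograder
# zip and compares its contents against the "-correct" directory; a rough
# sketch under that assumption, reusing the helpers seen above:
#
#     def check_gradescope_zipfile(self, zip_path, correct_dir_path):
#         with self.unzip_to_temp(zip_path) as unzipped_dir:
#             self.assertDirsEqual(unzipped_dir, correct_dir_path)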
def test_no_close(self):
    """
    Tests a filtered export without a closing comment
    """
    test_file = "no-close-tag-test"
    grade_command = [
        "export",
        "--filtering",
        "-e", "latex",
        "--pagebreaks",
        "-s",
        TEST_FILES_PATH + test_file + ".ipynb",
    ]

    run_otter(grade_command)

    # check that both the PDF and the TeX source were created
    self.assertTrue(os.path.isfile(TEST_FILES_PATH + test_file + ".pdf"))
    self.assertTrue(os.path.isfile(TEST_FILES_PATH + test_file + ".tex"))

    # cleanup
    cleanup_command = [
        "rm",
        TEST_FILES_PATH + test_file + ".pdf",
        TEST_FILES_PATH + test_file + ".tex",
    ]
    cleanup = subprocess.run(cleanup_command, stdout=PIPE, stderr=PIPE)
    self.assertEqual(cleanup.returncode, 0, "Error in cleanup: " + str(cleanup.stderr))
def generate_autograder_zip(self, pdfs=False):
    """
    Runs ``otter generate`` to create the autograder zip used by the grading
    tests; if ``pdfs`` is true, the otter_config.json in the test files
    directory is also passed.
    """
    cmd = [
        "generate",
        "-t", TEST_FILES_PATH + "tests",
        "-r", TEST_FILES_PATH + "requirements.txt",
        "-o", TEST_FILES_PATH,
    ]
    if pdfs:
        cmd += ["-c", TEST_FILES_PATH + "otter_config.json"]
    run_otter(cmd)
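# Usage note: the grading tests below call this helper before invoking
# ``otter grade``; the resulting zip lands at TEST_FILES_PATH + "autograder.zip"
# and is passed to the grader via "-a", e.g.:
#
#     self.generate_autograder_zip(pdfs=True)
#     run_otter(["grade", ..., "-a", TEST_FILES_PATH + "autograder.zip", ...])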
def tearDownClass(cls):
    super().tearDownClass()

    # run the Makefile target that cleans up the Docker grading test artifacts
    cleanup_image_cmd = ["make", "cleanup-docker-grade-test"]
    subprocess.run(cleanup_image_cmd, check=True)

    # restore the original Dockerfile if setup moved it aside
    if os.path.exists("otter/grade/old-Dockerfile"):
        os.remove("otter/grade/Dockerfile")
        shutil.move("otter/grade/old-Dockerfile", "otter/grade/Dockerfile")

    # prune grading images
    run_otter(["grade", "--prune", "-f"])
def test_otter_example(self):
    """
    Checks that otter assign filters and outputs correctly, as well as
    creates a correct .otter file
    """
    run_assign_args = [
        "assign",
        TEST_FILES_PATH + "generate-otter.ipynb",
        TEST_FILES_PATH + "output",
    ]

    run_otter(run_assign_args)

    self.assertDirsEqual(TEST_FILES_PATH + "output", TEST_FILES_PATH + "otter-correct")
def test_r_example(self):
    """
    Checks that otter assign works correctly for R notebooks
    """
    run_assign_args = [
        "assign",
        TEST_FILES_PATH + "r-example.ipynb",
        TEST_FILES_PATH + "output",
    ]

    run_otter(run_assign_args)

    self.assertDirsEqual(
        TEST_FILES_PATH + "output",
        TEST_FILES_PATH + "r-correct",
        ignore_ext=[".pdf", ".zip"],
    )
def test_pdf_example(self):
    """
    Checks that otter assign filters and outputs correctly, as well as
    creates a correct .zip file along with PDFs
    """
    run_assign_args = [
        "assign",
        "--no-run-tests",
        TEST_FILES_PATH + "generate-pdf.ipynb",
        TEST_FILES_PATH + "output",
    ]

    run_otter(run_assign_args)

    self.assertDirsEqual(
        TEST_FILES_PATH + "output",
        TEST_FILES_PATH + "pdf-correct",
        ignore_ext=[".pdf", ".zip"],
    )
def test_convert_example(self):
    """
    Checks that otter assign filters and outputs correctly
    """
    # run otter assign
    run_assign_args = [
        "assign",
        "--no-run-tests",
        TEST_FILES_PATH + "example.ipynb",
        TEST_FILES_PATH + "output",
    ]

    run_otter(run_assign_args)

    self.assertDirsEqual(TEST_FILES_PATH + "output", TEST_FILES_PATH + "example-correct")
def test_custom_env(self):
    """
    Check that a custom environment.yml is correctly read and modified
    """
    # create the zipfile
    generate_command = [
        "generate",
        "-t", TEST_FILES_PATH + "tests",
        "-o", TEST_FILES_PATH,
        "-r", TEST_FILES_PATH + "requirements.txt",
        "-e", TEST_FILES_PATH + "environment.yml",
        TEST_FILES_PATH + "data/test-df.csv",
    ]

    run_otter(generate_command)

    with self.unzip_to_temp(TEST_FILES_PATH + "autograder.zip", delete=True) as unzipped_dir:
        self.assertDirsEqual(unzipped_dir, TEST_FILES_PATH + "autograder-custom-env")
def test_rmd_example(self):
    """
    Checks that otter assign works for Rmd files
    """
    run_assign_args = [
        "assign",
        TEST_FILES_PATH + "rmd-example.Rmd",
        TEST_FILES_PATH + "output",
    ]

    run_otter(run_assign_args)

    self.assertDirsEqual(
        TEST_FILES_PATH + "output",
        TEST_FILES_PATH + "rmd-correct",
        ignore_ext=[".zip"],
    )

    # check the Gradescope autograder zip file
    self.check_gradescope_zipfile(
        TEST_FILES_PATH + "output/autograder/autograder.zip",
        TEST_FILES_PATH + "rmd-autograder-correct",
    )
def test_notebooks_with_pdfs(self):
    """
    Check that the example of 100 notebooks grades correctly when run locally.
    """
    self.generate_autograder_zip(pdfs=True)

    # grade the 100 notebooks
    grade_command = [
        "grade",
        "-y", TEST_FILES_PATH + "notebooks/meta.yml",
        "-p", TEST_FILES_PATH + "notebooks/",
        "-o", "test/",
        "-a", TEST_FILES_PATH + "autograder.zip",
        "--containers", "5",
        "--image", "otter-test",
    ]

    run_otter(grade_command)

    # read the output and expected output
    df_test = pd.read_csv("test/final_grades.csv")

    # sort by filename
    df_test = df_test.sort_values("identifier").reset_index(drop=True)

    # each identifier encodes the question numbers its submission is meant to fail
    df_test["failures"] = df_test["identifier"].apply(
        lambda x: [int(n) for n in re.split(r"\D+", x) if len(n) > 0]
    )

    # add score sum cols for tests
    for test in self.test_points:
        test_cols = [col for col in df_test.columns if bool(re.search(fr"\b{test}\b", col))]
        df_test[test] = df_test[test_cols].sum(axis=1)

    # check point values
    for _, row in df_test.iterrows():
        for test in self.test_points:
            if int(re.sub(r"\D", "", test)) in row["failures"]:
                # q6.py has all_or_nothing set to False, so if the hidden
                # tests fail you should still get 2.5 points
                if "6H" in row["identifier"] and "q6" == test:
                    self.assertEqual(
                        row[test], 2.5,
                        "{} supposed to fail {} but passed".format(row["identifier"], test),
                    )
                else:
                    self.assertEqual(
                        row[test], 0,
                        "{} supposed to fail {} but passed".format(row["identifier"], test),
                    )
            else:
                self.assertEqual(
                    row[test], self.test_points[test],
                    "{} supposed to pass {} but failed".format(row["identifier"], test),
                )

    # remove the extra output
    cleanup_command = [
        "rm", "-rf",
        "test/final_grades.csv",
        "test/submission_pdfs",
        TEST_FILES_PATH + "autograder.zip",
    ]
    cleanup = subprocess.run(cleanup_command, stdout=PIPE, stderr=PIPE)
    self.assertEqual(len(cleanup.stderr), 0, cleanup.stderr.decode("utf-8"))
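# To illustrate the failure parsing above: an identifier like "fails2and6H"
# (a hypothetical name, following the pattern the test relies on) yields
# failures == [2, 6], since re.split(r"\D+", ...) keeps only the runs of
# digits in the submission's name:
#
#     >>> [int(n) for n in re.split(r"\D+", "fails2and6H") if len(n) > 0]
#     [2, 6]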
def test_otter_check_script(self):
    """
    Checks that the script checker works
    """
    # run the checker on each test file individually
    for file in glob(TEST_FILES_PATH + "tests/*.py"):
        check_command = [
            "check",
            TEST_FILES_PATH + "file0.py",
            "-q", os.path.split(file)[1][:-3],
            "-t", os.path.split(file)[0],
        ]

        # capture stdout
        output = StringIO()
        with contextlib.redirect_stdout(output):
            # mock block_print, which otherwise interferes with the capture of stdout
            with mock.patch("otter.check.block_print"):
                run_otter(check_command)

        if os.path.split(file)[1] != "q2.py":
            self.assertEqual(
                output.getvalue().strip().split("\n")[-1].strip(),
                "All tests passed!",
                "Did not pass test at {}".format(file),
            )

    # run the checker command on the whole tests directory
    check_command = [
        "check",
        TEST_FILES_PATH + "file0.py",
        "-t", TEST_FILES_PATH + "tests",
    ]

    # capture stdout
    output = StringIO()
    with contextlib.redirect_stdout(output):
        # mock block_print, which otherwise interferes with the capture of stdout
        with mock.patch("otter.check.block_print"):
            run_otter(check_command)

    self.assertEqual(
        output.getvalue().strip(),
        dedent("""\
            [0.         0.02002002 0.04004004 0.06006006 0.08008008]
            q1 passed!
            q2 results:
                Trying:
                    1 == 1
                Expecting:
                    False
                **********************************************************************
                Line 2, in q2 0
                Failed example:
                    1 == 1
                Expected:
                    False
                Got:
                    True
            q3 passed!
            q4 passed!
            q5 passed!"""),
        "Did not pass correct tests",
    )
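# ``otter.check.block_print`` is patched above because it suppresses stdout,
# which would defeat the redirect_stdout capture. A typical implementation of
# such a print-blocking helper (a sketch based on the common idiom, not
# necessarily otter's actual code):
#
#     import os, sys
#
#     def block_print():
#         sys.stdout = open(os.devnull, "w")   # discard all subsequent prints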