    class MetaRegressions(CSE141Lab.MetaRegressions):
        def setUp(self):
            CSE141Lab.MetaRegressions.setUp(self)
            self.lab_spec = ThisLab.load(".")

        @parameterized.parameterized.expand(test_configs("solution", "."))
        def test_solution(self, solution, flags):
            if flags.devel and solution == "solution":
                self.skipTest(
                    "Skipping since this solution doesn't work in devel mode")

            result, tag = self.run_solution(solution, flags)

            js = result.results

            c = self.read_text_file("code.csv", root=".")
            b = self.read_text_file("benchmark.csv", root=".")
            self.assertEqual(
                len(c.strip().split("\n")), 2,
                "code.csv should have exactly 2 lines.")
            self.assertEqual(
                len(b.strip().split("\n")), 2,
                "benchmark.csv should have exactly 2 lines.")

            # 'runtime' is extracted but not asserted against in this example;
            # the call still exercises csv_extract_by_line on benchmark.csv.
            runtime = self.lab_spec.csv_extract_by_line(b, 'runtime')
            if flags.grades_valid():
                self.assertEqual(
                    len(js["gradescope_test_output"]['leaderboard']), 1,
                    "Expected exactly one leaderboard entry")
Example #2

    class MetaRegressions(CSE141Lab.MetaRegressions):
        def setUp(self):
            CSE141Lab.MetaRegressions.setUp(self)
            self.lab_spec = ThisLab.load(".")

        @parameterized.parameterized.expand(test_configs("solution", "."))
        def test_solution(self, solution, flags):
            result, tag = self.run_solution(solution, flags)

            js = result.results

            self.assertFileExists("inst_mix.csv", tag)
            self.assertFileExists("pe.csv", tag)
            self.assertFileExists("code.s", tag)

            b = self.read_file("inst_mix.csv", tag)
            runtime = self.lab_spec.csv_extract_by_line(b, 'runtime')

            if solution == ".":
                self.assertLess(runtime, 30)
                if flags.grades_valid():
                    self.assertEqual(
                        float(js['gradescope_test_output']['score']), 1,
                        f"Failed on {tag}: score check")
            elif solution == "solution":
                self.assertLess(runtime, 30)
                if flags.grades_valid():
                    self.assertEqual(
                        float(js['gradescope_test_output']['score']), 8,
                        f"Failed on {tag}: score check")

            if flags.gprof:
                self.assertFileExists("inst_mix.gprof", tag)
                self.assertFileExists("pe.gprof", tag)
Example #3
    class MetaRegressions(CSE141Lab.MetaRegressions):
        @parameterized.parameterized.expand(test_configs("solution", "."))
        def test_solution(self, solution, flags):
            result, tag = self.run_solution(solution, flags)

            self.assertFileExists("code.out", tag=tag)
            self.assertFileExists("code-stats.csv", tag=tag)
            if not flags.devel:
                self.assertRegex(
                    self.read_file("code-stats.csv", root="."), "inst_count",
                    f"Failed on {tag}: looking for 'inst_count' in code-stats.csv"
                )
            else:
                self.assertRegex(
                    self.read_file("code-stats.csv", root="."), 'runtime',
                    f"Failed on {tag}: looking for 'runtime' in code-stats.csv"
                )

            js = result.results

            if solution == ".":
                if flags.grades_valid():
                    self.assertEqual(
                        float(js['gradescope_test_output']['score']), 3,
                        f"Failed on {tag}: score check")
            elif solution == "solution":
                if flags.grades_valid():
                    self.assertEqual(
                        float(js['gradescope_test_output']['score']), 7,
                        f"Failed on {tag}: score check")
Example #4
    class MetaRegressions(CSE141Lab.MetaRegressions):
        @parameterized.parameterized.expand(test_configs("solution", ".", "bad_solution"))
        def test_solution(self, solution, flags):
            result, tag = self.run_solution(solution, flags)
            js = result.results
            log.debug(json.dumps(js, indent=4))
            if solution == ".":
                if flags.grades_valid():
                    self.assertEqual(float(js['gradescope_test_output']['score']), 0)
            elif solution == "bad_solution":
                if flags.grades_valid():
                    self.assertEqual(float(js['gradescope_test_output']['score']), 0)
            elif solution == "solution":
                if flags.grades_valid():
                    self.assertEqual(float(js['gradescope_test_output']['score']), 1)
                    self.assertEqual(float(js['gradescope_test_output']['leaderboard'][0]['value']), 4096)
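
`test_configs` produces the `(solution, flags)` pairs that `parameterized.expand` turns into individual test cases, one per solution directory and flag combination. A hypothetical sketch, reusing the `RunFlags` stand-in above:

    import itertools

    def test_configs(*solutions):
        # Hypothetical: pair every solution directory with a normal and a devel run.
        return [(solution, RunFlags(devel=devel))
                for solution, devel in itertools.product(solutions, (False, True))]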