Example #1
    def test_failure_second_none(self):
        with self.assertRaises(ValueError):
            merge_execution_stats(None, None)

        r0 = get_stats(0.1, 0.2, 0.3, Sandbox.EXIT_OK)
        with self.assertRaises(ValueError):
            merge_execution_stats(r0, None)
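
This first example pins down the argument contract of merge_execution_stats: the second operand is mandatory, while a missing first operand (see test_success_first_none, Example #13) acts as an empty accumulator. A minimal sketch of that guard, reconstructed from the tests on this page rather than taken from the CMS source (the helper name is mine):

    def merge_execution_stats_sketch(first_stats, second_stats):
        """Argument handling only; the merging rules themselves are
        sketched in the notes after Examples #3, #4 and #8."""
        if second_stats is None:
            raise ValueError("The second operand cannot be None.")
        if first_stats is None:
            # Fresh copy: test_success_first_none (Example #13) asserts
            # the result is a new object, not an alias of second_stats.
            return dict(second_stats)
        return dict(first_stats)  # base onto which the full merge applies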
Example #2
 def test_success_first_status_ok(self):
     self.assertStats(
         merge_execution_stats(get_stats(0, 0, 0, Sandbox.EXIT_OK),
                               get_stats(0, 0, 0, Sandbox.EXIT_TIMEOUT)),
         get_stats(0, 0, 0, Sandbox.EXIT_TIMEOUT))
     self.assertStats(
         merge_execution_stats(
             get_stats(0, 0, 0, Sandbox.EXIT_OK),
             get_stats(0, 0, 0, Sandbox.EXIT_SIGNAL, signal=11)),
         get_stats(0, 0, 0, Sandbox.EXIT_SIGNAL, signal=11))
Example #3
 def test_success_output_joined(self):
     r0 = get_stats(0, 0, 0, Sandbox.EXIT_OK, stdout="o1", stderr="e1")
     r1 = get_stats(0, 0, 0, Sandbox.EXIT_OK, stdout="o2", stderr="e2")
     m = merge_execution_stats(r0, r1)
     self.assertStats(
         m, get_stats(0, 0, 0, Sandbox.EXIT_OK,
                      stdout="o1\n===\no2", stderr="e1\n===\ne2"))
Example #4
 def test_success_first_status_not_ok(self):
     self.assertStats(
         merge_execution_stats(
             get_stats(0, 0, 0, Sandbox.EXIT_TIMEOUT),
             get_stats(0, 0, 0, Sandbox.EXIT_SIGNAL, signal=11)),
         get_stats(0, 0, 0, Sandbox.EXIT_TIMEOUT))
     self.assertStats(
         merge_execution_stats(
             get_stats(0, 0, 0, Sandbox.EXIT_SIGNAL, signal=9),
             get_stats(0, 0, 0, Sandbox.EXIT_SIGNAL, signal=11)),
         get_stats(0, 0, 0, Sandbox.EXIT_SIGNAL, signal=9))
     self.assertStats(
         merge_execution_stats(
             get_stats(0, 0, 0, Sandbox.EXIT_SIGNAL, signal=9),
             get_stats(0, 0, 0, Sandbox.EXIT_OK)),
         get_stats(0, 0, 0, Sandbox.EXIT_SIGNAL, signal=9))
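
Together with Example #2, these assertions fix the precedence rule for exit statuses: the first operand wins whenever it is not EXIT_OK, otherwise the second operand's status (and signal, if any) is taken, so an early failure is never masked by a later one. A sketch of just that branch, assuming the same names and dict keys as the stats dicts used on this page (this is a reconstruction, not the CMS code):

    def merge_exit_status(first_stats, second_stats, merged):
        # The first operand's status wins unless it is EXIT_OK; the
        # signal travels with whichever status is chosen.
        source = (first_stats
                  if first_stats["exit_status"] != Sandbox.EXIT_OK
                  else second_stats)
        merged["exit_status"] = source["exit_status"]
        if "signal" in source:
            merged["signal"] = source["signal"]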
Example #5
    def test_many_processes_merged_timeout(self):
        # Solution was ok, but considering all runtimes, it hit timeout.
        tt, job = self.prepare(
            [2, "stub", "fifo_io"],
            {"foo": EXE_FOO}, {"manager": MANAGER})
        job.time_limit = 2.5
        stats0 = dict(STATS_OK)
        stats0["execution_time"] = 1.0
        stats1 = dict(STATS_OK)
        stats1["execution_time"] = 2.0
        sandbox_mgr = self.expect_sandbox()
        sandbox_usr0 = self.expect_sandbox()
        sandbox_usr1 = self.expect_sandbox()
        self._set_evaluation_step_return_values({
            sandbox_mgr: (True, True, STATS_OK),
            sandbox_usr0: (True, True, stats0),
            sandbox_usr1: (True, True, stats1),
        })

        tt.evaluate(job, self.file_cacher)

        # The stats are the merge of the two, but the status is changed to
        # timeout since the sum of the cpu times is over the time limit.
        stats = merge_execution_stats(stats0, stats1)
        stats["exit_status"] = "timeout"
        self.assertResultsInJob(
            job, True, str(0.0), self.human_evaluation_message.return_value,
            stats)
        sandbox_mgr.cleanup.assert_called_once_with(delete=True)
        sandbox_usr0.cleanup.assert_called_once_with(delete=True)
        sandbox_usr1.cleanup.assert_called_once_with(delete=True)
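
The timeout rewrite in this test happens in the task type, after the merge: each process stayed within the 2.5 s limit on its own, but the merged execution_time is 1.0 + 2.0 = 3.0 s. A sketch of the post-merge check the test implies (the helper name and the behaviour exactly at the boundary are assumptions; the "timeout" string is the one the test compares against):

    def apply_global_time_limit(stats, time_limit):
        # Hypothetical helper: rewrite the merged status when the total
        # CPU time across all user processes exceeds the testcase limit.
        if time_limit is not None and stats["execution_time"] > time_limit:
            stats["exit_status"] = "timeout"

Usage, mirroring the test body: apply_global_time_limit(stats, job.time_limit) right after merge_execution_stats(stats0, stats1).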
Example #6
    def test_many_processes_merged_timeout(self):
        # Solution was ok, but considering all runtimes, it hit timeout.
        tt, job = self.prepare([2, "stub", "fifo_io", "eval_manager"],
                               {"foo": EXE_FOO}, {"manager": MANAGER})
        job.time_limit = 2.5
        stats0 = dict(STATS_OK)
        stats0["execution_time"] = 1.0
        stats1 = dict(STATS_OK)
        stats1["execution_time"] = 2.0
        sandbox_mgr = self.expect_sandbox()
        sandbox_usr0 = self.expect_sandbox()
        sandbox_usr1 = self.expect_sandbox()
        self._set_evaluation_step_return_values({
            sandbox_mgr: (True, True, STATS_OK),
            sandbox_usr0: (True, True, stats0),
            sandbox_usr1: (True, True, stats1),
        })

        tt.evaluate(job, self.file_cacher)

        # The stats are the merge of the two, but the status is changed to
        # timeout since the sum of the cpu times is over the time limit.
        stats = merge_execution_stats(stats0, stats1)
        stats["exit_status"] = "timeout"
        self.assertResultsInJob(job, True, str(0.0),
                                self.human_evaluation_message.return_value,
                                stats)
        sandbox_mgr.cleanup.assert_called_once_with(delete=True)
        sandbox_usr0.cleanup.assert_called_once_with(delete=True)
        sandbox_usr1.cleanup.assert_called_once_with(delete=True)
Example #7
 def test_empty_outputs_are_preserved(self):
     r0 = get_stats(0, 0, 0, Sandbox.EXIT_OK, stdout="o1", stderr="")
     r1 = get_stats(0, 0, 0, Sandbox.EXIT_OK, stdout="", stderr="e2")
     m = merge_execution_stats(r0, r1)
     self.assertStats(
         m, get_stats(0, 0, 0, Sandbox.EXIT_OK,
                      stdout="o1\n===\n", stderr="\n===\ne2"))
Example #8
 def test_success_sequential(self):
     # In non-concurrent mode memory is max'd and wall clock is added.
     self.assertStats(
         merge_execution_stats(get_stats(1.0, 2.0, 300, Sandbox.EXIT_OK),
                               get_stats(0.1, 0.2, 0.3, Sandbox.EXIT_OK),
                               concurrent=False),
         get_stats(1.1, 2.2, 300.0, Sandbox.EXIT_OK))
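
Read alongside Example #18 (the concurrent default), the expected tuples fix the arithmetic of the merge: CPU time is summed in both modes; for concurrent processes wall-clock time is max'd (the runs overlap) and memory is summed (both are resident at once), while with concurrent=False it is the other way around. A sketch reconstructed from those expected values ("execution_time" appears in the tests above; the other two key names are assumptions in the same style):

    def merge_resources(first_stats, second_stats, merged, concurrent=True):
        merged["execution_time"] = (first_stats["execution_time"]
                                    + second_stats["execution_time"])
        if concurrent:
            # Overlapping runs: elapsed time is the longer of the two,
            # but both processes hold memory at the same time.
            merged["execution_wall_clock_time"] = max(
                first_stats["execution_wall_clock_time"],
                second_stats["execution_wall_clock_time"])
            merged["execution_memory"] = (first_stats["execution_memory"]
                                          + second_stats["execution_memory"])
        else:
            # Back-to-back runs: elapsed times add up, memory is reused.
            merged["execution_wall_clock_time"] = (
                first_stats["execution_wall_clock_time"]
                + second_stats["execution_wall_clock_time"])
            merged["execution_memory"] = max(
                first_stats["execution_memory"],
                second_stats["execution_memory"])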
Example #9
 def test_success_stats_are_not_modified(self):
     r0 = get_stats(1.0, 2.0, 300, Sandbox.EXIT_OK)
     r1 = get_stats(0.1, 0.2, 0.3, Sandbox.EXIT_SIGNAL, signal=11)
     m = merge_execution_stats(r0, r1)
     self.assertStats(
         m, get_stats(1.1, 2.0, 300.3, Sandbox.EXIT_SIGNAL, signal=11))
     self.assertStats(r0, get_stats(1.0, 2.0, 300, Sandbox.EXIT_OK))
     self.assertStats(
         r1, get_stats(0.1, 0.2, 0.3, Sandbox.EXIT_SIGNAL, signal=11))
Example #10
    def test_many_processes_last_user_failure(self):
        # One of the user programs had problems, it's the user's fault.
        tt, job = self.prepare([2], {"foo": EXE_FOO}, {"manager": MANAGER})
        sandbox_mgr = self.expect_sandbox()
        sandbox_usr0 = self.expect_sandbox()
        sandbox_usr1 = self.expect_sandbox()
        self._set_evaluation_step_return_values({
            sandbox_mgr: (True, True, STATS_OK),
            sandbox_usr0: (True, True, STATS_OK),
            sandbox_usr1: (True, False, STATS_RE),
        })

        tt.evaluate(job, self.file_cacher)

        self.assertResultsInJob(job, True, str(0.0),
                                self.human_evaluation_message.return_value,
                                merge_execution_stats(STATS_OK, STATS_RE))
        sandbox_mgr.cleanup.assert_called_once_with(delete=True)
        sandbox_usr0.cleanup.assert_called_once_with(delete=True)
        sandbox_usr1.cleanup.assert_called_once_with(delete=True)
Example #11
    def test_many_processes_last_user_failure(self):
        # One of the user programs had problems, it's the user's fault.
        tt, job = self.prepare(
            ["grader", 2], {"foo": EXE_FOO}, {"manager": MANAGER})
        sandbox_mgr = self.expect_sandbox()
        sandbox_usr0 = self.expect_sandbox()
        sandbox_usr1 = self.expect_sandbox()
        self._set_evaluation_step_return_values({
            sandbox_mgr: (True, True, STATS_OK),
            sandbox_usr0: (True, True, STATS_OK),
            sandbox_usr1: (True, False, STATS_RE),
        })

        tt.evaluate(job, self.file_cacher)

        self.assertResultsInJob(
            job, True, str(0.0), self.human_evaluation_message.return_value,
            merge_execution_stats(STATS_OK, STATS_RE))
        sandbox_mgr.cleanup.assert_called_once_with(delete=True)
        sandbox_usr0.cleanup.assert_called_once_with(delete=True)
        sandbox_usr1.cleanup.assert_called_once_with(delete=True)
Example #12
 def test_success_output_missing_one(self):
     r0 = get_stats(0, 0, 0, Sandbox.EXIT_OK, stdout="o1")
     r1 = get_stats(0, 0, 0, Sandbox.EXIT_OK, stderr="e2")
     m = merge_execution_stats(r0, r1)
     self.assertStats(
         m, get_stats(0, 0, 0, Sandbox.EXIT_OK, stdout="o1", stderr="e2"))
Example #13
 def test_success_first_none(self):
     r1 = get_stats(0.1, 0.2, 0.3, Sandbox.EXIT_SIGNAL, signal=11)
     m = merge_execution_stats(None, r1)
     self.assertStats(m, r1)
     self.assertIsNot(r1, m)
Example #14
    def test_many_processes_success(self):
        tt, job = self.prepare([2, "stub", "fifo_io", "eval_manager"],
                               {"foo": EXE_FOO}, {"manager": MANAGER})
        sandbox_mgr = self.expect_sandbox()
        sandbox_usr0 = self.expect_sandbox()
        sandbox_usr1 = self.expect_sandbox()

        tt.evaluate(job, self.file_cacher)

        # Sandboxes created with the correct file cacher and names.
        self.Sandbox.assert_has_calls([
            call(self.file_cacher, name="manager_evaluate"),
            call(self.file_cacher, name="user_evaluate"),
            call(self.file_cacher, name="user_evaluate"),
        ], any_order=False)
        self.assertEqual(self.Sandbox.call_count, 3)
        # We need input (with the default filename for redirection) and
        # executable copied in the sandbox.
        sandbox_mgr.create_file_from_storage.assert_has_calls([
            call("manager", "digest of manager", executable=True),
            call("input.txt", "digest of input"),
        ], any_order=True)
        self.assertEqual(sandbox_mgr.create_file_from_storage.call_count, 2)
        # Same content in both user sandboxes.
        for s in [sandbox_usr0, sandbox_usr1]:
            s.create_file_from_storage.assert_has_calls([
                call("foo", "digest of foo", executable=True),
            ], any_order=True)
            self.assertEqual(s.create_file_from_storage.call_count, 1)
        # Evaluation step called with the right arguments, in particular
        # redirects, and no (other) writable files. For the user's command,
        # see fake_evaluation_commands in the mixin.
        cmdline_mgr = ["./manager",
                       "/fifo0/u0_to_m", "/fifo0/m_to_u0",
                       "/fifo1/u1_to_m", "/fifo1/m_to_u1"]
        cmdline_usr0 = ["run1", "foo", "stub",
                        "/fifo0/m_to_u0", "/fifo0/u0_to_m", "0"]
        cmdline_usr1 = ["run1", "foo", "stub",
                        "/fifo1/m_to_u1", "/fifo1/u1_to_m", "1"]
        self.evaluation_step_before_run.assert_has_calls([
            call(sandbox_mgr, cmdline_mgr, 4321, 1234 * 1024 * 1024,
                 dirs_map={
                     os.path.join(self.base_dir, "0"): ("/fifo0", "rw"),
                     os.path.join(self.base_dir, "1"): ("/fifo1", "rw"),
                 },
                 writable_files=["output.txt"],
                 stdin_redirect="input.txt", multiprocess=True),
            call(sandbox_usr0, cmdline_usr0, 2.5, 123 * 1024 * 1024,
                 dirs_map={os.path.join(self.base_dir, "0"): ("/fifo0", "rw")},
                 stdin_redirect=None,
                 stdout_redirect=None,
                 multiprocess=True),
            call(sandbox_usr1, cmdline_usr1, 2.5, 123 * 1024 * 1024,
                 dirs_map={os.path.join(self.base_dir, "1"): ("/fifo1", "rw")},
                 stdin_redirect=None,
                 stdout_redirect=None,
                 multiprocess=True),
        ], any_order=True)
        self.assertEqual(self.evaluation_step_before_run.call_count, 3)
        self.assertEqual(self.evaluation_step_after_run.call_count, 3)
        # Results put in job and sandbox deleted.
        self.assertResultsInJob(job, True, str(OUTCOME), TEXT,
                                merge_execution_stats(STATS_OK, STATS_OK))
        sandbox_mgr.cleanup.assert_called_once_with(delete=True)
        sandbox_usr0.cleanup.assert_called_once_with(delete=True)
        sandbox_usr1.cleanup.assert_called_once_with(delete=True)
Example #15
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if not check_executables_number(job, 1):
            return

        executable_filename = next(iterkeys(job.executables))
        executable_digest = job.executables[executable_filename].digest

        first_sandbox = create_sandbox(file_cacher, name="first_evaluate")
        second_sandbox = create_sandbox(file_cacher, name="second_evaluate")
        job.sandboxes.append(first_sandbox.get_root_path())
        job.sandboxes.append(second_sandbox.get_root_path())

        fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
        fifo = os.path.join(fifo_dir, "fifo")
        os.mkfifo(fifo)
        os.chmod(fifo_dir, 0o755)
        os.chmod(fifo, 0o666)

        # First step: we start the first manager.
        first_command = ["./%s" % executable_filename, "0", "/fifo/fifo"]
        first_executables_to_get = {executable_filename: executable_digest}
        first_files_to_get = {TwoSteps.INPUT_FILENAME: job.input}

        # Put the required files into the sandbox
        for filename, digest in iteritems(first_executables_to_get):
            first_sandbox.create_file_from_storage(filename,
                                                   digest,
                                                   executable=True)
        for filename, digest in iteritems(first_files_to_get):
            first_sandbox.create_file_from_storage(filename, digest)

        first = evaluation_step_before_run(
            first_sandbox,
            first_command,
            job.time_limit,
            job.memory_limit,
            dirs_map={fifo_dir: ("/fifo", "rw")},
            stdin_redirect=TwoSteps.INPUT_FILENAME,
            multiprocess=job.multithreaded_sandbox,
            wait=False)

        # Second step: we start the second manager.
        second_command = ["./%s" % executable_filename, "1", "/fifo/fifo"]
        second_executables_to_get = {executable_filename: executable_digest}
        second_files_to_get = {}

        # Put the required files into the second sandbox
        for filename, digest in iteritems(second_executables_to_get):
            second_sandbox.create_file_from_storage(filename,
                                                    digest,
                                                    executable=True)
        for filename, digest in iteritems(second_files_to_get):
            second_sandbox.create_file_from_storage(filename, digest)

        second = evaluation_step_before_run(
            second_sandbox,
            second_command,
            job.time_limit,
            job.memory_limit,
            dirs_map={fifo_dir: ("/fifo", "rw")},
            stdout_redirect=TwoSteps.OUTPUT_FILENAME,
            multiprocess=job.multithreaded_sandbox,
            wait=False)

        # Consume output.
        wait_without_std([second, first])

        box_success_first, evaluation_success_first, first_stats = \
            evaluation_step_after_run(first_sandbox)
        box_success_second, evaluation_success_second, second_stats = \
            evaluation_step_after_run(second_sandbox)

        box_success = box_success_first and box_success_second
        evaluation_success = \
            evaluation_success_first and evaluation_success_second
        stats = merge_execution_stats(first_stats, second_stats)

        outcome = None
        text = None

        # Error in the sandbox: nothing to do!
        if not box_success:
            pass

        # Contestant's error: the marks won't be good
        elif not evaluation_success:
            outcome = 0.0
            text = human_evaluation_message(stats)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not second_sandbox.file_exists(TwoSteps.OUTPUT_FILENAME):
                outcome = 0.0
                text = [N_("Evaluation didn't produce file %s"),
                        TwoSteps.OUTPUT_FILENAME]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage
                if job.get_output:
                    job.user_output = second_sandbox.get_file_to_storage(
                        TwoSteps.OUTPUT_FILENAME,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:
                    box_success, outcome, text = eval_output(
                        file_cacher, job,
                        TwoSteps.CHECKER_CODENAME
                        if self._uses_checker() else None,
                        user_output_path=second_sandbox.relative_path(
                            TwoSteps.OUTPUT_FILENAME))

        # Fill in the job with the results.
        job.success = box_success
        job.outcome = str(outcome) if outcome is not None else None
        job.text = text
        job.plus = stats

        delete_sandbox(first_sandbox, job.success)
        delete_sandbox(second_sandbox, job.success)
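
Design note: the two halves of the contestant's program run in separate sandboxes and can communicate only through the single named FIFO that both sandboxes see as /fifo/fifo; the extra command-line argument ("0" or "1") tells each binary which half it is. The redirections complete the pipeline: the first process reads input.txt on stdin, the second writes output.txt on stdout, and wait_without_std waits for both while (as its name and the "Consume output" comment suggest) keeping their standard streams drained so neither can block on a full pipe buffer. Example #19 below is a later revision of the same method, with the Python 2 compatibility helpers (iterkeys, iteritems) dropped and delete_sandbox taking an explicit keep_sandbox flag.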
Example #16
    def test_many_processes_success(self):
        tt, job = self.prepare(
            [2, "stub", "fifo_io"],
            {"foo": EXE_FOO}, {"manager": MANAGER})
        sandbox_mgr = self.expect_sandbox()
        sandbox_usr0 = self.expect_sandbox()
        sandbox_usr1 = self.expect_sandbox()

        tt.evaluate(job, self.file_cacher)

        # Sandboxes created with the correct file cacher and names.
        self.Sandbox.assert_has_calls([
            call(self.file_cacher, name="manager_evaluate"),
            call(self.file_cacher, name="user_evaluate"),
            call(self.file_cacher, name="user_evaluate"),
        ], any_order=False)
        self.assertEqual(self.Sandbox.call_count, 3)
        # We need input (with the default filename for redirection) and
        # executable copied in the sandbox.
        sandbox_mgr.create_file_from_storage.assert_has_calls([
            call("manager", "digest of manager", executable=True),
            call("input.txt", "digest of input"),
        ], any_order=True)
        self.assertEqual(sandbox_mgr.create_file_from_storage.call_count, 2)
        # Same content in both user sandboxes.
        for s in [sandbox_usr0, sandbox_usr1]:
            s.create_file_from_storage.assert_has_calls([
                call("foo", "digest of foo", executable=True),
            ], any_order=True)
            self.assertEqual(s.create_file_from_storage.call_count, 1)
        # Evaluation step called with the right arguments, in particular
        # redirects, and no (other) writable files. For the user's command,
        # see fake_evaluation_commands in the mixin.
        cmdline_mgr = ["./manager",
                       "/fifo0/u0_to_m", "/fifo0/m_to_u0",
                       "/fifo1/u1_to_m", "/fifo1/m_to_u1"]
        cmdline_usr0 = ["run1", "foo", "stub",
                        "/fifo0/m_to_u0", "/fifo0/u0_to_m", "0"]
        cmdline_usr1 = ["run1", "foo", "stub",
                        "/fifo1/m_to_u1", "/fifo1/u1_to_m", "1"]
        self.evaluation_step_before_run.assert_has_calls([
            call(sandbox_mgr, cmdline_mgr, 4321, 1234 * 1024 * 1024,
                 dirs_map={
                     os.path.join(self.base_dir, "0"): ("/fifo0", "rw"),
                     os.path.join(self.base_dir, "1"): ("/fifo1", "rw"),
                 },
                 writable_files=["output.txt"],
                 stdin_redirect="input.txt", multiprocess=True),
            call(sandbox_usr0, cmdline_usr0, 2.5, 123 * 1024 * 1024,
                 dirs_map={os.path.join(self.base_dir, "0"): ("/fifo0", "rw")},
                 stdin_redirect=None,
                 stdout_redirect=None,
                 multiprocess=True),
            call(sandbox_usr1, cmdline_usr1, 2.5, 123 * 1024 * 1024,
                 dirs_map={os.path.join(self.base_dir, "1"): ("/fifo1", "rw")},
                 stdin_redirect=None,
                 stdout_redirect=None,
                 multiprocess=True),
        ], any_order=True)
        self.assertEqual(self.evaluation_step_before_run.call_count, 3)
        self.assertEqual(self.evaluation_step_after_run.call_count, 3)
        # Results put in job and sandbox deleted.
        self.assertResultsInJob(job, True, str(OUTCOME), TEXT,
                                merge_execution_stats(STATS_OK, STATS_OK))
        sandbox_mgr.cleanup.assert_called_once_with(delete=True)
        sandbox_usr0.cleanup.assert_called_once_with(delete=True)
        sandbox_usr1.cleanup.assert_called_once_with(delete=True)
Example #17
 def test_missing_outputs_are_not_preserved(self):
     r0 = get_stats(0, 0, 0, Sandbox.EXIT_OK)
     r1 = get_stats(0, 0, 0, Sandbox.EXIT_OK, stdout="o2", stderr="e2")
     m = merge_execution_stats(r0, r1)
     self.assertStats(
         m, get_stats(0, 0, 0, Sandbox.EXIT_OK, stdout="o2", stderr="e2"))
Example #18
 def test_success_status_ok(self):
     self.assertStats(
         merge_execution_stats(get_stats(1.0, 2.0, 300, Sandbox.EXIT_OK),
                               get_stats(0.1, 0.2, 0.3, Sandbox.EXIT_OK)),
         get_stats(1.1, 2.0, 300.3, Sandbox.EXIT_OK))
Example #19
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if not check_executables_number(job, 1):
            return

        executable_filename = next(iter(job.executables.keys()))
        executable_digest = job.executables[executable_filename].digest

        first_sandbox = create_sandbox(file_cacher, name="first_evaluate")
        second_sandbox = create_sandbox(file_cacher, name="second_evaluate")
        job.sandboxes.append(first_sandbox.get_root_path())
        job.sandboxes.append(second_sandbox.get_root_path())

        fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
        fifo = os.path.join(fifo_dir, "fifo")
        os.mkfifo(fifo)
        os.chmod(fifo_dir, 0o755)
        os.chmod(fifo, 0o666)

        # First step: we start the first manager.
        first_command = ["./%s" % executable_filename, "0", "/fifo/fifo"]
        first_executables_to_get = {executable_filename: executable_digest}
        first_files_to_get = {TwoSteps.INPUT_FILENAME: job.input}

        # Put the required files into the sandbox
        for filename, digest in first_executables_to_get.items():
            first_sandbox.create_file_from_storage(filename,
                                                   digest,
                                                   executable=True)
        for filename, digest in first_files_to_get.items():
            first_sandbox.create_file_from_storage(filename, digest)

        first = evaluation_step_before_run(
            first_sandbox,
            first_command,
            job.time_limit,
            job.memory_limit,
            dirs_map={fifo_dir: ("/fifo", "rw")},
            stdin_redirect=TwoSteps.INPUT_FILENAME,
            multiprocess=job.multithreaded_sandbox,
            wait=False)

        # Second step: we start the second manager.
        second_command = ["./%s" % executable_filename, "1", "/fifo/fifo"]
        second_executables_to_get = {executable_filename: executable_digest}
        second_files_to_get = {}

        # Put the required files into the second sandbox
        for filename, digest in second_executables_to_get.items():
            second_sandbox.create_file_from_storage(filename,
                                                    digest,
                                                    executable=True)
        for filename, digest in second_files_to_get.items():
            second_sandbox.create_file_from_storage(filename, digest)

        second = evaluation_step_before_run(
            second_sandbox,
            second_command,
            job.time_limit,
            job.memory_limit,
            dirs_map={fifo_dir: ("/fifo", "rw")},
            stdout_redirect=TwoSteps.OUTPUT_FILENAME,
            multiprocess=job.multithreaded_sandbox,
            wait=False)

        # Consume output.
        wait_without_std([second, first])

        box_success_first, evaluation_success_first, first_stats = \
            evaluation_step_after_run(first_sandbox)
        box_success_second, evaluation_success_second, second_stats = \
            evaluation_step_after_run(second_sandbox)

        box_success = box_success_first and box_success_second
        evaluation_success = \
            evaluation_success_first and evaluation_success_second
        stats = merge_execution_stats(first_stats, second_stats)

        outcome = None
        text = None

        # Error in the sandbox: nothing to do!
        if not box_success:
            pass

        # Contestant's error: the marks won't be good
        elif not evaluation_success:
            outcome = 0.0
            text = human_evaluation_message(stats)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not second_sandbox.file_exists(TwoSteps.OUTPUT_FILENAME):
                outcome = 0.0
                text = [
                    N_("Evaluation didn't produce file %s"),
                    TwoSteps.OUTPUT_FILENAME
                ]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage
                if job.get_output:
                    job.user_output = second_sandbox.get_file_to_storage(
                        TwoSteps.OUTPUT_FILENAME,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:
                    box_success, outcome, text = eval_output(
                        file_cacher,
                        job,
                        TwoSteps.CHECKER_CODENAME
                        if self._uses_checker() else None,
                        user_output_path=second_sandbox.relative_path(
                            TwoSteps.OUTPUT_FILENAME))

        # Fill in the job with the results.
        job.success = box_success
        job.outcome = str(outcome) if outcome is not None else None
        job.text = text
        job.plus = stats

        delete_sandbox(first_sandbox, job.success, job.keep_sandbox)
        delete_sandbox(second_sandbox, job.success, job.keep_sandbox)