Example #1
    def __init__(self, prob: Problem, user: User):
        probpath = f"/problems/{prob.id}"

        btn = f"rejudgeAll('{prob.id}')" if user.isAdmin() else None

        title = prob.title
        if not user.isAdmin():
            # Compute problem status
            icon = ''
            result = ''
            for sub in Submission.all():
                if sub.problem != prob or sub.user != user or sub.status == Submission.STATUS_REVIEW:
                    continue

                if sub.result == "ok":
                    icon = "check"
                    break
                else:
                    icon = "times"

            if icon != '':
                result = f'<i class="fa fa-{icon}"></i> '

            title = result + title

        self.html = Card(title, prob.description, probpath, rejudge=btn)
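The icon loop above encodes a simple rule: show a check as soon as any accepted run is found, otherwise show a cross if at least one judged attempt exists. The same selection rule over plain result strings, as a standalone sketch (`statusIcon` is a hypothetical helper, not part of the code above):

    def statusIcon(results: list) -> str:
        """Return "check" if any attempt passed, "times" if all judged
        attempts failed, and "" if there are no judged attempts."""
        icon = ""
        for result in results:
            if result == "ok":
                return "check"
            icon = "times"
        return icon

    assert statusIcon([]) == ""
    assert statusIcon(["wrong_answer", "ok"]) == "check"
    assert statusIcon(["tle", "wrong_answer"]) == "times"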
Example #2
    def __init__(self, user: User):
        cls = "blue" if user.isAdmin() else ""
        self.html = div(cls="col-3",
                        contents=[
                            Card(div(
                                h.strong(h.i("Username:"), cls="username-hidden"),
                                h.br(cls="username-hidden"),
                                h.p("&quot;", cls="username-hidden"),
                                h2(user.username, cls="card-title"),
                                h.p("&quot;", cls="username-hidden")),
                                 div(h.strong(h.i("Fullname:")), h.br(),
                                     f"&quot;{user.fullname}&quot;", h.br(),
                                     h.strong(h.i("Password:")), h.br(),
                                     f"&quot;{user.password}&quot;"),
                                 delete=f"deleteUser('{user.username}')",
                                 cls=cls)
                        ])
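The `h.*` calls and the bare `div`/`h2` here behave like a tiny HTML element builder. A rough sketch of the idea, assuming each helper returns an object that renders itself to a tag (an assumed shape, not the project's actual `h` module):

    class Element:
        """Hypothetical sketch of one HTML-builder element."""

        def __init__(self, tag: str, *contents, cls: str = ""):
            self.tag, self.contents, self.cls = tag, contents, cls

        def __str__(self) -> str:
            attr = f' class="{self.cls}"' if self.cls else ""
            inner = "".join(str(c) for c in self.contents)
            return f"<{self.tag}{attr}>{inner}</{self.tag}>"


    def strong(*contents, cls=""):
        return Element("strong", *contents, cls=cls)


    print(strong("Username:", cls="username-hidden"))
    # <strong class="username-hidden">Username:</strong>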
Example #3
import os
import shutil


def runCode(sub: Submission, user: User) -> tuple:
    """Executes submission `sub` and returns (inputs, outputs, answers, errors)"""
    extension = exts[sub.language]

    # Throttle the number of concurrent submissions: runningSubmissions acts
    # as a semaphore (e.g. a threading.BoundedSemaphore) held for the whole run
    with Submission.runningSubmissions:
        try:
            shutil.rmtree(f"/tmp/{id}", ignore_errors=True)
            os.makedirs(f"/tmp/{sub.id}", exist_ok=True)

            # Copy the code over to the runner /tmp folder
            writeFile(f"/tmp/{sub.id}/code.{extension}", sub.code)

            prob = sub.problem

            if sub.type == Submission.TYPE_TEST and not user.isAdmin():
                numTests = prob.samples
            elif sub.type == Submission.TYPE_CUSTOM:
                numTests = 1
            else:
                numTests = prob.tests

            # Copy the input over to the tmp folder for the runner
            if sub.type == Submission.TYPE_CUSTOM:
                writeFile(f"/tmp/{sub.id}/in0.txt", sub.custominput)
            else:
                for i in range(numTests):
                    shutil.copyfile(f"/db/problems/{prob.id}/input/in{i}.txt",
                                    f"/tmp/{sub.id}/in{i}.txt")

            # Output files will go here
            os.makedirs(f"/tmp/{sub.id}/out", exist_ok=True)

            # Run the sandboxed runner image: no network, 256 MB memory cap,
            # the submission directory mounted at /source, and the runner's
            # stdout (the overall result) redirected to result.txt
            cmd = f"docker run --rm --network=none -m 256MB -v /tmp/{sub.id}/:/source {OC_DOCKERIMAGE_BASE}-{sub.language}-runner {numTests} {prob.timelimit} > /tmp/{sub.id}/result.txt"
            logger.debug(cmd)
            rc = os.system(cmd)

            overall_result = readFile(f"/tmp/{sub.id}/result.txt")

            if rc != 0 or not overall_result:
                # Test failed to complete properly

                logger.warning(
                    f"Result of submission {sub.id}: rc={rc}, overall_result={overall_result}"
                )

                sub.result = "internal_error"
                if sub.type == Submission.TYPE_SUBMIT:
                    sub.save()
                return [], [], [], []

            logger.info("Overall result: '" + overall_result + "'")

            # Check for compile error
            if overall_result == "compile_error\n":
                logger.info("Compile error")
                sub.result = "compile_error"
                sub.delete()
                sub.compile = readFile(f"/tmp/{sub.id}/out/compile_error.txt")
                return None, None, None, None

            # Submission ran; process test results

            inputs = []
            outputs = []
            answers = []
            errors = []
            results = []
            result = "ok"

            all_samples_correct = True
            for i in range(numTests):
                if sub.type == Submission.TYPE_CUSTOM:
                    inputs.append(sub.custominput)
                    answers.append("")
                else:
                    inputs.append(sub.problem.testData[i].input)
                    answers.append(
                        readFile(f"/db/problems/{prob.id}/output/out{i}.txt"))

                errors.append(readFile(f"/tmp/{sub.id}/out/err{i}.txt"))
                outputs.append(readFile(f"/tmp/{sub.id}/out/out{i}.txt"))

                anstrip = strip(answers[-1])
                outstrip = strip(outputs[-1])
                answerLines = anstrip.split('\n')
                outLines = outstrip.split('\n')

                res = readFile(f"/tmp/{sub.id}/out/result{i}.txt")
                if res is None:
                    # A missing per-test result file means the runner never
                    # wrote one; treat it as a time limit exceeded
                    res = "tle"
                elif res == "ok" and anstrip != outstrip:
                    if sub.type == Submission.TYPE_CUSTOM:
                        pass  # Custom input has no expected answer to be wrong against
                    elif compareStrings(outLines, answerLines):
                        res = "incomplete_output"
                    elif compareStrings(answerLines, outLines):
                        res = "extra_output"
                    else:
                        res = "wrong_answer"

                results.append(res)

                # Make result the first incorrect result
                if res != "ok" and result == "ok":
                    result = res

                if i < prob.samples and res != "ok":
                    all_samples_correct = False

            may_autojudge = True
            if result == "wrong_answer" and sub.result == "presentation_error":
                # During a rejudge, do not replace a manually assigned
                # "presentation_error" with "wrong_answer"
                pass
            else:
                sub.result = result

            contest = Contest.getCurrent()
            if (contest and contest.tieBreaker and all_samples_correct
                    and sub.result in ["runtime_error", "tle"]):
                # When sample tests can break ties, force manual review of
                # submissions whose sample tests were all correct
                may_autojudge = False

            if (sub.result == "ok" or sub.type == Submission.TYPE_TEST or
                    (may_autojudge and sub.result in ["runtime_error", "tle"])):
                sub.status = Submission.STATUS_JUDGED

            sub.results = results

            logger.debug(f"Result of testing {sub.id}: {sub}")

            saveData(sub, inputs, 'in')
            saveData(sub, outputs, 'out')
            saveData(sub, answers, 'answer')
            saveData(sub, errors, 'error')
            if sub.type == Submission.TYPE_SUBMIT:
                sub.save()

            return inputs, outputs, answers, errors

        finally:
            shutil.rmtree(f"/tmp/{sub.id}", ignore_errors=True)