Example #1
import time
from uuid import uuid4

# Submission and Problem are models provided by the surrounding project
def addSubmission(probId, lang, code, user, type, custominput):
    sub = Submission()
    sub.problem = Problem.get(probId)
    sub.language = lang
    sub.code = code
    sub.result = Submission.RESULT_PENDING
    sub.custominput = custominput
    sub.user = user
    sub.timestamp = time.time() * 1000
    sub.type = type
    sub.status = Submission.STATUS_REVIEW

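    # Only real submissions are persisted; test/custom runs get a throwaway UUID id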
    if type == Submission.TYPE_SUBMIT:
        sub.save()
    else:
        sub.id = str(uuid4())

    return sub
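
A minimal usage sketch (illustrative values; some_user is a placeholder for the project's User object):

# Hypothetical call: persist a real submission for problem "p1"
sub = addSubmission("p1", "python", 'print("hello")', some_user,
                    Submission.TYPE_SUBMIT, None)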
Example #2
import os
import shutil

def runCode(sub: Submission, user: User) -> tuple:
    """Executes submission `sub` and returns four parallel lists:
    inputs, outputs, expected answers, and per-test errors."""
    extension = exts[sub.language]

    # Use semaphore to throttle number of concurrent submissions
    with Submission.runningSubmissions:
        try:
            shutil.rmtree(f"/tmp/{sub.id}", ignore_errors=True)
            os.makedirs(f"/tmp/{sub.id}", exist_ok=True)

            # Copy the code over to the runner /tmp folder
            writeFile(f"/tmp/{sub.id}/code.{extension}", sub.code)

            prob = sub.problem

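            # Choose how many tests to run: non-admins testing see only the
            # sample tests, custom input runs once, real submits run them all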
            if sub.type == Submission.TYPE_TEST and not user.isAdmin():
                numTests = prob.samples
            elif sub.type == Submission.TYPE_CUSTOM:
                numTests = 1
            else:
                numTests = prob.tests

            # Copy the input over to the tmp folder for the runner
            if sub.type == Submission.TYPE_CUSTOM:
                writeFile(f"/tmp/{sub.id}/in0.txt", sub.custominput)
            else:
                for i in range(numTests):
                    shutil.copyfile(f"/db/problems/{prob.id}/input/in{i}.txt",
                                    f"/tmp/{sub.id}/in{i}.txt")

            # Output files will go here
            os.makedirs(f"/tmp/{sub.id}/out", exist_ok=True)

            # Run the submission in an isolated container: no network, 256 MB
            # memory cap, /tmp/{sub.id} mounted at /source; the runner's stdout
            # (the overall verdict) is redirected to result.txt
            cmd = f"docker run --rm --network=none -m 256MB -v /tmp/{sub.id}/:/source {OC_DOCKERIMAGE_BASE}-{sub.language}-runner {numTests} {prob.timelimit} > /tmp/{sub.id}/result.txt"
            logger.debug(cmd)
            rc = os.system(cmd)

            overall_result = readFile(f"/tmp/{sub.id}/result.txt")

            if rc != 0 or not overall_result:
                # Test failed to complete properly

                logger.warning(
                    f"Result of submission {sub.id}: rc={rc}, overall_result={overall_result}"
                )

                sub.result = "internal_error"
                if sub.type == Submission.TYPE_SUBMIT:
                    sub.save()
                return [], [], [], []

            logger.info(f"Overall result: '{overall_result}'")

            # Check for compile error
            if overall_result == "compile_error\n":
                logger.info("Compile error")
                sub.result = "compile_error"
                sub.delete()
                sub.compile = readFile(f"/tmp/{sub.id}/out/compile_error.txt")
                return None, None, None, None

            # Submission ran; process test results

            inputs = []
            outputs = []
            answers = []
            errors = []
            results = []
            result = "ok"

            all_samples_correct = True
            for i in range(numTests):
                if sub.type == Submission.TYPE_CUSTOM:
                    inputs.append(sub.custominput)
                    answers.append("")
                else:
                    inputs.append(sub.problem.testData[i].input)
                    answers.append(
                        readFile(f"/db/problems/{prob.id}/output/out{i}.txt"))

                errors.append(readFile(f"/tmp/{sub.id}/out/err{i}.txt"))
                outputs.append(readFile(f"/tmp/{sub.id}/out/out{i}.txt"))

                anstrip = strip(answers[-1])
                outstrip = strip(outputs[-1])
                answerLines = anstrip.split('\n')
                outLines = outstrip.split('\n')

                res = readFile(f"/tmp/{sub.id}/out/result{i}.txt")
                if res is None:
                    res = "tle"
                elif res == "ok" and anstrip != outstrip:
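                    # Output differs from the expected answer; classify the mismatch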
                    if sub.type == Submission.TYPE_CUSTOM:
                        pass  # custom input cannot produce incorrect result
                    elif compareStrings(outLines, answerLines):
                        res = "incomplete_output"
                    elif compareStrings(answerLines, outLines):
                        res = "extra_output"
                    else:
                        res = "wrong_answer"

                results.append(res)

                # Make result the first incorrect result
                if res != "ok" and result == "ok":
                    result = res

                if i < prob.samples and res != "ok":
                    all_samples_correct = False

            may_autojudge = True
            if result == "wrong_answer" and sub.result == "presentation_error":
                # During a rejudge, do not replace "presentation_error" with "wrong_answer"
                pass
            else:
                sub.result = result

            current_contest = Contest.getCurrent()
            if (current_contest and current_contest.tieBreaker
                    and all_samples_correct
                    and sub.result in ["runtime_error", "tle"]):
                # Force review of submissions where all sample tests were
                # correct if samples break ties
                may_autojudge = False

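            # Mark clear-cut verdicts (and all test runs) as judged;
            # anything else stays in review for a human judge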
            if sub.result == "ok" or sub.type == Submission.TYPE_TEST or (
                    may_autojudge and sub.result in ["runtime_error", "tle"]):
                sub.status = Submission.STATUS_JUDGED

            sub.results = results

            logger.debug(f"Result of testing {sub.id}: {sub}")

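            # Persist the per-test data files (saveData is a project helper)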
            saveData(sub, inputs, 'in')
            saveData(sub, outputs, 'out')
            saveData(sub, answers, 'answer')
            saveData(sub, errors, 'error')
            if sub.type == Submission.TYPE_SUBMIT:
                sub.save()

            return inputs, outputs, answers, errors

        finally:
            shutil.rmtree(f"/tmp/{sub.id}", ignore_errors=True)
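
A minimal end-to-end sketch tying the two examples together (hypothetical driver code; some_user and the problem id "p1" are placeholders, not from the source):

# Create a test run, execute it, and inspect the verdicts
sub = addSubmission("p1", "python", 'print("hello")', some_user,
                    Submission.TYPE_TEST, None)
inputs, outputs, answers, errors = runCode(sub, some_user)
if inputs is None:
    print(sub.compile)               # compile error text
else:
    print(sub.result, sub.results)   # overall and per-test verdicts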