Example #1
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        language = get_language(job.language)
        source_ext = language.source_extension

        if not check_files_number(job, 1, or_more=True):
            return

        # Prepare the files to copy in the sandbox and to add to the
        # compilation command.
        files_to_get = {}
        source_filenames = []
        # The stub, which must have been provided (copy and add to compilation).
        if self._uses_grader():
            stub_filename = self.STUB_BASENAME + source_ext
            if not check_manager_present(job, stub_filename):
                return
            source_filenames.append(stub_filename)
            files_to_get[stub_filename] = job.managers[stub_filename].digest
        # User's submitted file(s) (copy and add to compilation).
        for codename, file_ in iteritems(job.files):
            source_filename = codename.replace(".%l", source_ext)
            source_filenames.append(source_filename)
            files_to_get[source_filename] = file_.digest
        # Any other useful manager (just copy).
        for filename, manager in iteritems(job.managers):
            if is_manager_for_compilation(filename, language):
                files_to_get[filename] = manager.digest

        # Prepare the compilation command
        executable_filename = self._executable_filename(iterkeys(job.files))
        commands = language.get_compilation_commands(
            source_filenames, executable_filename)

        # Create the sandbox.
        sandbox = create_sandbox(file_cacher, name="compile")
        job.sandboxes.append(sandbox.get_root_path())

        # Copy all required files in the sandbox.
        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Run the compilation.
        box_success, compilation_success, text, stats = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables.
        job.success = box_success
        job.compilation_success = compilation_success
        job.text = text
        job.plus = stats
        if box_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup.
        delete_sandbox(sandbox, job.success)
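
The compile methods in these examples all rely on the same convention: filenames in the submission format carry a ".%l" placeholder that is replaced with the language's source extension before anything is copied into the sandbox. Below is a minimal, self-contained sketch of that substitution; the extension table is an illustrative stand-in, not the real cms.grading.languagemanager registry.

    # Illustrative stand-in for language.source_extension; not real CMS code.
    SOURCE_EXTENSIONS = {"C11 / gcc": ".c", "C++17 / g++": ".cpp", "Python 3": ".py"}

    def expand_placeholder(codename, language_name):
        # Replace the ".%l" placeholder with the real source extension.
        return codename.replace(".%l", SOURCE_EXTENSIONS[language_name])

    assert expand_placeholder("solution.%l", "C++17 / g++") == "solution.cpp"
    assert "stub" + SOURCE_EXTENSIONS["C11 / gcc"] == "stub.c"  # stub filename
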
Example #2
    def get(self, task_name, user_test_num, filename):
        if not self.r_params["testing_enabled"]:
            raise tornado.web.HTTPError(404)

        task = self.get_task(task_name)
        if task is None:
            raise tornado.web.HTTPError(404)

        user_test = self.get_user_test(task, user_test_num)
        if user_test is None:
            raise tornado.web.HTTPError(404)

        # filename is the name used by the browser, hence is something
        # like 'foo.c' (and the extension is CMS's preferred extension
        # for the language). To retrieve the right file, we need to
        # decode it to 'foo.%l'.
        stored_filename = filename
        if user_test.language is not None:
            extension = get_language(user_test.language).source_extension
            stored_filename = re.sub(r'%s$' % extension, '.%l', filename)

        if stored_filename in user_test.files:
            digest = user_test.files[stored_filename].digest
        elif stored_filename in user_test.managers:
            digest = user_test.managers[stored_filename].digest
        else:
            raise tornado.web.HTTPError(404)
        self.sql_session.close()

        mimetype = get_type_for_file_name(filename)
        if mimetype is None:
            mimetype = 'application/octet-stream'

        self.fetch(digest, mimetype, filename)
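
Going the other way, the handler above maps the browser-facing name (e.g. 'foo.c') back to the stored 'foo.%l' key. A rough, runnable equivalent of that re.sub call, with a hypothetical ".c" extension:

    import re

    filename = "foo.c"    # name the browser sees
    extension = ".c"      # assumed value of get_language(...).source_extension
    # As in the handler above; the extension already carries the leading dot,
    # which re.sub treats as an (unescaped) wildcard.
    stored_filename = re.sub(r'%s$' % extension, '.%l', filename)
    assert stored_filename == "foo.%l"
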
Example #3
File: taskusertest.py  Project: Nyrio/cms
    def get(self, task_name, user_test_num, filename):
        if not self.r_params["testing_enabled"]:
            raise tornado.web.HTTPError(404)

        task = self.get_task(task_name)
        if task is None:
            raise tornado.web.HTTPError(404)

        user_test = self.get_user_test(task, user_test_num)
        if user_test is None:
            raise tornado.web.HTTPError(404)

        # filename is the name used by the browser, hence is something
        # like 'foo.c' (and the extension is CMS's preferred extension
        # for the language). To retrieve the right file, we need to
        # decode it to 'foo.%l'.
        stored_filename = filename
        if user_test.language is not None:
            extension = get_language(user_test.language).source_extension
            stored_filename = re.sub(r'%s$' % extension, '.%l', filename)

        if stored_filename in user_test.files:
            digest = user_test.files[stored_filename].digest
        elif stored_filename in user_test.managers:
            digest = user_test.managers[stored_filename].digest
        else:
            raise tornado.web.HTTPError(404)
        self.sql_session.close()

        mimetype = get_type_for_file_name(filename)
        if mimetype is None:
            mimetype = 'application/octet-stream'

        self.fetch(digest, mimetype, filename)
Example #4
    def post(self, task_name):
        task = self.get_task(task_name)
        if task is None:
            raise tornado.web.HTTPError(404)

        # Only set the official bit when the user can compete and we are not in
        # analysis mode.
        official = self.r_params["actual_phase"] == 0

        query_args = dict()

        # fill in any missing files with text contents
        for filename in task.submission_format:
            if filename not in self.request.files:
                ta_input = self.get_argument('ta_' + filename)

                # potentially append new file
                if ta_input:
                    stored_filename = filename
                    language = self.get_argument("language", None)
                    if language:
                        extension = get_language(language).source_extension
                        stored_filename = filename.replace(".%l", extension)

                    logger.info("Appending file from contents: " +
                                stored_filename)
                    ta_file = HTTPFile(filename=stored_filename,
                                       body=ta_input.encode(),
                                       type='text/plain')
                    self.request.files[filename] = [ta_file]

        try:
            submission = accept_submission(self.sql_session,
                                           self.service.file_cacher,
                                           self.current_user, task,
                                           self.timestamp, self.request.files,
                                           self.get_argument("language",
                                                             None), official)
            self.sql_session.commit()
        except UnacceptableSubmission as e:
            logger.info("Sent error: `%s' - `%s'", e.subject, e.formatted_text)
            self.notify_error(e.subject, e.text, e.text_params)
        else:
            self.service.evaluation_service.new_submission(
                submission_id=submission.id)
            self.notify_success(
                N_("Submission received"),
                N_("Your submission has been received "
                   "and is currently being evaluated."))
            # The argument (encrypted submission id) is not used by CWS
            # (nor does it disclose information to the user), but it is
            # useful for automatic testing to obtain the submission id.
            query_args["submission_id"] = \
                encrypt_number(submission.id, config.secret_key)
            query_args["tab"] = "submissions"

        # self.redirect(self.contest_url("tasks", task.name, "submissions",
        #                               **query_args))
        self.redirect(
            self.contest_url("tasks", task.name, "full", **query_args))
Example #5
File: Job.py  Project: ioi-germany/cms
    def from_user_test(operation, user_test, dataset):
        """Create an EvaluationJob from a user test.

        operation (ESOperation): a USER_TEST_EVALUATION operation.
        user_test (UserTest): the user test object referred by the
            operation.
        dataset (Dataset): the dataset object referred by the
            operation.

        return (EvaluationJob): the job.

        """
        if operation.type_ != ESOperation.USER_TEST_EVALUATION:
            logger.error("Programming error: asking for a user test "
                         "evaluation job, but the operation is %s.",
                         operation.type_)
            raise ValueError("Operation is not a user test evaluation")

        multithreaded = _is_contest_multithreaded(user_test.task.contest)

        user_test_result = user_test.get_result(dataset)
        # This should have been created by now.
        assert user_test_result is not None

        # Add the managers to be got from the Task.
        # dict() is required to detach the dictionary that gets added
        # to the Job from the control of SQLAlchemy
        language = get_language(user_test.language)
        managers = dict(user_test.managers)
        task_type = dataset.task_type_object
        auto_managers = task_type.get_auto_managers()
        if auto_managers is not None:
            for manager_filename in auto_managers:
                if manager_filename.endswith(".%l") and language is not None:
                    manager_filename = manager_filename.replace(
                        ".%l", language.source_extension)
                managers[manager_filename] = dataset.managers[manager_filename]
        else:
            for manager_filename in dataset.managers:
                if manager_filename not in managers:
                    managers[manager_filename] = \
                        dataset.managers[manager_filename]

        return EvaluationJob(
            operation=operation.to_dict(),
            task_type=dataset.task_type,
            task_type_parameters=dataset.task_type_parameters,
            language=user_test.language,
            multithreaded_sandbox=multithreaded,
            files=dict(user_test.files),
            managers=managers,
            executables=dict(user_test_result.executables),
            input=user_test.input,
            time_limit=dataset.time_limit,
            memory_limit=dataset.memory_limit,
            info="evaluate user test %d" % (user_test.id),
            get_output=True,
            only_execution=True
        )
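
The auto-manager loop above expands any ".%l" manager name before looking it up in dataset.managers. A hedged, dependency-free sketch of just that expansion step, with plain dicts standing in for the SQLAlchemy collections and a made-up extension:

    # Plain dicts stand in for user_test.managers and dataset.managers.
    user_managers = {"input.txt": "digest-a"}
    dataset_managers = {"stub.cpp": "digest-b", "checker": "digest-c"}
    auto_managers = ["stub.%l", "checker"]
    source_extension = ".cpp"   # assumed language extension

    managers = dict(user_managers)
    for manager_filename in auto_managers:
        if manager_filename.endswith(".%l"):
            manager_filename = manager_filename.replace(".%l", source_extension)
        managers[manager_filename] = dataset_managers[manager_filename]

    assert set(managers) == {"input.txt", "stub.cpp", "checker"}
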
Example #6
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        language = get_language(job.language)
        source_ext = language.source_extension

        if not check_files_number(job, 1, or_more=True):
            return

        # Prepare the files to copy in the sandbox and to add to the
        # compilation command.
        files_to_get = {}
        source_filenames = []
        # The stub, which must have been provided (copy and add to compilation).
        stub_filename = self.STUB_BASENAME + source_ext
        if not check_manager_present(job, stub_filename):
            return
        source_filenames.append(stub_filename)
        files_to_get[stub_filename] = job.managers[stub_filename].digest
        # User's submitted file(s) (copy and add to compilation).
        for codename, file_ in iteritems(job.files):
            source_filename = codename.replace(".%l", source_ext)
            source_filenames.append(source_filename)
            files_to_get[source_filename] = file_.digest
        # Any other useful manager (just copy).
        for filename, manager in iteritems(job.managers):
            if is_manager_for_compilation(filename, language):
                files_to_get[filename] = manager.digest

        # Prepare the compilation command
        executable_filename = self._executable_filename(iterkeys(job.files))
        commands = language.get_compilation_commands(
            source_filenames, executable_filename)

        # Create the sandbox.
        sandbox = create_sandbox(file_cacher, name="compile")
        job.sandboxes.append(sandbox.get_root_path())

        # Copy all required files in the sandbox.
        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Run the compilation.
        box_success, compilation_success, text, stats = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables.
        job.success = box_success
        job.compilation_success = compilation_success
        job.text = text
        job.plus = stats
        if box_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup.
        delete_sandbox(sandbox, job.success)
Example #7
    def from_user_test(operation, user_test, dataset):
        """Create an EvaluationJob from a user test.

        operation (ESOperation): a USER_TEST_EVALUATION operation.
        user_test (UserTest): the user test object referred by the
            operation.
        dataset (Dataset): the dataset object referred by the
            operation.

        return (EvaluationJob): the job.

        """
        if operation.type_ != ESOperation.USER_TEST_EVALUATION:
            logger.error("Programming error: asking for a user test "
                         "evaluation job, but the operation is %s.",
                         operation.type_)
            raise ValueError("Operation is not a user test evaluation")

        multithreaded = _is_contest_multithreaded(user_test.task.contest)

        user_test_result = user_test.get_result(dataset)
        # This should have been created by now.
        assert user_test_result is not None

        # Add the managers to be got from the Task.
        # dict() is required to detach the dictionary that gets added
        # to the Job from the control of SQLAlchemy
        language = get_language(user_test.language)
        managers = dict(user_test.managers)
        task_type = dataset.task_type_object
        auto_managers = task_type.get_auto_managers()
        if auto_managers is not None:
            for manager_filename in auto_managers:
                if manager_filename.endswith(".%l") and language is not None:
                    manager_filename = manager_filename.replace(
                        ".%l", language.source_extension)
                managers[manager_filename] = dataset.managers[manager_filename]
        else:
            for manager_filename in dataset.managers:
                if manager_filename not in managers:
                    managers[manager_filename] = \
                        dataset.managers[manager_filename]

        return EvaluationJob(
            operation=operation,
            task_type=dataset.task_type,
            task_type_parameters=dataset.task_type_parameters,
            language=user_test.language,
            multithreaded_sandbox=multithreaded,
            files=dict(user_test.files),
            managers=managers,
            executables=dict(user_test_result.executables),
            input=user_test.input,
            time_limit=dataset.time_limit,
            memory_limit=dataset.memory_limit,
            info="evaluate user test %d" % (user_test.id),
            get_output=True,
            only_execution=True
        )
Example #8
File: Batch.py  Project: radroxx/cms
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        language = get_language(job.language)
        source_ext = language.source_extension

        if not check_files_number(job, 1):
            return

        user_file_format = next(iterkeys(job.files))
        user_source_filename = user_file_format.replace(".%l", source_ext)
        executable_filename = user_file_format.replace(".%l", "")

        # Create the list of filenames to be passed to the compiler. If we use
        # a grader, it needs to be in first position in the command line, and
        # we check that it exists.
        source_filenames = [user_source_filename]
        if self._uses_grader():
            grader_source_filename = Batch.GRADER_BASENAME + source_ext
            if not check_manager_present(job, grader_source_filename):
                return
            source_filenames.insert(0, grader_source_filename)

        # Prepare the compilation command.
        commands = language.get_compilation_commands(source_filenames,
                                                     executable_filename)

        # Create the sandbox.
        sandbox = create_sandbox(file_cacher, name="compile")
        job.sandboxes.append(sandbox.path)

        # Copy required files in the sandbox (includes the grader if present).
        sandbox.create_file_from_storage(user_source_filename,
                                         job.files[user_file_format].digest)
        for filename, manager in iteritems(job.managers):
            if is_manager_for_compilation(filename, language):
                sandbox.create_file_from_storage(filename, manager.digest)

        # Run the compilation.
        box_success, compilation_success, text, stats = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables.
        job.success = box_success
        job.compilation_success = compilation_success
        job.text = text
        job.plus = stats
        if box_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup.
        delete_sandbox(sandbox, job.success)
Example #9
File: Batch.py  Project: Nyrio/cms
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        language = get_language(job.language)
        source_ext = language.source_extension

        if not check_files_number(job, 1, or_more=True):
            return

        executable_filename = self._executable_filename(iterkeys(job.files))

        # Create the list of filenames to be passed to the compiler. If we use
        # a grader, it needs to be in first position in the command line, and
        # we check that it exists.
        source_filenames = [codename.replace(".%l", source_ext)
                            for codename in iterkeys(job.files)]
        if self._uses_grader():
            grader_source_filename = self.GRADER_BASENAME + source_ext
            if not check_manager_present(job, grader_source_filename):
                return
            source_filenames.insert(0, grader_source_filename)

        # Prepare the compilation command.
        commands = language.get_compilation_commands(
            source_filenames, executable_filename)

        # Create the sandbox.
        sandbox = create_sandbox(file_cacher, name="compile")
        job.sandboxes.append(sandbox.get_root_path())

        # Copy required files in the sandbox (includes the grader if present).
        for codename, file_ in iteritems(job.files):
            filename = codename.replace(".%l", source_ext)
            sandbox.create_file_from_storage(filename, file_.digest)
        for filename, manager in iteritems(job.managers):
            if is_manager_for_compilation(filename, language):
                sandbox.create_file_from_storage(filename, manager.digest)

        # Run the compilation.
        box_success, compilation_success, text, stats = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables.
        job.success = box_success
        job.compilation_success = compilation_success
        job.text = text
        job.plus = stats
        if box_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup.
        delete_sandbox(sandbox, job.success)
Example #10
File: Job.py  Project: saco-admin/cms
    def from_submission(operation, submission, dataset):
        """Create an EvaluationJob from a submission.

        operation (ESOperation): an EVALUATION operation.
        submission (Submission): the submission object referred by the
            operation.
        dataset (Dataset): the dataset object referred by the
            operation.

        return (EvaluationJob): the job.

        """
        if operation.type_ != ESOperation.EVALUATION:
            logger.error(
                "Programming error: asking for an evaluation job, "
                "but the operation is %s.", operation.type_)
            raise ValueError("Operation is not an evaluation")

        multithreaded = _is_contest_multithreaded(submission.task.contest)

        submission_result = submission.get_result(dataset)
        # This should have been created by now.
        assert submission_result is not None
        try:
            language = get_language(submission.language)
        except KeyError:
            language = None

        testcase = dataset.testcases[operation.testcase_codename]

        info = "evaluate submission %d on testcase %s" % \
            (submission.id, testcase.codename)

        if dataset.time_limit:
            time_limit = dataset.time_limit * (language.time_multiplier
                                               if language is not None else 1)
        else:
            time_limit = dataset.time_limit

        # dict() is required to detach the dictionary that gets added
        # to the Job from the control of SQLAlchemy
        return EvaluationJob(operation=operation,
                             task_type=dataset.task_type,
                             task_type_parameters=dataset.task_type_parameters,
                             language=submission.language,
                             multithreaded_sandbox=multithreaded,
                             files=dict(submission.files),
                             managers=dict(dataset.managers),
                             executables=dict(submission_result.executables),
                             input=testcase.input,
                             output=testcase.output,
                             time_limit=time_limit,
                             memory_limit=dataset.memory_limit,
                             info=info)
Example #11
File: Test.py  Project: zj-cs2103/cms
    def _filenames_for_language(self, language, filenames, alt_filenames={}):
        if language is not None:
            # First check if language is in alt_filenames. This allows
            # submitting different sources for languages that would
            # otherwise have matching source extensions.
            filenames = alt_filenames.get(language, filenames)

            ext = get_language(language).source_extension
            return [filename.replace(".%l", ext) for filename in filenames]
        else:
            return filenames
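
A hypothetical use of the helper above: a test that submits "sol.%l" by default but a dedicated "Sol.java" for Java. The standalone rework below stubs the extension lookup so the sketch runs without the cms package; names and extensions are illustrative.

    # Standalone rework with a stubbed extension table, purely illustrative.
    EXTENSIONS = {"C++17 / g++": ".cpp", "Java / JDK": ".java"}

    def filenames_for_language(language, filenames, alt_filenames={}):
        if language is None:
            return filenames
        filenames = alt_filenames.get(language, filenames)
        ext = EXTENSIONS[language]
        return [filename.replace(".%l", ext) for filename in filenames]

    assert filenames_for_language("C++17 / g++", ["sol.%l"]) == ["sol.cpp"]
    assert filenames_for_language("Java / JDK", ["sol.%l"],
                                  {"Java / JDK": ["Sol.java"]}) == ["Sol.java"]
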
Example #12
File: submission.py  Project: cms-dev/cms
    def get(self, file_id):
        sub_file = self.safe_get_item(File, file_id)
        submission = sub_file.submission

        real_filename = sub_file.filename
        if submission.language is not None:
            real_filename = real_filename.replace(
                ".%l", get_language(submission.language).source_extension)
        digest = sub_file.digest

        self.sql_session.close()
        self.fetch(digest, "text/plain", real_filename)
Example #13
def _is_contest_multithreaded(contest):
    """Return if the contest allows multithreaded compilations and evaluations

    The rule is that this is allowed when the contest has a language that
    requires this.

    contest (Contest): the contest to check
    return (boolean): True if the sandbox should allow multithreading.

    """
    return any(get_language(l).requires_multithreading
               for l in contest.languages)
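
The sandbox is opened with multithreading enabled as soon as any contest language requires it. A toy equivalent with a stubbed language table (not the real registry):

    # Toy equivalent with a stubbed language table, purely illustrative.
    REQUIRES_MULTITHREADING = {"Java / JDK": True, "C11 / gcc": False}

    def is_contest_multithreaded(contest_languages):
        return any(REQUIRES_MULTITHREADING[l] for l in contest_languages)

    assert is_contest_multithreaded(["C11 / gcc", "Java / JDK"]) is True
    assert is_contest_multithreaded(["C11 / gcc"]) is False
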
Example #14
    def get(self, file_id):
        sub_file = self.safe_get_item(File, file_id)
        submission = sub_file.submission

        real_filename = sub_file.filename
        if submission.language is not None:
            real_filename = real_filename.replace(
                ".%l", get_language(submission.language).source_extension)
        digest = sub_file.digest

        self.sql_session.close()
        self.fetch(digest, "text/plain", real_filename)
Example #15
File: Job.py  Project: PJeBeK/cms
def _is_contest_multithreaded(contest):
    """Return if the contest allows multithreaded compilations and evaluations

    The rule is that this is allowed when the contest has a language that
    requires this.

    contest (Contest): the contest to check
    return (boolean): True if the sandbox should allow multithreading.

    """
    return any(get_language(l).requires_multithreading
               for l in contest.languages)
Example #16
File: usertest.py  Project: PJeBeK/cms
    def get(self, file_id):
        user_test_file = self.safe_get_item(UserTestFile, file_id)
        user_test = user_test_file.user_test

        real_filename = user_test_file.filename
        if user_test.language is not None:
            real_filename = real_filename.replace(
                ".%l", get_language(user_test.language).source_extension)
        digest = user_test_file.digest

        self.sql_session.close()
        self.fetch(digest, "text/plain", real_filename)
Example #17
File: Job.py  Project: ioi-germany/cms
    def from_user_test(operation, user_test, dataset):
        """Create a CompilationJob from a user test.

        operation (ESOperation): a USER_TEST_COMPILATION operation.
        user_test (UserTest): the user test object referred by the
            operation.
        dataset (Dataset): the dataset object referred by the
            operation.

        return (CompilationJob): the job.

        """
        if operation.type_ != ESOperation.USER_TEST_COMPILATION:
            logger.error("Programming error: asking for a user test "
                         "compilation job, but the operation is %s.",
                         operation.type_)
            raise ValueError("Operation is not a user test compilation")

        multithreaded = _is_contest_multithreaded(user_test.task.contest)

        # Add the managers to be got from the Task.
        # dict() is required to detach the dictionary that gets added
        # to the Job from the control of SQLAlchemy
        try:
            language = get_language(user_test.language)
        except KeyError:
            language = None
        managers = dict(user_test.managers)
        task_type = dataset.task_type_object
        auto_managers = task_type.get_auto_managers()
        if auto_managers is not None:
            for manager_filename in auto_managers:
                if manager_filename.endswith(".%l") and language is not None:
                    manager_filename = manager_filename.replace(
                        ".%l", language.source_extension)
                managers[manager_filename] = dataset.managers[manager_filename]
        else:
            for manager_filename in dataset.managers:
                if manager_filename not in managers:
                    managers[manager_filename] = \
                        dataset.managers[manager_filename]

        return CompilationJob(
            operation=operation.to_dict(),
            task_type=dataset.task_type,
            task_type_parameters=dataset.task_type_parameters,
            language=user_test.language,
            multithreaded_sandbox=multithreaded,
            files=dict(user_test.files),
            managers=managers,
            info="compile user test %d" % (user_test.id)
        )
Example #18
    def from_user_test(operation, user_test, dataset):
        """Create a CompilationJob from a user test.

        operation (ESOperation): a USER_TEST_COMPILATION operation.
        user_test (UserTest): the user test object referred by the
            operation.
        dataset (Dataset): the dataset object referred by the
            operation.

        return (CompilationJob): the job.

        """
        if operation.type_ != ESOperation.USER_TEST_COMPILATION:
            logger.error("Programming error: asking for a user test "
                         "compilation job, but the operation is %s.",
                         operation.type_)
            raise ValueError("Operation is not a user test compilation")

        multithreaded = _is_contest_multithreaded(user_test.task.contest)

        # Add the managers to be got from the Task.
        # dict() is required to detach the dictionary that gets added
        # to the Job from the control of SQLAlchemy
        try:
            language = get_language(user_test.language)
        except KeyError:
            language = None
        managers = dict(user_test.managers)
        task_type = dataset.task_type_object
        auto_managers = task_type.get_auto_managers()
        if auto_managers is not None:
            for manager_filename in auto_managers:
                if manager_filename.endswith(".%l") and language is not None:
                    manager_filename = manager_filename.replace(
                        ".%l", language.source_extension)
                managers[manager_filename] = dataset.managers[manager_filename]
        else:
            for manager_filename in dataset.managers:
                if manager_filename not in managers:
                    managers[manager_filename] = \
                        dataset.managers[manager_filename]

        return CompilationJob(
            operation=operation,
            task_type=dataset.task_type,
            task_type_parameters=dataset.task_type_parameters,
            language=user_test.language,
            multithreaded_sandbox=multithreaded,
            files=dict(user_test.files),
            managers=managers,
            info="compile user test %d" % (user_test.id)
        )
Example #19
    def get(self, file_id):
        user_test_file = self.safe_get_item(UserTestFile, file_id)
        user_test = user_test_file.user_test

        real_filename = user_test_file.filename
        if user_test.language is not None:
            real_filename = real_filename.replace(
                ".%l",
                get_language(user_test.language).source_extension)
        digest = user_test_file.digest

        self.sql_session.close()
        self.fetch(digest, "text/plain", real_filename)
Example #20
    def _sources_names(self, language):
        # Source files are stored under cmstestsuite/code/.
        path = os.path.join(os.path.dirname(__file__), 'code')

        # Choose the correct file to submit.
        filenames = [
            filename.replace(".%l",
                             get_language(language).source_extension)
            for filename in self.filenames
        ]

        full_paths = [os.path.join(path, filename) for filename in filenames]

        return full_paths
Example #21
File: Test.py  Project: akmohtashami/cms
    def _sources_names(self, language):
        # Source files are stored under cmstestsuite/code/.
        path = os.path.join(os.path.dirname(__file__), 'code')

        # Choose the correct file to submit.
        if language is not None:
            ext = get_language(language).source_extension
            filenames = [filename.replace(".%l", ext)
                         for filename in self.filenames]
        else:
            filenames = self.filenames

        full_paths = [os.path.join(path, filename) for filename in filenames]

        return full_paths
Example #22
    def get(self, task_name, submission_num, filename):
        if not self.contest.submissions_download_allowed:
            raise tornado.web.HTTPError(404)

        participation = self.current_user

        try:
            task = self.contest.get_task(task_name)
        except KeyError:
            raise tornado.web.HTTPError(404)
        if self.contest.restrict_level and self.current_user.user.level != task.level and self.current_user.user.level != "x" and task.level != "x":
            raise tornado.web.HTTPError(404)

        submission = self.sql_session.query(Submission)\
            .filter(Submission.participation == participation)\
            .filter(Submission.task == task)\
            .order_by(Submission.timestamp)\
            .offset(int(submission_num) - 1)\
            .first()
        if submission is None:
            raise tornado.web.HTTPError(404)

        # The following code assumes that submission.files is a subset
        # of task.submission_format. CWS will always ensure that for new
        # submissions, yet, if the submission_format changes during the
        # competition, this may not hold anymore for old submissions.

        # filename is the name used by the browser, hence is something
        # like 'foo.c' (and the extension is CMS's preferred extension
        # for the language). To retrieve the right file, we need to
        # decode it to 'foo.%l'.
        stored_filename = filename
        if submission.language is not None:
            extension = get_language(submission.language).source_extension
            stored_filename = re.sub(r'%s$' % extension, '.%l', filename)

        if stored_filename not in submission.files:
            raise tornado.web.HTTPError(404)

        digest = submission.files[stored_filename].digest
        self.sql_session.close()

        mimetype = get_type_for_file_name(filename)
        if mimetype is None:
            mimetype = 'application/octet-stream'

        self.fetch(digest, mimetype, filename)
Example #23
    def get(self, task_name, submission_num, filename):
        if not self.contest.submissions_download_allowed:
            raise tornado.web.HTTPError(404)

        participation = self.current_user

        try:
            task = self.contest.get_task(task_name)
        except KeyError:
            raise tornado.web.HTTPError(404)
        if self.contest.restrict_level and self.current_user.user.level != task.level and self.current_user.user.level != "x" and task.level != "x":
            raise tornado.web.HTTPError(404)

        submission = self.sql_session.query(Submission)\
            .filter(Submission.participation == participation)\
            .filter(Submission.task == task)\
            .order_by(Submission.timestamp)\
            .offset(int(submission_num) - 1)\
            .first()
        if submission is None:
            raise tornado.web.HTTPError(404)

        # The following code assumes that submission.files is a subset
        # of task.submission_format. CWS will always ensure that for new
        # submissions, yet, if the submission_format changes during the
        # competition, this may not hold anymore for old submissions.

        # filename is the name used by the browser, hence is something
        # like 'foo.c' (and the extension is CMS's preferred extension
        # for the language). To retrieve the right file, we need to
        # decode it to 'foo.%l'.
        stored_filename = filename
        if submission.language is not None:
            extension = get_language(submission.language).source_extension
            stored_filename = re.sub(r'%s$' % extension, '.%l', filename)

        if stored_filename not in submission.files:
            raise tornado.web.HTTPError(404)

        digest = submission.files[stored_filename].digest
        self.sql_session.close()

        mimetype = get_type_for_file_name(filename)
        if mimetype is None:
            mimetype = 'application/octet-stream'

        self.fetch(digest, mimetype, filename)
Example #24
    def get(self, task_name, user_test_num, filename):
        participation = self.current_user

        if not self.r_params["testing_enabled"]:
            raise tornado.web.HTTPError(404)

        try:
            task = self.contest.get_task(task_name)
        except KeyError:
            raise tornado.web.HTTPError(404)
        if self.contest.restrict_level and self.current_user.user.level != task.level and self.current_user.user.level != "x" and task.level != "x":
            raise tornado.web.HTTPError(404)

        user_test = self.sql_session.query(UserTest)\
            .filter(UserTest.participation == participation)\
            .filter(UserTest.task == task)\
            .order_by(UserTest.timestamp)\
            .offset(int(user_test_num) - 1)\
            .first()
        if user_test is None:
            raise tornado.web.HTTPError(404)

        # filename is the name used by the browser, hence is something
        # like 'foo.c' (and the extension is CMS's preferred extension
        # for the language). To retrieve the right file, we need to
        # decode it to 'foo.%l'.
        stored_filename = filename
        if user_test.language is not None:
            extension = get_language(user_test.language).source_extension
            stored_filename = re.sub(r'%s$' % extension, '.%l', filename)

        if stored_filename in user_test.files:
            digest = user_test.files[stored_filename].digest
        elif stored_filename in user_test.managers:
            digest = user_test.managers[stored_filename].digest
        else:
            raise tornado.web.HTTPError(404)
        self.sql_session.close()

        mimetype = get_type_for_file_name(filename)
        if mimetype is None:
            mimetype = 'application/octet-stream'

        self.fetch(digest, mimetype, filename)
Example #25
    def get(self, task_name, user_test_num, filename):
        participation = self.current_user

        if not self.r_params["testing_enabled"]:
            raise tornado.web.HTTPError(404)

        try:
            task = self.contest.get_task(task_name)
        except KeyError:
            raise tornado.web.HTTPError(404)

        user_test = self.sql_session.query(UserTest)\
            .filter(UserTest.participation == participation)\
            .filter(UserTest.task == task)\
            .order_by(UserTest.timestamp)\
            .offset(int(user_test_num) - 1)\
            .first()
        if user_test is None:
            raise tornado.web.HTTPError(404)

        # filename is the name used by the browser, hence is something
        # like 'foo.c' (and the extension is CMS's preferred extension
        # for the language). To retrieve the right file, we need to
        # decode it to 'foo.%l'.
        stored_filename = filename
        if user_test.language is not None:
            extension = get_language(user_test.language).source_extension
            stored_filename = re.sub(r'%s$' % extension, '.%l', filename)

        if stored_filename in user_test.files:
            digest = user_test.files[stored_filename].digest
        elif stored_filename in user_test.managers:
            digest = user_test.managers[stored_filename].digest
        else:
            raise tornado.web.HTTPError(404)
        self.sql_session.close()

        mimetype = get_type_for_file_name(filename)
        if mimetype is None:
            mimetype = 'application/octet-stream'

        self.fetch(digest, mimetype, filename)
Example #26
    def _sources_names(self, language):
        # Source files are stored under cmstestsuite/code/.
        path = os.path.join(os.path.dirname(__file__), 'code')

        # Choose the correct file to submit.
        if language is not None:
            # First check if language is in alt_filenames. This allows
            # submitting different sources for languages that would
            # otherwise have matching source extensions.
            filenames = self.alt_filenames.get(language, self.filenames)

            ext = get_language(language).source_extension
            filenames = [filename.replace(".%l", ext)
                         for filename in filenames]
        else:
            filenames = self.filenames

        full_paths = [os.path.join(path, filename) for filename in filenames]

        return full_paths
Example #27
    def get(self, task_name, submission_num, filename):
        if not self.contest.submissions_download_allowed:
            raise tornado.web.HTTPError(404)

        task = self.get_task(task_name)
        if task is None:
            raise tornado.web.HTTPError(404)

        submission = self.get_submission(task, submission_num)
        if submission is None:
            raise tornado.web.HTTPError(404)

        # The following code assumes that submission.files is a subset
        # of task.submission_format. CWS will always ensure that for new
        # submissions, yet, if the submission_format changes during the
        # competition, this may not hold anymore for old submissions.

        # filename is the name used by the browser, hence is something
        # like 'foo.c' (and the extension is CMS's preferred extension
        # for the language). To retrieve the right file, we need to
        # decode it to 'foo.%l'.
        stored_filename = filename
        if submission.language is not None:
            extension = get_language(submission.language).source_extension
            stored_filename = re.sub(r'%s$' % extension, '.%l', filename)

        if stored_filename not in submission.files:
            raise tornado.web.HTTPError(404)

        digest = submission.files[stored_filename].digest
        self.sql_session.close()

        mimetype = get_type_for_file_name(filename)
        if mimetype is None:
            mimetype = 'application/octet-stream'

        self.fetch(digest, mimetype, filename)
Example #28
    def get(self, task_name, submission_num, filename):
        if not self.contest.submissions_download_allowed:
            raise tornado.web.HTTPError(404)

        task = self.get_task(task_name)
        if task is None:
            raise tornado.web.HTTPError(404)

        submission = self.get_submission(task, submission_num)
        if submission is None:
            raise tornado.web.HTTPError(404)

        # The following code assumes that submission.files is a subset
        # of task.submission_format. CWS will always ensure that for new
        # submissions, yet, if the submission_format changes during the
        # competition, this may not hold anymore for old submissions.

        # filename is the name used by the browser, hence is something
        # like 'foo.c' (and the extension is CMS's preferred extension
        # for the language). To retrieve the right file, we need to
        # decode it to 'foo.%l'.
        stored_filename = filename
        if submission.language is not None:
            extension = get_language(submission.language).source_extension
            stored_filename = re.sub(r'%s$' % extension, '.%l', filename)

        if stored_filename not in submission.files:
            raise tornado.web.HTTPError(404)

        digest = submission.files[stored_filename].digest
        self.sql_session.close()

        mimetype = get_type_for_file_name(filename)
        if mimetype is None:
            mimetype = 'application/octet-stream'

        self.fetch(digest, mimetype, filename)
Example #29
def _get_submission_file_path(target_dir, sid, language, timestamp, score,
                              comp_outcome, filename, username, task_name,
                              contest_name):
    """
    Get the full file path for the given submission.
    See export_submissions.
    Timestamps are hard coded to convert from UTC to Asia/Jerusalem.
    """

    # Time in directory name.
    utc_stamp = timestamp.replace(tzinfo=timezone('UTC'))
    local_stamp = utc_stamp.astimezone(timezone('Asia/Jerusalem'))
    time_str = local_stamp.strftime("%Y-%m-%d.%H-%M")

    # Score in directory name.
    if comp_outcome == "fail":
        score_str = "compilation-failed"
    elif score is None:
        score_str = "score-none"
    else:
        # Round down if there is no need for precision.
        if score - int(score) < 0.01:
            score_str = "score-%d" % int(score)
        else:
            score_str = "score-%.02f" % score

    # Submission directory name.
    submission_string = "%s.%s.%s.%s.%s" % (time_str, sid, task_name, username,
                                            score_str)

    # Replace file name extension if needed.
    if filename.endswith(".%l"):
        filename = filename[:-3] + get_language(language).source_extension

    # Join everything.
    return os.path.join(target_dir, contest_name, task_name, username,
                        submission_string, filename)
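
For instance, a submission made at 09:30 UTC would land under a directory name like the one produced by the runnable sketch below. zoneinfo stands in for pytz here purely to keep the sketch dependency-free, and every name in the path is made up.

    import os
    from datetime import datetime, timezone
    from zoneinfo import ZoneInfo  # stands in for pytz in this sketch

    utc_stamp = datetime(2024, 3, 1, 9, 30, tzinfo=timezone.utc)
    local_stamp = utc_stamp.astimezone(ZoneInfo("Asia/Jerusalem"))
    time_str = local_stamp.strftime("%Y-%m-%d.%H-%M")       # 2024-03-01.11-30
    submission_string = "%s.%s.%s.%s.%s" % (time_str, 42, "sum", "alice",
                                            "score-100")
    print(os.path.join("export", "contest1", "sum", "alice",
                       submission_string, "sum.cpp"))
    # export/contest1/sum/alice/2024-03-01.11-30.42.sum.alice.score-100/sum.cpp
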
Example #30
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if len(self.parameters) <= 0:
            num_processes = 1
        else:
            num_processes = self.parameters[0]
        indices = range(num_processes)
        # Create sandboxes and FIFOs
        sandbox_mgr = create_sandbox(file_cacher, job.multithreaded_sandbox)
        sandbox_user = [create_sandbox(file_cacher, job.multithreaded_sandbox)
                        for i in indices]
        fifo_dir = [tempfile.mkdtemp(dir=config.temp_dir) for i in indices]
        fifo_in = [os.path.join(fifo_dir[i], "in%d" % i) for i in indices]
        fifo_out = [os.path.join(fifo_dir[i], "out%d" % i) for i in indices]
        for i in indices:
            os.mkfifo(fifo_in[i])
            os.mkfifo(fifo_out[i])
            os.chmod(fifo_dir[i], 0o755)
            os.chmod(fifo_in[i], 0o666)
            os.chmod(fifo_out[i], 0o666)

        # First step: prepare the manager.
        manager_filename = "manager"
        manager_command = ["./%s" % manager_filename]
        for i in indices:
            manager_command.append(fifo_in[i])
            manager_command.append(fifo_out[i])
        manager_executables_to_get = {
            manager_filename:
            job.managers[manager_filename].digest
            }
        manager_files_to_get = {
            "input.txt": job.input
            }
        manager_allow_dirs = fifo_dir
        for filename, digest in manager_executables_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(
                filename, digest, executable=True)
        for filename, digest in manager_files_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename, digest)

        # Second step: load the executables for the user processes
        # (done before launching the manager so that it does not
        # impact its wall clock time).
        executable_filename = job.executables.keys()[0]
        executables_to_get = {
            executable_filename:
            job.executables[executable_filename].digest
            }
        for i in indices:
            for filename, digest in executables_to_get.iteritems():
                sandbox_user[i].create_file_from_storage(
                    filename, digest, executable=True)

        # Third step: start the manager.
        manager = evaluation_step_before_run(
            sandbox_mgr,
            manager_command,
            num_processes * job.time_limit,
            0,
            allow_dirs=manager_allow_dirs,
            writable_files=["output.txt"],
            stdin_redirect="input.txt")

        # Fourth step: start the user submissions compiled with the stub.
        language = get_language(job.language)
        processes = [None for i in indices]
        for i in indices:
            args = [fifo_out[i], fifo_in[i]]
            if num_processes != 1:
                args.append(str(i))
            commands = language.get_evaluation_commands(
                executable_filename,
                main="stub",
                args=args)
            user_allow_dirs = [fifo_dir[i]]
            # Assumes that the actual execution of the user solution
            # is the last command in commands, and that the previous
            # ones are "setup" commands that don't need tight control.
            if len(commands) > 1:
                evaluation_step(sandbox_user[i], commands[:-1], 10, 256)
            processes[i] = evaluation_step_before_run(
                sandbox_user[i],
                commands[-1],
                job.time_limit,
                job.memory_limit,
                allow_dirs=user_allow_dirs)

        # Consume output.
        wait_without_std(processes + [manager])
        # TODO: check exit codes with translate_box_exitcode.

        user_results = [evaluation_step_after_run(s) for s in sandbox_user]
        success_user = all(r[0] for r in user_results)
        plus_user = reduce(merge_evaluation_results,
                           [r[1] for r in user_results])
        success_mgr, unused_plus_mgr = \
            evaluation_step_after_run(sandbox_mgr)

        if plus_user['exit_status'] == Sandbox.EXIT_OK and \
                plus_user["execution_time"] >= job.time_limit:
            plus_user['exit_status'] = Sandbox.EXIT_TIMEOUT

        # Merge results.
        job.sandboxes = [s.path for s in sandbox_user] + [sandbox_mgr.path]
        job.plus = plus_user

        # If at least one evaluation had problems, we report the
        # problems.
        if not success_user or not success_mgr:
            success, outcome, text = False, None, None
        # If the user sandbox detected some problem (timeout, ...),
        # the outcome is 0.0 and the text describes that problem.
        elif not is_evaluation_passed(plus_user):
            success = True
            outcome, text = 0.0, human_evaluation_message(plus_user)
        # Otherwise, we use the manager to obtain the outcome.
        else:
            success = True
            outcome, text = extract_outcome_and_text(sandbox_mgr)

        # If asked so, save the output file, provided that it exists
        if job.get_output:
            if sandbox_mgr.file_exists("output.txt"):
                job.user_output = sandbox_mgr.get_file_to_storage(
                    "output.txt",
                    "Output file in job %s" % job.info)
            else:
                job.user_output = None

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox_mgr, job.success)
        for s in sandbox_user:
            delete_sandbox(s, job.success)
        if not config.keep_sandbox:
            for d in fifo_dir:
                rmtree(d)
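
The FIFO setup at the top of this method is plain POSIX plumbing. A minimal stand-alone sketch of just that part (POSIX-only; the directory is placed under the system temp dir rather than config.temp_dir):

    import os
    import tempfile
    from shutil import rmtree

    fifo_dir = tempfile.mkdtemp()               # config.temp_dir in CMS
    fifo_in = os.path.join(fifo_dir, "in0")
    fifo_out = os.path.join(fifo_dir, "out0")
    os.mkfifo(fifo_in)
    os.mkfifo(fifo_out)
    # World-readable directory and world-writable pipes, so that both the
    # manager sandbox and the user sandbox can open their respective ends.
    os.chmod(fifo_dir, 0o755)
    os.chmod(fifo_in, 0o666)
    os.chmod(fifo_out, 0o666)
    rmtree(fifo_dir)                            # cleanup, as at the end above
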
Example #31
File: Batch.py  Project: romeorizzi/cms
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if len(job.executables) != 1:
            raise ValueError("Unexpected number of executables (%s)" %
                             len(job.executables))

        # Create the sandbox
        sandbox = create_sandbox(file_cacher,
                                 multithreaded=job.multithreaded_sandbox,
                                 name="evaluate")

        # Prepare the execution
        executable_filename = next(iterkeys(job.executables))
        language = get_language(job.language)
        main = Batch.GRADER_BASENAME \
            if self._uses_grader() else executable_filename
        commands = language.get_evaluation_commands(executable_filename,
                                                    main=main)
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        stdin_redirect = None
        stdout_redirect = None
        files_allowing_write = []
        if len(self.input_filename) == 0:
            self.input_filename = Batch.DEFAULT_INPUT_FILENAME
            stdin_redirect = self.input_filename
        if len(self.output_filename) == 0:
            self.output_filename = Batch.DEFAULT_OUTPUT_FILENAME
            stdout_redirect = self.output_filename
        else:
            files_allowing_write.append(self.output_filename)
        files_to_get = {self.input_filename: job.input}

        # Put the required files into the sandbox
        for filename, digest in iteritems(executables_to_get):
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Actually performs the execution
        success, plus = evaluation_step(sandbox,
                                        commands,
                                        job.time_limit,
                                        job.memory_limit,
                                        writable_files=files_allowing_write,
                                        stdin_redirect=stdin_redirect,
                                        stdout_redirect=stdout_redirect)

        job.sandboxes = [sandbox.path]
        job.plus = plus

        outcome = None
        text = []

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(self.output_filename):
                outcome = 0.0
                text = [
                    N_("Evaluation didn't produce file %s"),
                    self.output_filename
                ]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage.
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        self.output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:

                    # Create a brand-new sandbox just for checking. Only admin
                    # code runs in it, so we allow multithreading and many
                    # processes (still with a limit to avoid fork-bombs).
                    checkbox = create_sandbox(file_cacher,
                                              multithreaded=True,
                                              name="check")
                    checkbox.max_processes = 1000

                    checker_success, outcome, text = self._eval_output(
                        checkbox, job, sandbox.get_root_path())
                    success = success and checker_success

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox, job.success)
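
The redirect logic above boils down to: no configured input file means the program reads from stdin, no configured output file means stdout is captured, and otherwise the named output file must be whitelisted as writable. A small pure-function sketch of that decision (names are illustrative, not CMS API):

    DEFAULT_INPUT = "input.txt"
    DEFAULT_OUTPUT = "output.txt"

    def io_setup(input_filename, output_filename):
        # Returns (stdin_redirect, stdout_redirect, writable_files).
        stdin_redirect = stdout_redirect = None
        writable_files = []
        if input_filename == "":
            input_filename = DEFAULT_INPUT
            stdin_redirect = input_filename
        if output_filename == "":
            output_filename = DEFAULT_OUTPUT
            stdout_redirect = output_filename
        else:
            writable_files.append(output_filename)
        return stdin_redirect, stdout_redirect, writable_files

    assert io_setup("", "") == ("input.txt", "output.txt", [])
    assert io_setup("data.in", "data.out") == (None, None, ["data.out"])
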
Example #32
File: Batch.py  Project: cms-dev/cms
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        language = get_language(job.language)
        source_ext = language.source_extension

        if not check_files_number(job, 1, or_more=True):
            return

        # Create the list of filenames to be passed to the compiler. If we use
        # a grader, it needs to be in first position in the command line, and
        # we check that it exists.
        filenames_to_compile = []
        filenames_and_digests_to_get = {}
        # The grader, which must have been provided (copy and add to
        # compilation).
        if self._uses_grader():
            grader_filename = self.GRADER_BASENAME + source_ext
            if not check_manager_present(job, grader_filename):
                return
            filenames_to_compile.append(grader_filename)
            filenames_and_digests_to_get[grader_filename] = \
                job.managers[grader_filename].digest
        # User's submitted file(s) (copy and add to compilation).
        for codename, file_ in job.files.items():
            filename = codename.replace(".%l", source_ext)
            filenames_to_compile.append(filename)
            filenames_and_digests_to_get[filename] = file_.digest
        # Any other useful manager (just copy).
        for filename, manager in job.managers.items():
            if is_manager_for_compilation(filename, language):
                filenames_and_digests_to_get[filename] = manager.digest

        # Prepare the compilation command.
        executable_filename = self._executable_filename(job.files.keys())
        commands = language.get_compilation_commands(
            filenames_to_compile, executable_filename)

        # Create the sandbox.
        sandbox = create_sandbox(file_cacher, name="compile")
        job.sandboxes.append(sandbox.get_root_path())

        # Copy required files in the sandbox (includes the grader if present).
        for filename, digest in filenames_and_digests_to_get.items():
            sandbox.create_file_from_storage(filename, digest)

        # Run the compilation.
        box_success, compilation_success, text, stats = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables.
        job.success = box_success
        job.compilation_success = compilation_success
        job.text = text
        job.plus = stats
        if box_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup.
        delete_sandbox(sandbox, job.success, job.keep_sandbox)
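The loop above turns each codename of the submission format into a concrete filename by substituting the ".%l" placeholder with the language's source extension. Here is a standalone sketch of that substitution, with made-up inputs:

def resolve_source_filenames(codenames, source_ext):
    # Replace the ".%l" language placeholder with a concrete extension,
    # as done for job.files in the compile step above.
    return [codename.replace(".%l", source_ext) for codename in codenames]

print(resolve_source_filenames(["taskname.%l"], ".cpp"))
# ['taskname.cpp']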
Example #33
0
File: Batch.py Project: Nyrio/cms
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if not check_executables_number(job, 1):
            return

        # Prepare the execution
        executable_filename = next(iterkeys(job.executables))
        language = get_language(job.language)
        main = self.GRADER_BASENAME \
            if self._uses_grader() else executable_filename
        commands = language.get_evaluation_commands(
            executable_filename, main=main)
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        files_to_get = {
            self._actual_input: job.input
        }

        # Check which redirects we need to perform; if the output is not
        # managed via a redirect, the submission needs to be able to
        # write to it.
        files_allowing_write = []
        stdin_redirect = None
        stdout_redirect = None
        if len(self.input_filename) == 0:
            stdin_redirect = self._actual_input
        if len(self.output_filename) == 0:
            stdout_redirect = self._actual_output
        else:
            files_allowing_write.append(self._actual_output)

        # Create the sandbox
        sandbox = create_sandbox(file_cacher, name="evaluate")
        job.sandboxes.append(sandbox.get_root_path())

        # Put the required files into the sandbox
        for filename, digest in iteritems(executables_to_get):
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Actually performs the execution
        box_success, evaluation_success, stats = evaluation_step(
            sandbox,
            commands,
            job.time_limit,
            job.memory_limit,
            writable_files=files_allowing_write,
            stdin_redirect=stdin_redirect,
            stdout_redirect=stdout_redirect,
            multiprocess=job.multithreaded_sandbox)

        outcome = None
        text = None

        # Error in the sandbox: nothing to do!
        if not box_success:
            pass

        # Contestant's error: the marks won't be good
        elif not evaluation_success:
            outcome = 0.0
            text = human_evaluation_message(stats)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(self._actual_output):
                outcome = 0.0
                text = [N_("Evaluation didn't produce file %s"),
                        self._actual_output]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage.
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        self._actual_output,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:
                    box_success, outcome, text = eval_output(
                        file_cacher, job,
                        self.CHECKER_CODENAME
                        if self._uses_checker() else None,
                        user_output_path=sandbox.relative_path(
                            self._actual_output),
                        user_output_filename=self.output_filename)

        # Fill in the job with the results.
        job.success = box_success
        job.outcome = str(outcome) if outcome is not None else None
        job.text = text
        job.plus = stats

        delete_sandbox(sandbox, job.success)
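The redirect logic above hinges on whether input_filename and output_filename are empty: an empty name means the program uses stdin/stdout (and the sandbox falls back to a default file name), otherwise the real file is used and the output file must be writable. A simplified standalone sketch of that decision (the default names are assumptions for this sketch):

def plan_io(input_filename, output_filename):
    # Mirror the stdin/stdout redirection decision of Batch.evaluate.
    actual_input = input_filename or "input.txt"
    actual_output = output_filename or "output.txt"
    stdin_redirect = actual_input if input_filename == "" else None
    stdout_redirect = actual_output if output_filename == "" else None
    writable_files = [] if output_filename == "" else [actual_output]
    return stdin_redirect, stdout_redirect, writable_files

print(plan_io("", ""))
# ('input.txt', 'output.txt', [])
print(plan_io("in.txt", "out.txt"))
# (None, None, ['out.txt'])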
Example #34
0
    def post(self, task_name):
        participation = self.current_user

        if not self.r_params["testing_enabled"]:
            raise tornado.web.HTTPError(404)

        try:
            task = self.contest.get_task(task_name)
        except KeyError:
            raise tornado.web.HTTPError(404)

        self.fallback_page = ["testing"]
        self.fallback_args = {"task_name": task.name}

        # Check that the task is testable
        task_type = get_task_type(dataset=task.active_dataset)
        if not task_type.testable:
            logger.warning("User %s tried to make test on task %s.",
                           participation.user.username, task_name)
            raise tornado.web.HTTPError(404)

        # Alias for easy access
        contest = self.contest

        # Enforce maximum number of user_tests
        try:
            if contest.max_user_test_number is not None:
                user_test_c = self.sql_session.query(func.count(UserTest.id))\
                    .join(UserTest.task)\
                    .filter(Task.contest == contest)\
                    .filter(UserTest.participation == participation)\
                    .scalar()
                if user_test_c >= contest.max_user_test_number and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("You have reached the maximum limit of "
                               "at most %d tests among all tasks.") %
                        contest.max_user_test_number)
            if task.max_user_test_number is not None:
                user_test_t = self.sql_session.query(func.count(UserTest.id))\
                    .filter(UserTest.task == task)\
                    .filter(UserTest.participation == participation)\
                    .scalar()
                if user_test_t >= task.max_user_test_number and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("You have reached the maximum limit of "
                               "at most %d tests on this task.") %
                        task.max_user_test_number)
        except ValueError as error:
            self._send_error(self._("Too many tests!"), str(error))
            return

        # Enforce minimum time between user_tests
        try:
            if contest.min_user_test_interval is not None:
                last_user_test_c = self.sql_session.query(UserTest)\
                    .join(UserTest.task)\
                    .filter(Task.contest == contest)\
                    .filter(UserTest.participation == participation)\
                    .order_by(UserTest.timestamp.desc())\
                    .first()
                if last_user_test_c is not None and \
                        self.timestamp - last_user_test_c.timestamp < \
                        contest.min_user_test_interval and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("Among all tasks, you can test again "
                               "after %d seconds from last test.") %
                        contest.min_user_test_interval.total_seconds())
            # We get the last user_test even if we may not need it
            # for min_user_test_interval because we may need it later,
            # in case this is an ALLOW_PARTIAL_SUBMISSION task.
            last_user_test_t = self.sql_session.query(UserTest)\
                .filter(UserTest.participation == participation)\
                .filter(UserTest.task == task)\
                .order_by(UserTest.timestamp.desc())\
                .first()
            if task.min_user_test_interval is not None:
                if last_user_test_t is not None and \
                        self.timestamp - last_user_test_t.timestamp < \
                        task.min_user_test_interval and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("For this task, you can test again "
                               "after %d seconds from last test.") %
                        task.min_user_test_interval.total_seconds())
        except ValueError as error:
            self._send_error(self._("Tests too frequent!"), str(error))
            return

        # Required files from the user.
        required = set([sfe.filename for sfe in task.submission_format] +
                       task_type.get_user_managers(task.submission_format) +
                       ["input"])

        # Ensure that the user did not submit multiple files with the
        # same name.
        if any(
                len(filename) != 1
                for filename in itervalues(self.request.files)):
            self._send_error(self._("Invalid test format!"),
                             self._("Please select the correct files."))
            return

        # If the user submitted an archive, extract it and use its content
        # as request.files. This is only valid for "output only" tasks
        # (i.e., not for submissions requiring a programming language
        # identification).
        if len(self.request.files) == 1 and \
                next(iterkeys(self.request.files)) == "submission":
            if any(filename.endswith(".%l") for filename in required):
                self._send_error(self._("Invalid test format!"),
                                 self._("Please select the correct files."),
                                 task)
                return
            archive_data = self.request.files["submission"][0]
            del self.request.files["submission"]

            # Create the archive.
            archive = Archive.from_raw_data(archive_data["body"])

            if archive is None:
                self._send_error(
                    self._("Invalid archive format!"),
                    self._("The submitted archive could not be opened."))
                return

            # Extract the archive.
            unpacked_dir = archive.unpack()
            for name in archive.namelist():
                filename = os.path.basename(name)
                body = open(os.path.join(unpacked_dir, filename), "r").read()
                self.request.files[filename] = [{
                    'filename': filename,
                    'body': body
                }]

            archive.cleanup()

        # This ensures that the user sent one file for every name in
        # submission format and no more. Less is acceptable if task
        # type says so.
        provided = set(iterkeys(self.request.files))
        if not (required == provided or (task_type.ALLOW_PARTIAL_SUBMISSION
                                         and required.issuperset(provided))):
            self._send_error(self._("Invalid test format!"),
                             self._("Please select the correct files."))
            return

        # Add submitted files. After this, files is a dictionary indexed
        # by *our* filenames (something like "output01.txt" or
        # "taskname.%l"), whose values are pairs
        # (user_assigned_filename, content).
        files = {}
        for uploaded, data in iteritems(self.request.files):
            files[uploaded] = (data[0]["filename"], data[0]["body"])

        # Read the submission language provided in the request; we
        # integrate it with the language fetched from the previous
        # submission (if we use it) and later make sure it is
        # recognized and allowed.
        submission_lang = self.get_argument("language", None)
        need_lang = any(
            our_filename.find(".%l") != -1 for our_filename in files)

        # If we allow partial submissions, we implicitly recover the
        # non-submitted files from the previous user test and put them
        # in file_digests (i.e. as if they had already been sent to FS).
        file_digests = {}
        if task_type.ALLOW_PARTIAL_SUBMISSION and \
                last_user_test_t is not None and \
                (submission_lang is None or
                 submission_lang == last_user_test_t.language):
            submission_lang = last_user_test_t.language
            for filename in required.difference(provided):
                if filename in last_user_test_t.files:
                    file_digests[filename] = \
                        last_user_test_t.files[filename].digest

        # Throw an error if task needs a language, but we don't have
        # it or it is not allowed / recognized.
        if need_lang:
            error = None
            if submission_lang is None:
                error = self._("Cannot recognize the user test language.")
            elif submission_lang not in contest.languages:
                error = self._("Language %s not allowed in this contest.") \
                    % submission_lang
            if error is not None:
                self._send_error(self._("Invalid test!"), error)
                return

        # Check if submitted files are small enough.
        if any([
                len(f[1]) > config.max_submission_length
                for n, f in iteritems(files) if n != "input"
        ]):
            self._send_error(
                self._("Test too big!"),
                self._("Each source file must be at most %d bytes long.") %
                config.max_submission_length)
            return
        if len(files["input"][1]) > config.max_input_length:
            self._send_error(
                self._("Input too big!"),
                self._("The input file must be at most %d bytes long.") %
                config.max_input_length)
            return

        # All checks done, submission accepted.

        # Attempt to store the submission locally to be able to
        # recover a failure.
        if config.tests_local_copy:
            try:
                path = os.path.join(
                    config.tests_local_copy_path.replace(
                        "%s", config.data_dir), participation.user.username)
                if not os.path.exists(path):
                    os.makedirs(path)
                # Pickle in ASCII format produces str, not unicode,
                # therefore we open the file in binary mode.
                with io.open(
                        os.path.join(path,
                                     "%d" % make_timestamp(self.timestamp)),
                        "wb") as file_:
                    pickle.dump((self.contest.id, participation.user.id,
                                 task.id, files), file_)
            except Exception as error:
                logger.error("Test local copy failed.", exc_info=True)

        # We now have to send all the files to the destination...
        try:
            for filename in files:
                digest = self.service.file_cacher.put_file_content(
                    files[filename][1], "Test file %s sent by %s at %d." %
                    (filename, participation.user.username,
                     make_timestamp(self.timestamp)))
                file_digests[filename] = digest

        # In case of error, the server aborts the submission
        except Exception as error:
            logger.error("Storage failed! %s", error)
            self._send_error(self._("Test storage failed!"),
                             self._("Please try again."))
            return

        # All the files are stored, ready to submit!
        logger.info("All files stored for test sent by %s",
                    participation.user.username)
        user_test = UserTest(self.timestamp,
                             submission_lang,
                             file_digests["input"],
                             participation=participation,
                             task=task)

        for filename in [sfe.filename for sfe in task.submission_format]:
            digest = file_digests[filename]
            self.sql_session.add(
                UserTestFile(filename, digest, user_test=user_test))
        for filename in task_type.get_user_managers(task.submission_format):
            digest = file_digests[filename]
            if submission_lang is not None:
                extension = get_language(submission_lang).source_extension
                filename = filename.replace(".%l", extension)
            self.sql_session.add(
                UserTestManager(filename, digest, user_test=user_test))

        self.sql_session.add(user_test)
        self.sql_session.commit()
        self.service.evaluation_service.new_user_test(
            user_test_id=user_test.id)
        self.service.add_notification(
            participation.user.username, self.timestamp,
            self._("Test received"),
            self._("Your test has been received "
                   "and is currently being executed."), NOTIFICATION_SUCCESS)

        # The argument (encrypted user test id) is not used by CWS
        # (nor does it disclose information to the user), but it is useful
        # for automatic testing to obtain the user test id.
        self.redirect(
            self.contest_url(*self.fallback_page,
                             user_test_id=encrypt_number(user_test.id),
                             **self.fallback_args))
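The handler above rejects a new test if it arrives too soon after the previous one for the same participation. Below is a standalone sketch of that interval check using datetime objects; the function name and arguments are illustrative, not the CMS API.

from datetime import datetime, timedelta

def check_min_interval(now, last_timestamp, min_interval, unrestricted=False):
    # Raise ValueError if a new test comes too soon after the previous one,
    # mirroring the min_user_test_interval checks above.
    if min_interval is None or last_timestamp is None or unrestricted:
        return
    if now - last_timestamp < min_interval:
        raise ValueError("You can test again after %d seconds from last test."
                         % min_interval.total_seconds())

try:
    check_min_interval(datetime(2024, 1, 1, 12, 0, 30),
                       datetime(2024, 1, 1, 12, 0, 0),
                       timedelta(seconds=60))
except ValueError as err:
    print(err)
# You can test again after 60 seconds from last test.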
Example #35
0
File: Communication.py Project: xjzhou/cms
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = get_language(job.language)
        source_ext = language.source_extension

        # Create the sandbox
        sandbox = create_sandbox(file_cacher, job.multithreaded_sandbox)
        job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        source_filenames = []
        # Stub.
        stub_filename = "stub%s" % source_ext
        source_filenames.append(stub_filename)
        files_to_get[stub_filename] = job.managers[stub_filename].digest
        # User's submission.
        for filename, fileinfo in job.files.iteritems():
            source_filename = filename.replace(".%l", source_ext)
            source_filenames.append(source_filename)
            files_to_get[source_filename] = fileinfo.digest

        # Also copy all managers that might be useful during compilation.
        for filename in job.managers.iterkeys():
            if any(filename.endswith(header) for header in HEADER_EXTS):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(filename.endswith(source) for source in SOURCE_EXTS):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(filename.endswith(obj) for obj in OBJECT_EXTS):
                files_to_get[filename] = \
                    job.managers[filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = \
            "_".join(pattern.replace(".%l", "")
                     for pattern in job.files.keys())
        commands = language.get_compilation_commands(source_filenames,
                                                     executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox, job.success)
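In this fork the executable name is obtained by joining all submission codenames with underscores after stripping the ".%l" placeholder. A minimal sketch of that derivation, with made-up codenames:

def executable_name(codenames):
    # Join the ".%l"-stripped codenames with underscores, as done above.
    return "_".join(pattern.replace(".%l", "") for pattern in codenames)

print(executable_name(["encoder.%l", "decoder.%l"]))
# encoder_decoder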
Example #36
0
def main():
    """Parse arguments and launch process.

    """
    parser = argparse.ArgumentParser(
        description="Export CMS submissions to a folder.")
    parser.add_argument("-c", "--contest-id", action="store", type=int,
                        help="id of contest (default: all contests)")
    parser.add_argument("-t", "--task-id", action="store", type=int,
                        help="id of task (default: all tasks)")
    parser.add_argument("-u", "--user-id", action="store", type=int,
                        help="id of user (default: all users)")
    parser.add_argument("-s", "--submission-id", action="store", type=int,
                        help="id of submission (default: all submissions)")
    parser.add_argument("--utf8", action="store_true",
                        help="if set, the files will be encoded in utf8"
                             " when possible")
    parser.add_argument("--add-info", action="store_true",
                        help="if set, information on the submission will"
                             " be added in the first lines of each file")
    parser.add_argument("--min-score", action="store", type=float,
                        help="ignore submissions which scored strictly"
                             " less than this (default: 0.0)",
                        default=0.0)
    parser.add_argument("--filename", action="store", type=utf8_decoder,
                        help="the filename format to use"
                             " (default: {id}.{name}{ext})",
                        default="{id}.{name}{ext}")
    parser.add_argument("output_dir", action="store", type=utf8_decoder,
                        help="directory where to save the submissions")

    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument("--unique", action="store_true",
                       help="if set, only the earliest best submission"
                            " will be exported for each (user, task)")
    group.add_argument("--best", action="store_true",
                       help="if set, only the best submissions will be"
                            " exported for each (user, task)")

    args = parser.parse_args()

    if args.add_info and not args.utf8:
        logger.critical("If --add-info is specified, then --utf8 must be"
                        " specified as well.")
        return 1

    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)
    if not os.path.isdir(args.output_dir):
        logger.critical("The output-dir parameter must point to a directory")
        return 1

    with SessionGen() as session:
        q = session.query(Submission)\
            .join(Submission.task)\
            .join(Submission.files)\
            .join(Submission.results)\
            .join(SubmissionResult.dataset)\
            .join(Submission.participation)\
            .join(Participation.user)\
            .filter(Dataset.id == Task.active_dataset_id)\
            .filter(SubmissionResult.score >= args.min_score)\
            .with_entities(Submission.id, Submission.language,
                           Submission.timestamp,
                           SubmissionResult.score,
                           File.filename, File.digest,
                           User.id, User.username, User.first_name,
                           User.last_name,
                           Task.id, Task.name)

        if args.contest_id:
            q = q.filter(Participation.contest_id == args.contest_id)

        if args.task_id:
            q = q.filter(Submission.task_id == args.task_id)

        if args.user_id:
            q = q.filter(Participation.user_id == args.user_id)

        if args.submission_id:
            q = q.filter(Submission.id == args.submission_id)

        results = q.all()

        if args.unique or args.best:
            results = filter_top_scoring(results, args.unique)

        print("%s file(s) will be created." % len(results))
        if raw_input("Continue? [Y/n] ").lower() not in ["y", ""]:
            return 0

        done = 0
        for row in results:
            s_id, s_language, s_timestamp, sr_score, f_filename, f_digest, \
                u_id, u_name, u_fname, u_lname, t_id, t_name = row

            name = f_filename
            if name.endswith(".%l"):
                name = name[:-3]  # remove last 3 chars
            ext = languagemanager.get_language(s_language).source_extension \
                if s_language else '.txt'

            filename = args.filename.format(id=s_id, name=name, ext=ext,
                                            time=s_timestamp, user=u_name,
                                            task=t_name)
            filename = os.path.join(args.output_dir, filename)
            if os.path.exists(filename):
                logger.warning("Skipping file '%s' because it already exists",
                               filename)
                continue
            filedir = os.path.dirname(filename)
            if not os.path.exists(filedir):
                os.makedirs(filedir)
            if not os.path.isdir(filedir):
                logger.warning("%s is not a directory, skipped.", filedir)
                continue

            fso = FSObject.get_from_digest(f_digest, session)
            assert fso is not None
            with fso.get_lobject(mode="rb") as file_obj:
                data = file_obj.read()

                if args.utf8:
                    try:
                        data = utf8_decoder(data)
                    except TypeError:
                        logger.critical("Could not guess encoding of file "
                                        "'%s'. Aborting.",
                                        filename)
                        sys.exit(1)

                    if args.add_info:
                        data = TEMPLATE[ext] % (
                            u_name,
                            u_fname,
                            u_lname,
                            t_name,
                            sr_score,
                            s_timestamp
                        ) + data

                    # Print utf8-encoded, possibly altered data
                    with codecs.open(filename, "w", encoding="utf-8") as f_out:
                        f_out.write(data)
                else:
                    # Print raw, untouched binary data
                    with open(filename, "wb") as f_out:
                        f_out.write(data)

            done += 1
            print(done, "/", len(results))

    return 0
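Each exported file path is produced from the --filename template via str.format with the fields shown above (id, name, ext, time, user, task). A short standalone sketch of that templating with purely illustrative values:

import os

# Hypothetical values standing in for one row of the query above.
template = "{id}.{name}{ext}"
filename = template.format(id=42, name="taskname", ext=".cpp",
                           time="2024-01-01 10:00:00", user="alice",
                           task="taskname")
print(os.path.join("exported", filename))
# exported/42.taskname.cpp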
Example #37
0
File: Batch.py Project: smadbe/cms
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = get_language(job.language)
        source_ext = language.source_extension

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(job.files) != 1:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            logger.error("Submission contains %d files, expecting 1",
                         len(job.files), extra={"operation": job.info})
            return True

        # Create the sandbox
        sandbox = create_sandbox(file_cacher, job.multithreaded_sandbox)
        job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        format_filename = job.files.keys()[0]
        source_filenames = []
        source_filenames.append(format_filename.replace(".%l", source_ext))
        files_to_get[source_filenames[0]] = \
            job.files[format_filename].digest
        # If a grader is specified, we add to the command line (and to
        # the files to get) the corresponding manager. The grader must
        # be the first file in source_filenames.
        if self._uses_grader():
            source_filenames.insert(0, "grader%s" % source_ext)
            files_to_get["grader%s" % source_ext] = \
                job.managers["grader%s" % source_ext].digest

        # Also copy all managers that might be useful during compilation.
        for filename in job.managers.iterkeys():
            if any(filename.endswith(header) for header in HEADER_EXTS):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(filename.endswith(source) for source in SOURCE_EXTS):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(filename.endswith(obj) for obj in OBJECT_EXTS):
                files_to_get[filename] = \
                    job.managers[filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = format_filename.replace(".%l", "")
        commands = language.get_compilation_commands(
            source_filenames, executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" %
                (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox, job.success)
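The compile step above copies into the sandbox every manager whose name ends with a known header, source, or object extension. Here is a standalone sketch of that filter; the extension lists are assumptions standing in for the HEADER_EXTS, SOURCE_EXTS and OBJECT_EXTS constants:

# Illustrative extension lists; the real constants live in the CMS codebase.
HEADER_EXTS = [".h"]
SOURCE_EXTS = [".c", ".cpp"]
OBJECT_EXTS = [".o"]

def compilation_managers(manager_names):
    # Keep only the manager files useful at compilation time.
    relevant = tuple(HEADER_EXTS + SOURCE_EXTS + OBJECT_EXTS)
    return [name for name in manager_names if name.endswith(relevant)]

print(compilation_managers(["graderlib.h", "graderlib.cpp", "checker",
                            "input0.txt"]))
# ['graderlib.h', 'graderlib.cpp']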
Example #38
0
File: Batch.py Project: smadbe/cms
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        # Create the sandbox
        sandbox = create_sandbox(file_cacher, job.multithreaded_sandbox)

        # Prepare the execution
        executable_filename = job.executables.keys()[0]
        language = get_language(job.language)
        commands = language.get_evaluation_commands(
            executable_filename,
            main="grader" if self._uses_grader() else executable_filename)
        executables_to_get = {
            executable_filename:
            job.executables[executable_filename].digest
            }
        input_filename, output_filename = self.parameters[1]
        stdin_redirect = None
        stdout_redirect = None
        files_allowing_write = []
        if input_filename == "":
            input_filename = "input.txt"
            stdin_redirect = input_filename
        if output_filename == "":
            output_filename = "output.txt"
            stdout_redirect = output_filename
        else:
            files_allowing_write.append(output_filename)
        files_to_get = {
            input_filename: job.input
            }

        # Put the required files into the sandbox
        for filename, digest in executables_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Actually performs the execution
        success, plus = evaluation_step(
            sandbox,
            commands,
            job.time_limit,
            job.memory_limit,
            writable_files=files_allowing_write,
            stdin_redirect=stdin_redirect,
            stdout_redirect=stdout_redirect)

        job.sandboxes = [sandbox.path]
        job.plus = plus

        outcome = None
        text = None

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(output_filename):
                outcome = 0.0
                text = [N_("Evaluation didn't produce file %s"),
                        output_filename]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy
                # outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:

                    # Put the reference solution into the sandbox
                    sandbox.create_file_from_storage(
                        "res.txt",
                        job.output)

                    # Check the solution with white_diff
                    if self.parameters[2] == "diff":
                        outcome, text = white_diff_step(
                            sandbox, output_filename, "res.txt")

                    # Check the solution with a comparator
                    elif self.parameters[2] == "comparator":
                        manager_filename = "checker"

                        if manager_filename not in job.managers:
                            logger.error("Configuration error: missing or "
                                         "invalid comparator (it must be "
                                         "named 'checker')",
                                         extra={"operation": job.info})
                            success = False

                        else:
                            sandbox.create_file_from_storage(
                                manager_filename,
                                job.managers[manager_filename].digest,
                                executable=True)
                            # Rewrite input file. The untrusted
                            # contestant program should not be able to
                            # modify it; however, the grader may
                            # destroy the input file to prevent the
                            # contestant's program from directly
                            # accessing it. Since we cannot create
                            # files already existing in the sandbox,
                            # we try removing the file first.
                            try:
                                sandbox.remove_file(input_filename)
                            except OSError as e:
                                # Let us be extra sure that the file
                                # was actually removed and we did not
                                # mess up with permissions.
                                assert not sandbox.file_exists(input_filename)
                            sandbox.create_file_from_storage(
                                input_filename,
                                job.input)

                            # Allow using any number of processes (because e.g.
                            # one may want to write a bash checker that calls
                            # other processes). Set to a high but finite number
                            # to avoid fork-bombing the worker.
                            sandbox.max_processes = 1000

                            success, _ = evaluation_step(
                                sandbox,
                                [["./%s" % manager_filename,
                                  input_filename, "res.txt", output_filename]])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox)
                            except ValueError as e:
                                logger.error("Invalid output from "
                                             "comparator: %s", e.message,
                                             extra={"operation": job.info})
                                success = False

                    else:
                        raise ValueError("Unrecognized third parameter"
                                         " `%s' for Batch tasktype." %
                                         self.parameters[2])
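With the "diff" parameter the output is compared against the reference by white_diff_step, a whitespace-tolerant comparison. A rough standalone illustration of that kind of check (this only shows the idea, not the CMS implementation):

def whitespace_tolerant_equal(user_output, reference_output):
    # Compare the two outputs token by token, ignoring the exact amount
    # of whitespace and blank lines between tokens.
    return user_output.split() == reference_output.split()

print(whitespace_tolerant_equal("1 2   3\n", "1 2 3"))
# True
print(whitespace_tolerant_equal("1 2 4\n", "1 2 3"))
# False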
Example #39
0
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if not check_executables_number(job, 1):
            return
        executable_filename = next(iterkeys(job.executables))
        executable_digest = job.executables[executable_filename].digest

        # Make sure the required manager is among the job managers.
        if not check_manager_present(job, self.MANAGER_FILENAME):
            return
        manager_digest = job.managers[self.MANAGER_FILENAME].digest

        # Indices for the objects related to each user process.
        indices = range(self.num_processes)

        # Create FIFOs.
        fifo_dir = [tempfile.mkdtemp(dir=config.temp_dir) for i in indices]
        if not self._uses_grader():
            abortion_control_fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
        fifo_user_to_manager = [
            os.path.join(fifo_dir[i], "u%d_to_m" % i) for i in indices]
        fifo_manager_to_user = [
            os.path.join(fifo_dir[i], "m_to_u%d" % i) for i in indices]
        if not self._uses_grader():
            fifo_solution_quitter = os.path.join(abortion_control_fifo_dir, "sq")
            fifo_manager_quitter = os.path.join(abortion_control_fifo_dir, "mq")
        for i in indices:
            os.mkfifo(fifo_user_to_manager[i])
            os.mkfifo(fifo_manager_to_user[i])
            os.chmod(fifo_dir[i], 0o755)
            os.chmod(fifo_user_to_manager[i], 0o666)
            os.chmod(fifo_manager_to_user[i], 0o666)
        if not self._uses_grader():
            os.mkfifo(fifo_solution_quitter)
            os.mkfifo(fifo_manager_quitter)
            os.chmod(abortion_control_fifo_dir, 0o755)
            os.chmod(fifo_solution_quitter, 0o666)
            os.chmod(fifo_manager_quitter, 0o666)

        # Names of the fifos after being mapped inside the sandboxes.
        sandbox_fifo_dir = ["/fifo%d" % i for i in indices]
        sandbox_fifo_user_to_manager = [
            os.path.join(sandbox_fifo_dir[i], "u%d_to_m" % i) for i in indices]
        sandbox_fifo_manager_to_user = [
            os.path.join(sandbox_fifo_dir[i], "m_to_u%d" % i) for i in indices]
        if not self._uses_grader():
            sandbox_abortion_control_fifo_dir = "/abort"
            sandbox_fifo_solution_quitter = \
                os.path.join(sandbox_abortion_control_fifo_dir, "sq")
            sandbox_fifo_manager_quitter = \
                os.path.join(sandbox_abortion_control_fifo_dir, "mq")

        # Create the manager sandbox and copy manager and input and
        # reference output.
        sandbox_mgr = create_sandbox(file_cacher, name="manager_evaluate")
        job.sandboxes.append(sandbox_mgr.get_root_path())
        sandbox_mgr.create_file_from_storage(
            self.MANAGER_FILENAME, manager_digest, executable=True)
        sandbox_mgr.create_file_from_storage(
            self.INPUT_FILENAME, job.input)
        sandbox_mgr.create_file_from_storage(
            self.OK_FILENAME, job.output)

        # Create the user sandbox(es) and copy the executable.
        sandbox_user = [create_sandbox(file_cacher, name="user_evaluate")
                        for i in indices]
        job.sandboxes.extend(s.get_root_path() for s in sandbox_user)
        for i in indices:
            sandbox_user[i].create_file_from_storage(
                executable_filename, executable_digest, executable=True)

        # Start the manager. Redirecting to stdin is unnecessary, but for
        # historical reasons the manager can choose to read from there
        # rather than from INPUT_FILENAME.
        manager_command = ["./%s" % self.MANAGER_FILENAME]
        for i in indices:
            manager_command += [sandbox_fifo_user_to_manager[i],
                                sandbox_fifo_manager_to_user[i]]
        if not self._uses_grader():
            manager_command += [sandbox_fifo_solution_quitter,
                                sandbox_fifo_manager_quitter]
        # We could use trusted_step for the manager, since it's fully
        # admin-controlled. But trusted_step is only synchronous at the moment.
        # Thus we use evaluation_step, and we set a time limit generous enough
        # to prevent user programs from sending the manager in timeout.
        # This means that:
        # - the manager wall clock timeout must be greater than the sum of all
        #     wall clock timeouts of the user programs;
        # - with the assumption that the work the manager performs is not
        #     greater than the work performed by the user programs, the manager
        #     user timeout must be greater than the maximum allowed total time
        #     of the user programs; in theory, this is the task's time limit,
        #     but in practice it is num_processes times that because the
        #     constraint on the total time can only be enforced after all user
        #     programs terminated.
        manager_time_limit = max(self.num_processes * (job.time_limit + 1.0),
                                 config.trusted_sandbox_max_time_s)
        manager_dirs_map = dict((fifo_dir[i], (sandbox_fifo_dir[i], "rw"))
                                for i in indices)
        if not self._uses_grader():
            manager_dirs_map[abortion_control_fifo_dir] = \
                (sandbox_abortion_control_fifo_dir, "rw")
        manager = evaluation_step_before_run(
            sandbox_mgr,
            manager_command,
            manager_time_limit,
            config.trusted_sandbox_max_memory_kib // 1024,
            dirs_map=manager_dirs_map,
            writable_files=[self.OUTPUT_FILENAME],
            stdin_redirect=self.INPUT_FILENAME,
            multiprocess=job.multithreaded_sandbox)

        if not self._uses_grader():
            solution_quitter = open(fifo_solution_quitter, "r")
            manager_quitter = open(fifo_manager_quitter, "w")
            manager_quitter_open = True

        # Start the user submissions compiled with the stub.
        language = get_language(job.language)
        processes = [None for i in indices]
        for i in indices:
            args = [sandbox_fifo_manager_to_user[i],
                    sandbox_fifo_user_to_manager[i]]
            if self.num_processes != 1:
                args.append(str(i))
            if self._uses_grader():
                main = self.STUB_BASENAME
            else:
                main = executable_filename
            commands = language.get_evaluation_commands(
                executable_filename,
                main=main,
                args=args)
            # Assumes that the actual execution of the user solution is the
            # last command in commands, and that the previous ones are
            # "setup" commands that don't need tight control.
            if len(commands) > 1:
                trusted_step(sandbox_user[i], commands[:-1])
            processes[i] = evaluation_step_before_run(
                sandbox_user[i],
                commands[-1],
                job.time_limit,
                job.memory_limit,
                dirs_map={fifo_dir[i]: (sandbox_fifo_dir[i], "rw")},
                stdin_redirect=sandbox_fifo_manager_to_user[i],
                stdout_redirect=sandbox_fifo_user_to_manager[i],
                multiprocess=job.multithreaded_sandbox)

        if not self._uses_grader():
            # Manager still running but wants to quit
            if solution_quitter.read() == "<3":
                for i in indices:
                    processes[i].send_signal(signal.SIGINT)  # Kill user
                wait_without_std(processes)
                manager_quitter.close()
                manager_quitter_open = False

        # Wait for the processes to conclude, without blocking them on I/O.
        wait_without_std(processes + [manager])

        if not self._uses_grader():
            solution_quitter.close()
            if manager_quitter_open:
                manager_quitter.close()

        # Get the results of the manager sandbox.
        box_success_mgr, evaluation_success_mgr, unused_stats_mgr = \
            evaluation_step_after_run(sandbox_mgr)

        # Coalesce the results of the user sandboxes.
        user_results = [evaluation_step_after_run(s) for s in sandbox_user]
        box_success_user = all(r[0] for r in user_results)
        evaluation_success_user = all(r[1] for r in user_results)
        stats_user = reduce(merge_execution_stats,
                            [r[2] for r in user_results])
        # The actual running time is the sum of every user process, but each
        # sandbox can only check its own; if the sum is greater than the time
        # limit we adjust the result.
        if box_success_user and evaluation_success_user and \
                stats_user["execution_time"] >= job.time_limit:
            evaluation_success_user = False
            stats_user['exit_status'] = Sandbox.EXIT_TIMEOUT

        success = box_success_user \
            and box_success_mgr and evaluation_success_mgr
        outcome = None
        text = None

        # If at least one sandbox had problems, or the manager did not
        # terminate correctly, we report an error (and no need for user stats).
        if not success:
            stats_user = None

        # If just asked to execute, fill text and set dummy outcome.
        elif job.only_execution:
            outcome = 0.0
            text = [N_("Execution completed successfully")]

        # If the user sandbox detected some problem (timeout, ...),
        # the outcome is 0.0 and the text describes that problem.
        elif not evaluation_success_user:
            outcome = 0.0
            text = human_evaluation_message(stats_user)

        # Otherwise, we use the manager to obtain the outcome.
        else:
            outcome, text = extract_outcome_and_text(sandbox_mgr)

        # If asked so, save the output file with additional information,
        # provided that it exists.
        if job.get_output:
            if sandbox_mgr.file_exists(self.OUTPUT_FILENAME):
                job.user_output = sandbox_mgr.get_file_to_storage(
                    self.OUTPUT_FILENAME,
                    "Output file in job %s" % job.info,
                    trunc_len=100 * 1024)
            else:
                job.user_output = None

        # Fill in the job with the results.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text
        job.plus = stats_user

        delete_sandbox(sandbox_mgr, job.success)
        for s in sandbox_user:
            delete_sandbox(s, job.success)
        if not config.keep_sandbox:
            for d in fifo_dir:
                rmtree(d)
            if not self._uses_grader():
                rmtree(abortion_control_fifo_dir)
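The manager and the user processes above communicate through named pipes created in temporary directories, which are mapped inside the sandboxes and removed at the end. A minimal standalone sketch of that FIFO setup and cleanup (the directory layout is illustrative):

import os
import shutil
import tempfile

# One pair of FIFOs per user process, as in the example above.
num_processes = 2
fifo_dirs = [tempfile.mkdtemp() for _ in range(num_processes)]
user_to_manager = [os.path.join(d, "u%d_to_m" % i)
                   for i, d in enumerate(fifo_dirs)]
manager_to_user = [os.path.join(d, "m_to_u%d" % i)
                   for i, d in enumerate(fifo_dirs)]

for i in range(num_processes):
    os.mkfifo(user_to_manager[i])
    os.mkfifo(manager_to_user[i])
    os.chmod(fifo_dirs[i], 0o755)
    os.chmod(user_to_manager[i], 0o666)
    os.chmod(manager_to_user[i], 0o666)

# ... the sandboxes would map each FIFO directory and exchange data here ...

for d in fifo_dirs:
    shutil.rmtree(d)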
Example #40
0
    def post(self, task_name):
        participation = self.current_user

        if not self.r_params["testing_enabled"]:
            raise tornado.web.HTTPError(404)

        try:
            task = self.contest.get_task(task_name)
        except KeyError:
            raise tornado.web.HTTPError(404)

        self.fallback_page = ["testing"]
        self.fallback_args = {"task_name": task.name}

        # Check that the task is testable
        task_type = get_task_type(dataset=task.active_dataset)
        if not task_type.testable:
            logger.warning("User %s tried to make test on task %s.",
                           participation.user.username, task_name)
            raise tornado.web.HTTPError(404)

        # Alias for easy access
        contest = self.contest

        # Enforce maximum number of user_tests
        try:
            if contest.max_user_test_number is not None:
                user_test_c = self.sql_session.query(func.count(UserTest.id))\
                    .join(UserTest.task)\
                    .filter(Task.contest == contest)\
                    .filter(UserTest.participation == participation)\
                    .scalar()
                if user_test_c >= contest.max_user_test_number and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("You have reached the maximum limit of "
                               "at most %d tests among all tasks.") %
                        contest.max_user_test_number)
            if task.max_user_test_number is not None:
                user_test_t = self.sql_session.query(func.count(UserTest.id))\
                    .filter(UserTest.task == task)\
                    .filter(UserTest.participation == participation)\
                    .scalar()
                if user_test_t >= task.max_user_test_number and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("You have reached the maximum limit of "
                               "at most %d tests on this task.") %
                        task.max_user_test_number)
        except ValueError as error:
            self._send_error(self._("Too many tests!"), str(error))
            return

        # Enforce minimum time between user_tests
        try:
            if contest.min_user_test_interval is not None:
                last_user_test_c = self.sql_session.query(UserTest)\
                    .join(UserTest.task)\
                    .filter(Task.contest == contest)\
                    .filter(UserTest.participation == participation)\
                    .order_by(UserTest.timestamp.desc())\
                    .first()
                if last_user_test_c is not None and \
                        self.timestamp - last_user_test_c.timestamp < \
                        contest.min_user_test_interval and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("Among all tasks, you can test again "
                               "after %d seconds from last test.") %
                        contest.min_user_test_interval.total_seconds())
            # We get the last user_test even if we may not need it
            # for min_user_test_interval because we may need it later,
            # in case this is an ALLOW_PARTIAL_SUBMISSION task.
            last_user_test_t = self.sql_session.query(UserTest)\
                .filter(UserTest.participation == participation)\
                .filter(UserTest.task == task)\
                .order_by(UserTest.timestamp.desc())\
                .first()
            if task.min_user_test_interval is not None:
                if last_user_test_t is not None and \
                        self.timestamp - last_user_test_t.timestamp < \
                        task.min_user_test_interval and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("For this task, you can test again "
                               "after %d seconds from last test.") %
                        task.min_user_test_interval.total_seconds())
        except ValueError as error:
            self._send_error(self._("Tests too frequent!"), str(error))
            return

        # Required files from the user.
        required = set([sfe.filename for sfe in task.submission_format] +
                       task_type.get_user_managers(task.submission_format) +
                       ["input"])

        # Ensure that the user did not submit multiple files with the
        # same name.
        if any(len(filename) != 1 for filename in itervalues(self.request.files)):
            self._send_error(
                self._("Invalid test format!"),
                self._("Please select the correct files."))
            return

        # If the user submitted an archive, extract it and use its content
        # as request.files. This is only valid for "output only" tasks
        # (i.e., not for submissions requiring a programming language
        # identification).
        if len(self.request.files) == 1 and \
                next(iterkeys(self.request.files)) == "submission":
            if any(filename.endswith(".%l") for filename in required):
                self._send_error(
                    self._("Invalid test format!"),
                    self._("Please select the correct files."),
                    task)
                return
            archive_data = self.request.files["submission"][0]
            del self.request.files["submission"]

            # Create the archive.
            archive = Archive.from_raw_data(archive_data["body"])

            if archive is None:
                self._send_error(
                    self._("Invalid archive format!"),
                    self._("The submitted archive could not be opened."))
                return

            # Extract the archive.
            unpacked_dir = archive.unpack()
            for name in archive.namelist():
                filename = os.path.basename(name)
                body = open(os.path.join(unpacked_dir, filename), "r").read()
                self.request.files[filename] = [{
                    'filename': filename,
                    'body': body
                }]

            archive.cleanup()

        # This ensures that the user sent one file for every name in
        # submission format and no more. Less is acceptable if task
        # type says so.
        provided = set(iterkeys(self.request.files))
        if not (required == provided or (task_type.ALLOW_PARTIAL_SUBMISSION
                                         and required.issuperset(provided))):
            self._send_error(
                self._("Invalid test format!"),
                self._("Please select the correct files."))
            return

        # Add submitted files. After this, files is a dictionary indexed
        # by *our* filenames (something like "output01.txt" or
        # "taskname.%l") whose values are pairs
        # (user_assigned_filename, content).
        files = {}
        for uploaded, data in iteritems(self.request.files):
            files[uploaded] = (data[0]["filename"], data[0]["body"])

        # Read the submission language provided in the request; we
        # integrate it with the language fetched from the previous
        # user test (if we reuse its files) and later make sure it is
        # recognized and allowed.
        submission_lang = self.get_argument("language", None)
        need_lang = any(our_filename.find(".%l") != -1
                        for our_filename in files)

        # If we allow partial submissions, we implicitly recover the
        # non-submitted files from the previous user test and put them
        # in file_digests (i.e., as if they had already been sent to
        # the file storage).
        file_digests = {}
        if task_type.ALLOW_PARTIAL_SUBMISSION and \
                last_user_test_t is not None and \
                (submission_lang is None or
                 submission_lang == last_user_test_t.language):
            submission_lang = last_user_test_t.language
            for filename in required.difference(provided):
                if filename in last_user_test_t.files:
                    file_digests[filename] = \
                        last_user_test_t.files[filename].digest

        # Throw an error if task needs a language, but we don't have
        # it or it is not allowed / recognized.
        if need_lang:
            error = None
            if submission_lang is None:
                error = self._("Cannot recognize the user test language.")
            elif submission_lang not in contest.languages:
                error = self._("Language %s not allowed in this contest.") \
                    % submission_lang
            if error is not None:
                self._send_error(self._("Invalid test!"), error)
                return

        # Check if submitted files are small enough.
        if any([len(f[1]) > config.max_submission_length
                for n, f in iteritems(files) if n != "input"]):
            self._send_error(
                self._("Test too big!"),
                self._("Each source file must be at most %d bytes long.") %
                config.max_submission_length)
            return
        if len(files["input"][1]) > config.max_input_length:
            self._send_error(
                self._("Input too big!"),
                self._("The input file must be at most %d bytes long.") %
                config.max_input_length)
            return

        # All checks done, submission accepted.

        # Attempt to store the submission locally to be able to
        # recover a failure.
        if config.tests_local_copy:
            try:
                path = os.path.join(
                    config.tests_local_copy_path.replace("%s",
                                                         config.data_dir),
                    participation.user.username)
                if not os.path.exists(path):
                    os.makedirs(path)
                # Pickle in ASCII format produces str, not unicode,
                # therefore we open the file in binary mode.
                with io.open(
                        os.path.join(path,
                                     "%d" % make_timestamp(self.timestamp)),
                        "wb") as file_:
                    pickle.dump((self.contest.id,
                                 participation.user.id,
                                 task.id,
                                 files), file_)
            except Exception as error:
                logger.error("Test local copy failed.", exc_info=True)

        # We now have to send all the files to the destination...
        try:
            for filename in files:
                digest = self.service.file_cacher.put_file_content(
                    files[filename][1],
                    "Test file %s sent by %s at %d." % (
                        filename, participation.user.username,
                        make_timestamp(self.timestamp)))
                file_digests[filename] = digest

        # In case of error, the server aborts the submission
        except Exception as error:
            logger.error("Storage failed! %s", error)
            self._send_error(
                self._("Test storage failed!"),
                self._("Please try again."))
            return

        # All the files are stored, ready to submit!
        logger.info("All files stored for test sent by %s",
                    participation.user.username)
        user_test = UserTest(self.timestamp,
                             submission_lang,
                             file_digests["input"],
                             participation=participation,
                             task=task)

        for filename in [sfe.filename for sfe in task.submission_format]:
            digest = file_digests[filename]
            self.sql_session.add(
                UserTestFile(filename, digest, user_test=user_test))
        for filename in task_type.get_user_managers(task.submission_format):
            digest = file_digests[filename]
            if submission_lang is not None:
                extension = get_language(submission_lang).source_extension
                filename = filename.replace(".%l", extension)
            self.sql_session.add(
                UserTestManager(filename, digest, user_test=user_test))

        self.sql_session.add(user_test)
        self.sql_session.commit()
        self.service.evaluation_service.new_user_test(
            user_test_id=user_test.id)
        self.service.add_notification(
            participation.user.username,
            self.timestamp,
            self._("Test received"),
            self._("Your test has been received "
                   "and is currently being executed."),
            NOTIFICATION_SUCCESS)

        # The argument (encrypted user test id) is not used by CWS
        # (nor does it disclose information to the user), but it is
        # useful for automatic testing to obtain the user test id.
        self.redirect(self.contest_url(
            *self.fallback_page, user_test_id=encrypt_number(user_test.id),
            **self.fallback_args))
Example #41
0
    def export_submissions(self):
        """Export submissions' source files.

        """
        logger.info("Exporting submissions.")

        with open(os.path.join(self.spool_dir, "queue"),
                  "wt", encoding="utf-8") as queue_file:
            for submission in sorted(self.submissions,
                                     key=lambda x: x.timestamp):
                logger.info("Exporting submission %s.", submission.id)
                username = submission.participation.user.username
                task = submission.task.name
                timestamp = time.mktime(submission.timestamp.timetuple())

                # Get source files to the spool directory.
                ext = languagemanager.get_language(submission.language)\
                    .source_extension
                submission_dir = os.path.join(
                    self.upload_dir, username,
                    "%s.%d.%s" % (task, timestamp, ext))
                os.mkdir(submission_dir)
                for filename, file_ in submission.files.items():
                    self.file_cacher.get_file_to_path(
                        file_.digest,
                        os.path.join(submission_dir,
                                     filename.replace(".%l", ext)))
                last_submission_dir = os.path.join(
                    self.upload_dir, username, "%s.%s" % (task, ext))
                try:
                    os.unlink(last_submission_dir)
                except OSError:
                    pass
                os.symlink(os.path.basename(submission_dir),
                           last_submission_dir)
                print("./upload/%s/%s.%d.%s" % (username, task, timestamp, ext),
                      file=queue_file)

                # Write results file for the submission.
                active_dataset = submission.task.active_dataset
                result = submission.get_result(active_dataset)
                if result.evaluated():
                    with open(os.path.join(self.spool_dir,
                                           "%d.%s.%s.%s.res"
                                           % (timestamp, username, task, ext)),
                              "wt", encoding="utf-8") as res_file, \
                            open(os.path.join(self.spool_dir,
                                              "%s.%s.%s.res"
                                              % (username, task, ext)),
                                 "wt", encoding="utf-8") as res2_file:
                        total = 0.0
                        for evaluation in result.evaluations:
                            outcome = float(evaluation.outcome)
                            total += outcome
                            line = (
                                "Executing on file with codename '%s' %s (%.4f)"
                                % (evaluation.testcase.codename,
                                   evaluation.text, outcome))
                            print(line, file=res_file)
                            print(line, file=res2_file)
                        line = "Score: %.6f" % total
                        print(line, file=res_file)
                        print(line, file=res2_file)

            print("", file=queue_file)
Example #42
0
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = get_language(job.language)
        source_ext = language.source_extension

        # Create the sandbox
        sandbox = create_sandbox(file_cacher, job.multithreaded_sandbox)
        job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        source_filenames = []
        # Stub.
        stub_filename = "stub%s" % source_ext
        source_filenames.append(stub_filename)
        files_to_get[stub_filename] = job.managers[stub_filename].digest
        # User's submission.
        for filename, fileinfo in job.files.iteritems():
            source_filename = filename.replace(".%l", source_ext)
            source_filenames.append(source_filename)
            files_to_get[source_filename] = fileinfo.digest

        # Also copy all managers that might be useful during compilation.
        for filename in job.managers.iterkeys():
            if any(filename.endswith(header) for header in HEADER_EXTS):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(filename.endswith(source) for source in SOURCE_EXTS):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(filename.endswith(obj) for obj in OBJECT_EXTS):
                files_to_get[filename] = \
                    job.managers[filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = \
            "_".join(pattern.replace(".%l", "")
                     for pattern in job.files.keys())
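        # For example, a submission format of ["encoder.%l", "decoder.%l"]
        # (hypothetical filenames) yields the executable name
        # "encoder_decoder".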
        commands = language.get_compilation_commands(
            source_filenames, executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" %
                (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox, job.success)
Example #43
0
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        # Create the sandbox
        sandbox = create_sandbox(file_cacher, job.multithreaded_sandbox)

        # Prepare the execution
        assert len(job.executables) == 1
        executable_filename = next(iterkeys(job.executables))
        language = get_language(job.language)
        commands = language.get_evaluation_commands(
            executable_filename,
            main="grader" if self._uses_grader() else executable_filename)
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        input_filename, output_filename = self.parameters[1]
        stdin_redirect = None
        stdout_redirect = None
        files_allowing_write = []
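        # When the task leaves the input/output filenames empty, fall
        # back to stdin/stdout redirection; otherwise the program is
        # expected to write the output file itself, so it must be
        # writable.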
        if len(input_filename) == 0:
            input_filename = "input.txt"
            stdin_redirect = input_filename
        if len(output_filename) == 0:
            output_filename = "output.txt"
            stdout_redirect = output_filename
        else:
            files_allowing_write.append(output_filename)
        files_to_get = {input_filename: job.input}

        # Put the required files into the sandbox
        for filename, digest in iteritems(executables_to_get):
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Actually performs the execution
        success, plus = evaluation_step(sandbox,
                                        commands,
                                        job.time_limit,
                                        job.memory_limit,
                                        writable_files=files_allowing_write,
                                        stdin_redirect=stdin_redirect,
                                        stdout_redirect=stdout_redirect)

        job.sandboxes = [sandbox.path]
        job.plus = plus

        outcome = None
        text = []

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(output_filename):
                outcome = 0.0
                text = [
                    N_("Evaluation didn't produce file %s"), output_filename
                ]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy
                # outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:

                    # Put the reference solution into the sandbox
                    sandbox.create_file_from_storage("res.txt", job.output)

                    # Check the solution with white_diff
                    if self.parameters[2] == "diff":
                        outcome, text = white_diff_step(
                            sandbox, output_filename, "res.txt")

                    # Check the solution with a comparator
                    elif self.parameters[2] == "comparator":
                        manager_filename = "checker"

                        if manager_filename not in job.managers:
                            logger.error(
                                "Configuration error: missing or "
                                "invalid comparator (it must be "
                                "named 'checker')",
                                extra={"operation": job.info})
                            success = False

                        else:
                            sandbox.create_file_from_storage(
                                manager_filename,
                                job.managers[manager_filename].digest,
                                executable=True)
                            # Rewrite input file. The untrusted
                            # contestant program should not be able to
                            # modify it; however, the grader may
                            # destroy the input file to prevent the
                            # contestant's program from directly
                            # accessing it. Since we cannot create
                            # files already existing in the sandbox,
                            # we try removing the file first.
                            try:
                                sandbox.remove_file(input_filename)
                            except OSError as e:
                                # Let us be extra sure that the file
                                # was actually removed and we did not
                                # mess up with permissions.
                                assert not sandbox.file_exists(input_filename)
                            sandbox.create_file_from_storage(
                                input_filename, job.input)

                            # Allow using any number of processes (because
                            # e.g. one may want to write a bash checker that
                            # calls other processes). Set to a high but finite
                            # number to avoid fork-bombing the worker.
                            sandbox.max_processes = 1000

                            success, _ = evaluation_step(
                                sandbox, [[
                                    "./%s" % manager_filename, input_filename,
                                    "res.txt", output_filename
                                ]])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox)
                            except ValueError as e:
                                logger.error(
                                    "Invalid output from "
                                    "comparator: %s",
                                    e.message,
                                    extra={"operation": job.info})
                                success = False

                    else:
                        raise ValueError("Unrecognized third parameter"
                                         " `%s' for Batch tasktype." %
                                         self.parameters[2])

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox, job.success)
Example #44
0
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        language = get_language(job.language)
        source_ext = language.source_extension
        header_ext = language.header_extension

        if not check_files_number(job, 2):
            return

        files_to_get = {}
        source_filenames = []

        # Manager.
        manager_filename = "manager%s" % source_ext
        if not check_manager_present(job, manager_filename):
            return
        source_filenames.append(manager_filename)
        files_to_get[manager_filename] = \
            job.managers[manager_filename].digest
        # Manager's header.
        if header_ext is not None:
            manager_filename = "manager%s" % header_ext
            if not check_manager_present(job, manager_filename):
                return
            source_filenames.append(manager_filename)
            files_to_get[manager_filename] = \
                job.managers[manager_filename].digest

        # User's submissions and headers.
        for filename, file_ in job.files.items():
            source_filename = filename.replace(".%l", source_ext)
            source_filenames.append(source_filename)
            files_to_get[source_filename] = file_.digest
            # The corresponding header for each submitted file (it must
            # be provided among the managers).
            if header_ext is not None:
                header_filename = filename.replace(".%l", header_ext)
                if not check_manager_present(job, header_filename):
                    return
                source_filenames.append(header_filename)
                files_to_get[header_filename] = \
                    job.managers[header_filename].digest

        # Get compilation command.
        executable_filename = "manager"
        commands = language.get_compilation_commands(source_filenames,
                                                     executable_filename)

        # Create the sandbox and put the required files in it.
        sandbox = create_sandbox(file_cacher, name="compile")
        job.sandboxes.append(sandbox.get_root_path())

        for filename, digest in files_to_get.items():
            sandbox.create_file_from_storage(filename, digest)

        # Run the compilation.
        box_success, compilation_success, text, stats = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = box_success
        job.compilation_success = compilation_success
        job.text = text
        job.plus = stats
        if box_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox, job.success, job.keep_sandbox)
Example #45
0
File: Batch.py Project: frazierbaker/cms
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        language = get_language(job.language)
        source_ext = language.source_extension

        if not check_files_number(job, 1, or_more=True):
            return

        # Create the list of filenames to be passed to the compiler. If we use
        # a grader, it needs to be in first position in the command line, and
        # we check that it exists.
        filenames_to_compile = []
        filenames_and_digests_to_get = {}
        # The grader, that must have been provided (copy and add to
        # compilation).
        if self._uses_grader():
            grader_filename = self.GRADER_BASENAME + source_ext
            if not check_manager_present(job, grader_filename):
                return
            filenames_to_compile.append(grader_filename)
            filenames_and_digests_to_get[grader_filename] = \
                job.managers[grader_filename].digest
        # User's submitted file(s) (copy and add to compilation).
        for codename, file_ in job.files.items():
            filename = codename.replace(".%l", source_ext)
            filenames_to_compile.append(filename)
            filenames_and_digests_to_get[filename] = file_.digest
        # Any other useful manager (just copy).
        for filename, manager in job.managers.items():
            if is_manager_for_compilation(filename, language):
                filenames_and_digests_to_get[filename] = manager.digest

        # Prepare the compilation command.
        executable_filename = self._executable_filename(job.files.keys())
        commands = language.get_compilation_commands(filenames_to_compile,
                                                     executable_filename)

        # Create the sandbox.
        sandbox = create_sandbox(file_cacher, name="compile")
        job.sandboxes.append(sandbox.get_root_path())

        # Copy required files in the sandbox (includes the grader if present).
        for filename, digest in filenames_and_digests_to_get.items():
            sandbox.create_file_from_storage(filename, digest)

        # Run the compilation.
        box_success, compilation_success, text, stats = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables.
        job.success = box_success
        job.compilation_success = compilation_success
        job.text = text
        job.plus = stats
        if box_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup.
        delete_sandbox(sandbox, job.success, job.keep_sandbox)
Example #46
0
File: Communication.py Project: xjzhou/cms
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if len(self.parameters) <= 0:
            num_processes = 1
        else:
            num_processes = self.parameters[0]
        indices = range(num_processes)
        # Create sandboxes and FIFOs
        sandbox_mgr = create_sandbox(file_cacher, job.multithreaded_sandbox)
        sandbox_user = [
            create_sandbox(file_cacher, job.multithreaded_sandbox)
            for i in indices
        ]
        fifo_dir = [tempfile.mkdtemp(dir=config.temp_dir) for i in indices]
        fifo_in = [os.path.join(fifo_dir[i], "in%d" % i) for i in indices]
        fifo_out = [os.path.join(fifo_dir[i], "out%d" % i) for i in indices]
        for i in indices:
            os.mkfifo(fifo_in[i])
            os.mkfifo(fifo_out[i])
            os.chmod(fifo_dir[i], 0o755)
            os.chmod(fifo_in[i], 0o666)
            os.chmod(fifo_out[i], 0o666)
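        # Each user process i talks to the manager through its own pair
        # of FIFOs; the same two paths are handed to the manager and to
        # the user process in opposite order below, so each FIFO carries
        # one direction of the dialogue.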

        # First step: we start the manager.
        manager_filename = "manager"
        manager_command = ["./%s" % manager_filename]
        for i in indices:
            manager_command.append(fifo_in[i])
            manager_command.append(fifo_out[i])
        manager_executables_to_get = {
            manager_filename: job.managers[manager_filename].digest
        }
        manager_files_to_get = {"input.txt": job.input}
        manager_allow_dirs = fifo_dir
        for filename, digest in manager_executables_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename,
                                                 digest,
                                                 executable=True)
        for filename, digest in manager_files_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename, digest)
        manager = evaluation_step_before_run(sandbox_mgr,
                                             manager_command,
                                             num_processes * job.time_limit,
                                             0,
                                             allow_dirs=manager_allow_dirs,
                                             writable_files=["output.txt"],
                                             stdin_redirect="input.txt")

        # Second step: we start the user submission compiled with the
        # stub.
        language = get_language(job.language)
        executable_filename = job.executables.keys()[0]
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        processes = [None for i in indices]
        for i in indices:
            args = [fifo_out[i], fifo_in[i]]
            if num_processes != 1:
                args.append(str(i))
            commands = language.get_evaluation_commands(executable_filename,
                                                        main="stub",
                                                        args=args)
            user_allow_dirs = [fifo_dir[i]]
            for filename, digest in executables_to_get.iteritems():
                sandbox_user[i].create_file_from_storage(filename,
                                                         digest,
                                                         executable=True)
            # Assumes that the actual execution of the user solution
            # is the last command in commands, and that the previous
            # ones are "setup" commands that don't need tight control.
            if len(commands) > 1:
                evaluation_step(sandbox_user[i], commands[:-1], 10, 256)
            processes[i] = evaluation_step_before_run(
                sandbox_user[i],
                commands[-1],
                job.time_limit,
                job.memory_limit,
                allow_dirs=user_allow_dirs)

        # Consume output.
        wait_without_std(processes + [manager])
        # TODO: check exit codes with translate_box_exitcode.

        user_results = [evaluation_step_after_run(s) for s in sandbox_user]
        success_user = all(r[0] for r in user_results)
        plus_user = reduce(merge_evaluation_results,
                           [r[1] for r in user_results])
        success_mgr, unused_plus_mgr = \
            evaluation_step_after_run(sandbox_mgr)

        if plus_user['exit_status'] == Sandbox.EXIT_OK and \
                plus_user["execution_time"] >= job.time_limit:
            plus_user['exit_status'] = Sandbox.EXIT_TIMEOUT
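        # The check above presumably covers the case in which the user
        # process was not killed by the sandbox (e.g. it exited while
        # blocked on a FIFO) yet consumed the whole time limit: it is
        # reported as a timeout rather than a clean exit.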

        # Merge results.
        job.sandboxes = [s.path for s in sandbox_user] + [sandbox_mgr.path]
        job.plus = plus_user

        # If at least one evaluation had problems, we report the
        # problems.
        if not success_user or not success_mgr:
            success, outcome, text = False, None, None
        # If the user sandbox detected some problem (timeout, ...),
        # the outcome is 0.0 and the text describes that problem.
        elif not is_evaluation_passed(plus_user):
            success = True
            outcome, text = 0.0, human_evaluation_message(plus_user)
        # Otherwise, we use the manager to obtain the outcome.
        else:
            success = True
            outcome, text = extract_outcome_and_text(sandbox_mgr)

        # If asked so, save the output file, provided that it exists
        if job.get_output:
            if sandbox_mgr.file_exists("output.txt"):
                job.user_output = sandbox_mgr.get_file_to_storage(
                    "output.txt", "Output file in job %s" % job.info)
            else:
                job.user_output = None

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox_mgr, job.success)
        for s in sandbox_user:
            delete_sandbox(s, job.success)
        if not config.keep_sandbox:
            for d in fifo_dir:
                rmtree(d)
Example #47
0
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""

        if len(self.parameters) <= 0:
            num_processes = 1
        else:
            num_processes = self.parameters[0]
        indices = range(num_processes)
        # Create sandboxes and FIFOs
        sandbox_mgr = create_sandbox(file_cacher, job.multithreaded_sandbox)
        sandbox_user = [
            create_sandbox(file_cacher, job.multithreaded_sandbox)
            for i in indices
        ]
        fifo_dir = [tempfile.mkdtemp(dir=config.temp_dir) for i in indices]
        fifo_in = [os.path.join(fifo_dir[i], "in%d" % i) for i in indices]
        fifo_out = [os.path.join(fifo_dir[i], "out%d" % i) for i in indices]
        for i in indices:
            os.mkfifo(fifo_in[i])
            os.mkfifo(fifo_out[i])
            os.chmod(fifo_dir[i], 0o755)
            os.chmod(fifo_in[i], 0o666)
            os.chmod(fifo_out[i], 0o666)

        # First step: we start the manager.
        manager_filename = "manager"
        manager_command = ["./%s" % manager_filename]
        for i in indices:
            manager_command.append(fifo_in[i])
            manager_command.append(fifo_out[i])
        manager_executables_to_get = {
            manager_filename: job.managers[manager_filename].digest
        }
        manager_files_to_get = {"input.txt": job.input}
        manager_allow_dirs = fifo_dir
        for filename, digest in manager_executables_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename,
                                                 digest,
                                                 executable=True)
        for filename, digest in manager_files_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename, digest)
        manager = evaluation_step_before_run(
            sandbox_mgr,
            manager_command,
            num_processes * job.time_limit,
            0,
            allow_dirs=manager_allow_dirs,
            writable_files=["output.txt"],
            stdin_redirect="input.txt",
            stdout_redirect="output.txt",
        )

        # Second step: we start the user submission compiled with the
        # stub.
        language = get_language(job.language)
        executable_filename = job.executables.keys()[0]
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        processes = [None for i in indices]
        for i in indices:
            args = [fifo_out[i], fifo_in[i]]
            if num_processes != 1:
                args.append(str(i))
            commands = language.get_evaluation_commands(executable_filename,
                                                        main="grader",
                                                        args=args)
            user_allow_dirs = [fifo_dir[i]]
            for filename, digest in executables_to_get.iteritems():
                sandbox_user[i].create_file_from_storage(filename,
                                                         digest,
                                                         executable=True)
            # Assumes that the actual execution of the user solution
            # is the last command in commands, and that the previous
            # ones are "setup" commands that don't need tight control.
            if len(commands) > 1:
                evaluation_step(sandbox_user[i], commands[:-1], 10, 256)
            processes[i] = evaluation_step_before_run(
                sandbox_user[i],
                commands[-1],
                job.time_limit,
                job.memory_limit,
                allow_dirs=user_allow_dirs)

        # Consume output.
        wait_without_std(processes + [manager])
        # TODO: check exit codes with translate_box_exitcode.

        user_results = [evaluation_step_after_run(s) for s in sandbox_user]
        success_user = all(r[0] for r in user_results)
        plus_user = reduce(merge_evaluation_results,
                           [r[1] for r in user_results])
        success_mgr, unused_plus_mgr = \
            evaluation_step_after_run(sandbox_mgr)

        if plus_user['exit_status'] == Sandbox.EXIT_OK and \
                plus_user["execution_time"] >= job.time_limit:
            plus_user['exit_status'] = Sandbox.EXIT_TIMEOUT

        # Merge results.
        job.sandboxes = [s.path for s in sandbox_user] + [sandbox_mgr.path]
        job.plus = plus_user

        # If at least one evaluation had problems, we report the
        # problems.
        if not success_user or not success_mgr:
            success, outcome, text = False, None, None
        # If the user sandbox detected some problem (timeout, ...),
        # the outcome is 0.0 and the text describes that problem.
        elif not is_evaluation_passed(plus_user):
            success = True
            outcome, text = 0.0, human_evaluation_message(plus_user)
            if job.get_output:
                job.user_output = None
        # Otherwise, we use the manager to obtain the outcome.
        else:
            success = True
            outcome = None
            text = None

            input_filename = "input.txt"
            output_filename = "output.txt"
            # Check that the output file was created
            if not sandbox_mgr.file_exists(output_filename):
                outcome = 0.0
                text = [
                    N_("Evaluation didn't produce file %s"), output_filename
                ]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage
                if job.get_output:
                    job.user_output = sandbox_mgr.get_file_to_storage(
                        output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=1024 * 1024 * 10)

                # If just asked to execute, fill text and set dummy
                # outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:

                    # Put the reference solution into the sandbox
                    sandbox_mgr.create_file_from_storage("res.txt", job.output)

                    # Check the solution with white_diff
                    if self.parameters[1] == "diff":
                        outcome, text = white_diff_step(
                            sandbox_mgr, output_filename, "res.txt")

                    # Check the solution with a comparator
                    elif self.parameters[1] == "comparator":
                        manager_filename = "checker"

                        if manager_filename not in job.managers:
                            logger.error(
                                "Configuration error: missing or "
                                "invalid comparator (it must be "
                                "named 'checker')",
                                extra={"operation": job.info})
                            success = False

                        else:
                            sandbox_mgr.create_file_from_storage(
                                manager_filename,
                                job.managers[manager_filename].digest,
                                executable=True)
                            # Rewrite input file. The untrusted
                            # contestant program should not be able to
                            # modify it; however, the grader may
                            # destroy the input file to prevent the
                            # contestant's program from directly
                            # accessing it. Since we cannot create
                            # files already existing in the sandbox,
                            # we try removing the file first.
                            try:
                                sandbox_mgr.remove_file(input_filename)
                            except OSError as e:
                                # Let us be extra sure that the file
                                # was actually removed and we did not
                                # mess up with permissions.
                                assert not sandbox_mgr.file_exists(
                                    input_filename)
                            sandbox_mgr.create_file_from_storage(
                                input_filename, job.input)

                            # Allow using any number of processes (because
                            # e.g. one may want to write a bash checker that
                            # calls other processes). Set to a high but finite
                            # number to avoid fork-bombing the worker.
                            sandbox_mgr.max_processes = 1000

                            success, _ = evaluation_step(
                                sandbox_mgr, [[
                                    "./%s" % manager_filename, input_filename,
                                    "res.txt", output_filename
                                ]])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox_mgr)
                            except ValueError as e:
                                logger.error(
                                    "Invalid output from "
                                    "comparator: %s",
                                    e.message,
                                    extra={"operation": job.info})
                                success = False

                    else:
                        raise ValueError("Unrecognized second parameter"
                                         " `%s' for Communication tasktype." %
                                         self.parameters[1])

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox_mgr, job.success)
        for s in sandbox_user:
            delete_sandbox(s, job.success)
        if not config.keep_sandbox:
            for d in fifo_dir:
                rmtree(d)
Example #48
0
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = get_language(job.language)
        source_ext = language.source_extension

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(job.files) != 1:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            job.plus = {}
            logger.error("Submission contains %d files, expecting 1",
                         len(job.files), extra={"operation": job.info})
            return True

        # Create the sandbox
        sandbox = create_sandbox(file_cacher, job.multithreaded_sandbox)
        job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        format_filename = job.files.keys()[0]
        source_filenames = []
        source_filenames.append(format_filename.replace(".%l", source_ext))
        files_to_get[source_filenames[0]] = \
            job.files[format_filename].digest
        # If a grader is specified, we add to the command line (and to
        # the files to get) the corresponding manager. The grader must
        # be the first file in source_filenames.
        compile_command = []
        if self._uses_grader():
            files_to_get["grader%s" % source_ext] = \
                job.managers["grader%s" % source_ext].digest
            # For solutions using C or C++,
            # we first compile the grader source
            # file and then delete it from sandbox,
            # to prevent the user's solution
            # files from including it.
            try:
                compile_command = language.get_compilation_no_link_command(
                    ["grader%s" % source_ext])
                compile_command += [["/bin/rm", "grader%s" % source_ext]]
            except NotImplementedError:
                compile_command = []
                source_filenames.insert(0, "grader%s" % source_ext)
            else:
                source_filenames.insert(0, "grader%s" %
                                        language.object_extension)

        # Also copy all managers that might be useful during compilation.
        for filename in job.managers.iterkeys():
            if any(filename.endswith(header) for header in
                   language.header_extensions):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(filename.endswith(source) for source in
                     language.source_extensions):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(filename.endswith(obj) for obj in
                     language.object_extensions):
                files_to_get[filename] = \
                    job.managers[filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = format_filename.replace(".%l", "")
        commands = compile_command + language.get_compilation_commands(
            source_filenames, executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" %
                (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox, job.success)
Example #49
0
File: Batch.py Project: akmohtashami/cms
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if len(job.executables) != 1:
            raise ValueError("Unexpected number of executables (%s)" %
                             len(job.executables))

        # Create the sandbox
        sandbox = create_sandbox(
            file_cacher,
            multithreaded=job.multithreaded_sandbox,
            name="evaluate")

        # Prepare the execution
        executable_filename = next(iterkeys(job.executables))
        language = get_language(job.language)
        main = Batch.GRADER_BASENAME \
            if self._uses_grader() else executable_filename
        commands = language.get_evaluation_commands(
            executable_filename, main=main)
        executables_to_get = {
            executable_filename:
            job.executables[executable_filename].digest
        }
        stdin_redirect = None
        stdout_redirect = None
        files_allowing_write = []
        if len(self.input_filename) == 0:
            self.input_filename = Batch.DEFAULT_INPUT_FILENAME
            stdin_redirect = self.input_filename
        if len(self.output_filename) == 0:
            self.output_filename = Batch.DEFAULT_OUTPUT_FILENAME
            stdout_redirect = self.output_filename
        else:
            files_allowing_write.append(self.output_filename)
        files_to_get = {
            self.input_filename: job.input
        }

        # Put the required files into the sandbox
        for filename, digest in iteritems(executables_to_get):
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Actually performs the execution
        success, plus = evaluation_step(
            sandbox,
            commands,
            job.time_limit,
            job.memory_limit,
            writable_files=files_allowing_write,
            stdin_redirect=stdin_redirect,
            stdout_redirect=stdout_redirect)

        job.sandboxes = [sandbox.path]
        job.plus = plus

        outcome = None
        text = []

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(self.output_filename):
                outcome = 0.0
                text = [N_("Evaluation didn't produce file %s"),
                        self.output_filename]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage.
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        self.output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:

                    # Create a brand-new sandbox just for checking. Only admin
                    # code runs in it, so we allow multithreading and many
                    # processes (still with a limit to avoid fork-bombs).
                    checkbox = create_sandbox(
                        file_cacher,
                        multithreaded=True,
                        name="check")
                    checkbox.max_processes = 1000

                    checker_success, outcome, text = self._eval_output(
                        checkbox, job, sandbox.get_root_path())
                    success = success and checker_success

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox, job.success)
Example #50
0
File: Batch.py Project: akmohtashami/cms
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = get_language(job.language)
        source_ext = language.source_extension

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(job.files) != 1:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            logger.error("Submission contains %d files, expecting 1",
                         len(job.files), extra={"operation": job.info})
            return

        # Create the sandbox.
        sandbox = create_sandbox(
            file_cacher,
            multithreaded=job.multithreaded_sandbox,
            name="compile")
        job.sandboxes.append(sandbox.path)

        user_file_format = next(iterkeys(job.files))
        user_source_filename = user_file_format.replace(".%l", source_ext)
        executable_filename = user_file_format.replace(".%l", "")

        # Copy required files in the sandbox (includes the grader if present).
        sandbox.create_file_from_storage(
            user_source_filename, job.files[user_file_format].digest)
        for filename in iterkeys(job.managers):
            if Batch._is_manager_for_compilation(filename):
                sandbox.create_file_from_storage(
                    filename, job.managers[filename].digest)

        # Create the list of filenames to be passed to the compiler. If we use
        # a grader, it needs to be in first position in the command line.
        source_filenames = [user_source_filename]
        if self._uses_grader():
            grader_source_filename = Batch.GRADER_BASENAME + source_ext
            source_filenames.insert(0, grader_source_filename)

        # Prepare the compilation command.
        commands = language.get_compilation_commands(
            source_filenames, executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" %
                (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox, job.success)
Example #51
0
File: Batch.py Project: radroxx/cms
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if not check_executables_number(job, 1):
            return

        # Prepare the execution
        executable_filename = next(iterkeys(job.executables))
        language = get_language(job.language)
        main = Batch.GRADER_BASENAME \
            if self._uses_grader() else executable_filename
        commands = language.get_evaluation_commands(executable_filename,
                                                    main=main)
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        files_to_get = {self._actual_input: job.input}

        # Check which redirect we need to perform, and in case we don't
        # manage the output via redirect, the submission needs to be able
        # to write on it.
        files_allowing_write = []
        stdin_redirect = None
        stdout_redirect = None
        if len(self.input_filename) == 0:
            stdin_redirect = self._actual_input
        if len(self.output_filename) == 0:
            stdout_redirect = self._actual_output
        else:
            files_allowing_write.append(self._actual_output)

        # Create the sandbox
        sandbox = create_sandbox(file_cacher, name="evaluate")
        job.sandboxes.append(sandbox.path)

        # Put the required files into the sandbox
        for filename, digest in iteritems(executables_to_get):
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Actually performs the execution
        box_success, evaluation_success, stats = evaluation_step(
            sandbox,
            commands,
            job.time_limit,
            job.memory_limit,
            writable_files=files_allowing_write,
            stdin_redirect=stdin_redirect,
            stdout_redirect=stdout_redirect,
            multiprocess=job.multithreaded_sandbox)

        outcome = None
        text = None

        # Error in the sandbox: nothing to do!
        if not box_success:
            pass

        # Contestant's error: the marks won't be good
        elif not evaluation_success:
            outcome = 0.0
            text = human_evaluation_message(stats)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(self._actual_output):
                outcome = 0.0
                text = [
                    N_("Evaluation didn't produce file %s"),
                    self._actual_output
                ]
                if job.get_output:
                    job.user_output = None

            else:
                # If requested, put the output file into the storage.
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        self._actual_output,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:
                    box_success, outcome, text = eval_output(
                        file_cacher,
                        job,
                        Batch.CHECKER_CODENAME
                        if self._uses_checker() else None,
                        user_output_path=sandbox.relative_path(
                            self._actual_output),
                        user_output_filename=self.output_filename)

        # Fill in the job with the results.
        job.success = box_success
        job.outcome = str(outcome) if outcome is not None else None
        job.text = text
        job.plus = stats

        delete_sandbox(sandbox, job.success)
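The redirect logic in the evaluate method above follows a simple rule: an empty configured filename means the contestant reads from stdin or writes to stdout, otherwise the named output file must be writable inside the sandbox. Below is a minimal standalone sketch of just that decision, not taken from CMS; the helper name and the default filenames are made up for illustration.

def pick_redirects(input_filename, output_filename,
                   actual_input="input.txt", actual_output="output.txt"):
    """Decide sandbox I/O redirection the way Batch.evaluate does (sketch).

    An empty configured filename means the program uses the standard
    stream; otherwise the output file must be writable in the sandbox.
    """
    files_allowing_write = []
    stdin_redirect = None
    stdout_redirect = None
    if len(input_filename) == 0:
        stdin_redirect = actual_input
    if len(output_filename) == 0:
        stdout_redirect = actual_output
    else:
        files_allowing_write.append(actual_output)
    return stdin_redirect, stdout_redirect, files_allowing_write

# Stream-based I/O: pick_redirects("", "") -> ("input.txt", "output.txt", [])
# File-based I/O: pick_redirects("input.txt", "output.txt") -> (None, None, ["output.txt"])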
Example #52
File: usersdata.py Project: Nyrio/cms
    def get(self, contest_id, format="online"):
        self.contest = self.sql_session.query(Contest)\
            .filter(Contest.id == contest_id)\
            .options(joinedload('participations'))\
            .options(joinedload('participations.submissions'))\
            .options(joinedload('participations.submissions.token'))\
            .options(joinedload('participations.submissions.results'))\
            .first()

        self.set_header("Content-Type", "application/zip")
        self.set_header("Content-Disposition",
                        "attachment; filename=\"users_data.zip\"")

        shutil.rmtree(BASE_PATH, ignore_errors=True)

        fc = FileCacher()

        for p in self.contest.participations:
            path = "%s/%s/" % (BASE_PATH, p.user.username)
            os.makedirs(path)

            # Identify all the files submitted by the user for each task
            task_sr = defaultdict(list)
            for sub in p.submissions:
                sub_sr = sub.get_result(sub.task.active_dataset)

                file_ = next(iter(sub.files.values()))
                filename = file_.filename
                if sub.language is not None:
                    filename = filename.replace(
                        ".%l", get_language(sub.language).source_extension)
                if sub_sr is not None and sub_sr.score:
                    task_sr[sub.task_id].append(
                        (sub_sr.score, sub.timestamp, (filename, file_.digest)))

            # Select the last file submitted with maximum score for each task
            task_last_best = [
                sorted(task_sr[tid], key=lambda x: (x[0], x[1]),
                       reverse=True)[0][2]
                for tid in task_sr
            ]

            # Write the selected file for each task
            for filename, digest in task_last_best:
                file_content = fc.get_file(digest).read()
                with open("%s%s" % (path, filename), "w") as f:
                    f.write(file_content.decode("utf8"))

            # Find the user's score for each task
            scores = []
            for task in self.contest.tasks:
                t_score, _ = task_score(p, task)
                t_score = round(t_score, task.score_precision)
                scores.append((task.id, t_score))

            # Find the user's last progress on each task
            task_last_progress = {
                tid:
                sorted(task_sr[tid], key=lambda x: (-x[0], x[1]))[0][1]
                for tid in task_sr
            }

            # Write a csv with some information on the participation
            info_csv = [["Username", "User"]]
            for task in self.contest.tasks:
                info_csv[0].append("%s (score)" % task.name)
                info_csv[0].append("%s (last progress)" % task.name)
            info_csv[0].append("Last progress")
            full_name = "%s %s" % (p.user.first_name, p.user.last_name)
            info_csv.append([p.user.username.encode('utf-8'),
                             full_name.encode('utf-8')])
            for tid, t_score in scores:
                info_csv[1].append(str(t_score))
                if tid in task_last_progress:
                    last_progress = \
                        task_last_progress[tid].strftime('%H:%M:%S')
                else:
                    last_progress = ""
                info_csv[1].append(last_progress)
            if task_last_progress:
                last_progress_overall_ts = max(task_last_progress.values())
                last_progress_overall = \
                    last_progress_overall_ts.strftime('%H:%M:%S')
            else:
                last_progress_overall = ""
            info_csv[1].append(last_progress_overall)
            with open("%sinfo.csv" % path, "wb") as f_out:
                csv_writer = csv.writer(f_out)
                for row in info_csv:
                    csv_writer.writerow(row)

        # Create a downloadable archive with all this data
        shutil.make_archive("users_data", "zip", ".", "users_data")

        with open("users_data.zip", "rb") as output:
            self.finish(output.read())
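The handler above keeps, for each task, the file of the newest submission among those with the highest score (sort by (score, timestamp) descending and take the first). A minimal standalone sketch of that selection using plain tuples instead of SQLAlchemy objects; the row layout is an assumption made for illustration.

from collections import defaultdict

def last_best_per_task(rows):
    """rows: iterable of (task_id, score, timestamp, payload) tuples.

    Return {task_id: payload} taken from the newest row among those with
    the maximum score for that task.
    """
    by_task = defaultdict(list)
    for task_id, score, timestamp, payload in rows:
        by_task[task_id].append((score, timestamp, payload))
    return {
        tid: sorted(entries, key=lambda e: (e[0], e[1]), reverse=True)[0][2]
        for tid, entries in by_task.items()
    }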
Example #53
File: TwoSteps.py Project: Nyrio/cms
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        language = get_language(job.language)
        source_ext = language.source_extension
        header_ext = language.header_extension

        if not check_files_number(job, 2):
            return

        files_to_get = {}
        source_filenames = []

        # Manager.
        manager_filename = "manager%s" % source_ext
        if not check_manager_present(job, manager_filename):
            return
        source_filenames.append(manager_filename)
        files_to_get[manager_filename] = \
            job.managers[manager_filename].digest
        # Manager's header.
        if header_ext is not None:
            manager_filename = "manager%s" % header_ext
            if not check_manager_present(job, manager_filename):
                return
            source_filenames.append(manager_filename)
            files_to_get[manager_filename] = \
                job.managers[manager_filename].digest

        # User's submissions and headers.
        for filename, file_ in iteritems(job.files):
            source_filename = filename.replace(".%l", source_ext)
            source_filenames.append(source_filename)
            files_to_get[source_filename] = file_.digest
            # User's headers (required when the language defines a header extension).
            if header_ext is not None:
                header_filename = filename.replace(".%l", header_ext)
                if not check_manager_present(job, header_filename):
                    return
                source_filenames.append(header_filename)
                files_to_get[header_filename] = \
                    job.managers[header_filename].digest

        # Get compilation command.
        executable_filename = "manager"
        commands = language.get_compilation_commands(
            source_filenames, executable_filename)

        # Create the sandbox and put the required files in it.
        sandbox = create_sandbox(file_cacher, name="compile")
        job.sandboxes.append(sandbox.get_root_path())

        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Run the compilation.
        box_success, compilation_success, text, stats = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = box_success
        job.compilation_success = compilation_success
        job.text = text
        job.plus = stats
        if box_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" %
                (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox, job.success)
Example #54
File: Batch.py Project: romeorizzi/cms
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = get_language(job.language)
        source_ext = language.source_extension

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(job.files) != 1:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            logger.error("Submission contains %d files, expecting 1",
                         len(job.files),
                         extra={"operation": job.info})
            return

        # Create the sandbox.
        sandbox = create_sandbox(file_cacher,
                                 multithreaded=job.multithreaded_sandbox,
                                 name="compile")
        job.sandboxes.append(sandbox.path)

        user_file_format = next(iterkeys(job.files))
        user_source_filename = user_file_format.replace(".%l", source_ext)
        executable_filename = user_file_format.replace(".%l", "")

        # Copy required files in the sandbox (includes the grader if present).
        sandbox.create_file_from_storage(user_source_filename,
                                         job.files[user_file_format].digest)
        for filename in iterkeys(job.managers):
            if Batch._is_manager_for_compilation(filename):
                sandbox.create_file_from_storage(filename,
                                                 job.managers[filename].digest)

        # Create the list of filenames to be passed to the compiler. If we use
        # a grader, it needs to be in first position in the command line.
        source_filenames = [user_source_filename]
        if self._uses_grader():
            grader_source_filename = Batch.GRADER_BASENAME + source_ext
            source_filenames.insert(0, grader_source_filename)

        # Prepare the compilation command.
        commands = language.get_compilation_commands(source_filenames,
                                                     executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox, job.success)
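All of these compile methods rely on the submission format's ".%l" placeholder, which is replaced by the language's source extension to obtain concrete filenames. A minimal sketch of that convention; the extension table below stands in for CMS's get_language() and its keys are placeholders, not the real language names.

# Placeholder table; CMS resolves extensions through get_language(name).
SOURCE_EXTENSIONS = {"cpp": ".cpp", "c": ".c", "py": ".py"}

def codename_to_filename(codename, language_key):
    """Turn a codename such as 'solution.%l' into a concrete filename."""
    return codename.replace(".%l", SOURCE_EXTENSIONS[language_key])

# codename_to_filename("solution.%l", "py") -> "solution.py"
# Codenames without ".%l" (language-agnostic formats) pass through unchanged.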
Example #55
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if not check_executables_number(job, 1):
            return

        language = get_language(job.language)
        executable_filename = next(iter(job.executables.keys()))
        executable_digest = job.executables[executable_filename].digest

        first_sandbox = create_sandbox(file_cacher, name="first_evaluate")
        second_sandbox = create_sandbox(file_cacher, name="second_evaluate")
        job.sandboxes.append(first_sandbox.get_root_path())
        job.sandboxes.append(second_sandbox.get_root_path())

        fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
        fifo = os.path.join(fifo_dir, "fifo")
        os.mkfifo(fifo)
        os.chmod(fifo_dir, 0o755)
        os.chmod(fifo, 0o666)

        # First step: we start the first manager.
        first_command = language.get_evaluation_commands(
            executable_filename, main="grader", args=["0", "/fifo/fifo"])[0]
        first_executables_to_get = {executable_filename: executable_digest}
        first_files_to_get = {TwoSteps2019.INPUT_FILENAME: job.input}

        # Put the required files into the sandbox
        for filename, digest in first_executables_to_get.items():
            first_sandbox.create_file_from_storage(filename,
                                                   digest,
                                                   executable=True)
        for filename, digest in first_files_to_get.items():
            first_sandbox.create_file_from_storage(filename, digest)

        first = evaluation_step_before_run(
            first_sandbox,
            first_command,
            job.time_limit,
            job.memory_limit,
            dirs_map={fifo_dir: ("/fifo", "rw")},
            stdin_redirect=TwoSteps2019.INPUT_FILENAME,
            multiprocess=job.multithreaded_sandbox,
            wait=False)

        # Second step: we start the second manager.
        second_command = language.get_evaluation_commands(
            executable_filename, main="grader", args=["1", "/fifo/fifo"])[0]
        second_executables_to_get = {executable_filename: executable_digest}
        second_files_to_get = {}

        # Put the required files into the second sandbox
        for filename, digest in second_executables_to_get.items():
            second_sandbox.create_file_from_storage(filename,
                                                    digest,
                                                    executable=True)
        for filename, digest in second_files_to_get.items():
            second_sandbox.create_file_from_storage(filename, digest)

        second = evaluation_step_before_run(
            second_sandbox,
            second_command,
            job.time_limit,
            job.memory_limit,
            dirs_map={fifo_dir: ("/fifo", "rw")},
            stdout_redirect=TwoSteps2019.OUTPUT_FILENAME,
            multiprocess=job.multithreaded_sandbox,
            wait=False)

        # Consume output.
        wait_without_std([second, first])

        box_success_first, evaluation_success_first, first_stats = \
            evaluation_step_after_run(first_sandbox)
        box_success_second, evaluation_success_second, second_stats = \
            evaluation_step_after_run(second_sandbox)

        box_success = box_success_first and box_success_second
        evaluation_success = \
            evaluation_success_first and evaluation_success_second
        stats = merge_execution_stats(
            first_stats,
            second_stats) if second_stats is not None else first_stats

        outcome = None
        text = None

        # Error in the sandbox: nothing to do!
        if not box_success:
            pass

        # Contestant's error: the marks won't be good
        elif not evaluation_success:
            outcome = 0.0
            text = human_evaluation_message(stats)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not second_sandbox.file_exists(TwoSteps2019.OUTPUT_FILENAME):
                outcome = 0.0
                text = [
                    N_("Evaluation didn't produce file %s"),
                    TwoSteps2019.OUTPUT_FILENAME
                ]
                if job.get_output:
                    job.user_output = None

            else:
                # If requested, put the output file into the storage.
                if job.get_output:
                    job.user_output = second_sandbox.get_file_to_storage(
                        TwoSteps2019.OUTPUT_FILENAME,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:
                    box_success, outcome, text = eval_output(
                        file_cacher,
                        job,
                        TwoSteps2019.CHECKER_CODENAME
                        if self._uses_checker() else None,
                        user_output_path=second_sandbox.relative_path(
                            TwoSteps2019.OUTPUT_FILENAME))

        # Fill in the job with the results.
        job.success = box_success
        job.outcome = str(outcome) if outcome is not None else None
        job.text = text
        job.plus = stats

        delete_sandbox(first_sandbox, job.success, job.keep_sandbox)
        delete_sandbox(second_sandbox, job.success, job.keep_sandbox)
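The two graders above communicate through a named pipe that is mapped to /fifo inside both sandboxes, and both processes are started before either is waited on. A minimal sketch of the same pattern without the sandbox layer, using subprocess and a temporary FIFO; the echo/cat commands are placeholders standing in for the two halves of the program.

import os
import subprocess
import tempfile

fifo_dir = tempfile.mkdtemp()
fifo = os.path.join(fifo_dir, "fifo")
os.mkfifo(fifo)

# Start both ends without waiting, mirroring
# evaluation_step_before_run(..., wait=False) followed by wait_without_std().
writer = subprocess.Popen(["sh", "-c", "echo 42 > %s" % fifo])
reader = subprocess.Popen(["sh", "-c", "cat %s" % fifo])
reader.wait()
writer.wait()

os.remove(fifo)
os.rmdir(fifo_dir)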
Example #56
def main():
    """Parse arguments and launch process.

    """
    parser = argparse.ArgumentParser(
        description="Export CMS submissions to a folder.\n",
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("-c",
                        "--contest-id",
                        action="store",
                        type=int,
                        help="id of contest (default: all contests)")
    parser.add_argument("-t",
                        "--task-id",
                        action="store",
                        type=int,
                        help="id of task (default: all tasks)")
    parser.add_argument("-u",
                        "--user-id",
                        action="store",
                        type=int,
                        help="id of user (default: all users)")
    parser.add_argument("-s",
                        "--submission-id",
                        action="store",
                        type=int,
                        help="id of submission (default: all submissions)")
    parser.add_argument("--utf8",
                        action="store_true",
                        help="if set, the files will be encoded in utf8"
                        " when possible")
    parser.add_argument("--add-info",
                        action="store_true",
                        help="if set, information on the submission will"
                        " be added in the first lines of each file")
    parser.add_argument("--min-score",
                        action="store",
                        type=float,
                        help="ignore submissions which scored strictly"
                        " less than this (default: 0.0)",
                        default=0.0)
    parser.add_argument("--filename",
                        action="store",
                        type=utf8_decoder,
                        help="the filename format to use\n"
                        "Variables:\n"
                        "  id: submission id\n"
                        "  file: filename without extension\n"
                        "  ext: filename extension\n"
                        "  time: submission timestamp\n"
                        "  user: username\n"
                        "  task: taskname\n"
                        "  score: raw score\n"
                        " (default: {id}.{file}{ext})",
                        default="{id}.{file}{ext}")
    parser.add_argument("output_dir",
                        action="store",
                        type=utf8_decoder,
                        help="directory where to save the submissions")

    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument("--unique",
                       action="store_true",
                       help="if set, only the earliest best submission"
                       " will be exported for each (user, task)")
    group.add_argument("--best",
                       action="store_true",
                       help="if set, only the best submissions will be"
                       " exported for each (user, task)")

    args = parser.parse_args()

    if args.add_info and not args.utf8:
        logger.critical("If --add-info is specified, then --utf8 must be"
                        " specified as well.")
        return 1

    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)
    if not os.path.isdir(args.output_dir):
        logger.critical("The output-dir parameter must point to a directory")
        return 1

    with SessionGen() as session:
        q = session.query(Submission)\
            .join(Submission.task)\
            .join(Submission.files)\
            .join(Submission.results)\
            .join(SubmissionResult.dataset)\
            .join(Submission.participation)\
            .join(Participation.user)\
            .filter(Dataset.id == Task.active_dataset_id)\
            .filter(SubmissionResult.score >= args.min_score)\
            .with_entities(Submission.id, Submission.language,
                           Submission.timestamp,
                           SubmissionResult.score,
                           File.filename, File.digest,
                           User.id, User.username, User.first_name,
                           User.last_name,
                           Task.id, Task.name)

        if args.contest_id:
            q = q.filter(Participation.contest_id == args.contest_id)

        if args.task_id:
            q = q.filter(Submission.task_id == args.task_id)

        if args.user_id:
            q = q.filter(Participation.user_id == args.user_id)

        if args.submission_id:
            q = q.filter(Submission.id == args.submission_id)

        results = q.all()

        if args.unique or args.best:
            results = filter_top_scoring(results, args.unique)

        print("%s file(s) will be created." % len(results))
        if input("Continue? [Y/n] ").strip().lower() not in ["y", ""]:
            return 0

        done = 0
        for row in results:
            s_id, s_language, s_timestamp, sr_score, f_filename, f_digest, \
                u_id, u_name, u_fname, u_lname, t_id, t_name = row

            timef = s_timestamp.strftime('%Y%m%dT%H%M%S')

            ext = languagemanager.get_language(s_language).source_extension \
                if s_language else '.txt'
            filename_base, filename_ext = os.path.splitext(
                f_filename.replace('.%l', ext))

            # "name" is a deprecated specifier with the same meaning as "file"
            filename = args.filename.format(id=s_id,
                                            file=filename_base,
                                            name=filename_base,
                                            ext=filename_ext,
                                            time=timef,
                                            user=u_name,
                                            task=t_name,
                                            score=sr_score)
            filename = os.path.join(args.output_dir, filename)
            if os.path.exists(filename):
                logger.warning("Skipping file '%s' because it already exists",
                               filename)
                continue
            filedir = os.path.dirname(filename)
            if not os.path.exists(filedir):
                os.makedirs(filedir)
            if not os.path.isdir(filedir):
                logger.warning("%s is not a directory, skipped.", filedir)
                continue

            fso = FSObject.get_from_digest(f_digest, session)
            assert fso is not None
            with fso.get_lobject(mode="rb") as file_obj:
                data = file_obj.read()

                if args.utf8:
                    try:
                        data = utf8_decoder(data)
                    except TypeError:
                        logger.warning(
                            "Could not guess encoding of file "
                            "'%s'. Skipping.", filename)
                        continue

                    if args.add_info:
                        data = TEMPLATE[ext] % (u_name, u_fname, u_lname,
                                                t_name, sr_score,
                                                s_timestamp) + data

                    # Print utf8-encoded, possibly altered data
                    with open(filename, "wt", encoding="utf-8") as f_out:
                        f_out.write(data)
                else:
                    # Print raw, untouched binary data
                    with open(filename, "wb") as f_out:
                        f_out.write(data)

            done += 1
            print(done, "/", len(results))

    return 0
Example #57
def match_files_and_language(given_files, given_language_name,
                             submission_format, allowed_language_names):
    """Figure out what the given files are and which language they're in.

    Take a set of files and a set of languages that these files are
    claimed to be in and try to make sense of it. That is, the provided
    information may come from sloppy, untrusted or adversarial sources
    and this function's duty is to parse and validate it to ensure it
    conforms to the expected format for a submission. Such a format is
    given as a set of desired codenames (i.e., filenames with language
    specific extensions replaced by %l) and a set of allowed languages
    (if such a limitation is in place). The function tries to be lenient
    as long as it believes the contestant's intentions are clear.

    The function first figures out which set of candidate languages the
    submission could be in, then tries to match the data against all of
    them. If exactly one matches then that match is returned. The
    languages that are considered are the ones provided by the user (if
    they exist and are allowed) or, if not provided, all languages
    (restricted to the allowed ones). If the submission format contains
    no element ending in ".%l" then the None language is always used
    (the rest of the arguments still needs to make sense though).
    Matching a language is done using the match_files function.

    given_files ([ReceivedFile]): the submitted files.
    given_language_name (str|None): the language, usually provided by
        the contestant, which the submitted files are in (None means
        this information isn't available and we should guess it).
    submission_format ({str}): the codenames that the submitted files
        should be matched to.
    allowed_language_names ([str]|None): the languages that the result
        is allowed to have (None means no limitation).

    return ({str: bytes}, Language|None): the mapping from codenames to
        content, and the language of the submission (with None meaning
        that no language is needed as the format was language-agnostic).

    raise (InvalidFilesOrLanguage): if issues arise when finding a
        match.

    """
    if len(given_files) == 0:
        raise InvalidFilesOrLanguage("no files given")

    # If the submission format is language-agnostic the only "language"
    # that makes sense is None, and if the caller thought differently we
    # let them know.
    if not any(element.endswith(".%l") for element in submission_format):
        if given_language_name is not None:
            raise InvalidFilesOrLanguage(
                "a language %r is given when not needed" % given_language_name)
        candidate_languages = {None}

    # If a language is required and the caller told us which one to use
    # we follow their indication, provided it exists and is allowed.
    elif given_language_name is not None:
        try:
            language = get_language(given_language_name)
        except KeyError:
            raise InvalidFilesOrLanguage(
                "the given language %r isn't a language" % given_language_name)

        if allowed_language_names is not None \
                and language.name not in allowed_language_names:
            raise InvalidFilesOrLanguage(
                "the given language %r isn't allowed" % given_language_name)

        candidate_languages = {language}

    # If a language is needed but the caller didn't provide any we try
    # to auto-detect it by guessing among all allowed languages.
    else:
        if allowed_language_names is None:
            candidate_languages = set(LANGUAGES)
        else:
            candidate_languages = set()
            for language_name in allowed_language_names:
                try:
                    language = get_language(language_name)
                except KeyError:
                    pass
                else:
                    candidate_languages.add(language)

    matched_files_by_language = dict()
    invalidity_reasons = list()
    for language in candidate_languages:
        try:
            matched_files_by_language[language] = \
                _match_files(given_files, language, submission_format)
        except InvalidFiles as err:
            invalidity_reasons.append("%r: %s" % (
                language.name if language is not None else None, err))

    if len(matched_files_by_language) == 0:
        raise InvalidFilesOrLanguage(
            "there isn't any language that matches all the files:\n%s"
            % (";\n".join(invalidity_reasons)))
    elif len(matched_files_by_language) > 1:
        raise InvalidFilesOrLanguage(
            "there is more than one language that matches all the files: %r"
            % set(matched_files_by_language.keys()))

    language, files = matched_files_by_language.popitem()

    return files, language
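The docstring above describes how the set of candidate languages is narrowed before any file matching happens. A minimal standalone sketch of just that narrowing step; the KNOWN_LANGUAGES registry below is a placeholder for CMS's LANGUAGES/get_language(), and the names in it are made up.

# Placeholder registry; CMS uses LANGUAGES and get_language() instead.
KNOWN_LANGUAGES = {"C++17 / g++", "Python 3 / CPython 3"}

def candidate_languages(submission_format, given_language, allowed_languages):
    """Return the language names to try, or {None} if no language is needed."""
    if not any(codename.endswith(".%l") for codename in submission_format):
        if given_language is not None:
            raise ValueError("a language was given when not needed")
        return {None}
    if given_language is not None:
        if given_language not in KNOWN_LANGUAGES:
            raise ValueError("unknown language %r" % given_language)
        if allowed_languages is not None \
                and given_language not in allowed_languages:
            raise ValueError("language %r not allowed" % given_language)
        return {given_language}
    if allowed_languages is None:
        return set(KNOWN_LANGUAGES)
    return {name for name in allowed_languages if name in KNOWN_LANGUAGES}

# candidate_languages({"solution.%l"}, None, None) tries every known language.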