Example #1
def debugSubmission(submission_id, dataset_id, testcase_codename):
    config.keep_sandbox = True
    file_cacher = FileCacher()

    with SessionGen() as session:
        submission = session.query(Submission)\
            .filter(Submission.id == submission_id)\
            .first()

        if submission is None:
            logger.error("There's no submission with id %d" % submission_id)
            return False

        if dataset_id is None:
            dataset = submission.task.active_dataset
            dataset_id = submission.task.active_dataset_id
        else:
            dataset = session.query(Dataset)\
                .filter(Dataset.id == dataset_id)\
                .first()
            if dataset is None:
                logger.error("There's no dataset with id %d" % dataset_id)
                return False

        # Compilation
        operation = ESOperation(ESOperation.COMPILATION, submission_id,
                                dataset_id)
        comp_job = CompilationJob.from_submission(operation, submission,
                                                  dataset)

        task_type = get_task_type(comp_job.task_type,
                                  comp_job.task_type_parameters)
        task_type.execute_job(comp_job, file_cacher)

        for sandbox_path in comp_job.sandboxes:
            logger.info("Compilation sandbox created in %s" % sandbox_path)

        # Check if the compilation is successful
        result = submission.get_result(dataset)
        if result is None or result.compilation_failed():
            logger.error("Compilatoin Failed")
            return True

        # Evaluation
        operation = ESOperation(ESOperation.EVALUATION, submission_id,
                                dataset_id, testcase_codename)
        eval_job = EvaluationJob.from_submission(operation, submission,
                                                 dataset)

        task_type = get_task_type(eval_job.task_type,
                                  eval_job.task_type_parameters)
        task_type.execute_job(eval_job, file_cacher)

        for sandbox_path in eval_job.sandboxes:
            logger.info("Evaluation sandbox created in %s" % sandbox_path)

    return True
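
A minimal sketch of how this debugging helper might be invoked from the command line; the wrapper and its flag names are assumptions, not part of the original code:

# Hypothetical CLI wrapper around debugSubmission; flag names are
# illustrative only.
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Re-run one submission with sandboxes kept.")
    parser.add_argument("submission_id", type=int)
    parser.add_argument("--dataset-id", type=int, default=None)
    parser.add_argument("--testcase", default=None,
                        help="codename of the testcase to evaluate")
    args = parser.parse_args()
    debugSubmission(args.submission_id, args.dataset_id, args.testcase)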
Example #2
    def get_submission_data(self, submission_id):
        """Given the id, returns the submission object and a new task
        type object of the correct type.

        submission_id (int): id of the submission.

        return (Submission, TaskType): corresponding objects.

        raise: JobException if id or task type not found.

        """
        submission = Submission.get_from_id(submission_id, self.session)
        if submission is None:
            err_msg = "Couldn't find submission %s " \
                      "in the database." % submission_id
            logger.critical(err_msg)
            raise JobException(err_msg)

        try:
            task_type = get_task_type(submission, self.file_cacher)
        except KeyError as error:
            err_msg = "Task type `%s' not known for " \
                "submission %s (error: %s)." % (
                submission.task.task_type, submission_id, error)
            logger.error(err_msg)
            raise JobException(err_msg)

        return (submission, task_type)
Example #3
File: Worker.py Project: Mloc/cms
    def execute_job(self, job_dict):
        job = Job.import_from_dict_with_type(job_dict)

        if self.work_lock.acquire(False):

            try:
                logger.operation = "job '%s'" % (job.info)
                logger.info("Request received")
                job.shard = self.shard

                self.task_type = get_task_type(job, self.file_cacher)
                self.task_type.execute_job()
                logger.info("Request finished.")

                return job.export_to_dict()

            except:
                err_msg = "Worker failed on operation `%s'" % logger.operation
                logger.error("%s\n%s" % (err_msg, traceback.format_exc()))
                raise JobException(err_msg)

            finally:
                self.task_type = None
                self.session = None
                logger.operation = ""
                self.work_lock.release()

        else:
            err_msg = "Request '%s' received, " \
                "but declined because of acquired lock" % \
                (job.info)
            logger.warning(err_msg)
            raise JobException(err_msg)
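
The decline-if-busy behavior above hinges on the non-blocking acquire(False) call. A self-contained sketch of the same pattern, independent of CMS:

import threading

work_lock = threading.Lock()

def try_work():
    # acquire(False) returns immediately: True if the lock was free
    # (and is now held), False if another request is already running.
    if work_lock.acquire(False):
        try:
            return "did the work"
        finally:
            work_lock.release()
    else:
        raise RuntimeError("declined because of acquired lock")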
Example #4
def safe_get_task_type(env, *args, **kwargs):
    try:
        return get_task_type(*args, **kwargs)
    # The task type's constructor is called, which may raise any
    # arbitrary exception, hence we stay as general as possible.
    except Exception as err:
        return env.undefined("TaskType not found: %s" % err)
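
One plausible way to wire this helper into a template environment, binding env as the first argument; this registration is an assumption, not necessarily how the surrounding project does it:

from functools import partial
from jinja2 import Environment

env = Environment()
# Templates can now call get_task_type(...) and, on failure, receive
# a Jinja2 Undefined value instead of crashing the render.
env.globals["get_task_type"] = partial(safe_get_task_type, env)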
Example #5
File: Job.py Project: lucach/cms
    def from_user_test(operation, user_test, dataset):
        """Create an EvaluationJob from a user test.

        operation (ESOperation): a USER_TEST_EVALUATION operation.
        user_test (UserTest): the user test object referred by the
            operation.
        dataset (Dataset): the dataset object referred by the
            operation.

        return (EvaluationJob): the job.

        """
        if operation.type_ != ESOperation.USER_TEST_EVALUATION:
            logger.error("Programming error: asking for a user test "
                         "evaluation job, but the operation is %s.",
                         operation.type_)
            raise ValueError("Operation is not a user test evaluation")

        job = EvaluationJob()

        # Job
        job.operation = operation.to_dict()
        job.task_type = dataset.task_type
        job.task_type_parameters = dataset.task_type_parameters

        user_test_result = user_test.get_result(dataset)

        # This should have been created by now.
        assert user_test_result is not None

        # EvaluationJob; dict() is required to detach the dictionary
        # that gets added to the Job from the control of SQLAlchemy
        job.language = user_test.language
        job.files = dict(user_test.files)
        job.managers = dict(user_test.managers)
        job.executables = dict(user_test_result.executables)
        job.input = user_test.input
        job.time_limit = dataset.time_limit
        job.memory_limit = dataset.memory_limit
        job.info = "evaluate user test %d" % (user_test.id)

        # Add the managers to be got from the Task; get_task_type must
        # be imported here to avoid circular dependencies
        from cms.grading.tasktypes import get_task_type
        task_type = get_task_type(dataset=dataset)
        auto_managers = task_type.get_auto_managers()
        if auto_managers is not None:
            for manager_filename in auto_managers:
                job.managers[manager_filename] = \
                    dataset.managers[manager_filename]
        else:
            for manager_filename in dataset.managers:
                if manager_filename not in job.managers:
                    job.managers[manager_filename] = \
                        dataset.managers[manager_filename]

        job.get_output = True
        job.only_execution = True

        return job
Example #6
def safe_get_task_type(env, *args, **kwargs):
    try:
        return get_task_type(*args, **kwargs)
    # The task type's constructor is called, which may raise any
    # arbitrary exception, hence we stay as general as possible.
    except Exception as err:
        return env.undefined("TaskType not found: %s" % err)
Example #7
    def from_user_test(user_test):
        job = CompilationJob()

        # Job
        job.task_type = user_test.task.task_type
        job.task_type_parameters = json.loads(
            user_test.task.task_type_parameters)

        # CompilationJob; dict() is required to detach the dictionary
        # that gets added to the Job from the control of SQLAlchemy
        job.language = user_test.language
        job.files = dict(user_test.files)
        job.managers = dict(user_test.managers)
        job.info = "compile user test %d" % (user_test.id)

        # Add the managers to be got from the Task; get_task_type must
        # be imported here to avoid circular dependencies
        from cms.grading.tasktypes import get_task_type
        task_type = get_task_type(task=user_test.task)
        auto_managers = task_type.get_auto_managers()
        if auto_managers is not None:
            for manager_filename in auto_managers:
                job.managers[manager_filename] = \
                    user_test.task.managers[manager_filename]
        else:
            for manager_filename in user_test.task.managers:
                if manager_filename not in job.managers:
                    job.managers[manager_filename] = \
                        user_test.task.managers[manager_filename]

        return job
Example #8
File: Job.py Project: yangkf1985/cms
    def from_user_test(operation, user_test, dataset):
        """Create an EvaluationJob from a user test.

        operation (ESOperation): a USER_TEST_EVALUATION operation.
        user_test (UserTest): the user test object referred by the
            operation.
        dataset (Dataset): the dataset object referred by the
            operation.

        return (EvaluationJob): the job.

        """
        if operation.type_ != ESOperation.USER_TEST_EVALUATION:
            logger.error(
                "Programming error: asking for a user test "
                "evaluation job, but the operation is %s.", operation.type_)
            raise ValueError("Operation is not a user test evaluation")

        job = EvaluationJob()

        # Job
        job.operation = operation.to_dict()
        job.task_type = dataset.task_type
        job.task_type_parameters = dataset.task_type_parameters

        user_test_result = user_test.get_result(dataset)

        # This should have been created by now.
        assert user_test_result is not None

        # EvaluationJob; dict() is required to detach the dictionary
        # that gets added to the Job from the control of SQLAlchemy
        job.language = user_test.language
        job.files = dict(user_test.files)
        job.managers = dict(user_test.managers)
        job.executables = dict(user_test_result.executables)
        job.input = user_test.input
        job.time_limit = dataset.time_limit
        job.memory_limit = dataset.memory_limit
        job.info = "evaluate user test %d" % (user_test.id)

        # Add the managers to be got from the Task; get_task_type must
        # be imported here to avoid circular dependencies
        from cms.grading.tasktypes import get_task_type
        task_type = get_task_type(dataset=dataset)
        auto_managers = task_type.get_auto_managers()
        if auto_managers is not None:
            for manager_filename in auto_managers:
                job.managers[manager_filename] = \
                    dataset.managers[manager_filename]
        else:
            for manager_filename in dataset.managers:
                if manager_filename not in job.managers:
                    job.managers[manager_filename] = \
                        dataset.managers[manager_filename]

        job.get_output = True
        job.only_execution = True

        return job
Example #9
    def execute_job_group(self, job_group_dict):
        """Receive a group of jobs in a dict format and executes them
        one by one.

        job_group_dict (dict): a dictionary suitable to be imported
            from JobGroup.

        """
        job_group = JobGroup.import_from_dict(job_group_dict)

        if self.work_lock.acquire(False):

            try:
                self._ignore_job = False

                for k, job in job_group.jobs.iteritems():
                    logger.info("Starting job.", extra={"operation": job.info})

                    job.shard = self.shard

                    # FIXME This is actually kind of a workaround...
                    # The only TaskType that needs it is OutputOnly.
                    job._key = k

                    # FIXME We're creating a new TaskType for each Job
                    # even if, at the moment, a JobGroup always uses
                    # the same TaskType and the same parameters. Yet,
                    # this could change in the future, so the best
                    # solution is to keep a cache of TaskTypes objects
                    # (like ScoringService does with ScoreTypes, except
                    # that we cannot index by Dataset ID here...).
                    task_type = get_task_type(job.task_type,
                                              job.task_type_parameters)
                    task_type.execute_job(job, self.file_cacher)

                    logger.info("Finished job.", extra={"operation": job.info})

                    if not job.success or self._ignore_job:
                        job_group.success = False
                        break
                else:
                    job_group.success = True

                return job_group.export_to_dict()

            except:
                err_msg = "Worker failed."
                logger.error(err_msg, exc_info=True)
                raise JobException(err_msg)

            finally:
                self.work_lock.release()

        else:
            err_msg = "Request received, but declined because of acquired " \
                "lock (Worker is busy executing another job group, this " \
                "should not happen: check if there are more than one ES " \
                "running, or for bugs in ES."
            logger.warning(err_msg)
            raise JobException(err_msg)
Example #10
    def from_user_test(user_test):
        job = EvaluationJob()

        # Job
        job.task_type = user_test.task.task_type
        job.task_type_parameters = json.loads(
            user_test.task.task_type_parameters)

        # EvaluationJob
        job.executables = user_test.executables
        job.testcases = [Testcase(input=user_test.input,
                                  output=None)]
        job.time_limit = user_test.task.time_limit
        job.memory_limit = user_test.task.memory_limit
        job.managers = dict(user_test.managers)
        job.files = user_test.files
        job.info = "evaluate user test %d" % (user_test.id)

        # Add the managers to be got from the Task; get_task_type must
        # be imported here to avoid circular dependencies
        from cms.grading.tasktypes import get_task_type
        task_type = get_task_type(task=user_test.task)
        auto_managers = task_type.get_auto_managers()
        if auto_managers is not None:
            for manager_filename in auto_managers:
                job.managers[manager_filename] = \
                    user_test.task.managers[manager_filename]
        else:
            for manager_filename in user_test.task.managers:
                if manager_filename not in job.managers:
                    job.managers[manager_filename] = \
                        user_test.task.managers[manager_filename]

        return job
Example #11
File: Worker.py Project: kennyboy/cms
    def execute_job(self, job_dict):
        job = Job.import_from_dict_with_type(job_dict)

        if self.work_lock.acquire(False):

            try:
                logger.operation = "job '%s'" % (job.info)
                logger.info("Request received")
                job.shard = self.shard

                self.task_type = get_task_type(job, self.file_cacher)
                self.task_type.execute_job()
                logger.info("Request finished.")

                return job.export_to_dict()

            except:
                err_msg = "Worker failed on operation `%s'" % logger.operation
                logger.error("%s\n%s" % (err_msg, traceback.format_exc()))
                raise JobException(err_msg)

            finally:
                self.task_type = None
                self.session = None
                logger.operation = ""
                self.work_lock.release()

        else:
            err_msg = "Request '%s' received, " \
                "but declined because of acquired lock" % \
                (job.info)
            logger.warning(err_msg)
            raise JobException(err_msg)
Example #12
File: Rule.py Project: ioi-germany/cms
 def run(self):
     from cms.grading.Job import Job
     from cms.grading.tasktypes import get_task_type
     task_type = get_task_type(self.job.task_type,
                               self.job.task_type_parameters)
     # Crazy workaround to clone the job
     jobresult = Job.import_from_dict_with_type(self.job.export_to_dict())
     task_type.execute_job(jobresult, self.file_cacher)
     self.result.log['job'] = jobresult.export_to_dict()
Example #13
File: Worker.py Project: wlh1980/cms
    def execute_job_group(self, job_group_dict):
        """Receive a group of jobs in a list format and executes them one by
        one.

        job_group_dict ({}): a JobGroup exported to dict.

        return ({}): the same JobGroup in dict format, but containing
            the results.

        """
        start_time = time.time()
        job_group = JobGroup.import_from_dict(job_group_dict)

        if self.work_lock.acquire(False):
            try:
                logger.info("Starting job group.")
                for job in job_group.jobs:
                    logger.info("Starting job.",
                                extra={"operation": job.info})

                    job.shard = self.shard

                    if self._fake_worker_time is None:
                        task_type = get_task_type(job.task_type,
                                                  job.task_type_parameters)
                        try:
                            task_type.execute_job(job, self.file_cacher)
                        except TombstoneError:
                            job.success = False
                            job.plus = {"tombstone": True}
                    else:
                        self._fake_work(job)

                    logger.info("Finished job.",
                                extra={"operation": job.info})

                logger.info("Finished job group.")
                return job_group.export_to_dict()

            except Exception as e:
                err_msg = "Worker failed: %s." % e
                logger.error(err_msg, exc_info=True)
                raise JobException(err_msg)

            finally:
                self._finalize(start_time)
                self.work_lock.release()

        else:
            err_msg = "Request received, but declined because of acquired " \
                "lock (Worker is busy executing another job, this should " \
                "not happen: check if there are more than one ES running, " \
                "or for bugs in ES."
            logger.warning(err_msg)
            self._finalize(start_time)
            raise JobException(err_msg)
Example #14
File: Worker.py Project: s546360316/cms
    def execute_job_group(self, job_group_dict):
        job_group = JobGroup.import_from_dict(job_group_dict)

        if self.work_lock.acquire(False):

            try:
                self.ignore_job = False

                for k, job in job_group.jobs.iteritems():
                    logger.operation = "job '%s'" % (job.info)
                    logger.info("Request received")

                    job.shard = self.shard

                    # FIXME This is actually kind of a workaround...
                    # The only TaskType that needs it is OutputOnly.
                    job._key = k

                    # FIXME We're creating a new TaskType for each Job
                    # even if, at the moment, a JobGroup always uses
                    # the same TaskType and the same parameters. Yet,
                    # this could change in the future, so the best
                    # solution is to keep a cache of TaskTypes objects
                    # (like ScoringService does with ScoreTypes, except
                    # that we cannot index by Dataset ID here...).
                    task_type = get_task_type(job.task_type,
                                              job.task_type_parameters)
                    task_type.execute_job(job, self.file_cacher)

                    logger.info("Request finished.")

                    if not job.success or self.ignore_job:
                        job_group.success = False
                        break
                else:
                    job_group.success = True

                return job_group.export_to_dict()

            except:
                err_msg = "Worker failed on operation `%s'" % logger.operation
                logger.error("%s\n%s" % (err_msg, traceback.format_exc()))
                raise JobException(err_msg)

            finally:
                logger.operation = ""
                self.work_lock.release()

        else:
            err_msg = "Request '%s' received, " \
                "but declined because of acquired lock" % \
                (job.info)
            logger.warning(err_msg)
            raise JobException(err_msg)
Example #15
 def run(self):
     from cms.grading.Job import Job
     from cms.grading.tasktypes import get_task_type
     task_type = get_task_type(self.job.task_type,
                               self.job.task_type_parameters)
     # Crazy workaround to clone the job
     jobresult = Job.import_from_dict_with_type(self.job.export_to_dict())
     task_type.execute_job(jobresult, self.file_cacher)
     # Don't save the result if the sandbox failed.
     if not jobresult.success:
         self.result.badfail = True
     self.result.log['job'] = jobresult.export_to_dict()
Example #16
File: Job.py Project: PJeBeK/cms
    def from_user_test(operation, user_test, dataset):
        """Create a CompilationJob from a user test.

        operation (ESOperation): a USER_TEST_COMPILATION operation.
        user_test (UserTest): the user test object referred by the
            operation.
        dataset (Dataset): the dataset object referred by the
            operation.

        return (CompilationJob): the job.

        """
        if operation.type_ != ESOperation.USER_TEST_COMPILATION:
            logger.error("Programming error: asking for a user test "
                         "compilation job, but the operation is %s.",
                         operation.type_)
            raise ValueError("Operation is not a user test compilation")

        multithreaded = _is_contest_multithreaded(user_test.task.contest)

        # Add the managers to be got from the Task; get_task_type must
        # be imported here to avoid circular dependencies
        from cms.grading.tasktypes import get_task_type
        # dict() is required to detach the dictionary that gets added
        # to the Job from the control of SQLAlchemy
        managers = dict(user_test.managers)
        task_type = get_task_type(dataset=dataset)
        auto_managers = task_type.get_auto_managers()
        if auto_managers is not None:
            for manager_filename in auto_managers:
                managers[manager_filename] = \
                    dataset.managers[manager_filename]
        else:
            for manager_filename in dataset.managers:
                if manager_filename not in managers:
                    managers[manager_filename] = \
                        dataset.managers[manager_filename]

        return CompilationJob(
            operation=operation.to_dict(),
            task_type=dataset.task_type,
            task_type_parameters=dataset.task_type_parameters,
            language=user_test.language,
            multithreaded_sandbox=multithreaded,
            files=dict(user_test.files),
            managers=managers,
            info="compile user test %d" % (user_test.id)
        )
Example #17
File: Job.py Project: yangkf1985/cms
    def from_user_test(operation, user_test, dataset):
        """Create a CompilationJob from a user test.

        operation (ESOperation): a USER_TEST_COMPILATION operation.
        user_test (UserTest): the user test object referred by the
            operation.
        dataset (Dataset): the dataset object referred by the
            operation.

        return (CompilationJob): the job.

        """
        if operation.type_ != ESOperation.USER_TEST_COMPILATION:
            logger.error(
                "Programming error: asking for a user test "
                "compilation job, but the operation is %s.", operation.type_)
            raise ValueError("Operation is not a user test compilation")

        job = CompilationJob()

        # Job
        job.operation = operation.to_dict()
        job.task_type = dataset.task_type
        job.task_type_parameters = dataset.task_type_parameters

        # CompilationJob; dict() is required to detach the dictionary
        # that gets added to the Job from the control of SQLAlchemy
        job.language = user_test.language
        job.files = dict(user_test.files)
        job.managers = dict(user_test.managers)
        job.info = "compile user test %d" % (user_test.id)

        # Add the managers to be got from the Task; get_task_type must
        # be imported here to avoid circular dependencies
        from cms.grading.tasktypes import get_task_type
        task_type = get_task_type(dataset=dataset)
        auto_managers = task_type.get_auto_managers()
        if auto_managers is not None:
            for manager_filename in auto_managers:
                job.managers[manager_filename] = \
                    dataset.managers[manager_filename]
        else:
            for manager_filename in dataset.managers:
                if manager_filename not in job.managers:
                    job.managers[manager_filename] = \
                        dataset.managers[manager_filename]

        return job
Example #18
    def _perform_job(self, job):
        task_type = get_task_type(job.task_type, job.task_type_parameters)

        tries = 0
        MAX_TRIES = 5

        while tries < MAX_TRIES:
            try:
                task_type.execute_job(job, self.file_cacher)
            except TombstoneError:
                job.success = False
                job.plus = {"tombstone": True}
                return

            if not isinstance(job, EvaluationJob):
                return
            if job.plus is None:
                return

            execution_time = job.plus['execution_time']

            if job.text[0] != 'Execution timed out':
                if tries > 0:
                    logger.info("Took: %s (TL: %s)",
                                execution_time,
                                job.time_limit,
                                extra={"operation": job.info})
                    logger.info("Not a TLE anymore.",
                                extra={"operation": job.info})

                return

            logger.info("Took: %s (TL: %s)",
                        execution_time,
                        job.time_limit,
                        extra={"operation": job.info})

            if execution_time > 1.3 * job.time_limit:
                logger.info("Significant TLE. Not retrying.",
                            extra={"operation": job.info})
                return

            tries += 1
            logger.info("Slight TLE. Retrying (%s of %s)",
                        tries,
                        MAX_TRIES,
                        extra={"operation": job.info})
Example #19
File: task.py Project: cms-dev/cms
 def task_type_object(self):
     if not hasattr(self, "_cached_task_type_object") \
             or self.task_type != self._cached_task_type \
             or (self.task_type_parameters
                 != self._cached_task_type_parameters):
         # Import late to avoid a circular dependency.
         from cms.grading.tasktypes import get_task_type
         # This can raise.
         self._cached_task_type_object = get_task_type(
             self.task_type, self.task_type_parameters)
         # If an exception is raised these updates don't take place:
         # that way, next time this property is accessed, we get a
         # cache miss again and the same exception is raised again.
         self._cached_task_type = self.task_type
         self._cached_task_type_parameters = \
             copy.deepcopy(self.task_type_parameters)
     return self._cached_task_type_object
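
A sketch of the caching contract this property implements: repeated reads return the same object, while mutating the parameters forces a rebuild. The attribute values below are invented for illustration:

# Hypothetical usage; `dataset` is any object exposing this property
# together with task_type / task_type_parameters attributes.
tt_a = dataset.task_type_object
tt_b = dataset.task_type_object
assert tt_a is tt_b                 # second read hits the cache

dataset.task_type_parameters = {"changed": True}   # made-up value
tt_c = dataset.task_type_object     # parameters differ: cache miss
assert tt_c is not tt_a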
Example #20
 def task_type_object(self):
     if not hasattr(self, "_cached_task_type_object") \
             or self.task_type != self._cached_task_type \
             or self.task_type_parameters \
                != self._cached_task_type_parameters:
         # Import late to avoid a circular dependency.
         from cms.grading.tasktypes import get_task_type
         # This can raise.
         self._cached_task_type_object = get_task_type(
             self.task_type, self.task_type_parameters)
         # If an exception is raised these updates don't take place:
         # that way, next time this property is accessed, we get a
         # cache miss again and the same exception is raised again.
         self._cached_task_type = self.task_type
         self._cached_task_type_parameters = \
             copy.deepcopy(self.task_type_parameters)
     return self._cached_task_type_object
Example #21
    def from_user_test_evaluation(user_test, dataset):
        job = EvaluationJob()

        # Job
        job.task_type = dataset.task_type
        job.task_type_parameters = dataset.task_type_parameters

        user_test_result = user_test.get_result(dataset)

        # This should have been created by now.
        assert user_test_result is not None

        # EvaluationJob; dict() is required to detach the dictionary
        # that gets added to the Job from the control of SQLAlchemy
        job.language = user_test.language
        job.files = dict(user_test.files)
        job.managers = dict(user_test.managers)
        job.executables = dict(user_test_result.executables)
        job.input = user_test.input
        job.time_limit = dataset.time_limit
        job.memory_limit = dataset.memory_limit
        job.info = "evaluate user test %d" % (user_test.id)

        # Add the managers to be got from the Task; get_task_type must
        # be imported here to avoid circular dependencies
        from cms.grading.tasktypes import get_task_type
        task_type = get_task_type(dataset=dataset)
        auto_managers = task_type.get_auto_managers()
        if auto_managers is not None:
            for manager_filename in auto_managers:
                job.managers[manager_filename] = \
                    dataset.managers[manager_filename]
        else:
            for manager_filename in dataset.managers:
                if manager_filename not in job.managers:
                    job.managers[manager_filename] = \
                        dataset.managers[manager_filename]

        job.get_output = True
        job.only_execution = True

        jobs = {"": job}

        return JobGroup(jobs)
Example #22
File: Worker.py Project: Corea/cms
    def execute_job(self, job_dict):
        """Receive a group of jobs in a dict format and executes them
        one by one.

        job_dict (dict): a dictionary suitable to be imported from Job.

        """
        start_time = time.time()
        job = Job.import_from_dict_with_type(job_dict)

        if self.work_lock.acquire(False):

            try:
                logger.info("Starting job.",
                            extra={"operation": job.info})

                job.shard = self.shard

                task_type = get_task_type(job.task_type,
                                          job.task_type_parameters)
                task_type.execute_job(job, self.file_cacher)

                logger.info("Finished job.",
                            extra={"operation": job.info})

                return job.export_to_dict()

            except:
                err_msg = "Worker failed."
                logger.error(err_msg, exc_info=True)
                raise JobException(err_msg)

            finally:
                self._finalize(start_time)
                self.work_lock.release()

        else:
            err_msg = "Request received, but declined because of acquired " \
                "lock (Worker is busy executing another job, this should " \
                "not happen: check if there are more than one ES running, " \
                "or for bugs in ES."
            logger.warning(err_msg)
            self._finalize(start_time)
            raise JobException(err_msg)
Example #23
File: Job.py Project: Zhangkaiqiang/cms
    def from_user_test_evaluation(user_test, dataset):
        job = EvaluationJob()

        # Job
        job.task_type = dataset.task_type
        job.task_type_parameters = dataset.task_type_parameters

        user_test_result = user_test.get_result(dataset)

        # This should have been created by now.
        assert user_test_result is not None

        # EvaluationJob; dict() is required to detach the dictionary
        # that gets added to the Job from the control of SQLAlchemy
        job.language = user_test.language
        job.files = dict(user_test.files)
        job.managers = dict(user_test.managers)
        job.executables = dict(user_test_result.executables)
        job.input = user_test.input
        job.time_limit = dataset.time_limit
        job.memory_limit = dataset.memory_limit
        job.info = "evaluate user test %d" % (user_test.id)

        # Add the managers to be got from the Task; get_task_type must
        # be imported here to avoid circular dependencies
        from cms.grading.tasktypes import get_task_type
        task_type = get_task_type(dataset=dataset)
        auto_managers = task_type.get_auto_managers()
        if auto_managers is not None:
            for manager_filename in auto_managers:
                job.managers[manager_filename] = \
                    dataset.managers[manager_filename]
        else:
            for manager_filename in dataset.managers:
                if manager_filename not in job.managers:
                    job.managers[manager_filename] = \
                        dataset.managers[manager_filename]

        job.get_output = True
        job.only_execution = True

        jobs = {"": job}

        return JobGroup(jobs)
Example #24
    def execute_job(self, job_dict):
        """Receive a group of jobs in a dict format and executes them
        one by one.

        job_dict (dict): a dictionary suitable to be imported from Job.

        """
        start_time = time.time()
        job = Job.import_from_dict_with_type(job_dict)

        if self.work_lock.acquire(False):

            try:
                logger.info("Starting job.", extra={"operation": job.info})

                job.shard = self.shard

                task_type = get_task_type(job.task_type,
                                          job.task_type_parameters)
                task_type.execute_job(job, self.file_cacher)

                logger.info("Finished job.", extra={"operation": job.info})

                return job.export_to_dict()

            except:
                err_msg = "Worker failed."
                logger.error(err_msg, exc_info=True)
                raise JobException(err_msg)

            finally:
                self._finalize(start_time)
                self.work_lock.release()

        else:
            err_msg = "Request received, but declined because of acquired " \
                "lock (Worker is busy executing another job, this should " \
                "not happen: check if there are more than one ES running, " \
                "or for bugs in ES."
            logger.warning(err_msg)
            self._finalize(start_time)
            raise JobException(err_msg)
Example #25
File: Test.py Project: ldct/cms
def test_testcases(base_dir, soluzione, language, assume=None):
    global task, file_cacher

    # Use a FileCacher with a NullBackend in order to avoid filling
    # the database with junk
    if file_cacher is None:
        file_cacher = FileCacher(null=True)

    # Load the task
    # TODO - This implies copying a lot of data to the FileCacher,
    # which is annoying if you have to do it continuously; it would be
    # better to use a persistent cache (although local, possibly
    # filesystem-based instead of database-based) and somehow detect
    # when the task has already been loaded
    if task is None:
        loader = YamlLoader(
            os.path.realpath(os.path.join(base_dir, "..")),
            file_cacher)
        # Normally we should import the contest first, but YamlLoader
        # accepts get_task() even without previous get_contest() calls
        task = loader.get_task(os.path.split(os.path.realpath(base_dir))[1])

    # Prepare the EvaluationJob
    dataset = task.active_dataset
    digest = file_cacher.put_file_from_path(
        os.path.join(base_dir, soluzione),
        "Solution %s for task %s" % (soluzione, task.name))
    executables = {task.name: Executable(filename=task.name, digest=digest)}
    jobs = [(t, EvaluationJob(
        language=language,
        task_type=dataset.task_type,
        task_type_parameters=json.loads(dataset.task_type_parameters),
        managers=dict(dataset.managers),
        executables=executables,
        input=dataset.testcases[t].input, output=dataset.testcases[t].output,
        time_limit=dataset.time_limit,
        memory_limit=dataset.memory_limit)) for t in dataset.testcases]
    tasktype = get_task_type(dataset=dataset)

    ask_again = True
    last_status = "ok"
    status = "ok"
    stop = False
    info = []
    points = []
    comments = []
    tcnames = []
    for jobinfo in sorted(jobs):
        print(jobinfo[0], end='')
        sys.stdout.flush()
        job = jobinfo[1]
        # Skip the testcase if we decide to consider everything to
        # timeout
        if stop:
            info.append("Time limit exceeded")
            points.append(0.0)
            comments.append("Timeout.")
            continue

        # Evaluate testcase
        last_status = status
        tasktype.evaluate(job, file_cacher)
        status = job.plus["exit_status"]
        info.append("Time: %5.3f   Wall: %5.3f   Memory: %s" %
                   (job.plus["execution_time"],
                    job.plus["execution_wall_clock_time"],
                    mem_human(job.plus["execution_memory"])))
        points.append(float(job.outcome))
        comments.append(format_status_text(job.text))
        tcnames.append(jobinfo[0])

        # If we saw two consecutive timeouts, ask whether we want to
        # consider everything to timeout
        if ask_again and status == "timeout" and last_status == "timeout":
            print()
            print("Want to stop and consider everything to timeout? [y/N]",
                  end='')
            if assume is not None:
                print(assume)
                tmp = assume
            else:
                tmp = raw_input().lower()
            if tmp in ['y', 'yes']:
                stop = True
            else:
                ask_again = False

    # Result pretty printing
    print()
    clen = max(len(c) for c in comments)
    ilen = max(len(i) for i in info)
    for (i, p, c, b) in zip(tcnames, points, comments, info):
        print("%s) %5.2lf --- %s [%s]" % (i, p, c.ljust(clen), b.center(ilen)))

    return zip(points, comments, info)
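
An illustrative call, assuming base_dir is a task directory in the YamlLoader layout and soluzione is a solution path relative to it; all values below are placeholders:

# Hypothetical invocation; paths and language string are placeholders.
results = test_testcases("tasks/example_task", "sol/solution",
                         language="C++", assume="n")
for points, comment, info_line in results:
    print("%5.2f --- %s [%s]" % (points, comment, info_line))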
Example #26
    def post(self, task_name):
        participation = self.current_user

        if not self.r_params["testing_enabled"]:
            raise tornado.web.HTTPError(404)

        try:
            task = self.contest.get_task(task_name)
        except KeyError:
            raise tornado.web.HTTPError(404)

        self.fallback_page = ["testing"]
        self.fallback_args = {"task_name": task.name}

        # Check that the task is testable
        task_type = get_task_type(dataset=task.active_dataset)
        if not task_type.testable:
            logger.warning("User %s tried to make test on task %s.",
                           participation.user.username, task_name)
            raise tornado.web.HTTPError(404)

        # Alias for easy access
        contest = self.contest

        # Enforce maximum number of user_tests
        try:
            if contest.max_user_test_number is not None:
                user_test_c = self.sql_session.query(func.count(UserTest.id))\
                    .join(UserTest.task)\
                    .filter(Task.contest == contest)\
                    .filter(UserTest.participation == participation)\
                    .scalar()
                if user_test_c >= contest.max_user_test_number and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("You have reached the maximum limit of "
                               "at most %d tests among all tasks.") %
                        contest.max_user_test_number)
            if task.max_user_test_number is not None:
                user_test_t = self.sql_session.query(func.count(UserTest.id))\
                    .filter(UserTest.task == task)\
                    .filter(UserTest.participation == participation)\
                    .scalar()
                if user_test_t >= task.max_user_test_number and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("You have reached the maximum limit of "
                               "at most %d tests on this task.") %
                        task.max_user_test_number)
        except ValueError as error:
            self._send_error(self._("Too many tests!"), str(error))
            return

        # Enforce minimum time between user_tests
        try:
            if contest.min_user_test_interval is not None:
                last_user_test_c = self.sql_session.query(UserTest)\
                    .join(UserTest.task)\
                    .filter(Task.contest == contest)\
                    .filter(UserTest.participation == participation)\
                    .order_by(UserTest.timestamp.desc())\
                    .first()
                if last_user_test_c is not None and \
                        self.timestamp - last_user_test_c.timestamp < \
                        contest.min_user_test_interval and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("Among all tasks, you can test again "
                               "after %d seconds from last test.") %
                        contest.min_user_test_interval.total_seconds())
            # We get the last user_test even if we may not need it
            # for min_user_test_interval because we may need it later,
            # in case this is an ALLOW_PARTIAL_SUBMISSION task.
            last_user_test_t = self.sql_session.query(UserTest)\
                .filter(UserTest.participation == participation)\
                .filter(UserTest.task == task)\
                .order_by(UserTest.timestamp.desc())\
                .first()
            if task.min_user_test_interval is not None:
                if last_user_test_t is not None and \
                        self.timestamp - last_user_test_t.timestamp < \
                        task.min_user_test_interval and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("For this task, you can test again "
                               "after %d seconds from last test.") %
                        task.min_user_test_interval.total_seconds())
        except ValueError as error:
            self._send_error(self._("Tests too frequent!"), str(error))
            return

        # Required files from the user.
        required = set([sfe.filename for sfe in task.submission_format] +
                       task_type.get_user_managers(task.submission_format) +
                       ["input"])

        # Ensure that the user did not submit multiple files with the
        # same name.
        if any(
                len(filename) != 1
                for filename in itervalues(self.request.files)):
            self._send_error(self._("Invalid test format!"),
                             self._("Please select the correct files."))
            return

        # If the user submitted an archive, extract it and use its
        # content as request.files. This is only valid for "output
        # only" tasks (i.e., not for submissions requiring a
        # programming language identification).
        if len(self.request.files) == 1 and \
                next(iterkeys(self.request.files)) == "submission":
            if any(filename.endswith(".%l") for filename in required):
                self._send_error(self._("Invalid test format!"),
                                 self._("Please select the correct files."),
                                 task)
                return
            archive_data = self.request.files["submission"][0]
            del self.request.files["submission"]

            # Create the archive.
            archive = Archive.from_raw_data(archive_data["body"])

            if archive is None:
                self._send_error(
                    self._("Invalid archive format!"),
                    self._("The submitted archive could not be opened."))
                return

            # Extract the archive.
            unpacked_dir = archive.unpack()
            for name in archive.namelist():
                filename = os.path.basename(name)
                body = open(os.path.join(unpacked_dir, filename), "r").read()
                self.request.files[filename] = [{
                    'filename': filename,
                    'body': body
                }]

            archive.cleanup()

        # This ensures that the user sent one file for every name in
        # submission format and no more. Less is acceptable if task
        # type says so.
        provided = set(iterkeys(self.request.files))
        if not (required == provided or (task_type.ALLOW_PARTIAL_SUBMISSION
                                         and required.issuperset(provided))):
            self._send_error(self._("Invalid test format!"),
                             self._("Please select the correct files."))
            return

        # Add submitted files. After this, files is a dictionary indexed
        # by *our* filenames (something like "output01.txt" or
        # "taskname.%l", and whose value is a couple
        # (user_assigned_filename, content).
        files = {}
        for uploaded, data in iteritems(self.request.files):
            files[uploaded] = (data[0]["filename"], data[0]["body"])

        # Read the submission language provided in the request; we
        # integrate it with the language fetched from the previous
        # submission (if we use it) and later make sure it is
        # recognized and allowed.
        submission_lang = self.get_argument("language", None)
        need_lang = any(
            our_filename.find(".%l") != -1 for our_filename in files)

        # If we allow partial submissions, implicitly we recover the
        # non-submitted files from the previous user test. And put them
        # in file_digests (i.e. like they have already been sent to FS).
        file_digests = {}
        if task_type.ALLOW_PARTIAL_SUBMISSION and \
                last_user_test_t is not None and \
                (submission_lang is None or
                 submission_lang == last_user_test_t.language):
            submission_lang = last_user_test_t.language
            for filename in required.difference(provided):
                if filename in last_user_test_t.files:
                    file_digests[filename] = \
                        last_user_test_t.files[filename].digest

        # Throw an error if task needs a language, but we don't have
        # it or it is not allowed / recognized.
        if need_lang:
            error = None
            if submission_lang is None:
                error = self._("Cannot recognize the user test language.")
            elif submission_lang not in contest.languages:
                error = self._("Language %s not allowed in this contest.") \
                    % submission_lang
            if error is not None:
                self._send_error(self._("Invalid test!"), error)
                return

        # Check if submitted files are small enough.
        if any([
                len(f[1]) > config.max_submission_length
                for n, f in iteritems(files) if n != "input"
        ]):
            self._send_error(
                self._("Test too big!"),
                self._("Each source file must be at most %d bytes long.") %
                config.max_submission_length)
            return
        if len(files["input"][1]) > config.max_input_length:
            self._send_error(
                self._("Input too big!"),
                self._("The input file must be at most %d bytes long.") %
                config.max_input_length)
            return

        # All checks done, submission accepted.

        # Attempt to store the submission locally to be able to
        # recover a failure.
        if config.tests_local_copy:
            try:
                path = os.path.join(
                    config.tests_local_copy_path.replace(
                        "%s", config.data_dir), participation.user.username)
                if not os.path.exists(path):
                    os.makedirs(path)
                # Pickle in ASCII format produces str, not unicode,
                # therefore we open the file in binary mode.
                with io.open(
                        os.path.join(path,
                                     "%d" % make_timestamp(self.timestamp)),
                        "wb") as file_:
                    pickle.dump((self.contest.id, participation.user.id,
                                 task.id, files), file_)
            except Exception as error:
                logger.error("Test local copy failed.", exc_info=True)

        # We now have to send all the files to the destination...
        try:
            for filename in files:
                digest = self.service.file_cacher.put_file_content(
                    files[filename][1], "Test file %s sent by %s at %d." %
                    (filename, participation.user.username,
                     make_timestamp(self.timestamp)))
                file_digests[filename] = digest

        # In case of error, the server aborts the submission
        except Exception as error:
            logger.error("Storage failed! %s", error)
            self._send_error(self._("Test storage failed!"),
                             self._("Please try again."))
            return

        # All the files are stored, ready to submit!
        logger.info("All files stored for test sent by %s",
                    participation.user.username)
        user_test = UserTest(self.timestamp,
                             submission_lang,
                             file_digests["input"],
                             participation=participation,
                             task=task)

        for filename in [sfe.filename for sfe in task.submission_format]:
            digest = file_digests[filename]
            self.sql_session.add(
                UserTestFile(filename, digest, user_test=user_test))
        for filename in task_type.get_user_managers(task.submission_format):
            digest = file_digests[filename]
            if submission_lang is not None:
                extension = get_language(submission_lang).source_extension
                filename = filename.replace(".%l", extension)
            self.sql_session.add(
                UserTestManager(filename, digest, user_test=user_test))

        self.sql_session.add(user_test)
        self.sql_session.commit()
        self.service.evaluation_service.new_user_test(
            user_test_id=user_test.id)
        self.service.add_notification(
            participation.user.username, self.timestamp,
            self._("Test received"),
            self._("Your test has been received "
                   "and is currently being executed."), NOTIFICATION_SUCCESS)

        # The argument (encrypted user test id) is not used by CWS
        # (nor does it disclose information to the user), but it is
        # useful for automatic testing to obtain the user test id.
        self.redirect(
            self.contest_url(*self.fallback_page,
                             user_test_id=encrypt_number(user_test.id),
                             **self.fallback_args))
Example #27
    def post(self, task_name):
        participation = self.current_user
        try:
            task = self.contest.get_task(task_name)
        except KeyError:
            raise tornado.web.HTTPError(404)

        # Alias for easy access
        contest = self.contest

        # Enforce maximum number of submissions
        try:
            if contest.max_submission_number is not None:
                submission_c = self.sql_session\
                    .query(func.count(Submission.id))\
                    .join(Submission.task)\
                    .filter(Task.contest == contest)\
                    .filter(Submission.participation == participation)\
                    .scalar()
                if submission_c >= contest.max_submission_number and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("You have reached the maximum limit of "
                               "at most %d submissions among all tasks.") %
                        contest.max_submission_number)
            if task.max_submission_number is not None:
                submission_t = self.sql_session\
                    .query(func.count(Submission.id))\
                    .filter(Submission.task == task)\
                    .filter(Submission.participation == participation)\
                    .scalar()
                if submission_t >= task.max_submission_number and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("You have reached the maximum limit of "
                               "at most %d submissions on this task.") %
                        task.max_submission_number)
        except ValueError as error:
            self.application.service.add_notification(
                participation.user.username, self.timestamp,
                self._("Too many submissions!"), error.message,
                NOTIFICATION_ERROR)
            self.redirect("/tasks/%s/submissions" % quote(task.name, safe=''))
            return

        # Enforce minimum time between submissions
        try:
            if contest.min_submission_interval is not None:
                last_submission_c = self.sql_session.query(Submission)\
                    .join(Submission.task)\
                    .filter(Task.contest == contest)\
                    .filter(Submission.participation == participation)\
                    .order_by(Submission.timestamp.desc())\
                    .first()
                if last_submission_c is not None and \
                        self.timestamp - last_submission_c.timestamp < \
                        contest.min_submission_interval and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("Among all tasks, you can submit again "
                               "after %d seconds from last submission.") %
                        contest.min_submission_interval.total_seconds())
            # We get the last submission even if we may not need it
            # for min_submission_interval because we may need it later,
            # in case this is an ALLOW_PARTIAL_SUBMISSION task.
            last_submission_t = self.sql_session.query(Submission)\
                .filter(Submission.task == task)\
                .filter(Submission.participation == participation)\
                .order_by(Submission.timestamp.desc())\
                .first()
            if task.min_submission_interval is not None:
                if last_submission_t is not None and \
                        self.timestamp - last_submission_t.timestamp < \
                        task.min_submission_interval and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("For this task, you can submit again "
                               "after %d seconds from last submission.") %
                        task.min_submission_interval.total_seconds())
        except ValueError as error:
            self.application.service.add_notification(
                participation.user.username, self.timestamp,
                self._("Submissions too frequent!"), error.message,
                NOTIFICATION_ERROR)
            self.redirect("/tasks/%s/submissions" % quote(task.name, safe=''))
            return

        # Ensure that the user did not submit multiple files with the
        # same name.
        if any(len(file_list) != 1
               for file_list in self.request.files.values()):
            self.application.service.add_notification(
                participation.user.username, self.timestamp,
                self._("Invalid submission format!"),
                self._("Please select the correct files."), NOTIFICATION_ERROR)
            self.redirect("/tasks/%s/submissions" % quote(task.name, safe=''))
            return

        # If the user submitted an archive, extract it and use content
        # as request.files.
        if len(self.request.files) == 1 and \
                self.request.files.keys()[0] == "submission":
            archive_data = self.request.files["submission"][0]
            del self.request.files["submission"]

            # Create the archive.
            archive = Archive.from_raw_data(archive_data["body"])

            if archive is None:
                self.application.service.add_notification(
                    participation.user.username, self.timestamp,
                    self._("Invalid archive format!"),
                    self._("The submitted archive could not be opened."),
                    NOTIFICATION_ERROR)
                self.redirect("/tasks/%s/submissions" %
                              quote(task.name, safe=''))
                return

            # Extract the archive.
            unpacked_dir = archive.unpack()
            for name in archive.namelist():
                filename = os.path.basename(name)
                # Join with the full member name (not its basename) so
                # files inside subdirectories are found too.
                with open(os.path.join(unpacked_dir, name), "rb") as member:
                    body = member.read()
                self.request.files[filename] = [{
                    'filename': filename,
                    'body': body
                }]

            archive.cleanup()

        # This ensures that the user sent one file for every name in
        # submission format and no more. Less is acceptable if task
        # type says so.
        task_type = get_task_type(dataset=task.active_dataset)
        required = set([sfe.filename for sfe in task.submission_format])
        provided = set(self.request.files.keys())
        if not (required == provided or (task_type.ALLOW_PARTIAL_SUBMISSION
                                         and required.issuperset(provided))):
            self.application.service.add_notification(
                participation.user.username, self.timestamp,
                self._("Invalid submission format!"),
                self._("Please select the correct files."), NOTIFICATION_ERROR)
            self.redirect("/tasks/%s/submissions" % quote(task.name, safe=''))
            return

        # Add submitted files. After this, files is a dictionary indexed
        # by *our* filenames (something like "output01.txt" or
        # "taskname.%l"), and whose value is a pair
        # (user_assigned_filename, content).
        files = {}
        for uploaded, data in self.request.files.iteritems():
            files[uploaded] = (data[0]["filename"], data[0]["body"])

        # If we allow partial submissions, implicitly we recover the
        # non-submitted files from the previous submission. And put them
        # in file_digests (i.e. like they have already been sent to FS).
        submission_lang = None
        file_digests = {}
        if task_type.ALLOW_PARTIAL_SUBMISSION and \
                last_submission_t is not None:
            for filename in required.difference(provided):
                if filename in last_submission_t.files:
                    # If we retrieve a language-dependent file from
                    # last submission, we take note that the language
                    # must be the same.
                    if "%l" in filename:
                        submission_lang = last_submission_t.language
                    file_digests[filename] = \
                        last_submission_t.files[filename].digest

        # We need to ensure that every time we have a .%l in our
        # filenames, the user has the extension of an allowed
        # language, and that all these are the same (i.e., no
        # mixed-language submissions).

        error = None
        for our_filename in files:
            user_filename = files[our_filename][0]
            if our_filename.find(".%l") != -1:
                lang = filename_to_language(user_filename)
                if lang is None:
                    error = self._("Cannot recognize submission's language.")
                    break
                elif submission_lang is not None and \
                        submission_lang != lang:
                    error = self._("All sources must be in the same language.")
                    break
                elif lang not in contest.languages:
                    error = self._("Language %s not allowed "
                                   "in this contest.") % lang
                    break
                else:
                    submission_lang = lang
        if error is not None:
            self.application.service.add_notification(
                participation.user.username, self.timestamp,
                self._("Invalid submission!"), error, NOTIFICATION_ERROR)
            self.redirect("/tasks/%s/submissions" % quote(task.name, safe=''))
            return

        # Check if submitted files are small enough.
        if any(len(f[1]) > config.max_submission_length
               for f in files.values()):
            self.application.service.add_notification(
                participation.user.username, self.timestamp,
                self._("Submission too big!"),
                self._("Each source file must be at most %d bytes long.") %
                config.max_submission_length, NOTIFICATION_ERROR)
            self.redirect("/tasks/%s/submissions" % quote(task.name, safe=''))
            return

        # All checks done, submission accepted.

        # Attempt to store the submission locally to be able to
        # recover a failure.
        if config.submit_local_copy:
            try:
                path = os.path.join(
                    config.submit_local_copy_path.replace(
                        "%s", config.data_dir), participation.user.username)
                if not os.path.exists(path):
                    os.makedirs(path)
                # Pickle in ASCII format produces str, not unicode,
                # therefore we open the file in binary mode.
                with io.open(
                        os.path.join(path,
                                     "%d" % make_timestamp(self.timestamp)),
                        "wb") as file_:
                    pickle.dump((self.contest.id, participation.user.id,
                                 task.id, files), file_)
            except Exception as error:
                logger.warning("Submission local copy failed.", exc_info=True)

        # We now have to send all the files to the destination...
        try:
            for filename in files:
                digest = self.application.service.file_cacher.put_file_content(
                    files[filename][1],
                    "Submission file %s sent by %s at %d." %
                    (filename, participation.user.username,
                     make_timestamp(self.timestamp)))
                file_digests[filename] = digest

        # In case of error, the server aborts the submission
        except Exception as error:
            logger.error("Storage failed! %s", error)
            self.application.service.add_notification(
                participation.user.username, self.timestamp,
                self._("Submission storage failed!"),
                self._("Please try again."), NOTIFICATION_ERROR)
            self.redirect("/tasks/%s/submissions" % quote(task.name, safe=''))
            return

        # All the files are stored, ready to submit!
        logger.info("All files stored for submission sent by %s",
                    participation.user.username)
        submission = Submission(self.timestamp,
                                submission_lang,
                                task=task,
                                participation=participation)

        for filename, digest in file_digests.items():
            self.sql_session.add(File(filename, digest, submission=submission))
        self.sql_session.add(submission)
        self.sql_session.commit()
        self.application.service.evaluation_service.new_submission(
            submission_id=submission.id)
        self.application.service.add_notification(
            participation.user.username, self.timestamp,
            self._("Submission received"),
            self._("Your submission has been received "
                   "and is currently being evaluated."), NOTIFICATION_SUCCESS)
        # The argument (encrypted submission id) is not used by CWS
        # (nor does it disclose information to the user), but it is
        # useful for automatic testing to obtain the submission id.
        # FIXME is it actually used by anything?
        self.redirect(
            "/tasks/%s/submissions?%s" %
            (quote(task.name, safe=''), encrypt_number(submission.id)))
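
For reference, a minimal sketch of the extension-based language detection that the ".%l" loop above relies on. The extension map here is made up for illustration; the real filename_to_language in CMS defines its languages elsewhere (Example #29 below shows an analogous which_language helper built on Submission.LANGUAGES_MAP).

import os

# Illustrative extension map only; the set of recognized languages in a
# real CMS installation is not defined by this sketch.
EXTENSION_TO_LANGUAGE = {".c": "c", ".cpp": "cpp", ".pas": "pas"}

def filename_to_language_sketch(user_filename):
    """Return the language matching user_filename's extension, or None."""
    extension = os.path.splitext(user_filename)[1]
    return EXTENSION_TO_LANGUAGE.get(extension)

# filename_to_language_sketch("taskname.cpp") -> "cpp"
# filename_to_language_sketch("taskname.txt") -> None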
Example #28
    def post(self, task_id):
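        # Admin task-edit handler: update the task's basic fields,
        # limits and token parameters, then its task type, score type
        # and submission format.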
        task = self.safe_get_item(Task, task_id)
        self.contest = task.contest
        task.name = self.get_argument("name", task.name)
        task.title = self.get_argument("title", task.title)
        time_limit = self.get_argument("time_limit",
                                       repr(task.time_limit))
        task.official_language = self.get_argument("official_language",
                                                   task.official_language)

        try:
            time_limit = float(time_limit)
            if time_limit < 0 or time_limit >= float("+inf"):
                raise ValueError("Time limit out of range.")
        except (TypeError, ValueError):
            self.write("Invalid time limit.")
            self.finish()
            return
        task.time_limit = time_limit

        try:
            task.memory_limit = self.get_non_negative_int(
                "memory_limit",
                task.memory_limit,
                allow_empty=False)
            if task.memory_limit == 0:
                raise ValueError("Memory limit is 0.")
            task.token_initial = self.get_non_negative_int(
                "token_initial",
                task.token_initial,
                allow_empty=False)
            task.token_max = self.get_non_negative_int(
                "token_max",
                task.token_max)
            task.token_total = self.get_non_negative_int(
                "token_total",
                task.token_total)
            task.token_min_interval = self.get_non_negative_int(
                "token_min_interval",
                task.token_min_interval)
            task.token_gen_time = self.get_non_negative_int(
                "token_gen_time",
                task.token_gen_time)
            task.token_gen_number = self.get_non_negative_int(
                "token_gen_number",
                task.token_gen_number)
        except ValueError as error:
            self.write("Invalid fields. %r" % error)
            self.finish()
            return

        for testcase in task.testcases:
            testcase.public = bool(self.get_argument("testcase_%s_public" %
                                                     testcase.num, False))

        task.task_type = self.get_argument("task_type", "")

        # Look for a task type with the specified name.
        try:
            task_type_class = get_task_type(task=task)
        except KeyError:
            # Task type not found.
            self.application.service.add_notification(
                int(time.time()),
                "Invalid field",
                "Task type not recognized: %s." % task.task_type)
            self.redirect("/task/%s" % task_id)
            return

        task_type_parameters = task_type_class.parse_handler(
            self, "TaskTypeOptions_%s_" % task.task_type)

        task.task_type_parameters = json.dumps(task_type_parameters)

        task.score_type = self.get_argument("score_type", "")

        task.score_parameters = self.get_argument("score_parameters", "")

        submission_format = self.get_argument("submission_format", "")
        if submission_format not in ["", "[]"] and \
                submission_format != json.dumps(
                    [x.filename for x in task.submission_format]):
            try:
                format_list = json.loads(submission_format)
                for element in task.submission_format:
                    self.sql_session.delete(element)
                del task.submission_format[:]
                for element in format_list:
                    self.sql_session.add(SubmissionFormatElement(str(element),
                                                                 task))
            except Exception as error:
                self.sql_session.rollback()
                logger.info(repr(error))
                self.application.service.add_notification(
                    int(time.time()),
                    "Invalid field",
                    "Submission format not recognized.")
                self.redirect("/task/%s" % task_id)
                return

        if try_commit(self.sql_session, self):
            self.application.service.scoring_service.reinitialize()
        self.redirect("/task/%s" % task_id)
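
Example #28 above leans on a get_non_negative_int helper from the handler base class. A hedged sketch of the parsing such a helper could perform (the name and behavior below are assumptions, not the actual CMS implementation); a handler method would first fetch the raw string with self.get_argument.

def parse_non_negative_int(raw_value, default, allow_empty=True):
    """Sketch: turn a raw form value into a non-negative int."""
    if raw_value is None or raw_value == "":
        if not allow_empty:
            raise ValueError("Field must not be empty.")
        return default
    value = int(raw_value)  # raises ValueError on malformed input
    if value < 0:
        raise ValueError("Value must be non-negative.")
    return value

# parse_non_negative_int("512", None) -> 512
# parse_non_negative_int("", 256) -> 256 (empty allowed: keep default)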
Example #29
    def post(self, task_id):
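        # Older CWS submission handler: the same validate/store/enqueue
        # flow as in the newer handlers, but keyed on user rather than
        # on participation.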

        self.timestamp = self.r_params["timestamp"]

        self.task_id = task_id
        self.task = Task.get_from_id(task_id, self.sql_session)

        if self.current_user is None or self.task is None or self.task.contest != self.contest:
            raise tornado.web.HTTPError(404)

        # Enforce minimum time between submissions for the same task.
        last_submission = (
            self.sql_session.query(Submission)
            .filter_by(task_id=self.task.id)
            .filter_by(user_id=self.current_user.id)
            .order_by(Submission.timestamp.desc())
            .first()
        )
        if last_submission is not None and self.timestamp - last_submission.timestamp < config.min_submission_interval:
            self.application.service.add_notification(
                self.current_user.username,
                int(time.time()),
                self._("Submissions too frequent!"),
                self._("For each task, you can submit " "again after %s seconds from last submission.")
                % config.min_submission_interval,
            )
            self.redirect("/tasks/%s" % encrypt_number(self.task.id))
            return

        # Ensure that the user did not submit multiple files with the
        # same name.
        if any(len(x) != 1 for x in self.request.files.values()):
            self.application.service.add_notification(
                self.current_user.username,
                int(time.time()),
                self._("Invalid submission format!"),
                self._("Please select the correct files."),
            )
            self.redirect("/tasks/%s" % encrypt_number(self.task.id))
            return

        # If the user submitted an archive, extract it and use content
        # as request.files.
        if len(self.request.files) == 1 and self.request.files.keys()[0] == "submission":
            archive_data = self.request.files["submission"][0]
            del self.request.files["submission"]

            # Extract the files from the archive.
            temp_archive_file, temp_archive_filename = tempfile.mkstemp(dir=config.temp_dir)
            with os.fdopen(temp_archive_file, "wb") as temp_archive_file:
                temp_archive_file.write(archive_data["body"])

            archive_contents = extract_archive(temp_archive_filename, archive_data["filename"])

            if archive_contents is None:
                self.application.service.add_notification(
                    self.current_user.username,
                    int(time.time()),
                    self._("Invalid archive format!"),
                    self._("The submitted archive could not be opened."),
                )
                self.redirect("/tasks/%s" % encrypt_number(self.task.id))
                return

            for item in archive_contents:
                self.request.files[item["filename"]] = [item]

        # This ensure that the user sent one file for every name in
        # submission format and no more. Less is acceptable if task
        # type says so.
        task_type = get_task_type(task=self.task)
        required = set([x.filename for x in self.task.submission_format])
        provided = set(self.request.files.keys())
        if not (required == provided or (task_type.ALLOW_PARTIAL_SUBMISSION and required.issuperset(provided))):
            self.application.service.add_notification(
                self.current_user.username,
                int(time.time()),
                self._("Invalid submission format!"),
                self._("Please select the correct files."),
            )
            self.redirect("/tasks/%s" % encrypt_number(self.task.id))
            return

        # Add submitted files. After this, self.files is a dictionary
        # indexed by *our* filenames (something like "output01.txt" or
        # "taskname.%l"), and whose value is a pair
        # (user_assigned_filename, content).
        self.files = {}
        for uploaded, data in self.request.files.iteritems():
            self.files[uploaded] = (data[0]["filename"], data[0]["body"])

        # If we allow partial submissions, implicitly we recover the
        # non-submitted files from the previous submission. And put
        # them in self.file_digests (i.e., like they have already been
        # sent to FS).
        self.submission_lang = None
        self.file_digests = {}
        self.retrieved = 0
        if task_type.ALLOW_PARTIAL_SUBMISSION and last_submission is not None:
            for filename in required.difference(provided):
                if filename in last_submission.files:
                    # If we retrieve a language-dependent file from
                    # last submission, we take note that the language
                    # must be the same.
                    if "%l" in filename:
                        self.submission_lang = last_submission.language
                    self.file_digests[filename] = last_submission.files[filename].digest
                    self.retrieved += 1

        # We need to ensure that every time we have a .%l in our
        # filenames, the user has one amongst ".cpp", ".c", or ".pas",
        # and that all these are the same (i.e., no mixed-language
        # submissions).
        def which_language(user_filename):
            """Determine the language of user_filename from its
            extension.

            user_filename (string): the file to test.
            return (string): the extension of user_filename, or None
                             if it is not a recognized language.

            """
            extension = os.path.splitext(user_filename)[1]
            try:
                return Submission.LANGUAGES_MAP[extension]
            except KeyError:
                return None

        error = None
        for our_filename in self.files:
            user_filename = self.files[our_filename][0]
            if our_filename.find(".%l") != -1:
                lang = which_language(user_filename)
                if lang is None:
                    error = self._("Cannot recognize submission's language.")
                    break
                elif self.submission_lang is not None and self.submission_lang != lang:
                    error = self._("All sources must be in the same language.")
                    break
                else:
                    self.submission_lang = lang
        if error is not None:
            self.application.service.add_notification(
                self.current_user.username, int(time.time()), self._("Invalid submission!"), error
            )
            self.redirect("/tasks/%s" % encrypt_number(self.task.id))
            return

        # Check if submitted files are small enough.
        if any(len(f[1]) > config.max_submission_length for f in self.files.values()):
            self.application.service.add_notification(
                self.current_user.username,
                int(time.time()),
                self._("Submission too big!"),
                self._("Each file must be at most %d bytes long.") % config.max_submission_length,
            )
            self.redirect("/tasks/%s" % encrypt_number(self.task.id))
            return

        # All checks done, submission accepted.

        # Attempt to store the submission locally to be able to
        # recover a failure.
        self.local_copy_saved = False

        if config.submit_local_copy:
            try:
                path = os.path.join(
                    config.submit_local_copy_path.replace("%s", config.data_dir), self.current_user.username
                )
                if not os.path.exists(path):
                    os.makedirs(path)
                # Pickle produces binary data, so write the local copy
                # in binary mode rather than through a utf-8 codec.
                with open(os.path.join(path, str(self.timestamp)), "wb") as file_:
                    pickle.dump((self.contest.id, self.current_user.id, self.task, self.files), file_)
                self.local_copy_saved = True
            except Exception as error:
                logger.error("Submission local copy failed - %s" % traceback.format_exc())
        self.username = self.current_user.username
        self.sql_session.close()

        # We now have to send all the files to the destination...
        try:
            for filename in self.files:
                digest = self.application.service.file_cacher.put_file(
                    description="Submission file %s sent by %s at %d." % (filename, self.username, self.timestamp),
                    binary_data=self.files[filename][1],
                )
                self.file_digests[filename] = digest

        # In case of error, the server aborts the submission
        except Exception as error:
            logger.error("Storage failed! %s" % error)
            if self.local_copy_saved:
                message = "In case of emergency, this server has a local copy."
            else:
                message = "No local copy stored! Your submission was ignored."
            self.application.service.add_notification(
                self.username, int(time.time()), self._("Submission storage failed!"), self._(message)
            )
            self.redirect("/tasks/%s" % encrypt_number(self.task_id))
            return

        # All the files are stored, ready to submit!
        self.sql_session = Session()
        current_user = self.get_current_user()
        self.task = Task.get_from_id(self.task_id, self.sql_session)
        logger.info("All files stored for submission sent by %s" % self.username)
        submission = Submission(
            user=current_user, task=self.task, timestamp=self.timestamp, files={}, language=self.submission_lang
        )

        for filename, digest in self.file_digests.items():
            self.sql_session.add(File(digest, filename, submission))
        self.sql_session.add(submission)
        self.sql_session.commit()
        self.r_params["submission"] = submission
        self.r_params["warned"] = False
        self.application.service.evaluation_service.new_submission(submission_id=submission.id)
        self.application.service.add_notification(
            self.username,
            int(time.time()),
            self._("Submission received"),
            self._("Your submission has been received " "and is currently being evaluated."),
        )
        # The argument (encrypted submission id) is not used by CWS
        # (nor does it disclose information to the user), but it is
        # useful for automatic testing to obtain the submission id.
        self.redirect("/tasks/%s?%s" % (encrypt_number(self.task.id), encrypt_number(submission.id)))
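
A standalone illustration of the minimum-interval check used by these handlers. In the newer handlers the timestamps are datetime objects and the interval is a timedelta, so the subtraction compares directly; the values below are made up.

from datetime import datetime, timedelta

min_submission_interval = timedelta(seconds=60)  # assumed config value
last_timestamp = datetime(2024, 1, 1, 10, 0, 0)
now = datetime(2024, 1, 1, 10, 0, 30)

# True: only 30 seconds have passed, so the submission is rejected
# (unless the user is unrestricted).
too_frequent = (now - last_timestamp) < min_submission_interval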
Example #30
    def post(self, task_name):
        participation = self.current_user
        try:
            task = self.contest.get_task(task_name)
        except KeyError:
            raise tornado.web.HTTPError(404)

        self.fallback_page = ["tasks", task.name, "submissions"]

        # Alias for easy access
        contest = self.contest

        # Enforce maximum number of submissions
        try:
            if contest.max_submission_number is not None:
                submission_c = self.sql_session\
                    .query(func.count(Submission.id))\
                    .join(Submission.task)\
                    .filter(Task.contest == contest)\
                    .filter(Submission.participation == participation)\
                    .scalar()
                if submission_c >= contest.max_submission_number and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("You have reached the maximum limit of "
                               "at most %d submissions among all tasks.") %
                        contest.max_submission_number)
            if task.max_submission_number is not None:
                submission_t = self.sql_session\
                    .query(func.count(Submission.id))\
                    .filter(Submission.task == task)\
                    .filter(Submission.participation == participation)\
                    .scalar()
                if submission_t >= task.max_submission_number and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("You have reached the maximum limit of "
                               "at most %d submissions on this task.") %
                        task.max_submission_number)
        except ValueError as error:
            self._send_error(self._("Too many submissions!"), error.message)
            return

        # Enforce minimum time between submissions
        try:
            if contest.min_submission_interval is not None:
                last_submission_c = self.sql_session.query(Submission)\
                    .join(Submission.task)\
                    .filter(Task.contest == contest)\
                    .filter(Submission.participation == participation)\
                    .order_by(Submission.timestamp.desc())\
                    .first()
                if last_submission_c is not None and \
                        self.timestamp - last_submission_c.timestamp < \
                        contest.min_submission_interval and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("Among all tasks, you can submit again "
                               "after %d seconds from last submission.") %
                        contest.min_submission_interval.total_seconds())
            # We get the last submission even if we may not need it
            # for min_submission_interval because we may need it later,
            # in case this is an ALLOW_PARTIAL_SUBMISSION task.
            last_submission_t = self.sql_session.query(Submission)\
                .filter(Submission.task == task)\
                .filter(Submission.participation == participation)\
                .order_by(Submission.timestamp.desc())\
                .first()
            if task.min_submission_interval is not None:
                if last_submission_t is not None and \
                        self.timestamp - last_submission_t.timestamp < \
                        task.min_submission_interval and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("For this task, you can submit again "
                               "after %d seconds from last submission.") %
                        task.min_submission_interval.total_seconds())
        except ValueError as error:
            self._send_error(self._("Submissions too frequent!"),
                             error.message)
            return

        # Required files from the user.
        required = set([sfe.filename for sfe in task.submission_format])

        # Ensure that the user did not submit multiple files with the
        # same name.
        if any(len(file_list) != 1
               for file_list in self.request.files.values()):
            self._send_error(self._("Invalid submission format!"),
                             self._("Please select the correct files."))
            return

        # If the user submitted an archive, extract it and use content
        # as request.files. This is only valid for "output only" tasks
        # (i.e., not for submissions that require a programming
        # language identification).
        if len(self.request.files) == 1 and \
                self.request.files.keys()[0] == "submission":
            if any(filename.endswith(".%l") for filename in required):
                self._send_error(self._("Invalid submission format!"),
                                 self._("Please select the correct files."),
                                 task)
                return
            archive_data = self.request.files["submission"][0]
            del self.request.files["submission"]

            # Create the archive.
            archive = Archive.from_raw_data(archive_data["body"])

            if archive is None:
                self._send_error(
                    self._("Invalid archive format!"),
                    self._("The submitted archive could not be opened."))
                return

            # Extract the archive.
            unpacked_dir = archive.unpack()
            for name in archive.namelist():
                filename = os.path.basename(name)
                if filename not in required:
                    continue
                with open(os.path.join(unpacked_dir, name), "rb") as member:
                    body = member.read()
                self.request.files[filename] = [{
                    'filename': filename,
                    'body': body
                }]

            archive.cleanup()

        # This ensures that the user sent one file for every name in
        # submission format and no more. Less is acceptable if task
        # type says so.
        task_type = get_task_type(dataset=task.active_dataset)
        provided = set(self.request.files.keys())
        if not (required == provided or (task_type.ALLOW_PARTIAL_SUBMISSION
                                         and required.issuperset(provided))):
            self._send_error(self._("Invalid submission format!"),
                             self._("Please select the correct files."))
            return

        # Add submitted files. After this, files is a dictionary indexed
        # by *our* filenames (something like "output01.txt" or
        # "taskname.%l"), and whose value is a pair
        # (user_assigned_filename, content).
        files = {}
        for uploaded, data in self.request.files.iteritems():
            files[uploaded] = (data[0]["filename"], data[0]["body"])

        # Read the submission language provided in the request; we
        # integrate it with the language fetched from the previous
        # submission (if we use it) and later make sure it is
        # recognized and allowed.
        submission_lang = self.get_argument("language", None)
        need_lang = any(
            our_filename.find(".%l") != -1 for our_filename in files)

        # If we allow partial submissions, we implicitly recover the
        # non-submitted files from the previous submission (if it has
        # the same programming language of the current one), and put
        # them in file_digests (since they are already in FS).
        file_digests = {}
        # if task_type.ALLOW_PARTIAL_SUBMISSION and \
        #         last_submission_t is not None and \
        #         (submission_lang is None or
        #          submission_lang == last_submission_t.language):
        #     submission_lang = last_submission_t.language
        #     for filename in required.difference(provided):
        #         if filename in last_submission_t.files:
        #             file_digests[filename] = \
        #                 last_submission_t.files[filename].digest

        # Throw an error if task needs a language, but we don't have
        # it or it is not allowed / recognized.
        if need_lang:
            error = None
            if submission_lang is None:
                error = self._("Cannot recognize the submission language.")
            elif submission_lang not in contest.languages:
                error = self._("Language %s not allowed in this contest.") \
                    % submission_lang
            if error is not None:
                self._send_error(self._("Invalid submission!"), error)
                return

        # Check if submitted files are small enough.
        if sum(len(f[1])
               for f in files.values()) > config.max_submission_length:
            self._send_error(
                self._("Submission too big!"),
                self._("Size of each submission must be at most %d bytes.") %
                config.max_submission_length)
            return

        # All checks done, submission accepted.

        # Attempt to store the submission locally to be able to
        # recover a failure.
        if config.submit_local_copy:
            try:
                path = os.path.join(
                    config.submit_local_copy_path.replace(
                        "%s", config.data_dir), participation.user.username)
                if not os.path.exists(path):
                    os.makedirs(path)
                # Pickle in ASCII format produces str, not unicode,
                # therefore we open the file in binary mode.
                with io.open(
                        os.path.join(path,
                                     "%d" % make_timestamp(self.timestamp)),
                        "wb") as file_:
                    pickle.dump((self.contest.id, participation.user.id,
                                 task.id, files), file_)
            except Exception as error:
                logger.warning("Submission local copy failed.", exc_info=True)

        # We now have to send all the files to the destination...
        try:
            for filename in files:
                digest = self.application.service.file_cacher.put_file_content(
                    files[filename][1],
                    "Submission file %s sent by %s at %d." %
                    (filename, participation.user.username,
                     make_timestamp(self.timestamp)))
                file_digests[filename] = digest

        # In case of error, the server aborts the submission
        except Exception as error:
            logger.error("Storage failed! %s", error)
            self._send_error(self._("Submission storage failed!"),
                             self._("Please try again."))
            return

        # All the files are stored, ready to submit!
        logger.info("All files stored for submission sent by %s",
                    participation.user.username)

        # Only set the official bit when the user can compete and we are not in
        # analysis mode.
        official = self.r_params["actual_phase"] == 0

        submission = Submission(self.timestamp,
                                submission_lang,
                                task=task,
                                participation=participation,
                                official=official)

        for filename, digest in file_digests.items():
            self.sql_session.add(File(filename, digest, submission=submission))
        self.sql_session.add(submission)
        self.sql_session.commit()

        # Store some data out of the session so we can close it before issuing
        # RPCs.
        username = participation.user.username
        submission_id = submission.id
        logger.metric("submission_added",
                      submission_id=submission.id,
                      language=submission.language,
                      task_id=task.id,
                      participant_id=participation.id,
                      value=1)

        self.sql_session.close()
        try:
            random_service(self.application.service.evaluation_services)\
                .new_submission(submission_id=submission_id)
        except IndexError:
            logger.error("No evaluation services found. "
                         "Leaving the submission to be "
                         "discovered by sweep.")
        self.application.service.add_notification(
            username, self.timestamp, self._("Submission received"),
            self._("Your submission has been received "
                   "and is currently being evaluated."), NOTIFICATION_SUCCESS)

        # The argument (encrypted submission id) is not used by CWS
        # (nor does it disclose information to the user), but it is
        # useful for automatic testing to obtain the submission id.
        self.redirect(
            self.contest_url(*self.fallback_page,
                             submission_id=encrypt_number(submission.id)))
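
A standalone illustration of the required/provided set check that every submit handler in this listing performs; the filenames are made up.

required = set(["taskname.%l", "grader.%l"])
provided = set(["taskname.%l"])
ALLOW_PARTIAL_SUBMISSION = True  # class attribute of the task type

# Accepted: everything provided is required, and the task type allows
# the missing files to be recovered from the previous submission.
ok = (required == provided or
      (ALLOW_PARTIAL_SUBMISSION and required.issuperset(provided)))
assert ok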
Example #31
    def post(self, task_name):
        participation = self.current_user

        if not self.r_params["testing_enabled"]:
            self.redirect("/")
            return

        try:
            task = self.contest.get_task(task_name)
        except KeyError:
            raise tornado.web.HTTPError(404)

        # Check that the task is testable
        task_type = get_task_type(dataset=task.active_dataset)
        if not task_type.testable:
            logger.warning("User %s tried to make test on task %s.",
                           participation.user.username, task_name)
            raise tornado.web.HTTPError(404)

        # Alias for easy access
        contest = self.contest

        # Enforce maximum number of user_tests
        try:
            if contest.max_user_test_number is not None:
                user_test_c = self.sql_session.query(func.count(UserTest.id))\
                    .join(UserTest.task)\
                    .filter(Task.contest == contest)\
                    .filter(UserTest.participation == participation)\
                    .scalar()
                if user_test_c >= contest.max_user_test_number and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("You have reached the maximum limit of "
                               "at most %d tests among all tasks.") %
                        contest.max_user_test_number)
            if task.max_user_test_number is not None:
                user_test_t = self.sql_session.query(func.count(UserTest.id))\
                    .filter(UserTest.task == task)\
                    .filter(UserTest.participation == participation)\
                    .scalar()
                if user_test_t >= task.max_user_test_number and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("You have reached the maximum limit of "
                               "at most %d tests on this task.") %
                        task.max_user_test_number)
        except ValueError as error:
            self._send_error(self._("Too many tests!"), error.message, task)
            return

        # Enforce minimum time between user_tests
        try:
            if contest.min_user_test_interval is not None:
                last_user_test_c = self.sql_session.query(UserTest)\
                    .join(UserTest.task)\
                    .filter(Task.contest == contest)\
                    .filter(UserTest.participation == participation)\
                    .order_by(UserTest.timestamp.desc())\
                    .first()
                if last_user_test_c is not None and \
                        self.timestamp - last_user_test_c.timestamp < \
                        contest.min_user_test_interval and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("Among all tasks, you can test again "
                               "after %d seconds from last test.") %
                        contest.min_user_test_interval.total_seconds())
            # We get the last user_test even if we may not need it
            # for min_user_test_interval because we may need it later,
            # in case this is an ALLOW_PARTIAL_SUBMISSION task.
            last_user_test_t = self.sql_session.query(UserTest)\
                .filter(UserTest.participation == participation)\
                .filter(UserTest.task == task)\
                .order_by(UserTest.timestamp.desc())\
                .first()
            if task.min_user_test_interval is not None:
                if last_user_test_t is not None and \
                        self.timestamp - last_user_test_t.timestamp < \
                        task.min_user_test_interval and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("For this task, you can test again "
                               "after %d seconds from last test.") %
                        task.min_user_test_interval.total_seconds())
        except ValueError as error:
            self._send_error(
                self._("Tests too frequent!"), error.message, task)
            return

        # Required files from the user.
        required = set([sfe.filename for sfe in task.submission_format] +
                       task_type.get_user_managers(task.submission_format) +
                       ["input"])

        # Ensure that the user did not submit multiple files with the
        # same name.
        if any(len(file_list) != 1
               for file_list in self.request.files.values()):
            self._send_error(
                self._("Invalid test format!"),
                self._("Please select the correct files."),
                task)
            return

        # If the user submitted an archive, extract it and use content
        # as request.files. This is only valid for "output only" tasks
        # (i.e., not for submissions that require a programming
        # language identification).
        if len(self.request.files) == 1 and \
                self.request.files.keys()[0] == "submission":
            if any(filename.endswith(".%l") for filename in required):
                self._send_error(
                    self._("Invalid test format!"),
                    self._("Please select the correct files."),
                    task)
                return
            archive_data = self.request.files["submission"][0]
            del self.request.files["submission"]

            # Create the archive.
            archive = Archive.from_raw_data(archive_data["body"])

            if archive is None:
                self._send_error(
                    self._("Invalid archive format!"),
                    self._("The submitted archive could not be opened."),
                    task)
                return

            # Extract the archive.
            unpacked_dir = archive.unpack()
            for name in archive.namelist():
                filename = os.path.basename(name)
                # Join with the full member name (not its basename) so
                # files inside subdirectories are found too.
                with open(os.path.join(unpacked_dir, name), "rb") as member:
                    body = member.read()
                self.request.files[filename] = [{
                    'filename': filename,
                    'body': body
                }]

            archive.cleanup()

        # This ensures that the user sent one file for every name in
        # submission format and no more. Less is acceptable if task
        # type says so.
        provided = set(self.request.files.keys())
        if not (required == provided or (task_type.ALLOW_PARTIAL_SUBMISSION
                                         and required.issuperset(provided))):
            self._send_error(
                self._("Invalid test format!"),
                self._("Please select the correct files."),
                task)
            return

        # Add submitted files. After this, files is a dictionary indexed
        # by *our* filenames (something like "output01.txt" or
        # "taskname.%l"), and whose value is a pair
        # (user_assigned_filename, content).
        files = {}
        for uploaded, data in self.request.files.iteritems():
            files[uploaded] = (data[0]["filename"], data[0]["body"])

        # If we allow partial submissions, implicitly we recover the
        # non-submitted files from the previous submission. And put them
        # in file_digests (i.e. like they have already been sent to FS).
        submission_lang = None
        file_digests = {}
        if task_type.ALLOW_PARTIAL_SUBMISSION and last_user_test_t is not None:
            for filename in required.difference(provided):
                if filename in last_user_test_t.files:
                    # If we retrieve a language-dependent file from
                    # last submission, we take note that the language
                    # must be the same.
                    if "%l" in filename:
                        submission_lang = last_user_test_t.language
                    file_digests[filename] = \
                        last_user_test_t.files[filename].digest

        # We need to ensure that every time we have a .%l in our
        # filenames, the user has one amongst ".cpp", ".c", or ".pas",
        # and that all these are the same (i.e., no mixed-language
        # submissions).

        error = None
        for our_filename in files:
            user_filename = files[our_filename][0]
            if our_filename.find(".%l") != -1:
                lang = filename_to_language(user_filename)
                if lang is None:
                    error = self._("Cannot recognize test's language.")
                    break
                elif submission_lang is not None and \
                        submission_lang != lang:
                    error = self._("All sources must be in the same language.")
                    break
                else:
                    submission_lang = lang
        if error is not None:
            self._send_error(self._("Invalid test!"), error, task)
            return

        # Check if submitted files are small enough.
        if any(len(f[1]) > config.max_submission_length
               for n, f in files.items() if n != "input"):
            self._send_error(
                self._("Test too big!"),
                self._("Each source file must be at most %d bytes long.") %
                config.max_submission_length,
                task)
            return
        if len(files["input"][1]) > config.max_input_length:
            self._send_error(
                self._("Input too big!"),
                self._("The input file must be at most %d bytes long.") %
                config.max_input_length,
                task)
            return

        # All checks done, submission accepted.

        # Attempt to store the submission locally to be able to
        # recover a failure.
        if config.tests_local_copy:
            try:
                path = os.path.join(
                    config.tests_local_copy_path.replace("%s",
                                                         config.data_dir),
                    participation.user.username)
                if not os.path.exists(path):
                    os.makedirs(path)
                # Pickle in ASCII format produces str, not unicode,
                # therefore we open the file in binary mode.
                with io.open(
                        os.path.join(path,
                                     "%d" % make_timestamp(self.timestamp)),
                        "wb") as file_:
                    pickle.dump((self.contest.id,
                                 participation.user.id,
                                 task.id,
                                 files), file_)
            except Exception as error:
                logger.error("Test local copy failed.", exc_info=True)

        # We now have to send all the files to the destination...
        try:
            for filename in files:
                digest = self.application.service.file_cacher.put_file_content(
                    files[filename][1],
                    "Test file %s sent by %s at %d." % (
                        filename, participation.user.username,
                        make_timestamp(self.timestamp)))
                file_digests[filename] = digest

        # In case of error, the server aborts the submission
        except Exception as error:
            logger.error("Storage failed! %s", error)
            self._send_error(
                self._("Test storage failed!"),
                self._("Please try again."),
                task)
            return

        # All the files are stored, ready to submit!
        logger.info("All files stored for test sent by %s",
                    participation.user.username)
        user_test = UserTest(self.timestamp,
                             submission_lang,
                             file_digests["input"],
                             participation=participation,
                             task=task)

        for filename in [sfe.filename for sfe in task.submission_format]:
            digest = file_digests[filename]
            self.sql_session.add(
                UserTestFile(filename, digest, user_test=user_test))
        for filename in task_type.get_user_managers(task.submission_format):
            digest = file_digests[filename]
            if submission_lang is not None:
                filename = filename.replace("%l", submission_lang)
            self.sql_session.add(
                UserTestManager(filename, digest, user_test=user_test))

        self.sql_session.add(user_test)
        self.sql_session.commit()
        self.application.service.evaluation_service.new_user_test(
            user_test_id=user_test.id)
        self.application.service.add_notification(
            participation.user.username,
            self.timestamp,
            self._("Test received"),
            self._("Your test has been received "
                   "and is currently being executed."),
            NOTIFICATION_SUCCESS)
        # The argument (encrypted user test id) is not used by CWS
        # (nor does it disclose information to the user), but it is
        # useful for automatic testing to obtain the user test id.
        self.redirect("/testing?%s&%s" % (
            quote(task.name, safe=''), encrypt_number(user_test.id)))
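
A minimal illustration of the ".%l" placeholder convention used just above when storing user test managers; the values are made up.

filename = "grader.%l"
submission_lang = "cpp"  # assumed detected language

# Language-dependent filenames carry a "%l" that is replaced by the
# detected language once it is known.
concrete_filename = filename.replace("%l", submission_lang)
assert concrete_filename == "grader.cpp"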
Example #32
    def post(self, contest_id):
        self.contest = self.safe_get_item(Contest, contest_id)

        name = self.get_argument("name", "")
        title = self.get_argument("title", "")
        time_limit = self.get_argument("time_limit", "")
        memory_limit = self.get_argument("memory_limit", "")
        official_language = self.get_argument("official_language", "")
        task_type = self.get_argument("task_type", "")

        # Look for a task type with the specified name.
        try:
            task_type_class = get_task_type(task_type_name=task_type)
        except KeyError:
            # Task type not found.
            self.application.service.add_notification(
                int(time.time()),
                "Invalid field",
                "Task type not recognized: %s." % task_type)
            self.redirect("/add_task/%s" % contest_id)
            return

        task_type_parameters = task_type_class.parse_handler(
            self, "TaskTypeOptions_%s_" % task_type)

        task_type_parameters = json.dumps(task_type_parameters)

        submission_format_choice = self.get_argument("submission_format", "")

        if submission_format_choice == "simple":
            submission_format = [SubmissionFormatElement("%s.%%l" % name)]
        elif submission_format_choice == "other":
            submission_format = self.get_argument("submission_format_other",
                                                  "")
            if submission_format not in ["", "[]"]:
                try:
                    format_list = json.loads(submission_format)
                    submission_format = []
                    for element in format_list:
                        submission_format.append(SubmissionFormatElement(
                            str(element)))
                except Exception as error:
                    self.sql_session.rollback()
                    logger.info(repr(error))
                    self.application.service.add_notification(
                        int(time.time()),
                        "Invalid field",
                        "Submission format not recognized.")
                    self.redirect("/add_task/%s" % contest_id)
                    return
        else:
            self.application.service.add_notification(
                int(time.time()),
                "Invalid field",
                "Submission format not recognized.")
            self.redirect("/add_task/%s" % contest_id)
            return

        score_type = self.get_argument("score_type", "")
        score_parameters = self.get_argument("score_parameters", "")

        statements = {}
        attachments = {}
        managers = {}
        testcases = []

        token_initial = self.get_non_negative_int(
            "token_initial",
            None,
            allow_empty=False)
        token_max = self.get_non_negative_int(
            "token_max",
            None)
        token_total = self.get_non_negative_int(
            "token_total",
            None)
        token_min_interval = self.get_non_negative_int(
            "token_min_interval",
            None)
        token_gen_time = self.get_non_negative_int(
            "token_gen_time",
            None)
        token_gen_number = self.get_non_negative_int(
            "token_gen_number",
            None)
        task = Task(name, title, statements, attachments,
                    time_limit, memory_limit, official_language,
                    task_type, task_type_parameters, submission_format,
                    managers, score_type, score_parameters, testcases,
                    token_initial, token_max, token_total,
                    token_min_interval, token_gen_time, token_gen_number,
                    contest=self.contest, num=len(self.contest.tasks))
        self.sql_session.add(task)
        if try_commit(self.sql_session, self):
            self.application.service.scoring_service.reinitialize()
        self.redirect("/task/%s" % task.id)
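
A standalone illustration of the submission-format JSON handling in Example #32; the format string is made up.

import json

submission_format_raw = '["taskname.%l", "output01.txt"]'
format_list = json.loads(submission_format_raw)

# Each element would then be wrapped as
# SubmissionFormatElement(str(element)) and attached to the task.
assert format_list == ["taskname.%l", "output01.txt"]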
Example #33
    def post(self, task_name):
        participation = self.current_user
        try:
            task = self.contest.get_task(task_name)
        except KeyError:
            raise tornado.web.HTTPError(404)

        # Alias for easy access
        contest = self.contest

        # Enforce maximum number of submissions
        try:
            if contest.max_submission_number is not None:
                submission_c = self.sql_session\
                    .query(func.count(Submission.id))\
                    .join(Submission.task)\
                    .filter(Task.contest == contest)\
                    .filter(Submission.participation == participation)\
                    .scalar()
                if submission_c >= contest.max_submission_number and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("You have reached the maximum limit of "
                               "at most %d submissions among all tasks.") %
                        contest.max_submission_number)
            if task.max_submission_number is not None:
                submission_t = self.sql_session\
                    .query(func.count(Submission.id))\
                    .filter(Submission.task == task)\
                    .filter(Submission.participation == participation)\
                    .scalar()
                if submission_t >= task.max_submission_number and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("You have reached the maximum limit of "
                               "at most %d submissions on this task.") %
                        task.max_submission_number)
        except ValueError as error:
            self._send_error(
                self._("Too many submissions!"), error.message, task)
            return

        # Enforce minimum time between submissions
        try:
            if contest.min_submission_interval is not None:
                last_submission_c = self.sql_session.query(Submission)\
                    .join(Submission.task)\
                    .filter(Task.contest == contest)\
                    .filter(Submission.participation == participation)\
                    .order_by(Submission.timestamp.desc())\
                    .first()
                if last_submission_c is not None and \
                        self.timestamp - last_submission_c.timestamp < \
                        contest.min_submission_interval and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("Among all tasks, you can submit again "
                               "after %d seconds from last submission.") %
                        contest.min_submission_interval.total_seconds())
            # We get the last submission even if we may not need it
            # for min_submission_interval, because we may need it
            # later, in case this is an ALLOW_PARTIAL_SUBMISSION task.
            last_submission_t = self.sql_session.query(Submission)\
                .filter(Submission.task == task)\
                .filter(Submission.participation == participation)\
                .order_by(Submission.timestamp.desc())\
                .first()
            if task.min_submission_interval is not None:
                if last_submission_t is not None and \
                        self.timestamp - last_submission_t.timestamp < \
                        task.min_submission_interval and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("For this task, you can submit again "
                               "after %d seconds from last submission.") %
                        task.min_submission_interval.total_seconds())
        except ValueError as error:
            self._send_error(
                self._("Submissions too frequent!"), error.message, task)
            return

        # Required files from the user.
        required = set([sfe.filename for sfe in task.submission_format])

        # Ensure that the user did not submit multiple files with the
        # same name.
        if any(len(filelist) != 1
               for filelist in self.request.files.values()):
            self._send_error(
                self._("Invalid submission format!"),
                self._("Please select the correct files."),
                task)
            return

        # If the user submitted an archive, extract it and use its
        # content as request.files. This is only valid for "output
        # only" tasks (i.e., not for submissions requiring a
        # programming language identification).
        if len(self.request.files) == 1 and \
                self.request.files.keys()[0] == "submission":
            if any(filename.endswith(".%l") for filename in required):
                self._send_error(
                    self._("Invalid submission format!"),
                    self._("Please select the correct files."),
                    task)
                return
            archive_data = self.request.files["submission"][0]
            del self.request.files["submission"]

            # Create the archive.
            archive = Archive.from_raw_data(archive_data["body"])

            if archive is None:
                self._send_error(
                    self._("Invalid archive format!"),
                    self._("The submitted archive could not be opened."),
                    task)
                return

            # Extract the archive.
            unpacked_dir = archive.unpack()
            for name in archive.namelist():
                # Join with the full entry name (not just its
                # basename), so that nested entries are found too.
                filename = os.path.basename(name)
                with open(os.path.join(unpacked_dir, name), "r") as entry:
                    body = entry.read()
                self.request.files[filename] = [{
                    'filename': filename,
                    'body': body
                }]

            archive.cleanup()

        # This ensures that the user sent one file for every name in
        # the submission format and no more. Fewer is acceptable if
        # the task type says so.
        task_type = get_task_type(dataset=task.active_dataset)
        provided = set(self.request.files.keys())
        if not (required == provided or (task_type.ALLOW_PARTIAL_SUBMISSION
                                         and required.issuperset(provided))):
            self._send_error(
                self._("Invalid submission format!"),
                self._("Please select the correct files."),
                task)
            return

        # Add submitted files. After this, files is a dictionary
        # indexed by *our* filenames (something like "output01.txt" or
        # "taskname.%l"), whose values are pairs
        # (user_assigned_filename, content).
        files = {}
        for uploaded, data in self.request.files.iteritems():
            files[uploaded] = (data[0]["filename"], data[0]["body"])

        # Read the submission language provided in the request; we
        # integrate it with the language fetched from the previous
        # submission (if we use it) and later make sure it is
        # recognized and allowed.
        submission_lang = self.get_argument("language", None)
        need_lang = any(".%l" in our_filename for our_filename in files)

        # If we allow partial submissions, we implicitly recover the
        # non-submitted files from the previous submission (if it has
        # the same programming language as the current one), and put
        # them in file_digests (since they are already in FS).
        file_digests = {}
        if task_type.ALLOW_PARTIAL_SUBMISSION and \
                last_submission_t is not None and \
                (submission_lang is None or
                 submission_lang == last_submission_t.language):
            submission_lang = last_submission_t.language
            for filename in required.difference(provided):
                if filename in last_submission_t.files:
                    file_digests[filename] = \
                        last_submission_t.files[filename].digest

        # Throw an error if the task needs a language, but we don't
        # have it or it is not allowed / recognized.
        if need_lang:
            error = None
            if submission_lang is None:
                error = self._("Cannot recognize the submission language.")
            elif submission_lang not in contest.languages:
                error = self._("Language %s not allowed in this contest.") \
                    % submission_lang
            if error is not None:
                self._send_error(self._("Invalid submission!"), error, task)
                return

        # Check if submitted files are small enough.
        if any(len(f[1]) > config.max_submission_length
               for f in files.values()):
            self._send_error(
                self._("Submission too big!"),
                self._("Each source file must be at most %d bytes long.") %
                config.max_submission_length,
                task)
            return

        # All checks done, submission accepted.

        # Attempt to store the submission locally to be able to
        # recover from a failure.
        if config.submit_local_copy:
            try:
                path = os.path.join(
                    config.submit_local_copy_path.replace("%s",
                                                          config.data_dir),
                    participation.user.username)
                if not os.path.exists(path):
                    os.makedirs(path)
                # Pickle in ASCII format produces str, not unicode,
                # therefore we open the file in binary mode.
                with io.open(
                        os.path.join(path,
                                     "%d" % make_timestamp(self.timestamp)),
                        "wb") as file_:
                    pickle.dump((self.contest.id,
                                 participation.user.id,
                                 task.id,
                                 files), file_)
            except Exception:
                logger.warning("Submission local copy failed.", exc_info=True)

        # We now have to send all the files to the destination...
        try:
            for filename in files:
                digest = self.application.service.file_cacher.put_file_content(
                    files[filename][1],
                    "Submission file %s sent by %s at %d." % (
                        filename, participation.user.username,
                        make_timestamp(self.timestamp)))
                file_digests[filename] = digest

        # In case of error, the server aborts the submission
        except Exception as error:
            logger.error("Storage failed! %s", error)
            self._send_error(
                self._("Submission storage failed!"),
                self._("Please try again."),
                task)
            return

        # All the files are stored, ready to submit!
        logger.info("All files stored for submission sent by %s",
                    participation.user.username)
        submission = Submission(self.timestamp,
                                submission_lang,
                                task=task,
                                participation=participation)

        for filename, digest in file_digests.items():
            self.sql_session.add(File(filename, digest, submission=submission))
        self.sql_session.add(submission)
        self.sql_session.commit()
        self.application.service.evaluation_service.new_submission(
            submission_id=submission.id)
        self.application.service.add_notification(
            participation.user.username,
            self.timestamp,
            self._("Submission received"),
            self._("Your submission has been received "
                   "and is currently being evaluated."),
            NOTIFICATION_SUCCESS)
        # The argument (the encrypted submission id) is not used by
        # CWS (nor does it disclose information to the user), but it
        # is useful for automatic testing to obtain the submission id.
        self.redirect("/tasks/%s/submissions?%s" % (
            quote(task.name, safe=''),
            encrypt_number(submission.id)))
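
The partial-submission logic above (ALLOW_PARTIAL_SUBMISSION) can be isolated into a small pure function. This is an illustrative sketch, not the CWS API: given the required filenames, the files provided now, and the previous submission's filename-to-digest map, it computes which old digests to inherit.

def recover_missing_files(required, provided, last_files):
    # Reuse the stored digest of every required file that was not
    # re-submitted but exists in the previous submission.
    inherited = {}
    for filename in required - provided:
        if filename in last_files:
            inherited[filename] = last_files[filename]
    return inherited

# A task wants two sources, but only one was re-submitted.
required = set(["encoder.%l", "decoder.%l"])
provided = set(["encoder.%l"])
last_files = {"encoder.%l": "digest-1", "decoder.%l": "digest-2"}
assert recover_missing_files(required, provided, last_files) == \
    {"decoder.%l": "digest-2"}
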
Example #34
File: Worker.py Project: kevin00036/cms
    def execute_job_group(self, job_group_dict):
        """Receive a group of jobs in a dict format and executes them
        one by one.

        job_group_dict (dict): a dictionary suitable to be imported
            from JobGroup.

        """
        job_group = JobGroup.import_from_dict(job_group_dict)

        if self.work_lock.acquire(False):

            try:
                self._ignore_job = False

                for k, job in job_group.jobs.iteritems():
                    logger.info("Starting job.",
                                extra={"operation": job.info})
                    #self.rpc_test(job_group_dict)

                    job.shard = self.shard

                    # FIXME This is actually kind of a workaround...
                    # The only TaskType that needs it is OutputOnly.
                    job._key = k

                    # FIXME We're creating a new TaskType for each Job
                    # even if, at the moment, a JobGroup always uses
                    # the same TaskType and the same parameters. Yet,
                    # this could change in the future, so the best
                    # solution is to keep a cache of TaskTypes objects
                    # (like ScoringService does with ScoreTypes, except
                    # that we cannot index by Dataset ID here...).
                    task_type = get_task_type(job.task_type,
                                              job.task_type_parameters)
                    task_type.execute_job(job, self.file_cacher)

                    logger.info("Finished job.",
                                extra={"operation": job.info})

                    if not job.success or self._ignore_job:
                        job_group.success = False
                        break
                else:
                    job_group.success = True

                return job_group.export_to_dict()

            except:
                err_msg = "Worker failed."
                logger.error(err_msg, exc_info=True)
                raise JobException(err_msg)

            finally:
                self.work_lock.release()

        else:
            err_msg = "Request received, but declined because of acquired " \
                "lock (Worker is busy executing another job group, this " \
                "should not happen: check if there are more than one ES " \
                "running, or for bugs in ES."
            logger.warning(err_msg)
            raise JobException(err_msg)
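
A detail worth noting in execute_job_group: job_group.success is set via Python's for/else, where the else branch runs only when the loop finishes without hitting break. A standalone illustration:

def run_all(jobs):
    # Break on the first failing job; the else clause is reached
    # only if no break occurred, i.e. every job succeeded.
    for job in jobs:
        if not job():
            success = False
            break
    else:
        success = True
    return success

assert run_all([lambda: True, lambda: True]) is True
assert run_all([lambda: True, lambda: False]) is False
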
Example #35
File: Test.py Project: saadtaame/cms
def test_testcases(base_dir, solution, language, assume=None):
    global task, file_cacher

    # Use a FileCacher with a NullBackend in order to avoid filling
    # the database with junk
    if file_cacher is None:
        file_cacher = FileCacher(null=True)

    cmscontrib.loaders.italy_yaml.logger = NullLogger()
    # Load the task
    # TODO - This implies copying a lot of data to the FileCacher,
    # which is annoying if you have to do it continuously; it would be
    # better to use a persistent cache (although local, possibly
    # filesystem-based instead of database-based) and somehow detect
    # when the task has already been loaded
    if task is None:
        loader = cmscontrib.loaders.italy_yaml.YamlLoader(
            base_dir, file_cacher)
        task = loader.get_task(get_statement=False)

    # Prepare the EvaluationJob
    dataset = task.active_dataset
    digest = file_cacher.put_file_from_path(
        os.path.join(base_dir, solution),
        "Solution %s for task %s" % (solution, task.name))
    executables = {task.name: Executable(filename=task.name, digest=digest)}
    jobs = [
        (t,
         EvaluationJob(
             operation=ESOperation(ESOperation.EVALUATION, None, dataset.id,
                                   dataset.testcases[t].codename).to_dict(),
             language=language,
             task_type=dataset.task_type,
             task_type_parameters=json.loads(dataset.task_type_parameters),
             managers=dict(dataset.managers),
             executables=executables,
             input=dataset.testcases[t].input,
             output=dataset.testcases[t].output,
             time_limit=dataset.time_limit,
             memory_limit=dataset.memory_limit)) for t in dataset.testcases
    ]
    tasktype = get_task_type(dataset=dataset)

    ask_again = True
    last_status = "ok"
    status = "ok"
    stop = False
    info = []
    points = []
    comments = []
    tcnames = []
    for jobinfo in sorted(jobs):
        print(jobinfo[0])
        sys.stdout.flush()
        job = jobinfo[1]
        # Skip the testcase if we decide to consider everything to
        # timeout
        if stop:
            info.append("Time limit exceeded")
            points.append(0.0)
            comments.append("Timeout.")
            move_cursor(directions.UP, erase=True)
            continue

        # Evaluate testcase
        last_status = status
        tasktype.evaluate(job, file_cacher)
        status = job.plus.get("exit_status")
        info.append(
            (job.plus.get("execution_time"), job.plus.get("execution_memory")))
        points.append(float(job.outcome))

        # Avoid printing unneeded newline
        job.text = [t.rstrip() for t in job.text]

        comments.append(format_status_text(job.text))
        tcnames.append(jobinfo[0])

        # If we saw two consecutive timeouts, ask whether we want to
        # consider everything to timeout
        if ask_again and status == "timeout" and last_status == "timeout":
            print("Want to stop and consider everything to timeout? [y/N] ",
                  end='')
            sys.stdout.flush()

            if assume is not None:
                tmp = assume
                print(tmp)
            else:
                # User input with a timeout of 5 seconds, at the end of which
                # we automatically say "n". ready will be a list of input ready
                # for reading, or an empty list if the timeout expired.
                # See: http://stackoverflow.com/a/2904057
                ready, _, _ = select.select([sys.stdin], [], [], 5)
                if ready:
                    tmp = sys.stdin.readline().strip().lower()
                else:
                    tmp = 'n'
                    print(tmp)

            if tmp in ['y', 'yes']:
                stop = True
            else:
                ask_again = False
            print()
        move_cursor(directions.UP, erase=True)

    # Subtasks scoring
    subtasks = json.loads(dataset.score_type_parameters)
    if not isinstance(subtasks, list) or len(subtasks) == 0:
        subtasks = [[100, len(info)]]

    if dataset.score_type == 'GroupMin':
        scoreFun = min
    else:
        if dataset.score_type != 'Sum':
            logger.warning("Score type %s not yet supported! Using Sum" %
                           dataset.score_type)

        def scoreFun(x):
            return sum(x) / len(x)

    pos = 0
    sts = []

    # For each subtask, generate the list of testcases it owns, the
    # score gained and the highest time and memory usage.
    for i in subtasks:
        stscores = []
        stsdata = []
        worst = [0, 0]
        try:
            for _ in xrange(i[1]):
                stscores.append(points[pos])
                stsdata.append(
                    (tcnames[pos], points[pos], comments[pos], info[pos]))
                if info[pos][0] > worst[0]:
                    worst[0] = info[pos][0]
                if info[pos][1] > worst[1]:
                    worst[1] = info[pos][1]
                pos += 1
            sts.append((scoreFun(stscores) * i[0], i[0], stsdata, worst))
        except:
            sts.append((0, i[0], stsdata, [0, 0]))

    # Result pretty printing
    # Strips sol/ and _EVAL from the solution's name
    solution = solution[4:-5]
    print()
    clen = max(len(c) for c in comments)
    for st, d in enumerate(sts):
        print(
            "Subtask %d:" % st,
            add_color_to_string(
                "%5.2f/%d" % (d[0], d[1]),
                colors.RED if abs(d[0] - d[1]) > 0.01 else colors.GREEN,
                bold=True))
        for (i, p, c, w) in d[2]:
            print("%s)" % i,
                  add_color_to_string(
                      "%5.2lf" % p,
                      colors.RED if abs(p - 1) > 0.01 else colors.BLACK),
                  "--- %s [Time:" % c.ljust(clen),
                  add_color_to_string(
                      ("%5.3f" % w[0]) if w[0] is not None else "N/A",
                      colors.BLUE if w[0] is not None
                      and w[0] >= 0.95 * d[3][0] else colors.BLACK),
                  "Memory:",
                  add_color_to_string(
                      "%5s" % mem_human(w[1]) if w[1] is not None else "N/A",
                      colors.BLUE if w[1] is not None
                      and w[1] >= 0.95 * d[3][1] else colors.BLACK,
                  ),
                  end="]")
            move_cursor(directions.RIGHT, 1000)
            move_cursor(directions.LEFT, len(solution) - 1)
            print(add_color_to_string(solution, colors.BLACK, bold=True))
    print()

    sols.append((solution, sum([st[0] for st in sts])))

    global tested_something
    if not tested_something:
        tested_something = True
        atexit.register(print_at_exit)

    return zip(points, comments, info)
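
The subtask loop above can be summarized by a small scoring function. A sketch under the same assumptions as the example: score_type_parameters is a list of [max_points, num_testcases] pairs, GroupMin takes the minimum outcome in each group, and Sum averages it.

def score_subtasks(points, subtasks, score_fun):
    # Walk the flat list of testcase outcomes, slicing out each
    # subtask's group and scaling the aggregated outcome.
    scores = []
    pos = 0
    for max_points, num_testcases in subtasks:
        group = points[pos:pos + num_testcases]
        scores.append(score_fun(group) * max_points)
        pos += num_testcases
    return scores

points = [1.0, 0.5, 1.0, 1.0, 0.0]
assert score_subtasks(points, [[100, 2], [50, 3]], min) == [50.0, 0.0]
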
Example #36
def test_testcases(base_dir, soluzione, language, assume=None):
    global task, file_cacher

    # Use a disabled FileCacher with an FSBackend in order to avoid
    # filling the database with junk and to save space.
    if file_cacher is None:
        file_cacher = FileCacher(path=os.path.join(config.cache_dir,
                                                   'cmsMake'),
                                 enabled=False)

    # Load the task
    if task is None:
        loader = YamlLoader(
            os.path.realpath(os.path.join(base_dir, "..")),
            file_cacher)
        # Normally we should import the contest first, but YamlLoader
        # accepts get_task() even without a previous get_contest() call
        task = loader.get_task(os.path.split(os.path.realpath(base_dir))[1])

    # Prepare the EvaluationJob
    dataset = task.active_dataset
    if dataset.task_type != "OutputOnly":
        digest = file_cacher.put_file_from_path(
            os.path.join(base_dir, soluzione),
            "Solution %s for task %s" % (soluzione, task.name))
        executables = {task.name: Executable(filename=task.name,
                                             digest=digest)}
        jobs = [(t, EvaluationJob(
            language=language,
            task_type=dataset.task_type,
            task_type_parameters=json.loads(dataset.task_type_parameters),
            managers=dict(dataset.managers),
            executables=executables,
            input=dataset.testcases[t].input,
            output=dataset.testcases[t].output,
            time_limit=dataset.time_limit,
            memory_limit=dataset.memory_limit)) for t in dataset.testcases]
        tasktype = get_task_type(dataset=dataset)
    else:
        print("Generating outputs...", end='')
        files = {}
        for t in sorted(dataset.testcases.keys()):
            with file_cacher.get_file(dataset.testcases[t].input) as fin:
                with TemporaryFile() as fout:
                    print(str(t), end='')
                    call(soluzione, stdin=fin, stdout=fout, cwd=base_dir)
                    fout.seek(0)
                    digest = file_cacher.put_file_from_fobj(fout)
                    outname = "output_%s.txt" % t
                    files[outname] = File(filename=outname, digest=digest)
        jobs = [(t, EvaluationJob(
            task_type=dataset.task_type,
            task_type_parameters=json.loads(dataset.task_type_parameters),
            managers=dict(dataset.managers),
            files=files,
            input=dataset.testcases[t].input,
            output=dataset.testcases[t].output,
            time_limit=dataset.time_limit,
            memory_limit=dataset.memory_limit)) for t in dataset.testcases]
        for k, job in jobs:
            job._key = k
        tasktype = get_task_type(dataset=dataset)
        print()

    ask_again = True
    last_status = "ok"
    status = "ok"
    stop = False
    info = []
    points = []
    comments = []
    tcnames = []
    for jobinfo in sorted(jobs):
        print(jobinfo[0], end='')
        sys.stdout.flush()
        job = jobinfo[1]
        # Skip the testcase if we decide to consider everything to
        # timeout
        if stop:
            info.append("Time limit exceeded")
            points.append(0.0)
            comments.append("Timeout.")
            continue

        # Evaluate testcase
        last_status = status
        tasktype.evaluate(job, file_cacher)
        if dataset.task_type != "OutputOnly":
            status = job.plus["exit_status"]
            info.append("Time: %5.3f   Wall: %5.3f   Memory: %s" %
                       (job.plus["execution_time"],
                        job.plus["execution_wall_clock_time"],
                        mem_human(job.plus["execution_memory"])))
        else:
            status = "ok"
            info.append("N/A")
        points.append(float(job.outcome))
        comments.append(format_status_text(job.text))
        tcnames.append(jobinfo[0])

        # If we saw two consecutive timeouts, ask whether we want to
        # consider everything to timeout
        if ask_again and status == "timeout" and last_status == "timeout":
            print()
            print("Want to stop and consider everything to timeout? [y/N]",
                  end='')
            if assume is not None:
                print(assume)
                tmp = assume
            else:
                tmp = raw_input().lower()
            if tmp in ['y', 'yes']:
                stop = True
            else:
                ask_again = False

    # Result pretty printing
    print()
    clen = max(len(c) for c in comments)
    ilen = max(len(i) for i in info)
    for (i, p, c, b) in zip(tcnames, points, comments, info):
        print("%s) %5.2lf --- %s [%s]" % (i, p, c.ljust(clen), b.center(ilen)))

    return zip(points, comments, info)
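
The OutputOnly branch above generates the outputs by piping each testcase input through the contestant's program. A self-contained sketch of that step, using subprocess.call in place of the example's call helper (assumed to behave similarly):

import subprocess
import tempfile

def generate_output(solution_cmd, input_path):
    # Run the solution once, feeding the testcase input on stdin
    # and capturing stdout as the candidate output.
    with open(input_path, "rb") as fin:
        with tempfile.TemporaryFile() as fout:
            subprocess.call(solution_cmd, stdin=fin, stdout=fout)
            fout.seek(0)
            return fout.read()
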
Example #37
def test_testcases(base_dir, solution, language, assume=None):
    global task, file_cacher

    # Use a FileCacher with a NullBackend in order to avoid filling
    # the database with junk
    if file_cacher is None:
        file_cacher = FileCacher(null=True)

    cmscontrib.loaders.italy_yaml.logger = NullLogger()
    # Load the task
    # TODO - This implies copying a lot of data to the FileCacher,
    # which is annoying if you have to do it continuously; it would be
    # better to use a persistent cache (although local, possibly
    # filesystem-based instead of database-based) and somehow detect
    # when the task has already been loaded
    if task is None:
        loader = cmscontrib.loaders.italy_yaml.YamlLoader(base_dir,
                                                          file_cacher)
        task = loader.get_task(get_statement=False)

    # Prepare the EvaluationJob
    dataset = task.active_dataset
    digest = file_cacher.put_file_from_path(
        os.path.join(base_dir, solution),
        "Solution %s for task %s" % (solution, task.name))
    executables = {task.name: Executable(filename=task.name, digest=digest)}
    jobs = [(t, EvaluationJob(
        language=language,
        task_type=dataset.task_type,
        task_type_parameters=json.loads(dataset.task_type_parameters),
        managers=dict(dataset.managers),
        executables=executables,
        input=dataset.testcases[t].input, output=dataset.testcases[t].output,
        time_limit=dataset.time_limit,
        memory_limit=dataset.memory_limit)) for t in dataset.testcases]
    tasktype = get_task_type(dataset=dataset)

    ask_again = True
    last_status = "ok"
    status = "ok"
    stop = False
    info = []
    points = []
    comments = []
    tcnames = []
    for jobinfo in sorted(jobs):
        print(jobinfo[0])
        sys.stdout.flush()
        job = jobinfo[1]
        # Skip the testcase if we decide to consider everything to
        # timeout
        if stop:
            info.append("Time limit exceeded")
            points.append(0.0)
            comments.append("Timeout.")
            move_cursor(directions.UP, erase=True)
            continue

        # Evaluate testcase
        last_status = status
        tasktype.evaluate(job, file_cacher)
        status = job.plus.get("exit_status")
        info.append((job.plus.get("execution_time"),
                     job.plus.get("execution_memory")))
        points.append(float(job.outcome))
        comments.append(format_status_text(job.text))
        tcnames.append(jobinfo[0])

        # If we saw two consecutive timeouts, ask whether we want to
        # consider everything to timeout
        if ask_again and status == "timeout" and last_status == "timeout":
            print("Want to stop and consider everything to timeout? [y/N]",
                  end='')
            if assume is not None:
                print(assume)
                tmp = assume
            else:
                tmp = raw_input().lower()
            if tmp in ['y', 'yes']:
                stop = True
            else:
                ask_again = False
            print()
        move_cursor(directions.UP, erase=True)

    # Subtasks scoring
    try:
        subtasks = json.loads(dataset.score_type_parameters)
        subtasks[0]
    except:
        subtasks = [[100, len(info)]]

    if dataset.score_type == 'GroupMin':
        scoreFun = min
    else:
        if dataset.score_type != 'Sum':
            logger.warning("Score type %s not yet supported! Using Sum"
                           % dataset.score_type)

        def scoreFun(x):
            return sum(x) / len(x)

    pos = 0
    sts = []

    # For each subtask, generate the list of testcases it owns, the
    # score gained and the highest time and memory usage.
    for i in subtasks:
        stscores = []
        stsdata = []
        worst = [0, 0]
        try:
            for _ in xrange(i[1]):
                stscores.append(points[pos])
                stsdata.append((tcnames[pos], points[pos],
                                comments[pos], info[pos]))
                if info[pos][0] > worst[0]:
                    worst[0] = info[pos][0]
                if info[pos][1] > worst[1]:
                    worst[1] = info[pos][1]
                pos += 1
            sts.append((scoreFun(stscores) * i[0], i[0], stsdata, worst))
        except:
            sts.append((0, i[0], stsdata, [0, 0]))

    # Result pretty printing
    # Strips sol/ and _EVAL from the solution's name
    solution = solution[4:-5]
    print()
    clen = max(len(c) for c in comments)
    for st, d in enumerate(sts):
        print(
            "Subtask %d:" % st,
            add_color_to_string(
                "%5.2f/%d" % (d[0], d[1]),
                colors.RED if abs(d[0] - d[1]) > 0.01 else colors.GREEN,
                bold=True
            )
        )
        for (i, p, c, w) in d[2]:
            print(
                "%s)" % i,
                add_color_to_string(
                    "%5.2lf" % p,
                    colors.RED if abs(p - 1) > 0.01 else colors.BLACK
                ),
                "--- %s [Time:" % c.ljust(clen),
                add_color_to_string(
                    ("%5.3f" % w[0]) if w[0] is not None else "N/A",
                    colors.BLUE if w[0] is not None and w[0] >= 0.95 * d[3][0]
                    else colors.BLACK
                ),
                "Memory:",
                add_color_to_string(
                    "%5s" % mem_human(w[1]) if w[1] is not None else "N/A",
                    colors.BLUE if w[1] is not None and w[1] >= 0.95 * d[3][1]
                    else colors.BLACK,
                ),
                end="]"
            )
            move_cursor(directions.RIGHT, 1000)
            move_cursor(directions.LEFT, len(solution) - 1)
            print(add_color_to_string(solution, colors.BLACK, bold=True))
    print()

    sols.append((solution, sum([st[0] for st in sts])))

    global tested_something
    if not tested_something:
        tested_something = True
        atexit.register(print_at_exit)

    return zip(points, comments, info)
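
This variant blocks on raw_input() when asking about consecutive timeouts, while example #35 polls stdin with a five-second timeout. The select-based pattern, extracted into a reusable sketch (names are illustrative):

import select
import sys

def prompt_with_timeout(timeout=5.0, default="n"):
    # Wait up to `timeout` seconds for a line on stdin; if nothing
    # arrives, pretend the user answered `default`.
    ready, _, _ = select.select([sys.stdin], [], [], timeout)
    if ready:
        return sys.stdin.readline().strip().lower()
    print(default)
    return default
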
Example #38
File: Test.py Project: laskarcyber/cms
def test_testcases(base_dir, soluzione, assume=None):
    global task, file_cacher

    # Use a FileCacher with a NullBackend in order to avoid filling
    # the database with junk
    if file_cacher is None:
        file_cacher = FileCacher(null=True)

    # Load the task
    # TODO - This implies copying a lot of data to the FileCacher,
    # which is annoying if you have to do it continuously; it would be
    # better to use a persistent cache (although local, possibly
    # filesystem-based instead of database-based) and somehow detect
    # when the task has already been loaded
    if task is None:
        loader = YamlLoader(
            os.path.realpath(os.path.join(base_dir, "..")),
            file_cacher)
        # Normally we should import the contest first, but YamlLoader
        # accepts get_task() even without a previous get_contest() call
        task = loader.get_task(os.path.split(os.path.realpath(base_dir))[1])

    # Prepare the EvaluationJob
    dataset = task.active_dataset
    digest = file_cacher.put_file_from_path(
        os.path.join(base_dir, soluzione),
        "Solution %s for task %s" % (soluzione, task.name))
    executables = {task.name: Executable(filename=task.name, digest=digest)}
    jobs = [(t, EvaluationJob(
        task_type=dataset.task_type,
        task_type_parameters=json.loads(dataset.task_type_parameters),
        managers=dict(dataset.managers),
        executables=executables,
        input=dataset.testcases[t].input, output=dataset.testcases[t].output,
        time_limit=dataset.time_limit,
        memory_limit=dataset.memory_limit)) for t in dataset.testcases]
    tasktype = get_task_type(dataset=dataset)

    ask_again = True
    last_status = "ok"
    status = "ok"
    stop = False
    info = []
    points = []
    comments = []
    tcnames = []
    for jobinfo in sorted(jobs):
        print jobinfo[0],
        sys.stdout.flush()
        job = jobinfo[1]
        # Skip the testcase if we decide to consider everything to
        # timeout
        if stop:
            info.append("Time limit exceeded")
            points.append(0.0)
            comments.append("Timeout.")
            continue

        # Evaluate testcase
        last_status = status
        tasktype.evaluate(job, file_cacher)
        status = job.plus["exit_status"]
        info.append("Time: %5.3f   Wall: %5.3f   Memory: %s" %
                   (job.plus["execution_time"],
                    job.plus["execution_wall_clock_time"],
                    mem_human(job.plus["execution_memory"])))
        points.append(float(job.outcome))
        comments.append(format_status_text(job.text))
        tcnames.append(jobinfo[0])

        # If we saw two consecutive timeouts, ask whether we want to
        # consider everything to timeout
        if ask_again and status == "timeout" and last_status == "timeout":
            print
            print "Want to stop and consider everything to timeout? [y/N]",
            if assume is not None:
                print assume
                tmp = assume
            else:
                tmp = raw_input().lower()
            if tmp in ['y', 'yes']:
                stop = True
            else:
                ask_again = False

    # Result pretty printing
    print
    clen = max(len(c) for c in comments)
    ilen = max(len(i) for i in info)
    for (i, p, c, b) in zip(tcnames, points, comments, info):
        print "%s) %5.2lf --- %s [%s]" % (i, p, c.ljust(clen), b.center(ilen))

    return zip(points, comments, info)
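
Several of these examples format memory usage with a mem_human helper that is never shown. A plausible minimal sketch, assuming a raw byte count and binary suffixes (the real helper may differ in rounding and units):

def mem_human(n):
    # Scale the byte count down until it fits under 1024, picking
    # the matching binary suffix.
    for unit in ("B", "KiB", "MiB"):
        if n < 1024:
            return "%g%s" % (n, unit)
        n /= 1024.0
    return "%g GiB" % n

assert mem_human(2048) == "2KiB"
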
Example #39
def test_testcases(base_dir, soluzione, language, assume=None):
    global task, file_cacher

    # Use a disabled FileCacher with an FSBackend in order to avoid
    # filling the database with junk and to save space.
    if file_cacher is None:
        file_cacher = FileCacher(path=os.path.join(config.cache_dir,
                                                   'cmsMake'),
                                 enabled=False)

    # Load the task
    if task is None:
        loader = YamlLoader(os.path.realpath(os.path.join(base_dir, "..")),
                            file_cacher)
        # Normally we should import the contest first, but YamlLoader
        # accepts get_task() even without a previous get_contest() call
        task = loader.get_task(os.path.split(os.path.realpath(base_dir))[1])

    # Prepare the EvaluationJob
    dataset = task.active_dataset
    if dataset.task_type != "OutputOnly":
        digest = file_cacher.put_file_from_path(
            os.path.join(base_dir, soluzione),
            "Solution %s for task %s" % (soluzione, task.name))
        executables = {
            task.name: Executable(filename=task.name, digest=digest)
        }
        jobs = [(t,
                 EvaluationJob(language=language,
                               task_type=dataset.task_type,
                               task_type_parameters=json.loads(
                                   dataset.task_type_parameters),
                               managers=dict(dataset.managers),
                               executables=executables,
                               input=dataset.testcases[t].input,
                               output=dataset.testcases[t].output,
                               time_limit=dataset.time_limit,
                               memory_limit=dataset.memory_limit))
                for t in dataset.testcases]
        tasktype = get_task_type(dataset=dataset)
    else:
        print("Generating outputs...", end='')
        files = {}
        for t in sorted(dataset.testcases.keys()):
            with file_cacher.get_file(dataset.testcases[t].input) as fin:
                with TemporaryFile() as fout:
                    print(str(t), end='')
                    call(soluzione, stdin=fin, stdout=fout, cwd=base_dir)
                    fout.seek(0)
                    digest = file_cacher.put_file_from_fobj(fout)
                    outname = "output_%s.txt" % t
                    files[outname] = File(filename=outname, digest=digest)
        jobs = [(t,
                 EvaluationJob(task_type=dataset.task_type,
                               task_type_parameters=json.loads(
                                   dataset.task_type_parameters),
                               managers=dict(dataset.managers),
                               files=files,
                               input=dataset.testcases[t].input,
                               output=dataset.testcases[t].output,
                               time_limit=dataset.time_limit,
                               memory_limit=dataset.memory_limit))
                for t in dataset.testcases]
        for k, job in jobs:
            job._key = k
        tasktype = get_task_type(dataset=dataset)
        print()

    ask_again = True
    last_status = "ok"
    status = "ok"
    stop = False
    info = []
    points = []
    comments = []
    tcnames = []
    for jobinfo in sorted(jobs):
        print(jobinfo[0], end='')
        sys.stdout.flush()
        job = jobinfo[1]
        # Skip the testcase if we decide to consider everything to
        # timeout
        if stop:
            info.append("Time limit exceeded")
            points.append(0.0)
            comments.append("Timeout.")
            continue

        # Evaluate testcase
        last_status = status
        tasktype.evaluate(job, file_cacher)
        if dataset.task_type != "OutputOnly":
            status = job.plus["exit_status"]
            info.append("Time: %5.3f   Wall: %5.3f   Memory: %s" %
                        (job.plus["execution_time"],
                         job.plus["execution_wall_clock_time"],
                         mem_human(job.plus["execution_memory"])))
        else:
            status = "ok"
            info.append("N/A")
        points.append(float(job.outcome))
        comments.append(format_status_text(job.text))
        tcnames.append(jobinfo[0])

        # If we saw two consecutive timeouts, ask whether we want to
        # consider everything to timeout
        if ask_again and status == "timeout" and last_status == "timeout":
            print()
            print("Want to stop and consider everything to timeout? [y/N]",
                  end='')
            if assume is not None:
                print(assume)
                tmp = assume
            else:
                tmp = raw_input().lower()
            if tmp in ['y', 'yes']:
                stop = True
            else:
                ask_again = False

    # Result pretty printing
    print()
    clen = max(len(c) for c in comments)
    ilen = max(len(i) for i in info)
    for (i, p, c, b) in zip(tcnames, points, comments, info):
        print("%s) %5.2lf --- %s [%s]" % (i, p, c.ljust(clen), b.center(ilen)))

    return zip(points, comments, info)
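
All of these variants return zip(points, comments, info), one triple per testcase. A caller might aggregate it as follows; the directory, solution path and language string here are purely illustrative:

results = test_testcases("./task_dir", "sol/solution_EVAL", "cpp")
total = sum(p for p, _, _ in results)
print("Outcome over %d testcases: %.2f" % (len(results), total))
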