Example #1
File: Checker.py  Project: strogo/cms-1
    def echo_callback(self, data, error=None):
        """Callback for check.

        """
        current = time.time()
        logger.debug("Checker.echo_callback")
        if error is not None:
            return
        try:
            service, time_ = data.split()
            time_ = float(time_)
            name, shard = service.split(",")
            shard = int(shard)
            service = ServiceCoord(name, shard)
            if service not in self.waiting_for or current - time_ > 10:
                logger.warning("Got late reply (%5.3lf s) from %s."
                            % (current - time_, service))
            else:
                if time_ - self.waiting_for[service] > 0.001:
                    logger.warning("Someone cheated on the timestamp?!")
                logger.info("Got reply (%5.3lf s) from %s."
                            % (current - time_, service))
                del self.waiting_for[service]
        except (ValueError, KeyError):
            logger.error("Echo answer mis-shapen.")
Example #2
File: TaskType.py  Project: volpino/cms
    def finish_evaluation_testcase(self, test_number, success, outcome=0, text="", to_log=None):
        """Finalize the operation of evaluating the submission on a
        testcase. Fill the information in the submission.

        test_number (int): number of testcase.
        success (bool): if the operation was successful.
        outcome (float): the outcome obtained by the submission on the
                         testcase.
        text (string): the reason of failure of the submission (if
                       any).
        to_log (string): inform us that an unexpected event has
                         happened.

        return (bool): success.

        """
        if to_log is not None:
            logger.warning(to_log)
        if "evaluations" not in self.result:
            self.result["evaluations"] = {}
        obj = self.result["evaluations"]
        obj[test_number] = {"success": success}
        if success:
            obj[test_number]["text"] = text
            obj[test_number]["outcome"] = outcome
            obj[test_number]["evaluation_shard"] = self.worker_shard
            obj[test_number]["evaluation_sandbox"] = self.sandbox_paths
            self.sandbox_paths = ""
        return success
Example #3
    def post(self, question_id):
        ref = self.get_argument("ref", "/")
        question = self.safe_get_item(Question, question_id)
        self.contest = question.user.contest
        reply_subject_code = self.get_argument("reply_question_quick_answer",
                                               "")
        question.reply_text = self.get_argument("reply_question_text", "")

        # Ignore invalid answers
        if reply_subject_code not in AdminWebServer.QUICK_ANSWERS:
            question.reply_subject = ""
        else:
            # Quick answer given, ignore long answer.
            question.reply_subject = \
                AdminWebServer.QUICK_ANSWERS[reply_subject_code]
            question.reply_text = ""

        question.reply_timestamp = int(time.time())

        self.sql_session.commit()

        logger.warning("Reply sent to user %s for question '%s'." %
                       (question.user.username, question.subject))

        self.redirect(ref)
Example #4
File: Checker.py  Project: sekouperry/cms
    def echo_callback(self, data, error=None):
        """Callback for check.

        """
        current = time.time()
        logger.debug("Checker.echo_callback")
        if error is not None:
            return
        try:
            service, time_ = data.split()
            time_ = float(time_)
            name, shard = service.split(",")
            shard = int(shard)
            service = ServiceCoord(name, shard)
            if service not in self.waiting_for or current - time_ > 10:
                logger.warning("Got late reply (%5.3lf s) from %s." %
                               (current - time_, service))
            else:
                if time_ - self.waiting_for[service] > 0.001:
                    logger.warning("Someone cheated on the timestamp?!")
                logger.info("Got reply (%5.3lf s) from %s." %
                            (current - time_, service))
                del self.waiting_for[service]
        except (ValueError, KeyError):
            logger.error("Echo answer mis-shapen.")
Example #5
File: Worker.py  Project: Mloc/cms
    def execute_job(self, job_dict):
        job = Job.import_from_dict_with_type(job_dict)

        if self.work_lock.acquire(False):

            try:
                logger.operation = "job '%s'" % (job.info)
                logger.info("Request received")
                job.shard = self.shard

                self.task_type = get_task_type(job, self.file_cacher)
                self.task_type.execute_job()
                logger.info("Request finished.")

                return job.export_to_dict()

            except:
                err_msg = "Worker failed on operation `%s'" % logger.operation
                logger.error("%s\n%s" % (err_msg, traceback.format_exc()))
                raise JobException(err_msg)

            finally:
                self.task_type = None
                self.session = None
                logger.operation = ""
                self.work_lock.release()

        else:
            err_msg = "Request '%s' received, " \
                "but declined because of acquired lock" % \
                (job.info)
            logger.warning(err_msg)
            raise JobException(err_msg)
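The example above declines new work when a non-blocking acquire on self.work_lock fails. A minimal standalone sketch of the same pattern, assuming a plain threading.Lock and hypothetical names (not the project's Worker class):

import threading

work_lock = threading.Lock()

def execute_once(job_info):
    # acquire(False) returns immediately: True if we got the lock, False if busy.
    if not work_lock.acquire(False):
        raise RuntimeError("Request '%s' declined because of acquired lock"
                           % job_info)
    try:
        return "result of %s" % job_info
    finally:
        # Always release, even if the work above raised.
        work_lock.release()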
Example #6
    def safe_get_file(self, digest, path, descr_path=None):
        """Get file from FileCacher ensuring that the digest is
        correct.

        digest (string): the digest of the file to retrieve.
        path (string): the path where to save the file.
        descr_path (string): the path where to save the description.

        return (bool): True if all ok, False if something wrong.

        """
        # First get the file
        try:
            self.file_cacher.get_file(digest, path=path)
        except Exception as error:
            logger.error("File %s could not retrieved from file server (%r)." %
                         (digest, error))
            return False

        # Then check the digest
        calc_digest = sha1sum(path)
        if digest != calc_digest:
            logger.error("File %s has wrong hash %s." % (digest, calc_digest))
            return False

        # If applicable, retrieve also the description
        if descr_path is not None:
            with open(descr_path, 'wb') as fout:
                try:
                    fout.write(self.file_cacher.describe(digest))
                except UnicodeEncodeError:
                    logger.warning("Caught a UnicodeDecodeError when writing "
                                   "the description for file %s" % (digest))

        return True
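safe_get_file relies on a sha1sum() helper that is not shown in this snippet. A plausible minimal implementation (an assumption, not necessarily the project's actual helper) would be:

import hashlib

def sha1sum(path):
    # Hash the file in chunks so large files need not fit in memory.
    hasher = hashlib.sha1()
    with open(path, "rb") as fobj:
        for chunk in iter(lambda: fobj.read(1024 * 1024), b""):
            hasher.update(chunk)
    return hasher.hexdigest()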
Example #7
    def post(self):
        # User can post only if we want.
        if not config.allow_questions:
            raise tornado.web.HTTPError(404)

        timestamp = int(time.time())
        question = Question(timestamp,
                            self.get_argument("question_subject", ""),
                            self.get_argument("question_text", ""),
                            user=self.current_user)
        self.sql_session.add(question)
        self.sql_session.commit()

        logger.warning("Question submitted by user %s."
                       % self.current_user.username)

        # Add "All ok" notification.
        self.application.service.add_notification(
            self.current_user.username,
            timestamp,
            self._("Question received"),
            self._("Your question has been received, you will be "
                   "notified when the it will be answered."))

        self.redirect("/communication")
Example #8
File: Worker.py  Project: kennyboy/cms
    def execute_job(self, job_dict):
        job = Job.import_from_dict_with_type(job_dict)

        if self.work_lock.acquire(False):

            try:
                logger.operation = "job '%s'" % (job.info)
                logger.info("Request received")
                job.shard = self.shard

                self.task_type = get_task_type(job, self.file_cacher)
                self.task_type.execute_job()
                logger.info("Request finished.")

                return job.export_to_dict()

            except:
                err_msg = "Worker failed on operation `%s'" % logger.operation
                logger.error("%s\n%s" % (err_msg, traceback.format_exc()))
                raise JobException(err_msg)

            finally:
                self.task_type = None
                self.session = None
                logger.operation = ""
                self.work_lock.release()

        else:
            err_msg = "Request '%s' received, " \
                "but declined because of acquired lock" % \
                (job.info)
            logger.warning(err_msg)
            raise JobException(err_msg)
Example #9
File: YamlLoader.py  Project: cbolk/cms
    def has_changed(self, name):
        """See docstring in class Loader

        """

        path = os.path.realpath(os.path.join(self.path, name))

        # If there is no .itime file, we assume that the task has changed
        if not os.path.exists(os.path.join(path, ".itime")):
            return True

        getmtime = lambda fname: os.stat(fname).st_mtime

        itime = getmtime(os.path.join(path, ".itime"))

        # Generate a task's list of files
        # Testcases
        files = []
        for filename in os.listdir(os.path.join(path, "input")):
            files.append(os.path.join(path, "input", filename))

        for filename in os.listdir(os.path.join(path, "output")):
            files.append(os.path.join(path, "output", filename))

        # Score file
        files.append(os.path.join(path, "gen", "GEN"))

        # Statement
        files.append(os.path.join(path, "statement", "statement.pdf"))
        files.append(os.path.join(path, "testo", "testo.pdf"))

        # Managers
        files.append(os.path.join(path, "check", "checker"))
        files.append(os.path.join(path, "cor", "correttore"))
        files.append(os.path.join(path, "check", "manager"))
        files.append(os.path.join(path, "cor", "manager"))
        for lang in LANGUAGES:
            files.append(os.path.join(path, "sol", "grader.%s" % lang))
        for other_filename in os.listdir(os.path.join(path, "sol")):
            if other_filename.endswith('.h') or \
                    other_filename.endswith('lib.pas'):
                files.append(os.path.join(path, "sol", other_filename))

        # Yaml
        files.append(os.path.join(self.path, name + ".yaml"))

        # Check if any of the files have changed
        for fname in files:
            if os.path.exists(fname):
                if getmtime(fname) > itime:
                    return True

        if os.path.exists(os.path.join(path, ".import_error")):
            logger.warning("Last attempt to import task %s failed,"
                           " I'm not trying again." % name)
        return False
Example #10
File: __init__.py  Project: bblackham/cms
def get_auth_types():
    auth_types = []
    for auth_type_name in config.auth_types:
        try:
            auth_types.append(get_auth_type(auth_type_name))
        except KeyError:
            logger.warning("Authentication type '%s' not found." % 
                           auth_type_name)

    return auth_types
Example #11
File: Worker.py  Project: s546360316/cms
    def execute_job_group(self, job_group_dict):
        job_group = JobGroup.import_from_dict(job_group_dict)

        if self.work_lock.acquire(False):

            try:
                self.ignore_job = False

                for k, job in job_group.jobs.iteritems():
                    logger.operation = "job '%s'" % (job.info)
                    logger.info("Request received")

                    job.shard = self.shard

                    # FIXME This is actually kind of a workaround...
                    # The only TaskType that needs it is OutputOnly.
                    job._key = k

                    # FIXME We're creating a new TaskType for each Job
                    # even if, at the moment, a JobGroup always uses
                    # the same TaskType and the same parameters. Yet,
                    # this could change in the future, so the best
                    # solution is to keep a cache of TaskTypes objects
                    # (like ScoringService does with ScoreTypes, except
                    # that we cannot index by Dataset ID here...).
                    task_type = get_task_type(job.task_type,
                                              job.task_type_parameters)
                    task_type.execute_job(job, self.file_cacher)

                    logger.info("Request finished.")

                    if not job.success or self.ignore_job:
                        job_group.success = False
                        break
                else:
                    job_group.success = True

                return job_group.export_to_dict()

            except:
                err_msg = "Worker failed on operation `%s'" % logger.operation
                logger.error("%s\n%s" % (err_msg, traceback.format_exc()))
                raise JobException(err_msg)

            finally:
                logger.operation = ""
                self.work_lock.release()

        else:
            err_msg = "Request '%s' received, " \
                "but declined because of acquired lock" % \
                (job.info)
            logger.warning(err_msg)
            raise JobException(err_msg)
Example #12
def get_auth_types():
    auth_types = []
    for auth_type_name in config.auth_types:
        try:
            auth_types.append(get_auth_type(auth_type_name))
        except KeyError:
            logger.warning("Authentication type '%s' not found." %
                           auth_type_name)

    return auth_types
Example #13
File: TaskType.py  Project: volpino/cms
def delete_sandbox(sandbox):
    """Delete the sandbox, if the configuration allows it to be
    deleted.

    sandbox (Sandbox): the sandbox to delete.

    """
    if not config.keep_sandbox:
        try:
            sandbox.delete()
        except (IOError, OSError):
            logger.warning("Couldn't delete sandbox.\n%s", traceback.format_exc())
Example #14
File: Worker.py  Project: invinciblejha/cms
    def action(self, submission_id, job_type):
        """The actual work - that can be compilation or evaluation
        (the code is pretty much the same, the differences are in
        what we ask TaskType to do).

        submission_id (string): the submission to act on.
        job_type (string): a constant JOB_TYPE_*.

        """
        if self.work_lock.acquire(False):

            try:
                logger.operation = "%s of submission %s" % (job_type,
                                                            submission_id)
                logger.info("Request received: %s of submission %s." %
                            (job_type, submission_id))

                with SessionGen(commit=False) as self.session:

                    # Retrieve submission and task_type.
                    unused_submission, self.task_type = \
                        self.get_submission_data(submission_id)

                    # Store in the task type the shard number.
                    self.task_type.worker_shard = self.shard

                    # Do the actual work.
                    if job_type == Worker.JOB_TYPE_COMPILATION:
                        task_type_action = self.task_type.compile
                    elif job_type == Worker.JOB_TYPE_EVALUATION:
                        task_type_action = self.task_type.evaluate
                    else:
                        raise KeyError("Unexpected job type %s." % job_type)

                    logger.info("Request finished.")
                    return task_type_action()

            except:
                err_msg = "Worker failed on operation `%s'" % logger.operation
                logger.error("%s\n%s" % (err_msg, traceback.format_exc()))
                raise JobException(err_msg)

            finally:
                self.task_type = None
                self.session = None
                logger.operation = ""
                self.work_lock.release()

        else:
            logger.warning("Request of %s of submission %s received, "
                           "but declined because of acquired lock" %
                           (job_type, submission_id))
            return False
Example #15
    def run(self):
        """Watch for notifications.

        Obtain a new connection to the PostgreSQL database, attach to
        all the channels needed to fulfill the requirements posed by
        the callbacks we have to execute, notify the service by calling
        the initialization callback and then start waiting for events.

        When an event arrives parse it (both the channel name and the
        payload), check which callbacks it triggers and fire them.

        """
        while True:
            try:
                # Obtain a connection.
                conn = custom_psycopg2_connection()
                conn.autocommit = True

                # Execute all needed LISTEN queries.
                curs = conn.cursor()
                for event, table_name in self._channels:
                    curs.execute(b"LISTEN {0}_{1};".format(event, table_name))

                # Notify the service that we're ready to go: we're attached
                # to all notification channels. It can start fetching its
                # objects without fearing that we'll miss any update to them.
                for callback in self._init_callbacks:
                    gevent.spawn(callback)

                # Listen.
                while True:
                    # FIXME Use a timeout?
                    select.select([conn], [], [])
                    conn.poll()

                    for notify in conn.notifies:
                        # Parse the notification.
                        event, _, table_name = notify.channel.partition(b'_')
                        rows = notify.payload.split(b'\n')
                        pkey = tuple(int(i) for i in rows[0].split(b' '))
                        cols = set(rows[1:])

                        for item in self._callbacks:
                            if item[1] == event and item[2] == table_name and \
                                    (len(item[3]) == 0 or
                                     not item[3].isdisjoint(cols)):
                                gevent.spawn(item[0], *pkey)

                    del conn.notifies[:]
            except psycopg2.OperationalError:
                logger.warning("Lost connection with database.")
                gevent.sleep(1)
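The inner loop assumes a NOTIFY payload whose first line carries the space-separated primary key and whose remaining lines name the changed columns. A tiny standalone sketch of that parsing step, with a made-up payload:

payload = b"42 7\nscore\npublic_score"  # hypothetical payload, not real data
rows = payload.split(b"\n")
pkey = tuple(int(i) for i in rows[0].split(b" "))   # (42, 7)
cols = set(rows[1:])                                # {b"score", b"public_score"}
print(pkey, cols)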
Example #16
    def finish(self, *args, **kwds):
        """ Finishes this response, ending the HTTP request.

        We override this method in order to properly close the database.

        """
        if hasattr(self, "sql_session"):
            logger.debug("Closing SQL connection.")
            try:
                self.sql_session.close()
            except Exception as error:
                logger.warning("Couldn't close SQL connection: %r" % error)
        tornado.web.RequestHandler.finish(self, *args, **kwds)
Example #17
File: TaskType.py  Project: kennyboy/cms
def delete_sandbox(sandbox):
    """Delete the sandbox, if the configuration allows it to be
    deleted.

    sandbox (Sandbox): the sandbox to delete.

    """
    if not config.keep_sandbox:
        try:
            sandbox.delete()
        except (IOError, OSError):
            logger.warning("Couldn't delete sandbox.\n%s",
                           traceback.format_exc())
Example #18
    def post(self, user_id):
        user = self.safe_get_item(User, user_id)
        self.contest = user.contest

        message = Message(int(time.time()),
                          self.get_argument("message_subject", ""),
                          self.get_argument("message_text", ""),
                          user=user)
        self.sql_session.add(message)
        if try_commit(self.sql_session, self):
            logger.warning("Message submitted to user %s."
                           % user.username)

        self.redirect("/user/%s" % user_id)
Example #19
    def replay(self):
        """Start replaying the events in source on the CWS at the
        specified address.

        """
        with self.speed_lock:
            index = 0
            if self.start_from is not None:
                while index < len(self.events) and float(self.events[index][0]) < self.start_from:
                    index += 1
                self.start = time.time() - self.start_from
            else:
                self.start = time.time()

        while index < len(self.events):
            timestamp, username, password, task_id, task_name, type_, data = self.events[index]
            to_wait = timestamp / self.speed - (time.time() - self.start)
            while to_wait > 0.5:
                if 0 < to_wait % 10 <= 0.5:
                    logger.info("Next event in %d seconds." % int(to_wait))
                time.sleep(0.5)
                to_wait = timestamp / self.speed - (time.time() - self.start)
            if to_wait > 0:
                time.sleep(to_wait)

            if type_ == "s":  # Submit.
                files, language = data
                self.submit(
                    timestamp=timestamp,
                    username=username,
                    password=password,
                    t_id=task_id,
                    t_short=task_name,
                    files=files,
                    language=language,
                )
            elif type_ == "t":  # Token.
                self.token(
                    timestamp=timestamp,
                    username=username,
                    password=password,
                    t_id=task_id,
                    t_short=task_name,
                    submission_num=data,
                )
            else:
                logger.warning("Unexpected type `%s', ignoring." % type_)

            index += 1
Example #20
    def post(self, question_id):
        ref = self.get_argument("ref", "/")

        # Fetch form data.
        question = self.safe_get_item(Question, question_id)
        self.contest = question.user.contest
        should_ignore = self.get_argument("ignore", "no") == "yes"

        # Commit the change.
        question.ignored = should_ignore
        if try_commit(self.sql_session, self):
            logger.warning("Question '%s' by user %s %s" %
                           (question.subject, question.user.username,
                            ["unignored", "ignored"][should_ignore]))

        self.redirect(ref)
Example #21
    def replay(self):
        """Start replaying the events in source on the CWS at the
        specified address.

        """
        with self.speed_lock:
            index = 0
            if self.start_from is not None:
                while index < len(self.events) \
                        and float(self.events[index][0]) < self.start_from:
                    index += 1
                self.start = time.time() - self.start_from
            else:
                self.start = time.time()

        while index < len(self.events):
            timestamp, username, password, task_id, task_name, type_, data \
                = self.events[index]
            to_wait = (timestamp / self.speed - (time.time() - self.start))
            while to_wait > .5:
                if 0 < to_wait % 10 <= .5:
                    logger.info("Next event in %d seconds." % int(to_wait))
                time.sleep(.5)
                to_wait = (timestamp / self.speed - (time.time() - self.start))
            if to_wait > 0:
                time.sleep(to_wait)

            if type_ == "s":  # Submit.
                files, language = data
                self.submit(timestamp=timestamp,
                            username=username,
                            password=password,
                            t_id=task_id,
                            t_short=task_name,
                            files=files,
                            language=language)
            elif type_ == "t":  # Token.
                self.token(timestamp=timestamp,
                           username=username,
                           password=password,
                           t_id=task_id,
                           t_short=task_name,
                           submission_num=data)
            else:
                logger.warning("Unexpected type `%s', ignoring." % type_)

            index += 1
Example #22
    def run(self):
        """Main routine for replaying a contest, handling arguments from
        command line, and managing the speed of the replayer.

        """
        if not self.no_import:
            logger.info("Importing contest...")
            self.importer.run()
            logger.info("Contest imported.")

        logger.info("Please run CMS against the contest (with ip_lock=False).")
        logger.info("Please ensure that:")
        logger.info("- the contest is active (we are between start and stop);")
        logger.info("- the minimum interval for submissions and usertests ")
        logger.info("  (contest- and task-wise) is None.")
        logger.info("Then press enter to start.")
        raw_input()

        with io.open(os.path.join(self.import_source, "contest.json"),
                     "rt", encoding="utf-8") as fin:
            self.compute_events(json.load(fin))

        thread = Thread(target=self.replay)
        thread.daemon = True
        thread.start()

        logger.info("Loading submission data...")
        while self.start is None:
            time.sleep(1)
        while thread.isAlive():
            new_speed = raw_input("Write the speed multiplier or q to quit "
                                  "(time %s, multiplier %s):\n" %
                                  (to_time((time.time() - self.start) *
                                           self.speed),
                                   self.speed))
            if new_speed == "q":
                return 0
            elif new_speed != "":
                try:
                    new_speed = int(new_speed)
                except ValueError:
                    logger.warning("Speed multiplier could not be parsed.")
                else:
                    self.recompute_start(new_speed)
        return 0
Example #23
    def run(self):
        """Main routine for replaying a contest, handling arguments from
        command line, and managing the speed of the replayer.

        """
        if not self.no_import:
            logger.info("Importing contest...")
            self.importer.run()
            logger.info("Contest imported.")

        logger.info("Please run CMS against the contest (with ip_lock=False).")
        logger.info("Please ensure that:")
        logger.info("- the contest is active (we are between start and stop);")
        logger.info("- the minimum interval for submissions and usertests ")
        logger.info("  (contest- and task-wise) is None.")
        logger.info("Then press enter to start.")
        raw_input()

        with io.open(os.path.join(self.import_source, "contest.json"),
                     "rt",
                     encoding="utf-8") as fin:
            self.compute_events(json.load(fin))

        thread = Thread(target=self.replay)
        thread.daemon = True
        thread.start()

        logger.info("Loading submission data...")
        while self.start is None:
            time.sleep(1)
        while thread.isAlive():
            new_speed = raw_input(
                "Write the speed multiplier or q to quit "
                "(time %s, multiplier %s):\n" % (to_time(
                    (time.time() - self.start) * self.speed), self.speed))
            if new_speed == "q":
                return 0
            elif new_speed != "":
                try:
                    new_speed = int(new_speed)
                except ValueError:
                    logger.warning("Speed multiplier could not be parsed.")
                else:
                    self.recompute_start(new_speed)
        return 0
Example #24
File: TaskType.py  Project: volpino/cms
    def finish_evaluation(self, success, to_log=None):
        """Finalize the operation of evaluating. Currently there is
        nothing to do.

        success (bool): if the evaluation was successful.
        to_log (string): inform us that an unexpected event has
                         happened.

        return (dict): result collected during the evaluation.

        """
        if to_log is not None:
            logger.warning(to_log)

        self.result["success"] = success

        self.ignore_job = False
        return self.result
Example #25
    def finish_compilation(self, success, compilation_success=False,
                           text="", to_log=None):
        """Finalize the operation of compilation and build the
        dictionary to return to ES.

        success (bool): if the operation was successful (i.e., if cms
                        did everything in the right way).
        compilation_success (bool): if success = True, this is whether
                                    the compilation was successful
                                    (i.e., if the submission managed
                                    to compile).
        text (string): if success is True, stdout and stderr of the
                       compiler, or a message explaining why
                       compilation_success is False.
        to_log (string): inform us that an unexpected event has
                         happened.

        return (dict): result collected during the evaluation.

        """
        if to_log is not None:
            logger.warning(to_log)
        self.result["success"] = success

        if success:
            if compilation_success:
                self.result["compilation_outcome"] = "ok"
            else:
                self.result["compilation_outcome"] = "fail"

            try:
                self.result["compilation_text"] = text.decode("utf-8")
            except UnicodeDecodeError:
                self.result["compilation_text"] = \
                    "Cannot decode compilation text."
                logger.error("Unable to decode UTF-8 for string %s." % text)

            self.result["compilation_shard"] = self.worker_shard
            self.result["compilation_sandbox"] = self.sandbox_paths
            self.sandbox_paths = ""

        self.ignore_job = False
        return self.result
Example #26
def get_submissions(contest_id,
                    submission_id=None,
                    user_id=None,
                    task_id=None):
    """Return a list of submission_ids restricted with the given
    information.

    We allow at most one of the parameters to be non-None. If all
    parameters are None, we return all submissions for the given contest.

    contest_id (int): the id of the contest.

    submission_id (int): id of the submission to invalidate, or None.
    user_id (int): id of the user we want to invalidate, or None.
    task_id (int): id of the task we want to invalidate, or None.

    """
    if [x is not None
        for x in [submission_id, user_id, task_id]].count(True) > 1:
        err_msg = "Too many arguments for invalidate_submission."
        logger.warning(err_msg)
        raise ValueError(err_msg)

    submission_ids = []
    if submission_id is not None:
        submission_ids = [submission_id]
    elif user_id is not None:
        with SessionGen(commit=False) as session:
            user = User.get_from_id(user_id, session)
            submission_ids = [x.id for x in user.submissions]
    elif task_id is not None:
        with SessionGen(commit=False) as session:
            submissions = session.query(Submission)\
                .join(Task).filter(Task.id == task_id)
            submission_ids = [x.id for x in submissions]
    else:
        with SessionGen(commit=False) as session:
            contest = session.query(Contest).\
                filter_by(id=contest_id).first()
            submission_ids = [x.id for x in contest.get_submissions()]

    return submission_ids
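The guard at the top simply counts how many of the three filters were passed. The same idiom in isolation, with placeholder values:

submission_id, user_id, task_id = None, 3, None  # placeholder values
filters = [submission_id, user_id, task_id]
if sum(x is not None for x in filters) > 1:
    raise ValueError("Too many arguments")
print("at most one filter given")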
Example #27
    def push_logs(self, logger):
        """Push all log lines written since the last call to
        get_logs() to the logger object.

        """
        with self.log_lock:
            tmp = self.logs
            self.logs = []
        for (line, severity) in tmp:
            if severity == 'debug':
                logger.debug(line)
            elif severity == 'info':
                logger.info(line)
            elif severity == 'warning':
                logger.warning(line)
            elif severity == 'error':
                logger.error(line)
            elif severity == 'critical':
                logger.critical(line)
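The if/elif chain maps severity strings onto logger methods. An equivalent table-driven sketch using the standard logging module (an alternative formulation, not the project's code):

import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("push_logs_demo")

buffered = [("cache warmed", "debug"), ("disk almost full", "warning")]
dispatch = {
    "debug": log.debug,
    "info": log.info,
    "warning": log.warning,
    "error": log.error,
    "critical": log.critical,
}
for line, severity in buffered:
    # Fall back to info for unknown severity strings.
    dispatch.get(severity, log.info)(line)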
Example #28
    def newfunc(self, *args, **kwargs):
        # We reply with Forbidden if the given ID cannot be decrypted.
        new_args = []
        for arg in args:
            try:
                new_args.append(decrypt_number(arg))
            except ValueError:
                logger.warning("User %s called with undecryptable argument." %
                               self.current_user.username)
                raise HTTPError(403)
        new_kwargs = {}
        for k in kwargs:
            try:
                new_kwargs[k] = decrypt_number(kwargs[k])
            except ValueError:
                logger.warning("User %s called with undecryptable argument." %
                               self.current_user.username)
                raise HTTPError(403)
        return func(self, *new_args, **new_kwargs)
Example #29
    def push_logs(self, logger):
        """Push all log lines written since the last call to
        get_logs() to the logger object.

        """
        with self.log_lock:
            tmp = self.logs
            self.logs = []
        for (line, severity) in tmp:
            if severity == 'debug':
                logger.debug(line)
            elif severity == 'info':
                logger.info(line)
            elif severity == 'warning':
                logger.warning(line)
            elif severity == 'error':
                logger.error(line)
            elif severity == 'critical':
                logger.critical(line)
Example #30
def get_submissions(contest_id,
                    submission_id=None,
                    user_id=None,
                    task_id=None):
    """Return a list of submission_ids restricted with the given
    information.

    We allow at most one of the parameters to be non-None. If all
    parameters are None, we return all submissions for the given contest.

    contest_id (int): the id of the contest.

    submission_id (int): id of the submission to invalidate, or None.
    user_id (int): id of the user we want to invalidate, or None.
    task_id (int): id of the task we want to invalidate, or None.

    """
    if [x is not None
            for x in [submission_id, user_id, task_id]].count(True) > 1:
        err_msg = "Too many arguments for invalidate_submission."
        logger.warning(err_msg)
        raise ValueError(err_msg)

    submission_ids = []
    if submission_id is not None:
        submission_ids = [submission_id]
    elif user_id is not None:
        with SessionGen(commit=False) as session:
            user = User.get_from_id(user_id, session)
            submission_ids = [x.id for x in user.submissions]
    elif task_id is not None:
        with SessionGen(commit=False) as session:
            submissions = session.query(Submission)\
                .join(Task).filter(Task.id == task_id)
            submission_ids = [x.id for x in submissions]
    else:
        with SessionGen(commit=False) as session:
            contest = session.query(Contest).\
                filter_by(id=contest_id).first()
            submission_ids = [x.id for x in contest.get_submissions()]

    return submission_ids
Example #31
    def finish_evaluation_testcase(self, test_number, success,
                                   outcome=0, text="", plus=None,
                                   to_log=None):
        """Finalize the operation of evaluating the submission on a
        testcase. Fill the information in the submission.

        test_number (int): number of testcase.
        success (bool): if the operation was successful.
        outcome (float): the outcome obtained by the submission on the
                         testcase.
        text (string): the reason of failure of the submission (if
                       any).
        plus (dict): additional information extracted from the logs of
                     the 'main' evaluation step - in particular,
                     memory and time information.
        to_log (string): inform us that an unexpected event has
                         happened.

        return (bool): success.

        """
        if to_log is not None:
            logger.warning(to_log)
        if "evaluations" not in self.result:
            self.result["evaluations"] = {}
        obj = self.result["evaluations"]
        obj[test_number] = {"success": success}
        if success:
            obj[test_number]["text"] = text
            obj[test_number]["outcome"] = outcome
            obj[test_number]["evaluation_shard"] = self.worker_shard
            obj[test_number]["evaluation_sandbox"] = self.sandbox_paths
            self.sandbox_paths = ""

        if plus is None:
            plus = {}
        for info in ["memory_used",
                     "execution_time",
                     "execution_wall_clock_time"]:
            obj[test_number][info] = plus.get(info, None)

        return success
Example #32
    def __init__(self, contest_id, export_target,
                 dump_files, dump_model, light,
                 skip_submissions, skip_user_tests):
        self.contest_id = contest_id
        self.dump_files = dump_files
        self.dump_model = dump_model
        self.light = light
        self.skip_submissions = skip_submissions
        self.skip_user_tests = skip_user_tests

        # If target is not provided, we use the contest's name.
        if export_target == "":
            with SessionGen(commit=False) as session:
                contest = Contest.get_from_id(self.contest_id, session)
                self.export_target = "dump_%s.tar.gz" % contest.name
                logger.warning("export_target not given, using \"%s\""
                               % self.export_target)
        else:
            self.export_target = export_target

        self.file_cacher = FileCacher()
Example #33
File: filecacher.py  Project: cbolk/cms
    def put_file(self, digest, origin, description=""):
        """See FileCacherBackend.put_file().

        """
        try:
            with SessionGen() as session:

                # Check digest uniqueness
                if FSObject.get_from_digest(digest, session) is not None:
                    logger.debug("File %s already on database, "
                                 "dropping this one." % digest)
                    session.rollback()

                # If it is not already present, copy the file into the
                # lobject
                else:
                    fso = FSObject(description=description)
                    logger.debug("Sending file %s to the database." % digest)
                    with open(origin, 'rb') as temp_file:

                        with fso.get_lobject(session, mode='wb') \
                                as lobject:
                            logger.debug("Large object created.")
                            buf = temp_file.read(self.CHUNK_SIZE)
                            while buf != '':
                                while len(buf) > 0:
                                    written = lobject.write(buf)
                                    buf = buf[written:]
                                    # Cooperative yield
                                    gevent.sleep(0)
                                buf = temp_file.read(self.CHUNK_SIZE)

                    fso.digest = digest
                    session.add(fso)
                    session.commit()
                    logger.debug("File %s sent to the database." % digest)

        except IntegrityError:
            logger.warning("File %s caused an IntegrityError, ignoring..."
                           % digest)
Example #34
    def post(self):

        timestamp = int(time.time())
        question = Question(
            timestamp,
            self.get_argument("question_subject", ""),
            self.get_argument("question_text", ""),
            user=self.current_user,
        )
        self.sql_session.add(question)
        self.sql_session.commit()

        logger.warning("Question submitted by user %s." % self.current_user.username)

        # Add "All ok" notification
        self.application.service.add_notification(
            self.current_user.username,
            timestamp,
            self._("Question received"),
            self._("Your question has been received, you will be " "notified when the it will be answered."),
        )

        self.redirect("/communication")
Example #35
File: ScoringService.py  Project: cbolk/cms
def safe_put_data(ranking, resource, data, operation):
    """Send some data to ranking using a PUT request.

    ranking (str): the URL of ranking server.
    resource (str): the relative path of the entity.
    data (dict): the data to json-encode and send.
    operation (str): a human-readable description of the operation
                     we're performing (to produce log messages).

    raise CannotSendError in case of communication errors.

    """
    try:
        url = urljoin(ranking, resource)
        res = requests.put(url, json.dumps(data),
                           verify=config.https_certfile)
    except requests.exceptions.RequestException as error:
        logger.warning(
            "%s while %s: %s" % (type(error).__name__, operation, error))
        raise CannotSendError
    if res.status_code != 200:
        logger.warning("Status %s while %s." % (res.status_code, operation))
        raise CannotSendError
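safe_put_data is essentially a requests.put wrapped with JSON encoding and a status check. A reduced standalone version of the same idea (the URL joining and the absence of certificate verification are placeholders, not the project's configuration):

import json

import requests

def put_json(base_url, resource, data):
    # Build the target URL and PUT the JSON-encoded body.
    url = base_url.rstrip("/") + "/" + resource
    res = requests.put(url, json.dumps(data))
    if res.status_code != 200:
        raise RuntimeError("PUT %s returned %s" % (url, res.status_code))

# Hypothetical call against a local ranking server:
# put_json("http://localhost:8890", "users/alice", {"f_name": "Alice", "l_name": ""})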
Example #36
    def post(self):

        submission_id = self.get_argument("submission_id", "")

        # Decrypt submission_id.
        try:
            submission_id = decrypt_number(submission_id)
        except ValueError:
            # We reply with Forbidden if the given ID cannot be
            # decrypted.
            logger.warning(
                "User %s tried to play a token " "on an undecryptable submission_id." % self.current_user.username
            )
            raise tornado.web.HTTPError(403)

        # Find submission and check it is of the current user.
        submission = Submission.get_from_id(submission_id, self.sql_session)
        if submission is None or submission.user != self.current_user:
            logger.warning(
                "User %s tried to play a token " "on an unexisting submission_id." % self.current_user.username
            )
            raise tornado.web.HTTPError(404)

        # Don't trust the user, check again if (s)he can really play
        # the token.
        timestamp = int(time.time())
        if self.contest.tokens_available(self.current_user.username, submission.task.name, timestamp)[0] <= 0:
            logger.warning("User %s tried to play a token " "when it shouldn't." % self.current_user.username)
            # Add "no luck" notification
            self.application.service.add_notification(
                self.current_user.username,
                timestamp,
                self._("Token request discarded"),
                self._("Your request has been discarded because you have no " "tokens available."),
            )
            self.redirect("/tasks/%s" % encrypt_number(submission.task.id))
            return

        token = Token(timestamp, submission)
        self.sql_session.add(token)
        self.sql_session.commit()

        # Inform ScoringService and eventually the ranking that the
        # token has been played.
        self.application.service.scoring_service.submission_tokened(submission_id=submission_id, timestamp=timestamp)

        logger.info("Token played by user %s on task %s." % (self.current_user.username, submission.task.name))

        # Add "All ok" notification
        self.application.service.add_notification(
            self.current_user.username,
            timestamp,
            self._("Token request received"),
            self._("Your request has been received " "and applied to the submission."),
        )

        self.redirect("/tasks/%s" % encrypt_number(submission.task.id))
Example #37
    def put_file(self, digest, origin, description=""):
        """See FileCacherBackend.put_file().

        """
        try:
            with SessionGen() as session:

                # Check digest uniqueness
                if FSObject.get_from_digest(digest, session) is not None:
                    logger.debug("File %s already on database, "
                                 "dropping this one." % digest)
                    session.rollback()

                # If it is not already present, copy the file into the
                # lobject
                else:
                    fso = FSObject(description=description)
                    logger.debug("Sending file %s to the database." % digest)
                    with open(origin, 'rb') as temp_file:
                        with fso.get_lobject(session, mode='wb') as lobject:
                            logger.debug("Large object created.")
                            buf = temp_file.read(self.CHUNK_SIZE)
                            while buf != '':
                                while len(buf) > 0:
                                    written = lobject.write(buf)
                                    buf = buf[written:]
                                    if self.service is not None:
                                        self.service._step()
                                buf = temp_file.read(self.CHUNK_SIZE)
                    fso.digest = digest
                    session.add(fso)
                    session.commit()
                    logger.debug("File %s sent to the database." % digest)

        except IntegrityError:
            logger.warning("File %s caused an IntegrityError, ignoring..."
                           % digest)
Example #38
def extract_archive(temp_name, original_filename):
    """Obtain a list of files inside the specified archive.

    Returns a list of the files inside the archive located in
    temp_name, using original_filename to guess the type of the
    archive.

    """
    file_list = []
    if original_filename.endswith(".zip"):
        try:
            zip_object = zipfile.ZipFile(temp_name, "r")
            for item in zip_object.infolist():
                file_list.append({
                    "filename": item.filename,
                    "body": zip_object.read(item)
                })
        except Exception as error:
            logger.warning("Exception while extracting zip file `%s'. %r" %
                           (original_filename, error))
            return None
    elif original_filename.endswith(".tar.gz") \
            or original_filename.endswith(".tar.bz2") \
            or original_filename.endswith(".tar"):
        try:
            tar_object = tarfile.open(name=temp_name)
            for item in tar_object.getmembers():
                if item.isfile():
                    file_list.append({
                        "filename":
                        item.name,
                        "body":
                        tar_object.extractfile(item).read()
                    })
        except tarfile.TarError as error:
            logger.warning("Exception while extracting tar file `%s'. %r" %
                           (original_filename, error))
            return None
        except IOError:
            return None
    else:
        logger.warning("Compressed file `%s' not recognized." %
                       original_filename)
        return None
    return file_list
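extract_archive returns a list of {"filename": ..., "body": ...} dictionaries. A self-contained round trip with an in-memory zip shows that shape (the file name and contents are made up):

import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as zf:
    zf.writestr("solution.cpp", b"int main() {}")
buf.seek(0)

with zipfile.ZipFile(buf, "r") as zf:
    file_list = [{"filename": item.filename, "body": zf.read(item)}
                 for item in zf.infolist()]
print(file_list)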
Example #39
File: __init__.py  Project: strogo/cms-1
def extract_archive(temp_name, original_filename):
    """Obtain a list of files inside the specified archive.

    Returns a list of the files inside the archive located in
    temp_name, using original_filename to guess the type of the
    archive.

    """
    file_list = []
    if original_filename.endswith(".zip"):
        try:
            zip_object = zipfile.ZipFile(temp_name, "r")
            for item in zip_object.infolist():
                file_list.append({
                    "filename": item.filename,
                    "body": zip_object.read(item)})
        except Exception as error:
            logger.warning("Exception while extracting zip file `%s'. %r" %
                           (original_filename, error))
            return None
    elif original_filename.endswith(".tar.gz") \
            or original_filename.endswith(".tar.bz2") \
            or original_filename.endswith(".tar"):
        try:
            tar_object = tarfile.open(name=temp_name)
            for item in tar_object.getmembers():
                if item.isfile():
                    file_list.append({
                        "filename": item.name,
                        "body": tar_object.extractfile(item).read()})
        except tarfile.TarError as error:
            logger.warning("Exception while extracting tar file `%s'. %r" %
                           (original_filename, error))
            return None
        except IOError:
            return None
    else:
        logger.warning("Compressed file `%s' not recognized."
                       % original_filename)
        return None
    return file_list
Example #40
    def new_evaluation(self, submission_id):
        """This RPC inform ScoringService that ES finished the
        evaluation for a submission.

        submission_id (int): the id of the submission that changed.

        """
        with SessionGen(commit=True) as session:
            submission = Submission.get_from_id(submission_id, session)
            if submission is None:
                logger.error("[new_evaluation] Couldn't find "
                             " submission %d in the database." % submission_id)
                return
            if not submission.evaluated():
                logger.warning("[new_evaluation] Submission %d "
                               "is not evaluated." % submission_id)
                return
            if submission.user.hidden:
                return

            # Assign score to the submission.
            scorer = self.scorers[submission.task_id]
            scorer.add_submission(
                submission_id, submission.timestamp, submission.user.username,
                dict((ev.num, {
                    "outcome": ev.outcome,
                    "text": ev.text,
                    "time": ev.execution_time,
                    "memory": ev.memory_used
                }) for ev in submission.evaluations), submission.tokened())

            # Mark submission as scored.
            self.submission_ids_scored.add(submission_id)

            # Filling submission's score info in the db.
            submission.score = scorer.pool[submission_id]["score"]
            submission.public_score = \
                scorer.pool[submission_id]["public_score"]

            # And details.
            submission.score_details = scorer.pool[submission_id]["details"]
            submission.public_score_details = \
                scorer.pool[submission_id]["public_details"]
            submission.ranking_score_details = \
                scorer.pool[submission_id]["ranking_details"]

            # Data to send to remote rankings.
            submission_put_data = {
                "user": encode_id(submission.user.username),
                "task": encode_id(submission.task.name),
                "time": int(make_timestamp(submission.timestamp))
            }
            subchange_id = "%s%ss" % \
                (int(make_timestamp(submission.timestamp)),
                 submission_id)
            subchange_put_data = {
                "submission": encode_id(submission_id),
                "time": int(make_timestamp(submission.timestamp)),
                "score": submission.score,
                "extra": submission.ranking_score_details
            }

        # TODO: ScoreRelative here does not work with remote
        # rankings (it does in the ranking view) because we
        # update only the user owning the submission.

        # Adding operations to the queue.
        with self.operation_queue_lock:
            for ranking in self.rankings:
                self.submission_queue.setdefault(
                    ranking,
                    dict())[encode_id(submission_id)] = \
                    submission_put_data
                self.subchange_queue.setdefault(
                    ranking,
                    dict())[encode_id(subchange_id)] = \
                    subchange_put_data
Example #41
    def do_reimport(self):
        """Ask the loader to load the contest and actually merge the
        two.

        """
        # Create the dict corresponding to the new contest.
        yaml_contest = self.loader.import_contest(self.path)
        yaml_users = dict(((x['username'], x) for x in yaml_contest['users']))
        yaml_tasks = dict(((x['name'], x) for x in yaml_contest['tasks']))

        with SessionGen(commit=False) as session:

            # Create the dict corresponding to the old contest, from
            # the database.
            contest = Contest.get_from_id(self.contest_id, session)
            cms_contest = contest.export_to_dict()
            cms_users = dict((x['username'], x) for x in cms_contest['users'])
            cms_tasks = dict((x['name'], x) for x in cms_contest['tasks'])

            # Delete the old contest from the database.
            session.delete(contest)
            session.flush()

            # Do the actual merge: first of all update all users of
            # the old contest with the corresponding ones from the new
            # contest; if some user is present in the old contest but
            # not in the new one we check if we have to fail or remove
            # it and, in the latter case, add it to a list
            users_to_remove = []
            for user_num, user in enumerate(cms_contest['users']):
                if user['username'] in yaml_users:
                    yaml_user = yaml_users[user['username']]

                    yaml_user['submissions'] = user['submissions']
                    yaml_user['user_tests'] = user['user_tests']
                    yaml_user['questions'] = user['questions']
                    yaml_user['messages'] = user['messages']

                    cms_contest['users'][user_num] = yaml_user
                else:
                    if self.force:
                        logger.warning("User %s exists in old contest, but "
                                       "not in the new one" % user['username'])
                        users_to_remove.append(user_num)
                        # FIXME Do we need really to do this, given that
                        # we already deleted the whole contest?
                        session.delete(contest.users[user_num])
                    else:
                        logger.error("User %s exists in old contest, but "
                                     "not in the new one" % user['username'])
                        return False

            # Delete the users
            for user_num in users_to_remove:
                del cms_contest['users'][user_num]

            # Then append the users in the new contest that are not present
            # in the old one.
            for user in yaml_contest['users']:
                if user['username'] not in cms_users.keys():
                    cms_contest['users'].append(user)

            # The same for tasks: update old tasks.
            tasks_to_remove = []
            for task_num, task in enumerate(cms_contest['tasks']):
                if task['name'] in yaml_tasks:
                    yaml_task = yaml_tasks[task['name']]

                    cms_contest['tasks'][task_num] = yaml_task
                else:
                    if self.force:
                        logger.warning("Task %s exists in old contest, but "
                                       "not in the new one" % task['name'])
                        tasks_to_remove.append(task_num)
                        # FIXME Do we really need to do this, given
                        # that we already deleted the whole contest?
                        session.delete(contest.tasks[task_num])
                    else:
                        logger.error("Task %s exists in old contest, but "
                                     "not in the new one" % task['name'])
                        return False

            # Delete the removed tasks, again from the highest index
            # to the lowest.
            for task_num in reversed(tasks_to_remove):
                del cms_contest['tasks'][task_num]

            # And add new tasks.
            for task in yaml_contest['tasks']:
                if task['name'] not in cms_tasks:
                    cms_contest['tasks'].append(task)

            # Reimport the contest in the db, with the previous ID.
            contest = Contest.import_from_dict(cms_contest)
            contest.id = self.contest_id
            session.add(contest)
            session.flush()

            logger.info("Analyzing database.")
            analyze_all_tables(session)
            session.commit()

        logger.info("Reimport of contest %s finished." % self.contest_id)

        return True
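The merge above is, at its core, a key-based update of two lists of dicts: keep the new data, carry the old history over, and decide what to do with entries that disappeared. Below is a minimal, self-contained sketch of that pattern, assuming plain dicts with 'username' and 'submissions' keys; merge_users, old_users and new_users are illustrative names, not part of CMS.

def merge_users(old_users, new_users, force=False):
    """Merge two lists of user dicts keyed by 'username'.

    Users keep the data from new_users but carry over the submission
    history from old_users; users missing from new_users are dropped
    only when force is True, otherwise the merge fails.
    """
    new_by_name = dict((u["username"], u) for u in new_users)
    old_names = set(u["username"] for u in old_users)
    merged = []
    for old in old_users:
        new = new_by_name.get(old["username"])
        if new is None:
            if not force:
                raise ValueError("user %s missing from the new contest"
                                 % old["username"])
            continue  # force: silently drop the stale user
        new = dict(new)
        new["submissions"] = old["submissions"]  # carry history over
        merged.append(new)
    # Finally, append the users that exist only in the new contest.
    merged.extend(u for u in new_users if u["username"] not in old_names)
    return merged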
Example #42
    def new_evaluation(self, submission_id, dataset_id):
        """This RPC inform ScoringService that ES finished the work on
        a submission (either because it has been evaluated, or because
        the compilation failed).

        submission_id (int): the id of the submission that changed.
        dataset_verion (int): the dataset version used.

        """
        with SessionGen(commit=True) as session:
            submission_result = SubmissionResult.get_from_id(
                (submission_id, dataset_id), session)

            if submission_result is None:
                logger.error("[new_evaluation] Couldn't find "
                             " submission %d in the database." %
                             submission_id)
                raise KeyError
            submission = submission_result.submission

            if not submission_result.compiled():
                logger.warning("[new_evaluation] Submission %d(%d) "
                               "is not compiled." % (
                               submission_id, dataset_id))
                return
            elif submission_result.compilation_outcome == "ok" \
                    and not submission_result.evaluated():
                logger.warning("[new_evaluation] Submission %d(%d) compiled "
                               "correctly but is not evaluated."
                               % (submission_id, dataset_id))
                return
            elif submission.user.hidden:
                logger.info("[new_evaluation] Submission %d not scored "
                            "because user is hidden." % submission_id)
                return

            # Assign score to the submission.
            scorer = self.scorers.get(dataset_id)
            if scorer is None:
                # We may get here because the scorer threw an
                # exception while initializing, or because we are
                # scoring for the wrong contest.
                logger.error(
                    "Not scoring submission %d because scorer is broken." %
                        submission_id)
                return

            try:
                scorer.add_submission(
                    submission_id, dataset_id,
                    submission.timestamp,
                    submission.user.username,
                    submission_result.evaluated(),
                    dict((ev.num,
                          {"outcome": ev.outcome,
                           "text": ev.text,
                           "time": ev.execution_time,
                           "memory": ev.memory_used})
                         for ev in submission_result.evaluations),
                    submission.tokened())
            except Exception:
                logger.error("Failed to score submission %d. "
                             "Scorer threw an exception: %s" % (
                                 submission_id, traceback.format_exc()))
                return

            # Mark submission as scored.
            self.submission_ids_scored.add((submission_id, dataset_id))

            # Filling submission's score info in the db.
            submission_result.score = scorer.pool[submission_id]["score"]
            submission_result.public_score = \
                scorer.pool[submission_id]["public_score"]

            # And details.
            submission_result.score_details = \
                scorer.pool[submission_id]["details"]
            submission_result.public_score_details = \
                scorer.pool[submission_id]["public_details"]
            submission_result.ranking_score_details = \
                scorer.pool[submission_id]["ranking_details"]

            try:
                ranking_score_details = json.loads(
                        submission_result.ranking_score_details)
            except (json.decoder.JSONDecodeError, TypeError):
                # It may be blank.
                ranking_score_details = None

            # If this is not the active dataset, we can bail out here
            # and avoid updating RWS.
            if dataset_id != submission.task.active_dataset_id:
                return

            # Data to send to remote rankings.
            submission_put_data = {
                "user": encode_id(submission.user.username),
                "task": encode_id(submission.task.name),
                "time": int(make_timestamp(submission.timestamp))}
            subchange_id = "%s%ss" % \
                (int(make_timestamp(submission.timestamp)),
                 submission_id)
            subchange_put_data = {
                "submission": encode_id(submission_id),
                "time": int(make_timestamp(submission.timestamp)),
                # We're sending the unrounded score to RWS
                "score": submission_result.score}
            if ranking_score_details is not None:
                subchange_put_data["extra"] = ranking_score_details

        # TODO: ScoreRelative here does not work with remote
        # rankings (it does in the ranking view) because we
        # update only the user owning the submission.

        # Adding operations to the queue.
        with self.operation_queue_lock:
            for ranking in self.rankings:
                self.submission_queue.setdefault(
                    ranking,
                    dict())[encode_id(submission_id)] = \
                    submission_put_data
                self.subchange_queue.setdefault(
                    ranking,
                    dict())[encode_id(subchange_id)] = \
                    subchange_put_data
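The final block above accumulates one dictionary of pending updates per ranking server through dict.setdefault, so a newer update for the same key simply replaces the older one. A small sketch of just that queuing behaviour, with invented ranking URLs and payloads:

submission_queue = {}

def enqueue(ranking, key, payload):
    # One dict of pending PUTs per ranking; a later update for the
    # same key overwrites the earlier payload instead of piling up.
    submission_queue.setdefault(ranking, dict())[key] = payload

enqueue("http://rws1/", "sub_42", {"score": 60.0})
enqueue("http://rws2/", "sub_42", {"score": 60.0})
enqueue("http://rws1/", "sub_42", {"score": 80.0})  # supersedes 60.0
print(submission_queue["http://rws1/"]["sub_42"]["score"])  # 80.0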
Example #43
    def get_params_for_task(self, path, num):
        """Given the path of a task, this function put all needed data
        into FS, and fills the dictionary of parameters required by
        Task.import_from_dict().

        path (string): path of the task.
        num (int): number of the task in the contest task ordering.

        return (dict): info of the task.

        """
        path = os.path.realpath(path)
        super_path, name = os.path.split(path)
        with codecs.open(os.path.join(super_path, name + ".yaml"),
                         "r", "utf-8") as conf_file:
            conf = yaml.safe_load(conf_file)

        logger.info("Loading parameters for task %s." % name)

        params = {"name": name}
        assert name == conf["nome_breve"]
        params["title"] = conf["nome"]
        if name == params["title"]:
            logger.warning("Short name equals long name (title). "
                           "Is this intended?")
        params["num"] = num
        params["time_limit"] = conf.get("timeout", None)
        params["memory_limit"] = conf.get("memlimit", None)
        params["attachments"] = []  # FIXME - Use auxiliary
        params["statements"] = [
            Statement(
                self.file_cacher.put_file(
                    path=os.path.join(path, "testo", "testo.pdf"),
                    description="Statement for task %s (lang: )" % name),
                "").export_to_dict()
        ]
        params["official_language"] = ""

        params["submission_format"] = [
            SubmissionFormatElement("%s.%%l" % name).export_to_dict()
        ]

        # Builds the parameters that depend on the task type
        params["managers"] = []
        infile_param = conf.get("infile", "input.txt")
        outfile_param = conf.get("outfile", "output.txt")

        # If there is cor/grader.%l for some language %l, then,
        # presuming that the task type is Batch, we retrieve graders
        # in the form cor/grader.%l
        graders = False
        for lang in Submission.LANGUAGES:
            if os.path.exists(os.path.join(path, "cor", "grader.%s" % (lang))):
                graders = True
                break
        if graders:
            for lang in Submission.LANGUAGES:
                grader_filename = os.path.join(path, "cor",
                                               "grader.%s" % (lang))
                if os.path.exists(grader_filename):
                    params["managers"].append(
                        Manager(
                            self.file_cacher.put_file(
                                path=grader_filename,
                                description="Grader for task %s and "
                                "language %s" % (name, lang)),
                            "grader.%s" % (lang)).export_to_dict())
                else:
                    logger.warning("Could not find grader for "
                                   "language %s" % (lang))
            compilation_param = "grader"
        else:
            compilation_param = "alone"

        # If there is cor/correttore, then, presuming that the task
        # type is Batch or OutputOnly, we retrieve the comparator
        if os.path.exists(os.path.join(path, "cor", "correttore")):
            params["managers"] += [
                Manager(
                    self.file_cacher.put_file(
                        path=os.path.join(path, "cor", "correttore"),
                        description="Manager for task %s" % (name)),
                    "checker").export_to_dict()
            ]
            evaluation_parameter = "comparator"
        else:
            evaluation_parameter = "diff"

        # If there is no sol/ directory, then the task type is
        # OutputOnly
        if not os.path.exists(os.path.join(path, "sol")):
            params["task_type"] = "OutputOnly"
            params["task_type_parameters"] = '["%s"]' % (evaluation_parameter)
            params["submission_format"] = [
                SubmissionFormatElement("output_%03d.txt" %
                                        (i)).export_to_dict()
                for i in xrange(int(conf["n_input"]))
            ]

        # If there is cor/manager, then the task type is Communication
        elif os.path.exists(os.path.join(path, "cor", "manager")):
            params["task_type"] = "Communication"
            params["task_type_parameters"] = '[]'
            params["managers"] += [
                Manager(
                    self.file_cacher.put_file(
                        path=os.path.join(path, "cor", "manager"),
                        description="Manager for task %s" % (name)),
                    "manager").export_to_dict()
            ]
            for lang in Submission.LANGUAGES:
                stub_name = os.path.join(path, "sol", "stub.%s" % lang)
                if os.path.exists(stub_name):
                    params["managers"].append(
                        Manager(
                            self.file_cacher.put_file(
                                path=stub_name,
                                description="Stub for task %s and language %s"
                                % (name, lang)),
                            "stub.%s" % lang).export_to_dict())
                else:
                    logger.warning("Stub for language %s not found." % lang)

        # Otherwise, the task type is Batch
        else:
            params["task_type"] = "Batch"
            params["task_type_parameters"] = \
                '["%s", ["%s", "%s"], "%s"]' % \
                (compilation_param, infile_param, outfile_param,
                 evaluation_parameter)

        params["score_type"] = conf.get("score_type", "Sum")
        params["score_parameters"] = conf.get(
            "score_parameters", str(100.0 / float(conf["n_input"])))
        public_testcases = conf.get("risultati", "").strip()
        if public_testcases != "":
            public_testcases = [
                int(x.strip()) for x in public_testcases.split(",")
            ]
        else:
            public_testcases = []
        params["testcases"] = []
        for i in xrange(int(conf["n_input"])):
            _input = os.path.join(path, "input", "input%d.txt" % i)
            output = os.path.join(path, "output", "output%d.txt" % i)
            input_digest = self.file_cacher.put_file(
                path=_input, description="Input %d for task %s" % (i, name))
            output_digest = self.file_cacher.put_file(
                path=output, description="Output %d for task %s" % (i, name))
            params["testcases"].append(
                Testcase(input_digest,
                         output_digest,
                         public=(i in public_testcases)).export_to_dict())
            if params["task_type"] == "OutputOnly":
                params["attachments"].append(
                    Attachment(input_digest,
                               "input_%03d.txt" % (i)).export_to_dict())
        params["token_initial"] = conf.get("token_initial", None)
        params["token_max"] = conf.get("token_max", None)
        params["token_total"] = conf.get("token_total", None)
        params["token_min_interval"] = conf.get("token_min_interval", 0)
        params["token_gen_time"] = conf.get("token_gen_time", 0)
        params["token_gen_number"] = conf.get("token_gen_number", 0)

        logger.info("Task parameters loaded.")

        return params
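The task type here is inferred purely from the directory layout: no sol/ means OutputOnly, a cor/manager means Communication, everything else is Batch, and cor/correttore only switches the evaluation to a comparator. A condensed sketch of that decision with simplified return values (the function name is illustrative):

import os

def detect_task_type(path):
    """Guess the task type from an Italian-style task directory."""
    has_checker = os.path.exists(os.path.join(path, "cor", "correttore"))
    evaluation = "comparator" if has_checker else "diff"
    if not os.path.exists(os.path.join(path, "sol")):
        return "OutputOnly", evaluation
    if os.path.exists(os.path.join(path, "cor", "manager")):
        return "Communication", None
    return "Batch", evaluation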
Example #44
    def do_import(self):
        """Run the actual import code.

        """
        logger.operation = "importing contest from %s" % self.import_source
        logger.info("Starting import.")

        if not os.path.isdir(self.import_source):
            if self.import_source.endswith(".zip"):
                archive = zipfile.ZipFile(self.import_source, "r")
                file_names = archive.infolist()

                self.import_dir = tempfile.mkdtemp()
                archive.extractall(self.import_dir)
            elif self.import_source.endswith(".tar.gz") \
                     or self.import_source.endswith(".tgz") \
                     or self.import_source.endswith(".tar.bz2") \
                     or self.import_source.endswith(".tbz2") \
                     or self.import_source.endswith(".tar"):
                archive = tarfile.open(name=self.import_source)
                file_names = archive.getnames()
            else:
                logger.critical("Unable to import from %s." %
                                self.import_source)
                return False

            root = find_root_of_archive(file_names)
            if root is None:
                logger.critical("Cannot find a root directory in %s." %
                                self.import_source)
                return False

            self.import_dir = tempfile.mkdtemp()
            archive.extractall(self.import_dir)
            self.import_dir = os.path.join(self.import_dir, root)

        if self.drop:
            logger.info("Dropping and recreating the database.")
            try:
                metadata.drop_all()
            except sqlalchemy.exc.OperationalError as error:
                logger.critical("Unable to access DB.\n%r" % error)
                return False
        try:
            metadata.create_all()
        except sqlalchemy.exc.OperationalError as error:
            logger.critical("Unable to access DB.\n%r" % error)
            return False

        if not self.no_files:
            logger.info("Importing files.")
            files_dir = os.path.join(self.import_dir, "files")
            descr_dir = os.path.join(self.import_dir, "descriptions")
            files = set(os.listdir(files_dir))
            for _file in files:
                if not self.safe_put_file(os.path.join(files_dir, _file),
                                          os.path.join(descr_dir, _file)):
                    return False

        # Initialized here so that the final log line does not fail
        # when only_files is set.
        contest_id = None
        if not self.only_files:
            with SessionGen(commit=False) as session:

                # Import the contest in JSON format.
                logger.info("Importing the contest from JSON file.")
                with open(os.path.join(self.import_dir,
                                       "contest.json")) as fin:
                    contest = Contest.import_from_dict(json.load(fin))
                    session.add(contest)

                # Check that no files were missing (only if files were
                # imported).
                if not self.no_files:
                    contest_files = contest.enumerate_files()
                    missing_files = contest_files.difference(files)
                    if len(missing_files) > 0:
                        logger.warning("Some files needed to the contest "
                                       "are missing in the import directory.")

                session.flush()
                contest_id = contest.id
                session.commit()

        logger.info("Import finished (contest id: %s)." % contest_id)
        logger.operation = ""

        # If we extracted an archive, we remove it.
        if self.import_dir != self.import_source:
            shutil.rmtree(self.import_dir)

        return True
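The archive handling above dispatches on the file name suffix before extracting into a temporary directory. A compact sketch of the same dispatch using only the standard library, assuming the caller only needs the member names (hence namelist() for zip archives); open_archive is an illustrative name:

import tarfile
import zipfile

TAR_SUFFIXES = (".tar.gz", ".tgz", ".tar.bz2", ".tbz2", ".tar")

def open_archive(path):
    """Return (archive, member_names) for a zip or tar archive."""
    if path.endswith(".zip"):
        archive = zipfile.ZipFile(path, "r")
        return archive, archive.namelist()
    if path.endswith(TAR_SUFFIXES):
        archive = tarfile.open(name=path)
        return archive, archive.getnames()
    raise ValueError("unsupported archive format: %s" % path)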
Example #45
    def compile(self):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = self.job.language

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(self.job.files) != 1:
            self.job.success = True
            self.job.compilation_success = False
            self.job.text = "Invalid files in submission"
            logger.warning("Submission contains %d files, expecting 1" %
                           len(self.job.files))
            return True

        # Create the sandbox
        sandbox = create_sandbox(self)
        self.job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        format_filename = self.job.files.keys()[0]
        source_filenames = []
        # Stub (compiled together with the user's source).
        source_filenames.append("stub.%s" % language)
        files_to_get[source_filenames[0]] = \
            self.job.managers["stub.%s" % language].digest
        # User's submission.
        source_filenames.append(format_filename.replace("%l", language))
        files_to_get[source_filenames[1]] = \
            self.job.files[format_filename].digest
        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = format_filename.replace(".%l", "")
        command = get_compilation_command(language, source_filenames,
                                          executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, command)

        # Retrieve the compiled executables
        self.job.success = operation_success
        self.job.compilation_success = compilation_success
        self.job.plus = plus
        self.job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, self.job.info))
            self.job.executables[executable_filename] = \
                Executable(digest, executable_filename)

        # Cleanup
        delete_sandbox(sandbox)
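For this task type the stub and the user's source are compiled together into a single executable, with the stub listed first on the command line. Exactly which flags are used is up to get_compilation_command; the snippet below is only a hedged illustration of what such a command might look like for C and C++, not the actual CMS implementation:

def sketch_compilation_command(language, sources, executable):
    # Illustrative flags only; the real values come from
    # get_compilation_command.
    if language == "c":
        return ["/usr/bin/gcc", "-O2", "-static",
                "-o", executable] + sources
    if language == "cpp":
        return ["/usr/bin/g++", "-O2", "-static",
                "-o", executable] + sources
    raise ValueError("unsupported language: %s" % language)

print(sketch_compilation_command("c", ["stub.c", "task.c"], "task"))
# ['/usr/bin/gcc', '-O2', '-static', '-o', 'task', 'stub.c', 'task.c']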
Example #46
    def do_import(self):
        """Run the actual import code.

        """
        logger.operation = "importing contest from %s" % self.import_source
        logger.info("Starting import.")

        if not os.path.isdir(self.import_source):
            if self.import_source.endswith(".zip"):
                archive = zipfile.ZipFile(self.import_source, "r")
                file_names = archive.infolist()

                self.import_dir = tempfile.mkdtemp()
                archive.extractall(self.import_dir)
            elif self.import_source.endswith(".tar.gz") \
                     or self.import_source.endswith(".tgz") \
                     or self.import_source.endswith(".tar.bz2") \
                     or self.import_source.endswith(".tbz2") \
                     or self.import_source.endswith(".tar"):
                archive = tarfile.open(name=self.import_source)
                file_names = archive.getnames()
            else:
                logger.critical("Unable to import from %s." %
                                self.import_source)
                return False

            root = find_root_of_archive(file_names)
            if root is None:
                logger.critical("Cannot find a root directory in %s." %
                                self.import_source)
                return False

            self.import_dir = tempfile.mkdtemp()
            archive.extractall(self.import_dir)
            self.import_dir = os.path.join(self.import_dir, root)

        if self.drop:
            logger.info("Dropping and recreating the database.")
            try:
                metadata.drop_all()
            except sqlalchemy.exc.OperationalError as error:
                logger.critical("Unable to access DB.\n%r" % error)
                return False
        try:
            metadata.create_all()
        except sqlalchemy.exc.OperationalError as error:
            logger.critical("Unable to access DB.\n%r" % error)
            return False

        # Initialized here so that the final log line does not fail
        # when only_files is set.
        contest_id = None
        if not self.only_files:
            with SessionGen(commit=False) as session:

                # Import the contest in JSON format.
                logger.info("Importing the contest from JSON file.")

                with open(os.path.join(self.import_dir,
                                       "contest.json")) as fin:
                    # Throughout all the code we'll assume the input is
                    # correct without actually doing any validations.
                    # Thus, for example, we're not checking that the
                    # decoded object is a dict...
                    self.datas = json.load(fin)

                self.objs = dict()
                for _id, data in self.datas.iteritems():
                    obj = self.import_object(data)
                    self.objs[_id] = obj
                    session.add(obj)

                for _id in self.datas:
                    self.add_relationships(self.datas[_id], self.objs[_id])

                # Mmh... kind of fragile interface
                contest = self.objs["0"]

                # Check that no files were missing (only if files
                # were imported). This check is currently disabled.
                if False and not self.no_files:
                    contest_files = contest.enumerate_files()
                    missing_files = contest_files.difference(files)
                    if len(missing_files) > 0:
                        logger.warning("Some files needed to the contest "
                                       "are missing in the import directory.")

                session.flush()
                contest_id = contest.id
                contest_files = contest.enumerate_files()
                session.commit()

        if not self.no_files:
            logger.info("Importing files.")
            files_dir = os.path.join(self.import_dir, "files")
            descr_dir = os.path.join(self.import_dir, "descriptions")
            for digest in contest_files:
                file_ = os.path.join(files_dir, digest)
                desc = os.path.join(descr_dir, digest)
                if not os.path.exists(file_) or not os.path.exists(desc):
                    logger.error("Some files needed to the contest "
                                 "are missing in the import directory. "
                                 "The import will continue. Be aware.")
                if not self.safe_put_file(file_, desc):
                    logger.critical("Unable to put file `%s' in the database. "
                                    "Aborting. Please remove the contest "
                                    "from the database." % file_)
                    # TODO: remove contest from the database.
                    return False

        logger.info("Import finished (contest id: %s)." % contest_id)
        logger.operation = ""

        # If we extracted an archive, we remove it.
        if self.import_dir != self.import_source:
            shutil.rmtree(self.import_dir)

        return True
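This importer rebuilds the whole object graph in two passes: first every object is created from its dumped data, then the relationships between them are wired up by id, so the order of entries in the dump does not matter. A minimal sketch of that pattern; two_pass_import, make_object and link_object are illustrative stand-ins for import_object and add_relationships:

def two_pass_import(datas, make_object, link_object):
    """Rebuild an object graph from a dict mapping id -> dumped data.

    Pass 1 creates every object; pass 2 lets link_object resolve
    references between objects by looking them up by id.
    """
    objs = dict((key, make_object(data)) for key, data in datas.items())
    for key, data in datas.items():
        link_object(data, objs[key], objs)
    return objs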
Example #47
File: Batch.py Project: pombredanne/cms-1
    def compile(self):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = self.job.language

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(self.job.files) != 1:
            self.job.success = True
            self.job.compilation_success = False
            self.job.text = "Invalid files in submission"
            logger.warning("Submission contains %d files, expecting 1" %
                           len(self.job.files))
            return True

        # Create the sandbox
        sandbox = create_sandbox(self)
        self.job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        format_filename = self.job.files.keys()[0]
        source_filenames = []
        source_filenames.append(format_filename.replace("%l", language))
        files_to_get[source_filenames[0]] = \
            self.job.files[format_filename].digest
        # If a grader is specified, we add to the command line (and to
        # the files to get) the corresponding manager. The grader must
        # be the first file in source_filenames.
        if self.job.task_type_parameters[0] == "grader":
            source_filenames.insert(0, "grader.%s" % language)
            files_to_get["grader.%s" % language] = \
                self.job.managers["grader.%s" % language].digest

        # Also copy all *.h and *lib.pas graders
        for filename in self.job.managers.iterkeys():
            if filename.endswith('.h') or \
                    filename.endswith('lib.pas'):
                files_to_get[filename] = \
                    self.job.managers[filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = format_filename.replace(".%l", "")
        command = get_compilation_command(language,
                                          source_filenames,
                                          executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, command)

        # Retrieve the compiled executables
        self.job.success = operation_success
        self.job.compilation_success = compilation_success
        self.job.plus = plus
        self.job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" %
                (executable_filename, self.job.info))
            self.job.executables[executable_filename] = \
                Executable(digest, executable_filename)

        # Cleanup
        delete_sandbox(sandbox)
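Besides the optional grader, this compile step copies every manager whose name ends in .h or lib.pas into the sandbox, so that graders written in C/C++ or Pascal find their headers and units at compile time. The filter itself is tiny; the file names in the example are invented:

def extra_compilation_files(manager_names):
    """Select the managers that must sit next to the sources when
    compiling: C/C++ headers and Pascal units named *lib.pas."""
    return [name for name in manager_names
            if name.endswith(".h") or name.endswith("lib.pas")]

print(extra_compilation_files(
    ["grader.cpp", "taskname.h", "tasknamelib.pas", "checker"]))
# ['taskname.h', 'tasknamelib.pas']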
Example #48
    def get_params_for_task(self, path, num):
        """Given the path of a task, this function put all needed data
        into FS, and fills the dictionary of parameters required by
        Task.import_from_dict().

        path (string): path of the task.
        num (int): number of the task in the contest task ordering.

        return (dict): info of the task.

        """
        path = os.path.realpath(path)
        super_path, name = os.path.split(path)
        with codecs.open(os.path.join(super_path, name + ".yaml"),
                         "r", "utf-8") as conf_file:
            conf = yaml.safe_load(conf_file)

        logger.info("Loading parameters for task %s." % name)

        params = {"name": name}
        assert name == conf["nome_breve"]
        params["title"] = conf["nome"]
        if name == params["title"]:
            logger.warning("Short name equals long name (title). "
                           "Is this intended?")
        params["num"] = num
        params["time_limit"] = conf.get("timeout", None)
        params["memory_limit"] = conf.get("memlimit", None)
        params["attachments"] = []  # FIXME - Use auxiliary
        params["statements"] = [
            Statement(
                self.file_cacher.put_file(
                    path=os.path.join(path, "testo", "testo.pdf"),
                    description="Statement for task %s (lang: )" % name),
                "").export_to_dict()
        ]
        params["official_language"] = \
            conf.get("official_language", "en_official")

        params["submission_format"] = [
            SubmissionFormatElement("%s.%%l" % name).export_to_dict()
        ]

        # Builds the parameters that depend on the task type
        params["managers"] = []
        infile_param = conf.get("infile", "input.txt")
        outfile_param = conf.get("outfile", "output.txt")

        # If there is sol/grader.%l for some language %l, then,
        # presuming that the task type is Batch, we retrieve graders
        # in the form sol/grader.%l
        graders = False
        for lang in Submission.LANGUAGES:
            if os.path.exists(os.path.join(path, "sol", "grader.%s" % (lang))):
                graders = True
                break
        if graders:
            # Read grader for each language
            for lang in Submission.LANGUAGES:
                grader_filename = os.path.join(path, "sol",
                                               "grader.%s" % (lang))
                if os.path.exists(grader_filename):
                    params["managers"].append(
                        Manager(
                            self.file_cacher.put_file(
                                path=grader_filename,
                                description="Grader for task %s and "
                                "language %s" % (name, lang)),
                            "grader.%s" % (lang)).export_to_dict())
                else:
                    logger.warning("Could not find grader for "
                                   "language %s" % (lang))
            # Read managers with other known file extensions
            for other_filename in os.listdir(os.path.join(path, "sol")):
                if other_filename.endswith('.h') or \
                        other_filename.endswith('lib.pas'):
                    params["managers"].append(
                        Manager(
                            self.file_cacher.put_file(
                                path=os.path.join(path, "sol", other_filename),
                                description="Manager %s for task %s" %
                                (other_filename, name)),
                            other_filename).export_to_dict())
            compilation_param = "grader"
        else:
            compilation_param = "alone"

        # If there is cor/correttore, then, presuming that the task
        # type is Batch or OutputOnly, we retrieve the comparator
        if os.path.exists(os.path.join(path, "cor", "correttore")):
            params["managers"] += [
                Manager(
                    self.file_cacher.put_file(
                        path=os.path.join(path, "cor", "correttore"),
                        description="Manager for task %s" % (name)),
                    "checker").export_to_dict()
            ]
            evaluation_parameter = "comparator"
        else:
            evaluation_parameter = "diff"

        # Detect subtasks by checking GEN
        gen_filename = os.path.join(path, 'gen', 'GEN')
        with open(gen_filename) as gen_file:
            subtasks = []
            testcases = 0
            points = None
            for line in gen_file:
                line = line.strip()
                splitted = line.split('#', 1)

                if len(splitted) == 1:
                    # No comment on this line: it is either a testcase
                    # or just blank.
                    if splitted[0] != '':
                        testcases += 1

                else:
                    testcase, comment = splitted
                    testcase_detected = False
                    subtask_detected = False
                    if testcase.strip() != '':
                        testcase_detected = True
                    comment = comment.strip()
                    if comment.startswith('ST:'):
                        subtask_detected = True

                    if testcase_detected and subtask_detected:
                        raise Exception("A testcase and a subtask "
                                        "cannot be declared on the "
                                        "same line")

                    # This line represents a testcase and contains a
                    # comment, but the comment doesn't start a new
                    # subtask
                    if testcase_detected:
                        testcases += 1

                    # This line starts a new subtask
                    if subtask_detected:
                        # Close the previous subtask
                        if points is None:
                            assert (testcases == 0)
                        else:
                            subtasks.append([points, testcases])
                        # Open the new one
                        testcases = 0
                        points = int(comment[3:].strip())

            # Close the last subtask (if no subtasks were defined,
            # just fall back to Sum scoring).
            if points is None:
                params["score_type"] = "Sum"
                input_value = 0.0
                if testcases != 0:
                    input_value = 100.0 / float(testcases)
                params["score_parameters"] = str(input_value)
            else:
                subtasks.append([points, testcases])
                assert (100 == sum([int(st[0]) for st in subtasks]))
                assert (int(conf['n_input']) == sum(
                    [int(st[1]) for st in subtasks]))
                params["score_type"] = "GroupMin"
                params["score_parameters"] = str(subtasks)

        # If output_only is set, then the task type is OutputOnly
        if conf.get('output_only', False):
            params["task_type"] = "OutputOnly"
            params["time_limit"] = None
            params["memory_limit"] = None
            params["task_type_parameters"] = '["%s"]' % (evaluation_parameter)
            params["submission_format"] = [
                SubmissionFormatElement("output_%03d.txt" %
                                        (i)).export_to_dict()
                for i in xrange(int(conf["n_input"]))
            ]

        # If there is cor/manager, then the task type is Communication
        elif os.path.exists(os.path.join(path, "cor", "manager")):
            params["task_type"] = "Communication"
            params["task_type_parameters"] = '[]'
            params["managers"] += [
                Manager(
                    self.file_cacher.put_file(
                        path=os.path.join(path, "cor", "manager"),
                        description="Manager for task %s" % (name)),
                    "manager").export_to_dict()
            ]
            for lang in Submission.LANGUAGES:
                stub_name = os.path.join(path, "sol", "stub.%s" % lang)
                if os.path.exists(stub_name):
                    params["managers"].append(
                        Manager(
                            self.file_cacher.put_file(
                                path=stub_name,
                                description="Stub for task %s and language %s"
                                % (name, lang)),
                            "stub.%s" % lang).export_to_dict())
                else:
                    logger.warning("Stub for language %s not found." % lang)

        # Otherwise, the task type is Batch
        else:
            params["task_type"] = "Batch"
            params["task_type_parameters"] = \
                '["%s", ["%s", "%s"], "%s"]' % \
                (compilation_param, infile_param, outfile_param,
                 evaluation_parameter)

        public_testcases = conf.get("risultati", "").strip()
        if public_testcases != "":
            public_testcases = [
                int(x.strip()) for x in public_testcases.split(",")
            ]
        else:
            public_testcases = []
        params["testcases"] = []
        for i in xrange(int(conf["n_input"])):
            _input = os.path.join(path, "input", "input%d.txt" % i)
            output = os.path.join(path, "output", "output%d.txt" % i)
            input_digest = self.file_cacher.put_file(
                path=_input, description="Input %d for task %s" % (i, name))
            output_digest = self.file_cacher.put_file(
                path=output, description="Output %d for task %s" % (i, name))
            params["testcases"].append(
                Testcase(input_digest,
                         output_digest,
                         public=(i in public_testcases)).export_to_dict())
            if params["task_type"] == "OutputOnly":
                params["attachments"].append(
                    Attachment(input_digest,
                               "input_%03d.txt" % (i)).export_to_dict())
        params["token_initial"] = conf.get("token_initial", None)
        params["token_max"] = conf.get("token_max", None)
        params["token_total"] = conf.get("token_total", None)
        params["token_min_interval"] = conf.get("token_min_interval", 0)
        params["token_gen_time"] = conf.get("token_gen_time", 0)
        params["token_gen_number"] = conf.get("token_gen_number", 0)

        params["max_submission_number"] = \
            conf.get("max_submission_number", None)
        params["max_user_test_number"] = \
            conf.get("max_user_test_number", None)
        params["min_submission_interval"] = \
            conf.get("min_submission_interval", None)
        params["min_user_test_interval"] = \
            conf.get("min_user_test_interval", None)

        logger.info("Task parameters loaded.")

        return params
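The subtask detection above reads gen/GEN line by line: a non-blank line without a comment (or with a non-ST comment) counts as a testcase, while a comment starting with ST: closes the current subtask and opens a new one with the given points. A simplified, self-contained sketch of the same parser, working on a list of lines and skipping the error checks and assertions of the original:

def parse_gen(lines):
    """Return a list of [points, n_testcases] groups, or None when
    GEN defines no subtasks (plain Sum scoring applies)."""
    subtasks = []
    testcases = 0
    points = None
    for line in lines:
        testcase, _, comment = line.strip().partition("#")
        if testcase.strip():
            testcases += 1
        comment = comment.strip()
        if comment.startswith("ST:"):
            if points is not None:
                subtasks.append([points, testcases])
            testcases = 0
            points = int(comment[3:].strip())
    if points is None:
        return None
    subtasks.append([points, testcases])
    return subtasks

print(parse_gen(["# ST: 0", "1", "# ST: 40", "2", "3", "# ST: 60", "4"]))
# [[0, 1], [40, 2], [60, 1]]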