Example #1
    def get(self, task_id):
        """Serve the statement of the given task as a PDF file."""

        task = Task.get_from_id(task_id, self.sql_session)
        if task is None or task.contest != self.contest:
            raise tornado.web.HTTPError(404)
        statement, name = task.statement, task.name
        self.sql_session.close()

        self.fetch(statement, "application/pdf", "%s.pdf" % name)
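
For reference, a minimal sketch of the fetch() helper this handler relies on. The real base-handler implementation is not shown in these examples, so the FileCacher call below is an assumption (file_cacher is used the same way in Example #7):

    def fetch(self, digest, content_type, filename):
        """Stream the file identified by digest to the browser."""
        self.set_header("Content-Type", content_type)
        self.set_header("Content-Disposition",
                        "attachment; filename=\"%s\"" % filename)
        # Assumed FileCacher API; the actual retrieval may differ.
        data = self.application.service.file_cacher.get_file(digest).read()
        self.write(data)
        self.finish()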
Example #2
    def dataset_updated(self, task_id):
        """This function updates RWS with new data about a task. It should be
        called after the live dataset of a task is changed.

        task_id (int): id of the task whose dataset has changed.

        """
        with SessionGen(commit=False) as session:
            task = Task.get_from_id(task_id, session)
            dataset_id = task.active_dataset_id

        logger.info("Dataset update for task %d (dataset now is %d)." % (
            task_id, dataset_id))

        submission_ids = get_submissions(self.contest_id, task_id=task_id)

        subchanges = []
        with SessionGen(commit=False) as session:
            for submission_id in submission_ids:
                submission = Submission.get_from_id(submission_id, session)
                submission_result = SubmissionResult.get_from_id(
                    (submission_id, dataset_id), session)

                if submission_result is None:
                    # Not yet compiled, evaluated or scored.
                    score = None
                    ranking_score_details = None
                else:
                    score = submission_result.score
                    try:
                        ranking_score_details = json.loads(
                            submission_result.ranking_score_details)
                    except (ValueError, TypeError):
                        # It may be blank.
                        ranking_score_details = None

                # Data to send to remote rankings.
                subchange_id = "%s%ss" % \
                    (int(make_timestamp(submission.timestamp)),
                     submission_id)
                subchange_put_data = {
                    "submission": encode_id(submission_id),
                    "time": int(make_timestamp(submission.timestamp))}
                # We're sending the unrounded score to RWS.
                if score is not None:
                    subchange_put_data["score"] = score
                if ranking_score_details is not None:
                    subchange_put_data["extra"] = ranking_score_details
                subchanges.append((subchange_id, subchange_put_data))

        # Adding operations to the queue.
        with self.operation_queue_lock:
            for ranking in self.rankings:
                for subchange_id, data in subchanges:
                    self.subchange_queue.setdefault(
                        ranking,
                        dict())[encode_id(subchange_id)] = data
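
For context, each queued subchange is later delivered to the Ranking Web Server over HTTP. A minimal sketch of a consumer, assuming a PUT to a subchanges/ resource; the real ScoringService's endpoint names, authentication and retry logic are omitted:

import json
import urllib2

def flush_subchanges(ranking_url, subchange_queue):
    """Drain one ranking's queue (sketch only; endpoint assumed)."""
    for subchange_id, data in subchange_queue.items():
        request = urllib2.Request(
            ranking_url + "subchanges/" + subchange_id,
            json.dumps(data),
            {"Content-Type": "application/json"})
        request.get_method = lambda: "PUT"  # assumed PUT semantics
        urllib2.urlopen(request)
        del subchange_queue[subchange_id]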
Example #3
    def get(self, task_id):
        """Show the task page, with the current user's submissions."""

        self.r_params["task"] = Task.get_from_id(task_id, self.sql_session)
        if self.r_params["task"] is None or \
            self.r_params["task"].contest != self.contest:
            raise tornado.web.HTTPError(404)

        self.r_params["submissions"] = self.sql_session.query(Submission)\
            .filter_by(user=self.current_user)\
            .filter_by(task=self.r_params["task"]).all()

        self.render("task.html", **self.r_params)
Example #4
def extract_complexity(task_id, file_lengther=None):
    """Extract the complexity of all submissions of the task. The
    results are stored in a file task_<id>.info

    task_id (int): the id of the task we are interested in.
    file_lengther (class): a file-like class that tells the dimension
                           of the input (see the example above for how
                           to write one).

    return (int): 0 if the operation was successful, -1 otherwise.

    """
    with SessionGen() as session:
        task = Task.get_from_id(task_id, session)
        if task is None:
            return -1

        # Extract the length of each testcase.
        file_cacher = FileCacher()
        testcases_lengths = [
            file_length(testcase.input, file_cacher, file_lengther)
            for testcase in task.testcases
        ]
        file_cacher.purge_cache()

        # Compute the complexity of the solutions.
        with open("task_%s.info" % task_id, "wt") as info:
            for submission in task.contest.get_submissions():
                if submission.task_id == task_id and \
                       submission.evaluated():
                    print submission.user.username
                    result = extract_complexity_submission(
                        testcases_lengths, submission)
                    if result[1] is None:
                        continue
                    info.write("Submission: %s" % submission.id)
                    info.write(" - user: %15s" % submission.user.username)
                    info.write(" - task: %s" % task.name)
                    if result[0] is not None:
                        info.write(" - score: %6.2lf" % result[0])
                    info.write(" - complexity: %20s" %
                               complexity_to_string(result[1]))
                    if result[2] is not None:
                        info.write(" - confidence %5.1lf" % result[2])
                    info.write("\n")

    return 0
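
The docstring's "example above" is not reproduced in this listing. A minimal sketch of what a file_lengther could look like, assuming file_length() simply write()s the input through it and reads back a length attribute:

class FileLengther(object):
    """File-like object that discards the data and only counts bytes."""

    def __init__(self):
        self.length = 0

    def write(self, data):
        self.length += len(data)

    def close(self):
        pass

It would then be passed as extract_complexity(task_id, file_lengther=FileLengther), matching the "(class)" annotation in the docstring.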
Example #5
    def dataset_updated(self, task_id):
        """This function updates RWS with new data about a task. It should be
        called after the live dataset of a task is changed.

        task_id (int): id of the task whose dataset has changed.

        """
        with SessionGen(commit=False) as session:
            task = Task.get_from_id(task_id, session)
            dataset = task.active_dataset

            logger.info("Dataset update for task %d (dataset now is %d)." % (
                task.id, dataset.id))

            for submission in task.submissions:
                # Update RWS.
                self.rankings_send_score(submission)
Example #6
def extract_complexity(task_id, file_lengther=None):
    """Extract the complexity of all submissions of the task. The
    results are stored in a file task_<id>.info

    task_id (int): the id of the task we are interested in.
    file_lengther (class): a file-like class that tells the dimension
                           of the input (see the example above for how
                           to write one).

    return (int): 0 if the operation was successful, -1 otherwise.

    """
    with SessionGen() as session:
        task = Task.get_from_id(task_id, session)
        if task is None:
            return -1

        # Extract the length of each testcase.
        file_cacher = FileCacher()
        testcases_lengths = [file_length(testcase.input,
                                         file_cacher, file_lengther)
                             for testcase in task.testcases]
        file_cacher.purge_cache()

        # Compute the complexity of the solutions.
        with open("task_%s.info" % task_id, "wt") as info:
            for submission in task.contest.get_submissions():
                if submission.task_id == task_id and \
                       submission.evaluated():
                    print submission.user.username
                    result = extract_complexity_submission(testcases_lengths,
                                                           submission)
                    if result[1] is None:
                        continue
                    info.write("Submission: %s" % submission.id)
                    info.write(" - user: %15s" % submission.user.username)
                    info.write(" - task: %s" % task.name)
                    if result[0] is not None:
                        info.write(" - score: %6.2lf" % result[0])
                    info.write(" - complexity: %20s" %
                               complexity_to_string(result[1]))
                    if result[2] is not None:
                        info.write(" - confidence %5.1lf" % result[2])
                    info.write("\n")

    return 0
Example #7
    def post(self, task_id):
        """Receive, validate and store a new submission for the
        given task."""

        self.timestamp = self.r_params["timestamp"]

        self.task_id = task_id
        self.task = Task.get_from_id(task_id, self.sql_session)

        if self.current_user is None or self.task is None or self.task.contest != self.contest:
            raise tornado.web.HTTPError(404)

        # Enforce minimum time between submissions for the same task.
        last_submission = (
            self.sql_session.query(Submission)
            .filter_by(task_id=self.task.id)
            .filter_by(user_id=self.current_user.id)
            .order_by(Submission.timestamp.desc())
            .first()
        )
        if last_submission is not None and self.timestamp - last_submission.timestamp < config.min_submission_interval:
            self.application.service.add_notification(
                self.current_user.username,
                int(time.time()),
                self._("Submissions too frequent!"),
                self._("For each task, you can submit " "again after %s seconds from last submission.")
                % config.min_submission_interval,
            )
            self.redirect("/tasks/%s" % encrypt_number(self.task.id))
            return

        # Ensure that the user did not submit multiple files with the
        # same name.
        if any(len(x) != 1 for x in self.request.files.values()):
            self.application.service.add_notification(
                self.current_user.username,
                int(time.time()),
                self._("Invalid submission format!"),
                self._("Please select the correct files."),
            )
            self.redirect("/tasks/%s" % encrypt_number(self.task.id))
            return

        # If the user submitted an archive, extract it and use content
        # as request.files.
        if len(self.request.files) == 1 and self.request.files.keys()[0] == "submission":
            archive_data = self.request.files["submission"][0]
            del self.request.files["submission"]

            # Extract the files from the archive.
            # Note: mkstemp's first positional argument is a suffix.
            temp_archive_file, temp_archive_filename = tempfile.mkstemp(dir=config.temp_dir)
            with os.fdopen(temp_archive_file, "wb") as temp_archive_file:
                temp_archive_file.write(archive_data["body"])

            archive_contents = extract_archive(temp_archive_filename, archive_data["filename"])

            if archive_contents is None:
                self.application.service.add_notification(
                    self.current_user.username,
                    int(time.time()),
                    self._("Invalid archive format!"),
                    self._("The submitted archive could not be opened."),
                )
                self.redirect("/tasks/%s" % encrypt_number(self.task.id))
                return

            for item in archive_contents:
                self.request.files[item["filename"]] = [item]

        # This ensures that the user sent one file for every name in
        # the submission format, and no more. Fewer is acceptable if
        # the task type says so.
        task_type = get_task_type(task=self.task)
        required = set([x.filename for x in self.task.submission_format])
        provided = set(self.request.files.keys())
        if not (required == provided or (task_type.ALLOW_PARTIAL_SUBMISSION and required.issuperset(provided))):
            self.application.service.add_notification(
                self.current_user.username,
                int(time.time()),
                self._("Invalid submission format!"),
                self._("Please select the correct files."),
            )
            self.redirect("/tasks/%s" % encrypt_number(self.task.id))
            return

        # Add submitted files. After this, self.files is a dictionary
        # indexed by *our* filenames (something like "output01.txt" or
        # "taskname.%l"), whose values are pairs
        # (user_assigned_filename, content).
        self.files = {}
        for uploaded, data in self.request.files.iteritems():
            self.files[uploaded] = (data[0]["filename"], data[0]["body"])

        # If we allow partial submissions, we implicitly recover the
        # non-submitted files from the previous submission and put
        # them in self.file_digests (i.e., as if they had already
        # been sent to the FS).
        self.submission_lang = None
        self.file_digests = {}
        self.retrieved = 0
        if task_type.ALLOW_PARTIAL_SUBMISSION and last_submission is not None:
            for filename in required.difference(provided):
                if filename in last_submission.files:
                    # If we retrieve a language-dependent file from
                    # the last submission, we take note that the
                    # language must be the same.
                    if "%l" in filename:
                        self.submission_lang = last_submission.language
                    self.file_digests[filename] = last_submission.files[filename].digest
                    self.retrieved += 1

        # We need to ensure that every time we have a ".%l" in our
        # filenames, the user has one amongst ".cpp", ".c", or ".pas",
        # and that all of these are the same (i.e., no mixed-language
        # submissions).
        def which_language(user_filename):
            """Determine the language of user_filename from its
            extension.

            user_filename (string): the file to test.
            return (string): the extension of user_filename, or None
                             if it is not a recognized language.

            """
            extension = os.path.splitext(user_filename)[1]
            try:
                return Submission.LANGUAGES_MAP[extension]
            except KeyError:
                return None

        error = None
        for our_filename in self.files:
            user_filename = self.files[our_filename][0]
            if our_filename.find(".%l") != -1:
                lang = which_language(user_filename)
                if lang is None:
                    error = self._("Cannot recognize submission's language.")
                    break
                elif self.submission_lang is not None and self.submission_lang != lang:
                    error = self._("All sources must be in the same language.")
                    break
                else:
                    self.submission_lang = lang
        if error is not None:
            self.application.service.add_notification(
                self.current_user.username, int(time.time()), self._("Invalid submission!"), error
            )
            self.redirect("/tasks/%s" % encrypt_number(self.task.id))
            return

        # Check if submitted files are small enough.
        if any([len(f[1]) > config.max_submission_length for f in self.files.values()]):
            self.application.service.add_notification(
                self.current_user.username,
                int(time.time()),
                self._("Submission too big!"),
                self._("Each files must be at most %d bytes long.") % config.max_submission_length,
            )
            self.redirect("/tasks/%s" % encrypt_number(self.task.id))
            return

        # All checks done, submission accepted.

        # Attempt to store the submission locally to be able to
        # recover a failure.
        self.local_copy_saved = False

        if config.submit_local_copy:
            try:
                path = os.path.join(
                    config.submit_local_copy_path.replace("%s", config.data_dir), self.current_user.username
                )
                if not os.path.exists(path):
                    os.makedirs(path)
                with codecs.open(os.path.join(path, str(self.timestamp)), "w", "utf-8") as file_:
                    pickle.dump((self.contest.id, self.current_user.id, self.task, self.files), file_)
                self.local_copy_saved = True
            except Exception as error:
                logger.error("Submission local copy failed - %s" % traceback.format_exc())
        self.username = self.current_user.username
        self.sql_session.close()

        # We now have to send all the files to the destination...
        try:
            for filename in self.files:
                digest = self.application.service.file_cacher.put_file(
                    description="Submission file %s sent by %s at %d." % (filename, self.username, self.timestamp),
                    binary_data=self.files[filename][1],
                )
                self.file_digests[filename] = digest

        # In case of error, the server aborts the submission
        except Exception as error:
            logger.error("Storage failed! %s" % error)
            if self.local_copy_saved:
                message = "In case of emergency, this server has a local copy."
            else:
                message = "No local copy stored! Your submission was ignored."
            self.application.service.add_notification(
                self.username, int(time.time()), self._("Submission storage failed!"), self._(message)
            )
            self.redirect("/tasks/%s" % encrypt_number(self.task_id))

        # All the files are stored, ready to submit!
        self.sql_session = Session()
        current_user = self.get_current_user()
        self.task = Task.get_from_id(self.task_id, self.sql_session)
        logger.info("All files stored for submission sent by %s" % self.username)
        submission = Submission(
            user=current_user, task=self.task, timestamp=self.timestamp, files={}, language=self.submission_lang
        )

        for filename, digest in self.file_digests.items():
            self.sql_session.add(File(digest, filename, submission))
        self.sql_session.add(submission)
        self.sql_session.commit()
        self.r_params["submission"] = submission
        self.r_params["warned"] = False
        self.application.service.evaluation_service.new_submission(submission_id=submission.id)
        self.application.service.add_notification(
            self.username,
            int(time.time()),
            self._("Submission received"),
            self._("Your submission has been received " "and is currently being evaluated."),
        )
        # The argument (encrypted submission id) is not used by CWS
        # (nor does it disclose information to the user), but it is
        # useful for automatic testing to obtain the submission id.
        self.redirect("/tasks/%s?%s" % (encrypt_number(self.task.id), encrypt_number(submission.id)))
Example #8
    def get_task(self, name):
        """See docstring in class Loader.

        """
        try:
            num = self.tasks_order[name]

        # Here we expose an undocumented behavior, so that cmsMake can
        # import a task even without the whole contest; this is not to
        # be relied upon in general
        except AttributeError:
            num = 1

        conf = yaml.safe_load(
            io.open(os.path.join(self.path, name + ".yaml"),
                    "rt", encoding="utf-8"))
        task_path = os.path.join(self.path, name)

        logger.info("Loading parameters for task %s." % name)

        # Here we update the time of the last import
        touch(os.path.join(task_path, ".itime"))
        # If this file is not deleted, then the import failed
        touch(os.path.join(task_path, ".import_error"))

        args = {}

        args["num"] = num
        load(conf, args, ["name", "nome_breve"])
        load(conf, args, ["title", "nome"])

        assert name == args["name"]

        if args["name"] == args["title"]:
            logger.warning("Short name equals long name (title). "
                           "Please check.")

        primary_language = load(conf, None, "primary_language")
        if primary_language is None:
            primary_language = 'it'
        paths = [os.path.join(task_path, "statement", "statement.pdf"),
                 os.path.join(task_path, "testo", "testo.pdf")]
        for path in paths:
            if os.path.exists(path):
                digest = self.file_cacher.put_file(
                    path=path,
                    description="Statement for task %s (lang: %s)"
                    % (name, primary_language))
                break
        else:
            logger.error("Couldn't find any task statement, aborting...")
            sys.exit(1)
        args["statements"] = [Statement(primary_language, digest)]

        args["primary_statements"] = '["%s"]' % (primary_language)

        args["attachments"] = []  # FIXME Use auxiliary

        args["submission_format"] = [
            SubmissionFormatElement("%s.%%l" % name)]

        load(conf, args, "token_initial")
        load(conf, args, "token_max")
        load(conf, args, "token_total")
        load(conf, args, "token_min_interval", conv=make_timedelta)
        load(conf, args, "token_gen_time", conv=make_timedelta)
        load(conf, args, "token_gen_number")

        load(conf, args, "max_submission_number")
        load(conf, args, "max_user_test_number")
        load(conf, args, "min_submission_interval", conv=make_timedelta)
        load(conf, args, "min_user_test_interval", conv=make_timedelta)

        task = Task(**args)

        args = {}
        args["task"] = task
        args["description"] = conf.get("version", "Default")
        args["autojudge"] = False

        load(conf, args, ["time_limit", "timeout"], conv=float)
        load(conf, args, ["memory_limit", "memlimit"])

        # Builds the parameters that depend on the task type
        args["managers"] = []
        infile_param = conf.get("infile", "input.txt")
        outfile_param = conf.get("outfile", "output.txt")

        # If there is sol/grader.%l for some language %l, then,
        # presuming that the task type is Batch, we retrieve graders
        # in the form sol/grader.%l
        graders = False
        for lang in LANGUAGES:
            if os.path.exists(os.path.join(
                    task_path, "sol", "grader.%s" % lang)):
                graders = True
                break
        if graders:
            # Read grader for each language
            for lang in LANGUAGES:
                grader_filename = os.path.join(
                    task_path, "sol", "grader.%s" % lang)
                if os.path.exists(grader_filename):
                    digest = self.file_cacher.put_file(
                        path=grader_filename,
                        description="Grader for task %s and language %s" %
                                    (name, lang))
                    args["managers"] += [
                        Manager("grader.%s" % lang, digest)]
                else:
                    logger.error("Grader for language %s not found " % lang)
            # Read managers with other known file extensions
            for other_filename in os.listdir(os.path.join(task_path, "sol")):
                if other_filename.endswith('.h') or \
                        other_filename.endswith('lib.pas'):
                    digest = self.file_cacher.put_file(
                        path=os.path.join(task_path, "sol", other_filename),
                        description="Manager %s for task %s" %
                                    (other_filename, name))
                    args["managers"] += [
                        Manager(other_filename, digest)]
            compilation_param = "grader"
        else:
            compilation_param = "alone"

        # If there is check/checker (or equivalent), then, presuming
        # that the task type is Batch or OutputOnly, we retrieve the
        # comparator
        paths = [os.path.join(task_path, "check", "checker"),
                 os.path.join(task_path, "cor", "correttore")]
        for path in paths:
            if os.path.exists(path):
                digest = self.file_cacher.put_file(
                    path=path,
                    description="Manager for task %s" % name)
                args["managers"] += [
                    Manager("checker", digest)]
                evaluation_param = "comparator"
                break
        else:
            evaluation_param = "diff"

        # Detect subtasks by checking GEN
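        # (A sketch of the assumed GEN format, matching the parser
        # below: a non-blank, non-comment line defines a testcase,
        # while a comment line starting with "ST:" opens a new subtask
        # worth that many points, e.g.
        #     # ST: 40
        #     10 100
        #     # ST: 60
        #     1000 100000
        # A testcase and an "ST:" marker on the same line are
        # rejected.)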
        gen_filename = os.path.join(task_path, 'gen', 'GEN')
        try:
            with io.open(gen_filename, "rt", encoding="utf-8") as gen_file:
                subtasks = []
                testcases = 0
                points = None
                for line in gen_file:
                    line = line.strip()
                    splitted = line.split('#', 1)

                    if len(splitted) == 1:
                        # If not blank, this line represents a
                        # testcase.
                        if splitted[0] != '':
                            testcases += 1

                    else:
                        testcase, comment = splitted
                        testcase_detected = False
                        subtask_detected = False
                        if testcase.strip() != '':
                            testcase_detected = True
                        comment = comment.strip()
                        if comment.startswith('ST:'):
                            subtask_detected = True

                        if testcase_detected and subtask_detected:
                            raise Exception("No testcase and subtask in the"
                                            " same line allowed")

                        # This line represents a testcase and contains a
                        # comment, but the comment doesn't start a new
                        # subtask
                        if testcase_detected:
                            testcases += 1

                        # This line starts a new subtask
                        if subtask_detected:
                            # Close the previous subtask
                            if points is None:
                                assert(testcases == 0)
                            else:
                                subtasks.append([points, testcases])
                            # Open the new one
                            testcases = 0
                            points = int(comment[3:].strip())

                # Close last subtask (if no subtasks were defined, just
                # fallback to Sum)
                if points is None:
                    args["score_type"] = "Sum"
                    total_value = float(conf.get("total_value", 100.0))
                    input_value = 0.0
                    if int(conf['n_input']) != 0:
                        input_value = total_value / int(conf['n_input'])
                    args["score_type_parameters"] = str(input_value)
                else:
                    subtasks.append([points, testcases])
                    assert(100 == sum([int(st[0]) for st in subtasks]))
                    assert(int(conf['n_input']) ==
                           sum([int(st[1]) for st in subtasks]))
                    args["score_type"] = "GroupMin"
                    args["score_type_parameters"] = str(subtasks)

        # If gen/GEN doesn't exist, just fallback to Sum
        except IOError:
            args["score_type"] = "Sum"
            total_value = float(conf.get("total_value", 100.0))
            input_value = 0.0
            if int(conf['n_input']) != 0:
                input_value = total_value / int(conf['n_input'])
            args["score_type_parameters"] = str(input_value)

        # If output_only is set, then the task type is OutputOnly
        if conf.get('output_only', False):
            args["task_type"] = "OutputOnly"
            args["time_limit"] = None
            args["memory_limit"] = None
            args["task_type_parameters"] = '["%s"]' % evaluation_param
            task.submission_format = [
                SubmissionFormatElement("output_%03d.txt" % i)
                for i in xrange(int(conf["n_input"]))]

        # If there is check/manager (or equivalent), then the task
        # type is Communication
        else:
            paths = [os.path.join(task_path, "check", "manager"),
                     os.path.join(task_path, "cor", "manager")]
            for path in paths:
                if os.path.exists(path):
                    args["task_type"] = "Communication"
                    args["task_type_parameters"] = '[]'
                    digest = self.file_cacher.put_file(
                        path=path,
                        description="Manager for task %s" % name)
                    args["managers"] += [
                        Manager("manager", digest)]
                    for lang in LANGUAGES:
                        stub_name = os.path.join(
                            task_path, "sol", "stub.%s" % lang)
                        if os.path.exists(stub_name):
                            digest = self.file_cacher.put_file(
                                path=stub_name,
                                description="Stub for task %s and "
                                "language %s" % (name, lang))
                            args["managers"] += [
                                Manager("stub.%s" % lang, digest)]
                        else:
                            logger.error("Stub for language %s not "
                                         "found." % lang)
                    break

            # Otherwise, the task type is Batch
            else:
                args["task_type"] = "Batch"
                args["task_type_parameters"] = \
                    '["%s", ["%s", "%s"], "%s"]' % \
                    (compilation_param, infile_param, outfile_param,
                     evaluation_param)

        args["testcases"] = []
        for i in xrange(int(conf["n_input"])):
            input_digest = self.file_cacher.put_file(
                path=os.path.join(task_path, "input", "input%d.txt" % i),
                description="Input %d for task %s" % (i, name))
            output_digest = self.file_cacher.put_file(
                path=os.path.join(task_path, "output", "output%d.txt" % i),
                description="Output %d for task %s" % (i, name))
            args["testcases"] += [
                Testcase("%03d" % i, False, input_digest, output_digest)]
            if args["task_type"] == "OutputOnly":
                task.attachments += [
                    Attachment("input_%03d.txt" % i, input_digest)]
        public_testcases = load(conf, None, ["public_testcases", "risultati"],
                                conv=lambda x: "" if x is None else x)
        if public_testcases != "":
            for x in public_testcases.split(","):
                args["testcases"][int(x.strip())].public = True

        dataset = Dataset(**args)
        task.active_dataset = dataset

        # Import was successful
        os.remove(os.path.join(task_path, ".import_error"))

        logger.info("Task parameters loaded.")

        return task
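
A note on the load() helper used throughout this loader: its implementation is not part of these examples. Below is a minimal sketch of the assumed semantics (copy a conf value into args under the first, canonical key, trying legacy aliases in order, with an optional conversion; with args=None, return the value instead), matching calls like load(conf, args, ["name", "nome_breve"]) and load(conf, None, "primary_language"). Example #9 below uses an older signature with separate source and destination keys:

def load(conf, args, keys, conv=lambda x: x):
    """Sketch only; not the actual cmscontrib implementation."""
    if isinstance(keys, basestring):
        keys = [keys]
    for key in keys:
        if key in conf:
            value = conv(conf[key])
            if args is None:
                return value
            # Store under the first (canonical) key.
            args[keys[0]] = value
            return value
    return None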
Example #9
    def get_task(self, conf):
        """Produce a Task object.

        Given an element of the second list returned by get_contest,
        construct a full Task object (with all its dependencies) and
        return it. Access the data on the filesystem if needed.

        return (Task): the Task object corresponding to the given dict.

        """

        name = conf["name"]
        num = conf["num"]

        conf = yaml.safe_load(
            io.open(os.path.join(self.path, name + ".yaml"),
                    "rt", encoding="utf-8"))
        task_path = os.path.join(self.path, name)

        logger.info("Loading parameters for task %s." % name)

        args = {}

        args["num"] = num
        load(conf, args, "nome_breve", "name")
        load(conf, args, "nome", "title")

        assert name == args["name"]

        if args["name"] == args["title"]:
            logger.warning("Short name equals long name (title). "
                           "Please check.")

        digest = self.file_cacher.put_file(
            path=os.path.join(task_path, "testo", "testo.pdf"),
            description="Statement for task %s (lang: it)" % name)
        args["statements"] = [Statement("it", digest)]

        args["primary_statements"] = '["it"]'

        args["attachments"] = []  # FIXME Use auxiliary

        args["submission_format"] = [
            SubmissionFormatElement("%s.%%l" % name)]

        load(conf, args, "token_initial")
        load(conf, args, "token_max")
        load(conf, args, "token_total")
        load(conf, args, "token_min_interval", conv=make_timedelta)
        load(conf, args, "token_gen_time", conv=make_timedelta)
        load(conf, args, "token_gen_number")

        load(conf, args, "max_submission_number")
        load(conf, args, "max_user_test_number")
        load(conf, args, "min_submission_interval", conv=make_timedelta)
        load(conf, args, "min_user_test_interval", conv=make_timedelta)

        task = Task(**args)

        args = {}
        args["task"] = task
        args["description"] = conf.get("version", "Default")
        args["autojudge"] = False

        load(conf, args, "timeout", "time_limit", conv=float)
        load(conf, args, "memlimit", "memory_limit")

        # Builds the parameters that depend on the task type
        args["managers"] = []
        infile_param = conf.get("infile", "input.txt")
        outfile_param = conf.get("outfile", "output.txt")

        # If there is sol/grader.%l for some language %l, then,
        # presuming that the task type is Batch, we retrieve graders
        # in the form sol/grader.%l
        graders = False
        for lang in LANGUAGES:
            if os.path.exists(os.path.join(
                    task_path, "sol", "grader.%s" % lang)):
                graders = True
                break
        if graders:
            # Read grader for each language
            for lang in LANGUAGES:
                grader_filename = os.path.join(
                    task_path, "sol", "grader.%s" % lang)
                if os.path.exists(grader_filename):
                    digest = self.file_cacher.put_file(
                        path=grader_filename,
                        description="Grader for task %s and language %s" %
                                    (name, lang))
                    args["managers"] += [
                        Manager("grader.%s" % lang, digest)]
                else:
                    logger.error("Grader for language %s not found " % lang)
            # Read managers with other known file extensions
            for other_filename in os.listdir(os.path.join(task_path, "sol")):
                if other_filename.endswith('.h') or \
                        other_filename.endswith('lib.pas'):
                    digest = self.file_cacher.put_file(
                        path=os.path.join(task_path, "sol", other_filename),
                        description="Manager %s for task %s" %
                                    (other_filename, name))
                    args["managers"] += [
                        Manager(other_filename, digest)]
            compilation_param = "grader"
        else:
            compilation_param = "alone"

        # If there is cor/correttore, then, presuming that the task
        # type is Batch or OutputOnly, we retrieve the comparator
        if os.path.exists(os.path.join(task_path, "cor", "correttore")):
            digest = self.file_cacher.put_file(
                path=os.path.join(task_path, "cor", "correttore"),
                description="Manager for task %s" % name)
            args["managers"] += [
                Manager("checker", digest)]
            evaluation_param = "comparator"
        else:
            evaluation_param = "diff"

        # Detect subtasks by checking GEN
        gen_filename = os.path.join(task_path, 'gen', 'GEN')
        try:
            with io.open(gen_filename, "rt", encoding="utf-8") as gen_file:
                subtasks = []
                testcases = 0
                points = None
                for line in gen_file:
                    line = line.strip()
                    splitted = line.split('#', 1)

                    if len(splitted) == 1:
                        # If not blank, this line represents a
                        # testcase.
                        if splitted[0] != '':
                            testcases += 1

                    else:
                        testcase, comment = splitted
                        testcase_detected = False
                        subtask_detected = False
                        if testcase.strip() != '':
                            testcase_detected = True
                        comment = comment.strip()
                        if comment.startswith('ST:'):
                            subtask_detected = True

                        if testcase_detected and subtask_detected:
                            raise Exception("No testcase and subtask in the"
                                            " same line allowed")

                        # This line represents a testcase and contains a
                        # comment, but the comment doesn't start a new
                        # subtask
                        if testcase_detected:
                            testcases += 1

                        # This line starts a new subtask
                        if subtask_detected:
                            # Close the previous subtask
                            if points is None:
                                assert(testcases == 0)
                            else:
                                subtasks.append([points, testcases])
                            # Open the new one
                            testcases = 0
                            points = int(comment[3:].strip())

                # Close last subtask (if no subtasks were defined, just
                # fallback to Sum)
                if points is None:
                    args["score_type"] = "Sum"
                    total_value = float(conf.get("total_value", 100.0))
                    input_value = 0.0
                    if int(conf['n_input']) != 0:
                        input_value = total_value / int(conf['n_input'])
                    args["score_type_parameters"] = str(input_value)
                else:
                    subtasks.append([points, testcases])
                    assert(100 == sum([int(st[0]) for st in subtasks]))
                    assert(int(conf['n_input']) ==
                           sum([int(st[1]) for st in subtasks]))
                    args["score_type"] = "GroupMin"
                    args["score_type_parameters"] = str(subtasks)

        # If gen/GEN doesn't exist, just fallback to Sum
        except IOError:
            args["score_type"] = "Sum"
            total_value = float(conf.get("total_value", 100.0))
            input_value = 0.0
            if int(conf['n_input']) != 0:
                input_value = total_value / int(conf['n_input'])
            args["score_type_parameters"] = str(input_value)

        # If output_only is set, then the task type is OutputOnly
        if conf.get('output_only', False):
            args["task_type"] = "OutputOnly"
            args["time_limit"] = None
            args["memory_limit"] = None
            args["task_type_parameters"] = '["%s"]' % evaluation_param
            task.submission_format = [
                SubmissionFormatElement("output_%03d.txt" % i)
                for i in xrange(int(conf["n_input"]))]

        # If there is cor/manager, then the task type is Communication
        elif os.path.exists(os.path.join(task_path, "cor", "manager")):
            args["task_type"] = "Communication"
            args["task_type_parameters"] = '[]'
            digest = self.file_cacher.put_file(
                path=os.path.join(task_path, "cor", "manager"),
                description="Manager for task %s" % name)
            args["managers"] += [
                Manager("manager", digest)]
            for lang in LANGUAGES:
                stub_name = os.path.join(task_path, "sol", "stub.%s" % lang)
                if os.path.exists(stub_name):
                    digest = self.file_cacher.put_file(
                        path=stub_name,
                        description="Stub for task %s and language %s" %
                                    (name, lang))
                    args["managers"] += [
                        Manager("stub.%s" % lang, digest)]
                else:
                    logger.error("Stub for language %s not found." % lang)

        # Otherwise, the task type is Batch
        else:
            args["task_type"] = "Batch"
            args["task_type_parameters"] = \
                '["%s", ["%s", "%s"], "%s"]' % \
                (compilation_param, infile_param, outfile_param,
                 evaluation_param)

        args["testcases"] = []
        for i in xrange(int(conf["n_input"])):
            input_digest = self.file_cacher.put_file(
                path=os.path.join(task_path, "input", "input%d.txt" % i),
                description="Input %d for task %s" % (i, name))
            output_digest = self.file_cacher.put_file(
                path=os.path.join(task_path, "output", "output%d.txt" % i),
                description="Output %d for task %s" % (i, name))
            args["testcases"] += [
                Testcase(i, False, input_digest, output_digest)]
            if args["task_type"] == "OutputOnly":
                task.attachments += [
                    Attachment("input_%03d.txt" % i, input_digest)]
        public_testcases = conf.get("risultati", "").strip()
        if public_testcases != "":
            for x in public_testcases.split(","):
                args["testcases"][int(x.strip())].public = True

        dataset = Dataset(**args)
        task.active_dataset = dataset

        logger.info("Task parameters loaded.")

        return task
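
A sketch of how this loader is assumed to be driven, based only on the docstring ("the second list returned by get_contest"); the loader class name and the exact return shape of get_contest() are assumptions:

# Hypothetical driver: get_contest() is assumed to return the contest
# plus a list of per-task dicts carrying at least "name" and "num".
loader = YamlLoader(path, file_cacher)
contest, task_confs = loader.get_contest()[:2]
tasks = [loader.get_task(conf) for conf in task_confs]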