Example #1
    def __init__(self, path, contest_id, force):
        self.old_contest_id = contest_id
        self.force = force

        self.file_cacher = FileCacher()

        self.loader = YamlLoader(os.path.realpath(path), self.file_cacher)
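
For context, a minimal sketch of how a constructor like this is typically driven (the class name Reimporter comes from Example #5 below; the argparse wiring is an assumption, not the project's actual CLI):

import argparse

def main():
    # Hypothetical entry point; flag names are guesses based on the
    # constructor signature shown above.
    parser = argparse.ArgumentParser(description="Reimport a contest from disk.")
    parser.add_argument("path", help="directory that YamlLoader should read")
    parser.add_argument("contest_id", type=int,
                        help="id of the contest to update in the database")
    parser.add_argument("-f", "--force", action="store_true",
                        help="allow users/tasks to be deleted during the merge")
    args = parser.parse_args()
    Reimporter(args.path, args.contest_id, args.force).do_reimport()

if __name__ == "__main__":
    main()
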
Example #2
    def __init__(self, path, drop, test, zero_time, user_number):
        self.drop = drop
        self.test = test
        self.zero_time = zero_time
        self.user_number = user_number

        self.file_cacher = FileCacher()

        self.loader = YamlLoader(os.path.realpath(path), self.file_cacher)
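
The examples on this page construct FileCacher in three different ways; a side-by-side sketch (constructor arguments are copied from the snippets, the import path is an assumption):

from cms.db.FileCacher import FileCacher  # assumed import path

db_cacher = FileCacher()             # default backend: files go to the database
null_cacher = FileCacher(null=True)  # NullBackend: discards stored files (Examples #3, #6)
fs_cacher = FileCacher(              # FSBackend rooted at a local directory, with the
    path="/tmp/cmsMake",             # cache disabled (Examples #4, #8); the path here
    enabled=False)                   # is a placeholder
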
Example #3
def test_testcases(base_dir, soluzione, assume=None):
    global task, file_cacher

    # Use a FileCacher with a NullBackend to avoid filling the
    # database with junk.
    if file_cacher is None:
        file_cacher = FileCacher(null=True)

    # Load the task
    # TODO - This implies copying a lot of data to the FileCacher,
    # which is annoying if you have to do it continuously; it would be
    # better to use a persistent cache (although local, possibly
    # filesystem-based instead of database-based) and somehow detect
    # when the task has already been loaded
    if task is None:
        loader = YamlLoader(
            os.path.realpath(os.path.join(base_dir, "..")),
            file_cacher)
        # Normally we should import the contest first, but YamlLoader
        # accepts get_task() even without a previous get_contest() call.
        task = loader.get_task(os.path.split(os.path.realpath(base_dir))[1])

    # Prepare the EvaluationJob
    dataset = task.active_dataset
    digest = file_cacher.put_file_from_path(
        os.path.join(base_dir, soluzione),
        "Solution %s for task %s" % (soluzione, task.name))
    executables = {task.name: Executable(filename=task.name, digest=digest)}
    jobs = [(t, EvaluationJob(
        task_type=dataset.task_type,
        task_type_parameters=json.loads(dataset.task_type_parameters),
        managers=dict(dataset.managers),
        executables=executables,
        input=dataset.testcases[t].input, output=dataset.testcases[t].output,
        time_limit=dataset.time_limit,
        memory_limit=dataset.memory_limit)) for t in dataset.testcases]
    tasktype = get_task_type(dataset=dataset)

    ask_again = True
    last_status = "ok"
    status = "ok"
    stop = False
    info = []
    points = []
    comments = []
    tcnames = []
    for jobinfo in sorted(jobs):
        print jobinfo[0],
        sys.stdout.flush()
        job = jobinfo[1]
        # Skip the testcase if we decided to treat every remaining
        # one as a timeout.
        if stop:
            info.append("Time limit exceeded")
            points.append(0.0)
            comments.append("Timeout.")
            continue

        # Evaluate testcase
        last_status = status
        tasktype.evaluate(job, file_cacher)
        status = job.plus["exit_status"]
        info.append("Time: %5.3f   Wall: %5.3f   Memory: %s" %
                   (job.plus["execution_time"],
                    job.plus["execution_wall_clock_time"],
                    mem_human(job.plus["execution_memory"])))
        points.append(float(job.outcome))
        comments.append(format_status_text(job.text))
        tcnames.append(jobinfo[0])

        # If we saw two consecutive timeouts, ask whether we want to
        # treat every remaining testcase as a timeout.
        if ask_again and status == "timeout" and last_status == "timeout":
            print
            print "Want to stop and consider everything to timeout? [y/N]",
            if assume is not None:
                print assume
                tmp = assume
            else:
                tmp = raw_input().lower()
            if tmp in ['y', 'yes']:
                stop = True
            else:
                ask_again = False

    # Result pretty printing
    print
    clen = max(len(c) for c in comments)
    ilen = max(len(i) for i in info)
    for (i, p, c, b) in zip(tcnames, points, comments, info):
        print "%s) %5.2lf --- %s [%s]" % (i, p, c.ljust(clen), b.center(ilen))

    return zip(points, comments, info)
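
A hypothetical call site for the function above. The loader treats base_dir/".." as the contest root and the directory's basename as the task name, so the paths below assume a contest/task layout (both names are made up):

# ./contest/mytask holds the task; ./contest/mytask/sol is the compiled solution.
results = test_testcases("./contest/mytask", "sol", assume="n")
for points, comment, info in results:
    # assume="n" pre-answers the "consider everything a timeout?" prompt.
    print("%.2f: %s (%s)" % (points, comment, info))
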
Example #4
def test_testcases(base_dir, soluzione, language, assume=None):
    global task, file_cacher

    # Use a disabled FileCacher with an FSBackend to avoid filling the
    # database with junk and to save space.
    if file_cacher is None:
        file_cacher = FileCacher(path=os.path.join(config.cache_dir,
                                                   'cmsMake'),
                                 enabled=False)

    # Load the task
    if task is None:
        loader = YamlLoader(os.path.realpath(os.path.join(base_dir, "..")),
                            file_cacher)
        # Normally we should import the contest first, but YamlLoader
        # accepts get_task() even without a previous get_contest() call.
        task = loader.get_task(os.path.split(os.path.realpath(base_dir))[1])

    # Prepare the EvaluationJob
    dataset = task.active_dataset
    if dataset.task_type != "OutputOnly":
        digest = file_cacher.put_file_from_path(
            os.path.join(base_dir, soluzione),
            "Solution %s for task %s" % (soluzione, task.name))
        executables = {
            task.name: Executable(filename=task.name, digest=digest)
        }
        jobs = [(t,
                 EvaluationJob(language=language,
                               task_type=dataset.task_type,
                               task_type_parameters=json.loads(
                                   dataset.task_type_parameters),
                               managers=dict(dataset.managers),
                               executables=executables,
                               input=dataset.testcases[t].input,
                               output=dataset.testcases[t].output,
                               time_limit=dataset.time_limit,
                               memory_limit=dataset.memory_limit))
                for t in dataset.testcases]
        tasktype = get_task_type(dataset=dataset)
    else:
        print("Generating outputs...", end='')
        files = {}
        for t in sorted(dataset.testcases.keys()):
            with file_cacher.get_file(dataset.testcases[t].input) as fin:
                with TemporaryFile() as fout:
                    print(str(t), end='')
                    call(soluzione, stdin=fin, stdout=fout, cwd=base_dir)
                    fout.seek(0)
                    digest = file_cacher.put_file_from_fobj(fout)
                    outname = "output_%s.txt" % t
                    files[outname] = File(filename=outname, digest=digest)
        jobs = [(t,
                 EvaluationJob(task_type=dataset.task_type,
                               task_type_parameters=json.loads(
                                   dataset.task_type_parameters),
                               managers=dict(dataset.managers),
                               files=files,
                               input=dataset.testcases[t].input,
                               output=dataset.testcases[t].output,
                               time_limit=dataset.time_limit,
                               memory_limit=dataset.memory_limit))
                for t in dataset.testcases]
        for k, job in jobs:
            job._key = k
        tasktype = get_task_type(dataset=dataset)
        print()

    ask_again = True
    last_status = "ok"
    status = "ok"
    stop = False
    info = []
    points = []
    comments = []
    tcnames = []
    for jobinfo in sorted(jobs):
        print(jobinfo[0], end='')
        sys.stdout.flush()
        job = jobinfo[1]
        # Skip the testcase if we decided to treat every remaining
        # one as a timeout.
        if stop:
            info.append("Time limit exceeded")
            points.append(0.0)
            comments.append("Timeout.")
            continue

        # Evaluate testcase
        last_status = status
        tasktype.evaluate(job, file_cacher)
        if dataset.task_type != "OutputOnly":
            status = job.plus["exit_status"]
            info.append("Time: %5.3f   Wall: %5.3f   Memory: %s" %
                        (job.plus["execution_time"],
                         job.plus["execution_wall_clock_time"],
                         mem_human(job.plus["execution_memory"])))
        else:
            status = "ok"
            info.append("N/A")
        points.append(float(job.outcome))
        comments.append(format_status_text(job.text))
        tcnames.append(jobinfo[0])

        # If we saw two consecutive timeouts, ask whether we want to
        # treat every remaining testcase as a timeout.
        if ask_again and status == "timeout" and last_status == "timeout":
            print()
            print("Want to stop and consider everything to timeout? [y/N]",
                  end='')
            if assume is not None:
                print(assume)
                tmp = assume
            else:
                tmp = raw_input().lower()
            if tmp in ['y', 'yes']:
                stop = True
            else:
                ask_again = False

    # Result pretty printing
    print()
    clen = max(len(c) for c in comments)
    ilen = max(len(i) for i in info)
    for (i, p, c, b) in zip(tcnames, points, comments, info):
        print("%s) %5.2lf --- %s [%s]" % (i, p, c.ljust(clen), b.center(ilen)))

    return zip(points, comments, info)
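
The OutputOnly branch above pipes each input file through the solution and stores the captured stdout in the FileCacher; the same capture pattern in isolation (file names are hypothetical, and call is assumed to be subprocess.call):

from subprocess import call
from tempfile import TemporaryFile

with open("input_0.txt") as fin:
    with TemporaryFile() as fout:
        # Run the solution with the testcase on stdin, collecting stdout.
        call("./solution", stdin=fin, stdout=fout)
        fout.seek(0)  # rewind before reading what the solution wrote
        produced = fout.read()
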
Example #5
class Reimporter:

    """This script reimports a contest from disk using YamlLoader

    The data parsed by YamlLoader is used to update a Contest that's
    already existing in the database.

    """

    def __init__(self, path, contest_id, force):
        self.old_contest_id = contest_id
        self.force = force

        self.file_cacher = FileCacher()

        self.loader = YamlLoader(os.path.realpath(path), self.file_cacher)

    def _update_columns(self, old_object, new_object):
        for prp in old_object._col_props:
            if hasattr(new_object, prp.key):
                setattr(old_object, prp.key, getattr(new_object, prp.key))

    def _update_object(self, old_object, new_object):
        # This method copies the scalar column properties from the new
        # object into the old one, and then tries to do the same for
        # relationships too. The data model isn't a tree: for example
        # there are two distinct paths from Contest to Submission, one
        # through User and one through Task. Yet, at the moment, if we
        # ignore Submissions and UserTest (and thus their results, too)
        # we get a tree-like structure and Task.active_dataset and
        # Submission.token are the only scalar relationships that don't
        # refer to the parent. Therefore, if we catch these as special
        # cases, we can use a simple DFS to explore the whole data
        # graph, recursing only on "vector" relationships.
        # TODO Find a better way to handle all of this.

        self._update_columns(old_object, new_object)

        for prp in old_object._rel_props:
            old_value = getattr(old_object, prp.key)
            new_value = getattr(new_object, prp.key)

            # Special case #1: Contest.announcements, User.questions,
            #                  User.messages
            if _is_rel(prp, Contest.announcements) or \
                    _is_rel(prp, User.questions) or \
                    _is_rel(prp, User.messages):
                # A loader should not provide new Announcements,
                # Questions or Messages, since they are data generated
                # by the users during the contest: don't update them.
                # TODO Warn the admin if these attributes are non-empty
                # collections.
                pass

            # Special case #2: Task.datasets
            elif _is_rel(prp, Task.datasets):
                old_datasets = dict((d.description, d) for d in old_value)
                new_datasets = dict((d.description, d) for d in new_value)

                for key in set(new_datasets.keys()):
                    if key not in old_datasets:
                        # create
                        temp = new_datasets[key]
                        new_value.remove(temp)
                        old_value.append(temp)
                    else:
                        # update
                        self._update_object(old_datasets[key],
                                            new_datasets[key])

            # Special case #3: Task.active_dataset
            elif _is_rel(prp, Task.active_dataset):
                # We don't want to update the existing active dataset.
                pass

            # Special case #4: User.submissions, Task.submissions,
            #                  User.user_tests, Task.user_tests
            elif _is_rel(prp, User.submissions) or \
                    _is_rel(prp, Task.submissions) or \
                    _is_rel(prp, User.user_tests) or \
                    _is_rel(prp, Task.user_tests):
                # A loader should not provide new Submissions or
                # UserTests, since they are data generated by the users
                # during the contest: don't update them.
                # TODO Warn the admin if these attributes are non-empty
                # collections.
                pass

            # Special case #5: Submission.token
            elif _is_rel(prp, Submission.token):
                # We should never reach this point! We should never try
                # to update Submissions! We could even assert False...
                pass

            # General case #1: a dict
            elif isinstance(old_value, dict):
                for key in set(old_value.keys()) | set(new_value.keys()):
                    if key in new_value:
                        if key not in old_value:
                            # create
                            # FIXME This hack is needed because of some
                            # funny behavior of SQLAlchemy-instrumented
                            # collections when copying values, that
                            # resulted in new objects being added to
                            # the session. We need to investigate it.
                            temp = new_value[key]
                            del new_value[key]
                            old_value[key] = temp
                        else:
                            # update
                            self._update_object(old_value[key], new_value[key])
                    else:
                        # delete
                        del old_value[key]

            # General case #2: a list
            elif isinstance(old_value, list):
                old_len = len(old_value)
                new_len = len(new_value)
                for i in xrange(min(old_len, new_len)):
                    self._update_object(old_value[i], new_value[i])
                if old_len > new_len:
                    del old_value[new_len:]
                elif new_len > old_len:
                    for i in xrange(old_len, new_len):
                        # FIXME This hack is needed because of some
                        # funny behavior of SQLAlchemy-instrumented
                        # collections when copying values, that
                        # resulted in new objects being added to the
                        # session. We need to investigate it.
                        temp = new_value[i]
                        del new_value[i]
                        old_value.append(temp)

            # General case #3: a parent object
            elif isinstance(old_value, Base):
                # No need to climb back up the recursion tree...
                pass

            # General case #4: None
            elif old_value is None:
                # That should only happen in case of a scalar
                # relationship (i.e. a many-to-one or a one-to-one)
                # that is nullable. "Parent" relationships aren't
                # nullable, so the only possible cases are the active
                # datasets and the tokens, but we should have already
                # caught them. We could even assert False...
                pass

            else:
                raise RuntimeError(
                    "Unknown type of relationship for %s.%s." %
                    (prp.parent.class_.__name__, prp.key))

    def do_reimport(self):
        """Get the contest from the Loader and merge it."""
        with SessionGen(commit=False) as session:
            # Load the old contest from the database.
            old_contest = Contest.get_from_id(self.old_contest_id, session)
            old_users = dict((x.username, x) for x in old_contest.users)
            old_tasks = dict((x.name, x) for x in old_contest.tasks)

            # Load the new contest from the filesystem.
            new_contest, new_tasks, new_users = self.loader.get_contest()
            new_users = dict((x["username"], x) for x in new_users)
            new_tasks = dict((x["name"], x) for x in new_tasks)

            # Update contest-global settings that are set in new_contest.
            self._update_columns(old_contest, new_contest)

            # Do the actual merge: compare all users of the old and of
            # the new contest and see if we need to create, update or
            # delete them. Delete only if authorized, fail otherwise.
            users = set(old_users.keys()) | set(new_users.keys())
            for user in users:
                old_user = old_users.get(user, None)
                new_user = new_users.get(user, None)

                if old_user is None:
                    # Create a new user.
                    logger.info("Creating user %s" % user)
                    new_user = self.loader.get_user(new_user)
                    old_contest.users.append(new_user)
                elif new_user is not None:
                    # Update an existing user.
                    logger.info("Updating user %s" % user)
                    new_user = self.loader.get_user(new_user)
                    self._update_object(old_user, new_user)
                else:
                    # Delete an existing user.
                    if self.force:
                        logger.info("Deleting user %s" % user)
                        old_contest.users.remove(old_user)
                    else:
                        logger.critical(
                            "User %s exists in old contest, but "
                            "not in the new one. Use -f to force." %
                            user)
                        return False

            # The same for tasks.
            tasks = set(old_tasks.keys()) | set(new_tasks.keys())
            for task in tasks:
                old_task = old_tasks.get(task, None)
                new_task = new_tasks.get(task, None)

                if old_task is None:
                    # Create a new task.
                    logger.info("Creating task %s" % task)
                    new_task = self.loader.get_task(new_task)
                    old_contest.tasks.append(new_task)
                elif new_task is not None:
                    # Update an existing task.
                    logger.info("Updating task %s" % task)
                    new_task = self.loader.get_task(new_task)
                    self._update_object(old_task, new_task)
                else:
                    # Delete an existing task.
                    if self.force:
                        logger.info("Deleting task %s" % task)
                        old_contest.tasks.remove(old_task)
                    else:
                        logger.critical(
                            "Task %s exists in old contest, but "
                            "not in the new one. Use -f to force." %
                            task)
                        return False

            session.commit()

        logger.info("Reimport finished (contest id: %s)." %
                    self.old_contest_id)

        return True
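
The user and task loops in do_reimport follow the same three-way merge over keyed collections; a minimal standalone sketch of that pattern:

def three_way_merge(old, new, create, update, delete):
    # old and new are dicts keyed by a natural identifier (username,
    # task name, ...); each callback mirrors one branch of the loops above.
    for key in set(old) | set(new):
        if key not in old:
            create(new[key])            # present only in the new contest
        elif key in new:
            update(old[key], new[key])  # present in both: merge in place
        else:
            delete(old[key])            # present only in the old contest
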
Example #6
File: Test.py Project: ldct/cms
def test_testcases(base_dir, soluzione, language, assume=None):
    global task, file_cacher

    # Use a FileCacher with a NullBackend to avoid filling the
    # database with junk.
    if file_cacher is None:
        file_cacher = FileCacher(null=True)

    # Load the task
    # TODO - This implies copying a lot of data to the FileCacher,
    # which is annoying if you have to do it continuously; it would be
    # better to use a persistent cache (although local, possibly
    # filesystem-based instead of database-based) and somehow detect
    # when the task has already been loaded
    if task is None:
        loader = YamlLoader(
            os.path.realpath(os.path.join(base_dir, "..")),
            file_cacher)
        # Normally we should import the contest first, but YamlLoader
        # accepts get_task() even without a previous get_contest() call.
        task = loader.get_task(os.path.split(os.path.realpath(base_dir))[1])

    # Prepare the EvaluationJob
    dataset = task.active_dataset
    digest = file_cacher.put_file_from_path(
        os.path.join(base_dir, soluzione),
        "Solution %s for task %s" % (soluzione, task.name))
    executables = {task.name: Executable(filename=task.name, digest=digest)}
    jobs = [(t, EvaluationJob(
        language=language,
        task_type=dataset.task_type,
        task_type_parameters=json.loads(dataset.task_type_parameters),
        managers=dict(dataset.managers),
        executables=executables,
        input=dataset.testcases[t].input, output=dataset.testcases[t].output,
        time_limit=dataset.time_limit,
        memory_limit=dataset.memory_limit)) for t in dataset.testcases]
    tasktype = get_task_type(dataset=dataset)

    ask_again = True
    last_status = "ok"
    status = "ok"
    stop = False
    info = []
    points = []
    comments = []
    tcnames = []
    for jobinfo in sorted(jobs):
        print(jobinfo[0], end='')
        sys.stdout.flush()
        job = jobinfo[1]
        # Skip the testcase if we decided to treat every remaining
        # one as a timeout.
        if stop:
            info.append("Time limit exceeded")
            points.append(0.0)
            comments.append("Timeout.")
            continue

        # Evaluate testcase
        last_status = status
        tasktype.evaluate(job, file_cacher)
        status = job.plus["exit_status"]
        info.append("Time: %5.3f   Wall: %5.3f   Memory: %s" %
                   (job.plus["execution_time"],
                    job.plus["execution_wall_clock_time"],
                    mem_human(job.plus["execution_memory"])))
        points.append(float(job.outcome))
        comments.append(format_status_text(job.text))
        tcnames.append(jobinfo[0])

        # If we saw two consecutive timeouts, ask whether we want to
        # treat every remaining testcase as a timeout.
        if ask_again and status == "timeout" and last_status == "timeout":
            print()
            print("Want to stop and consider everything to timeout? [y/N]",
                  end='')
            if assume is not None:
                print(assume)
                tmp = assume
            else:
                tmp = raw_input().lower()
            if tmp in ['y', 'yes']:
                stop = True
            else:
                ask_again = False

    # Result pretty printing
    print()
    clen = max(len(c) for c in comments)
    ilen = max(len(i) for i in info)
    for (i, p, c, b) in zip(tcnames, points, comments, info):
        print("%s) %5.2lf --- %s [%s]" % (i, p, c.ljust(clen), b.center(ilen)))

    return zip(points, comments, info)
Example #7
class Importer:

    """This script imports a contest from disk using YamlLoader.

    The data parsed by YamlLoader is used to create a new Contest in
    the database.

    """

    def __init__(self, path, drop, test, zero_time, user_number):
        self.drop = drop
        self.test = test
        self.zero_time = zero_time
        self.user_number = user_number

        self.file_cacher = FileCacher()

        self.loader = YamlLoader(os.path.realpath(path), self.file_cacher)

    def _prepare_db(self):
        logger.info("Creating database structure.")
        if self.drop:
            try:
                with SessionGen() as session:
                    FSObject.delete_all(session)
                    session.commit()
                metadata.drop_all()
            except sqlalchemy.exc.OperationalError as error:
                logger.critical("Unable to access DB.\n%r" % error)
                return False
        try:
            metadata.create_all()
        except sqlalchemy.exc.OperationalError as error:
            logger.critical("Unable to access DB.\n%r" % error)
            return False
        return True

    def do_import(self):
        """Get the contest from the Loader and store it."""
        if not self._prepare_db():
            return False

        # Get the contest
        contest, tasks, users = self.loader.get_contest()

        # Get the tasks
        for task in tasks:
            contest.tasks.append(self.loader.get_task(task))

        # Get the users or, if asked, generate them
        if self.user_number is None:
            for user in users:
                contest.users.append(self.loader.get_user(user))
        else:
            logger.info("Generating %s random users." % self.user_number)
            contest.users = [User("User %d" % i,
                                  "Last name %d" % i,
                                  "user%03d" % i)
                             for i in xrange(self.user_number)]

        # Apply the modification flags
        if self.zero_time:
            contest.start = datetime.datetime(1970, 1, 1)
            contest.stop = datetime.datetime(1970, 1, 1)
        elif self.test:
            contest.start = datetime.datetime(1970, 1, 1)
            contest.stop = datetime.datetime(2100, 1, 1)

            for user in contest.users:
                user.password = '******'
                user.ip = None

        # Store
        logger.info("Creating contest on the database.")
        with SessionGen() as session:
            session.add(contest)
            session.commit()
            contest_id = contest.id

        logger.info("Import finished (new contest id: %s)." % contest_id)
Example #8
def test_testcases(base_dir, soluzione, language, assume=None):
    global task, file_cacher

    # Use a disabled FileCacher with an FSBackend to avoid filling the
    # database with junk and to save space.
    if file_cacher is None:
        file_cacher = FileCacher(path=os.path.join(config.cache_dir,
                                                   'cmsMake'),
                                 enabled=False)

    # Load the task
    if task is None:
        loader = YamlLoader(
            os.path.realpath(os.path.join(base_dir, "..")),
            file_cacher)
        # Normally we should import the contest first, but YamlLoader
        # accepts get_task() even without a previous get_contest() call.
        task = loader.get_task(os.path.split(os.path.realpath(base_dir))[1])

    # Prepare the EvaluationJob
    dataset = task.active_dataset
    if dataset.task_type != "OutputOnly":
        digest = file_cacher.put_file_from_path(
            os.path.join(base_dir, soluzione),
            "Solution %s for task %s" % (soluzione, task.name))
        executables = {task.name: Executable(filename=task.name,
                                             digest=digest)}
        jobs = [(t, EvaluationJob(
            language=language,
            task_type=dataset.task_type,
            task_type_parameters=json.loads(dataset.task_type_parameters),
            managers=dict(dataset.managers),
            executables=executables,
            input=dataset.testcases[t].input,
            output=dataset.testcases[t].output,
            time_limit=dataset.time_limit,
            memory_limit=dataset.memory_limit)) for t in dataset.testcases]
        tasktype = get_task_type(dataset=dataset)
    else:
        print("Generating outputs...", end='')
        files = {}
        for t in sorted(dataset.testcases.keys()):
            with file_cacher.get_file(dataset.testcases[t].input) as fin:
                with TemporaryFile() as fout:
                    print(str(t), end='')
                    call(soluzione, stdin=fin, stdout=fout, cwd=base_dir)
                    fout.seek(0)
                    digest = file_cacher.put_file_from_fobj(fout)
                    outname = "output_%s.txt" % t
                    files[outname] = File(filename=outname, digest=digest)
        jobs = [(t, EvaluationJob(
            task_type=dataset.task_type,
            task_type_parameters=json.loads(dataset.task_type_parameters),
            managers=dict(dataset.managers),
            files=files,
            input=dataset.testcases[t].input,
            output=dataset.testcases[t].output,
            time_limit=dataset.time_limit,
            memory_limit=dataset.memory_limit)) for t in dataset.testcases]
        for k, job in jobs:
            job._key = k
        tasktype = get_task_type(dataset=dataset)
        print()

    ask_again = True
    last_status = "ok"
    status = "ok"
    stop = False
    info = []
    points = []
    comments = []
    tcnames = []
    for jobinfo in sorted(jobs):
        print(jobinfo[0], end='')
        sys.stdout.flush()
        job = jobinfo[1]
        # Skip the testcase if we decided to treat every remaining
        # one as a timeout.
        if stop:
            info.append("Time limit exceeded")
            points.append(0.0)
            comments.append("Timeout.")
            continue

        # Evaluate testcase
        last_status = status
        tasktype.evaluate(job, file_cacher)
        if dataset.task_type != "OutputOnly":
            status = job.plus["exit_status"]
            info.append("Time: %5.3f   Wall: %5.3f   Memory: %s" %
                       (job.plus["execution_time"],
                        job.plus["execution_wall_clock_time"],
                        mem_human(job.plus["execution_memory"])))
        else:
            status = "ok"
            info.append("N/A")
        points.append(float(job.outcome))
        comments.append(format_status_text(job.text))
        tcnames.append(jobinfo[0])

        # If we saw two consecutive timeouts, ask whether we want to
        # treat every remaining testcase as a timeout.
        if ask_again and status == "timeout" and last_status == "timeout":
            print()
            print("Want to stop and consider everything to timeout? [y/N]",
                  end='')
            if assume is not None:
                print(assume)
                tmp = assume
            else:
                tmp = raw_input().lower()
            if tmp in ['y', 'yes']:
                stop = True
            else:
                ask_again = False

    # Result pretty printing
    print()
    clen = max(len(c) for c in comments)
    ilen = max(len(i) for i in info)
    for (i, p, c, b) in zip(tcnames, points, comments, info):
        print("%s) %5.2lf --- %s [%s]" % (i, p, c.ljust(clen), b.center(ilen)))

    return zip(points, comments, info)
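
The result table at the end of each example pads every comment to the longest one and centers the timing info; the same alignment on made-up data:

comments = ["Output is correct", "Timeout."]
info = ["Time: 0.123   Wall: 0.150   Memory: 12.4 MB", "N/A"]
clen = max(len(c) for c in comments)
ilen = max(len(i) for i in info)
for name, pts, c, b in zip(["001", "002"], [1.0, 0.0], comments, info):
    print("%s) %5.2f --- %s [%s]" % (name, pts, c.ljust(clen), b.center(ilen)))
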