Example No. 1
def display_breakdown(scores):
    """Prints the point breakdown given a dictionary of scores.

    RETURNS:
    dict; maps partner (str) -> finalized score (float)
    """
    partner_totals = {}

    format.print_line('-')
    print('Point breakdown')
    for (name, partner), (score, total) in scores.items():
        print('    {}: {}/{}'.format(name, score, total))
        partner_totals[partner] = partner_totals.get(partner, 0) + score
    print()

    shared_points = partner_totals.get(None, 0)
    if None in partner_totals:
        del partner_totals[None]

    finalized_scores = {}
    print('Score:')
    if len(partner_totals) == 0:
        print('    {}: {}'.format(NO_PARTNER_NAME, shared_points))
        finalized_scores[NO_PARTNER_NAME] = shared_points
    else:
        for partner, score in sorted(partner_totals.items()):
            print('    Partner {}: {}'.format(partner, score + shared_points))
            finalized_scores[partner] = score + shared_points
    return finalized_scores
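
A minimal usage sketch for the partner-aware version above (hypothetical scores; assumes the surrounding format module and NO_PARTNER_NAME constant are importable): keys are (test name, partner) pairs, values are (earned, possible) pairs, and points whose partner is None are shared by every partner.

raw_scores = {
    ('q1', None): (2.0, 2.0),   # partner None: shared points, counted for everyone
    ('q2', 'A'):  (1.0, 3.0),   # partner A only
    ('q3', 'B'):  (3.0, 3.0),   # partner B only
}
finalized = display_breakdown(raw_scores)
# finalized == {'A': 3.0, 'B': 5.0}: each partner's own points plus the 2.0 shared points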
Example No. 2
    def run(self, messages):
        """Score tests and print results

        Tests are taken from self.assignment.specified_tests. Each test belongs
        to a partner. If test.partner is omitted (i.e. core.NoValue), the score
        for that test is added to every partner's score.

        If there are no tests, the mapping will only contain one entry, mapping
        "Total" -> 0 (total score).

        If there are no partners specified by the tests, the mapping will only
        contain one entry, mapping "Total" (partner) -> total score (float).
        This assumes there is always at least one partner.
        """
        if self.args.export or not self.args.score:
            return

        format.print_line('~')
        print('Scoring tests')
        print()

        raw_scores = OrderedDict()
        for test in self.assignment.specified_tests:
            assert isinstance(test, sources_models.Test), 'ScoringProtocol received invalid test'

            log.info('Scoring test {}'.format(test.name))
            partner = test.partner if test.partner != core.NoValue else None
            raw_scores[test.name, partner] = (test.score(), test.points)

        messages['scoring'] = display_breakdown(raw_scores)
        print()
Example No. 3
def grade(questions, messages, env=None, verbose=True):
    format.print_line('~')
    print('Running tests')
    print()
    passed = 0
    failed = 0
    locked = 0

    analytics = {}

    # The environment in which to run the tests.
    for test in questions:
        log.info('Running tests for {}'.format(test.name))
        results = test.run(env)

        # if correct once, set persistent flag
        if results['failed'] == 0 and results['locked'] == 0:
            storage.store(test.name, 'correct', True)

        passed += results['passed']
        failed += results['failed']
        locked += results['locked']
        analytics[test.name] = results

        if not verbose and (failed > 0 or locked > 0):
            # Stop at the first failed test
            break

    format.print_progress_bar('Test summary', passed, failed, locked,
                              verbose=verbose)
    print()

    messages['grading'] = analytics
Example No. 4
    def _run_case(self, test_name, suite_number, case, case_number):
        """A wrapper for case.run().

        Prints informative output and also captures output of the test case
        and returns it as a log. The output is printed only if the case fails,
        or if self.verbose is True.
        """
        output.off()    # Delay printing until case status is determined.
        log_id = output.new_log()
        format.print_line('-')
        print('{} > Suite {} > Case {}'.format(test_name, suite_number,
                                               case_number))
        print()

        success = case.run()
        if success:
            print('-- OK! --')

        output.on()
        output_log = output.get_log(log_id)
        output.remove_log(log_id)

        if not success or self.verbose:
            print(''.join(output_log))
        if not success:
            short_name = self.test.get_short_name()
            # TODO: Change when in notebook mode
            print('Run only this test case with '
                '"python3 ok -q {} --suite {} --case {}"'.format(
                    short_name, suite_number, case_number))
        return success
Example No. 5
    def on_interact(self):
        """Run gradeable tests and print results and return analytics.

        RETURNS:
        dict; a mapping of test name -> JSON-serializable object. It is up to
        each test to determine what kind of data it wants to return as
        significant for analytics. However, all tests must include the number
        passed, the number of locked tests and the number of failed tests.
        """
        if self.args.score:
            return

        format.print_line('~')
        print('Running tests')
        print()
        passed = 0
        failed = 0
        locked = 0

        analytics = {}

        for test in self.assignment.specified_tests:
            log.info('Running tests for {}'.format(test.name))
            results = test.run()

            passed += results['passed']
            failed += results['failed']
            locked += results['locked']
            analytics[test.name] = results

        format.print_progress_bar('Test summary', passed, failed, locked)
        return analytics
Example No. 6
def grade(questions, messages, env=None, verbose=True):
    format.print_line('~')
    print('Running tests')
    print()
    passed = 0
    failed = 0
    locked = 0

    analytics = {}

    # The environment in which to run the tests.
    for test in questions:
        log.info('Running tests for {}'.format(test.name))
        results = test.run(env)

        # if correct once, set persistent flag
        if results['failed'] == 0:
            storage.store(test.name, 'correct', True)

        passed += results['passed']
        failed += results['failed']
        locked += results['locked']
        analytics[test.name] = results

        current_directory = os.getcwd()

        if not os.path.exists(current_directory + "/submissions"):
            # Assumed continuation: create the missing submissions directory.
            os.makedirs(current_directory + "/submissions")
Example No. 7
    def run(self):
        """Runs the suites associated with this doctest.

        RETURNS:
        dict; {'passed': 1, 'failed': 0, 'locked': 0} if the doctest completely
        passes, {'passed': 0, 'failed': 1, 'locked': 0} otherwise.
        """
        output.off()
        log_id = output.new_log()

        format.print_line('-')
        print('Doctests for {}'.format(self.name))
        print()

        success = self.case.run()
        if success:
            print('-- OK! --')

        output.on()
        output_log = output.get_log(log_id)
        output.remove_log(log_id)

        if not success or self.verbose:
            print(''.join(output_log))

        if not success and self.interactive:
            self.console.interact()

        if success:
            return {'passed': 1, 'failed': 0, 'locked': 0}
        else:
            return {'passed': 0, 'failed': 1, 'locked': 0}
Example No. 8
    def unlock(self, interact):
        total_cases = len([case for suite in self.suites
                                for case in suite.cases])
        for suite_num, suite in enumerate(self.suites):
            for case_num, case in enumerate(suite.cases):
                case_id = '{} > Suite {} > Case {}'.format(
                            self.name, suite_num + 1, case_num + 1)

                format.print_line('-')
                print(case_id)
                print('(cases remaining: {})'.format(total_cases))
                print()
                total_cases -= 1

                if case.locked != True:
                    print('-- Already unlocked --')
                    print()
                    continue

                case.unlock(self.unique_id_prefix, case_id, interact)

        assert total_cases == 0, 'Number of cases is incorrect'
        format.print_line('-')
        print('OK! All cases for {} unlocked.'.format(self.name))
        print()
Example No. 9
    def on_interact(self):
        """Run gradeable tests and print results and return analytics.

        RETURNS:
        dict; a mapping of test name -> JSON-serializable object. It is up to
        each test to determine what kind of data it wants to return as
        significant for analytics. However, all tests must include the number
        passed, the number of locked tests and the number of failed tests.
        """
        if self.args.score:
            return

        format.print_line('~')
        print('Running tests')
        print()
        passed = 0
        failed = 0
        locked = 0

        analytics = {}

        for test in self.assignment.specified_tests:
            log.info('Running tests for {}'.format(test.name))
            results = test.run()

            passed += results['passed']
            failed += results['failed']
            locked += results['locked']
            analytics[test.name] = results

        format.print_progress_bar('Test summary', passed, failed, locked)
        return analytics
Example No. 10
    def run(self, messages, env=None):
        """Score tests and print results. Tests are taken from
        self.assignment.specified_tests. A score breakdown by question and the
        total score are both printed.

        ENV is used by the programmatic API for Python doctests only.
        """
        if not self.args.score or self.args.testing:
            return

        format.print_line('~')
        print('Scoring tests')
        print()

        raw_scores = OrderedDict()
        for test in self.assignment.specified_tests:
            assert isinstance(test, sources_models.Test), 'ScoringProtocol received invalid test'

            log.info('Scoring test {}'.format(test.name))

            # A hack that allows programmatic API users to plumb a custom
            # environment through to Python tests.
            # Use type() to ensure it is an actual OkTest and not a subclass
            if type(test) == ok_test_models.OkTest:
                score = test.score(env=env)
            else:
                score = test.score()

            raw_scores[test.name] = (score, test.points)

        messages['scoring'] = display_breakdown(raw_scores, self.args.score_out)
        print()
Example No. 11
    def run(self, messages):
        """Score tests and print results

        Tests are taken from self.assignment.specified_tests. Each test belongs
        to a partner. If test.partner is omitted (i.e. core.NoValue), the score
        for that test is added to every partner's score.

        If there are no tests, the mapping will only contain one entry, mapping
        "Total" -> 0 (total score).

        If there are no partners specified by the tests, the mapping will only
        contain one entry, mapping "Total" (partner) -> total score (float).
        This assumes there is always at least one partner.
        """
        if self.args.export or not self.args.score:
            return

        format.print_line('~')
        print('Scoring tests')
        print()

        raw_scores = OrderedDict()
        for test in self.assignment.specified_tests:
            assert isinstance(test, sources_models.Test), 'ScoringProtocol received invalid test'

            log.info('Scoring test {}'.format(test.name))
            partner = test.partner if test.partner != core.NoValue else None
            raw_scores[test.name, partner] = (test.score(), test.points)

        messages['scoring'] = display_breakdown(raw_scores)
        print()
Example No. 12
def display_breakdown(scores):
    """Prints the point breakdown given a dictionary of scores.

    RETURNS:
    dict; maps partner (str) -> finalized score (float)
    """
    partner_totals = {}

    format.print_line('-')
    print('Point breakdown')
    for (name, partner), (score, total) in scores.items():
        print('    {}: {}/{}'.format(name, score, total))
        partner_totals[partner] = partner_totals.get(partner, 0) + score
    print()

    shared_points = partner_totals.get(None, 0)
    if None in partner_totals:
        del partner_totals[None]

    finalized_scores = {}
    print('Score:')
    if len(partner_totals) == 0:
        print('    {}: {}'.format(NO_PARTNER_NAME, shared_points))
        finalized_scores[NO_PARTNER_NAME] = shared_points
    else:
        for partner, score in sorted(partner_totals.items()):
            print('    Partner {}: {}'.format(partner, score + shared_points))
            finalized_scores[partner] = score + shared_points
    return finalized_scores
Example No. 13
def grade(questions, messages, env=None, verbose=True):
    format.print_line('~')
    print('Running tests')
    print()
    passed = 0
    failed = 0
    locked = 0

    analytics = {}

    # The environment in which to run the tests.
    for test in questions:
        log.info('Running tests for {}'.format(test.name))
        results = test.run(env)

        # if correct once, set persistent flag
        if results['failed'] == 0 and results['locked'] == 0:
            storage.store(test.name, 'correct', True)

        passed += results['passed']
        failed += results['failed']
        locked += results['locked']
        analytics[test.name] = results

        if not verbose and (failed > 0 or locked > 0):
            # Stop at the first failed test
            break

    format.print_progress_bar('Test summary', passed, failed, locked,
                              verbose=verbose)
    print()

    messages['grading'] = analytics
Example No. 14
    def run(self):
        """Runs the suites associated with this doctest.

        RETURNS:
        dict; {'passed': 1, 'failed': 0, 'locked': 0} if the doctest completely
        passes, {'passed': 0, 'failed': 1, 'locked': 0} otherwise.
        """
        output.off()
        log_id = output.new_log()

        format.print_line('-')
        print('Doctests for {}'.format(self.name))
        print()

        if not self.docstring:
            print('-- No doctests found for {} --'.format(self.name))
            success = False
        else:
            success = self.case.run()
            if success:
                print('-- OK! --')

        output.on()
        output_log = output.get_log(log_id)
        output.remove_log(log_id)

        if not success or self.verbose:
            print(''.join(output_log))

        if not success and self.interactive:
            self.console.interact()

        if success:
            return {'passed': 1, 'failed': 0, 'locked': 0}
        else:
            return {'passed': 0, 'failed': 1, 'locked': 0}
Example No. 15
    def run(self, messages, env=None):
        """Score tests and print results. Tests are taken from
        self.assignment.specified_tests. A score breakdown by question and the
        total score are both printed.

        ENV is used by the programmatic API for Python doctests only.
        """
        if not self.args.score:
            return

        format.print_line('~')
        print('Scoring tests')
        print()

        raw_scores = OrderedDict()
        for test in self.assignment.specified_tests:
            assert isinstance(test, sources_models.Test), 'ScoringProtocol received invalid test'

            log.info('Scoring test {}'.format(test.name))

            # A hack that allows programmatic API users to plumb a custom
            # environment through to Python tests.
            # Use type() to ensure it is an actual OkTest and not a subclass
            if type(test) == ok_test_models.OkTest:
                score = test.score(env=env)
            else:
                score = test.score()

            raw_scores[test.name] = (score, test.points)

        messages['scoring'] = display_breakdown(raw_scores, self.args.score_out)
        print()
Example No. 16
    def _print_header(self):
        if getattr(self.cmd_args, 'autobackup_actual_run_sync', False):
            return
        format.print_line('=')
        print('Assignment: {}'.format(self.name))
        print('OK, version {}'.format(client.__version__))
        format.print_line('=')
        print()
Example No. 17
    def score(self):
        format.print_line('-')
        print('Scheme tests in {}'.format(self.file))
        print()
        _, failed = self._run_tests()
        score = 1.0 if failed == 0 else 0.0

        print('Score: {}/1'.format(score))
        print()
        return score
Example No. 18
    def score(self):
        format.print_line('-')
        print('Doctests for {}'.format(self.name))
        print()
        success = self.case.run()
        score = 1.0 if success else 0.0

        print('Score: {}/1'.format(score))
        print()
        return score
Example No. 19
    def score(self):
        format.print_line('-')
        print('Scheme tests in {}'.format(self.file))
        print()
        _, failed = self._run_tests()
        score = 1.0 if failed == 0 else 0.0

        print('Score: {}/1'.format(score))
        print()
        return score
Example No. 20
    def score(self):
        format.print_line('-')
        print('Doctests for {}'.format(self.name))
        print()
        success = self.case.run()
        score = 1.0 if success else 0.0

        print('Score: {}/1'.format(score))
        print()
        return score
Example No. 21
    def on_start(self):
        """Responsible for locking each test."""
        if not self.args.lock:
            return

        format.print_line('~')
        print('Locking tests')
        print()

        for test in self.assignment.test_map.values():
            log.info('Locking {}'.format(test.name))
            test.lock(self._hash_fn)
Example No. 22
    def _compare(self, expected, code):
        try:
            value, printed = self.evaluate(code)
        except ConsoleException as e:
            detail = "{}: {}".format(e.exception_type, str(e.exception))
            actual = CodeAnswer(exception=True,
                                exception_type=e.exception_type,
                                exception_detail=detail.splitlines())
        else:
            if value is not None:
                print(self._output_fn(value))
                printed += self._output_fn(value)
            output = printed.splitlines()
            actual = CodeAnswer(output=output)

        self.cases_total += 1
        if not self.skip_locked_cases and expected.locked:
            if '\n'.join(expected.output) != locking.lock(
                    self.hash_key, actual.dump()):
                print()
                print("# Error: expected and actual results do not match")
                raise ConsoleException
            else:
                return

        correct = (expected.exception == actual.exception
                   and expected.output_lines() == actual.output_lines())
        correct_legacy_exception = (actual.exception
                                    and [actual.exception_type
                                         ] == expected.output_lines())
        if not correct and not correct_legacy_exception:
            print()
            print('# Error: expected')
            print('\n'.join('#     {}'.format(line)
                            for line in expected.output_lines()))
            print('# but got')
            print('\n'.join('#     {}'.format(line)
                            for line in actual.output_lines()))
            # Bail out on the first failed test; even when showing all test
            # results, bail out on an infinite-loop timeout.
            if not self.show_all_cases or (actual.exception
                                           and actual.exception_type
                                           == exceptions.Timeout.__name__):
                raise ConsoleException
            elif self.CASE_PREFIX in code:
                print(":(", f"Test Case {self.cases_total} failed")
                format.print_line('-')
                print()
        elif correct:
            self.cases_passed += 1
            if self.CASE_PREFIX in code:
                print(":D", f"Test Case {self.cases_total} passed")
                format.print_line('-')
                print()
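
The pass/fail decision in _compare above boils down to two list comparisons. A standalone sketch of just that logic, with plain lists and hypothetical names standing in for the CodeAnswer objects:

def outputs_match(expected_lines, actual_lines,
                  expected_is_exception, actual_is_exception,
                  actual_exception_type):
    # Normal case: the exception flags agree and the output lines are identical.
    correct = (expected_is_exception == actual_is_exception
               and expected_lines == actual_lines)
    # Legacy case: an older answer that stored only the exception type name
    # still counts as a match when the actual result raised that exception.
    legacy = actual_is_exception and [actual_exception_type] == expected_lines
    return correct or legacy

# Example: an expected answer of ['ValueError'] accepts an actual ValueError.
# outputs_match(['ValueError'], ['ValueError: bad input'], False, True, 'ValueError') -> True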
Example No. 23
File: lock.py  Project: Kelel1/CS61A
    def run(self, messages):
        """Responsible for locking each test."""
        if not self.args.lock:
            return

        format.print_line('~')
        print('Locking tests')
        print()

        for test in self.assignment.test_map.values():
            log.info('Locking {}'.format(test.name))
            test.lock(self._hash_fn)
Example No. 24
    def run(self, messages):
        """Run gradeable tests and print results and return analytics.

        RETURNS:
        dict; a mapping of test name -> JSON-serializable object. It is up to
        each test to determine what kind of data it wants to return as
        significant for analytics. However, all tests must include the number
        passed, the number of locked tests and the number of failed tests.
        """
        if self.args.score or self.args.export or self.args.unlock or self.args.restore:
            return

        format.print_line('~')
        print('Running tests')
        print()
        passed = 0
        failed = 0
        locked = 0

        analytics = {}
        # check if analytics info is in messages
        if 'analytics' in messages:
            started = messages['analytics']['started']
        else:
            started = None

        for test in self.assignment.specified_tests:
            # run test if the question is not detected, or question detected and started
            if (started is None
                or test.name not in started
                or started[test.name]):

                log.info('Running tests for {}'.format(test.name))
                results = test.run()
                passed += results['passed']
                failed += results['failed']
                locked += results['locked']
                analytics[test.name] = results
            else:
                print('It looks like you haven\'t started {}. Skipping the tests.'.format(test.name))
                print()

            if not self.args.verbose and (failed > 0 or locked > 0):
                # Stop at the first failed test
                break

        format.print_progress_bar('Test summary', passed, failed, locked,
                                  verbose=self.args.verbose)
        print()

        messages['grading'] = analytics
Example No. 25
    def lock(self, hash_fn):
        format.print_line('-')
        print(self.name)

        for suite_num, suite in enumerate(list(self.suites)):
            for case_num, case in enumerate(list(suite.cases)):
                message = '* Suite {} > Case {}: '.format(suite_num, case_num)
                if case.hidden:
                    suite.cases.remove(case)
                    print(message + 'removing hidden case')
                elif case.locked == core.NoValue:
                    case.lock(hash_fn)
                    print(message + 'locking')
                elif case.locked == False:
                    print(message + 'leaving unlocked')
                elif case.locked == True:
                    print(message + 'already unlocked')
Example No. 26
    def run(self):
        """Runs the suites associated with this doctest.

        RETURNS:
        bool; True if the doctest completely passes, False otherwise.
        """
        format.print_line('-')
        print('Scheme tests in {}'.format(self.file))
        print()

        passed, failed = self._run_tests()

        print('{} passed; {} failed'.format(passed, failed))
        if failed == 0 and passed > 0:
            print('-- OK! --')
            print()

        return {'passed': passed, 'failed': failed, 'locked': 0}
Example No. 27
    def run(self):
        """Runs the suites associated with this doctest.

        RETURNS:
        bool; True if the doctest completely passes, False otherwise.
        """
        format.print_line("-")
        print("Scheme tests in {}".format(self.file))
        print()

        passed, failed = self._run_tests()

        print("{} passed; {} failed".format(passed, failed))
        if failed == 0 and passed > 0:
            print("-- OK! --")
            print()

        return {"passed": passed, "failed": failed, "locked": 0}
Example No. 28
    def run(self):
        """Runs the suites associated with this doctest.

        RETURNS:
        bool; True if the doctest completely passes, False otherwise.
        """
        format.print_line('-')
        print('Scheme tests in {}'.format(self.file))
        print()

        passed, failed = self._run_tests()

        print('{} passed; {} failed'.format(passed, failed))
        if failed == 0 and passed > 0:
            print('-- OK! --')
            print()

        return {'passed': passed, 'failed': failed, 'locked': 0}
Example No. 29
def display_breakdown(scores, outfile):
    """Writes the point breakdown to outfile given a dictionary of scores.

    RETURNS:
    dict; 'Total' -> finalized score (float)
    """
    total = 0

    format.print_line('-')
    print('Point breakdown', file=outfile)
    for name, (score, max_score) in scores.items():
        print('    {}: {}/{}'.format(name, score, max_score), file=outfile)
        total += score
    print(file=outfile)

    print('Score:', file=outfile)
    print('    Total: {}'.format(total), file=outfile)
    return {'Total': total}
Example No. 30
    def run(self, messages):
        """Responsible for unlocking each test.

        The unlocking process can be aborted by raising a KeyboardInterrupt or
        an EOFError.

        RETURNS:
        dict; mapping of test name (str) -> JSON-serializable object. It is up
        to each test to determine what information is significant for analytics.
        """
        if not self.args.unlock:
            return

        format.print_line('~')
        print('Unlocking tests')
        print()

        print('At each "{}", type what you would expect the output to be.'.format(
              self.PROMPT))
        print('Type {} to quit'.format(self.EXIT_INPUTS[0]))
        print()

        for test in self.assignment.specified_tests:
            log.info('Unlocking test {}'.format(test.name))
            self.current_test = test.name

            # Reset guidance explanation probability for every question
            self.guidance_util.prompt_probability = guidance.DEFAULT_PROMPT_PROBABILITY

            try:
                test.unlock(self.interact)
            except (KeyboardInterrupt, EOFError):
                try:
                    # TODO(albert): When you use Ctrl+C in Windows, it
                    # throws two exceptions, so you need to catch both
                    # of them. Find a cleaner fix for this.
                    print()
                    print('-- Exiting unlocker --')
                except (KeyboardInterrupt, EOFError):
                    pass
                print()
                break
        messages['unlock'] = self.analytics
Example No. 31
    def run(self, messages):
        """Responsible for unlocking each test.

        The unlocking process can be aborted by raising a KeyboardInterrupt or
        an EOFError.

        RETURNS:
        dict; mapping of test name (str) -> JSON-serializable object. It is up
        to each test to determine what information is significant for analytics.
        """
        if not self.args.unlock:
            return

        format.print_line('~')
        print('Unlocking tests')
        print()

        print('At each "{}", type what you would expect the output to be.'.format(
              self.PROMPT))
        print('Type {} to quit'.format(self.EXIT_INPUTS[0]))
        print()

        for test in self.assignment.specified_tests:
            log.info('Unlocking test {}'.format(test.name))
            self.current_test = test.name

            # Reset guidance explanation probability for every question
            self.guidance_util.prompt_probability = guidance.DEFAULT_PROMPT_PROBABILITY

            try:
                test.unlock(self.interact)
            except (KeyboardInterrupt, EOFError):
                try:
                    # TODO(albert): When you use Ctrl+C in Windows, it
                    # throws two exceptions, so you need to catch both
                    # of them. Find a cleaner fix for this.
                    print()
                    print('-- Exiting unlocker --')
                except (KeyboardInterrupt, EOFError):
                    pass
                print()
                break
        messages['unlock'] = self.analytics
Example No. 32
def display_breakdown(scores, outfile=None):
    """Writes the point breakdown to `outfile` given a dictionary of scores.
    `outfile` should be a string.  If `outfile` is None, write to stdout.

    RETURNS:
    dict; 'Total' -> finalized score (float)
    """
    total = 0
    outfile = open(outfile, 'w') if outfile else sys.stdout

    format.print_line('-')
    print('Point breakdown', file=outfile)
    for name, (score, max_score) in scores.items():
        print('    {}: {}/{}'.format(name, score, max_score), file=outfile)
        total += score
    print(file=outfile)

    print('Score:', file=outfile)
    print('    Total: {}'.format(total), file=outfile)
    return {'Total': total}
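
A short usage sketch for this variant (hypothetical scores). When a filename is passed, the function opens it for writing itself; note that the dashed separator from format.print_line still goes to stdout either way.

scores = {'q1': (2.0, 2.0), 'q2': (1.5, 3.0)}     # name -> (earned, possible)

totals = display_breakdown(scores)                # breakdown printed to stdout
totals = display_breakdown(scores, 'scores.txt')  # breakdown written to scores.txt
# totals == {'Total': 3.5} in both cases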
Example No. 33
def display_breakdown(scores, outfile=None):
    """Writes the point breakdown to `outfile` given a dictionary of scores.
    `outfile` should be a string.  If `outfile` is None, write to stdout.

    RETURNS:
    dict; 'Total' -> finalized score (float)
    """
    total = 0
    outfile = open(outfile, 'w') if outfile else sys.stdout

    format.print_line('-')
    print('Point breakdown', file=outfile)
    for name, (score, max_score) in scores.items():
        print('    {}: {}/{}'.format(name, score, max_score), file=outfile)
        total += score
    print(file=outfile)

    print('Score:', file=outfile)
    print('    Total: {}'.format(total), file=outfile)
    return {'Total': total}
Example No. 34
    def unlock(self, interact):
        total_cases = len([case for suite in self.suites
                                for case in suite.cases])
        for suite_num, suite in enumerate(self.suites):
            for case_num, case in enumerate(suite.cases):
                case_id = '{} > Suite {} > Case {}'.format(
                            self.name, suite_num + 1, case_num + 1)

                format.print_line('-')
                print(case_id)
                print('(cases remaining: {})'.format(total_cases))
                print()
                total_cases -= 1

                if case.locked != True:
                    print('-- Already unlocked --')
                    print()
                    continue

                case.unlock(self.unique_id_prefix, case_id, interact)
Example No. 35
    def run(self, env):
        """Runs the suites associated with this doctest.

        NOTE: env is intended only for use with the programmatic API to support
        Python OK tests. It is not used here.

        RETURNS:
        dict; {'passed': 1, 'failed': 0, 'locked': 0} if the doctest completely
        passes, {'passed': 0, 'failed': 1, 'locked': 0} otherwise.
        """
        output.off()
        log_id = output.new_log()

        format.print_line('-')
        print('Doctests for {}'.format(self.name))
        print()

        if not self.docstring:
            print('-- No doctests found for {} --'.format(self.name))
            if self.ignore_empty:
                success = True
            else:
                success = False
        else:
            success = self.case.run()
            if success:
                print('-- OK! --')

        output.on()
        output_log = output.get_log(log_id)
        output.remove_log(log_id)

        if not success or self.verbose:
            print(''.join(output_log))

        if not success and self.interactive:
            self.console.interact()

        if success:
            return {'passed': 1, 'failed': 0, 'locked': 0}
        else:
            return {'passed': 0, 'failed': 1, 'locked': 0}
Example No. 36
    def run(self, env):
        """Runs the suites associated with this doctest.

        NOTE: env is intended only for use with the programmatic API to support
        Python OK tests. It is not used here.

        RETURNS:
        dict; the number of passed and failed Scheme test cases, with
        'locked' always 0.
        """
        format.print_line('-')
        print('Scheme tests in {}'.format(self.file))
        print()

        passed, failed = self._run_tests()

        print('{} passed; {} failed'.format(passed, failed))
        if failed == 0 and passed > 0:
            print('-- OK! --')
            print()

        return {'passed': passed, 'failed': failed, 'locked': 0}
Example No. 37
    def lock(self, hash_fn):
        format.print_line('-')
        print(self.name)

        for suite_num, suite in enumerate(list(self.suites)):
            for case_num, case in enumerate(list(suite.cases)):
                message = '* Suite {} > Case {}: '.format(suite_num, case_num)
                if case.hidden:
                    suite.cases.remove(case)
                    print(message + 'removing hidden case')
                elif case.locked == core.NoValue:
                    case.lock(hash_fn)
                    print(message + 'locking')
                elif case.locked == False:
                    print(message + 'leaving unlocked')
                elif case.locked == True:
                    print(message + 'already unlocked')
            if not suite.cases:
                self.suites.remove(suite)
                print('* Suite {}: removing empty suite'.format(suite_num))
        print()
Example No. 38
    def run(self, env):
        """Runs the suites associated with this doctest.

        NOTE: env is intended only for use with the programmatic API to support
        Python OK tests. It is not used here.

        RETURNS:
        dict; the number of passed and failed Scheme test cases, with
        'locked' always 0.
        """
        format.print_line('-')
        print('Scheme tests in {}'.format(self.file))
        print()

        passed, failed = self._run_tests()

        print('{} passed; {} failed'.format(passed, failed))
        if failed == 0 and passed > 0:
            print('-- OK! --')
            print()

        return {'passed': passed, 'failed': failed, 'locked': 0}
Example No. 39
class Suite(core.Serializable):
    type = core.String()
    scored = core.Boolean(default=True)
    cases = core.List()

    def __init__(self, verbose, interactive, timeout=None, **fields):
        super().__init__(**fields)
        self.verbose = verbose
        self.interactive = interactive
        self.timeout = timeout

    def run(self, test_name, suite_number):
        """Subclasses should override this method to run tests.

        PARAMETERS:
        test_name    -- str; name of the parent test.
        suite_number -- int; suite number, assumed to be 1-indexed.

        RETURNS:
        dict; results of the following form:
        {
            'passed': int,
            'failed': int,
            'locked': int,
        }
        """
        raise NotImplementedError

    def _run_case(self, test_name, suite_number, case, case_number):
        """A wrapper for case.run().

        Prints informative output and also captures output of the test case
        and returns it as a log. The output is suppressed -- it is up to the
        calling function to decide whether or not to print the log.
        """
        output.off()    # Delay printing until case status is determined.
        log_id = output.new_log()

        format.print_line('-')
        print('{} > Suite {} > Case {}'.format(test_name, suite_number,
                                               case_number))
        print()

        success = case.run()
        if success:
            print('-- OK! --')

        output.on()
        output_log = output.get_log(log_id)
        output.remove_log(log_id)

        return success, output_log
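
A minimal sketch of what a concrete subclass's run() could look like on top of _run_case() (hypothetical; the project's real suite types add their own case selection and locked-case handling):

class SimpleSuite(Suite):
    def run(self, test_name, suite_number):
        results = {'passed': 0, 'failed': 0, 'locked': 0}
        for case_number, case in enumerate(self.cases, start=1):
            success, log = self._run_case(test_name, suite_number,
                                          case, case_number)
            # The captured log is printed only on failure or in verbose mode.
            if not success or self.verbose:
                print(''.join(log))
            results['passed' if success else 'failed'] += 1
        return results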
Example No. 40
    def on_interact(self):
        """Responsible for unlocking each test.

        The unlocking process can be aborted by raising a KeyboardInterrupt or
        an EOFError.

        RETURNS:
        dict; mapping of test name (str) -> JSON-serializable object. It is up
        to each test to determine what information is significant for analytics.
        """
        if not self.args.unlock:
            return

        format.print_line('~')
        print('Unlocking tests')
        print()

        print('At each "{}", type what you would expect the output to be.'.format(
              self.PROMPT))
        print('Type {} to quit'.format(self.EXIT_INPUTS[0]))
        print()

        analytics = {}
        for test in self.assignment.specified_tests:
            log.info('Unlocking test {}'.format(test.name))
            try:
                analytics[test.name] = test.unlock(self.interact)
            except (KeyboardInterrupt, EOFError):
                try:
                    # TODO(albert): When you use Ctrl+C in Windows, it
                    # throws two exceptions, so you need to catch both
                    # of them. Find a cleaner fix for this.
                    print()
                    print('-- Exiting unlocker --')
                except (KeyboardInterrupt, EOFError):
                    pass
                print()
                break
        return analytics
Example No. 41
    def _run_case(self, test_name, suite_number, case, case_number):
        """A wrapper for case.run().

        Prints informative output and also captures output of the test case
        and returns it as a log. The output is suppressed -- it is up to the
        calling function to decide whether or not to print the log.
        """
        output.off()    # Delay printing until case status is determined.
        log_id = output.new_log()

        format.print_line('-')
        print('{} > Suite {} > Case {}'.format(test_name, suite_number,
                                               case_number))
        print()

        success = case.run()
        if success:
            print('-- OK! --')

        output.on()
        output_log = output.get_log(log_id)
        output.remove_log(log_id)

        return success, output_log
Example No. 42
    def _print_header(self):
        format.print_line('=')
        print('Assignment: {}'.format(self.name))
        print('OK, version {}'.format(client.__version__))
        format.print_line('=')
        print()
Example No. 43
class OkTest(models.Test):
    suites = core.List()
    description = core.String(optional=True)

    def __init__(self, file, suite_map, assign_name, verbose, interactive,
                 timeout=None, **fields):
        super().__init__(**fields)
        self.file = file
        self.suite_map = suite_map
        self.verbose = verbose
        self.interactive = interactive
        self.timeout = timeout
        self.assignment_name = assign_name

    def post_instantiation(self):
        for i, suite in enumerate(self.suites):
            if not isinstance(suite, dict):
                raise ex.SerializeException('Test cases must be dictionaries')
            elif 'type' not in suite:
                raise ex.SerializeException('Suites must have field "type"')
            elif suite['type'] not in self.suite_map:
                raise ex.SerializeException('Invalid suite type: '
                                            '{}'.format(suite['type']))
            self.suites[i] = self.suite_map[suite['type']](
                    self.verbose, self.interactive, self.timeout, **suite)

    def run(self):
        """Runs the suites associated with this OK test.

        RETURNS:
        dict; the results for this test, in the form
        {
            'passed': int,
            'failed': int,
            'locked': int,
        }
        """
        passed, failed, locked = 0, 0, 0
        for i, suite in enumerate(self.suites):
            results = suite.run(self.name, i + 1)

            passed += results['passed']
            failed += results['failed']
            locked += results['locked']

            if not self.verbose and (failed > 0 or locked > 0):
                # Stop at the first failed test
                break

        if locked > 0:
            print()
            print('There are still locked tests! '
                  'Use the -u option to unlock them')

        if type(self.description) == str and self.description:
            print()
            print(self.description)
            print()
        return {
            'passed': passed,
            'failed': failed,
            'locked': locked,
        }

    def score(self):
        """Runs test cases and computes the score for this particular test.

        Scores are determined by aggregating results from suite.run() for each
        suite. A suite is considered passed only if it results in no locked
        nor failed results.

        The points available for this test are distributed evenly across
        scoreable (i.e. unlocked and 'scored' = True) suites.
        """
        passed, total = 0, 0
        for i, suite in enumerate(self.suites):
            if not suite.scored:
                continue

            total += 1
            results = suite.run(self.name, i + 1)

            if results['locked'] == 0 and results['failed'] == 0:
                passed += 1
        if total > 0:
            score = passed * self.points / total
        else:
            score = 0.0

        format.print_progress_bar(self.name, passed, total - passed, 0)
        print()
        return score

    def unlock(self, interact):
        total_cases = len([case for suite in self.suites
                                for case in suite.cases])
        for suite_num, suite in enumerate(self.suites):
            for case_num, case in enumerate(suite.cases):
                case_id = '{} > Suite {} > Case {}'.format(
                            self.name, suite_num + 1, case_num + 1)

                format.print_line('-')
                print(case_id)
                print('(cases remaining: {})'.format(total_cases))
                print()
                total_cases -= 1

                if case.locked != True:
                    print('-- Already unlocked --')
                    print()
                    continue

                case.unlock(self.unique_id_prefix, case_id, interact)

        assert total_cases == 0, 'Number of cases is incorrect'
        format.print_line('-')
        print('OK! All cases for {} unlocked.'.format(self.name))
        print()
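
To make the score() arithmetic above concrete, a small worked example with hypothetical numbers: the test's points are split evenly across suites marked scored=True, and a suite earns its share only when it has no failed and no locked cases.

points = 3                      # test.points
passed_suites = 2               # scored suites with no failed or locked cases
total_scored_suites = 3         # suites with scored=True
score = passed_suites * points / total_scored_suites   # 2 * 3 / 3 = 2.0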
Example No. 44
    def _print_header(self):
        format.print_line('=')
        print('Assignment: {}'.format(self.name))
        print('OK, version {}'.format(client.__version__))
        format.print_line('=')
        print()
Example No. 45
class Suite(core.Serializable):
    type = core.String()
    scored = core.Boolean(default=True)
    cases = core.List()

    def __init__(self, test, verbose, interactive, timeout=None, **fields):
        super().__init__(**fields)
        self.test = test
        self.verbose = verbose
        self.interactive = interactive
        self.timeout = timeout
        self.run_only = []

    def run(self, test_name, suite_number, env=None):
        """Subclasses should override this method to run tests.

        PARAMETERS:
        test_name    -- str; name of the parent test.
        suite_number -- int; suite number, assumed to be 1-indexed.
        env          -- dict; used by programmatic API to provide a
                        custom environment to run tests with.

        RETURNS:
        dict; results of the following form:
        {
            'passed': int,
            'failed': int,
            'locked': int,
        }
        """
        raise NotImplementedError

    def enumerate_cases(self):
        enumerated = enumerate(self.cases)
        if self.run_only:
            return [x for x in enumerated if x[0] + 1 in self.run_only]
        return enumerated

    def _run_case(self, test_name, suite_number, case, case_number):
        """A wrapper for case.run().

        Prints informative output and also captures output of the test case
        and returns it as a log. The output is printed only if the case fails,
        or if self.verbose is True.
        """
        output.off()  # Delay printing until case status is determined.
        log_id = output.new_log()
        format.print_line('-')
        print('{} > Suite {} > Case {}'.format(test_name, suite_number,
                                               case_number))
        print()

        success = case.run()
        if success:
            print('-- OK! --')

        output.on()
        output_log = output.get_log(log_id)
        output.remove_log(log_id)

        if not success or self.verbose:
            print(''.join(output_log))
        if not success:
            short_name = self.test.get_short_name()
            # TODO: Change when in notebook mode
            print('Run only this test case with '
                  '"python3 ok -q {} --suite {} --case {}"'.format(
                      short_name, suite_number, case_number))
        return success