Example #1
def grade(questions, messages, env=None, verbose=True):
    format.print_line('~')
    print('Running tests')
    print()
    passed = 0
    failed = 0
    locked = 0

    analytics = {}

    # Run each question's tests in the given environment and aggregate the results.
    for test in questions:
        log.info('Running tests for {}'.format(test.name))
        results = test.run(env)

        # Record a persistent 'correct' flag once the question passes completely.
        if results['failed'] == 0 and results['locked'] == 0:
            storage.store(test.name, 'correct', True)

        passed += results['passed']
        failed += results['failed']
        locked += results['locked']
        analytics[test.name] = results

        if not verbose and (failed > 0 or locked > 0):
            # Stop at the first failed test
            break

    format.print_progress_bar('Test summary', passed, failed, locked,
                              verbose=verbose)
    print()

    messages['grading'] = analytics
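
A minimal sketch of the per-test result contract grade() relies on; StubTest and its counts are hypothetical stand-ins for okpy's real test objects, and the call itself stays commented out because grade() also needs the surrounding module's format, log, and storage helpers:

class StubTest:
    # Hypothetical stand-in; not part of the real okpy API.
    def __init__(self, name, passed, failed, locked):
        self.name = name
        self._results = {'passed': passed, 'failed': failed, 'locked': locked}

    def run(self, env=None):
        # Each test returns JSON-serializable counts; grade() sums them and
        # files the per-test dict under analytics[test.name].
        return dict(self._results)

# messages = {}
# grade([StubTest('q1', 2, 0, 0), StubTest('q2', 1, 1, 0)], messages)
# messages['grading'] would then map 'q1' and 'q2' to their result dicts.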
Example #2
    def score(self):
        """Runs test cases and computes the score for this particular test.

        Scores are determined by aggregating results from suite.run() for each
        suite. A suite is considered passed only if it yields no locked or
        failed results.

        The points available for this test are distributed evenly across
        scoreable (i.e. unlocked and 'scored' = True) suites.
        """
        passed, total = 0, 0
        for i, suite in enumerate(self.suites):
            if not suite.scored:
                continue

            total += 1
            results = suite.run(self.name, i + 1)

            if results['locked'] == 0 and results['failed'] == 0:
                passed += 1

        if total > 0:
            score = passed * self.points / total
        else:
            score = 0.0

        format.print_progress_bar(self.name, passed, total - passed, 0)
        print()
        return score
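
A worked instance of the even point distribution described in the docstring; all numbers here are hypothetical:

# Hypothetical: 3 scoreable suites, 2 of them passed, self.points = 4.
passed, total, points = 2, 3, 4
score = passed * points / total if total > 0 else 0.0
print(score)  # 2.666...: the test earns two thirds of its available points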
Example #3
    def on_interact(self):
        """Run gradeable tests and print results and return analytics.

        RETURNS:
        dict; a mapping of test name -> JSON-serializable object. It is up to
        each test to determine what kind of data it wants to return as
        significant for analytics. However, all tests must include the number
        passed, the number of locked tests, and the number of failed tests.
        """
        if self.args.score:
            return

        format.print_line('~')
        print('Running tests')
        print()
        passed = 0
        failed = 0
        locked = 0

        analytics = {}

        for test in self.assignment.specified_tests:
            log.info('Running tests for {}'.format(test.name))
            results = test.run()

            passed += results['passed']
            failed += results['failed']
            locked += results['locked']
            analytics[test.name] = results

        format.print_progress_bar('Test summary', passed, failed, locked)
        return analytics
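
The analytics mapping returned above has this shape; question names and counts are hypothetical, and individual tests may attach extra keys beyond the required three:

analytics = {
    'q1': {'passed': 3, 'failed': 0, 'locked': 0},  # fully passing question
    'q2': {'passed': 1, 'failed': 2, 'locked': 1},  # partially failing question
}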
Example #4
    def run(self, messages):
        """Run gradeable tests and print results and return analytics.

        RETURNS:
        dict; a mapping of test name -> JSON-serializable object. It is up to
        each test to determine what kind of data it wants to return as
        significant for analytics. However, all tests must include the number
        passed, the number of locked tests, and the number of failed tests.
        """
        if self.args.score or self.args.export or self.args.unlock or self.args.restore:
            return

        format.print_line('~')
        print('Running tests')
        print()
        passed = 0
        failed = 0
        locked = 0

        analytics = {}
        # Pull per-question 'started' info from the analytics message, if present.
        started = messages.get('analytics', {}).get('started')

        for test in self.assignment.specified_tests:
            # Run the test unless analytics indicate the question was never started.
            if (started is None
                or test.name not in started
                or started[test.name]):

                log.info('Running tests for {}'.format(test.name))
                results = test.run()
                passed += results['passed']
                failed += results['failed']
                locked += results['locked']
                analytics[test.name] = results
            else:
                print("It looks like you haven't started {}. Skipping the tests.".format(test.name))
                print()

            if not self.args.verbose and (failed > 0 or locked > 0):
                # Stop at the first failed test
                break

        format.print_progress_bar('Test summary', passed, failed, locked,
                                  verbose=self.args.verbose)
        print()

        messages['grading'] = analytics
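
The incoming analytics message that run() consults has this shape; the question names and flags are hypothetical:

messages = {'analytics': {'started': {'q1': True, 'q2': False}}}
# Given the above, q1's tests run normally while q2 is skipped with a notice.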
Example #5
                    print( "---------------------------------------------------------------------\nYou got it in your first try, congrats!")

            # Send the incorrect/correct submission pair to the Refazer server.
            refazer_obj = {'EndPoint': endpoint, 'Question': question,
                           'IncorrectCode': incorrectCode,
                           'CorrectCode': correctCode}
            headers = {'Content-Type': 'application/json'}
            requests.post('http://refazer-online.azurewebsites.net/api/examples',
                          data=json.dumps(refazer_obj), headers=headers)

        else:
            # Archive the failing submission under the next available index.
            wrong_dir = current_directory + "/submissions/wrong_submissions"
            count_of_subs = len(os.listdir(wrong_dir))

            copyfile(current_directory + "/hw02.py",
                     wrong_dir + "/wrong_sub" + str(count_of_subs + 1) + ".py")


        if not verbose and (failed > 0 or locked > 0):
            # Stop at the first failed test
            break

    format.print_progress_bar('Test summary', passed, failed, locked,
                              verbose=verbose)
    print()

    messages['grading'] = analytics

protocol = GradingProtocol
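
A hedged sketch of how an okpy-style client might drive this protocol; the constructor arguments are an assumption inferred from the run(messages) hook above, so the calls stay commented out:

# messages = {}
# GradingProtocol(args, assignment).run(messages)  # signature assumed, not confirmed
# messages['grading'] would then carry the per-test analytics for reporting.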