Example #1
def main():
    """Runs the test"""

    univ_wash = Institution.create(name='University of Washington')
    sasha = Person.create(name='Aleksandr Aravkin')
    timothy = Person.create(name='Timothy Moore')

    InstitutionPerson.create(institution=univ_wash, person=sasha)
    InstitutionPerson.create(institution=univ_wash, person=timothy)

    amath352 = Group.create(institution=univ_wash, name='AMATH 352 Spring 2019', active=True)

    PersonGroup.create(person=sasha, group=amath352, leader=True)
    PersonGroup.create(person=timothy, group=amath352, leader=False)

    now = datetime.datetime.now()
    assignment = Assignment.create(name='HW 1', group=amath352, creator=sasha, created_at=now,
                                   visible_at=now, late_at=now + datetime.timedelta(minutes=10),
                                   late_penalty=0.2, closed_at=now + datetime.timedelta(minutes=20))

    p1_verfile = MatlabFile.create(name='problem1.m', contents='disp(\'problem1.m\'); points = 5;')
    Problem.create(assignment=assignment, points_out_of=5, verification_entry_file=p1_verfile)

    p1_submfile = MatlabFile.create(name='problem1.m', contents='a1 = 3')
    submission = Submission.create(assignment=assignment, submittor=timothy, submitted_at=datetime.datetime.now(),
                                   submission_entry_file=p1_submfile)
    grader.grade(submission)
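
The Model.create(...) calls in this fixture follow the Peewee ORM's API. Below is a minimal sketch of how the first few models might be declared, assuming Peewee; the field types are assumptions, not taken from the source:

from peewee import SqliteDatabase, Model, CharField, ForeignKeyField

db = SqliteDatabase(':memory:')

class BaseModel(Model):
    class Meta:
        database = db

class Institution(BaseModel):
    name = CharField()

class Person(BaseModel):
    name = CharField()

class InstitutionPerson(BaseModel):
    # join table linking people to institutions
    institution = ForeignKeyField(Institution)
    person = ForeignKeyField(Person)

db.create_tables([Institution, Person, InstitutionPerson])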
Example #2
def test_grader():
    name = 'TEST_001'

    code = """def f(x, y):\n    return x+y"""
    out = grade(name, code)
    logging.warning(out)
    assert out['correct']
    assert out['score'] == 1

    code = """def f(x,y):\n    print x+y"""
    out = grade(name, code)
    logging.warning(out)
    assert not out['correct']
    assert out['score'] == 0
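
The tests above only pin down grade()'s contract: it takes a problem name and a code string and returns a dict with at least 'correct' and 'score' keys. A minimal stand-in that satisfies both tests, assuming exec-based checking (TEST_001's real test cases are not shown):

def grade(name, code):
    # Hypothetical stand-in: exec the submission and probe f on one case.
    namespace = {}
    try:
        exec(code, namespace)  # Python 2 syntax like `print x+y` raises SyntaxError here
        correct = namespace['f'](2, 3) == 5
    except Exception:
        correct = False
    return {'correct': correct, 'score': 1 if correct else 0}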
Example #3
    def run_simulation(self, num_of_turns=300):
        for i in range(num_of_turns):
            # the agent proposes a move and the environment validates it
            move_result = self.__environment.move_searchagent(
                self.__searchagent.agent_id(),
                self.__searchagent.think(
                    self.__environment.get_adjacent_tiles(
                        self.__searchagent.agent_id())))
            if move_result[1]:
                # the move is valid, so the agent moves
                self.__searchagent.move(move_result[0])
                if self.__environment.check_target(
                        self.__searchagent.agent_id()):
                    self.__searchagent.search_success()
            else:
                # invalid move: return a score far below the lowest possible value
                self.__searchagent.reset()
                return -10000
            # self.__environment.draw()
        return_scores = grade(self.__searchagent)
        self.__environment.clear()
        self.__searchagent.reset()
        return return_scores
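
A hypothetical call site for run_simulation, assuming the enclosing class is constructed from an environment and a search agent (both names assumed):

# sim = Simulation(environment, searchagent)    # hypothetical constructor
# score = sim.run_simulation(num_of_turns=300)  # -10000 signals an invalid move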
Example #4
def test_PROBLEM_ID():
    """
    Problem name: PROBLEM-ID    # This name must be put in the openEDX problem.
    Part of:      WEEK 0
    Exercise:     <Put a short task description here>
    """
    id = 'PROBLEM-ID'

    # Assert the sample solution is accepted as correct
    code = """def add(x,y):\n    return x+y"""
    out = grade(id, code)
    logging.warning(out)  # pytest will display warnings if assert fails.
    assert out['correct']

    # Assert some common errors are recognized
    code = """def add(x,y):\n    return x-y"""
    out = grade(id, code)
    logging.warning(out)
    assert not out['correct']

    code = """def f(x,y):\n    return x+y"""
    out = grade(id, code)
    logging.warning(out)
    assert not out['correct']
Example #5
def grade(repo_name, args):
    """
    Grades a repository for a homework by calling the grader module.
    :param repo_name: string
    :param args: arguments for grading
    :return: int, the grade (None if the repo was already graded)
    """
    return_score = 0
    print('> Grading', repo_name, HW_DIR)
    hw_path = os.path.join(REPOS_DIR, repo_name, HW_DIR)
    tests_path = os.path.join(TESTS_DIR, HW_DIR)
    rubric_filename = "{0}.rubric.txt".format(HW_DIR)
    rubric_path = os.path.join(hw_path, rubric_filename)
    try:
        # Skip if graded
        if not args.force and os.path.exists(rubric_path):
            print('> Skipping: already graded')
            return
        # Make sure the homework directory exists
        if not os.path.exists(hw_path):
            os.makedirs(hw_path)
        # Delete any existing report file (guard the call so a missing file
        # is not misreported below as a missing submission)
        if os.path.exists(rubric_path):
            os.remove(rubric_path)
        # Copy files into the testing directory
        for file in HW_FILES:
            shutil.copy(os.path.join(hw_path, file), tests_path)
        # Run grader
        argv = ['-v', tests_path, '-o', rubric_path]
        if len(HW_FILES) > 0:
            argv.append('-d')
            argv.extend(HW_FILES)
        return_score = grader.grade(argv)
    except FileNotFoundError as e:
        __report_zero(rubric_path, "I cannot find the required file {0}.".format(e.filename))
    except grader.TestingFailureError:
        __report_zero(rubric_path, "Automated testing crashed.")

    # Cleanup
    try:
        for file in HW_FILES:
            os.remove(os.path.join(tests_path, file))
    except FileNotFoundError:
        pass
    return return_score
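
grade() relies on a __report_zero helper that is not shown here; a plausible sketch, with the rubric format assumed:

def __report_zero(rubric_path, message):
    # Hypothetical helper: write a zero-score rubric with an explanation.
    with open(rubric_path, 'w') as rubric:
        rubric.write('Score: 0\n')
        rubric.write(message + '\n')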
Example #6
def test_EXAMPLE_001():
    """
    Problem name: EXAMPLE-001
    Part of:      WEEK 0
    Exercise:     Ask the user to input a time in seconds and print
                  the time formatted as `H:M:S`.
                  Example input from the user: `4928`
                  Expected result: '1:22:8' (Note: not '1:22:08')
    """
    # Specify problem ID
    id = 'EXAMPLE-001'

    # Assert the sample solution is accepted as correct
    code = """
user_in = input('Please enter an integer number of seconds: ')
total_sec = int(user_in)
hours = total_sec // 3600
minutes = (total_sec // 60) % 60
seconds = total_sec % 60
print('{}:{}:{}'.format(hours, minutes, seconds))
"""
    out = grade(id, code)
    logging.warning(out)  # pytest will display warnings if assert fails.
    assert out['correct']
Example #7
import github
import grader
import sys
from pull_request import PullRequest
import re

PULL_REQUEST_URL = sys.argv[1]


def create_pull_request(html_url):
    regex = re.compile(r'https://github\.com/([^/]+/[^/]+)/pull/(\d+)')
    match = regex.match(html_url.strip())
    if match is None:
        raise ValueError("Invalid pull request URL.")

    repo = match.group(1)
    pr_number = match.group(2)

    api_url = "https://api.github.com/repos/{}/pulls/{}".format(
        repo, pr_number)
    pr_data = github.api_request(api_url)
    return PullRequest(pr_data)


if __name__ == '__main__':
    pr = create_pull_request(PULL_REQUEST_URL)
    github_username = pr.username()
    print(github_username)
    grader.grade(pr)
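
The github module here is the project's own helper rather than a published client; a minimal sketch of an api_request() that would satisfy this call, with authentication omitted and the response shape assumed:

import json
import urllib.request

def api_request(url):
    # Hypothetical: GET a GitHub REST endpoint and decode the JSON body.
    request = urllib.request.Request(
        url, headers={'Accept': 'application/vnd.github+json'})
    with urllib.request.urlopen(request) as response:
        return json.load(response)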
Example #8
        resultwriter.writerow(["NetID", "Grade", "Add Comments"])

        submitted_net_ids = set()
        for pr in pull_requests:
            print('-------')

            github_username = pr.username()
            print(github_username)

            student = students_by_github_username.get(github_username)
            if student is None:
                print("WARNING: not enrolled.")
                continue

            net_id = student.net_id
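            # Presumably pull_requests is ordered newest-first, so a later PR
            # from the same student is an older duplicate and is skipped.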
            if net_id in submitted_net_ids:
                print("WARNING: has a more recent submission")
                continue

            print(pr.url() + '/files')

            score, comment = grader.grade(pr)
            # TODO put in reasoning for score

            resultwriter.writerow([net_id, score, comment])

            submitted_net_ids.add(net_id)

        print('-------')
        print("Number of submissions:", len(submitted_net_ids))
Example #9
         # update assignStatus
         sql = "UPDATE assignStatus SET status = %s, timeStamp = %s WHERE assignmentId = %s"
         val = (assignmentStatus, timeStamp, assignmentId)
         mycursor.execute(sql, val)
         mydb.commit()
         # update assignResponse
         sqlResponse = "UPDATE assignResponse SET response = %s WHERE assignmentId = %s"
         valResponse = (assignmentResponse, assignmentId)
         mycursor.execute(sqlResponse, valResponse)
         mydb.commit()
     else:
         executeSave = False
 if executeSave:
     if assignmentStatus == "complete":
         # grade the assignment
         grade = grader.grade(uid, assignmentId, True,
                              timezoneOffset)
         # get the lessonId
         sql = "SELECT * FROM assignLesson WHERE assignmentId = %s"
         u = (assignmentId, )
         mycursor.execute(sql, u)
         lessonId = mycursor.fetchone()[1]
         # get the lesson json
         sql = "SELECT * FROM lessonJSON WHERE lessonId = %s"
         u = (lessonId, )
         mycursor.execute(sql, u)
         lessonJson = json.loads(mycursor.fetchone()[1])
         # get the time at start of today
         dt = datetime.date.today()
         # server's UTC offset, in seconds
         serverUtcOffset = (time.mktime(time.localtime()) -
                            time.mktime(time.gmtime()))
Example #10
SURVEY_DATA_PATH = sys.argv[1]
OUTPUT_PATH = sys.argv[2]

if __name__ == '__main__':
    students = get_students(SURVEY_DATA_PATH)
    with open(OUTPUT_PATH, 'w') as csvfile:
        resultwriter = csv.writer(csvfile)
        # this matches the grading spreadsheet template provided by CMS
        resultwriter.writerow(["NetID", "Grade", "Add Comments"])
        submitted_net_ids = []
        for net_id in students:
            print('-------')
            website, response = fetch(net_id)
            print(website)

            if response is None:
                print("WARNING: no valid website for the assign.")
                '''web = score = 0
                #comment = 'Website available : {website}/50, Return right time : {score}/50'.format(website=web, score=score)
                #resultwriter.writerow([net_id, 0, comment])'''
                continue

            score, comment = grader.grade(response)

            resultwriter.writerow([net_id, score, comment])
            print(score, comment)
            submitted_net_ids.append(net_id)

        print('-------')
        print("Number of submissions:", len(submitted_net_ids))
Example #11
def main():
    """Entry point"""
    parser = argparse.ArgumentParser()
    parser.add_argument('--input-database',
                        action='store',
                        help='The path to the folder which jobs are pushed to',
                        default='in')
    parser.add_argument(
        '--output-database',
        action='store',
        help='The path to the folder which evaluations are pushed to',
        default='out')
    parser.add_argument(
        '--loop',
        action='store_true',
        help=
        'Causes this to continuously read from the queue rather than terminate upon completion'
    )
    parser.add_argument(
        '--sleep-time',
        action='store',
        type=float,
        help=
        'Only used if --loop is set: time to sleep in seconds while waiting for work',
        default=0.1)
    parser.add_argument(
        '--logging-conf',
        action='store',
        help=
        'The path to the json file from which we logging.config.dictConfig',
        default='conf/logging.json')
    parser.add_argument(
        '--no-output',
        action='store_true',
        help='If set then this does not push completed jobs to the output queue'
    )
    parser.add_argument(
        '--skip-bad',
        action='store_true',
        help=
        'If set then this skips bad entries instead of nacking and exiting')
    args = parser.parse_args()

    verify_database_filepath(args.input_database)
    verify_database_filepath(args.output_database)

    load_logging(args.logging_conf)
    logger = logging.getLogger(__name__)

    jobque = persistqueue.SQLiteAckQueue(args.input_database)
    if not args.no_output:
        outque = persistqueue.SQLiteAckQueue(args.output_database)
    stop = False
    while not stop:
        if jobque.size == 0:
            if not args.loop:
                return
            time.sleep(args.sleep_time)
            continue

        job = jobque.get()
        success = False
        try:
            if not isinstance(job, int):
                raise ValueError(
                    'expected job to be an int (the id of a submission to grade or regrade)'
                )

            logger.info('Grading submission %s', job)
            grader.grade(Submission.get_by_id(job))
            success = True
        except Exception:
            logger.error('failed to grade job', exc_info=True)
            if not args.skip_bad:
                raise
            else:
                logger.info(
                    'skipping failed job instead of erroring (skip_bad is True)'
                )
        finally:
            if success or args.skip_bad:
                jobque.ack(job)
                if not args.no_output:
                    outque.put(job)
            else:
                logger.error(
                    'Failed to process job %s - nacking and terminating', job)
                jobque.nack(job)
                stop = True
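
For context, the producer side of this worker only needs to push submission ids onto the same persist-queue; a minimal sketch (the id value is illustrative):

import persistqueue

# Enqueue submission 42 for grading; the worker loop above will pick it up.
jobque = persistqueue.SQLiteAckQueue('in')
jobque.put(42)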
Example #12
def autograde(lab_base_name, week_name, download):

    # Which set of tests to run? Or all?
    #test_sets = which_tests()  # TODO return to this, but chances are, grade all.

    # load student data
    student_data = db.load_student_data(config.student_data_file)
    print(student_data)

    # Week 1 repo is called JAG_1
    #for test_set in test_sets:
    #print('\n\n**** TESTING TEST SET ' + test_set + " ****\n\n")
    #student_repos = db.get_repo_list()

    # Get my stuff from git; it has the original tests etc.
    if download[0]:
        cloner.clone('minneapolis-edu', my_repo, download_dir)

    grade_json_file = os.path.join(download_dir, my_repo, 'grades', test_set + '.json')
    with open(grade_json_file) as f:
        grade_scheme = json.load(f)
    print('Grade scheme used will be ' + str(grade_scheme))

    for student in student_data:
        print('\n\n**** Grading Student ' + student[1] + " " + student[0] + ' *****\n\n')
        student_github_id = student[1]

        their_repo = lab_base_name + '-' + student_github_id

        # Comment these lines to NOT overwrite student code

        if download[1]:
            repo_fetched = cloner.clone('mctc-itec', their_repo, download_dir)  # download_dir = student_code
        else:
            repo_fetched = True  # TODO: check whether the repo already exists locally

        if not repo_fetched:
            print("The repo " + their_repo + " was not found. Skipping this student.")
            continue

        tester.fetch_tests(download_dir, their_repo, my_repo)

        # TODO Check if code builds?  Student needs to fix build errors before any grading happens

        # Run tests
        if tester.test(download_dir, their_repo) == 'build error':
            total = 0
            results = {}

        else:
            # Grade
            print('down', download_dir)
            print('their repo', their_repo)
            print('test', test_set)

            results, total = grader.grade(os.path.join(download_dir, their_repo), test_set, grade_scheme)

            if not results or not total:
                print("ERROR: no results generated for " + student[0] + " " + student[1] + ". Check whether surefire reports were generated; does the student code compile?")
                continue

            # Find extra credit tokens for this student
            week_number = int(week_name.replace('week_', ''))
            #extra_credit_tokens = add_extra_credit.token_search(student_name, week_number, len(results))

            # save to DB as entry in grade table
            #if extra_credit_tokens:
                # name, week_1, [3, 4]   <- list of questions to add a point
                #db.save_extra_credit(student[0], test_set, extra_credit_tokens)

        print('\n\nResults for student ' + student[2] + "\n" + str(results) + "\nTotal points: " + str(total) + '\n\n')

        db.save_results(student[0], test_set, results, total)  # starID, week_1, dictionary of results, total points

    # Print a summary
    print("SUMMARY OF LAB: " + week_name)

    for student in student_data:
        print(student[0], db.grade_for_lab(student[0], week_name))
Example #13
                        "{})".format(err[0].__name__, err[1]),
                        error=True)
            traceback.print_exc()
            sys.exit(util.ERR_CRITERIA_IMPORT)

        grade_filename = c.name + (c.group if c.group else "") + "-grade.txt"

    if args.mode == 'grade':
        if os.path.isfile(grade_filename):
            util.sprint("refusing to overwrite existing grade file")
            sys.exit(util.ERR_GRADE_FILE_EXISTS)

        if not args.submission_files:
            util.sprint("warning: no submission files specified")

        any_missing = grader.grade(c, args.submission_files, grade_filename)
        if any_missing:
            sys.exit(util.EXIT_WITH_MISSING)
        else:
            sys.exit()

    elif args.mode == 'submit':
        from functools import reduce
        import tarfile
        import random

        chars = [str(i) for i in range(10)] + \
                [chr(ord('a') + i) for i in range(26)]

        rand = reduce(str.__add__, [random.choice(chars) for _ in range(32)])
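        # An equivalent, more idiomatic construction (assuming Python 3.6+):
        #   rand = ''.join(random.choices(chars, k=32))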
        submit_dir = config.dropbox_dir + os.sep + c.name + \