Example 1
def search_similar_repos(access_token=None, assignment_id=None,
                         language='python', template_name=None,
                         keyword='def ', weeks_past=12,
                         issue_title=None, issue_body=None):
    logger = jobs.get_job_logger()
    logger.info('Starting Github Search...')

    assign = Assignment.query.filter_by(id=assignment_id).one_or_none()
    if not assign:
        logger.info("Could not find assignment")
        return
    if not assign.files:
        logger.info("Upload template files for this assignment to search.")
        return

    possible_file_names = list(assign.files.keys())
    if template_name not in possible_file_names:
        logger.info("{} is not in {}".format(template_name, possible_file_names))
        return

    source_file = assign.files[template_name]
    repos = get_online_repos(source_file, logger, language, access_token,
                             keyword=keyword)
    if not repos:
        logger.warning("No repos found. Try a different keyword?")
        return
    recent_repos = list_recent_repos(repos, logger, access_token, weeks_past)

    if issue_title and issue_body:
        for repo in recent_repos:
            repo_name = recent_repos[repo]['repository']['full_name']
            file_github_issue(repo_name, logger, access_token, issue_title, issue_body)

    return "Found {} recent repos and {} total repos".format(len(recent_repos), len(repos))
Example 2
def enroll_students(canvas_course_id):
    logger = jobs.get_job_logger()
    row_format = '{email!s:<35} {name!s:<35} {sid!s:<11}'
    canvas_course = CanvasCourse.query.get(canvas_course_id)
    enrollment_info = []
    logger.info(row_format.format(email='EMAIL', name='NAME', sid='SID'))
    for student in api.get_students(canvas_course):
        if not student.get("email"):
            logger.warning(
                "Student {} not enrolled, missing email address".format(
                    student))
            continue
        info = {
            'email': student['email'],
            'name': student['name'],
            'sid': student['sis_user_id'],
            'class_account': '',
            'section': '',
        }
        logger.info(row_format.format(**info))
        enrollment_info.append(info)
    created, updated = Enrollment.create(canvas_course.course_id,
                                         enrollment_info)
    message = 'Added {new}, updated {old} student enrollments'.format(
        new=created, old=updated)
    logger.info(message)
    return message
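
Note: the row_format string leans on two str.format features: the !s conversion applies str() to each value, and a spec like <35 left-aligns the result in a fixed-width column, which is what keeps the log output tabular. A minimal demonstration:

row_format = '{email!s:<35} {name!s:<35} {sid!s:<11}'
# '!s' converts each value with str(); '<35' left-pads to a 35-character column.
print(row_format.format(email='EMAIL', name='NAME', sid='SID'))
print(row_format.format(email='alice@example.com', name='Alice', sid=12345))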
Example 3
def test_job(duration=0, should_fail=False):
    logger = jobs.get_job_logger()

    logger.info('Starting...')
    time.sleep(duration)
    if should_fail:
        1 / 0  # deliberately raise ZeroDivisionError so the job fails
    logger.info('Finished!')
Example 4
def upload_scores(canvas_assignment_id):
    logger = jobs.get_job_logger()
    canvas_assignment = CanvasAssignment.query.get(canvas_assignment_id)
    canvas_course = canvas_assignment.canvas_course
    assignment = canvas_assignment.assignment
    course = assignment.course

    logger.info('Starting bCourses upload')
    logger.info('bCourses assignment URL: {}'.format(canvas_assignment.url))
    logger.info('OK assignment: {}'.format(assignment.display_name))
    logger.info('Scores: {}'.format(', '.join(canvas_assignment.score_kinds)))

    students = api.get_students(canvas_course)
    old_scores = api.get_scores(canvas_assignment)
    new_scores = {}
    stats = collections.Counter()

    row_format = '{!s:>10}  {!s:<55}  {!s:<6}  {!s:>9}  {!s:>9}'
    logger.info(row_format.format('STUDENT ID', 'EMAIL', 'BACKUP', 'OLD SCORE', 'NEW SCORE'))

    for student in students:
        canvas_user_id = student['id']
        sid = student['sis_user_id']
        enrollments = Enrollment.query.filter_by(
            course_id=canvas_course.course_id,
            sid=sid,
            role=constants.STUDENT_ROLE,
        ).all()
        emails = ','.join(enrollment.user.email for enrollment in enrollments) or 'None'
        scores = []
        for enrollment in enrollments:
            user_ids = assignment.active_user_ids(enrollment.user_id)
            scores.extend(assignment.scores(user_ids))
        scores = [s for s in scores if s.kind in canvas_assignment.score_kinds]
        old_score = old_scores.get(canvas_user_id)
        if not scores:
            new_score = None
            backup_id = None
            stats['no_scores'] += 1
        else:
            max_score = max(scores, key=lambda score: score.score)
            new_score = max_score.score
            backup_id = encode_id(max_score.backup_id)
            if old_score != new_score:
                new_scores[canvas_user_id] = new_score
                stats['updated'] += 1
            else:
                stats['not_changed'] += 1
        logger.info(row_format.format(sid, emails, backup_id, old_score, new_score))

    if new_scores:
        api.put_scores(canvas_assignment, new_scores)

    stats = ('{updated} updated, {not_changed} not changed, '
             '{no_scores} no scores'.format(**stats))
    logger.info(stats)
    return stats
Example 5
def test_job(duration=0, should_fail=False):
    logger = jobs.get_job_logger()

    logger.info('Starting...')
    time.sleep(duration)
    if should_fail:
        1 / 0
    logger.info('Finished!')
    return "Waited for <b>{}</b> seconds!".format(duration)
Example 6
File: upload.py Project: kvakil/ok
def upload_scores(canvas_assignment_id):
    logger = jobs.get_job_logger()
    canvas_assignment = CanvasAssignment.query.get(canvas_assignment_id)
    canvas_course = canvas_assignment.canvas_course
    assignment = canvas_assignment.assignment
    course = assignment.course

    logger.info('Starting bCourses upload')
    logger.info('bCourses assignment URL: {}'.format(canvas_assignment.url))
    logger.info('OK assignment: {}'.format(assignment.display_name))
    logger.info('Scores: {}'.format(', '.join(canvas_assignment.score_kinds)))

    students = api.get_students(canvas_course)
    old_scores = api.get_scores(canvas_assignment)
    new_scores = {}
    stats = collections.Counter()

    row_format = '{!s:>10}  {!s:<55}  {!s:<6}  {!s:>9}  {!s:>9}'
    logger.info(row_format.format('STUDENT ID', 'EMAIL', 'BACKUP', 'OLD SCORE', 'NEW SCORE'))

    for student in students:
        canvas_user_id = student['id']
        sid = student['sis_user_id']
        enrollments = Enrollment.query.filter_by(
            course_id=canvas_course.course_id,
            sid=sid,
            role=constants.STUDENT_ROLE,
        ).all()
        emails = ','.join(enrollment.user.email for enrollment in enrollments) or 'None'
        scores = []
        for enrollment in enrollments:
            user_ids = assignment.active_user_ids(enrollment.user_id)
            scores.extend(assignment.scores(user_ids))
        scores = [s for s in scores if s.kind in canvas_assignment.score_kinds]
        old_score = old_scores.get(canvas_user_id)
        if not scores:
            new_score = None
            backup_id = None
            stats['no_scores'] += 1
        else:
            max_score = max(scores, key=lambda score: score.score)
            new_score = max_score.score
            backup_id = encode_id(max_score.backup_id)
            if old_score != new_score:
                new_scores[canvas_user_id] = new_score
                stats['updated'] += 1
            else:
                stats['not_changed'] += 1
        logger.info(row_format.format(sid, emails, backup_id, old_score, new_score))

    api.put_scores(canvas_assignment, new_scores)

    logger.info('{updated} updated, {not_changed} not changed, '
        '{no_scores} no scores'.format(**stats))
Example 7
def score_from_csv(assign_id, rows, kind='total', invalid=None, message=None):
    """
    Job for uploading Scores.

    @param ``rows`` should be a list of records (mappings),
        with keys `email` and `score`
    """
    log = jobs.get_job_logger()
    current_user = jobs.get_current_job().user
    assign = Assignment.query.get(assign_id)

    message = message or '{} score for {}'.format(kind.title(), assign.display_name)

    def log_err(msg):
        log.info('\t!  {}'.format(msg))

    log.info("Uploading scores for {}:\n".format(assign.display_name))

    if invalid:
        log_err('skipping {} invalid entries on lines:'.format(len(invalid)))
        for line in invalid:
            log_err('\t{}'.format(line))
        log.info('')

    success, total = 0, len(rows)
    for i, row in enumerate(rows, start=1):
        try:
            email, score = row['email'], row['score']
            user = User.query.filter_by(email=email).one()

            backup = Backup.query.filter_by(assignment=assign, submitter=user, submit=True).first()
            if not backup:
                backup = Backup.create(submitter=user, assignment=assign, submit=True)

            uploaded_score = Score(grader=current_user, assignment=assign,
                    backup=backup, user=user, score=score, kind=kind, message=message)

            db.session.add(uploaded_score)
            uploaded_score.archive_duplicates()

        except SQLAlchemyError:
            print_exc()
            log_err('error: user with email `{}` does not exist'.format(email))
        else:
            success += 1
        if i % 100 == 0:
            log.info('\nUploaded {}/{} Scores\n'.format(i, total))
    db.session.commit()

    log.info('\nSuccessfully uploaded {} "{}" scores (with {} errors)'.format(success, kind, total - success))

    return '/admin/course/{cid}/assignments/{aid}/scores'.format(
                cid=jobs.get_current_job().course_id, aid=assign_id)
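
Note: the job expects rows to be parsed already. A plausible way to build that argument (a sketch assuming a CSV file with email and score columns, not the project's actual upload view):

import csv

with open('scores.csv', newline='') as f:
    rows = [{'email': r['email'], 'score': float(r['score'])}
            for r in csv.DictReader(f)]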
Example 8
def email_scores(assignment_id, score_tags, subject, body,
                 reply_to=None, dry_run=False):
    log = jobs.get_job_logger()
    job_creator = jobs.get_current_job().user

    assign = Assignment.query.get(assignment_id)

    students = [e.user for e in (Enrollment.query
                        .options(db.joinedload('user'))
                        .filter(Enrollment.role == STUDENT_ROLE,
                                Enrollment.course == assign.course)
                        .all())]

    email_counter = 0
    seen_ids = set()
    for student in students:
        if student.id in seen_ids:
            continue
        user_ids = assign.active_user_ids(student.id)
        seen_ids |= user_ids
        scores = [s for s in assign.scores(user_ids) if s.kind in score_tags]
        if scores:
            users = User.query.filter(User.id.in_(user_ids))
            primary, cc = users[0].email, [u.email for u in users[1:]]
            if dry_run:
                primary, cc = job_creator.email, []

            result = send_email(primary,
                subject, body,
                cc=cc,
                template='email/scores.html',
                title=subject,
                from_name=assign.course.display_name,
                scores=scores,
                reply_to=reply_to,
                link_text="View on okpy.org",
                link="https://okpy.org/" + assign.name, # Don't have url_for
                assignment=assign.display_name)

            if result:
                log.info("Sent to {}".format(', '.join([primary] + cc)))
                email_counter += 1

        # Send a few emails in dry run mode.
        if dry_run and email_counter >= 2:
            message = "Run with dry run mode"
            log.info(message)
            return message

    message = "Sent {} emails".format(email_counter)
    log.info(message)
    return message
Example 9
def assign_scores(assign_id, score, kind, message, deadline,
                  include_backups=True):
    logger = jobs.get_job_logger()
    current_user = jobs.get_current_job().user

    assignment = Assignment.query.get(assign_id)
    students = [e.user_id for e in assignment.course.get_students()]
    submission_time = server_time_obj(deadline, assignment.course)

    # Find all submissions (or backups) before the deadline
    backups = Backup.query.filter(
        Backup.assignment_id == assign_id,
        or_(Backup.created <= deadline, Backup.custom_submission_time <= deadline)
    ).order_by(Backup.created.desc()).group_by(Backup.submitter_id)

    if not include_backups:
        backups = backups.filter(Backup.submit == True)

    all_backups = backups.all()

    if not all_backups:
        logger.info("No submissions were found with a deadline of {}."
                    .format(deadline))
        return "No Scores Created"

    total_count = len(all_backups)
    logger.info("Found {} eligible submissions...".format(total_count))

    score_counter, seen = 0, set()

    for back in all_backups:
        if back.creator in seen:
            continue
        new_score = Score(score=score, kind=kind, message=message,
                          user_id=back.submitter_id,
                          assignment=assignment, backup=back,
                          grader=current_user)
        db.session.add(new_score)
        new_score.archive_duplicates()
        db.session.commit()

        score_counter += 1
        if score_counter % 5 == 0:
            logger.info("Scored {} of {}".format(score_counter, total_count))
        seen |= back.owners()

    result = "Left {} '{}' scores of {}".format(score_counter, kind.title(), score)
    logger.info(result)
    return result
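
Note: because the query orders backups newest-first, the seen set guarantees each owner group is scored at most once. The underlying pattern (checking overlap with all owners, a slightly more general test than the creator check above), reduced to plain data:

# Keep the newest item per owner group; an item can have several owners.
backups = [('b3', {1, 2}), ('b2', {2}), ('b1', {3})]  # newest first
seen, unique = set(), []
for backup_id, owners in backups:
    if owners & seen:
        continue  # some owner already has a newer backup
    unique.append(backup_id)
    seen |= owners
# unique == ['b3', 'b1']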
Example 10
def export_grades():
    logger = jobs.get_job_logger()
    current_user = jobs.get_current_job().user
    course = Course.query.get(jobs.get_current_job().course_id)
    assignments = course.assignments
    students = (Enrollment.query.options(db.joinedload('user')).filter(
        Enrollment.role == STUDENT_ROLE, Enrollment.course == course).all())

    headers, assignments = get_headers(assignments)
    logger.info("Using these headers:")
    for header in headers:
        logger.info('\t' + header)
    logger.info('')

    total_students = len(students)

    users = [student.user for student in students]
    user_ids = [user.id for user in users]

    all_scores = collect_all_scores(assignments, user_ids)

    with io.StringIO() as f:
        writer = csv.writer(f)
        writer.writerow(headers)  # write headers

        for i, student in enumerate(students, start=1):
            row = export_student_grades(student, assignments, all_scores)
            writer.writerow(row)
            if i % 50 == 0:
                logger.info('Exported {}/{}'.format(i, total_students))
        f.seek(0)
        created_time = local_time(dt.datetime.now(),
                                  course,
                                  fmt='%b-%-d %Y at %I-%M%p')
        csv_filename = '{course_name} Grades ({date}).csv'.format(
            course_name=course.display_name, date=created_time)
        # convert to bytes for csv upload
        csv_bytes = io.BytesIO(bytearray(f.read(), 'utf-8'))
        upload = ExternalFile.upload(csv_bytes,
                                     user_id=current_user.id,
                                     name=csv_filename,
                                     course_id=course.id,
                                     prefix='jobs/exports/{}/'.format(
                                         course.offering))

    logger.info('\nDone!\n')
    logger.info("Saved as: {0}".format(upload.object_name))
    return "/files/{0}".format(encode_id(upload.id))
Example 11
def autograde_assignment(assignment_id):
    """Autograde all enrolled students for this assignment.

    We set up a state machine for each backup to check its progress through
    the autograder. If any step takes too long, we'll retry autograding that
    backup. Ultimately, a backup is considered done when we confirm that
    we've received a new score, or if we have reached the retry limit.
    """
    logger = jobs.get_job_logger()
    assignment = Assignment.query.get(assignment_id)
    course_submissions = assignment.course_submissions(include_empty=False)
    backup_ids = set(fs['backup']['id'] for fs in course_submissions
                     if fs['backup'])
    return autograde_backups(assignment,
                             jobs.get_current_job().user_id, backup_ids,
                             logger)
Example 12
def test_job(duration=0, should_fail=False, make_file=False):
    logger = jobs.get_job_logger()

    logger.info('Starting...')
    time.sleep(duration)
    if should_fail:
        1 / 0
    if make_file:
        upload = ExternalFile.upload(data(duration+1), user_id=1, course_id=1,
                                     name='temp.okfile', prefix='jobs/example/')
        logger.info("Saved as: {}".format(upload.object_name))
        logger.info('File ID: {0}'.format(encode_id(upload.id)))
        msg = ("Waited for <a href='/files/{0}'> {1} seconds </a>"
               .format(encode_id(upload.id), duration))
    else:
        msg = "Waited for <b>{}</b> seconds!".format(duration)
    logger.info('Finished!')
    return msg
Example 13
def audit_missing_scores(assign_id):
    logger = jobs.get_job_logger()

    assignment = Assignment.query.get(assign_id)
    data = assignment.course_submissions()

    students_with_subms = set(s['user']['email'] for s in data if s['backup'])
    students_without_subms = set(s['user']['email'] for s in data
                                 if not s['backup'])

    logger.info('Students with submissions: {}'.format(
        len(students_with_subms)))
    logger.info('Students without submissions: {}'.format(
        len(students_without_subms)))

    query = (Score.query.options(db.joinedload('backup')).filter_by(
        assignment=assignment, archived=False))

    has_scores = defaultdict(set)

    all_scores = query.all()
    for score in all_scores:
        submitters = score.backup.enrollment_info()
        for s in submitters:
            has_scores[score.kind].add(s.user.email)

    logger.info("---" * 20)
    for score_kind in has_scores:
        difference = students_with_subms.difference(has_scores[score_kind])
        logger.info("Number of students with {} scores is {}".format(
            score_kind, len(has_scores[score_kind])))
        logger.info("Number of students without {} scores is {}".format(
            score_kind, len(difference)))

        if difference and len(difference) < 200:
            logger.info("Students without {} scores: {}".format(
                score_kind, ', '.join(difference)))
        elif len(difference) >= 200:
            # Avoid creating very long lines.
            subset = list(difference)[:200]
            logger.info(
                "{} students do not have {} scores. Here are a few: {}".format(
                    len(difference), score_kind, ', '.join(subset)))
        logger.info("---" * 20)
Example 14
def autograde_assignment(assignment_id):
    """Autograde all enrolled students for this assignment.

    We set up a state machine for each backup to check its progress through
    the autograder. If any step takes too long, we'll retry autograding that
    backup. Ultimately, a backup is considered done when we confirm that
    we've received a new score, or if we have reached the retry limit.
    """
    logger = jobs.get_job_logger()
    assignment = Assignment.query.get(assignment_id)
    course_submissions = assignment.course_submissions(include_empty=False)
    backup_ids = set(fs['backup']['id'] for fs in course_submissions if fs['backup'])
    try:
        autograde_backups(assignment, jobs.get_current_job().user_id, backup_ids, logger)
    except ValueError:
        logger.info('Could not autograde backups - Please add an autograding key.')
        return
    return '/admin/course/{cid}/assignments/{aid}/scores'.format(
                cid=jobs.get_current_job().course_id, aid=assignment.id)
Example 15
def export_assignment(assignment_id, anonymized):
    """ Generate a zip file of submissions from enrolled students.

    Final Submission: One submission per student/group
        Zip Structure: cal-cs61a../[email protected]@b.com/abc12d/hog.py
    Anonymized: Submission without identifying info
        Zip Structure: cal-cs61a../{hash}/hog.py
    """
    logger = jobs.get_job_logger()

    assignment = Assignment.query.get(assignment_id)
    requesting_user = jobs.get_current_job().user

    if not assignment:
        logger.warning("No assignment found")
        raise Exception("No Assignment")

    if not Assignment.can(assignment, requesting_user, "download"):
        raise Exception("{} does not have enough permission"
                        .format(requesting_user.email))
    if anonymized:
        logger.info("Starting anonymized submission export")
    else:
        logger.info("Starting final submission export")
    course = assignment.course
    with io.BytesIO() as bio:
        # Get a handle to the in-memory zip in append mode
        with zipfile.ZipFile(bio, "w", zipfile.ZIP_DEFLATED, False) as zf:
            zf.external_attr = 0o655 << 16
            export_loop(bio, zf, logger, assignment, anonymized)
            created_time = local_time(dt.datetime.now(), course, fmt='%m-%d-%I-%M-%p')
            zip_name = '{}_{}.zip'.format(assignment.name.replace('/', '-'), created_time)

        bio.seek(0)
        # Close zf handle to finish writing zipfile
        logger.info("Uploading...")
        upload = ExternalFile.upload(bio, user_id=requesting_user.id, name=zip_name,
                                     course_id=course.id,
                                     prefix='jobs/exports/{}/'.format(course.offering))

    logger.info("Saved as: {0}".format(upload.object_name))
    msg = "/files/{0}".format(encode_id(upload.id))
    return msg
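
Note: as the inline comments say, the inner ZipFile handle must be closed before the buffer holds a complete archive; only then is bio rewound and uploaded. The shape of that pattern on its own:

import io
import zipfile

with io.BytesIO() as bio:
    with zipfile.ZipFile(bio, 'w', zipfile.ZIP_DEFLATED) as zf:
        zf.writestr('hw01/alice/hog.py', 'print("hog")\n')
    # The ZipFile is closed here, so the central directory has been written.
    bio.seek(0)
    archive_bytes = bio.read()  # a complete, readable zip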
Example 16
def audit_missing_scores(assign_id):
    logger = jobs.get_job_logger()

    assignment = Assignment.query.get(assign_id)
    data = assignment.course_submissions()

    students_with_subms = set(s['user']['email'] for s in data
                              if s['backup'])
    students_without_subms = set(s['user']['email'] for s in data
                                 if not s['backup'])

    logger.info('Students with submissions: {}'.format(len(students_with_subms)))
    logger.info('Students without submissions: {}'.format(len(students_without_subms)))

    query = (Score.query.options(db.joinedload('backup'))
                  .filter_by(assignment=assignment, archived=False))

    has_scores = defaultdict(set)

    all_scores = query.all()
    for score in all_scores:
        submitters = score.backup.enrollment_info()
        for s in submitters:
            has_scores[score.kind].add(s.user.email)

    logger.info("---"*20)
    for score_kind in has_scores:
        difference = students_with_subms.difference(has_scores[score_kind])
        logger.info("Number of students with {} scores is {}".format(score_kind,
                                                                     len(has_scores[score_kind])))
        logger.info("Number of students without {} scores is {}".format(score_kind,
                                                                        len(difference)))

        if difference and len(difference) < 200:
            logger.info("Students without {} scores: {}".format(score_kind, ', '.join(difference)))
        elif len(difference) >= 200:
            # Avoid creating very long lines.
            subset = list(difference)[:200]
            logger.info("{} students do not have {} scores. Here are a few: {}"
                        .format(len(difference), score_kind, ', '.join(subset)))
        logger.info("---"*20)
Example 17
def test_job(duration=0, should_fail=False, make_file=False):
    logger = jobs.get_job_logger()

    logger.info('Starting...')
    time.sleep(duration)
    if should_fail:
        1 / 0
    if make_file:
        upload = ExternalFile.upload(data(duration + 1),
                                     user_id=1,
                                     course_id=1,
                                     name='temp.okfile',
                                     prefix='jobs/example/')
        logger.info("Saved as: {}".format(upload.object_name))
        logger.info('File ID: {0}'.format(encode_id(upload.id)))
        msg = ("Waited for <a href='/files/{0}'> {1} seconds </a>".format(
            encode_id(upload.id), duration))
    else:
        msg = "Waited for <b>{}</b> seconds!".format(duration)
    logger.info('Finished!')
    return msg
Example 18
def enroll_students(canvas_course_id):
    logger = jobs.get_job_logger()
    row_format = '{email!s:<35} {name!s:<35} {sid!s:<11}'
    canvas_course = CanvasCourse.query.get(canvas_course_id)
    enrollment_info = []
    logger.info(row_format.format(email='EMAIL', name='NAME', sid='SID'))
    for student in api.get_students(canvas_course):
        info = {
            'email': student['email'],
            'name': student['name'],
            'sid': student['sis_user_id'],
            'class_account': '',
            'section': '',
        }
        logger.info(row_format.format(**info))
        enrollment_info.append(info)
    created, updated = Enrollment.create(canvas_course.course_id, enrollment_info)
    message = 'Added {new}, updated {old} student enrollments'.format(
        new=created, old=updated)
    logger.info(message)
    return message
Example 19
def autograde_assignment(assignment_id):
    """Autograde all enrolled students for this assignment.

    We set up a state machine for each backup to check its progress through
    the autograder. If any step takes too long, we'll retry autograding that
    backup. Ultimately, a backup is considered done when we confirm that
    we've received a new score, or if we have reached the retry limit.
    """
    logger = jobs.get_job_logger()
    assignment = Assignment.query.get(assignment_id)
    course_submissions = assignment.course_submissions(include_empty=False)
    backup_ids = set(fs['backup']['id'] for fs in course_submissions
                     if fs['backup'])
    try:
        autograde_backups(assignment, jobs.get_current_job().user_id, backup_ids, logger)
    except ValueError:
        logger.info(
            'Could not autograde backups - Please add an autograding key.')
        return
    return '/admin/course/{cid}/assignments/{aid}/scores'.format(
        cid=jobs.get_current_job().course_id, aid=assignment.id)
Example 20
def export_grades():
    logger = jobs.get_job_logger()
    current_user = jobs.get_current_job().user
    course = Course.query.get(jobs.get_current_job().course_id)
    assignments = course.assignments
    students = (Enrollment.query
      .options(db.joinedload('user'))
      .filter(Enrollment.role == STUDENT_ROLE, Enrollment.course == course)
      .all())

    headers, assignments = get_headers(assignments)
    logger.info("Using these headers:")
    for header in headers:
        logger.info('\t' + header)
    logger.info('')

    total_students = len(students)
    with io.StringIO() as f:
        writer = csv.writer(f)
        writer.writerow(headers) # write headers

        for i, student in enumerate(students, start=1):
            row = export_student_grades(student, assignments)
            writer.writerow(row)
            if i % 50 == 0:
                logger.info('Exported {}/{}'.format(i, total_students))
        f.seek(0)
        created_time = local_time(dt.datetime.now(), course, fmt='%b-%-d %Y at %I-%M%p')
        csv_filename = '{course_name} Grades ({date}).csv'.format(
                course_name=course.display_name, date=created_time)
        # convert to bytes for csv upload
        csv_bytes = io.BytesIO(bytearray(f.read(), 'utf-8'))
        upload = ExternalFile.upload(csv_bytes, user_id=current_user.id, name=csv_filename,
                         course_id=course.id,
                         prefix='jobs/exports/{}/'.format(course.offering))

    logger.info('\nDone!\n')
    logger.info("Saved as: {0}".format(upload.object_name))
    return "/files/{0}".format(encode_id(upload.id))
Example 21
def search_similar_repos(access_token=None, assignment_id=None,
                         language='python', template_name=None,
                         keyword='def ', weeks_past=12,
                         issue_title=None, issue_body=None):
    logger = jobs.get_job_logger()
    logger.info('Starting Github Search...')

    assign = Assignment.query.filter_by(id=assignment_id).one_or_none()
    if not assign:
        logger.info("Could not find assignment")
        return
    if not assign.files:
        logger.info("Upload template files for this assignment to search.")
        return

    possible_file_names = list(assign.files.keys())
    if template_name not in possible_file_names:
        logger.info("{} is not in {}".format(template_name, possible_file_names))
        return

    source_file = assign.files[template_name]
    repos = get_online_repos(source_file, logger, language, access_token,
                             keyword=keyword)
    if not repos:
        logger.warning("No repos found. Try a different keyword?")
        return
    recent_repos = list_recent_repos(repos, logger, access_token, weeks_past)

    if issue_title and issue_body:
        for repo in recent_repos:
            repo_name = recent_repos[repo]['repository']['full_name']
            file_github_issue(repo_name, logger, access_token, issue_title, issue_body)

    stats = "Found {} recent repos and {} total repos".format(len(recent_repos),
                                                              len(repos))
    logger.info(stats)
    return stats
Example 22
def assign_scores(assign_id, score, kind, message, deadline,
                  include_backups=True, grade_backups=False):
    logger = jobs.get_job_logger()
    current_user = jobs.get_current_job().user

    assignment = Assignment.query.get(assign_id)
    students = [e.user_id for e in assignment.course.get_students()]
    submission_time = server_time_obj(deadline, assignment.course)

    # Find all submissions (or backups) before the deadline
    backups = Backup.query.filter(
        Backup.assignment_id == assign_id,
        or_(Backup.created <= deadline, Backup.custom_submission_time <= deadline)
    ).group_by(Backup.submitter_id).order_by(Backup.created.desc())

    if not include_backups:
        backups = backups.filter(Backup.submit == True)

    all_backups = backups.all()

    if not all_backups:
        logger.info("No submissions were found with a deadline of {}."
                    .format(deadline))
        return "No Scores Created"

    score_counter, seen = 0, set()

    unique_backups = []

    for back in all_backups:
        if back.creator not in seen:
            unique_backups.append(back)
            seen |= back.owners()

    total_count = len(unique_backups)
    logger.info("Found {} unique and eligible submissions...".format(total_count))

    if grade_backups:
        logger.info('\nAutograding {} backups'.format(total_count))
        backup_ids = [back.id for back in unique_backups]
        try:
            autograde_backups(assignment, current_user.id, backup_ids, logger)
        except ValueError:
            logger.info('Could not autograde backups - Please add an autograding key.')
    else:
        for back in unique_backups:
            new_score = Score(score=score, kind=kind, message=message,
                              user_id=back.submitter_id,
                              assignment=assignment, backup=back,
                              grader=current_user)

            db.session.add(new_score)
            new_score.archive_duplicates()

            score_counter += 1
            if score_counter % 100 == 0:
                logger.info("Scored {} of {}".format(score_counter, total_count))

        # only commit if all scores were successfully added
        db.session.commit()

    logger.info("Left {} '{}' scores of {}".format(score_counter, kind.title(), score))
    return '/admin/course/{cid}/assignments/{aid}/scores'.format(
                cid=jobs.get_current_job().course_id, aid=assignment.id)
Example 23
def autograde_assignment(assignment_id):
    """Autograde all enrolled students for this assignment.

    We set up a state machine for each backup to check its progress through
    the autograder. If any step takes too long, we'll retry autograding that
    backup. Ultimately, a backup is considered done when we confirm that
    we've received a new score, or if we have reached the retry limit.
    """
    logger = jobs.get_job_logger()

    assignment = Assignment.query.get(assignment_id)
    course_submissions = assignment.course_submissions(include_empty=False)
    backup_ids = set(fs['backup']['id'] for fs in course_submissions
                     if fs['backup'])
    token = create_autograder_token(jobs.get_current_job().user_id)

    # start by sending a batch of all backups
    start_time = time.time()
    job_ids = send_batch(token, assignment, backup_ids)
    tasks = [
        GradingTask(
            status=GradingStatus.QUEUED,
            backup_id=backup_id,
            job_id=job_id,
            retries=0,
        ) for backup_id, job_id in job_ids.items()
    ]
    num_tasks = len(tasks)

    def retry_task(task):
        if task.retries >= MAX_RETRIES:
            logger.error(
                'Did not receive a score for backup {} after {} retries'.
                format(utils.encode_id(task.backup_id), MAX_RETRIES))
            task.set_status(GradingStatus.FAILED)
        else:
            task.set_status(GradingStatus.QUEUED)
            task.job_id = autograde_backup(token, assignment, task.backup_id)
            task.retries += 1

    while True:
        time.sleep(POLL_INTERVAL)
        results = check_job_results([task.job_id for task in tasks])

        graded = len([
            task for task in tasks
            if task.status in (GradingStatus.DONE, GradingStatus.FAILED)
        ])
        logger.info('Graded {:>4}/{} ({:>5.1f}%)'.format(
            graded, num_tasks, 100 * graded / num_tasks))
        if graded == num_tasks:
            break

        for task in tasks:
            hashid = utils.encode_id(task.backup_id)
            if task.status == GradingStatus.QUEUED:
                result = results[task.job_id]
                if not result:
                    logger.warning(
                        'Autograder job {} disappeared, retrying'.format(
                            task.job_id))
                    retry_task(task)
                elif result['status'] != 'queued':
                    logger.debug(
                        'Autograder job {} for backup {} started'.format(
                            task.job_id, hashid))
                    task.set_status(GradingStatus.RUNNING)
                elif task.expired(QUEUED_TIMEOUT):
                    logger.warning(
                        'Autograder job {} queued longer than {} seconds, retrying'
                        .format(task.job_id, QUEUED_TIMEOUT))
                    retry_task(task)
            elif task.status == GradingStatus.RUNNING:
                result = results[task.job_id]
                if not result:
                    logger.warning(
                        'Autograder job {} disappeared, retrying'.format(
                            task.job_id))
                    retry_task(task)
                elif result['status'] == 'finished':
                    logger.debug(
                        'Autograder job {} for backup {} finished'.format(
                            task.job_id, hashid))
                    task.set_status(GradingStatus.WAITING)
                elif result['status'] == 'failed':
                    logger.warning('Autograder job {} failed, retrying'.format(
                        task.job_id))
                    retry_task(task)
                elif task.expired(RUNNING_TIMEOUT):
                    logger.warning(
                        'Autograder job {} running longer than {} seconds, retrying'
                        .format(task.job_id, RUNNING_TIMEOUT))
                    retry_task(task)
            elif task.status == GradingStatus.WAITING:
                score = Score.query.filter(
                    Score.backup_id == task.backup_id, Score.archived == False,
                    Score.created >
                    datetime.datetime.fromtimestamp(start_time)).first()
                if score:
                    logger.debug('Received score for backup {}'.format(hashid))
                    task.set_status(GradingStatus.DONE)
                elif task.expired(WAITING_TIMEOUT):
                    logger.warning(
                        'Did not receive score for backup {} in {} seconds, retrying'
                        .format(hashid, WAITING_TIMEOUT))
                    retry_task(task)

    # report summary
    statuses = collections.Counter(task.status for task in tasks)
    message = '{} graded, {} failed'.format(statuses[GradingStatus.DONE],
                                            statuses[GradingStatus.FAILED])
    logger.info(message)
    return message
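
Note: GradingTask and GradingStatus are defined alongside this job and are not shown here. A minimal sketch of what the task object plausibly looks like given how it is used above; the field names and the expired() semantics are inferred, not copied from the project:

import enum
import time

class GradingStatus(enum.Enum):
    QUEUED = 1
    RUNNING = 2
    WAITING = 3
    DONE = 4
    FAILED = 5

class GradingTask:
    def __init__(self, status, backup_id, job_id, retries):
        self.backup_id = backup_id
        self.job_id = job_id
        self.retries = retries
        self.set_status(status)

    def set_status(self, status):
        # Record when the status last changed so timeouts can be measured.
        self.status = status
        self.status_changed_at = time.time()

    def expired(self, timeout):
        return time.time() - self.status_changed_at > timeout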
Example 24
def submit_to_moss(moss_id=None, file_regex=".*", assignment_id=None, language=None,
                   subtract_template=False):
    logger = jobs.get_job_logger()
    logger.info('Starting MOSS Export...')

    assign = Assignment.query.filter_by(id=assignment_id).one_or_none()
    if not assign:
        logger.info("Could not find assignment")
        return

    subms = assign.course_submissions(include_empty=False)

    subm_keys = set()
    for subm in subms:
        if subm['backup']['id'] in subm_keys:
            continue
        else:
            subm_keys.add(subm['backup']['id'])

        if subm['group']:
            group_members = subm['group']['group_member_emails'] or []
            group_members.append(subm['user']['email'])
            logger.info("{} -> {}".format(encode_id(subm['backup']['id']),
                                          ', '.join(group_members)))
        else:
            logger.info("{} -> {}".format(encode_id(subm['backup']['id']),
                                          subm['user']['email']))

    backup_query = (Backup.query.options(db.joinedload('messages'))
                          .filter(Backup.id.in_(subm_keys))
                          .order_by(Backup.created.desc())
                          .all())

    logger.info("Retreived {} final submissions".format(len(subm_keys)))
    # TODO: Customize the location of the tmp writing (especially useful during dev)

    with tempfile.TemporaryDirectory() as tmp_dir:
        # Copy in the moss script
        with open('server/jobs/moss-submission.pl', 'r') as f:
            moss_script = f.read()

        moss_script = moss_script.replace('YOUR_USER_ID_HERE', str(moss_id))
        with open(tmp_dir + "/moss.pl", 'w') as script:
            script.write(moss_script)

        match_pattern = re.compile(file_regex)
        ignored_files = set()

        template_files = []
        for template in assign.files:
            dest = os.path.join(tmp_dir, template)
            with open(dest, 'w') as f:
                f.write(assign.files[template])
            template_files.append(template)
        logger.info("Using template files: {}".format(' '.join(template_files)))

        if subtract_template:
            logger.info("Subtract Template Enabled: Not sending templates through MOSS")
            templates = ''
        else:
            templates = ' '.join(["-b {file}".format(file=f) for f in template_files])

        for backup in backup_query:
            # Write each backup's submitted files into its own directory
            file_contents = [m for m in backup.messages if m.kind == 'file_contents']
            if not file_contents:
                logger.info("{} didn't have any file contents".format(backup.hashid))
                continue
            contents = file_contents[0].contents
            dest_dir = os.path.join(tmp_dir, backup.hashid)

            if not os.path.isdir(dest_dir):
                os.makedirs(dest_dir)

            for file in contents:
                if file == 'submit':  # ignore fake file from ok-client
                    continue
                if subtract_template and file in assign.files:
                    # Compare to the template and keep only the lines that are new
                    template, source = assign.files[file], contents[file]
                    d = difflib.Differ(linejunk=difflib.IS_LINE_JUNK,
                                       charjunk=difflib.IS_CHARACTER_JUNK)
                    diff = d.compare(template.splitlines(keepends=True),
                                     source.splitlines(keepends=True))
                    added = [line[1:] for line in diff if line[0] == '+']
                    contents[file] = ''.join(added)

                if match_pattern.match(file):
                    with open(os.path.join(dest_dir, file), 'w') as f:
                        f.write(contents[file])
                else:
                    ignored_files.add(file)

        # tmp_dir contains folders of the form: backup_hashid/file1.py
        os.chdir(tmp_dir)
        all_student_files = glob.glob("*/*")

        logger.info("Wrote all files to {}".format(tmp_dir))

        if ignored_files:
            logger.info("Regex {} ignored files with names: {}".format(file_regex,
                                                                       ignored_files))
        else:
            logger.info("Regex {} has captured all possible files".format(file_regex))

        if not all_student_files:
            raise Exception("Did not match any files")

        # Ensure that all of the files are in the tmp_dir (and not elsewhere)
        command = ("perl moss.pl -l {lang} {templates} -d {folder}"
                   .format(lang=language, templates=templates,
                           folder=' '.join(all_student_files)))

        logger.info("Running {}".format(command[:100] + ' ...'))

        try:
            process = subprocess.check_output(shlex.split(command),
                                              stderr=subprocess.STDOUT)
            moss_output = process.decode("utf-8")
            logger.info(moss_output)
            last_line = moss_output.strip().split('\n')[-1]
            if 'moss.stanford' in last_line:
                return last_line
        except subprocess.CalledProcessError as e:
            logger.warning("There was an error running the Moss Upload.")
            logger.info("{}".format(e.output.decode('utf-8')))
            raise e
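
Note: the subtract_template branch keeps only the lines a student added relative to the assignment template, so shared boilerplate cannot trigger MOSS matches. The difflib mechanics in isolation (Differ prefixes each line with a two-character code such as '+ '; slicing line[2:] recovers the bare line, while the loop above slices line[1:] and so keeps the marker's trailing space):

import difflib

template = 'def hog():\n    "*** YOUR CODE HERE ***"\n'
source = 'def hog():\n    return roll(10)\n'

d = difflib.Differ(linejunk=difflib.IS_LINE_JUNK,
                   charjunk=difflib.IS_CHARACTER_JUNK)
diff = d.compare(template.splitlines(keepends=True),
                 source.splitlines(keepends=True))
added = [line[2:] for line in diff if line.startswith('+ ')]
# added == ['    return roll(10)\n']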
Example 25
def assign_scores(assign_id,
                  score,
                  kind,
                  message,
                  deadline,
                  include_backups=True,
                  grade_backups=False):
    logger = jobs.get_job_logger()
    current_user = jobs.get_current_job().user

    assignment = Assignment.query.get(assign_id)
    students = [e.user_id for e in assignment.course.get_students()]
    submission_time = server_time_obj(deadline, assignment.course)

    # Find all submissions (or backups) before the deadline
    backups = Backup.query.filter(
        Backup.assignment_id == assign_id,
        or_(Backup.created <= deadline,
            Backup.custom_submission_time <= deadline)).group_by(
                Backup.submitter_id).order_by(Backup.created.desc())

    if not include_backups:
        backups = backups.filter(Backup.submit == True)

    all_backups = backups.all()

    if not all_backups:
        logger.info("No submissions were found with a deadline of {}.".format(
            deadline))
        return "No Scores Created"

    score_counter, seen = 0, set()

    unique_backups = []

    for back in all_backups:
        if back.creator not in seen:
            unique_backups.append(back)
            seen |= back.owners()

    total_count = len(unique_backups)
    logger.info(
        "Found {} unique and eligible submissions...".format(total_count))

    if grade_backups:
        logger.info('\nAutograding {} backups'.format(total_count))
        backup_ids = [back.id for back in unique_backups]
        try:
            autograde_backups(assignment, current_user.id, backup_ids, logger)
        except ValueError:
            logger.info(
                'Could not autograde backups - Please add an autograding key.')
    else:
        for back in unique_backups:
            new_score = Score(score=score,
                              kind=kind,
                              message=message,
                              user_id=back.submitter_id,
                              assignment=assignment,
                              backup=back,
                              grader=current_user)

            db.session.add(new_score)
            new_score.archive_duplicates()

            score_counter += 1
            if score_counter % 100 == 0:
                logger.info("Scored {} of {}".format(score_counter,
                                                     total_count))

        # only commit if all scores were successfully added
        db.session.commit()

    logger.info("Left {} '{}' scores of {}".format(score_counter, kind.title(),
                                                   score))
    return '/admin/course/{cid}/assignments/{aid}/scores'.format(
        cid=jobs.get_current_job().course_id, aid=assignment.id)
Example 26
def moss_submit(moss_id, submissions, ref_submissions, language, template,
                review_threshold=101, max_matches=MAX_MATCHES, file_regex='.*',
                num_results=NUM_RESULTS):
    """ Sends SUBMISSIONS and REF_SUBMISSIONS to Moss using MOSS_ID,
    LANGUAGE, and MAX_MATCHES.
    Stores results involving SUBMISSIONS in database.
    """

    # ISSUE:  Does not work for .ipynb files well (maybe just use sources?)

    logger = jobs.get_job_logger()
    logger.info('Connecting to Moss...')
    moss = socket.socket()
    moss.connect(('moss.stanford.edu', 7690))
    moss.send('moss {}\n'.format(moss_id).encode())
    moss.send('directory 1\n'.encode())
    moss.send('X 0\n'.encode())
    moss.send('maxmatches {}\n'.format(max_matches).encode())
    moss.send('show {}\n'.format(num_results).encode())
    moss.send('language {}\n'.format(language).encode())
    moss_success = moss.recv(1024).decode().strip() == 'yes'
    if not moss_success:
        moss.close()
        logger.info('FAILED to connect to Moss.  Common issues:')
        logger.info('- Make sure your Moss ID is a number, and not your email address.')
        logger.info('- Check you typed your Moss ID correctly.')
        return

    subm_keys = set()
    hashed_subm_keys = set()
    for subm in submissions:
        subm_keys.add(subm['backup']['id'])
        hashed_subm_keys.add(encode_id(subm['backup']['id']))
    for subm in ref_submissions:
        subm_keys.add(subm['backup']['id'])

    backup_query = (Backup.query.options(db.joinedload('messages'))
                          .filter(Backup.id.in_(subm_keys))
                          .order_by(Backup.created.desc())
                          .all())

    match_pattern = re.compile(file_regex)
    if template:
        logger.info('Uploading template...')
        merged_contents = ""
        for filename in template:
            if filename == 'submit' or not match_pattern.match(filename):
                continue
            merged_contents += template[filename] + '\n'
        send_file(moss, 'allcode', merged_contents, 0, language)
    fid = 0
    logger.info('Uploading submissions...')
    for backup in backup_query:
        file_contents = [m for m in backup.messages if m.kind == 'file_contents']
        if not file_contents:
            logger.info("{} didn't have any file contents".format(backup.hashid))
            continue
        contents = file_contents[0].contents
        merged_contents = ""
        for filename in sorted(contents.keys()):
            if filename == 'submit' or not match_pattern.match(filename):
                continue
            merged_contents += contents[filename] + '\n'
        fid += 1
        path = os.path.join(backup.hashid, 'allcode')
        send_file(moss, path, merged_contents, fid, language)
    moss.send("query 0 Submitted via okpy.org\n".encode())
    logger.info('Awaiting response...')
    url = moss.recv(1024).decode().strip()
    moss.send("end\n".encode())
    moss.close()
    logger.info('Moss results at: {}'.format(url))
    parse_moss_results(url, hashed_subm_keys, logger, match_pattern,
                       template, review_threshold)
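
Note: send_file is not shown in this snippet. Judging from the standard moss.pl client, each upload is a header line of the form "file <id> <language> <byte-count> <path>" followed by the raw contents, with id 0 reserved for the base (template) file. A hypothetical sketch along those lines, not the project's actual helper:

def send_file(moss, path, contents, fid, language):
    # Assumed wire format, modeled on moss.pl's upload command.
    data = contents.encode()
    header = 'file {} {} {} {}\n'.format(fid, language, len(data), path)
    moss.send(header.encode())
    moss.send(data)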
Example 27
File: moss.py Project: xiaomeow/ok
def submit_to_moss(moss_id=None,
                   file_regex=".*",
                   assignment_id=None,
                   language=None):
    logger = jobs.get_job_logger()
    logger.info('Starting MOSS Export...')

    assign = Assignment.query.filter_by(id=assignment_id).one_or_none()
    if not assign:
        logger.info("Could not find assignment")
        return

    subms = assign.course_submissions(include_empty=False)

    subm_keys = set()
    for subm in subms:
        if subm['backup']['id'] in subm_keys:
            continue
        else:
            subm_keys.add(subm['backup']['id'])

        if subm['group']:
            group_members = subm['group']['group_member_emails'] or []
            group_members.append(subm['user']['email'])
            logger.info("{} -> {}".format(encode_id(subm['backup']['id']),
                                          ', '.join(group_members)))
        else:
            logger.info("{} -> {}".format(encode_id(subm['backup']['id']),
                                          subm['user']['email']))

    backup_query = (Backup.query.options(db.joinedload('messages')).filter(
        Backup.id.in_(subm_keys)).order_by(Backup.created.desc()).all())

    logger.info("Retreived {} final submissions".format(len(subm_keys)))
    # TODO: Customize the location of the tmp writing (especially useful during dev)

    with tempfile.TemporaryDirectory() as tmp_dir:
        # Copy in the moss script
        with open('server/jobs/moss-submission.pl', 'r') as f:
            moss_script = f.read()

        moss_script = moss_script.replace('YOUR_USER_ID_HERE', str(moss_id))
        with open(tmp_dir + "/moss.pl", 'w') as script:
            script.write(moss_script)

        match_pattern = re.compile(file_regex)
        ignored_files = set()

        for backup in backup_query:
            # Write each backup's submitted files into its own directory
            file_contents = [
                m for m in backup.messages if m.kind == 'file_contents'
            ]
            if not file_contents:
                logger.info("{} didn't have any file contents".format(
                    backup.hashid))
                continue
            contents = file_contents[0].contents
            dest_dir = "{}/{}/".format(tmp_dir, backup.hashid)

            if not os.path.exists(dest_dir):
                os.makedirs(dest_dir)

            for file in contents:
                if file == 'submit':  # ignore fake file from ok-client
                    continue

                if match_pattern.match(file):
                    with open(dest_dir + file, 'w') as f:
                        f.write(contents[file])
                else:
                    ignored_files.add(file)

        # tmp_dir contains folders of the form: backup_hashid/file1.py
        os.chdir(tmp_dir)
        all_student_files = glob.glob("*/*")

        logger.info("Wrote all files to {}".format(tmp_dir))

        if ignored_files:
            logger.info("Regex {} ignored files with names: {}".format(
                file_regex, ignored_files))
        else:
            logger.info(
                "Regex {} has captured all possible files".format(file_regex))

        template_files = []
        for template in assign.files:
            dest = "{}/{}".format(tmp_dir, template)
            with open(dest, 'w') as f:
                f.write(assign.files[template])

            template_files.append(template)

        logger.info("Using template files: {}".format(
            ' '.join(template_files)))

        templates = ' '.join(
            ["-b {file}".format(file=f) for f in template_files])

        if not all_student_files:
            raise Exception("Did not match any files")

        # Ensure that all of the files are in the tmp_dir (and not elsewhere)
        command = ("perl moss.pl -l {lang} {templates} -d {folder}".format(
            lang=language,
            templates=templates,
            folder=' '.join(all_student_files)))

        logger.info("Running {}".format(command[:100] + ' ...'))

        try:
            process = subprocess.check_output(shlex.split(command),
                                              stderr=subprocess.STDOUT)
            logger.info(process.decode("utf-8"))
        except subprocess.CalledProcessError as e:
            logger.warning("There was an error running the Moss Upload.")
            logger.info("{}".format(e.output.decode('utf-8')))
            raise e
Example 28
def grade_on_effort(assignment_id, full_credit, late_multiplier, required_questions, grading_url):
    logger = jobs.get_job_logger()

    current_user = jobs.get_current_job().user
    assignment = Assignment.query.get(assignment_id)
    submissions = assignment.course_submissions(include_empty=False)

    # delete all previous effort scores for this assignment
    scores = Score.query.filter(
        Score.kind == 'effort',
        Score.assignment_id == assignment_id).all()
    for score in scores:
        db.session.delete(score)

    seen = set()
    stats = Counter()
    manual, late, not_perfect = [], [], []
    for i, subm in enumerate(submissions, 1):
        user_id = int(subm['user']['id'])
        if user_id in seen:
            continue

        latest_backup = Backup.query.get(subm['backup']['id'])
        submission_time = get_submission_time(latest_backup, assignment)
        backup, submission_time = find_best_scoring(latest_backup,
                submission_time, assignment, required_questions, full_credit)

        try:
            score, messages = effort_score(backup, full_credit, required_questions)
        except AssertionError:
            manual.append(backup)
            continue
        else:
            score, messages = handle_late(backup, assignment,
                    late, submission_time, score, messages, late_multiplier)

        if score < full_credit and backup.hashid not in late:
            not_perfect.append(backup)

        messages.append('\nFinal Score: {}'.format(score))
        messages.append('Your final score will be the max of this score and the `Total` score (if one exists)')
        new_score = Score(score=score, kind='effort',
                message='\n'.join(messages), user_id=backup.submitter_id,
                assignment=assignment, backup=backup, grader=current_user)
        db.session.add(new_score)

        if i % 100 == 0:
            logger.info('Scored {}/{}'.format(i, len(submissions)))

        if subm['group']:
            member_ids = {int(id) for id in subm['group']['group_member'].split(',')}
            seen |= member_ids
            stats[score] += len(member_ids)
        else:
            seen.add(user_id)
            stats[score] += 1

    # Commit all scores at once
    db.session.commit()

    logger.info('Scored {}/{}'.format(i, len(submissions)))
    logger.info('done!')

    if len(late) > 0:
        logger.info('\n{} Late:'.format(len(late)))
        for backup_id in late:
            logger.info('  {}'.format(grading_url + backup_id))

    logger.info('\nScore Distribution:')
    sorted_scores = sorted(stats.items(), key=lambda p: -p[0])
    for score, count in sorted_scores:
        logger.info('  {} - {}'.format(str(score).rjust(3), count))

    needs_autograding = len(manual) + len(not_perfect)
    if needs_autograding > 0:
        logger.info('\nAutograding {} manual and/or not perfect backups'.format(needs_autograding))
        backup_ids = [backup.id for backup in manual + not_perfect]
        try:
            autograde_backups(assignment, current_user.id, backup_ids, logger)
        except ValueError:
            logger.info('Could not autograde backups - Please add an autograding key.')

    db.session.commit()
    return '/admin/course/{cid}/assignments/{aid}/scores'.format(
                cid=jobs.get_current_job().course_id, aid=assignment_id)