def download_all_grades(course):
    """Fetch the current score of every student enrolled in *course*.

    Queries active and completed StudentEnrollments and returns a dict
    mapping the user id (as a string) to the enrollment's
    ``grades.current_score``.
    """
    student_enrollments = get(
        'enrollments', course=course,
        data={'type[]': 'StudentEnrollment',
              'state[]': ['active', 'completed']},
        all=True)
    scores = {}
    for enrollment in student_enrollments:
        user_key = str(enrollment['user_id'])
        scores[user_key] = enrollment['grades']['current_score']
    return scores
# --- Exemplo n.º 2 (score 0) — scraped example separator, commented out so the file parses ---
def make_groups(course, file, category):
    """Recreate the groups of a Canvas group category from a CSV file.

    Deletes every existing group in the category, then reads *file*
    (one group per line, comma-separated user ids) and creates a fresh
    "Group {i}" per line, adding each listed user as a member.

    :param course: course identifier (unused by the group endpoints,
                   which are called with course=None)
    :param file: path to the CSV of group memberships
    :param category: the group category id to rebuild
    """
    category = get('group_categories/{}'.format(category), course=None)
    log("Clearing out groups for category:", category['name'])
    # BUG FIX: the original formatted the whole `category` dict into the
    # URL (it was rebound to the API response above); the endpoint needs
    # the category's numeric id.
    old_groups = get('group_categories/{}/groups'.format(category['id']),
                     course=None)
    if isinstance(old_groups, dict) and old_groups['status'] == 'not found':
        pass  # nothing to delete
    else:
        for group in old_groups:
            log("\tDeleting group", group['name'])
            delete('groups/{}'.format(group['id']), course=None)
    log("Reading new groups")
    with open(file) as inp:
        # BUG FIX: strip each line first — without it the trailing
        # newline was glued onto the last user id of every group.
        # Blank lines are skipped instead of creating an empty group.
        groups = [line.strip().split(",") for line in inp if line.strip()]
    log("Creating", len(groups), "groups.")
    for i, group_ids in enumerate(groups):
        log("\tCreating Group", i)
        group = post('group_categories/{}/groups'.format(category['id']),
                     course=None, data={'name': "Group {}".format(i)})
        for user in group_ids:
            log("\t\tAdding", user)
            post('groups/{}/memberships'.format(group['id']),
                 data={'user_id': user}, course=None)
def process_quiz(quiz_id, format, path, course):
    """Analyze one quiz from its Canvas "student analysis" CSV export.

    Reads the per-student submission CSV at *path*, fetches each
    question's metadata from the API, runs the question-type-specific
    analyzer, and emits a report in the requested *format*.

    :param quiz_id: Canvas quiz id, used to fetch question metadata
    :param format: one of 'text', 'html', 'pdf', 'json'
    :param path: path to the downloaded student-analysis CSV
    :param course: course identifier passed through to the API helpers
    :return: the JSON report path when format == 'json', else True
    """
    print(quiz_id)
    # Download overall course grades for course-level discrimination
    course_scores = download_all_grades(course)
    # Process quiz data
    df = pd.read_csv(path, dtype=str)
    # Anonymous exports omit the identifying columns, shifting the
    # layout — presumably 5 leading metadata columns instead of 8.
    # TODO(review): confirm against the Canvas CSV export format.
    anonymous = 'id' not in df.columns
    FIRST_COLUMN = 5 if anonymous else 8
    # Grab the header as a single row to extract point columns
    header = pd.read_csv(path, nrows=1, header=None)
    # Grab out the actual columns of data
    # NOTE(review): the last 3 columns appear to be summary columns
    # (hence the -3 slice) — verify against a sample export.
    df_submissions_subtable = df.iloc[:,FIRST_COLUMN:-3]
    # Column just before the question data holds the attempt count.
    attempts = df.iloc[:,FIRST_COLUMN-1].map(int)
    user_ids = None if anonymous else df.iloc[:,1]
    overall_score = df.iloc[:,-1].map(float)
    # Question IDs are stored in alternating columns as "ID: Text"
    question_ids = [x.split(':')[0] for x in
                    df_submissions_subtable.columns[::2]]
    results = []
    for i, question_id in enumerate(question_ids):
        # Actual student submission is in alternating columns
        submissions = df_submissions_subtable.iloc[:, i*2]
        # The column after each submission column holds that question's score.
        scores = df_submissions_subtable.iloc[:, 1+i*2].map(float)
        # Max points for this question come from the raw header row.
        max_score = float(header.iloc[0,FIRST_COLUMN+1+i*2])
        question = get('quizzes/{quiz}/questions/{qid}'
                       .format(quiz=quiz_id, qid=question_id),
                       course=course)
        question_type = question['question_type']
        # Dispatch to a question-type-specific analyzer, falling back
        # to the default handler for unknown types.
        processor = QUESTION_TYPES.get(question_type, DefaultQuestionType)
        q = processor(question, submissions, attempts, user_ids,
                      scores, overall_score, course_scores, max_score,
                      anonymous, path)
        q.analyze()
        if format == 'text':
            # Replace non-ASCII characters so the report prints safely.
            print(q.to_text().encode("ascii", errors='replace')
                  .decode())
        elif format == 'html':
            q.to_html()
        elif format == 'pdf':
            # NOTE(review): 'pdf' currently produces HTML output —
            # confirm this fallback is intentional.
            q.to_html()
        elif format == 'json':
            results.append(q.to_json())
    if format == 'json':
        json_path = change_extension(path, 'json')
        with open(json_path, 'w') as out:
            json.dump(results, out, indent=2)
        return json_path
    return True
def download_all_quizzes(format, filename, course, ignore):
    """Download and process every quiz in *course*.

    Runs ``download_quiz`` for each quiz. When *format* is 'json', the
    per-quiz JSON files are additionally merged into a single
    ``combined.json`` and the list of per-quiz paths is returned;
    for any other format the function returns None.
    """
    all_quizzes = get('quizzes', all=True, course=course)
    paths = []
    for quiz in all_quizzes:
        paths.append(download_quiz(quiz['id'], format, filename,
                                   course, ignore))
    if format != 'json':
        return None
    # Merge every per-quiz JSON report into one list.
    combined = []
    for quiz_path in paths:
        with open(quiz_path) as inp:
            combined.extend(json.load(inp))
    if filename is None:
        path = 'quizzes/{}/'.format(course)
        os.makedirs(path, exist_ok=True)
        path += 'combined.json'
    else:
        path = filename + '/combined.json'
    with open(path, 'w') as out:
        json.dump(combined, out)
    return paths
# Open a nested section in the HTML log output.
log_file.write("<div style='margin-left:10px'>")

# TA Map
# Load the group-name -> TA-name mapping from the course's configured YAML file.
print("Reading TA Map")
ta_map_filename = courses[COURSE]["ta_map"]
ta_lookup = yaml_load(ta_map_filename)
# Invert the mapping: TA name -> list of the group names they own.
group_ta_lookup = defaultdict(list)
for group_name, ta_name in ta_lookup.items():
    group_ta_lookup[ta_name].append(group_name)
tas = set(ta_lookup.values())
log(len(tas), "TAs")
known_groups = set(ta_lookup.keys())
log(len(known_groups), "groups")
# Download groups
print("Downloading groups")
groups = get('groups', all=True, course=COURSE)
# Keep only the Canvas groups that appear in the TA map.
groups = [g for g in groups if g['name'] in known_groups]
# Download users
print("Downloading users")
users = get('users', all=True, course=COURSE)
user_lookup = {u['id']: u for u in users}
# Download mapping
print("Downloading group/user mapping")
group_users = {}
user_ta_lookup = {}
for group in groups:
    group_id = group['id']
    group_name = group['name']
    # NOTE(review): group_membership is fetched but never used within this
    # excerpt — the loop body presumably continues in code not shown here.
    group_membership = get('groups/{}/users'.format(group_id),
                           course=None,
                           all=True)
# --- Exemplo n.º 6 (score 0) — scraped example separator, commented out so the file parses ---
def export_dates(course, filename):
    """Export every assignment's per-section date slots to a CSV template.

    Downloads assignments (with overrides), sections, and modules; groups
    assignments under the module that links them (tracking the latest
    top-level SubHeader as a "class name"); and writes a CSV with one row
    per assignment and empty Open/Due/Lock cells per section, ready to be
    filled in and re-imported by import_dates.

    :param course: course identifier for the API helpers
    :param filename: output CSV path; defaults to dates/<course>_dates.csv
    """
    # Assignments (including quizzes)
    assignments = get('assignments',
                      all=True,
                      course=course,
                      data={"include[]": ['overrides']})
    assignments = {a['id']: a for a in assignments}
    # Module items reference quizzes by quiz id; map back to assignment id.
    quiz_lookup = {
        a['quiz_id']: a['id']
        for a in assignments.values() if 'quiz_id' in a
    }
    # Sections
    sections = get('sections', all=True, course=course)
    # Modules
    modules = get('modules', all=True, course=course)
    # Organize assignments by module
    latest_header = ''
    assignment_modules = []
    unseen = set(assignments.keys())
    for module in modules:
        new_module = {
            'module': module['id'],
            'name': module['name'],
            'assignments': []
        }
        MODULE_URL = 'modules/{}/items'.format(module['id'])
        module_items = get(MODULE_URL, all=True, course=course)
        for mi in module_items:
            assignment_ids = []
            if mi['type'] == 'Assignment':
                assignment_ids.append(mi['content_id'])
            elif mi['type'] == 'Quiz':
                if mi['content_id'] not in quiz_lookup:
                    continue
                assignment_ids.append(quiz_lookup[mi['content_id']])
            elif mi['type'] == 'SubHeader' and mi['indent'] == 0:
                # Top-level subheaders label the assignments that follow.
                latest_header = mi['title']
                continue
            else:
                continue
            for aid in assignment_ids:
                assignment = assignments[aid]
                assignment['_class_name'] = latest_header
                position = mi['position']
                new_module['assignments'].insert(position, assignment)
                # BUG FIX: discard instead of remove — an assignment linked
                # from more than one module item used to raise KeyError on
                # its second occurrence.
                unseen.discard(aid)
        assignment_modules.append(new_module)

    # Generate CSV
    section_names = ','.join(
        ["{} ({}),,".format(s['name'], s['id']) for s in sections])
    section_headers = ', Open Date, Due Date, Lock Date' * (1 + len(sections))
    if filename is None:
        filename = 'dates/' + str(course) + '_dates.csv'
    log("Writing to", filename)
    with open(filename, 'w') as out:
        out.write(",,,," + section_names + "\n")
        out.write("Module, Class, Name, ID" + section_headers + "\n")
        # Print anything found in a module
        for m in assignment_modules:
            for a in m['assignments']:
                out.write('"{}",'.format(m['name']))
                out.write('"{}","{}",{}'.format(a['_class_name'], a['name'],
                                                a['id']))
                # Empty Open/Due/Lock cells for each section.
                for _ in sections:
                    out.write(",,,")

                out.write("\n")
        # Print any unlisted assignments
        for u in natsorted(unseen, key=lambda u: assignments[u]['name']):
            out.write('"Unlisted",')
            out.write(',"{}",{}'.format(assignments[u]['name'], u))
            for _ in sections:
                out.write(",,,")
            out.write("\n")
# --- Exemplo n.º 7 (score 0) — scraped example separator, commented out so the file parses ---
def import_dates(course, filename):
    """Read a dates CSV (as produced by export_dates) and push per-section
    assignment overrides to Canvas, then log the results for verification.

    Existing section overrides are updated (PUT); missing ones are
    created (POST).

    :param course: course identifier for the API helpers
    :param filename: input CSV path; defaults to dates/<course>_dates.csv
    """
    if filename is None:
        filename = 'dates/' + str(course) + '_dates.csv'
    log("Reading from", filename)
    dates = []
    with open(filename) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        header = next(csv_reader)
        next(csv_reader)  # skip the per-section column-title row
        sections = list(extract_sections_from_header(header))
        for line in csv_reader:
            # First four cells: Module, Class, Name, ID.
            # (Renamed from `id` to avoid shadowing the builtin.)
            module_name, class_name, assignment_name, aid_text = line[:4]
            sections_times = {}
            for i, (name, section_id) in enumerate(sections):
                # Each section owns 3 cells after the 4 fixed columns.
                offset = i * 3 + 4
                times = line[offset:offset + 3]
                fill_missing_times(times)
                sections_times[int(section_id)] = times
            dates.append((module_name, class_name, assignment_name,
                          int(aid_text), sections_times))
    log("Processed", len(dates), "entries")

    log("Downloading assignments")
    assignments = get('assignments',
                      all=True,
                      course=course,
                      data={"include[]": ['overrides']})
    overrides_lookup = {a['id']: a['overrides'] for a in assignments}
    log("Downloaded", len(assignments), "assignments")

    log("Uploading section dates")
    put_count, post_count = 0, 0
    for (module_name, class_name, assignment_name, aid,
         section_dates) in tqdm(dates):
        overrides = overrides_lookup[aid]
        override_sections = {
            o['course_section_id']: o
            for o in overrides if 'course_section_id' in o
        }
        for sid, times in section_dates.items():
            # Cells are ordered Open, Due, Lock (matching export_dates).
            unlock_at, due_at, lock_at = map(to_iso8601, times)
            if sid in override_sections:
                override = override_sections[sid]
                oid = override['id']
                put('assignments/{aid}/overrides/{oid}'.format(aid=aid,
                                                               oid=oid),
                    data={
                        'assignment_override[due_at]': due_at,
                        'assignment_override[unlock_at]': unlock_at,
                        'assignment_override[lock_at]': lock_at
                    },
                    course=course)
                put_count += 1
            else:
                post('assignments/{aid}/overrides'.format(aid=aid),
                     data={
                         'assignment_override[course_section_id]': sid,
                         'assignment_override[due_at]': due_at,
                         'assignment_override[unlock_at]': unlock_at,
                         'assignment_override[lock_at]': lock_at
                     },
                     course=course)
                post_count += 1
    log("Created", post_count, "new overrides")
    log("Updated", put_count, "old overrides")

    log("Verifying assignments")
    # Re-download and log every override so the upload can be eyeballed.
    assignments = get('assignments',
                      all=True,
                      course=course,
                      data={"include[]": ['overrides']})
    for assignment in assignments:
        aid = assignment['id']
        overrides = assignment['overrides']
        for override in overrides:
            log("{name} for {section}:\t{due_date},\t{lock_date},\t{open_date}"
                .format(name=assignment['name'],
                        section=override['title'],
                        due_date=override.get('due_at', 'None'),
                        lock_date=override.get('lock_at', 'None'),
                        open_date=override.get('unlock_at', 'None')))
# Open a nested section in the HTML log output.
log_file.write("<div style='margin-left:10px'>")

# TA Map
# Load the group-name -> TA-name mapping from the course's configured YAML file.
print("Reading TA Map")
ta_map_filename = courses[COURSE]["ta_map"]
ta_lookup = yaml_load(ta_map_filename)
# Invert the mapping: TA name -> list of the group names they own.
group_ta_lookup = defaultdict(list)
for group_name, ta_name in ta_lookup.items():
    group_ta_lookup[ta_name].append(group_name)
tas = set(ta_lookup.values())
log(len(tas), "TAs")
known_groups = set(ta_lookup.keys())
log(len(known_groups), "groups")
# Download groups
print("Downloading groups")
groups = get('groups', all=True, course=COURSE)
# Keep only the Canvas groups that appear in the TA map.
groups = [g for g in groups if g['name'] in known_groups]
# Download users
# Unlike the earlier variant of this script, this one restricts the user
# download to student enrollments in any state.
print("Downloading users")
users = get('users',
            all=True,
            course=COURSE,
            data={
                'enrollment_type[]':
                'student',
                'enrollment_state[]':
                ['active', 'invited', 'rejected', 'completed', 'inactive']
            })
user_lookup = {u['id']: u for u in users}
# Key function for indexing users by id.
get_user_id = lambda u: u['id']
student_index = {
# --- Exemplo n.º 9 (score 0) — scraped example separator, commented out so the file parses ---
def show_categories(course):
    """Print every group category in *course*, one "id: name" per line."""
    for category in get('group_categories', all=True, course=course):
        print(category['id'], ': ', category['name'], sep='')