Example #1
def mark_course(row):
    scores = [
        'quiz_score', 'assign_score', 'clicker_score', 'final_score',
        'midterm_1', 'midterm_2'
    ]
    row_scores = row[scores]
    s = make_tuple(row_scores)
    final_weight = 0.4
    clicker_weight = 0.05
    mid1_weight = 0.15
    mid2_weight = 0.15
    quiz_weight = 0.10
    assign_weight = 0.15
    if s.midterm_1 == 0.0:
        print('missed midterm 1', row)
        final_weight = final_weight + 0.15
        mid1_weight = 0.
    if s.midterm_2 == 0.0:
        print('missed midterm 2', row)
        final_weight = final_weight + 0.15
        mid2_weight = 0.
    if s.clicker_score < s.final_score:
        clicker_weight = 0.
        final_weight = final_weight + 0.05
    weights = mid1_weight + mid2_weight + clicker_weight + quiz_weight + assign_weight + final_weight
    # compare with a tolerance: the reweighted floats may not sum to exactly 1.0
    if abs(weights - 1.0) > 1e-6:
        raise ValueError(f'weights sum to {weights}: {row}')
    # weighted course grade; the trailing + 1 appears to be a flat one-point bonus
    grade = s.midterm_1*mid1_weight + s.midterm_2*mid2_weight + s.final_score*final_weight +\
            s.assign_score*assign_weight + s.quiz_score*quiz_weight + s.clicker_score*clicker_weight + 1
    return grade
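
The make_tuple helper used throughout these examples is not shown. Judging from its call sites (it wraps both plain dicts and pandas rows, and the result is read through attributes such as s.midterm_1 or n.data_dir), it most likely converts a mapping into a namedtuple. A minimal sketch under that assumption:

from collections import namedtuple


def make_tuple(mapping):
    # assumption: expose dict keys (or a pandas Series index) as attributes,
    # so callers can write s.midterm_1 instead of s['midterm_1']
    if hasattr(mapping, 'to_dict'):
        mapping = mapping.to_dict()
    TupleClass = namedtuple('TupleClass', list(mapping.keys()))
    return TupleClass(**mapping)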
Example #2
def main(the_args=None):
    parser = make_parser()
    args = parser.parse_args(the_args)
    with open(args.file_list, 'r') as f:
        name_dict = json.load(f)
    n = make_tuple(name_dict)
    base_dir = Path(os.environ['HOME']) / Path(n.data_dir)
    ind_grades = base_dir / Path(n.ind_file)
    print(f"reading {ind_grades}")
    df_ind = pd.read_excel(ind_grades)
    df_ind.set_index('STUDENT ID', inplace=True, drop=False)
    fsc_path = base_dir / Path(n.fsc_list)
    df_fsc = pd.read_excel(fsc_path)
    pdb.set_trace()
    df_fsc.set_index('Student Number', inplace=True, drop=False)
    key_path = base_dir / Path(n.key_file)
    df_key = pd.read_csv(str(key_path))
    score = grade_ids(df_ind, df_key)
    df_ind['check_score'] = copy.deepcopy(score)
    # #group_ques=grade_ques(df_groupraw,df_key)
    # #ind_ques=grade_ques(df_ind,df_key)
    df_names = df_fsc[['Student Number', 'Surname', 'Preferred Name']]
    df_names = df_names.set_index('Student Number', drop=False)
    out = list(df_ind.apply(display_marks, axis=1, args=(
        df_key,
        df_names,
    )))
    print(f'ready to print {len(out)} exams')

    def sortit(the_dict):
        return (the_dict['LAST NAME'], the_dict['FIRST NAME'],
                the_dict['STUDENT ID'])

    out.sort(key=sortit)

    text = """
    ::

         {LAST NAME:s} {FIRST NAME:s}:   EOSC340 Term 2, 2017 MT2         , SCORE=  {grade:<d}%
                       |SN:{STUDENT ID:s}   Score=({Total Score:d}/{possible:d})  Test Form {TEST FORM:s}

                   Qnum|{numbers:<s}
                   Ans | {questions:<s}
                   key | {answers:<s}
                       | {xlist:<s}
    """
    text = textwrap.dedent(text)
    with open('out.rst', 'w') as f:

        for count, item in enumerate(out):
            if count % 6 == 0:
                f.write('.. raw:: pdf\n')
                f.write('\n    PageBreak\n\n')
            f.write(text.format_map(item))
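
make_parser is also assumed rather than shown. Every example parses a single argument naming a JSON file that maps logical names (data_dir, grade_book, fsc_list, ...) to paths, so a plausible sketch is the argparse factory below; the argument name (file_list here, json_file in other examples) is taken from the call sites.

import argparse


def make_parser():
    # assumption: one positional argument pointing at the JSON file of
    # data-file names; Example #4 additionally reads an args.numbers option
    parser = argparse.ArgumentParser(description='course grading utility')
    parser.add_argument('file_list', help='json file mapping names to data paths')
    return parser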
Example #3
def main(the_args=None):
    parser = make_parser()
    args = parser.parse_args(the_args)

    with open(args.json_file, 'r') as f:
        name_dict = json.load(f)
    n = make_tuple(name_dict)
    #----------------------
    # read in the gradebook
    #----------------------
    root_dir = Path(os.environ['HOME']) / Path(n.data_dir)
    grade_book = root_dir / Path(n.grade_book)
    with open(grade_book, 'r', encoding='ISO-8859-1') as f:
        df_gradebook = pd.read_csv(f)
    df_gradebook = df_gradebook.fillna(0)
    fsc_list = root_dir / Path(n.fsc_list)
    with open(fsc_list, 'rb') as f:
        #out=f.readlines()
        df_fsc = pd.read_excel(f, sheet_name=0)
    #
    # read in the group exam
    #
    root_dir = Path(os.environ['HOME']) / Path(n.final_dir)
    group_file = root_dir / Path(n.group_final)
    with open(group_file, 'rb') as f:
        group_grades = pd.read_excel(f, sheet_name=0)
    df_group = grade_group(group_grades)

    final_dir = Path(os.environ['HOME']) / Path(n.final_dir)

    pdb.set_trace()

    #-----------------
    # drop the mute/not mute row
    # and save points possible for final write
    #-----------------
    df_gradebook = df_gradebook.drop([0])
    points_possible = df_gradebook.iloc[0, :]
    df_gradebook = clean_id(df_gradebook, id_col='SIS User ID')
    df_gradebook = stringify_column(df_gradebook, id_col='ID')
    df_gradebook.iloc[0, :] = points_possible
    grade_cols = list(df_gradebook.columns.values)
    grade_col_dict = get_col_names(day_re, assign_re, grade_cols)
    quiz_list = [item for item in grade_col_dict.keys() if item[0] == 'q']
    assign_list = [item for item in grade_col_dict.keys() if item[0] == 'a']
    rename_dict = dict()
    quiz_list.extend(assign_list)
    quiz_list.sort()
    for key in quiz_list:
        rename_dict[grade_col_dict[key]] = key
    df_gradebook.rename(columns=rename_dict, inplace=True)
    df_gradebook = pd.DataFrame(df_gradebook[quiz_list])

    return df_gradebook
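
Examples #3 through #8 lean on a clean_id helper that is not reproduced here. From the call sites (it receives a DataFrame plus an id_col, and the result is indexed by the cleaned student id while keeping the original column) a plausible sketch, under those assumptions, is:

import pandas as pd


def clean_id(df, id_col='SIS User ID'):
    # assumption: normalise the id column to a plain string (stripping
    # whitespace and any trailing '.0' left by a float read) and make it
    # the index, keeping the original column in place
    df = df.copy()
    df[id_col] = (df[id_col].astype(str)
                  .str.strip()
                  .str.replace(r'\.0$', '', regex=True))
    return df.set_index(id_col, drop=False)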
Example #4
def main(the_args=None):
    parser = make_parser()
    args = parser.parse_args(the_args)
    print(args.numbers)
    with open(args.json_file, 'r') as f:
        name_dict = json.load(f)
    n = make_tuple(name_dict)
    df_exams = get_exams(n)
    home_dir = Path(os.environ['HOME'])
    all_points = home_dir / Path(n.data_dir) / Path(n.all_points)
    with open(all_points, 'r', encoding='utf-8-sig') as f:
        #
        # skip row 1 (muted/not muted)
        #
        df_all_points = pd.read_csv(f, sep=',', index_col=False)
        df_all_points = clean_id(df_all_points, id_col='SIS User ID')
        df_all_points.fillna(0., inplace=True)
        df_all_points = pd.DataFrame(
            df_all_points[['assign_score', 'clicker_score', 'bonus']])

    df_all_points = pd.merge(df_all_points,
                             df_exams,
                             how='left',
                             left_index=True,
                             right_index=True,
                             sort=False)

    for item in args.numbers:
        row = df_all_points.loc[item]
        print(print_num(row))

    fsc_list = home_dir / Path(n.data_dir) / Path(n.fsc_list)
    with open(fsc_list, 'rb') as f:
        df_fsc = pd.read_excel(f, index_col=False)
        df_fsc.fillna(0., inplace=True)
        df_fsc = clean_id(df_fsc, id_col='Student Number')
    df_fsc = pd.DataFrame(df_fsc[['Surname', 'Given Name', 'Student Number']])
    df_fsc = pd.merge(df_fsc,
                      df_all_points[['posted', 'course_corrected']],
                      how='left',
                      left_index=True,
                      right_index=True,
                      sort=False)
    hit = df_fsc['posted'].values < df_fsc['course_corrected'].values
    df_fsc = pd.DataFrame(df_fsc.loc[hit])
    df_fsc['posted'] = np.round(df_fsc['posted'].values).astype(int)
    df_fsc['course_corrected'] = np.round(
        df_fsc['course_corrected'].values).astype(int)
    with open('posted_revised.csv', 'w', encoding='utf-8-sig') as f:
        df_fsc.to_csv(f, index=False, sep=',')
    pdb.set_trace()
    return None
Example #5
def main(the_args=None):
    #
    # make_parser uses sys.args by default,
    # the_args can be set during code testing
    #
    parser = make_parser()
    args = parser.parse_args(the_args)
    quiztype, quiznum = list(args.column)
    quiznum = f'{int(quiznum):02d}'
    keep_rows = []
    with open(args.json_file, 'r') as f:
        name_dict = json.load(f)
    n = make_tuple(name_dict)
    #----------------------
    # read in the gradebook
    #----------------------
    root_dir = Path(os.environ['HOME']) / Path(n.data_dir)
    quiz_dir = Path(os.environ['HOME']) / Path(n.data_dir)
    grade_book = root_dir / Path(n.grade_book)
    #pdb.set_trace()
    #
    # skip mute/readonly line after headers
    #
    with open(grade_book, 'r', encoding='utf-8-sig') as f:
        df_gradebook = pd.read_csv(f, index_col=False, skiprows=[1])
        points_possible = pd.Series(df_gradebook.iloc[0, :])
    df_gradebook = df_gradebook.fillna(0)
    #-----------------
    # drop the mute/not mute row
    # and save points possible for final write
    #-----------------
    df_gradebook = clean_id(df_gradebook, id_col='SIS User ID')
    df_gradebook = stringify_column(df_gradebook, id_col='ID')
    df_gradebook.iloc[0, :] = points_possible
    grade_cols = list(df_gradebook.columns.values)
    dumplist = []
    #--------------------
    # get all assignment and quiz column headers from gradebook
    # and save in grade_col_dict
    #---------------------
    for item in grade_cols:
        day_out = day_re.match(item)
        assign_out = assign_re.match(item)
        if day_out:
            daynum = ('q', day_out.groups(1)[0])
            dumplist.append((daynum, item))
        elif assign_out:
            assignnum = ('a', assign_out.groups(1)[0])
            dumplist.append((assignnum, item))
        else:
            continue

    grade_col_dict = dict(dumplist)
    score_column = grade_col_dict[(quiztype, quiznum)]
    pdb.set_trace()
    new_score = pd.Series(df_gradebook[score_column])
    old_score = pd.Series(new_score)
    #----------------
    # boost everyone's score to 11.5 for quiz 24
    #----------------
    hit = new_score > 0.
    new_score[hit] = 11.5
    mergebook = pd.DataFrame(df_gradebook)
    df_check = pd.DataFrame(mergebook[['Student', score_column]])
    df_check['new_score'] = new_score
    new_name = f'{quiztype}_{quiznum}_check.csv'
    new_name = root_dir / Path(new_name)
    with open(new_name, 'w', encoding='utf-8-sig') as f:
        df_check.to_csv(f, index=False, sep=',')
    #---------------------
    # now make a new gradebook to upload the new_score column
    # this gradebook has the quiz score header so canvas will overwrite
    #---------------------
    mandatory_columns = list(mergebook.columns[:5])
    mandatory_columns = mandatory_columns + [score_column]
    df_upload = pd.DataFrame(mergebook[mandatory_columns])
    for item in [1, 2, 3, 4]:
        points_possible[item] = ' '
    df_upload.iloc[0, :] = points_possible[mandatory_columns]
    total_points = points_possible[score_column]
    hit = df_upload[score_column] > total_points
    df_upload.loc[hit, score_column] = total_points
    new_name = f'{quiztype}_{quiznum}_upload.csv'
    new_name = root_dir / Path(new_name)
    with open(new_name, 'w', encoding='utf-8-sig') as f:
        df_upload.to_csv(f, index=False, sep=',')
    print(f'created: {str(new_name)}')
    return None
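
The clipping step near the end of this example (df_upload.loc[hit, score_column] = total_points) is ordinary pandas boolean-mask assignment. A toy version of the same pattern, with made-up data, shows what it does to boosted scores that exceed the points possible:

import pandas as pd

# hypothetical data: student 'a' was boosted above the quiz maximum
df_upload = pd.DataFrame({'Student': ['a', 'b'], 'quiz': [13.0, 9.0]})
total_points = 11.5

hit = df_upload['quiz'] > total_points        # rows over the cap
df_upload.loc[hit, 'quiz'] = total_points     # clip them to points possible
print(df_upload)                              # 'a' -> 11.5, 'b' unchanged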
Example #6
def main(the_args=None):

    parser = make_parser()
    args = parser.parse_args(the_args)

    with open(args.json_file, 'r') as f:
        name_dict = json.load(f)
    n = make_tuple(name_dict)
    home_dir = Path(os.environ['HOME'])
    clickers = home_dir / Path(n.data_dir) / Path(n.pha_clickers)
    with open(clickers, 'r', encoding='utf-8-sig') as f:
        df_clickers = pd.read_csv(f, sep=',')
        df_clickers.fillna(0., inplace=True)
        df_clickers = clean_id(df_clickers, id_col='Student')

    fsc_list = home_dir / Path(n.data_dir) / Path(n.fsc_list)
    with open(fsc_list, 'rb') as f:
        df_fsc = pd.read_excel(f)
        df_fsc.fillna(0., inplace=True)
        #pdb.set_trace()
        df_fsc = clean_id(df_fsc, id_col='Student Number')

    grade_book = home_dir / Path(n.data_dir) / Path(n.grade_book)
    with open(grade_book, 'r', encoding='utf-8-sig') as f:
        df_gradebook = pd.read_csv(f, sep=',')
        df_gradebook = clean_id(df_gradebook, id_col='SIS User ID')
        df_gradebook.fillna(0., inplace=True)

    #-------
    # make a dictionary with dict[clicker_id] = sis_id
    #-------
    id_dict = {}
    for sis_id, item in df_gradebook.iterrows():
        key = f"{int(item['ID']):d}"
        id_dict[key] = sis_id

    sis_col = []
    fake_id_counter = 97  #this is 'a'
    for clicker_id in df_clickers.index:
        try:
            sis_col.append(id_dict[clicker_id])
        except KeyError:  #student not in gradebook
            sis_id = chr(fake_id_counter) * 8
            sis_col.append(sis_id)
            fake_id_counter += 1
    df_clickers['SIS User ID'] = sis_col
    df_clickers = df_clickers.set_index('SIS User ID', drop=False)
    col_dict = {}
    regexs = [part_re, perf_re]
    names = ['part', 'perf']
    for col in df_clickers.columns:
        for the_name, re_exp in zip(names, regexs):
            the_match = re_exp.match(col)
            if the_match:
                the_sess, the_date = the_match.groups()
                print(f'match: {col}, {the_sess}, {the_date}')
                date = parse(the_date, dayfirst=False)
                vals = df_clickers[col].values
                num_vals = []
                for item in vals:
                    try:
                        num_vals.append(float(item))
                    except (ValueError, TypeError):  # non-numeric entry
                        num_vals.append(0)
                col_dict[(the_name, the_sess)] = dict(col=col,
                                                      date=the_date,
                                                      vals=num_vals)
    scores = df_clickers.iloc[:, 5:-2].values
    cumscore = np.sum(scores, axis=1)
    df_clickers['clicker_score'] = cumscore
    only_scores = df_clickers[['clicker_score']]
    mergebook = pd.merge(df_gradebook,
                         only_scores,
                         how='left',
                         left_index=True,
                         right_index=True,
                         sort=False)
    return mergebook, df_fsc
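
When a clicker id has no entry in the gradebook, this example substitutes a synthetic eight-character id ('aaaaaaaa', 'bbbbbbbb', ...) so every student still gets a unique index value. The same pattern in isolation, with hypothetical ids:

# id_dict maps clicker id -> SIS id, as built from the gradebook above
id_dict = {'123': 'SIS001', '456': 'SIS002'}
clicker_ids = ['123', '999', '456', '888']    # '999' and '888' are unknown

sis_col = []
fake_id_counter = 97                          # ord('a')
for clicker_id in clicker_ids:
    try:
        sis_col.append(id_dict[clicker_id])
    except KeyError:                          # student not in gradebook
        sis_col.append(chr(fake_id_counter) * 8)
        fake_id_counter += 1
print(sis_col)    # ['SIS001', 'aaaaaaaa', 'SIS002', 'bbbbbbbb']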
Example #7
def main(the_args=None):
    parser = make_parser()
    args = parser.parse_args(the_args)
    with open(args.file_list, 'r') as f:
        name_dict = json.load(f)
    n = make_tuple(name_dict)
    #
    # get the final individual
    #
    #
    # get the class list
    #
    root_dir = Path(os.environ['HOME']) / Path(n.data_dir)
    module_path = root_dir / Path(n.correction_module_path)
    spec = importlib.util.spec_from_file_location(n.correction_module_name,
                                                  module_path)
    fix_grades = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(fix_grades)
    # Optional; only necessary if you want to be able to import the module
    # by name later.
    # sys.modules[module_name] = module

    official_list = root_dir / Path(n.fsc_list)
    with open(official_list, 'rb') as f:
        df_fsc = pd.read_excel(f, sheet_name=0)
    #
    # get the group final
    #
    group_file = root_dir / Path(n.group_file)
    with open(group_file, 'rb') as f:
        group_grades = pd.read_excel(f, sheet_name=0)
    columns = group_grades.columns

    group_ids = []
    #
    # the group excel file has 4 columns for student ids
    #
    for col in columns[:4]:
        the_ids = convert_ids(group_grades[col])
        group_ids.extend(the_ids)
    #
    # get the individual final
    #
    ind_file = root_dir / Path(n.ind_file)
    with open(ind_file, 'rb') as f:
        ind_grades = pd.read_excel(f, sheet=None)
    ind_ids = clean_id(ind_grades, id_col='STUDENT ID')
    official_ids = clean_id(df_fsc, id_col='Student Number')
    print(f'number of ind exams: {len(ind_ids)}')
    print(f'number of group exams: {len(group_ids)}')
    #
    # find official ids that are missing from the individual exams
    #
    missing = set(official_ids.index.values) - set(ind_ids.index.values)
    print('\nmissed exam individual\n')
    for number in missing:
        hit = official_ids.index.values == number
        info = official_ids[hit][['Surname', 'Student Number']].values[0]
        print(*info)

    print('\nmissed group exam\n')
    missed_group = set(official_ids.index.values) - set(group_ids)
    for number in missed_group:
        hit = official_ids.index.values == number
        info = official_ids[hit][['Surname', 'Student Number']].values[0]
        print(*info)

    def find_closest(the_id, good_ids):
        score_list = []
        for choice in good_ids:
            score_list.append(fuzz.ratio(the_id, choice))
        score_array = np.array(score_list)
        max_index = np.argmax(score_array)
        good_choice = good_ids[max_index]
        return good_choice

    print('\nindividual exam: suggest close ids if typos\n')

    for item in ind_ids.index:
        if item not in official_ids.index.values:
            print(f'individ. miss on {item}')
            nearest = find_closest(item, official_ids.index.values)
            print(f'possible value is {nearest}')

    if len(group_ids) > 0:
        print('\nnow group: suggest close ids\n')

    for item in group_ids:
        if item not in official_ids.index.values:
            print(f'group miss on {item}')
            nearest = find_closest(item, official_ids.index.values)
            print(f'possible value is {nearest}')

    df_group = grade_group(group_grades)
    df_group = fix_grades.fix_group(df_group)
    df_ind = pd.DataFrame(ind_ids[['LAST NAME', 'FIRST NAME',
                                   'Percent Score']])
    df_ind['id'] = df_ind.index
    new_name_dict = {'Percent Score': 'ind_score'}
    df_ind.rename(columns=new_name_dict, inplace=True)
    canvas_grades = pd.merge(df_ind,
                             df_group,
                             how='left',
                             left_on='id',
                             right_on='id',
                             sort=False)
    combined_scores = canvas_grades.apply(mark_combined, axis=1)
    canvas_grades['combined'] = combined_scores
    from e340py.get_grade_frames import make_canvas_df
    canvas_path = root_dir / Path(n.grade_book)
    with open(canvas_path, 'r', encoding='ISO-8859-1') as f:
        df_canvas = pd.read_csv(f)
    #
    # drop the mute/unmute row
    #
    #df_canvas=df_canvas.drop(df_canvas.index[0])
    points_possible = df_canvas.iloc[0, :5].values
    points_possible[1:5] = [' ', ' ', ' ', ' ']
    df_canvas = clean_id(df_canvas, id_col='SIS User ID')
    df_upload = pd.DataFrame(df_canvas.iloc[:, :5])
    df_upload.iloc[1, :] = points_possible
    df_canvas = df_canvas.fillna(0)
    # new_name_dict={'ind_score':'m2_ind_score','group_score':'m2_group_score',
    #                'combined':'m2_combined'}
    # canvas_grades.rename(columns=new_name_dict,inplace=True)
    df_upload = pd.merge(df_upload,
                         canvas_grades,
                         how='left',
                         left_index=True,
                         right_on='id',
                         sort=False)
    del df_upload['id']
    del df_upload['LAST NAME']
    del df_upload['FIRST NAME']
    df_upload.iloc[0, 5:] = 100
    new_name = 'mid1_upload.csv'
    with open(new_name, 'w', encoding='utf-8-sig') as f:
        df_upload.to_csv(f, index=False, sep=',')
    pdb.set_trace()
    missing_group = canvas_grades['group_score'] < 10.
    canvas_grades.loc[missing_group, 'group_score'] = np.nan
    columns = ['ind_score', 'group_score', 'combined']
    fig, ax = plt.subplots(2, 2, figsize=(10, 10))
    plots = [ax[0, 0], ax[0, 1], ax[1, 0]]
    for column, plot in zip(columns, plots):
        data = canvas_grades[column].values
        data = data[~np.isnan(data)]
        data_median = np.median(data)
        plot.hist(data)
        plot.set_title(f'{column} median= {data_median}')
    bad = ax[1, 1]
    fig.delaxes(bad)
    fig.canvas.draw()
    fig.savefig('grades.png')

    pdb.set_trace()
    print('\ndone\n')
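
find_closest above ranks candidate ids with fuzz.ratio; the import is not shown, but assuming fuzz comes from fuzzywuzzy (rapidfuzz.fuzz is API-compatible), a quick standalone check of the typo-suggestion idea looks like this, with hypothetical student numbers:

import numpy as np
from fuzzywuzzy import fuzz  # assumption; rapidfuzz.fuzz also works


def find_closest(the_id, good_ids):
    # same logic as in the example: return the official id with the highest
    # similarity score to the (possibly mistyped) id
    scores = np.array([fuzz.ratio(the_id, choice) for choice in good_ids])
    return good_ids[int(np.argmax(scores))]


official = ['44556677', '12345678', '99887766']
print(find_closest('12345778', official))    # -> '12345678'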
Example #8
def main(the_args=None):

    parser = make_parser()
    args = parser.parse_args(the_args)

    with open(args.json_file, 'r') as f:
        name_dict = json.load(f)
    n = make_tuple(name_dict)
    home_dir = Path(os.environ['HOME'])
    #
    #  read in all the clickers
    #
    clickers_mac = home_dir / Path(n.data_dir) / Path(n.clickers_mac)
    df_clickers_mac = clean_clickers(clickers_mac)
    clickers_win = home_dir / Path(n.data_dir) / Path(n.clickers_win)
    df_clickers_win = clean_clickers(clickers_win)
    clickers_cj = home_dir / Path(n.clickers_cj)
    df_clickers_cj = clean_clickers(clickers_cj)

    df_clickers = pd.merge(df_clickers_win,
                           df_clickers_mac,
                           how='left',
                           left_index=True,
                           right_index=True,
                           sort=False)
    df_clickers = pd.merge(df_clickers,
                           df_clickers_cj,
                           how='left',
                           left_index=True,
                           right_index=True,
                           sort=False)

    fsc_list = home_dir / Path(n.data_dir) / Path(n.fsc_list)
    with open(fsc_list, 'rb') as f:
        df_fsc = pd.read_excel(f, index_col=False)
        df_fsc.fillna(0., inplace=True)
        df_fsc = clean_id(df_fsc, id_col='Student Number')

    fsc_list = home_dir / Path(n.data_dir) / Path(n.posted)
    with open(fsc_list, 'rb') as f:
        df_posted_fsc = pd.read_excel(f, index_col=False)
        df_posted_fsc.fillna(0., inplace=True)
        df_posted_fsc = clean_id(df_posted_fsc, id_col='Student Number')

    grade_book = home_dir / Path('course_total') / Path(n.grade_book)
    with open(grade_book, 'r', encoding='utf-8-sig') as f:
        #
        # skip row 1 (muted/not muted)
        #
        df_gradebook = pd.read_csv(f, sep=',', index_col=False, skiprows=[1])
        points_possible = df_gradebook.iloc[0, :]
        df_workbook = pd.DataFrame(df_gradebook)
        final_col_dict = {}
        for col in df_workbook.columns:
            the_match = re_final.match(col)
            if the_match:
                the_type = the_match.group('type')
                print(f"{'*'*20}\n{the_type}")
                final_col_dict[the_type] = col
        #
        # now drop points_possible
        #
        df_workbook = clean_id(df_workbook, id_col='SIS User ID')
        df_workbook.fillna(0., inplace=True)

    grade_cols = df_workbook.columns
    mid_col_list = []
    assign_col_list = []
    quiz_col_list = []
    for col in df_workbook.columns:
        if mid_re.match(col):
            print('!!: ', col)
            mid_col_list.append(col)
            continue
        elif assign_re.match(col):
            print('!!: ', col)
            assign_col_list.append(col)
            continue
        elif day_re.match(col):
            print('!!: ', col)
            quiz_col_list.append(col)
            continue
        else:
            continue
    #-------
    # make a dictionary with dict[clicker_id] = sis_id
    #-------
    id_dict = {}
    for sis_id, item in df_workbook.iterrows():
        key = f"{int(item['ID']):d}"
        id_dict[key] = sis_id

    sis_col = []
    fake_id_counter = 97  #this is 'a'
    for clicker_id in df_clickers.index:
        try:
            sis_col.append(id_dict[clicker_id])
        except KeyError:  #student not in gradebook
            sis_id = chr(fake_id_counter) * 8
            sis_col.append(sis_id)
            fake_id_counter += 1
    df_clickers['SIS User ID'] = sis_col
    df_clickers = df_clickers.set_index('SIS User ID', drop=False)
    #
    # get only the clicker scores columns that match re_total
    #
    #
    quiz_column_list = []
    for col in df_clickers.columns:
        the_match = re_total.match(col)
        if the_match:
            the_sess, the_date = the_match.groups()
            print(f'quiz match: {col}, {the_sess}, {the_date}')
            quiz_column_list.append(col)
    df_scores = pd.DataFrame(df_clickers[quiz_column_list])
    out = df_scores.apply(mark_clickers, axis=1)
    df_clickers['clicker_score'] = out
    df_workbook = pd.merge(df_workbook,
                           df_clickers[['clicker_score']],
                           how='left',
                           left_index=True,
                           right_index=True,
                           sort=False)
    assign_marks = df_workbook.apply(mark_assigns,
                                     args=(assign_col_list, points_possible),
                                     axis=1)
    df_workbook['assign_score'] = assign_marks
    quiz_marks = df_workbook.apply(mark_quizzes,
                                   args=(quiz_col_list, points_possible),
                                   axis=1)
    df_workbook['quiz_score'] = quiz_marks
    df_final = read_finals(n)
    final_scores = df_final.apply(mark_final, axis=1)
    df_final['final_score'] = final_scores
    df_workbook = pd.merge(
        df_workbook,
        df_final[['final_score', 'ind_score', 'group_score']],
        how='left',
        left_index=True,
        right_index=True,
        sort=False)
    cols = [
        'Student', 'quiz_score', 'assign_score', 'clicker_score',
        'final_score', 'ind_score', 'group_score'
    ]
    cols.extend(mid_col_list)
    df_course = pd.DataFrame(df_workbook[cols])
    df_course.to_csv('gradebook.csv')
    new_name_dict = {
        mid_col_list[0]: 'midterm_1',
        mid_col_list[1]: 'midterm_2'
    }
    df_course.rename(columns=new_name_dict, inplace=True)
    df_fsc_out = pd.DataFrame(df_fsc[['Surname', 'Given Name']])
    pdb.set_trace()
    df_fsc_out = pd.merge(df_fsc_out,
                          df_course,
                          how='left',
                          left_index=True,
                          right_index=True,
                          sort=False)
    course_grade = df_fsc_out.apply(mark_course, axis=1)
    course_grade[np.isnan(course_grade)] = 0.
    df_fsc_out['course_corrected'] = np.round(course_grade).astype(int)
    df_fsc_out['posted'] = df_posted_fsc['Percent Grade']
    df_course = pd.merge(df_course,
                         df_fsc_out[['posted', 'course_corrected']],
                         how='left',
                         left_index=True,
                         right_index=True,
                         sort=False)
    del df_course['Student']
    pdb.set_trace()
    #
    # work with the real gradebook which has points possible
    #
    df_upload = pd.DataFrame(df_workbook.iloc[:, :5])
    df_upload = pd.merge(df_upload,
                         df_course,
                         how='left',
                         left_index=True,
                         right_index=True,
                         sort=False)
    columns = list(df_workbook.columns[:5])
    pdb.set_trace()
    columns.extend([
        'quiz_score', 'assign_score', 'clicker_score', 'bonus', 'posted',
        'course_corrected'
    ])
    df_upload['bonus'] = 1.
    df_upload = pd.DataFrame(df_upload[columns])
    total_score = points_possible.values
    total_score[5:8] = 100.
    total_score[8] = 1.
    total_score[9] = 100.
    total_score[10] = 100.
    upload_possible = pd.Series(total_score[:11], index=df_upload.columns)
    for item in [1, 2, 3, 4]:
        upload_possible[item] = ' '
    df_upload = df_upload[columns]
    df_upload.iloc[0, :] = upload_possible
    with open('upload_revised.csv', 'w', encoding='utf-8-sig') as f:
        df_upload.to_csv(f, index=False, sep=',')
    # df_upload=pd.DataFrame(df_workbook)
    # score_column = final_col_dict['Individual']
    # df_upload[score_column]=df_upload['ind_score']
    # mandatory_columns = list(df_upload.columns[:5])
    # mandatory_columns = mandatory_columns + [score_column]
    # df_upload= pd.DataFrame(df_upload[mandatory_columns])
    # points_upload=pd.Series(points_possible)
    # for item in [1,2,3,4]:
    #     points_upload[item] = ' '
    # df_upload.iloc[0,:] = points_upload[mandatory_columns]
    # with open('ind_final.csv','w',encoding='utf-8-sig') as f:
    #     df_upload.to_csv(f,index=False,sep=',')

    # df_upload=pd.DataFrame(df_workbook)
    # score_column = final_col_dict['Group']
    # df_upload[score_column]=df_upload['group_score']
    # mandatory_columns = list(df_upload.columns[:5])
    # mandatory_columns = mandatory_columns + [score_column]
    # df_upload= pd.DataFrame(df_upload[mandatory_columns])
    # df_upload.iloc[0,:] = points_upload[mandatory_columns]
    # with open('group_final.csv','w',encoding='utf-8-sig') as f:
    #     df_upload.to_csv(f,index=False,sep=',')

    # df_upload=pd.DataFrame(df_workbook)
    # score_column = final_col_dict['Combined']
    # df_upload[score_column]=df_upload['final_score']
    # mandatory_columns = list(df_upload.columns[:5])
    # mandatory_columns = mandatory_columns + [score_column]
    # df_upload= pd.DataFrame(df_upload[mandatory_columns])
    # df_upload.iloc[0,:] = points_upload[mandatory_columns]
    # with open('combined_final.csv','w',encoding='utf-8-sig') as f:
    #     df_upload.to_csv(f,index=False,sep=',')
    # full_grade = df_fsc_out['course'].values
    # full_grade[np.isnan(full_grade)]=0.
    # df_fsc['Percent Grade'] = np.round(full_grade).astype(np.int)
    # pdb.set_trace()
    # fsc_list = home_dir / Path(n.data_dir)/ Path('total_upload.xls')
    # with open(fsc_list,'wb') as f:
    #     df_fsc.to_excel(f)

    return df_fsc_out