示例#1
0
def grade_distribution(request, course_id=0):
    """Return the course grade distribution (one JSON record per graded student)."""
    logger.info(grade_distribution.__name__)

    username = request.user.get_username()
    # One query: every student's current grade, plus a correlated subquery
    # picking out the requesting user's own grade for comparison.
    grade_score_sql = "select current_grade,(select current_grade from user where sis_name=" \
                      "%(current_user)s and course_id=%(course_id)s) as current_user_grade from user where course_id=%(course_id)s;"
    df = pd.read_sql(
        grade_score_sql,
        conn,
        params={"current_user": username, "course_id": course_id},
    )
    # Nothing to show when the course has no rows or no recorded grades.
    if df.empty or df['current_grade'].isnull().all():
        return HttpResponse(json.dumps({}), content_type='application/json')

    total_students = df.shape[0]
    df = df[df['current_grade'].notnull()]
    df['current_grade'] = df['current_grade'].astype(float)

    if (df['current_grade'] > 100.0).any():
        # Extra-credit grades above 100: extend the axis to the next
        # multiple of five beyond the maximum grade.
        df['graph_upper_limit'] = int(5 * round(float(df['current_grade'].max()) / 5) + 5)
    else:
        # Nudge exact 100s just below the cap so they land inside the top bin.
        df['current_grade'] = df['current_grade'].apply(
            lambda g: 99.99 if g == 100.00 else g)
        df['graph_upper_limit'] = 100

    df['tot_students'] = total_students
    df['grade_avg'] = df['current_grade'].mean().round(2)

    # json for eventlog
    eventlog(request.user,
             EventLogTypes.EVENT_VIEW_GRADE_DISTRIBUTION.value,
             extra={"course_id": course_id})

    return HttpResponse(df.to_json(orient='records'))
示例#2
0
def update_user_default_selection_for_views(request, course_id=0):
    """Persist a user's default-view selection for a course.

    :param request: HTTP request whose body is a single JSON pair ``{type: value}``
    :param course_id: Integer course ID (defaults to 0)
    :return: HttpResponse containing ``{"default": "success"}`` or ``{"default": "fail"}``
    """
    logger.info(update_user_default_selection_for_views.__name__)
    current_user = request.user.get_username()
    default_selection = json.loads(request.body.decode("utf-8"))
    logger.info(default_selection)
    # The body carries exactly one key/value pair: the selection type and value.
    default_type = next(iter(default_selection))
    default_type_value = default_selection.get(default_type)
    logger.info(
        f"request to set default for type: {default_type} and default_type value: {default_type_value}"
    )
    # json for eventlog
    data = {
        "course_id": course_id,
        "default_type": default_type,
        "default_value": default_type_value
    }
    eventlog(request.user,
             EventLogTypes.EVENT_VIEW_SET_DEFAULT.value,
             extra=data)
    key = 'default'
    try:
        obj, create_or_update_bool = UserDefaultSelection.objects.set_user_defaults(
            course_id, current_user, default_type, default_type_value)
        logger.info(
            f"""setting default returns with success with response {obj.__dict__} and entry created or Updated: {create_or_update_bool}
                        for user {current_user} in course {course_id} """)
        value = 'success'
    except Exception as e:
        # ObjectDoesNotExist is a subclass of Exception, so the previous
        # `except (ObjectDoesNotExist, Exception)` tuple was redundant.
        logger.info(
            f"updating default failed due to {e} for user {current_user} in course: {course_id} "
        )
        value = 'fail'
    return HttpResponse(json.dumps({key: value}),
                        content_type='application/json')
示例#3
0
def grade_distribution(request: HttpRequest, course_id: int = 0) -> HttpResponse:
    """Return the course grade distribution as JSON records.

    :param request: authenticated HTTP request
    :param course_id: Canvas course ID (default 0); converted to the internal
        incremented ID before querying. (The previous signature used
        ``course_id: 0`` — a literal annotation that silently dropped the
        default value.)
    :return: one JSON record per graded student, or ``{}`` when the course
        has no recorded grades
    """
    logger.info(grade_distribution.__name__)

    course_id = canvas_id_to_incremented_id(course_id)

    current_user = request.user.get_username()

    # Plain string (not an f-string): all substitution happens through the
    # %(name)s params passed to pd.read_sql.
    grade_score_sql = """select current_grade,
       (select show_grade_counts From course where id=%(course_id)s) as show_number_on_bars,
    (select current_grade from user where sis_name=%(current_user)s and course_id=%(course_id)s) as current_user_grade
        from user where course_id=%(course_id)s and enrollment_type='StudentEnrollment';
                    """
    df = pd.read_sql(grade_score_sql,
                     conn,
                     params={
                         "current_user": current_user,
                         'course_id': course_id
                     })
    if df.empty or df['current_grade'].isnull().all():
        return HttpResponse(json.dumps({}), content_type='application/json')

    df['tot_students'] = df.shape[0]
    df = df[df['current_grade'].notnull()]
    df['current_grade'] = df['current_grade'].astype(float)
    df['grade_avg'] = df['current_grade'].mean().round(2)
    df['median_grade'] = df['current_grade'].median().round(2)
    df['show_number_on_bars'] = df['show_number_on_bars'].apply(
        lambda x: True if x == 1 else False)

    df.sort_values(by=['current_grade'], inplace=True)
    df.reset_index(drop=True, inplace=True)
    grades = df['current_grade'].values.tolist()
    logger.debug(f"Grades distribution: {grades}")
    BinningGrade = find_binning_grade_value(grades)
    if BinningGrade is not None and not BinningGrade.binning_all:
        # Materialize the lowest scores as a plain list first: passing a
        # Series as `to_replace` is ambiguous to pandas, whereas a list of
        # values replaces exactly those grades with the binning value.
        scores_to_replace = df['current_grade'].head(
            BinningGrade.index).to_list()
        df['current_grade'] = df['current_grade'].replace(
            scores_to_replace, BinningGrade.value)
    df['show_dash_line'] = show_dashed_line(df['current_grade'].iloc[0],
                                            BinningGrade)

    if df[df['current_grade'] > 100.0].shape[0] > 0:
        # Extra-credit grades above 100: extend the axis to the next
        # multiple of five beyond the maximum grade.
        df['graph_upper_limit'] = int(
            (5 * round(float(df['current_grade'].max()) / 5) + 5))
    else:
        # Nudge exact 100s just below the cap so they land inside the top bin.
        df['current_grade'] = df['current_grade'].apply(lambda x: 99.99
                                                        if x == 100.00 else x)
        df['graph_upper_limit'] = 100

    # json for eventlog
    data = {
        "course_id": course_id,
        "show_number_on_bars": df['show_number_on_bars'].values[0]
    }
    eventlog(request.user,
             EventLogTypes.EVENT_VIEW_GRADE_DISTRIBUTION.value,
             extra=data)

    return HttpResponse(df.to_json(orient='records'))
示例#4
0
    def execute_graphql_request(self, request, data, query, variables, operation_name, show_graphiql=False):
        """Log a view event for 'Assignment' operations, then delegate to the parent."""
        if operation_name == 'Assignment':
            eventlog(
                request.user,
                EventLogTypes.EVENT_VIEW_ASSIGNMENT_PLANNING_WITH_GOAL_SETTING.value,
                extra={
                    "course_id": canvas_id_to_incremented_id(variables['courseId']),
                },
            )
        return super(DashboardGraphQLView, self).execute_graphql_request(
            request, data, query, variables, operation_name, show_graphiql)
示例#5
0
    def mutate(self, info, data=None):
        """Create or update a user's default view selection for a course.

        Requires an authenticated user who is an admin of, or enrolled in,
        the course. ``data`` must carry ``course_id`` or ``canvas_course_id``;
        the optional ``default_view_value['event']`` entry is sent to the
        event log but stripped from the persisted value.
        """
        user = info.context.user
        # permissions checks
        if not user.is_authenticated:
            raise GraphQLError(
                'You must be logged in to update this resource!')

        course_id = data.course_id
        canvas_course_id = data.canvas_course_id

        if not course_id and not canvas_course_id:
            raise GraphQLError('You must provide courseId or canvasCourseId')

        if not course_id:
            course = Course.objects.get(canvas_id=canvas_course_id)
            course_id = course.id

        if not is_admin_or_enrolled_in_course_id.test(user, course_id):
            raise GraphQLError(
                'You do not have permission to update this resource!')

        # check if exists
        user_default_selection, created = UserDefaultSelection.objects.get_or_create(
            course_id=course_id,
            user_sis_name=user.get_username(),
            default_view_type=data.default_view_type,
        )

        # Pop the event-log payload out of the stored value. dict.pop with a
        # default never raises KeyError, so the old try/except was dead code;
        # guard against a missing default_view_value instead (previously an
        # unhandled AttributeError).
        user_defaults = data.default_view_value
        if user_defaults is None:
            user_defaults = {}
        event_log = user_defaults.pop('event', {})

        user_default_selection.default_view_value = json.dumps(user_defaults)
        user_default_selection.save()

        event_log_data = {
            "course_id": course_id,
            "default_type": data.default_view_type,
            "default_value": event_log
        }
        eventlog(user,
                 EventLogTypes.EVENT_VIEW_SET_DEFAULT.value,
                 extra=event_log_data)

        # Notice we return an instance of this mutation
        return UserDefaultSelectionMutation(
            user_default_selection=user_default_selection)
示例#6
0
def update_user_default_selection_for_views(request, course_id=0):
    """Persist a user's default-view selection for a course.

    :param request: HTTP `PUT` req.; body should contain a single JSON pair, `{"key": value}`
    :param course_id: Integer Canvas course ID number, typically six digits or less.
    :return: HttpResponse containing `{"default": "success"}` or `{"default": "fail"}`
    """
    logger.info(update_user_default_selection_for_views.__name__)
    course_id = canvas_id_to_incremented_id(course_id)
    current_user = request.user.get_username()
    default_selection = json.loads(request.body.decode("utf-8"))
    logger.info(default_selection)
    # The body carries exactly one key/value pair: the selection type and value.
    default_type = next(iter(default_selection))
    default_type_value = default_selection.get(default_type)
    logger.info(
        f"request to set default for type: {default_type} and default_type value: {default_type_value}"
    )
    # json for eventlog
    data = {
        "course_id": course_id,
        "default_type": default_type,
        "default_value": default_type_value
    }
    eventlog(request.user,
             EventLogTypes.EVENT_VIEW_SET_DEFAULT.value,
             extra=data)
    key = 'default'
    try:
        obj, create_or_update_bool = UserDefaultSelection.objects. \
            set_user_defaults(int(course_id), current_user, default_type, default_type_value)
        logger.info(
            f"""setting default returns with success with response {obj.__dict__} and entry created or Updated: {create_or_update_bool}
                        for user {current_user} in course {course_id} """)
        value = 'success'
    except Exception as e:
        # ObjectDoesNotExist is a subclass of Exception, so the previous
        # `except (ObjectDoesNotExist, Exception)` tuple was redundant.
        logger.info(
            f"updating default failed due to {e} for user {current_user} in course: {course_id} "
        )
        value = 'fail'
    return HttpResponse(json.dumps({key: value}),
                        content_type='application/json')
示例#7
0
def assignments(request, course_id=0):
    """Return assignment progress and planning data for the current user.

    :param request: HTTP request; optional ``percent`` GET param filters the
        planning view to assignments worth at least that percentage.
    :param course_id: Canvas course ID, converted to the internal incremented ID.
    :return: HttpResponse with JSON ``{"progress": [...], "plan": [...]}``,
        or ``[]`` when the course has no assignments.
    """
    logger.info(assignments.__name__)

    course_id = canvas_id_to_incremented_id(course_id)

    current_user = request.user.get_username()
    df_default_display_settings()

    percent_selection = float(request.GET.get('percent', '0.0'))

    # json for eventlog
    data = {"course_id": course_id, "percent_selection": percent_selection}
    eventlog(request.user,
             EventLogTypes.EVENT_VIEW_ASSIGNMENT_PLANNING.value,
             extra=data)

    logger.info(
        'selection from assignment Planning {}'.format(percent_selection))

    assignments_in_course = get_course_assignments(course_id)

    if assignments_in_course.empty:
        return HttpResponse(json.dumps([]), content_type='application/json')

    assignment_submissions = get_user_assignment_submission(
        current_user, assignments_in_course, course_id)

    df = pd.merge(assignments_in_course,
                  assignment_submissions,
                  on='assignment_id',
                  how='left')
    if df.empty:
        logger.info(
            'There are no assignment data in the course %s for user %s ' %
            (course_id, current_user))
        return HttpResponse(json.dumps([]), content_type='application/json')

    df.sort_values(by='due_date', inplace=True)
    df.drop(columns=['assignment_id', 'due_date', 'grp_id'], inplace=True)
    df.drop_duplicates(keep='first', inplace=True)

    # instructor might not ever see the avg score as he don't have grade in assignment. we don't have role described in the flow to open the gates for him
    if not request.user.is_superuser:
        df['avg_score'] = df.apply(no_show_avg_score_for_ungraded_assignments,
                                   axis=1)
    df['avg_score'] = df['avg_score'].fillna('Not available')

    # operate on dataframe copy to prevent Pandas "SettingWithCopyWarning" warning
    df_progressbar = df.loc[df['towards_final_grade'] > 0.0].copy()
    # cast once (the original cast 'score' to float twice, which was redundant)
    df_progressbar[['score']] = df_progressbar[['score']].astype(float)
    df_progressbar['graded'] = df_progressbar['graded'].fillna(False)
    df_progressbar['submitted'] = df_progressbar['submitted'].fillna(False)
    df_progressbar['percent_gotten'] = df_progressbar.apply(
        lambda x: user_percent(x), axis=1)
    df_progressbar.sort_values(by=['graded', 'due_date_mod'],
                               ascending=[False, True],
                               inplace=True)
    df_progressbar.reset_index(drop=True, inplace=True)

    assignment_data = {}
    assignment_data['progress'] = json.loads(
        df_progressbar.to_json(orient='records'))

    # Group the data according the assignment prep view
    df_plan = df.loc[df['towards_final_grade'] >= percent_selection].copy()
    df_plan.reset_index(drop=True, inplace=True)
    logger.debug('The Dataframe for the assignment planning %s ' % df_plan)
    grouped = df_plan.groupby(['week', 'due_dates'])

    assignment_list = []
    for name, group in grouped:
        # name is a tuple of (week,due_date) => (1,'06/23/2018')
        # group is a dataframe based on grouping by week,due_date
        dic = {}
        # Rebind rather than drop in place: in-place drop mutates the
        # groupby's view of df_plan and triggers SettingWithCopyWarning.
        group = group.drop(['week', 'due_dates'], axis=1)
        dic['week'] = name[0]
        dic['due_date'] = name[1]
        dic['assign'] = json.loads(group.to_json(orient='records'))
        assignment_list.append(dic)
    week_list = set()
    for item in assignment_list:
        week_list.add(item['week'])
    weeks = sorted(week_list)
    full = []
    for i, week in enumerate(weeks):
        data = {}
        data["week"] = np.uint64(week).item()
        data["id"] = i + 1
        dd_items = data["due_date_items"] = []
        for item in assignment_list:
            assignment_due_date_grp = {}
            if item['week'] == week:
                assignment_due_date_grp['due_date'] = item['due_date']
                assignment_due_date_grp['assignment_items'] = item['assign']
                dd_items.append(assignment_due_date_grp)
        full.append(data)
    assignment_data['plan'] = json.loads(json.dumps(full))
    return HttpResponse(json.dumps(assignment_data),
                        content_type='application/json')
示例#8
0
def resource_access_within_week(request, course_id=0):
    """Return per-resource access percentages broken down by grade band
    (A/B/C/low/no-grade) for a range of course weeks, joined with the
    requesting user's own access counts, as JSON records.

    NOTE(review): this function appears partially scrubbed/corrupted.
    `week_num_start`, `week_num_end`, `grade`, and `filter_values` are
    referenced below but never assigned in the visible code, and two
    statements read as logger.debug calls fused with lost code (see inline
    notes). Restore from version control before relying on this function.
    """

    course_id = canvas_id_to_incremented_id(course_id)

    current_user = request.user.get_username()

    # NOTE(review): scrubbed line — presumably once logged the current user
    # and parsed the week/grade/filter request parameters.
    logger.debug("current_user="******",")

    # Expand each requested display filter (e.g. files, videos) into the
    # concrete resource_type values stored in the database.
    filter_list = []
    for filter_value in filter_values:
        if filter_value != '':
            filter_list.extend(RESOURCE_VALUES[filter_value.lower()]['types'])

    # json for eventlog
    data = {
        "week_num_start": week_num_start,
        "week_num_end": week_num_end,
        "grade": grade,
        "course_id": course_id,
        "resource_type": filter_values
    }
    eventlog(request.user,
             EventLogTypes.EVENT_VIEW_RESOURCE_ACCESS.value,
             extra=data)

    # get total number of student within the course_id
    total_number_student_sql = "select count(*) from user where course_id = %(course_id)s and enrollment_type='StudentEnrollment'"
    # Narrow the denominator to the selected grade band when one is chosen.
    if (grade == GRADE_A):
        total_number_student_sql += " and current_grade >= 90"
    elif (grade == GRADE_B):
        total_number_student_sql += " and current_grade >= 80 and current_grade < 90"
    elif (grade == GRADE_C):
        total_number_student_sql += " and current_grade >= 70 and current_grade < 80"

    total_number_student_df = pd.read_sql(total_number_student_sql,
                                          conn,
                                          params={"course_id": course_id})
    total_number_student = total_number_student_df.iloc[0, 0]
    logger.info(f"course_id {course_id} total student={total_number_student}")
    if total_number_student == 0:
        logger.info(
            f"There are no students in the percent grade range {grade} for course {course_id}"
        )
        return HttpResponse("{}")

    course_date_start = get_course_date_start(course_id)

    # Week numbers are offsets (in 7-day steps) from the course start date.
    start = course_date_start + timedelta(days=(week_num_start * 7))
    end = course_date_start + timedelta(days=(week_num_end * 7))
    logger.debug("course_start=" + str(course_date_start) + " start=" +
                 str(start) + " end=" + str(end))

    # get time range based on week number passed in via request

    sqlString = f"""SELECT a.resource_id as resource_id, r.resource_type as resource_type, r.name as resource_name, u.current_grade as current_grade, a.user_id as user_id
                    FROM resource r, resource_access a, user u, course c, academic_terms t
                    WHERE a.resource_id = r.resource_id and a.user_id = u.user_id
                    and a.course_id = c.id and c.term_id = t.id
                    and a.access_time > %(start_time)s
                    and a.access_time < %(end_time)s
                    and a.course_id = %(course_id)s
                    and u.course_id = %(course_id)s
                    and u.enrollment_type = 'StudentEnrollment' """

    # access_time bounds are YYYYMMDDHHMMSS strings at midnight boundaries
    startTimeString = start.strftime('%Y%m%d') + "000000"
    endTimeString = end.strftime('%Y%m%d') + "000000"
    logger.debug(sqlString)
    logger.debug("start time=" + startTimeString + " end_time=" +
                 endTimeString)
    df = pd.read_sql(sqlString,
                     conn,
                     params={
                         "start_time": startTimeString,
                         "end_time": endTimeString,
                         "course_id": course_id
                     })
    logger.debug(df)

    # return if there is no data during this interval
    if (df.empty):
        return HttpResponse("{}")

    # group by resource_id, and resource_name
    # reformat for output
    df['resource_id_name'] = df['resource_id'].astype(str).str.cat(
        df['resource_name'], sep=';')

    df = df.drop(['resource_id', 'resource_name'], axis=1)
    df.set_index(['resource_id_name'])  # NOTE(review): not in-place; result is discarded
    # drop resource records when the resource has been accessed multiple times by one user
    df.drop_duplicates(inplace=True)

    # map point grade to letter grade
    df['grade'] = df['current_grade'].map(gpa_map)

    # calculate the percentage
    df['percent'] = df.groupby([
        'resource_id_name', 'grade'
    ])['resource_id_name'].transform('count') / total_number_student

    df = df.drop(['current_grade', 'user_id'], axis=1)
    # now only keep the resource access stats by grade level
    df.drop_duplicates(inplace=True)

    resource_id_name = df["resource_id_name"].unique()

    #df.reset_index(inplace=True)

    # zero filled dataframe with resource name as row name, and grade as column name
    output_df = pd.DataFrame(0.0,
                             index=resource_id_name,
                             columns=[
                                 GRADE_A, GRADE_B, GRADE_C, GRADE_LOW,
                                 NO_GRADE_STRING, RESOURCE_TYPE_STRING
                             ])
    output_df = output_df.rename_axis('resource_id_name')
    output_df = output_df.astype({RESOURCE_TYPE_STRING: str})

    # Fill in the per-grade percentage and the resource type for each row.
    for index, row in df.iterrows():
        # set value
        output_df.at[row['resource_id_name'], row['grade']] = row['percent']
        output_df.at[row['resource_id_name'],
                     RESOURCE_TYPE_STRING] = row[RESOURCE_TYPE_STRING]
    output_df.reset_index(inplace=True)

    # now insert person's own viewing records: what resources the user has viewed, and the last access timestamp
    selfSqlString = f"""select CONCAT(r.resource_id, ';', r.name) as resource_id_name, count(*) as self_access_count, max(a.access_time) as self_access_last_time 
                    from resource_access a, user u, resource r 
                    where a.user_id = u.user_id 
                    and a.resource_id = r.resource_id 
                    and u.sis_name=%(current_user)s 
                    and a.course_id = %(course_id)s
                    group by CONCAT(r.resource_id, ';', r.name)"""
    logger.debug(selfSqlString)
    # NOTE(review): scrubbed/corrupted statement — reads as a logger.debug
    # call fused with what was presumably a pd.read_sql call assigning
    # selfDf. As written this is not valid Python; restore from version
    # control.
    logger.debug("current_user="******"current_user": current_user,
                             "course_id": course_id
                         })

    output_df = output_df.join(selfDf.set_index('resource_id_name'),
                               on='resource_id_name',
                               how='left')
    # NOTE(review): row.NO_GRADE assumes the no-grade column is literally
    # named "NO_GRADE" (i.e. NO_GRADE_STRING == "NO_GRADE") — confirm.
    output_df["total_percent"] = output_df.apply(lambda row: row[
        GRADE_A] + row[GRADE_B] + row[GRADE_C] + row[GRADE_LOW] + row.NO_GRADE,
                                                 axis=1)

    if (grade != "all"):
        # drop all other grades
        grades = [GRADE_A, GRADE_B, GRADE_C, GRADE_LOW, NO_GRADE_STRING]
        for i_grade in grades:
            if (i_grade == grade):
                output_df["total_percent"] = output_df[i_grade]
            else:
                output_df = output_df.drop([i_grade], axis=1)

    # keep only the resource types selected by the user's filters
    output_df = output_df[output_df.resource_type.isin(filter_list)]

    # if no checkboxes are checked send nothing
    if (output_df.empty):
        return HttpResponse("{}")

    # only keep rows where total_percent > 0
    output_df = output_df[output_df.total_percent > 0]

    # time 100 to show the percentage
    output_df["total_percent"] *= 100
    # round all numbers to whole numbers
    output_df = output_df.round(0)

    output_df.fillna(0, inplace=True)  #replace null value with 0

    # split "id;name" back into the separate parts used to build the link
    output_df[['resource_id_part', 'resource_name_part'
               ]] = output_df['resource_id_name'].str.split(';', expand=True)

    # Build the display value: url prefix + id + postfix, joined with the
    # resource name and an icon key via CANVAS_FILE_ID_NAME_SEPARATOR.
    output_df['resource_name'] = output_df.apply(lambda row: (
        RESOURCE_ACCESS_CONFIG.get(row.resource_type).get("urls").get("prefix")
        + row.resource_id_part + RESOURCE_ACCESS_CONFIG.get(row.resource_type).
        get("urls").get("postfix") + CANVAS_FILE_ID_NAME_SEPARATOR + row.
        resource_name_part + CANVAS_FILE_ID_NAME_SEPARATOR + RESOURCE_VALUES.
        get(RESOURCE_VALUES_MAP.get(row.resource_type)).get('icon')),
                                                 axis=1)
    # RESOURCE_VALUES_MAP {'canvas': 'files', 'leccap': 'videos', 'mivideo': 'videos'}
    output_df['resource_type'] = output_df['resource_type'].replace(
        RESOURCE_VALUES_MAP)
    output_df.drop(
        columns=['resource_id_part', 'resource_name_part', 'resource_id_name'],
        inplace=True)

    logger.debug(output_df.to_json(orient='records'))

    return HttpResponse(output_df.to_json(orient='records'),
                        content_type='application/json')
示例#9
0
def file_access_within_week(request, course_id=0):
    """Return per-file access percentages broken down by grade band for a
    range of course weeks, joined with the requesting user's own file-access
    counts, as JSON records.

    NOTE(review): this function appears partially scrubbed/corrupted.
    `week_num_start`, `week_num_end`, and `grade` are referenced below but
    never assigned in the visible code, and the first logger.debug statement
    is fused with what was presumably the eventlog `data` dict (see inline
    notes). Restore from version control before relying on this function.
    """

    current_user = request.user.get_username()

    # NOTE(review): scrubbed/corrupted statement — a logger.debug call fused
    # with the beginning of the eventlog data dict; as written this is not
    # valid Python.
    logger.debug("current_user="******"week_num_start": week_num_start,
        "week_num_end": week_num_end,
        "grade": grade,
        "course_id": course_id
    }
    eventlog(request.user,
             EventLogTypes.EVENT_VIEW_FILE_ACCESS.value,
             extra=data)

    # get total number of student within the course_id
    total_number_student_sql = "select count(*) from user where course_id = %(course_id)s"
    total_number_student_df = pd.read_sql(total_number_student_sql,
                                          conn,
                                          params={"course_id": course_id})
    total_number_student = total_number_student_df.iloc[0, 0]
    # NOTE(review): string concatenation implies course_id is a str here —
    # confirm; an int would raise TypeError.
    logger.debug("course_id_string" + course_id + " total student=" +
                 str(total_number_student))

    term_date_start = AcademicTerms.objects.course_date_start(course_id)

    # Week numbers are offsets (in 7-day steps) from the term start date.
    start = term_date_start + timedelta(days=(week_num_start * 7))
    end = term_date_start + timedelta(days=(week_num_end * 7))
    logger.debug("term_start=" + str(term_date_start) + " start=" +
                 str(start) + " end=" + str(end))

    # get time range based on week number passed in via request

    sqlString = "SELECT a.file_id as file_id, f.name as file_name, u.current_grade as current_grade, a.user_id as user_id " \
                "FROM file f, file_access a, user u, course c, academic_terms t  " \
                "WHERE a.file_id =f.ID and a.user_id = u.ID  " \
                "and f.course_id = c.id and c.term_id = t.id " \
                "and a.access_time > %(start_time)s " \
                "and a.access_time < %(end_time)s " \
                "and f.course_id = %(course_id)s "
    # access_time bounds are YYYYMMDDHHMMSS strings at midnight boundaries
    startTimeString = start.strftime('%Y%m%d') + "000000"
    endTimeString = end.strftime('%Y%m%d') + "000000"
    logger.debug(sqlString)
    logger.debug("start time=" + startTimeString + " end_time=" +
                 endTimeString)
    df = pd.read_sql(sqlString,
                     conn,
                     params={
                         "start_time": startTimeString,
                         "end_time": endTimeString,
                         "course_id": course_id
                     })
    logger.debug(df)

    # return if there is no data during this interval
    if (df.empty):
        return HttpResponse("no data")

    # group by file_id, and file_name
    # reformat for output
    df['file_id_name'] = df['file_id'].str.cat(df['file_name'], sep=';')

    df = df.drop(['file_id', 'file_name'], axis=1)
    df.set_index(['file_id_name'])  # NOTE(review): not in-place; result is discarded
    # drop file records when the file has been accessed multiple times by one user
    df.drop_duplicates(inplace=True)

    # map point grade to letter grade
    df['grade'] = df['current_grade'].map(gpa_map)

    # calculate the percentage
    df['percent'] = round(
        df.groupby(['file_id_name', 'grade'
                    ])['file_id_name'].transform('count') /
        total_number_student, 2)

    df = df.drop(['current_grade', 'user_id'], axis=1)
    # now only keep the file access stats by grade level
    df.drop_duplicates(inplace=True)

    file_id_name = df["file_id_name"].unique()

    #df.reset_index(inplace=True)

    # zero filled dataframe with file name as row name, and grade as column name
    output_df = pd.DataFrame(
        0.0,
        index=file_id_name,
        columns=[GRADE_A, GRADE_B, GRADE_C, GRADE_LOW, NO_GRADE_STRING])
    output_df = output_df.rename_axis('file_id_name')

    # Fill in the per-grade percentage for each file row.
    for index, row in df.iterrows():
        # set value
        output_df.at[row['file_id_name'], row['grade']] = row['percent']
    output_df.reset_index(inplace=True)

    # now insert person's own viewing records: what files the user has viewed, and the last access timestamp
    # now insert person's own viewing records: what files the user has viewed, and the last access timestamp
    selfSqlString = "select CONCAT(f.id, ';', f.name) as file_id_name, count(*) as self_access_count, max(a.access_time) as self_access_last_time " \
                    "from file_access a, user u, file f " \
                    "where a.user_id = u.id " \
                    "and a.file_id = f.ID " \
                    "and u.sis_name=%(current_user)s " \
                    "group by CONCAT(f.id, ';', f.name)"
    logger.debug(selfSqlString)
    # NOTE(review): scrubbed/corrupted statement — a logger.debug call fused
    # with what was presumably a pd.read_sql call assigning selfDf. As
    # written this is not valid Python; restore from version control.
    logger.debug("current_user="******"current_user": current_user})

    output_df = output_df.join(selfDf.set_index('file_id_name'),
                               on='file_id_name',
                               how='left')
    # NOTE(review): hard-coded "90-100"/"80-89"/"70-79"/"low_grade" labels
    # presumably match GRADE_A/GRADE_B/GRADE_C/GRADE_LOW, and row.NO_GRADE
    # assumes NO_GRADE_STRING == "NO_GRADE" — confirm.
    output_df["total_count"] = output_df.apply(lambda row: row["90-100"] + row[
        "80-89"] + row["70-79"] + row["low_grade"] + row.NO_GRADE,
                                               axis=1)

    if (grade != "all"):
        # drop all other grades
        grades = [GRADE_A, GRADE_B, GRADE_C, GRADE_LOW, NO_GRADE_STRING]
        for i_grade in grades:
            if (i_grade == grade):
                output_df["total_count"] = output_df[i_grade]
            else:
                output_df = output_df.drop([i_grade], axis=1)

    # only keep rows where total_count > 0
    output_df = output_df[output_df.total_count > 0]

    # time 100 to show the percentage
    output_df["total_count"] = output_df["total_count"] * 100
    # round all numbers to one decimal point
    output_df = output_df.round(DECIMAL_ROUND_DIGIT)

    output_df.fillna(0, inplace=True)  #replace null value with 0

    # NOTE(review): tuple-unpacking a split result via `.str` was removed in
    # pandas 1.0; newer pandas needs `.str.split(';', n=1, expand=True)`.
    output_df['file_id_part'], output_df['file_name_part'] = output_df[
        'file_id_name'].str.split(';', 1).str
    # Build the Canvas file link: prefix + id + postfix, joined to the name.
    output_df['file_name'] = output_df.apply(
        lambda row: CANVAS_FILE_PREFIX + row.file_id_part + CANVAS_FILE_POSTFIX
        + CANVAS_FILE_ID_NAME_SEPARATOR + row.file_name_part,
        axis=1)
    output_df.drop(columns=['file_id_part', 'file_name_part', 'file_id_name'],
                   inplace=True)
    logger.debug(output_df.to_json(orient='records'))

    return HttpResponse(output_df.to_json(orient='records'))
示例#10
0
def grade_distribution(request, course_id=0):
    """Build the grade-distribution payload: summary statistics plus the
    (binned) list of student grades, returned as one JSON object."""
    logger.info(grade_distribution.__name__)

    course_id = canvas_id_to_incremented_id(course_id)

    username = request.user.get_username()

    # Substitution happens via %(name)s params in pd.read_sql; the string
    # itself contains no interpolation.
    grade_score_sql = f"""select current_grade,
       (select show_grade_counts From course where id=%(course_id)s) as show_number_on_bars,
       (select current_grade from user where sis_name=%(current_user)s and course_id=%(course_id)s) as current_user_grade
       from user where course_id=%(course_id)s and enrollment_type=%(enrollment_type)s
       """
    query_params = {
        'current_user': username,
        'course_id': course_id,
        'enrollment_type': 'StudentEnrollment',
    }
    df = pd.read_sql(grade_score_sql, conn, params=query_params)

    # Require at least six recorded grades before showing the view.
    if df.empty or df['current_grade'].count() < 6:
        logger.info(
            f"Not enough students grades (only {df['current_grade'].count()}) in a course {course_id} to show the view"
        )
        return HttpResponse(json.dumps({}), content_type='application/json')

    summary = {
        'current_user_grade': df['current_user_grade'].values[0],
        'tot_students': df.shape[0],
    }
    df = df[df['current_grade'].notnull()]
    df['current_grade'] = df['current_grade'].astype(float)
    summary['grade_avg'] = df['current_grade'].mean().round(2)
    summary['median_grade'] = df['current_grade'].median().round(2)
    # bool(...) keeps this a plain Python bool so json.dumps can serialize it
    summary['show_number_on_bars'] = bool(df['show_number_on_bars'].values[0] == 1)

    df.sort_values(by=['current_grade'], inplace=True)
    df.reset_index(drop=True, inplace=True)
    grades = df['current_grade'].values.tolist()
    logger.debug(f"Grades distribution: {grades}")
    binning = find_binning_grade_value(grades)
    if binning is not None and not binning.binning_all:
        # Collapse the lowest scores into a single binning value.
        low_scores = df['current_grade'].head(binning.index).to_list()
        df['current_grade'] = df['current_grade'].replace(low_scores, binning.value)
    summary['show_dash_line'] = show_dashed_line(df['current_grade'].iloc[0], binning)

    if (df['current_grade'] > 100.0).any():
        # Extra-credit grades above 100: extend the axis to the next
        # multiple of five beyond the maximum grade.
        summary['graph_upper_limit'] = int(
            5 * round(float(df['current_grade'].max()) / 5) + 5)
    else:
        # Nudge exact 100s just below the cap so they land inside the top bin.
        df['current_grade'] = df['current_grade'].apply(
            lambda g: 99.99 if g == 100.00 else g)
        summary['graph_upper_limit'] = 100

    grade_view_data = {
        'summary': summary,
        'grades': df['current_grade'].values.tolist(),
    }

    # json for eventlog
    eventlog(request.user,
             EventLogTypes.EVENT_VIEW_GRADE_DISTRIBUTION.value,
             extra={
                 "course_id": course_id,
                 "show_number_on_bars": int(df['show_number_on_bars'].values[0])
             })

    return HttpResponse(json.dumps(grade_view_data))
示例#11
0
def assignments(request, course_id=0):
    """Return assignment progress and planning data for the current user.

    :param request: HTTP request; optional ``percent`` GET param filters the
        planning view to assignments worth at least that percentage.
    :param course_id: course ID (defaults to 0).
    :return: HttpResponse with JSON ``{"progress": [...], "plan": [...]}``,
        or ``[]`` when the course has no assignments.
    """
    logger.info(assignments.__name__)

    current_user = request.user.get_username()
    df_default_display_settings()

    percent_selection = float(request.GET.get('percent', '0.0'))

    # json for eventlog
    data = {
        "course_id": course_id,
        "percent_selection": percent_selection
    }
    eventlog(request.user, EVENT_VIEW_ASSIGNMENT_PLANNING, extra=data)

    logger.info('selection from assignment Planning {}'.format(percent_selection))

    assignments_in_course = get_course_assignments(course_id)

    if assignments_in_course.empty:
        return HttpResponse(json.dumps([]), content_type='application/json')

    assignment_submissions = get_user_assignment_submission(current_user, assignments_in_course, course_id)

    df = pd.merge(assignments_in_course, assignment_submissions, on='assignment_id', how='left')
    if df.empty:
        logger.info('There are no assignment data in the course %s for user %s ' % (course_id, current_user))
        return HttpResponse(json.dumps([]), content_type='application/json')

    df.sort_values(by='due_date', inplace=True)
    df.drop(columns=['assignment_id', 'due_date'], inplace=True)
    df.drop_duplicates(keep='first', inplace=True)

    # Work on an explicit copy: df3 was previously a view of df, so the
    # column assignments below raised SettingWithCopyWarning.
    df3 = df[df['towards_final_grade'] > 0.0].copy()
    # Cast and fill once, then compute the percent (the original cast
    # 'score' and computed percent_gotten twice, which was redundant).
    df3[['score']] = df3[['score']].astype(float)
    df3['graded'] = df3['graded'].fillna(False)
    df3['percent_gotten'] = df3.apply(user_percent, axis=1)
    df3.sort_values(by=['graded', 'due_date_mod'], ascending=[False, True], inplace=True)
    df3.reset_index(drop=True, inplace=True)

    assignment_data = {}
    assignment_data['progress'] = json.loads(df3.to_json(orient='records'))

    # Group the data according the assignment prep view
    df2 = df[df['towards_final_grade'] >= percent_selection].copy()
    df2.reset_index(drop=True, inplace=True)
    logger.debug('The Dataframe for the assignment planning %s ' % df2)
    grouped = df2.groupby(['week', 'due_dates'])

    assignment_list = []
    for name, group in grouped:
        # name is a tuple of (week,due_date) => (1,'06/23/2018')
        # group is a dataframe based on grouping by week,due_date
        dic = {}
        # Rebind rather than drop in place: in-place drop mutates the
        # groupby's view of df2 and triggers SettingWithCopyWarning.
        group = group.drop(['week', 'due_dates'], axis=1)
        dic['week'] = name[0]
        dic['due_date'] = name[1]
        dic['assign'] = json.loads(group.to_json(orient='records'))
        assignment_list.append(dic)
    week_list = set()
    for item in assignment_list:
        week_list.add(item['week'])
    weeks = sorted(week_list)
    full = []
    for i, week in enumerate(weeks):
        data = {}
        data["week"] = np.uint64(week).item()
        data["id"] = i + 1
        dd_items = data["due_date_items"] = []
        for item in assignment_list:
            assignment_due_date_grp = {}
            if item['week'] == week:
                assignment_due_date_grp['due_date'] = item['due_date']
                assignment_due_date_grp['assignment_items'] = item['assign']
                dd_items.append(assignment_due_date_grp)
        full.append(data)
    assignment_data['plan'] = json.loads(json.dumps(full))
    return HttpResponse(json.dumps(assignment_data), content_type='application/json')