Example #1
def is_student_calibrated(request):
    """
    Decides if a student has fulfilled the criteria for peer grading calibration for a given location (problem id).
    Input:
        student_id, problem_id
    Output:
        Dictionary with a boolean 'calibrated' indicating whether or not the student has finished calibration.

    Note: Location in the database is currently being used as the problem id.
    """

    if request.method != "GET":
        raise Http404

    problem_id = request.GET.get("problem_id")
    student_id = request.GET.get("student_id")

    success, data = calibration.check_calibration_status(
        problem_id, student_id)

    if not success:
        return util._error_response(data, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response(data, _INTERFACE_VERSION)
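
A quick way to exercise this view in isolation is Django's RequestFactory. The sketch below is not part of the example: the URL path is made up (the view only reads request.GET), the ids are invented, and it assumes util._success_response serializes the data dict to a JSON HttpResponse containing the 'calibrated' key described in the docstring, with the usual Django settings and database available.

import json

from django.test import RequestFactory

factory = RequestFactory()
request = factory.get("/peer_grading/is_student_calibrated/",   # hypothetical path
                      {"problem_id": "i4x://org/course/problem/essay1",
                       "student_id": "42"})
response = is_student_calibrated(request)
payload = json.loads(response.content)
print(payload.get("calibrated"))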
Example #2
def get_problem_list(request):
    """
    Get the list of problems that need grading in course request.GET['course_id'].

    Returns:
        list of dicts with keys
           'location'
           'problem_name'
           'num_graded' -- number graded
           'num_pending' -- number pending in the queue
           'min_for_ml' -- minimum needed to make ML model
    """

    if request.method!="GET":
        error_message="Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id=request.GET.get("course_id")

    if not course_id:
        error_message = "Missing needed tag course_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    locations_for_course = [x['location'] for x in
                            list(Submission.objects.filter(course_id=course_id).values('location').distinct())]

    if len(locations_for_course) == 0:
        error_message = "No problems associated with course."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    location_info = []
    for location in locations_for_course:
        problem_name = Submission.objects.filter(location=location)[0].problem_id
        submissions_pending = staff_grading_util.submissions_pending_instructor(location, state_in=[SubmissionState.waiting_to_be_graded]).count()
        finished_instructor_graded = staff_grading_util.finished_submissions_graded_by_instructor(location).count()
        min_scored_for_location = settings.MIN_TO_USE_PEER
        location_ml_count = Submission.objects.filter(location=location, preferred_grader_type="ML").count()
        if location_ml_count > 0:
            min_scored_for_location = settings.MIN_TO_USE_ML

        submissions_required = max([0, min_scored_for_location - finished_instructor_graded])

        problem_name_from_location = location.split("://")[1]
        location_dict = {
            'location': location,
            'problem_name': problem_name,
            'problem_name_from_location': problem_name_from_location,
            'num_graded': finished_instructor_graded,
            'num_pending': submissions_pending,
            'num_required': submissions_required,
            'min_for_ml': settings.MIN_TO_USE_ML,
        }
        location_info.append(location_dict)

    util.log_connection_data()
    return util._success_response({'problem_list' : location_info},
                                  _INTERFACE_VERSION)
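
A hedged client-side sketch for the staff problem list above: the path and course_id are invented, and it assumes the JSON body carries a 'success' flag alongside the 'problem_list' key built by the view.

import json

from django.test import RequestFactory

request = RequestFactory().get("/staff_grading/get_problem_list/",   # hypothetical path
                               {"course_id": "org/course/run"})
payload = json.loads(get_problem_list(request).content)
if payload.get("success"):
    for problem in payload["problem_list"]:
        # num_required is how many more instructor gradings the location still needs
        print("{0}: {1} graded, {2} still required".format(
            problem["problem_name"], problem["num_graded"], problem["num_required"]))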
Example #3
def get_peer_grading_data_for_location(request):
    if request.method != 'GET':
        return util._error_response("Request type must be GET",
                                    _INTERFACE_VERSION)

    for tag in ['student_id', 'location']:
        if tag not in request.GET:
            return util._error_response("Missing required key {0}".format(tag),
                                        _INTERFACE_VERSION)

    location = request.GET.get('location')
    student_id = request.GET.get('student_id')

    pl = peer_grading_util.PeerLocation(location, student_id)
    student_sub_count = pl.submitted_count()

    submissions_graded = pl.graded_count()
    submissions_required = pl.required_count()
    submissions_available = pl.pending_count()

    peer_data = {
        'count_graded': submissions_graded,
        'count_required': submissions_required,
        'student_sub_count': student_sub_count,
        'count_available': submissions_available
    }

    util.log_connection_data()
    return util._success_response(peer_data, _INTERFACE_VERSION)
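
How a caller interprets peer_data is not shown in the source. A minimal sketch of one plausible client-side check, with the "student is finished" rule treated as an assumption rather than the project's actual policy:

def student_finished_peer_grading(peer_data):
    # Assumed rule: a student is done once they have graded at least as many
    # submissions as the location requires of them.
    return peer_data['count_graded'] >= peer_data['count_required']

print(student_finished_peer_grading({'count_graded': 3, 'count_required': 3,
                                     'student_sub_count': 1, 'count_available': 0}))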
Example #4
def get_peer_grading_data_for_location(request):
    if request.method != 'GET':
        return util._error_response("Request type must be GET", _INTERFACE_VERSION)

    for tag in ['student_id', 'location']:
        if tag not in request.GET:
            return util._error_response("Missing required key {0}".format(tag), _INTERFACE_VERSION)

    location = request.GET.get('location')
    student_id = request.GET.get('student_id')

    pl = peer_grading_util.PeerLocation(location,student_id)
    student_sub_count = pl.submitted_count()

    submissions_graded = pl.graded_count()
    submissions_required = pl.required_count()
    submissions_available = pl.pending_count()

    peer_data = {
        'count_graded' : submissions_graded,
        'count_required' : submissions_required,
        'student_sub_count' : student_sub_count,
        'count_available' : submissions_available
    }

    util.log_connection_data()
    return util._success_response(peer_data, _INTERFACE_VERSION)
Example #5
def get_peer_grading_data_for_location(request):
    if request.method != 'GET':
        return util._error_response("Request type must be GET", _INTERFACE_VERSION)

    for tag in ['student_id', 'location']:
        if tag not in request.GET:
            return util._error_response("Missing required key {0}".format(tag), _INTERFACE_VERSION)

    location = request.GET.get('location')
    student_id = request.GET.get('student_id')

    student_sub_count = peer_grading_util.get_required_peer_grading_for_location(
        {'student_id': student_id, 'location': location, 'preferred_grader_type': "PE"})
    submissions_graded = peer_grading_util.peer_grading_submissions_graded_for_location(location, student_id).count()
    submissions_required = settings.REQUIRED_PEER_GRADING_PER_STUDENT * student_sub_count

    # Check to see if submissions were available to grade in the past week
    notification_seen_recently = NotificationsSeen.check_for_recent_notifications(
        student_id = student_id,
        location = location,
        notification_type=NotificationTypes.peer_grading,
        recent_notification_interval=settings.PEER_GRADING_TIMEOUT_INTERVAL
    )

    if not notification_seen_recently:
        submissions_required = submissions_graded

    peer_data = {
        'count_graded' : submissions_graded,
        'count_required' : submissions_required,
        'student_sub_count' : student_sub_count,
    }

    util.log_connection_data()
    return util._success_response(peer_data, _INTERFACE_VERSION)
Example #6
def get_problem_list(request):
    """
    Get the list of problems that need grading in course_id request.GET['course_id'].

    Returns:
        list of dicts with keys
           'location'
           'problem_name'
           'num_graded' -- number graded
           'num_pending' -- number pending in the queue
    """

    if request.method != "GET":
        error_message = "Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id = request.GET.get("course_id")
    student_id = request.GET.get("student_id")

    if not course_id or not student_id:
        error_message = "Missing needed tag course_id or student_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    locations_for_course = [
        x["location"] for x in list(Submission.objects.filter(course_id=course_id).values("location").distinct())
    ]

    location_info = []
    for location in locations_for_course:
        student_sub_count = peer_grading_util.get_required_peer_grading_for_location(
            {"student_id": student_id, "location": location, "preferred_grader_type": "PE"}
        )
        if student_sub_count > 0:
            problem_name = Submission.objects.filter(location=location)[0].problem_id
            submissions_pending = peer_grading_util.peer_grading_submissions_pending_for_location(
                location, student_id
            ).count()
            submissions_graded = peer_grading_util.peer_grading_submissions_graded_for_location(
                location, student_id
            ).count()
            submissions_required = max(
                [0, (settings.REQUIRED_PEER_GRADING_PER_STUDENT * student_sub_count) - submissions_graded]
            )

            problem_name_from_location = location.split("://")[1]
            if submissions_graded > 0 or submissions_pending > 0:
                location_dict = {
                    "location": location,
                    "problem_name": problem_name,
                    "num_graded": submissions_graded,
                    "num_required": submissions_required,
                    "num_pending": submissions_pending,
                }
                location_info.append(location_dict)

    util.log_connection_data()
    return util._success_response({"problem_list": location_info}, _INTERFACE_VERSION)
Example #7
def get_problem_list(request):
    """
    Get the list of problems that need grading in course request.GET['course_id'].

    Returns:
        list of dicts with keys
           'location'
           'problem_name'
           'num_graded' -- number graded
           'num_pending' -- number pending in the queue
           'min_for_ml' -- minimum needed to make ML model
    """

    if request.method!="GET":
        error_message="Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id=request.GET.get("course_id")

    if not course_id:
        error_message = "Missing needed tag course_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    locations_for_course = [x['location'] for x in
                            list(Submission.objects.filter(course_id=course_id).values('location').distinct())]

    if len(locations_for_course) == 0:
        error_message = "No problems associated with course."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    location_info = []
    for location in locations_for_course:
        sl = staff_grading_util.StaffLocation(location)
        control = SubmissionControl(sl.latest_submission())
        problem_name = sl.problem_name()
        submissions_pending = sl.pending_count()
        finished_instructor_graded = sl.graded_count()

        submissions_required = max([0, sl.minimum_to_score() - finished_instructor_graded])

        problem_name_from_location = location.split("://")[1]
        location_dict = {
            'location': location,
            'problem_name': problem_name,
            'problem_name_from_location': problem_name_from_location,
            'num_graded': finished_instructor_graded,
            'num_pending': submissions_pending,
            'num_required': submissions_required,
            'min_for_ml': control.minimum_to_use_ai,
            }
        location_info.append(location_dict)

    util.log_connection_data()
    return util._success_response({'problem_list' : location_info},
                                  _INTERFACE_VERSION)
Example #8
def get_problem_list(request):
    """
    Get the list of problems that need grading in course_id request.GET['course_id'].

    Returns:
        list of dicts with keys
           'location'
           'problem_name'
           'num_graded' -- number graded
           'num_pending' -- number pending in the queue
    """

    if request.method != "GET":
        error_message = "Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id = request.GET.get("course_id")
    student_id = request.GET.get("student_id")

    if not course_id or not student_id:
        error_message = "Missing needed tag course_id or student_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    locations_for_course = [
        x['location'] for x in Submission.objects.filter(
            course_id=course_id).values('location').distinct()
    ]

    location_info = []
    for location in locations_for_course:
        pl = peer_grading_util.PeerLocation(location, student_id)
        if pl.submitted_count() > 0:
            problem_name = pl.problem_name()
            submissions_pending = pl.pending_count()
            submissions_graded = pl.graded_count()
            submissions_required = max(
                [0, pl.required_count() - submissions_graded])

            if (submissions_graded > 0 or submissions_pending > 0
                    or control_util.SubmissionControl.peer_grade_finished_subs(
                        pl)):
                location_dict = {
                    'location': location,
                    'problem_name': problem_name,
                    'num_graded': submissions_graded,
                    'num_required': submissions_required,
                    'num_pending': submissions_pending,
                }
                location_info.append(location_dict)

    util.log_connection_data()
    return util._success_response({'problem_list': location_info},
                                  _INTERFACE_VERSION)
Example #9
def get_next_submission(request):
    """
    Gets next submission from controller for peer grading.
    Input:
        Get request with the following keys:
           grader_id - Student id of the grader
           location - The problem id to get peer grading for.
    """

    if request.method != "GET":
        log.debug("Improper request method")
        raise Http404

    grader_id = request.GET.get("grader_id")
    location = request.GET.get("location")

    if not grader_id or not location:
        error_message = "Failed to find needed keys 'grader_id' and 'location'"
        log.debug(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    (found, sub_id) = peer_grading_util.get_single_peer_grading_item(
        location, grader_id)

    if not found:
        error_message = "You have completed all of the existing peer grading or there are no more submissions waiting to be peer graded."
        log.debug(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    try:
        sub = Submission.objects.get(id=int(sub_id))
    except Exception:
        log.debug("Could not find submission with id {0}".format(sub_id))
        return util._error_response("Error getting grading.",
                                    _INTERFACE_VERSION)

    if sub.state != SubmissionState.being_graded:
        log.debug(
            "Submission with id {0} has incorrect internal state {1}.".format(
                sub_id, sub.state))
        return util._error_response("Error getting grading.",
                                    _INTERFACE_VERSION)

    response = {
        'submission_id': sub_id,
        'submission_key': sub.xqueue_submission_key,
        'student_response': sub.student_response,
        'prompt': sub.prompt,
        'rubric': sub.rubric,
        'max_score': sub.max_score,
    }

    #log.debug(response)
    return util._success_response(response, _INTERFACE_VERSION)
Example #10
def get_problem_list(request):
    """
    Get the list of problems that need grading in course_id request.GET['course_id'].

    Returns:
        list of dicts with keys
           'location'
           'problem_name'
           'num_graded' -- number graded
           'num_pending' -- number pending in the queue
    """

    if request.method!="GET":
        error_message="Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id=request.GET.get("course_id")
    student_id = request.GET.get("student_id")

    if not course_id or not student_id:
        error_message = "Missing needed tag course_id or student_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    locations_for_course = [x['location'] for x in
                            Submission.objects.filter(course_id=course_id).values('location').distinct()]

    location_info = []
    for location in locations_for_course:
        pl = peer_grading_util.PeerLocation(location, student_id)
        if pl.submitted_count() > 0:
            problem_name = pl.problem_name()
            submissions_pending = pl.pending_count()
            submissions_graded = pl.graded_count()
            submissions_required = max([0, pl.required_count() - submissions_graded])

            if (submissions_graded > 0 or
                    submissions_pending > 0 or
                    control_util.SubmissionControl.peer_grade_finished_subs(pl)):
                location_dict = {
                    'location': location,
                    'problem_name': problem_name,
                    'num_graded': submissions_graded,
                    'num_required': submissions_required,
                    'num_pending': submissions_pending,
                }
                location_info.append(location_dict)

    util.log_connection_data()
    return util._success_response({'problem_list': location_info},
                                  _INTERFACE_VERSION)
Example #11
def get_next_submission(request):
    """
    Gets next submission from controller for peer grading.
    Input:
        Get request with the following keys:
           grader_id - Student id of the grader
           location - The problem id to get peer grading for.
    """

    if request.method != "GET":
        log.error("Improper request method")
        raise Http404

    grader_id = request.GET.get("grader_id")
    location = request.GET.get("location")

    if not grader_id or not location:
        error_message="Failed to find needed keys 'grader_id' and 'location'"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    pl = peer_grading_util.PeerLocation(location, grader_id)
    (found, sub_id) = pl.next_item()

    if not found:
        error_message="You have completed all of the existing peer grading or there are no more submissions waiting to be peer graded."
        log.error(error_message)
        return  util._error_response(error_message, _INTERFACE_VERSION)

    try:
        sub = Submission.objects.get(id=int(sub_id))
    except Exception:
        log.exception("Could not find submission with id {0}".format(sub_id))
        return util._error_response("Error getting grading.", _INTERFACE_VERSION)

    if sub.state != SubmissionState.being_graded:
        log.error("Submission with id {0} has incorrect internal state {1}.".format(sub_id, sub.state))
        return util._error_response("Error getting grading.", _INTERFACE_VERSION)

    response = {
        'submission_id': sub_id,
        'submission_key': sub.xqueue_submission_key,
        'student_response': sub.student_response,
        'prompt': sub.prompt,
        'rubric': sub.rubric,
        'max_score': sub.max_score,
    }

    return util._success_response(response, _INTERFACE_VERSION)
Example #12
def timing_metrics(request):
    """
    Request is an HTTP POST request with the following keys:
        Course_id
        Grader_type
        Location
    """

    if request.method != "POST":
        return util._error_response("Must make a POST request.", _INTERFACE_VERSION)

    arguments, title = metrics_util.get_arguments(request)
    success, response = metrics_util.generate_timing_response(arguments, title)

    if not success:
        return util._error_response(str(response), _INTERFACE_VERSION)

    return util._success_response({'img': response}, _INTERFACE_VERSION)
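
The view only accepts POST. A minimal posting sketch, with a hypothetical URL and lowercase parameter names assumed; what metrics_util.get_arguments actually expects should be checked against that helper.

from django.test import RequestFactory

request = RequestFactory().post("/metrics/timing/",            # hypothetical path
                                {"course_id": "org/course/run",
                                 "grader_type": "PE",
                                 "location": "i4x://org/course/problem/essay1"})
response = timing_metrics(request)
# On success the JSON body is assumed to carry the rendered chart under the 'img' key.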
Example #13
def timing_metrics(request):
    """
    Request is an HTTP POST request with the following keys:
        Course_id
        Grader_type
        Location
    """

    if request.method != "POST":
        return util._error_response("Must make a POST request.", _INTERFACE_VERSION)

    arguments, title = metrics_util.get_arguments(request)
    success, response = metrics_util.generate_timing_response(arguments, title)

    if not success:
        return util._error_response(str(response), _INTERFACE_VERSION)

    return util._success_response({'img': response}, _INTERFACE_VERSION)
Example #14
def get_notifications(request):
    if request.method!="GET":
        error_message="Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id=request.GET.get("course_id")

    if not course_id:
        error_message="Missing needed tag course_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    success, staff_needs_to_grade = staff_grading_util.get_staff_grading_notifications(course_id)
    if not success:
        return util._error_response(staff_needs_to_grade, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response({'staff_needs_to_grade' : staff_needs_to_grade}, _INTERFACE_VERSION)
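
A small polling sketch for the staff notification flag, assuming the JSON body exposes 'success' along with the 'staff_needs_to_grade' key returned above; the path and course_id are invented.

import json

from django.test import RequestFactory

request = RequestFactory().get("/staff_grading/get_notifications/",   # hypothetical path
                               {"course_id": "org/course/run"})
payload = json.loads(get_notifications(request).content)
if payload.get("success") and payload.get("staff_needs_to_grade"):
    print("Course has submissions waiting for instructor grading.")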
Example #15
def get_peer_grading_data_for_location(request):
    if request.method != 'GET':
        return util._error_response("Request type must be GET",
                                    _INTERFACE_VERSION)

    for tag in ['student_id', 'location']:
        if tag not in request.GET:
            return util._error_response("Missing required key {0}".format(tag),
                                        _INTERFACE_VERSION)

    location = request.GET.get('location')
    student_id = request.GET.get('student_id')

    student_sub_count = peer_grading_util.get_required_peer_grading_for_location(
        {
            'student_id': student_id,
            'location': location,
            'preferred_grader_type': "PE"
        })
    submissions_graded = peer_grading_util.peer_grading_submissions_graded_for_location(
        location, student_id).count()
    submissions_required = settings.REQUIRED_PEER_GRADING_PER_STUDENT * student_sub_count

    # Check to see if submissions were available to grade in the past week
    notification_seen_recently = NotificationsSeen.check_for_recent_notifications(
        student_id=student_id,
        location=location,
        notification_type=NotificationTypes.peer_grading,
        recent_notification_interval=settings.PEER_GRADING_TIMEOUT_INTERVAL)

    if not notification_seen_recently:
        submissions_required = submissions_graded

    peer_data = {
        'count_graded': submissions_graded,
        'count_required': submissions_required,
        'student_sub_count': student_sub_count,
    }

    util.log_connection_data()
    return util._success_response(peer_data, _INTERFACE_VERSION)
Example #16
def get_notifications(request):
    if request.method!="GET":
        error_message="Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id=request.GET.get("course_id")
    student_id = request.GET.get("student_id")

    if not course_id or not student_id:
        error_message="Missing needed tag course_id or student_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    pc = peer_grading_util.PeerCourse(course_id, student_id)
    success, student_needs_to_peer_grade = pc.notifications()
    if not success:
        return util._error_response(student_needs_to_peer_grade, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response({'student_needs_to_peer_grade' : student_needs_to_peer_grade}, _INTERFACE_VERSION)
Example #17
def get_notifications(request):
    if request.method != "GET":
        error_message = "Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id = request.GET.get("course_id")

    if not course_id:
        error_message = "Missing needed tag course_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    success, staff_needs_to_grade = staff_grading_util.get_staff_grading_notifications(
        course_id)
    if not success:
        return util._error_response(staff_needs_to_grade, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response(
        {'staff_needs_to_grade': staff_needs_to_grade}, _INTERFACE_VERSION)
Example #18
def show_calibration_essay(request):
    """
    Shows a calibration essay when it receives a GET request.
    Input:
        Http request containing problem_id and student_id
    Output:
        Http response containing essay data (submission id, submission key, student response, prompt, rubric, max_score)
        Or error
    """
    if request.method != "GET":
        raise Http404

    problem_id = request.GET.get("problem_id")
    student_id = request.GET.get("student_id")

    success, data = calibration.get_calibration_essay(problem_id, student_id)

    if not success:
        return util._error_response(data, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response(data, _INTERFACE_VERSION)
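
The calibration flow pairs this view with save_calibration_essay further down: fetch an essay, grade it, post the grade back. A hedged fetch sketch follows, with a hypothetical path, invented ids, and response keys taken from the docstring.

import json

from django.test import RequestFactory

request = RequestFactory().get("/peer_grading/show_calibration_essay/",   # hypothetical path
                               {"problem_id": "i4x://org/course/problem/essay1",
                                "student_id": "42"})
payload = json.loads(show_calibration_essay(request).content)
if payload.get("success"):
    print("{0} (max score {1})".format(payload["submission_id"], payload["max_score"]))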
Example #19
def show_calibration_essay(request):
    """
    Shows a calibration essay when it receives a GET request.
    Input:
        Http request containing problem_id and student_id
    Output:
        Http response containing essay data (submission id, submission key, student response, prompt, rubric, max_score)
        Or error
    """
    if request.method != "GET":
        raise Http404

    problem_id = request.GET.get("problem_id")
    student_id = request.GET.get("student_id")

    success, data = calibration.get_calibration_essay(problem_id, student_id)

    if not success:
        return util._error_response(data, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response(data, _INTERFACE_VERSION)
Example #20
def is_student_calibrated(request):
    """
    Decides if a student has fulfilled the criteria for peer grading calibration for a given location (problem id).
    Input:
        student_id, problem_id
    Output:
        Dictionary with a boolean 'calibrated' indicating whether or not the student has finished calibration.

    Note: Location in the database is currently being used as the problem id.
    """

    if request.method != "GET":
        raise Http404

    problem_id = request.GET.get("problem_id")
    student_id = request.GET.get("student_id")

    success, data = calibration.check_calibration_status(problem_id, student_id)

    if not success:
        return util._error_response(data, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response(data, _INTERFACE_VERSION)
Example #21
def get_notifications(request):
    if request.method != "GET":
        error_message = "Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id = request.GET.get("course_id")
    student_id = request.GET.get("student_id")

    if not course_id or not student_id:
        error_message = "Missing needed tag course_id or student_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    pc = peer_grading_util.PeerCourse(course_id, student_id)
    success, student_needs_to_peer_grade = pc.notifications()
    if not success:
        return util._error_response(student_needs_to_peer_grade,
                                    _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response(
        {'student_needs_to_peer_grade': student_needs_to_peer_grade},
        _INTERFACE_VERSION)
Example #22
def save_grade(request):
    """
    Supports POST requests with the following arguments:

    location: string
    grader_id: int
    submission_id: int
    score: int
    feedback: string
    submission_key : string

    Returns json dict with keys

    version: int
    success: bool
    error: string, present if not success
    """
    if request.method != "POST":
        raise Http404

    post_data = request.POST.dict().copy()
    log.debug(post_data)

    for tag in ["location", "grader_id", "submission_id", "submission_key", "score", "feedback", "submission_flagged"]:
        if tag not in post_data:
            return util._error_response("Cannot find needed key {0} in request.".format(tag), _INTERFACE_VERSION)

    location = post_data["location"]
    grader_id = post_data["grader_id"]
    submission_id = post_data["submission_id"]

    # Submission key currently unused, but plan to use it for validation in the future.
    submission_key = post_data["submission_key"]
    score = post_data["score"]

    # This is done to ensure that response is properly formatted on the lms side.
    feedback_dict = post_data["feedback"]

    rubric_scores_complete = request.POST.get("rubric_scores_complete", False)
    rubric_scores = request.POST.getlist("rubric_scores", [])

    is_submission_flagged = request.POST.get("submission_flagged", False)
    if isinstance(is_submission_flagged, basestring):
        is_submission_flagged = is_submission_flagged.lower() == "true"

    status = GraderStatus.success
    confidence = 1.0

    is_answer_unknown = request.POST.get("answer_unknown", False)
    if isinstance(is_answer_unknown, basestring):
        is_answer_unknown = is_answer_unknown.lower() == "true"

    if is_answer_unknown:
        status = GraderStatus.failure
        confidence = 0.0

    try:
        score = int(score)
    except ValueError:
        return util._error_response("Expected integer score.  Got {0}".format(score), _INTERFACE_VERSION)

    try:
        sub = Submission.objects.get(id=submission_id)
    except Exception:
        return util._error_response(
            "grade_save_error",
            _INTERFACE_VERSION,
            data={"msg": "Submission id {0} is not valid.".format(submission_id)},
        )

    success, error_message = grader_util.validate_rubric_scores(rubric_scores, rubric_scores_complete, sub)
    if not success:
        return util._error_response("grade_save_error", _INTERFACE_VERSION, data={"msg": error_message})

    d = {
        "submission_id": submission_id,
        "score": score,
        "feedback": feedback_dict,
        "grader_id": grader_id,
        "grader_type": "PE",
        # Humans always succeed (if they grade at all)...
        "status": status,
        # ...and they're always confident too.
        "confidence": confidence,
        # And they don't make any errors
        "errors": "",
        "rubric_scores_complete": rubric_scores_complete,
        "rubric_scores": rubric_scores,
        "is_submission_flagged": is_submission_flagged,
    }

    # Currently not posting back to LMS.  Only saving grader object, and letting controller decide when to post back.
    (success, header) = grader_util.create_and_handle_grader_object(d)
    if not success:
        return util._error_response("There was a problem saving the grade.  Contact support.", _INTERFACE_VERSION)

    # xqueue_session=util.xqueue_login()
    # error,msg = util.post_results_to_xqueue(xqueue_session,json.dumps(header),json.dumps(post_data))

    util.log_connection_data()
    return util._success_response({"msg": "Posted to queue."}, _INTERFACE_VERSION)
Example #23
def get_next_submission(request):
    """
    Supports GET request with the following arguments:
    course_id -- the course for which to return a submission.
    grader_id -- LMS user_id of the requesting user

    Returns json dict with the following keys:

    version: '1'  (number)

    success: bool

    if success:
      'submission_id': a unique identifier for the submission, to be passed
                       back with the grade.

      'submission': the submission, rendered as read-only html for grading

      'rubric': the rubric, also rendered as html.

      'prompt': the question prompt, also rendered as html.

      'message': if there was no submission available, but nothing went wrong,
                there will be a message field.
    else:
      'error': if success is False, will have an error message with more info.
    }
    """

    if request.method != "GET":
        raise Http404

    course_id = request.GET.get('course_id')
    grader_id = request.GET.get('grader_id')
    location = request.GET.get('location')
    found = False

    if not (course_id or location) or not grader_id:
        return util._error_response("required_parameter_missing", _INTERFACE_VERSION)

    if location:
        sl = staff_grading_util.StaffLocation(location)
        (found, sid) = sl.next_item()

    # TODO: save the grader id and match it in save_grade to make sure things
    # are consistent.
    if not location:
        sc = staff_grading_util.StaffCourse(course_id)
        (found, sid) = sc.next_item()

    if not found:
        return util._success_response({'message': 'No more submissions to grade.'},
                                      _INTERFACE_VERSION)
    try:
        submission = Submission.objects.get(id=int(sid))
    except Submission.DoesNotExist:
        log.error("Couldn't find submission %s for instructor grading", sid)
        return util._error_response('failed_to_load_submission',
                                    _INTERFACE_VERSION,
                                    data={'submission_id': sid})

    # Get error metrics from ml grading, and get into dictionary form to pass down to staff grading view
    success, ml_error_info = ml_grading_util.get_ml_errors(submission.location)
    if success:
        ml_error_message = staff_grading_util.generate_ml_error_message(ml_error_info)
    else:
        ml_error_message = ml_error_info

    ml_error_message = "Machine learning error information: " + ml_error_message

    sl = staff_grading_util.StaffLocation(submission.location)
    control = SubmissionControl(sl.latest_submission())
    if submission.state != SubmissionState.being_graded:
        log.error("Instructor grading got submission {0} in an invalid state {1} ".format(sid, submission.state))
        return util._error_response('wrong_internal_state',
                                    _INTERFACE_VERSION,
                                    data={'submission_id': sid,
                                          'submission_state': submission.state})

    response = {'submission_id': sid,
                'submission': submission.student_response,
                'rubric': submission.rubric,
                'prompt': submission.prompt,
                'max_score': submission.max_score,
                'ml_error_info': ml_error_message,
                'problem_name': submission.problem_id,
                'num_graded': sl.graded_count(),
                'num_pending': sl.pending_count(),
                'min_for_ml': control.minimum_to_use_ai,
                }

    util.log_connection_data()
    return util._success_response(response, _INTERFACE_VERSION)
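
For the staff view above, either course_id or location selects the queue. A hedged sketch of the location-scoped call (hypothetical path, invented values; the response keys mirror the dict built in the view, and a body without submission_id means nothing was left to grade).

import json

from django.test import RequestFactory

request = RequestFactory().get("/staff_grading/get_next_submission/",   # hypothetical path
                               {"grader_id": "7",
                                "location": "i4x://org/course/problem/essay1"})
payload = json.loads(get_next_submission(request).content)
if payload.get("success") and "submission_id" in payload:
    print("{0}: {1} pending".format(payload["problem_name"], payload["num_pending"]))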
Example #24
def save_grade(request):
    """
    Supports POST requests with the following arguments:

    course_id: int
    grader_id: int
    submission_id: int
    score: int
    feedback: string

    Returns json dict with keys

    version: int
    success: bool
    error: string, present if not success
    """
    if request.method != "POST":
        return util._error_response("Request needs to be GET", _INTERFACE_VERSION)

    course_id = request.POST.get('course_id')
    grader_id = request.POST.get('grader_id')
    submission_id = request.POST.get('submission_id')
    score = request.POST.get('score')
    feedback = request.POST.get('feedback')
    skipped = request.POST.get('skipped')=="True"
    rubric_scores_complete = request.POST.get('rubric_scores_complete', False)
    rubric_scores = request.POST.getlist('rubric_scores', [])
    is_submission_flagged = request.POST.get('submission_flagged', False)
    if isinstance(is_submission_flagged, basestring):
        is_submission_flagged = is_submission_flagged.lower() == 'true'

    if (# These have to be truthy
        not (course_id and grader_id and submission_id) or
        # These have to be non-None
        score is None or feedback is None):
        return util._error_response("required_parameter_missing", _INTERFACE_VERSION)

    if skipped:
        success, sub = staff_grading_util.set_instructor_grading_item_back_to_preferred_grader(submission_id)

        if not success:
            return util._error_response(sub, _INTERFACE_VERSION)

        return util._success_response({}, _INTERFACE_VERSION)

    try:
        score = int(score)
    except ValueError:
        return util._error_response(
            "grade_save_error",
            _INTERFACE_VERSION,
            data={"msg": "Expected integer score.  Got {0}".format(score)})

    try:
        sub = Submission.objects.get(id=submission_id)
    except Exception:
        return util._error_response(
            "grade_save_error",
            _INTERFACE_VERSION,
            data={"msg": "Submission id {0} is not valid.".format(submission_id)}
        )

    first_sub_for_location = Submission.objects.filter(location=sub.location).order_by('date_created')[0]
    rubric = first_sub_for_location.rubric
    rubric_success, parsed_rubric = rubric_functions.parse_rubric(rubric)

    if rubric_success:
        success, error_message = grader_util.validate_rubric_scores(rubric_scores, rubric_scores_complete, sub)
        if not success:
            return util._error_response(
                "grade_save_error",
                _INTERFACE_VERSION,
                data={"msg": error_message}
            )

    d = {'submission_id': submission_id,
         'score': score,
         'feedback': feedback,
         'grader_id': grader_id,
         'grader_type': 'IN',
         # Humans always succeed (if they grade at all)...
         'status': GraderStatus.success,
         # ...and they're always confident too.
         'confidence': 1.0,
         # And they don't make errors
         'errors': "",
         'rubric_scores_complete': rubric_scores_complete,
         'rubric_scores': rubric_scores,
         'is_submission_flagged': is_submission_flagged,
         }

    success, header = grader_util.create_and_handle_grader_object(d)

    if not success:
        return util._error_response("grade_save_error", _INTERFACE_VERSION,
                                    data={'msg': 'Internal error'})

    util.log_connection_data()
    return util._success_response({}, _INTERFACE_VERSION)
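
An instructor-side counterpart sketch for the save_grade view above; all values are invented, and skipped is sent as the literal string "False" because the view compares against "True".

from django.test import RequestFactory

request = RequestFactory().post(
    "/staff_grading/save_grade/",                      # hypothetical path
    {
        "course_id": "org/course/run",
        "grader_id": "7",
        "submission_id": "1001",
        "score": "3",
        "feedback": "Good structure overall.",
        "skipped": "False",                            # "True" would requeue the item instead
        "rubric_scores_complete": "True",
        "rubric_scores": ["1", "2"],
        "submission_flagged": "false",
    },
)
response = save_grade(request)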
Example #25
def get_next_submission(request):
    """
    Supports GET request with the following arguments:
    course_id -- the course for which to return a submission.
    grader_id -- LMS user_id of the requesting user

    Returns json dict with the following keys:

    version: '1'  (number)

    success: bool

    if success:
      'submission_id': a unique identifier for the submission, to be passed
                       back with the grade.

      'submission': the submission, rendered as read-only html for grading

      'rubric': the rubric, also rendered as html.

      'prompt': the question prompt, also rendered as html.

      'message': if there was no submission available, but nothing went wrong,
                there will be a message field.
    else:
      'error': if success is False, will have an error message with more info.
    }
    """

    if request.method != "GET":
        raise Http404

    course_id = request.GET.get('course_id')
    grader_id = request.GET.get('grader_id')
    location = request.GET.get('location')

    log.debug("Getting next submission for instructor grading for course: {0}.".format(course_id))

    if not (course_id or location) or not grader_id:
        return util._error_response("required_parameter_missing",
                                    _INTERFACE_VERSION)

    if location:
        (found, id) = staff_grading_util.get_single_instructor_grading_item_for_location(
            location)

    # TODO: save the grader id and match it in save_grade to make sure things
    # are consistent.
    if not location:
        (found, id) = staff_grading_util.get_single_instructor_grading_item(course_id)

    if not found:
        return util._success_response(
            {'message': 'No more submissions to grade.'}, _INTERFACE_VERSION)

    try:
        submission = Submission.objects.get(id=int(id))
    except Submission.DoesNotExist:
        log.error("Couldn't find submission %s for instructor grading", id)
        return util._error_response('failed_to_load_submission',
                                    _INTERFACE_VERSION,
                                    data={'submission_id': id})

    # Get error metrics from ml grading, and get into dictionary form to pass down to staff grading view
    success, ml_error_info = ml_grading_util.get_ml_errors(submission.location)
    if success:
        ml_error_message = staff_grading_util.generate_ml_error_message(
            ml_error_info)
    else:
        ml_error_message = ml_error_info

    ml_error_message = "Machine learning error information: " + ml_error_message

    if submission.state != 'C':
        log.error(
            "Instructor grading got a submission (%s) in an invalid state: %s",
            id, submission.state)
        return util._error_response('wrong_internal_state',
                                    _INTERFACE_VERSION,
                                    data={
                                        'submission_id': id,
                                        'submission_state': submission.state
                                    })

    num_graded, num_pending = staff_grading_util.count_submissions_graded_and_pending_instructor(
        submission.location)

    response = {
        'submission_id': id,
        'submission': submission.student_response,
        'rubric': submission.rubric,
        'prompt': submission.prompt,
        'max_score': submission.max_score,
        'ml_error_info': ml_error_message,
        'problem_name': submission.problem_id,
        'num_graded': staff_grading_util.finished_submissions_graded_by_instructor(
            submission.location).count(),
        'num_pending': staff_grading_util.submissions_pending_instructor(
            submission.location,
            state_in=[SubmissionState.waiting_to_be_graded]).count(),
        'min_for_ml': settings.MIN_TO_USE_ML,
    }

    util.log_connection_data()
    log.debug("Sending success response back to instructor grading!")
    log.debug("Sub id from get next: {0}".format(submission.id))
    return util._success_response(response, _INTERFACE_VERSION)
Example #26
def get_problem_list(request):
    """
    Get the list of problems that need grading in course request.GET['course_id'].

    Returns:
        list of dicts with keys
           'location'
           'problem_name'
           'num_graded' -- number graded
           'num_pending' -- number pending in the queue
           'min_for_ml' -- minimum needed to make ML model
    """

    if request.method != "GET":
        error_message = "Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id = request.GET.get("course_id")

    if not course_id:
        error_message = "Missing needed tag course_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    locations_for_course = [
        x['location'] for x in list(
            Submission.objects.filter(
                course_id=course_id).values('location').distinct())
    ]

    if len(locations_for_course) == 0:
        error_message = "No problems associated with course."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    location_info = []
    for location in locations_for_course:
        problem_name = Submission.objects.filter(
            location=location)[0].problem_id
        submissions_pending = staff_grading_util.submissions_pending_instructor(
            location, state_in=[SubmissionState.waiting_to_be_graded]).count()
        finished_instructor_graded = staff_grading_util.finished_submissions_graded_by_instructor(
            location).count()
        min_scored_for_location = settings.MIN_TO_USE_PEER
        location_ml_count = Submission.objects.filter(
            location=location, preferred_grader_type="ML").count()
        if location_ml_count > 0:
            min_scored_for_location = settings.MIN_TO_USE_ML

        submissions_required = max(
            [0, min_scored_for_location - finished_instructor_graded])

        problem_name_from_location = location.split("://")[1]
        location_dict = {
            'location': location,
            'problem_name': problem_name,
            'problem_name_from_location': problem_name_from_location,
            'num_graded': finished_instructor_graded,
            'num_pending': submissions_pending,
            'num_required': submissions_required,
            'min_for_ml': settings.MIN_TO_USE_ML,
        }
        location_info.append(location_dict)

    util.log_connection_data()
    log.debug(location_info)
    return util._success_response({'problem_list': location_info},
                                  _INTERFACE_VERSION)
Example #27
def save_grade(request):
    """
    Supports POST requests with the following arguments:

    course_id: int
    grader_id: int
    submission_id: int
    score: int
    feedback: string

    Returns json dict with keys

    version: int
    success: bool
    error: string, present if not success
    """
    if request.method != "POST":
        return util._error_response("Request needs to be GET",
                                    _INTERFACE_VERSION)

    course_id = request.POST.get('course_id')
    grader_id = request.POST.get('grader_id')
    submission_id = request.POST.get('submission_id')
    score = request.POST.get('score')
    feedback = request.POST.get('feedback')
    skipped = request.POST.get('skipped') == "True"
    rubric_scores_complete = request.POST.get('rubric_scores_complete', False)
    rubric_scores = request.POST.getlist('rubric_scores', [])
    is_submission_flagged = request.POST.get('submission_flagged', False)
    if isinstance(is_submission_flagged, basestring):
        is_submission_flagged = is_submission_flagged.lower() == 'true'

    if (  # These have to be truthy
            not (course_id and grader_id and submission_id) or
            # These have to be non-None
            score is None or feedback is None):
        return util._error_response("required_parameter_missing",
                                    _INTERFACE_VERSION)

    if skipped:
        log.debug(submission_id)
        success, sub = staff_grading_util.set_instructor_grading_item_back_to_ml(
            submission_id)

        if not success:
            return util._error_response(sub, _INTERFACE_VERSION)

        return util._success_response({}, _INTERFACE_VERSION)

    try:
        score = int(score)
    except ValueError:
        return util._error_response(
            "grade_save_error",
            _INTERFACE_VERSION,
            data={"msg": "Expected integer score.  Got {0}".format(score)})

    try:
        sub = Submission.objects.get(id=submission_id)
    except Exception:
        return util._error_response(
            "grade_save_error",
            _INTERFACE_VERSION,
            data={
                "msg": "Submission id {0} is not valid.".format(submission_id)
            })

    first_sub_for_location = Submission.objects.filter(
        location=sub.location).order_by('date_created')[0]
    rubric = first_sub_for_location.rubric
    rubric_success, parsed_rubric = rubric_functions.parse_rubric(rubric)

    if rubric_success:
        success, error_message = grader_util.validate_rubric_scores(
            rubric_scores, rubric_scores_complete, sub)
        if not success:
            return util._error_response("grade_save_error",
                                        _INTERFACE_VERSION,
                                        data={"msg": error_message})

    d = {
        'submission_id': submission_id,
        'score': score,
        'feedback': feedback,
        'grader_id': grader_id,
        'grader_type': 'IN',
        # Humans always succeed (if they grade at all)...
        'status': GraderStatus.success,
        # ...and they're always confident too.
        'confidence': 1.0,
        # And they don't make errors
        'errors': "",
        'rubric_scores_complete': rubric_scores_complete,
        'rubric_scores': rubric_scores,
        'is_submission_flagged': is_submission_flagged,
    }

    success, header = grader_util.create_and_handle_grader_object(d)

    if not success:
        return util._error_response("grade_save_error",
                                    _INTERFACE_VERSION,
                                    data={'msg': 'Internal error'})

    util.log_connection_data()
    return util._success_response({}, _INTERFACE_VERSION)
Example #28
def save_calibration_essay(request):
    """
    Saves a calibration essay sent back from LMS.
    Input:
        request dict containing keys student_id, location, calibration_essay_id, score, submission_key, feedback
    Output:
        Boolean indicating success in saving calibration essay or not.
    """

    if request.method != "POST":
        raise Http404

    post_data = request.POST.dict().copy()

    for tag in [
            'location', 'student_id', 'calibration_essay_id', 'submission_key',
            'score', 'feedback'
    ]:
        if tag not in post_data:
            return util._error_response(
                "Cannot find needed key {0} in request.".format(tag),
                _INTERFACE_VERSION)

    location = post_data['location']
    student_id = post_data['student_id']
    submission_id = post_data['calibration_essay_id']
    score = post_data['score']
    feedback = post_data['feedback']

    # Submission key currently unused, but plan to use it for validation in the future.
    submission_key = post_data['submission_key']

    rubric_scores_complete = request.POST.get('rubric_scores_complete', False)
    rubric_scores = request.POST.getlist('rubric_scores', [])

    try:
        score = int(score)
    except ValueError:
        return util._error_response(
            "Expected integer score.  Got {0}".format(score),
            _INTERFACE_VERSION)

    try:
        sub = Submission.objects.get(id=submission_id)
    except Exception:
        return util._error_response(
            "grade_save_error",
            _INTERFACE_VERSION,
            data={
                "msg": "Submission id {0} is not valid.".format(submission_id)
            })

    d = {
        'submission_id': submission_id,
        'score': score,
        'feedback': feedback,
        'student_id': student_id,
        'location': location,
        'rubric_scores_complete': rubric_scores_complete,
        'rubric_scores': rubric_scores,
    }

    (success, data) = calibration.create_and_save_calibration_record(d)

    if not success:
        error_msg = "Failed to create and save calibration record. {0}".format(
            data)
        log.error(error_msg)
        return util._error_response(error_msg, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response(
        {
            'message': "Successfully saved calibration record.",
            'actual_score': data['actual_score'],
            'actual_rubric': data['actual_rubric'],
            'actual_feedback': data['actual_feedback']
        }, _INTERFACE_VERSION)
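
And the write-back half of the calibration flow, as a hedged sketch: the path and all values are invented, and the actual_* keys are assumed to come back in the JSON body exactly as shown above.

import json

from django.test import RequestFactory

request = RequestFactory().post(
    "/peer_grading/save_calibration_essay/",           # hypothetical path
    {
        "location": "i4x://org/course/problem/essay1",
        "student_id": "42",
        "calibration_essay_id": "500",
        "submission_key": "abc123",
        "score": "2",
        "feedback": "Matched the rubric closely.",
        "rubric_scores_complete": "True",
        "rubric_scores": ["1", "1"],
    },
)
payload = json.loads(save_calibration_essay(request).content)
print(payload.get("actual_score"))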
Example #29
def get_problem_list(request):
    """
    Get the list of problems that need grading in course_id request.GET['course_id'].

    Returns:
        list of dicts with keys
           'location'
           'problem_name'
           'num_graded' -- number graded
           'num_pending' -- number pending in the queue
    """

    if request.method != "GET":
        error_message = "Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id = request.GET.get("course_id")
    student_id = request.GET.get("student_id")

    if not course_id or not student_id:
        error_message = "Missing needed tag course_id or student_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    locations_for_course = [
        x['location'] for x in list(
            Submission.objects.filter(
                course_id=course_id).values('location').distinct())
    ]

    location_info = []
    for location in locations_for_course:
        student_sub_count = peer_grading_util.get_required_peer_grading_for_location(
            {
                'student_id': student_id,
                'location': location,
                'preferred_grader_type': "PE"
            })
        if student_sub_count > 0:
            problem_name = Submission.objects.filter(
                location=location)[0].problem_id
            submissions_pending = peer_grading_util.peer_grading_submissions_pending_for_location(
                location, student_id).count()
            submissions_graded = peer_grading_util.peer_grading_submissions_graded_for_location(
                location, student_id).count()
            submissions_required = max([
                0,
                (settings.REQUIRED_PEER_GRADING_PER_STUDENT *
                 student_sub_count) - submissions_graded
            ])

            problem_name_from_location = location.split("://")[1]
            if submissions_graded > 0 or submissions_pending > 0:
                location_dict = {
                    'location': location,
                    'problem_name': problem_name,
                    'num_graded': submissions_graded,
                    'num_required': submissions_required,
                    'num_pending': submissions_pending,
                }
                location_info.append(location_dict)

    util.log_connection_data()
    return util._success_response({'problem_list': location_info},
                                  _INTERFACE_VERSION)
Example #30
def save_calibration_essay(request):
    """
    Saves a calibration essay sent back from LMS.
    Input:
        request dict containing keys student_id, location, calibration_essay_id, score, submission_key, feedback
    Output:
        Boolean indicating success in saving calibration essay or not.
    """

    if request.method != "POST":
        raise Http404

    post_data = request.POST.dict().copy()

    for tag in ['location', 'student_id', 'calibration_essay_id', 'submission_key', 'score', 'feedback']:
        if tag not in post_data:
            return util._error_response("Cannot find needed key {0} in request.".format(tag), _INTERFACE_VERSION)

    location = post_data['location']
    student_id = post_data['student_id']
    submission_id = post_data['calibration_essay_id']
    score = post_data['score']
    feedback = post_data['feedback']

    # Submission key currently unused, but plan to use it for validation in the future.
    submission_key = post_data['submission_key']

    rubric_scores_complete = request.POST.get('rubric_scores_complete', False)
    rubric_scores = request.POST.getlist('rubric_scores', [])

    try:
        score = int(score)
    except ValueError:
        return util._error_response("Expected integer score.  Got {0}".format(score), _INTERFACE_VERSION)

    try:
        sub = Submission.objects.get(id=submission_id)
    except Exception:
        return util._error_response(
            "grade_save_error",
            _INTERFACE_VERSION,
            data={"msg": "Submission id {0} is not valid.".format(submission_id)}
        )

    d = {'submission_id': submission_id,
         'score': score,
         'feedback': feedback,
         'student_id': student_id,
         'location': location,
         'rubric_scores_complete': rubric_scores_complete,
         'rubric_scores': rubric_scores,
    }

    (success, data) = calibration.create_and_save_calibration_record(d)

    if not success:
        error_msg = "Failed to create and save calibration record. {0}".format(data)
        log.error(error_msg)
        return util._error_response(error_msg, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response({
        'message': "Successfully saved calibration record.",
        'actual_score': data['actual_score'],
        'actual_rubric': data['actual_rubric'],
        'actual_feedback': data['actual_feedback'],
    }, _INTERFACE_VERSION)
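
# A hedged usage sketch (not from the original codebase) showing how the
# save_calibration_essay view above could be exercised with Django's
# RequestFactory.  The URL path and all field values are illustrative
# assumptions; only the POST keys mirror the ones the view checks for.
from django.test.client import RequestFactory

factory = RequestFactory()
request = factory.post(
    '/peer_grading/save_calibration_essay/',   # path is arbitrary for RequestFactory
    {
        'student_id': '42',
        'location': 'i4x://org/course/problem/essay1',
        'calibration_essay_id': '7',
        'submission_key': 'abc123',
        'score': '2',
        'feedback': 'Clear thesis, weak evidence.',
    })
response = save_calibration_essay(request)
# On success, the response carries the actual score, rubric, and feedback for
# the calibration essay so the LMS can show the student how their grade compares.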
Beispiel #31
0
def save_grade(request):
    """
    Supports POST requests with the following arguments:

    location: string
    grader_id: int
    submission_id: int
    score: int
    feedback: string
    submission_key : string

    Returns json dict with keys

    version: int
    success: bool
    error: string, present if not success
    """
    if request.method != "POST":
        raise Http404

    post_data = request.POST.dict().copy()
    log.debug(post_data)

    for tag in [
            'location', 'grader_id', 'submission_id', 'submission_key',
            'score', 'feedback', 'submission_flagged'
    ]:
        if tag not in post_data:
            return util._error_response(
                "Cannot find needed key {0} in request.".format(tag),
                _INTERFACE_VERSION)

    location = post_data['location']
    grader_id = post_data['grader_id']
    submission_id = post_data['submission_id']

    # The submission key is currently unused, but may be used for validation in the future.
    submission_key = post_data['submission_key']
    score = post_data['score']

    # The feedback is passed through unchanged so that the response is properly formatted on the LMS side.
    feedback_dict = post_data['feedback']

    rubric_scores_complete = request.POST.get('rubric_scores_complete', False)
    rubric_scores = request.POST.getlist('rubric_scores', [])

    is_submission_flagged = request.POST.get('submission_flagged', False)
    if isinstance(is_submission_flagged, basestring):
        is_submission_flagged = (is_submission_flagged.lower() == "true")

    status = GraderStatus.success
    confidence = 1.0

    is_answer_unknown = request.POST.get('answer_unknown', False)
    if isinstance(is_answer_unknown, basestring):
        is_answer_unknown = (is_answer_unknown.lower() == "true")

    if is_answer_unknown:
        status = GraderStatus.failure
        confidence = 0.0

    try:
        score = int(score)
    except ValueError:
        return util._error_response(
            "Expected integer score.  Got {0}".format(score),
            _INTERFACE_VERSION)

    try:
        sub = Submission.objects.get(id=submission_id)
    except Exception:
        return util._error_response(
            "grade_save_error",
            _INTERFACE_VERSION,
            data={
                "msg": "Submission id {0} is not valid.".format(submission_id)
            })

    success, error_message = grader_util.validate_rubric_scores(
        rubric_scores, rubric_scores_complete, sub)
    if not success:
        return util._error_response("grade_save_error",
                                    _INTERFACE_VERSION,
                                    data={"msg": error_message})

    d = {
        'submission_id': submission_id,
        'score': score,
        'feedback': feedback_dict,
        'grader_id': grader_id,
        'grader_type': 'PE',
        # Humans always succeed (if they grade at all)...
        'status': status,
        # ...and they're always confident too.
        'confidence': confidence,
        #And they don't make any errors
        'errors': "",
        'rubric_scores_complete': rubric_scores_complete,
        'rubric_scores': rubric_scores,
        'is_submission_flagged': is_submission_flagged,
    }

    # Not currently posting back to the LMS; only the grader object is saved, and the controller decides when to post back.
    (success, header) = grader_util.create_and_handle_grader_object(d)
    if not success:
        return util._error_response(
            "There was a problem saving the grade.  Contact support.",
            _INTERFACE_VERSION)

    #xqueue_session=util.xqueue_login()
    #error,msg = util.post_results_to_xqueue(xqueue_session,json.dumps(header),json.dumps(post_data))

    util.log_connection_data()
    return util._success_response({'msg': "Posted to queue."},
                                  _INTERFACE_VERSION)
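
# A hedged sketch (assumed, not from the original code) of posting a peer grade
# to the save_grade view above.  Passing a list for 'rubric_scores' makes
# RequestFactory repeat the field, so request.POST.getlist('rubric_scores')
# inside the view receives one score per rubric category.
from django.test.client import RequestFactory

factory = RequestFactory()
request = factory.post(
    '/peer_grading/save_grade/',           # illustrative path
    {
        'location': 'i4x://org/course/problem/essay1',
        'grader_id': '42',
        'submission_id': '1001',
        'submission_key': 'abc123',
        'score': '3',
        'feedback': 'Good structure; cite your sources.',
        'submission_flagged': 'false',
        'rubric_scores_complete': 'True',
        'rubric_scores': ['1', '0', '2'],  # one entry per rubric category (assumed)
    })
response = save_grade(request)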
Beispiel #32
0
def save_grade(request):
    """
    Supports POST requests with the following arguments:

    location: string
    grader_id: int
    submission_id: int
    score: int
    feedback: string
    submission_key : string

    Returns json dict with keys

    version: int
    success: bool
    error: string, present if not success
    """
    if request.method != "POST":
        raise Http404

    post_data = request.POST.dict().copy()

    for tag in ['location', 'grader_id', 'submission_id', 'submission_key', 'score', 'feedback', 'submission_flagged']:
        if tag not in post_data:
            return util._error_response("Cannot find needed key {0} in request.".format(tag), _INTERFACE_VERSION)

    location = post_data['location']
    grader_id = post_data['grader_id']
    submission_id = post_data['submission_id']

    # The submission key is currently unused, but may be used for validation in the future.
    submission_key = post_data['submission_key']
    score = post_data['score']

    # The feedback is passed through unchanged so that the response is properly formatted on the LMS side.
    feedback_dict = post_data['feedback']

    rubric_scores_complete = request.POST.get('rubric_scores_complete', False)
    rubric_scores = request.POST.getlist('rubric_scores', [])

    is_submission_flagged = request.POST.get('submission_flagged', False)
    if isinstance(is_submission_flagged, basestring):
        is_submission_flagged = (is_submission_flagged.lower() == "true")

    status = GraderStatus.success
    confidence = 1.0

    is_answer_unknown = request.POST.get('answer_unknown', False)
    if isinstance(is_answer_unknown, basestring):
        is_answer_unknown = (is_answer_unknown.lower() == "true")

    if is_answer_unknown:
        status = GraderStatus.failure
        confidence = 0.0

    try:
        score = int(score)
    except ValueError:
        # The score may not be filled out if the answer is unknown or the submission is flagged.
        if is_answer_unknown or is_submission_flagged:
            score = 0
        else:
            return util._error_response("Expected integer score.  Got {0}".format(score), _INTERFACE_VERSION)

    try:
        sub = Submission.objects.get(id=submission_id)
    except Exception:
        return util._error_response(
            "grade_save_error",
            _INTERFACE_VERSION,
            data={"msg": "Submission id {0} is not valid.".format(submission_id)}
        )

    # Patch to handle rubric scores when the answer is unknown or the submission is flagged and the scores aren't filled out.
    # The parentheses matter: without them "and" binds tighter than "or", and flagged submissions with empty rubric scores would be skipped.
    if (is_answer_unknown or is_submission_flagged) and len(rubric_scores) == 0:
        success, targets = rubric_functions.generate_targets_from_rubric(sub.rubric)
        rubric_scores = [0 for _ in targets]
        rubric_scores_complete = True

    success, error_message = grader_util.validate_rubric_scores(rubric_scores, rubric_scores_complete, sub)
    if not success:
        return util._error_response(
            "grade_save_error",
            _INTERFACE_VERSION,
            data={"msg": error_message}
        )

    d = {'submission_id': submission_id,
         'score': score,
         'feedback': feedback_dict,
         'grader_id': grader_id,
         'grader_type': 'PE',
         # Humans always succeed (if they grade at all)...
         'status': status,
         # ...and they're always confident too.
         'confidence': confidence,
         # ...and they don't make any errors.
         'errors': "",
         'rubric_scores_complete': rubric_scores_complete,
         'rubric_scores': rubric_scores,
         'is_submission_flagged': is_submission_flagged,
    }

    # Not currently posting back to the LMS; only the grader object is saved, and the controller decides when to post back.
    (success, header) = grader_util.create_and_handle_grader_object(d)
    if not success:
        return util._error_response("There was a problem saving the grade.  Contact support.", _INTERFACE_VERSION)

    #xqueue_session=util.xqueue_login()
    #error,msg = util.post_results_to_xqueue(xqueue_session,json.dumps(header),json.dumps(post_data))

    util.log_connection_data()
    return util._success_response({'msg': "Posted to queue."}, _INTERFACE_VERSION)
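
# A short illustration (assumptions only) of the extra handling this variant of
# save_grade adds.  The LMS sends flags as strings, so the view lowercases and
# compares them; when the grader flags the essay or marks the answer unknown
# without filling in the rubric, zeros are substituted for the missing rows.
post_flag = 'True'                                   # flag value as it arrives from the LMS
is_submission_flagged = post_flag.lower() == 'true'  # -> True

is_answer_unknown = False
rubric_scores = []                                   # the grader skipped the rubric
rubric_scores_complete = False
if (is_answer_unknown or is_submission_flagged) and len(rubric_scores) == 0:
    # Mirrors the patch above: substitute zeros for the missing rubric rows
    # (three rubric categories are assumed here purely for illustration).
    rubric_scores = [0] * 3
    rubric_scores_complete = True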
Beispiel #33
0
def get_next_submission(request):
    """
    Supports GET request with the following arguments:
    course_id -- the course for which to return a submission.
    grader_id -- LMS user_id of the requesting user

    Returns json dict with the following keys:

    version: '1'  (number)

    success: bool

    if success:
      'submission_id': a unique identifier for the submission, to be passed
                       back with the grade.

      'submission': the submission, rendered as read-only html for grading

      'rubric': the rubric, also rendered as html.

      'prompt': the question prompt, also rendered as html.

      'message': if there was no submission available, but nothing went wrong,
                there will be a message field.
    else:
      'error': if success is False, will have an error message with more info.
    """

    if request.method != "GET":
        raise Http404

    course_id = request.GET.get('course_id')
    grader_id = request.GET.get('grader_id')
    location = request.GET.get('location')

    log.debug("Getting next submission for instructor grading for course: {0}."
              .format(course_id))

    if not (course_id or location) or not grader_id:
        return util._error_response("required_parameter_missing", _INTERFACE_VERSION)

    if location:
        (found, id) = staff_grading_util.get_single_instructor_grading_item_for_location(location)

    # TODO: save the grader id and match it in save_grade to make sure things
    # are consistent.
    if not location:
        (found, id) = staff_grading_util.get_single_instructor_grading_item(course_id)

    if not found:
        return util._success_response({'message': 'No more submissions to grade.'},
                                      _INTERFACE_VERSION)

    try:
        submission = Submission.objects.get(id=int(id))
    except Submission.DoesNotExist:
        log.error("Couldn't find submission %s for instructor grading", id)
        return util._error_response('failed_to_load_submission',
                                    _INTERFACE_VERSION,
                                    data={'submission_id': id})

    # Get error metrics from ML grading and turn them into a message to pass down to the staff grading view.
    success, ml_error_info = ml_grading_util.get_ml_errors(submission.location)
    if success:
        ml_error_message = staff_grading_util.generate_ml_error_message(ml_error_info)
    else:
        ml_error_message = ml_error_info

    ml_error_message = "Machine learning error information: " + ml_error_message

    if submission.state != 'C':
        log.error("Instructor grading got a submission (%s) in an invalid state: %s",
                  id, submission.state)
        return util._error_response('wrong_internal_state',
                                    _INTERFACE_VERSION,
                                    data={'submission_id': id,
                                     'submission_state': submission.state})

    num_graded, num_pending = staff_grading_util.count_submissions_graded_and_pending_instructor(submission.location)

    response = {'submission_id': id,
                'submission': submission.student_response,
                'rubric': submission.rubric,
                'prompt': submission.prompt,
                'max_score': submission.max_score,
                'ml_error_info': ml_error_message,
                'problem_name': submission.problem_id,
                'num_graded': staff_grading_util.finished_submissions_graded_by_instructor(submission.location).count(),
                'num_pending': staff_grading_util.submissions_pending_instructor(
                    submission.location,
                    state_in=[SubmissionState.waiting_to_be_graded]).count(),
                'min_for_ml': settings.MIN_TO_USE_ML,
                }

    util.log_connection_data()
    log.debug("Sending success response back to instructor grading!")
    log.debug("Sub id from get next: {0}".format(submission.id))
    return util._success_response(response, _INTERFACE_VERSION)
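
# A hedged sketch (not part of the original module) of requesting the next
# submission for instructor grading.  The path, course id, and grader id are
# illustrative assumptions, and util._success_response is assumed to return an
# HttpResponse whose body is JSON.
import json
from django.test.client import RequestFactory

factory = RequestFactory()
request = factory.get(
    '/staff_grading/get_next_submission/',
    {'course_id': 'org/course/run', 'grader_id': '42'})
response = get_next_submission(request)

payload = json.loads(response.content)
if payload.get('success'):
    # The submission_id must be echoed back later when the grade is saved.
    submission_id = payload.get('submission_id')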