Example #1
def get_peer_grading_data_for_location(request):
    if request.method != 'GET':
        return util._error_response("Request type must be GET",
                                    _INTERFACE_VERSION)

    for tag in ['student_id', 'location']:
        if tag not in request.GET:
            return util._error_response("Missing required key {0}".format(tag),
                                        _INTERFACE_VERSION)

    location = request.GET.get('location')
    student_id = request.GET.get('student_id')

    pl = peer_grading_util.PeerLocation(location, student_id)
    student_sub_count = pl.submitted_count()

    submissions_graded = pl.graded_count()
    submissions_required = pl.required_count()
    submissions_available = pl.pending_count()

    peer_data = {
        'count_graded': submissions_graded,
        'count_required': submissions_required,
        'student_sub_count': student_sub_count,
        'count_available': submissions_available
    }

    util.log_connection_data()
    return util._success_response(peer_data, _INTERFACE_VERSION)
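A minimal client-side sketch of calling this view (not from the original source; the host and URL route are placeholder assumptions, and the response shape follows the peer_data dict built above):

import requests

# Placeholder host and route; substitute whatever the deployment actually uses.
BASE_URL = "http://grading-controller.example.com"

response = requests.get(
    BASE_URL + "/peer_grading/get_peer_grading_data_for_location/",
    params={"student_id": "42", "location": "i4x://org/course/problem/essay1"},
)
peer_data = response.json()
# Expected keys on success: count_graded, count_required,
# student_sub_count, count_available.
print(peer_data)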
Example #2
def get_problem_list(request):
    """
    Get the list of problems that need grading in the course given by request.GET['course_id'].

    Returns:
        list of dicts with keys
           'location'
           'problem_name'
           'num_graded' -- number graded
           'num_pending' -- number pending in the queue
           'min_for_ml' -- minimum needed to make ML model
    """

    if request.method!="GET":
        error_message="Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id=request.GET.get("course_id")

    if not course_id:
        error_message="Missing needed tag course_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    locations_for_course = [x['location'] for x in
                            list(Submission.objects.filter(course_id=course_id).values('location').distinct())]

    if len(locations_for_course)==0:
        error_message="No problems associated with course."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    location_info=[]
    for location in locations_for_course:
        problem_name = Submission.objects.filter(location=location)[0].problem_id
        submissions_pending = staff_grading_util.submissions_pending_instructor(location, state_in=[SubmissionState.waiting_to_be_graded]).count()
        finished_instructor_graded = staff_grading_util.finished_submissions_graded_by_instructor(location).count()
        min_scored_for_location=settings.MIN_TO_USE_PEER
        location_ml_count = Submission.objects.filter(location=location, preferred_grader_type="ML").count()
        if location_ml_count>0:
            min_scored_for_location=settings.MIN_TO_USE_ML

        submissions_required = max([0,min_scored_for_location-finished_instructor_graded])

        problem_name_from_location=location.split("://")[1]
        location_dict={
            'location' : location,
            'problem_name' : problem_name,
            'problem_name_from_location' : problem_name_from_location,
            'num_graded' : finished_instructor_graded,
            'num_pending' : submissions_pending,
            'num_required' : submissions_required,
            'min_for_ml' : settings.MIN_TO_USE_ML,
            }
        location_info.append(location_dict)

    util.log_connection_data()
    return util._success_response({'problem_list' : location_info},
                                  _INTERFACE_VERSION)
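The per-location threshold above (MIN_TO_USE_PEER by default, MIN_TO_USE_ML once any submission at the location prefers an ML grader) can be read as a small helper. This is only an illustrative sketch, not a function in the original module; it relies on the same Submission model and settings the surrounding module already imports:

from django.conf import settings

def minimum_scored_for_location(location):
    """Sketch of the instructor-score threshold rule used in get_problem_list above."""
    # Submission is the model the surrounding module already imports.
    ml_count = Submission.objects.filter(
        location=location, preferred_grader_type="ML").count()
    if ml_count > 0:
        return settings.MIN_TO_USE_ML
    return settings.MIN_TO_USE_PEER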
Example #3
def get_peer_grading_data_for_location(request):
    if request.method != 'GET':
        return util._error_response("Request type must be GET", _INTERFACE_VERSION)

    for tag in ['student_id', 'location']:
        if tag not in request.GET:
            return util._error_response("Missing required key {0}".format(tag), _INTERFACE_VERSION)

    location = request.GET.get('location')
    student_id = request.GET.get('student_id')

    pl = peer_grading_util.PeerLocation(location,student_id)
    student_sub_count = pl.submitted_count()

    submissions_graded = pl.graded_count()
    submissions_required = pl.required_count()
    submissions_available = pl.pending_count()

    peer_data = {
        'count_graded' : submissions_graded,
        'count_required' : submissions_required,
        'student_sub_count' : student_sub_count,
        'count_available' : submissions_available
    }

    util.log_connection_data()
    return util._success_response(peer_data, _INTERFACE_VERSION)
Example #4
def is_student_calibrated(request):
    """
    Decides if student has fulfilled criteria for peer grading calibration for a given location (problem id).
    Input:
        student id, problem_id
    Output:
        Dictionary with boolean calibrated indicating whether or not student has finished calibration.

    Note: Location in the database is currently being used as the problem id.
    """

    if request.method != "GET":
        raise Http404

    problem_id = request.GET.get("problem_id")
    student_id = request.GET.get("student_id")

    success, data = calibration.check_calibration_status(
        problem_id, student_id)

    if not success:
        return util._error_response(data, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response(data, _INTERFACE_VERSION)
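One quick way to exercise this view is Django's test client; the sketch below assumes a configured test settings module and a hypothetical URL pattern for the view:

from django.test import Client

client = Client()
response = client.get(
    "/peer_grading/is_student_calibrated/",  # hypothetical route
    {"problem_id": "i4x://org/course/problem/essay1", "student_id": "42"},
)
# A non-GET request raises Http404; a GET returns the status payload produced
# by calibration.check_calibration_status.
print(response.status_code, response.content)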
Example #5
def get_peer_grading_data_for_location(request):
    if request.method != 'GET':
        return util._error_response("Request type must be GET", _INTERFACE_VERSION)

    for tag in ['student_id', 'location']:
        if tag not in request.GET:
            return util._error_response("Missing required key {0}".format(tag), _INTERFACE_VERSION)

    location = request.GET.get('location')
    student_id = request.GET.get('student_id')

    student_sub_count= peer_grading_util.get_required_peer_grading_for_location({'student_id' : student_id, 'location' : location, 'preferred_grader_type' : "PE"})
    submissions_graded = peer_grading_util.peer_grading_submissions_graded_for_location(location,student_id).count()
    submissions_required = settings.REQUIRED_PEER_GRADING_PER_STUDENT*student_sub_count

    ##Check to see if submissions were available to grade in the past week
    notification_seen_recently = NotificationsSeen.check_for_recent_notifications(
        student_id = student_id,
        location = location,
        notification_type=NotificationTypes.peer_grading,
        recent_notification_interval=settings.PEER_GRADING_TIMEOUT_INTERVAL
    )

    if not notification_seen_recently:
        submissions_required = submissions_graded

    peer_data = {
        'count_graded' : submissions_graded,
        'count_required' : submissions_required,
        'student_sub_count' : student_sub_count,
    }

    util.log_connection_data()
    return util._success_response(peer_data, _INTERFACE_VERSION)
Example #6
def get_problem_list(request):
    """
    Get the list of problems that need grading in the course given by request.GET['course_id'].

    Returns:
        list of dicts with keys
           'location'
           'problem_name'
           'num_graded' -- number graded
           'num_pending' -- number pending in the queue
    """

    if request.method != "GET":
        error_message = "Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id = request.GET.get("course_id")
    student_id = request.GET.get("student_id")

    if not course_id or not student_id:
        error_message = "Missing needed tag course_id or student_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    locations_for_course = [
        x["location"] for x in list(Submission.objects.filter(course_id=course_id).values("location").distinct())
    ]

    location_info = []
    for location in locations_for_course:
        student_sub_count = peer_grading_util.get_required_peer_grading_for_location(
            {"student_id": student_id, "location": location, "preferred_grader_type": "PE"}
        )
        if student_sub_count > 0:
            problem_name = Submission.objects.filter(location=location)[0].problem_id
            submissions_pending = peer_grading_util.peer_grading_submissions_pending_for_location(
                location, student_id
            ).count()
            submissions_graded = peer_grading_util.peer_grading_submissions_graded_for_location(
                location, student_id
            ).count()
            submissions_required = max(
                [0, (settings.REQUIRED_PEER_GRADING_PER_STUDENT * student_sub_count) - submissions_graded]
            )

            problem_name_from_location = location.split("://")[1]
            if submissions_graded > 0 or submissions_pending > 0:
                location_dict = {
                    "location": location,
                    "problem_name": problem_name,
                    "num_graded": submissions_graded,
                    "num_required": submissions_required,
                    "num_pending": submissions_pending,
                }
                location_info.append(location_dict)

    util.log_connection_data()
    return util._success_response({"problem_list": location_info}, _INTERFACE_VERSION)
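The num_required value above follows directly from settings.REQUIRED_PEER_GRADING_PER_STUDENT. A worked example with illustrative numbers (not taken from the source):

# Suppose the setting requires 3 peer gradings per submitted essay.
REQUIRED_PEER_GRADING_PER_STUDENT = 3
student_sub_count = 2     # the student submitted two essays at this location
submissions_graded = 4    # and has already graded four peers

submissions_required = max(
    [0, (REQUIRED_PEER_GRADING_PER_STUDENT * student_sub_count) - submissions_graded]
)
print(submissions_required)  # -> 2 more peer gradings owed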
Example #7
def get_problem_list(request):
    """
    Get the list of problems that need grading in the course given by request.GET['course_id'].

    Returns:
        list of dicts with keys
           'location'
           'problem_name'
           'num_graded' -- number graded
           'num_pending' -- number pending in the queue
           'min_for_ml' -- minimum needed to make ML model
    """

    if request.method!="GET":
        error_message="Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id=request.GET.get("course_id")

    if not course_id:
        error_message="Missing needed tag course_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    locations_for_course = [x['location'] for x in
                            list(Submission.objects.filter(course_id=course_id).values('location').distinct())]

    if len(locations_for_course)==0:
        error_message="No problems associated with course."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    location_info=[]
    for location in locations_for_course:
        sl = staff_grading_util.StaffLocation(location)
        control = SubmissionControl(sl.latest_submission())
        problem_name = sl.problem_name()
        submissions_pending = sl.pending_count()
        finished_instructor_graded = sl.graded_count()

        submissions_required = max([0, sl.minimum_to_score() - finished_instructor_graded])

        problem_name_from_location=location.split("://")[1]
        location_dict={
            'location': location,
            'problem_name': problem_name,
            'problem_name_from_location': problem_name_from_location,
            'num_graded': finished_instructor_graded,
            'num_pending': submissions_pending,
            'num_required': submissions_required,
            'min_for_ml': control.minimum_to_use_ai,
            }
        location_info.append(location_dict)

    util.log_connection_data()
    return util._success_response({'problem_list' : location_info},
                                  _INTERFACE_VERSION)
Example #8
def get_problem_list(request):
    """
    Get the list of problems that need grading in the course given by request.GET['course_id'].

    Returns:
        list of dicts with keys
           'location'
           'problem_name'
           'num_graded' -- number graded
           'num_pending' -- number pending in the queue
    """

    if request.method != "GET":
        error_message = "Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id = request.GET.get("course_id")
    student_id = request.GET.get("student_id")

    if not course_id or not student_id:
        error_message = "Missing needed tag course_id or student_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    locations_for_course = [
        x['location'] for x in Submission.objects.filter(
            course_id=course_id).values('location').distinct()
    ]

    location_info = []
    for location in locations_for_course:
        pl = peer_grading_util.PeerLocation(location, student_id)
        if pl.submitted_count() > 0:
            problem_name = pl.problem_name()
            submissions_pending = pl.pending_count()
            submissions_graded = pl.graded_count()
            submissions_required = max(
                [0, pl.required_count() - submissions_graded])

            if (submissions_graded > 0 or submissions_pending > 0
                    or control_util.SubmissionControl.peer_grade_finished_subs(
                        pl)):
                location_dict = {
                    'location': location,
                    'problem_name': problem_name,
                    'num_graded': submissions_graded,
                    'num_required': submissions_required,
                    'num_pending': submissions_pending,
                }
                location_info.append(location_dict)

    util.log_connection_data()
    return util._success_response({'problem_list': location_info},
                                  _INTERFACE_VERSION)
Example #9
def get_problem_list(request):
    """
    Get the list of problems that need grading in the course given by request.GET['course_id'].

    Returns:
        list of dicts with keys
           'location'
           'problem_name'
           'num_graded' -- number graded
           'num_pending' -- number pending in the queue
    """

    if request.method!="GET":
        error_message="Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id=request.GET.get("course_id")
    student_id = request.GET.get("student_id")

    if not course_id or not student_id:
        error_message="Missing needed tag course_id or student_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    locations_for_course = [x['location'] for x in
                            Submission.objects.filter(course_id=course_id).values('location').distinct()]

    location_info=[]
    for location in locations_for_course:
        pl = peer_grading_util.PeerLocation(location,student_id)
        if pl.submitted_count()>0:
            problem_name = pl.problem_name()
            submissions_pending = pl.pending_count()
            submissions_graded = pl.graded_count()
            submissions_required = max([0,pl.required_count() - submissions_graded])

            if (submissions_graded > 0 or
                submissions_pending > 0 or
                control_util.SubmissionControl.peer_grade_finished_subs(pl)):
                location_dict={
                    'location' : location,
                    'problem_name' : problem_name,
                    'num_graded' : submissions_graded,
                    'num_required' : submissions_required,
                    'num_pending' : submissions_pending,
                    }
                location_info.append(location_dict)

    util.log_connection_data()
    return util._success_response({'problem_list' : location_info},
        _INTERFACE_VERSION)
Example #10
def get_notifications(request):
    if request.method!="GET":
        error_message="Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id=request.GET.get("course_id")

    if not course_id:
        error_message="Missing needed tag course_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    success, staff_needs_to_grade = staff_grading_util.get_staff_grading_notifications(course_id)
    if not success:
        return util._error_response(staff_needs_to_grade, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response({'staff_needs_to_grade' : staff_needs_to_grade}, _INTERFACE_VERSION)
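On the wire this endpoint follows the same contract documented elsewhere in this module: a JSON body with version, success, and either the payload or an error string. A hedged consumer sketch, assuming the payload keys are merged into the top-level body and using a placeholder URL:

import requests

resp = requests.get(
    "http://grading-controller.example.com/staff_grading/get_notifications/",
    params={"course_id": "org/course/run"},
)
body = resp.json()
if body.get("success"):
    print("staff_needs_to_grade:", body.get("staff_needs_to_grade"))
else:
    print("error:", body.get("error"))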
Example #11
def get_peer_grading_data_for_location(request):
    if request.method != 'GET':
        return util._error_response("Request type must be GET",
                                    _INTERFACE_VERSION)

    for tag in ['student_id', 'location']:
        if tag not in request.GET:
            return util._error_response("Missing required key {0}".format(tag),
                                        _INTERFACE_VERSION)

    location = request.GET.get('location')
    student_id = request.GET.get('student_id')

    student_sub_count = peer_grading_util.get_required_peer_grading_for_location(
        {
            'student_id': student_id,
            'location': location,
            'preferred_grader_type': "PE"
        })
    submissions_graded = peer_grading_util.peer_grading_submissions_graded_for_location(
        location, student_id).count()
    submissions_required = settings.REQUIRED_PEER_GRADING_PER_STUDENT * student_sub_count

    ##Check to see if submissions were available to grade in the past week
    notification_seen_recently = NotificationsSeen.check_for_recent_notifications(
        student_id=student_id,
        location=location,
        notification_type=NotificationTypes.peer_grading,
        recent_notification_interval=settings.PEER_GRADING_TIMEOUT_INTERVAL)

    if not notification_seen_recently:
        submissions_required = submissions_graded

    peer_data = {
        'count_graded': submissions_graded,
        'count_required': submissions_required,
        'student_sub_count': student_sub_count,
    }

    util.log_connection_data()
    return util._success_response(peer_data, _INTERFACE_VERSION)
Example #12
def get_notifications(request):
    if request.method!="GET":
        error_message="Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id=request.GET.get("course_id")
    student_id = request.GET.get("student_id")

    if not course_id or not student_id:
        error_message="Missing needed tag course_id or student_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    pc = peer_grading_util.PeerCourse(course_id,student_id)
    success, student_needs_to_peer_grade = pc.notifications()
    if not success:
        return util._error_response(student_needs_to_peer_grade, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response({'student_needs_to_peer_grade' : student_needs_to_peer_grade}, _INTERFACE_VERSION)
Example #13
def get_notifications(request):
    if request.method != "GET":
        error_message = "Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id = request.GET.get("course_id")

    if not course_id:
        error_message = "Missing needed tag course_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    success, staff_needs_to_grade = staff_grading_util.get_staff_grading_notifications(
        course_id)
    if not success:
        return util._error_response(staff_needs_to_grade, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response(
        {'staff_needs_to_grade': staff_needs_to_grade}, _INTERFACE_VERSION)
Example #14
def show_calibration_essay(request):
    """
    Shows a calibration essay when it receives a GET request.
    Input:
        Http request containing problem_id and student_id
    Output:
        Http response containing essay data (submission id, submission key, student response, prompt, rubric, max_score)
        Or error
    """
    if request.method != "GET":
        raise Http404

    problem_id = request.GET.get("problem_id")
    student_id = request.GET.get("student_id")

    success, data = calibration.get_calibration_essay(problem_id, student_id)

    if not success:
        return util._error_response(data, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response(data, _INTERFACE_VERSION)
Example #15
def show_calibration_essay(request):
    """
    Shows a calibration essay when it receives a GET request.
    Input:
        Http request containing problem_id and student_id
    Output:
        Http response containing essay data (submission id, submission key, student response, prompt, rubric, max_score)
        Or error
    """
    if request.method != "GET":
        raise Http404

    problem_id = request.GET.get("problem_id")
    student_id = request.GET.get("student_id")

    success, data = calibration.get_calibration_essay(problem_id, student_id)

    if not success:
        return util._error_response(data, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response(data, _INTERFACE_VERSION)
Example #16
def is_student_calibrated(request):
    """
    Decides if student has fulfilled criteria for peer grading calibration for a given location (problem id).
    Input:
        student id, problem_id
    Output:
        Dictionary with boolean calibrated indicating whether or not student has finished calibration.

    Note: Location in the database is currently being used as the problem id.
    """

    if request.method != "GET":
        raise Http404

    problem_id = request.GET.get("problem_id")
    student_id = request.GET.get("student_id")

    success, data = calibration.check_calibration_status(problem_id, student_id)

    if not success:
        return util._error_response(data, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response(data, _INTERFACE_VERSION)
Example #17
def get_notifications(request):
    if request.method != "GET":
        error_message = "Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id = request.GET.get("course_id")
    student_id = request.GET.get("student_id")

    if not course_id or not student_id:
        error_message = "Missing needed tag course_id or student_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    pc = peer_grading_util.PeerCourse(course_id, student_id)
    success, student_needs_to_peer_grade = pc.notifications()
    if not success:
        return util._error_response(student_needs_to_peer_grade,
                                    _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response(
        {'student_needs_to_peer_grade': student_needs_to_peer_grade},
        _INTERFACE_VERSION)
Example #18
def get_problem_list(request):
    """
    Get the list of problems that need grading in the course given by request.GET['course_id'].

    Returns:
        list of dicts with keys
           'location'
           'problem_name'
           'num_graded' -- number graded
           'num_pending' -- number pending in the queue
    """

    if request.method != "GET":
        error_message = "Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id = request.GET.get("course_id")
    student_id = request.GET.get("student_id")

    if not course_id or not student_id:
        error_message = "Missing needed tag course_id or student_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    locations_for_course = [
        x['location'] for x in list(
            Submission.objects.filter(
                course_id=course_id).values('location').distinct())
    ]

    location_info = []
    for location in locations_for_course:
        student_sub_count = peer_grading_util.get_required_peer_grading_for_location(
            {
                'student_id': student_id,
                'location': location,
                'preferred_grader_type': "PE"
            })
        if student_sub_count > 0:
            problem_name = Submission.objects.filter(
                location=location)[0].problem_id
            submissions_pending = peer_grading_util.peer_grading_submissions_pending_for_location(
                location, student_id).count()
            submissions_graded = peer_grading_util.peer_grading_submissions_graded_for_location(
                location, student_id).count()
            submissions_required = max([
                0,
                (settings.REQUIRED_PEER_GRADING_PER_STUDENT *
                 student_sub_count) - submissions_graded
            ])

            problem_name_from_location = location.split("://")[1]
            if submissions_graded > 0 or submissions_pending > 0:
                location_dict = {
                    'location': location,
                    'problem_name': problem_name,
                    'num_graded': submissions_graded,
                    'num_required': submissions_required,
                    'num_pending': submissions_pending,
                }
                location_info.append(location_dict)

    util.log_connection_data()
    return util._success_response({'problem_list': location_info},
                                  _INTERFACE_VERSION)
Example #19
def save_grade(request):
    """
    Supports POST requests with the following arguments:

    location: string
    grader_id: int
    submission_id: int
    score: int
    feedback: string
    submission_key : string

    Returns json dict with keys

    version: int
    success: bool
    error: string, present if not success
    """
    if request.method != "POST":
        raise Http404

    post_data = request.POST.dict().copy()
    log.debug(post_data)

    for tag in ["location", "grader_id", "submission_id", "submission_key", "score", "feedback", "submission_flagged"]:
        if not tag in post_data:
            return util._error_response("Cannot find needed key {0} in request.".format(tag), _INTERFACE_VERSION)

    location = post_data["location"]
    grader_id = post_data["grader_id"]
    submission_id = post_data["submission_id"]

    # Submission key currently unused, but plan to use it for validation in the future.
    submission_key = post_data["submission_key"]
    score = post_data["score"]

    # This is done to ensure that response is properly formatted on the lms side.
    feedback_dict = post_data["feedback"]

    rubric_scores_complete = request.POST.get("rubric_scores_complete", False)
    rubric_scores = request.POST.getlist("rubric_scores", [])

    is_submission_flagged = request.POST.get("submission_flagged", False)
    if isinstance(is_submission_flagged, basestring):
        is_submission_flagged = is_submission_flagged.lower() == "true"

    status = GraderStatus.success
    confidence = 1.0

    is_answer_unknown = request.POST.get("answer_unknown", False)
    if isinstance(is_answer_unknown, basestring):
        is_answer_unknown = is_answer_unknown.lower() == "true"

    if is_answer_unknown:
        status = GraderStatus.failure
        confidence = 0.0

    try:
        score = int(score)
    except ValueError:
        return util._error_response("Expected integer score.  Got {0}".format(score), _INTERFACE_VERSION)

    try:
        sub = Submission.objects.get(id=submission_id)
    except Exception:
        return util._error_response(
            "grade_save_error",
            _INTERFACE_VERSION,
            data={"msg": "Submission id {0} is not valid.".format(submission_id)},
        )

    success, error_message = grader_util.validate_rubric_scores(rubric_scores, rubric_scores_complete, sub)
    if not success:
        return util._error_response("grade_save_error", _INTERFACE_VERSION, data={"msg": error_message})

    d = {
        "submission_id": submission_id,
        "score": score,
        "feedback": feedback_dict,
        "grader_id": grader_id,
        "grader_type": "PE",
        # Humans always succeed (if they grade at all)...
        "status": status,
        # ...and they're always confident too.
        "confidence": confidence,
        # And they don't make any errors
        "errors": "",
        "rubric_scores_complete": rubric_scores_complete,
        "rubric_scores": rubric_scores,
        "is_submission_flagged": is_submission_flagged,
    }

    # Currently not posting back to LMS.  Only saving grader object, and letting controller decide when to post back.
    (success, header) = grader_util.create_and_handle_grader_object(d)
    if not success:
        return util._error_response("There was a problem saving the grade.  Contact support.", _INTERFACE_VERSION)

    # xqueue_session=util.xqueue_login()
    # error,msg = util.post_results_to_xqueue(xqueue_session,json.dumps(header),json.dumps(post_data))

    util.log_connection_data()
    return util._success_response({"msg": "Posted to queue."}, _INTERFACE_VERSION)
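Submitting a peer grade therefore means POSTing all seven required keys, with rubric_scores sent as a repeated form field. A client sketch with placeholder values and a placeholder URL:

import requests

payload = {
    "location": "i4x://org/course/problem/essay1",
    "grader_id": "42",
    "submission_id": "1001",
    "submission_key": "abc123",    # currently unused server-side, but required
    "score": "3",
    "feedback": "Clear thesis, weak evidence.",
    "submission_flagged": "false",
    "rubric_scores_complete": "true",
    "rubric_scores": ["1", "2"],   # encoded as repeated rubric_scores fields
}
resp = requests.post(
    "http://grading-controller.example.com/peer_grading/save_grade/",
    data=payload,
)
print(resp.json())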
Example #20
def get_next_submission(request):
    """
    Supports GET request with the following arguments:
    course_id -- the course for which to return a submission.
    grader_id -- LMS user_id of the requesting user

    Returns json dict with the following keys:

    version: '1'  (number)

    success: bool

    if success:
      'submission_id': a unique identifier for the submission, to be passed
                       back with the grade.

      'submission': the submission, rendered as read-only html for grading

      'rubric': the rubric, also rendered as html.

      'prompt': the question prompt, also rendered as html.

      'message': if there was no submission available, but nothing went wrong,
                there will be a message field.
    else:
      'error': if success is False, will have an error message with more info.
    }
    """

    if request.method != "GET":
        raise Http404

    course_id = request.GET.get('course_id')
    grader_id = request.GET.get('grader_id')
    location = request.GET.get('location')
    found = False

    if not (course_id or location) or not grader_id:
        return util._error_response("required_parameter_missing", _INTERFACE_VERSION)

    if location:
        sl = staff_grading_util.StaffLocation(location)
        (found, sid) = sl.next_item()

    # TODO: save the grader id and match it in save_grade to make sure things
    # are consistent.
    if not location:
        sc = staff_grading_util.StaffCourse(course_id)
        (found, sid) = sc.next_item()

    if not found:
        return util._success_response({'message': 'No more submissions to grade.'},
                                      _INTERFACE_VERSION)
    try:
        submission = Submission.objects.get(id=int(sid))
    except Submission.DoesNotExist:
        log.error("Couldn't find submission %s for instructor grading", sid)
        return util._error_response('failed_to_load_submission',
                                    _INTERFACE_VERSION,
                                    data={'submission_id': sid})

    #Get error metrics from ml grading, and get into dictionary form to pass down to staff grading view
    success, ml_error_info=ml_grading_util.get_ml_errors(submission.location)
    if success:
        ml_error_message=staff_grading_util.generate_ml_error_message(ml_error_info)
    else:
        ml_error_message=ml_error_info

    ml_error_message="Machine learning error information: " + ml_error_message

    sl = staff_grading_util.StaffLocation(submission.location)
    control = SubmissionControl(sl.latest_submission())
    if submission.state != SubmissionState.being_graded:
        log.error("Instructor grading got submission {0} in an invalid state {1} ".format(sid, submission.state))
        return util._error_response('wrong_internal_state',
                                    _INTERFACE_VERSION,
                                    data={'submission_id': sid,
                                     'submission_state': submission.state})

    response = {'submission_id': sid,
                'submission': submission.student_response,
                'rubric': submission.rubric,
                'prompt': submission.prompt,
                'max_score': submission.max_score,
                'ml_error_info': ml_error_message,
                'problem_name': submission.problem_id,
                'num_graded': sl.graded_count(),
                'num_pending': sl.pending_count(),
                'min_for_ml': control.minimum_to_use_ai,
                }

    util.log_connection_data()
    return util._success_response(response, _INTERFACE_VERSION)
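Together with save_grade, this endpoint supports a simple pull-grade-post loop for instructor grading. The sketch below is only an outline: the routes, the placeholder score and feedback, and the way the session is authenticated are all assumptions:

import requests

BASE = "http://grading-controller.example.com/staff_grading"
session = requests.Session()  # authenticate however the deployment requires

while True:
    nxt = session.get(
        BASE + "/get_next_submission/",
        params={"course_id": "org/course/run", "grader_id": "42"},
    ).json()
    if not nxt.get("success") or "message" in nxt:
        break  # error, or no more submissions to grade
    session.post(BASE + "/save_grade/", data={
        "course_id": "org/course/run",
        "grader_id": "42",
        "submission_id": nxt["submission_id"],
        "score": "0",                       # placeholder score
        "feedback": "placeholder feedback",
    })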
Example #21
def handle_single_item(controller_session):
    sub_get_success, content = get_item_from_controller(controller_session)
    #Grade and handle here
    if sub_get_success:
        transaction.commit()
        sub = Submission.objects.get(id=int(content['submission_id']))
        sl = staff_grading_util.StaffLocation(sub.location)
        subs_graded_by_instructor = sl.graded()
        first_sub = subs_graded_by_instructor.order_by('date_created')[0]
        parsed_rubric=rubric_functions.parse_rubric(first_sub.rubric)


        #strip out unicode and other characters in student response
        #Needed, or grader may potentially fail
        #TODO: Handle unicode in student responses properly
        student_response = sub.student_response.encode('ascii', 'ignore')

        #Get the latest created model for the given location
        transaction.commit()

        location_suffixes=ml_grading_util.generate_rubric_location_suffixes(subs_graded_by_instructor, grading = True)

        # Initialize defaults up front so these names are bound even when no
        # rubric suffixes are generated and the loop below never runs.
        rubric_scores_complete = len(location_suffixes) > 0
        rubric_scores = []
        results = final_results = RESULT_FAILURE_DICT
        status = GraderStatus.failure

        for m in xrange(0,len(location_suffixes)):
            suffix = location_suffixes[m]
            success, created_model=ml_grading_util.get_latest_created_model(sub.location + suffix)

            if not success:
                log.error("Could not identify a valid created model!")
                if m==0:
                    results= RESULT_FAILURE_DICT
                    formatted_feedback="error"
                    status=GraderStatus.failure
                    statsd.increment("open_ended_assessment.grading_controller.call_ml_grader",
                        tags=["success:False"])

            else:

                #Create grader path from location in submission
                grader_path = os.path.join(settings.ML_MODEL_PATH,created_model.model_relative_path)
                model_stored_in_s3=created_model.model_stored_in_s3

                success, grader_data=load_model_file(created_model,use_full_path=False)
                if success:
                    results = grade.grade(grader_data, student_response)
                else:
                    results=RESULT_FAILURE_DICT

                #If the above fails, try using the full path in the created_model object
                if not results['success'] and not created_model.model_stored_in_s3:
                    grader_path=created_model.model_full_path
                    try:
                        success, grader_data=load_model_file(created_model,use_full_path=True)
                        if success:
                            results = grade.grade(grader_data, student_response)
                        else:
                            results=RESULT_FAILURE_DICT
                    except Exception:
                        error_message="Could not find a valid model file."
                        log.exception(error_message)
                        results=RESULT_FAILURE_DICT

                log.info("ML Grader:  Success: {0} Errors: {1}".format(results['success'], results['errors']))
                statsd.increment("open_ended_assessment.grading_controller.call_ml_grader",
                    tags=["success:{0}".format(results['success']), 'location:{0}'.format(sub.location)])

                #Set grader status according to success/fail
                if results['success']:
                    status = GraderStatus.success
                else:
                    status = GraderStatus.failure

            if m==0:
                final_results=results
            elif results['success']==False:
                rubric_scores_complete = False
            else:
                rubric_scores.append(int(results['score']))
        if len(rubric_scores)==0:
            rubric_scores_complete=False

        grader_dict = {
            'score': int(final_results['score']),
            'feedback': json.dumps(results['feedback']),
            'status': status,
            'grader_id': 1,
            'grader_type': "ML",
            'confidence': results['confidence'],
            'submission_id': sub.id,
            'errors' : ' ' .join(results['errors']),
            'rubric_scores_complete' : rubric_scores_complete,
            'rubric_scores' : json.dumps(rubric_scores),
            }
        #Create grader object in controller by posting back results
        created, msg = util._http_post(
            controller_session,
            urlparse.urljoin(settings.GRADING_CONTROLLER_INTERFACE['url'],
                project_urls.ControllerURLs.put_result),
            grader_dict,
            settings.REQUESTS_TIMEOUT,
        )
    else:
        log.error("Error getting item from controller or no items to get.")
        statsd.increment("open_ended_assessment.grading_controller.call_ml_grader",
            tags=["success:False"])

    util.log_connection_data()
    return sub_get_success
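Because handle_single_item reports whether it actually pulled an item from the controller, a worker can simply call it in a loop and back off when the queue looks empty. A minimal driver sketch, assuming controller_session is an already-authenticated session object (how it is created is outside this snippet):

import time

def drain_queue(controller_session, idle_sleep=5, max_idle_polls=3):
    """Sketch of a worker loop: grade items until several polls come back empty."""
    idle_polls = 0
    while idle_polls < max_idle_polls:
        if handle_single_item(controller_session):
            idle_polls = 0
        else:
            idle_polls += 1
            time.sleep(idle_sleep)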
Example #22
def save_grade(request):
    """
    Supports POST requests with the following arguments:

    course_id: int
    grader_id: int
    submission_id: int
    score: int
    feedback: string

    Returns json dict with keys

    version: int
    success: bool
    error: string, present if not success
    """
    if request.method != "POST":
        return util._error_response("Request needs to be POST", _INTERFACE_VERSION)

    course_id = request.POST.get('course_id')
    grader_id = request.POST.get('grader_id')
    submission_id = request.POST.get('submission_id')
    score = request.POST.get('score')
    feedback = request.POST.get('feedback')
    skipped = request.POST.get('skipped')=="True"
    rubric_scores_complete = request.POST.get('rubric_scores_complete', False)
    rubric_scores = request.POST.getlist('rubric_scores', [])
    is_submission_flagged = request.POST.get('submission_flagged', False)
    if isinstance(is_submission_flagged, basestring):
        is_submission_flagged = is_submission_flagged.lower() == 'true'

    if (# These have to be truthy
        not (course_id and grader_id and submission_id) or
        # These have to be non-None
        score is None or feedback is None):
        return util._error_response("required_parameter_missing", _INTERFACE_VERSION)

    if skipped:
        success, sub = staff_grading_util.set_instructor_grading_item_back_to_preferred_grader(submission_id)

        if not success:
            return util._error_response(sub, _INTERFACE_VERSION)

        return util._success_response({}, _INTERFACE_VERSION)

    try:
        score = int(score)
    except ValueError:
        return util._error_response(
            "grade_save_error",
            _INTERFACE_VERSION,
            data={"msg": "Expected integer score.  Got {0}".format(score)})

    try:
        sub=Submission.objects.get(id=submission_id)
    except Exception:
        return util._error_response(
            "grade_save_error",
            _INTERFACE_VERSION,
            data={"msg": "Submission id {0} is not valid.".format(submission_id)}
        )

    first_sub_for_location=Submission.objects.filter(location=sub.location).order_by('date_created')[0]
    rubric= first_sub_for_location.rubric
    rubric_success, parsed_rubric =  rubric_functions.parse_rubric(rubric)

    if rubric_success:
        success, error_message = grader_util.validate_rubric_scores(rubric_scores, rubric_scores_complete, sub)
        if not success:
            return util._error_response(
                "grade_save_error",
                _INTERFACE_VERSION,
                data={"msg": error_message}
            )

    d = {'submission_id': submission_id,
         'score': score,
         'feedback': feedback,
         'grader_id': grader_id,
         'grader_type': 'IN',
         # Humans always succeed (if they grade at all)...
         'status': GraderStatus.success,
         # ...and they're always confident too.
         'confidence': 1.0,
         #And they don't make errors
         'errors' : "",
         'rubric_scores_complete' : rubric_scores_complete,
         'rubric_scores' : rubric_scores,
         'is_submission_flagged' : is_submission_flagged,
         }

    success, header = grader_util.create_and_handle_grader_object(d)

    if not success:
        return util._error_response("grade_save_error", _INTERFACE_VERSION,
                                    data={'msg': 'Internal error'})

    util.log_connection_data()
    return util._success_response({}, _INTERFACE_VERSION)
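Note the skipped branch above: posting skipped="True" hands the submission back to its preferred grader without recording a score. A hedged client sketch (placeholder URL and ids; score and feedback are still included because the parameter check runs before the skip is handled):

import requests

resp = requests.post(
    "http://grading-controller.example.com/staff_grading/save_grade/",
    data={
        "course_id": "org/course/run",
        "grader_id": "42",
        "submission_id": "1001",
        "score": "0",        # present only to satisfy the parameter check
        "feedback": "",
        "skipped": "True",   # routes the item back to the preferred grader
    },
)
print(resp.json())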
Example #23
def save_calibration_essay(request):
    """
    Saves a calibration essay sent back from LMS.
    Input:
        request dict containing keys student_id, location, calibration_essay_id, score, submission_key, feedback
    Output:
        Boolean indicating success in saving calibration essay or not.
    """

    if request.method != "POST":
        raise Http404

    post_data = request.POST.dict().copy()

    for tag in ['location', 'student_id', 'calibration_essay_id', 'submission_key', 'score', 'feedback']:
        if not tag in post_data:
            return util._error_response("Cannot find needed key {0} in request.".format(tag), _INTERFACE_VERSION)

    location = post_data['location']
    student_id = post_data['student_id']
    submission_id = post_data['calibration_essay_id']
    score = post_data['score']
    feedback = post_data['feedback']

    #Submission key currently unused, but plan to use it for validation in the future.
    submission_key = post_data['submission_key']

    rubric_scores_complete = request.POST.get('rubric_scores_complete', False)
    rubric_scores = request.POST.getlist('rubric_scores', [])

    try:
        score = int(score)
    except ValueError:
        return util._error_response("Expected integer score.  Got {0}".format(score), _INTERFACE_VERSION)

    try:
        sub=Submission.objects.get(id=submission_id)
    except Exception:
        return util._error_response(
            "grade_save_error",
            _INTERFACE_VERSION,
            data={"msg": "Submission id {0} is not valid.".format(submission_id)}
        )

    d = {'submission_id': submission_id,
         'score': score,
         'feedback': feedback,
         'student_id': student_id,
         'location': location,
         'rubric_scores_complete' : rubric_scores_complete,
         'rubric_scores' : rubric_scores,
    }

    (success, data) = calibration.create_and_save_calibration_record(d)

    if not success:
        error_msg = "Failed to create and save calibration record. {0}".format(data)
        log.error(error_msg)
        return util._error_response(error_msg, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response({'message' : "Successfully saved calibration record.", 'actual_score' : data['actual_score'], 'actual_rubric' : data['actual_rubric'], 'actual_feedback' : data['actual_feedback']}, _INTERFACE_VERSION)
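Taken together with is_student_calibrated and show_calibration_essay above, this endpoint closes the calibration loop: check status, fetch an essay, grade it, and post the grade back. A condensed workflow sketch; the routes are placeholders and the response key names are inferred from the docstrings above:

import requests

BASE = "http://grading-controller.example.com/peer_grading"
params = {"problem_id": "i4x://org/course/problem/essay1", "student_id": "42"}

status = requests.get(BASE + "/is_student_calibrated/", params=params).json()
if not status.get("calibrated"):
    essay = requests.get(BASE + "/show_calibration_essay/", params=params).json()
    result = requests.post(BASE + "/save_calibration_essay/", data={
        "location": params["problem_id"],
        "student_id": params["student_id"],
        "calibration_essay_id": essay["submission_id"],
        "submission_key": essay["submission_key"],
        "score": "2",
        "feedback": "Calibration attempt.",
    }).json()
    # On success the body echoes actual_score, actual_rubric, actual_feedback.
    print(result)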
Example #24
def save_calibration_essay(request):
    """
    Saves a calibration essay sent back from LMS.
    Input:
        request dict containing keys student_id, location, calibration_essay_id, score, submission_key, feedback
    Output:
        Boolean indicating success in saving calibration essay or not.
    """

    if request.method != "POST":
        raise Http404

    post_data = request.POST.dict().copy()

    for tag in [
            'location', 'student_id', 'calibration_essay_id', 'submission_key',
            'score', 'feedback'
    ]:
        if not tag in post_data:
            return util._error_response(
                "Cannot find needed key {0} in request.".format(tag),
                _INTERFACE_VERSION)

    location = post_data['location']
    student_id = post_data['student_id']
    submission_id = post_data['calibration_essay_id']
    score = post_data['score']
    feedback = post_data['feedback']

    #Submission key currently unused, but plan to use it for validation in the future.
    submission_key = post_data['submission_key']

    rubric_scores_complete = request.POST.get('rubric_scores_complete', False)
    rubric_scores = request.POST.getlist('rubric_scores', [])

    try:
        score = int(score)
    except ValueError:
        return util._error_response(
            "Expected integer score.  Got {0}".format(score),
            _INTERFACE_VERSION)

    try:
        sub = Submission.objects.get(id=submission_id)
    except Exception:
        return util._error_response(
            "grade_save_error",
            _INTERFACE_VERSION,
            data={
                "msg": "Submission id {0} is not valid.".format(submission_id)
            })

    d = {
        'submission_id': submission_id,
        'score': score,
        'feedback': feedback,
        'student_id': student_id,
        'location': location,
        'rubric_scores_complete': rubric_scores_complete,
        'rubric_scores': rubric_scores,
    }

    (success, data) = calibration.create_and_save_calibration_record(d)

    if not success:
        error_msg = "Failed to create and save calibration record. {0}".format(
            data)
        log.error(error_msg)
        return util._error_response(error_msg, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response(
        {
            'message': "Successfully saved calibration record.",
            'actual_score': data['actual_score'],
            'actual_rubric': data['actual_rubric'],
            'actual_feedback': data['actual_feedback']
        }, _INTERFACE_VERSION)
Example #25
def get_problem_list(request):
    """
    Get the list of problems that need grading in the course given by request.GET['course_id'].

    Returns:
        list of dicts with keys
           'location'
           'problem_name'
           'num_graded' -- number graded
           'num_pending' -- number pending in the queue
           'min_for_ml' -- minimum needed to make ML model
    """

    if request.method != "GET":
        error_message = "Request needs to be GET."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    course_id = request.GET.get("course_id")

    if not course_id:
        error_message = "Missing needed tag course_id"
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    locations_for_course = [
        x['location'] for x in list(
            Submission.objects.filter(
                course_id=course_id).values('location').distinct())
    ]

    if len(locations_for_course) == 0:
        error_message = "No problems associated with course."
        log.error(error_message)
        return util._error_response(error_message, _INTERFACE_VERSION)

    location_info = []
    for location in locations_for_course:
        problem_name = Submission.objects.filter(
            location=location)[0].problem_id
        submissions_pending = staff_grading_util.submissions_pending_instructor(
            location, state_in=[SubmissionState.waiting_to_be_graded]).count()
        finished_instructor_graded = staff_grading_util.finished_submissions_graded_by_instructor(
            location).count()
        min_scored_for_location = settings.MIN_TO_USE_PEER
        location_ml_count = Submission.objects.filter(
            location=location, preferred_grader_type="ML").count()
        if location_ml_count > 0:
            min_scored_for_location = settings.MIN_TO_USE_ML

        submissions_required = max(
            [0, min_scored_for_location - finished_instructor_graded])

        problem_name_from_location = location.split("://")[1]
        location_dict = {
            'location': location,
            'problem_name': problem_name,
            'problem_name_from_location': problem_name_from_location,
            'num_graded': finished_instructor_graded,
            'num_pending': submissions_pending,
            'num_required': submissions_required,
            'min_for_ml': settings.MIN_TO_USE_ML,
        }
        location_info.append(location_dict)

    util.log_connection_data()
    log.debug(location_info)
    return util._success_response({'problem_list': location_info},
                                  _INTERFACE_VERSION)
Example #26
def save_grade(request):
    """
    Supports POST requests with the following arguments:

    course_id: int
    grader_id: int
    submission_id: int
    score: int
    feedback: string

    Returns json dict with keys

    version: int
    success: bool
    error: string, present if not success
    """
    if request.method != "POST":
        return util._error_response("Request needs to be POST",
                                    _INTERFACE_VERSION)

    course_id = request.POST.get('course_id')
    grader_id = request.POST.get('grader_id')
    submission_id = request.POST.get('submission_id')
    score = request.POST.get('score')
    feedback = request.POST.get('feedback')
    skipped = request.POST.get('skipped') == "True"
    rubric_scores_complete = request.POST.get('rubric_scores_complete', False)
    rubric_scores = request.POST.getlist('rubric_scores', [])
    is_submission_flagged = request.POST.get('submission_flagged', False)
    if isinstance(is_submission_flagged, basestring):
        is_submission_flagged = is_submission_flagged.lower() == 'true'

    if (  # These have to be truthy
            not (course_id and grader_id and submission_id) or
            # These have to be non-None
            score is None or feedback is None):
        return util._error_response("required_parameter_missing",
                                    _INTERFACE_VERSION)

    if skipped:
        log.debug(submission_id)
        success, sub = staff_grading_util.set_instructor_grading_item_back_to_ml(
            submission_id)

        if not success:
            return util._error_response(sub, _INTERFACE_VERSION)

        return util._success_response({}, _INTERFACE_VERSION)

    try:
        score = int(score)
    except ValueError:
        return util._error_response(
            "grade_save_error",
            _INTERFACE_VERSION,
            data={"msg": "Expected integer score.  Got {0}".format(score)})

    try:
        sub = Submission.objects.get(id=submission_id)
    except Exception:
        return util._error_response(
            "grade_save_error",
            _INTERFACE_VERSION,
            data={
                "msg": "Submission id {0} is not valid.".format(submission_id)
            })

    first_sub_for_location = Submission.objects.filter(
        location=sub.location).order_by('date_created')[0]
    rubric = first_sub_for_location.rubric
    rubric_success, parsed_rubric = rubric_functions.parse_rubric(rubric)

    if rubric_success:
        success, error_message = grader_util.validate_rubric_scores(
            rubric_scores, rubric_scores_complete, sub)
        if not success:
            return util._error_response("grade_save_error",
                                        _INTERFACE_VERSION,
                                        data={"msg": error_message})

    d = {
        'submission_id': submission_id,
        'score': score,
        'feedback': feedback,
        'grader_id': grader_id,
        'grader_type': 'IN',
        # Humans always succeed (if they grade at all)...
        'status': GraderStatus.success,
        # ...and they're always confident too.
        'confidence': 1.0,
        #And they don't make errors
        'errors': "",
        'rubric_scores_complete': rubric_scores_complete,
        'rubric_scores': rubric_scores,
        'is_submission_flagged': is_submission_flagged,
    }

    success, header = grader_util.create_and_handle_grader_object(d)

    if not success:
        return util._error_response("grade_save_error",
                                    _INTERFACE_VERSION,
                                    data={'msg': 'Internal error'})

    util.log_connection_data()
    return util._success_response({}, _INTERFACE_VERSION)
Example #27
def save_grade(request):
    """
    Supports POST requests with the following arguments:

    location: string
    grader_id: int
    submission_id: int
    score: int
    feedback: string
    submission_key : string

    Returns json dict with keys

    version: int
    success: bool
    error: string, present if not success
    """
    if request.method != "POST":
        raise Http404

    post_data = request.POST.dict().copy()
    log.debug(post_data)

    for tag in [
            'location', 'grader_id', 'submission_id', 'submission_key',
            'score', 'feedback', 'submission_flagged'
    ]:
        if not tag in post_data:
            return util._error_response(
                "Cannot find needed key {0} in request.".format(tag),
                _INTERFACE_VERSION)

    location = post_data['location']
    grader_id = post_data['grader_id']
    submission_id = post_data['submission_id']

    #Submission key currently unused, but plan to use it for validation in the future.
    submission_key = post_data['submission_key']
    score = post_data['score']

    #This is done to ensure that response is properly formatted on the lms side.
    feedback_dict = post_data['feedback']

    rubric_scores_complete = request.POST.get('rubric_scores_complete', False)
    rubric_scores = request.POST.getlist('rubric_scores', [])

    is_submission_flagged = request.POST.get('submission_flagged', False)
    if isinstance(is_submission_flagged, basestring):
        is_submission_flagged = (is_submission_flagged.lower() == "true")

    status = GraderStatus.success
    confidence = 1.0

    is_answer_unknown = request.POST.get('answer_unknown', False)
    if isinstance(is_answer_unknown, basestring):
        is_answer_unknown = (is_answer_unknown.lower() == "true")

    if is_answer_unknown:
        status = GraderStatus.failure
        confidence = 0.0

    try:
        score = int(score)
    except ValueError:
        return util._error_response(
            "Expected integer score.  Got {0}".format(score),
            _INTERFACE_VERSION)

    try:
        sub = Submission.objects.get(id=submission_id)
    except Exception:
        return util._error_response(
            "grade_save_error",
            _INTERFACE_VERSION,
            data={
                "msg": "Submission id {0} is not valid.".format(submission_id)
            })

    success, error_message = grader_util.validate_rubric_scores(
        rubric_scores, rubric_scores_complete, sub)
    if not success:
        return util._error_response("grade_save_error",
                                    _INTERFACE_VERSION,
                                    data={"msg": error_message})

    d = {
        'submission_id': submission_id,
        'score': score,
        'feedback': feedback_dict,
        'grader_id': grader_id,
        'grader_type': 'PE',
        # Humans always succeed (if they grade at all)...
        'status': status,
        # ...and they're always confident too.
        'confidence': confidence,
        #And they don't make any errors
        'errors': "",
        'rubric_scores_complete': rubric_scores_complete,
        'rubric_scores': rubric_scores,
        'is_submission_flagged': is_submission_flagged,
    }

    #Currently not posting back to LMS.  Only saving grader object, and letting controller decide when to post back.
    (success, header) = grader_util.create_and_handle_grader_object(d)
    if not success:
        return util._error_response(
            "There was a problem saving the grade.  Contact support.",
            _INTERFACE_VERSION)

    #xqueue_session=util.xqueue_login()
    #error,msg = util.post_results_to_xqueue(xqueue_session,json.dumps(header),json.dumps(post_data))

    util.log_connection_data()
    return util._success_response({'msg': "Posted to queue."},
                                  _INTERFACE_VERSION)
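
A minimal client-side sketch of how the peer-grading save_grade view above might be called. The base URL and route are assumptions made for illustration; the field names mirror the keys the view requires, and rubric_scores is sent as a repeated form field so request.POST.getlist picks it up.

# Hedged sketch, not part of the controller: posts a peer grade to the
# save_grade view above. CONTROLLER_URL and the route are assumptions; the
# field names come from the view's required-keys check.
import requests

CONTROLLER_URL = "http://localhost:8000"  # assumed host, for illustration only

def post_peer_grade(session, submission_id, score, feedback):
    payload = {
        'location': 'i4x://org/course/problem/example',  # hypothetical location
        'grader_id': '42',
        'submission_id': str(submission_id),
        'submission_key': '',            # currently unused by the view
        'score': str(score),
        'feedback': feedback,
        'submission_flagged': 'false',
        'answer_unknown': 'false',
        'rubric_scores_complete': 'true',
        'rubric_scores': ['1', '2'],     # one entry per rubric item
    }
    response = session.post(CONTROLLER_URL + "/peer_grading/save_grade/", data=payload)
    return response.json()

# Example usage:
# session = requests.Session()
# result = post_peer_grade(session, submission_id=123, score=3, feedback="Clear thesis.")
# print(result.get('success'), result.get('error'))
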
Exemple #28
0
def save_grade(request):
    """
    Supports POST requests with the following arguments:

    location: string
    grader_id: int
    submission_id: int
    score: int
    feedback: string
    submission_key : string

    Returns json dict with keys

    version: int
    success: bool
    error: string, present if not success
    """
    if request.method != "POST":
        raise Http404

    post_data = request.POST.dict().copy()

    for tag in ['location', 'grader_id', 'submission_id', 'submission_key', 'score', 'feedback', 'submission_flagged']:
        if tag not in post_data:
            return util._error_response("Cannot find needed key {0} in request.".format(tag), _INTERFACE_VERSION)

    location = post_data['location']
    grader_id = post_data['grader_id']
    submission_id = post_data['submission_id']

    #Submission key currently unused, but plan to use it for validation in the future.
    submission_key = post_data['submission_key']
    score = post_data['score']

    #This is done to ensure that response is properly formatted on the lms side.
    feedback_dict = post_data['feedback']

    rubric_scores_complete = request.POST.get('rubric_scores_complete', False)
    rubric_scores = request.POST.getlist('rubric_scores', [])

    is_submission_flagged = request.POST.get('submission_flagged', False)
    if isinstance(is_submission_flagged, basestring):
        is_submission_flagged = (is_submission_flagged.lower()=="true")

    status = GraderStatus.success
    confidence = 1.0

    is_answer_unknown = request.POST.get('answer_unknown', False)
    if isinstance(is_answer_unknown, basestring):
        is_answer_unknown = (is_answer_unknown.lower()=="true")

    if is_answer_unknown:
        status = GraderStatus.failure
        confidence = 0.0

    try:
        score = int(score)
    except ValueError:
        #Score may not be filled out if answer_unknown or flagged
        if is_answer_unknown or is_submission_flagged:
            score = 0
        else:
            return util._error_response("Expected integer score.  Got {0}".format(score), _INTERFACE_VERSION)

    try:
        sub=Submission.objects.get(id=submission_id)
    except Exception:
        return util._error_response(
            "grade_save_error",
            _INTERFACE_VERSION,
            data={"msg": "Submission id {0} is not valid.".format(submission_id)}
        )

    #Patch to handle rubric scores in the case of "I don't know" or flagging if scores aren't filled out
    if (is_answer_unknown or is_submission_flagged) and len(rubric_scores) == 0:
        success, targets = rubric_functions.generate_targets_from_rubric(sub.rubric)
        rubric_scores = [0 for _ in targets]
        rubric_scores_complete = True

    success, error_message = grader_util.validate_rubric_scores(rubric_scores, rubric_scores_complete, sub)
    if not success:
        return util._error_response(
            "grade_save_error",
            _INTERFACE_VERSION,
            data={"msg": error_message}
        )

    d = {'submission_id': submission_id,
         'score': score,
         'feedback': feedback_dict,
         'grader_id': grader_id,
         'grader_type': 'PE',
         # Humans always succeed (if they grade at all)...
         'status': status,
         # ...and they're always confident too.
         'confidence': confidence,
         #And they don't make any errors
         'errors': "",
         'rubric_scores_complete': rubric_scores_complete,
         'rubric_scores': rubric_scores,
         'is_submission_flagged': is_submission_flagged,
    }

    #Currently not posting back to LMS.  Only saving grader object, and letting controller decide when to post back.
    (success, header) = grader_util.create_and_handle_grader_object(d)
    if not success:
        return util._error_response("There was a problem saving the grade.  Contact support.", _INTERFACE_VERSION)

    #xqueue_session=util.xqueue_login()
    #error,msg = util.post_results_to_xqueue(xqueue_session,json.dumps(header),json.dumps(post_data))

    util.log_connection_data()
    return util._success_response({'msg': "Posted to queue."}, _INTERFACE_VERSION)
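
The variant above adds a fallback for graders who flag a submission or mark the answer as unknown without entering a score or rubric. Below is a small, self-contained sketch of that decision, with the grouping the comment implies; the rubric helper is a stub standing in for rubric_functions.generate_targets_from_rubric, since only the shape of its output matters here.

# Hedged sketch of the "I don't know"/flagged fallback in the view above.
# The rubric-target helper is a stub; the real view calls
# rubric_functions.generate_targets_from_rubric(sub.rubric).

def stub_generate_targets_from_rubric(rubric):
    # Stand-in: pretend the rubric has three scored items.
    return True, [2, 2, 1]

def apply_bailout_fallback(score, rubric_scores, is_answer_unknown,
                           is_submission_flagged, rubric):
    """Return (score, rubric_scores, rubric_scores_complete) after the fallback."""
    rubric_scores_complete = len(rubric_scores) > 0
    # A missing score is only acceptable when the grader bailed out.
    if score is None:
        if is_answer_unknown or is_submission_flagged:
            score = 0
        else:
            raise ValueError("Expected integer score.")
    # Either bail-out condition, AND no rubric scores supplied, zero-fills the rubric.
    if (is_answer_unknown or is_submission_flagged) and len(rubric_scores) == 0:
        success, targets = stub_generate_targets_from_rubric(rubric)
        rubric_scores = [0 for _ in targets]
        rubric_scores_complete = True
    return score, rubric_scores, rubric_scores_complete

# apply_bailout_fallback(None, [], True, False, "<rubric/>") -> (0, [0, 0, 0], True)
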
Exemple #29
0
def handle_single_location(location):
    try:
        transaction.commit()
        gc.collect()
        sl = staff_grading_util.StaffLocation(location)
        subs_graded_by_instructor = sl.graded()
        log.info("Checking location {0} to see if essay count {1} greater than min {2}".format(
            location,
            subs_graded_by_instructor.count(),
            settings.MIN_TO_USE_ML,
        ))
        graded_sub_count=subs_graded_by_instructor.count()

        #check to see if there are enough instructor graded essays for location
        if graded_sub_count >= settings.MIN_TO_USE_ML:

            location_suffixes=ml_grading_util.generate_rubric_location_suffixes(subs_graded_by_instructor, grading=False)

            if settings.MAX_TO_USE_ML<graded_sub_count:
                graded_sub_count = settings.MAX_TO_USE_ML

            subs_graded_by_instructor  = subs_graded_by_instructor[:settings.MAX_TO_USE_ML]

            sub_rubric_scores=[]
            if len(location_suffixes)>0:
                for sub in subs_graded_by_instructor:
                    success, scores = controller.rubric_functions.get_submission_rubric_instructor_scores(sub)
                    sub_rubric_scores.append(scores)

            for m in xrange(0,len(location_suffixes)):
                log.info("Currently on location {0}.  Greater than zero is a rubric item.".format(m))
                suffix=location_suffixes[m]
                #Get paths to ml model from database
                relative_model_path, full_model_path= ml_grading_util.get_model_path(location + suffix)
                #Get last created model for given location
                transaction.commit()
                success, latest_created_model=ml_grading_util.get_latest_created_model(location + suffix)

                if success:
                    sub_count_diff=graded_sub_count-latest_created_model.number_of_essays
                else:
                    sub_count_diff = graded_sub_count

                #Retrain if no model exists, or every 5 graded essays.
                if not success or sub_count_diff>=5:

                    text = [str(i.student_response.encode('ascii', 'ignore')) for i in subs_graded_by_instructor]
                    ids=[i.id for i in subs_graded_by_instructor]

                    #TODO: Make queries more efficient
                    #This is for the basic overall score
                    if m==0:
                        scores = [z.get_last_grader().score for z in list(subs_graded_by_instructor)]
                    else:
                        scores=[z[m-1] for z in sub_rubric_scores]

                    #Get the first graded submission, so that we can extract metadata like rubric, etc, from it
                    first_sub=subs_graded_by_instructor[0]

                    prompt = str(first_sub.prompt.encode('ascii', 'ignore'))
                    rubric = str(first_sub.rubric.encode('ascii', 'ignore'))

                    transaction.commit()

                    #Checks to see if another model creator process has started a model for this location
                    success, model_started, created_model = ml_grading_util.check_if_model_started(location + suffix)

                    #Checks to see if model was started a long time ago, and removes and retries if it was.
                    if model_started:
                        now = timezone.now()
                        second_difference = (now - created_model.date_modified).total_seconds()
                        if second_difference > settings.TIME_BEFORE_REMOVING_STARTED_MODEL:
                            log.error("Model for location {0} started over {1} seconds ago, removing and re-attempting.".format(
                                location + suffix, settings.TIME_BEFORE_REMOVING_STARTED_MODEL))
                            created_model.delete()
                            model_started = False

                    if not model_started:
                        created_model_dict_initial={
                            'max_score' : first_sub.max_score,
                            'prompt' : prompt,
                            'rubric' : rubric,
                            'location' : location + suffix,
                            'course_id' : first_sub.course_id,
                            'submission_ids_used' : json.dumps(ids),
                            'problem_id' :  first_sub.problem_id,
                            'model_relative_path' : relative_model_path,
                            'model_full_path' : full_model_path,
                            'number_of_essays' : graded_sub_count,
                            'creation_succeeded': False,
                            'creation_started' : True,
                            'creation_finished' : False,
                            }
                        transaction.commit()
                        success, initial_id = ml_grading_util.save_created_model(created_model_dict_initial)
                        transaction.commit()

                        results = create.create(text, scores, prompt)

                        scores = [int(score_item) for score_item in scores]
                        #Add in needed stuff that ml creator does not pass back
                        results.update({'text' : text, 'score' : scores, 'model_path' : full_model_path,
                                        'relative_model_path' : relative_model_path, 'prompt' : prompt})

                        #Try to create model if ml model creator was successful
                        if results['success']:
                            try:
                                success, s3_public_url = save_model_file(results,settings.USE_S3_TO_STORE_MODELS)
                                results.update({'s3_public_url' : s3_public_url, 'success' : success})
                                if not success:
                                    results['errors'].append("Could not save model.")
                            except Exception:
                                results['errors'].append("Could not save model.")
                                results['s3_public_url'] = ""
                                log.exception("Problem saving ML model.")

                            created_model_dict_final={
                                'cv_kappa' : results['cv_kappa'],
                                'cv_mean_absolute_error' : results['cv_mean_absolute_error'],
                                'creation_succeeded': results['success'],
                                's3_public_url' : results['s3_public_url'],
                                'model_stored_in_s3' : settings.USE_S3_TO_STORE_MODELS,
                                's3_bucketname' : str(settings.S3_BUCKETNAME),
                                'creation_finished' : True,
                                'model_relative_path' : relative_model_path,
                                'model_full_path' : full_model_path,
                                'location' : location + suffix,
                                }

                            transaction.commit()
                            success, id = ml_grading_util.save_created_model(created_model_dict_final,update_model=True,update_id=initial_id)
                        else:
                            log.error("Could not create an ML model.  Have you installed all the needed requirements for ease?  This is for location {0} and rubric item {1}".format(location, m))

                        if not success:
                            log.error("ModelCreator creation failed.  Error: {0}".format(id))
                            statsd.increment("open_ended_assessment.grading_controller.call_ml_creator",
                                tags=["success:False", "location:{0}".format(location)])

                        log.info("Location: {0} Creation Status: {1} Errors: {2}".format(
                            full_model_path,
                            results['success'],
                            results['errors'],
                        ))
                        statsd.increment("open_ended_assessment.grading_controller.call_ml_creator",
                            tags=["success:{0}".format(results['success']), "location:{0}".format(location)])
        util.log_connection_data()
    except Exception:
        log.exception("Problem creating model for location {0}".format(location))
        statsd.increment("open_ended_assessment.grading_controller.call_ml_creator",
            tags=["success:Exception", "location:{0}".format(location)])
Exemple #30
0
def handle_single_item(controller_session):
    sub_get_success, content = get_item_from_controller(controller_session)
    #Grade and handle here
    if sub_get_success:
        transaction.commit()
        sub = Submission.objects.get(id=int(content['submission_id']))
        sl = staff_grading_util.StaffLocation(sub.location)
        subs_graded_by_instructor = sl.graded()
        first_sub = subs_graded_by_instructor.order_by('date_created')[0]
        parsed_rubric = rubric_functions.parse_rubric(first_sub.rubric)

        #strip out unicode and other characters in student response
        #Needed, or grader may potentially fail
        #TODO: Handle unicode in student responses properly
        student_response = sub.student_response.encode('ascii', 'ignore')

        #Get the latest created model for the given location
        transaction.commit()

        location_suffixes = ml_grading_util.generate_rubric_location_suffixes(
            subs_graded_by_instructor, grading=True)

        rubric_scores_complete = True
        rubric_scores = []

        for m in xrange(0, len(location_suffixes)):
            suffix = location_suffixes[m]
            success, created_model = ml_grading_util.get_latest_created_model(
                sub.location + suffix)

            if not success:
                log.error("Could not identify a valid created model!")
                if m == 0:
                    results = RESULT_FAILURE_DICT
                    formatted_feedback = "error"
                    status = GraderStatus.failure
                    statsd.increment(
                        "open_ended_assessment.grading_controller.call_ml_grader",
                        tags=["success:False"])

            else:

                #Create grader path from location in submission
                grader_path = os.path.join(settings.ML_MODEL_PATH,
                                           created_model.model_relative_path)
                model_stored_in_s3 = created_model.model_stored_in_s3

                success, grader_data = load_model_file(created_model,
                                                       use_full_path=False)
                if success:
                    results = grade.grade(grader_data, student_response)
                else:
                    results = RESULT_FAILURE_DICT

                #If the above fails, try using the full path in the created_model object
                if not results['success'] and not created_model.model_stored_in_s3:
                    grader_path = created_model.model_full_path
                    try:
                        success, grader_data = load_model_file(
                            created_model, use_full_path=True)
                        if success:
                            results = grade.grade(grader_data,
                                                  student_response)
                        else:
                            results = RESULT_FAILURE_DICT
                    except Exception:
                        error_message = "Could not find a valid model file."
                        log.exception(error_message)
                        results = RESULT_FAILURE_DICT

                log.info("ML Grader:  Success: {0} Errors: {1}".format(
                    results['success'], results['errors']))
                statsd.increment(
                    "open_ended_assessment.grading_controller.call_ml_grader",
                    tags=[
                        "success:{0}".format(results['success']),
                        'location:{0}'.format(sub.location)
                    ])

                #Set grader status according to success/fail
                if results['success']:
                    status = GraderStatus.success
                else:
                    status = GraderStatus.failure

            if m == 0:
                final_results = results
            elif not results['success']:
                rubric_scores_complete = False
            else:
                rubric_scores.append(int(results['score']))
        if len(rubric_scores) == 0:
            rubric_scores_complete = False

        grader_dict = {
            'score': int(final_results['score']),
            'feedback': json.dumps(results['feedback']),
            'status': status,
            'grader_id': 1,
            'grader_type': "ML",
            'confidence': results['confidence'],
            'submission_id': sub.id,
            'errors': ' '.join(results['errors']),
            'rubric_scores_complete': rubric_scores_complete,
            'rubric_scores': json.dumps(rubric_scores),
        }
        #Create grader object in controller by posting back results
        created, msg = util._http_post(
            controller_session,
            urlparse.urljoin(settings.GRADING_CONTROLLER_INTERFACE['url'],
                             project_urls.ControllerURLs.put_result),
            grader_dict,
            settings.REQUESTS_TIMEOUT,
        )
    else:
        log.error("Error getting item from controller or no items to get.")
        statsd.increment(
            "open_ended_assessment.grading_controller.call_ml_grader",
            tags=["success:False"])

    util.log_connection_data()
    return sub_get_success
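
When grading, handle_single_item first loads the model by its relative path and, if that fails and the model is not stored in S3, retries with the full path recorded on the created-model row. The sketch below condenses that control flow into one helper; load_model_file and grade.grade are the grader's own functions, and the failure dict's fields are assumed from how RESULT_FAILURE_DICT is read above.

# Hedged sketch of the two-step model load + grade from handle_single_item.
# The failure dict's fields are assumed from how results are read above.

ASSUMED_FAILURE_DICT = {'success': False, 'errors': ['Could not load model.'],
                        'score': 0, 'feedback': '', 'confidence': 0.0}

def grade_with_fallback(created_model, student_response, load_model_file, grade_fn):
    # First attempt: relative path under settings.ML_MODEL_PATH.
    success, grader_data = load_model_file(created_model, use_full_path=False)
    results = grade_fn(grader_data, student_response) if success else ASSUMED_FAILURE_DICT
    # Second attempt: absolute path stored on the model row, unless it lives in S3.
    if not results['success'] and not created_model.model_stored_in_s3:
        try:
            success, grader_data = load_model_file(created_model, use_full_path=True)
            if success:
                results = grade_fn(grader_data, student_response)
        except Exception:
            results = ASSUMED_FAILURE_DICT
    return results
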
Exemple #31
0
def get_next_submission(request):
    """
    Supports GET request with the following arguments:
    course_id -- the course for which to return a submission.
    grader_id -- LMS user_id of the requesting user

    Returns json dict with the following keys:

    version: '1'  (number)

    success: bool

    if success:
      'submission_id': a unique identifier for the submission, to be passed
                       back with the grade.

      'submission': the submission, rendered as read-only html for grading

      'rubric': the rubric, also rendered as html.

      'prompt': the question prompt, also rendered as html.

      'message': if there was no submission available, but nothing went wrong,
                there will be a message field.
    else:
      'error': if success is False, will have an error message with more info.
    """

    if request.method != "GET":
        raise Http404

    course_id = request.GET.get('course_id')
    grader_id = request.GET.get('grader_id')
    location = request.GET.get('location')

    log.debug(
        "Getting next submission for instructor grading for course: {0}.".
        format(course_id))

    if not (course_id or location) or not grader_id:

        return util._error_response("required_parameter_missing",
                                    _INTERFACE_VERSION)

    if location:
        (found, id) = staff_grading_util.get_single_instructor_grading_item_for_location(
            location)

    # TODO: save the grader id and match it in save_grade to make sure things
    # are consistent.
    if not location:
        (found,
         id) = staff_grading_util.get_single_instructor_grading_item(course_id)

    if not found:
        return util._success_response(
            {'message': 'No more submissions to grade.'}, _INTERFACE_VERSION)

    try:
        submission = Submission.objects.get(id=int(id))
    except Submission.DoesNotExist:
        log.error("Couldn't find submission %s for instructor grading", id)
        return util._error_response('failed_to_load_submission',
                                    _INTERFACE_VERSION,
                                    data={'submission_id': id})

    #Get error metrics from ml grading, and get into dictionary form to pass down to staff grading view
    success, ml_error_info = ml_grading_util.get_ml_errors(submission.location)
    if success:
        ml_error_message = staff_grading_util.generate_ml_error_message(
            ml_error_info)
    else:
        ml_error_message = ml_error_info

    ml_error_message = "Machine learning error information: " + ml_error_message

    if submission.state != 'C':
        log.error(
            "Instructor grading got a submission (%s) in an invalid state: %s",
            id, submission.state)
        return util._error_response('wrong_internal_state',
                                    _INTERFACE_VERSION,
                                    data={
                                        'submission_id': id,
                                        'submission_state': submission.state
                                    })

    num_graded, num_pending = staff_grading_util.count_submissions_graded_and_pending_instructor(
        submission.location)

    response = {
        'submission_id': id,
        'submission': submission.student_response,
        'rubric': submission.rubric,
        'prompt': submission.prompt,
        'max_score': submission.max_score,
        'ml_error_info': ml_error_message,
        'problem_name': submission.problem_id,
        'num_graded': staff_grading_util.finished_submissions_graded_by_instructor(
            submission.location).count(),
        'num_pending': staff_grading_util.submissions_pending_instructor(
            submission.location,
            state_in=[SubmissionState.waiting_to_be_graded]).count(),
        'min_for_ml': settings.MIN_TO_USE_ML,
    }

    util.log_connection_data()
    log.debug("Sending success response back to instructor grading!")
    log.debug("Sub id from get next: {0}".format(submission.id))
    return util._success_response(response, _INTERFACE_VERSION)
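
A minimal client-side sketch of calling the get_next_submission view above from staff grading. The base URL and route are assumptions for illustration; the query parameters (course_id, grader_id, and optional location) are the ones the view reads from request.GET, and a successful response carrying a 'message' key means nothing is waiting to be graded.

# Hedged sketch, not part of the controller: fetches the next instructor-grading
# item from the view above. CONTROLLER_URL and the route are assumptions.
import requests

CONTROLLER_URL = "http://localhost:8000"  # assumed host, for illustration only

def get_next_staff_submission(session, course_id, grader_id, location=None):
    params = {'course_id': course_id, 'grader_id': grader_id}
    if location:
        params['location'] = location
    response = session.get(CONTROLLER_URL + "/staff_grading/get_next_submission/",
                           params=params)
    data = response.json()
    if not data.get('success'):
        return None          # hard error; data['error'] holds details
    if 'message' in data:
        return None          # nothing left to grade right now
    return data              # submission_id, submission, rubric, prompt, max_score, ...

# session = requests.Session()
# item = get_next_staff_submission(session, "org/course/run", grader_id=42)
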
Exemple #32
0
def handle_single_location(location):
    try:
        transaction.commit()
        gc.collect()
        subs_graded_by_instructor = staff_grading_util.finished_submissions_graded_by_instructor(location)
        log.debug("Checking location {0} to see if essay count {1} greater than min {2}".format(
            location,
            subs_graded_by_instructor.count(),
            settings.MIN_TO_USE_ML,
        ))
        graded_sub_count=subs_graded_by_instructor.count()

        #check to see if there are enough instructor graded essays for location
        if graded_sub_count >= settings.MIN_TO_USE_ML:

            location_suffixes=ml_grading_util.generate_rubric_location_suffixes(subs_graded_by_instructor, grading=False)
            sub_rubric_scores=[]
            if len(location_suffixes)>0:
                for sub in subs_graded_by_instructor:
                    success, scores = controller.rubric_functions.get_submission_rubric_instructor_scores(sub)
                    sub_rubric_scores.append(scores)

            if settings.MAX_TO_USE_ML<graded_sub_count:
                graded_sub_count = settings.MAX_TO_USE_ML

            subs_graded_by_instructor  = subs_graded_by_instructor[:settings.MAX_TO_USE_ML]
            for m in xrange(0,len(location_suffixes)):
                log.debug("Currently on location {0}.  Greater than zero is a rubric item.".format(m))
                suffix=location_suffixes[m]
                #Get paths to ml model from database
                relative_model_path, full_model_path= ml_grading_util.get_model_path(location + suffix)
                #Get last created model for given location
                transaction.commit()
                success, latest_created_model=ml_grading_util.get_latest_created_model(location + suffix)

                if success:
                    sub_count_diff=graded_sub_count-latest_created_model.number_of_essays
                else:
                    sub_count_diff = graded_sub_count

                #Retrain if no model exists, or every 5 graded essays.
                if not success or sub_count_diff>=5:

                    text = [str(i.student_response.encode('ascii', 'ignore')) for i in subs_graded_by_instructor]
                    ids=[i.id for i in subs_graded_by_instructor]

                    #TODO: Make queries more efficient
                    #This is for the basic overall score
                    if m==0:
                        scores = [z.get_last_grader().score for z in list(subs_graded_by_instructor)]
                    else:
                        scores=[z[m-1] for z in sub_rubric_scores]

                    #Get the first graded submission, so that we can extract metadata like rubric, etc, from it
                    first_sub=subs_graded_by_instructor[0]

                    prompt = str(first_sub.prompt.encode('ascii', 'ignore'))
                    rubric = str(first_sub.rubric.encode('ascii', 'ignore'))

                    transaction.commit()

                    #Checks to see if another model creator process has started a model for this location
                    success, model_started, created_model = ml_grading_util.check_if_model_started(location + suffix)

                    #Checks to see if model was started a long time ago, and removes and retries if it was.
                    if model_started:
                        now = timezone.now()
                        second_difference = (now - created_model.date_modified).total_seconds()
                        if second_difference > settings.TIME_BEFORE_REMOVING_STARTED_MODEL:
                            log.error("Model for location {0} started over {1} seconds ago, removing and re-attempting.".format(
                                location + suffix, settings.TIME_BEFORE_REMOVING_STARTED_MODEL))
                            created_model.delete()
                            model_started = False

                    if not model_started:
                        created_model_dict_initial={
                            'max_score' : first_sub.max_score,
                            'prompt' : prompt,
                            'rubric' : rubric,
                            'location' : location + suffix,
                            'course_id' : first_sub.course_id,
                            'submission_ids_used' : json.dumps(ids),
                            'problem_id' :  first_sub.problem_id,
                            'model_relative_path' : relative_model_path,
                            'model_full_path' : full_model_path,
                            'number_of_essays' : graded_sub_count,
                            'creation_succeeded': False,
                            'creation_started' : True,
                            'creation_finished' : False,
                            }
                        transaction.commit()
                        success, initial_id = ml_grading_util.save_created_model(created_model_dict_initial)
                        transaction.commit()

                        results = create.create(text, scores, prompt)

                        scores = [int(score_item) for score_item in scores]
                        #Add in needed stuff that ml creator does not pass back
                        results.update({'text' : text, 'score' : scores, 'model_path' : full_model_path,
                                        'relative_model_path' : relative_model_path, 'prompt' : prompt})

                        #Try to create model if ml model creator was successful
                        if results['success']:
                            try:
                                success, s3_public_url = save_model_file(results,settings.USE_S3_TO_STORE_MODELS)
                                results.update({'s3_public_url' : s3_public_url, 'success' : success})
                                if not success:
                                    results['errors'].append("Could not save model.")
                            except Exception:
                                results['errors'].append("Could not save model.")
                                results['s3_public_url'] = ""
                                log.exception("Problem saving ML model.")

                        created_model_dict_final={
                            'cv_kappa' : results['cv_kappa'],
                            'cv_mean_absolute_error' : results['cv_mean_absolute_error'],
                            'creation_succeeded': results['success'],
                            's3_public_url' : results['s3_public_url'],
                            'model_stored_in_s3' : settings.USE_S3_TO_STORE_MODELS,
                            's3_bucketname' : str(settings.S3_BUCKETNAME),
                            'creation_finished' : True,
                            'model_relative_path' : relative_model_path,
                            'model_full_path' : full_model_path,
                            'location' : location + suffix,
                            }

                        transaction.commit()
                        success, id = ml_grading_util.save_created_model(created_model_dict_final,update_model=True,update_id=initial_id)

                        if not success:
                            log.error("ModelCreator creation failed.  Error: {0}".format(id))
                            statsd.increment("open_ended_assessment.grading_controller.call_ml_creator",
                                tags=["success:False", "location:{0}".format(location)])

                        log.debug("Location: {0} Creation Status: {1} Errors: {2}".format(
                            full_model_path,
                            results['success'],
                            results['errors'],
                        ))
                        statsd.increment("open_ended_assessment.grading_controller.call_ml_creator",
                            tags=["success:{0}".format(results['success']), "location:{0}".format(location)])
        util.log_connection_data()
    except Exception:
        log.exception("Problem creating model for location {0}".format(location))
        statsd.increment("open_ended_assessment.grading_controller.call_ml_creator",
            tags=["success:Exception", "location:{0}".format(location)])
Exemple #33
0
def get_next_submission(request):
    """
    Supports GET request with the following arguments:
    course_id -- the course for which to return a submission.
    grader_id -- LMS user_id of the requesting user

    Returns json dict with the following keys:

    version: '1'  (number)

    success: bool

    if success:
      'submission_id': a unique identifier for the submission, to be passed
                       back with the grade.

      'submission': the submission, rendered as read-only html for grading

      'rubric': the rubric, also rendered as html.

      'prompt': the question prompt, also rendered as html.

      'message': if there was no submission available, but nothing went wrong,
                there will be a message field.
    else:
      'error': if success is False, will have an error message with more info.
    """

    if request.method != "GET":
        raise Http404

    course_id = request.GET.get('course_id')
    grader_id = request.GET.get('grader_id')
    location = request.GET.get('location')

    log.debug("Getting next submission for instructor grading for course: {0}."
              .format(course_id))


    if not (course_id or location) or not grader_id:
        return util._error_response("required_parameter_missing", _INTERFACE_VERSION)

    if location:
        (found, id) = staff_grading_util.get_single_instructor_grading_item_for_location(location)

    # TODO: save the grader id and match it in save_grade to make sure things
    # are consistent.
    if not location:
        (found, id) = staff_grading_util.get_single_instructor_grading_item(course_id)

    if not found:
        return util._success_response({'message': 'No more submissions to grade.'},
                                      _INTERFACE_VERSION)

    try:
        submission = Submission.objects.get(id=int(id))
    except Submission.DoesNotExist:
        log.error("Couldn't find submission %s for instructor grading", id)
        return util._error_response('failed_to_load_submission',
                                    _INTERFACE_VERSION,
                                    data={'submission_id': id})

    #Get error metrics from ml grading, and get into dictionary form to pass down to staff grading view
    success, ml_error_info=ml_grading_util.get_ml_errors(submission.location)
    if success:
        ml_error_message=staff_grading_util.generate_ml_error_message(ml_error_info)
    else:
        ml_error_message=ml_error_info

    ml_error_message="Machine learning error information: " + ml_error_message

    if submission.state != 'C':
        log.error("Instructor grading got a submission (%s) in an invalid state: ",
            id, submission.state)
        return util._error_response('wrong_internal_state',
                                    _INTERFACE_VERSION,
                                    data={'submission_id': id,
                                     'submission_state': submission.state})

    num_graded, num_pending = staff_grading_util.count_submissions_graded_and_pending_instructor(submission.location)

    response = {'submission_id': id,
                'submission': submission.student_response,
                'rubric': submission.rubric,
                'prompt': submission.prompt,
                'max_score': submission.max_score,
                'ml_error_info' : ml_error_message,
                'problem_name' : submission.problem_id,
                'num_graded' : staff_grading_util.finished_submissions_graded_by_instructor(submission.location).count(),
                'num_pending' : staff_grading_util.submissions_pending_instructor(submission.location, 
                                    state_in=[SubmissionState.waiting_to_be_graded]).count(),
                'min_for_ml' : settings.MIN_TO_USE_ML,
                }

    util.log_connection_data()
    log.debug("Sending success response back to instructor grading!")
    log.debug("Sub id from get next: {0}".format(submission.id))
    return util._success_response(response, _INTERFACE_VERSION)