Example #1
    def grade_answers(self, answers):
        """
        Grade student responses.  Called by capa_module.check_problem.

        `answers` is a dict of all the entries from request.POST, but with the first part
        of each key removed (the string before the first "_").

        Thus, for example, input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123

        Calls the Response for each question in this problem, to do the actual grading.
        """

        # if answers include File objects, convert them to filenames.
        self.student_answers = convert_files_to_filenames(answers)
        return self._grade_answers(answers)
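The docstring above describes how the caller strips the leading form-field prefix from each request.POST key before grading. A minimal sketch of that key transformation on a plain dict, assuming the hypothetical helper name strip_input_prefix (not part of capa):

    # Minimal sketch of the key mapping described in the docstring above.
    # NOTE: strip_input_prefix is a hypothetical name, not a capa function.
    def strip_input_prefix(post_entries):
        """Drop everything up to and including the first '_' in each key."""
        return {key.split('_', 1)[1]: value for key, value in post_entries.items()}

    raw = {'input_ID123': '42', 'input_fromjs_ID123': '{"done": true}'}
    print(strip_input_prefix(raw))
    # {'ID123': '42', 'fromjs_ID123': '{"done": true}'}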
Example #2
    def grade_answers(self, answers):
        """
        Grade student responses.  Called by capa_module.check_problem.

        `answers` is a dict of all the entries from request.POST, but with the first part
        of each key removed (the string before the first "_").

        Thus, for example, input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123

        Calls the Response for each question in this problem, to do the actual grading.
        """

        # if answers include File objects, convert them to filenames.
        self.student_answers = convert_files_to_filenames(answers)
        return self._grade_answers(answers)
Example #3
    def check_problem(self, data):
        """
        Checks whether answers to a problem are correct

        Returns a map of correct/incorrect answers:
          {'success' : 'correct' | 'incorrect' | AJAX alert msg string,
           'contents' : html}
        """
        event_info = dict()
        event_info['state'] = self.lcp.get_state()
        event_info['problem_id'] = self.location.url()

        answers = self.make_dict_of_responses(data)
        event_info['answers'] = convert_files_to_filenames(answers)

        # Too late. Cannot submit
        if self.closed():
            event_info['failure'] = 'closed'
            self.system.track_function('problem_check_fail', event_info)
            raise NotFoundError('Problem is closed')

        # Problem submitted. Student should reset before checking again
        if self.done and self.rerandomize == "always":
            event_info['failure'] = 'unreset'
            self.system.track_function('problem_check_fail', event_info)
            raise NotFoundError('Problem must be reset before it can be checked again')

        # Problem queued. Students must wait a specified waittime before they are allowed to submit
        if self.lcp.is_queued():
            current_time = datetime.datetime.now(UTC())
            prev_submit_time = self.lcp.get_recentmost_queuetime()
            waittime_between_requests = self.system.xqueue['waittime']
            if (current_time - prev_submit_time).total_seconds() < waittime_between_requests:
                msg = u'You must wait at least {wait} seconds between submissions'.format(
                    wait=waittime_between_requests)
                return {'success': msg, 'html': ''}  # Prompts a modal dialog in ajax callback

        try:
            correct_map = self.lcp.grade_answers(answers)
            self.attempts = self.attempts + 1
            self.lcp.done = True
            self.set_state_from_lcp()

        except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
            log.warning("StudentInputError in capa_module:problem_check",
                        exc_info=True)

            # If the user is a staff member, include
            # the full exception, including traceback,
            # in the response
            if self.system.user_is_staff:
                msg = u"Staff debug info: {tb}".format(tb=cgi.escape(traceback.format_exc()))

            # Otherwise, display just an error message,
            # without a stack trace
            else:
                msg = u"Ошибка: {msg}".format(msg=inst.message)

            return {'success': msg}

        except Exception as err:
            if self.system.DEBUG:
                msg = u"Error checking problem: {}".format(err.message)
                msg += u'\nTraceback:\n{}'.format(traceback.format_exc())
                return {'success': msg}
            raise

        self.publish_grade()

        # success = correct if ALL questions in this problem are correct
        success = 'correct'
        for answer_id in correct_map:
            if not correct_map.is_correct(answer_id):
                success = 'incorrect'

        # NOTE: We are logging both full grading and queued-grading submissions. In the latter,
        #       'success' will always be incorrect
        event_info['correct_map'] = correct_map.get_dict()
        event_info['success'] = success
        event_info['attempts'] = self.attempts
        self.system.track_function('problem_check', event_info)

        if hasattr(self.system, 'psychometrics_handler'):  # update PsychometricsData using callback
            self.system.psychometrics_handler(self.get_state_for_lcp())

        # render problem into HTML
        html = self.get_problem_html(encapsulate=False)

        return {'success': success,
                'contents': html,
                }
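The aggregation loop above marks the whole problem 'incorrect' as soon as any single answer in the CorrectMap is wrong. A condensed sketch of that logic, using a plain dict as a stand-in for the real CorrectMap object:

    # Plain-dict stand-in for CorrectMap: answer_id -> correctness string.
    # The real CorrectMap carries more per-answer data; only correctness matters here.
    correct_map = {'ID123_2_1': 'correct', 'ID123_3_1': 'incorrect'}

    # success is 'correct' only if every question in the problem is correct.
    success = 'correct'
    for answer_id in correct_map:
        if correct_map[answer_id] != 'correct':
            success = 'incorrect'

    print(success)  # 'incorrect'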
Example #4
    def check_problem(self, data):
        """
        Checks whether answers to a problem are correct

        Returns a map of correct/incorrect answers:
          {'success' : 'correct' | 'incorrect' | AJAX alert msg string,
           'contents' : html}
        """
        event_info = dict()
        event_info["state"] = self.lcp.get_state()
        event_info["problem_id"] = self.location.url()

        answers = self.make_dict_of_responses(data)
        event_info["answers"] = convert_files_to_filenames(answers)

        # Too late. Cannot submit
        if self.closed():
            event_info["failure"] = "closed"
            self.system.track_function("problem_check_fail", event_info)
            raise NotFoundError("Problem is closed")

        # Problem submitted. Student should reset before checking again
        if self.done and self.rerandomize == "always":
            event_info["failure"] = "unreset"
            self.system.track_function("problem_check_fail", event_info)
            raise NotFoundError("Problem must be reset before it can be checked again")

        # Problem queued. Students must wait a specified waittime before they are allowed to submit
        if self.lcp.is_queued():
            current_time = datetime.datetime.now(UTC())
            prev_submit_time = self.lcp.get_recentmost_queuetime()
            waittime_between_requests = self.system.xqueue["waittime"]
            if (current_time - prev_submit_time).total_seconds() < waittime_between_requests:
                msg = u"You must wait at least {wait} seconds between submissions".format(
                    wait=waittime_between_requests
                )
                return {"success": msg, "html": ""}  # Prompts a modal dialog in ajax callback

        try:
            correct_map = self.lcp.grade_answers(answers)
            self.attempts = self.attempts + 1
            self.lcp.done = True
            self.set_state_from_lcp()

        except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
            log.warning("StudentInputError in capa_module:problem_check", exc_info=True)

            # If the user is a staff member, include
            # the full exception, including traceback,
            # in the response
            if self.system.user_is_staff:
                msg = u"Staff debug info: {tb}".format(tb=cgi.escape(traceback.format_exc()))

            # Otherwise, display just an error message,
            # without a stack trace
            else:
                msg = u"Error: {msg}".format(msg=inst.message)

            return {"success": msg}

        except Exception as err:
            if self.system.DEBUG:
                msg = u"Error checking problem: {}".format(err.message)
                msg += u"\nTraceback:\n{}".format(traceback.format_exc())
                return {"success": msg}
            raise

        published_grade = self.publish_grade()

        # success = correct if ALL questions in this problem are correct
        success = "correct"
        for answer_id in correct_map:
            if not correct_map.is_correct(answer_id):
                success = "incorrect"

        # NOTE: We are logging both full grading and queued-grading submissions. In the latter,
        #       'success' will always be incorrect
        event_info["grade"] = published_grade["grade"]
        event_info["max_grade"] = published_grade["max_grade"]
        event_info["correct_map"] = correct_map.get_dict()
        event_info["success"] = success
        event_info["attempts"] = self.attempts
        self.system.track_function("problem_check", event_info)

        if hasattr(self.system, "psychometrics_handler"):  # update PsychometricsData using callback
            self.system.psychometrics_handler(self.get_state_for_lcp())

        # render problem into HTML
        html = self.get_problem_html(encapsulate=False)

        return {"success": success, "contents": html}
Example #5
    def check_problem(self, get):
        ''' Checks whether answers to a problem are correct, and
            returns a map of correct/incorrect answers:

            {'success' : 'correct' | 'incorrect' | AJAX alert msg string,
             'contents' : html}
            '''
        event_info = dict()
        event_info['state'] = self.lcp.get_state()
        event_info['problem_id'] = self.location.url()

        answers = self.make_dict_of_responses(get)
        event_info['answers'] = convert_files_to_filenames(answers)
        # Too late. Cannot submit
        if self.closed():
            event_info['failure'] = 'closed'
            self.system.track_function('save_problem_check_fail', event_info)
            raise NotFoundError('Problem is closed')

        # Problem submitted. Student should reset before checking again
        if self.done and self.rerandomize == "always":
            event_info['failure'] = 'unreset'
            self.system.track_function('save_problem_check_fail', event_info)
            raise NotFoundError('Problem must be reset before it can be checked again')

        # Problem queued. Students must wait a specified waittime before they are allowed to submit
        if self.lcp.is_queued():
            current_time = datetime.datetime.now(UTC())
            prev_submit_time = self.lcp.get_recentmost_queuetime()
            waittime_between_requests = self.system.xqueue['waittime']
            if (current_time - prev_submit_time).total_seconds() < waittime_between_requests:
                msg = 'You must wait at least %d seconds between submissions' % waittime_between_requests
                return {'success': msg, 'html': ''}  # Prompts a modal dialog in ajax callback

        try:
            correct_map = self.lcp.grade_answers(answers)
            self.set_state_from_lcp()

        except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
            log.warning("StudentInputError in capa_module:problem_check",
                        exc_info=True)

            # If the user is a staff member, include
            # the full exception, including traceback,
            # in the response
            if self.system.user_is_staff:
                msg = "Staff debug info: %s" % traceback.format_exc()

            # Otherwise, display just an error message,
            # without a stack trace
            else:
                msg = "Error: %s" % str(inst.message)

            return {'success': msg}

        except Exception as err:
            if self.system.DEBUG:
                msg = "Error checking problem: " + str(err)
                msg += '\nTraceback:\n' + traceback.format_exc()
                return {'success': msg}
            raise
Example #6
    def check_problem(self, data):
        """
        Checks whether answers to a problem are correct

        Returns a map of correct/incorrect answers:
          {'success' : 'correct' | 'incorrect' | AJAX alert msg string,
           'contents' : html}
        """
        event_info = dict()
        event_info['state'] = self.lcp.get_state()
        event_info['problem_id'] = self.location.url()

        answers = self.make_dict_of_responses(data)
        event_info['answers'] = convert_files_to_filenames(answers)

        # Too late. Cannot submit
        if self.closed():
            event_info['failure'] = 'closed'
            self.system.track_function('problem_check_fail', event_info)
            raise NotFoundError('Problem is closed')

        # Problem submitted. Student should reset before checking again
        if self.done and self.rerandomize == "always":
            event_info['failure'] = 'unreset'
            self.system.track_function('problem_check_fail', event_info)
            raise NotFoundError(
                'Problem must be reset before it can be checked again')

        # Problem queued. Students must wait a specified waittime before they are allowed to submit
        if self.lcp.is_queued():
            current_time = datetime.datetime.now(UTC())
            prev_submit_time = self.lcp.get_recentmost_queuetime()
            waittime_between_requests = self.system.xqueue['waittime']
            if (current_time - prev_submit_time).total_seconds() < waittime_between_requests:
                msg = u'You must wait at least {wait} seconds between submissions'.format(
                    wait=waittime_between_requests)
                return {
                    'success': msg,
                    'html': ''
                }  # Prompts a modal dialog in ajax callback

        try:
            correct_map = self.lcp.grade_answers(answers)
            self.attempts = self.attempts + 1
            self.lcp.done = True
            self.set_state_from_lcp()

        except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
            log.warning("StudentInputError in capa_module:problem_check",
                        exc_info=True)

            # If the user is a staff member, include
            # the full exception, including traceback,
            # in the response
            if self.system.user_is_staff:
                msg = u"Staff debug info: {tb}".format(
                    tb=cgi.escape(traceback.format_exc()))

            # Otherwise, display just an error message,
            # without a stack trace
            else:
                msg = u"Error: {msg}".format(msg=inst.message)

            return {'success': msg}

        except Exception as err:
            if self.system.DEBUG:
                msg = u"Error checking problem: {}".format(err.message)
                msg += u'\nTraceback:\n{}'.format(traceback.format_exc())
                return {'success': msg}
            raise

        self.publish_grade()

        # success = correct if ALL questions in this problem are correct
        success = 'correct'
        for answer_id in correct_map:
            if not correct_map.is_correct(answer_id):
                success = 'incorrect'

        # NOTE: We are logging both full grading and queued-grading submissions. In the latter,
        #       'success' will always be incorrect
        event_info['correct_map'] = correct_map.get_dict()
        event_info['success'] = success
        event_info['attempts'] = self.attempts
        self.system.track_function('problem_check', event_info)

        if hasattr(self.system, 'psychometrics_handler'):  # update PsychometricsData using callback
            self.system.psychometrics_handler(self.get_state_for_lcp())

        # render problem into HTML
        html = self.get_problem_html(encapsulate=False)

        return {
            'success': success,
            'contents': html,
        }
Example #7
    def check_problem(self, data):
        """
        Checks whether answers to a problem are correct

        Returns a map of correct/incorrect answers:
          {'success' : 'correct' | 'incorrect' | AJAX alert msg string,
           'contents' : html}
        """
        event_info = dict()
        event_info['state'] = self.lcp.get_state()
        event_info['problem_id'] = self.location.url()

        answers = self.make_dict_of_responses(data)
        answers_without_files = convert_files_to_filenames(answers)
        event_info['answers'] = answers_without_files

        metric_name = u'capa.check_problem.{}'.format

        _ = self.runtime.service(self, "i18n").ugettext

        # Too late. Cannot submit
        if self.closed():
            event_info['failure'] = 'closed'
            self.runtime.track_function('problem_check_fail', event_info)
            if dog_stats_api:
                dog_stats_api.increment(metric_name('checks'), [u'result:failed', u'failure:closed'])
            raise NotFoundError(_("Problem is closed."))

        # Problem submitted. Student should reset before checking again
        if self.done and self.rerandomize == "always":
            event_info['failure'] = 'unreset'
            self.runtime.track_function('problem_check_fail', event_info)
            if dog_stats_api:
                dog_stats_api.increment(metric_name('checks'), [u'result:failed', u'failure:unreset'])
            raise NotFoundError(_("Problem must be reset before it can be checked again."))

        # Problem queued. Students must wait a specified waittime before they are allowed to submit
        if self.lcp.is_queued():
            current_time = datetime.datetime.now(UTC())
            prev_submit_time = self.lcp.get_recentmost_queuetime()
            waittime_between_requests = self.runtime.xqueue['waittime']
            if (current_time - prev_submit_time).total_seconds() < waittime_between_requests:
                msg = _(u"You must wait at least {wait} seconds between submissions.").format(
                    wait=waittime_between_requests)
                return {'success': msg, 'html': ''}  # Prompts a modal dialog in ajax callback

        try:
            correct_map = self.lcp.grade_answers(answers)
            self.attempts = self.attempts + 1
            self.lcp.done = True
            self.set_state_from_lcp()

        except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
            log.warning("StudentInputError in capa_module:problem_check",
                        exc_info=True)

            # Save the user's state before failing
            self.set_state_from_lcp()

            # If the user is a staff member, include
            # the full exception, including traceback,
            # in the response
            if self.runtime.user_is_staff:
                msg = u"Staff debug info: {tb}".format(tb=cgi.escape(traceback.format_exc()))

            # Otherwise, display just an error message,
            # without a stack trace
            else:
                # Translators: {msg} will be replaced with a problem's error message.
                msg = _(u"Error: {msg}").format(msg=inst.message)

            return {'success': msg}

        except Exception as err:
            # Save the user's state before failing
            self.set_state_from_lcp()

            if self.runtime.DEBUG:
                msg = u"Error checking problem: {}".format(err.message)
                msg += u'\nTraceback:\n{}'.format(traceback.format_exc())
                return {'success': msg}
            raise

        published_grade = self.publish_grade()

        # success = correct if ALL questions in this problem are correct
        success = 'correct'
        for answer_id in correct_map:
            if not correct_map.is_correct(answer_id):
                success = 'incorrect'

        # NOTE: We are logging both full grading and queued-grading submissions. In the latter,
        #       'success' will always be incorrect
        event_info['grade'] = published_grade['grade']
        event_info['max_grade'] = published_grade['max_grade']
        event_info['correct_map'] = correct_map.get_dict()
        event_info['success'] = success
        event_info['attempts'] = self.attempts
        event_info['submission'] = self.get_submission_metadata_safe(answers_without_files, correct_map)
        self.runtime.track_function('problem_check', event_info)

        if dog_stats_api:
            dog_stats_api.increment(metric_name('checks'), [u'result:success'])
            dog_stats_api.histogram(
                metric_name('correct_pct'),
                float(published_grade['grade']) / published_grade['max_grade'],
            )
            dog_stats_api.histogram(
                metric_name('attempts'),
                self.attempts,
            )

        if hasattr(self.runtime, 'psychometrics_handler'):  # update PsychometricsData using callback
            self.runtime.psychometrics_handler(self.get_state_for_lcp())

        # render problem into HTML
        html = self.get_problem_html(encapsulate=False)

        return {
            'success': success,
            'contents': html,
        }
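Every check_problem variant above throttles resubmission of queued problems with the same elapsed-time check against xqueue's configured wait time. A standalone sketch of that check, assuming timezone-naive UTC timestamps; the helper name may_resubmit is made up for illustration:

    import datetime

    # Standalone sketch of the resubmission throttle; `waittime` mirrors
    # self.system.xqueue['waittime'], and may_resubmit is a hypothetical helper.
    def may_resubmit(prev_submit_time, waittime, now=None):
        """Return (allowed, message); disallow if fewer than `waittime` seconds have passed."""
        now = now or datetime.datetime.utcnow()
        if (now - prev_submit_time).total_seconds() < waittime:
            msg = u'You must wait at least {wait} seconds between submissions'.format(
                wait=waittime)
            return False, msg
        return True, u''

    prev = datetime.datetime.utcnow() - datetime.timedelta(seconds=10)
    print(may_resubmit(prev, waittime=30))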
Example #8
    def check_problem(self, get):
        ''' Checks whether answers to a problem are correct, and
            returns a map of correct/incorrect answers:

            {'success' : 'correct' | 'incorrect' | AJAX alert msg string,
             'contents' : html}
            '''
        event_info = dict()
        event_info['state'] = self.lcp.get_state()
        event_info['problem_id'] = self.location.url()

        answers = self.make_dict_of_responses(get)
        event_info['answers'] = convert_files_to_filenames(answers)
        # Too late. Cannot submit
        if self.closed():
            event_info['failure'] = 'closed'
            self.system.track_function('save_problem_check_fail', event_info)
            raise NotFoundError('Problem is closed')

        # Problem submitted. Student should reset before checking again
        if self.done and self.rerandomize == "always":
            event_info['failure'] = 'unreset'
            self.system.track_function('save_problem_check_fail', event_info)
            raise NotFoundError(
                'Problem must be reset before it can be checked again')

        # Problem queued. Students must wait a specified waittime before they are allowed to submit
        if self.lcp.is_queued():
            current_time = datetime.datetime.now(UTC())
            prev_submit_time = self.lcp.get_recentmost_queuetime()
            waittime_between_requests = self.system.xqueue['waittime']
            if (current_time - prev_submit_time).total_seconds() < waittime_between_requests:
                msg = 'You must wait at least %d seconds between submissions' % waittime_between_requests
                return {
                    'success': msg,
                    'html': ''
                }  # Prompts a modal dialog in ajax callback

        try:
            correct_map = self.lcp.grade_answers(answers)
            self.set_state_from_lcp()

        except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
            log.warning("StudentInputError in capa_module:problem_check",
                        exc_info=True)

            # If the user is a staff member, include
            # the full exception, including traceback,
            # in the response
            if self.system.user_is_staff:
                msg = "Staff debug info: %s" % traceback.format_exc()

            # Otherwise, display just an error message,
            # without a stack trace
            else:
                msg = "Error: %s" % str(inst.message)

            return {'success': msg}

        except Exception as err:
            if self.system.DEBUG:
                msg = "Error checking problem: " + str(err)
                msg += '\nTraceback:\n' + traceback.format_exc()
                return {'success': msg}
            raise
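As the docstrings note, 'success' in the returned dict is either 'correct', 'incorrect', or a free-form alert string that the front end shows in a modal dialog. A hedged sketch of how a caller might branch on that contract (handle_check_result is a hypothetical name, not part of the edX front end):

    # Hypothetical caller-side handling of the check_problem return contract.
    def handle_check_result(result):
        success = result['success']
        if success in ('correct', 'incorrect'):
            # Normal grading outcome: swap in the re-rendered problem HTML.
            return 'render', result.get('contents', '')
        # Any other string is an alert message (e.g. the wait-time warning)
        # that the AJAX callback is expected to show in a modal dialog.
        return 'modal', success

    print(handle_check_result({'success': 'correct', 'contents': '<div>...</div>'}))
    print(handle_check_result({'success': 'You must wait at least 30 seconds between submissions'}))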