def create(cls, max_attempts=None, attempts=None, correct=False, last_submission_time=None, submission_wait_seconds=None):
    """
    Build a CapaModule instance wired to a mock descriptor and test system.

    Optional parameters here are cut down to what we actually use vs. the
    regular CapaFactory.
    """
    block_location = BlockUsageLocator(
        CourseLocator('edX', 'capa_test', 'run', deprecated=True),
        'problem',
        'SampleProblem{0}'.format(cls.next_num()),
        deprecated=True,
    )

    # Assemble field data, including only the optional values callers supplied.
    fields = {'data': cls.sample_problem_xml}
    if max_attempts is not None:
        fields['max_attempts'] = max_attempts
    if last_submission_time is not None:
        fields['last_submission_time'] = last_submission_time
    if submission_wait_seconds is not None:
        fields['submission_wait_seconds'] = submission_wait_seconds
    if attempts is not None:
        # converting to int here because I keep putting "0" and "1" in the tests
        # since everything else is a string.
        fields['attempts'] = int(attempts)

    mock_descriptor = Mock(weight="1")
    test_system = get_test_system()
    test_system.render_template = Mock(return_value="<div>Test Template HTML</div>")

    module = CapaModule(
        mock_descriptor,
        test_system,
        DictFieldData(fields),
        ScopeIds(None, None, block_location, block_location),
    )

    # Could set the internal state formally, but here we just jam in the score.
    module.score = Score(raw_earned=1 if correct else 0, raw_possible=1)
    return module
def calculate_score(self):
    """
    Returns the score calculated from the current problem state.
    """
    # Awards full points for completing a survey: 1 earned out of 1 possible.
    return Score(raw_earned=1, raw_possible=1)
def from_json(self, value):
    """
    Deserialize ``value`` into a ``Score``.

    Accepts None and already-built Score instances unchanged; otherwise the
    value must be a mapping with exactly the keys ``raw_earned`` and
    ``raw_possible``, with ``0 <= raw_earned <= raw_possible``.
    """
    # Pass through the two representations that need no conversion.
    if value is None or isinstance(value, Score):
        return value

    if set(value) != {'raw_earned', 'raw_possible'}:
        raise TypeError('Scores must contain only a raw earned and raw possible value. Got {}'.format(
            set(value)
        ))

    raw_earned = value['raw_earned']
    raw_possible = value['raw_possible']

    # Validate the possible-points value first, then the earned range.
    if raw_possible < 0:
        raise ValueError(
            'Error deserializing field of type {0}: Expected a positive number for raw_possible, got {1}.'.format(
                self.display_name,
                raw_possible,
            )
        )
    if not 0 <= raw_earned <= raw_possible:
        raise ValueError(
            'Error deserializing field of type {0}: Expected raw_earned between 0 and {1}, got {2}.'.format(
                self.display_name, raw_possible, raw_earned
            )
        )
    return Score(raw_earned, raw_possible)
def get_score(self):
    """
    Return the problem's current score as raw values.
    """
    # Persist the stored score as the raw earned value before reporting it.
    # NOTE(review): self.score is copied into raw_earned as-is -- confirm it
    # holds a raw number here (elsewhere in the project `score` is a Score tuple).
    earned = self.score
    self.raw_earned = earned
    return Score(earned, self.max_score())
def _process_result(result):
    """
    Inspect an async task result and translate it into a status dict.

    ``result`` is an AsyncResult-style object (has ready()/successful()/.result).
    Uses ``self`` from the enclosing method's scope (this is a closure).
    Side effects: clears self.check_id when the task finished, stores the
    status dict on self.check_status, and publishes a grade on success.
    """
    if result.ready():
        # Clear the task ID so we know there is no task running.
        self.check_id = ""
        # Success requires a dict payload without an 'error' entry.
        if (result.successful() and isinstance(result.result, dict)
                and not result.result.get('error')):
            status = result.result
            # Payload carries 'pass' (earned) and 'total' (possible) points.
            score = Score(raw_earned=status['pass'], raw_possible=status['total'])
            self.set_score(score)
            # A publish event is necessary for calculating grades
            self.publish_grade()
        else:
            status = {
                'status': 'ERROR',
                'error_msg': 'Unexpected result: %s' % repr(result.result)  # noqa: E501
            }
    else:
        # Task still running -- report progress-pending to the caller.
        status = {'status': 'CHECK_PROGRESS_PENDING'}
    # Store the result
    self.check_status = status
    return status
def test_problem_score_override(self, problem_weight, override_score):
    """
    Verify that it allows score override for problem weight greater than 1.
    """
    # Check assumptions / initial conditions:
    block = self.block
    block.weight = problem_weight
    block.save()
    self.assertEqual(block.raw_possible, 1)
    self.assertEqual(block.raw_earned, 0)
    self.assertFalse(block.completed)

    expected_earned_score = round(problem_weight * override_score, 2)

    # Override score: raw values are the weighted override scaled back down.
    learner_override = Score(
        raw_earned=override_score / block.weight,
        raw_possible=block.max_score() / block.weight,
    )
    block.set_score(learner_override)
    block.save()

    self.assertEqual(block.score, learner_override)
    self.assertEqual(round(block.weighted_grade(), 1), expected_earned_score)
def get_score(self):
    """
    Returns user's current (saved) score for the problem as raw values.
    """
    saved_earned = self._get_raw_earned_if_set()
    if saved_earned is None:
        # Nothing persisted yet -- fall back to the learner's computed raw score.
        self.raw_earned = self._learner_raw_score()
    return Score(self.raw_earned, self.raw_possible)
def compare_scores(self, correct, total):
    """
    Compare the learner's percentage score against ``self.ref_value`` using
    the configured comparison operator (``self.operator``).

    Arguments:
        correct: points earned for that section.
        total: points possible for that section.

    Returns:
        bool: outcome of ``percentage <operator> self.ref_value``; False when
        ``total`` is falsy (no points possible) or the operator is unknown.

    Side effects: updates the module-level ``scoreTotal``, calls
    ``self.set_score`` with it, and logs the result.
    """
    result = False
    global scoreTotal
    if total:
        # getting percentage score for that section.
        # FIX: float() guards against Python 2 integer-division truncation
        # (e.g. 1 / 2 == 0, which silently turned 50% into 0%).
        percentage = (float(correct) / total) * 100
        # FIX: the operator checks are mutually exclusive, so use elif instead
        # of re-testing every branch.
        if self.operator == 'eq':
            result = percentage == self.ref_value
        elif self.operator == 'noeq':
            result = percentage != self.ref_value
        elif self.operator == 'lte':
            result = percentage <= self.ref_value
        elif self.operator == 'gte':
            result = percentage >= self.ref_value
        elif self.operator == 'lt':
            result = percentage < self.ref_value
        elif self.operator == 'gt':
            result = percentage > self.ref_value
        scoreTotal = Score(percentage, 100.0)
        self.set_score(scoreTotal)
        LOGGER.info("scoretotal ")
        LOGGER.info(scoreTotal)
    return result
def get_score(self):
    """
    Return the problem's current score as raw values.
    """
    saved = self._get_raw_earned_if_set()
    if saved is None:
        # No persisted grade yet -- compute it from the learner state.
        self.raw_earned = self._learner_raw_score()
    return Score(self.raw_earned, self.max_score())
def _mark_complete_and_publish_grade(self): """ Helper method to update `self.completed` and submit grade event if appropriate conditions met. """ # pylint: disable=fixme # TODO: (arguable) split this method into "clean" functions (with no side effects and implicit state) # This method implicitly depends on self.item_state (via _is_answer_correct and _learner_raw_score) # and also updates self.raw_earned if some conditions are met. As a result this method implies some order of # invocation: # * it should be called after learner-caused updates to self.item_state is applied # * it should be called before self.item_state cleanup is applied (i.e. returning misplaced items to item bank) # * it should be called before any method that depends on self.raw_earned (i.e. self._get_feedback) # Splitting it into a "clean" functions will allow to capture this implicit invocation order in caller method # and help avoid bugs caused by invocation order violation in future. # There's no going back from "completed" status to "incomplete" self.completed = self.completed or self._is_answer_correct() or not self.attempts_remain current_raw_earned = self._learner_raw_score() # ... and from higher grade to lower # if we have an old-style (i.e. unreliable) grade, override no matter what saved_raw_earned = self._get_raw_earned_if_set() if current_raw_earned is None or current_raw_earned > saved_raw_earned: self.raw_earned = current_raw_earned self._publish_grade(Score(self.raw_earned, self.max_score()))
def check(self, data, suffix=''):
    """
    Grade the submission by its length: one raw point per symbol, capped at 10.
    """
    cap = 10
    earned_symbols = min(len(data['data']), cap)
    self._publish_grade(Score(earned_symbols, cap))
    return {"result": earned_symbols}
def test_publish_grade(self):
    """
    Verifies that publish event is fired with expected event data.
    """
    # Initial publish with the default event data.
    self.assertPublishEvent(self.event_data)

    # After a full score, the published value should follow.
    self.block.set_score(Score(raw_earned=1, raw_possible=1))
    self.event_data['value'] = 1
    self.assertPublishEvent(self.event_data)
def test_weighted_grade(self, raw_possible, weight, grade):
    """
    Tests that weighted grade returns expected expected output with given input.
    """
    block = self.block
    block.weight = weight
    block.set_score(Score(raw_earned=1, raw_possible=raw_possible))
    block.save()
    self.assertEqual(block.weighted_grade(), grade)
def calculate_score(self, result):
    """
    Calculate a new raw score based on the state of the problem.
    This method should not modify the state of the XBlock.
    Returns:
        Score(raw_earned=float, raw_possible=float)
    """
    return Score(
        raw_earned=self.extract_earned_test_scores(result),
        raw_possible=self.max_score(),
    )
def score_published_handler(sender, block, user, raw_earned, raw_possible, only_if_higher, **kwargs):  # pylint: disable=unused-argument
    """
    Handles whenever a block's score is published.
    Returns whether the score was actually updated.
    """
    update_score = True
    if only_if_higher:
        # Only overwrite when the new score is at least as high as the saved one.
        previous_score = get_score(user.id, block.location)
        if previous_score is not None:
            # NOTE(review): previous_score is read via .grade/.max_grade attributes
            # here, while another get_score() call site in this file subscripts the
            # result as a dict -- confirm which contract this get_score() follows.
            prev_raw_earned, prev_raw_possible = (previous_score.grade, previous_score.max_grade)
            if not is_score_higher_or_equal(prev_raw_earned, prev_raw_possible, raw_earned, raw_possible):
                update_score = False
                log.warning(
                    u"Grades: Rescore is not higher than previous: "
                    u"user: {}, block: {}, previous: {}/{}, new: {}/{} ".format(
                        user, block.location, prev_raw_earned, prev_raw_possible, raw_earned, raw_possible,
                    ))
    if update_score:
        # Set the problem score in CSM.
        score_modified_time = set_score(user.id, block.location, raw_earned, raw_possible)
        # Set the problem score on the xblock.
        if isinstance(block, ScorableXBlockMixin):
            block.set_score(
                Score(raw_earned=raw_earned, raw_possible=raw_possible))
        # Fire a signal (consumed by enqueue_subsection_update, below)
        PROBLEM_RAW_SCORE_CHANGED.send(
            sender=None,
            raw_earned=raw_earned,
            raw_possible=raw_possible,
            weight=getattr(block, 'weight', None),
            user_id=user.id,
            # unicode() here -- this module targets Python 2.
            course_id=unicode(block.location.course_key),
            usage_id=unicode(block.location),
            only_if_higher=only_if_higher,
            modified=score_modified_time,
            score_db_table=ScoreDatabaseTableEnum.courseware_student_module,
            score_deleted=kwargs.get('score_deleted', False),
        )
    return update_score
def get_score(self):
    """
    Return a raw score already persisted on the XBlock.

    Should not perform new calculations.

    Returns:
        Score(raw_earned=float, raw_possible=float)
    """
    score = get_score(self.runtime.user_id, self.location)
    # Default to 0 out of 1 when no score has been persisted yet.
    score = score or {'grade': 0, 'max_grade': 1}
    # NOTE(review): the result is subscripted ('grade'/'max_grade'), while another
    # call site in this file reads .grade/.max_grade attributes from a get_score()
    # result -- confirm this get_score() returns a dict-like object.
    return Score(raw_earned=score['grade'], raw_possible=score['max_grade'])
def get_score(self):
    """
    Return a raw score already persisted on the XBlock.

    Should not perform new calculations.

    Returns:
        Score(raw_earned=float, raw_possible=float), or None when no score
        has been persisted yet.
    """
    if not self.score:
        # Guard clause with an explicit return: the original fell through and
        # returned None implicitly, hiding the "no score yet" outcome.
        logger.warning("No score is earned for this block yet")
        return None
    return Score(raw_earned=self.score.get('raw_earned'),
                 raw_possible=self.score.get('raw_possible'))
def publish_grade(self, score=None):
    """
    Publishes the student's current grade to the system as an event.

    Arguments:
        score (Score): the score to publish; defaults to zero earned out of
            ``self.max_score()`` possible.

    Returns:
        dict: the published grade/max_grade pair.
    """
    if score is None:
        # FIX: Score's fields are raw_earned/raw_possible (read three lines
        # below); the old keywords (earned=/possible=) raised TypeError
        # whenever this method was called without a score.
        score = Score(raw_earned=0, raw_possible=self.max_score())
    self.runtime.publish(self, 'grade', {
        'value': score.raw_earned,
        'max_value': score.raw_possible,
    })
    return {'grade': score.raw_earned, 'max_grade': score.raw_possible}
def set_score2(self, data, suffix=''):
    """
    An example handler, which increments the data.
    """
    # indicator is now 100... full credit iff the key says 'hundred'.
    self.score2 = 1 if data['key'] == 'hundred' else 0
    self._publish_grade(Score(self.score2, self.max_score()))
    return {"score": self.score2}
def _calculate_score(self): """ Calculate user score and provide relevant context """ raw_possible = 1.0 raw_earned = 0.0 actual, expected, error, comparison = attempt_safe( self.dataset, self.answer_query, self.verify_query, self.is_ordered, self.raw_response) if comparison: raw_earned = 1.0 score = Score( raw_earned=raw_earned, raw_possible=raw_possible, ) return (score, actual, expected, error, comparison)
def test_set_score(self):
    """
    Verify set score method updates relevant attributes of the D&D block.
    """
    # Initial conditions: nothing earned, one raw point possible.
    self.assertEqual(self.block.raw_earned, 0)
    self.assertEqual(self.block.raw_possible, 1)

    possible = 0.6
    earned_score = 0.3
    earned_percent = 50 / 100.0  # 50% earned from the total.

    self.block.set_score(Score(raw_earned=earned_score, raw_possible=possible))

    self.assertEqual(self.block.raw_earned, earned_score)
    self.assertEqual(self.block.raw_possible, possible)
    self.assertEqual(self.block.weighted_grade(), earned_percent)
def set_score2(self, data, suffix=''): """ An example handler, which increments the data. """ # indicator is now 100... if data['key'] == 'hundred': self.score2 = 100 else: self.score2 = 0 event_data = {'value': self.score2 / 100, 'max_value': 1.0} self.runtime.publish(self, 'grade', event_data) self._publish_grade(Score(self.raw_earned, self.max_score())) self.runtime.publish(self, "progress", {}) url = "https://fork.kodaktor.ru/publog3?EDXEDX-4---------" urllib.urlopen(url + 'score --- published') return {"score": self.score2}
def _calculate_score(self):
    """
    Calculate user score and provide relevant context
    """
    problem = SqlProblem(
        DATABASES[self.dataset],
        self.answer_query,
        self.verify_query,
        self.is_ordered,
    )
    actual, expected, error, comparison = problem.attempt(self.raw_response)
    # Binary grading: full credit iff the comparison succeeded.
    score = Score(
        raw_earned=1.0 if comparison else 0.0,
        raw_possible=1.0,
    )
    return (score, actual, expected, error, comparison)
def calculate_score(self):
    """
    Returns a newly-calculated raw score on the problem for the learner
    based on the learner's current state.
    """
    earned = self._learner_raw_score()
    return Score(earned, self.max_score())
import pkg_resources import re from xblock.core import XBlock from xblock.fragment import Fragment from xblock.fields import Scope, Integer, String, Float from xblockutils.studio_editable import StudioEditableXBlockMixin from xblock.scorable import ScorableXBlockMixin from xblock.scorable import Score from xblock.validation import ValidationMessage from courseware.model_data import ScoresClient from opaque_keys.edx.keys import UsageKey from opaque_keys import InvalidKeyError LOGGER = logging.getLogger(__name__) scoreTotal = Score(0.0, 100.0) def load(path): """Handy helper for getting resources from our kit.""" data = pkg_resources.resource_string(__name__, path) return data.decode("utf8") def _actions_generator(block): # pylint: disable=unused-argument """ Generates a list of possible actions to take when the condition is met """ return [{ "display_name": "Display a message", "value": "display_message"
def score(self):
    """
    Return the learner's saved score as a Score tuple.
    """
    return Score(raw_earned=self.raw_earned, raw_possible=self.raw_possible)
def get_score(self):
    """
    Return the problem's current score as raw values.
    """
    # Always full credit: one raw point earned out of max_score() possible.
    full_credit = 1
    return Score(full_credit, self.max_score())
def get_score(self):
    """
    Return the learner's score, clamped below at zero, out of 1.0 possible.
    """
    clamped_earned = max(self.student_score, 0.0)
    return Score(raw_earned=clamped_earned, raw_possible=1.0)
def override_score_module_state(xmodule_instance_args, module_descriptor, student_module, task_input):
    '''
    Takes an XModule descriptor and a corresponding StudentModule object, and
    performs an override on the student's problem score.

    Throws exceptions if the override is fatal and should be aborted if in a
    loop. In particular, raises UpdateProblemModuleStateError if module fails
    to instantiate, or if the module doesn't support overriding, or if the
    score used for override is outside the acceptable range of scores (between
    0 and the max score for the problem).

    Returns True if problem was successfully overriden for the given student,
    and False if problem encountered some kind of error in overriding.
    '''
    # unpack the StudentModule:
    course_id = student_module.course_id
    student = student_module.student
    usage_key = student_module.module_state_key

    with modulestore().bulk_operations(course_id):
        course = get_course_by_id(course_id)
        instance = _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args, course=course)
        if instance is None:
            # Either permissions just changed, or someone is trying to be clever
            # and load something they shouldn't have access to.
            msg = u"No module {location} for student {student}--access denied?".format(
                location=usage_key, student=student)
            TASK_LOG.warning(msg)
            return UPDATE_STATUS_FAILED

        # Only scorable modules expose set_score; bail out otherwise.
        if not hasattr(instance, 'set_score'):
            msg = "Scores cannot be overridden for this problem type."
            raise UpdateProblemModuleStateError(msg)

        # The task supplies a WEIGHTED score; validate it against the range
        # of points the problem can award.
        weighted_override_score = float(task_input['score'])
        if not (0 <= weighted_override_score <= instance.max_score()):
            msg = "Score must be between 0 and the maximum points available for the problem."
            raise UpdateProblemModuleStateError(msg)

        # Set the tracking info before this call, because it makes downstream
        # calls that create events. We retrieve and store the id here because
        # the request cache will be erased during downstream calls.
        create_new_event_transaction_id()
        set_event_transaction_type(grades_events.GRADES_OVERRIDE_EVENT_TYPE)

        problem_weight = instance.weight if instance.weight is not None else 1
        if problem_weight == 0:
            # A zero weight would make the raw-score division below undefined.
            msg = "Scores cannot be overridden for a problem that has a weight of zero."
            raise UpdateProblemModuleStateError(msg)
        else:
            # Convert the weighted score back into raw earned/possible values.
            instance.set_score(
                Score(raw_earned=weighted_override_score / problem_weight,
                      raw_possible=instance.max_score() / problem_weight))

        instance.publish_grade()
        instance.save()
        TASK_LOG.debug(
            u"successfully processed score override for course %(course)s, problem %(loc)s "
            u"and student %(student)s",
            dict(course=course_id, loc=usage_key, student=student))

    return UPDATE_STATUS_SUCCEEDED