def map(entity):
    """Mapper: re-scores one student's submission for an auto-graded assessment.

    Reads course/unit configuration from the mapreduce mapper params,
    re-runs the scorer over the stored submission, and persists the new
    score on the student entity.

    Args:
        entity: student entity whose submission for the configured unit
            should be re-scored.

    Yields:
        (str(old_score), new_score) pairs for downstream aggregation.
    """
    mapper_params = context.get().mapreduce_spec.mapper.params
    namespace = mapper_params['course']
    unit_id = mapper_params['unit_id']
    ignore_order = mapper_params['ignore_order']

    app_context = sites.get_app_context_for_namespace(namespace)
    course = courses.Course(None, app_context=app_context)
    unit = course.find_unit_by_id(str(unit_id))

    # Only auto-graded assessments can be mechanically re-scored. Also
    # guard against a missing/deleted unit: the original code would have
    # raised AttributeError on unit.type when find_unit_by_id returned
    # None.
    if not unit or unit.type != verify.UNIT_TYPE_ASSESSMENT:
        return
    if unit.workflow.get_grader() != courses.AUTO_GRADER:
        return

    enable_negative_marking = unit.enable_negative_marking
    submission = student_work.Submission.get_contents(
        unit.unit_id, entity.get_key())
    if not submission:
        return

    old_score = course.get_score(entity, unit.unit_id)
    new_score = scorer.score_assessment(
        submission, unit.html_content, enable_negative_marking,
        ignore_order=ignore_order)
    utils.set_score(entity, unit.unit_id, new_score)
    entity.put()
    yield (str(old_score), new_score)
def map(entity):
    """Mapper: writes a final manually-evaluated score onto the student.

    Skips evaluation summaries with no completed evaluations. Otherwise,
    combines all completed (and not removed) manual evaluation steps for
    this summary using the unit's configured scoring method, and stores
    the result as the reviewee's score for the unit.
    """
    if entity.completed_count == 0:
        return

    # Resolve the course and unit this evaluation summary belongs to,
    # then look up the unit's configured scoring method.
    current_ns = namespace_manager.get_namespace()
    ctx = sites.get_app_context_for_namespace(current_ns)
    crs = courses.Course.get(ctx)
    target_unit = crs.find_unit_by_id(entity.unit_id)
    unit_content = question.SubjectiveAssignmentRESTHandler.get_content(
        crs, target_unit)
    scoring_method = unit_content.get('scoring_method')

    # Collect every completed, non-removed manual evaluation step that
    # belongs to this summary.
    completed_steps = (
        staff.ManualEvaluationStep.all()
        .filter('manual_evaluation_summary_key =', entity.key())
        .filter('state =', staff.REVIEW_STATE_COMPLETED)
        .filter('removed =', False))

    # Combine the step scores into one final score; bail out when the
    # manager cannot produce one.
    combined = manage.Manager.calculate_final_score(
        completed_steps, scoring_method)
    if combined is None:
        return

    reviewee = models.Student.get(entity.reviewee_key)
    utils.set_score(reviewee, str(entity.unit_id), combined)
    reviewee.put()
def store_score(course, student, assessment_type, score):
    """Stores a student's score on a particular assessment.

    Thin wrapper over utils.set_score. Unlike the other store_score
    variants in this file, it applies no "keep the higher score" policy
    and returns nothing (the original docstring incorrectly claimed a
    return value).

    Args:
        course: the course containing the assessment. Unused here; kept
            for signature compatibility with the other variants.
        student: the student whose data is stored.
        assessment_type: the type of the assessment.
        score: the student's score on this assessment.
    """
    utils.set_score(student, assessment_type, score)
def store_score(student, assessment_type, score):
    """Stores a student's score on a particular assessment.

    A score is only persisted when it beats the previous attempt. For the
    'postcourse' assessment, an overall course score (30% midcourse, 70%
    best postcourse attempt) is also computed and stored, and the returned
    assessment_type is rewritten to 'postcourse_pass'/'postcourse_fail'.

    Args:
        student: the student whose data is stored.
        assessment_type: the type of the assessment.
        score: the student's score on this assessment.

    Returns:
        the (possibly modified) assessment_type, which the caller can use
        to render an appropriate response page.
    """
    # FIXME: Course creators can edit this code to implement custom
    # assessment scoring and storage behavior
    # TODO(pgbovine): Note that the latest version of answers are always
    # saved, but scores are only saved if they're higher than the previous
    # attempt. This can lead to unexpected analytics behavior. Resolve this.
    previous = utils.get_score(student, assessment_type)

    # Stored scores come back as strings; cast before comparing.
    if previous is None or score > int(previous):
        utils.set_score(student, assessment_type, score)

    if assessment_type != 'postcourse':
        return assessment_type

    # Special handling for computing the final score.
    raw_mid = utils.get_score(student, 'midcourse')
    midcourse_score = 0 if raw_mid is None else int(raw_mid)

    if previous is None:
        postcourse_score = score
    else:
        postcourse_score = max(int(previous), score)

    # Overall score formula: 30% midcourse + 70% postcourse.
    overall_score = int((0.3 * midcourse_score) + (0.7 * postcourse_score))

    # TODO(pgbovine): this changing of assessment_type is ugly ...
    assessment_type = (
        'postcourse_pass' if overall_score >= 70 else 'postcourse_fail')
    utils.set_score(student, 'overall_score', overall_score)
    return assessment_type
def store_score(course, student, assessment_type, score):
    """Stores a student's score on a particular assessment.

    The score is only persisted when it beats the student's previous score
    for this assessment (or when no previous score exists). Returns
    nothing (the original docstring incorrectly claimed a return value).

    Args:
        course: the course containing the assessment.
        student: the student whose data is stored.
        assessment_type: the type of the assessment.
        score: the student's score on this assessment.
    """
    # FIXME: Course creators can edit this code to implement custom
    # assessment scoring and storage behavior
    # TODO(pgbovine): Note that the latest version of answers are always
    # saved, but scores are only saved if they're higher than the previous
    # attempt. This can lead to unexpected analytics behavior. Resolve this.
    existing_score = course.get_score(student, assessment_type)
    # Stored scores come back as strings; cast to int before comparing.
    if (existing_score is None) or (score > int(existing_score)):
        utils.set_score(student, assessment_type, score)
def score(cls, student, course, assessment_id, score, errors):
    """Records a score for a custom assessment unit submission.

    Validates the unit, its deadline, and its resubmission policy, then
    stores the submission marker, the score, and completion progress.

    Args:
        cls: the enclosing class (provides has_deadline_passed and
            get_student_answer).
        student: the student submitting the score.
        course: the course containing the assessment.
        assessment_id: id of the assessment unit.
        score: the score to record.
        errors: mutable list collecting human-readable error strings.

    Returns:
        True when the score was stored, False otherwise.
    """
    unit = course.find_unit_by_id(assessment_id)
    error_str = 'Student: %s, course: %s, assessment_id: %s, error: %s'
    if not unit:
        errors.append(
            error_str % (
                student.email, course, assessment_id, 'Unit not found'))
        return False

    if cls.has_deadline_passed(unit):
        errors.append(error_str % (
            student.email, course._namespace, assessment_id,
            'Deadline Passed'))
        return False

    submit_only_once = unit.workflow.submit_only_once()
    already_submitted = False
    submitted_contents = cls.get_student_answer(unit, student)
    if submitted_contents and submitted_contents['submitted']:
        already_submitted = True

    if submit_only_once and already_submitted:
        errors.append(error_str % (
            student.email, course._namespace, assessment_id,
            'Already submitted'))
        # BUG FIX: the original appended the error but fell through and
        # overwrote the submission/score anyway, defeating the
        # submit-only-once policy. Every other error path returns False.
        return False

    answer_dict = dict()
    answer_dict['details'] = (
        'Last submitted on %s' % datetime.datetime.now())
    answer_dict['submitted'] = True
    # TODO(rthakker) Do this write in bulk
    student_work.Submission.write(
        assessment_id, student.get_key(), transforms.dumps(answer_dict))

    utils.set_score(student, assessment_id, score)
    # TODO(rthakker) Do this write in bulk
    course.get_progress_tracker().put_custom_unit_completed(
        student, assessment_id)
    return True
def evaluate(self, student, is_public, lang, filename, answer):
    """Evaluates a programming-assignment submission.

    Runs the submitted code against the public or private test cases;
    for private runs, computes a weighted pass percentage and stores it
    as the student's score for the unit.

    Args:
        student: the student whose submission is evaluated.
        is_public: True to run public test cases, False for private.
        lang: programming language of the submission.
        filename: name of the submitted file.
        answer: the submitted code.

    Returns:
        the evaluation result object; for private runs its .score holds
        the weighted percentage of passed test cases.
    """
    lang_data = self.get_lang_specifc_data(lang)
    full_code = self.get_full_code(lang_data, answer)
    pa_id = self._unit.properties.get(base.ProgAssignment.PA_ID_KEY)
    if not is_public:
        # The default id stored in the unit properties is for public test
        # case. If the submission is for private test cases we need to get
        # id for private test cases.
        pa_id = pa_id[:-1] + chr(ord(pa_id[-1:]) - 1)
    program_name = (
        self._course.app_context.get_namespace_name().replace(' ', '_')
        + '.' + lang)
    ignore_presentation_errors = self._content['ignore_presentation_errors']
    if is_public:
        tests = self._content['public_testcase']
    else:
        tests = self._content['private_testcase']
    evaluation_result = self.evalute_code(
        self._course, self._course_settings, self._unit, full_code,
        program_name, pa_id, filename, tests, ignore_presentation_errors,
        lang)

    if (result.Status.BACKEND_ERROR == evaluation_result.status or
            result.Status.OTHER == evaluation_result.status):
        return evaluation_result

    self.update_test_case_stats(
        student, self._unit, is_public, evaluation_result.test_case_results)

    if is_public:
        self.store_server_response(
            student, self._unit.unit_id, evaluation_result)
        return evaluation_result

    # Weighted score over private test cases; results beyond the known
    # test list default to weight 1.
    score = 0.0
    passed_weight = 0
    failed_weight = 0
    for index, stat in enumerate(evaluation_result.test_case_results):
        if index < len(tests):
            weight = tests[index]['weight']
        else:
            weight = 1
        if stat.passed:
            passed_weight += weight
        else:
            failed_weight += weight
    total_weight = passed_weight + failed_weight
    if total_weight > 0:
        # BUG FIX: force float arithmetic. The original
        # "(100 * pwt) / (pwt + nwt)" floor-divides under Python 2 when
        # the weights are ints, truncating the score (score was clearly
        # intended to be a float: it is initialized to 0.0).
        score = (100.0 * passed_weight) / total_weight
    evaluation_result.score = score
    utils.set_score(student, self._unit.unit_id, score)
    self.store_submission_response(
        student, self._unit.unit_id, evaluation_result)
    student.put()
    return evaluation_result
def store_score(course, student, assessment_name, assessment_type, score):
    """Stores a student's score on a particular assessment.

    The per-assessment score is only persisted when it beats the previous
    attempt. For the 'postcourse' assessment an overall score is computed
    via calc_total_score() and the returned assessment_name is rewritten
    to one of 'postcourse_100', 'postcourse_pass', 'postcourse_fail' or
    'not_complete'.

    Args:
        course: the course containing the assessment.
        student: the student whose data is stored.
        assessment_name: the name of the assessment being stored.
        assessment_type: the type of the assessment (unused; kept for
            signature compatibility).
        score: the student's score on this assessment.

    Returns:
        the (possibly modified) assessment_name, which the caller can use
        to render an appropriate response page.
    """
    # FIXME: Course creators can edit this code to implement custom
    # assessment scoring and storage behavior
    # TODO(pgbovine): Note that the latest version of answers are always
    # saved, but scores are only saved if they're higher than the previous
    # attempt. This can lead to unexpected analytics behavior. Resolve this.
    existing_score = course.get_score(student, assessment_name)

    # Stored scores come back as strings; cast to int before comparing.
    if assessment_name != 'postcourse':
        if (existing_score is None) or (score > int(existing_score)):
            utils.set_score(student, assessment_name, score)

    if assessment_name == 'postcourse':
        # NOTE(review): the original also computed a best-attempt
        # "postcourse_score" here, but it was dead code (never read —
        # the overall score comes solely from calc_total_score), so it
        # has been removed along with large blocks of commented-out code.
        overall_score = calc_total_score(student)

        # TODO(pgbovine): this changing of assessment_type is ugly ...
        if overall_score == 100:
            assessment_name = 'postcourse_100'
        elif overall_score >= 90:
            assessment_name = 'postcourse_pass'
        elif overall_score > 0:
            assessment_name = 'postcourse_fail'
        else:
            assessment_name = 'not_complete'

        # Store the overall score of the first run of training in
        # 'postcourse'. ("== None" replaced by "is None" per PEP 8.)
        if (utils.get_score(student, 'postcourse') is None
                and overall_score > -1):
            utils.set_score(student, 'postcourse', overall_score)
            utils.set_score(student, 'overall_score', overall_score)

        # Refresh the stored overall score if one exists; the writes
        # above may have changed the total. (Deprecated "<>" operator
        # replaced by "!=".)
        if utils.get_score(student, 'overall_score') is not None:
            overall_score = calc_total_score(student)
            utils.set_score(student, 'overall_score', overall_score)

    return assessment_name