def get_staff_path_and_context(self):
    """Return the (template path, render context) pair for the ORA staff debug panel."""
    path = 'openassessmentblock/staff_debug/staff_debug.html'
    student_item = self.get_student_item_dict()

    context = {}
    # Expose the new-style location so course staff can use it to delete
    # student state, even when old-style locations are used internally.
    context['item_id'] = student_item["item_id"]

    # Per-step counts of students in the workflow.
    status_counts, num_submissions = self.get_workflow_status_counts()
    context['status_counts'] = status_counts
    context['num_submissions'] = num_submissions

    # AI-grading controls require admin privileges, a configured
    # example-based assessment, and are hidden in Studio preview.
    ai_assessment = self.get_assessment_module('example-based-assessment')
    show_ai_controls = (
        self.is_admin and bool(ai_assessment) and not self.in_studio_preview
    )
    context['display_schedule_training'] = show_ai_controls
    context['display_reschedule_unfinished_tasks'] = show_ai_controls
    if show_ai_controls:
        context['classifierset'] = ai_api.get_classifier_set_info(
            create_rubric_dict(self.prompts, self.rubric_criteria_with_labels),
            ai_assessment['algorithm_id'],
            student_item['course_id'],
            student_item['item_id'],
        )

    # Release/due dates for each step, plus the LaTeX setting.
    context['step_dates'] = []
    context['allow_latex'] = self.allow_latex

    for step_name in ['submission'] + self.assessment_steps:
        if step_name == 'example-based-assessment':
            continue
        # Dates as a student (not course staff) would see them.
        __, __, start, due = self.is_closed(step=step_name, course_staff=False)
        context['step_dates'].append({
            'step': step_name,
            'start': start if start > DISTANT_PAST else None,
            'due': due if due < DISTANT_FUTURE else None,
        })

    return path, context
def get_staff_path_and_context(self):
    """
    Build the template path and context for the staff section of the ORA XBlock.
    """
    template = 'openassessmentblock/staff_debug/staff_debug.html'
    item_dict = self.get_student_item_dict()

    counts, total_submissions = self.get_workflow_status_counts()

    context = {
        # New-style location, shown so course staff can delete student
        # state even when old-style locations are used internally.
        'item_id': item_dict["item_id"],
        # How many students sit in each step of the workflow.
        'status_counts': counts,
        'num_submissions': total_submissions,
    }

    # Schedule-training controls: admin-only, need a configured
    # example-based assessment, and never shown in Studio preview.
    ebassessment = self.get_assessment_module('example-based-assessment')
    can_manage_ai = self.is_admin and bool(ebassessment) and not self.in_studio_preview
    context['display_schedule_training'] = can_manage_ai
    context['display_reschedule_unfinished_tasks'] = can_manage_ai
    if can_manage_ai:
        context['classifierset'] = ai_api.get_classifier_set_info(
            create_rubric_dict(self.prompt, self.rubric_criteria_with_labels),
            ebassessment['algorithm_id'],
            item_dict['course_id'],
            item_dict['item_id']
        )

    context['step_dates'] = []
    context['allow_latex'] = self.allow_latex

    all_steps = ['submission'] + self.assessment_steps
    for current in all_steps:
        if current == 'example-based-assessment':
            continue
        # Use the student-facing (non-staff) view of the step dates.
        __, __, opens_at, closes_at = self.is_closed(step=current, course_staff=False)
        context['step_dates'].append({
            'step': current,
            'start': opens_at if opens_at > DISTANT_PAST else None,
            'due': closes_at if closes_at < DISTANT_FUTURE else None,
        })

    return template, context
def test_multiple_classifier_sets(self):
    # Queue two training runs for the same course/item.
    ai_api.train_classifiers(RUBRIC, EXAMPLES, 'test_course', 'test_item', ALGORITHM_ID)
    latest_uuid = ai_api.train_classifiers(RUBRIC, EXAMPLES, 'test_course', 'test_item', ALGORITHM_ID)

    # The API should report on the most recently created classifier set.
    info = ai_api.get_classifier_set_info(RUBRIC, ALGORITHM_ID, 'test_course', 'test_item')
    latest_set = AITrainingWorkflow.objects.get(uuid=latest_uuid).classifier_set
    self.assertEqual(info['created_at'], latest_set.created_at)
def test_multiple_classifier_sets(self):
    # Train two classifier sets back-to-back for the same item.
    train_args = (RUBRIC, EXAMPLES, 'test_course', 'test_item', ALGORITHM_ID)
    ai_api.train_classifiers(*train_args)
    second_uuid = ai_api.train_classifiers(*train_args)

    # Expect the info for the second (most recent) classifier set.
    classifier_info = ai_api.get_classifier_set_info(
        RUBRIC, ALGORITHM_ID, 'test_course', 'test_item'
    )
    second_workflow = AITrainingWorkflow.objects.get(uuid=second_uuid)
    self.assertEqual(
        classifier_info['created_at'],
        second_workflow.classifier_set.created_at
    )
def test_classifier_set_info(self):
    workflow_uuid = ai_api.train_classifiers(RUBRIC, EXAMPLES, 'test_course', 'test_item', ALGORITHM_ID)
    info = ai_api.get_classifier_set_info(RUBRIC, ALGORITHM_ID, 'test_course', 'test_item')

    # Look up the classifier set directly to get its actual creation timestamp.
    created_set = AITrainingWorkflow.objects.get(uuid=workflow_uuid).classifier_set

    self.assertEqual(info, {
        'created_at': created_set.created_at,
        'algorithm_id': ALGORITHM_ID,
        'course_id': 'test_course',
        'item_id': 'test_item',
    })
def test_classifier_set_info(self):
    uuid = ai_api.train_classifiers(
        RUBRIC, EXAMPLES, 'test_course', 'test_item', ALGORITHM_ID
    )
    actual = ai_api.get_classifier_set_info(
        RUBRIC, ALGORITHM_ID, 'test_course', 'test_item'
    )

    # Fetch the classifier set so we can compare against its real creation date.
    training_workflow = AITrainingWorkflow.objects.get(uuid=uuid)
    expected = {
        'created_at': training_workflow.classifier_set.created_at,
        'algorithm_id': ALGORITHM_ID,
        'course_id': 'test_course',
        'item_id': 'test_item'
    }
    self.assertEqual(actual, expected)
def test_invalid_rubric_error(self):
    # An empty dict is not a valid rubric; the API call should error out
    # (presumably caught by a decorator on this test — not visible here).
    ai_api.get_classifier_set_info({}, ALGORITHM_ID, 'test_course', 'test_item')
def test_database_error(self, mock_call):
    # Simulate the database layer failing underneath the mocked call.
    mock_call.side_effect = DatabaseError('OH NO!')
    ai_api.get_classifier_set_info(RUBRIC, ALGORITHM_ID, 'test_course', 'test_item')
def test_no_classifier_set(self):
    # Nothing has been trained, so there is no classifier set to report.
    result = ai_api.get_classifier_set_info(RUBRIC, ALGORITHM_ID, 'test_course', 'test_item')
    self.assertIs(result, None)
def test_database_error(self, mock_call):
    mock_call.side_effect = DatabaseError('OH NO!')
    # The API call runs against the simulated database failure.
    ai_api.get_classifier_set_info(
        RUBRIC,
        ALGORITHM_ID,
        'test_course',
        'test_item',
    )
def test_no_classifier_set(self):
    # With no training performed, the lookup should yield None.
    info = ai_api.get_classifier_set_info(
        RUBRIC,
        ALGORITHM_ID,
        'test_course',
        'test_item',
    )
    self.assertIs(info, None)