def convert_item(item, to_be_deleted):
    """ Convert the subtree """
    # Gather the children's location SONs up front so the caller can recurse.
    children = []
    for child_str in item.get('definition', {}).get('children', []):
        children.append(Location.from_deprecated_string(child_str).to_deprecated_son())

    # Re-insert this item as a DRAFT revision.
    item['_id']['revision'] = MongoRevisionKey.draft
    # keys must be in a fixed, deterministic order before inserting
    item['_id'] = self._id_dict_to_son(item['_id'])
    try:
        self.collection.insert(item)
    except pymongo.errors.DuplicateKeyError:
        # a DRAFT already exists; tolerate that only when explicitly requested
        if not ignore_if_draft:
            raise DuplicateItemError(item['_id'], self, 'collection')

    # optionally queue the old PUBLISHED revision for deletion
    if delete_published:
        item['_id']['revision'] = MongoRevisionKey.published
        to_be_deleted.append(item['_id'])

    return children
def delete_draft_only(root_location):
    """
    Delete the item at the given location if a draft revision of it exists;
    otherwise recurse into the item's children looking for drafts.
    """
    query = root_location.to_deprecated_son(prefix='_id.')
    del query['_id.revision']
    versions_found = self.collection.find(
        query,
        {'_id': True, 'definition.children': True},
        sort=[SORT_REVISION_FAVOR_DRAFT],
    )
    num_versions = versions_found.count()
    if num_versions > 1:
        # Two revisions imply one published and one draft, so delete the draft.
        # Moving a child out of a published parent drafts both parent and child.
        published_version = [
            version for version in versions_found
            if version.get('_id').get('revision') != MongoRevisionKey.draft
        ]
        if len(published_version) > 0:
            # Keep parent pointers consistent: an item has exactly one parent.
            self.update_parent_if_moved(root_location, published_version[0], delete_draft_only, user_id)
        self._delete_subtree(root_location, [as_draft], draft_only=True)
    elif num_versions == 1:
        # This is never called on DIRECT_ONLY_CATEGORIES items, and the subtree is
        # deleted as soon as a draft is found, so a lone revision must be published
        # (adding a child to a published item creates a draft of the parent).
        item = versions_found[0]
        assert item.get('_id').get('revision') != MongoRevisionKey.draft
        for child in item.get('definition', {}).get('children', []):
            delete_draft_only(Location.from_deprecated_string(child))
def delete_draft_only(root_location):
    """
    Delete the item at the given location if a draft revision of it exists;
    otherwise recurse into the item's children looking for drafts.
    """
    query = root_location.to_deprecated_son(prefix='_id.')
    del query['_id.revision']
    versions_found = self.collection.find(
        query,
        {'_id': True, 'definition.children': True},
        sort=[SORT_REVISION_FAVOR_DRAFT],
    )
    num_versions = versions_found.count()
    if num_versions > 1:
        # Two revisions imply one published and one draft, so delete the draft.
        self._delete_subtree(root_location, [as_draft], draft_only=True)
    elif num_versions == 1:
        # This is never called on DIRECT_ONLY_CATEGORIES items, and the subtree is
        # deleted as soon as a draft is found, so a lone revision must be published
        # (adding a child to a published item creates a draft of the parent).
        item = versions_found[0]
        assert item.get('_id').get('revision') != MongoRevisionKey.draft
        for child in item.get('definition', {}).get('children', []):
            delete_draft_only(Location.from_deprecated_string(child))
def _get_course_child(request, user, course_key, content_id):
    """ Return a course xmodule/xblock to the caller """
    descriptor = None
    usage_key = None
    module = None
    # Prefer the modern opaque-key format; fall back to the deprecated one.
    try:
        usage_key = UsageKey.from_string(content_id)
    except InvalidKeyError:
        try:
            usage_key = Location.from_deprecated_string(content_id)
        except (InvalidKeyError, InvalidLocationError):
            pass
    if usage_key:
        descriptor = modulestore().get_item(usage_key)
        if descriptor:
            cache = FieldDataCache([descriptor], course_key, user)
            module = module_render.get_module(user, request, usage_key, cache, course_key)
    return descriptor, usage_key, module
def get_students_problem_grades(request, csv=False):
    """
    Get a list of students and grades for a particular problem.
    If 'csv' is False, returns a dict of student's name: username: grade: percent.

    If 'csv' is True, returns a header array, and an array of arrays in the format:
    student names, usernames, grades, percents for CSV download.
    """
    module_state_key = Location.from_deprecated_string(request.GET.get('module_id'))
    csv = request.GET.get('csv')

    # Students with a recorded grade on this problem.
    students = models.StudentModule.objects.select_related('student').filter(
        module_state_key=module_state_key,
        module_type__exact='problem',
        grade__isnull=False,
    ).values('student__username', 'student__profile__name', 'grade', 'max_grade').order_by('student__profile__name')

    results = []
    if not csv:
        # Fetch one extra row so truncation can be detected without a second query.
        for row in students[0:MAX_SCREEN_LIST_LENGTH + 1]:
            entry = {
                'name': row['student__profile__name'],
                'username': row['student__username'],
                'grade': row['grade'],
                'percent': 0,
            }
            if row['max_grade'] > 0:
                entry['percent'] = round(row['grade'] * 100 / row['max_grade'])
            results.append(entry)
        max_exceeded = len(results) > MAX_SCREEN_LIST_LENGTH
        if max_exceeded:
            # Trim back down to exactly MAX_SCREEN_LIST_LENGTH entries.
            del results[-1]
        return JsonResponse({
            'results': results,
            'max_exceeded': max_exceeded,
        })
    else:
        tooltip = request.GET.get('tooltip')
        filename = sanitize_filename(tooltip[:tooltip.rfind(' - ')])
        header = [_("Name").encode('utf-8'), _("Username").encode('utf-8'),
                  _("Grade").encode('utf-8'), _("Percent").encode('utf-8')]
        for row in students:
            percent = round(row['grade'] * 100 / row['max_grade']) if row['max_grade'] > 0 else 0
            results.append([row['student__profile__name'], row['student__username'],
                            row['grade'], percent])
        return create_csv_response(filename, header, results)
def convert_item(item, to_be_deleted):
    """ Convert the subtree """
    # Gather the children's location SONs up front so the caller can recurse.
    children = []
    for child_str in item.get('definition', {}).get('children', []):
        children.append(Location.from_deprecated_string(child_str).to_deprecated_son())

    # Re-insert this item as a DRAFT revision.
    item['_id']['revision'] = MongoRevisionKey.draft
    # keys must be in a fixed, deterministic order before inserting
    item['_id'] = self._id_dict_to_son(item['_id'])
    # Mark the bulk-operations record dirty so the course cache gets refreshed.
    ops_record = self._get_bulk_ops_record(location.course_key)
    ops_record.dirty = True
    try:
        self.collection.insert(item)
    except pymongo.errors.DuplicateKeyError:
        # a DRAFT already exists; tolerate that only when explicitly requested
        if not ignore_if_draft:
            raise DuplicateItemError(item['_id'], self, 'collection')

    # optionally queue the old PUBLISHED revision for deletion
    if delete_published:
        item['_id']['revision'] = MongoRevisionKey.published
        to_be_deleted.append(item['_id'])

    return children
def problem_location(problem_url_name):
    """ Create an internal location for a test problem. """
    # Plain names become usage keys in the test course; i4x strings parse directly.
    if "i4x:" not in problem_url_name:
        return TEST_COURSE_KEY.make_usage_key('problem', problem_url_name)
    return Location.from_deprecated_string(problem_url_name)
def get_course_child_key(content_id):
    # Try the modern usage-key format first, then the deprecated location format.
    try:
        return UsageKey.from_string(content_id)
    except InvalidKeyError:
        pass
    try:
        return Location.from_deprecated_string(content_id)
    except (InvalidLocationError, InvalidKeyError):
        return None
def get_students_opened_subsection(request, csv=False):
    """
    Get a list of students that opened a particular subsection.
    If 'csv' is False, returns a dict of student's name: username.

    If 'csv' is True, returns a header array, and an array of arrays in the format:
    student names, usernames for CSV download.
    """
    module_state_key = Location.from_deprecated_string(request.GET.get('module_id'))
    csv = request.GET.get('csv')

    # Students who have state recorded for this sequential (i.e. opened it).
    students = models.StudentModule.objects.select_related('student').filter(
        module_state_key__exact=module_state_key,
        module_type__exact='sequential',
    ).values('student__username', 'student__profile__name').order_by('student__profile__name')

    results = []
    if not csv:
        # Fetch one extra row so truncation can be detected without a second query.
        for row in students[0:MAX_SCREEN_LIST_LENGTH + 1]:
            results.append({
                'name': row['student__profile__name'],
                'username': row['student__username'],
            })
        max_exceeded = len(results) > MAX_SCREEN_LIST_LENGTH
        if max_exceeded:
            # Trim back down to exactly MAX_SCREEN_LIST_LENGTH entries.
            del results[-1]
        return JsonResponse({
            'results': results,
            'max_exceeded': max_exceeded,
        })
    else:
        tooltip = request.GET.get('tooltip')
        # Subsection name is everything after 3rd space in tooltip
        filename = sanitize_filename(' '.join(tooltip.split(' ')[3:]))
        header = [_("Name").encode('utf-8'), _("Username").encode('utf-8')]
        for row in students:
            results.append([row['student__profile__name'], row['student__username']])
        return create_csv_response(filename, header, results)
def to_python(self, location):
    """ Deserialize to a UsageKey instance: for now it's a location missing the run """
    assert isinstance(location, (NoneType, basestring, Location))
    if location == '':
        return None
    if not isinstance(location, basestring):
        # Already a Location (or None): pass it through unchanged.
        return location
    # Let the base field normalize the string, then parse the deprecated form.
    normalized = super(UsageKeyField, self).to_python(location)
    return Location.from_deprecated_string(normalized)
def get_students_opened_subsection(request, csv=False):
    """
    Get a list of students that opened a particular subsection.
    If 'csv' is False, returns a dict of student's name: username.

    If 'csv' is True, returns a header array, and an array of arrays in the format:
    student names, usernames for CSV download.
    """
    module_state_key = Location.from_deprecated_string(request.GET.get('module_id'))
    course_id = request.GET.get('course_id')
    csv = request.GET.get('csv')
    course_key = locator.CourseLocator.from_string(course_id)
    # Staff/instructor accounts are filtered out of the report.
    non_student_list = get_non_student_list(course_key)

    # Students who have state recorded for this sequential (i.e. opened it).
    students = models.StudentModule.objects.select_related('student').filter(
        module_state_key__exact=module_state_key,
        module_type__exact='sequential',
    ).exclude(student_id__in=non_student_list).values(
        'student__id', 'student__username', 'student__profile__name'
    ).order_by('student__profile__name')

    results = []
    if not csv:
        # Fetch one extra row so truncation can be detected without a second query.
        for row in students[0:MAX_SCREEN_LIST_LENGTH + 1]:
            results.append({
                'name': row['student__profile__name'],
                'username': row['student__username'],
            })
        max_exceeded = len(results) > MAX_SCREEN_LIST_LENGTH
        if max_exceeded:
            # Trim back down to exactly MAX_SCREEN_LIST_LENGTH entries.
            del results[-1]
        return JsonResponse({
            'results': results,
            'max_exceeded': max_exceeded,
        })
    else:
        tooltip = request.GET.get('tooltip')
        # Subsection name is everything after 3rd space in tooltip
        filename = sanitize_filename(' '.join(tooltip.split(' ')[3:]))
        header = [_("Name").encode('utf-8'), _("Username").encode('utf-8')]
        for row in students:
            results.append([row['student__profile__name'], row['student__username']])
        return create_csv_response(filename, header, results)
def get_course_child_key(content_id):
    """ Returns course child key """
    # Try the modern usage-key format first, then the deprecated location format.
    try:
        return UsageKey.from_string(content_id)
    except InvalidKeyError:
        pass
    try:
        return Location.from_deprecated_string(content_id)
    except (InvalidLocationError, InvalidKeyError):
        return None
def to_python(self, value):
    """ Deserialize to a UsageKey; empty strings become None, keys pass through. """
    if value is self.Empty or value is None:
        return value
    assert isinstance(value, (basestring, UsageKey))
    if value == '':
        return None
    if not isinstance(value, basestring):
        # Already a UsageKey: nothing to parse.
        return value
    return Location.from_deprecated_string(value)
def update_parent_if_moved(self, original_parent_location, published_version, delete_draft_only, user_id):
    """
    Update parent of an item if it has moved.

    Arguments:
        original_parent_location (BlockUsageLocator) : Original parent block locator.
        published_version (dict)   : Published version of the block.
        delete_draft_only (function) : A callback function to delete draft children if it was moved.
        user_id (int)   : User id
    """
    course_key = original_parent_location.course_key
    for child_location in published_version.get('definition', {}).get('children', []):
        child_usage_key = course_key.make_usage_key_from_deprecated_string(child_location)
        try:
            child_item = self.get_item(child_usage_key)
        except ItemNotFoundError:
            log.error('Unable to find the item %s', unicode(child_usage_key))
            return
        # A differing parent pointer means the child was moved under another parent.
        if child_item.parent and child_item.parent.block_id != original_parent_location.block_id:
            if self.update_item_parent(child_usage_key, original_parent_location, child_item.parent, user_id):
                delete_draft_only(Location.from_deprecated_string(child_location))
def delete_draft_only(root_location):
    """
    Delete the item at the given location if a draft revision of it exists;
    otherwise recurse into the item's children looking for drafts.
    """
    query = root_location.to_deprecated_son(prefix="_id.")
    del query["_id.revision"]
    versions_found = self.collection.find(
        query,
        {"_id": True, "definition.children": True},
        sort=[SORT_REVISION_FAVOR_DRAFT],
    )
    num_versions = versions_found.count()
    if num_versions > 1:
        # Two revisions imply one published and one draft, so delete the draft.
        self._delete_subtree(root_location, [as_draft])
    elif num_versions == 1:
        # This is never called on DIRECT_ONLY_CATEGORIES items, and the subtree is
        # deleted as soon as a draft is found, so a lone revision must be published
        # (adding a child to a published item creates a draft of the parent).
        item = versions_found[0]
        assert item.get("_id").get("revision") != MongoRevisionKey.draft
        for child in item.get("definition", {}).get("children", []):
            delete_draft_only(Location.from_deprecated_string(child))
def _convert_reference_to_key(self, ref_string):
    """
    Convert a single serialized UsageKey string in a ReferenceField into a UsageKey.
    """
    # Deprecated location strings lack the run; fill it in from the modulestore.
    parsed = Location.from_deprecated_string(ref_string)
    run = self.modulestore.fill_in_run(parsed.course_key).run
    return parsed.replace(run=run)
def chart_update(request):
    """
    Dispatch a learning-analytics GET request to the visualization it names and
    return the chart data as a JSON HttpResponse. Unknown charts (and non-GET
    requests) get the default ``{'success': False}`` payload.
    """
    chart_info_json = dumps({'success': False})
    if request.method == u'GET':
        params = request.GET
        user_id = params[u'user_id']
        if user_id == "":
            # No explicit user: chart the requesting user.
            user_id = request.user
        chart = int(params[u'chart'])
        course_key = get_course_key(params[u'course_id'])

        if chart == VISUALIZATIONS_ID['LA_chapter_time']:
            cs, st = get_DB_course_spent_time(course_key, student_id=user_id)
            chart_info_json = dumps(chapter_time_to_js(cs, st))
        elif chart == VISUALIZATIONS_ID['LA_course_accesses']:
            cs, sa = get_DB_course_section_accesses(course_key, student_id=user_id)
            chart_info_json = dumps(course_accesses_to_js(cs, sa))
        elif chart == VISUALIZATIONS_ID['LA_student_grades']:
            chart_info_json = dumps(get_DB_student_grades(course_key, student_id=user_id))
        elif chart == VISUALIZATIONS_ID['LA_time_schedule']:
            chart_info_json = dumps(get_DB_time_schedule(course_key, student_id=user_id))
        elif chart == VISUALIZATIONS_ID['LA_vid_prob_prog']:
            chart_info_json = dumps(
                get_DB_course_video_problem_progress(course_key, student_id=user_id))
        elif chart == VISUALIZATIONS_ID['LA_video_progress']:
            # Video progress: percentage of each video seen, total and non-overlapped.
            course = get_course_with_access(user_id, action='load', course_key=course_key,
                                            depth=None, check_if_enrolled=False)
            video_descriptors = videos_problems_in(course)[0]
            video_durations = get_info_videos_descriptors(video_descriptors)[2]
            video_names, avg_video_time, video_percentages = get_module_consumption(
                user_id, course_key, 'video', 'video_progress')
            if avg_video_time != []:
                ratios = map(truediv, avg_video_time, video_durations)
                all_video_time_percent = [int(round(x * 100, 0)) for x in ratios]
            else:
                all_video_time_percent = avg_video_time
            chart_info_json = ready_for_arraytodatatable(
                ['Video', 'Different video time', 'Total video time'],
                video_names, video_percentages, all_video_time_percent)
        elif chart == VISUALIZATIONS_ID['LA_video_time']:
            # Time spent on every video resource
            video_names, all_video_time = get_module_consumption(
                user_id, course_key, 'video', 'total_time_vid_prob')[0:2]
            chart_info_json = ready_for_arraytodatatable(
                ['Video', 'Time watched'], video_names, all_video_time)
        elif chart == VISUALIZATIONS_ID['LA_problem_time']:
            # Time spent on every problem resource
            problem_names, time_x_problem = get_module_consumption(
                user_id, course_key, 'problem', 'total_time_vid_prob')[0:2]
            chart_info_json = ready_for_arraytodatatable(
                ['Problem', 'Time on problem'], problem_names, time_x_problem)
        elif chart == VISUALIZATIONS_ID['LA_repetition_video_interval']:
            # Repetitions per video intervals
            block_locator = BlockUsageLocator._from_string(params[u'video'])
            video_id = Location.from_deprecated_string(block_locator._to_deprecated_string())
            chart_info_json = get_user_video_intervals(user_id, video_id)
        elif chart == VISUALIZATIONS_ID['LA_daily_time']:
            # Daily time spent on video and/or problem resources
            video_days, video_daily_time = get_daily_consumption(user_id, course_key, 'video')
            problem_days, problem_daily_time = get_daily_consumption(user_id, course_key, 'problem')
            chart_info_json = join_video_problem_time(
                video_days, video_daily_time, problem_days, problem_daily_time)
        elif chart == VISUALIZATIONS_ID['LA_video_events']:
            # Video events dispersion within video length
            block_locator = BlockUsageLocator._from_string(params[u'video'])
            video_id = Location.from_deprecated_string(block_locator._to_deprecated_string())
            chart_info_json = get_video_events_info(user_id, video_id)
    return HttpResponse(chart_info_json, mimetype='application/json')
def chart_update(request):
    """
    Dispatch a learning-analytics GET request to the visualization it names and
    return the chart data as a JSON HttpResponse. Unknown charts (and non-GET
    requests) get the default ``{'success': False}`` payload.
    """
    chart_info_json = dumps({'success': False})
    if request.method == u'GET':
        params = request.GET
        user_id = params[u'user_id']
        if user_id == "":
            # No explicit user: chart the requesting user.
            user_id = request.user
        chart = int(params[u'chart'])
        course_key = get_course_key(params[u'course_id'])

        if chart == VISUALIZATIONS_ID['LA_chapter_time']:
            cs, st = get_DB_course_spent_time(course_key, student_id=user_id)
            chart_info_json = dumps(chapter_time_to_js(cs, st))
        elif chart == VISUALIZATIONS_ID['LA_course_accesses']:
            cs, sa = get_DB_course_section_accesses(course_key, student_id=user_id)
            chart_info_json = dumps(course_accesses_to_js(cs, sa))
        elif chart == VISUALIZATIONS_ID['LA_student_grades']:
            chart_info_json = dumps(get_DB_student_grades(course_key, student_id=user_id))
        elif chart == VISUALIZATIONS_ID['LA_time_schedule']:
            chart_info_json = dumps(get_DB_time_schedule(course_key, student_id=user_id))
        elif chart == VISUALIZATIONS_ID['LA_vid_prob_prog']:
            chart_info_json = dumps(
                get_DB_course_video_problem_progress(course_key, student_id=user_id))
        elif chart == VISUALIZATIONS_ID['LA_video_progress']:
            # Video progress: percentage of each video seen, total and non-overlapped.
            course = get_course_with_access(user_id, action='load', course_key=course_key,
                                            depth=None, check_if_enrolled=False)
            video_descriptors = videos_problems_in(course)[0]
            video_durations = get_info_videos_descriptors(video_descriptors)[2]
            video_names, avg_video_time, video_percentages = get_module_consumption(
                user_id, course_key, 'video', 'video_progress')
            if avg_video_time != []:
                ratios = map(truediv, avg_video_time, video_durations)
                all_video_time_percent = [int(round(x * 100, 0)) for x in ratios]
            else:
                all_video_time_percent = avg_video_time
            chart_info_json = ready_for_arraytodatatable(
                ['Video', 'Different video time', 'Total video time'],
                video_names, video_percentages, all_video_time_percent)
        elif chart == VISUALIZATIONS_ID['LA_video_time']:
            # Time spent on every video resource
            video_names, all_video_time = get_module_consumption(
                user_id, course_key, 'video', 'total_time_vid_prob')[0:2]
            chart_info_json = ready_for_arraytodatatable(
                ['Video', 'Time watched'], video_names, all_video_time)
        elif chart == VISUALIZATIONS_ID['LA_problem_time']:
            # Time spent on every problem resource
            problem_names, time_x_problem = get_module_consumption(
                user_id, course_key, 'problem', 'total_time_vid_prob')[0:2]
            chart_info_json = ready_for_arraytodatatable(
                ['Problem', 'Time on problem'], problem_names, time_x_problem)
        elif chart == VISUALIZATIONS_ID['LA_repetition_video_interval']:
            # Repetitions per video intervals
            block_locator = BlockUsageLocator._from_string(params[u'video'])
            video_id = Location.from_deprecated_string(block_locator._to_deprecated_string())
            chart_info_json = get_user_video_intervals(user_id, video_id)
        elif chart == VISUALIZATIONS_ID['LA_daily_time']:
            # Daily time spent on video and/or problem resources
            video_days, video_daily_time = get_daily_consumption(user_id, course_key, 'video')
            problem_days, problem_daily_time = get_daily_consumption(user_id, course_key, 'problem')
            chart_info_json = join_video_problem_time(
                video_days, video_daily_time, problem_days, problem_daily_time)
        elif chart == VISUALIZATIONS_ID['LA_video_events']:
            # Video events dispersion within video length
            block_locator = BlockUsageLocator._from_string(params[u'video'])
            video_id = Location.from_deprecated_string(block_locator._to_deprecated_string())
            chart_info_json = get_video_events_info(user_id, video_id)
    return HttpResponse(chart_info_json, mimetype='application/json')