def test_get_topics_by_id(self):
    """Verifies that get_topics_by_ids returns topics in request order,
    with None placeholders for ids that do not exist.
    """
    expected_topic_dict = self.topic.to_dict()

    # A single valid id yields exactly one matching topic.
    fetched_topics = topic_fetchers.get_topics_by_ids([self.TOPIC_ID])
    self.assertEqual(fetched_topics[0].to_dict(), expected_topic_dict)
    self.assertEqual(len(fetched_topics), 1)

    # An unknown id ('topic') maps to None in the result list.
    fetched_topics = topic_fetchers.get_topics_by_ids(
        [self.TOPIC_ID, 'topic'])
    self.assertEqual(fetched_topics[0].to_dict(), expected_topic_dict)
    self.assertIsNone(fetched_topics[1])
    self.assertEqual(len(fetched_topics), 2)
def get(self):
    """Handles GET requests.

    Reads a comma-separated list of topic ids from the request, and
    responds with a dict mapping each topic id to a dict of per-subtopic
    mastery values (the average of the requesting user's masteries over
    the subtopic's skills).

    Raises:
        InvalidInputException. One of the supplied topic ids does not
            correspond to an existing topic.
    """
    comma_separated_topic_ids = (
        self.request.get('comma_separated_topic_ids'))
    topic_ids = comma_separated_topic_ids.split(',')
    topics = topic_fetchers.get_topics_by_ids(topic_ids)
    all_skill_ids = []
    subtopic_mastery_dict = {}
    # Collect every skill id across all requested topics; a None entry
    # means the corresponding requested id was invalid.
    for ind, topic in enumerate(topics):
        if not topic:
            raise self.InvalidInputException(
                'Invalid topic ID %s' % topic_ids[ind])
        all_skill_ids.extend(topic.get_all_skill_ids())
    # De-duplicate so each skill's mastery is fetched only once.
    all_skill_ids = list(set(all_skill_ids))
    all_skills_mastery_dict = skill_services.get_multi_user_skill_mastery(
        self.user_id, all_skill_ids)
    for topic in topics:
        subtopic_mastery_dict[topic.id] = {}
        for subtopic in topic.subtopics:
            # Restrict to this subtopic's skills, skipping skills with
            # no recorded mastery (None).
            skill_mastery_dict = {
                skill_id: mastery
                for skill_id, mastery in all_skills_mastery_dict.items()
                if mastery is not None and skill_id in subtopic.skill_ids
            }
            # Subtopics with no mastered skills are omitted entirely
            # rather than reported as zero.
            if skill_mastery_dict:
                # Subtopic mastery is average of skill masteries.
                subtopic_mastery_dict[topic.id][subtopic.id] = (
                    python_utils.divide(
                        sum(skill_mastery_dict.values()),
                        len(skill_mastery_dict)))
    self.values.update({'subtopic_mastery_dict': subtopic_mastery_dict})
    self.render_json(self.values)
def post(self):
    """Handles POST requests: validates the payload, creates a new skill
    with the given rubrics and explanation, and links it to any supplied
    topics.

    Raises:
        InvalidInputException. Rubrics is not a list, the explanation is
            not a dict or not a valid SubtitledHtml dict, a linked topic
            id is invalid, or the description is invalid.
    """
    description = self.payload.get('description')
    linked_topic_ids = self.payload.get('linked_topic_ids')
    explanation_dict = self.payload.get('explanation_dict')
    rubrics = self.payload.get('rubrics')

    if not isinstance(rubrics, list):
        raise self.InvalidInputException('Rubrics should be a list.')

    if not isinstance(explanation_dict, dict):
        raise self.InvalidInputException('Explanation should be a dict.')

    try:
        state_domain.SubtitledHtml.from_dict(explanation_dict)
    # Fix: a bare `except:` also swallows SystemExit/KeyboardInterrupt
    # and discards the cause; catch Exception and chain it instead.
    except Exception as e:
        raise self.InvalidInputException(
            'Explanation should be a valid SubtitledHtml dict.') from e

    rubrics = [skill_domain.Rubric.from_dict(rubric) for rubric in rubrics]
    new_skill_id = skill_services.get_new_skill_id()
    if linked_topic_ids is not None:
        topics = topic_fetchers.get_topics_by_ids(linked_topic_ids)
        for topic in topics:
            # A None entry means one of the supplied topic ids was
            # invalid.
            if topic is None:
                raise self.InvalidInputException
            topic_services.add_uncategorized_skill(
                self.user_id, topic.id, new_skill_id)

    skill_domain.Skill.require_valid_description(description)

    skill = skill_domain.Skill.create_default_skill(
        new_skill_id, description, rubrics)
    skill.update_explanation(explanation_dict)
    skill_services.save_new_skill(self.user_id, skill)
    self.render_json({'skillId': new_skill_id})
def _get_skill_opportunities_with_corresponding_topic_name(self, cursor):
    """Returns a list of skill opportunities available for questions with
    a corresponding topic name.

    Args:
        cursor: str or None. If provided, the list of returned entities
            starts from this datastore cursor. Otherwise, the returned
            entities start from the beginning of the full list of
            entities.

    Returns:
        3-tuple(opportunities, cursor, more). where:
            opportunities: list(dict). A list of dicts of skill
                opportunity details with additional corresponding
                topic_name.
            cursor: str or None. A query cursor pointing to the next
                batch of results. If there are no more results, this
                might be None.
            more: bool. If True, there are (probably) more results after
                this batch. If False, there are no further results after
                this batch.
    """
    # We want to focus attention on lessons that are part of a classroom.
    # See issue #12221.
    classroom_topic_ids = []
    for classroom_dict in config_domain.CLASSROOM_PAGES_DATA.value:
        classroom_topic_ids.extend(classroom_dict['topic_ids'])
    classroom_topics = topic_fetchers.get_topics_by_ids(classroom_topic_ids)
    # Associate each skill with one classroom topic name.
    # TODO(#8912): Associate each skill/skill opportunity with all linked
    # topics.
    classroom_topic_skill_id_to_topic_name = {}
    for topic in classroom_topics:
        # None entries correspond to classroom topic ids that no longer
        # resolve to a topic; skip them.
        if topic is None:
            continue
        for skill_id in topic.get_all_skill_ids():
            classroom_topic_skill_id_to_topic_name[skill_id] = topic.name

    skill_opportunities, cursor, more = (
        opportunity_services.get_skill_opportunities(cursor))
    opportunities = []
    # Fetch opportunities until we have at least a page's worth that
    # correspond to a classroom or there are no more opportunities.
    # NOTE(review): the inner for-loop appends every classroom match in
    # the fetched batch before the page-size cap is re-checked, so the
    # returned list may exceed OPPORTUNITIES_PAGE_SIZE.
    while len(opportunities) < constants.OPPORTUNITIES_PAGE_SIZE:
        for skill_opportunity in skill_opportunities:
            if (
                    skill_opportunity.id
                    in classroom_topic_skill_id_to_topic_name):
                skill_opportunity_dict = skill_opportunity.to_dict()
                skill_opportunity_dict['topic_name'] = (
                    classroom_topic_skill_id_to_topic_name[
                        skill_opportunity.id])
                opportunities.append(skill_opportunity_dict)
        if (
                not more or
                len(opportunities) >= constants.OPPORTUNITIES_PAGE_SIZE):
            break
        skill_opportunities, cursor, more = (
            opportunity_services.get_skill_opportunities(cursor))

    return opportunities, cursor, more
def test_get_topics_by_id(self) -> None:
    """Checks that fetching by id returns matching topics in order,
    with None for ids that do not exist.
    """
    # Narrow Optional[Topic] to Topic for mypy.
    assert self.topic is not None
    expected_dict = self.topic.to_dict()

    fetched: List[Optional[topic_domain.Topic]] = (
        topic_fetchers.get_topics_by_ids([self.TOPIC_ID]))
    # Narrow Optional[Topic] to Topic for mypy.
    assert fetched[0] is not None
    self.assertEqual(fetched[0].to_dict(), expected_dict)
    self.assertEqual(len(fetched), 1)

    fetched = topic_fetchers.get_topics_by_ids([self.TOPIC_ID, 'topic'])
    # Narrow Optional[Topic] to Topic for mypy.
    assert fetched[0] is not None
    self.assertEqual(fetched[0].to_dict(), expected_dict)
    self.assertIsNone(fetched[1])
    self.assertEqual(len(fetched), 2)
def _get_skill_opportunities_with_corresponding_topic_name(self, cursor): """Returns a list of skill opportunities available for questions with topic information. Args: cursor: str or None. If provided, the list of returned entities starts from this datastore cursor. Otherwise, the returned entities start from the beginning of the full list of entities. Returns: 3-tuple(opportunities, cursor, more). where: opportunities: list(dict). A list of dicts of skill opportunity details with additional corresponding topic_name. cursor: str or None. A query cursor pointing to the next batch of results. If there are no more results, this might be None. more: bool. If True, there are (probably) more results after this batch. If False, there are no further results after this batch. """ # We want to focus attention on lessons that are part of a classroom. # See issue #12221. classroom_topic_ids = [] for classroom_dict in config_domain.CLASSROOM_PAGES_DATA.value: classroom_topic_ids.extend(classroom_dict['topic_ids']) classroom_topics = topic_fetchers.get_topics_by_ids( classroom_topic_ids) classroom_topics_with_skills = [ topic for topic in classroom_topics if topic and topic.get_all_skill_ids() ] skill_opportunities, cursor, more = ( opportunity_services.get_skill_opportunities(cursor)) id_to_skill_opportunity_dict = { opp.id: opp.to_dict() for opp in skill_opportunities } opportunities = [] for topic in classroom_topics_with_skills: for skill_id in topic.get_all_skill_ids(): if len(opportunities) == constants.OPPORTUNITIES_PAGE_SIZE: break if skill_id in id_to_skill_opportunity_dict: skill_opportunity_dict = ( id_to_skill_opportunity_dict[skill_id]) skill_opportunity_dict['topic_name'] = topic.name opportunities.append(skill_opportunity_dict) return opportunities, cursor, more
def post(self):
    """Handles POST requests: creates a default skill from the payload
    description and links it to any supplied topics.

    Raises:
        InvalidInputException. One of the linked topic ids is invalid.
    """
    description = self.payload.get('description')
    linked_topic_ids = self.payload.get('linked_topic_ids')

    new_skill_id = skill_services.get_new_skill_id()

    if linked_topic_ids is not None:
        for linked_topic in topic_fetchers.get_topics_by_ids(
                linked_topic_ids):
            # A None entry means one of the supplied ids was invalid.
            if linked_topic is None:
                raise self.InvalidInputException
            topic_services.add_uncategorized_skill(
                self.user_id, linked_topic.id, new_skill_id)

    skill_domain.Skill.require_valid_description(description)
    new_skill = skill_domain.Skill.create_default_skill(
        new_skill_id, description)
    skill_services.save_new_skill(self.user_id, new_skill)
    self.render_json({'skillId': new_skill_id})
def post(self):
    """Handles POST requests: creates a new skill from the normalized
    payload, links it to any supplied topics, and stores the base64
    images referenced by the skill.

    Raises:
        InvalidInputException. A linked topic id is invalid, or the
            description duplicates an existing skill's description.
    """
    description = self.normalized_payload.get('description')
    linked_topic_ids = self.normalized_payload.get('linked_topic_ids')
    explanation_dict = self.normalized_payload.get('explanation_dict')
    rubrics = self.normalized_payload.get('rubrics')
    files = self.normalized_payload.get('files')
    new_skill_id = skill_services.get_new_skill_id()
    if linked_topic_ids is not None:
        topics = topic_fetchers.get_topics_by_ids(linked_topic_ids)
        for topic in topics:
            # A None entry means one of the supplied topic ids was
            # invalid.
            if topic is None:
                raise self.InvalidInputException
            topic_services.add_uncategorized_skill(
                self.user_id, topic.id, new_skill_id)

    if skill_services.does_skill_with_description_exist(description):
        raise self.InvalidInputException(
            'Skill description should not be a duplicate.')

    skill = skill_domain.Skill.create_default_skill(
        new_skill_id, description, rubrics)
    skill.update_explanation(explanation_dict)

    # Filenames must be collected before saving so the images can be
    # attached to the freshly saved skill.
    image_filenames = skill_services.get_image_filenames_from_skill(skill)

    skill_services.save_new_skill(self.user_id, skill)

    for filename in image_filenames:
        # Each entry in `files` is presumably a base64-encoded string
        # keyed by filename — TODO confirm against the payload schema.
        base64_image = files.get(filename)
        bytes_image = base64.decodebytes(base64_image.encode('utf-8'))
        file_format = (
            image_validation_services.validate_image_and_filename(
                bytes_image, filename))
        image_is_compressible = (
            file_format in feconf.COMPRESSIBLE_IMAGE_FORMATS)
        fs_services.save_original_and_compressed_versions_of_image(
            filename, feconf.ENTITY_TYPE_SKILL, skill.id, bytes_image,
            'image', image_is_compressible)

    self.render_json({'skillId': new_skill_id})
def post(self):
    """Handles POST requests: validates the payload, creates a new skill
    with its rubrics and explanation, links it to any supplied topics,
    and saves the images referenced by the skill.

    Raises:
        InvalidInputException. Rubrics is not a list, the explanation is
            not a valid SubtitledHtml dict, a linked topic id is invalid,
            the description is invalid or a duplicate, or an image is
            missing or fails validation.
    """
    description = self.payload.get('description')
    linked_topic_ids = self.payload.get('linked_topic_ids')
    explanation_dict = self.payload.get('explanation_dict')
    rubrics = self.payload.get('rubrics')

    if not isinstance(rubrics, list):
        raise self.InvalidInputException('Rubrics should be a list.')

    if not isinstance(explanation_dict, dict):
        raise self.InvalidInputException('Explanation should be a dict.')

    try:
        subtitled_html = (
            state_domain.SubtitledHtml.from_dict(explanation_dict))
        subtitled_html.validate()
    except Exception as e:
        raise self.InvalidInputException(
            'Explanation should be a valid SubtitledHtml dict.') from e

    rubrics = [skill_domain.Rubric.from_dict(rubric) for rubric in rubrics]
    new_skill_id = skill_services.get_new_skill_id()
    if linked_topic_ids is not None:
        topics = topic_fetchers.get_topics_by_ids(linked_topic_ids)
        for topic in topics:
            # A None entry means one of the supplied topic ids was
            # invalid.
            if topic is None:
                raise self.InvalidInputException
            topic_services.add_uncategorized_skill(
                self.user_id, topic.id, new_skill_id)

    skill_domain.Skill.require_valid_description(description)

    if skill_services.does_skill_with_description_exist(description):
        raise self.InvalidInputException(
            'Skill description should not be a duplicate.')

    skill = skill_domain.Skill.create_default_skill(
        new_skill_id, description, rubrics)
    # Fix: reuse the SubtitledHtml that was already parsed and validated
    # above instead of parsing explanation_dict a second time.
    skill.update_explanation(subtitled_html)

    image_filenames = skill_services.get_image_filenames_from_skill(skill)

    skill_services.save_new_skill(self.user_id, skill)

    image_validation_error_message_suffix = (
        'Please go to oppia.org/skill_editor/%s to edit '
        'the image.' % skill.id)
    for filename in image_filenames:
        image = self.request.get(filename)
        if not image:
            # Fix: use logging.error, not logging.exception — there is
            # no active exception here, so logging.exception would log
            # a bogus "NoneType: None" traceback.
            logging.error(
                'Image not provided for file with name %s when the skill '
                'with id %s was created.' % (filename, skill.id))
            raise self.InvalidInputException(
                'No image data provided for file with name %s. %s' % (
                    filename, image_validation_error_message_suffix))
        try:
            file_format = (
                image_validation_services.validate_image_and_filename(
                    image, filename))
        except utils.ValidationError as e:
            # Fix: do not rebind the exception variable to a plain str;
            # build the message directly and chain the original error.
            raise self.InvalidInputException(
                '%s %s' % (e, image_validation_error_message_suffix)
            ) from e
        image_is_compressible = (
            file_format in feconf.COMPRESSIBLE_IMAGE_FORMATS)
        fs_services.save_original_and_compressed_versions_of_image(
            filename, feconf.ENTITY_TYPE_SKILL, skill.id, image,
            'image', image_is_compressible)
    self.render_json({'skillId': new_skill_id})
def get_multi_users_progress_in_stories(
    user_ids: List[str], story_ids: List[str]
) -> Dict[str, List[story_domain.LearnerGroupSyllabusStorySummaryDict]]:
    """Returns the progress of given users in all given stories.

    Args:
        user_ids: list(str). The user ids of the users.
        story_ids: list(str). The list of story ids.

    Returns:
        Dict(str, list(StoryProgressDict)). Dictionary of user id and
        their corresponding list of story progress dicts.
    """
    # Invalid story ids yield None from get_stories_by_ids and are
    # dropped here.
    all_valid_stories = [
        story for story in get_stories_by_ids(story_ids) if story
    ]

    # Filter unique topic ids from all valid stories.
    topic_ids = list(
        {story.corresponding_topic_id for story in all_valid_stories})
    topics = topic_fetchers.get_topics_by_ids(topic_ids)
    topic_id_to_topic_map = {}
    for topic in topics:
        # Ruling out the possibility of None for mypy type checking.
        assert topic is not None
        topic_id_to_topic_map[topic.id] = topic

    story_id_to_story_map = {story.id: story for story in all_valid_stories}
    valid_story_ids = [story.id for story in all_valid_stories]
    all_story_summaries = get_story_summaries_by_ids(valid_story_ids)
    story_id_to_summary_map = {
        summary.id: summary for summary in all_story_summaries
    }

    # All possible combinations of user_id and story_id for which progress
    # models are returned.
    all_posssible_combinations = itertools.product(user_ids, valid_story_ids)
    # NOTE(review): the indexed zip below assumes
    # StoryProgressModel.get_multi returns one entry per (user, story)
    # pair in the same order as itertools.product — confirm against the
    # model's implementation.
    progress_models = user_models.StoryProgressModel.get_multi(
        user_ids, valid_story_ids)
    all_users_stories_progress: Dict[
        str, List[story_domain.LearnerGroupSyllabusStorySummaryDict]] = {
        user_id: [] for user_id in user_ids
    }
    for i, (user_id, story_id) in enumerate(all_posssible_combinations):
        progress_model = progress_models[i]
        # A missing model means the user has completed no nodes yet.
        completed_node_ids = []
        if progress_model is not None:
            completed_node_ids = progress_model.completed_node_ids
        story = story_id_to_story_map[story_id]
        completed_node_titles = [
            node.title for node in story.story_contents.nodes
            if node.id in completed_node_ids
        ]
        topic = topic_id_to_topic_map[story.corresponding_topic_id]
        summary_dict = story_id_to_summary_map[story_id].to_dict()
        all_users_stories_progress[user_id].append({
            'id': summary_dict['id'],
            'title': summary_dict['title'],
            'description': summary_dict['description'],
            'language_code': summary_dict['language_code'],
            'version': summary_dict['version'],
            'node_titles': summary_dict['node_titles'],
            'thumbnail_filename': summary_dict['thumbnail_filename'],
            'thumbnail_bg_color': summary_dict['thumbnail_bg_color'],
            'url_fragment': summary_dict['url_fragment'],
            'story_model_created_on':
                summary_dict['story_model_created_on'],
            'story_model_last_updated':
                summary_dict['story_model_last_updated'],
            'story_is_published': True,
            'completed_node_titles': completed_node_titles,
            'all_node_dicts': [
                node.to_dict() for node in story.story_contents.nodes],
            'topic_name': topic.name,
            'topic_url_fragment': topic.url_fragment
        })
    return all_users_stories_progress
def get_multi_users_subtopic_pages_progress(
    user_ids: List[str], subtopic_page_ids: List[str]
) -> Dict[str, List[subtopic_page_domain.SubtopicPageSummaryDict]]:
    """Returns the progress of the given user on the given subtopic pages.

    Args:
        user_ids: list(str). The ids of the users.
        subtopic_page_ids: list(str). The ids of the subtopic pages.

    Returns:
        dict(str, list(SubtopicPageSummaryDict)). User IDs as keys and
        Subtopic Page Summary domain object dictionaries containing
        details of the subtopic page and users mastery in it as values.
    """
    topic_ids = get_topic_ids_from_subtopic_page_ids(subtopic_page_ids)
    topics = topic_fetchers.get_topics_by_ids(topic_ids)
    # None topics are skipped; the remaining skill id lists are flattened
    # and de-duplicated via the set comprehension below.
    all_skill_ids_lists = [
        topic.get_all_skill_ids() for topic in topics if topic
    ]
    all_skill_ids = list({
        skill_id for skill_list in all_skill_ids_lists
        for skill_id in skill_list
    })
    all_users_skill_mastery_dicts = (
        skill_services.get_multi_users_skills_mastery(
            user_ids, all_skill_ids))
    all_users_subtopic_prog_summaries: Dict[
        str, List[subtopic_page_domain.SubtopicPageSummaryDict]] = {
        user_id: [] for user_id in user_ids
    }
    for topic in topics:
        # Ruling out the possibility of None for mypy type checking.
        assert topic is not None
        for subtopic in topic.subtopics:
            # Subtopic page ids are of the form '<topic_id>:<subtopic_id>';
            # only subtopics explicitly requested are summarized.
            subtopic_page_id = '{}:{}'.format(topic.id, subtopic.id)
            if subtopic_page_id not in subtopic_page_ids:
                continue
            for user_id, skills_mastery_dict in (
                    all_users_skill_mastery_dicts.items()):
                # Keep only this subtopic's skills that have a recorded
                # mastery value.
                skill_mastery_dict = {
                    skill_id: mastery
                    for skill_id, mastery in skills_mastery_dict.items()
                    if mastery is not None and (
                        skill_id in subtopic.skill_ids)
                }
                subtopic_mastery: Optional[float] = None

                # Subtopic mastery is average of skill masteries.
                if skill_mastery_dict:
                    subtopic_mastery = (
                        sum(skill_mastery_dict.values()) /
                        len(skill_mastery_dict))

                all_users_subtopic_prog_summaries[user_id].append({
                    'subtopic_id': subtopic.id,
                    'subtopic_title': subtopic.title,
                    'parent_topic_id': topic.id,
                    'parent_topic_name': topic.name,
                    'thumbnail_filename': subtopic.thumbnail_filename,
                    'thumbnail_bg_color': subtopic.thumbnail_bg_color,
                    'subtopic_mastery': subtopic_mastery
                })
    return all_users_subtopic_prog_summaries
def get_matching_learner_group_syllabus_to_add(
    learner_group_id: str,
    keyword: str,
    search_type: str,
    category: str,
    language_code: str) -> learner_group_domain.LearnerGroupSyllabusDict:
    """Returns the syllabus of items matching the given filter arguments
    that can be added to the learner group.

    Args:
        learner_group_id: str. The id of the learner group.
        keyword: str. The keyword to search the syllabus. It is compared
            with the title of the topics, stories and subtopics.
        search_type: str. The type of the syllabus item to search. It can
            be either 'Story' or 'Skill'.
        category: str. The category of the syllabus items. It is the
            classroom in which the stories and subtopics are to be
            searched.
        language_code: str. The language of the topics in which the
            stories and subtopics are to be searched.

    Returns:
        dict. The matching syllabus items to add to the learner group.
    """
    # Default case when syllabus is being added to a new group.
    group_subtopic_page_ids: List[str] = []
    group_story_ids: List[str] = []

    # Case when syllabus is being added to an existing group: items
    # already in the group are excluded from the matches downstream.
    if learner_group_id:
        learner_group_model = learner_group_models.LearnerGroupModel.get(
            learner_group_id, strict=True)
        group_subtopic_page_ids = learner_group_model.subtopic_page_ids
        group_story_ids = learner_group_model.story_ids

    matching_topic_ids: List[str] = []
    all_classrooms_dict = config_domain.CLASSROOM_PAGES_DATA.value
    matching_subtopics_dicts: List[
        subtopic_page_domain.SubtopicPageSummaryDict] = []
    matching_story_syllabus_item_dicts: List[
        story_domain.LearnerGroupSyllabusStorySummaryDict] = []

    # Restrict the candidate topics to the chosen classroom, unless the
    # category filter is left at its default (search all topics).
    if category != constants.DEFAULT_ADD_SYLLABUS_FILTER:
        for classroom in all_classrooms_dict:
            if category and classroom['name'] == category:
                matching_topic_ids.extend(classroom['topic_ids'])
        matching_topics_with_none: Sequence[Optional[topic_domain.Topic]] = (
            topic_fetchers.get_topics_by_ids(matching_topic_ids))
    else:
        matching_topics_with_none = topic_fetchers.get_all_topics()

    keyword = keyword.lower()
    for topic in matching_topics_with_none:
        # Ruling out the possibility of None for mypy type checking.
        # NOTE(review): in the category branch, get_topics_by_ids can
        # yield None for a stale classroom topic id, which would trip
        # this assert — confirm classroom topic ids are always valid.
        assert topic is not None
        if language_code not in (constants.DEFAULT_ADD_SYLLABUS_FILTER,
                                 topic.language_code):
            continue
        if keyword in topic.canonical_name:
            # If search type is set to default or search type is set to
            # 'Story', add all story ids of this topic to the filtered
            # story ids.
            if (search_type in (constants.LEARNER_GROUP_ADD_STORY_FILTER,
                                constants.DEFAULT_ADD_SYLLABUS_FILTER)):
                matching_story_syllabus_item_dicts.extend(
                    get_matching_story_syllabus_item_dicts(
                        topic, group_story_ids))
            # If search type is set to default or search type is set to
            # 'Skill', add all subtopics of this topic to the filtered
            # subtopics.
            if (search_type in (constants.LEARNER_GROUP_ADD_SKILL_FILTER,
                                constants.DEFAULT_ADD_SYLLABUS_FILTER)):
                matching_subtopics_dicts.extend(
                    get_matching_subtopic_syllabus_item_dicts(
                        topic, group_subtopic_page_ids))
        else:
            # The topic title did not match: fall back to matching the
            # keyword against item titles within the topic.
            # If search type is set to default or search type is set to
            # 'Skill', add the subtopics which have the keyword in their
            # title to the filtered subtopics.
            if (search_type in (constants.LEARNER_GROUP_ADD_SKILL_FILTER,
                                constants.DEFAULT_ADD_SYLLABUS_FILTER)):
                matching_subtopics_dicts.extend(
                    get_matching_subtopic_syllabus_item_dicts(
                        topic, group_subtopic_page_ids, keyword))
            # If search type is set to default or search type is set to
            # 'Story', add all story ids of this topic to the possible
            # story ids.
            if (search_type in (constants.LEARNER_GROUP_ADD_STORY_FILTER,
                                constants.DEFAULT_ADD_SYLLABUS_FILTER)):
                matching_story_syllabus_item_dicts.extend(
                    get_matching_story_syllabus_item_dicts(
                        topic, group_story_ids, keyword))

    return {
        'story_summary_dicts': matching_story_syllabus_item_dicts,
        'subtopic_summary_dicts': matching_subtopics_dicts
    }