def untag_deleted_misconceptions(
        committer_id, skill_id, skill_description,
        deleted_skill_misconception_ids):
    """Untags deleted misconceptions from questions belonging to a skill
    with the provided skill_id.

    Args:
        committer_id: str. The id of the user who triggered the update.
        skill_id: str. The skill id.
        skill_description: str. The description of the skill.
        deleted_skill_misconception_ids: list(str). The skill misconception
            ids of deleted misconceptions. The list items take the form
            <skill_id>-<misconception_id>.
    """
    question_skill_links = get_question_skill_links_of_skill(
        skill_id, skill_description)
    question_ids = [model.question_id for model in question_skill_links]
    questions = question_fetchers.get_questions_by_ids(question_ids)
    # Set lookup makes the per-answer-group membership test O(1).
    deleted_skill_misconception_ids_set = set(deleted_skill_misconception_ids)
    for question in questions:
        change_list = []
        inapplicable_skill_misconception_ids = (
            question.inapplicable_skill_misconception_ids)
        deleted_inapplicable_skill_misconception_ids = list(
            deleted_skill_misconception_ids_set &
            set(inapplicable_skill_misconception_ids))
        if deleted_inapplicable_skill_misconception_ids:
            new_inapplicable_skill_misconception_ids = (
                utils.compute_list_difference(
                    question.inapplicable_skill_misconception_ids,
                    deleted_inapplicable_skill_misconception_ids))
            change_list.append(question_domain.QuestionChange({
                'cmd': 'update_question_property',
                'property_name': 'inapplicable_skill_misconception_ids',
                'new_value': new_inapplicable_skill_misconception_ids,
                'old_value': question.inapplicable_skill_misconception_ids
            }))
        old_question_state_data_dict = question.question_state_data.to_dict()
        answer_groups = (
            list(question.question_state_data.interaction.answer_groups))
        # Clear tags that point at deleted misconceptions. The attribute is
        # read directly instead of serializing each answer group with
        # to_dict() just to inspect a single field (the assignment below
        # shows the attribute exists on the object).
        for answer_group in answer_groups:
            if (answer_group.tagged_skill_misconception_id in
                    deleted_skill_misconception_ids_set):
                answer_group.tagged_skill_misconception_id = None
        question.question_state_data.interaction.answer_groups = answer_groups
        change_list.append(question_domain.QuestionChange({
            'cmd': 'update_question_property',
            'property_name': 'question_state_data',
            'new_value': question.question_state_data.to_dict(),
            'old_value': old_question_state_data_dict
        }))
        update_question(
            committer_id, question.id, change_list,
            'Untagged deleted skill misconception ids.')
def main():
    """Test the CI config files and protractor.conf.js to have same e2e test
    suites.

    Raises:
        Exception. A protractor test file is missing from protractor.conf.js,
            one of the extracted suite lists is empty, a known sample suite is
            absent, or the protractor and CI suite lists are out of sync.
    """
    python_utils.PRINT(
        'Checking all e2e test files are captured '
        'in protractor.conf.js...')
    protractor_test_suite_files = get_e2e_test_filenames_from_protractor_dir()
    protractor_conf_test_suites = (
        get_e2e_test_filenames_from_protractor_conf_file())
    # '!=' instead of 'not ... == ...' (PEP 8 readability).
    if protractor_test_suite_files != protractor_conf_test_suites:
        raise Exception(
            'One or more test file from protractor or protractor_desktop '
            'directory is missing from protractor.conf.js')
    python_utils.PRINT('Done!')

    python_utils.PRINT(
        'Checking e2e tests are captured in CI config files...')
    protractor_test_suites = get_e2e_suite_names_from_protractor_file()
    ci_suite_names = get_e2e_suite_names_from_ci_config_file()

    # Suites deliberately excluded from CI must be removed from the
    # protractor side before comparing the two lists.
    for excluded_test in TEST_SUITES_NOT_RUN_IN_CI:
        protractor_test_suites.remove(excluded_test)

    if not ci_suite_names:
        raise Exception(
            'The e2e test suites that have been extracted from '
            'script section from CI config files are empty.')

    if not protractor_test_suites:
        raise Exception(
            'The e2e test suites that have been extracted from '
            'protractor.conf.js are empty.')

    # Sanity check: a suite known to exist must appear in both extractions,
    # otherwise the extraction logic itself is broken.
    if SAMPLE_TEST_SUITE_THAT_IS_KNOWN_TO_EXIST not in ci_suite_names:
        raise Exception(
            '{} is expected to be in the e2e test suites '
            'extracted from the script section of CI config '
            'files, but it is missing.'
            .format(SAMPLE_TEST_SUITE_THAT_IS_KNOWN_TO_EXIST))

    if SAMPLE_TEST_SUITE_THAT_IS_KNOWN_TO_EXIST not in protractor_test_suites:
        raise Exception(
            '{} is expected to be in the e2e test suites '
            'extracted from the protractor.conf.js file, '
            'but it is missing.'
            .format(SAMPLE_TEST_SUITE_THAT_IS_KNOWN_TO_EXIST))

    if protractor_test_suites != ci_suite_names:
        raise Exception(
            'Protractor test suites and CI test suites are not in sync. '
            'Following suites are not in sync: {}'.format(
                utils.compute_list_difference(
                    protractor_test_suites, ci_suite_names)))
    python_utils.PRINT('Done!')
def update_opportunity_with_updated_exploration(exp_id):
    """Updates the opportunities models with the changes made in the
    exploration.

    Args:
        exp_id: str. The exploration id which is also the id of the
            opportunity model.
    """
    exploration = exp_fetchers.get_exploration_by_id(exp_id)
    model = opportunity_models.ExplorationOpportunitySummaryModel.get(exp_id)
    opportunity = get_exploration_opportunity_summary_from_model(model)

    opportunity.content_count = exploration.get_content_count()
    opportunity.translation_counts = exploration.get_translation_counts()

    fully_translated_languages = (
        exploration.get_languages_with_complete_translation())
    # Languages whose translation is now complete are no longer incomplete.
    opportunity.incomplete_translation_language_codes = (
        utils.compute_list_difference(
            opportunity.incomplete_translation_language_codes,
            fully_translated_languages))

    # Only NEW languages are appended to language_codes_needing_voice_artists
    # (instead of adding the whole completely-translated list): the set of
    # completely translated languages is dynamic as content text changes,
    # whereas voiceover is long-term work, so a voice artist may keep working
    # on an exploration that still needs a small translation update.
    newly_ready_for_voiceover = (
        set(fully_translated_languages) -
        set(opportunity.language_codes_with_assigned_voice_artists))
    opportunity.language_codes_needing_voice_artists = list(
        set(opportunity.language_codes_needing_voice_artists) |
        newly_ready_for_voiceover)

    opportunity.validate()
    _save_multi_exploration_opportunity_summary([opportunity])
def post(self, suggestion_id):
    """Handles PUT requests.

    Raises:
        InvalidInputException. The suggestion is already handled.
        InvalidInputException. The 'skill_difficulty' parameter is missing.
        InvalidInputException. The 'skill_difficulty' is not a decimal.
        InvalidInputException. The 'question_state_data' parameter is
            missing.
        InvalidInputException. The 'question_state_data' parameter is
            invalid.
    """
    suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
    if suggestion.is_handled:
        raise self.InvalidInputException(
            'The suggestion with id %s has been accepted or rejected' % (
                suggestion_id))

    # Hoist the payload reads so each value is fetched exactly once.
    skill_difficulty = self.payload.get('skill_difficulty')
    if skill_difficulty is None:
        raise self.InvalidInputException(
            'The parameter \'skill_difficulty\' is missing.')
    if not isinstance(skill_difficulty, float):
        raise self.InvalidInputException(
            'The parameter \'skill_difficulty\' should be a decimal.')

    question_state_data = self.payload.get('question_state_data')
    if question_state_data is None:
        raise self.InvalidInputException(
            'The parameter \'question_state_data\' is missing.')

    # Raises if the supplied dict is not a valid question state.
    question_state_data_obj = state_domain.State.from_dict(
        question_state_data)
    question_state_data_obj.validate(None, False)

    updated_suggestion = suggestion_services.update_question_suggestion(
        suggestion_id, skill_difficulty, question_state_data)

    # Only images that this edit introduced need to be uploaded.
    new_image_filenames = utils.compute_list_difference(
        updated_suggestion.get_new_image_filenames_added_in_suggestion(),
        suggestion.get_new_image_filenames_added_in_suggestion())
    _upload_suggestion_images(
        self.request, updated_suggestion, new_image_filenames)

    self.render_json(self.values)
def get_new_image_filenames_added_in_suggestion(self):
    """Returns the list of newly added image filenames in the suggestion.

    Returns:
        list(str). A list of newly added image filenames in the
        suggestion.
    """
    filenames_in_suggestion = (
        html_cleaner.get_image_filenames_from_html_strings(
            self.get_all_html_content_strings()))
    filenames_in_target_entity = (
        html_cleaner.get_image_filenames_from_html_strings(
            self.get_target_entity_html_strings()))
    # Images referenced by the suggestion but not by the target entity are
    # the ones this suggestion newly adds.
    return utils.compute_list_difference(
        filenames_in_suggestion, filenames_in_target_entity)
def get_canonical_story_dicts(user_id, topic):
    """Returns a list of canonical story dicts in the topic.

    Args:
        user_id: str. The ID of the user.
        topic: Topic. The topic domain object.

    Returns:
        list(dict). A list of canonical story dicts in the given topic.
    """
    canonical_story_ids = topic.get_canonical_story_ids(
        include_only_published=True)
    canonical_story_summaries = [
        story_fetchers.get_story_summary_by_id(canonical_story_id)
        for canonical_story_id in canonical_story_ids
    ]
    # The classroom url fragment depends only on the topic, so it is
    # computed once here instead of being re-fetched for every story in the
    # loop below.
    classroom_url_fragment = (
        classroom_services.get_classroom_url_fragment_for_topic_id(topic.id))
    canonical_story_dicts = []
    for story_summary in canonical_story_summaries:
        pending_and_all_nodes_in_story = (
            story_fetchers.get_pending_and_all_nodes_in_story(
                user_id, story_summary.id))
        all_nodes = pending_and_all_nodes_in_story['all_nodes']
        pending_nodes = pending_and_all_nodes_in_story['pending_nodes']
        pending_node_titles = [node.title for node in pending_nodes]
        # Nodes that are not pending for this user count as completed.
        completed_node_titles = utils.compute_list_difference(
            story_summary.node_titles, pending_node_titles)
        story_summary_dict = story_summary.to_human_readable_dict()
        story_summary_dict['topic_url_fragment'] = topic.url_fragment
        story_summary_dict['classroom_url_fragment'] = classroom_url_fragment
        story_summary_dict['story_is_published'] = True
        story_summary_dict['completed_node_titles'] = completed_node_titles
        story_summary_dict['all_node_dicts'] = [
            node.to_dict() for node in all_nodes
        ]
        canonical_story_dicts.append(story_summary_dict)
    return canonical_story_dicts
def _validate_inapplicable_skill_misconception_ids(cls, item):
    """Validate that inapplicable skill misconception ids are valid.

    Args:
        item: datastore_services.Model. QuestionModel to validate.
    """
    inapplicable_skill_misconception_ids = (
        item.inapplicable_skill_misconception_ids)
    # Map each skill id to ALL of its referenced misconception ids. A list
    # per skill is required: the previous single-value mapping silently
    # overwrote earlier entries, so only the last misconception id per
    # skill was validated.
    skill_misconception_ids_mapping = {}
    skill_ids = []
    for skill_misconception_id in inapplicable_skill_misconception_ids:
        skill_id, misconception_id = skill_misconception_id.split('-')
        skill_misconception_ids_mapping.setdefault(skill_id, []).append(
            misconception_id)
        skill_ids.append(skill_id)
    skills = skill_fetchers.get_multi_skills(skill_ids, strict=False)
    for skill in skills:
        if skill is not None:
            misconception_ids = [
                misconception.id
                for misconception in skill.misconceptions
            ]
            # Report every referenced misconception that is absent from
            # the skill, not just one of them.
            for expected_misconception_id in (
                    skill_misconception_ids_mapping[skill.id]):
                if int(expected_misconception_id) not in misconception_ids:
                    cls._add_error(
                        'misconception id',
                        'Entity id %s: misconception with the id %s does '
                        'not exist in the skill with id %s' % (
                            item.id, expected_misconception_id, skill.id))
    missing_skill_ids = utils.compute_list_difference(
        skill_ids,
        [skill.id for skill in skills if skill is not None])
    for skill_id in missing_skill_ids:
        cls._add_error(
            'skill id',
            'Entity id %s: skill with the following id does not exist:'
            ' %s' % (item.id, skill_id))
def advance_version_of_exp_stats(
        exp_version, exp_versions_diff, exp_stats, reverted_exp_stats,
        revert_to_version):
    """Makes required changes to the structure of ExplorationStatsModel of an
    old exp_version and a new ExplorationStatsModel is created for the new
    exp_version. Note: This function does not save the newly created model, it
    returns it. Callers should explicitly save the model if required.

    Args:
        exp_version: int. Version of the exploration.
        exp_versions_diff: ExplorationVersionsDiff|None. The domain object for
            the exploration versions difference, None if it is a revert.
        exp_stats: ExplorationStats. The ExplorationStats model.
        reverted_exp_stats: ExplorationStats|None. The reverted
            ExplorationStats model.
        revert_to_version: int|None. If the change is a revert, the version.
            Otherwise, None.

    Returns:
        ExplorationStats. The newly created exploration stats object.
    """
    # Handling reverts: copy the aggregate counters and the per-state stats
    # from the stats of the version being reverted to, then stamp the new
    # version number. exp_versions_diff is not consulted on this path (it is
    # None for reverts, per the docstring).
    if revert_to_version:
        # If the old exploration issues model doesn't exist, the current model
        # is carried over (this is a fallback case for some tests, and can never
        # happen in production.)
        if reverted_exp_stats:
            exp_stats.num_starts_v2 = reverted_exp_stats.num_starts_v2
            exp_stats.num_actual_starts_v2 = (
                reverted_exp_stats.num_actual_starts_v2)
            exp_stats.num_completions_v2 = (
                reverted_exp_stats.num_completions_v2)
            exp_stats.state_stats_mapping = (
                reverted_exp_stats.state_stats_mapping)
        exp_stats.exp_version = exp_version
        return exp_stats
    # Non-revert path: rebuild the state-name -> StateStats mapping so it
    # matches the new version's state names. Stats of deleted states are
    # intentionally not carried over into the new mapping.
    new_state_name_stats_mapping = {}
    # Handle unchanged states: every state that was neither deleted nor
    # renamed (new_to_old_state_names maps new name -> old name, so its
    # values are the old names of renamed states).
    unchanged_state_names = set(
        utils.compute_list_difference(
            exp_stats.state_stats_mapping,
            exp_versions_diff.deleted_state_names +
            list(exp_versions_diff.new_to_old_state_names.values())))
    for state_name in unchanged_state_names:
        # Cloned so the new model does not share StateStats objects with the
        # old version's mapping.
        new_state_name_stats_mapping[state_name] = (
            exp_stats.state_stats_mapping[state_name].clone())
    # Handle renamed states: carry the old state's stats over under the new
    # state name.
    for state_name in exp_versions_diff.new_to_old_state_names:
        old_state_name = exp_versions_diff.new_to_old_state_names[state_name]
        new_state_name_stats_mapping[state_name] = (
            exp_stats.state_stats_mapping[old_state_name].clone())
    # Handle newly-added states: these start from default (empty) stats.
    for state_name in exp_versions_diff.added_state_names:
        new_state_name_stats_mapping[state_name] = (
            stats_domain.StateStats.create_default())
    exp_stats.state_stats_mapping = new_state_name_stats_mapping
    exp_stats.exp_version = exp_version
    return exp_stats
def get(self, topic_name):
    """Handles GET requests.

    Args:
        topic_name: str. The name of the topic whose data is fetched.
    """
    topic = topic_fetchers.get_topic_by_name(topic_name)

    def _build_story_dicts(story_ids):
        """Builds human-readable summary dicts, annotated with this user's
        node-completion info, for the published stories with the given ids.
        """
        story_dicts = []
        story_summaries = [
            story_fetchers.get_story_summary_by_id(story_id)
            for story_id in story_ids
        ]
        for story_summary in story_summaries:
            pending_nodes = story_fetchers.get_pending_nodes_in_story(
                self.user_id, story_summary.id)
            pending_node_titles = [node.title for node in pending_nodes]
            # Nodes that are not pending for this user count as completed.
            completed_node_titles = utils.compute_list_difference(
                story_summary.node_titles, pending_node_titles)
            story_summary_dict = story_summary.to_human_readable_dict()
            story_summary_dict['story_is_published'] = True
            story_summary_dict['completed_node_titles'] = (
                completed_node_titles)
            story_summary_dict['pending_node_dicts'] = [
                node.to_dict() for node in pending_nodes
            ]
            story_dicts.append(story_summary_dict)
        return story_dicts

    # The canonical and additional story lists are built identically, so the
    # previously-duplicated loops are factored into one helper.
    canonical_story_dicts = _build_story_dicts(
        topic.get_canonical_story_ids(include_only_published=True))
    additional_story_dicts = _build_story_dicts(
        topic.get_additional_story_ids(include_only_published=True))

    uncategorized_skill_ids = topic.get_all_uncategorized_skill_ids()
    subtopics = topic.get_all_subtopics()

    all_skill_ids = topic.get_all_skill_ids()
    skill_descriptions, deleted_skill_ids = (
        skill_services.get_descriptions_of_skills(all_skill_ids))

    if deleted_skill_ids:
        # A topic referencing deleted skills indicates a data-integrity
        # problem, so it is logged and reported to the admins.
        deleted_skills_string = ', '.join(deleted_skill_ids)
        logging.error(
            'The deleted skills: %s are still present in topic with id %s'
            % (deleted_skills_string, topic.id))
        if feconf.CAN_SEND_EMAILS:
            email_manager.send_mail_to_admin(
                'Deleted skills present in topic',
                'The deleted skills: %s are still present in topic with '
                'id %s' % (deleted_skills_string, topic.id))

    if self.user_id:
        degrees_of_mastery = skill_services.get_multi_user_skill_mastery(
            self.user_id, all_skill_ids)
    else:
        # Logged-out users have no mastery information.
        degrees_of_mastery = {skill_id: None for skill_id in all_skill_ids}

    self.values.update({
        'topic_id': topic.id,
        'topic_name': topic.name,
        'topic_description': topic.description,
        'canonical_story_dicts': canonical_story_dicts,
        'additional_story_dicts': additional_story_dicts,
        'uncategorized_skill_ids': uncategorized_skill_ids,
        'subtopics': subtopics,
        'degrees_of_mastery': degrees_of_mastery,
        'skill_descriptions': skill_descriptions,
        'practice_tab_is_displayed': topic.practice_tab_is_displayed,
        'meta_tag_content': topic.meta_tag_content,
        'page_title_fragment_for_web': topic.page_title_fragment_for_web
    })
    self.render_json(self.values)