Example #1
    def test_compute_list_difference(self) -> None:
        self.assertEqual(
            utils.compute_list_difference(['-1', '-2', '-3', '-4', '-5'],
                                          ['-2', '-5', '-4']), ['-1', '-3'])
        self.assertEqual(
            utils.compute_list_difference(['-1', '-2', '-3', '-4', '-5'],
                                          ['-5', '-4', '-3', '-2', '-1']), [])
        self.assertEqual(
            utils.compute_list_difference(['-1', '-2', '-3', '-4', '-5'],
                                          ['-6', '-7', '-8', '-9', '-10']),
            ['-1', '-2', '-3', '-4', '-5'])
        self.assertEqual(
            utils.compute_list_difference(['-1', '-2'],
                                          ['-1', '-2', '-3', '-4', '-5']), [])
Example #2
def untag_deleted_misconceptions(
        committer_id, skill_id, skill_description,
        deleted_skill_misconception_ids):
    """Untags deleted misconceptions from questions belonging
    to a skill with the provided skill_id.

    Args:
        committer_id: str. The id of the user who triggered the update.
        skill_id: str. The skill id.
        skill_description: str. The description of the skill.
        deleted_skill_misconception_ids: list(str). The skill misconception
            ids of deleted misconceptions. The list items take the form
            <skill_id>-<misconception_id>.
    """
    question_skill_links = get_question_skill_links_of_skill(
        skill_id, skill_description)
    question_ids = [model.question_id for model in question_skill_links]
    questions = question_fetchers.get_questions_by_ids(question_ids)
    for question in questions:
        change_list = []
        inapplicable_skill_misconception_ids = (
            question.inapplicable_skill_misconception_ids)
        deleted_inapplicable_skill_misconception_ids = (
            list(
                set(deleted_skill_misconception_ids) &
                set(inapplicable_skill_misconception_ids)))
        if deleted_inapplicable_skill_misconception_ids:
            new_inapplicable_skill_misconception_ids = (
                utils.compute_list_difference(
                    question.inapplicable_skill_misconception_ids,
                    deleted_inapplicable_skill_misconception_ids))
            change_list.append(question_domain.QuestionChange({
                'cmd': 'update_question_property',
                'property_name': 'inapplicable_skill_misconception_ids',
                'new_value': new_inapplicable_skill_misconception_ids,
                'old_value': question.inapplicable_skill_misconception_ids
            }))
        old_question_state_data_dict = question.question_state_data.to_dict()
        answer_groups = (
            list(question.question_state_data.interaction.answer_groups))
        for answer_group in answer_groups:
            tagged_skill_misconception_id = (
                answer_group.to_dict()['tagged_skill_misconception_id'])
            if (tagged_skill_misconception_id
                    in deleted_skill_misconception_ids):
                answer_group.tagged_skill_misconception_id = None
        question.question_state_data.interaction.answer_groups = answer_groups
        change_list.append(question_domain.QuestionChange({
            'cmd': 'update_question_property',
            'property_name': 'question_state_data',
            'new_value': question.question_state_data.to_dict(),
            'old_value': old_question_state_data_dict
        }))
        update_question(
            committer_id, question.id, change_list,
            'Untagged deleted skill misconception ids.')
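The core of the function above is a two-step set computation: an intersection identifies which of the deleted misconception ids a given question actually marks as inapplicable, and the list difference then removes exactly those ids. A standalone illustration with hypothetical ids of the form <skill_id>-<misconception_id>:

inapplicable_skill_misconception_ids = ['skill1-0', 'skill1-1', 'skill2-3']
deleted_skill_misconception_ids = ['skill1-1', 'skill1-2']

# Ids that are both marked inapplicable on the question and deleted from
# the skill.
deleted_inapplicable_ids = list(
    set(deleted_skill_misconception_ids) &
    set(inapplicable_skill_misconception_ids))
# -> ['skill1-1']

# Inapplicable ids that remain after dropping the deleted ones.
new_inapplicable_ids = [
    item for item in inapplicable_skill_misconception_ids
    if item not in set(deleted_inapplicable_ids)]
# -> ['skill1-0', 'skill2-3']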
Example #3
def main():
    """Checks that the CI config files and protractor.conf.js contain the
    same e2e test suites.
    """
    python_utils.PRINT(
        'Checking all e2e test files are captured '
        'in protractor.conf.js...')
    protractor_test_suite_files = get_e2e_test_filenames_from_protractor_dir()
    protractor_conf_test_suites = (
        get_e2e_test_filenames_from_protractor_conf_file())

    if protractor_test_suite_files != protractor_conf_test_suites:
        raise Exception(
            'One or more test files from the protractor or protractor_desktop '
            'directories are missing from protractor.conf.js')
    python_utils.PRINT('Done!')

    python_utils.PRINT(
        'Checking e2e tests are captured in CI config files...')
    protractor_test_suites = get_e2e_suite_names_from_protractor_file()
    ci_suite_names = get_e2e_suite_names_from_ci_config_file()

    for excluded_test in TEST_SUITES_NOT_RUN_IN_CI:
        protractor_test_suites.remove(excluded_test)

    if not ci_suite_names:
        raise Exception(
            'The e2e test suites that have been extracted from '
            'script section from CI config files are empty.')
    if not protractor_test_suites:
        raise Exception(
            'The e2e test suites that have been extracted from '
            'protractor.conf.js are empty.')

    if SAMPLE_TEST_SUITE_THAT_IS_KNOWN_TO_EXIST not in ci_suite_names:
        raise Exception(
            '{} is expected to be in the e2e test suites '
            'extracted from the script section of CI config '
            'files, but it is missing.'
            .format(SAMPLE_TEST_SUITE_THAT_IS_KNOWN_TO_EXIST))

    if SAMPLE_TEST_SUITE_THAT_IS_KNOWN_TO_EXIST not in protractor_test_suites:
        raise Exception(
            '{} is expected to be in the e2e test suites '
            'extracted from the protractor.conf.js file, '
            'but it is missing.'
            .format(SAMPLE_TEST_SUITE_THAT_IS_KNOWN_TO_EXIST))

    if set(protractor_test_suites) != set(ci_suite_names):
        raise Exception(
            'Protractor test suites and CI test suites are not in sync. '
            'The following suites are out of sync: {}'.format(
                utils.compute_list_difference(
                    protractor_test_suites, ci_suite_names)))

    python_utils.PRINT('Done!')
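Note that the final check only reports the suites that are present in protractor.conf.js but missing from the CI configs, because the list difference is one-directional. A small illustration with hypothetical suite names:

protractor_test_suites = [
    'accessibility', 'additionalEditorFeatures', 'library']
ci_suite_names = ['accessibility', 'library']

suites_missing_from_ci = [
    suite for suite in protractor_test_suites
    if suite not in set(ci_suite_names)]
# -> ['additionalEditorFeatures'], which would be listed in the exception.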
Example #4
def get_canonical_story_dicts(
    user_id: str, topic: topic_domain.Topic
) -> List[CannonicalStoryDict]:
    """Returns a list of canonical story dicts in the topic.

    Args:
        user_id: str. The ID of the user.
        topic: Topic. The topic domain object.

    Returns:
        list(dict). A list of canonical story dicts in the given topic.
    """
    canonical_story_ids: List[str] = topic.get_canonical_story_ids(
        include_only_published=True)
    canonical_story_summaries: List[Optional[story_domain.StorySummary]] = [
        story_fetchers.get_story_summary_by_id(
            canonical_story_id) for canonical_story_id
        in canonical_story_ids]
    canonical_story_dicts = []
    for story_summary in canonical_story_summaries:
        # Ruling out the possibility of None for mypy type checking.
        assert story_summary is not None

        pending_and_all_nodes_in_story = (
            story_fetchers.get_pending_and_all_nodes_in_story(
                user_id, story_summary.id))
        all_nodes = pending_and_all_nodes_in_story['all_nodes']
        pending_nodes = pending_and_all_nodes_in_story['pending_nodes']
        pending_node_titles = [node.title for node in pending_nodes]
        completed_node_titles = utils.compute_list_difference(
            story_summary.node_titles, pending_node_titles)
        # Here, the return type of the 'to_human_readable_dict()' method is
        # HumanReadableStorySummaryDict, which does not have the
        # topic_url_fragment, story_is_published and other keys. To work
        # around these missing keys, we declare `story_summary_dict` as a
        # CannonicalStoryDict. This creates a conflict in the type assignment
        # and causes MyPy to throw an `Incompatible types in assignment`
        # error, so we use a type ignore here.
        story_summary_dict: CannonicalStoryDict = (
            story_summary.to_human_readable_dict()  # type: ignore[assignment]
        )
        story_summary_dict['topic_url_fragment'] = topic.url_fragment
        story_summary_dict['classroom_url_fragment'] = (
            classroom_services.get_classroom_url_fragment_for_topic_id(
                topic.id))
        story_summary_dict['story_is_published'] = True
        story_summary_dict['completed_node_titles'] = completed_node_titles
        story_summary_dict['all_node_dicts'] = [
            node.to_dict() for node in all_nodes]
        canonical_story_dicts.append(story_summary_dict)

    return canonical_story_dicts
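The completed-node computation above is again a plain list difference: every node title in the story summary that is not still pending is treated as completed. With hypothetical node titles:

node_titles = ['Chapter 1', 'Chapter 2', 'Chapter 3']
pending_node_titles = ['Chapter 3']

completed_node_titles = [
    title for title in node_titles
    if title not in set(pending_node_titles)]
# -> ['Chapter 1', 'Chapter 2']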
Example #5
    def post(self, suggestion_id):
        """Handles PUT requests.

        Raises:
            InvalidInputException. The suggestion is already handled.
            InvalidInputException. The 'skill_difficulty' parameter is missing.
            InvalidInputException. The 'skill_difficulty' is not a decimal.
            InvalidInputException. The 'question_state_data' parameter is
                missing.
            InvalidInputException. The 'question_state_data' parameter is
                invalid.
        """
        suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
        if suggestion.is_handled:
            raise self.InvalidInputException(
                'The suggestion with id %s has been accepted or rejected' %
                (suggestion_id))

        if self.payload.get('skill_difficulty') is None:
            raise self.InvalidInputException(
                'The parameter \'skill_difficulty\' is missing.')

        if not isinstance(self.payload.get('skill_difficulty'), float):
            raise self.InvalidInputException(
                'The parameter \'skill_difficulty\' should be a decimal.')

        if self.payload.get('question_state_data') is None:
            raise self.InvalidInputException(
                'The parameter \'question_state_data\' is missing.')

        question_state_data_obj = state_domain.State.from_dict(
            self.payload.get('question_state_data'))
        question_state_data_obj.validate(None, False)

        updated_suggestion = suggestion_services.update_question_suggestion(
            suggestion_id, self.payload.get('skill_difficulty'),
            self.payload.get('question_state_data'))

        new_image_filenames = (utils.compute_list_difference(
            updated_suggestion.get_new_image_filenames_added_in_suggestion(),
            suggestion.get_new_image_filenames_added_in_suggestion()))
        _upload_suggestion_images(self.request, updated_suggestion,
                                  new_image_filenames)

        self.render_json(self.values)
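The image handling above uploads only the filenames that appear in the updated suggestion but not in the original one, so images that were already attached are not uploaded again. With hypothetical filenames:

updated_suggestion_filenames = ['img_a.png', 'img_b.png', 'img_c.png']
original_suggestion_filenames = ['img_a.png']

new_image_filenames = [
    name for name in updated_suggestion_filenames
    if name not in set(original_suggestion_filenames)]
# -> ['img_b.png', 'img_c.png'], the only images that still need uploading.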
Example #6
def get_canonical_story_dicts(user_id, topic):
    """Returns a list of canonical story dicts in the topic.

    Args:
        user_id: str. The ID of the user.
        topic: Topic. The topic domain object.

    Returns:
        list(dict). A list of canonical story dicts in the given topic.
    """
    canonical_story_ids = topic.get_canonical_story_ids(
        include_only_published=True)
    canonical_story_summaries = [
        story_fetchers.get_story_summary_by_id(canonical_story_id)
        for canonical_story_id in canonical_story_ids
    ]
    canonical_story_dicts = []
    for story_summary in canonical_story_summaries:
        pending_and_all_nodes_in_story = (
            story_fetchers.get_pending_and_all_nodes_in_story(
                user_id, story_summary.id))
        all_nodes = pending_and_all_nodes_in_story['all_nodes']
        pending_nodes = pending_and_all_nodes_in_story['pending_nodes']
        pending_node_titles = [node.title for node in pending_nodes]
        completed_node_titles = utils.compute_list_difference(
            story_summary.node_titles, pending_node_titles)
        story_summary_dict = story_summary.to_human_readable_dict()
        story_summary_dict['topic_url_fragment'] = topic.url_fragment
        story_summary_dict['classroom_url_fragment'] = (
            classroom_services.get_classroom_url_fragment_for_topic_id(
                topic.id))
        story_summary_dict['story_is_published'] = True
        story_summary_dict['completed_node_titles'] = completed_node_titles
        story_summary_dict['all_node_dicts'] = [
            node.to_dict() for node in all_nodes
        ]
        canonical_story_dicts.append(story_summary_dict)

    return canonical_story_dicts
Example #7
    def get(self, topic_name):
        """Handles GET requests."""

        topic = topic_fetchers.get_topic_by_name(topic_name)
        canonical_story_ids = topic.get_canonical_story_ids(
            include_only_published=True)
        additional_story_ids = topic.get_additional_story_ids(
            include_only_published=True)
        canonical_story_summaries = [
            story_fetchers.get_story_summary_by_id(
                canonical_story_id) for canonical_story_id
            in canonical_story_ids]

        additional_story_summaries = [
            story_fetchers.get_story_summary_by_id(
                additional_story_id) for additional_story_id
            in additional_story_ids]

        canonical_story_dicts = []
        for story_summary in canonical_story_summaries:
            all_nodes = story_fetchers.get_pending_and_all_nodes_in_story(
                self.user_id, story_summary.id)['all_nodes']
            pending_nodes = story_fetchers.get_pending_and_all_nodes_in_story(
                self.user_id, story_summary.id)['pending_nodes']
            pending_node_titles = [node.title for node in pending_nodes]
            completed_node_titles = utils.compute_list_difference(
                story_summary.node_titles, pending_node_titles)
            story_summary_dict = story_summary.to_human_readable_dict()
            story_summary_dict['story_is_published'] = True
            story_summary_dict['completed_node_titles'] = completed_node_titles
            story_summary_dict['all_node_dicts'] = [
                node.to_dict() for node in all_nodes]
            canonical_story_dicts.append(story_summary_dict)

        additional_story_dicts = []
        for story_summary in additional_story_summaries:
            all_nodes = story_fetchers.get_pending_and_all_nodes_in_story(
                self.user_id, story_summary.id)['all_nodes']
            pending_nodes = story_fetchers.get_pending_and_all_nodes_in_story(
                self.user_id, story_summary.id)['pending_nodes']
            pending_node_titles = [node.title for node in pending_nodes]
            completed_node_titles = utils.compute_list_difference(
                story_summary.node_titles, pending_node_titles)
            story_summary_dict = story_summary.to_human_readable_dict()
            story_summary_dict['story_is_published'] = True
            story_summary_dict['completed_node_titles'] = completed_node_titles
            story_summary_dict['all_node_dicts'] = [
                node.to_dict() for node in all_nodes]
            additional_story_dicts.append(story_summary_dict)

        uncategorized_skill_ids = topic.get_all_uncategorized_skill_ids()
        subtopics = topic.get_all_subtopics()

        all_skill_ids = topic.get_all_skill_ids()
        skill_descriptions, deleted_skill_ids = (
            skill_services.get_descriptions_of_skills(
                all_skill_ids))

        if deleted_skill_ids:
            deleted_skills_string = ', '.join(deleted_skill_ids)
            logging.exception(
                'The deleted skills: %s are still present in topic with id %s'
                % (deleted_skills_string, topic.id)
            )
            if feconf.CAN_SEND_EMAILS:
                email_manager.send_mail_to_admin(
                    'Deleted skills present in topic',
                    'The deleted skills: %s are still present in topic with '
                    'id %s' % (deleted_skills_string, topic.id))

        if self.user_id:
            degrees_of_mastery = skill_services.get_multi_user_skill_mastery(
                self.user_id, all_skill_ids)
        else:
            degrees_of_mastery = {}
            for skill_id in all_skill_ids:
                degrees_of_mastery[skill_id] = None

        self.values.update({
            'topic_id': topic.id,
            'topic_name': topic.name,
            'topic_description': topic.description,
            'canonical_story_dicts': canonical_story_dicts,
            'additional_story_dicts': additional_story_dicts,
            'uncategorized_skill_ids': uncategorized_skill_ids,
            'subtopics': subtopics,
            'degrees_of_mastery': degrees_of_mastery,
            'skill_descriptions': skill_descriptions,
            'practice_tab_is_displayed': topic.practice_tab_is_displayed,
            'meta_tag_content': topic.meta_tag_content,
            'page_title_fragment_for_web': topic.page_title_fragment_for_web
        })
        self.render_json(self.values)
Example #8
def advance_version_of_exp_stats(
        exp_version, exp_versions_diff, exp_stats,
        reverted_exp_stats, revert_to_version):
    """Makes required changes to the structure of ExplorationStatsModel of an
    old exp_version and a new ExplorationStatsModel is created for the new
    exp_version. Note: This function does not save the newly created model, it
    returns it. Callers should explicitly save the model if required.

    Args:
        exp_version: int. Version of the exploration.
        exp_versions_diff: ExplorationVersionsDiff|None. The domain object for
            the exploration versions difference, None if it is a revert.
        exp_stats: ExplorationStats. The ExplorationStats model.
        reverted_exp_stats: ExplorationStats|None. The reverted
            ExplorationStats model.
        revert_to_version: int|None. The version to revert to if the change
            is a revert; otherwise None.

    Returns:
        ExplorationStats. The newly created exploration stats object.
    """

    # Handle reverts.
    if revert_to_version:
        # If the old exploration stats model doesn't exist, the current model
        # is carried over (this is a fallback case for some tests, and can
        # never happen in production).
        if reverted_exp_stats:
            exp_stats.num_starts_v2 = reverted_exp_stats.num_starts_v2
            exp_stats.num_actual_starts_v2 = (
                reverted_exp_stats.num_actual_starts_v2)
            exp_stats.num_completions_v2 = (
                reverted_exp_stats.num_completions_v2)
            exp_stats.state_stats_mapping = (
                reverted_exp_stats.state_stats_mapping)
        exp_stats.exp_version = exp_version

        return exp_stats

    new_state_name_stats_mapping = {}

    # Handle unchanged states.
    unchanged_state_names = set(utils.compute_list_difference(
        exp_stats.state_stats_mapping,
        exp_versions_diff.deleted_state_names +
        list(exp_versions_diff.new_to_old_state_names.values())))
    for state_name in unchanged_state_names:
        new_state_name_stats_mapping[state_name] = (
            exp_stats.state_stats_mapping[state_name].clone())

    # Handle renamed states.
    for state_name in exp_versions_diff.new_to_old_state_names:
        old_state_name = exp_versions_diff.new_to_old_state_names[
            state_name]
        new_state_name_stats_mapping[state_name] = (
            exp_stats.state_stats_mapping[old_state_name].clone())

    # Handle newly-added states.
    for state_name in exp_versions_diff.added_state_names:
        new_state_name_stats_mapping[state_name] = (
            stats_domain.StateStats.create_default())

    exp_stats.state_stats_mapping = new_state_name_stats_mapping
    exp_stats.exp_version = exp_version

    return exp_stats
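In this last example the first argument to compute_list_difference is a dict (the state-stats mapping); iterating over a dict yields its keys, so the difference is taken over state names. States that were deleted, and the old names of states that were renamed, are excluded, and everything left over is carried across unchanged. A hypothetical illustration:

state_stats_mapping = {
    'Introduction': 'stats_a', 'Old name': 'stats_b', 'Removed': 'stats_c'}
deleted_state_names = ['Removed']
new_to_old_state_names = {'New name': 'Old name'}

excluded = set(deleted_state_names) | set(new_to_old_state_names.values())
unchanged_state_names = [
    name for name in state_stats_mapping  # iterating a dict yields its keys
    if name not in excluded]
# -> ['Introduction']; 'Old name' is handled by the rename branch instead.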