예제 #1
0
    def test_accumulate_one_more_than_limit_case_is_split(self) -> None:
        """Two results whose combined size exceeds the accumulation limit
        must be kept as two separate accumulated results.
        """
        inputs = [
            job_run_result.JobRunResult(stdout='', stderr='a' * 2000),
            job_run_result.JobRunResult(stdout='', stderr='b' * 3000),
        ]

        results = job_run_result.JobRunResult.accumulate(inputs)

        self.assertEqual(len(results), 2)
예제 #2
0
    def test_unmigrated_story_with_unmigrated_rubric_is_migrated(self) -> None:
        """Checks that a story model stored on an old contents schema
        version is migrated to the latest schema by the job.

        NOTE(review): the name mentions 'rubric', but the test only
        exercises story contents migration — confirm the intended name.
        """
        # Contents saved at schema version 4 (below the current version),
        # so the job is expected to migrate this model.
        story_model = self.create_model(
            story_models.StoryModel,
            id=self.STORY_1_ID,
            story_contents_schema_version=4,
            title='title',
            language_code='cs',
            notes='notes',
            description='description',
            story_contents=self.unmigrated_contents,
            corresponding_topic_id=self.TOPIC_1_ID,
            url_fragment='urlfragment',
        )
        story_model.update_timestamps()
        story_model.commit(feconf.SYSTEM_COMMITTER_ID, 'Create story',
                           [{
                               'cmd': story_domain.CMD_CREATE_NEW
                           }])

        self.assert_job_output_is([
            job_run_result.JobRunResult(stdout='STORY PROCESSED SUCCESS: 1'),
            job_run_result.JobRunResult(stdout='STORY MIGRATED SUCCESS: 1'),
            job_run_result.JobRunResult(stdout='CACHE DELETION SUCCESS: 1')
        ])

        # The migration commit bumps the model to version 2 and upgrades
        # the contents to the latest schema version.
        migrated_story_model = story_models.StoryModel.get(self.STORY_1_ID)
        self.assertEqual(migrated_story_model.version, 2)
        self.assertEqual(migrated_story_model.story_contents_schema_version,
                         feconf.CURRENT_STORY_CONTENTS_SCHEMA_VERSION)
        self.assertEqual(migrated_story_model.story_contents,
                         self.latest_contents)
예제 #3
0
    def test_broken_cache_is_reported(self) -> None:
        """Checks that a failing cache deletion is reported on stderr while
        the story migration itself still succeeds and is committed.
        """
        # Force every caching_services.delete_multi call to raise so the
        # job's cache-deletion step fails deterministically.
        cache_swap = self.swap_to_always_raise(
            caching_services, 'delete_multi',
            Exception('cache deletion error'))

        story_model = self.create_model(
            story_models.StoryModel,
            id=self.STORY_1_ID,
            story_contents_schema_version=4,
            title='title',
            language_code='cs',
            notes='notes',
            description='description',
            story_contents=self.latest_contents,
            corresponding_topic_id=self.TOPIC_1_ID,
            url_fragment='urlfragment',
        )
        story_model.update_timestamps()
        story_model.commit(feconf.SYSTEM_COMMITTER_ID, 'Create story',
                           [{
                               'cmd': story_domain.CMD_CREATE_NEW
                           }])

        with cache_swap:
            self.assert_job_output_is([
                job_run_result.JobRunResult(
                    stdout='STORY MIGRATED SUCCESS: 1'),
                job_run_result.JobRunResult(
                    stdout='STORY PROCESSED SUCCESS: 1'),
                job_run_result.JobRunResult(
                    stderr='CACHE DELETION ERROR: "cache deletion error": 1')
            ])

        # The cache failure must not block the migration commit itself.
        migrated_story_model = story_models.StoryModel.get(self.STORY_1_ID)
        self.assertEqual(migrated_story_model.version, 2)
예제 #4
0
    def test_counts_multiple_collection(self) -> None:
        """Checks that the job reports, per collection editor, the list of
        collections they can edit — and omits editors with none.
        """
        user1 = self.create_model(
            user_models.UserSettingsModel,
            id=self.USER_ID_1,
            email='*****@*****.**',
            roles=[feconf.ROLE_ID_COLLECTION_EDITOR]
        )
        user2 = self.create_model(
            user_models.UserSettingsModel,
            id=self.USER_ID_2,
            email='*****@*****.**',
            roles=[feconf.ROLE_ID_COLLECTION_EDITOR]
        )
        # Checking a user who has no collection.
        user3 = self.create_model(
            user_models.UserSettingsModel,
            id=self.USER_ID_3,
            email='*****@*****.**',
            roles=[feconf.ROLE_ID_COLLECTION_EDITOR]
        )
        user1.update_timestamps()
        user2.update_timestamps()
        user3.update_timestamps()
        # col_1 is owned by both users; col_2 only by user 2.
        collection1 = self.create_model(
            collection_models.CollectionRightsModel,
            id='col_1',
            owner_ids=[self.USER_ID_1, self.USER_ID_2],
            editor_ids=[self.USER_ID_1],
            voice_artist_ids=[self.USER_ID_1],
            community_owned=False,
            status=constants.ACTIVITY_STATUS_PUBLIC,
            viewable_if_private=False,
            first_published_msec=0.2
        )
        collection1.update_timestamps()
        collection2 = self.create_model(
            collection_models.CollectionRightsModel,
            id='col_2',
            owner_ids=[self.USER_ID_2],
            editor_ids=[self.USER_ID_1],
            voice_artist_ids=[self.USER_ID_1],
            community_owned=False,
            status=constants.ACTIVITY_STATUS_PUBLIC,
            viewable_if_private=False,
            first_published_msec=0.2
        )
        collection2.update_timestamps()
        self.put_multi([user1, user2, user3, collection1, collection2])

        # Only users 1 and 2 appear in the output; user 3 owns nothing.
        self.assert_job_output_is([
            job_run_result.JobRunResult(
                stdout=(
                    'collection_ids: [\'col_1\'], email: '
                    '[\'[email protected]\']')),
            job_run_result.JobRunResult(
                stdout=(
                    'collection_ids: [\'col_1\', \'col_2\'], email: '
                    '[\'[email protected]\']')
                )
        ])
예제 #5
0
    def test_equality(self) -> None:
        """JobRunResults compare equal to themselves and unequal when their
        stdout/stderr payloads differ.
        """
        a_result = job_run_result.JobRunResult(stdout='abc', stderr='123')
        b_result = job_run_result.JobRunResult(stdout='def', stderr='456')

        self.assertEqual(a_result, a_result)
        self.assertEqual(b_result, b_result)
        self.assertNotEqual(a_result, b_result)
예제 #6
0
    def run(self) -> beam.PCollection[job_run_result.JobRunResult]:
        """Creates missing UserStatsModels for users that lack one, updates
        weekly creator stats for all stats models, and reports counts.

        Returns:
            PCollection. JobRunResults carrying 'SUCCESS NEW <n>' and/or
            'SUCCESS OLD <n>' messages; each is only emitted when the
            corresponding count is greater than zero.
        """
        user_settings_models = (
            self.pipeline
            | 'Get all UserSettingsModels' >>
            (ndb_io.GetModels(user_models.UserSettingsModel.get_all())))

        old_user_stats_models = (
            self.pipeline
            | 'Get all UserStatsModels' >>
            (ndb_io.GetModels(user_models.UserStatsModel.get_all())))

        # Create UserStatsModels for the users that do not have one yet.
        new_user_stats_models = (
            (user_settings_models, old_user_stats_models)
            | 'Merge models' >> beam.Flatten()
            # Returns a PCollection of
            # (model.id, (user_settings_models, user_stats_models)) or
            # (model.id, (user_settings_models,)).
            | 'Group models with same ID' >> beam.GroupBy(lambda m: m.id)
            # Discards model.id from the PCollection.
            | 'Get rid of key' >> beam.Values()  # pylint: disable=no-value-for-parameter
            # Only keep groupings that indicate that
            # the UserStatsModel is missing.
            | 'Filter pairs of models' >>
            beam.Filter(lambda models: (len(list(models)) == 1 and isinstance(
                list(models)[0], user_models.UserSettingsModel)))
            # Choosing the first element.
            | 'Transform tuples into models' >>
            beam.Map(lambda models: list(models)[0])
            # Creates the missing UserStatsModels.
            | 'Create new user stat models' >> beam.ParDo(
                CreateUserStatsModel()))

        unused_put_result = (
            (new_user_stats_models, old_user_stats_models)
            | 'Merge new and old models together' >> beam.Flatten()
            | 'Update the dashboard stats' >> beam.ParDo(
                UpdateWeeklyCreatorStats())
            | 'Put models into the datastore' >> ndb_io.PutModels())

        # Counts are reported separately for newly created and pre-existing
        # stats models; zero counts produce no result at all.
        new_user_stats_job_result = (
            new_user_stats_models
            | 'Count all new models' >> beam.combiners.Count.Globally()
            | 'Only create result for new models when > 0' >>
            (beam.Filter(lambda x: x > 0))
            | 'Create result for new models' >>
            beam.Map(lambda x: job_run_result.JobRunResult(
                stdout='SUCCESS NEW %s' % x)))
        old_user_stats_job_result = (
            old_user_stats_models
            | 'Count all old models' >> beam.combiners.Count.Globally()
            | 'Only create result for old models when > 0' >>
            (beam.Filter(lambda x: x > 0))
            | 'Create result for old models' >>
            beam.Map(lambda x: job_run_result.JobRunResult(
                stdout='SUCCESS OLD %s' % x)))

        return ((new_user_stats_job_result, old_user_stats_job_result)
                | 'Merge new and old results together' >> beam.Flatten())
예제 #7
0
    def test_accumulate(self) -> None:
        """Accumulating small results merges their stdout and stderr into a
        single newline-separated result.
        """
        results = job_run_result.JobRunResult.accumulate([
            job_run_result.JobRunResult(stdout='abc', stderr=''),
            job_run_result.JobRunResult(stdout='', stderr='123'),
            job_run_result.JobRunResult(stdout='def', stderr='456'),
        ])
        merged = results[0]

        self.assertItemsEqual(  # type: ignore[no-untyped-call]
            merged.stdout.split('\n'), ['abc', 'def'])
        self.assertItemsEqual(  # type: ignore[no-untyped-call]
            merged.stderr.split('\n'), ['123', '456'])
예제 #8
0
    def test_accumulate_one_less_than_limit_is_not_truncated(self) -> None:
        """Results whose combined size is just under the limit must stay
        merged into a single accumulated result, with nothing dropped.
        """
        small = job_run_result.JobRunResult(stdout='', stderr='a' * 1999)
        large = job_run_result.JobRunResult(stdout='', stderr='b' * 3000)

        merged = job_run_result.JobRunResult.accumulate([small, large])

        self.assertEqual(len(merged), 1)
        self.assertItemsEqual(  # type: ignore[no-untyped-call]
            merged[0].stderr.split('\n'),
            ['a' * 1999, 'b' * 3000])
예제 #9
0
    def test_accumulate(self) -> None:
        """Accumulating small results merges their stdout and stderr into a
        single newline-separated result.
        """
        single_job_run_result = job_run_result.JobRunResult.accumulate([
            job_run_result.JobRunResult(stdout='abc', stderr=''),
            job_run_result.JobRunResult(stdout='', stderr='123'),
            job_run_result.JobRunResult(stdout='def', stderr='456'),
        ])[0]

        self.assertItemsEqual(single_job_run_result.stdout.split('\n'),
                              ['abc', 'def'])
        self.assertItemsEqual(single_job_run_result.stderr.split('\n'),
                              ['123', '456'])
예제 #10
0
    def test_accumulate_with_enormous_outputs(self) -> None:
        """Large outputs are split across multiple accumulated results once
        they exceed the per-result size limit.
        """
        accumulated_results = job_run_result.JobRunResult.accumulate([
            job_run_result.JobRunResult(stdout='a' * 750, stderr='b' * 750),
            job_run_result.JobRunResult(stdout='a' * 500, stderr='b' * 500),
            job_run_result.JobRunResult(stdout='a' * 250, stderr='b' * 250),
            job_run_result.JobRunResult(stdout='a' * 100, stderr='b' * 100),
            job_run_result.JobRunResult(stdout='a' * 50, stderr='b' * 50),
        ])

        # The smaller results are small enough to share a single accumulated
        # result, but the larger ones each need their own — 3 results total.
        # NOTE(review): the previous comment cited 100000/200000, which does
        # not match the sizes constructed above.
        self.assertEqual(len(accumulated_results), 3)
예제 #11
0
    def test_unmigrated_skill_with_unmigrated_skill_contents_is_migrated(
        self
    ) -> None:
        """Checks that a skill model whose skill contents are stored on an
        old schema version is migrated to the latest schema by the job.
        """
        # skill_contents_schema_version=3 is an old version; the other
        # sub-schemas (misconceptions, rubrics) are already current.
        skill_model = self.create_model(
            skill_models.SkillModel,
            id=self.SKILL_1_ID,
            description='description',
            misconceptions_schema_version=(
                feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION),
            rubric_schema_version=feconf.CURRENT_RUBRIC_SCHEMA_VERSION,
            rubrics=self.latest_rubrics,
            language_code='cs',
            skill_contents_schema_version=3,
            skill_contents={
                'explanation': {
                    'content_id': 'content_id',
                    'html': '<b>bo&nbsp;ld\n</b>'
                },
                'worked_examples': [],
                'recorded_voiceovers': {
                    'voiceovers_mapping': {
                        'content_id': {}
                    }
                },
                'written_translations': {
                    'translations_mapping': {
                        'content_id': {}
                    }
                }
            },
            next_misconception_id=2,
            all_questions_merged=False
        )
        skill_model.update_timestamps()
        skill_model.commit(feconf.SYSTEM_COMMITTER_ID, 'Create skill', [{
            'cmd': skill_domain.CMD_CREATE_NEW
        }])

        self.assert_job_output_is([
            job_run_result.JobRunResult(stdout='SKILL PROCESSED SUCCESS: 1'),
            job_run_result.JobRunResult(stdout='SKILL MIGRATED SUCCESS: 1'),
            job_run_result.JobRunResult(stdout='CACHE DELETION SUCCESS: 1')
        ])

        # The migration commit bumps the model to version 2 and upgrades
        # the skill contents to the latest schema version.
        migrated_skill_model = skill_models.SkillModel.get(self.SKILL_1_ID)
        self.assertEqual(migrated_skill_model.version, 2)
        self.assertEqual(
            migrated_skill_model.skill_contents_schema_version,
            feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION)
        self.assertEqual(
            migrated_skill_model.skill_contents,
            self.latest_skill_contents
        )
예제 #12
0
    def test_indexes_non_deleted_models(self) -> None:
        """Checks that each public, non-deleted exploration summary is sent
        to the search index, one batch per model when MAX_BATCH_SIZE is 1.
        """
        for i in range(5):
            exp_summary = self.create_model(
                exp_models.ExpSummaryModel,
                id='abcd%s' % i,
                deleted=False,
                title='title',
                category='category',
                objective='objective',
                language_code='lang',
                community_owned=False,
                status=constants.ACTIVITY_STATUS_PUBLIC
            )
            exp_summary.update_timestamps()
            exp_summary.put()

        # Stub out the platform search call and verify it receives one
        # single-document batch per exploration, in this document shape.
        add_docs_to_index_swap = self.swap_with_checks(
            platform_search_services,
            'add_documents_to_index',
            lambda _, __: None,
            expected_args=[
                (
                    [{
                        'id': 'abcd%s' % i,
                        'language_code': 'lang',
                        'title': 'title',
                        'category': 'category',
                        'tags': [],
                        'objective': 'objective',
                        'rank': 20,
                    }],
                    search_services.SEARCH_INDEX_EXPLORATIONS
                ) for i in range(5)
            ]
        )

        # Force one model per batch so the job emits five separate results.
        max_batch_size_swap = self.swap(
            exp_search_indexing_jobs.IndexExplorationsInSearchJob,
            'MAX_BATCH_SIZE', 1)

        with add_docs_to_index_swap, max_batch_size_swap:
            self.assert_job_output_is([
                job_run_result.JobRunResult(stdout='SUCCESS 1 models indexed'),
                job_run_result.JobRunResult(stdout='SUCCESS 1 models indexed'),
                job_run_result.JobRunResult(stdout='SUCCESS 1 models indexed'),
                job_run_result.JobRunResult(stdout='SUCCESS 1 models indexed'),
                job_run_result.JobRunResult(stdout='SUCCESS 1 models indexed'),
            ])
예제 #13
0
    def test_job_deletes_all_opportunities(self) -> None:
        """The job must delete every SkillOpportunityModel and report the
        number of models it removed.
        """
        # Seed two identical opportunities with distinct IDs.
        for opportunity_id in ('opportunity_id1', 'opportunity_id2'):
            opportunity_model = self.create_model(
                opportunity_models.SkillOpportunityModel,
                id=opportunity_id,
                skill_description='A skill description',
                question_count=20,
            )
            opportunity_model.update_timestamps()
            opportunity_model.put()

        self.assertEqual(
            len(list(opportunity_models.SkillOpportunityModel.get_all())), 2)

        self.assert_job_output_is([
            job_run_result.JobRunResult(stdout='SUCCESS: 2')
        ])

        # After the job runs, no opportunity models should remain.
        self.assertEqual(
            len(list(opportunity_models.SkillOpportunityModel.get_all())), 0)
예제 #14
0
    def test_generation_job_creates_new_models(self) -> None:
        """Checks that the generation job creates one SkillOpportunityModel
        per skill, with the expected description and question count.
        """
        # Precondition: no opportunity models exist yet.
        all_opportunity_models = list(
            opportunity_models.SkillOpportunityModel.get_all())
        self.assertEqual(len(all_opportunity_models), 0)

        self.assert_job_output_is([
            job_run_result.JobRunResult(stdout='SUCCESS: 2')
        ])

        opportunity_model_1 = (
            opportunity_models.SkillOpportunityModel.get(
                self.SKILL_1_ID))
        # Ruling out the possibility of None for mypy type checking.
        assert opportunity_model_1 is not None
        self.assertEqual(opportunity_model_1.id, self.SKILL_1_ID)
        self.assertEqual(
            opportunity_model_1.skill_description,
            self.SKILL_1_DESCRIPTION)
        self.assertEqual(opportunity_model_1.question_count, 1)

        opportunity_model_2 = (
            opportunity_models.SkillOpportunityModel.get(
                self.SKILL_2_ID))
        assert opportunity_model_2 is not None
        self.assertEqual(opportunity_model_2.id, self.SKILL_2_ID)
        self.assertEqual(
            opportunity_model_2.skill_description,
            self.SKILL_2_DESCRIPTION)
        self.assertEqual(opportunity_model_2.question_count, 1)
예제 #15
0
    def test_updates_existing_stats_model_when_values_are_provided(
            self) -> None:
        """Checks that an existing UserStatsModel gets its stored values
        folded into the weekly creator stats list by the job.
        """
        user_settings_model = self.create_model(user_models.UserSettingsModel,
                                                id=self.VALID_USER_ID_1,
                                                email='*****@*****.**')
        # Pre-existing stats model with concrete values to be carried over.
        user_stats_model = self.create_model(
            user_models.UserStatsModel,
            id=self.VALID_USER_ID_1,
            num_ratings=10,
            average_ratings=4.5,
            total_plays=22,
        )

        self.put_multi([user_settings_model, user_stats_model])

        # 'OLD' because the stats model already existed before the job ran.
        self.assert_job_output_is(
            [job_run_result.JobRunResult(stdout='SUCCESS OLD 1')])

        new_user_stats_model = (user_models.UserStatsModel.get(
            self.VALID_USER_ID_1))
        # Ruling out the possibility of None for mypy type checking.
        assert new_user_stats_model is not None
        self.assertEqual(new_user_stats_model.weekly_creator_stats_list, [{
            self.formated_datetime: {
                'num_ratings': 10,
                'average_ratings': 4.5,
                'total_plays': 22
            }
        }])
예제 #16
0
    def test_skips_private_model(self) -> None:
        """Checks that a private exploration summary is excluded from the
        search documents while the model is still counted as processed.
        """
        exp_summary = self.create_model(
            exp_models.ExpSummaryModel,
            id='abcd',
            deleted=False,
            title='title',
            category='category',
            objective='objective',
            language_code='lang',
            community_owned=False,
            status=constants.ACTIVITY_STATUS_PRIVATE
        )
        exp_summary.update_timestamps()
        exp_summary.put()

        # The index call is expected to receive an EMPTY document list —
        # private explorations are filtered out before indexing.
        add_docs_to_index_swap = self.swap_with_checks(
            platform_search_services,
            'add_documents_to_index',
            lambda _, __: None,
            expected_args=[([], search_services.SEARCH_INDEX_EXPLORATIONS)]
        )

        with add_docs_to_index_swap:
            self.assert_job_output_is([
                job_run_result.JobRunResult(stdout='SUCCESS 1 models indexed')
            ])
예제 #17
0
    def test_generation_job_counts_multiple_questions(self) -> None:
        """Checks that a skill linked to multiple questions gets the correct
        question_count on its generated opportunity model.
        """
        # Precondition: no opportunity models exist yet.
        all_opportunity_models = list(
            opportunity_models.SkillOpportunityModel.get_all())
        self.assertEqual(len(all_opportunity_models), 0)

        # Add an extra question link to skill 2, raising its count to 2.
        question_skill_link_model_1 = self.create_model(
            question_models.QuestionSkillLinkModel,
            question_id=self.QUESTION_1_ID,
            skill_id=self.SKILL_2_ID,
            skill_difficulty=1)
        question_skill_link_model_1.update_timestamps()
        datastore_services.put_multi([question_skill_link_model_1])

        self.assert_job_output_is(
            [job_run_result.JobRunResult(stdout='SUCCESS: 2')])

        opportunity_model_1 = (opportunity_models.SkillOpportunityModel.get(
            self.SKILL_1_ID))
        # Ruling out the possibility of None for mypy type checking.
        assert opportunity_model_1 is not None
        self.assertEqual(opportunity_model_1.id, self.SKILL_1_ID)
        self.assertEqual(opportunity_model_1.skill_description,
                         self.SKILL_1_DESCRIPTION)
        self.assertEqual(opportunity_model_1.question_count, 1)

        opportunity_model_2 = (opportunity_models.SkillOpportunityModel.get(
            self.SKILL_2_ID))
        assert opportunity_model_2 is not None
        self.assertEqual(opportunity_model_2.id, self.SKILL_2_ID)
        self.assertEqual(opportunity_model_2.skill_description,
                         self.SKILL_2_DESCRIPTION)
        self.assertEqual(opportunity_model_2.question_count, 2)
예제 #18
0
    def test_migrated_skill_is_not_migrated(self) -> None:
        """Checks that a skill already on the latest schema versions is
        processed but not migrated (no new version is committed).
        """
        # All sub-schemas are already at their current versions.
        skill_model = self.create_model(
            skill_models.SkillModel,
            id=self.SKILL_1_ID,
            description='description',
            misconceptions_schema_version=(
                feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION),
            rubric_schema_version=feconf.CURRENT_RUBRIC_SCHEMA_VERSION,
            rubrics=self.latest_rubrics,
            language_code='cs',
            skill_contents_schema_version=(
                feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION),
            skill_contents=self.latest_skill_contents,
            next_misconception_id=2,
            all_questions_merged=False
        )
        skill_model.update_timestamps()
        skill_model.commit(feconf.SYSTEM_COMMITTER_ID, 'Create skill', [{
            'cmd': skill_domain.CMD_CREATE_NEW
        }])

        # Only a PROCESSED result — no MIGRATED or CACHE DELETION output.
        self.assert_job_output_is([
            job_run_result.JobRunResult(stdout='SKILL PROCESSED SUCCESS: 1')
        ])

        # Version is still 1: the job made no migration commit.
        unmigrated_skill_model = skill_models.SkillModel.get(self.SKILL_1_ID)
        self.assertEqual(unmigrated_skill_model.version, 1)
    def test_reports_failure_on_broken_model(self) -> None:
        """Checks that a suggestion with an invalid change_cmd payload is
        reported as an error on stderr rather than crashing the job.
        """
        suggestion_model = self.create_model(
            suggestion_models.GeneralSuggestionModel,
            id='suggestion_id',
            suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT,
            author_id=self.VALID_USER_ID_1,
            change_cmd={
                'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
                'state_name': 'state',
                'content_id': 'content_id',
                'language_code': 'lang',
                # Deliberately broken: content_html must be text, not int.
                'content_html': 111,
                'translation_html': '111 222 333',
                'data_format': 'html'
            },
            score_category='irelevant',
            status=suggestion_models.STATUS_IN_REVIEW,
            target_type='exploration',
            target_id=self.EXP_1_ID,
            target_version_at_submission=0,
            language_code=self.LANG_1
        )
        suggestion_model.update_timestamps()
        suggestion_model.put()

        # The job should surface the validation failure, keyed by the
        # broken suggestion's id.
        self.assert_job_output_is([
            job_run_result.JobRunResult(
                stderr=(
                    'ERROR: "suggestion_id: argument cannot be of \'int\' '
                    'type, must be of text type": 1'
                )
            )
        ])
    def test_generation_job_returns_initial_opportunity(self) -> None:
        """Checks that the generation job creates an initial
        ExplorationOpportunitySummaryModel with the expected topic, story,
        chapter and translation/voiceover bookkeeping.
        """
        # Precondition: no opportunity models exist yet.
        all_opportunity_models = list(
            opportunity_models.ExplorationOpportunitySummaryModel.get_all())
        self.assertEqual(len(all_opportunity_models), 0)

        self.assert_job_output_is([
            job_run_result.JobRunResult(stdout='SUCCESS')
        ])

        opportunity_model = (
            opportunity_models.ExplorationOpportunitySummaryModel.get(
                self.EXP_1_ID))
        # Ruling out the possibility of None for mypy type checking.
        assert opportunity_model is not None
        self.assertEqual(opportunity_model.topic_id, self.TOPIC_1_ID)
        self.assertEqual(opportunity_model.topic_name, 'topic title')
        self.assertEqual(opportunity_model.story_id, self.STORY_1_ID)
        self.assertEqual(opportunity_model.story_title, 'story title')
        self.assertEqual(opportunity_model.chapter_title, 'node title')
        self.assertEqual(opportunity_model.content_count, 1)
        # Every supported audio language except the exploration's own
        # language ('cs') still needs a translation.
        self.assertItemsEqual( # type: ignore[no-untyped-call]
            opportunity_model.incomplete_translation_language_codes,
            {l['id'] for l in constants.SUPPORTED_AUDIO_LANGUAGES} - {'cs'})
        self.assertEqual(opportunity_model.translation_counts, {})
        self.assertEqual(
            opportunity_model.language_codes_needing_voice_artists, ['cs'])
예제 #21
0
    def test_fails_when_existing_stats_has_wrong_schema_version(self) -> None:
        """The job must raise when an existing UserStatsModel carries an
        unsupported schema version, and must leave the stats list untouched.
        """
        user_settings_model = self.create_model(
            user_models.UserSettingsModel,
            id=self.VALID_USER_ID_1, email='*****@*****.**')
        # schema_version=0 is outside the supported v1..vCURRENT range.
        user_stats_model = self.create_model(
            user_models.UserStatsModel,
            id=self.VALID_USER_ID_1,
            schema_version=0
        )

        self.put_multi([user_settings_model, user_stats_model])

        # assertRaisesRegex replaces the deprecated assertRaisesRegexp
        # alias (removed in Python 3.12); it is typed in typeshed, so the
        # previous type-ignore comment is no longer needed.
        with self.assertRaisesRegex(
            Exception,
            'Sorry, we can only process v1-v%d dashboard stats schemas at '
            'present.' % feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION
        ):
            self.assert_job_output_is([
                job_run_result.JobRunResult(stdout='SUCCESS OLD 1')
            ])

        # The failed run must not have written any weekly stats.
        new_user_stats_model = (
            user_models.UserStatsModel.get(self.VALID_USER_ID_1))
        # Ruling out the possibility of None for mypy type checking.
        assert new_user_stats_model is not None
        self.assertEqual(new_user_stats_model.weekly_creator_stats_list, [])
예제 #22
0
    def test_broken_skill_is_not_migrated(self) -> None:
        """Checks that a story model failing validation is reported on
        stderr and is not migrated (version stays at 1).

        NOTE(review): the name says 'skill', but this test builds and
        validates a StoryModel — confirm the intended name.
        """
        # broken_contents is expected to fail domain validation during the
        # job's processing step.
        story_model = self.create_model(
            story_models.StoryModel,
            id=self.STORY_1_ID,
            story_contents_schema_version=4,
            title='title',
            language_code='cs',
            notes='notes',
            description='description',
            story_contents=self.broken_contents,
            corresponding_topic_id=self.TOPIC_1_ID,
            url_fragment='urlfragment',
        )
        story_model.update_timestamps()
        story_model.commit(feconf.SYSTEM_COMMITTER_ID, 'Create story',
                           [{
                               'cmd': story_domain.CMD_CREATE_NEW
                           }])
        self.assert_job_output_is([
            job_run_result.JobRunResult(stderr=(
                'STORY PROCESSED ERROR: "(\'story_1_id\', ValidationError('
                '\'Expected description to be a string, received 123\''
                '))": 1'))
        ])

        # No migration commit happened, so the model is still at version 1.
        migrated_story_model = story_models.StoryModel.get(self.STORY_1_ID)
        self.assertEqual(migrated_story_model.version, 1)
예제 #23
0
    def test_counts_single_collection(self) -> None:
        """Checks that a single collection owner is reported with their one
        collection id and email.
        """
        user = self.create_model(
            user_models.UserSettingsModel,
            id=self.USER_ID_1,
            email='*****@*****.**',
            roles=[feconf.ROLE_ID_COLLECTION_EDITOR]
        )
        user.update_timestamps()
        collection = self.create_model(
            collection_models.CollectionRightsModel,
            id='col_1',
            owner_ids=[self.USER_ID_1],
            editor_ids=[self.USER_ID_1],
            voice_artist_ids=[self.USER_ID_1],
            community_owned=False,
            status=constants.ACTIVITY_STATUS_PUBLIC,
            viewable_if_private=False,
            first_published_msec=0.2
        )
        collection.update_timestamps()
        self.put_multi([user, collection])

        self.assert_job_output_is([
            job_run_result.JobRunResult(
                stdout=(
                    'collection_ids: [\'col_1\'], email: [\'[email protected]\']'))
        ])
예제 #24
0
    def run(self) -> beam.PCollection[job_run_result.JobRunResult]:
        """Returns a PCollection of 'SUCCESS' or 'FAILURE' results from
        deleting ExplorationOpportunitySummaryModel.

        Returns:
            PCollection. A PCollection of 'SUCCESS' or 'FAILURE' results from
            deleting ExplorationOpportunitySummaryModel.
        """
        exp_opportunity_summary_model = (
            self.pipeline
            | 'Get all non-deleted opportunity models' >> ndb_io.GetModels(
                opportunity_models.ExplorationOpportunitySummaryModel.get_all(
                    include_deleted=False)))

        # Label the key-extraction step: it was the only unlabelled
        # transform in the pipeline, which hurts graph readability and
        # risks auto-generated label collisions.
        unused_delete_result = (
            exp_opportunity_summary_model
            | 'Extract model keys' >> beam.Map(lambda model: model.key)
            | 'Delete all models' >> ndb_io.DeleteModels())

        # Report the deleted-model count, but only when it is non-zero.
        return (exp_opportunity_summary_model
                | 'Count all new models' >> beam.combiners.Count.Globally()
                | 'Only create result for new models when > 0' >>
                (beam.Filter(lambda n: n > 0))
                | 'Create result for new models' >>
                beam.Map(lambda n: job_run_result.JobRunResult(
                    stdout='SUCCESS %s' % n)))
예제 #25
0
    def test_job_runs_correctly_for_second_batch(self) -> None:
        """With the mailchimp batch index set to 1, the job should emit the
        ten emails of that batch as a single comma-separated line.
        """
        # Point the job at the second batch.
        batch_index_model = self.create_model(
            config_models.ConfigPropertyModel,
            id='batch_index_for_mailchimp',
            value=1
        )
        batch_index_model.update_timestamps()
        batch_index_model.commit('user_id_0', [])

        # All ten expected addresses are the same redacted placeholder.
        expected_emails = ['*****@*****.**'] * 10

        self.assert_job_output_is([
            job_run_result.JobRunResult(stdout=','.join(expected_emails))
        ])
예제 #26
0
    def test_job_fails_correctly_with_request_error(self) -> None:
        """Checks that a mailchimp request failure surfaces as a
        'Server Issue' result instead of crashing the job.
        """
        # 'errored_email' is a sentinel the mock mailchimp client rejects.
        user_model = self.create_model(
                user_models.UserSettingsModel,
                id='user_id',
                email='errored_email'
            )
        user_model.update_timestamps()
        user_model.put()

        # Half of the users have emails enabled.
        preferences_model = self.create_model(
            user_models.UserEmailPreferencesModel,
            id='user_id',
            site_updates=True
        )
        preferences_model.update_timestamps()
        preferences_model.put()

        # Substitute the mock mailchimp client for the real one.
        mailchimp = self.MockMailchimpClass()
        swapped_mailchimp = lambda: mailchimp
        swap_mailchimp_context = self.swap(
            mailchimp_population_jobs, '_get_mailchimp_class',
            swapped_mailchimp)

        with swap_mailchimp_context, self.swap_audience_id:
            self.assert_job_output_is([
                job_run_result.JobRunResult(
                    stdout='Server Issue')
            ])
예제 #27
0
    def test_generation_job_fails_when_exp_id_is_not_available(self) -> None:
        """Checks that a story node pointing at a nonexistent exploration
        makes the job emit an error and create no opportunity models.
        """
        self.topic_model.canonical_story_references.append({
            'story_id': self.STORY_2_ID,
            'story_is_published': False
        })
        self.topic_model.update_timestamps()

        # The single node references an exploration id that does not exist.
        story_contents = {
            'nodes': [{
                'id': 'node',
                'outline': 'outline',
                'title': 'node 2 title',
                'description': 'description',
                'destination_node_ids': ['123'],
                'acquired_skill_ids': [],
                'exploration_id': 'missing_id',
                'prerequisite_skill_ids': [],
                'outline_is_finalized': True
            }],
            'initial_node_id': 'abc',
            'next_node_id': 'efg'
        }
        story_model = self.create_model(
            story_models.StoryModel,
            id=self.STORY_2_ID,
            title='story 2 title',
            language_code='cs',
            story_contents_schema_version=1,
            corresponding_topic_id=self.TOPIC_1_ID,
            url_fragment='story',
            story_contents=story_contents,
            notes='note')
        story_model.update_timestamps()

        datastore_services.put_multi([self.topic_model, story_model])

        # No opportunity models exist before the job runs.
        all_opportunity_models = list(
            opportunity_models.ExplorationOpportunitySummaryModel.get_all())
        self.assertEqual(len(all_opportunity_models), 0)

        self.assert_job_output_is([
            job_run_result.JobRunResult(stderr=(
                'ERROR: "Failed to regenerate opportunities for topic id: '
                'topic_1_id, missing_exp_with_ids: [\'missing_id\'], '
                'missing_story_with_ids: []": 1'))
        ])

        # The failed job must not have created any opportunity models.
        all_opportunity_models = list(
            opportunity_models.ExplorationOpportunitySummaryModel.get_all())
        self.assertEqual(len(all_opportunity_models), 0)
    def test_creates_stats_model_from_one_suggestion_in_set_format(
        self
    ) -> None:
        """Checks that a single set-format translation suggestion yields one
        stats model whose word count covers all list entries.
        """
        translation_suggestion = self.create_model(
            suggestion_models.GeneralSuggestionModel,
            suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT,
            author_id=self.VALID_USER_ID_1,
            change_cmd={
                'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
                'state_name': 'state',
                'content_id': 'content_id',
                'language_code': 'lang',
                'content_html': ['111 a', '222 b', '333 c'],
                'translation_html': ['111 a', '222 b', '333 c'],
                'data_format': 'set_of_normalized_string'
            },
            score_category='irelevant',
            status=suggestion_models.STATUS_IN_REVIEW,
            target_type='exploration',
            target_id=self.EXP_1_ID,
            target_version_at_submission=0,
            language_code=self.LANG_1
        )
        translation_suggestion.update_timestamps()
        translation_suggestion.put()

        self.assert_job_output_is(
            [job_run_result.JobRunResult(stdout='SUCCESS: 1')])

        stats_model = (
            suggestion_models.TranslationContributionStatsModel.get(
                self.LANG_1, self.VALID_USER_ID_1, ''))

        # Ruling out the possibility of None for mypy type checking.
        assert stats_model is not None
        self.assertEqual(stats_model.language_code, self.LANG_1)
        self.assertEqual(
            stats_model.contributor_user_id, self.VALID_USER_ID_1)
        self.assertEqual(stats_model.topic_id, '')
        self.assertEqual(stats_model.submitted_translations_count, 1)
        # Three two-word entries => six words in total.
        self.assertEqual(stats_model.submitted_translation_word_count, 6)
        self.assertEqual(stats_model.accepted_translations_count, 0)
        self.assertEqual(
            stats_model.accepted_translations_without_reviewer_edits_count, 0)
        self.assertEqual(stats_model.accepted_translation_word_count, 0)
        self.assertEqual(stats_model.rejected_translations_count, 0)
        self.assertEqual(stats_model.rejected_translation_word_count, 0)
        self.assertItemsEqual( # type: ignore[no-untyped-call]
            stats_model.contribution_dates, [datetime.date.today()])
# ---- Example (예제) #29 ----
    def run(self) -> beam.PCollection[job_run_result.JobRunResult]:
        """Generates the translation contributions stats.

        Joins translate-content suggestions with exploration opportunity
        summaries (keyed by exploration id), combines per-key stats, and
        writes the resulting TranslationContributionStatsModel instances
        to the datastore.

        Returns:
            PCollection. A PCollection of 'SUCCESS x' results, where x is
            the number of generated stats.
        """
        suggestions_grouped_by_target = (
            self.pipeline
            | 'Get all non-deleted suggestion models' >> ndb_io.GetModels(
                suggestion_models.GeneralSuggestionModel.get_all(
                    include_deleted=False))
            # We need to window the models so that CoGroupByKey below
            # works properly.
            | 'Filter translate suggestions' >> beam.Filter(lambda m: (
                m.suggestion_type == feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT))
            | 'Transform to suggestion domain object' >> beam.Map(
                suggestion_services.get_suggestion_from_model)
            | 'Group by target' >> beam.GroupBy(lambda m: m.target_id))
        exp_opportunities = (
            self.pipeline
            | 'Get all non-deleted opportunity models' >> ndb_io.GetModels(
                opportunity_models.ExplorationOpportunitySummaryModel.get_all(
                    include_deleted=False))
            # We need to window the models so that CoGroupByKey below
            # works properly.
            | 'Transform to opportunity domain object' >>
            beam.Map(opportunity_services.
                     get_exploration_opportunity_summary_from_model)
            | 'Group by ID' >> beam.GroupBy(lambda m: m.id))

        new_user_stats_models = (
            {
                'suggestion': suggestions_grouped_by_target,
                'opportunity': exp_opportunities
            }
            | 'Merge models' >> beam.CoGroupByKey()
            | 'Get rid of key' >> beam.Values()  # pylint: disable=no-value-for-parameter
            # A key may have suggestions without an opportunity (or vice
            # versa), so both sides default to empty/None when absent.
            | 'Generate stats' >> beam.ParDo(lambda x: self._generate_stats(
                x['suggestion'][0] if len(x['suggestion']) else [],
                list(x['opportunity'][0])[0]
                if len(x['opportunity']) else None))
            | 'Combine the stats' >> beam.CombinePerKey(CombineStats())
            | 'Generate models from stats' >> beam.MapTuple(
                self._generate_translation_contribution_model))

        unused_put_result = (
            new_user_stats_models
            | 'Put models into the datastore' >> ndb_io.PutModels())

        # Only emit a result when at least one model was created; an empty
        # pipeline produces no JobRunResult at all.
        return (new_user_stats_models
                | 'Count all new models' >>
                (beam.combiners.Count.Globally().without_defaults())
                | 'Only create result for new models when > 0' >>
                (beam.Filter(lambda x: x > 0))
                | 'Create result for new models' >>
                beam.Map(lambda x: job_run_result.JobRunResult(
                    stdout='SUCCESS %s' % x)))
# ---- Example (예제) #30 ----
    def test_job_deletes_sent_email_model_with_user_as_recipient(self) -> None:
        """Verifies the job removes a sent-email model addressed to the
        user being deleted.
        """
        sent_email = self.sent_email_model_with_recipient
        sent_email.update_timestamps()
        sent_email.put()

        self.assert_job_output_is(
            [job_run_result.JobRunResult(stdout='SENT EMAILS SUCCESS: 1')])

        self.assertIsNone(
            email_models.SentEmailModel.get('sent_email_id', strict=False))