def test_generation_job_counts_multiple_questions(self) -> None:
    """Checks that question counts are aggregated per skill."""
    # No skill opportunities should exist before the job runs.
    self.assertEqual(
        len(list(opportunity_models.SkillOpportunityModel.get_all())), 0)

    # Link an additional question to skill 2, so skill 2 ends up with
    # two linked questions while skill 1 keeps one.
    extra_link_model = self.create_model(
        question_models.QuestionSkillLinkModel,
        question_id=self.QUESTION_1_ID,
        skill_id=self.SKILL_2_ID,
        skill_difficulty=1)
    extra_link_model.update_timestamps()
    datastore_services.put_multi([extra_link_model])

    self.assert_job_output_is(
        [job_run_result.JobRunResult(stdout='SUCCESS: 2')])

    skill_1_opportunity = opportunity_models.SkillOpportunityModel.get(
        self.SKILL_1_ID)
    # Ruling out the possibility of None for mypy type checking.
    assert skill_1_opportunity is not None
    self.assertEqual(skill_1_opportunity.id, self.SKILL_1_ID)
    self.assertEqual(
        skill_1_opportunity.skill_description, self.SKILL_1_DESCRIPTION)
    self.assertEqual(skill_1_opportunity.question_count, 1)

    skill_2_opportunity = opportunity_models.SkillOpportunityModel.get(
        self.SKILL_2_ID)
    # Ruling out the possibility of None for mypy type checking.
    assert skill_2_opportunity is not None
    self.assertEqual(skill_2_opportunity.id, self.SKILL_2_ID)
    self.assertEqual(
        skill_2_opportunity.skill_description, self.SKILL_2_DESCRIPTION)
    self.assertEqual(skill_2_opportunity.question_count, 2)
def get_beam_job_runs(
    refresh: bool = True
) -> List[beam_job_domain.BeamJobRun]:
    """Returns all of the Apache Beam job runs recorded in the datastore.

    Args:
        refresh: bool. Whether to refresh the jobs' state before returning
            them.

    Returns:
        list(BeamJobRun). A list of every job run recorded in the datastore.
    """
    run_models = list(beam_job_models.BeamJobRunModel.query())
    runs = [get_beam_job_run_from_model(model) for model in run_models]

    if not refresh:
        return runs

    # Re-sync any run that has not yet reached a terminal state, and
    # remember its model so the refreshed state can be persisted.
    models_to_put = []
    for index, run_model in enumerate(run_models):
        if runs[index].in_terminal_state:
            continue
        jobs_manager.refresh_state_of_beam_job_run_model(run_model)
        # Refreshing state is not a user-visible edit, so keep the
        # last_updated timestamp untouched.
        run_model.update_timestamps(update_last_updated_time=False)
        models_to_put.append(run_model)
        runs[index] = get_beam_job_run_from_model(run_model)

    if models_to_put:
        datastore_services.put_multi(models_to_put)

    return runs
def test_generation_job_fails_when_exp_id_is_not_available(self) -> None:
    """Checks that a dangling exploration id makes the job report an error."""
    # Add a second story whose only chapter points at a nonexistent
    # exploration id.
    self.topic_model.canonical_story_references.append({
        'story_id': self.STORY_2_ID,
        'story_is_published': False
    })
    self.topic_model.update_timestamps()
    broken_story_model = self.create_model(
        story_models.StoryModel,
        id=self.STORY_2_ID,
        title='story 2 title',
        language_code='cs',
        story_contents_schema_version=1,
        corresponding_topic_id=self.TOPIC_1_ID,
        url_fragment='story',
        story_contents={
            'nodes': [{
                'id': 'node',
                'outline': 'outline',
                'title': 'node 2 title',
                'description': 'description',
                'destination_node_ids': ['123'],
                'acquired_skill_ids': [],
                'exploration_id': 'missing_id',
                'prerequisite_skill_ids': [],
                'outline_is_finalized': True
            }],
            'initial_node_id': 'abc',
            'next_node_id': 'efg'
        },
        notes='note')
    broken_story_model.update_timestamps()
    datastore_services.put_multi([self.topic_model, broken_story_model])

    self.assertEqual(
        len(list(
            opportunity_models.ExplorationOpportunitySummaryModel.get_all()
        )), 0)

    self.assert_job_output_is([
        job_run_result.JobRunResult(stderr=(
            'ERROR: "Failed to regenerate opportunities for topic id: '
            'topic_1_id, missing_exp_with_ids: [\'missing_id\'], '
            'missing_story_with_ids: []": 1'))
    ])

    # The failure must not leave behind partially generated opportunities.
    self.assertEqual(
        len(list(
            opportunity_models.ExplorationOpportunitySummaryModel.get_all()
        )), 0)
def setUp(self) -> None:
    """Sets up a topic with one story summary, and prepares latest,
    broken and unmigrated variants of a story_contents dict for tests.
    """
    # Fix: added the missing `-> None` return annotation, for
    # consistency with the other setUp methods in this file.
    super().setUp()
    story_summary_model = self.create_model(
        story_models.StorySummaryModel,
        id=self.STORY_1_ID,
        title='title',
        url_fragment='urlfragment',
        language_code='cs',
        description='description',
        node_titles=['title1', 'title2'],
        story_model_last_updated=datetime.datetime.utcnow(),
        story_model_created_on=datetime.datetime.utcnow(),
        version=1)
    topic_model = self.create_model(
        topic_models.TopicModel,
        id=self.TOPIC_1_ID,
        name='topic title',
        canonical_name='topic title',
        story_reference_schema_version=1,
        subtopic_schema_version=1,
        next_subtopic_id=1,
        language_code='cs',
        url_fragment='topic',
        canonical_story_references=[{
            'story_id': self.STORY_1_ID,
            'story_is_published': False
        }],
        page_title_fragment_for_web='fragm',
    )
    datastore_services.update_timestamps_multi(
        [topic_model, story_summary_model])
    datastore_services.put_multi([topic_model, story_summary_model])

    # A valid story_contents dict on the latest schema.
    self.latest_contents = {
        'nodes': [{
            'id': 'node_1111',
            'title': 'title',
            'description': 'description',
            'thumbnail_filename': 'thumbnail_filename.svg',
            'thumbnail_bg_color': '#F8BF74',
            'thumbnail_size_in_bytes': None,
            'destination_node_ids': [],
            'acquired_skill_ids': [],
            'prerequisite_skill_ids': [],
            'outline': 'outline',
            'outline_is_finalized': True,
            'exploration_id': 'exp_id'
        }],
        'initial_node_id': 'node_1111',
        'next_node_id': 'node_2222'
    }
    # Invalid variant: the node description should be a string, not an int.
    self.broken_contents = copy.deepcopy(self.latest_contents)
    self.broken_contents['nodes'][0]['description'] = 123
    # Variant presumably representing a pre-migration schema state
    # (int thumbnail size) — confirm against the migration tests.
    self.unmigrated_contents = copy.deepcopy(self.latest_contents)
    self.unmigrated_contents['nodes'][0]['thumbnail_size_in_bytes'] = 123
def put_multi(self, model_list: Sequence[base_models.BaseModel]) -> None:
    """Puts the input models into the datastore.

    Timestamps are refreshed first, but last_updated is deliberately left
    untouched so that stored models keep their original update times.

    Args:
        model_list: list(Model). The NDB models to put into the datastore.
    """
    datastore_services.update_timestamps_multi(
        model_list, update_last_updated_time=False)
    datastore_services.put_multi(model_list)
def mark_outdated_models_as_deleted() -> None:
    """Mark models in MODEL_CLASSES_TO_MARK_AS_DELETED, as deleted if they
    were last updated more than four weeks ago.
    """
    date_before_which_to_mark_as_deleted = (
        datetime.datetime.utcnow() - feconf.PERIOD_TO_MARK_MODELS_AS_DELETED)
    models_to_mark_as_deleted: List[base_models.BaseModel] = []
    for model_class in MODEL_CLASSES_TO_MARK_AS_DELETED:
        models_to_mark_as_deleted.extend(
            model_class.query(
                model_class.last_updated < date_before_which_to_mark_as_deleted
            ).fetch())
    for model_to_mark_as_deleted in models_to_mark_as_deleted:
        model_to_mark_as_deleted.deleted = True
    # Fix: skip the datastore round-trip entirely when nothing is
    # outdated, mirroring the empty-list guard used in get_beam_job_runs.
    if models_to_mark_as_deleted:
        datastore_services.update_timestamps_multi(models_to_mark_as_deleted)
        datastore_services.put_multi(models_to_mark_as_deleted)
def delete_multi( # type: ignore[override]
        cls,
        entity_ids: List[str],
        committer_id: str,
        commit_message: str,
        force_deletion: bool = False) -> None:
    """Deletes the given cls instances with the given entity_ids.

    Note that this extends the superclass method.

    Args:
        entity_ids: list(str). Ids of entities to delete.
        committer_id: str. The user_id of the user who committed the
            change.
        commit_message: str. The commit description message.
        force_deletion: bool. If True these models are deleted completely
            from storage, otherwise there are only marked as deleted.
            Default is False.
    """
    super(ExplorationModel, cls).delete_multi(
        entity_ids, committer_id,
        commit_message, force_deletion=force_deletion)

    # When models are only marked as deleted (not force-deleted), a
    # deletion commit log entry is recorded for each exploration.
    if not force_deletion:
        commit_log_models = []
        exp_rights_models = ExplorationRightsModel.get_multi(
            entity_ids, include_deleted=True)

        versioned_models = cls.get_multi(entity_ids, include_deleted=True)
        versioned_and_exp_rights_models = zip(
            versioned_models, exp_rights_models)
        for model, rights_model in versioned_and_exp_rights_models:
            # Ruling out the possibility of None for mypy type checking.
            assert model is not None
            assert rights_model is not None
            # The commit log entry carries the rights status so that the
            # log reflects visibility at deletion time.
            exploration_commit_log = ExplorationCommitLogEntryModel.create(
                model.id, model.version, committer_id,
                feconf.COMMIT_TYPE_DELETE,
                commit_message, [{'cmd': cls.CMD_DELETE_COMMIT}],
                rights_model.status, rights_model.community_owned)
            exploration_commit_log.exploration_id = model.id
            commit_log_models.append(exploration_commit_log)
        ExplorationCommitLogEntryModel.update_timestamps_multi(
            commit_log_models)
        datastore_services.put_multi(commit_log_models)
def test_generation_job_fails_when_story_id_is_not_available(self) -> None:
    """Checks that a dangling story id makes the job report a failure."""
    # Reference a story id that has no corresponding StoryModel.
    self.topic_model.canonical_story_references.append({
        'story_id': 'missing_id',
        'story_is_published': False
    })
    self.topic_model.update_timestamps()
    datastore_services.put_multi([self.topic_model])

    existing_opportunities = list(
        opportunity_models.ExplorationOpportunitySummaryModel.get_all())
    self.assertEqual(len(existing_opportunities), 0)

    self.assert_job_output_is([
        job_run_result.JobRunResult(stderr=(
            'FAILURE: Failed to regenerate opportunities for topic id: '
            'topic_1_id, missing_exp_with_ids: [], '
            'missing_story_with_ids: [\'missing_id\']'
        ))
    ])

    # The failure must not create any opportunity models.
    remaining_opportunities = list(
        opportunity_models.ExplorationOpportunitySummaryModel.get_all())
    self.assertEqual(len(remaining_opportunities), 0)
def test_job_finds_math_explorations_with_rules(self) -> None:
    """Checks that the job reports, per state, the rule types used by
    math interactions across multiple explorations.
    """
    # Exploration 1: an AlgebraicExpressionInput state and a
    # MathEquationInput state, plus unused init/end states.
    exp_model_1 = self.create_model(
        exp_models.ExplorationModel,
        id=self.EXP_1_ID,
        title='exploration 1 title',
        category='category',
        objective='objective',
        language_code='cs',
        init_state_name='state',
        states_schema_version=48,
        states={
            'init_state': state_domain.State.
            create_default_state( # type: ignore[no-untyped-call]
                'state', is_initial_state=True).to_dict(),
            'alg_state': state_domain.State.
            create_default_state( # type: ignore[no-untyped-call]
                'state', is_initial_state=True).to_dict(),
            'eq_state': state_domain.State.
            create_default_state( # type: ignore[no-untyped-call]
                'state', is_initial_state=True).to_dict(),
            'end_state': state_domain.State.
            create_default_state( # type: ignore[no-untyped-call]
                'state', is_initial_state=True).to_dict(),
        })
    exp_model_1.states['alg_state']['interaction']['id'] = (
        'AlgebraicExpressionInput')
    exp_model_1.states['alg_state']['interaction']['answer_groups'] = [{
        'rule_specs': [{
            'inputs': {
                'x': 'a + b'
            },
            'rule_type': 'IsEquivalentTo'
        }, {
            'inputs': {
                'x': 'a - b'
            },
            'rule_type': 'ContainsSomeOf'
        }]
    }]
    exp_model_1.states['eq_state']['interaction']['id'] = (
        'MathEquationInput')
    exp_model_1.states['eq_state']['interaction']['answer_groups'] = [{
        'rule_specs': [{
            'inputs': {
                'x': 'x = y',
                'y': 'both'
            },
            'rule_type': 'MatchesExactlyWith'
        }]
    }]
    exp_model_1.update_timestamps()
    # Exploration 2: a single NumericExpressionInput state with two rules.
    exp_model_2 = self.create_model(
        exp_models.ExplorationModel,
        id=self.EXP_2_ID,
        title='exploration 2 title',
        category='category',
        objective='objective',
        language_code='cs',
        init_state_name='state',
        states_schema_version=48,
        states={
            'init_state': state_domain.State.
            create_default_state( # type: ignore[no-untyped-call]
                'state', is_initial_state=True).to_dict(),
            'num_state': state_domain.State.
            create_default_state( # type: ignore[no-untyped-call]
                'state', is_initial_state=True).to_dict(),
            'end_state': state_domain.State.
            create_default_state( # type: ignore[no-untyped-call]
                'state', is_initial_state=True).to_dict()
        })
    exp_model_2.states['num_state']['interaction']['id'] = (
        'NumericExpressionInput')
    exp_model_2.states['num_state']['interaction']['answer_groups'] = [{
        'rule_specs': [{
            'inputs': {
                'x': '1.2 + 3'
            },
            'rule_type': 'MatchesExactlyWith'
        }, {
            'inputs': {
                'x': '1 - 2'
            },
            'rule_type': 'OmitsSomeOf'
        }]
    }]
    exp_model_2.update_timestamps()
    datastore_services.put_multi([exp_model_1, exp_model_2])

    # One output line per (exploration, state) pair listing its rule types.
    self.assert_job_output_is([
        job_run_result.JobRunResult(
            stdout=('(\'exp_1_id\', \'alg_state\', '
                    '[\'IsEquivalentTo\', \'ContainsSomeOf\'])')),
        job_run_result.JobRunResult(stdout=(
            '(\'exp_1_id\', \'eq_state\', [\'MatchesExactlyWith\'])')),
        job_run_result.JobRunResult(
            stdout=('(\'exp_2_id\', \'num_state\', '
                    '[\'MatchesExactlyWith\', \'OmitsSomeOf\'])'))
    ])
def test_generation_job_returns_multiple_opportunities_for_multiple_topics(
    self
) -> None:
    """Checks that the job emits one opportunity per topic when two
    topics (the one from setUp plus a second one) have valid stories.
    """
    # Second topic with its own story and exploration.
    topic_model = self.create_model(
        topic_models.TopicModel,
        id=self.TOPIC_2_ID,
        name='topic 2 title',
        canonical_name='topic 2 title',
        story_reference_schema_version=1,
        subtopic_schema_version=1,
        next_subtopic_id=1,
        language_code='cs',
        url_fragment='topic',
        canonical_story_references=[{
            'story_id': self.STORY_2_ID,
            'story_is_published': False
        }])
    topic_model.update_timestamps()
    topic_rights_model = self.create_model(
        topic_models.TopicRightsModel, id=self.TOPIC_2_ID)
    topic_rights_model.update_timestamps()
    # NOTE(review): the story's corresponding_topic_id is TOPIC_1_ID even
    # though it is referenced from topic 2 — confirm this is intended.
    story_model = self.create_model(
        story_models.StoryModel,
        id=self.STORY_2_ID,
        title='story 2 title',
        language_code='cs',
        story_contents_schema_version=1,
        corresponding_topic_id=self.TOPIC_1_ID,
        url_fragment='story',
        story_contents={
            'nodes': [{
                'id': 'node',
                'outline': 'outline',
                'title': 'node 2 title',
                'description': 'description',
                'destination_node_ids': ['123'],
                'acquired_skill_ids': [],
                'exploration_id': self.EXP_2_ID,
                'prerequisite_skill_ids': [],
                'outline_is_finalized': True
            }],
            'initial_node_id': 'abc',
            'next_node_id': 'efg'
        },
        notes='note')
    story_model.update_timestamps()
    # English exploration with two states, so content_count is 2 below.
    exp_model = self.create_model(
        exp_models.ExplorationModel,
        id=self.EXP_2_ID,
        title='exploration 2 title',
        category='category',
        objective='objective',
        language_code='en',
        init_state_name='state1',
        states_schema_version=48,
        states={
            'state1': state_domain.State.create_default_state( # type: ignore[no-untyped-call]
                'state1', is_initial_state=True
            ).to_dict(),
            'state2': state_domain.State.create_default_state( # type: ignore[no-untyped-call]
                'state2'
            ).to_dict()
        })
    exp_model.update_timestamps()
    datastore_services.put_multi([
        exp_model, story_model, topic_model, topic_rights_model
    ])

    all_opportunity_models = list(
        opportunity_models.ExplorationOpportunitySummaryModel.get_all())
    self.assertEqual(len(all_opportunity_models), 0)

    # One SUCCESS result per topic.
    self.assert_job_output_is([
        job_run_result.JobRunResult(stdout='SUCCESS'),
        job_run_result.JobRunResult(stdout='SUCCESS')
    ])

    all_opportunity_models = list(
        opportunity_models.ExplorationOpportunitySummaryModel.get_all())
    self.assertEqual(len(all_opportunity_models), 2)

    opportunity_model = (
        opportunity_models.ExplorationOpportunitySummaryModel.get(
            self.EXP_2_ID))
    # Ruling out the possibility of None for mypy type checking.
    assert opportunity_model is not None
    self.assertEqual(opportunity_model.topic_id, self.TOPIC_2_ID)
    self.assertEqual(opportunity_model.topic_name, 'topic 2 title')
    self.assertEqual(opportunity_model.story_id, self.STORY_2_ID)
    self.assertEqual(opportunity_model.story_title, 'story 2 title')
    self.assertEqual(opportunity_model.chapter_title, 'node 2 title')
    self.assertEqual(opportunity_model.content_count, 2)
    # Every supported audio language except English still needs a
    # translation, since the exploration is in English.
    self.assertItemsEqual( # type: ignore[no-untyped-call]
        opportunity_model.incomplete_translation_language_codes,
        {l['id'] for l in constants.SUPPORTED_AUDIO_LANGUAGES} - {'en'})
    self.assertEqual(opportunity_model.translation_counts, {})
    self.assertEqual(
        opportunity_model.language_codes_needing_voice_artists, ['en'])
def setUp(self) -> None:
    """Creates one topic referencing one story whose single chapter
    points at one exploration, and puts all of them in the datastore.
    """
    super().setUp()
    # Topic with a single (unpublished) canonical story reference.
    self.topic_model = self.create_model(
        topic_models.TopicModel,
        id=self.TOPIC_1_ID,
        name='topic title',
        canonical_name='topic title',
        story_reference_schema_version=1,
        subtopic_schema_version=1,
        next_subtopic_id=1,
        language_code='cs',
        url_fragment='topic',
        canonical_story_references=[{
            'story_id': self.STORY_1_ID,
            'story_is_published': False
        }])
    self.topic_model.update_timestamps()

    rights_model = self.create_model(
        topic_models.TopicRightsModel, id=self.TOPIC_1_ID)
    rights_model.update_timestamps()

    story = self.create_model(
        story_models.StoryModel,
        id=self.STORY_1_ID,
        title='story title',
        language_code='cs',
        story_contents_schema_version=1,
        corresponding_topic_id=self.TOPIC_1_ID,
        url_fragment='story',
        story_contents={
            'nodes': [{
                'id': 'node',
                'outline': 'outline',
                'title': 'node title',
                'description': 'description',
                'destination_node_ids': ['123'],
                'acquired_skill_ids': [],
                'exploration_id': self.EXP_1_ID,
                'prerequisite_skill_ids': [],
                'outline_is_finalized': True
            }],
            'initial_node_id': 'abc',
            'next_node_id': 'efg'
        },
        notes='note')
    story.update_timestamps()

    exploration = self.create_model(
        exp_models.ExplorationModel,
        id=self.EXP_1_ID,
        title='exploration title',
        category='category',
        objective='objective',
        language_code='cs',
        init_state_name='state',
        states_schema_version=48,
        states={
            'state': state_domain.State.create_default_state( # type: ignore[no-untyped-call]
                'state', is_initial_state=True
            ).to_dict()
        })
    exploration.update_timestamps()

    datastore_services.put_multi([
        exploration, story, self.topic_model, rights_model
    ])
def setUp(self) -> None:
    """Creates two skills and one question-skill link per skill, and
    puts all four models in the datastore.
    """
    super().setUp()
    question_skill_link_model_1 = self.create_model(
        question_models.QuestionSkillLinkModel,
        question_id=self.QUESTION_1_ID,
        skill_id=self.SKILL_1_ID,
        skill_difficulty=1
    )
    question_skill_link_model_2 = self.create_model(
        question_models.QuestionSkillLinkModel,
        question_id=self.QUESTION_2_ID,
        skill_id=self.SKILL_2_ID,
        skill_difficulty=1
    )
    question_skill_link_model_1.update_timestamps()
    question_skill_link_model_2.update_timestamps()
    # Refactor: the two skill models were byte-for-byte duplicates except
    # for id and description, so their construction is shared via a helper.
    skill_1_model = self._create_test_skill_model(
        self.SKILL_1_ID, self.SKILL_1_DESCRIPTION)
    skill_2_model = self._create_test_skill_model(
        self.SKILL_2_ID, self.SKILL_2_DESCRIPTION)
    skill_1_model.update_timestamps()
    skill_2_model.update_timestamps()
    datastore_services.put_multi([
        skill_1_model,
        skill_2_model,
        question_skill_link_model_1,
        question_skill_link_model_2
    ])

def _create_test_skill_model(
    self, skill_id: str, description: str
) -> skill_models.SkillModel:
    """Builds a SkillModel with the default test skill contents.

    Args:
        skill_id: str. The id of the skill model.
        description: str. The skill description.

    Returns:
        SkillModel. The newly created (not yet put) skill model.
    """
    return self.create_model(
        skill_models.SkillModel,
        id=skill_id,
        description=description,
        language_code=constants.DEFAULT_LANGUAGE_CODE,
        misconceptions=[],
        rubrics=[],
        skill_contents={
            'explanation': {
                'html': 'test explanation',
                'content_id': 'explanation',
            },
            'worked_examples': [],
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {
                    'content': {},
                    'default_outcome': {}
                }
            }
        },
        next_misconception_id=0,
        misconceptions_schema_version=(
            feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION),
        rubric_schema_version=feconf.CURRENT_RUBRIC_SCHEMA_VERSION,
        skill_contents_schema_version=(
            feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION),
        superseding_skill_id='blah',
        all_questions_merged=False,
        prerequisite_skill_ids=[]
    )