Пример #1
0
    def _generate_id(cls, platform, ticket_id, stats_tracking_date):
        """Generates key for the instance of AppFeedbackReportStatsModel
        class in the required format with the arguments provided.

        Args:
            platform: str. The platform this entity is aggregating on.
            ticket_id: str. The ID for the ticket these stats aggregate on.
            stats_tracking_date: date. The date these stats are tracking on.

        Returns:
            str. The generated ID for this entity of the form
            '[platform]:[ticket_id]:[stats_date in YYYY-MM-DD]'.

        Raises:
            Exception. An entity with the generated ID already exists.
        """
        # The ID is fully determined by the arguments, so retrying the
        # generation (as random-hash ID generators do) would produce the
        # exact same ID on every attempt. A single existence check is
        # therefore sufficient, and avoids MAX_RETRIES redundant lookups.
        new_id = '%s:%s:%s' % (
            platform, ticket_id, stats_tracking_date.isoformat())
        if not cls.get_by_id(new_id):
            return new_id
        raise Exception(
            'The id generator for AppFeedbackReportStatsModel is producing too '
            'many collisions.')
Пример #2
0
    def get_next_exploration_id_in_sequence(self, current_exploration_id):
        """Returns the exploration ID of the node just after the node
        corresponding to the current exploration id. If the user is on the
        last node, None is returned.

        Args:
            current_exploration_id: str. The id of exploration currently
                completed.

        Returns:
            str|None. The exploration ID of the next node,
            or None if the passed id is the last one in the collection.
        """
        # Walk adjacent node pairs instead of using index arithmetic with a
        # mutable sentinel; zipping the list with its tail naturally excludes
        # the last node, which has no successor.
        for node, next_node in zip(self.nodes, self.nodes[1:]):
            if node.exploration_id == current_exploration_id:
                return next_node.exploration_id
        return None
Пример #3
0
def run_webpack_compilation():
    """Runs webpack compilation, retrying up to 5 times until the bundle
    output directory appears.

    Exits the process with webpack's return code if the subprocess fails,
    or with code 1 if compilation never produced the bundle directory.
    """
    max_tries = 5
    webpack_bundles_dir_name = 'webpack_bundles'
    for _ in python_utils.RANGE(max_tries):
        try:
            webpack_config_file = build.WEBPACK_DEV_CONFIG
            subprocess.check_call([
                common.NODE_BIN_PATH, WEBPACK_BIN_PATH, '--config',
                webpack_config_file
            ])
        except subprocess.CalledProcessError as error:
            python_utils.PRINT(error.output)
            # sys.exit raises SystemExit, so nothing after it can run; the
            # unreachable 'return' that used to follow it has been removed.
            sys.exit(error.returncode)
        # Only stop retrying once the output directory actually exists.
        if os.path.isdir(webpack_bundles_dir_name):
            break
    if not os.path.isdir(webpack_bundles_dir_name):
        python_utils.PRINT(
            'Failed to complete webpack compilation, exiting ...')
        sys.exit(1)
Пример #4
0
    def test_force_deletion(self):
        # type: () -> None
        """Checks that force-deleting a versioned model also removes every
        snapshot content and metadata model it produced.
        """
        model_id = 'model_id'
        model = TestVersionedModel(id=model_id)
        # Commit three times so several snapshot models exist.
        for _ in python_utils.RANGE(3):
            model.commit(feconf.SYSTEM_COMMITTER_ID, 'commit_msg', [])
        snapshot_ids = [
            model.get_snapshot_id(model.id, version)
            for version in python_utils.RANGE(1, model.version + 1)
        ]

        model.delete(
            feconf.SYSTEM_COMMITTER_ID, 'commit_msg', force_deletion=True)

        self.assertIsNone(TestVersionedModel.get_by_id(model_id))
        for snapshot_id in snapshot_ids:
            self.assertIsNone(
                TestSnapshotContentModel.get_by_id(snapshot_id))
            self.assertIsNone(
                TestSnapshotMetadataModel.get_by_id(snapshot_id))
Пример #5
0
    def generate_new_blog_post_id(cls):
        """Generates a new blog post ID which is unique and is in the form of
        random hash of 12 chars.

        Returns:
            str. A blog post ID that is different from the IDs of all
            the existing blog posts.

        Raises:
            Exception. There were too many collisions with existing blog post
                IDs when attempting to generate a new blog post ID.
        """
        # Keep drawing random hashes until an unused one is found, giving
        # up after MAX_RETRIES attempts.
        for _unused_attempt in python_utils.RANGE(base_models.MAX_RETRIES):
            random_int = utils.get_random_int(base_models.RAND_RANGE)
            candidate_id = utils.convert_to_hash(
                python_utils.UNICODE(random_int), base_models.ID_LENGTH)
            if not cls.get_by_id(candidate_id):
                return candidate_id
        raise Exception(
            'New blog post id generator is producing too many collisions.')
Пример #6
0
def _remove_user_id_from_contributors_in_summary_models(
        user_id, summary_model_class):
    """Remove the user ID from contributor_ids and contributor_summary
    fields in relevant summary models.

    Args:
        user_id: str. The user ID that should be removed.
        summary_model_class: CollectionSummaryModel|ExpSummaryModel. Class of
            the summary model from which should the user ID be removed.
    """
    related_summary_models = summary_model_class.query(
        summary_model_class.contributor_ids == user_id
    ).fetch()

    def _remove_user_id_from_models(summary_models):
        """Remove the user ID from contributor_ids and contributor_summary
        fields.

        This function is run in a transaction, with the maximum number of
        summary_models being MAX_NUMBER_OF_OPS_IN_TRANSACTION.

        Args:
            summary_models: list(BaseModel). Models from which should
                the user ID be removed.
        """
        # Fix: iterate over the batch passed into this transaction, not over
        # the full related_summary_models list. Iterating the full list made
        # every transaction mutate every model while persisting only its own
        # batch, and the second batch would raise KeyError below because
        # user_id had already been deleted from contributors_summary
        # in-memory by the first batch.
        for summary_model in summary_models:
            summary_model.contributor_ids = [
                contributor_id for contributor_id in
                summary_model.contributor_ids
                if contributor_id != user_id
            ]
            del summary_model.contributors_summary[user_id]

        datastore_services.put_multi(summary_models)

    # Process the matched models in transaction-sized batches.
    for i in python_utils.RANGE(
            0, len(related_summary_models), MAX_NUMBER_OF_OPS_IN_TRANSACTION):
        transaction_services.run_in_transaction(
            _remove_user_id_from_models,
            related_summary_models[i:i + MAX_NUMBER_OF_OPS_IN_TRANSACTION])
Пример #7
0
 def test_get_with_five_or_more_questions(self):
     """Checks the topic data handler response for a topic whose single
     uncategorized skill has more than five linked questions.
     """
     number_of_questions = 6
     self.topic_id = 'new_topic'
     self.skill_id_1 = skill_services.get_new_skill_id()
     self.topic = topic_domain.Topic.create_default_topic(
         self.topic_id, 'new_topic', 'abbrev')
     self.topic.uncategorized_skill_ids.append(self.skill_id_1)
     self.topic.thumbnail_filename = 'Image.png'
     topic_services.save_new_topic(self.admin_id, self.topic)
     topic_services.publish_topic(self.topic_id, self.admin_id)
     self.save_new_skill(
         self.skill_id_1, self.admin_id, description='Skill Description 1')
     # Link enough questions to the skill to cross the five-question
     # threshold.
     for question_number in python_utils.RANGE(number_of_questions):
         question_id = question_services.get_new_question_id()
         self.save_new_question(
             question_id, self.admin_id,
             self._create_valid_question_data(question_number),
             [self.skill_id_1])
         question_services.create_new_question_skill_link(
             self.admin_id, question_id, self.skill_id_1, 0.5)
     with self.swap(constants, 'ENABLE_NEW_STRUCTURE_PLAYERS', True):
         response = self.get_json(
             '%s/%s' % (feconf.TOPIC_DATA_HANDLER, 'new_topic'))
         expected_topic_data = {
             'topic_name': 'new_topic',
             'topic_id': self.topic_id,
             'canonical_story_dicts': [],
             'additional_story_dicts': [],
             'uncategorized_skill_ids': [self.skill_id_1],
             'subtopics': [],
             'degrees_of_mastery': {self.skill_id_1: None},
             'skill_descriptions': {self.skill_id_1: 'Skill Description 1'},
             'train_tab_should_be_displayed': True,
         }
         self.assertDictContainsSubset(expected_topic_data, response)
     self.logout()
    def setUp(self):
        """Signs up an owner and seeds five explorations plus a topic with
        one story whose first node points at exploration '0'.
        """
        super(OpportunityServicesUnitTest, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)

        self.TOPIC_ID = 'topic'
        self.STORY_ID = 'story'

        # Create and save five default explorations with ids '0'..'4'.
        for index in python_utils.RANGE(5):
            exploration = exp_domain.Exploration.create_default_exploration(
                '%s' % index,
                title='title %d' % index,
                category='category%d' % index,
            )
            exp_services.save_new_exploration(self.owner_id, exploration)

        topic = topic_domain.Topic.create_default_topic(
            topic_id=self.TOPIC_ID, name='topic', abbreviated_name='abbrev')
        topic_services.save_new_topic(self.owner_id, topic)

        story = story_domain.Story.create_default_story(
            self.STORY_ID, title='A story',
            corresponding_topic_id=self.TOPIC_ID)
        story_services.save_new_story(self.owner_id, story)
        topic_services.add_canonical_story(
            self.owner_id, self.TOPIC_ID, self.STORY_ID)

        # Add a first node to the story and bind it to exploration '0'.
        story_services.update_story(
            self.owner_id, self.STORY_ID, [story_domain.StoryChange({
                'cmd': 'add_story_node',
                'node_id': 'node_1',
                'title': 'Node1',
            }), story_domain.StoryChange({
                'cmd': 'update_story_node_property',
                'property_name': 'exploration_id',
                'node_id': 'node_1',
                'old_value': None,
                'new_value': '0'
            })], 'Changes.')
Пример #9
0
    def _generate_dummy_explorations(
            self, num_dummy_exps_to_generate, num_dummy_exps_to_publish):
        """Generates and publishes the given number of dummy explorations.

        Args:
            num_dummy_exps_to_generate: int. Count of dummy explorations to
                be generated.
            num_dummy_exps_to_publish: int. Count of explorations to
                be published.

        Raises:
            Exception. Environment is not DEVMODE.
        """
        # Guard clause: dummy data must never be created in production.
        if not constants.DEV_MODE:
            raise Exception('Cannot generate dummy explorations in production.')

        logging.info(
            '[ADMIN] %s generated %s number of dummy explorations' %
            (self.user_id, num_dummy_exps_to_generate))
        possible_titles = [
            'Hulk Neuroscience', 'Quantum Starks', 'Wonder Anatomy',
            'Elvish, language of "Lord of the Rings',
            'The Science of Superheroes']
        exploration_ids_to_publish = []
        for exp_index in python_utils.RANGE(num_dummy_exps_to_generate):
            title = random.choice(possible_titles)
            category = random.choice(constants.SEARCH_DROPDOWN_CATEGORIES)
            new_exploration_id = exp_fetchers.get_new_exploration_id()
            exploration = exp_domain.Exploration.create_default_exploration(
                new_exploration_id, title=title, category=category,
                objective='Dummy Objective')
            exp_services.save_new_exploration(self.user_id, exploration)
            # Only the first num_dummy_exps_to_publish explorations get
            # published and indexed.
            if exp_index < num_dummy_exps_to_publish:
                exploration_ids_to_publish.append(new_exploration_id)
                rights_manager.publish_exploration(
                    self.user, new_exploration_id)
        exp_services.index_explorations_given_ids(
            exploration_ids_to_publish)
Пример #10
0
    def test_reports_error_if_individual_users_failed(self):
        """Checks that per-user deletion failures are reported individually
        while the remaining accounts are still deleted.
        """
        self.put_firebase_seed_model()

        # One user in every cycle of four fails with 'uh-oh!'.
        self.exit_stack.enter_context(
            self.firebase_sdk_stub.mock_delete_users_error(
                individual_error_pattern=[None, 'uh-oh!', None, None]))
        self.exit_stack.enter_context(self.swap(
            self.JOB_CLASS, 'MAX_USERS_FIREBASE_CAN_DELETE_PER_CALL', 3))

        auth_ids = ['aid_%d' % i for i in python_utils.RANGE(10)]
        self.firebase_sdk_stub.import_users([
            firebase_admin.auth.ImportUserRecord(auth_id)
            for auth_id in auth_ids])

        self.firebase_sdk_stub.assert_is_user_multi(auth_ids)
        self.assertItemsEqual(self.run_one_off_job(), [
            ['ERROR: Failed to delete an individual Firebase account',
             ['firebase_auth_id=aid_1, reason=uh-oh!',
              'firebase_auth_id=aid_5, reason=uh-oh!',
              'firebase_auth_id=aid_9, reason=uh-oh!']
            ],
            ['SUCCESS: Firebase accounts deleted', 7],
        ])
Пример #11
0
    def _get_new_id(cls):
        """Generates a unique ID for the question of the form
        {{random_hash_of_12_chars}}.

        Returns:
            str. ID of the new QuestionModel instance. (The previous
            docstring said int, but convert_to_hash produces a string.)

        Raises:
            Exception. The ID generator for QuestionModel is
                producing too many collisions.
        """

        # Draw random candidate ids until an unused one is found, giving up
        # after MAX_RETRIES attempts.
        for _ in python_utils.RANGE(base_models.MAX_RETRIES):
            new_id = utils.convert_to_hash(
                python_utils.STR(utils.get_random_int(base_models.RAND_RANGE)),
                base_models.ID_LENGTH)
            if not cls.get_by_id(new_id):
                return new_id

        raise Exception(
            'The id generator for QuestionModel is producing too many '
            'collisions.')
Пример #12
0
def _parse_port_ranges(pool_str):
    """Given a 'N-P,X-Y' description of port ranges, return a set of ints.

    Args:
        pool_str: str. The N-P,X-Y description of port ranges.

    Returns:
        set(int). The port numbers in the port ranges.
    """
    ports = set()
    for range_str in pool_str.split(','):
        try:
            a, b = range_str.split('-', 1)
            start, end = int(a), int(b)
        except ValueError:
            logging.info('Ignoring unparsable port range %r.', range_str)
            continue
        if start < 1 or end > 65535:
            logging.info('Ignoring out of bounds port range %r.', range_str)
            continue
        ports.update(set(python_utils.RANGE(start, end + 1)))
    return ports
Пример #13
0
 def _get_external_id_relationships(cls, item):
     """Builds the external model references for the given rights model:
     the corresponding topic, the manager users, and one snapshot
     metadata/content model per version.
     """
     snapshot_model_ids = [
         '%s-%d' % (item.id, version_number)
         for version_number in python_utils.RANGE(1, item.version + 1)
     ]
     topic_reference = base_model_validators.ExternalModelFetcherDetails(
         'topic_ids', topic_models.TopicModel, [item.id])
     manager_reference = (
         base_model_validators.UserSettingsModelFetcherDetails(
             'manager_user_ids',
             item.manager_ids,
             may_contain_system_ids=False,
             may_contain_pseudonymous_ids=False))
     metadata_reference = base_model_validators.ExternalModelFetcherDetails(
         'snapshot_metadata_ids',
         topic_models.TopicRightsSnapshotMetadataModel,
         snapshot_model_ids)
     content_reference = base_model_validators.ExternalModelFetcherDetails(
         'snapshot_content_ids',
         topic_models.TopicRightsSnapshotContentModel,
         snapshot_model_ids)
     return [
         topic_reference, manager_reference, metadata_reference,
         content_reference]
Пример #14
0
def filter_skills_by_mastery(user_id, skill_ids):
    """Given a list of skill_ids, it returns a list of
    feconf.MAX_NUMBER_OF_SKILL_IDS skill_ids in which the user has
    the least mastery.(Please note that python 2.7 considers the None
    type smaller than any value, so None types will be returned first)

    Args:
        user_id: str. The unique user ID of the user.
        skill_ids: list(str). The skill_ids that are to be filtered.

    Returns:
        list(str). A list of the filtered skill_ids, in the same relative
        order in which they were received.
    """
    degrees_of_mastery = get_multi_user_skill_mastery(user_id, skill_ids)
    # Use a set for O(1) membership tests; the previous list-based lookup
    # made the arranging step below quadratic in the number of skills.
    filtered_skill_ids = set(get_sorted_skill_ids(degrees_of_mastery))

    # Arrange the filtered skill_ids in the order they were received.
    return [
        skill_id for skill_id in skill_ids
        if skill_id in filtered_skill_ids]
Пример #15
0
 def _get_external_id_relationships(cls, item):
     """Builds the external model references for a feedback thread: its
     messages, plus (when present) the original author, the linked
     suggestion, the target entity, and the author of the last nonempty
     message.
     """
     message_ids = [
         '%s.%s' % (item.id, message_index)
         for message_index in python_utils.RANGE(item.message_count)]
     references = [
         base_model_validators.ExternalModelFetcherDetails(
             'message_ids',
             feedback_models.GeneralFeedbackMessageModel,
             message_ids)
     ]
     original_author_id = item.original_author_id
     if original_author_id and user_services.is_user_id_valid(
             original_author_id):
         references.append(
             base_model_validators.ExternalModelFetcherDetails(
                 'author_ids', user_models.UserSettingsModel,
                 [original_author_id]))
     if item.has_suggestion:
         references.append(
             base_model_validators.ExternalModelFetcherDetails(
                 'suggestion_ids', suggestion_models.GeneralSuggestionModel,
                 [item.id]))
     if item.entity_type in TARGET_TYPE_TO_TARGET_MODEL:
         references.append(
             base_model_validators.ExternalModelFetcherDetails(
                 '%s_ids' % item.entity_type,
                 TARGET_TYPE_TO_TARGET_MODEL[item.entity_type],
                 [item.entity_id]))
     last_author_id = item.last_nonempty_message_author_id
     if last_author_id and user_services.is_user_id_valid(last_author_id):
         references.append(
             base_model_validators.ExternalModelFetcherDetails(
                 'last_nonempty_message_author_ids',
                 user_models.UserSettingsModel,
                 [last_author_id]))
     return references
    def get_thread_analytics_multi(cls, exploration_ids):
        """Gets the thread analytics for the explorations specified by the
        exploration_ids.

        Args:
            exploration_ids: list(str). IDs of the explorations to get analytics
                for.

        Returns:
            list(dict). Each dict in this list corresponds to an
            exploration ID in the input list, and has two keys:
            - num_open_threads: int. The count of open feedback threads
              for this exploration.
            - num_total_threads: int. The count of all feedback threads
              for this exploration.
        """
        realtime_model_ids = cls.get_multi_active_realtime_layer_ids(
            exploration_ids)
        realtime_models = cls._get_realtime_datastore_class().get_multi(
            realtime_model_ids)
        persisted_models = (
            feedback_models.FeedbackAnalyticsModel.get_multi(exploration_ids))

        # Sum the realtime-layer counts with the persisted counts, treating
        # a missing model on either side as contributing zero.
        analytics = []
        for exp_id, realtime_model, persisted_model in zip(
                exploration_ids, realtime_models, persisted_models):
            num_open = 0
            num_total = 0
            if realtime_model is not None:
                num_open += realtime_model.num_open_threads
                num_total += realtime_model.num_total_threads
            if persisted_model is not None:
                num_open += persisted_model.num_open_threads
                num_total += persisted_model.num_total_threads
            analytics.append(feedback_domain.FeedbackAnalytics(
                feconf.ENTITY_TYPE_EXPLORATION, exp_id, num_open, num_total))
        return analytics
Пример #17
0
    def test_put_multi(self):
        """Checks update_timestamps_multi/put_multi behavior: last_updated
        is always set when it is None, kept when update_last_updated_time is
        False, and refreshed when the default (True) is used.
        """
        new_models = [base_models.BaseModel() for _ in python_utils.RANGE(3)]
        for new_model in new_models:
            self.assertIsNone(new_model.created_on)
            self.assertIsNone(new_model.last_updated)

        # Field last_updated will get updated anyway because it is None.
        base_models.BaseModel.update_timestamps_multi(
            new_models, update_last_updated_time=False)
        base_models.BaseModel.put_multi(new_models)
        model_ids = [new_model.id for new_model in new_models]
        original_last_updated = []
        for model_id in model_ids:
            stored_model = base_models.BaseModel.get_by_id(model_id)
            self.assertIsNotNone(stored_model.created_on)
            self.assertIsNotNone(stored_model.last_updated)
            original_last_updated.append(stored_model.last_updated)

        # Field last_updated won't get updated because update_last_updated_time
        # is set to False and last_updated already has some value.
        refetched_models = base_models.BaseModel.get_multi(model_ids)
        base_models.BaseModel.update_timestamps_multi(
            refetched_models, update_last_updated_time=False)
        base_models.BaseModel.put_multi(refetched_models)
        for model_id, last_updated in zip(model_ids, original_last_updated):
            self.assertEqual(
                base_models.BaseModel.get_by_id(model_id).last_updated,
                last_updated)

        # Field last_updated will get updated because update_last_updated_time
        # is set to True (by default).
        final_models = base_models.BaseModel.get_multi(model_ids)
        base_models.BaseModel.update_timestamps_multi(final_models)
        base_models.BaseModel.put_multi(final_models)
        for model_id, last_updated in zip(model_ids, original_last_updated):
            self.assertNotEqual(
                base_models.BaseModel.get_by_id(model_id).last_updated,
                last_updated)
Пример #18
0
def regenerate_exp_commit_log_model(exp_model, version):
    """Helper function to regenerate a commit log model for an
    exploration model.

    NOTE TO DEVELOPERS: Do not delete this function until issue #10808 is fixed.

    Args:
        exp_model: ExplorationModel. The exploration model for which
            commit log model is to be generated.
        version: int. The commit log version to be generated.

    Returns:
        ExplorationCommitLogEntryModel. The regenerated commit log model.
    """
    metadata_model = (exp_models.ExplorationSnapshotMetadataModel.get_by_id(
        '%s-%s' % (exp_model.id, version)))

    # Find the rights model that was current when the snapshot metadata was
    # created: start from version 1 and walk forward, keeping the latest
    # rights version whose created_on does not exceed the metadata model's
    # created_on.
    required_rights_model = exp_models.ExplorationRightsModel.get(exp_model.id,
                                                                  strict=True,
                                                                  version=1)
    for rights_version in python_utils.RANGE(2, version + 1):
        rights_model = exp_models.ExplorationRightsModel.get(
            exp_model.id, strict=False, version=rights_version)
        if rights_model is None:
            # No further rights versions exist; keep the last one found.
            break
        if rights_model.created_on <= metadata_model.created_on:
            required_rights_model = rights_model
        else:
            # Stops at the first rights model created after the metadata
            # model (this assumes higher rights versions were created later
            # in time -- TODO confirm).
            break
    commit_log_model = (exp_models.ExplorationCommitLogEntryModel.create(
        exp_model.id, version, metadata_model.committer_id,
        metadata_model.commit_type, metadata_model.commit_message,
        metadata_model.commit_cmds, required_rights_model.status,
        required_rights_model.community_owned))
    # The regenerated entry mirrors the snapshot metadata's timestamps
    # rather than the time of regeneration.
    commit_log_model.exploration_id = exp_model.id
    commit_log_model.created_on = metadata_model.created_on
    commit_log_model.last_updated = metadata_model.last_updated
    return commit_log_model
Пример #19
0
    def setUp(self):
        """Signs up an admin and an owner, then seeds five explorations,
        a topic, and a canonical story for the opportunity tests.
        """
        super(OpportunityServicesIntegrationTest, self).setUp()
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)

        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)

        self.set_admins([self.ADMIN_USERNAME])
        self.admin = user_services.UserActionsInfo(self.admin_id)

        self.TOPIC_ID = 'topic'
        self.STORY_ID = 'story'
        self.USER_ID = 'user'
        self.SKILL_ID = 'skill'
        self.QUESTION_ID = question_services.get_new_question_id()

        # Create and save five default explorations with ids '0'..'4'.
        for index in python_utils.RANGE(5):
            exploration = exp_domain.Exploration.create_default_exploration(
                '%s' % index,
                title='title %d' % index,
                category='category%d' % index,
            )
            exp_services.save_new_exploration(self.owner_id, exploration)

        topic = topic_domain.Topic.create_default_topic(
            topic_id=self.TOPIC_ID, name='topic')
        topic_services.save_new_topic(self.owner_id, topic)

        story = story_domain.Story.create_default_story(
            self.STORY_ID,
            title='A story',
            corresponding_topic_id=self.TOPIC_ID)
        story_services.save_new_story(self.owner_id, story)
        topic_services.add_canonical_story(
            self.owner_id, self.TOPIC_ID, self.STORY_ID)
Пример #20
0
    def _get_new_id(cls) -> str:
        """Generates a unique ID for the question in the form of random hash
        of 12 chars.

        Returns:
            str. ID of the new QuestionModel instance.

        Raises:
            Exception. The ID generator for QuestionModel is
                producing too many collisions.
        """
        # Draw random candidate ids until an unused one is found, giving up
        # after MAX_RETRIES attempts.
        for _unused_attempt in python_utils.RANGE(base_models.MAX_RETRIES):
            candidate_id = utils.convert_to_hash(
                python_utils.UNICODE(
                    utils.get_random_int(base_models.RAND_RANGE)),
                base_models.ID_LENGTH)
            if not cls.get_by_id(candidate_id):
                return candidate_id

        raise Exception(
            'The id generator for QuestionModel is producing too many '
            'collisions.')
Пример #21
0
    def test_get_can_build_full_task_list_after_enough_fetches(self):
        """Checks that paging through results with the returned cursor
        eventually yields all 25 tasks, newest first.
        """
        task_entries = [
            self._new_resolved_task(
                state_name='State %d' % task_number,
                resolved_on=(
                    self.MOCK_DATE
                    + datetime.timedelta(minutes=task_number * 5)))
            for task_number in python_utils.RANGE(1, 26)]
        improvements_services.put_tasks(task_entries)

        with self.login_context(self.OWNER_EMAIL):
            all_results = []
            cursor, more = None, True
            while more:
                json_response = self.get_json(self.get_url(cursor=cursor))
                all_results.extend(json_response['results'])
                cursor, more = json_response['cursor'], json_response['more']
        expected_target_ids = [
            'State %d' % task_number
            for task_number in python_utils.RANGE(25, 0, -1)]
        self.assertEqual(
            [t['target_id'] for t in all_results], expected_target_ids)
Пример #22
0
    def test_reports_error_if_user_batch_failed(self):
        """Checks that whole-batch deletion failures are reported with their
        reasons while the other batches are still deleted.
        """
        self.put_firebase_seed_model()

        uh_oh = Exception('uh-oh!')
        unlucky = Exception('unlucky')
        # The second and fourth batches fail wholesale.
        self.exit_stack.enter_context(
            self.firebase_sdk_stub.mock_delete_users_error(
                batch_error_pattern=[None, uh_oh, None, unlucky]))
        self.exit_stack.enter_context(self.swap(
            self.JOB_CLASS, 'MAX_USERS_FIREBASE_CAN_DELETE_PER_CALL', 3))

        auth_ids = ['aid_%d' % i for i in python_utils.RANGE(10)]
        self.firebase_sdk_stub.import_users([
            firebase_admin.auth.ImportUserRecord(auth_id)
            for auth_id in auth_ids])

        self.firebase_sdk_stub.assert_is_user_multi(auth_ids)
        self.assertItemsEqual(self.run_one_off_job(), [
            ['ERROR: Failed to delete a batch of Firebase accounts',
             'count=4, reasons=[%r, %r]' % (uh_oh, unlucky)],
            ['SUCCESS: Firebase accounts deleted', 6],
        ])
        # Users in the failed batches must still exist; the rest are gone.
        self.firebase_sdk_stub.assert_is_not_user_multi(
            auth_ids[:3] + auth_ids[6:9])
        self.firebase_sdk_stub.assert_is_user_multi(
            auth_ids[3:6] + auth_ids[9:])
Пример #23
0
    def setUp(self):
        """Creates a topic with three canonical stories so that commit log
        models 'story-<i>-1' exist for the audit job to validate.
        """
        super(StoryCommitLogEntryModelValidatorTests, self).setUp()

        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)

        topic = topic_domain.Topic.create_default_topic(
            '0', 'topic', 'abbrev', 'description')

        for story_number in python_utils.RANGE(3):
            story = story_domain.Story.create_default_story(
                '%s' % story_number,
                'title %d' % story_number,
                'description %d' % story_number,
                '0',
                'title-%s' % chr(97 + story_number))
            story_services.save_new_story(self.owner_id, story)
            topic.add_canonical_story(story.id)

        topic_services.save_new_topic(self.owner_id, topic)

        # Saving each story above created a version-1 commit log entry.
        self.model_instance_0 = (
            story_models.StoryCommitLogEntryModel.get_by_id('story-0-1'))
        self.model_instance_1 = (
            story_models.StoryCommitLogEntryModel.get_by_id('story-1-1'))
        self.model_instance_2 = (
            story_models.StoryCommitLogEntryModel.get_by_id('story-2-1'))

        self.job_class = (
            prod_validation_jobs_one_off
            .StoryCommitLogEntryModelAuditOneOffJob)
Пример #24
0
def main(args=None):
    """Run tests, rerunning at most MAX_RETRY_COUNT times if they flake.

    Args:
        args: list(str)|None. Command-line arguments to parse. If None,
            argparse falls back to sys.argv.
    """
    parsed_args = _PARSER.parse_args(args=args)
    # Each suite maps to a rerun policy (never / known flakes only / always).
    policy = RERUN_POLICIES[parsed_args.suite.lower()]

    with servers.managed_portserver():
        # NOTE(review): if MAX_RETRY_COUNT were ever < 1, the loop body would
        # not run and sys.exit below would raise NameError on return_code.
        for attempt_num in python_utils.RANGE(1, MAX_RETRY_COUNT + 1):
            python_utils.PRINT('***Attempt %d.***' % attempt_num)
            output, return_code = run_tests(parsed_args)

            if not flake_checker.check_if_on_ci():
                # Don't rerun off of CI.
                python_utils.PRINT('No reruns because not running on CI.')
                break

            if return_code == 0:
                # Don't rerun passing tests.
                flake_checker.report_pass(parsed_args.suite)
                break

            # Check whether we should rerun based on this suite's policy.
            test_is_flaky = flake_checker.is_test_output_flaky(
                output, parsed_args.suite)
            if policy == RERUN_POLICY_NEVER:
                python_utils.PRINT(
                    'Not rerunning because the policy is to never '
                    'rerun the {} suite'.format(parsed_args.suite))
                break
            if policy == RERUN_POLICY_KNOWN_FLAKES and not test_is_flaky:
                python_utils.PRINT(
                    ('Not rerunning because the policy is to only '
                     'rerun the %s suite on known flakes, and this '
                     'failure did not match any known flakes') %
                    parsed_args.suite)
                break

    # Exit with the return code of the last attempt that actually ran.
    sys.exit(return_code)
Пример #25
0
    def generate_new_thread_id(cls, entity_type, entity_id):
        """Generates a new thread ID which is unique.

        Args:
            entity_type: str. The type of the entity.
            entity_id: str. The ID of the entity.

        Returns:
            str. A thread ID that is different from the IDs of all
                the existing threads within the given entity.

        Raises:
            Exception: There were too many collisions with existing thread
                IDs when attempting to generate a new thread ID.
        """
        for _attempt in python_utils.RANGE(_MAX_RETRIES):
            # The random suffix combines the current time with a random int
            # to make collisions with existing thread IDs unlikely.
            random_suffix = (
                utils.base64_from_int(utils.get_current_time_in_millisecs()) +
                utils.base64_from_int(utils.get_random_int(_RAND_RANGE)))
            candidate_id = '.'.join([entity_type, entity_id, random_suffix])
            if not cls.get_by_id(candidate_id):
                return candidate_id
        raise Exception(
            'New thread id generator is producing too many collisions.')
Пример #26
0
        def mock_popen(program_args, **kwargs):
            """Mock of psutil.Popen that creates processes using os.fork().

            The processes created will always terminate within ~1 minute.

            Args:
                program_args: list(*). Unused program arguments that would
                    otherwise be passed to Popen.
                **kwargs: dict(str: *). Keyword arguments passed to Popen.

            Returns:
                PopenStub. The return value of psutil.Popen.
            """
            # Record the call so the test can assert on it later.
            popen_calls.append(self.POPEN_CALL(program_args, kwargs))

            root_pid = 1
            combined_stdout = b''.join(b'%b\n' % line for line in outputs)
            # Child PIDs follow the root PID consecutively.
            children = []
            for child_pid in python_utils.RANGE(
                    root_pid + 1, root_pid + 1 + num_children):
                children.append(scripts_test_utils.PopenStub(
                    pid=child_pid, unresponsive=unresponsive))
            return scripts_test_utils.PopenStub(
                pid=root_pid, stdout=combined_stdout,
                unresponsive=unresponsive, child_procs=children)
Пример #27
0
    def test_standard_operation(self):
        """Runs the IndexAllActivities one-off job and checks that all five
        activities end up in the search index, and that reduce is a no-op.
        """
        job_id = (
            activity_jobs_one_off.IndexAllActivitiesJobManager.create_new())
        activity_jobs_one_off.IndexAllActivitiesJobManager.enqueue(job_id)

        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(
                taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)

        indexed_docs = []

        def mock_add_documents_to_index(docs, index):
            indexed_docs.extend(docs)
            self.assertIn(index, (
                search_services.SEARCH_INDEX_EXPLORATIONS,
                search_services.SEARCH_INDEX_COLLECTIONS))

        with self.swap(
            platform_search_services, 'add_documents_to_index',
            mock_add_documents_to_index):
            self.process_and_flush_pending_mapreduce_tasks()

        ids = [doc['id'] for doc in indexed_docs]
        titles = [doc['title'] for doc in indexed_docs]
        categories = [doc['category'] for doc in indexed_docs]

        # The fixtures create activities with ids 0..4.
        for i in python_utils.RANGE(5):
            self.assertIn('%s' % i, ids)
            self.assertIn('title %d' % i, titles)
            self.assertIn('category%d' % i, categories)

        self.assertIsNone(
            activity_jobs_one_off.IndexAllActivitiesJobManager.reduce(
                'key', 'value'))
Пример #28
0
    def _generate_dummy_skill_and_questions(self):
        """Generates and loads the database with a skill and 15 questions
        linked to the skill.

        Raises:
            Exception: Cannot load new structures data in production mode.
            Exception: User does not have enough rights to generate data.
        """
        # Guard clauses instead of nesting the whole body under DEV_MODE.
        if not constants.DEV_MODE:
            raise Exception('Cannot generate dummy skills in production.')
        if self.user.role != feconf.ROLE_ID_ADMIN:
            raise Exception(
                'User does not have enough rights to generate data.')

        skill_id = skill_services.get_new_skill_id()
        skill_name = 'Dummy Skill %s' % python_utils.UNICODE(
            random.getrandbits(32))
        skill = self._create_dummy_skill(
            skill_id, skill_name, '<p>Dummy Explanation 1</p>')
        skill_services.save_new_skill(self.user_id, skill)

        # The difficulty pool is loop-invariant; build it once instead of
        # recreating the list on every iteration.
        question_difficulties = [
            feconf.EASY_SKILL_DIFFICULTY,
            feconf.MEDIUM_SKILL_DIFFICULTY,
            feconf.HARD_SKILL_DIFFICULTY,
        ]
        for i in python_utils.RANGE(15):
            question_id = question_services.get_new_question_id()
            question_name = 'Question number %s %s' % (
                python_utils.UNICODE(i), skill_name)
            question = self._create_dummy_question(
                question_id, question_name, [skill_id])
            question_services.add_question(self.user_id, question)
            question_services.create_new_question_skill_link(
                self.user_id, question_id, skill_id,
                random.choice(question_difficulties))
Пример #29
0
 def test_get_with_cursor_as_none_returns_first_page(self):
     """A None cursor fetches the first page: the ten most recently
     resolved tasks, newest first, plus a cursor for the next page.
     """
     # Create 25 resolved tasks with strictly increasing resolution times.
     task_entries = []
     for minute_index in python_utils.RANGE(1, 26):
         task_entries.append(self._new_resolved_task(
             state_name='State %d' % minute_index,
             resolved_on=self.MOCK_DATE + datetime.timedelta(
                 minutes=minute_index * 5)))
     improvements_services.put_tasks(task_entries)

     with self.login_context(self.OWNER_EMAIL):
         json_response = self.get_json(self.get_url(cursor=None))

     # Expect the ten newest tasks, i.e. States 25 down through 16.
     self.assertEqual(
         [t['target_id'] for t in json_response['results']],
         ['State %d' % i for i in python_utils.RANGE(25, 15, -1)])
     self.assertIsNotNone(json_response['cursor'])
     self.assertTrue(json_response['more'])
Пример #30
0
    def setUp(self):
        """Creates five published explorations plus a published topic and
        story that the opportunity-service tests operate on.
        """
        super(OpportunityServicesUnitTest, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)

        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)

        self.set_admins([self.ADMIN_USERNAME])

        self.TOPIC_ID = 'topic'
        self.STORY_ID = 'story'
        # Explorations get string ids '0'..'4' with matching titles and
        # categories.
        explorations = [
            self.save_new_valid_exploration('%s' % i,
                                            self.owner_id,
                                            title='title %d' % i,
                                            category='category%d' % i,
                                            end_state_name='End State',
                                            correctness_feedback_enabled=True)
            for i in python_utils.RANGE(5)
        ]

        for exp in explorations:
            self.publish_exploration(self.owner_id, exp.id)

        topic = topic_domain.Topic.create_default_topic(
            self.TOPIC_ID, 'topic', 'abbrev', 'description')
        # Thumbnail and at least one subtopic are required before the topic
        # can be published below.
        topic.thumbnail_filename = 'thumbnail.svg'
        topic.thumbnail_bg_color = '#C6DCDA'
        topic.subtopics = [
            topic_domain.Subtopic(
                1, 'Title', ['skill_id_1'], 'image.svg',
                constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0],
                'dummy-subtopic-url')
        ]
        topic.next_subtopic_id = 2
        topic_services.save_new_topic(self.owner_id, topic)
        topic_services.publish_topic(self.TOPIC_ID, self.admin_id)

        story = story_domain.Story.create_default_story(
            self.STORY_ID, 'A story', 'Description', self.TOPIC_ID,
            'story-two')
        story_services.save_new_story(self.owner_id, story)
        topic_services.add_canonical_story(self.owner_id, self.TOPIC_ID,
                                           self.STORY_ID)
        topic_services.publish_story(self.TOPIC_ID, self.STORY_ID,
                                     self.admin_id)

        # Attach exploration '0' to a new story node so it is surfaced as a
        # translation/voiceover opportunity.
        story_services.update_story(self.owner_id, self.STORY_ID, [
            story_domain.StoryChange({
                'cmd': 'add_story_node',
                'node_id': 'node_1',
                'title': 'Node1',
            }),
            story_domain.StoryChange({
                'cmd': 'update_story_node_property',
                'property_name': 'exploration_id',
                'node_id': 'node_1',
                'old_value': None,
                'new_value': '0'
            })
        ], 'Changes.')