    def test_search_ranks_cannot_be_negative(self):
        self.save_new_valid_exploration(self.EXP_ID, self.owner_id)
        exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)

        base_search_rank = 20

        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary),
            base_search_rank)

        # A user can rate an exploration at most once, so these 50 identical
        # ratings from one user lower the rank only once (by 5).
        for i in python_utils.RANGE(50):
            rating_services.assign_rating_to_exploration(
                'user_id_1', self.EXP_ID, 1)
        exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)
        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary),
            base_search_rank - 5)

        for i in python_utils.RANGE(50):
            rating_services.assign_rating_to_exploration(
                'user_id_%s' % i, self.EXP_ID, 1)

        # The rank will be at least 0.
        exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)
        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary), 0)
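
# The test above pins down two behaviors: repeated ratings from one user
# count only once, and the search rank is floored at zero. A minimal sketch
# of the flooring idea (the exact weights used inside
# search_services.get_search_rank_from_exp_summary are an assumption here):
def get_bounded_search_rank(base_rank, rating_deltas):
    """Returns a search rank that never drops below zero."""
    return max(0, base_rank + sum(rating_deltas))

# With the numbers from the test: one downrating user gives 20 - 5 = 15,
# while 50 distinct downrating users bottom out at 0 rather than -230.
assert get_bounded_search_rank(20, [-5]) == 15
assert get_bounded_search_rank(20, [-5] * 50) == 0
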
    def test_get_thread_summaries_load_test(self):
        # Fetching the summaries of 100 threads, each with 5 messages, should
        # take less than 1.7 seconds. In reality, the time taken to fetch all
        # the summaries is less than 0.2s. However, since it seems to take
        # longer on Travis, the constant has been set to 1.7s.
        # Create 100 threads.
        for _ in python_utils.RANGE(100):
            feedback_services.create_thread(
                feconf.ENTITY_TYPE_EXPLORATION, self.EXP_ID_1,
                self.user_id, self.EXPECTED_THREAD_DICT['subject'],
                'not used here')
        threadlist = feedback_services.get_all_threads(
            feconf.ENTITY_TYPE_EXPLORATION, self.EXP_ID_1, False)

        thread_ids = []
        for thread in threadlist:
            thread_ids.append(thread.id)
            # Create 5 messages in each thread.
            for _ in python_utils.RANGE(5):
                feedback_services.create_message(
                    thread.id, self.user_id, None, None, 'editor message')

        start = time.time()
        # Fetch the summaries of all the threads.
        feedback_services.get_exp_thread_summaries(self.user_id, thread_ids)
        elapsed_time = time.time() - start
        self.assertLessEqual(elapsed_time, 1.7)

def validate_topic_similarities(data):
    """Validates topic similarities given by data, which should be a string
    of comma-separated values.

    The first line of data should be a list of topic names. The next lines
    should be a symmetric adjacency matrix of similarities, which are floats
    between 0.0 and 1.0.

    This function checks whether the topics belong to the current list of
    known topics and whether the adjacency matrix is valid.
    """
    data = data.splitlines()
    data = list(csv.reader(data))
    topics_list = data[0]
    topics_length = len(topics_list)
    topic_similarities_values = data[1:]

    if len(topic_similarities_values) != topics_length:
        raise Exception('Length of topic similarities rows: %s '
                        'does not match length of topic list: %s.' %
                        (len(topic_similarities_values), topics_length))

    for topic in topics_list:
        if topic not in RECOMMENDATION_CATEGORIES:
            raise Exception('Topic %s not in list of known topics.' % topic)

    for index, topic in enumerate(topics_list):
        if len(topic_similarities_values[index]) != topics_length:
            raise Exception(
                'Length of topic similarities columns: %s '
                'does not match length of topic list: %s.' %
                (len(topic_similarities_values[index]), topics_length))

    for row_ind in python_utils.RANGE(topics_length):
        for col_ind in python_utils.RANGE(topics_length):
            similarity = topic_similarities_values[row_ind][col_ind]
            try:
                similarity = float(similarity)
            except ValueError:
                raise ValueError(
                    'Expected similarity to be a float, received %s' %
                    similarity)

            if similarity < 0.0 or similarity > 1.0:
                raise ValueError('Expected similarity to be between 0.0 and '
                                 '1.0, received %s' % similarity)

            # Store the parsed float so that the symmetry check below compares
            # numeric values rather than raw strings.
            topic_similarities_values[row_ind][col_ind] = similarity

    for row_ind in python_utils.RANGE(topics_length):
        for col_ind in python_utils.RANGE(topics_length):
            if (topic_similarities_values[row_ind][col_ind] !=
                    topic_similarities_values[col_ind][row_ind]):
                raise Exception('Expected topic similarities to be symmetric.')
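
# A usage sketch for the validator above, assuming 'Algebra', 'Arithmetic'
# and 'Geometry' are all members of RECOMMENDATION_CATEGORIES:
SAMPLE_TOPIC_SIMILARITIES = (
    'Algebra,Arithmetic,Geometry\n'
    '1.0,0.8,0.5\n'
    '0.8,1.0,0.4\n'
    '0.5,0.4,1.0')

# The first line names the topics; the remaining lines form a symmetric 3x3
# matrix of floats in [0.0, 1.0]. The call raises if any check fails.
validate_topic_similarities(SAMPLE_TOPIC_SIMILARITIES)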
Example #4
    def test_get_with_more_questions_with_fifty_or_more_skills(self):
        number_of_skills = 60
        number_of_questions = [0] * 60
        number_of_questions[46] = 2
        number_of_questions[20] = 3
        number_of_questions[29] = 10
        self.topic_id = 'new_topic'
        skill_ids = (
            [skill_services.get_new_skill_id() for _ in python_utils.RANGE(
                number_of_skills)])
        self.topic = topic_domain.Topic.create_default_topic(
            self.topic_id, 'new_topic', 'new-topic', 'description')
        for index in python_utils.RANGE(number_of_skills):
            self.topic.uncategorized_skill_ids.append(skill_ids[index])
        self.topic.thumbnail_filename = 'Image.svg'
        self.topic.thumbnail_bg_color = (
            constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0])
        self.topic.practice_tab_is_displayed = True
        subtopic_1 = topic_domain.Subtopic.create_default_subtopic(
            1, 'Subtopic Title 1')
        subtopic_1.skill_ids = ['skill_id_1']
        subtopic_1.url_fragment = 'sub-one-frag'
        self.topic.subtopics = [subtopic_1]
        self.topic.next_subtopic_id = 2
        topic_services.save_new_topic(self.admin_id, self.topic)
        topic_services.publish_topic(self.topic_id, self.admin_id)
        for i in python_utils.RANGE(number_of_skills):
            self.save_new_skill(
                skill_ids[i], self.admin_id,
                description='Skill Description')
        for i in python_utils.RANGE(number_of_skills):
            for j in python_utils.RANGE(number_of_questions[i]):
                question_id = question_services.get_new_question_id()
                self.save_new_question(
                    question_id, self.admin_id,
                    self._create_valid_question_data(j), [skill_ids[i]])
                question_services.create_new_question_skill_link(
                    self.admin_id, question_id, skill_ids[i], 0.5)

        json_response = self.get_json(
            '%s/staging/%s' % (feconf.TOPIC_DATA_HANDLER, 'new-topic'))
        expected_dict = {
            'topic_name': 'new_topic',
            'topic_id': self.topic_id,
            'canonical_story_dicts': [],
            'additional_story_dicts': [],
            'practice_tab_is_displayed': True
        }
        self.assertDictContainsSubset(expected_dict, json_response)
        self.logout()
Example #5
def run_webpack_compilation(source_maps=False):
    """Runs webpack compilation.

    Args:
        source_maps: bool. Whether to compile with source maps.
    """
    max_tries = 5
    webpack_bundles_dir_name = 'webpack_bundles'

    for _ in python_utils.RANGE(max_tries):
        try:
            managed_webpack_compiler = servers.managed_webpack_compiler(
                use_source_maps=source_maps)
            with managed_webpack_compiler as proc:
                proc.wait()
        except subprocess.CalledProcessError as error:
            python_utils.PRINT(error.output)
            # sys.exit() raises SystemExit, so nothing can run after it.
            sys.exit(error.returncode)
        if os.path.isdir(webpack_bundles_dir_name):
            break
    else:
        # We didn't break out of the loop, meaning all attempts have failed.
        python_utils.PRINT(
            'Failed to complete webpack compilation, exiting...')
        sys.exit(1)
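
# The function above relies on Python's for-else construct: the else clause
# runs only when the loop completes without hitting `break`. A
# self-contained sketch of the same retry-until-success shape:
def retry(action, max_tries=5):
    """Calls `action` up to `max_tries` times until it returns True."""
    for attempt in range(max_tries):
        if action():
            break
        print('Attempt %d failed, retrying...' % (attempt + 1))
    else:
        # Reached only when no attempt succeeded (i.e. no `break` occurred).
        raise RuntimeError('All %d attempts failed.' % max_tries)

# Usage: the stubbed action succeeds on the third call.
calls = iter([False, False, True])
retry(lambda: next(calls))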
Example #6
        def mock_popen(program_args, **kwargs):
            """Mock of psutil.Popen that creates processes using os.fork().

            The processes created will always terminate within ~1 minute.

            Args:
                program_args: list(*). Unused program arguments that would
                    otherwise be passed to Popen.
                **kwargs: dict(str: *). Keyword arguments passed to Popen.

            Returns:
                PopenStub. The return value of psutil.Popen.
            """
            popen_calls.append(self.POPEN_CALL(program_args, kwargs))

            pid = 1
            stdout = b''.join(b'%b\n' % o for o in outputs)
            child_procs = [
                scripts_test_utils.PopenStub(pid=i, unresponsive=unresponsive)
                for i in python_utils.RANGE(pid + 1, pid + 1 + num_children)
            ]
            return scripts_test_utils.PopenStub(pid=pid,
                                                stdout=stdout,
                                                unresponsive=unresponsive,
                                                child_procs=child_procs)
Example #7
def _update_classifier_training_jobs_status(job_ids, status):
    """Checks for the existence of the model and then updates it.

    Args:
        job_ids: list(str). List of IDs of the ClassifierTrainingJob domain
            objects.
        status: str. The status to which the job needs to be updated.

    Raises:
        Exception. The ClassifierTrainingJobModel corresponding to the job_id
            of the ClassifierTrainingJob does not exist.
    """
    classifier_training_job_models = (
        classifier_models.ClassifierTrainingJobModel.get_multi(job_ids))

    for index in python_utils.RANGE(len(job_ids)):
        if classifier_training_job_models[index] is None:
            raise Exception(
                'The ClassifierTrainingJobModel corresponding to the job_id '
                'of the ClassifierTrainingJob does not exist.')

        classifier_training_job = get_classifier_training_job_from_model(
            classifier_training_job_models[index])
        classifier_training_job.update_status(status)
        classifier_training_job.validate()

        classifier_training_job_models[index].status = status

    classifier_models.ClassifierTrainingJobModel.update_timestamps_multi(
        classifier_training_job_models)
    classifier_models.ClassifierTrainingJobModel.put_multi(
        classifier_training_job_models)
Example #8
def get_human_readable_contributors_summary(contributors_summary):
    """Gets contributors summary in human readable form.

    Args:
        contributors_summary: dict. The keys are user ids and
            the values are the number of commits made by that user.

    Returns:
        dict. A dict of contributors in human-readable form; the keys are
        usernames and the values are dicts. Example:

        {
            'albert': {
                'num_commits': 10,
            },
        }
    """
    contributor_ids = list(contributors_summary.keys())
    contributor_usernames = user_services.get_human_readable_user_ids(
        contributor_ids, strict=False)
    return {
        contributor_usernames[ind]: {
            'num_commits': contributors_summary[contributor_ids[ind]],
        }
        for ind in python_utils.RANGE(len(contributor_ids))
    }
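
# A self-contained sketch of the same id-to-username re-keying pattern, with
# a stub resolver standing in for
# user_services.get_human_readable_user_ids (all names are hypothetical):
def rekey_by_username(contributors_summary, resolve_usernames):
    contributor_ids = list(contributors_summary.keys())
    usernames = resolve_usernames(contributor_ids)
    return {
        usernames[ind]: {
            'num_commits': contributors_summary[contributor_ids[ind]],
        }
        for ind in range(len(contributor_ids))
    }

STUB_USERNAMES = {'uid_albert': 'albert', 'uid_ben': 'ben'}
assert rekey_by_username(
    {'uid_albert': 10, 'uid_ben': 3},
    lambda ids: [STUB_USERNAMES[uid] for uid in ids],
) == {'albert': {'num_commits': 10}, 'ben': {'num_commits': 3}}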
Example #9
    def generate_new_thread_id(cls, entity_type: str, entity_id: str) -> str:
        """Generates a new thread ID which is unique.

        Args:
            entity_type: str. The type of the entity.
            entity_id: str. The ID of the entity.

        Returns:
            str. A thread ID that is different from the IDs of all
            the existing threads within the given entity.

        Raises:
            Exception. There were too many collisions with existing thread IDs
                when attempting to generate a new thread ID.
        """
        for _ in python_utils.RANGE(_MAX_RETRIES):
            thread_id = (
                '%s.%s.%s%s' %
                (entity_type, entity_id,
                 utils.base64_from_int(
                     int(utils.get_current_time_in_millisecs())),
                 utils.base64_from_int(utils.get_random_int(_RAND_RANGE))))
            if not cls.get_by_id(thread_id):
                return thread_id
        raise Exception(
            'New thread id generator is producing too many collisions.')
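
# generate_new_thread_id is one instance of a pattern that recurs in the
# examples below: draw a random candidate id, keep it only if no entity
# already uses it, and give up after a bounded number of retries. A
# self-contained sketch of that pattern (the id format and in-memory store
# are stand-ins):
import random

def generate_unique_id(existing_ids, max_retries=10):
    for _ in range(max_retries):
        candidate = 'id_%06d' % random.randrange(10 ** 6)
        if candidate not in existing_ids:
            return candidate
    raise Exception('Id generator is producing too many collisions.')

TAKEN = {'id_000001', 'id_000002'}
new_id = generate_unique_id(TAKEN)
assert new_id not in TAKEN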
Example #10
    def _generate_dummy_skill_and_questions(self):
        """Generate and loads the database with a skill and 15 questions
        linked to the skill.

        Raises:
            Exception. Cannot load new structures data in production mode.
            Exception. User does not have enough rights to generate data.
        """
        if constants.DEV_MODE:
            if feconf.ROLE_ID_CURRICULUM_ADMIN not in self.user.roles:
                raise Exception(
                    'User does not have enough rights to generate data.')
            skill_id = skill_services.get_new_skill_id()
            skill_name = 'Dummy Skill %s' % python_utils.UNICODE(
                random.getrandbits(32))
            skill = self._create_dummy_skill(
                skill_id, skill_name, '<p>Dummy Explanation 1</p>')
            skill_services.save_new_skill(self.user_id, skill)
            for i in python_utils.RANGE(15):
                question_id = question_services.get_new_question_id()
                question_name = 'Question number %s %s' % (
                    python_utils.UNICODE(i), skill_name)
                question = self._create_dummy_question(
                    question_id, question_name, [skill_id])
                question_services.add_question(self.user_id, question)
                question_difficulty = list(
                    constants.SKILL_DIFFICULTY_LABEL_TO_FLOAT.values())
                random_difficulty = random.choice(question_difficulty)
                question_services.create_new_question_skill_link(
                    self.user_id, question_id, skill_id, random_difficulty)
        else:
            raise Exception('Cannot generate dummy skills in production.')
Example #11
    def generate_id(cls, platform: str,
                    submitted_on_datetime: datetime.datetime) -> str:
        """Generates key for the instance of AppFeedbackReportModel class in the
        required format with the arguments provided.

        Args:
            platform: str. The platform that the report was submitted from.
            submitted_on_datetime: datetime.datetime. The datetime that the
                report was submitted on in UTC.

        Returns:
            str. The generated ID for this entity using platform,
            submitted_on_msec, and a random hash, of the form
            '[platform].[submitted_on_msec].[random hash]'.
        """
        submitted_datetime_in_msec = utils.get_time_in_millisecs(
            submitted_on_datetime)
        for _ in python_utils.RANGE(base_models.MAX_RETRIES):
            random_hash = utils.convert_to_hash(
                python_utils.UNICODE(
                    utils.get_random_int(base_models.RAND_RANGE)),
                base_models.ID_LENGTH)
            new_id = '%s.%s.%s' % (platform, int(submitted_datetime_in_msec),
                                   random_hash)
            if not cls.get_by_id(new_id):
                return new_id
        raise Exception(
            'The id generator for AppFeedbackReportModel is producing too '
            'many collisions.')
Example #12
    def generate_id(cls, ticket_name: str) -> str:
        """Generates key for the instance of AppFeedbackReportTicketModel
        class in the required format with the arguments provided.

        Args:
            ticket_name: str. The name assigned to the ticket on creation.

        Returns:
            str. The generated ID for this entity using the current datetime in
            milliseconds (as the entity's creation timestamp), a SHA1 hash of
            the ticket_name, and a random string, of the form
            '[creation_datetime_msec].[hash(ticket_name)].[random hash]'.
        """
        current_datetime_in_msec = utils.get_time_in_millisecs(
            datetime.datetime.utcnow())
        for _ in python_utils.RANGE(base_models.MAX_RETRIES):
            name_hash = utils.convert_to_hash(ticket_name,
                                              base_models.ID_LENGTH)
            random_hash = utils.convert_to_hash(
                python_utils.UNICODE(
                    utils.get_random_int(base_models.RAND_RANGE)),
                base_models.ID_LENGTH)
            new_id = '%s.%s.%s' % (int(current_datetime_in_msec), name_hash,
                                   random_hash)
            if not cls.get_by_id(new_id):
                return new_id
        raise Exception(
            'The id generator for AppFeedbackReportTicketModel is producing '
            'too many collisions.')
Example #13
    def _generate_id(cls, exp_id: str) -> str:
        """Generates a unique id for the training job of the form
        '[exp_id].[random hash of 16 chars]'.

        Args:
            exp_id: str. ID of the exploration.

        Returns:
            str. ID of the new ClassifierTrainingJobModel instance.

        Raises:
            Exception. The id generator for ClassifierTrainingJobModel is
                producing too many collisions.
        """

        for _ in python_utils.RANGE(base_models.MAX_RETRIES):
            new_id = '%s.%s' % (
                exp_id,
                utils.convert_to_hash(
                    python_utils.UNICODE(
                        utils.get_random_int(base_models.RAND_RANGE)),
                    base_models.ID_LENGTH))
            if not cls.get_by_id(new_id):
                return new_id

        raise Exception(
            'The id generator for ClassifierTrainingJobModel is producing '
            'too many collisions.')
Example #14
    def test_retrieval_of_multiple_exploration_versions(self):
        # Update exploration to version 2.
        change_list = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_ADD_STATE,
                'state_name': 'New state',
            })
        ]
        exp_services.update_exploration(feconf.SYSTEM_COMMITTER_ID,
                                        self.EXP_1_ID, change_list, '')

        # Update exploration to version 3.
        change_list = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_ADD_STATE,
                'state_name': 'New state 2',
            })
        ]
        exp_services.update_exploration(feconf.SYSTEM_COMMITTER_ID,
                                        self.EXP_1_ID, change_list, '')

        exploration_latest = exp_fetchers.get_exploration_by_id(self.EXP_1_ID)
        latest_version = exploration_latest.version

        explorations = (
            exp_fetchers.
            get_multiple_versioned_exp_interaction_ids_mapping_by_version(
                self.EXP_1_ID, list(python_utils.RANGE(1,
                                                       latest_version + 1))))

        self.assertEqual(len(explorations), 3)
        self.assertEqual(explorations[0].version, 1)
        self.assertEqual(explorations[1].version, 2)
        self.assertEqual(explorations[2].version, 3)
Example #15
    def _generate_id(cls, intent: str) -> str:
        """Generates an ID for a new SentEmailModel instance.

        Args:
            intent: str. The intent string, i.e. the purpose of the email.
                Valid intent strings are defined in feconf.py.

        Returns:
            str. The newly-generated ID for the SentEmailModel instance.

        Raises:
            Exception. The id generator for SentEmailModel is producing
                too many collisions.
        """
        id_prefix = '%s.' % intent

        for _ in python_utils.RANGE(base_models.MAX_RETRIES):
            # Note: id_prefix already ends with a '.', so no extra separator
            # is needed here.
            new_id = '%s%s' % (
                id_prefix,
                utils.convert_to_hash(
                    python_utils.UNICODE(utils.get_random_int(
                        base_models.RAND_RANGE)),
                    base_models.ID_LENGTH))
            if not cls.get_by_id(new_id):
                return new_id

        raise Exception(
            'The id generator for SentEmailModel is producing too many '
            'collisions.')
Example #16
    def test_indexes_non_deleted_models(self) -> None:
        for i in python_utils.RANGE(5):
            exp_summary = self.create_model(
                exp_models.ExpSummaryModel,
                id='abcd%s' % i,
                deleted=False,
                title='title',
                category='category',
                objective='objective',
                language_code='lang',
                community_owned=False,
                status=constants.ACTIVITY_STATUS_PUBLIC
            )
            exp_summary.update_timestamps()
            exp_summary.put()

        add_docs_to_index_swap = self.swap_with_checks(
            platform_search_services,
            'add_documents_to_index',
            lambda _, __: None,
            expected_args=[
                (
                    [{
                        'id': 'abcd%s' % i,
                        'language_code': 'lang',
                        'title': 'title',
                        'category': 'category',
                        'tags': [],
                        'objective': 'objective',
                        'rank': 20,
                    }],
                    search_services.SEARCH_INDEX_EXPLORATIONS
                ) for i in python_utils.RANGE(5)
            ]
        )

        max_batch_size_swap = self.swap(
            cron_jobs.IndexExplorationsInSearch, 'MAX_BATCH_SIZE', 1)

        with add_docs_to_index_swap, max_batch_size_swap:
            self.assert_job_output_is([
                job_run_result.JobRunResult(stdout='SUCCESS 1 models indexed'),
                job_run_result.JobRunResult(stdout='SUCCESS 1 models indexed'),
                job_run_result.JobRunResult(stdout='SUCCESS 1 models indexed'),
                job_run_result.JobRunResult(stdout='SUCCESS 1 models indexed'),
                job_run_result.JobRunResult(stdout='SUCCESS 1 models indexed'),
            ])
Example #17
def get_collection_ids_matching_query(query_string,
                                      categories,
                                      language_codes,
                                      offset=None):
    """Returns a list with all collection ids matching the given search query
    string, as well as a search offset for future fetches.

    Args:
        query_string: str. The search query string.
        categories: list(str). The list of categories to query for. If it is
            empty, no category filter is applied to the results. If it is not
            empty, then a result is considered valid if it matches at least one
            of these categories.
        language_codes: list(str). The list of language codes to query for. If
            it is empty, no language code filter is applied to the results. If
            it is not empty, then a result is considered valid if it matches at
            least one of these language codes.
        offset: str or None. Offset indicating where, in the list of
            collections, to start the search from.

    Returns:
        2-tuple of (returned_collection_ids, search_offset), where:
            returned_collection_ids: list(str). A list with all collection ids
                matching the given search query string. The list contains
                exactly feconf.SEARCH_RESULTS_PAGE_SIZE results if there are at
                least that many, otherwise it contains all remaining results.
                (If this behaviour does not occur, an error will be logged.)
            search_offset: str or None. Search offset for future fetches, or
                None if there are no more results.
    """
    returned_collection_ids = []
    search_offset = offset

    for _ in python_utils.RANGE(MAX_ITERATIONS):
        remaining_to_fetch = feconf.SEARCH_RESULTS_PAGE_SIZE - len(
            returned_collection_ids)

        collection_ids, search_offset = search_services.search_collections(
            query_string,
            categories,
            language_codes,
            remaining_to_fetch,
            offset=search_offset)

        # Collection model cannot be None as we are fetching the collection ids
        # through query and there cannot be a collection id for which there is
        # no collection.
        for ind, _ in enumerate(
                collection_models.CollectionSummaryModel.get_multi(
                    collection_ids)):
            returned_collection_ids.append(collection_ids[ind])

        # The number of collections in a page is always less than or equal to
        # feconf.SEARCH_RESULTS_PAGE_SIZE.
        if len(returned_collection_ids) == feconf.SEARCH_RESULTS_PAGE_SIZE or (
                search_offset is None):
            break

    return (returned_collection_ids, search_offset)
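
# The loop above keeps fetching pages until either a full page of results
# has been accumulated or the backend signals exhaustion with a None offset.
# A self-contained sketch of that shape, with a stubbed search backend:
def fetch_page(search, page_size, max_iterations=10):
    """Accumulates up to page_size ids from repeated `search` calls."""
    results, offset = [], None
    for _ in range(max_iterations):
        batch, offset = search(page_size - len(results), offset)
        results.extend(batch)
        if len(results) == page_size or offset is None:
            break
    return results, offset

# Stub backend that returns at most 2 ids per call from a fixed pool.
POOL = ['c1', 'c2', 'c3', 'c4', 'c5']

def stub_search(limit, offset):
    start = offset or 0
    end = min(start + min(limit, 2), len(POOL))
    return POOL[start:end], (end if end < len(POOL) else None)

assert fetch_page(stub_search, 4) == (['c1', 'c2', 'c3', 'c4'], 4)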
Example #18
    def test_request_too_many_skills_raises_error(self) -> None:
        skill_ids = [
            'skill_id%s' % number for number in python_utils.RANGE(25)
        ]
        with self.assertRaisesRegexp(  # type: ignore[no-untyped-call]
                Exception, 'Please keep the number of skill IDs below 20.'):
            (question_models.QuestionSkillLinkModel.
             get_question_skill_links_equidistributed_by_skill(3, skill_ids))
Example #19
    def test_get_new_id_method_returns_unique_ids(self) -> None:
        ids: Set[str] = set([])
        for _ in python_utils.RANGE(100):
            new_id = base_models.BaseModel.get_new_id('')
            self.assertNotIn(new_id, ids)

            base_models.BaseModel(id=new_id).put()
            ids.add(new_id)
Example #20
    def test_get_comma_sep_string_from_list(self) -> None:
        """Test get_comma_sep_string_from_list method."""
        alist = ['a', 'b', 'c', 'd']
        results = ['', 'a', 'a and b', 'a, b and c', 'a, b, c and d']

        for i in python_utils.RANGE(len(alist) + 1):
            comma_sep_string = utils.get_comma_sep_string_from_list(alist[:i])
            self.assertEqual(comma_sep_string, results[i])
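
# The expected results above pin down the helper's behavior: an empty list
# yields '', one item yields the item, and otherwise all but the last item
# are joined by ', ' with ' and ' before the last. A minimal sketch
# consistent with those results (not necessarily the actual utils.py
# implementation):
def get_comma_sep_string_from_list(items):
    if not items:
        return ''
    if len(items) == 1:
        return items[0]
    return '%s and %s' % (', '.join(items[:-1]), items[-1])

assert get_comma_sep_string_from_list([]) == ''
assert get_comma_sep_string_from_list(['a', 'b']) == 'a and b'
assert get_comma_sep_string_from_list(['a', 'b', 'c', 'd']) == 'a, b, c and d'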
Example #21
    def setUp(self):
        super(OpportunityServicesUnitTest, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)

        self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)

        self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])

        self.TOPIC_ID = 'topic'
        self.STORY_ID = 'story'
        explorations = [self.save_new_valid_exploration(
            '%s' % i,
            self.owner_id,
            title='title %d' % i,
            category='category%d' % i,
            end_state_name='End State',
            correctness_feedback_enabled=True
        ) for i in python_utils.RANGE(5)]

        for exp in explorations:
            self.publish_exploration(self.owner_id, exp.id)

        topic = topic_domain.Topic.create_default_topic(
            self.TOPIC_ID, 'topic', 'abbrev', 'description')
        topic.thumbnail_filename = 'thumbnail.svg'
        topic.thumbnail_bg_color = '#C6DCDA'
        topic.subtopics = [
            topic_domain.Subtopic(
                1, 'Title', ['skill_id_1'], 'image.svg',
                constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131,
                'dummy-subtopic-url')]
        topic.next_subtopic_id = 2
        topic_services.save_new_topic(self.owner_id, topic)
        topic_services.publish_topic(self.TOPIC_ID, self.admin_id)

        story = story_domain.Story.create_default_story(
            self.STORY_ID, 'A story', 'Description', self.TOPIC_ID,
            'story-two')
        story_services.save_new_story(self.owner_id, story)
        topic_services.add_canonical_story(
            self.owner_id, self.TOPIC_ID, self.STORY_ID)
        topic_services.publish_story(
            self.TOPIC_ID, self.STORY_ID, self.admin_id)

        story_services.update_story(
            self.owner_id, self.STORY_ID, [story_domain.StoryChange({
                'cmd': 'add_story_node',
                'node_id': 'node_1',
                'title': 'Node1',
            }), story_domain.StoryChange({
                'cmd': 'update_story_node_property',
                'property_name': 'exploration_id',
                'node_id': 'node_1',
                'old_value': None,
                'new_value': '0'
            })], 'Changes.')
Example #22
    def test_grouper(self) -> None:
        self.assertEqual(
            [list(g) for g in utils.grouper(python_utils.RANGE(7), 3)],
            [[0, 1, 2], [3, 4, 5], [6, None, None]])
        # Returns an iterable of iterables, so we need to combine them into
        # strings for easier comparison.
        self.assertEqual(
            [''.join(g) for g in utils.grouper('ABCDEFG', 3, fillvalue='x')],
            ['ABC', 'DEF', 'Gxx'])
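
# The behavior checked above matches the classic itertools "grouper" recipe:
# chunk an iterable into fixed-size groups, padding the last group with a
# fill value. A self-contained sketch of that recipe (utils.grouper may be
# implemented differently):
import itertools

def grouper(iterable, chunk_len, fillvalue=None):
    # zip_longest over chunk_len copies of one iterator yields fixed-size
    # chunks, padding the final chunk with fillvalue.
    args = [iter(iterable)] * chunk_len
    return itertools.zip_longest(*args, fillvalue=fillvalue)

assert [list(g) for g in grouper(range(7), 3)] == (
    [[0, 1, 2], [3, 4, 5], [6, None, None]])
assert [''.join(g) for g in grouper('ABCDEFG', 3, fillvalue='x')] == (
    ['ABC', 'DEF', 'Gxx'])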
Example #23
    def test_request_too_many_skills_raises_error_when_fetch_by_difficulty(
            self) -> None:
        skill_ids = [
            'skill_id%s' % number for number in python_utils.RANGE(25)
        ]
        with self.assertRaisesRegexp(  # type: ignore[no-untyped-call]
                Exception, 'Please keep the number of skill IDs below 20.'):
            (question_models.QuestionSkillLinkModel.
             get_question_skill_links_based_on_difficulty_equidistributed_by_skill(  # pylint: disable=line-too-long
                 3, skill_ids, 0.6))
Example #24
    def test_execute_task_with_multiple_task(self):
        task_list = []
        for _ in python_utils.RANGE(6):
            task = concurrent_task_utils.create_task(
                test_function('unused_arg'), False, self.semaphore)
            task_list.append(task)
        with self.print_swap:
            concurrent_task_utils.execute_tasks(task_list, self.semaphore)
        expected_output = [s for s in self.task_stdout if 'FINISHED' in s]
        self.assertEqual(len(expected_output), 6)
Example #25
def try_upgrading_draft_to_exp_version(
        draft_change_list, current_draft_version, to_exp_version, exp_id):
    """Try upgrading a list of ExplorationChange domain objects to match the
    latest exploration version.

    For now, this handles the scenario where all commits between
    current_draft_version and to_exp_version migrate only the state schema.

    Args:
        draft_change_list: list(ExplorationChange). The list of
            ExplorationChange domain objects to upgrade.
        current_draft_version: int. Current draft version.
        to_exp_version: int. Target exploration version.
        exp_id: str. Exploration id.

    Returns:
        list(ExplorationChange) or None. A list of ExplorationChange domain
        objects after upgrade or None if upgrade fails.

    Raises:
        InvalidInputException. The current_draft_version is greater than
            to_exp_version.
    """
    if current_draft_version > to_exp_version:
        raise utils.InvalidInputException(
            'Current draft version is greater than the exploration version.')
    if current_draft_version == to_exp_version:
        return None

    exp_versions = list(
        python_utils.RANGE(current_draft_version + 1, to_exp_version + 1))
    commits_list = (
        exp_models.ExplorationCommitLogEntryModel.get_multi(
            exp_id, exp_versions))
    upgrade_times = 0
    while current_draft_version + upgrade_times < to_exp_version:
        commit = commits_list[upgrade_times]
        if (
                len(commit.commit_cmds) != 1 or
                commit.commit_cmds[0]['cmd'] !=
                exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION):
            return None
        conversion_fn_name = '_convert_states_v%s_dict_to_v%s_dict' % (
            commit.commit_cmds[0]['from_version'],
            commit.commit_cmds[0]['to_version'])
        if not hasattr(DraftUpgradeUtil, conversion_fn_name):
            logging.warning('%s is not implemented' % conversion_fn_name)
            return None
        conversion_fn = getattr(DraftUpgradeUtil, conversion_fn_name)
        try:
            draft_change_list = conversion_fn(draft_change_list)
        except InvalidDraftConversionException:
            return None
        upgrade_times += 1
    return draft_change_list
Example #26
    def test_fetch_ignores_obsolete_tasks(self):
        tasks = [
            self._new_obsolete_task(state_name='State %d' % (i, ))
            for i in python_utils.RANGE(50)
        ]
        improvements_services.put_tasks(tasks)
        open_tasks, resolved_task_types_by_state_name = (
            improvements_services.fetch_exploration_tasks(self.exp))

        self.assertEqual(open_tasks, [])
        self.assertEqual(resolved_task_types_by_state_name, {})
Example #27
    def test_call_counter_counts_the_number_of_times_a_function_gets_called(
            self):
        f = lambda x: x**2

        wrapped_function = test_utils.CallCounter(f)

        self.assertEqual(wrapped_function.times_called, 0)

        for i in python_utils.RANGE(5):
            self.assertEqual(wrapped_function(i), i**2)
            self.assertEqual(wrapped_function.times_called, i + 1)
Example #28
    def test_delete_multi(self) -> None:
        model_1_id = 'model_1_id'
        model_1 = TestVersionedModel(id=model_1_id)
        model_1.commit(feconf.SYSTEM_COMMITTER_ID, 'commit_msg', [])
        model_1.commit(feconf.SYSTEM_COMMITTER_ID, 'commit_msg', [])
        model_1.commit(feconf.SYSTEM_COMMITTER_ID, 'commit_msg', [])
        model_1_version_numbers = python_utils.RANGE(1, model_1.version + 1)
        model_1_snapshot_ids = [
            model_1.get_snapshot_id(model_1.id, version_number)
            for version_number in model_1_version_numbers
        ]

        model_2_id = 'model_2_id'
        model_2 = TestVersionedModel(id=model_2_id)
        model_2.commit(feconf.SYSTEM_COMMITTER_ID, 'commit_msg', [])
        model_2.commit(feconf.SYSTEM_COMMITTER_ID, 'commit_msg', [])
        model_2_version_numbers = python_utils.RANGE(1, model_2.version + 1)
        model_2_snapshot_ids = [
            model_2.get_snapshot_id(model_2.id, version_number)
            for version_number in model_2_version_numbers
        ]

        with self.swap(feconf, 'MAX_NUMBER_OF_OPS_IN_TRANSACTION', 2):
            TestVersionedModel.delete_multi([model_1_id, model_2_id],
                                            feconf.SYSTEM_COMMITTER_ID,
                                            'commit_msg',
                                            force_deletion=True)

        self.assertIsNone(TestVersionedModel.get_by_id(model_1_id))
        for model_snapshot_id in model_1_snapshot_ids:
            self.assertIsNone(
                TestSnapshotContentModel.get_by_id(model_snapshot_id))
            self.assertIsNone(
                TestSnapshotMetadataModel.get_by_id(model_snapshot_id))

        self.assertIsNone(TestVersionedModel.get_by_id(model_2_id))
        for model_snapshot_id in model_2_snapshot_ids:
            self.assertIsNone(
                TestSnapshotContentModel.get_by_id(model_snapshot_id))
            self.assertIsNone(
                TestSnapshotMetadataModel.get_by_id(model_snapshot_id))
Example #29
def _get_new_model_id(model_class: base_models.BaseModel) -> str:
    """Generates an ID for a new model.

    Returns:
        str. The new ID.
    """
    for _ in python_utils.RANGE(_MAX_ID_GENERATION_ATTEMPTS):
        new_id = utils.convert_to_hash(uuid.uuid4().hex, 22)
        if model_class.get(new_id, strict=False) is None:
            return new_id
    raise RuntimeError('Failed to generate a unique ID after %d attempts' % (
        _MAX_ID_GENERATION_ATTEMPTS))
Example #30
    def test_execute_task_with_exception(self):
        task_list = []
        for _ in python_utils.RANGE(6):
            task = concurrent_task_utils.create_task(
                test_function, True, self.semaphore)
            task_list.append(task)
        with self.print_swap:
            concurrent_task_utils.execute_tasks(task_list, self.semaphore)
        self.assertIn(
            'test_function() missing 1 required '
            'positional argument: \'unused_arg\'',
            self.task_stdout
        )