Example #1
class TopicSimilaritiesModel(base_models.BaseModel):
    """This model stores the similarity between any two topics. The topic
    similarities are stored as a JSON object, representing a 2D dict where the
    keys are topic names and the values are the similarities. The dict should
    be symmetric. A similarity value is a real number between 0.0 and 1.0.

    There should only be one instance of this class, and it is keyed by
    TOPIC_SIMILARITIES_ID.

    Currently, topics are the same as the default categories. However, this may
    change in the future.
    """

    content = datastore_services.JsonProperty(required=True)

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """Model doesn't contain any data directly corresponding to a user."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """Model does not contain user data."""
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(
            super(cls, cls).get_export_policy(),
            **{'content': base_models.EXPORT_POLICY.NOT_APPLICABLE})
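
A minimal usage sketch for the singleton pattern described in the docstring above. It assumes TopicSimilaritiesModel and TOPIC_SIMILARITIES_ID are in scope (imported from the surrounding storage module); the helper name is illustrative, not part of the Oppia codebase.

def update_topic_similarity(
        topic_1: str, topic_2: str, similarity: float) -> None:
    """Illustrative helper: records a symmetric similarity value."""
    # The model is a singleton keyed by TOPIC_SIMILARITIES_ID.
    model = TopicSimilaritiesModel.get(TOPIC_SIMILARITIES_ID, strict=False)
    if model is None:
        model = TopicSimilaritiesModel(id=TOPIC_SIMILARITIES_ID, content={})
    # Keep the 2D dict symmetric, as required by the class docstring.
    model.content.setdefault(topic_1, {})[topic_2] = similarity
    model.content.setdefault(topic_2, {})[topic_1] = similarity
    model.update_timestamps()
    model.put()
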
Example #2
class UnsentFeedbackEmailModel(base_models.BaseModel):
    """Model for storing feedback messages that need to be sent to creators.

    Instances of this model contain information about feedback messages that
    have been received by the site, but have not yet been sent to creators.
    The model instances will be deleted once the corresponding email has been
    sent.

    The id of each model instance is the user_id of the user who should receive
    the messages.
    """

    # The list of feedback messages that need to be sent to this user.
    # Each element in this list is a dict with keys 'entity_type', 'entity_id',
    # 'thread_id' and 'message_id'; this information is used to retrieve
    # corresponding FeedbackMessageModel instance.
    feedback_message_references = datastore_services.JsonProperty(
        repeated=True)
    # The number of failed attempts that have been made (so far) to
    # send an email to this user.
    retries = datastore_services.IntegerProperty(default=0,
                                                 required=True,
                                                 indexed=True)

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """Model contains data corresponding to a user: id field but it isn't
        deleted because it is needed for auditing purposes.
        """
        return base_models.DELETION_POLICY.KEEP

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """Model does not contain user data."""
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(
            super(cls, cls).get_export_policy(), **{
                'feedback_message_references':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'retries': base_models.EXPORT_POLICY.NOT_APPLICABLE
            })

    @classmethod
    def has_reference_to_user_id(cls, user_id: str) -> bool:
        """Check whether UnsentFeedbackEmailModel exists for user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether the model for user_id exists.
        """
        return cls.get_by_id(user_id) is not None
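
A hedged sketch of how an instance keyed by user_id might be built up, assuming the model class is in scope; enqueue_feedback_message is an illustrative helper, not Oppia's actual feedback service.

def enqueue_feedback_message(
        user_id: str, entity_type: str, entity_id: str,
        thread_id: str, message_id: int) -> None:
    """Illustrative helper: queues one feedback message reference."""
    # The model id is the recipient's user_id (see class docstring).
    model = UnsentFeedbackEmailModel.get(user_id, strict=False)
    if model is None:
        model = UnsentFeedbackEmailModel(
            id=user_id, feedback_message_references=[])
    # Each reference uses the keys documented on the property above.
    model.feedback_message_references.append({
        'entity_type': entity_type,
        'entity_id': entity_id,
        'thread_id': thread_id,
        'message_id': message_id,
    })
    model.update_timestamps()
    model.put()
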
Example #3
class ActivityReferencesModel(base_models.BaseModel):
    """Storage model for a list of activity references.

    The id of each model instance is the name of the list. This should be one
    of the constants in feconf.ALL_ACTIVITY_REFERENCE_LIST_TYPES.
    """

    # The types and ids of activities to show in the library page. Each item
    # in this list is a dict with two keys: 'type' and 'id'.
    activity_references = datastore_services.JsonProperty(repeated=True)

    @staticmethod
    def get_deletion_policy():
        # type: () -> base_models.DELETION_POLICY
        """Model doesn't contain any data directly corresponding to a user."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    @staticmethod
    def get_model_association_to_user():
        # type: () -> base_models.MODEL_ASSOCIATION_TO_USER
        """Model does not contain user data."""
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls):
        # type: () -> Dict[Text, base_models.EXPORT_POLICY]
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(super(cls, cls).get_export_policy(), **{
            'activity_references': base_models.EXPORT_POLICY.NOT_APPLICABLE
        })

    @classmethod
    def get_or_create(cls, list_name):
        # type: (Text) -> ActivityReferencesModel
        """This creates the relevant model instance, if it does not already
        exist.
        """
        if list_name not in feconf.ALL_ACTIVITY_REFERENCE_LIST_TYPES:
            raise Exception(
                'Invalid ActivityListModel id: %s' % list_name)

        entity = cls.get(list_name, strict=False)
        if entity is None:
            entity = cls(id=list_name, activity_references=[])
            entity.update_timestamps()
            entity.put()

        return entity
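
For context, a short sketch of how get_or_create() is typically used; add_activity_reference is an illustrative helper and assumes feconf and the model class are in scope.

def add_activity_reference(list_name, activity_type, activity_id):
    # type: (Text, Text, Text) -> None
    """Illustrative helper: appends one activity reference to a named list."""
    # get_or_create() raises if list_name is not one of
    # feconf.ALL_ACTIVITY_REFERENCE_LIST_TYPES.
    entity = ActivityReferencesModel.get_or_create(list_name)
    entity.activity_references.append(
        {'type': activity_type, 'id': activity_id})
    entity.update_timestamps()
    entity.put()
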
Example #4
class ConfigPropertyModel(base_models.VersionedModel):
    """A class that represents a named configuration property.

    The id is the name of the property.
    """

    SNAPSHOT_METADATA_CLASS = ConfigPropertySnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = ConfigPropertySnapshotContentModel

    # The property value.
    value = datastore_services.JsonProperty(indexed=False)

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """ConfigPropertyModel is not related to users."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """Model does not contain user data."""
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(super(cls, cls).get_export_policy(), **{
            'value': base_models.EXPORT_POLICY.NOT_APPLICABLE
        })

    # TODO(#13523): Change 'commit_cmds' to domain object/TypedDict to
    # remove Any from type-annotation below.
    # We have ignored [override] here because the signature of this method
    # doesn't match with VersionedModel.commit().
    # https://mypy.readthedocs.io/en/stable/error_code_list.html#check-validity-of-overrides-override
    def commit( # type: ignore[override]
            self,
            committer_id: str,
            commit_cmds: List[Dict[str, Any]]
    ) -> None:
        super(ConfigPropertyModel, self).commit(committer_id, '', commit_cmds)
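
A minimal sketch of how the commit() override might be called, assuming the model class is importable; the helper and the 'change_property_value' command name are illustrative assumptions, not verified against the Oppia codebase.

def set_config_property_value(
        committer_id: str, property_name: str, new_value: Any) -> None:
    """Illustrative helper: sets and commits a config property value."""
    # The model id is the property name (see class docstring).
    model = ConfigPropertyModel.get(property_name, strict=False)
    if model is None:
        model = ConfigPropertyModel(id=property_name)
    model.value = new_value
    # commit() takes only (committer_id, commit_cmds); the override above
    # always passes '' as the commit message.
    model.commit(
        committer_id,
        [{'cmd': 'change_property_value', 'new_value': new_value}])
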
Example #5
class JobModel(base_models.BaseModel):
    """Class representing a datastore entity for a long-running job."""

    # The job type.
    job_type = datastore_services.StringProperty(indexed=True)
    # The time at which the job was queued, in milliseconds since the epoch.
    time_queued_msec = datastore_services.FloatProperty(indexed=True)
    # The time at which the job was started, in milliseconds since the epoch.
    # This is never set if the job was canceled before it was started.
    time_started_msec = datastore_services.FloatProperty(indexed=True)
    # The time at which the job was completed, failed or canceled, in
    # milliseconds since the epoch.
    time_finished_msec = datastore_services.FloatProperty(indexed=True)
    # The current status code for the job.
    status_code = datastore_services.StringProperty(
        indexed=True,
        default=STATUS_CODE_NEW,
        choices=[
            STATUS_CODE_NEW, STATUS_CODE_QUEUED, STATUS_CODE_STARTED,
            STATUS_CODE_COMPLETED, STATUS_CODE_FAILED, STATUS_CODE_CANCELED
        ])
    # Any metadata for the job, such as the root pipeline id for mapreduce
    # jobs.
    metadata = datastore_services.JsonProperty(indexed=False)
    # The output of the job. This is only populated if the job has status code
    # STATUS_CODE_COMPLETED, and is None otherwise. If populated, this is
    # expected to be a list of strings.
    output = datastore_services.JsonProperty(indexed=False)
    # The error message, if applicable. Only populated if the job has status
    # code STATUS_CODE_FAILED or STATUS_CODE_CANCELED; None otherwise.
    error = datastore_services.TextProperty(indexed=False)
    # Whether the datastore models associated with this job have been cleaned
    # up (i.e., deleted).
    has_been_cleaned_up = (datastore_services.BooleanProperty(default=False,
                                                              indexed=True))
    # Store additional params passed with job.
    additional_job_params = datastore_services.JsonProperty(default=None)

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """Model doesn't contain any data directly corresponding to a user."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """Model does not contain user data."""
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(
            super(cls, cls).get_export_policy(), **{
                'job_type': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'time_queued_msec': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'time_started_msec': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'time_finished_msec': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'status_code': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'metadata': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'output': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'error': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'has_been_cleaned_up':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'additional_job_params':
                base_models.EXPORT_POLICY.NOT_APPLICABLE
            })

    @property
    def is_cancelable(self) -> bool:
        """Checks if the job is cancelable.

        Returns:
            bool. Whether the job's status_code is 'queued' or 'started'.
        """
        # Whether the job is currently in 'queued' or 'started' status.
        return self.status_code in [STATUS_CODE_QUEUED, STATUS_CODE_STARTED]

    @classmethod
    def get_all_unfinished_jobs(cls, limit: int) -> Sequence['JobModel']:
        """Gets at most `limit` unfinished jobs.

        Args:
            limit: int. A limit on the number of jobs to return.

        Returns:
            list(JobModel). A list of at most `limit` unfinished jobs.
        """
        return cls.query().filter(
            JobModel.status_code.IN([
                STATUS_CODE_QUEUED, STATUS_CODE_STARTED
            ])).order(-cls.time_queued_msec).fetch(limit)

    @classmethod
    def get_unfinished_jobs(cls, job_type: str) -> datastore_services.Query:
        """Gets jobs that are unfinished.

        Args:
            job_type: str. The type of jobs that may be unfinished.

        Returns:
            datastore_services.Query. A query for unfinished (queued or
            started) jobs of the given job_type.
        """
        return cls.query().filter(cls.job_type == job_type).filter(
            JobModel.status_code.IN([STATUS_CODE_QUEUED, STATUS_CODE_STARTED]))

    @classmethod
    def do_unfinished_jobs_exist(cls, job_type: str) -> bool:
        """Checks if unfinished jobs exist.

        Args:
            job_type: str. Type of job for which to check.

        Returns:
            bool. True if unfinished jobs exist, otherwise False.
        """
        return bool(cls.get_unfinished_jobs(job_type).count(limit=1))
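
A small sketch combining the query helpers above; report_unfinished_jobs and its return format are illustrative.

def report_unfinished_jobs(
        job_type: str, limit: int = 10
) -> List[Tuple[str, str, Optional[float]]]:
    """Illustrative helper: summarizes unfinished jobs of one type."""
    if not JobModel.do_unfinished_jobs_exist(job_type):
        return []
    # get_unfinished_jobs() returns a query; fetch a bounded number of models.
    return [
        (job.id, job.status_code, job.time_queued_msec)
        for job in JobModel.get_unfinished_jobs(job_type).fetch(limit)
    ]
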
Example #6
class BulkEmailModel(base_models.BaseModel):
    """Records the content of an email sent from Oppia to multiple users.

    This model is read-only; entries cannot be modified once created. The
    id/key of each instance of this model is a randomly generated string of
    length 12.
    """

    # The user IDs of the email recipients.
    recipient_ids = datastore_services.JsonProperty(default=[],
                                                    compressed=True)
    # The user ID of the email sender. For site-generated emails this is equal
    # to SYSTEM_COMMITTER_ID.
    sender_id = datastore_services.StringProperty(required=True, indexed=True)
    # The email address used to send the notification.
    sender_email = datastore_services.StringProperty(required=True)
    # The intent of the email.
    intent = datastore_services.StringProperty(
        required=True,
        indexed=True,
        choices=[
            feconf.BULK_EMAIL_INTENT_MARKETING,
            feconf.BULK_EMAIL_INTENT_IMPROVE_EXPLORATION,
            feconf.BULK_EMAIL_INTENT_CREATE_EXPLORATION,
            feconf.BULK_EMAIL_INTENT_CREATOR_REENGAGEMENT,
            feconf.BULK_EMAIL_INTENT_LEARNER_REENGAGEMENT,
            feconf.BULK_EMAIL_INTENT_ML_JOB_FAILURE
        ])
    # The subject line of the email.
    subject = datastore_services.TextProperty(required=True)
    # The HTML content of the email body.
    html_body = datastore_services.TextProperty(required=True)
    # The datetime the email was sent, in UTC.
    sent_datetime = (datastore_services.DateTimeProperty(required=True,
                                                         indexed=True))

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """Model contains data corresponding to a user: recipient_ids,
        sender_id, and sender_email, but this isn't deleted because this model
        is needed for auditing purposes.
        """
        return base_models.DELETION_POLICY.KEEP

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """Users already have access to this data since the emails were sent
        to them.
        """
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model contains data corresponding to a user, but this isn't exported
        because users already have access to noteworthy details of this data
        (since emails were sent to them).
        """
        return dict(
            super(cls, cls).get_export_policy(), **{
                'recipient_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'sender_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'sender_email': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'intent': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'subject': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'html_body': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'sent_datetime': base_models.EXPORT_POLICY.NOT_APPLICABLE
            })

    @classmethod
    def has_reference_to_user_id(cls, user_id: str) -> bool:
        """Check whether BulkEmailModel exists for user. Since recipient_ids
        can't be indexed it also can't be checked by this method, we can allow
        this because the deletion policy for this model is keep , thus even the
        deleted user's id will remain here.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        return (cls.query(cls.sender_id == user_id).get(keys_only=True)
                is not None)

    @classmethod
    def create(cls, instance_id: str, recipient_ids: List[str], sender_id: str,
               sender_email: str, intent: str, subject: str, html_body: str,
               sent_datetime: datetime.datetime) -> None:
        """Creates a new BulkEmailModel entry.

        Args:
            instance_id: str. The ID of the instance.
            recipient_ids: list(str). The user IDs of the email recipients.
            sender_id: str. The user ID of the email sender.
            sender_email: str. The email address used to send the notification.
            intent: str. The intent string, i.e. the purpose of the email.
            subject: str. The subject line of the email.
            html_body: str. The HTML content of the email body.
            sent_datetime: datetime.datetime. The date and time the email
                was sent, in UTC.
        """
        email_model_instance = cls(id=instance_id,
                                   recipient_ids=recipient_ids,
                                   sender_id=sender_id,
                                   sender_email=sender_email,
                                   intent=intent,
                                   subject=subject,
                                   html_body=html_body,
                                   sent_datetime=sent_datetime)
        email_model_instance.update_timestamps()
        email_model_instance.put()
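
A hedged example of calling create(); the helper, the id value and the choice of intent are illustrative, and datetime/feconf are assumed to be imported as in the module above.

def record_marketing_email(
        email_id: str, recipient_ids: List[str], sender_id: str,
        sender_email: str, subject: str, html_body: str) -> None:
    """Illustrative helper: records one sent bulk email."""
    # intent must be one of the choices declared on the property above.
    BulkEmailModel.create(
        email_id, recipient_ids, sender_id, sender_email,
        feconf.BULK_EMAIL_INTENT_MARKETING, subject, html_body,
        datetime.datetime.utcnow())
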
Example #7
class ExplorationOpportunitySummaryModel(base_models.BaseModel):
    """Summary of translation and voiceover opportunities in an exploration.

    The id of each instance is the id of the corresponding exploration.
    """

    # The id of the topic related to the opportunity.
    topic_id = datastore_services.StringProperty(required=True, indexed=True)
    # The name of the topic related to the opportunity.
    topic_name = datastore_services.StringProperty(required=True, indexed=True)
    # The id of the story related to the opportunity.
    story_id = datastore_services.StringProperty(required=True, indexed=True)
    # The title of the story related to the opportunity.
    story_title = datastore_services.StringProperty(required=True, indexed=True)
    # The title of the story chapter that corresponds to the exploration.
    chapter_title = (
        datastore_services.StringProperty(required=True, indexed=True))
    # The number of content items in the exploration.
    content_count = (
        datastore_services.IntegerProperty(required=True, indexed=True))
    # The language codes for which translation of the exploration is
    # incomplete.
    incomplete_translation_language_codes = datastore_services.StringProperty(
        repeated=True, indexed=True)
    # A dict with language codes as keys and the number of translated content
    # items in that language as values.
    translation_counts = (
        datastore_services.JsonProperty(default={}, indexed=False))
    # The language codes for which voice artists have already been assigned.
    language_codes_with_assigned_voice_artists = (
        datastore_services.StringProperty(repeated=True, indexed=True))
    # The language codes for which voice artists still need to be assigned.
    language_codes_needing_voice_artists = datastore_services.StringProperty(
        repeated=True, indexed=True)

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """Model doesn't contain any data directly corresponding to a user."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """Model does not contain user data."""
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(super(cls, cls).get_export_policy(), **{
            'topic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'topic_name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'story_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'story_title': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'chapter_title': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'content_count': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'incomplete_translation_language_codes':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'translation_counts': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'language_codes_with_assigned_voice_artists':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'language_codes_needing_voice_artists':
                base_models.EXPORT_POLICY.NOT_APPLICABLE
        })

    # TODO(#13523): Change the return value of the function below from
    # tuple(list, str|None, bool) to a domain object.
    @classmethod
    def get_all_translation_opportunities(
            cls,
            page_size: int,
            urlsafe_start_cursor: Optional[str],
            language_code: str,
            topic_name: str
    ) -> Tuple[
        Sequence[ExplorationOpportunitySummaryModel], Optional[str], bool
    ]:
        """Returns a list of opportunities available for translation in a
        specific language.

        Args:
            page_size: int. The maximum number of entities to be returned.
            urlsafe_start_cursor: str or None. If provided, the list of
                returned entities starts from this datastore cursor.
                Otherwise, the returned entities start from the beginning
                of the full list of entities.
            language_code: str. The language for which translation opportunities
                are to be fetched.
            topic_name: str. The topic for which translation opportunities
                should be fetched. If topic_name is empty, fetch translation
                opportunities from all topics.

        Returns:
            3-tuple of (results, cursor, more). As described in fetch_page() at:
            https://developers.google.com/appengine/docs/python/ndb/queryclass,
            where:
                results: list(ExplorationOpportunitySummaryModel). A list
                    of query results.
                cursor: str or None. A query cursor pointing to the next
                    batch of results. If there are no more results, this might
                    be None.
                more: bool. If True, there are (probably) more results after
                    this batch. If False, there are no further results after
                    this batch.
        """
        if urlsafe_start_cursor:
            start_cursor = datastore_services.make_cursor(
                urlsafe_cursor=urlsafe_start_cursor)
        else:
            start_cursor = datastore_services.make_cursor()

        language_query = cls.query(
            cls.incomplete_translation_language_codes == language_code
        ).order(cls.topic_name)

        if topic_name:
            language_query = language_query.filter(cls.topic_name == topic_name)

        fetch_result: Tuple[
            Sequence[ExplorationOpportunitySummaryModel],
            datastore_services.Cursor,
            bool
        ] = language_query.fetch_page(page_size, start_cursor=start_cursor)
        results, cursor, _ = fetch_result

        # TODO(#13462): Refactor this so that we don't do the lookup.
        # Do a forward lookup so that we can know if there are more values.
        fetch_result = (
            language_query.fetch_page(page_size + 1, start_cursor=start_cursor))
        plus_one_query_models, _, _ = fetch_result
        more_results = len(plus_one_query_models) == page_size + 1

        # cursor.urlsafe() returns bytes, which we decode to a string.
        return (
            results,
            (cursor.urlsafe().decode('utf-8') if cursor else None),
            more_results
        )

    # TODO(#13523): Change the return value of the function below from
    # tuple(list, str|None, bool) to a domain object.
    @classmethod
    def get_all_voiceover_opportunities(
        cls,
        page_size: int,
        urlsafe_start_cursor: Optional[str],
        language_code: str
    ) -> Tuple[
        Sequence[ExplorationOpportunitySummaryModel], Optional[str], bool
    ]:
        """Returns a list of opportunities available for voiceover in a
        specific language.

        Args:
            page_size: int. The maximum number of entities to be returned.
            urlsafe_start_cursor: str or None. If provided, the list of
                returned entities starts from this datastore cursor.
                Otherwise, the returned entities start from the beginning
                of the full list of entities.
            language_code: str. The language for which voiceover opportunities
                are to be fetched.

        Returns:
            3-tuple of (results, cursor, more). As described in fetch_page() at:
            https://developers.google.com/appengine/docs/python/ndb/queryclass,
            where:
                results: list(ExplorationOpportunitySummaryModel). A list
                    of query results.
                cursor: str or None. A query cursor pointing to the next
                    batch of results. If there are no more results, this might
                    be None.
                more: bool. If True, there are (probably) more results after
                    this batch. If False, there are no further results after
                    this batch.
        """
        start_cursor = datastore_services.make_cursor(
            urlsafe_cursor=urlsafe_start_cursor)

        language_created_on_query = cls.query(
            cls.language_codes_needing_voice_artists == language_code
        ).order(cls.created_on)

        fetch_result: Tuple[
            Sequence[ExplorationOpportunitySummaryModel],
            datastore_services.Cursor,
            bool
        ] = language_created_on_query.fetch_page(
            page_size, start_cursor=start_cursor)
        results, cursor, _ = fetch_result
        # TODO(#13462): Refactor this so that we don't do the lookup.
        # Do a forward lookup so that we can know if there are more values.
        fetch_result = language_created_on_query.fetch_page(
            page_size + 1, start_cursor=start_cursor)
        plus_one_query_models, _, _ = fetch_result
        more_results = len(plus_one_query_models) == page_size + 1
        # cursor.urlsafe() returns bytes, which we decode to a string.
        return (
            results,
            (cursor.urlsafe().decode('utf-8') if cursor else None),
            more_results
        )

    @classmethod
    def get_by_topic(
        cls, topic_id: str
    ) -> Sequence[ExplorationOpportunitySummaryModel]:
        """Returns all the models corresponding to the specific topic.

        Returns:
            list(ExplorationOpportunitySummaryModel). A list of
            ExplorationOpportunitySummaryModel having given topic_id.
        """
        return cls.query(cls.topic_id == topic_id).fetch()
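
A sketch of paging through the (results, cursor, more) contract documented above; collect_translation_opportunity_ids is an illustrative helper name.

def collect_translation_opportunity_ids(
        language_code: str, topic_name: str, page_size: int = 50
) -> List[str]:
    """Illustrative helper: gathers the ids of all matching opportunities."""
    opportunity_ids: List[str] = []
    cursor: Optional[str] = None
    more = True
    while more:
        # Each call resumes from the urlsafe cursor returned previously.
        results, cursor, more = (
            ExplorationOpportunitySummaryModel
            .get_all_translation_opportunities(
                page_size, cursor, language_code, topic_name))
        opportunity_ids.extend(model.id for model in results)
    return opportunity_ids
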
Example #8
class EntityTranslationsModel(base_models.BaseModel):
    """Model for storing entity translations."""

    # The id of the corresponding entity.
    entity_id = datastore_services.StringProperty(required=True, indexed=True)
    # The type of the corresponding entity.
    entity_type = datastore_services.StringProperty(
        required=True,
        indexed=True,
        choices=[feconf.ENTITY_TYPE_EXPLORATION, feconf.ENTITY_TYPE_QUESTION])
    # The version of the corresponding entity.
    entity_version = datastore_services.IntegerProperty(required=True,
                                                        indexed=True)
    # The ISO 639-1 code for the language an entity is written in.
    language_code = datastore_services.StringProperty(required=True,
                                                      indexed=True)
    # A dict representing content-id as keys and dict(TranslatedContent)
    # as values.
    translations = datastore_services.JsonProperty(required=True)

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """Model doesn't contain any data directly corresponding to a user."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """Model does not contain user data."""
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(
            super(cls, cls).get_export_policy(), **{
                'entity_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'entity_type': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'entity_version': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'translations': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            })

    @staticmethod
    def _generate_id(entity_type: feconf.TranslatableEntityType,
                     entity_id: str, entity_version: int,
                     language_code: str) -> str:
        """Generates the ID for an entity translations model.

        Args:
            entity_type: TranslatableEntityType. The type of the entity.
            entity_id: str. The ID of the entity.
            entity_version: int. The version of the entity.
            language_code: str. The language code for the entity.

        Returns:
            str. Returns a unique id of the form
            [entity_type]-[entity_id]-[entity_version]-[language_code].
        """
        return '%s-%s-%s-%s' % (entity_type.value, entity_id, entity_version,
                                language_code)

    @classmethod
    def get_model(cls, entity_type: feconf.TranslatableEntityType,
                  entity_id: str, entity_version: int,
                  language_code: str) -> EntityTranslationsModel:
        """Gets EntityTranslationsModel by help of entity_type, entity_id,
        entity_version and language_code.

        Args:
            entity_type: TranslatableEntityType. The type of the entity whose
                translations are to be fetched.
            entity_id: str. The ID of the entity whose translations are to be
                fetched.
            entity_version: int. The version of the entity whose translations
                are to be fetched.
            language_code: str. The language code of the entity whose
                translations are to be fetched.

        Returns:
            EntityTranslationsModel. The EntityTranslationsModel
            instance corresponding to the given inputs, if such a translation
            exists, or None if no translation is found.
        """
        model_id = cls._generate_id(entity_type, entity_id, entity_version,
                                    language_code)
        return cls.get_by_id(model_id)

    @classmethod
    def get_all_for_entity(
            cls, entity_type: feconf.TranslatableEntityType, entity_id: str,
            entity_version: int
    ) -> Sequence[Optional[EntityTranslationsModel]]:
        """Gets EntityTranslationsModels corresponding to the given entity, for
        all languages in which such models exist.

        Args:
            entity_type: TranslatableEntityType. The type of the entity whose
                translations are to be fetched.
            entity_id: str. The ID of the entity whose translations are to be
                fetched.
            entity_version: int. The version of the entity whose translations
                are to be fetched.

        Returns:
            list(EntityTranslationsModel|None). The EntityTranslationsModel
            instances corresponding to the given inputs, if such translations
            exist.
        """
        return cls.query(cls.entity_type == entity_type.value,
                         cls.entity_id == entity_id,
                         cls.entity_version == entity_version).fetch()

    @classmethod
    def create_new(
        cls, entity_type: feconf.TranslatableEntityType, entity_id: str,
        entity_version: int, language_code: str,
        translations: Dict[str, feconf.TranslatedContentDict]
    ) -> EntityTranslationsModel:
        """Creates and returns a new EntityTranslationsModel instance.

        Args:
            entity_type: TranslatableEntityType. The type of the entity.
            entity_id: str. The ID of the entity.
            entity_version: int. The version of the entity.
            language_code: str. The language code for the entity.
            translations: dict(str, TranslatedContentDict). A dict representing
                content-id as keys and dict(TranslatedContent) as values.

        Returns:
            EntityTranslationsModel. Returns a new EntityTranslationsModel.
        """
        return cls(id=cls._generate_id(entity_type, entity_id, entity_version,
                                       language_code),
                   entity_type=entity_type.value,
                   entity_id=entity_id,
                   entity_version=entity_version,
                   language_code=language_code,
                   translations=translations)
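
A short sketch of creating and saving one translations model; add_exploration_translation is an illustrative helper, and it assumes feconf.TranslatableEntityType.EXPLORATION is the enum member for explorations.

def add_exploration_translation(
        exp_id: str, exp_version: int, language_code: str,
        translations: Dict[str, feconf.TranslatedContentDict]) -> str:
    """Illustrative helper: stores translations for one exploration version."""
    # create_new() only builds the instance; it still has to be saved.
    model = EntityTranslationsModel.create_new(
        feconf.TranslatableEntityType.EXPLORATION, exp_id, exp_version,
        language_code, translations)
    model.update_timestamps()
    model.put()
    # The generated id has the form
    # [entity_type]-[entity_id]-[entity_version]-[language_code].
    return model.id
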
Example #9
class GeneralSuggestionModel(base_models.BaseModel):
    """Model to store suggestions made by Oppia users.

    The ID of the suggestions created is the same as the ID of the thread
    linked to the suggestion.
    """

    # We use the model id as a key in the Takeout dict.
    ID_IS_USED_AS_TAKEOUT_KEY = True

    # The type of suggestion.
    suggestion_type = datastore_services.StringProperty(
        required=True, indexed=True, choices=feconf.SUGGESTION_TYPE_CHOICES)
    # The type of the target entity which the suggestion is linked to.
    target_type = datastore_services.StringProperty(
        required=True,
        indexed=True,
        choices=feconf.SUGGESTION_TARGET_TYPE_CHOICES)
    # The ID of the target entity being suggested to.
    target_id = datastore_services.StringProperty(required=True, indexed=True)
    # The version number of the target entity at the time of creation of the
    # suggestion.
    target_version_at_submission = datastore_services.IntegerProperty(
        required=True, indexed=True)
    # Status of the suggestion.
    status = datastore_services.StringProperty(required=True,
                                               indexed=True,
                                               choices=STATUS_CHOICES)
    # The ID of the author of the suggestion.
    author_id = datastore_services.StringProperty(required=True, indexed=True)
    # The ID of the reviewer who accepted/rejected the suggestion.
    final_reviewer_id = datastore_services.StringProperty(indexed=True)
    # The change command linked to the suggestion. Contains the details of the
    # change.
    change_cmd = datastore_services.JsonProperty(required=True)
    # The category in which to score the suggester. This field contains two
    # values separated by a '.': the first is a value from SCORE_TYPE_CHOICES
    # and the second is the subcategory of the suggestion.
    score_category = (datastore_services.StringProperty(required=True,
                                                        indexed=True))
    # The ISO 639-1 code used to query suggestions by language, or None if the
    # suggestion type is not queryable by language.
    language_code = datastore_services.StringProperty(indexed=True)
    # A flag that indicates whether the suggestion is edited by the reviewer.
    edited_by_reviewer = datastore_services.BooleanProperty(default=False,
                                                            indexed=True)

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """Model contains data to pseudonymize corresponding to a user:
        author_id, and final_reviewer_id fields.
        """
        return base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """Model is exported as multiple unshared instance since there
        are multiple suggestions per user.
        """
        return base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model contains data to export corresponding to a user."""
        return dict(
            super(cls, cls).get_export_policy(),
            **{
                'suggestion_type': base_models.EXPORT_POLICY.EXPORTED,
                'target_type': base_models.EXPORT_POLICY.EXPORTED,
                'target_id': base_models.EXPORT_POLICY.EXPORTED,
                'target_version_at_submission':
                base_models.EXPORT_POLICY.EXPORTED,
                'status': base_models.EXPORT_POLICY.EXPORTED,
                # The author_id and final_reviewer_id are not exported since
                # we do not want to reveal internal user ids.
                'author_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'final_reviewer_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'change_cmd': base_models.EXPORT_POLICY.EXPORTED,
                'score_category': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'language_code': base_models.EXPORT_POLICY.EXPORTED,
                'edited_by_reviewer': base_models.EXPORT_POLICY.EXPORTED
            })

    @classmethod
    def has_reference_to_user_id(cls, user_id: str) -> bool:
        """Check whether GeneralSuggestionModel exists for the user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        return cls.query(
            datastore_services.any_of(cls.author_id == user_id,
                                      cls.final_reviewer_id == user_id)).get(
                                          keys_only=True) is not None

    # TODO(#13523): Change 'change_cmd' to TypedDict/Domain Object
    # to remove Any used below.
    @classmethod
    def create(cls, suggestion_type: str, target_type: str, target_id: str,
               target_version_at_submission: int, status: str, author_id: str,
               final_reviewer_id: str, change_cmd: Dict[str, Any],
               score_category: str, thread_id: str,
               language_code: Optional[str]) -> None:
        """Creates a new SuggestionModel entry.

        Args:
            suggestion_type: str. The type of the suggestion.
            target_type: str. The type of target entity being edited.
            target_id: str. The ID of the target entity being edited.
            target_version_at_submission: int. The version number of the target
                entity at the time of creation of the suggestion.
            status: str. The status of the suggestion.
            author_id: str. The ID of the user who submitted the suggestion.
            final_reviewer_id: str. The ID of the reviewer who has
                accepted/rejected the suggestion.
            change_cmd: dict. The actual content of the suggestion.
            score_category: str. The scoring category for the suggestion.
            thread_id: str. The ID of the feedback thread linked to the
                suggestion.
            language_code: str|None. The ISO 639-1 code used to query
                suggestions by language, or None if the suggestion type is not
                queryable by language.

        Raises:
            Exception. There is already a suggestion with the given id.
        """
        instance_id = thread_id

        if cls.get_by_id(instance_id):
            raise Exception('There is already a suggestion with the given'
                            ' id: %s' % instance_id)

        cls(id=instance_id,
            suggestion_type=suggestion_type,
            target_type=target_type,
            target_id=target_id,
            target_version_at_submission=target_version_at_submission,
            status=status,
            author_id=author_id,
            final_reviewer_id=final_reviewer_id,
            change_cmd=change_cmd,
            score_category=score_category,
            language_code=language_code).put()

    @classmethod
    def query_suggestions(
        cls, query_fields_and_values: List[Tuple[str, str]]
    ) -> List['GeneralSuggestionModel']:
        """Queries for suggestions.

        Args:
            query_fields_and_values: list(tuple(str, str)). A list of queries.
                The first element in each tuple is the field to be queried, and
                the second element is the corresponding value to query for.

        Returns:
            list(SuggestionModel). A list of suggestions that match the given
            query values, up to a maximum of feconf.DEFAULT_QUERY_LIMIT
            suggestions.

        Raises:
            Exception. A queried field is not in
                feconf.ALLOWED_SUGGESTION_QUERY_FIELDS.
        """
        query = cls.query()
        for (field, value) in query_fields_and_values:
            if field not in feconf.ALLOWED_SUGGESTION_QUERY_FIELDS:
                raise Exception('Not allowed to query on field %s' % field)
            query = query.filter(getattr(cls, field) == value)

        return cast(List[GeneralSuggestionModel],
                    query.fetch(feconf.DEFAULT_QUERY_LIMIT))

    @classmethod
    def get_translation_suggestions_in_review_with_exp_id(
            cls, exp_id: str,
            language_code: str) -> List['GeneralSuggestionModel']:
        """Returns translation suggestions which are in review with target_id
        == exp_id.

        Args:
            exp_id: str. Exploration ID matching the target ID of the
                translation suggestions.
            language_code: str. Language code.

        Returns:
            list(SuggestionModel). A list of translation suggestions in review
            with target_id of exp_id. The number of returned results is capped
            by feconf.DEFAULT_QUERY_LIMIT.
        """
        return cast(
            List[GeneralSuggestionModel],
            cls.get_all().filter(cls.status == STATUS_IN_REVIEW).filter(
                cls.language_code == language_code).filter(
                    cls.suggestion_type ==
                    feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT).filter(
                        cls.target_id == exp_id).fetch(
                            feconf.DEFAULT_QUERY_LIMIT))

    @classmethod
    def get_translation_suggestion_ids_with_exp_ids(
            cls, exp_ids: List[str]) -> List[str]:
        """Gets the ids of translation suggestions corresponding to
        explorations with the given exploration ids.

        Args:
            exp_ids: list(str). List of exploration ids to query for.

        Returns:
            list(str). A list of translation suggestion ids that
            correspond to the given exploration ids. Note: it is not
            guaranteed that the suggestion ids returned are ordered by the
            exploration ids in exp_ids.
        """
        query = (cls.get_all().filter(
            cls.suggestion_type ==
            feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT).filter(
                cls.target_id.IN(exp_ids)))
        suggestion_models = []
        offset, more = (0, True)
        while more:
            results = cast(
                List[GeneralSuggestionModel],
                query.fetch(feconf.DEFAULT_QUERY_LIMIT, offset=offset))
            if len(results):
                offset = offset + len(results)
                suggestion_models.extend(results)
            else:
                more = False
        return [suggestion_model.id for suggestion_model in suggestion_models]

    @classmethod
    def get_all_stale_suggestion_ids(cls) -> List[str]:
        """Gets the ids of the suggestions which were last updated before the
        threshold time.

        Returns:
            list(str). A list of the ids of the suggestions that are stale.
        """
        threshold_time = (
            datetime.datetime.utcnow() -
            datetime.timedelta(0, 0, 0, THRESHOLD_TIME_BEFORE_ACCEPT_IN_MSECS))
        suggestion_models = cast(
            List[GeneralSuggestionModel],
            cls.get_all().filter(cls.status == STATUS_IN_REVIEW).filter(
                cls.last_updated < threshold_time).fetch())
        return [suggestion_model.id for suggestion_model in suggestion_models]

    @classmethod
    def get_suggestions_waiting_too_long_for_review(
            cls) -> List['GeneralSuggestionModel']:
        """Returns a list of suggestions that have been waiting for a review
        longer than SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS days on the
        Contributor Dashboard. MAX_NUMBER_OF_SUGGESTIONS_TO_EMAIL_ADMIN
        suggestions are returned, sorted in descending order by their review
        wait time.

        Returns:
            list(GeneralSuggestionModel). A list of suggestions, sorted in
            descending order by their review wait time.

        Raises:
            Exception. If there are no suggestion types offered on the
                Contributor Dashboard.
        """
        if not feconf.CONTRIBUTOR_DASHBOARD_SUGGESTION_TYPES:
            raise Exception(
                'Expected the suggestion types offered on the Contributor '
                'Dashboard to be nonempty.')
        threshold_time = (datetime.datetime.utcnow() - datetime.timedelta(
            days=SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS))
        return cast(
            List[GeneralSuggestionModel],
            cls.get_all().filter(cls.status == STATUS_IN_REVIEW).filter(
                cls.last_updated < threshold_time).filter(
                    cls.suggestion_type.IN(
                        feconf.CONTRIBUTOR_DASHBOARD_SUGGESTION_TYPES)).order(
                            cls.last_updated).fetch(
                                MAX_NUMBER_OF_SUGGESTIONS_TO_EMAIL_ADMIN))

    @classmethod
    def get_in_review_suggestions_in_score_categories(
            cls, score_categories: List[str],
            user_id: str) -> List['GeneralSuggestionModel']:
        """Gets all suggestions which are in review in the given
        score_categories.

        Args:
            score_categories: list(str). List of score categories to query for.
            user_id: str. The id of the user trying to make this query.
                As a user cannot review their own suggestions, suggestions
                authored by the user will be excluded.

        Returns:
            list(SuggestionModel). A list of suggestions that are in the given
            score categories, which are in review, but not created by the
            given user.
        """
        if len(score_categories) == 0:
            raise Exception('Received empty list of score categories')

        return cast(
            List[GeneralSuggestionModel],
            cls.get_all().filter(cls.status == STATUS_IN_REVIEW).filter(
                cls.score_category.IN(score_categories)).filter(
                    cls.author_id != user_id).fetch(
                        feconf.DEFAULT_QUERY_LIMIT))

    @classmethod
    def get_in_review_translation_suggestions(
            cls, user_id: str,
            language_codes: List[str]) -> List['GeneralSuggestionModel']:
        """Gets all translation suggestions which are in review.

        Args:
            user_id: str. The id of the user trying to make this query.
                As a user cannot review their own suggestions, suggestions
                authored by the user will be excluded.
            language_codes: list(str). The list of language codes.

        Returns:
            list(SuggestionModel). A list of suggestions that are of the given
            type, which are in review, but not created by the given user.
        """
        return cast(
            List[GeneralSuggestionModel],
            cls.get_all().filter(cls.status == STATUS_IN_REVIEW).filter(
                cls.suggestion_type == feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT
            ).filter(cls.author_id != user_id).filter(
                cls.language_code.IN(language_codes)).fetch(
                    feconf.DEFAULT_QUERY_LIMIT))

    @classmethod
    def get_in_review_question_suggestions(
            cls, user_id: str) -> List['GeneralSuggestionModel']:
        """Gets all question suggestions which are in review.

        Args:
            user_id: str. The id of the user trying to make this query.
                As a user cannot review their own suggestions, suggestions
                authored by the user will be excluded.

        Returns:
            list(SuggestionModel). A list of suggestions that are of the given
            type, which are in review, but not created by the given user.
        """
        return cast(
            List[GeneralSuggestionModel],
            cls.get_all().filter(cls.status == STATUS_IN_REVIEW).filter(
                cls.suggestion_type == feconf.SUGGESTION_TYPE_ADD_QUESTION).
            filter(cls.author_id != user_id).fetch(feconf.DEFAULT_QUERY_LIMIT))

    @classmethod
    def get_question_suggestions_waiting_longest_for_review(
            cls) -> List['GeneralSuggestionModel']:
        """Returns MAX_QUESTION_SUGGESTIONS_TO_FETCH_FOR_REVIEWER_EMAILS number
        of question suggestions, sorted in descending order by review wait
        time.

        Returns:
            list(GeneralSuggestionModel). A list of question suggestions,
            sorted in descending order based on how long the suggestions have
            been waiting for review.
        """
        return cast(
            List[GeneralSuggestionModel],
            cls.get_all().filter(cls.status == STATUS_IN_REVIEW).filter(
                cls.suggestion_type ==
                feconf.SUGGESTION_TYPE_ADD_QUESTION).order(
                    cls.last_updated).fetch(
                        MAX_QUESTION_SUGGESTIONS_TO_FETCH_FOR_REVIEWER_EMAILS))

    @classmethod
    def get_translation_suggestions_waiting_longest_for_review(
            cls, language_code: str) -> List['GeneralSuggestionModel']:
        """Returns MAX_TRANSLATION_SUGGESTIONS_TO_FETCH_FOR_REVIEWER_EMAILS
        number of translation suggestions in the specified language code,
        sorted in descending order by review wait time.

        Args:
            language_code: str. The ISO 639-1 language code of the translation
                suggestions.

        Returns:
            list(GeneralSuggestionModel). A list of translation suggestions,
            sorted in descending order based on how long the suggestions have
            been waiting for review.
        """
        return cast(
            List[GeneralSuggestionModel],
            cls.get_all().filter(cls.status == STATUS_IN_REVIEW).filter(
                cls.suggestion_type == feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT
            ).filter(cls.language_code == language_code).order(
                cls.last_updated).fetch(
                    MAX_TRANSLATION_SUGGESTIONS_TO_FETCH_FOR_REVIEWER_EMAILS))

    @classmethod
    def get_user_created_suggestions_of_suggestion_type(
            cls, suggestion_type: str,
            user_id: str) -> List['GeneralSuggestionModel']:
        """Gets all suggestions of suggestion_type which the user has created.

        Args:
            suggestion_type: str. The type of suggestion to query for.
            user_id: str. The id of the user trying to make this query.

        Returns:
            list(SuggestionModel). A list of suggestions that are of the given
            type, which the given user has created.
        """
        return cast(
            List[GeneralSuggestionModel],
            cls.get_all().filter(
                cls.suggestion_type == suggestion_type).filter(
                    cls.author_id == user_id).order(-cls.created_on).fetch(
                        feconf.DEFAULT_QUERY_LIMIT))

    @classmethod
    def get_all_score_categories(cls) -> List[str]:
        """Gets all the score categories for which suggestions have been
        created.

        Returns:
            list(str). A list of all the score categories.
        """
        query_set = cast(
            List[GeneralSuggestionModel],
            cls.query(projection=['score_category'], distinct=True))
        return [data.score_category for data in query_set]

    # TODO(#13523): Change 'change_cmd' to TypedDict/Domain Object
    # to remove Any used below.
    @classmethod
    def export_data(
        cls, user_id: str
    ) -> Dict[str, Dict[str, Union[str, int, bool, Dict[str, Any], None]]]:
        """Exports the data from GeneralSuggestionModel
        into dict format for Takeout.

        Args:
            user_id: str. The ID of the user whose data should be exported.

        Returns:
            dict. Dictionary of the data from GeneralSuggestionModel.
        """

        user_data = {}
        suggestion_models = cast(
            List[GeneralSuggestionModel],
            cls.get_all().filter(cls.author_id == user_id).fetch())

        for suggestion_model in suggestion_models:
            user_data[suggestion_model.id] = {
                'suggestion_type':
                suggestion_model.suggestion_type,
                'target_type':
                suggestion_model.target_type,
                'target_id':
                suggestion_model.target_id,
                'target_version_at_submission':
                (suggestion_model.target_version_at_submission),
                'status':
                suggestion_model.status,
                'change_cmd':
                suggestion_model.change_cmd,
                'language_code':
                suggestion_model.language_code,
                'edited_by_reviewer':
                suggestion_model.edited_by_reviewer
            }

        return user_data
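
A hedged sketch of using query_suggestions(); the helper is illustrative and assumes 'target_type' and 'target_id' appear in feconf.ALLOWED_SUGGESTION_QUERY_FIELDS.

def get_suggestions_for_target(
        target_type: str, target_id: str) -> List['GeneralSuggestionModel']:
    """Illustrative helper: fetches suggestions linked to one target entity."""
    # query_suggestions() raises an Exception for fields that are not in
    # feconf.ALLOWED_SUGGESTION_QUERY_FIELDS.
    return GeneralSuggestionModel.query_suggestions([
        ('target_type', target_type),
        ('target_id', target_id),
    ])
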
Example #10
class QuestionModel(base_models.VersionedModel):
    """Model for storing Questions.

    The IDs of instances of this class are random hashes of 12 chars.
    """

    SNAPSHOT_METADATA_CLASS = QuestionSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = QuestionSnapshotContentModel
    COMMIT_LOG_ENTRY_CLASS = QuestionCommitLogEntryModel
    ALLOW_REVERT = True

    # An object representing the question state data.
    question_state_data = (datastore_services.JsonProperty(indexed=False,
                                                           required=True))
    # The schema version for the question state data.
    question_state_data_schema_version = datastore_services.IntegerProperty(
        required=True, indexed=True)
    # The ISO 639-1 code for the language this question is written in.
    language_code = (datastore_services.StringProperty(required=True,
                                                       indexed=True))
    # The skill ids linked to this question.
    linked_skill_ids = datastore_services.StringProperty(indexed=True,
                                                         repeated=True)
    # The optional skill misconception ids marked as not relevant to the
    # question.
    # Note: Misconception ids are represented in two ways. In the Misconception
    # domain object the id is a number. But in the context of a question
    # (used here), the skill id needs to be included along with the
    # misconception id, because questions can have multiple skills
    # attached to them. Hence, the format for this field will be
    # <skill-id>-<misconceptionid>.
    inapplicable_skill_misconception_ids = datastore_services.StringProperty(
        indexed=True, repeated=True)

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """Model doesn't contain any data directly corresponding to a user."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """Model does not contain user data."""
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(
            super(cls, cls).get_export_policy(), **{
                'question_state_data':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'question_state_data_schema_version':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'language_code':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'linked_skill_ids':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'inapplicable_skill_misconception_ids':
                base_models.EXPORT_POLICY.NOT_APPLICABLE
            })

    @classmethod
    def _get_new_id(cls) -> str:
        """Generates a unique ID for the question in the form of random hash
        of 12 chars.

        Returns:
            new_id: str. ID of the new QuestionModel instance.

        Raises:
            Exception. The ID generator for QuestionModel is
                producing too many collisions.
        """

        for _ in python_utils.RANGE(base_models.MAX_RETRIES):
            new_id = utils.convert_to_hash(
                python_utils.UNICODE(
                    utils.get_random_int(base_models.RAND_RANGE)),
                base_models.ID_LENGTH)
            if not cls.get_by_id(new_id):
                return new_id

        raise Exception(
            'The id generator for QuestionModel is producing too many '
            'collisions.')

    # TODO(#13523): Change 'commit_cmds' to TypedDict/Domain Object
    # to remove Any used below.
    def _trusted_commit(self, committer_id: str, commit_type: str,
                        commit_message: str,
                        commit_cmds: List[Dict[str, Any]]) -> None:
        """Record the event to the commit log after the model commit.

        Note that this extends the superclass method.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_type: str. The type of commit. Possible values are in
                core.storage.base_models.COMMIT_TYPE_CHOICES.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information to
                reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and then additional arguments for that command.
        """
        super(QuestionModel, self)._trusted_commit(committer_id, commit_type,
                                                   commit_message, commit_cmds)

        question_commit_log = QuestionCommitLogEntryModel.create(
            self.id, self.version, committer_id, commit_type, commit_message,
            commit_cmds, constants.ACTIVITY_STATUS_PUBLIC, False)
        question_commit_log.question_id = self.id
        question_commit_log.update_timestamps()
        question_commit_log.put()

    # TODO(#13523): Change 'question_state_data' to TypedDict/Domain Object
    # to remove Any used below.
    @classmethod
    def create(
            cls, question_state_data: Dict[str, Any], language_code: str,
            version: int, linked_skill_ids: List[str],
            inapplicable_skill_misconception_ids: List[str]
    ) -> 'QuestionModel':
        """Creates a new QuestionModel entry.

        Args:
            question_state_data: dict. A dict representing the question
                state data.
            language_code: str. The ISO 639-1 code for the language this
                question is written in.
            version: int. The version of the question.
            linked_skill_ids: list(str). The skill ids linked to the question.
            inapplicable_skill_misconception_ids: list(str). The optional
                skill misconception ids marked as not applicable to the
                question.

        Returns:
            QuestionModel. Instance of the new QuestionModel entry.

        Raises:
            Exception. The ID generator for QuestionModel is producing too
                many collisions.
        """
        instance_id = cls._get_new_id()
        question_model_instance = cls(
            id=instance_id,
            question_state_data=question_state_data,
            language_code=language_code,
            version=version,
            linked_skill_ids=linked_skill_ids,
            inapplicable_skill_misconception_ids=(
                inapplicable_skill_misconception_ids))

        return question_model_instance

    @classmethod
    def put_multi_questions(cls, questions: List['QuestionModel']) -> None:
        """Puts multiple question models into the datastore.

        Args:
            questions: list(Question). The list of question objects
                to put into the datastore.
        """
        cls.update_timestamps_multi(questions)
        cls.put_multi(questions)
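
A rough usage sketch for QuestionModel; the state dict and skill ids below are placeholder values, and in practice creation is typically wrapped by the question services layer rather than done against the model directly:

question = QuestionModel.create(
    question_state_data={'content': {'html': '<p>What is 2 + 2?</p>'}},
    language_code='en',
    version=1,
    linked_skill_ids=['skill_1'],
    inapplicable_skill_misconception_ids=['skill_1-0'])
QuestionModel.put_multi_questions([question])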
Example #11
class StateTrainingJobsMappingModel(base_models.BaseModel):
    """Model for mapping exploration attributes to a ClassifierTrainingJob.

    The IDs of instances of this class have the form
    [exp_id].[exp_version].[state_name].
    """

    # The ID of the exploration to whose state this model belongs.
    exp_id = datastore_services.StringProperty(required=True, indexed=True)
    # The exploration version at the time the corresponding classifier's
    # training job was created.
    exp_version = (
        datastore_services.IntegerProperty(required=True, indexed=True))
    # The name of the state to which the model belongs.
    state_name = datastore_services.StringProperty(required=True, indexed=True)
    # The IDs of the training jobs corresponding to the exploration state. Each
    # algorithm_id corresponding to the interaction of the exploration state is
    # mapped to its unique job_id.
    algorithm_ids_to_job_ids = datastore_services.JsonProperty(
        required=True, indexed=True)

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """Model doesn't contain any data directly corresponding to a user."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """Model does not contain user data."""
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(super(cls, cls).get_export_policy(), **{
            'exp_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'exp_version': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'state_name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'algorithm_ids_to_job_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE
        })

    @classmethod
    def _generate_id(
            cls,
            exp_id: str,
            exp_version: int,
            state_name: str
    ) -> str:
        """Generates a unique ID for the Classifier Exploration Mapping of the
        form [exp_id].[exp_version].[state_name].

        Args:
            exp_id: str. ID of the exploration.
            exp_version: int. The exploration version at the time
                this training job was created.
            state_name: unicode. The name of the state to which the classifier
                belongs.

        Returns:
            str. ID of the new StateTrainingJobsMappingModel instance.
        """
        return '%s.%s.%s' % (exp_id, exp_version, state_name)

    @classmethod
    def get_models(
            cls,
            exp_id: str,
            exp_version: int,
            state_names: List[str]
    ) -> List[Optional['StateTrainingJobsMappingModel']]:
        """Retrieves the Classifier Exploration Mapping models given Exploration
        attributes.

        Args:
            exp_id: str. ID of the exploration.
            exp_version: int. The exploration version at the time
                this training job was created.
            state_names: list(unicode). The state names for which we retrieve
                the mapping models.

        Returns:
            list(StateTrainingJobsMappingModel|None). The model instances
            for the state training jobs mapping.
        """
        mapping_ids = []
        for state_name in state_names:
            mapping_id = cls._generate_id(exp_id, exp_version, state_name)
            mapping_ids.append(mapping_id)
        mapping_instances = cls.get_multi(mapping_ids)
        return mapping_instances

    @classmethod
    def get_model(
            cls,
            exp_id: str,
            exp_version: int,
            state_name: str
    ) -> Optional['StateTrainingJobsMappingModel']:
        """Retrieves the Classifier Exploration Mapping model for given
        exploration.

        Args:
            exp_id: str. ID of the exploration.
            exp_version: int. The exploration version at the time
                this training job was created.
            state_name: unicode. The state name for which we retrieve
                the mapping model.

        Returns:
            StateTrainingJobsMappingModel|None. The model instance for the
            state training jobs mapping. Returns None if no entry for the
            given <exp_id, exp_version, state_name> is found.
        """
        mapping_id = cls._generate_id(exp_id, exp_version, state_name)
        model = cls.get_by_id(mapping_id)
        return model

    @classmethod
    def create(
            cls,
            exp_id: str,
            exp_version: int,
            state_name: str,
            algorithm_ids_to_job_ids: Dict[str, str]
    ) -> str:
        """Creates a new ClassifierExplorationMappingModel entry.

        Args:
            exp_id: str. ID of the exploration.
            exp_version: int. The exploration version at the time
                this training job was created.
            state_name: unicode. The name of the state to which the classifier
                belongs.
            algorithm_ids_to_job_ids: dict(str, str). The mapping from
                algorithm IDs to the IDs of their corresponding classifier
                training jobs.

        Returns:
            str. ID of the new StateTrainingJobsMappingModel entry.

        Raises:
            Exception. A model with the same ID already exists.
        """

        instance_id = cls._generate_id(exp_id, exp_version, state_name)
        if not cls.get_by_id(instance_id):
            mapping_instance = cls(
                id=instance_id, exp_id=exp_id, exp_version=exp_version,
                state_name=state_name,
                algorithm_ids_to_job_ids=algorithm_ids_to_job_ids)

            mapping_instance.update_timestamps()
            mapping_instance.put()
            return instance_id
        raise Exception('A model with the same ID already exists.')

    @classmethod
    def create_multi(
            cls,
            state_training_jobs_mappings: List['StateTrainingJobsMappingModel']
    ) -> List[str]:
        """Creates multiple new StateTrainingJobsMappingModel entries.

        Args:
            state_training_jobs_mappings: list(StateTrainingJobsMapping). The
                list of StateTrainingJobsMapping domain objects.

        Returns:
            list(str). The list of mapping IDs.
        """
        mapping_models = []
        mapping_ids = []
        for state_training_job_mapping in state_training_jobs_mappings:
            instance_id = cls._generate_id(
                state_training_job_mapping.exp_id,
                state_training_job_mapping.exp_version,
                state_training_job_mapping.state_name)
            mapping_instance = cls(
                id=instance_id, exp_id=state_training_job_mapping.exp_id,
                exp_version=state_training_job_mapping.exp_version,
                state_name=state_training_job_mapping.state_name,
                algorithm_ids_to_job_ids=(
                    state_training_job_mapping.algorithm_ids_to_job_ids
                ))

            mapping_models.append(mapping_instance)
            mapping_ids.append(instance_id)
        cls.update_timestamps_multi(mapping_models)
        cls.put_multi(mapping_models)
        return mapping_ids
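
Because the id is the deterministic composite [exp_id].[exp_version].[state_name], lookups are key fetches rather than queries. A hedged round-trip sketch with placeholder values:

mapping_id = StateTrainingJobsMappingModel.create(
    exp_id='exp_1', exp_version=3, state_name='Introduction',
    algorithm_ids_to_job_ids={'TextClassifier': 'job_123'})
# Later, the same attributes recover the entry without scanning the table.
mapping = StateTrainingJobsMappingModel.get_model('exp_1', 3, 'Introduction')
assert mapping is not None and mapping.id == mapping_id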
Example #12
class AppFeedbackReportModel(base_models.BaseModel):
    """Model for storing feedback reports sent from learners.

    Instances of this model contain information about the learner's device and
    Oppia app settings, as well as information provided by the user in the
    feedback report.

    The id of each model instance is determined by concatenating the platform,
    the timestamp of the report's submission date (in msec since epoch, in UTC),
    and a hash of a string representation of a random int.
    """

    # We use the model id as a key in the Takeout dict.
    ID_IS_USED_AS_TAKEOUT_KEY = True

    # The platform (web or Android) that the report is sent from and that the
    # feedback corresponds to.
    platform = datastore_services.StringProperty(required=True,
                                                 indexed=True,
                                                 choices=PLATFORM_CHOICES)
    # The ID of the user that scrubbed this report, if it has been scrubbed.
    scrubbed_by = datastore_services.StringProperty(required=False,
                                                    indexed=True)
    # Unique ID for the ticket this report is assigned to (see
    # AppFeedbackReportTicketModel for how this is constructed). This defaults
    # to None since initially, new reports received will not be assigned to a
    # ticket.
    ticket_id = datastore_services.StringProperty(required=False, indexed=True)
    # The local datetime of when the report was submitted by the user on their
    # device. This may be much earlier than the model entity's creation date if
    # the report was locally cached for a long time on an Android device.
    submitted_on = datastore_services.DateTimeProperty(required=True,
                                                       indexed=True)
    # The number of hours offset from UTC of the user's local timezone.
    local_timezone_offset_hrs = datastore_services.IntegerProperty(
        required=False, indexed=True)
    # The type of feedback for this report; this can be an arbitrary string
    # since future iterations of the report structure may introduce new types
    # and we cannot rely on the backend updates to fully sync with the frontend
    # report updates.
    report_type = datastore_services.StringProperty(required=True,
                                                    indexed=True)
    # The category that this feedback is for. Possible categories include:
    # suggestion_feature, suggestion_language, suggestion_other,
    # issue_lesson_question, issue_general_language, issue_audio_language,
    # issue_text_language, issue_topics, issue_profile, issue_other, crash.
    category = datastore_services.StringProperty(required=True, indexed=True)
    # The version of the app; on Android this is the package version name (e.g.
    # 0.1-alpha-abcdef1234) and on web this is the release version (e.g. 3.0.8).
    platform_version = datastore_services.StringProperty(required=True,
                                                         indexed=True)
    # The entry point location that the user is accessing the feedback report
    # from on both web & Android devices. Possible entry points include:
    # navigation_drawer, lesson_player, revision_card, or crash.
    entry_point = datastore_services.StringProperty(required=True,
                                                    indexed=True)
    # Additional topic / story / exploration IDs that may be collected depending
    # on the entry_point used to send the report; a lesson player entry point
    # will have topic_id, story_id, and exploration_id, while revision cards
    # will have topic_id and subtopic_id.
    entry_point_topic_id = datastore_services.StringProperty(required=False,
                                                             indexed=True)
    entry_point_story_id = datastore_services.StringProperty(required=False,
                                                             indexed=True)
    entry_point_exploration_id = datastore_services.StringProperty(
        required=False, indexed=True)
    entry_point_subtopic_id = datastore_services.IntegerProperty(
        required=False, indexed=True)
    # The ISO-639 code of the text language on Oppia set by the user; this is
    # set in Oppia's app preferences on all platforms.
    text_language_code = datastore_services.StringProperty(required=True,
                                                           indexed=True)
    # The audio language ISO-639 code on Oppia set by the user; this is set in
    # Oppia's app preferences on all platforms.
    audio_language_code = datastore_services.StringProperty(required=True,
                                                            indexed=True)
    # The user's country locale represented as an ISO-3166 code; the locale is
    # determined by the user's Android device settings.
    android_device_country_locale_code = datastore_services.StringProperty(
        required=False, indexed=True)
    # The Android device model used to submit the report.
    android_device_model = datastore_services.StringProperty(required=False,
                                                             indexed=True)
    # The Android SDK version on the user's device.
    android_sdk_version = datastore_services.IntegerProperty(required=False,
                                                             indexed=True)
    # The feedback collected for Android reports; None if the platform is 'web'.
    android_report_info = datastore_services.JsonProperty(required=False,
                                                          indexed=False)
    # The schema version for the feedback report info; None if the platform is
    # 'web'.
    android_report_info_schema_version = datastore_services.IntegerProperty(
        required=False, indexed=True)
    # The feedback collected for Web reports; None if the platform is 'android'.
    web_report_info = datastore_services.JsonProperty(required=False,
                                                      indexed=False)
    # The schema version for the feedback report info; None if the platform is
    # 'android'.
    web_report_info_schema_version = datastore_services.IntegerProperty(
        required=False, indexed=True)

    # TODO(#13523): Change 'android_report_info' and 'web_report_info' to domain
    # objects/TypedDict to remove Any from type-annotation below.
    @classmethod
    def create(cls, entity_id: str, platform: str,
               submitted_on: datetime.datetime, local_timezone_offset_hrs: int,
               report_type: str, category: str, platform_version: str,
               android_device_country_locale_code: Optional[str],
               android_sdk_version: Optional[int],
               android_device_model: Optional[str], entry_point: str,
               entry_point_topic_id: Optional[str],
               entry_point_story_id: Optional[str],
               entry_point_exploration_id: Optional[str],
               entry_point_subtopic_id: Optional[int], text_language_code: str,
               audio_language_code: str,
               android_report_info: Optional[Dict[str, Any]],
               web_report_info: Optional[Dict[str, Any]]) -> str:
        """Creates a new AppFeedbackReportModel instance and returns its ID.

        Args:
            entity_id: str. The ID used for this entity.
            platform: str. The platform the report is submitted on.
            submitted_on: datetime.datetime. The date and time the report was
                submitted, in the user's local time zone.
            local_timezone_offset_hrs: int. The hours offset from UTC of the
                user's local time zone.
            report_type: str. The type of report.
            category: str. The category the report is providing feedback on.
            platform_version: str. The version of Oppia that the report was
                submitted on.
            android_device_country_locale_code: str|None. The ISO-3166 code for
                the user's country locale or None if it's a web report.
            android_sdk_version: int|None. The SDK version running when on the
                device or None if it's a web report.
            android_device_model: str|None. The device model of the Android
                device, or None if it's a web report.
            entry_point: str. The entry point used to start the report.
            entry_point_topic_id: str|None. The current topic ID depending on
                the type of entry point used.
            entry_point_story_id: str|None. The current story ID depending on
                the type of entry point used.
            entry_point_exploration_id: str|None. The current exploration ID
                depending on the type of entry point used.
            entry_point_subtopic_id: int|None. The current subtopic ID depending
                on the type of entry point used.
            text_language_code: str. The ISO-639 language code for the text
                language set by the user on the Oppia app.
            audio_language_code: str. The language code for the audio language
                set by the user on the Oppia app, as defined by Oppia (not
                necessarily an ISO-639 code).
            android_report_info: dict|None. The information collected as part
                of the Android-specific feedback report.
            web_report_info: dict|None. The information collected as part of the
                web-specific feedback report.

        Returns:
            str. The ID of the newly created AppFeedbackReportModel instance.
        """
        android_schema_version = None
        web_schema_version = None
        if platform == PLATFORM_CHOICE_ANDROID:
            android_schema_version = (
                feconf.CURRENT_ANDROID_REPORT_SCHEMA_VERSION)
        else:
            web_schema_version = (feconf.CURRENT_WEB_REPORT_SCHEMA_VERSION)
        report_entity = cls(
            id=entity_id,
            platform=platform,
            submitted_on=submitted_on,
            local_timezone_offset_hrs=local_timezone_offset_hrs,
            report_type=report_type,
            category=category,
            platform_version=platform_version,
            android_device_country_locale_code=(
                android_device_country_locale_code),
            android_sdk_version=android_sdk_version,
            android_device_model=android_device_model,
            entry_point=entry_point,
            entry_point_topic_id=entry_point_topic_id,
            entry_point_exploration_id=entry_point_exploration_id,
            entry_point_story_id=entry_point_story_id,
            entry_point_subtopic_id=entry_point_subtopic_id,
            text_language_code=text_language_code,
            audio_language_code=audio_language_code,
            android_report_info=android_report_info,
            android_report_info_schema_version=android_schema_version,
            web_report_info=web_report_info,
            web_report_info_schema_version=web_schema_version)
        report_entity.update_timestamps()
        report_entity.put()
        return entity_id

    @classmethod
    def generate_id(cls, platform: str,
                    submitted_on_datetime: datetime.datetime) -> str:
        """Generates key for the instance of AppFeedbackReportModel class in the
        required format with the arguments provided.

        Args:
            platform: str. The platform the user is submitting the report from.
            submitted_on_datetime: datetime.datetime. The datetime that the
                report was submitted on in UTC.

        Returns:
            str. The generated ID for this entity, constructed from the
            platform, the submission timestamp in msec, and a random hash, in
            the form '[platform].[submitted_on_msec].[random hash]'.
        """
        submitted_datetime_in_msec = utils.get_time_in_millisecs(
            submitted_on_datetime)
        for _ in python_utils.RANGE(base_models.MAX_RETRIES):
            random_hash = utils.convert_to_hash(
                python_utils.UNICODE(
                    utils.get_random_int(base_models.RAND_RANGE)),
                base_models.ID_LENGTH)
            new_id = '%s.%s.%s' % (platform, int(submitted_datetime_in_msec),
                                   random_hash)
            if not cls.get_by_id(new_id):
                return new_id
        raise Exception(
            'The id generator for AppFeedbackReportModel is producing too '
            'many collisions.')

    @classmethod
    def get_all_unscrubbed_expiring_report_models(
            cls) -> Sequence['AppFeedbackReportModel']:
        """Fetches the reports that are past their 90-days in storage and must
        be scrubbed.

        Returns:
            list(AppFeedbackReportModel). A list of AppFeedbackReportModel
            entities that need to be scrubbed.
        """
        datetime_now = datetime.datetime.utcnow()
        datetime_before_which_to_scrub = datetime_now - (
            feconf.APP_FEEDBACK_REPORT_MAXIMUM_LIFESPAN +
            datetime.timedelta(days=1))
        # The below return checks for '== None' rather than 'is None' since
        # the latter throws "Cannot filter a non-Node argument; received False".
        report_models: Sequence['AppFeedbackReportModel'] = cls.query(
            cls.created_on < datetime_before_which_to_scrub,
            cls.scrubbed_by == None).fetch()  # pylint: disable=singleton-comparison
        return report_models

    @classmethod
    def get_filter_options_for_field(cls, filter_field: str) -> List[str]:
        """Fetches values that can be used to filter reports by.

        Args:
            filter_field: FILTER_FIELD_NAMES. The enum value of the field we
                to fetch all possible values for.

        Returns:
            list(str). The possible values that the field name can have.
        """
        query = cls.query(projection=[filter_field.name],
                          distinct=True)  # type: ignore[attr-defined]
        filter_values = []
        if filter_field == FILTER_FIELD_NAMES.report_type:
            filter_values = [model.report_type for model in query]
        elif filter_field == FILTER_FIELD_NAMES.platform:
            filter_values = [model.platform for model in query]
        elif filter_field == FILTER_FIELD_NAMES.entry_point:
            filter_values = [model.entry_point for model in query]
        elif filter_field == FILTER_FIELD_NAMES.submitted_on:
            filter_values = [model.submitted_on.date() for model in query]
        elif filter_field == FILTER_FIELD_NAMES.android_device_model:
            filter_values = [model.android_device_model for model in query]
        elif filter_field == FILTER_FIELD_NAMES.android_sdk_version:
            filter_values = [model.android_sdk_version for model in query]
        elif filter_field == FILTER_FIELD_NAMES.text_language_code:
            filter_values = [model.text_language_code for model in query]
        elif filter_field == FILTER_FIELD_NAMES.audio_language_code:
            filter_values = [model.audio_language_code for model in query]
        elif filter_field == FILTER_FIELD_NAMES.platform_version:
            filter_values = [model.platform_version for model in query]
        elif filter_field == (
                FILTER_FIELD_NAMES.android_device_country_locale_code):
            filter_values = [
                model.android_device_country_locale_code for model in query
            ]
        else:
            raise utils.InvalidInputException(
                'The field %s is not a valid field to filter reports on' %
                (filter_field.name))  # type: ignore[attr-defined]
        return filter_values

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """Model stores the user ID of who has scrubbed this report for auditing
        purposes but otherwise does not contain data directly corresponding to
        the user themselves.
        """
        return base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model contains data referencing user and will be exported."""
        return dict(
            super(cls, cls).get_export_policy(), **{
                'platform':
                base_models.EXPORT_POLICY.EXPORTED,
                'scrubbed_by':
                base_models.EXPORT_POLICY.EXPORTED,
                'ticket_id':
                base_models.EXPORT_POLICY.EXPORTED,
                'submitted_on':
                base_models.EXPORT_POLICY.EXPORTED,
                'local_timezone_offset_hrs':
                base_models.EXPORT_POLICY.EXPORTED,
                'report_type':
                base_models.EXPORT_POLICY.EXPORTED,
                'category':
                base_models.EXPORT_POLICY.EXPORTED,
                'platform_version':
                base_models.EXPORT_POLICY.EXPORTED,
                'android_device_country_locale_code':
                (base_models.EXPORT_POLICY.NOT_APPLICABLE),
                'android_device_model':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'android_sdk_version':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'entry_point':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'entry_point_topic_id':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'entry_point_story_id':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'entry_point_exploration_id':
                (base_models.EXPORT_POLICY.NOT_APPLICABLE),
                'entry_point_subtopic_id':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'text_language_code':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'audio_language_code':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'android_report_info':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'android_report_info_schema_version':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'web_report_info':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'web_report_info_schema_version':
                base_models.EXPORT_POLICY.NOT_APPLICABLE
            })

    @classmethod
    def export_data(cls, user_id: str) -> Dict[str, Dict[str, str]]:
        """Exports the data from AppFeedbackReportModel into dict format for
        Takeout.

        Args:
            user_id: str. The ID of the user whose data should be exported;
                this would be the ID of the user who has scrubbed the report.

        Returns:
            dict. Dictionary of the data from AppFeedbackReportModel.
        """
        user_data = {}
        report_models: Sequence[AppFeedbackReportModel] = (
            cls.get_all().filter(cls.scrubbed_by == user_id).fetch())
        for report_model in report_models:
            submitted_on_msec = utils.get_time_in_millisecs(
                report_model.submitted_on)
            user_data[report_model.id] = {
                'scrubbed_by':
                report_model.scrubbed_by,
                'platform':
                report_model.platform,
                'ticket_id':
                report_model.ticket_id,
                'submitted_on':
                utils.get_human_readable_time_string(submitted_on_msec),
                'local_timezone_offset_hrs':
                (report_model.local_timezone_offset_hrs),
                'report_type':
                report_model.report_type,
                'category':
                report_model.category,
                'platform_version':
                report_model.platform_version
            }
        return user_data

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """Model is exported as multiple instances per user since there
        may be multiple reports relevant to a single user.
        """
        return base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER

    @staticmethod
    def get_lowest_supported_role() -> str:
        """The lowest supported role for feedback reports will be moderator."""
        return feconf.ROLE_ID_MODERATOR

    @classmethod
    def has_reference_to_user_id(cls, user_id: str) -> bool:
        """Check whether AppFeedbackReportModel exists for user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether a model is associated with the user.
        """
        return cls.query(cls.scrubbed_by == user_id).get(
            keys_only=True) is not None
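
A hedged sketch of how a handler might store an incoming Android report with the model above; all field values are placeholders, and the datetime import is assumed to be available at module level (as it already is for this model):

submitted_on = datetime.datetime.utcnow()
report_id = AppFeedbackReportModel.generate_id(
    PLATFORM_CHOICE_ANDROID, submitted_on)
AppFeedbackReportModel.create(
    entity_id=report_id,
    platform=PLATFORM_CHOICE_ANDROID,
    submitted_on=submitted_on,
    local_timezone_offset_hrs=2,
    report_type='suggestion',
    category='suggestion_feature',
    platform_version='0.1-alpha-abcdef1234',
    android_device_country_locale_code='in',
    android_sdk_version=28,
    android_device_model='Pixel 4a',
    entry_point='navigation_drawer',
    entry_point_topic_id=None,
    entry_point_story_id=None,
    entry_point_exploration_id=None,
    entry_point_subtopic_id=None,
    text_language_code='en',
    audio_language_code='en',
    android_report_info={'user_feedback_selected_items': []},
    web_report_info=None)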
Example #13
class SubtopicPageModel(base_models.VersionedModel):
    """Model for storing Subtopic pages.

    This stores the HTML data for a subtopic page.
    """

    SNAPSHOT_METADATA_CLASS = SubtopicPageSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = SubtopicPageSnapshotContentModel
    COMMIT_LOG_ENTRY_CLASS = SubtopicPageCommitLogEntryModel
    ALLOW_REVERT = False

    # The topic id that this subtopic is a part of.
    topic_id = datastore_services.StringProperty(required=True, indexed=True)
    # The JSON data of the subtopic page, consisting of the subtitled_html,
    # recorded_voiceovers and written_translations fields.
    page_contents = datastore_services.JsonProperty(required=True)
    # The schema version for the page_contents field.
    page_contents_schema_version = datastore_services.IntegerProperty(
        required=True, indexed=True)
    # The ISO 639-1 code for the language this subtopic page is written in.
    language_code = (
        datastore_services.StringProperty(required=True, indexed=True))

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """Model doesn't contain any data directly corresponding to a user."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    # TODO(#13523): Change 'commit_cmds' to TypedDict/Domain Object
    # to remove Any used below.
    def compute_models_to_commit(
        self,
        committer_id: str,
        commit_type: str,
        commit_message: str,
        commit_cmds: List[Dict[str, Any]],
        # We expect Mapping because we want to allow models that inherit
        # from BaseModel as the values, if we used Dict this wouldn't
        # be allowed.
        additional_models: Mapping[str, base_models.BaseModel]
    ) -> base_models.ModelsToPutDict:
        """Record the event to the commit log after the model commit.

        Note that this extends the superclass method.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_type: str. The type of commit. Possible values are in
                core.storage.base_models.COMMIT_TYPE_CHOICES.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information to
                reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and then additional arguments for that command.
            additional_models: dict(str, BaseModel). Additional models that are
                needed for the commit process.

        Returns:
            ModelsToPutDict. A dict of models that should be put into
            the datastore.
        """
        models_to_put = super().compute_models_to_commit(
            committer_id,
            commit_type,
            commit_message,
            commit_cmds,
            additional_models
        )

        subtopic_page_commit_log_entry = SubtopicPageCommitLogEntryModel.create(
            self.id, self.version, committer_id, commit_type, commit_message,
            commit_cmds, constants.ACTIVITY_STATUS_PUBLIC, False
        )
        subtopic_page_commit_log_entry.subtopic_page_id = self.id
        # The order is important here: the 'versioned_model' needs to come
        # after 'snapshot_content_model', otherwise it leads to problems when
        # putting the models into the datastore.
        return {
            'snapshot_metadata_model': models_to_put['snapshot_metadata_model'],
            'snapshot_content_model': models_to_put['snapshot_content_model'],
            'commit_log_model': subtopic_page_commit_log_entry,
            'versioned_model': models_to_put['versioned_model'],
        }

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(super(cls, cls).get_export_policy(), **{
            'topic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'page_contents': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'page_contents_schema_version':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE
        })
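
The dict returned by compute_models_to_commit is consumed by the versioned-model commit machinery; the key ordering above matters because the consumer relies on the dict's insertion order. A rough stand-in showing the idea (put_commit_models is an illustrative name, not the real API):

def put_commit_models(models_to_put):
    # Iterate in insertion order so the snapshot content model is stored
    # before the versioned model that references it.
    for model in models_to_put.values():
        model.update_timestamps()
        model.put()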
Example #14
class SkillModel(base_models.VersionedModel):
    """Model for storing Skills.

    This class should only be imported by the skill services file
    and the skill model test file.
    """

    SNAPSHOT_METADATA_CLASS = SkillSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = SkillSnapshotContentModel
    COMMIT_LOG_ENTRY_CLASS = SkillCommitLogEntryModel
    ALLOW_REVERT = False

    # The description of the skill.
    description = datastore_services.StringProperty(required=True,
                                                    indexed=True)
    # The schema version for each of the misconception dicts.
    misconceptions_schema_version = datastore_services.IntegerProperty(
        required=True, indexed=True)
    # The schema version for each of the rubric dicts.
    rubric_schema_version = datastore_services.IntegerProperty(required=True,
                                                               indexed=True)
    # A list of misconceptions associated with the skill, in which each
    # element is a dict.
    misconceptions = (datastore_services.JsonProperty(repeated=True,
                                                      indexed=False))
    # The rubrics for the skill that explain each difficulty level.
    rubrics = datastore_services.JsonProperty(repeated=True, indexed=False)
    # The ISO 639-1 code for the language this skill is written in.
    language_code = (datastore_services.StringProperty(required=True,
                                                       indexed=True))
    # The schema version for the skill_contents.
    skill_contents_schema_version = datastore_services.IntegerProperty(
        required=True, indexed=True)
    # A dict representing the skill contents.
    skill_contents = datastore_services.JsonProperty(indexed=False)
    # The prerequisite skills for the skill.
    prerequisite_skill_ids = (datastore_services.StringProperty(repeated=True,
                                                                indexed=True))
    # The id to be used by the next misconception added.
    next_misconception_id = (datastore_services.IntegerProperty(required=True,
                                                                indexed=False))
    # The id of the skill that this skill has been merged into, in case the
    # skill has been marked as a duplicate of another one and needs merging.
    # This is an optional field.
    superseding_skill_id = datastore_services.StringProperty(indexed=True)
    # A flag indicating whether deduplication is complete for this skill.
    # It is initially False, and is set to True only when there is a value
    # for superseding_skill_id and the merge has been completed.
    all_questions_merged = (datastore_services.BooleanProperty(indexed=True,
                                                               required=True))

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """Model doesn't contain any data directly corresponding to a user."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    @classmethod
    def get_merged_skills(cls) -> List['SkillModel']:
        """Returns the skill models which have been merged.

        Returns:
            list(SkillModel). List of skill models which have been merged.
        """

        return [
            skill for skill in cls.query()
            if (skill.superseding_skill_id is not None and (
                len(skill.superseding_skill_id) > 0))
        ]

    # TODO(#13523): Change 'commit_cmds' to TypedDict/Domain Object
    # to remove Any used below.
    def _trusted_commit(self, committer_id: str, commit_type: str,
                        commit_message: str,
                        commit_cmds: List[Dict[str, Any]]) -> None:
        """Record the event to the commit log after the model commit.

        Note that this extends the superclass method.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_type: str. The type of commit. Possible values are in
                core.storage.base_models.COMMIT_TYPE_CHOICES.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information to
                reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and then additional arguments for that command.
        """
        super(SkillModel, self)._trusted_commit(committer_id, commit_type,
                                                commit_message, commit_cmds)

        skill_commit_log_entry = SkillCommitLogEntryModel.create(
            self.id, self.version, committer_id, commit_type, commit_message,
            commit_cmds, constants.ACTIVITY_STATUS_PUBLIC, False)
        skill_commit_log_entry.skill_id = self.id
        skill_commit_log_entry.update_timestamps()
        skill_commit_log_entry.put()

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """Model does not contain user data."""
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(
            super(cls, cls).get_export_policy(), **{
                'description': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'misconceptions_schema_version':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'rubric_schema_version':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'misconceptions': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'rubrics': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'skill_contents_schema_version':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'skill_contents': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'prerequisite_skill_ids':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'next_misconception_id':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'superseding_skill_id':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'all_questions_merged':
                base_models.EXPORT_POLICY.NOT_APPLICABLE
            })

    @classmethod
    def get_by_description(cls, description: str) -> Optional['SkillModel']:
        """Gets SkillModel by description. Returns None if the skill with
        description doesn't exist.

        Args:
            description: str. The description of the skill.

        Returns:
            SkillModel|None. The skill model of the skill or None if not
            found.
        """
        return cls.get_all().filter(cls.description == description).get()
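
A read-only usage sketch for the merge-related fields; the description below is a placeholder:

skill = SkillModel.get_by_description('Adding fractions')
if skill is not None and skill.superseding_skill_id:
    # all_questions_merged becomes True only after every linked question has
    # been moved over to the superseding skill.
    merge_finished = skill.all_questions_merged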
Example #15
class TopicModel(base_models.VersionedModel):
    """Model for storing Topics.

    This class should only be imported by the topic services file
    and the topic model test file.
    """

    SNAPSHOT_METADATA_CLASS = TopicSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = TopicSnapshotContentModel
    COMMIT_LOG_ENTRY_CLASS = TopicCommitLogEntryModel
    ALLOW_REVERT = False

    # The name of the topic.
    name = datastore_services.StringProperty(required=True, indexed=True)
    # The canonical name of the topic, created by making `name` lowercase.
    canonical_name = (
        datastore_services.StringProperty(required=True, indexed=True))
    # The abbreviated name of the topic.
    abbreviated_name = (
        datastore_services.StringProperty(indexed=True, default=''))
    # The thumbnail filename of the topic.
    thumbnail_filename = datastore_services.StringProperty(indexed=True)
    # The thumbnail background color of the topic.
    thumbnail_bg_color = datastore_services.StringProperty(indexed=True)
    # The thumbnail size in bytes of the topic.
    thumbnail_size_in_bytes = (
        datastore_services.IntegerProperty(indexed=True))
    # The description of the topic.
    description = datastore_services.TextProperty(indexed=False)
    # This consists of the list of objects referencing canonical stories that
    # are part of this topic.
    canonical_story_references = (
        datastore_services.JsonProperty(repeated=True, indexed=False))
    # This consists of the list of objects referencing additional stories that
    # are part of this topic.
    additional_story_references = (
        datastore_services.JsonProperty(repeated=True, indexed=False))
    # The schema version for the story reference object on each of the above 2
    # lists.
    story_reference_schema_version = datastore_services.IntegerProperty(
        required=True, indexed=True)
    # This consists of the list of uncategorized skill ids that are not part of
    # any subtopic.
    uncategorized_skill_ids = (
        datastore_services.StringProperty(repeated=True, indexed=True))
    # The list of subtopics that are part of the topic.
    subtopics = datastore_services.JsonProperty(repeated=True, indexed=False)
    # The schema version of the subtopic dict.
    subtopic_schema_version = (
        datastore_services.IntegerProperty(required=True, indexed=True))
    # The id for the next subtopic.
    next_subtopic_id = datastore_services.IntegerProperty(required=True)
    # The ISO 639-1 code for the language this topic is written in.
    language_code = (
        datastore_services.StringProperty(required=True, indexed=True))
    # The url fragment of the topic.
    url_fragment = (
        datastore_services.StringProperty(required=True, indexed=True))
    # Whether to show practice tab in the Topic viewer page.
    practice_tab_is_displayed = datastore_services.BooleanProperty(
        required=True, default=False)
    # The content of the meta tag in the Topic viewer page.
    meta_tag_content = datastore_services.StringProperty(
        indexed=True, default='')
    # The page title fragment used in the Topic viewer web page.
    # For example, if the full Topic viewer web page title is
    # 'Learn Fractions | Add, Subtract, Multiply and Divide | Oppia'
    # the page title fragment field represents the middle value 'Add, Subtract,
    # Multiply and Divide'.
    page_title_fragment_for_web = datastore_services.StringProperty(
        indexed=True, default='')

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """Model doesn't contain any data directly corresponding to a user."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    # TODO(#13523): Change 'commit_cmds' to TypedDict/Domain Object
    # to remove Any used below.
    def _trusted_commit(
            self,
            committer_id: str,
            commit_type: str,
            commit_message: str,
            commit_cmds: List[Dict[str, Any]]
    ) -> None:
        """Record the event to the commit log after the model commit.

        Note that this extends the superclass method.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_type: str. The type of commit. Possible values are in
                core.storage.base_models.COMMIT_TYPE_CHOICES.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information to
                reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and then additional arguments for that command.
        """
        super(TopicModel, self)._trusted_commit(
            committer_id, commit_type, commit_message, commit_cmds)

        topic_rights = TopicRightsModel.get_by_id(self.id)
        if topic_rights.topic_is_published:
            status = constants.ACTIVITY_STATUS_PUBLIC
        else:
            status = constants.ACTIVITY_STATUS_PRIVATE

        topic_commit_log_entry = TopicCommitLogEntryModel.create(
            self.id, self.version, committer_id, commit_type,
            commit_message, commit_cmds, status, False
        )
        topic_commit_log_entry.topic_id = self.id
        topic_commit_log_entry.update_timestamps()
        topic_commit_log_entry.put()

    @classmethod
    def get_by_name(cls, topic_name: str) -> Optional['TopicModel']:
        """Gets TopicModel by topic_name. Returns None if the topic with
        name topic_name doesn't exist.

        Args:
            topic_name: str. The name of the topic.

        Returns:
            TopicModel|None. The topic model of the topic or None if not
            found.
        """
        return cls.get_all().filter(
            cls.canonical_name == topic_name.lower()
        ).get()

    @classmethod
    def get_by_url_fragment(cls, url_fragment: str) -> Optional['TopicModel']:
        """Gets TopicModel by url_fragment. Returns None if the topic with
        name url_fragment doesn't exist.

        Args:
            url_fragment: str. The url fragment of the topic.

        Returns:
            TopicModel|None. The topic model of the topic or None if not
            found.
        """
        # TODO(#10210): Make fetching by URL fragment faster.
        return cls.get_all().filter(cls.url_fragment == url_fragment).get()

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """Model does not contain user data."""
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(super(cls, cls).get_export_policy(), **{
            'name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'canonical_name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'abbreviated_name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'thumbnail_filename': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'thumbnail_bg_color': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'thumbnail_size_in_bytes': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'description': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'canonical_story_references':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'additional_story_references':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'story_reference_schema_version':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'uncategorized_skill_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'subtopics': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'subtopic_schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'next_subtopic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'meta_tag_content': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'page_title_fragment_for_web': (
                base_models.EXPORT_POLICY.NOT_APPLICABLE),
            'practice_tab_is_displayed':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'url_fragment': base_models.EXPORT_POLICY.NOT_APPLICABLE,
        })
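
Topic lookups normalize on canonical_name, so callers do not need to match the original casing of the name. A small sketch with placeholder values:

topic = TopicModel.get_by_name('Fractions')  # Matches canonical_name 'fractions'.
same_topic = TopicModel.get_by_url_fragment('fractions')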
Example #16
class StoryModel(base_models.VersionedModel):
    """Model for storing stories.

    This class should only be imported by the story services file
    and the story model test file.
    """

    SNAPSHOT_METADATA_CLASS = StorySnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = StorySnapshotContentModel
    COMMIT_LOG_ENTRY_CLASS = StoryCommitLogEntryModel
    ALLOW_REVERT = False

    # The title of the story.
    title = datastore_services.StringProperty(required=True, indexed=True)
    # The thumbnail filename of the story.
    thumbnail_filename = datastore_services.StringProperty(indexed=True)
    # The thumbnail background color of the story.
    thumbnail_bg_color = datastore_services.StringProperty(indexed=True)
    # The thumbnail size of the story.
    thumbnail_size_in_bytes = datastore_services.IntegerProperty(indexed=True)
    # A high-level description of the story.
    description = datastore_services.TextProperty(indexed=False)
    # A set of notes that describe the characters, main storyline, and setting.
    notes = datastore_services.TextProperty(indexed=False)
    # The ISO 639-1 code for the language this story is written in.
    language_code = (
        datastore_services.StringProperty(required=True, indexed=True))
    # The story contents dict specifying the list of story nodes and the
    # connections between them. Modelled by the StoryContents class
    # (see story_domain.py for its current schema).
    story_contents = datastore_services.JsonProperty(default={}, indexed=False)
    # The schema version for the story_contents.
    story_contents_schema_version = (
        datastore_services.IntegerProperty(required=True, indexed=True))
    # The topic id to which the story belongs.
    corresponding_topic_id = (
        datastore_services.StringProperty(indexed=True, required=True))
    # The url fragment for the story.
    url_fragment = (
        datastore_services.StringProperty(required=True, indexed=True))
    # The content of the meta tag in the Story viewer page.
    meta_tag_content = datastore_services.StringProperty(
        indexed=True, default='')

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """Model doesn't contain any data directly corresponding to a user."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    # TODO(#13523): Change 'commit_cmds' to TypedDict/Domain Object
    # to remove Any used below.
    def compute_models_to_commit(
        self,
        committer_id: str,
        commit_type: str,
        commit_message: str,
        commit_cmds: List[Dict[str, Any]],
        # We expect Mapping because we want to allow models that inherit
        # from BaseModel as the values, if we used Dict this wouldn't
        # be allowed.
        additional_models: Mapping[str, base_models.BaseModel]
    ) -> base_models.ModelsToPutDict:
        """Record the event to the commit log after the model commit.

        Note that this extends the superclass method.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_type: str. The type of commit. Possible values are in
                core.storage.base_models.COMMIT_TYPE_CHOICES.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information to
                reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and then additional arguments for that command.
            additional_models: dict(str, BaseModel). Additional models that are
                needed for the commit process.

        Returns:
            ModelsToPutDict. A dict of models that should be put into
            the datastore.
        """
        models_to_put = super().compute_models_to_commit(
            committer_id,
            commit_type,
            commit_message,
            commit_cmds,
            additional_models
        )

        story_commit_log_entry = StoryCommitLogEntryModel.create(
            self.id, self.version, committer_id, commit_type, commit_message,
            commit_cmds, constants.ACTIVITY_STATUS_PUBLIC, False
        )
        story_commit_log_entry.story_id = self.id
        return {
            'snapshot_metadata_model': models_to_put['snapshot_metadata_model'],
            'snapshot_content_model': models_to_put['snapshot_content_model'],
            'commit_log_model': story_commit_log_entry,
            'versioned_model': models_to_put['versioned_model'],
        }

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """Model does not contain user data."""
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(super(cls, cls).get_export_policy(), **{
            'title': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'thumbnail_filename': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'thumbnail_bg_color': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'thumbnail_size_in_bytes': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'description': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'notes': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'story_contents': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'story_contents_schema_version':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'corresponding_topic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'url_fragment': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'meta_tag_content': base_models.EXPORT_POLICY.NOT_APPLICABLE
        })

    @classmethod
    def get_by_url_fragment(cls, url_fragment: str) -> Optional[StoryModel]:
        """Gets StoryModel by url_fragment. Returns None if the story with
        name url_fragment doesn't exist.

        Args:
            url_fragment: str. The url fragment of the story.

        Returns:
            StoryModel|None. The story model of the story or None if not
            found.
        """
        return cls.get_all().filter(cls.url_fragment == url_fragment).get()
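
A minimal usage sketch of the lookup above; the url fragment value is purely illustrative:

# Illustrative lookup by url fragment; 'intro-to-fractions' is a made-up value.
story_model = StoryModel.get_by_url_fragment('intro-to-fractions')
if story_model is None:
    print('No story with this url fragment exists.')
else:
    print(story_model.title, story_model.corresponding_topic_id)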
Example #17
class PlatformParameterModel(base_models.VersionedModel):
    """A class that represents a named dynamic platform parameter.
    This model only stores fields that can be updated at run time.

    The id is the name of the parameter.
    """

    SNAPSHOT_METADATA_CLASS = PlatformParameterSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = PlatformParameterSnapshotContentModel

    rules = datastore_services.JsonProperty(repeated=True)
    rule_schema_version = (
        datastore_services.IntegerProperty(required=True, indexed=True))

    @staticmethod
    def get_deletion_policy():
        # type: () -> base_models.DELETION_POLICY
        """PlatformParameterModel is not related to users."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    @staticmethod
    def get_model_association_to_user():
        # type: () -> base_models.MODEL_ASSOCIATION_TO_USER
        """Model does not contain user data."""
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls):
        # type: () -> Dict[Text, base_models.EXPORT_POLICY]
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(super(cls, cls).get_export_policy(), **{
            'rules': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'rule_schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE
        })

    # TODO(#13523): Change 'rule_dicts' to domain object/TypedDict to
    # remove Any from type-annotation below.
    @classmethod
    def create(cls, param_name, rule_dicts, rule_schema_version):
        # type: (Text, List[Dict[Text, Any]], int) -> PlatformParameterModel
        """Creates a PlatformParameterModel instance.

        Args:
            param_name: str. The name of the parameter, which is immutable.
            rule_dicts: list(dict). List of dict representation of
                PlatformParameterRule objects, which have the following
                structure:
                    - value_when_matched: *. The result of the rule when it's
                        matched.
                    - filters: list(dict). List of dict representation of
                        PlatformParameterFilter objects, having the following
                        structure:
                            - type: str. The type of the filter.
                            - value: *. The value of the filter to match
                                against.
            rule_schema_version: int. The schema version for the rule dicts.

        Returns:
            PlatformParameterModel. The created PlatformParameterModel
            instance.
        """
        return cls(
            id=param_name,
            rules=rule_dicts,
            rule_schema_version=rule_schema_version)
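
A hedged sketch of calling create() as documented above; the parameter name and filter are invented, and persisting the instance (which would normally go through the versioned-model commit flow) is not shown:

# Illustrative call to PlatformParameterModel.create; 'promo_bar_enabled' and
# the 'platform_type' filter are made-up example values.
param_model = PlatformParameterModel.create(
    'promo_bar_enabled',
    [{
        'value_when_matched': True,
        'filters': [{'type': 'platform_type', 'value': 'Web'}],
    }],
    1)
# Saving the instance would go through the VersionedModel commit flow
# (not shown here).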
Example #18
class AppFeedbackReportStatsModel(base_models.BaseModel):
    """Model for storing aggregate report stats on the tickets created.

    Instances of this model contain statistics for different report types based
    on the ticket they are assigned to and the date that the aggregation covers.

    The id of each model instance is calculated by concatenating the platform,
    ticket ID, and the date (in isoformat) this entity is tracking stats for.
    """

    # The unique ticket ID that this entity is aggregating for.
    ticket_id = datastore_services.StringProperty(required=True, indexed=True)
    # The platform that these statistics are for.
    platform = datastore_services.StringProperty(required=True,
                                                 indexed=True,
                                                 choices=PLATFORM_CHOICES)
    # The date in UTC that this entity is tracking stats for -- this should
    # correspond to the creation date of the reports aggregated in this model.
    stats_tracking_date = datastore_services.DateProperty(required=True,
                                                          indexed=True)
    # The total number of reports submitted on this date.
    total_reports_submitted = datastore_services.IntegerProperty(required=True,
                                                                 indexed=True)
    # JSON struct that maps the daily statistics for this ticket on the date
    # specified in stats_tracking_date. The JSON will map each param_name
    # (defined by a domain const ALLOWED_STATS_PARAM_NAMES) to a dictionary of
    # all the possible param_values for that parameter and the number of reports
    # submitted on that day that satisfy that param value, similar to e.g.:
    #
    #   { param_name1 : { param_value1 : report_count1,
    #                     param_value2 : report_count2,
    #                     param_value3 : report_count3 },
    #     param_name2 : { param_value1 : report_count1,
    #                     param_value2 : report_count2,
    #                     param_value3 : report_count3 } }.
    daily_param_stats = datastore_services.JsonProperty(required=True,
                                                        indexed=False)
    # The schema version for parameter statistics in this entity.
    daily_param_stats_schema_version = datastore_services.IntegerProperty(
        required=True, indexed=True)

    @classmethod
    def create(cls, entity_id: str, platform: str, ticket_id: str,
               stats_tracking_date: datetime.date,
               total_reports_submitted: int,
               daily_param_stats: Dict[str, Dict[str, int]]) -> str:
        """Creates a new AppFeedbackReportStatsModel instance and returns its
        ID.

        Args:
            entity_id: str. The ID used for this entity.
            platform: str. The platform the stats are aggregating for.
            ticket_id: str. The ID for the ticket these stats aggregate on.
            stats_tracking_date: datetime.date. The date in UTC that this entity
                is tracking stats for.
            total_reports_submitted: int. The total number of reports submitted
                on this date.
            daily_param_stats: dict. The daily stats for this entity, keyed
                by parameter name, with each value mapping a parameter value
                to the number of reports that satisfy that parameter value.

        Returns:
            AppFeedbackReportStatsModel. The newly created
            AppFeedbackReportStatsModel instance.
        """
        stats_entity = cls(
            id=entity_id,
            ticket_id=ticket_id,
            platform=platform,
            stats_tracking_date=stats_tracking_date,
            total_reports_submitted=total_reports_submitted,
            daily_param_stats=daily_param_stats,
            daily_param_stats_schema_version=(
                feconf.CURRENT_FEEDBACK_REPORT_STATS_SCHEMA_VERSION))
        stats_entity.update_timestamps()
        stats_entity.put()
        return entity_id

    @classmethod
    def calculate_id(cls, platform: str, ticket_id: Optional[str],
                     stats_tracking_date: datetime.date) -> str:
        """Generates key for the instance of AppFeedbackReportStatsModel
        class in the required format with the arguments provided.

        Args:
            platform: str. The platform this entity is aggregating on.
            ticket_id: str|None. The ID for the ticket these stats aggregate
                on, or None for reports that are not yet assigned a ticket.
            stats_tracking_date: date. The date these stats are tracked for.

        Returns:
            str. The ID for this entity of the form
            '[platform]:[ticket_id]:[stats_date in YYYY-MM-DD]'.
        """
        if ticket_id is None:
            ticket_id = UNTICKETED_ANDROID_REPORTS_STATS_TICKET_ID
        return '%s:%s:%s' % (platform, ticket_id,
                             stats_tracking_date.isoformat())

    @classmethod
    def get_stats_for_ticket(
            cls, ticket_id: str) -> Sequence['AppFeedbackReportStatsModel']:
        """Fetches the stats for a single ticket.

        Args:
            ticket_id: str. The ID of the ticket to get stats for.

        Returns:
            list(AppFeedbackReportStatsModel). A list of
            AppFeedbackReportStatsModel entities that record stats on the
            ticket.
        """
        return cls.query(cls.ticket_id == ticket_id).fetch()

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """Model doesn't contain any information directly corresponding to a
        user.
        """
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(
            super(cls, cls).get_export_policy(), **{
                'ticket_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'platform': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'stats_tracking_date':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'total_reports_submitted':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'daily_param_stats_schema_version':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'daily_param_stats': base_models.EXPORT_POLICY.NOT_APPLICABLE
            })

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """Model doesn't contain any data directly corresponding to a user."""
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @staticmethod
    def get_lowest_supported_role() -> str:
        """The lowest supported role for feedback reports stats will be
        moderator.
        """
        return feconf.ROLE_ID_MODERATOR
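
A short usage sketch combining calculate_id() and create() from the class above; the platform value, ticket id, and stats contents are illustrative and would in practice need to match PLATFORM_CHOICES and the allowed stat parameter names:

import datetime

# Build the entity id and store one day's worth of illustrative stats.
stats_date = datetime.date(2021, 6, 1)
entity_id = AppFeedbackReportStatsModel.calculate_id(
    'android', 'ticket_123', stats_date)
AppFeedbackReportStatsModel.create(
    entity_id=entity_id,
    platform='android',
    ticket_id='ticket_123',
    stats_tracking_date=stats_date,
    total_reports_submitted=4,
    daily_param_stats={'report_type': {'suggestion': 3, 'crash': 1}})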
Example #19
class ExplorationModel(base_models.VersionedModel):
    """Versioned storage model for an Oppia exploration.

    This class should only be imported by the exploration services file
    and the exploration model test file.
    """

    SNAPSHOT_METADATA_CLASS = ExplorationSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = ExplorationSnapshotContentModel
    COMMIT_LOG_ENTRY_CLASS = ExplorationCommitLogEntryModel
    ALLOW_REVERT = True

    # What this exploration is called.
    title = datastore_services.StringProperty(required=True)
    # The category this exploration belongs to.
    category = datastore_services.StringProperty(required=True, indexed=True)
    # The objective of this exploration.
    objective = datastore_services.TextProperty(default='', indexed=False)
    # The ISO 639-1 code for the language this exploration is written in.
    language_code = datastore_services.StringProperty(
        default=constants.DEFAULT_LANGUAGE_CODE, indexed=True)
    # Tags (topics, skills, concepts, etc.) associated with this
    # exploration.
    tags = datastore_services.StringProperty(repeated=True, indexed=True)
    # A blurb for this exploration.
    blurb = datastore_services.TextProperty(default='', indexed=False)
    # 'Author notes' for this exploration.
    author_notes = datastore_services.TextProperty(default='', indexed=False)

    # The version of the states blob schema.
    states_schema_version = datastore_services.IntegerProperty(required=True,
                                                               default=0,
                                                               indexed=True)
    # The name of the initial state of this exploration.
    init_state_name = (datastore_services.StringProperty(required=True,
                                                         indexed=True))
    # A dict representing the states of this exploration. This dict should
    # not be empty.
    states = datastore_services.JsonProperty(default={}, indexed=False)
    # The dict of parameter specifications associated with this exploration.
    # Each specification is a dict whose keys are param names and whose values
    # are each dicts with a single key, 'obj_type', whose value is a string.
    param_specs = datastore_services.JsonProperty(default={}, indexed=False)
    # The list of parameter changes to be performed once at the start of a
    # reader's encounter with an exploration.
    param_changes = (datastore_services.JsonProperty(repeated=True,
                                                     indexed=False))
    # A boolean indicating whether automatic text-to-speech is enabled in
    # this exploration.
    auto_tts_enabled = (datastore_services.BooleanProperty(default=True,
                                                           indexed=True))
    # A boolean indicating whether correctness feedback is enabled in this
    # exploration.
    correctness_feedback_enabled = datastore_services.BooleanProperty(
        default=False, indexed=True)

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """Model doesn't contain any data directly corresponding to a user."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """Model does not contain user data."""
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(
            super(cls, cls).get_export_policy(), **{
                'title':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'category':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'objective':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'language_code':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'tags':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'blurb':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'author_notes':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'states_schema_version':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'init_state_name':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'states':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'param_specs':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'param_changes':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'auto_tts_enabled':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'correctness_feedback_enabled':
                base_models.EXPORT_POLICY.NOT_APPLICABLE
            })

    @classmethod
    def get_exploration_count(cls) -> int:
        """Returns the total number of explorations."""
        return cls.get_all().count()

    # We expect Mapping because we want to allow models that inherit
    # from BaseModel as the values, if we used Dict this wouldn't be allowed.
    def _prepare_additional_models(
            self) -> Mapping[str, base_models.BaseModel]:
        """Prepares additional models needed for the commit process.

        Returns:
            dict(str, BaseModel). Additional models needed for
            the commit process. Contains the ExplorationRightsModel.
        """
        return {'rights_model': ExplorationRightsModel.get_by_id(self.id)}

    # TODO(#13523): Change 'commit_cmds' to TypedDict/Domain Object
    # to remove Any used below.
    def compute_models_to_commit(
        self,
        committer_id: str,
        commit_type: str,
        commit_message: str,
        commit_cmds: List[Dict[str, Any]],
        # We expect Mapping because we want to allow models that inherit
        # from BaseModel as the values, if we used Dict this wouldn't
        # be allowed.
        additional_models: Mapping[str, base_models.BaseModel]
    ) -> base_models.ModelsToPutDict:
        """Record the event to the commit log after the model commit.

        Note that this extends the superclass method.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_type: str. The type of commit. Possible values are in
                core.storage.base_models.COMMIT_TYPE_CHOICES.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information to
                reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and then additional arguments for that command.
            additional_models: dict(str, BaseModel). Additional models that are
                needed for the commit process.

        Returns:
            ModelsToPutDict. A dict of models that should be put into
            the datastore.
        """
        models_to_put = super().compute_models_to_commit(
            committer_id, commit_type, commit_message, commit_cmds,
            additional_models)

        # The cast is needed because additional_models maps strings to
        # BaseModels and we want to hint to mypy that this particular value
        # is an ExplorationRightsModel.
        exploration_rights_model = cast(ExplorationRightsModel,
                                        additional_models['rights_model'])
        exploration_commit_log = ExplorationCommitLogEntryModel.create(
            self.id, self.version, committer_id, commit_type, commit_message,
            commit_cmds, exploration_rights_model.status,
            exploration_rights_model.community_owned)
        exploration_commit_log.exploration_id = self.id
        return {
            'snapshot_metadata_model':
            models_to_put['snapshot_metadata_model'],
            'snapshot_content_model': models_to_put['snapshot_content_model'],
            'commit_log_model': exploration_commit_log,
            'versioned_model': models_to_put['versioned_model'],
        }

    # We have ignored [override] here because the signature of this method
    # doesn't match with BaseModel.delete_multi().
    @classmethod
    def delete_multi(  # type: ignore[override]
            cls,
            entity_ids: List[str],
            committer_id: str,
            commit_message: str,
            force_deletion: bool = False) -> None:
        """Deletes the given cls instances with the given entity_ids.

        Note that this extends the superclass method.

        Args:
            entity_ids: list(str). Ids of entities to delete.
            committer_id: str. The user_id of the user who committed the change.
            commit_message: str. The commit description message.
            force_deletion: bool. If True these models are deleted completely
                from storage, otherwise they are only marked as deleted.
                Default is False.
        """
        super(ExplorationModel,
              cls).delete_multi(entity_ids,
                                committer_id,
                                commit_message,
                                force_deletion=force_deletion)

        if not force_deletion:
            commit_log_models = []
            exp_rights_models = ExplorationRightsModel.get_multi(
                entity_ids, include_deleted=True)
            versioned_models = cls.get_multi(entity_ids, include_deleted=True)

            versioned_and_exp_rights_models = zip(versioned_models,
                                                  exp_rights_models)
            for model, rights_model in versioned_and_exp_rights_models:
                # Ruling out the possibility of None for mypy type checking.
                assert model is not None
                assert rights_model is not None
                exploration_commit_log = ExplorationCommitLogEntryModel.create(
                    model.id, model.version, committer_id,
                    feconf.COMMIT_TYPE_DELETE, commit_message,
                    [{
                        'cmd': cls.CMD_DELETE_COMMIT
                    }], rights_model.status, rights_model.community_owned)
                exploration_commit_log.exploration_id = model.id
                commit_log_models.append(exploration_commit_log)
            ExplorationCommitLogEntryModel.update_timestamps_multi(
                commit_log_models)
            datastore_services.put_multi(commit_log_models)

    # TODO(#13523): Change snapshot of this model to TypedDict/Domain Object
    # to remove Any used below.
    @staticmethod
    def convert_to_valid_dict(snapshot_dict: Dict[str, Any]) -> Dict[str, Any]:
        """Replace invalid fields and values in the ExplorationModel dict.
        Some old ExplorationModels can contain fields
        and field values that are no longer supported and would cause
        an exception when we try to reconstitute an ExplorationModel from
        them. We need to remove or replace these fields and values.

        Args:
            snapshot_dict: dict. The content of the model. Some fields and field
                values might no longer exist in the ExplorationModel
                schema.

        Returns:
            dict. The content of the model. Only valid fields and values are
            present.
        """

        if 'skill_tags' in snapshot_dict:
            del snapshot_dict['skill_tags']
        if 'default_skin' in snapshot_dict:
            del snapshot_dict['default_skin']
        if 'skin_customizations' in snapshot_dict:
            del snapshot_dict['skin_customizations']

        return snapshot_dict

    # TODO(#13523): Change 'snapshot_dict' to TypedDict/Domain Object
    # to remove Any used below.
    def _reconstitute(self, snapshot_dict: Dict[str, Any]) -> ExplorationModel:
        """Populates the model instance with the snapshot.
        Some old ExplorationSnapshotContentModels can contain fields
        and field values that are no longer supported and would cause
        an exception when we try to reconstitute an ExplorationModel from
        them. We need to remove or replace these fields and values.

        Args:
            snapshot_dict: dict(str, *). The snapshot with the model
                property values.

        Returns:
            VersionedModel. The instance of the VersionedModel class populated
            with the snapshot.
        """

        self.populate(**ExplorationModel.convert_to_valid_dict(snapshot_dict))
        return self
Example #20
class ClassifierTrainingJobModel(base_models.BaseModel):
    """Model for storing classifier training jobs.

    The id of each instance of this class has the form
    '[exp_id].[random hash of 12 chars]'.
    """

    # The ID of the algorithm used to create the model.
    algorithm_id = datastore_services.StringProperty(
        required=True, indexed=True)
    # The ID of the interaction to which the algorithm belongs.
    interaction_id = (
        datastore_services.StringProperty(required=True, indexed=True))
    # The exploration_id of the exploration to whose state the model belongs.
    exp_id = datastore_services.StringProperty(required=True, indexed=True)
    # The exploration version at the time this training job was created.
    exp_version = (
        datastore_services.IntegerProperty(required=True, indexed=True))
    # The name of the state to which the model belongs.
    state_name = datastore_services.StringProperty(required=True, indexed=True)
    # The status of the training job. It can be either NEW, COMPLETE or PENDING.
    status = datastore_services.StringProperty(
        required=True, choices=feconf.ALLOWED_TRAINING_JOB_STATUSES,
        default=feconf.TRAINING_JOB_STATUS_PENDING, indexed=True)
    # The training data which is to be populated when retrieving the job.
    # The list contains dicts where each dict represents a single training
    # data group. The training data are computed from answers that have been
    # anonymized and that are not connected to any existing or deleted users.
    training_data = datastore_services.JsonProperty(default=None)
    # The time when the job's status should next be checked.
    # It is incremented by the TTL whenever a job with status NEW is picked
    # up by a VM.
    next_scheduled_check_time = datastore_services.DateTimeProperty(
        required=True, indexed=True)
    # The algorithm version for the classifier. Algorithm version identifies
    # the format of the classifier_data as well as the prediction API to be
    # used.
    algorithm_version = datastore_services.IntegerProperty(
        required=True, indexed=True)

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """Model doesn't contain any data directly corresponding to a user."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """Model does not contain user data."""
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(super(cls, cls).get_export_policy(), **{
            'algorithm_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'interaction_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'exp_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'exp_version': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'state_name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'status': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'training_data': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'next_scheduled_check_time':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'algorithm_version': base_models.EXPORT_POLICY.NOT_APPLICABLE
        })

    @classmethod
    def _generate_id(cls, exp_id: str) -> str:
        """Generates a unique id for the training job of the form
        '[exp_id].[random hash of 12 chars]'.

        Args:
            exp_id: str. ID of the exploration.

        Returns:
            str. ID of the new ClassifierTrainingJobModel instance.

        Raises:
            Exception. The id generator for ClassifierTrainingJobModel is
                producing too many collisions.
        """

        for _ in python_utils.RANGE(base_models.MAX_RETRIES):
            new_id = '%s.%s' % (
                exp_id,
                utils.convert_to_hash(
                    python_utils.UNICODE(
                        utils.get_random_int(base_models.RAND_RANGE)),
                    base_models.ID_LENGTH))
            if not cls.get_by_id(new_id):
                return new_id

        raise Exception(
            'The id generator for ClassifierTrainingJobModel is producing '
            'too many collisions.')

    @classmethod
    def create(
            cls,
            algorithm_id: str,
            interaction_id: str,
            exp_id: str,
            exp_version: int,
            next_scheduled_check_time: datetime.datetime,
            training_data: TrainingDataUnionType,
            state_name: str,
            status: str,
            algorithm_version: int
    ) -> str:
        """Creates a new ClassifierTrainingJobModel entry.

        Args:
            algorithm_id: str. ID of the algorithm used to generate the model.
            interaction_id: str. ID of the interaction to which the algorithm
                belongs.
            exp_id: str. ID of the exploration.
            exp_version: int. The exploration version at the time
                this training job was created.
            next_scheduled_check_time: datetime.datetime. The next scheduled
                time to check the job.
            training_data: dict. The data used in training phase.
            state_name: str. The name of the state to which the classifier
                belongs.
            status: str. The status of the training job.
            algorithm_version: int. The version of the classifier model to be
                trained.

        Returns:
            str. ID of the new ClassifierModel entry.

        Raises:
            Exception. A model with the same ID already exists.
        """

        instance_id = cls._generate_id(exp_id)
        training_job_instance = cls(
            id=instance_id, algorithm_id=algorithm_id,
            interaction_id=interaction_id,
            exp_id=exp_id,
            exp_version=exp_version,
            next_scheduled_check_time=next_scheduled_check_time,
            state_name=state_name, status=status,
            training_data=training_data,
            algorithm_version=algorithm_version
        )

        training_job_instance.update_timestamps()
        training_job_instance.put()
        return instance_id

    @classmethod
    def query_new_and_pending_training_jobs(
            cls, offset: int
    ) -> Tuple[Sequence['ClassifierTrainingJobModel'], int]:
        """Gets the next 10 jobs which are either in status "new" or "pending",
        ordered by their next_scheduled_check_time attribute.

        Args:
            offset: int. Number of query results to skip.

        Returns:
            tuple(list(ClassifierTrainingJobModel), int).
            A tuple containing the list of the ClassifierTrainingJobModels
            with status new or pending and the offset value.
        """
        query = (
            cls.get_all()
            .filter(
                datastore_services.all_of(
                    cls.status.IN([
                        feconf.TRAINING_JOB_STATUS_NEW,
                        feconf.TRAINING_JOB_STATUS_PENDING
                    ]),
                    cls.next_scheduled_check_time <= datetime.datetime.utcnow()
                )
            )
            .order(cls.next_scheduled_check_time)
        )

        classifier_job_models: Sequence[ClassifierTrainingJobModel] = (
            query.fetch(
                NEW_AND_PENDING_TRAINING_JOBS_FETCH_LIMIT, offset=offset))
        offset = offset + len(classifier_job_models)
        return classifier_job_models, offset

    # TODO(#13523): Change 'job_dict' to domain object/TypedDict to
    # remove Any from type-annotation below.
    @classmethod
    def create_multi(cls, job_dicts_list: List[Dict[str, Any]]) -> List[str]:
        """Creates multiple new  ClassifierTrainingJobModel entries.

        Args:
            job_dicts_list: list(dict). The list of dicts where each dict
                represents the attributes of one ClassifierTrainingJobModel.

        Returns:
            list(str). List of job IDs.
        """
        job_models = []
        job_ids = []
        for job_dict in job_dicts_list:
            instance_id = cls._generate_id(job_dict['exp_id'])
            training_job_instance = cls(
                id=instance_id, algorithm_id=job_dict['algorithm_id'],
                interaction_id=job_dict['interaction_id'],
                exp_id=job_dict['exp_id'],
                exp_version=job_dict['exp_version'],
                next_scheduled_check_time=job_dict['next_scheduled_check_time'],
                state_name=job_dict['state_name'], status=job_dict['status'],
                training_data=job_dict['training_data'],
                algorithm_version=job_dict['algorithm_version'])

            job_models.append(training_job_instance)
            job_ids.append(instance_id)
        cls.update_timestamps_multi(job_models)
        cls.put_multi(job_models)
        return job_ids
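
A sketch of paging through new and pending jobs using the offset returned by query_new_and_pending_training_jobs(); purely illustrative:

# Fetch jobs batch by batch until no more new/pending jobs are returned.
offset = 0
while True:
    jobs, offset = (
        ClassifierTrainingJobModel.query_new_and_pending_training_jobs(offset))
    if not jobs:
        break
    for job in jobs:
        print(job.id, job.status, job.next_scheduled_check_time)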
Example #21
class SubtopicPageModel(base_models.VersionedModel):
    """Model for storing Subtopic pages.

    This stores the HTML data for a subtopic page.
    """

    SNAPSHOT_METADATA_CLASS = SubtopicPageSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = SubtopicPageSnapshotContentModel
    COMMIT_LOG_ENTRY_CLASS = SubtopicPageCommitLogEntryModel
    ALLOW_REVERT = False

    # The topic id that this subtopic is a part of.
    topic_id = datastore_services.StringProperty(required=True, indexed=True)
    # The json data of the subtopic consisting of subtitled_html,
    # recorded_voiceovers and written_translations fields.
    page_contents = datastore_services.JsonProperty(required=True)
    # The schema version for the page_contents field.
    page_contents_schema_version = datastore_services.IntegerProperty(
        required=True, indexed=True)
    # The ISO 639-1 code for the language this subtopic page is written in.
    language_code = (datastore_services.StringProperty(required=True,
                                                       indexed=True))

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """Model doesn't contain any data directly corresponding to a user."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    # TODO(#13523): Change 'commit_cmds' to TypedDict/Domain Object
    # to remove Any used below.
    def _trusted_commit(self, committer_id: str, commit_type: str,
                        commit_message: str,
                        commit_cmds: List[Dict[str, Any]]) -> None:
        """Record the event to the commit log after the model commit.

        Note that this extends the superclass method.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_type: str. The type of commit. Possible values are in
                core.storage.base_models.COMMIT_TYPE_CHOICES.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information to
                reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and then additional arguments for that command.
        """
        super(SubtopicPageModel,
              self)._trusted_commit(committer_id, commit_type, commit_message,
                                    commit_cmds)

        subtopic_page_commit_log_entry = SubtopicPageCommitLogEntryModel.create(
            self.id, self.version, committer_id, commit_type, commit_message,
            commit_cmds, constants.ACTIVITY_STATUS_PUBLIC, False)
        subtopic_page_commit_log_entry.subtopic_page_id = self.id
        subtopic_page_commit_log_entry.update_timestamps()
        subtopic_page_commit_log_entry.put()

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(
            super(cls, cls).get_export_policy(), **{
                'topic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'page_contents': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'page_contents_schema_version':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE
            })
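
A hedged sketch of how _trusted_commit() above is normally reached: not by calling it directly, but through a commit wrapper assumed to exist on the VersionedModel base class. The id, cmd name, and commit message below are illustrative:

# Assumes VersionedModel exposes commit(committer_id, commit_message,
# commit_cmds), which in turn calls _trusted_commit(); all values are made up.
subtopic_page = SubtopicPageModel(
    id='topic1-1',
    topic_id='topic1',
    page_contents={},
    page_contents_schema_version=1,
    language_code='en')
subtopic_page.commit(
    'committer_user_id', 'Create new subtopic page.', [{'cmd': 'create_new'}])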
Example #22
class PlatformParameterModel(base_models.VersionedModel):
    """A class that represents a named dynamic platform parameter.
    This model only stores fields that can be updated at run time.

    The id is the name of the parameter.
    """

    SNAPSHOT_METADATA_CLASS = PlatformParameterSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = PlatformParameterSnapshotContentModel

    rules = datastore_services.JsonProperty(repeated=True)
    rule_schema_version = (datastore_services.IntegerProperty(required=True,
                                                              indexed=True))

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """PlatformParameterModel is not related to users."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """Model does not contain user data."""
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(
            super(cls, cls).get_export_policy(), **{
                'rules': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'rule_schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE
            })

    @classmethod
    def create(cls, param_name: str, rule_dicts: List[
        platform_parameter_domain.PlatformParameterRuleDict],
               rule_schema_version: int) -> PlatformParameterModel:
        """Creates a PlatformParameterModel instance.

        Args:
            param_name: str. The name of the parameter, which is immutable.
            rule_dicts: list(dict). List of dict representation of
                PlatformParameterRule objects, which have the following
                structure:
                    - value_when_matched: *. The result of the rule when it's
                        matched.
                    - filters: list(dict). List of dict representation of
                        PlatformParameterFilter objects, having the following
                        structure:
                            - type: str. The type of the filter.
                            - conditions: list((str, str)). Each element of the
                                list is a 2-tuple (op, value), where op is the
                                operator for comparison and value is the value
                                used for comparison.
            rule_schema_version: int. The schema version for the rule dicts.

        Returns:
            PlatformParameterModel. The created PlatformParameterModel
            instance.
        """
        return cls(id=param_name,
                   rules=rule_dicts,
                   rule_schema_version=rule_schema_version)
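
For this typed version of create(), a sketch of a rule dict whose filter uses the (op, value) condition structure described in the docstring; the parameter name, filter type, and values are invented:

# Illustrative rule: serve 'variant_b' when the app version is at least 1.2.3.
rules = [{
    'value_when_matched': 'variant_b',
    'filters': [{
        'type': 'app_version',
        'conditions': [('>=', '1.2.3')],
    }],
}]
model = PlatformParameterModel.create(
    param_name='landing_page_experiment',
    rule_dicts=rules,
    rule_schema_version=2)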
Example #23
class CollectionModel(base_models.VersionedModel):
    """Versioned storage model for an Oppia collection.

    This class should only be imported by the collection services file
    and the collection model test file.
    """

    SNAPSHOT_METADATA_CLASS = CollectionSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = CollectionSnapshotContentModel
    COMMIT_LOG_ENTRY_CLASS = CollectionCommitLogEntryModel
    ALLOW_REVERT = True

    # What this collection is called.
    title = datastore_services.StringProperty(required=True)
    # The category this collection belongs to.
    category = datastore_services.StringProperty(required=True, indexed=True)
    # The objective of this collection.
    objective = datastore_services.TextProperty(default='', indexed=False)
    # The language code of this collection.
    language_code = datastore_services.StringProperty(
        default=constants.DEFAULT_LANGUAGE_CODE, indexed=True)
    # Tags associated with this collection.
    tags = datastore_services.StringProperty(repeated=True, indexed=True)

    # The version of all property blob schemas.
    schema_version = datastore_services.IntegerProperty(
        required=True, default=1, indexed=True)

    # A dict representing the contents of a collection. Currently, this
    # contains the list of nodes. This dict should contain collection data
    # whose structure might need to be changed in the future.
    collection_contents = (
        datastore_services.JsonProperty(default={}, indexed=False))

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """Model doesn't contain any data directly corresponding to a user."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """Model does not contain user data."""
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(super(cls, cls).get_export_policy(), **{
            'title': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'category': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'objective': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'tags': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'collection_contents': base_models.EXPORT_POLICY.NOT_APPLICABLE
        })

    @classmethod
    def get_collection_count(cls) -> int:
        """Returns the total number of collections."""
        return cls.get_all().count()

    # TODO(#13523): Change 'model_dict' to domain object/TypedDict to
    # remove Any from type-annotation below.
    @staticmethod
    def convert_to_valid_dict(model_dict: Dict[str, Any]) -> Dict[str, Any]:
        """Replace invalid fields and values in the CollectionModel dict.

        Some old CollectionModels can contain fields
        and field values that are no longer supported and would cause
        an exception when we try to reconstitute a CollectionModel from
        them. We need to remove or replace these fields and values.

        Args:
            model_dict: dict. The content of the model. Some fields and field
                values might no longer exist in the CollectionModel
                schema.

        Returns:
            dict. The content of the model. Only valid fields and values are
            present.
        """

        # The 'nodes' field has been moved into the collection_contents dict,
        # so copy its value there and delete the top-level field.
        if 'nodes' in model_dict and model_dict['nodes']:
            model_dict['collection_contents']['nodes'] = (
                copy.deepcopy(model_dict['nodes']))
            del model_dict['nodes']

        return model_dict

    # TODO(#13523): Change 'snapshot_dict' to domain object/TypedDict to
    # remove Any from type-annotation below.
    def _reconstitute(self, snapshot_dict: Dict[str, Any]) -> CollectionModel:
        """Populates the model instance with the snapshot.

        Some old CollectionModels can contain fields
        and field values that are no longer supported and would cause
        an exception when we try to reconstitute a CollectionModel from
        them. We need to remove or replace these fields and values.

        Args:
            snapshot_dict: dict(str, *). The snapshot with the model
                property values.

        Returns:
            VersionedModel. The instance of the VersionedModel class populated
            with the snapshot.
        """
        self.populate(
            **CollectionModel.convert_to_valid_dict(snapshot_dict))
        return self

    # We expect Mapping because we want to allow models that inherit
    # from BaseModel as the values, if we used Dict this wouldn't be allowed.
    def _prepare_additional_models(self) -> Mapping[str, base_models.BaseModel]:
        """Prepares additional models needed for the commit process.

        Returns:
            dict(str, BaseModel). Additional models needed for
            the commit process. Contains the CollectionRightsModel.
        """
        return {
            'rights_model': CollectionRightsModel.get_by_id(self.id)
        }

    # TODO(#13523): Change 'commit_cmds' to domain object/TypedDict to
    # remove Any from type-annotation below.
    def compute_models_to_commit(
        self,
        committer_id: str,
        commit_type: str,
        commit_message: str,
        commit_cmds: List[Dict[str, Any]],
        # We expect Mapping because we want to allow models that inherit
        # from BaseModel as the values, if we used Dict this wouldn't
        # be allowed.
        additional_models: Mapping[str, base_models.BaseModel]
    ) -> base_models.ModelsToPutDict:
        """Record the event to the commit log after the model commit.

        Note that this extends the superclass method.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_type: str. The type of commit. Possible values are in
                core.storage.base_models.COMMIT_TYPE_CHOICES.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information to
                reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and then additional arguments for that command.
            additional_models: dict(str, BaseModel). Additional models that are
                needed for the commit process.

        Returns:
            ModelsToPutDict. A dict of models that should be put into
            the datastore.
        """
        models_to_put = super().compute_models_to_commit(
            committer_id,
            commit_type,
            commit_message,
            commit_cmds,
            additional_models
        )

        # The cast is needed because additional_models maps strings to
        # BaseModels and we want to hint to mypy that this particular value
        # is a CollectionRightsModel.
        collection_rights_model = cast(
            CollectionRightsModel, additional_models['rights_model']
        )
        collection_commit_log = CollectionCommitLogEntryModel.create(
            self.id,
            self.version,
            committer_id,
            commit_type,
            commit_message,
            commit_cmds,
            collection_rights_model.status,
            collection_rights_model.community_owned
        )
        collection_commit_log.collection_id = self.id
        return {
            'snapshot_metadata_model': models_to_put['snapshot_metadata_model'],
            'snapshot_content_model': models_to_put['snapshot_content_model'],
            'commit_log_model': collection_commit_log,
            'versioned_model': models_to_put['versioned_model'],
        }

    # We have ignored [override] here because the signature of this method
    # doesn't match with BaseModel.delete_multi().
    # https://mypy.readthedocs.io/en/stable/error_code_list.html#check-validity-of-overrides-override
    @classmethod
    def delete_multi( # type: ignore[override]
            cls,
            entity_ids: List[str],
            committer_id: str,
            commit_message: str,
            force_deletion: bool = False
    ) -> None:
        """Deletes the given cls instances with the given entity_ids.

        Note that this extends the superclass method.

        Args:
            entity_ids: list(str). Ids of entities to delete.
            committer_id: str. The user_id of the user who committed the change.
            commit_message: str. The commit description message.
            force_deletion: bool. If True these models are deleted completely
                from storage, otherwise they are only marked as deleted.
                Default is False.
        """
        super(CollectionModel, cls).delete_multi(
            entity_ids, committer_id,
            commit_message, force_deletion=force_deletion)

        if not force_deletion:
            commit_log_models = []
            collection_rights_models = CollectionRightsModel.get_multi(
                entity_ids, include_deleted=True)
            versioned_models = cls.get_multi(entity_ids, include_deleted=True)
            for model, rights_model in zip(
                versioned_models, collection_rights_models):
                # Ruling out the possibility of None for mypy type checking.
                assert model is not None
                assert rights_model is not None
                collection_commit_log = CollectionCommitLogEntryModel.create(
                    model.id, model.version, committer_id,
                    feconf.COMMIT_TYPE_DELETE,
                    commit_message, [{'cmd': cls.CMD_DELETE_COMMIT}],
                    rights_model.status, rights_model.community_owned
                )
                collection_commit_log.collection_id = model.id
                commit_log_models.append(collection_commit_log)
            CollectionCommitLogEntryModel.update_timestamps_multi(
                commit_log_models)
            datastore_services.put_multi(commit_log_models)
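
An illustration of CollectionModel.convert_to_valid_dict(): the legacy top-level 'nodes' field is copied under collection_contents and then removed. The field values are placeholders:

# The dict is migrated in place and returned.
legacy_dict = {
    'title': 'Old collection',
    'nodes': [{'exploration_id': 'exp_1'}],
    'collection_contents': {},
}
migrated = CollectionModel.convert_to_valid_dict(legacy_dict)
assert 'nodes' not in migrated
assert migrated['collection_contents']['nodes'] == [{'exploration_id': 'exp_1'}]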
Example #24
class CommunityContributionStatsModel(base_models.BaseModel):
    """Records the contributor dashboard contribution stats. This includes the
    total number of reviewers for each suggestion type and the total number of
    suggestions in review for each suggestion type. There is only ever one
    instance of this model, and its ID is COMMUNITY_CONTRIBUTION_STATS_MODEL_ID.

    Note: since this is a singleton model, the model GET and PUT must be done in
    a transaction to avoid the loss of updates that come in rapid succession.
    """

    # A dictionary where the keys represent the language codes that translation
    # suggestions are offered in and the values correspond to the total number
    # of reviewers who have permission to review translation suggestions in
    # that language.
    translation_reviewer_counts_by_lang_code = (
        datastore_services.JsonProperty(required=True))
    # A dictionary where the keys represent the language codes that translation
    # suggestions are offered in and the values correspond to the total number
    # of translation suggestions that are currently in review in that language.
    translation_suggestion_counts_by_lang_code = (
        datastore_services.JsonProperty(required=True))
    # The total number of reviewers who have permission to review question
    # suggestions.
    question_reviewer_count = datastore_services.IntegerProperty(required=True)
    # The total number of question suggestions that are currently in review.
    question_suggestion_count = (datastore_services.IntegerProperty(
        required=True))

    # We have ignored [override] here because the signature of this method
    # doesn't match with BaseModel.get().
    # https://mypy.readthedocs.io/en/stable/error_code_list.html#check-validity-of-overrides-override
    @classmethod
    def get(
        cls
    ) -> Optional['CommunityContributionStatsModel']:  # type: ignore[override]
        """Gets the CommunityContributionStatsModel instance. If the
        CommunityContributionStatsModel does not exist yet, it is created.
        This method helps enforce that there should only ever be one instance
        of this model.

        Returns:
            CommunityContributionStatsModel|None. The single model instance,
            or None if no such model instance exists.
        """
        community_contribution_stats_model = cls.get_by_id(
            COMMUNITY_CONTRIBUTION_STATS_MODEL_ID)

        if community_contribution_stats_model is None:
            community_contribution_stats_model = cls(
                id=COMMUNITY_CONTRIBUTION_STATS_MODEL_ID,
                translation_reviewer_counts_by_lang_code={},
                translation_suggestion_counts_by_lang_code={},
                question_reviewer_count=0,
                question_suggestion_count=0)
            community_contribution_stats_model.update_timestamps()
            community_contribution_stats_model.put()
            return community_contribution_stats_model

        else:
            return super(CommunityContributionStatsModel,
                         cls).get(COMMUNITY_CONTRIBUTION_STATS_MODEL_ID)

    @classmethod
    def get_deletion_policy(cls) -> base_models.DELETION_POLICY:
        """Model doesn't contain any data directly corresponding to a user."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """This model only contains general statistical information about the
        contributor dashboard and does not include any individual user
        information.
        """
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model doesn't contain any data directly corresponding to a user
        because the data is aggregated.
        """
        return dict(
            super(cls, cls).get_export_policy(), **{
                'translation_reviewer_counts_by_lang_code':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'translation_suggestion_counts_by_lang_code':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'question_reviewer_count':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'question_suggestion_count':
                base_models.EXPORT_POLICY.NOT_APPLICABLE
            })
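
Because this is a singleton whose reads and writes must happen in a transaction (per the class docstring), an update is typically wrapped in a transactional helper. A hedged sketch, assuming a run_in_transaction helper exists somewhere in the surrounding codebase:

def _increment_question_suggestion_count():
    # get() creates the singleton on first use, so the result should not be
    # None in practice.
    stats_model = CommunityContributionStatsModel.get()
    assert stats_model is not None
    stats_model.question_suggestion_count += 1
    stats_model.update_timestamps()
    stats_model.put()

# run_in_transaction(_increment_question_suggestion_count)  # hypothetical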
Example #25
class CollectionSummaryModel(base_models.BaseModel):
    """Summary model for an Oppia collection.

    This should be used whenever the content blob of the collection is not
    needed (e.g. search results, etc).

    A CollectionSummaryModel instance stores the following information:

        id, title, category, objective, language_code, tags, ratings,
        last_updated, created_on, status (private, public),
        community_owned, owner_ids, editor_ids,
        viewer_ids, version.

    The key of each instance is the collection id.
    """

    # What this collection is called.
    title = datastore_services.StringProperty(required=True)
    # The category this collection belongs to.
    category = datastore_services.StringProperty(required=True, indexed=True)
    # The objective of this collection.
    objective = datastore_services.TextProperty(required=True, indexed=False)
    # The ISO 639-1 code for the language this collection is written in.
    language_code = (
        datastore_services.StringProperty(required=True, indexed=True))
    # Tags associated with this collection.
    tags = datastore_services.StringProperty(repeated=True, indexed=True)

    # Aggregate user-assigned ratings of the collection.
    ratings = datastore_services.JsonProperty(default=None, indexed=False)

    # Time when the collection model was last updated (not to be
    # confused with last_updated, which is the time when the
    # collection *summary* model was last updated).
    collection_model_last_updated = (
        datastore_services.DateTimeProperty(indexed=True))
    # Time when the collection model was created (not to be confused
    # with created_on, which is the time when the collection *summary*
    # model was created).
    collection_model_created_on = (
        datastore_services.DateTimeProperty(indexed=True))

    # The publication status of this collection.
    status = datastore_services.StringProperty(
        default=constants.ACTIVITY_STATUS_PRIVATE, indexed=True,
        choices=[
            constants.ACTIVITY_STATUS_PRIVATE,
            constants.ACTIVITY_STATUS_PUBLIC
        ]
    )

    # Whether this collection is owned by the community.
    community_owned = (
        datastore_services.BooleanProperty(required=True, indexed=True))

    # The user_ids of owners of this collection.
    owner_ids = datastore_services.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who are allowed to edit this collection.
    editor_ids = datastore_services.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who are allowed to view this collection.
    viewer_ids = datastore_services.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who have contributed (humans who have made a
    # positive (not just a revert) change to the collection's content).
    # NOTE TO DEVELOPERS: contributor_ids and contributors_summary need to be
    # synchronized, meaning that the keys in contributors_summary need to be
    # equal to the contributor_ids list.
    contributor_ids = (
        datastore_services.StringProperty(indexed=True, repeated=True))
    # A dict representing the contributors of non-trivial commits to this
    # collection. Each key of this dict is a user_id, and the corresponding
    # value is the number of non-trivial commits that the user has made.
    contributors_summary = (
        datastore_services.JsonProperty(default={}, indexed=False))
    # The version number of the collection after this commit. Only populated
    # for commits to a collection (as opposed to its rights, etc.).
    version = datastore_services.IntegerProperty()
    # The number of nodes (explorations) that are within this collection.
    node_count = datastore_services.IntegerProperty()

    @staticmethod
    def get_deletion_policy() -> base_models.DELETION_POLICY:
        """Model contains data to pseudonymize or delete corresponding
        to a user: viewer_ids, editor_ids, owner_ids, contributor_ids,
        and contributors_summary fields.
        """
        return (
            base_models.DELETION_POLICY.PSEUDONYMIZE_IF_PUBLIC_DELETE_IF_PRIVATE
        )

    @staticmethod
    def get_model_association_to_user(
    ) -> base_models.MODEL_ASSOCIATION_TO_USER:
        """Model data has already been exported as a part of the
        CollectionRightsModel, and thus does not need an export_data
        function.
        """
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
        """Model contains data corresponding to a user, but this isn't exported
        because noteworthy details that belong to this model have already been
        exported as a part of the CollectionRightsModel.
        """
        return dict(super(cls, cls).get_export_policy(), **{
            'title': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'category': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'objective': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'tags': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'ratings': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'collection_model_last_updated':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'collection_model_created_on':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'status': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'community_owned': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'owner_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'editor_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'viewer_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'contributor_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'contributors_summary': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'version': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'node_count': base_models.EXPORT_POLICY.NOT_APPLICABLE
        })

    @classmethod
    def has_reference_to_user_id(cls, user_id: str) -> bool:
        """Check whether CollectionSummaryModel references user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        return cls.query(datastore_services.any_of(
            cls.owner_ids == user_id,
            cls.editor_ids == user_id,
            cls.viewer_ids == user_id,
            cls.contributor_ids == user_id)).get(keys_only=True) is not None

    @classmethod
    def get_non_private(cls) -> Sequence[CollectionSummaryModel]:
        """Returns an iterable with non-private collection summary models.

        Returns:
            iterable. An iterable with non-private collection summary models.
        """
        return cls.get_all().filter(
            cls.status != constants.ACTIVITY_STATUS_PRIVATE
        ).fetch(feconf.DEFAULT_QUERY_LIMIT)

    @classmethod
    def get_private_at_least_viewable(
        cls, user_id: str
    ) -> Sequence[CollectionSummaryModel]:
        """Returns an iterable with private collection summary models that are
        at least viewable by the given user.

        Args:
            user_id: str. The id of the given user.

        Returns:
            iterable. An iterable with private collection summary models that
            are at least viewable by the given user.
        """
        return cls.get_all().filter(
            cls.status == constants.ACTIVITY_STATUS_PRIVATE
        ).filter(
            datastore_services.any_of(
                cls.owner_ids == user_id,
                cls.editor_ids == user_id,
                cls.viewer_ids == user_id
            )
        ).fetch(feconf.DEFAULT_QUERY_LIMIT)

    @classmethod
    def get_at_least_editable(
        cls, user_id: str
    ) -> Sequence[CollectionSummaryModel]:
        """Returns an iterable with collection summary models that are at least
        editable by the given user.

        Args:
            user_id: str. The id of the given user.

        Returns:
            iterable. An iterable with collection summary models that are at
            least editable by the given user.
        """
        return cls.get_all().filter(
            datastore_services.any_of(
                cls.owner_ids == user_id,
                cls.editor_ids == user_id
            )
        ).fetch(feconf.DEFAULT_QUERY_LIMIT)
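
A minimal usage sketch combining the query helpers above; the function name
get_displayable_collection_summary_ids is hypothetical and not part of the
model.

def get_displayable_collection_summary_ids(user_id: str) -> list:
    """Illustrative helper: ids of collections the user can at least view."""
    # Public collections are visible to everyone.
    visible_models = list(CollectionSummaryModel.get_non_private())
    # Private collections are visible only to their listed owners, editors
    # and viewers.
    visible_models.extend(
        CollectionSummaryModel.get_private_at_least_viewable(user_id))
    return [model.id for model in visible_models]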