Code example #1
class PersonIChat(ndb.Model):
    """Datastore record of one chat relationship: the partner's identity,
    the chat history, and an unread-message flag."""
    # Account identifier of the chat partner.
    person_account = ndb.StringProperty()
    # Display name of the chat partner.
    person_name = ndb.StringProperty()
    # Chat history entries; `repeated=True` makes this a list of JSON values.
    chat_history = ndb.JsonProperty(repeated=True)
    # True when there is an unread message in this chat -- presumably unread
    # by the owning account; confirm against the code that sets it.
    new_message_unread = ndb.BooleanProperty()
Code example #2
class RoshReviewUserStats(ndb.Model):
    """Save Rosh Review user stats.

    The entity id is the student id (see ``student_id``). The raw stats
    payload is stored verbatim in ``data``; a few of its fields are
    mirrored as computed properties so they can be filtered and sorted on.

    Can be used to query all stats of a user.
    """
    # Raw stats payload (camelCase keys); validated in _pre_put_hook().
    data = ndb.JsonProperty(required=True)
    # Refreshed automatically on every put().
    last_updated_at = ndb.DateTimeProperty(auto_now=True)
    # Queryable mirrors of fields inside `data`.
    performance = ndb.ComputedProperty(
        lambda self: float(self.data.get('cumulativePerformance', 0.0)))
    percentage_complete = ndb.ComputedProperty(
        lambda self: float(self.data.get('percentageComplete', 0.0)))
    # NOTE(review): defaults to the float 0.0 while the stored 'year' value
    # may be another type -- confirm, since get_stats() filters on equality
    # against this property.
    year = ndb.ComputedProperty(lambda self: self.data.get('year', 0.0))

    @property
    def student_id(self):
        # The entity id doubles as the student id (set in new_stats()).
        return self.key.id()

    @property
    def display_name(self):
        return self.data.get('displayName', '')

    def details(self):
        """Return a dict of selected stats fields.

        'categoryPerformances' is flattened into a list of
        {'id', 'label', 'performance'} dicts, sorted by descending
        performance with ties broken alphabetically by label.
        """
        props = ('displayName', 'trainingLevel', 'year', 'percentageComplete',
                 'categoryPerformances')
        # Python 2 idiom (dict.iteritems()).
        data = {k: v for k, v in self.data.iteritems() if k in props}
        data['id'] = data['studentId'] = self.student_id
        data['performance'] = self.performance
        data['categoryPerformances'] = sorted([{
            'id': k,
            'label': k.title(),
            'performance': v
        } for k, v in data.get('categoryPerformances', {}).iteritems()],
                                              key=lambda p: (
                                                  -p['performance'],
                                                  p['label'],
                                              ))
        return data

    def summary(self):
        """Like details(), but without the per-category breakdown."""
        data = self.details()
        data.pop('categoryPerformances', None)
        return data

    @classmethod
    def new_stats(cls, student, stats, commit=True):
        """Build (and, when ``commit`` is True, save) stats for ``student``.

        ``stats`` keys are converted to camelCase to match the raw payload
        format; display name and year are copied from the student entity.
        """
        data = {utils.to_camel_case(k): v for k, v in stats.iteritems()}
        data['displayName'] = student.display_name
        data['year'] = student.year
        stats = cls(id=student.key.id(), data=data)
        if commit:
            stats.put()
        return stats

    def _pre_put_hook(self):
        # Validate the payload against the API schema before every save.
        self.validate(self.data)

    def update_topic_stats(self, commit=True):
        """Fan 'categoryPerformances' out into per-topic stats entities.

        Returns the RoshReviewUserTopicStats entities; they are saved in
        one batch only when ``commit`` is True.
        """
        topics = self.data.get('categoryPerformances', {})
        topic_stats = []

        for top, perf in topics.iteritems():
            topic_stats.append(
                RoshReviewUserTopicStats.new_topic_stats(self,
                                                         top,
                                                         perf,
                                                         commit=False))

        if commit is True:
            ndb.put_multi(topic_stats)

        return topic_stats

    @staticmethod
    def validate(data):
        # Delegated to the education API validator; presumably raises on
        # schema mismatch -- confirm against education.api.validate.
        education.api.validate('RoshReviewUserStatsData', data)

    @classmethod
    def get_stats(cls,
                  cursor_key=None,
                  limit=None,
                  year=None,
                  topic=None,
                  sort_by=None,
                  **kw):
        """Query stats pages, optionally filtered by year.

        When ``topic`` is given, the whole query is delegated to
        RoshReviewUserTopicStats.get_stats. Otherwise returns a
        (stats, urlsafe_cursor_or_None) 2-tuple; note the `more` flag
        from fetch_page() is dropped.
        """
        if topic:
            return RoshReviewUserTopicStats.get_stats(topic,
                                                      cursor_key=cursor_key,
                                                      limit=limit,
                                                      year=year,
                                                      **kw)

        limit = limit if limit else 20
        sort_by = sort_by if sort_by else 'performance'
        cursor = Cursor(urlsafe=cursor_key) if cursor_key else None

        q = cls.query()
        if year:
            q = q.filter(cls.year == year)
        # GenericProperty allows ordering on a caller-chosen property name.
        q = q.order(-ndb.GenericProperty(sort_by))

        stats, cursor, _ = q.fetch_page(limit, start_cursor=cursor, **kw)
        return stats, (cursor.urlsafe() if cursor else None),

    @classmethod
    def get_topics(cls):
        return RoshReviewUserTopicStats.get_topics()
Code example #3
class ExplorationCommitLogEntryModel(base_models.BaseModel):
    """Records one commit made to an exploration.

    One instance of this model is created and saved for every commit to
    ExplorationModel or ExplorationRightsModel.

    Instance ids follow the pattern
    'exploration-{{EXP_ID}}-{{EXP_VERSION}}'.
    """
    # These two override the superclass properties so they are indexed.
    created_on = ndb.DateTimeProperty(auto_now_add=True, indexed=True)
    last_updated = ndb.DateTimeProperty(auto_now=True, indexed=True)

    # Id of the committing user.
    user_id = ndb.StringProperty(indexed=True, required=True)
    # Username of that user, captured at the time of the edit.
    username = ndb.StringProperty(indexed=True, required=True)
    # Id of the exploration that was edited.
    exploration_id = ndb.StringProperty(indexed=True, required=True)
    # One of: 'create', 'revert', 'edit', 'delete'.
    commit_type = ndb.StringProperty(indexed=True, required=True)
    # The commit message.
    commit_message = ndb.TextProperty(indexed=False)
    # The commit_cmds dict for this commit.
    commit_cmds = ndb.JsonProperty(indexed=False, required=True)
    # Version of the exploration after this commit; populated only for
    # commits to the exploration itself (not to its rights, etc.).
    version = ndb.IntegerProperty()

    # Status of the exploration after the edit: 'private', 'public' or
    # 'publicized'.
    post_commit_status = ndb.StringProperty(indexed=True, required=True)
    # Whether the exploration is community-owned after the edit event.
    post_commit_community_owned = ndb.BooleanProperty(indexed=True)
    # Whether the exploration is private after the edit. Kept as its own
    # field because an equality filter on it is faster than an inequality
    # filter on post_commit_status.
    post_commit_is_private = ndb.BooleanProperty(indexed=True)

    @classmethod
    def get_all_commits(cls, page_size, urlsafe_start_cursor):
        """Fetches a page of all commits, sorted by last_updated.

        Args:
            page_size: int. Maximum number of entities to return.
            urlsafe_start_cursor: str or None. If given, results start at
                this datastore cursor; otherwise they start at the
                beginning of the full list of entities.

        Returns:
            3-tuple (results, cursor, more), as described in fetch_page()
            at https://developers.google.com/appengine/docs/python/ndb/queryclass,
            where:
                results: list of query results.
                cursor: str or None. Cursor pointing at the next batch, or
                    None when there are no further results.
                more: bool. Whether there are (probably) more results
                    after this batch.
        """
        query = cls.query()
        return cls._fetch_page_sorted_by_last_updated(
            query, page_size, urlsafe_start_cursor)

    @classmethod
    def get_all_non_private_commits(cls,
                                    page_size,
                                    urlsafe_start_cursor,
                                    max_age=None):
        """Fetches a page of non-private commits, sorted by last_updated.

        Args:
            page_size: int. Maximum number of entities to return.
            urlsafe_start_cursor: str or None. If given, results start at
                this datastore cursor; otherwise they start at the
                beginning of the full list of entities.
            max_age: datetime.timedelta or None. When given, only commits
                made within this duration before now are returned.

        Returns:
            3-tuple (results, cursor, more) restricted to commits created
            no earlier than max_age before the current time, where:
                results: list of query results.
                cursor: str or None. Cursor pointing at the next batch, or
                    None when there are no further results.
                more: bool. Whether there are (probably) more results
                    after this batch.

        Raises:
            ValueError: max_age is neither None nor a timedelta.
        """
        if max_age is not None and not isinstance(
                max_age, datetime.timedelta):
            raise ValueError(
                'max_age must be a datetime.timedelta instance or None.')

        query = cls.query(cls.post_commit_is_private == False)  # pylint: disable=singleton-comparison
        if max_age:
            cutoff = datetime.datetime.utcnow() - max_age
            query = query.filter(cls.last_updated >= cutoff)
        return cls._fetch_page_sorted_by_last_updated(
            query, page_size, urlsafe_start_cursor)
Code example #4
File: gae_models.py — project: zhengxit/oppia
class MaybeLeaveExplorationEventLogEntryModel(base_models.BaseModel):
    """Event recording that a reader may have left an exploration without
    completing it.

    Because of browser-side complexity, this event can also be logged when
    the user clicks close and then cancels; the authoritative event is
    therefore the last event of this type logged for the session id.

    Note: shortly after the release of v2.0.0.rc.2, some of these events
    were migrated from StateHitEventLogEntryModel. Those migrated events
    have client_time_spent_in_secs set to 0.0 (the field was not recorded
    by StateHitEventLogEntryModel) and carry a wrong 'last updated'
    timestamp, although their 'created_on' timestamp matches that of the
    original model.

    Event schema documentation
    --------------------------
    V1:
        event_type: 'leave' (there are no 'maybe leave' events in V0)
        exploration_id: id of exploration currently being played
        exploration_version: version of exploration
        state_name: Name of current state
        play_type: 'normal'
        created_on date
        event_schema_version: 1
        session_id: ID of current student's session
        params: current parameter values, in the form of a map of parameter
            name to value
        client_time_spent_in_secs: time spent in this state before the event
            was triggered
    """
    # Bump this whenever the event schema changes.
    CURRENT_EVENT_SCHEMA_VERSION = 1

    # The specific type of this event.
    event_type = ndb.StringProperty(indexed=True)
    # Id of the exploration currently being played.
    exploration_id = ndb.StringProperty(indexed=True)
    # Current version of the exploration.
    exploration_version = ndb.IntegerProperty(indexed=True)
    # Name of the current state.
    state_name = ndb.StringProperty(indexed=True)
    # Id of the current student's session.
    session_id = ndb.StringProperty(indexed=True)
    # Seconds spent in this state before the event occurred. Events
    # migrated from StateHit instances had no timestamp data, so they use
    # a placeholder value of 0.0 here.
    client_time_spent_in_secs = ndb.FloatProperty(indexed=True)
    # Current parameter values: map of parameter name to value.
    params = ndb.JsonProperty(indexed=False)
    # Type of play-through (editor preview, or learner view). The
    # 'playtest' option is legacy, since editor preview playthroughs no
    # longer emit events.
    play_type = ndb.StringProperty(indexed=True,
                                   choices=[feconf.PLAY_TYPE_PLAYTEST,
                                            feconf.PLAY_TYPE_NORMAL])
    # Version of the event schema used to describe events of this type;
    # see the class docstring for schema details.
    event_schema_version = ndb.IntegerProperty(
        indexed=True, default=CURRENT_EVENT_SCHEMA_VERSION)

    @classmethod
    def get_new_event_entity_id(cls, exp_id, session_id):
        """Builds a new entity id from the current time, exploration id
        and session id."""
        now = datetime.datetime.utcnow()
        seed = '%s:%s:%s' % (
            utils.get_time_in_millisecs(now), exp_id, session_id)
        return cls.get_new_id(seed)

    @classmethod
    def create(cls, exp_id, exp_version, state_name, session_id,
               client_time_spent_in_secs, params, play_type):
        """Creates and saves a new maybe-leave exploration event."""
        # TODO(sll): Some events currently do not have an entity id that
        # was set using this method; it was randomly set instead due to an
        # error. Might need to migrate them.
        entity_id = cls.get_new_event_entity_id(exp_id, session_id)
        cls(id=entity_id,
            event_type=feconf.EVENT_TYPE_MAYBE_LEAVE_EXPLORATION,
            exploration_id=exp_id,
            exploration_version=exp_version,
            state_name=state_name,
            session_id=session_id,
            client_time_spent_in_secs=client_time_spent_in_secs,
            params=params,
            play_type=play_type).put()
Code example #5
File: gae_models.py — project: zhengxit/oppia
class StateHitEventLogEntryModel(base_models.BaseModel):
    """Event recording that a student reached a particular state.

    Field definitions:
    - event_type: 'state_hit'
    - exploration_id: id of exploration currently being played
    - exploration_version: version of exploration
    - state_name: Name of current state
    - play_type: 'normal'
    - created_on date
    - event_schema_version: 1
    - session_id: ID of current student's session
    - params: current parameter values, as a map of parameter name to value

    NOTE TO DEVELOPERS: unlike other events, this one carries no
    client_time_spent_in_secs. It is instead the reference event for all
    other client_time_spent_in_secs values, each of which measures the
    time between this event (the learner entering the state) and the
    other event.
    """
    # Bump this whenever the event schema changes.
    CURRENT_EVENT_SCHEMA_VERSION = 1

    # The specific type of this event.
    event_type = ndb.StringProperty(indexed=True)
    # Id of the exploration currently being played.
    exploration_id = ndb.StringProperty(indexed=True)
    # Current version of the exploration.
    exploration_version = ndb.IntegerProperty(indexed=True)
    # Name of the current state.
    state_name = ndb.StringProperty(indexed=True)
    # Id of the current student's session.
    session_id = ndb.StringProperty(indexed=True)
    # Current parameter values: map of parameter name to value.
    params = ndb.JsonProperty(indexed=False)
    # Type of play-through (editor preview, or learner view). The
    # 'playtest' option is legacy, since editor preview playthroughs no
    # longer emit events.
    play_type = ndb.StringProperty(indexed=True,
                                   choices=[feconf.PLAY_TYPE_PLAYTEST,
                                            feconf.PLAY_TYPE_NORMAL])
    # Version of the event schema used to describe events of this type;
    # see the class docstring for schema details.
    event_schema_version = ndb.IntegerProperty(
        indexed=True, default=CURRENT_EVENT_SCHEMA_VERSION)

    @classmethod
    def get_new_event_entity_id(cls, exp_id, session_id):
        """Builds a new entity id from the current time, exploration id
        and session id."""
        now = datetime.datetime.utcnow()
        seed = '%s:%s:%s' % (
            utils.get_time_in_millisecs(now), exp_id, session_id)
        return cls.get_new_id(seed)

    @classmethod
    def create(
            cls, exp_id, exp_version, state_name, session_id, params,
            play_type):
        """Creates and saves a new state-hit event."""
        # TODO(sll): Some events currently do not have an entity id that
        # was set using this method; it was randomly set instead due to an
        # error. Might need to migrate them.
        entity_id = cls.get_new_event_entity_id(exp_id, session_id)
        cls(id=entity_id,
            event_type=feconf.EVENT_TYPE_STATE_HIT,
            exploration_id=exp_id,
            exploration_version=exp_version,
            state_name=state_name,
            session_id=session_id,
            params=params,
            play_type=play_type).put()
Code example #6
File: job.py — project: rtk4616/catapult
class Job(ndb.Model):
    """A Pinpoint job."""

    # Pickled job_state.JobState holding the job's quests/changes/attempts.
    state = ndb.PickleProperty(required=True, compressed=True)

    #####
    # Job arguments passed in through the API.
    #####

    # Request parameters.
    arguments = ndb.JsonProperty(required=True)

    # TODO: The bug id is only used for posting bug comments when a job starts and
    # completes. This probably should not be the responsibility of Pinpoint.
    bug_id = ndb.IntegerProperty()

    # 'functional' or 'performance' for bisects; None for try jobs
    # (see auto_name).
    comparison_mode = ndb.StringProperty()

    # The Gerrit server url and change id of the code review to update upon
    # completion.
    gerrit_server = ndb.StringProperty()
    gerrit_change_id = ndb.StringProperty()

    # User-provided name of the job.
    name = ndb.StringProperty()

    # Dict of key-value pairs used to filter the Jobs listings.
    tags = ndb.JsonProperty()

    # Email of the job creator.
    user = ndb.StringProperty()

    #####
    # Job state generated by running the job.
    #####

    created = ndb.DateTimeProperty(required=True, auto_now_add=True)
    # Don't use `auto_now` for `updated`. When we do data migration, we need
    # to be able to modify the Job without changing the Job's completion time.
    updated = ndb.DateTimeProperty(required=True, auto_now_add=True)

    # A job with no pending task is complete; one with a recorded exception
    # has failed.
    completed = ndb.ComputedProperty(lambda self: not self.task)
    failed = ndb.ComputedProperty(lambda self: bool(self.exception))

    # The name of the Task Queue task this job is running on. If it's present, the
    # job is running. The task is also None for Task Queue retries.
    task = ndb.StringProperty()

    # The string contents of any Exception that was thrown to the top level.
    # If it's present, the job failed.
    exception = ndb.TextProperty()

    # Number of differences found by a comparison job; set in _Complete().
    difference_count = ndb.IntegerProperty()

    @classmethod
    def New(cls,
            quests,
            changes,
            arguments=None,
            bug_id=None,
            comparison_mode=None,
            comparison_magnitude=None,
            gerrit_server=None,
            gerrit_change_id=None,
            name=None,
            pin=None,
            tags=None,
            user=None):
        """Creates a new Job, adds Changes to it, and puts it in the Datastore.

        Args:
          quests: An iterable of Quests for the Job to run.
          changes: An iterable of the initial Changes to run on.
          arguments: A dict with the original arguments used to start the Job.
          bug_id: A monorail issue id number to post Job updates to.
          comparison_mode: Either 'functional' or 'performance', which the Job
              uses to figure out whether to perform a functional or performance
              bisect. If None, the Job will not automatically add any Attempts
              or Changes.
          comparison_magnitude: The estimated size of the regression or
              improvement to look for. Smaller magnitudes require more repeats.
          gerrit_server: Server of the Gerrit code review to update with job
              results.
          gerrit_change_id: Change id of the Gerrit code review to update with
              job results.
          name: The user-provided name of the Job.
          pin: A Change (Commits + Patch) to apply to every Change in this Job.
          tags: A dict of key-value pairs used to filter the Jobs listings.
          user: The email of the Job creator.

        Returns:
          A Job object.
        """
        state = job_state.JobState(quests,
                                   comparison_mode=comparison_mode,
                                   comparison_magnitude=comparison_magnitude,
                                   pin=pin)
        job = cls(state=state,
                  arguments=arguments or {},
                  bug_id=bug_id,
                  comparison_mode=comparison_mode,
                  gerrit_server=gerrit_server,
                  gerrit_change_id=gerrit_change_id,
                  name=name,
                  tags=tags,
                  user=user)

        for c in changes:
            job.AddChange(c)

        job.put()
        return job

    @property
    def job_id(self):
        """Hex-string form of the entity's integer key id."""
        return '%x' % self.key.id()

    @property
    def status(self):
        """'Running', 'Failed' or 'Completed', derived from task/exception."""
        if not self.completed:
            return 'Running'

        if self.failed:
            return 'Failed'

        return 'Completed'

    @property
    def url(self):
        """Absolute URL of this job's status page."""
        return 'https://%s/job/%s' % (os.environ['HTTP_HOST'], self.job_id)

    @property
    def results_url(self):
        """URL of the cached results page, or the default status page."""
        if not self.task:
            url = results2.GetCachedResults2(self)
            if url:
                return url
        # Point to the default status page if no results are available.
        return '/results2/%s' % self.job_id

    @property
    def auto_name(self):
        """The user-provided name, or one generated from the job's mode
        and configuration/benchmark arguments."""
        if self.name:
            return self.name

        if self.comparison_mode == job_state.FUNCTIONAL:
            name = 'Functional bisect'
        elif self.comparison_mode == job_state.PERFORMANCE:
            name = 'Performance bisect'
        else:
            name = 'Try job'

        if 'configuration' in self.arguments:
            name += ' on ' + self.arguments['configuration']
            if 'benchmark' in self.arguments:
                name += '/' + self.arguments['benchmark']

        return name

    def AddChange(self, change):
        """Adds a Change to the job's state."""
        self.state.AddChange(change)

    def Start(self):
        """Starts the Job and updates it in the Datastore.

        This method is designed to return fast, so that Job creation is
        responsive to the user. It schedules the Job on the task queue
        without running anything. It also posts a bug comment, and updates
        the Datastore.
        """
        self._Schedule()
        self.put()

        title = _ROUND_PUSHPIN + ' Pinpoint job started.'
        comment = '\n'.join((title, self.url))
        self._PostBugComment(comment, send_email=False)

    def _Complete(self):
        """Finalizes the job: records the difference count, schedules
        results generation, posts the completion bug comment, and updates
        Gerrit if configured."""
        if self.comparison_mode:
            self.difference_count = len(self.state.Differences())

        try:
            results2.ScheduleResults2Generation(self)
        except taskqueue.Error:
            # Best-effort: results generation failure should not block the
            # completion bookkeeping below.
            pass

        self._FormatAndPostBugCommentOnComplete()
        self._UpdateGerritIfNeeded()

    def _FormatAndPostBugCommentOnComplete(self):
        """Builds and posts the completion comment on the tracked bug.

        For comparison jobs with differences, the comment lists each
        differing commit, assigns an owner, and cc's commit authors.
        """
        if not self.comparison_mode:
            # There is no comparison metric.
            title = "<b>%s Job complete. See results below.</b>" % _ROUND_PUSHPIN
            self._PostBugComment('\n'.join((title, self.url)))
            return

        # There is a comparison metric.
        differences = self.state.Differences()

        if not differences:
            title = "<b>%s Couldn't reproduce a difference.</b>" % _ROUND_PUSHPIN
            self._PostBugComment('\n'.join((title, self.url)))
            return

        # Include list of Changes.
        owner = None
        sheriff = None
        cc_list = set()
        difference_details = []
        for change_a, change_b in differences:
            if change_b.patch:
                commit_info = change_b.patch.AsDict()
            else:
                commit_info = change_b.last_commit.AsDict()

            # TODO: Assign the largest difference, not the last one.
            owner = commit_info['author']
            sheriff = utils.GetSheriffForAutorollCommit(
                commit_info['author'], commit_info['message'])
            cc_list.add(commit_info['author'])

            values_a = self.state.ResultValues(change_a)
            values_b = self.state.ResultValues(change_b)
            difference = _FormatDifferenceForBug(commit_info, values_a,
                                                 values_b, self.state.metric)
            difference_details.append(difference)

        # Header.
        if len(differences) == 1:
            status = 'Found a significant difference after 1 commit.'
        else:
            status = (
                'Found significant differences after each of %d commits.' %
                len(differences))

        title = '<b>%s %s</b>' % (_ROUND_PUSHPIN, status)
        header = '\n'.join((title, self.url))

        # Body.
        body = '\n\n'.join(difference_details)
        if sheriff:
            owner = sheriff
            body += '\n\nAssigning to sheriff %s because "%s" is a roll.' % (
                sheriff, commit_info['subject'])

        # Footer. `differences` is guaranteed non-empty at this point (there
        # is an early return above when it is falsy), so the old
        # `if differences:` guard around the next line was redundant.
        footer = ('Understanding performance regressions:\n'
                  '  http://g.co/ChromePerformanceRegressions')
        footer += self._FormatDocumentationUrls()

        # Bring it all together.
        comment = '\n\n'.join((header, body, footer))
        current_bug_status = self._GetBugStatus()
        if (not current_bug_status or current_bug_status
                in ['Untriaged', 'Unconfirmed', 'Available']):
            # Set the bug status and owner if this bug is opened and unowned.
            self._PostBugComment(comment,
                                 status='Assigned',
                                 cc_list=sorted(cc_list),
                                 owner=owner)
        else:
            # Only update the comment and cc list if this bug is assigned or closed.
            self._PostBugComment(comment, cc_list=sorted(cc_list))

    def _FormatDocumentationUrls(self):
        """Returns a footer with the benchmark's documentation links, or ''
        when no test path/documentation is available."""
        if not self.tags:
            return ''

        # TODO(simonhatch): Tags isn't the best way to get at this, but wait until
        # we move this back into the dashboard so we have a better way of getting
        # at the test path.
        # crbug.com/876899
        test_path = self.tags.get('test_path')
        if not test_path:
            return ''

        test_suite = utils.TestKey('/'.join(test_path.split('/')[:3]))

        docs = histogram.SparseDiagnostic.GetMostRecentDataByNamesSync(
            test_suite, [reserved_infos.DOCUMENTATION_URLS.name])

        if not docs:
            return ''

        docs = docs[reserved_infos.DOCUMENTATION_URLS.name].get('values')

        footer = '\n\n%s:\n  %s' % (docs[0][0], docs[0][1])

        return footer

    def _UpdateGerritIfNeeded(self):
        """Posts a completion comment on the configured Gerrit change."""
        if self.gerrit_server and self.gerrit_change_id:
            gerrit_service.PostChangeComment(
                self.gerrit_server, self.gerrit_change_id,
                '%s Job complete.\n\nSee results at: %s' %
                (_ROUND_PUSHPIN, self.url))

    def Fail(self):
        """Records the current exception on the job and posts a failure
        comment to the tracked bug. Must be called from an except block."""
        self.exception = traceback.format_exc()

        title = _CRYING_CAT_FACE + ' Pinpoint job stopped with an error.'
        # str() is used instead of the deprecated, Python-2-only `.message`
        # attribute, which is empty for exceptions constructed with
        # multiple arguments and absent on Python 3.
        comment = '\n'.join((title, self.url, '', str(sys.exc_info()[1])))
        self._PostBugComment(comment)

    def _Schedule(self):
        """Enqueues a task-queue task to run this job, retrying once on
        transient errors, and records the task name on the job."""
        # Set a task name to deduplicate retries. This adds some latency, but we're
        # not latency-sensitive. If Job.Run() works asynchronously in the future,
        # we don't need to worry about duplicate tasks.
        # https://github.com/catapult-project/catapult/issues/3900
        task_name = str(uuid.uuid4())

        def _AddTask():
            # Sharing task_name means the retry below cannot enqueue a
            # duplicate task.
            return taskqueue.add(queue_name='job-queue',
                                 url='/api/run/' + self.job_id,
                                 name=task_name,
                                 countdown=_TASK_INTERVAL)

        try:
            task = _AddTask()
        except (apiproxy_errors.DeadlineExceededError,
                taskqueue.TransientError):
            # Retry once on transient infrastructure errors.
            task = _AddTask()

        self.task = task.name

    def Run(self):
        """Runs this Job.

        Loops through all Attempts and checks the status of each one,
        kicking off tasks as needed. Does not block to wait for all tasks
        to finish. Also compares adjacent Changes' results and adds any
        additional Attempts or Changes as needed. If there are any
        incomplete tasks, schedules another Run() call on the task queue.
        """
        self.exception = None  # In case the Job succeeds on retry.
        self.task = None  # In case an exception is thrown.

        try:
            if self.comparison_mode:
                self.state.Explore()
            work_left = self.state.ScheduleWork()

            # Schedule moar task.
            if work_left:
                self._Schedule()
            else:
                self._Complete()
        except BaseException:
            self.Fail()
            raise
        finally:
            # Don't use `auto_now` for `updated`. When we do data migration, we need
            # to be able to modify the Job without changing the Job's completion time.
            # NOTE(review): naive local now(); other models in this codebase
            # use utcnow() -- confirm this is intended.
            self.updated = datetime.datetime.now()
            try:
                self.put()
            except (datastore_errors.Timeout,
                    datastore_errors.TransactionFailedError):
                # Retry once.
                self.put()
            except datastore_errors.BadRequestError:
                if self.task:
                    queue = taskqueue.Queue('job-queue')
                    queue.delete_tasks(taskqueue.Task(name=self.task))
                self.task = None

                # The _JobState is too large to fit in an ndb property.
                # Load the Job from before we updated it, and fail it.
                job = self.key.get(use_cache=False)
                job.task = None
                job.Fail()
                job.updated = datetime.datetime.now()
                job.put()
                raise

    def AsDict(self, options=None):
        """Serializes the Job for the API.

        Args:
            options: Optional iterable of OPTION_* flags selecting extra
                sections (state, tags) to include.

        Returns:
            A JSON-serializable dict describing the Job.
        """
        d = {
            'job_id': self.job_id,
            'results_url': self.results_url,
            'arguments': self.arguments,
            'bug_id': self.bug_id,
            'comparison_mode': self.comparison_mode,
            'name': self.auto_name,
            'user': self.user,
            'created': self.created.isoformat(),
            'updated': self.updated.isoformat(),
            'difference_count': self.difference_count,
            'exception': self.exception,
            'status': self.status,
        }
        if not options:
            return d

        if OPTION_STATE in options:
            d.update(self.state.AsDict())
        if OPTION_TAGS in options:
            d['tags'] = {'tags': self.tags}
        return d

    def _PostBugComment(self, *args, **kwargs):
        """Forwards a comment to the issue tracker; no-op without a bug_id."""
        if not self.bug_id:
            return

        issue_tracker = issue_tracker_service.IssueTrackerService(
            utils.ServiceAccountHttp())
        issue_tracker.AddBugComment(self.bug_id, *args, **kwargs)

    def _GetBugStatus(self):
        """Returns the tracked bug's status string, or None without a bug_id."""
        if not self.bug_id:
            return None

        issue_tracker = issue_tracker_service.IssueTrackerService(
            utils.ServiceAccountHttp())
        issue_data = issue_tracker.GetIssue(self.bug_id)
        return issue_data.get('status')
Code example #7
File: gae_models.py — project: victor-ludorum/oppia
class QuestionModel(base_models.VersionedModel):
    """Model for storing Questions.

    The ID of instances of this class has the form
    {{random_hash_of_16_chars}}
    """
    SNAPSHOT_METADATA_CLASS = QuestionSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = QuestionSnapshotContentModel
    ALLOW_REVERT = True

    # An object representing the question state data.
    question_state_data = ndb.JsonProperty(indexed=False, required=True)
    # The schema version for the question state data.
    question_state_schema_version = ndb.IntegerProperty(required=True,
                                                        indexed=True)
    # The ISO 639-1 code for the language this question is written in.
    language_code = ndb.StringProperty(required=True, indexed=True)

    @classmethod
    def _get_new_id(cls):
        """Generates a unique ID for a question of the form
        {{random_hash_of_16_chars}}.

        Returns:
            str. A hash-based ID not currently used by any QuestionModel.

        Raises:
            Exception: The ID generator for QuestionModel is
            producing too many collisions.
        """
        for _ in range(base_models.MAX_RETRIES):
            candidate = utils.convert_to_hash(
                str(utils.get_random_int(base_models.RAND_RANGE)),
                base_models.ID_LENGTH)
            # Only accept the candidate if no model with that ID exists yet.
            if cls.get_by_id(candidate) is None:
                return candidate

        raise Exception(
            'The id generator for QuestionModel is producing too many '
            'collisions.')

    def _trusted_commit(self, committer_id, commit_type, commit_message,
                        commit_cmds):
        """Record the event to the commit log after the model commit.

        Note that this extends the superclass method.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_type: str. The type of commit. Possible values are in
                core.storage.base_models.COMMIT_TYPE_CHOICES.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information
                to reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and then additional arguments for that command.
        """
        super(QuestionModel, self)._trusted_commit(
            committer_id, commit_type, commit_message, commit_cmds)

        settings_model = user_models.UserSettingsModel.get_by_id(committer_id)
        username = settings_model.username if settings_model else ''

        commit_log = QuestionCommitLogEntryModel.create(
            self.id, self.version, committer_id, username,
            commit_type, commit_message, commit_cmds,
            constants.ACTIVITY_STATUS_PUBLIC, False)
        commit_log.question_id = self.id
        commit_log.put()

    @classmethod
    def create(cls, question_state_data, language_code, version):
        """Creates a new QuestionModel entry.

        Note that the caller is responsible for saving the returned entity.

        Args:
            question_state_data: dict. A dict representing the question
                state data.
            language_code: str. The ISO 639-1 code for the language this
                question is written in.
            version: The version of the question.

        Returns:
            QuestionModel. Instance of the new QuestionModel entry.

        Raises:
            Exception: The ID generator produced too many collisions.
        """
        return cls(
            id=cls._get_new_id(),
            question_state_data=question_state_data,
            language_code=language_code,
            version=version)
コード例 #8
0
class UserStatsModel(base_models.BaseMapReduceBatchResultsModel):
    """User-specific statistics keyed by user id.

    Values for total plays and average ratings are recorded by aggregating
    over all explorations owned by a user. Impact scores are calculated over
    explorations for which a user is listed as a contributor.

    The impact score for a particular user is defined as:
    Sum of (
    ln(playthroughs) * (ratings_scaler) * (average(ratings) - 2.5))
    *(multiplier),
    where multiplier = 10, and ratings_scaler is .1 * (number of ratings)
    if there are < 10 ratings for that exploration.

    The impact score is 0 for an exploration with 0 playthroughs or with an
    average rating of less than 2.5.
    """
    # The impact score.
    impact_score = ndb.FloatProperty(indexed=True)
    # The total plays of all the explorations.
    total_plays = ndb.IntegerProperty(indexed=True, default=0)
    # The average of average ratings of all explorations.
    average_ratings = ndb.FloatProperty(indexed=True)
    # The number of ratings of all explorations.
    num_ratings = ndb.IntegerProperty(indexed=True, default=0)
    # History of creator stats. Each entry is a JSON object keyed by a
    # datetime string, whose value is another JSON object of key-value
    # pairs, e.g.:
    # [
    #  {(date_1): {"average_ratings": 4.3, "total_plays": 40}},
    #  {(date_2): {"average_ratings": 4.1, "total_plays": 60}},
    # ]
    weekly_creator_stats_list = ndb.JsonProperty(repeated=True)
    # The version of dashboard stats schema.
    schema_version = (ndb.IntegerProperty(
        required=True,
        default=feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION,
        indexed=True))

    @classmethod
    def get_or_create(cls, user_id):
        """Returns the UserStatsModel for user_id, creating one if absent.

        Note that a newly created instance is not written to the datastore.

        Args:
            user_id: str. The user_id to be associated with the
                UserStatsModel.

        Returns:
            UserStatsModel. Either an existing one which matches the given
                user_id, or the newly created one if it did not already
                exist.
        """
        return cls.get(user_id, strict=False) or cls(id=user_id)

    @staticmethod
    def export_data(user_id):
        """(Takeout) Export the user-relevant properties of UserStatsModel.

        Args:
            user_id: str. The user_id denotes which user's data to extract.
                If the user_id is not valid, this method returns None.

        Returns:
            dict or None. The user-relevant properties of UserStatsModel in
                a python dict format, or None if no model exists for the
                given user_id.
        """
        user_model = UserStatsModel.get(user_id, strict=False)
        if user_model is None:
            return None

        # Re-build each weekly entry so only the exported keys are included.
        weekly_stats_constructed = [
            {
                date_key: {
                    'average_ratings': stat['average_ratings'],
                    'total_plays': stat['total_plays'],
                }
            }
            for weekly_stat in user_model.weekly_creator_stats_list
            for date_key, stat in weekly_stat.items()
        ]

        return {
            'impact_score': user_model.impact_score,
            'total_plays': user_model.total_plays,
            'average_ratings': user_model.average_ratings,
            'num_ratings': user_model.num_ratings,
            'weekly_creator_stats_list': weekly_stats_constructed,
        }
コード例 #9
0
class ExplorationUserDataModel(base_models.BaseModel):
    """User-specific data pertaining to a specific exploration.

    Instances of this class have keys of the form
    [USER_ID].[EXPLORATION_ID]
    """
    # The user id.
    user_id = ndb.StringProperty(required=True, indexed=True)
    # The exploration id.
    exploration_id = ndb.StringProperty(required=True, indexed=True)
    # The rating (1-5) the user assigned to the exploration. Note that this
    # represents a rating given on completion of the exploration.
    rating = ndb.IntegerProperty(default=None, indexed=True)
    # When the most recent rating was awarded, or None if not rated.
    rated_on = ndb.DateTimeProperty(default=None, indexed=False)
    # List of uncommitted changes made by the user to the exploration.
    draft_change_list = ndb.JsonProperty(default=None)
    # Timestamp of when the change list was last updated.
    draft_change_list_last_updated = ndb.DateTimeProperty(default=None)
    # The exploration version that this change list applied to.
    draft_change_list_exp_version = ndb.IntegerProperty(default=None)
    # The version of the draft change list which was last saved by the user.
    # Can be zero if the draft is None or if the user has not committed
    # draft changes to this exploration since the draft_change_list_id
    # property was introduced.
    draft_change_list_id = ndb.IntegerProperty(default=0)
    # The user's preference for receiving suggestion emails for this
    # exploration.
    mute_suggestion_notifications = ndb.BooleanProperty(
        default=feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE)
    # The user's preference for receiving feedback emails for this
    # exploration.
    mute_feedback_notifications = ndb.BooleanProperty(
        default=feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE)

    @classmethod
    def _generate_id(cls, user_id, exploration_id):
        """Builds the instance key for the given user and exploration.

        Args:
            user_id: str. The id of the user.
            exploration_id: str. The id of the exploration.

        Returns:
            str. The generated key, of the form
                [user_id].[exploration_id].
        """
        return '{}.{}'.format(user_id, exploration_id)

    @classmethod
    def create(cls, user_id, exploration_id):
        """Creates a new ExplorationUserDataModel instance and returns it.

        Note that the client is responsible for actually saving this entity
        to the datastore.

        Args:
            user_id: str. The id of the user.
            exploration_id: str. The id of the exploration.

        Returns:
            ExplorationUserDataModel. The newly created
                ExplorationUserDataModel instance.
        """
        return cls(id=cls._generate_id(user_id, exploration_id),
                   user_id=user_id,
                   exploration_id=exploration_id)

    @classmethod
    def get(cls, user_id, exploration_id):
        """Gets the ExplorationUserDataModel for the given user and
        exploration ids.

        Args:
            user_id: str. The id of the user.
            exploration_id: str. The id of the exploration.

        Returns:
            ExplorationUserDataModel or None. The instance matching the
                given user_id and exploration_id, if any.
        """
        return super(ExplorationUserDataModel, cls).get(
            cls._generate_id(user_id, exploration_id), strict=False)

    @classmethod
    def get_multi(cls, user_ids, exploration_id):
        """Gets the ExplorationUserDataModels for the given users and
        exploration id.

        Args:
            user_ids: list(str). A list of user_ids.
            exploration_id: str. The id of the exploration.

        Returns:
            list. The ExplorationUserDataModel instances which match the
                given user_ids and exploration_id.
        """
        instance_ids = [
            cls._generate_id(user_id, exploration_id)
            for user_id in user_ids
        ]
        return super(ExplorationUserDataModel, cls).get_multi(instance_ids)

    @classmethod
    def export_data(cls, user_id):
        """Takeout: Export user-relevant properties of
        ExplorationUserDataModel.

        Args:
            user_id: str. The user_id denotes which user's data to extract.

        Returns:
            dict. The user-relevant properties of ExplorationUserDataModel
            in a python dict format, keyed by exploration id.
        """
        return {
            user_model.exploration_id: {
                'rating': user_model.rating,
                'rated_on': user_model.rated_on,
                'draft_change_list': user_model.draft_change_list,
                'draft_change_list_last_updated':
                    user_model.draft_change_list_last_updated,
                'draft_change_list_exp_version':
                    user_model.draft_change_list_exp_version,
                'draft_change_list_id': user_model.draft_change_list_id,
                'mute_suggestion_notifications':
                    user_model.mute_suggestion_notifications,
                'mute_feedback_notifications':
                    user_model.mute_feedback_notifications,
            }
            for user_model in cls.get_all().filter(cls.user_id == user_id)
        }
コード例 #10
0
ファイル: gae_models.py プロジェクト: xiexiaopeng2016/oppia
class ExpSummaryModel(base_models.BaseModel):
    """Summary model for an Oppia exploration.

    This should be used whenever the content blob of the exploration is not
    needed (e.g. in search results, etc).

    A ExpSummaryModel instance stores the following information:

        id, title, category, objective, language_code, tags,
        last_updated, created_on, status (private, public),
        community_owned, owner_ids, editor_ids,
        viewer_ids, version.

    The key of each instance is the exploration id.
    """

    # What this exploration is called.
    title = ndb.StringProperty(required=True)
    # The category this exploration belongs to.
    category = ndb.StringProperty(required=True, indexed=True)
    # The objective of this exploration.
    objective = ndb.TextProperty(required=True, indexed=False)
    # The ISO 639-1 code for the language this exploration is written in.
    language_code = ndb.StringProperty(required=True, indexed=True)
    # Tags associated with this exploration.
    tags = ndb.StringProperty(repeated=True, indexed=True)

    # Aggregate user-assigned ratings of the exploration.
    ratings = ndb.JsonProperty(default=None, indexed=False)

    # Scaled average rating for the exploration.
    scaled_average_rating = ndb.FloatProperty(indexed=True)

    # Time when the exploration model was last updated (not to be
    # confused with last_updated, which is the time when the
    # exploration *summary* model was last updated).
    exploration_model_last_updated = ndb.DateTimeProperty(indexed=True)
    # Time when the exploration model was created (not to be confused
    # with created_on, which is the time when the exploration *summary*
    # model was created).
    exploration_model_created_on = ndb.DateTimeProperty(indexed=True)
    # Time when the exploration was first published.
    first_published_msec = ndb.FloatProperty(indexed=True)

    # The publication status of this exploration.
    status = ndb.StringProperty(default=constants.ACTIVITY_STATUS_PRIVATE,
                                indexed=True,
                                choices=[
                                    constants.ACTIVITY_STATUS_PRIVATE,
                                    constants.ACTIVITY_STATUS_PUBLIC
                                ])

    # Whether this exploration is owned by the community.
    community_owned = ndb.BooleanProperty(required=True, indexed=True)

    # The user_ids of owners of this exploration.
    owner_ids = ndb.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who are allowed to edit this exploration.
    editor_ids = ndb.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who are allowed to voiceover this exploration.
    voice_artist_ids = ndb.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who are allowed to view this exploration.
    viewer_ids = ndb.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who have contributed (humans who have made a
    # positive (not just a revert) change to the exploration's content).
    # NOTE TO DEVELOPERS: contributor_ids and contributors_summary need to be
    # synchronized, meaning that the keys in contributors_summary need be
    # equal to the contributor_ids list.
    contributor_ids = ndb.StringProperty(indexed=True, repeated=True)
    # A dict representing the contributors of non-trivial commits to this
    # exploration. Each key of this dict is a user_id, and the corresponding
    # value is the number of non-trivial commits that the user has made.
    contributors_summary = ndb.JsonProperty(default={}, indexed=False)
    # The version number of the exploration after this commit. Only populated
    # for commits to an exploration (as opposed to its rights, etc.).
    version = ndb.IntegerProperty()
    # DEPRECATED in v2.8.3. Do not use.
    translator_ids = ndb.StringProperty(indexed=True, repeated=True)

    @staticmethod
    def get_deletion_policy():
        """Exploration summary is deleted only if the corresponding
        exploration is not public.

        Returns:
            DELETION_POLICY. The KEEP_IF_PUBLIC deletion policy.
        """
        return base_models.DELETION_POLICY.KEEP_IF_PUBLIC

    @classmethod
    def has_reference_to_user_id(cls, user_id):
        """Check whether ExpSummaryModel references user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        # keys_only avoids fetching full entities; existence is all we need.
        return cls.query(
            ndb.OR(cls.owner_ids == user_id, cls.editor_ids == user_id,
                   cls.voice_artist_ids == user_id, cls.viewer_ids == user_id,
                   cls.contributor_ids == user_id)).get(
                       keys_only=True) is not None

    @staticmethod
    def get_user_id_migration_policy():
        """ExpSummaryModel has multiple fields with user ID.

        Returns:
            USER_ID_MIGRATION_POLICY. The CUSTOM migration policy, since the
            user ID appears in several repeated fields.
        """
        return base_models.USER_ID_MIGRATION_POLICY.CUSTOM

    @classmethod
    def migrate_model(cls, old_user_id, new_user_id):
        """Migrate model to use the new user ID in the owner_ids, editor_ids,
        voice_artist_ids, viewer_ids and contributor_ids.

        Args:
            old_user_id: str. The old user ID.
            new_user_id: str. The new user ID.
        """
        migrated_models = []
        for model in cls.query(
                ndb.OR(cls.owner_ids == old_user_id,
                       cls.editor_ids == old_user_id,
                       cls.voice_artist_ids == old_user_id,
                       cls.viewer_ids == old_user_id,
                       cls.contributor_ids == old_user_id)).fetch():
            # Each repeated field is rewritten element-wise so that list
            # order and any other user ids are preserved.
            model.owner_ids = [
                new_user_id if owner_id == old_user_id else owner_id
                for owner_id in model.owner_ids
            ]
            model.editor_ids = [
                new_user_id if editor_id == old_user_id else editor_id
                for editor_id in model.editor_ids
            ]
            model.voice_artist_ids = [
                new_user_id if voice_art_id == old_user_id else voice_art_id
                for voice_art_id in model.voice_artist_ids
            ]
            model.viewer_ids = [
                new_user_id if viewer_id == old_user_id else viewer_id
                for viewer_id in model.viewer_ids
            ]
            model.contributor_ids = [
                new_user_id
                if contributor_id == old_user_id else contributor_id
                for contributor_id in model.contributor_ids
            ]
            migrated_models.append(model)
        # The migration must not bump last_updated, hence the flag.
        cls.put_multi(migrated_models, update_last_updated_time=False)

    @classmethod
    def get_non_private(cls):
        """Returns an iterable with non-private ExpSummary models.

        Returns:
            iterable. An iterable with non-private ExpSummary models.
        """
        return ExpSummaryModel.query().filter(
            ExpSummaryModel.status != constants.ACTIVITY_STATUS_PRIVATE
        ).filter(ExpSummaryModel.deleted == False  # pylint: disable=singleton-comparison
                 ).fetch(feconf.DEFAULT_QUERY_LIMIT)

    @classmethod
    def get_top_rated(cls, limit):
        """Fetches the top-rated exp summaries that are public in descending
        order of scaled_average_rating.

        Args:
            limit: int. The maximum number of results to return.

        Returns:
            iterable. An iterable with the top rated exp summaries that are
                public in descending order of scaled_average_rating.
        """
        return ExpSummaryModel.query().filter(
            ExpSummaryModel.status == constants.ACTIVITY_STATUS_PUBLIC).filter(
                ExpSummaryModel.deleted == False  # pylint: disable=singleton-comparison
            ).order(-ExpSummaryModel.scaled_average_rating).fetch(limit)

    @classmethod
    def get_private_at_least_viewable(cls, user_id):
        """Fetches private exp summaries that are at least viewable by the
        given user.

        Args:
            user_id: str. The id of the given user.

        Returns:
            iterable. An iterable with private exp summaries that are at
                least viewable by the given user.
        """
        return ExpSummaryModel.query().filter(
            ExpSummaryModel.status ==
            constants.ACTIVITY_STATUS_PRIVATE).filter(
                ndb.OR(ExpSummaryModel.owner_ids == user_id,
                       ExpSummaryModel.editor_ids == user_id,
                       ExpSummaryModel.voice_artist_ids == user_id,
                       ExpSummaryModel.viewer_ids == user_id)).filter(
                           ExpSummaryModel.deleted == False  # pylint: disable=singleton-comparison
                       ).fetch(feconf.DEFAULT_QUERY_LIMIT)

    @classmethod
    def get_at_least_editable(cls, user_id):
        """Fetches exp summaries that are at least editable by the given user.

        Args:
            user_id: str. The id of the given user.

        Returns:
            iterable. An iterable with exp summaries that are at least
                editable by the given user.
        """
        return ExpSummaryModel.query().filter(
            ndb.OR(ExpSummaryModel.owner_ids == user_id,
                   ExpSummaryModel.editor_ids == user_id)).filter(
                       ExpSummaryModel.deleted == False  # pylint: disable=singleton-comparison
                   ).fetch(feconf.DEFAULT_QUERY_LIMIT)

    @classmethod
    def get_recently_published(cls, limit):
        """Fetches exp summaries that are recently published.

        Args:
            limit: int. The maximum number of results to return.

        Returns:
            An iterable with exp summaries that are recently published. The
                returned list is sorted by the time of publication with latest
                being first in the list.
        """
        return ExpSummaryModel.query().filter(
            ExpSummaryModel.status == constants.ACTIVITY_STATUS_PUBLIC).filter(
                ExpSummaryModel.deleted == False  # pylint: disable=singleton-comparison
            ).order(-ExpSummaryModel.first_published_msec).fetch(limit)

    @staticmethod
    def get_export_policy():
        """Model data has already been exported as a part of the
        ExplorationModel and thus does not need a separate export_data
        function.

        Returns:
            EXPORT_POLICY. The NOT_APPLICABLE export policy.
        """
        return base_models.EXPORT_POLICY.NOT_APPLICABLE

    def verify_model_user_ids_exist(self):
        """Check if UserSettingsModel exists for all the ids in owner_ids,
        editor_ids, voice_artist_ids, viewer_ids and contributor_ids.

        Returns:
            bool. Whether a UserSettingsModel exists for every collected
            user id (system users are excluded from the check).
        """
        user_ids = (self.owner_ids + self.editor_ids + self.voice_artist_ids +
                    self.viewer_ids + self.contributor_ids)
        user_ids = [
            user_id for user_id in user_ids
            if user_id not in feconf.SYSTEM_USERS
        ]
        user_settings_models = user_models.UserSettingsModel.get_multi(
            user_ids, include_deleted=True)
        return all(model is not None for model in user_settings_models)
コード例 #11
0
class gaetk_Credential(ndb.Model):
    """Encodes a user and his permissions."""
    _default_indexed = True  # ensure additional properties get indexed
    uid = ndb.StringProperty(
        required=True)  # == key.id(), or supplied by an external system
    email = ndb.StringProperty(required=False)
    secret = ndb.StringProperty(
        required=True, indexed=False)  # "Password" - NOT user-settable
    name = ndb.StringProperty(required=False, indexed=False, default='')
    text = ndb.StringProperty(required=False, indexed=False)
    permissions = ndb.StringProperty(repeated=True, indexed=False)

    sysadmin = ndb.BooleanProperty(default=False, indexed=True)
    staff = ndb.BooleanProperty(default=False, indexed=True)

    # Free-form metadata. NOTE(review): default={} is shared across
    # instances until first write — presumably JsonProperty copies on
    # assignment; confirm before mutating the default in place.
    meta = ndb.JsonProperty(indexed=False, default={})
    org_designator = ndb.StringProperty(
        required=False)  # ref to the "parent", e.g. Customer Number
    external_uid = ndb.StringProperty(required=False)

    last_seen = ndb.DateTimeProperty(required=False, indexed=False)

    deleted = ndb.BooleanProperty(default=False)
    created_at = ndb.DateTimeProperty(auto_now_add=True)
    updated_at = ndb.DateTimeProperty(auto_now=True)

    @classmethod
    def create(cls, uid=None, org_designator=None, **kwargs):
        """Creates a credential Object generating a random secret and a random uid if needed."""
        # secret hopefully contains about 40 bits of entropy - more than most passwords
        if not uid:
            uid = "u%s" % (cls.allocate_ids(1)[0])
        if 'secret' not in kwargs:
            kwargs['secret'] = guid128()[1:17]
        if 'permissions' not in kwargs:
            kwargs['permissions'] = []
        # get_or_insert: existing entities with this uid win over kwargs.
        ret = cls.get_or_insert(uid,
                                uid=uid,
                                org_designator=org_designator,
                                **kwargs)
        return ret

    def __str__(self):
        return str(self.uid)

    @classmethod
    def from_gaetk1(cls, cred1):
        """Generate `gaetk_Credential` from legacy `NdbCredential` and store it."""
        cred2 = cls.create(uid=cred1.uid,
                           email=cred1.email,
                           secret=cred1.secret,
                           permissions=cred1.permissions,
                           text=cred1.text,
                           sysadmin=cred1.admin)
        cred2.put()
        # Preserve the legacy entity's original timestamps.
        cred2.populate(created_at=cred1.created_at,
                       updated_at=cred1.updated_at)
        # NOTE(review): the guard checks `org_designator` but reads
        # `kundennr` — presumably the legacy field name; confirm both
        # attributes exist on NdbCredential.
        if getattr(cred1, 'org_designator', None):
            cred2.meta['org_designator'] = cred1.kundennr
            cred2.org_designator = cred2.meta['org_designator']
        if getattr(cred1, 'name', None) and not cred2.name:
            cred2.name = cred1.name
        cred2.put()
        return cred2
コード例 #12
0
ファイル: gae_models.py プロジェクト: xiexiaopeng2016/oppia
class ExplorationModel(base_models.VersionedModel):
    """Versioned storage model for an Oppia exploration.

    This class should only be imported by the exploration services file
    and the exploration model test file.
    """
    SNAPSHOT_METADATA_CLASS = ExplorationSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = ExplorationSnapshotContentModel
    ALLOW_REVERT = True

    # What this exploration is called.
    title = ndb.StringProperty(required=True)
    # The category this exploration belongs to.
    category = ndb.StringProperty(required=True, indexed=True)
    # The objective of this exploration.
    objective = ndb.TextProperty(default='', indexed=False)
    # The ISO 639-1 code for the language this exploration is written in.
    language_code = ndb.StringProperty(default=constants.DEFAULT_LANGUAGE_CODE,
                                       indexed=True)
    # Tags (topics, skills, concepts, etc.) associated with this
    # exploration.
    tags = ndb.StringProperty(repeated=True, indexed=True)
    # A blurb for this exploration.
    blurb = ndb.TextProperty(default='', indexed=False)
    # 'Author notes' for this exploration.
    author_notes = ndb.TextProperty(default='', indexed=False)

    # The version of the states blob schema.
    states_schema_version = ndb.IntegerProperty(required=True,
                                                default=0,
                                                indexed=True)
    # The name of the initial state of this exploration.
    init_state_name = ndb.StringProperty(required=True, indexed=False)
    # A dict representing the states of this exploration. This dict should
    # not be empty.
    states = ndb.JsonProperty(default={}, indexed=False)
    # The dict of parameter specifications associated with this exploration.
    # Each specification is a dict whose keys are param names and whose values
    # are each dicts with a single key, 'obj_type', whose value is a string.
    param_specs = ndb.JsonProperty(default={}, indexed=False)
    # The list of parameter changes to be performed once at the start of a
    # reader's encounter with an exploration.
    param_changes = ndb.JsonProperty(repeated=True, indexed=False)
    # A boolean indicating whether automatic text-to-speech is enabled in
    # this exploration.
    auto_tts_enabled = ndb.BooleanProperty(default=True, indexed=True)
    # A boolean indicating whether correctness feedback is enabled in this
    # exploration.
    correctness_feedback_enabled = ndb.BooleanProperty(default=False,
                                                       indexed=True)

    # DEPRECATED in v2.0.0.rc.2. Do not use. Retaining it here because deletion
    # caused GAE to raise an error on fetching a specific version of the
    # exploration model.
    # TODO(sll): Fix this error and remove this property.
    skill_tags = ndb.StringProperty(repeated=True, indexed=True)
    # DEPRECATED in v2.0.1. Do not use.
    # TODO(sll): Remove this property from the model.
    default_skin = ndb.StringProperty(default='conversation_v1')
    # DEPRECATED in v2.5.4. Do not use.
    skin_customizations = ndb.JsonProperty(indexed=False)

    @staticmethod
    def get_deletion_policy():
        """Exploration is deleted only if it is not public.

        Returns:
            DELETION_POLICY. The KEEP_IF_PUBLIC deletion policy.
        """
        return base_models.DELETION_POLICY.KEEP_IF_PUBLIC

    @staticmethod
    def get_export_policy():
        """Model does not contain user data.

        Returns:
            EXPORT_POLICY. The NOT_APPLICABLE export policy.
        """
        return base_models.EXPORT_POLICY.NOT_APPLICABLE

    @classmethod
    def has_reference_to_user_id(cls, user_id):
        """Check whether ExplorationModel or its snapshots references the given
        user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        # Delegates the check to the snapshot metadata model class.
        return cls.SNAPSHOT_METADATA_CLASS.exists_for_user_id(user_id)

    @staticmethod
    def get_user_id_migration_policy():
        """ExplorationModel doesn't have any field with user ID.

        Returns:
            USER_ID_MIGRATION_POLICY. The NOT_APPLICABLE migration policy.
        """
        return base_models.USER_ID_MIGRATION_POLICY.NOT_APPLICABLE

    @classmethod
    def get_exploration_count(cls):
        """Returns the total number of explorations.

        Returns:
            int. The count of explorations returned by get_all().
        """
        return cls.get_all().count()

    def _trusted_commit(self, committer_id, commit_type, commit_message,
                        commit_cmds):
        """Records the commit in the exploration commit log.

        Note that this extends the superclass method: after the base
        commit bookkeeping, an ExplorationCommitLogEntryModel is created
        and saved.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_type: str. The type of commit. Possible values are in
                core.storage.base_models.COMMIT_TYPE_CHOICES.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information
                to reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and then additional arguments for that command.
        """
        super(ExplorationModel, self)._trusted_commit(
            committer_id, commit_type, commit_message, commit_cmds)

        # Resolve the committer's username; fall back to '' when the user
        # settings model does not exist.
        user_settings = user_models.UserSettingsModel.get_by_id(committer_id)
        username = user_settings.username if user_settings else ''

        rights_model = ExplorationRightsModel.get_by_id(self.id)

        # TODO(msl): test if put_async() leads to any problems (make
        # sure summary dicts get updated correctly when explorations
        # are changed).
        commit_log_entry = ExplorationCommitLogEntryModel.create(
            self.id, self.version, committer_id, username, commit_type,
            commit_message, commit_cmds, rights_model.status,
            rights_model.community_owned)
        commit_log_entry.exploration_id = self.id
        commit_log_entry.put()

    @classmethod
    def delete_multi(cls,
                     entity_ids,
                     committer_id,
                     commit_message,
                     force_deletion=False):
        """Deletes the given cls instances with the given entity_ids.

        Note that this extends the superclass method.

        Args:
            entity_ids: list(str). Ids of entities to delete.
            committer_id: str. The user_id of the user who committed the change.
            commit_message: str. The commit description message.
            force_deletion: bool. If True these models are deleted completely
                from storage, otherwise they are only marked as deleted.
                Default is False.
        """
        super(ExplorationModel,
              cls).delete_multi(entity_ids,
                                committer_id,
                                commit_message,
                                force_deletion=force_deletion)

        # Commit log entries are written only for soft deletions; a forced
        # (permanent) deletion leaves no log trail.
        if not force_deletion:
            committer_user_settings_model = (
                user_models.UserSettingsModel.get_by_id(committer_id))
            committer_username = (committer_user_settings_model.username
                                  if committer_user_settings_model else '')

            commit_log_models = []
            # Deleted models are included so that commit logs can still be
            # written for entities that were already marked deleted.
            exp_rights_models = ExplorationRightsModel.get_multi(
                entity_ids, include_deleted=True)
            versioned_models = cls.get_multi(entity_ids, include_deleted=True)

            versioned_and_exp_rights_models = python_utils.ZIP(
                versioned_models, exp_rights_models)
            for model, rights_model in versioned_and_exp_rights_models:
                exploration_commit_log = ExplorationCommitLogEntryModel.create(
                    model.id, model.version, committer_id, committer_username,
                    cls._COMMIT_TYPE_DELETE, commit_message,
                    [{
                        'cmd': cls.CMD_DELETE_COMMIT
                    }], rights_model.status, rights_model.community_owned)
                exploration_commit_log.exploration_id = model.id
                commit_log_models.append(exploration_commit_log)
            # NOTE(review): the async put is fire-and-forget here (its future
            # is not awaited) — confirm this is intentional.
            ndb.put_multi_async(commit_log_models)
コード例 #13
0
ファイル: gae_models.py プロジェクト: dheeraj1997/oppia
class QuestionModel(base_models.VersionedModel):
    """Model for storing Questions.

    The ID of instances of this class are in form of random hash of 12 chars.
    """

    SNAPSHOT_METADATA_CLASS = QuestionSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = QuestionSnapshotContentModel
    ALLOW_REVERT = True

    # An object representing the question state data.
    question_state_data = ndb.JsonProperty(indexed=False, required=True)
    # The schema version for the question state data.
    question_state_data_schema_version = ndb.IntegerProperty(
        required=True, indexed=True)
    # The ISO 639-1 code for the language this question is written in.
    language_code = ndb.StringProperty(required=True, indexed=True)
    # The skill ids linked to this question.
    linked_skill_ids = ndb.StringProperty(
        indexed=True, repeated=True)
    # The optional misconception ids marked as not relevant to the question.
    inapplicable_misconception_ids = ndb.StringProperty(
        indexed=True, repeated=True)

    @staticmethod
    def get_deletion_policy():
        """Question should be kept but the creator should be anonymized."""
        return base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE

    @staticmethod
    def get_export_policy():
        """Model does not contain user data."""
        return base_models.EXPORT_POLICY.NOT_APPLICABLE

    @classmethod
    def has_reference_to_user_id(cls, user_id):
        """Check whether QuestionModel snapshots references the given user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        return cls.SNAPSHOT_METADATA_CLASS.exists_for_user_id(user_id)

    @classmethod
    def _get_new_id(cls):
        """Generates a unique ID for the question in the form of random hash
        of 12 chars.

        Returns:
            str. ID of the new QuestionModel instance.

        Raises:
            Exception. The ID generator for QuestionModel is
                producing too many collisions.
        """

        # Retry a bounded number of times so a pathological collision rate
        # raises instead of looping forever.
        for _ in python_utils.RANGE(base_models.MAX_RETRIES):
            new_id = utils.convert_to_hash(
                python_utils.UNICODE(
                    utils.get_random_int(base_models.RAND_RANGE)),
                base_models.ID_LENGTH)
            if not cls.get_by_id(new_id):
                return new_id

        raise Exception(
            'The id generator for QuestionModel is producing too many '
            'collisions.')

    def _trusted_commit(
            self, committer_id, commit_type, commit_message, commit_cmds):
        """Record the event to the commit log after the model commit.

        Note that this extends the superclass method.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_type: str. The type of commit. Possible values are in
                core.storage.base_models.COMMIT_TYPE_CHOICES.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information to
                reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and then additional arguments for that command.
        """
        super(QuestionModel, self)._trusted_commit(
            committer_id, commit_type, commit_message, commit_cmds)

        question_commit_log = QuestionCommitLogEntryModel.create(
            self.id, self.version, committer_id, commit_type, commit_message,
            commit_cmds, constants.ACTIVITY_STATUS_PUBLIC, False
        )
        question_commit_log.question_id = self.id
        question_commit_log.put()

    @classmethod
    def create(
            cls, question_state_data, language_code, version, linked_skill_ids,
            inapplicable_misconception_ids):
        """Creates a new QuestionModel entry.

        Note that the returned model is NOT put into the datastore;
        callers are responsible for saving it.

        Args:
            question_state_data: dict. A dict representing the question
                state data.
            language_code: str. The ISO 639-1 code for the language this
                question is written in.
            version: str. The version of the question.
            linked_skill_ids: list(str). The skill ids linked to the question.
            inapplicable_misconception_ids: list(str). The optional
                misconception ids marked as not applicable to the question.

        Returns:
            QuestionModel. Instance of the new QuestionModel entry.

        Raises:
            Exception. A model with the same ID already exists.
        """
        instance_id = cls._get_new_id()
        question_model_instance = cls(
            id=instance_id,
            question_state_data=question_state_data,
            language_code=language_code,
            version=version,
            linked_skill_ids=linked_skill_ids,
            inapplicable_misconception_ids=inapplicable_misconception_ids)

        return question_model_instance

    @classmethod
    def put_multi_questions(cls, questions):
        """Puts multiple question models into the datastore.

        Args:
            questions: list(Question). The list of question objects
                to put into the datastore.
        """
        cls.put_multi(questions)
コード例 #14
0
class UserStatsModel(base_models.BaseMapReduceBatchResultsModel):
    """User-specific statistics keyed by user id.
    Values for total plays and average ratings are recorded by aggregating over
    all explorations owned by a user.
    Impact scores are calculated over explorations for which a user
    is listed as a contributor

    The impact score for a particular user is defined as:
    Sum of (
    ln(playthroughs) * (ratings_scaler) * (average(ratings) - 2.5))
    *(multiplier),
    where multiplier = 10, and ratings_scaler is .1 * (number of ratings)
    if there are < 10 ratings for that exploration.

    The impact score is 0 for an exploration with 0 playthroughs or with an
    average rating of less than 2.5.
    """
    # The impact score.
    impact_score = ndb.FloatProperty(indexed=True)

    # The total plays of all the explorations.
    total_plays = ndb.IntegerProperty(indexed=True, default=0)

    # The average of average ratings of all explorations.
    average_ratings = ndb.FloatProperty(indexed=True)

    # The number of ratings of all explorations.
    num_ratings = ndb.IntegerProperty(indexed=True, default=0)

    # A list which stores history of creator stats.
    # Each item in the list is a Json object keyed by a datetime string and
    # value as another Json object containing key-value pairs to be stored.
    # [
    #  {
    #   (date_1): {
    #    "average_ratings": 4.3,
    #    "total_plays": 40
    #   }
    #  },
    #  {
    #   (date_2): {
    #    "average_ratings": 4.1,
    #    "total_plays": 60
    #   }
    #  },
    # ]
    weekly_creator_stats_list = ndb.JsonProperty(repeated=True)

    # The version of dashboard stats schema.
    schema_version = (ndb.IntegerProperty(
        required=True,
        default=feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION,
        indexed=True))

    @classmethod
    def get_or_create(cls, user_id):
        """Creates a new UserStatsModel instance, if it does not already
        exist.

        Args:
            user_id: str. The id of the user; used as the model id.

        Returns:
            UserStatsModel. The existing model for user_id, or a new
            (not yet saved) instance if none exists.
        """
        entity = cls.get(user_id, strict=False)
        if not entity:
            entity = cls(id=user_id)
        return entity
コード例 #15
0
class UserRawJson(ndb.Model):
    """Stores an arbitrary JSON payload associated with a user."""

    # The user's email address.
    email = ndb.StringProperty(indexed=True, required=True)
    # Free-form JSON content for the user.
    content = ndb.JsonProperty(indexed=False)
コード例 #16
0
class UserQueryModel(base_models.BaseModel):
    """Model for storing result of queries.

    The id of each instance of this model is alphanumeric id of length 12
    unique to each model instance.
    """
    # Options for a query specified by query submitter.
    # Query option to specify whether user has created or edited one or more
    # explorations in last n days. This only returns users who have ever
    # created or edited at least one exploration.
    inactive_in_last_n_days = ndb.IntegerProperty(default=None)
    # Query option to check whether given user has logged in
    # since last n days.
    has_not_logged_in_for_n_days = ndb.IntegerProperty(default=None)
    # Query option to check whether user has created at least
    # n explorations.
    created_at_least_n_exps = ndb.IntegerProperty(default=None)
    # Query option to check whether user has created fewer than
    # n explorations.
    created_fewer_than_n_exps = ndb.IntegerProperty(default=None)
    # Query option to check if user has edited at least n explorations.
    edited_at_least_n_exps = ndb.IntegerProperty(default=None)
    # Query option to check if user has edited fewer than n explorations.
    edited_fewer_than_n_exps = ndb.IntegerProperty(default=None)
    # List of all user_ids who satisfy all parameters given in above query.
    # This list will be empty initially. Once query has completed its execution
    # this list will be populated with all qualifying user ids.
    user_ids = ndb.JsonProperty(default=[], compressed=True)
    # ID of the user who submitted the query.
    submitter_id = ndb.StringProperty(indexed=True, required=True)
    # ID of the instance of BulkEmailModel which stores information
    # about sent emails.
    sent_email_model_id = ndb.StringProperty(default=None, indexed=True)
    # Current status of the query.
    query_status = ndb.StringProperty(indexed=True,
                                      choices=[
                                          feconf.USER_QUERY_STATUS_PROCESSING,
                                          feconf.USER_QUERY_STATUS_COMPLETED,
                                          feconf.USER_QUERY_STATUS_ARCHIVED,
                                          feconf.USER_QUERY_STATUS_FAILED
                                      ])

    @classmethod
    def fetch_page(cls, page_size, cursor):
        """Fetches a list of all query_models sorted by creation date.

        Args:
            page_size: int. The maximum number of entities to be returned.
            cursor: str or None. The list of returned entities starts from this
                datastore cursor.

        Returns:
            3-tuple of (query_models, cursor, more) as described in fetch_page()
            at:
            https://developers.google.com/appengine/docs/python/ndb/queryclass,
            where:
                query_models: List of UserQueryModel instances.
                next_cursor: str or None. A query cursor pointing to the next
                    batch of results. If there are no more results, this might
                    be None.
                more: bool. If True, there are probably more results after
                    this batch. If False, there are no further results after
                    this batch.
        """
        # A None urlsafe value yields a cursor positioned at the start of
        # the result set, so the first page needs no special-casing.
        cursor = datastore_query.Cursor(urlsafe=cursor)
        # Results are ordered newest-first by creation timestamp.
        query_models, next_cursor, more = (
            cls.query().order(-cls.created_on).fetch_page(page_size,
                                                          start_cursor=cursor))
        # Only expose a cursor when more results probably exist.
        next_cursor = next_cursor.urlsafe() if (next_cursor and more) else None
        return query_models, next_cursor, more
コード例 #17
0
class OverlappingUser(ndb.Model):
    """Record of a user overlapping multiple groups."""

    # Number of memberships counted for this user.
    # NOTE(review): semantics inferred from the name only — confirm
    # against the code that writes this model.
    membership_count = ndb.IntegerProperty()
    # Compressed JSON report detailing the overlap.
    report = ndb.JsonProperty(compressed=True)
コード例 #18
0
class SuggestionModel(base_models.BaseModel):
    """A suggestion submitted by a learner for an exploration state.

    The id of each instance is the id of the corresponding thread.
    """

    # ID of the user who submitted the suggestion.
    author_id = ndb.StringProperty(required=True, indexed=True)
    # ID of the corresponding exploration.
    exploration_id = ndb.StringProperty(required=True, indexed=True)
    # The exploration version for which the suggestion was made.
    exploration_version = ndb.IntegerProperty(required=True, indexed=True)
    # Name of the corresponding state.
    state_name = ndb.StringProperty(required=True, indexed=True)
    # Learner-provided description of suggestion.
    description = ndb.TextProperty(required=True, indexed=False)
    # The state's content after the suggested edits. For legacy reasons this
    # contains the keys 'type' (always 'text') and 'value' (the HTML string
    # representing the actual content).
    state_content = ndb.JsonProperty(required=True, indexed=False)

    @classmethod
    def _convert_suggestion_html_to_legacy_state_content(cls, suggestion_html):
        """Wraps a suggestion HTML string in a legacy state content dict.

        For historical reasons, the resulting dict has exactly two keys:
        'type', whose value is always 'text', and 'value', which holds
        the learner's suggestion as an HTML string.

        Args:
            suggestion_html: str. The HTML representing the suggestion.

        Returns:
            dict. The legacy content object that corresponds to the given
            suggestion HTML.
        """
        legacy_content = {'type': 'text', 'value': suggestion_html}
        return legacy_content

    @classmethod
    def _get_instance_id(cls, exploration_id, thread_id):
        """Builds the full thread ID for an exploration/thread ID pair.

        Args:
            exploration_id: str. ID of the exploration the thread
                belongs to.
            thread_id: str. ID of the thread.

        Returns:
            str. The full thread ID, of the form
            '<exploration_id>.<thread_id>'.
        """
        return '%s.%s' % (exploration_id, thread_id)

    @classmethod
    def create(cls, exploration_id, thread_id, author_id, exploration_version,
               state_name, description, suggestion_html):
        """Creates and saves a new SuggestionModel entry.

        Args:
            exploration_id: str. ID of the corresponding exploration.
            thread_id: str. ID of the corresponding thread.
            author_id: str. ID of the user who submitted the suggestion.
            exploration_version: int. Exploration version for which the
                suggestion was made.
            state_name: str. ID of the state the suggestion is for.
            description: str. Learner-provided description of suggestion.
            suggestion_html: str. The content of the suggestion.

        Raises:
            Exception: There is already a feedback thread with the same
                exploration_id and thread_id.
        """
        instance_id = cls._get_instance_id(exploration_id, thread_id)
        # Refuse to clobber an existing thread with the same id.
        if cls.get_by_id(instance_id) is not None:
            raise Exception(
                'There is already a feedback thread with the given '
                'thread id: %s' % instance_id)
        suggestion = cls(
            id=instance_id,
            author_id=author_id,
            exploration_id=exploration_id,
            exploration_version=exploration_version,
            state_name=state_name,
            description=description,
            state_content=(
                cls._convert_suggestion_html_to_legacy_state_content(
                    suggestion_html)))
        suggestion.put()

    def get_suggestion_html(self):
        """Retrieves the suggestion HTML of this instance as a string.

        Returns:
            str. The suggested content HTML string.
        """
        return self.state_content['value']

    @classmethod
    def get_by_exploration_and_thread_id(cls, exploration_id, thread_id):
        """Gets a suggestion by the corresponding exploration and thread IDs.

        Args:
            exploration_id: str. ID of the exploration to which the
                suggestion belongs.
            thread_id: str. Thread ID of the suggestion thread.

        Returns:
            SuggestionModel or None. Suggestion related to the given
            exploration and thread IDs, or None if no such SuggestionModel
            exists.
        """
        instance_id = cls._get_instance_id(exploration_id, thread_id)
        return cls.get_by_id(instance_id)
コード例 #19
0
class Client(ndb.Model):
    """Model and individual client."""
    # Human-readable client name.
    name = ndb.StringProperty()
    # Free-form description of the client.
    description = ndb.StringProperty()
    # JSON payload; presumably a list of screen IDs shown by this
    # client — TODO confirm against callers.
    screen_list_id = ndb.JsonProperty()
コード例 #20
0
class Schedule(ndb.Model):
    """Holds a JSON-encoded schedule payload."""

    # JSON payload; presumably one entry per shard — TODO confirm
    # against the code that writes this model.
    shards = ndb.JsonProperty()
コード例 #21
0
ファイル: gae_models.py プロジェクト: zhengxit/oppia
class StartExplorationEventLogEntryModel(base_models.BaseModel):
    """An event triggered by a student starting the exploration.

    Event schema documentation
    --------------------------
    V1:
        event_type: 'start'
        exploration_id: id of exploration currently being played
        exploration_version: version of exploration
        state_name: Name of current state
        client_time_spent_in_secs: 0
        play_type: 'normal'
        created_on date
        event_schema_version: 1
        session_id: ID of current student's session
        params: current parameter values, in the form of a map of parameter
            name to value
    """
    # This value should be updated in the event of any event schema change.
    CURRENT_EVENT_SCHEMA_VERSION = 1

    # Which specific type of event this is
    event_type = ndb.StringProperty(indexed=True)
    # Id of exploration currently being played.
    exploration_id = ndb.StringProperty(indexed=True)
    # Current version of exploration.
    exploration_version = ndb.IntegerProperty(indexed=True)
    # Name of current state.
    state_name = ndb.StringProperty(indexed=True)
    # ID of current student's session
    session_id = ndb.StringProperty(indexed=True)
    # Time since start of this state before this event occurred (in sec).
    client_time_spent_in_secs = ndb.FloatProperty(indexed=True)
    # Current parameter values, map of parameter name to value
    params = ndb.JsonProperty(indexed=False)
    # Which type of play-through this is (editor preview, or learner view).
    # Note that the 'playtest' option is legacy, since editor preview
    # playthroughs no longer emit events.
    play_type = ndb.StringProperty(indexed=True,
                                   choices=[feconf.PLAY_TYPE_PLAYTEST,
                                            feconf.PLAY_TYPE_NORMAL])
    # The version of the event schema used to describe an event of this type.
    # Details on the schema are given in the docstring for this class.
    event_schema_version = ndb.IntegerProperty(
        indexed=True, default=CURRENT_EVENT_SCHEMA_VERSION)

    @classmethod
    def get_new_event_entity_id(cls, exp_id, session_id):
        """Generates an entity id seeded with the current UTC time in
        milliseconds, the exploration id and the session id.
        """
        timestamp = datetime.datetime.utcnow()
        return cls.get_new_id('%s:%s:%s' % (
            utils.get_time_in_millisecs(timestamp),
            exp_id,
            session_id))

    @classmethod
    def create(cls, exp_id, exp_version, state_name, session_id,
               params, play_type, unused_version=1):
        """Creates a new start exploration event."""
        # TODO(sll): Some events currently do not have an entity id that was
        # set using this method; it was randomly set instead due to an error.
        # Might need to migrate them.
        entity_id = cls.get_new_event_entity_id(
            exp_id, session_id)
        # Per the V1 schema, client_time_spent_in_secs is always 0 for
        # 'start' events.
        start_event_entity = cls(
            id=entity_id,
            event_type=feconf.EVENT_TYPE_START_EXPLORATION,
            exploration_id=exp_id,
            exploration_version=exp_version,
            state_name=state_name,
            session_id=session_id,
            client_time_spent_in_secs=0.0,
            params=params,
            play_type=play_type)
        start_event_entity.put()
コード例 #22
0
ファイル: template.py プロジェクト: lzpel/randomchan
class base(ndb.Model):
    """Common base model: timestamps, classification, relation keys,
    searchable text fields, sort helpers, blob attachments and free-form
    JSON payloads.

    Data that can be recomputed from other entities (e.g. comment or
    mylist counts) is deliberately not stored here, to keep entities
    small.
    """

    # Timestamps: creation time and last-modified time.
    bone = ndb.DateTimeProperty(auto_now_add=True)
    last = ndb.DateTimeProperty(auto_now=True)

    # Classification/category of the entity.
    cate = ndb.StringProperty(default=u"what")

    # Keys for relation queries.
    kusr = ndb.KeyProperty()  # Author of this entity.
    kint = ndb.KeyProperty()  # Target entity (e.g. of a comment).
    kner = ndb.KeyProperty(repeated=True)
    kfar = ndb.KeyProperty(repeated=True)

    # Text-search fields.
    name = ndb.StringProperty()
    text = ndb.TextProperty()
    mail = ndb.StringProperty()
    word = ndb.StringProperty()
    tags = ndb.StringProperty(repeated=True)

    # Sort helpers.
    len0 = ndb.IntegerProperty()
    len1 = ndb.IntegerProperty()
    len2 = ndb.IntegerProperty()
    lenA = ndb.IntegerProperty()
    lenB = ndb.IntegerProperty()
    lenC = ndb.IntegerProperty()

    # Attached blobstore files.
    blob = ndb.BlobKeyProperty(repeated=True)

    # Free-form JSON payloads.
    data = ndb.JsonProperty()
    temp = ndb.JsonProperty()

    @classmethod
    def get(cls, **kwargs):
        """Fetches a single entity by 'urlsafe' key or by 'id'.

        Returns None when neither keyword is supplied (or when the
        lookup finds nothing).
        """
        if "urlsafe" in kwargs:
            return ndb.Key(urlsafe=kwargs["urlsafe"]).get()
        if "id" in kwargs:
            return cls.get_by_id(kwargs["id"])
        return None

    @classmethod
    def delete_multi(cls, keys):
        """Deletes all entities identified by the given keys."""
        ndb.delete_multi(keys)

    @classmethod
    def get_multi(cls, keys):
        """Fetches the entities for the given keys.

        Bug fix: the original implementation called ndb.get_multi()
        without returning its result, so callers always received None.

        Returns:
            list. The entities (or None placeholders) for the keys.
        """
        return ndb.get_multi(keys)

    @classmethod
    def put_multi(cls, keys):
        """Stores the given entities.

        Returns:
            list. The keys of the stored entities (previously discarded).
        """
        return ndb.put_multi(keys)

    @classmethod
    def _pre_delete_hook(cls, key):
        """Cascades deletion: removes the entity's attached blobs and any
        entities that reference it via kusr or kint.
        """
        entity = key.get()
        blobstore.delete(entity.blob)
        referencing_keys = cls.query(
            ndb.OR(cls.kusr == entity.key, cls.kint == entity.key)
        ).fetch(keys_only=True)
        ndb.delete_multi(referencing_keys)
コード例 #23
0
ファイル: gae_models.py プロジェクト: zhengxit/oppia
class CompleteExplorationEventLogEntryModel(base_models.BaseModel):
    """An event triggered by a learner reaching a terminal state of an
    exploration.

    Event schema documentation
    --------------------------
    V1:
        event_type: 'complete'
        exploration_id: id of exploration currently being played
        exploration_version: version of exploration
        state_name: Name of the terminal state
        play_type: 'normal'
        created_on date
        event_schema_version: 1
        session_id: ID of current student's session
        params: current parameter values, in the form of a map of parameter
            name to value
        client_time_spent_in_secs: time spent in this state before the event
            was triggered

    Note: shortly after the release of v2.0.0.rc.3, some of these events
    were migrated from MaybeLeaveExplorationEventLogEntryModel. These events
    have the wrong 'last updated' timestamp. However, the 'created_on'
    timestamp is the same as that of the original model.
    """
    # This value should be updated in the event of any event schema change.
    CURRENT_EVENT_SCHEMA_VERSION = 1

    # Which specific type of event this is
    event_type = ndb.StringProperty(indexed=True)
    # Id of exploration currently being played.
    exploration_id = ndb.StringProperty(indexed=True)
    # Current version of exploration.
    exploration_version = ndb.IntegerProperty(indexed=True)
    # Name of current state.
    state_name = ndb.StringProperty(indexed=True)
    # ID of current student's session
    session_id = ndb.StringProperty(indexed=True)
    # Time since start of this state before this event occurred (in sec).
    # Note: Some of these events were migrated from StateHit event instances
    # which did not record timestamp data. For this, we use a placeholder
    # value of 0.0 for client_time_spent_in_secs.
    client_time_spent_in_secs = ndb.FloatProperty(indexed=True)
    # Current parameter values, map of parameter name to value
    params = ndb.JsonProperty(indexed=False)
    # Which type of play-through this is (editor preview, or learner view).
    # Note that the 'playtest' option is legacy, since editor preview
    # playthroughs no longer emit events.
    play_type = ndb.StringProperty(indexed=True,
                                   choices=[feconf.PLAY_TYPE_PLAYTEST,
                                            feconf.PLAY_TYPE_NORMAL])
    # The version of the event schema used to describe an event of this type.
    # Details on the schema are given in the docstring for this class.
    event_schema_version = ndb.IntegerProperty(
        indexed=True, default=CURRENT_EVENT_SCHEMA_VERSION)

    @classmethod
    def get_new_event_entity_id(cls, exp_id, session_id):
        """Generates an entity id seeded with the current UTC time in
        milliseconds, the exploration id and the session id.
        """
        timestamp = datetime.datetime.utcnow()
        return cls.get_new_id('%s:%s:%s' % (
            utils.get_time_in_millisecs(timestamp),
            exp_id,
            session_id))

    @classmethod
    def create(cls, exp_id, exp_version, state_name, session_id,
               client_time_spent_in_secs, params, play_type):
        """Creates a new exploration completion event."""
        entity_id = cls.get_new_event_entity_id(exp_id, session_id)
        complete_event_entity = cls(
            id=entity_id,
            event_type=feconf.EVENT_TYPE_COMPLETE_EXPLORATION,
            exploration_id=exp_id,
            exploration_version=exp_version,
            state_name=state_name,
            session_id=session_id,
            client_time_spent_in_secs=client_time_spent_in_secs,
            params=params,
            play_type=play_type)
        complete_event_entity.put()
コード例 #24
0
class SubtopicPageModel(base_models.VersionedModel):
    """Model for storing Subtopic pages.

    This stores the HTML data for a subtopic page.
    """
    SNAPSHOT_METADATA_CLASS = SubtopicPageSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = SubtopicPageSnapshotContentModel
    ALLOW_REVERT = False

    # The topic id that this subtopic is a part of.
    topic_id = ndb.StringProperty(required=True, indexed=True)
    # The json data of the subtopic consisting of subtitled_html,
    # recorded_voiceovers and written_translations fields.
    page_contents = ndb.JsonProperty(required=True)
    # The schema version for the page_contents field.
    page_contents_schema_version = ndb.IntegerProperty(required=True,
                                                       indexed=True)
    # The ISO 639-1 code for the language this subtopic page is written in.
    language_code = ndb.StringProperty(required=True, indexed=True)

    @staticmethod
    def get_deletion_policy():
        """Subtopic should be kept if associated topic is published."""
        return base_models.DELETION_POLICY.KEEP_IF_PUBLIC

    @classmethod
    def has_reference_to_user_id(cls, user_id):
        """Check whether SubtopicPageModel snapshots references the given user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        return cls.SNAPSHOT_METADATA_CLASS.exists_for_user_id(user_id)

    def _trusted_commit(self, committer_id, commit_type, commit_message,
                        commit_cmds):
        """Record the event to the commit log after the model commit.

        Note that this extends the superclass method.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_type: str. The type of commit. Possible values are in
                core.storage.base_models.COMMIT_TYPE_CHOICES.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information to
                reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and then additional arguments for that command.
        """
        super(SubtopicPageModel,
              self)._trusted_commit(committer_id, commit_type, commit_message,
                                    commit_cmds)
        # Resolve the committer's username, falling back to '' if the
        # settings model does not exist.
        committer_user_settings_model = (
            user_models.UserSettingsModel.get_by_id(committer_id))
        committer_username = (committer_user_settings_model.username
                              if committer_user_settings_model else '')

        subtopic_page_commit_log_entry = SubtopicPageCommitLogEntryModel.create(
            self.id, self.version, committer_id, committer_username,
            commit_type, commit_message, commit_cmds,
            constants.ACTIVITY_STATUS_PUBLIC, False)
        subtopic_page_commit_log_entry.subtopic_page_id = self.id
        subtopic_page_commit_log_entry.put()
コード例 #25
0
ファイル: gae_models.py プロジェクト: zhengxit/oppia
class StateRuleAnswerLogModel(base_models.BaseModel):
    """The log of all answers hitting a given state rule.

    The id/key of instances of this class has the form
        [EXPLORATION_ID].[STATE_NAME].[HANDLER_NAME].[RULE_NAME]

    WARNING: If a change is made to existing rules in data/objects (e.g.
    renaming them or changing their signature), this class will contain
    invalid values.

    WARNING: Rule names and args that are used to construct the key here must
    be < 400 characters in length, since these are used as part of the key.
    """
    # Log of answers that hit this rule and that have not been resolved. The
    # JSON blob represents a dict. The keys of this dict are the answers
    # encoded as HTML strings, and the values are integer counts representing
    # how many times the answer has been entered.
    # WARNING: do not use default={} in JsonProperty, it does not work as you
    # expect.
    answers = ndb.JsonProperty(indexed=False)

    @classmethod
    def get_or_create(cls, exploration_id, state_name, rule_str):
        """Returns the single entity for the given exploration, state and
        rule spec string, creating it if it does not exist yet.
        """
        # TODO(sll): Deprecate this method.
        matches = cls.get_or_create_multi_for_multi_explorations(
            [(exploration_id, state_name)], [rule_str])
        return matches[0][0]

    @classmethod
    def _get_entity_key(cls, unused_exploration_id, entity_id):
        """Builds the datastore key for the entity with the given id."""
        return ndb.Key(cls._get_kind(), entity_id)

    @classmethod
    def get_or_create_multi_for_multi_explorations(
            cls, exploration_state_list, rule_str_list):
        """Gets entities given a list of exploration ID and state name tuples,
        and a list of rule spec strings to filter answers matched for each of
        the given explorations and states. Returns a list containing a list of
        matched entities for each input exploration ID-state name tuple.

        Args:
            exploration_state_list: a list of exploration ID and state name
                tuples
            rule_str_list: a list of rule spec strings which are used to filter
                the answers matched to the provided explorations and states
        """
        # TODO(sll): Use a hash instead to disambiguate.
        # One (exploration_id, state_name, entity_id) triple per combination
        # of exploration-state pair and rule spec string.
        flat_entries = [
            (exploration_id, state_name, '.'.join([
                exploration_id, state_name, _OLD_SUBMIT_HANDLER_NAME,
                rule_str])[:490])
            for exploration_id, state_name in exploration_state_list
            for rule_str in rule_str_list]

        entity_keys = [
            cls._get_entity_key(exploration_id, entity_id)
            for exploration_id, _, entity_id in flat_entries]

        entities = ndb.get_multi(entity_keys)
        # Instantiate any entities that are missing from the datastore and
        # persist them in one batched write.
        newly_created = []
        for index, entity in enumerate(entities):
            if entity is None:
                entity = cls(id=flat_entries[index][2], answers={})
                entities[index] = entity
                newly_created.append(entity)
        if newly_created:
            ndb.put_multi(newly_created)

        # Group the entities by their originating exploration-state pair,
        # preserving the order of exploration_state_list in the result.
        grouped_entities = [[] for _ in exploration_state_list]
        bucket_for_pair = {
            pair: bucket
            for pair, bucket in zip(exploration_state_list, grouped_entities)}
        for (exploration_id, state_name, _), entity in zip(
                flat_entries, entities):
            bucket_for_pair[(exploration_id, state_name)].append(entity)
        return grouped_entities

    @classmethod
    def get_or_create_multi(cls, exploration_id, rule_data):
        """Gets or creates entities for the given rules.
        Args:
            exploration_id: the exploration id
            rule_data: a list of dicts, each with the following keys:
                (state_name, rule_str).
        """
        # TODO(sll): Use a hash instead to disambiguate.
        entity_ids = [
            '.'.join([
                exploration_id, datum['state_name'],
                _OLD_SUBMIT_HANDLER_NAME, datum['rule_str']])[:490]
            for datum in rule_data]

        entities = ndb.get_multi([
            cls._get_entity_key(exploration_id, entity_id)
            for entity_id in entity_ids])

        # Backfill any missing entities before returning the full list.
        newly_created = []
        for index, entity in enumerate(entities):
            if entity is None:
                entities[index] = cls(id=entity_ids[index], answers={})
                newly_created.append(entities[index])

        ndb.put_multi(newly_created)
        return entities
コード例 #26
0
class TopicModel(base_models.VersionedModel):
    """Model for storing Topics.

    This class should only be imported by the topic services file
    and the topic model test file.
    """
    SNAPSHOT_METADATA_CLASS = TopicSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = TopicSnapshotContentModel
    ALLOW_REVERT = False

    # The name of the topic.
    name = ndb.StringProperty(required=True, indexed=True)
    # The canonical name of the topic, created by making `name` lowercase.
    canonical_name = ndb.StringProperty(required=True, indexed=True)
    # The description of the topic.
    description = ndb.TextProperty(indexed=False)
    # This consists of the list of objects referencing canonical stories that
    # are part of this topic.
    canonical_story_references = ndb.JsonProperty(repeated=True, indexed=False)
    # This consists of the list of objects referencing additional stories that
    # are part of this topic.
    additional_story_references = ndb.JsonProperty(repeated=True,
                                                   indexed=False)
    # The schema version for the story reference object on each of the above 2
    # lists.
    story_reference_schema_version = ndb.IntegerProperty(required=True,
                                                         indexed=True)
    # This consists of the list of uncategorized skill ids that are not part of
    # any subtopic.
    uncategorized_skill_ids = ndb.StringProperty(repeated=True, indexed=True)
    # The list of subtopics that are part of the topic.
    subtopics = ndb.JsonProperty(repeated=True, indexed=False)
    # The schema version of the subtopic dict.
    subtopic_schema_version = ndb.IntegerProperty(required=True, indexed=True)
    # The id for the next subtopic.
    next_subtopic_id = ndb.IntegerProperty(required=True)
    # The ISO 639-1 code for the language this topic is written in.
    language_code = ndb.StringProperty(required=True, indexed=True)

    @staticmethod
    def get_deletion_policy():
        """Topic should be kept if it is published."""
        return base_models.DELETION_POLICY.KEEP_IF_PUBLIC

    @classmethod
    def has_reference_to_user_id(cls, user_id):
        """Check whether TopicModel snapshots references the given user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        return cls.SNAPSHOT_METADATA_CLASS.exists_for_user_id(user_id)

    def _trusted_commit(self, committer_id, commit_type, commit_message,
                        commit_cmds):
        """Record the event to the commit log after the model commit.

        Note that this extends the superclass method.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_type: str. The type of commit. Possible values are in
                core.storage.base_models.COMMIT_TYPE_CHOICES.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information to
                reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and then additional arguments for that command.
        """
        super(TopicModel, self)._trusted_commit(
            committer_id, commit_type, commit_message, commit_cmds)

        settings_model = user_models.UserSettingsModel.get_by_id(committer_id)
        # Fall back to an empty username if the committer has no settings.
        username = settings_model.username if settings_model else ''

        # The commit log records the current publication status of the topic.
        topic_rights = TopicRightsModel.get_by_id(self.id)
        status = (
            constants.ACTIVITY_STATUS_PUBLIC
            if topic_rights.topic_is_published
            else constants.ACTIVITY_STATUS_PRIVATE)

        commit_log_entry = TopicCommitLogEntryModel.create(
            self.id, self.version, committer_id, username, commit_type,
            commit_message, commit_cmds, status, False)
        commit_log_entry.topic_id = self.id
        commit_log_entry.put()

    @classmethod
    def get_by_name(cls, topic_name):
        """Gets TopicModel by topic_name. Returns None if the topic with
        name topic_name doesn't exist.

        Args:
            topic_name: str. The name of the topic.

        Returns:
            TopicModel|None. The topic model of the topic or None if not
            found.
        """
        return cls.query(
            cls.canonical_name == topic_name.lower()
        ).filter(cls.deleted == False).get()  # pylint: disable=singleton-comparison
コード例 #27
0
ファイル: gae_models.py プロジェクト: rcorfield/oppia
class ExplorationModel(base_models.VersionedModel):
    """Versioned storage model for an Oppia exploration.

    This class should only be imported by the exploration domain file, the
    exploration services file, and the Exploration model test file.
    """
    SNAPSHOT_METADATA_CLASS = ExplorationSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = ExplorationSnapshotContentModel
    ALLOW_REVERT = True

    # What this exploration is called.
    title = ndb.StringProperty(required=True)
    # The category this exploration belongs to.
    category = ndb.StringProperty(required=True, indexed=True)
    # The objective of this exploration.
    objective = ndb.TextProperty(default='', indexed=False)
    # The ISO 639-1 code for the language this exploration is written in.
    language_code = ndb.StringProperty(
        default=feconf.DEFAULT_LANGUAGE_CODE, indexed=True)
    # Tags (topics, skills, concepts, etc.) associated with this
    # exploration.
    tags = ndb.StringProperty(repeated=True, indexed=True)
    # A blurb for this exploration.
    blurb = ndb.TextProperty(default='', indexed=False)
    # 'Author notes' for this exploration.
    author_notes = ndb.TextProperty(default='', indexed=False)
    # Schema storing specifications of the contents of any gadget panels,
    # along with associated customizations for each gadget instance.
    skin_customizations = ndb.JsonProperty(indexed=False)

    # The version of the states blob schema.
    states_schema_version = ndb.IntegerProperty(
        required=True, default=0, indexed=True)
    # The name of the initial state of this exploration.
    init_state_name = ndb.StringProperty(required=True, indexed=False)
    # A dict representing the states of this exploration. This dict should
    # not be empty.
    states = ndb.JsonProperty(default={}, indexed=False)
    # The dict of parameter specifications associated with this exploration.
    # Each specification is a dict whose keys are param names and whose values
    # are each dicts with a single key, 'obj_type', whose value is a string.
    param_specs = ndb.JsonProperty(default={}, indexed=False)
    # The list of parameter changes to be performed once at the start of a
    # reader's encounter with an exploration.
    param_changes = ndb.JsonProperty(repeated=True, indexed=False)

    # DEPRECATED in v2.0.0.rc.2. Do not use. Retaining it here because deletion
    # caused GAE to raise an error on fetching a specific version of the
    # exploration model.
    # TODO(sll): Fix this error and remove this property.
    skill_tags = ndb.StringProperty(repeated=True, indexed=True)
    # DEPRECATED in v2.0.1. Do not use.
    # TODO(sll): Remove this property from the model.
    default_skin = ndb.StringProperty(default=feconf.DEFAULT_SKIN_ID)

    @classmethod
    def get_exploration_count(cls):
        """Returns the total number of explorations."""
        return cls.get_all().count()

    def commit(self, committer_id, commit_message, commit_cmds):
        """Updates the exploration using the properties dict, then saves it."""
        super(ExplorationModel, self).commit(
            committer_id, commit_message, commit_cmds)

    def _trusted_commit(
            self, committer_id, commit_type, commit_message, commit_cmds):
        """Record the event to the commit log after the model commit.

        Note that this extends the superclass method.
        """
        super(ExplorationModel, self)._trusted_commit(
            committer_id, commit_type, commit_message, commit_cmds)

        settings_model = user_models.UserSettingsModel.get_by_id(committer_id)
        # Fall back to an empty username if the committer has no settings.
        username = settings_model.username if settings_model else ''

        exp_rights = ExplorationRightsModel.get_by_id(self.id)

        # TODO(msl): test if put_async() leads to any problems (make
        # sure summary dicts get updated correctly when explorations
        # are changed)
        log_entry = ExplorationCommitLogEntryModel(
            id=('exploration-%s-%s' % (self.id, self.version)),
            user_id=committer_id,
            username=username,
            exploration_id=self.id,
            commit_type=commit_type,
            commit_message=commit_message,
            commit_cmds=commit_cmds,
            version=self.version,
            post_commit_status=exp_rights.status,
            post_commit_community_owned=exp_rights.community_owned,
            post_commit_is_private=(
                exp_rights.status == feconf.ACTIVITY_STATUS_PRIVATE))
        log_entry.put_async()
コード例 #28
0
ファイル: backend.py プロジェクト: hoff/cloud-db
class Document(ndb.Model):
    """A generic datastore entity holding an arbitrary JSON payload."""
    # Arbitrary JSON blob for this document; no schema is enforced here.
    # NOTE(review): the attribute name shadows the stdlib `json` module name
    # for code reading it via the entity; presumably intentional — verify.
    json = ndb.JsonProperty()
コード例 #29
0
class ExpSummaryModel(base_models.BaseModel):
    """Summary model for an Oppia exploration.

    This should be used whenever the content blob of the exploration is not
    needed (e.g. in search results, etc).

    A ExpSummaryModel instance stores the following information:

        id, title, category, objective, language_code, tags,
        last_updated, created_on, status (private, public or
        publicized), community_owned, owner_ids, editor_ids,
        viewer_ids, version.

    The key of each instance is the exploration id.
    """

    # What this exploration is called.
    title = ndb.StringProperty(required=True)
    # The category this exploration belongs to.
    category = ndb.StringProperty(required=True, indexed=True)
    # The objective of this exploration.
    objective = ndb.TextProperty(required=True, indexed=False)
    # The ISO 639-1 code for the language this exploration is written in.
    language_code = ndb.StringProperty(required=True, indexed=True)
    # Tags associated with this exploration.
    tags = ndb.StringProperty(repeated=True, indexed=True)

    # Aggregate user-assigned ratings of the exploration
    ratings = ndb.JsonProperty(default=None, indexed=False)

    # Scaled average rating for the exploration.
    scaled_average_rating = ndb.FloatProperty(indexed=True)

    # Time when the exploration model was last updated (not to be
    # confused with last_updated, which is the time when the
    # exploration *summary* model was last updated)
    exploration_model_last_updated = ndb.DateTimeProperty(indexed=True)
    # Time when the exploration model was created (not to be confused
    # with created_on, which is the time when the exploration *summary*
    # model was created)
    exploration_model_created_on = ndb.DateTimeProperty(indexed=True)
    # Time when the exploration was first published.
    first_published_msec = ndb.FloatProperty(indexed=True)

    # The publication status of this exploration.
    status = ndb.StringProperty(default=feconf.ACTIVITY_STATUS_PRIVATE,
                                indexed=True,
                                choices=[
                                    feconf.ACTIVITY_STATUS_PRIVATE,
                                    feconf.ACTIVITY_STATUS_PUBLIC,
                                    feconf.ACTIVITY_STATUS_PUBLICIZED
                                ])

    # Whether this exploration is owned by the community.
    community_owned = ndb.BooleanProperty(required=True, indexed=True)

    # The user_ids of owners of this exploration.
    owner_ids = ndb.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who are allowed to edit this exploration.
    editor_ids = ndb.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who are allowed to view this exploration.
    viewer_ids = ndb.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who have contributed (humans who have made a
    # positive (not just a revert) change to the exploration's content)
    contributor_ids = ndb.StringProperty(indexed=True, repeated=True)
    # A dict representing the contributors of non-trivial commits to this
    # exploration. Each key of this dict is a user_id, and the corresponding
    # value is the number of non-trivial commits that the user has made.
    contributors_summary = ndb.JsonProperty(default={}, indexed=False)
    # The version number of the exploration after this commit. Only populated
    # for commits to an exploration (as opposed to its rights, etc.)
    version = ndb.IntegerProperty()

    @classmethod
    def get_non_private(cls):
        """Returns an iterable with non-private ExpSummary models.

        Returns:
            iterable. An iterable with non-private ExpSummary models.
        """
        return cls.query().filter(
            cls.status != feconf.ACTIVITY_STATUS_PRIVATE
        ).filter(
            cls.deleted == False  # pylint: disable=singleton-comparison
        ).fetch(feconf.DEFAULT_QUERY_LIMIT)

    @classmethod
    def get_top_rated(cls, limit):
        """Fetches the top-rated exp summaries that are public in descending
        order of scaled_average_rating.

        Args:
            limit: int. The maximum number of results to return.

        Returns:
            iterable. An iterable with the top rated exp summaries that are
                public in descending order of scaled_average_rating.
        """
        return cls.query().filter(
            ndb.OR(
                cls.status == feconf.ACTIVITY_STATUS_PUBLIC,
                cls.status == feconf.ACTIVITY_STATUS_PUBLICIZED)
        ).filter(
            cls.deleted == False  # pylint: disable=singleton-comparison
        ).order(-cls.scaled_average_rating).fetch(limit)

    @classmethod
    def get_private_at_least_viewable(cls, user_id):
        """Fetches private exp summaries that are at least viewable by the
        given user.

        Args:
            user_id: The id of the given user.

        Returns:
            iterable. An iterable with private exp summaries that are at least
                viewable by the given user.
        """
        return cls.query().filter(
            cls.status == feconf.ACTIVITY_STATUS_PRIVATE
        ).filter(
            ndb.OR(
                cls.owner_ids == user_id,
                cls.editor_ids == user_id,
                cls.viewer_ids == user_id)
        ).filter(
            cls.deleted == False  # pylint: disable=singleton-comparison
        ).fetch(feconf.DEFAULT_QUERY_LIMIT)

    @classmethod
    def get_at_least_editable(cls, user_id):
        """Fetches exp summaries that are at least editable by the given user.

        Args:
            user_id: The id of the given user.

        Returns:
            iterable. An iterable with exp summaries that are at least
                editable by the given user.
        """
        return cls.query().filter(
            ndb.OR(
                cls.owner_ids == user_id,
                cls.editor_ids == user_id)
        ).filter(
            cls.deleted == False  # pylint: disable=singleton-comparison
        ).fetch(feconf.DEFAULT_QUERY_LIMIT)

    @classmethod
    def get_recently_published(cls, limit):
        """Fetches exp summaries that are recently published.

        Args:
            limit: int. The maximum number of results to return.

        Returns:
            An iterable with exp summaries that are recently published. The
                returned list is sorted by the time of publication with latest
                being first in the list.
        """
        return cls.query().filter(
            ndb.OR(
                cls.status == feconf.ACTIVITY_STATUS_PUBLIC,
                cls.status == feconf.ACTIVITY_STATUS_PUBLICIZED)
        ).filter(
            cls.deleted == False  # pylint: disable=singleton-comparison
        ).order(-cls.first_published_msec).fetch(limit)
コード例 #30
0
ファイル: gae_models.py プロジェクト: shrutigrover/oppia
class ExplorationOpportunitySummaryModel(base_models.BaseModel):
    """Summary of translation and voiceover opportunities in an exploration.

    The id of each instance is the id of the corresponding exploration.
    """

    topic_id = ndb.StringProperty(required=True, indexed=True)
    topic_name = ndb.StringProperty(required=True, indexed=True)
    story_id = ndb.StringProperty(required=True, indexed=True)
    story_title = ndb.StringProperty(required=True, indexed=True)
    chapter_title = ndb.StringProperty(required=True, indexed=True)
    content_count = ndb.IntegerProperty(required=True, indexed=True)
    incomplete_translation_language_codes = ndb.StringProperty(repeated=True,
                                                               indexed=True)
    translation_counts = ndb.JsonProperty(default={}, indexed=False)
    assigned_voice_artist_in_language_codes = ndb.StringProperty(repeated=True,
                                                                 indexed=True)
    need_voice_artist_in_language_codes = ndb.StringProperty(repeated=True,
                                                             indexed=True)

    @staticmethod
    def get_deletion_policy():
        """Exploration opportunity summary is deleted only if the
        corresponding exploration is not public.
        """
        return base_models.DELETION_POLICY.KEEP_IF_PUBLIC

    @classmethod
    def get_export_policy(cls):
        """Model does not contain user data."""
        return dict(
            super(cls, cls).get_export_policy(), **{
                'topic_id':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'topic_name':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'story_id':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'story_title':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'chapter_title':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'content_count':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'incomplete_translation_language_codes':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'translation_counts':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'assigned_voice_artist_in_language_codes':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'need_voice_artist_in_language_codes':
                base_models.EXPORT_POLICY.NOT_APPLICABLE
            })

    @classmethod
    def has_reference_to_user_id(cls, unused_user_id):
        """ExplorationOpportunitySummaryModel doesn't reference any user_id
        directly.

        Args:
            unused_user_id: str. The (unused) ID of the user whose data
                should be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        return False

    @classmethod
    def get_all_translation_opportunities(cls, page_size, urlsafe_start_cursor,
                                          language_code):
        """Returns a list of opportunities available for translation in a
        specific language.

        Args:
            page_size: int. The maximum number of entities to be returned.
            urlsafe_start_cursor: str or None. If provided, the list of
                returned entities starts from this datastore cursor.
                Otherwise, the returned entities start from the beginning
                of the full list of entities.
            language_code: str. The language for which translation opportunities
                are to be fetched.

        Returns:
            3-tuple of (results, cursor, more). As described in fetch_page() at:
            https://developers.google.com/appengine/docs/python/ndb/queryclass,
            where:
                results: list(ExplorationOpportunitySummaryModel)|None. A list
                    of query results.
                cursor: str or None. A query cursor pointing to the next
                    batch of results. If there are no more results, this might
                    be None.
                more: bool. If True, there are (probably) more results after
                    this batch. If False, there are no further results after
                    this batch.
        """
        # An explicit start-of-results cursor is used when no cursor is
        # supplied, so that both paging branches follow the same code path.
        if urlsafe_start_cursor:
            start_cursor = datastore_services.make_cursor(
                urlsafe_cursor=urlsafe_start_cursor)
        else:
            start_cursor = datastore_services.make_cursor()

        results, cursor, more = cls.query(
            cls.incomplete_translation_language_codes == language_code).order(
                cls.incomplete_translation_language_codes).fetch_page(
                    page_size, start_cursor=start_cursor)
        return (results, (cursor.urlsafe() if cursor else None), more)

    @classmethod
    def get_all_voiceover_opportunities(cls, page_size, urlsafe_start_cursor,
                                        language_code):
        """Returns a list of opportunities available for voiceover in a
        specific language.

        Args:
            page_size: int. The maximum number of entities to be returned.
            urlsafe_start_cursor: str or None. If provided, the list of
                returned entities starts from this datastore cursor.
                Otherwise, the returned entities start from the beginning
                of the full list of entities.
            language_code: str. The language for which voiceover opportunities
                are to be fetched.

        Returns:
            3-tuple of (results, cursor, more). As described in fetch_page() at:
            https://developers.google.com/appengine/docs/python/ndb/queryclass,
            where:
                results: list(ExplorationOpportunitySummaryModel)|None. A list
                    of query results.
                cursor: str or None. A query cursor pointing to the next
                    batch of results. If there are no more results, this might
                    be None.
                more: bool. If True, there are (probably) more results after
                    this batch. If False, there are no further results after
                    this batch.
        """
        # Use an explicit start-of-results cursor for the no-cursor case, for
        # consistency with get_all_translation_opportunities (the original
        # code passed None here; fetch_page treats both as "start from the
        # beginning").
        if urlsafe_start_cursor:
            start_cursor = datastore_services.make_cursor(
                urlsafe_cursor=urlsafe_start_cursor)
        else:
            start_cursor = datastore_services.make_cursor()

        results, cursor, more = cls.query(
            cls.need_voice_artist_in_language_codes == language_code).order(
                cls.created_on).fetch_page(page_size,
                                           start_cursor=start_cursor)
        return (results, (cursor.urlsafe() if cursor else None), more)

    @classmethod
    def get_by_topic(cls, topic_id):
        """Returns all the models corresponding to the specific topic.

        Args:
            topic_id: str. The ID of the topic.

        Returns:
            list(ExplorationOpportunitySummaryModel)|None. A list of
            ExplorationOpportunitySummaryModel having given topic_id.
        """
        return cls.query(cls.topic_id == topic_id).fetch()

    @classmethod
    def delete_all(cls):
        """Deletes all entities of this class."""
        keys = cls.query().fetch(keys_only=True)
        ndb.delete_multi(keys)