Code example #1
from google.appengine.ext import ndb


class UserRights(ndb.Expando):
    # Note: despite the "str" prefix, the three *User properties below are
    # boolean flags, not strings.
    strUserID = ndb.StringProperty()
    strUserEmail = ndb.StringProperty()
    strAdminUser = ndb.BooleanProperty(default=True)
    strSuperUser = ndb.BooleanProperty(default=False)
    strGeneralUser = ndb.BooleanProperty(default=False)

    def setAdminUser(self, strinput):
        # Accept only real booleans; report success via the return value.
        if isinstance(strinput, bool):
            self.strAdminUser = strinput
            return True
        return False

    def setSuperUser(self, strinput):
        if isinstance(strinput, bool):
            self.strSuperUser = strinput
            return True
        return False

    def setGeneralUser(self, strinput):
        if isinstance(strinput, bool):
            self.strGeneralUser = strinput
            return True
        return False

    def writeUserID(self, strinput):
        # Check for None before converting: str(None) would produce the
        # string "None", so a post-conversion None check could never fail.
        if strinput is None:
            return False
        self.strUserID = str(strinput)
        return True

    def writeEmail(self, strinput):
        if strinput is None:
            return False
        self.strUserEmail = str(strinput)
        return True
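
A minimal usage sketch (assuming a Google App Engine runtime where the entity can be persisted; the id and email values are illustrative):

# Hypothetical usage of UserRights; the id and email are placeholders.
rights = UserRights()
if rights.writeUserID(12345) and rights.writeEmail('user@example.com'):
    rights.setAdminUser(False)
    rights.setSuperUser(True)
    rights.put()  # persist the entity to the datastore
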
Code example #2
class CollectionCommitLogEntryModel(base_models.BaseModel):
    """Log of commits to collections.

    A new instance of this model is created and saved every time a commit to
    CollectionModel or CollectionRightsModel occurs.

    The id for this model is of the form
    'collection-{{COLLECTION_ID}}-{{COLLECTION_VERSION}}'.
    """
    # Update superclass model to make these properties indexed.
    created_on = ndb.DateTimeProperty(auto_now_add=True, indexed=True)
    last_updated = ndb.DateTimeProperty(auto_now=True, indexed=True)

    # The id of the user.
    user_id = ndb.StringProperty(indexed=True, required=True)
    # The username of the user, at the time of the edit.
    username = ndb.StringProperty(indexed=True, required=True)
    # The id of the collection being edited.
    collection_id = ndb.StringProperty(indexed=True, required=True)
    # The type of the commit: 'create', 'revert', 'edit', 'delete'.
    commit_type = ndb.StringProperty(indexed=True, required=True)
    # The commit message.
    commit_message = ndb.TextProperty(indexed=False)
    # The commit_cmds dict for this commit.
    commit_cmds = ndb.JsonProperty(indexed=False, required=True)
    # The version number of the collection after this commit. Only populated
    # for commits to a collection (as opposed to its rights, etc.).
    version = ndb.IntegerProperty()

    # The status of the collection after the edit event ('private', 'public').
    post_commit_status = ndb.StringProperty(indexed=True, required=True)
    # Whether the collection is community-owned after the edit event.
    post_commit_community_owned = ndb.BooleanProperty(indexed=True)
    # Whether the collection is private after the edit event. Having a
    # separate field for this makes queries faster, since an equality query
    # on this property is faster than an inequality query on
    # post_commit_status.
    post_commit_is_private = ndb.BooleanProperty(indexed=True)

    @classmethod
    def get_commit(cls, collection_id, version):
        """Returns the commit corresponding to the given collection id and
        version number.

        Args:
            collection_id: str. The id of the collection being edited.
            version: int. The version number of the collection after the commit.

        Returns:
            The commit with the given collection id and version number.
        """
        return cls.get_by_id('collection-%s-%s' % (collection_id, version))

    @classmethod
    def get_all_commits(cls, page_size, urlsafe_start_cursor):
        """Fetches a list of all the commits sorted by their last updated
        attribute.

        Args:
            page_size: int. The maximum number of entities to be returned.
            urlsafe_start_cursor: str or None. If provided, the list of
                returned entities starts from this datastore cursor.
                Otherwise, the returned entities start from the beginning
                of the full list of entities.

        Returns:
            3-tuple of (results, cursor, more) as described in fetch_page() at:
            https://developers.google.com/appengine/docs/python/ndb/queryclass,
            where:
                results: List of query results.
                cursor: str or None. A query cursor pointing to the next
                    batch of results. If there are no more results, this might
                    be None.
                more: bool. If True, there are (probably) more results after
                    this batch. If False, there are no further results after
                    this batch.
        """
        return cls._fetch_page_sorted_by_last_updated(
            cls.query(), page_size, urlsafe_start_cursor)

    @classmethod
    def get_all_non_private_commits(
            cls, page_size, urlsafe_start_cursor, max_age=None):
        """Fetches a list of all the non-private commits sorted by their last
        updated attribute.

        Args:
            page_size: int. The maximum number of entities to be returned.
            urlsafe_start_cursor: str or None. If provided, the list of
                returned entities starts from this datastore cursor.
                Otherwise, the returned entities start from the beginning
                of the full list of entities.
            max_age: datetime.timedelta. An instance of datetime.timedelta
                representing the maximum age of the non-private commits to be
                fetched.

        Raises:
            ValueError. max_age is neither an instance of datetime.timedelta nor
                None.

        Returns:
            3-tuple of (results, cursor, more) where:
                results: List of query results.
                cursor: str or None. A query cursor pointing to the next
                    batch of results. If there are no more results, this might
                    be None.
                more: bool. If True, there are (probably) more results after
                    this batch. If False, there are no further results after
                    this batch.
        """
        if not isinstance(max_age, datetime.timedelta) and max_age is not None:
            raise ValueError(
                'max_age must be a datetime.timedelta instance or None.')

        query = cls.query(
            cls.post_commit_is_private == False)  # pylint: disable=singleton-comparison
        if max_age:
            query = query.filter(
                cls.last_updated >= datetime.datetime.utcnow() - max_age)
        return cls._fetch_page_sorted_by_last_updated(
            query, page_size, urlsafe_start_cursor)
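
A sketch of how the cursor-based pagination could be consumed; the page size and the process() callback are illustrative assumptions:

# Hypothetical pagination loop over all commits, 50 at a time.
cursor = None
while True:
    results, cursor, more = CollectionCommitLogEntryModel.get_all_commits(
        50, cursor)
    for commit in results:
        process(commit)  # process() is a placeholder
    if not more:
        break
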
Code example #3
class DeletableModel(Model):
    """Functionality to implement (soft) delete."""
    deleted = ndb.BooleanProperty(default=False, indexed=True)
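
With a soft-delete flag like this, callers filter on the property instead of issuing real datastore deletes; a sketch (SomeModel is a hypothetical subclass):

# Hypothetical query that excludes soft-deleted entities.
live = SomeModel.query(SomeModel.deleted == False).fetch()  # pylint: disable=singleton-comparison

# Soft delete: flip the flag and re-save instead of calling key.delete().
entity = live[0]
entity.deleted = True
entity.put()
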
Code example #4
class Hacker(ndb.Model):
    #TODO: If you add a new property, please remember to add that property to deletedHacker.py.

    name = ndb.StringProperty(validator=models.stringValidator)
    school = ndb.StringProperty(validator=models.stringValidator)
    year = ndb.StringProperty(choices=[
        'highschool', 'freshman', 'sophomore', 'junior', 'senior',
        'grad_student'
    ])
    email = ndb.StringProperty(validator=models.stringValidator)
    shirt_gen = ndb.StringProperty(choices=['M', 'W'])
    shirt_size = ndb.StringProperty(choices=['XS', 'S', 'M', 'L', 'XL', 'XXL'])
    dietary_restrictions = ndb.StringProperty(validator=models.stringValidator)
    resume = ndb.BlobKeyProperty()
    receipts = ndb.BlobKeyProperty(repeated=True)
    date = ndb.DateTimeProperty(auto_now_add=True)
    links = ndb.StringProperty(default=None)
    teammates = ndb.StringProperty(default=None,
                                   validator=models.stringValidator)
    teammates_emailed = ndb.BooleanProperty(default=False)
    hardware_hack = ndb.StringProperty(choices=["yes", 'no'])
    first_hackathon = ndb.StringProperty(choices=['yes', 'no'])
    major = ndb.StringProperty()

    phone_number = ndb.StringProperty(
        validator=models.phoneValidator
    )  # normalized to only digits, no country code

    def pretty_phone(self):
        if self.phone_number:
            return "({0}) {1}-{2}".format(self.phone_number[:3],
                                          self.phone_number[3:6],
                                          self.phone_number[6:])
        else:
            return None

    secret = ndb.StringProperty()

    admit_priority = ndb.FloatProperty(default=0)
    admitted_email_sent_date = ndb.DateTimeProperty()
    post_registration_email_sent_date = ndb.DateTimeProperty()

    waitlist_email_sent_date = ndb.DateTimeProperty()
    rsvp_reminder_sent_date = ndb.DateTimeProperty(default=None)

    rsvpd = ndb.BooleanProperty(default=False)
    checked_in = ndb.BooleanProperty(default=False)
    deadline = ndb.DateTimeProperty()

    ip = ndb.StringProperty()

    #Only collected for reimbursements.
    address1 = ndb.StringProperty()
    address2 = ndb.StringProperty()
    city = ndb.StringProperty()
    state = ndb.StringProperty()
    zip = ndb.StringProperty()
    country = ndb.StringProperty()

    rmax = ndb.IntegerProperty(default=0)
    rtotal = ndb.IntegerProperty(default=0)

    def computeStatus(self):
        # Guard for callers that pass None explicitly, e.g.
        # Hacker.computeStatus(lookup_result).
        if self is None:
            return "not found"
        if self.checked_in:
            return "checked in"
        elif self.rsvpd:
            return "confirmed"
        elif self.admitted_email_sent_date is not None:
            return "accepted"
        elif self.waitlist_email_sent_date is not None:
            return "waitlisted"
        else:
            return "pending"

    def asDict(self, include_keys):
        d = {key: getattr(self, key, None) for key in include_keys}
        if 'status' in include_keys:
            d['status'] = self.computeStatus()
        if 'has_resume' in include_keys:
            d['has_resume'] = not (not hasattr(self, 'resume')
                                   or self.resume == {}
                                   or self.resume is None)
        if 'resume' in include_keys:
            d['resume'] = "None" if (
                not hasattr(self, 'resume') or self.resume == {}
                or self.resume is None
            ) else "http://hackatbrown.org/__serve/" + str(self.resume)
        if 'secret' in include_keys:
            d['secret'] = "http://hackatbrown.org/secret/" + str(self.secret)
        if 'receipt_urls' in include_keys:
            receipt_keys = self.receipts if self.receipts else []
            d['receipt_urls'] = ' '.join([
                "http://hackatbrown.org/__serve/" + str(receipt)
                for receipt in receipt_keys
            ])
        return d

    @classmethod
    def WithSecret(cls, secret):
        results = cls.query(cls.secret == secret).fetch(1)
        return results[0] if len(results) else None
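
A usage sketch for the secret-token lookup and the dict serialization (the token and the key list are illustrative):

# Hypothetical lookup by secret token followed by serialization.
hacker = Hacker.WithSecret('abc123')
if hacker is not None:
    payload = hacker.asDict(['name', 'email', 'status', 'has_resume'])
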
Code example #5
File: match.py  Project: phckopper/the-blue-alliance
class Match(ndb.Model):
    """
    Matches represent individual matches at Events.
    Matches have many Videos.
    Matches have many Alliances.
    key_name is like 2010ct_qm10 or 2010ct_sf1m2
    """

    COMP_LEVELS = ["qm", "ef", "qf", "sf", "f"]
    ELIM_LEVELS = ["ef", "qf", "sf", "f"]
    COMP_LEVELS_VERBOSE = {
        "qm": "Quals",
        "ef": "Eighths",
        "qf": "Quarters",
        "sf": "Semis",
        "f": "Finals",
    }
    COMP_LEVELS_PLAY_ORDER = {
        'qm': 1,
        'ef': 2,
        'qf': 3,
        'sf': 4,
        'f': 5,
    }

    FRC_GAMES = [
        "frc_2012_rebr",
        "frc_2011_logo",
        "frc_2010_bkwy",
        "frc_2009_lncy",
        "frc_2008_ovdr",
        "frc_2007_rkrl",
        "frc_2006_amhi",
        "frc_2005_trpl",
        "frc_2004_frnz",
        "frc_2003_stck",
        "frc_2002_znzl",
        "frc_2001_dbdy",
        "frc_2000_coop",
        "frc_1999_trbl",
        "frc_1998_lddr",
        "frc_1997_trdt",
        "frc_1996_hxgn",
        "frc_1995_rmpr",
        "frc_1994_tpwr",
        "frc_1993_rgrg",
        "frc_1992_maiz",
        "frc_unknown",
    ]

    FRC_GAMES_BY_YEAR = {
        2012: "frc_2012_rebr",
        2011: "frc_2011_logo",
        2010: "frc_2010_bkwy",
        2009: "frc_2009_lncy",
        2008: "frc_2008_ovdr",
        2007: "frc_2007_rkrl",
        2006: "frc_2006_amhi",
        2005: "frc_2005_trpl",
        2004: "frc_2004_frnz",
        2003: "frc_2003_stck",
        2002: "frc_2002_znzl",
        2001: "frc_2001_dbdy",
        2000: "frc_2000_coop",
        1999: "frc_1999_trbl",
        1998: "frc_1998_lddr",
        1997: "frc_1997_trdt",
        1996: "frc_1996_hxgn",
        1995: "frc_1995_rmpr",
        1994: "frc_1994_tpwr",
        1993: "frc_1993_rgrg",
        1992: "frc_1992_maiz",
    }

    alliances_json = ndb.StringProperty(
        required=True,
        indexed=False)  # JSON dictionary with alliances and scores.

    # {
    # "red": {
    # "teams": ["frc177", "frc195", "frc125"], # These are Team keys
    #    "score": 25
    # },
    # "blue": {
    #    "teams": ["frc433", "frc254", "frc222"],
    #    "score": 12
    # }
    # }

    score_breakdown_json = ndb.StringProperty(
        indexed=False
    )  # JSON dictionary with score breakdowns. Fields are those used for seeding. Varies by year.
    # Example for 2014. Seeding outlined in Section 5.3.4 in the 2014 manual.
    # {"red": {
    #     "auto": 20,
    #     "assist": 40,
    #     "truss+catch": 20,
    #     "teleop_goal+foul": 20,
    # },
    # "blue": {
    #     "auto": 40,
    #     "assist": 60,
    #     "truss+catch": 10,
    #     "teleop_goal+foul": 40,
    # }}

    comp_level = ndb.StringProperty(required=True, choices=set(COMP_LEVELS))
    event = ndb.KeyProperty(kind=Event, required=True)
    game = ndb.StringProperty(required=True,
                              choices=set(FRC_GAMES),
                              indexed=False)
    match_number = ndb.IntegerProperty(required=True, indexed=False)
    no_auto_update = ndb.BooleanProperty(
        default=False, indexed=False)  # Set to True after manual update
    set_number = ndb.IntegerProperty(required=True, indexed=False)
    team_key_names = ndb.StringProperty(
        repeated=True)  # list of teams in Match, for indexing.
    time = ndb.DateTimeProperty()  # UTC
    time_string = ndb.StringProperty(
        indexed=False
    )  # the time as displayed on FIRST's site (event's local time)
    youtube_videos = ndb.StringProperty(repeated=True)  # list of YouTube IDs
    tba_videos = ndb.StringProperty(
        repeated=True)  # list of filetypes a TBA video exists for

    created = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
    updated = ndb.DateTimeProperty(auto_now=True)

    def __init__(self, *args, **kw):
        # Store the set of affected reference keys for cache clearing.
        # Keys must be model properties.
        self._affected_references = {
            'key': set(),
            'event': set(),
            'team_keys': set(),
            'year': set(),
        }
        self._alliances = None
        self._score_breakdown = None
        self._tba_video = None
        self._winning_alliance = None
        self._youtube_videos = None
        super(Match, self).__init__(*args, **kw)

    @property
    def alliances(self):
        """
        Lazy load alliances_json
        """
        if self._alliances is None:
            self._alliances = json.loads(self.alliances_json)

            # score types are inconsistent in the db. convert everything to ints for now.
            for color in ['red', 'blue']:
                score = self._alliances[color]['score']
                if score is None:
                    self._alliances[color]['score'] = -1
                else:
                    self._alliances[color]['score'] = int(score)

        return self._alliances

    @property
    def score_breakdown(self):
        """
        Lazy load score_breakdown_json
        """
        if self._score_breakdown is None and self.score_breakdown_json is not None:
            self._score_breakdown = json.loads(self.score_breakdown_json)

        return self._score_breakdown

    @property
    def winning_alliance(self):
        if self._winning_alliance is None:
            highest_score = 0
            for alliance in self.alliances:
                if int(self.alliances[alliance]["score"]) > highest_score:
                    highest_score = int(self.alliances[alliance]["score"])
                    self._winning_alliance = alliance
                elif int(self.alliances[alliance]["score"]) == highest_score:
                    self._winning_alliance = ""
        return self._winning_alliance

    @property
    def event_key_name(self):
        return self.event.id()

    @property
    def team_keys(self):
        return [
            ndb.Key(Team, team_key_name)
            for team_key_name in self.team_key_names
        ]

    @property
    def year(self):
        return self.event.id()[:4]

    @property
    def key_name(self):
        return self.renderKeyName(self.event_key_name, self.comp_level,
                                  self.set_number, self.match_number)

    @property
    def has_been_played(self):
        """If there are scores, it's been played"""
        for alliance in self.alliances:
            if (self.alliances[alliance]["score"] is None) or \
                    (self.alliances[alliance]["score"] == -1):
                return False
        return True

    @property
    def verbose_name(self):
        if self.comp_level == "qm" or self.comp_level == "f":
            return "%s %s" % (self.COMP_LEVELS_VERBOSE[self.comp_level],
                              self.match_number)
        else:
            return "%s %s Match %s" % (self.COMP_LEVELS_VERBOSE[
                self.comp_level], self.set_number, self.match_number)

    @property
    def short_name(self):
        if self.comp_level == "qm":
            return "Q%s" % self.match_number
        elif self.comp_level == "f":
            return "F%s" % self.match_number
        else:
            return "%s%s-%s" % (self.comp_level.upper(), self.set_number,
                                self.match_number)

    @property
    def has_video(self):
        return (len(self.youtube_videos) + len(self.tba_videos)) > 0

    @property
    def details_url(self):
        return "/match/%s" % self.key_name

    @property
    def tba_video(self):
        if len(self.tba_videos) > 0:
            if self._tba_video is None:
                self._tba_video = TBAVideoHelper(self)
        return self._tba_video

    @property
    def play_order(self):
        return (self.COMP_LEVELS_PLAY_ORDER[self.comp_level] * 1000000 +
                self.match_number * 1000 + self.set_number)

    @property
    def name(self):
        return "%s" % (self.COMP_LEVELS_VERBOSE[self.comp_level])

    @property
    def youtube_videos_formatted(self):
        """
        Get youtube video ids formatted for embedding
        """
        if self._youtube_videos is None:
            self._youtube_videos = []
            for video in self.youtube_videos:
                if '#t=' in video:  # Old-style timestamp, convert it!
                    sp = video.split('#t=')
                    video_id = sp[0]
                    old_ts = sp[1]
                    match = re.match(
                        r'((?P<hour>\d*?)h)?((?P<min>\d*?)m)?((?P<sec>\d*)s?)?',
                        old_ts).groupdict()
                    hours = match['hour'] or 0
                    minutes = match['min'] or 0
                    seconds = match['sec'] or 0
                    total_seconds = (int(hours) * 3600) + (int(minutes) *
                                                           60) + int(seconds)
                    video = '%s?start=%i' % (video_id, total_seconds)
                self._youtube_videos.append(video)
        return self._youtube_videos

    @property
    def videos(self):
        videos = []
        for v in self.youtube_videos_formatted:
            videos.append({"type": "youtube", "key": v})
        if self.tba_video is not None:
            tba_path = self.tba_video.streamable_path
            if tba_path is not None:
                videos.append({"type": "tba", "key": tba_path})
        return videos

    @classmethod
    def renderKeyName(cls, event_key_name, comp_level, set_number,
                      match_number):
        if comp_level == "qm":
            return "%s_qm%s" % (event_key_name, match_number)
        else:
            return "%s_%s%sm%s" % (event_key_name, comp_level, set_number,
                                   match_number)

    @classmethod
    def validate_key_name(cls, match_key):
        key_name_regex = re.compile(
            r'^[1-9]\d{3}[a-z]+[1-9]?_(?:qm|ef|qf\dm|sf\dm|f\dm)\d+$')
        match = re.match(key_name_regex, match_key)
        return True if match else False
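
renderKeyName and validate_key_name together define the key format; a sketch of the expected round trip, using the key shapes from the class docstring:

# Qualification match 10 at event 2010ct -> '2010ct_qm10'
key = Match.renderKeyName('2010ct', 'qm', 1, 10)
assert Match.validate_key_name(key)

# Elimination matches include the set number -> '2010ct_sf1m2'
key = Match.renderKeyName('2010ct', 'sf', 1, 2)
assert Match.validate_key_name(key)
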
Code example #6
class ExplorationRightsModel(base_models.VersionedModel):
    """Storage model for rights related to an exploration.

    The id of each instance is the id of the corresponding exploration.
    """

    SNAPSHOT_METADATA_CLASS = ExplorationRightsSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = ExplorationRightsSnapshotContentModel
    ALLOW_REVERT = False

    # The user_ids of owners of this exploration.
    owner_ids = ndb.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who are allowed to edit this exploration.
    editor_ids = ndb.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who are allowed to voiceover this exploration.
    voice_artist_ids = ndb.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who are allowed to view this exploration.
    viewer_ids = ndb.StringProperty(indexed=True, repeated=True)

    # Whether this exploration is owned by the community.
    community_owned = ndb.BooleanProperty(indexed=True, default=False)
    # The exploration id which this exploration was cloned from. If None, this
    # exploration was created from scratch.
    cloned_from = ndb.StringProperty()
    # For private explorations, whether this exploration can be viewed
    # by anyone who has the URL. If the exploration is not private, this
    # setting is ignored.
    viewable_if_private = ndb.BooleanProperty(indexed=True, default=False)
    # Time, in milliseconds, when the exploration was first published.
    first_published_msec = ndb.FloatProperty(indexed=True, default=None)

    # The publication status of this exploration.
    status = ndb.StringProperty(default=constants.ACTIVITY_STATUS_PRIVATE,
                                indexed=True,
                                choices=[
                                    constants.ACTIVITY_STATUS_PRIVATE,
                                    constants.ACTIVITY_STATUS_PUBLIC
                                ])
    # DEPRECATED in v2.8.3. Do not use.
    translator_ids = ndb.StringProperty(indexed=True, repeated=True)

    @staticmethod
    def get_deletion_policy():
        """Exploration rights are deleted only if the corresponding exploration
        is not public.
        """
        return base_models.DELETION_POLICY.KEEP_IF_PUBLIC

    @staticmethod
    def get_export_policy():
        """Model contains user data."""
        return base_models.EXPORT_POLICY.CONTAINS_USER_DATA

    @staticmethod
    def transform_dict_to_valid(model_dict):
        """Replace invalid fields and values in the ExplorationRightsModel dict.

        Some old ExplorationRightsSnapshotContentModels can contain fields
        and field values that are no longer supported and would cause
        an exception when we try to reconstitute an ExplorationRightsModel from
        them. We need to remove or replace these fields and values.

        Args:
            model_dict: dict. The content of the model. Some fields and field
                values might no longer exist in the ExplorationRightsModel
                schema.

        Returns:
            dict. The content of the model. Only valid fields and values are
            present.
        """
        # The all_viewer_ids field was previously used in some versions of
        # the model, so we need to remove it.
        if 'all_viewer_ids' in model_dict:
            del model_dict['all_viewer_ids']
        # The status field could historically take the value 'publicized';
        # this value is now equivalent to 'public'.
        if model_dict['status'] == 'publicized':
            model_dict['status'] = constants.ACTIVITY_STATUS_PUBLIC
        # The voice_artist_ids field was previously named translator_ids. We
        # need to move the values from translator_ids field to voice_artist_ids
        # and delete translator_ids.
        if 'translator_ids' in model_dict and model_dict['translator_ids']:
            model_dict['voice_artist_ids'] = model_dict['translator_ids']
            model_dict['translator_ids'] = []
        return model_dict

    @classmethod
    def has_reference_to_user_id(cls, user_id):
        """Check whether ExplorationRightsModel references the given user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        return (cls.query(
            ndb.OR(cls.owner_ids == user_id, cls.editor_ids == user_id,
                   cls.voice_artist_ids == user_id, cls.viewer_ids
                   == user_id)).get(keys_only=True) is not None
                or cls.SNAPSHOT_METADATA_CLASS.exists_for_user_id(user_id))

    @staticmethod
    def get_user_id_migration_policy():
        """ExplorationRightsModel has multiple fields with user ID."""
        return base_models.USER_ID_MIGRATION_POLICY.CUSTOM

    @classmethod
    def migrate_model(cls, old_user_id, new_user_id):
        """Migrate model to use the new user ID in the owner_ids, editor_ids,
        voice_artist_ids and viewer_ids.

        Args:
            old_user_id: str. The old user ID.
            new_user_id: str. The new user ID.
        """
        migrated_models = []
        for model in cls.query(
                ndb.OR(cls.owner_ids == old_user_id,
                       cls.editor_ids == old_user_id,
                       cls.voice_artist_ids == old_user_id,
                       cls.viewer_ids == old_user_id)).fetch():
            model.owner_ids = [
                new_user_id if owner_id == old_user_id else owner_id
                for owner_id in model.owner_ids
            ]
            model.editor_ids = [
                new_user_id if editor_id == old_user_id else editor_id
                for editor_id in model.editor_ids
            ]
            model.voice_artist_ids = [
                new_user_id if voice_art_id == old_user_id else voice_art_id
                for voice_art_id in model.voice_artist_ids
            ]
            model.viewer_ids = [
                new_user_id if viewer_id == old_user_id else viewer_id
                for viewer_id in model.viewer_ids
            ]
            migrated_models.append(model)
        cls.put_multi(migrated_models, update_last_updated_time=False)

    def verify_model_user_ids_exist(self):
        """Check if UserSettingsModel exists for all the ids in owner_ids,
        editor_ids, voice_artist_ids and viewer_ids.
        """
        user_ids = (self.owner_ids + self.editor_ids + self.voice_artist_ids +
                    self.viewer_ids)
        user_ids = [
            user_id for user_id in user_ids
            if user_id not in feconf.SYSTEM_USERS
        ]
        user_settings_models = user_models.UserSettingsModel.get_multi(
            user_ids, include_deleted=True)
        return all(model is not None for model in user_settings_models)

    def save(self, committer_id, commit_message, commit_cmds):
        """Saves a new version of the exploration, updating the Exploration
        datastore model.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information to
                reconstruct the commit. Each dict always contains:
                    cmd: str. The type of the command. A full list of command
                        types can be found in core/domain/exp_domain.py.
                and then additional arguments for that command. For example:

                {'cmd': 'AUTO_revert_version_number',
                 'version_number': 4}
        """
        super(ExplorationRightsModel, self).commit(committer_id,
                                                   commit_message, commit_cmds)

    def _trusted_commit(self, committer_id, commit_type, commit_message,
                        commit_cmds):
        """Record the event to the commit log after the model commit.

        Note that this extends the superclass method.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_type: str. The type of commit. Possible values are in
                core.storage.base_models.COMMIT_TYPE_CHOICES.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information to
                reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and then additional arguments for that command.
        """
        super(ExplorationRightsModel,
              self)._trusted_commit(committer_id, commit_type, commit_message,
                                    commit_cmds)

        # Create and delete events will already be recorded in the
        # ExplorationModel.
        if commit_type not in ['create', 'delete']:
            committer_user_settings_model = (
                user_models.UserSettingsModel.get_by_id(committer_id))
            committer_username = (committer_user_settings_model.username
                                  if committer_user_settings_model else '')
            # TODO(msl): test if put_async() leads to any problems (make
            # sure summary dicts get updated correctly when explorations
            # are changed).
            ExplorationCommitLogEntryModel(
                id=('rights-%s-%s' % (self.id, self.version)),
                user_id=committer_id,
                username=committer_username,
                exploration_id=self.id,
                commit_type=commit_type,
                commit_message=commit_message,
                commit_cmds=commit_cmds,
                version=None,
                post_commit_status=self.status,
                post_commit_community_owned=self.community_owned,
                post_commit_is_private=(self.status == constants.
                                        ACTIVITY_STATUS_PRIVATE)).put_async()

    @classmethod
    def export_data(cls, user_id):
        """(Takeout) Export user-relevant properties of ExplorationRightsModel.

        Args:
            user_id: str. The user_id denotes which user's data to extract.

        Returns:
            dict or None. The user-relevant properties of ExplorationRightsModel
            in a python dict format. In this case, we are returning all the
            ids of explorations that the user is connected to, so they either
            own, edit, voice, or have permission to view.
        """
        owned_explorations = cls.get_all().filter(cls.owner_ids == user_id)
        editable_explorations = cls.get_all().filter(cls.editor_ids == user_id)
        voiced_explorations = (cls.get_all().filter(
            cls.voice_artist_ids == user_id))
        viewable_explorations = cls.get_all().filter(cls.viewer_ids == user_id)

        owned_exploration_ids = [exp.key.id() for exp in owned_explorations]
        editable_exploration_ids = ([
            exp.key.id() for exp in editable_explorations
        ])
        voiced_exploration_ids = [exp.key.id() for exp in voiced_explorations]
        viewable_exploration_ids = ([
            exp.key.id() for exp in viewable_explorations
        ])

        return {
            'owned_exploration_ids': owned_exploration_ids,
            'editable_exploration_ids': editable_exploration_ids,
            'voiced_exploration_ids': voiced_exploration_ids,
            'viewable_exploration_ids': viewable_exploration_ids
        }
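
A sketch of what transform_dict_to_valid does to a legacy snapshot dict (the field values are illustrative):

# Hypothetical legacy snapshot content before reconstitution.
legacy = {
    'status': 'publicized',        # obsolete value, mapped to 'public'
    'all_viewer_ids': ['uid1'],    # obsolete field, removed
    'translator_ids': ['uid2'],    # renamed to voice_artist_ids
}
cleaned = ExplorationRightsModel.transform_dict_to_valid(legacy)
# cleaned['status'] == constants.ACTIVITY_STATUS_PUBLIC
# cleaned['voice_artist_ids'] == ['uid2']; cleaned['translator_ids'] == []
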
Code example #7
class ExpSummaryModel(base_models.BaseModel):
    """Summary model for an Oppia exploration.

    This should be used whenever the content blob of the exploration is not
    needed (e.g. in search results, etc).

    An ExpSummaryModel instance stores the following information:

        id, title, category, objective, language_code, tags,
        last_updated, created_on, status (private, public),
        community_owned, owner_ids, editor_ids,
        viewer_ids, version.

    The key of each instance is the exploration id.
    """

    # What this exploration is called.
    title = ndb.StringProperty(required=True)
    # The category this exploration belongs to.
    category = ndb.StringProperty(required=True, indexed=True)
    # The objective of this exploration.
    objective = ndb.TextProperty(required=True, indexed=False)
    # The ISO 639-1 code for the language this exploration is written in.
    language_code = ndb.StringProperty(required=True, indexed=True)
    # Tags associated with this exploration.
    tags = ndb.StringProperty(repeated=True, indexed=True)

    # Aggregate user-assigned ratings of the exploration.
    ratings = ndb.JsonProperty(default=None, indexed=False)

    # Scaled average rating for the exploration.
    scaled_average_rating = ndb.FloatProperty(indexed=True)

    # Time when the exploration model was last updated (not to be
    # confused with last_updated, which is the time when the
    # exploration *summary* model was last updated).
    exploration_model_last_updated = ndb.DateTimeProperty(indexed=True)
    # Time when the exploration model was created (not to be confused
    # with created_on, which is the time when the exploration *summary*
    # model was created).
    exploration_model_created_on = ndb.DateTimeProperty(indexed=True)
    # Time when the exploration was first published.
    first_published_msec = ndb.FloatProperty(indexed=True)

    # The publication status of this exploration.
    status = ndb.StringProperty(default=constants.ACTIVITY_STATUS_PRIVATE,
                                indexed=True,
                                choices=[
                                    constants.ACTIVITY_STATUS_PRIVATE,
                                    constants.ACTIVITY_STATUS_PUBLIC
                                ])

    # Whether this exploration is owned by the community.
    community_owned = ndb.BooleanProperty(required=True, indexed=True)

    # The user_ids of owners of this exploration.
    owner_ids = ndb.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who are allowed to edit this exploration.
    editor_ids = ndb.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who are allowed to voiceover this exploration.
    voice_artist_ids = ndb.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who are allowed to view this exploration.
    viewer_ids = ndb.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who have contributed (humans who have made a
    # positive (not just a revert) change to the exploration's content).
    contributor_ids = ndb.StringProperty(indexed=True, repeated=True)
    # A dict representing the contributors of non-trivial commits to this
    # exploration. Each key of this dict is a user_id, and the corresponding
    # value is the number of non-trivial commits that the user has made.
    contributors_summary = ndb.JsonProperty(default={}, indexed=False)
    # The version number of the exploration after this commit. Only populated
    # for commits to an exploration (as opposed to its rights, etc.).
    version = ndb.IntegerProperty()
    # DEPRECATED in v2.8.3. Do not use.
    translator_ids = ndb.StringProperty(indexed=True, repeated=True)

    @staticmethod
    def get_deletion_policy():
        """Exploration summary is deleted only if the corresponding exploration
        is not public.
        """
        return base_models.DELETION_POLICY.KEEP_IF_PUBLIC

    @classmethod
    def has_reference_to_user_id(cls, user_id):
        """Check whether ExpSummaryModel references user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        return cls.query(
            ndb.OR(cls.owner_ids == user_id, cls.editor_ids == user_id,
                   cls.voice_artist_ids == user_id, cls.viewer_ids == user_id,
                   cls.contributor_ids == user_id)).get(
                       keys_only=True) is not None

    @staticmethod
    def get_user_id_migration_policy():
        """ExpSummaryModel has multiple fields with user ID."""
        return base_models.USER_ID_MIGRATION_POLICY.CUSTOM

    @classmethod
    def migrate_model(cls, old_user_id, new_user_id):
        """Migrate model to use the new user ID in the owner_ids, editor_ids,
        voice_artist_ids, viewer_ids and contributor_ids.

        Args:
            old_user_id: str. The old user ID.
            new_user_id: str. The new user ID.
        """
        migrated_models = []
        for model in cls.query(
                ndb.OR(cls.owner_ids == old_user_id,
                       cls.editor_ids == old_user_id,
                       cls.voice_artist_ids == old_user_id,
                       cls.viewer_ids == old_user_id,
                       cls.contributor_ids == old_user_id)).fetch():
            model.owner_ids = [
                new_user_id if owner_id == old_user_id else owner_id
                for owner_id in model.owner_ids
            ]
            model.editor_ids = [
                new_user_id if editor_id == old_user_id else editor_id
                for editor_id in model.editor_ids
            ]
            model.voice_artist_ids = [
                new_user_id if voice_art_id == old_user_id else voice_art_id
                for voice_art_id in model.voice_artist_ids
            ]
            model.viewer_ids = [
                new_user_id if viewer_id == old_user_id else viewer_id
                for viewer_id in model.viewer_ids
            ]
            model.contributor_ids = [
                new_user_id
                if contributor_id == old_user_id else contributor_id
                for contributor_id in model.contributor_ids
            ]
            migrated_models.append(model)
        cls.put_multi(migrated_models, update_last_updated_time=False)

    @classmethod
    def get_non_private(cls):
        """Returns an iterable with non-private ExpSummary models.

        Returns:
            iterable. An iterable with non-private ExpSummary models.
        """
        return ExpSummaryModel.query().filter(
            ExpSummaryModel.status != constants.ACTIVITY_STATUS_PRIVATE
        ).filter(ExpSummaryModel.deleted == False  # pylint: disable=singleton-comparison
                 ).fetch(feconf.DEFAULT_QUERY_LIMIT)

    @classmethod
    def get_top_rated(cls, limit):
        """Fetches the top-rated exp summaries that are public in descending
        order of scaled_average_rating.

        Args:
            limit: int. The maximum number of results to return.

        Returns:
            iterable. An iterable with the top rated exp summaries that are
                public in descending order of scaled_average_rating.
        """
        return ExpSummaryModel.query().filter(
            ExpSummaryModel.status == constants.ACTIVITY_STATUS_PUBLIC).filter(
                ExpSummaryModel.deleted == False  # pylint: disable=singleton-comparison
            ).order(-ExpSummaryModel.scaled_average_rating).fetch(limit)

    @classmethod
    def get_private_at_least_viewable(cls, user_id):
        """Fetches private exp summaries that are at least viewable by the
        given user.

        Args:
            user_id: str. The id of the given user.

        Returns:
            iterable. An iterable with private exp summaries that are at least
                viewable by the given user.
        """
        return ExpSummaryModel.query().filter(
            ExpSummaryModel.status ==
            constants.ACTIVITY_STATUS_PRIVATE).filter(
                ndb.OR(ExpSummaryModel.owner_ids == user_id,
                       ExpSummaryModel.editor_ids == user_id,
                       ExpSummaryModel.voice_artist_ids == user_id,
                       ExpSummaryModel.viewer_ids == user_id)).filter(
                           ExpSummaryModel.deleted == False  # pylint: disable=singleton-comparison
                       ).fetch(feconf.DEFAULT_QUERY_LIMIT)

    @classmethod
    def get_at_least_editable(cls, user_id):
        """Fetches exp summaries that are at least editable by the given user.

        Args:
            user_id: str. The id of the given user.

        Returns:
            iterable. An iterable with exp summaries that are at least
                editable by the given user.
        """
        return ExpSummaryModel.query().filter(
            ndb.OR(ExpSummaryModel.owner_ids == user_id,
                   ExpSummaryModel.editor_ids == user_id)).filter(
                       ExpSummaryModel.deleted == False  # pylint: disable=singleton-comparison
                   ).fetch(feconf.DEFAULT_QUERY_LIMIT)

    @classmethod
    def get_recently_published(cls, limit):
        """Fetches exp summaries that are recently published.

        Args:
            limit: int. The maximum number of results to return.

        Returns:
            An iterable with exp summaries that are recently published. The
                returned list is sorted by the time of publication with latest
                being first in the list.
        """
        return ExpSummaryModel.query().filter(
            ExpSummaryModel.status == constants.ACTIVITY_STATUS_PUBLIC).filter(
                ExpSummaryModel.deleted == False  # pylint: disable=singleton-comparison
            ).order(-ExpSummaryModel.first_published_msec).fetch(limit)

    @staticmethod
    def get_export_policy():
        """Model data has already been exported as a part of the
        ExplorationModel and thus does not need a separate export_data
        function.
        """
        return base_models.EXPORT_POLICY.NOT_APPLICABLE

    def verify_model_user_ids_exist(self):
        """Check if UserSettingsModel exists for all the ids in owner_ids,
        editor_ids, voice_artist_ids, viewer_ids and contributor_ids.
        """
        user_ids = (self.owner_ids + self.editor_ids + self.voice_artist_ids +
                    self.viewer_ids + self.contributor_ids)
        user_ids = [
            user_id for user_id in user_ids
            if user_id not in feconf.SYSTEM_USERS
        ]
        user_settings_models = user_models.UserSettingsModel.get_multi(
            user_ids, include_deleted=True)
        return all(model is not None for model in user_settings_models)
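
A sketch of the read-side helpers (the limit and the user id are illustrative):

# Hypothetical dashboard queries against the summary model.
top_rated = ExpSummaryModel.get_top_rated(10)
recent = ExpSummaryModel.get_recently_published(10)
editable = ExpSummaryModel.get_at_least_editable('some_user_id')
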
Code example #8
File: entities.py  Project: CaliScrub/palarin
class EmailList(ndb.Model):
    listname = ndb.StringProperty()
    email = ndb.StringProperty()
    subscribed = ndb.BooleanProperty()
Code example #9
class Library(ndb.Model):
  github_access_token = ndb.StringProperty(indexed=False)

  kind = ndb.StringProperty(default='element')
  collection_sequence_number = ndb.IntegerProperty(indexed=False, default=0)

  spdx_identifier = ndb.StringProperty(indexed=False)

  metadata = ndb.TextProperty(indexed=False)
  metadata_etag = ndb.StringProperty(indexed=False)
  metadata_updated = ndb.DateTimeProperty()

  contributors = ndb.TextProperty(indexed=False)
  contributors_etag = ndb.StringProperty(indexed=False)
  contributors_updated = ndb.DateTimeProperty()

  tags = ndb.StringProperty(repeated=True, indexed=False)
  tag_map = ndb.TextProperty(indexed=False)
  tags_etag = ndb.StringProperty(indexed=False)
  tags_updated = ndb.DateTimeProperty()

  participation = ndb.TextProperty(indexed=False)
  participation_etag = ndb.StringProperty(indexed=False)
  participation_updated = ndb.DateTimeProperty()

  shallow_ingestion = ndb.BooleanProperty(default=False)

  status = ndb.StringProperty(default=Status.pending)
  error = ndb.StringProperty(indexed=False)
  updated = ndb.DateTimeProperty(auto_now=True)

  @staticmethod
  def id(owner, repo):
    return '%s/%s' % (owner.lower(), repo.lower())

  @staticmethod
  def maybe_create_with_kind(owner, repo, kind):
    library = Library.get_or_insert('%s/%s' % (owner, repo))
    # FIXME: Probably don't want libraries to change kind.
    if library.kind != kind:
      library.kind = kind
      library.put()
    return library

  @staticmethod
  @ndb.tasklet
  def versions_for_key_async(key):
    version_cache = yield VersionCache.get_by_id_async('versions', parent=key)
    versions = []
    if version_cache is not None:
      versions = version_cache.versions
    raise ndb.Return(versions)

  @staticmethod
  @ndb.tasklet
  def default_version_for_key_async(key):
    versions = yield Library.versions_for_key_async(key)
    if versions == []:
      raise ndb.Return(None)
    raise ndb.Return(versiontag.default_version(versions))

  @staticmethod
  def uncached_versions_for_key(key):
    versions = Version.query(Version.status != Status.pending, ancestor=key).fetch(keys_only=True)
    versions = [key.id() for key in versions if versiontag.is_valid(key.id())]
    versions.sort(versiontag.compare)
    return versions
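
The tasklets return their values via ndb.Return, so callers receive a future; a sketch (the owner/repo pair is hypothetical):

from google.appengine.ext import ndb

# Hypothetical async call; get_result() blocks until the tasklet finishes.
library_key = ndb.Key(Library, Library.id('some-owner', 'some-repo'))
future = Library.versions_for_key_async(library_key)
versions = future.get_result()
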
Code example #10
File: gae_models.py  Project: unit-00/oppia
class SkillModel(base_models.VersionedModel):
    """Model for storing Skills.

    This class should only be imported by the skill services file
    and the skill model test file.
    """
    SNAPSHOT_METADATA_CLASS = SkillSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = SkillSnapshotContentModel
    ALLOW_REVERT = False

    # The description of the skill.
    description = ndb.StringProperty(required=True, indexed=True)
    # The schema version for each of the misconception dicts.
    misconceptions_schema_version = ndb.IntegerProperty(required=True,
                                                        indexed=True)
    # A list of misconceptions associated with the skill, in which each
    # element is a dict.
    misconceptions = ndb.JsonProperty(repeated=True, indexed=False)
    # The ISO 639-1 code for the language this skill is written in.
    language_code = ndb.StringProperty(required=True, indexed=True)
    # The schema version for the skill_contents.
    skill_contents_schema_version = ndb.IntegerProperty(required=True,
                                                        indexed=True)
    # A dict representing the skill contents.
    skill_contents = ndb.JsonProperty(indexed=False)
    # The id to be used by the next misconception added.
    next_misconception_id = ndb.IntegerProperty(required=True, indexed=False)
    # The id that the skill is merged into, in case the skill has been
    # marked as duplicate to another one and needs to be merged.
    # This is an optional field.
    superseding_skill_id = ndb.StringProperty(indexed=True)
    # A flag indicating whether deduplication is complete for this skill.
    # It will initially be False, and set to true only when there is a value
    # for superseding_skill_id and the merge was completed.
    all_questions_merged = ndb.BooleanProperty(indexed=True, required=True)

    def _trusted_commit(self, committer_id, commit_type, commit_message,
                        commit_cmds):
        """Record the event to the commit log after the model commit.

        Note that this extends the superclass method.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_type: str. The type of commit. Possible values are in
                core.storage.base_models.COMMIT_TYPE_CHOICES.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information to
                reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and then additional arguments for that command.
        """
        super(SkillModel, self)._trusted_commit(committer_id, commit_type,
                                                commit_message, commit_cmds)

        committer_user_settings_model = (
            user_models.UserSettingsModel.get_by_id(committer_id))
        committer_username = (committer_user_settings_model.username
                              if committer_user_settings_model else '')

        skill_commit_log_entry = SkillCommitLogEntryModel.create(
            self.id, self.version, committer_id, committer_username,
            commit_type, commit_message, commit_cmds,
            constants.ACTIVITY_STATUS_PUBLIC, False)
        skill_commit_log_entry.skill_id = self.id
        skill_commit_log_entry.put()
Code example #11
File: models_param.py  Project: wilrona/GateauApps
class TypeGateaux(ndb.Model):
    name = ndb.StringProperty()
    pr_sable = ndb.BooleanProperty(default=False)
Code example #12
class SendOutformat(ndb.Model):
    success = ndb.BooleanProperty(required=True)
    roses = ndb.StructuredProperty(model.Rose, repeated=True)
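
StructuredProperty embeds whole Rose entities inside the parent record; a construction sketch (model.Rose's fields are not shown above, so the constructor arguments are omitted):

# Hypothetical construction with two embedded Rose entities.
out = SendOutformat(success=True, roses=[model.Rose(), model.Rose()])
out.put()
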
Code example #13
class Person(ndb.Model):
    chat_id = ndb.IntegerProperty()
    state = ndb.IntegerProperty(default=-1, indexed=True)
    name = ndb.StringProperty()
    last_name = ndb.StringProperty()
    username = ndb.StringProperty()
    enabled = ndb.BooleanProperty(default=True)
    lang_code = ndb.StringProperty(default='eng')  # language code
    last_seen = ndb.DateTimeProperty(auto_now=True)
    show_alpha_names = ndb.BooleanProperty(default=False)

    def getFirstName(self):
        return self.name.encode('utf-8') if self.name else None

    def getLastName(self):
        return self.last_name.encode('utf-8') if self.last_name else None

    def getFirstLastName(self):
        result = self.getFirstName()
        if self.last_name:
            result += ' ' + self.getLastName()
        return result

    def getUsername(self):
        return self.username.encode('utf-8') if self.username else None

    def getUserInfoString(self):
        info = self.getFirstName()
        if self.last_name:
            info += ' ' + self.getLastName()
        if self.username:
            info += ' @' + self.getUsername()
        info += ' ({0})'.format(str(self.chat_id))
        return info

    def getLanguageCode(self):
        return self.lang_code.encode('utf-8')

    def getLanguageName(self):
        #return self.language.encode('utf-8')
        return languages.getLanguageName(self.getLanguageCode())

    def setState(self, newstate, put=True):
        self.state = newstate
        if put:
            self.put()

    def setEnabled(self, enabled, put=False):
        self.enabled = enabled
        if put:
            self.put()

    def setLanguageAndLangCode(self, index, put=False):
        self.lang_code = languages.ALL_LANG_CODES[index]
        self.language = languages.ALL_LANGUAGES[index]
        #logging.debug("changing language to {0} {1}".format(self.getLanguageCode(),self.getLanguageName()))
        if put:
            self.put()

    def updateUsername(self, username, put=False):
        if self.username != username:
            self.username = username
            if put:
                self.put()

    def isAdmin(self):
        return self.chat_id in key.MASTER_CHAT_ID
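
A sketch of the typical update flow for an incoming chat message (the chat id, username, and state value are illustrative):

# Hypothetical update flow for an incoming message.
person = Person.query(Person.chat_id == 123456).get()
if person:
    person.updateUsername('new_username')  # defers the put()
    person.setState(0)                     # setState() puts by default
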
Code example #14
class Avatar(Media):
    ''' Describes a user avatar. '''

    version = ndb.IntegerProperty('v', indexed=True, default=1)
    active = ndb.BooleanProperty('e', indexed=True, default=False)
    content = ndb.BlobProperty('bc', indexed=False)
Code example #15
class StudentAttendence(ndb.Model):
    studentKey = ndb.KeyProperty()
    date = ndb.DateProperty()
    isPresent = ndb.BooleanProperty(default=True)
    employeeKey = ndb.KeyProperty()
    when = ndb.DateTimeProperty(auto_now=True)
Code example #16
class DataPoint(ndb.Model):
    # The build number corresponding to this data point. Only relevant for
    # analysis at the build level.
    build_number = ndb.IntegerProperty(indexed=False)

    # The pass rate of the test when run against this commit.
    # -1 means that the test doesn't exist at this commit/build.
    pass_rate = ndb.FloatProperty(indexed=False)

    # The ID of the swarming task responsible for generating this data.
    task_id = ndb.StringProperty(indexed=False)

    # The commit position of this data point.
    commit_position = ndb.IntegerProperty(indexed=False)

    # The git hash of this data point.
    git_hash = ndb.StringProperty(indexed=False)

    # The commit position of the build preceding this one. Only relevant if this
    # data point is generated at the build level.
    previous_build_commit_position = ndb.IntegerProperty(indexed=False)

    # The git hash of the data point 1 build before this one. Only relevant if
    # this data point is generated as the result of a flake swarming task.
    previous_build_git_hash = ndb.StringProperty(indexed=False)

    # The list of revisions between this build and the previous build. Only
    # relevant if this data point is generated as the result of a flake swarming
    # task.
    blame_list = ndb.StringProperty(repeated=True)

    # The URL to the try job that generated this data point, if any.
    try_job_url = ndb.StringProperty(indexed=False)

    # A flag indicating whether the checked build has a valid artifact.
    # This flag is only for build-level data points.
    has_valid_artifact = ndb.BooleanProperty(indexed=False, default=True)

    def GetCommitPosition(self, revision):
        """Gets the commit position of a revision within blame_list.

        Args:
            revision (str): The revision to search for.

        Returns:
            commit_position (int): The calculated commit position of revision.
        """
        assert revision in self.blame_list

        for i in range(0, len(self.blame_list)):  # pragma: no branch
            if revision == self.blame_list[i]:
                return i + self.previous_build_commit_position + 1

    def GetRevisionAtCommitPosition(self, commit_position):
        """Gets the corresponding revision to commit_position.

        Args:
            commit_position (int): The commit position for which to find the
                corresponding revision within self.blame_list.

        Returns:
            revision (str): The git revision corresponding to commit_position.
        """
        length = len(self.blame_list)
        assert (commit_position > self.commit_position - length
                and commit_position <= self.commit_position)
        return self.blame_list[length -
                               (self.commit_position - commit_position) - 1]

    def GetDictOfCommitPositionAndRevision(self):
        """Gets a dict of commit_position:revision items for this data_point."""
        blamed_cls = {}
        commit_position = self.commit_position
        for i in xrange(len(self.blame_list) - 1, -1, -1):
            blamed_cls[commit_position] = self.blame_list[i]
            commit_position -= 1

        return blamed_cls
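
A worked sketch of the commit-position arithmetic (the numbers are illustrative):

# Hypothetical data point covering commit positions 101..103.
dp = DataPoint(commit_position=103,
               previous_build_commit_position=100,
               blame_list=['r101', 'r102', 'r103'])
assert dp.GetCommitPosition('r102') == 102
assert dp.GetRevisionAtCommitPosition(101) == 'r101'
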
Code example #17
class Link(ndb.Model):
    url = ndb.StringProperty()
    owner_id = ndb.StringProperty()
    owner_name = ndb.StringProperty()
    viewcount = ndb.IntegerProperty()
    public = ndb.BooleanProperty()
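A minimal sketch of creating and querying this model; all field values are hypothetical:

link = Link(url='https://example.com',
            owner_id='u123',
            owner_name='alice',
            viewcount=0,
            public=True)
link_key = link.put()

# The ten most-viewed public links (a composite index on
# (public, -viewcount) would be needed in production).
top_links = Link.query(Link.public == True).order(  # pylint: disable=singleton-comparison
    -Link.viewcount).fetch(10)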
Code example #18
class MasterFlakeAnalysis(BaseAnalysis, BaseBuildModel, VersionedModel,
                          TriagedModel):
    """Represents an analysis of a flaky test on a Waterfall test cycle."""
    @ndb.ComputedProperty
    def step_name(self):
        return self.key.pairs()[0][1].split('/')[3]

    @ndb.ComputedProperty
    def canonical_step_name(self):
        return self.step_name.split(' on ')[0]

    @ndb.ComputedProperty
    def test_name(self):
        return base64.urlsafe_b64decode(self.key.pairs()[0][1].split('/')[4])

    @property
    def error_message(self):
        if not self.error:
            return None
        return self.error.get('message')

    @property
    def iterations_to_rerun(self):
        if not self.algorithm_parameters:
            return -1
        return (self.algorithm_parameters.get('swarming_rerun',
                                              {}).get('iterations_to_rerun')
                or self.algorithm_parameters.get('iterations_to_rerun'))

    @staticmethod
    def _CreateAnalysisId(master_name, builder_name, build_number, step_name,
                          test_name):
        encoded_test_name = base64.urlsafe_b64encode(test_name)
        return '%s/%s/%s/%s/%s' % (master_name, builder_name, build_number,
                                   step_name, encoded_test_name)

    @staticmethod
    def GetBuildConfigurationFromKey(master_flake_analysis_key):
        """Extracts master_name and builder_name from key."""
        if not master_flake_analysis_key:
            return None, None

        components = master_flake_analysis_key.pairs()[0][1].split('/')
        master_name = components[0]
        builder_name = components[1]
        return master_name, builder_name

    # Arguments number differs from overridden method - pylint: disable=W0221
    @classmethod
    def Create(cls, master_name, builder_name, build_number, step_name,
               test_name):  # pragma: no cover.
        return super(MasterFlakeAnalysis, cls).Create(
            MasterFlakeAnalysis._CreateAnalysisId(master_name, builder_name,
                                                  build_number, step_name,
                                                  test_name))

    # Arguments number differs from overridden method - pylint: disable=W0221
    @classmethod
    def GetVersion(cls,
                   master_name,
                   builder_name,
                   build_number,
                   step_name,
                   test_name,
                   version=None):  # pragma: no cover.
        return super(MasterFlakeAnalysis,
                     cls).GetVersion(key=MasterFlakeAnalysis._CreateAnalysisId(
                         master_name, builder_name, build_number, step_name,
                         test_name),
                                     version=version)

    def UpdateTriageResult(self,
                           triage_result,
                           suspect_info,
                           user_name,
                           version_number=None):
        """Updates triage result for a flake analysis.

    If there is a culprit for the analysis, triage will be at the CL level;
    otherwise the triage will be for the suspected flake build.
    """
        super(MasterFlakeAnalysis,
              self).UpdateTriageResult(triage_result,
                                       suspect_info,
                                       user_name,
                                       version_number=version_number)

        if triage_result == triage_status.TRIAGED_CORRECT:
            self.result_status = result_status.FOUND_CORRECT
            if suspect_info.get('culprit_revision'):
                self.correct_culprit = True
        else:
            self.result_status = result_status.FOUND_INCORRECT
            if suspect_info.get('culprit_revision'):
                self.correct_culprit = False

    def GetDataPointOfSuspectedBuild(self):
        """Gets the corresponding data point to the suspected flake build."""
        if self.suspected_flake_build_number is not None:
            for data_point in self.data_points:
                if data_point.build_number == self.suspected_flake_build_number:
                    return data_point

        return None

    def Reset(self):
        super(MasterFlakeAnalysis, self).Reset()
        self.original_master_name = None
        self.original_builder_name = None
        self.original_build_number = None
        self.original_step_name = None
        self.original_test_name = None
        self.bug_id = None
        self.swarming_rerun_results = []
        self.error = None
        self.correct_regression_range = None
        self.correct_culprit = None
        self.algorithm_parameters = None
        self.suspected_flake_build_number = None
        self.culprit = None
        self.try_job_status = None
        self.data_points = []
        self.result_status = None
        self.last_attempted_build_number = None
        self.last_attempted_swarming_task_id = None
        self.last_attempted_revision = None

    # The original build/step/test in which a flake actually occurred.
    # A CQ trybot step has to be mapped to a Waterfall buildbot step.
    # A gtest suite.PRE_PRE_test has to be normalized to suite.test.
    original_master_name = ndb.StringProperty(indexed=True)
    original_builder_name = ndb.StringProperty(indexed=True)
    original_build_number = ndb.IntegerProperty(indexed=True)
    original_step_name = ndb.StringProperty(indexed=True)
    original_test_name = ndb.StringProperty(indexed=True)

    # The bug id in which this flake is reported.
    bug_id = ndb.IntegerProperty(indexed=True)

    # A list of dicts containing information about each swarming rerun involved
    # in this analysis. The contents of this list will be used
    # for metrics, such as the number of cache hits this analysis benefited from,
    # the number of swarming tasks that were needed end-to-end to find the
    # regressed build number (if any), etc. See FlakeSwarmingTaskData for exact
    # fields.
    swarming_rerun_results = ndb.LocalStructuredProperty(FlakeSwarmingTaskData,
                                                         repeated=True,
                                                         compressed=True)

    # Error code and message, if any.
    error = ndb.JsonProperty(indexed=False)

    # Boolean whether the suspected regression range/build number is correct.
    correct_regression_range = ndb.BooleanProperty(indexed=True)

    # Boolean whether the suspected CL found in the regression range
    # is correct.
    correct_culprit = ndb.BooleanProperty(indexed=True)

    # The look back algorithm parameters that were used, as specified in Findit's
    # configuration. For example,
    # {
    #     'iterations_to_rerun': 100,
    #     'lower_flake_threshold': 0.02,
    #     'max_build_numbers_to_look_back': 500,
    #     'max_flake_in_a_row': 4,
    #     'max_stable_in_a_row': 4,
    #     'upper_flake_threshold': 0.98
    # }
    algorithm_parameters = ndb.JsonProperty(indexed=False)

    # The build number suspected to have introduced the flakiness.
    suspected_flake_build_number = ndb.IntegerProperty()

    # The confidence that the suspected build introduced the flakiness.
    confidence_in_suspected_build = ndb.FloatProperty(indexed=False)

    # The culprit CL associated with the try job results, if any.
    culprit = ndb.LocalStructuredProperty(FlakeCulprit)

    # The status of try jobs, if any. None if analysis is still performing
    # swarming reruns, SKIPPED if try jobs will not be triggered, RUNNING when
    # the first try job is triggered, COMPLETED when the last one finishes, and
    # ERROR if any try job ends with error.
    try_job_status = ndb.IntegerProperty(indexed=False)

    # The data points used to plot the flakiness graph build over build.
    data_points = ndb.LocalStructuredProperty(DataPoint,
                                              repeated=True,
                                              compressed=True)

    # Whether the analysis was triggered by a manual request through check flake,
    # Findit's automatic analysis upon detection, or Findit API.
    triggering_source = ndb.IntegerProperty(default=None, indexed=True)

    # Who triggered the analysis. Used for differentiating between manual and
    # automatic runs, and determining the most active users to gather feedback.
    triggering_user_email = ndb.StringProperty(default=None, indexed=False)

    # Whether the user email is obscured.
    triggering_user_email_obscured = ndb.BooleanProperty(default=False,
                                                         indexed=True)

    # Overall conclusion of the analysis result for the flake, e.g. Found
    # Untriaged or Found Correct. Used to filter what is displayed on the
    # check flake dashboard.
    result_status = ndb.IntegerProperty(indexed=True)

    # The build number corresponding to the last attempted swarming task.
    last_attempted_build_number = ndb.IntegerProperty(indexed=False)

    # The task id of the last-attempted swarming task.
    last_attempted_swarming_task_id = ndb.StringProperty(indexed=False)

    # The revision the last-attempted try job tried to run on.
    last_attempted_revision = ndb.StringProperty(indexed=False)
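A hedged sketch of how the analysis id and key encode the build configuration; the names below are hypothetical, and the key is built directly rather than through Create:

from google.appengine.ext import ndb

analysis_id = MasterFlakeAnalysis._CreateAnalysisId(
    'chromium', 'Linux Tests', 123, 'browser_tests on Ubuntu', 'suite.test')
# -> 'chromium/Linux Tests/123/browser_tests on Ubuntu/c3VpdGUudGVzdA=='

# GetBuildConfigurationFromKey only reads the first key pair's id.
master_name, builder_name = MasterFlakeAnalysis.GetBuildConfigurationFromKey(
    ndb.Key('MasterFlakeAnalysis', analysis_id))
assert (master_name, builder_name) == ('chromium', 'Linux Tests')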
Code example #19
class ExplorationModel(base_models.VersionedModel):
    """Versioned storage model for an Oppia exploration.

    This class should only be imported by the exploration services file
    and the exploration model test file.
    """
    SNAPSHOT_METADATA_CLASS = ExplorationSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = ExplorationSnapshotContentModel
    ALLOW_REVERT = True

    # What this exploration is called.
    title = ndb.StringProperty(required=True)
    # The category this exploration belongs to.
    category = ndb.StringProperty(required=True, indexed=True)
    # The objective of this exploration.
    objective = ndb.TextProperty(default='', indexed=False)
    # The ISO 639-1 code for the language this exploration is written in.
    language_code = ndb.StringProperty(default=constants.DEFAULT_LANGUAGE_CODE,
                                       indexed=True)
    # Tags (topics, skills, concepts, etc.) associated with this
    # exploration.
    tags = ndb.StringProperty(repeated=True, indexed=True)
    # A blurb for this exploration.
    blurb = ndb.TextProperty(default='', indexed=False)
    # 'Author notes' for this exploration.
    author_notes = ndb.TextProperty(default='', indexed=False)

    # The version of the states blob schema.
    states_schema_version = ndb.IntegerProperty(required=True,
                                                default=0,
                                                indexed=True)
    # The name of the initial state of this exploration.
    init_state_name = ndb.StringProperty(required=True, indexed=False)
    # A dict representing the states of this exploration. This dict should
    # not be empty.
    states = ndb.JsonProperty(default={}, indexed=False)
    # The dict of parameter specifications associated with this exploration.
    # Each specification is a dict whose keys are param names and whose values
    # are each dicts with a single key, 'obj_type', whose value is a string.
    param_specs = ndb.JsonProperty(default={}, indexed=False)
    # The list of parameter changes to be performed once at the start of a
    # reader's encounter with an exploration.
    param_changes = ndb.JsonProperty(repeated=True, indexed=False)
    # A boolean indicating whether automatic text-to-speech is enabled in
    # this exploration.
    auto_tts_enabled = ndb.BooleanProperty(default=True, indexed=True)
    # A boolean indicating whether correctness feedback is enabled in this
    # exploration.
    correctness_feedback_enabled = ndb.BooleanProperty(default=False,
                                                       indexed=True)

    # DEPRECATED in v2.0.0.rc.2. Do not use. Retaining it here because deletion
    # caused GAE to raise an error on fetching a specific version of the
    # exploration model.
    # TODO(sll): Fix this error and remove this property.
    skill_tags = ndb.StringProperty(repeated=True, indexed=True)
    # DEPRECATED in v2.0.1. Do not use.
    # TODO(sll): Remove this property from the model.
    default_skin = ndb.StringProperty(default='conversation_v1')
    # DEPRECATED in v2.5.4. Do not use.
    skin_customizations = ndb.JsonProperty(indexed=False)

    @staticmethod
    def get_deletion_policy():
        """Exploration is deleted only if it is not public."""
        return base_models.DELETION_POLICY.KEEP_IF_PUBLIC

    @staticmethod
    def get_export_policy():
        """Model does not contain user data."""
        return base_models.EXPORT_POLICY.NOT_APPLICABLE

    @classmethod
    def has_reference_to_user_id(cls, user_id):
        """Check whether ExplorationModel or its snapshots references the given
        user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        return cls.SNAPSHOT_METADATA_CLASS.exists_for_user_id(user_id)

    @staticmethod
    def get_user_id_migration_policy():
        """ExplorationModel doesn't have any field with user ID."""
        return base_models.USER_ID_MIGRATION_POLICY.NOT_APPLICABLE

    @classmethod
    def get_exploration_count(cls):
        """Returns the total number of explorations."""
        return cls.get_all().count()

    def _trusted_commit(self, committer_id, commit_type, commit_message,
                        commit_cmds):
        """Record the event to the commit log after the model commit.

        Note that this extends the superclass method.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_type: str. The type of commit. Possible values are in
                core.storage.base_models.COMMIT_TYPE_CHOICES.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information to
                reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and then additional arguments for that command.
        """
        super(ExplorationModel,
              self)._trusted_commit(committer_id, commit_type, commit_message,
                                    commit_cmds)

        committer_user_settings_model = (
            user_models.UserSettingsModel.get_by_id(committer_id))
        committer_username = (committer_user_settings_model.username
                              if committer_user_settings_model else '')

        exp_rights = ExplorationRightsModel.get_by_id(self.id)

        # TODO(msl): test if put_async() leads to any problems (make
        # sure summary dicts get updated correctly when explorations
        # are changed).
        exploration_commit_log = ExplorationCommitLogEntryModel.create(
            self.id, self.version, committer_id, committer_username,
            commit_type, commit_message, commit_cmds, exp_rights.status,
            exp_rights.community_owned)
        exploration_commit_log.exploration_id = self.id
        exploration_commit_log.put()

    @classmethod
    def delete_multi(cls,
                     entity_ids,
                     committer_id,
                     commit_message,
                     force_deletion=False):
        """Deletes the given cls instances with the given entity_ids.

        Note that this extends the superclass method.

        Args:
            entity_ids: list(str). Ids of entities to delete.
            committer_id: str. The user_id of the user who committed the change.
            commit_message: str. The commit description message.
            force_deletion: bool. If True these models are deleted completely
                from storage, otherwise they are only marked as deleted.
                Default is False.
        """
        super(ExplorationModel,
              cls).delete_multi(entity_ids,
                                committer_id,
                                commit_message,
                                force_deletion=force_deletion)

        if not force_deletion:
            committer_user_settings_model = (
                user_models.UserSettingsModel.get_by_id(committer_id))
            committer_username = (committer_user_settings_model.username
                                  if committer_user_settings_model else '')

            commit_log_models = []
            exp_rights_models = ExplorationRightsModel.get_multi(
                entity_ids, include_deleted=True)
            versioned_models = cls.get_multi(entity_ids, include_deleted=True)

            versioned_and_exp_rights_models = python_utils.ZIP(
                versioned_models, exp_rights_models)
            for model, rights_model in versioned_and_exp_rights_models:
                exploration_commit_log = ExplorationCommitLogEntryModel.create(
                    model.id, model.version, committer_id, committer_username,
                    cls._COMMIT_TYPE_DELETE, commit_message,
                    [{
                        'cmd': cls.CMD_DELETE_COMMIT
                    }], rights_model.status, rights_model.community_owned)
                exploration_commit_log.exploration_id = model.id
                commit_log_models.append(exploration_commit_log)
            ndb.put_multi_async(commit_log_models)
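For context, a hedged sketch of how a commit to this model might look at the services layer; the change list shown is illustrative and not necessarily Oppia's exact command schema:

commit_cmds = [{
    'cmd': 'edit_exploration_property',
    'property_name': 'title',
    'old_value': 'Untitled',
    'new_value': 'Fractions 101',
}]

# commit() (from VersionedModel) snapshots the model and invokes
# _trusted_commit() above, which writes the ExplorationCommitLogEntryModel.
exp_model = ExplorationModel.get('exp_id_0')
exp_model.title = 'Fractions 101'
exp_model.commit('committer_id_0', 'Rename exploration.', commit_cmds)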
Code example #20
File: gae_models.py  Project: mfr88/oppia
class TopicRightsModel(base_models.VersionedModel):
    """Storage model for rights related to a topic.

    The id of each instance is the id of the corresponding topic.
    """

    SNAPSHOT_METADATA_CLASS = TopicRightsSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = TopicRightsSnapshotContentModel
    ALLOW_REVERT = False

    # The user_ids of the managers of this topic.
    manager_ids = ndb.StringProperty(indexed=True, repeated=True)

    # Whether this topic is published.
    topic_is_published = ndb.BooleanProperty(indexed=True,
                                             required=True,
                                             default=False)

    @staticmethod
    def get_deletion_policy():
        """Topic rights should be kept if associated topic is published."""
        return base_models.DELETION_POLICY.KEEP_IF_PUBLIC

    @classmethod
    def has_reference_to_user_id(cls, user_id):
        """Check whether TopicRightsModel references user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        return (cls.query(cls.manager_ids == user_id).get(keys_only=True)
                is not None
                or cls.SNAPSHOT_METADATA_CLASS.exists_for_user_id(user_id))

    @classmethod
    def get_by_user(cls, user_id):
        """Retrieves the rights object for all topics assigned to given user

        Args:
            user_id: str. ID of user.

        Returns:
            list(TopicRightsModel). The list of TopicRightsModel objects in
            which the given user is a manager.
        """
        topic_rights_models = cls.query(cls.manager_ids == user_id)
        return topic_rights_models

    def _trusted_commit(self, committer_id, commit_type, commit_message,
                        commit_cmds):
        """Record the event to the commit log after the model commit.

        Note that this extends the superclass method.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_type: str. The type of commit. Possible values are in
                core.storage.base_models.COMMIT_TYPE_CHOICES.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information to
                reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and then additional arguments for that command.
        """
        super(TopicRightsModel,
              self)._trusted_commit(committer_id, commit_type, commit_message,
                                    commit_cmds)

        topic_rights = TopicRightsModel.get_by_id(self.id)
        if topic_rights.topic_is_published:
            status = constants.ACTIVITY_STATUS_PUBLIC
        else:
            status = constants.ACTIVITY_STATUS_PRIVATE

        TopicCommitLogEntryModel(
            id=('rights-%s-%s' % (self.id, self.version)),
            user_id=committer_id,
            topic_id=self.id,
            commit_type=commit_type,
            commit_message=commit_message,
            commit_cmds=commit_cmds,
            version=None,
            post_commit_status=status,
            post_commit_community_owned=False,
            post_commit_is_private=not topic_rights.topic_is_published).put()

    @staticmethod
    def get_export_policy():
        """Model contains user data."""
        return base_models.EXPORT_POLICY.CONTAINS_USER_DATA

    @classmethod
    def export_data(cls, user_id):
        """(Takeout) Export user-relevant properties of TopicRightsModel.

        Args:
            user_id: str. The user_id denotes which user's data to extract.

        Returns:
            dict. The user-relevant properties of TopicRightsModel in a dict
            format. In this case, we are returning all the ids of the topics
            this user manages.
        """
        managed_topics = cls.get_all().filter(cls.manager_ids == user_id)
        managed_topic_ids = [right.id for right in managed_topics]

        return {'managed_topic_ids': managed_topic_ids}
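A small sketch of how this model might be exercised, with hypothetical ids; commit() comes from VersionedModel and routes through _trusted_commit() above:

rights = TopicRightsModel(id='topic_id_0',
                          manager_ids=['user_id_0'],
                          topic_is_published=True)
rights.commit('committer_id_0', 'Created topic rights', [{'cmd': 'create_new'}])

assert TopicRightsModel.has_reference_to_user_id('user_id_0')
assert TopicRightsModel.export_data('user_id_0') == {
    'managed_topic_ids': ['topic_id_0']
}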
Code example #21
class Device(base_model.BaseModel):
    """Datastore model representing a device.

  Attributes:
    serial_number: str, unique serial number used to identify the device.
    asset_tag: str, unique org-specific identifier for the device.
    enrolled: bool, indicates the enrollment status of the device.
    device_model: str, the model name of the device.
    due_date: datetime, the date that device is due for return.
    last_known_healthy: datetime, the date of the last known healthy
        status.
    shelf: ndb.key, The shelf key the device is placed on.
    assigned_user: str, The email of the user who is assigned to the device.
    assignment_date: datetime, The date the device was assigned to a user.
    current_ou: str, The current organizational unit the device belongs to.
    ou_changed_date: datetime, The date the organizational unit was changed.
    locked: bool, indicates whether or not the device is locked.
    lost: bool, indicates whether or not the device is lost.
    mark_pending_return_date: datetime, The date a user marked device returned.
    chrome_device_id: str, a unique device ID.
    last_heartbeat: datetime, the date of the last time the device checked in.
    damaged: bool, indicates whether the device is damaged.
    damaged_reason: str, A string denoting the reason for being reported as
        damaged.
    last_reminder: Reminder, Level, time, and count of the last reminder
        the device had.
    next_reminder: Reminder, Level, time, and count of the next reminder.
  """
    serial_number = ndb.StringProperty()
    asset_tag = ndb.StringProperty()
    enrolled = ndb.BooleanProperty(default=True)
    device_model = ndb.StringProperty()
    due_date = ndb.DateTimeProperty()
    last_known_healthy = ndb.DateTimeProperty()
    shelf = ndb.KeyProperty(kind='Shelf')
    assigned_user = ndb.StringProperty()
    assignment_date = ndb.DateTimeProperty()
    current_ou = ndb.StringProperty()
    ou_changed_date = ndb.DateTimeProperty()
    locked = ndb.BooleanProperty(default=False)
    lost = ndb.BooleanProperty(default=False)
    mark_pending_return_date = ndb.DateTimeProperty()
    chrome_device_id = ndb.StringProperty()
    last_heartbeat = ndb.DateTimeProperty()
    damaged = ndb.BooleanProperty(default=False)
    damaged_reason = ndb.StringProperty()
    last_reminder = ndb.StructuredProperty(Reminder)
    next_reminder = ndb.StructuredProperty(Reminder)

    _INDEX_NAME = constants.DEVICE_INDEX_NAME
    _SEARCH_PARAMETERS = {
        'a': 'asset_tag',
        'at': 'asset_tag',
        's': 'serial_number',
        'sn': 'serial_number',
        'u': 'assigned_user',
        'au': 'assigned_user'
    }

    @property
    def is_assigned(self):
        return bool(self.assigned_user)

    @property
    def is_on_shelf(self):
        return bool(self.shelf)

    @property
    def overdue(self):
        if self.due_date:
            return bool(self.due_date < datetime.datetime.utcnow())
        return False

    @property
    def identifier(self):
        return self.asset_tag or self.serial_number

    @property
    def guest_enabled(self):
        return self.current_ou == constants.ORG_UNIT_DICT['GUEST']

    def _post_put_hook(self, future):
        """Overrides the _post_put_hook method."""
        del future  # Unused.
        index = Device.get_index()
        index.put(self.to_document())

    @classmethod
    def list_by_user(cls, user):
        """Returns a list of devices assigned to a user.

    Args:
      user: str, the user's email address.

    Returns:
      A query of devices assigned to the user.
    """
        return cls.query(
            ndb.AND(cls.assigned_user == user,
                    cls.mark_pending_return_date == None)).fetch()  # pylint: disable=g-equals-none,singleton-comparison

    @classmethod
    def enroll(cls, user_email, serial_number=None, asset_tag=None):
        """Enrolls a new device.

    Args:
      user_email: str, email address of the user making the request.
      serial_number: str, serial number of the device.
      asset_tag: str, optional, asset tag of the device.

    Returns:
      The enrolled device object.

    Raises:
      DeviceCreationError: raised when moving the device's OU fails, when the
          directory API responds with incomplete information, or when the
          device is not found in the directory API.
    """
        device_identifier_mode = config_model.Config.get(
            'device_identifier_mode')
        if not asset_tag and device_identifier_mode in (
                config_defaults.DeviceIdentifierMode.BOTH_REQUIRED,
                config_defaults.DeviceIdentifierMode.ASSET_TAG):
            raise datastore_errors.BadValueError(_ASSET_TAGS_REQUIRED_MSG)
        elif not serial_number and device_identifier_mode in (
                config_defaults.DeviceIdentifierMode.BOTH_REQUIRED,
                config_defaults.DeviceIdentifierMode.SERIAL_NUMBER):
            raise datastore_errors.BadValueError(_SERIAL_NUMBERS_REQUIRED_MSG)
        directory_client = directory.DirectoryApiClient(user_email)
        device = cls.get(serial_number=serial_number, asset_tag=asset_tag)
        now = datetime.datetime.utcnow()

        existing_device = bool(device)
        if existing_device:
            device = _update_existing_device(device, user_email, asset_tag)
        else:
            device = cls(serial_number=serial_number, asset_tag=asset_tag)

        identifier = serial_number or asset_tag
        logging.info('Enrolling device %s', identifier)
        device = events.raise_event('device_enroll', device=device)
        if device.serial_number:
            serial_number = device.serial_number
        else:
            raise DeviceCreationError('No serial number for device.')

        if not existing_device:
            # If this implementation of the app can translate asset tags to serial
            # numbers, recheck for an existing device now that we may have the serial.
            if device_identifier_mode == (
                    config_defaults.DeviceIdentifierMode.ASSET_TAG):
                device_by_serial = cls.get(serial_number=serial_number)
                if device_by_serial:
                    device = _update_existing_device(device_by_serial,
                                                     user_email, asset_tag)
                    existing_device = True

        try:
            # Get a Chrome OS Device object as per
            # https://developers.google.com/admin-sdk/directory/v1/reference/chromeosdevices
            directory_device_object = directory_client.get_chrome_device_by_serial(
                serial_number)
        except directory.DeviceDoesNotExistError as err:
            raise DeviceCreationError(str(err))
        try:
            device.chrome_device_id = directory_device_object[
                directory.DEVICE_ID]
            device.current_ou = directory_device_object[
                directory.ORG_UNIT_PATH]
            device.device_model = directory_device_object[directory.MODEL]
        except KeyError:
            raise DeviceCreationError(_DIRECTORY_INFO_INCOMPLETE_MSG)

        try:
            directory_client.move_chrome_device_org_unit(
                device_id=directory_device_object[directory.DEVICE_ID],
                org_unit_path=constants.ORG_UNIT_DICT['DEFAULT'])
        except directory.DirectoryRPCError as err:
            raise DeviceCreationError(
                _FAILED_TO_MOVE_DEVICE_MSG %
                (serial_number, constants.ORG_UNIT_DICT['DEFAULT'], str(err)))
        device.current_ou = constants.ORG_UNIT_DICT['DEFAULT']
        device.ou_changed_date = now
        device.last_known_healthy = now
        device.put()
        device.stream_to_bq(user_email, 'Enrolling device.')
        return device

    def unenroll(self, user_email):
        """Unenrolls a device, removing it from the Grab n Go program.

    This moves the device to the root Chrome OU; however, it does not change
    its lost or locked attributes, nor does it unlock it if it's locked (i.e.,
    disabled in the Directory API).

    Args:
      user_email: str, email address of the user making the request.

    Returns:
      The unenrolled device.

    Raises:
      FailedToUnenrollError: raised when moving the device's OU fails.
    """
        unenroll_ou = config_model.Config.get('unenroll_ou')
        directory_client = directory.DirectoryApiClient(user_email)
        try:
            directory_client.move_chrome_device_org_unit(
                device_id=self.chrome_device_id, org_unit_path=unenroll_ou)
        except directory.DirectoryRPCError as err:
            raise FailedToUnenrollError(
                _FAILED_TO_MOVE_DEVICE_MSG %
                (self.identifier, unenroll_ou, str(err)))
        self.enrolled = False
        self.due_date = None
        self.shelf = None
        self.assigned_user = None
        self.assignment_date = None
        self.current_ou = unenroll_ou
        self.ou_changed_date = datetime.datetime.utcnow()
        self.mark_pending_return_date = None
        self.last_reminder = None
        self.next_reminder = None
        self = events.raise_event('device_unenroll', device=self)
        self.put()
        self.stream_to_bq(user_email, 'Unenrolling device.')
        return self

    @classmethod
    def create_unenrolled(cls, device_id, user_email):
        """Creates a Device but leave it unenrolled from the Grab n Go program.

    Args:
      device_id: str, a Chrome Device ID to pass to the directory API.
      user_email: str, email address of the user making the request.

    Returns:
      The newly created device.

    Raises:
      DeviceCreationError: if the Directory API doesn't find this device in the
        org or the info retrieved from the Directory API is incomplete.
    """
        directory_client = directory.DirectoryApiClient(user_email)
        directory_info = directory_client.get_chrome_device(device_id)
        if not directory_info:
            raise DeviceCreationError('Device ID not found in org.')
        try:
            device = cls(serial_number=directory_info[directory.SERIAL_NUMBER],
                         enrolled=False,
                         device_model=directory_info.get(directory.MODEL),
                         current_ou=directory_info[directory.ORG_UNIT_PATH],
                         chrome_device_id=directory_info[directory.DEVICE_ID])
        except KeyError:
            raise DeviceCreationError(_DIRECTORY_INFO_INCOMPLETE_MSG)

        device.put()
        return device

    @classmethod
    def get(cls,
            asset_tag=None,
            chrome_device_id=None,
            serial_number=None,
            unknown_identifier=None):
        """Retrieves a device object using one of several device identifiers.

    Args:
      asset_tag: str, the asset tag of the device.
      chrome_device_id: str, the Chrome device ID of a device.
      serial_number: str, the serial number of a device.
      unknown_identifier: str, either an asset tag or serial number of the
          device, and this function will attempt both.

    Returns:
      A device model, or None if one cannot be found.

    Raises:
      DeviceIdentifierError: if there is no device identifier supplied, or if an
          invalid URL-safe key is supplied.
    """
        if asset_tag:
            return cls.query(cls.asset_tag == asset_tag).get()
        elif chrome_device_id:
            return cls.query(cls.chrome_device_id == chrome_device_id).get()
        elif serial_number:
            return cls.query(cls.serial_number == serial_number).get()
        elif unknown_identifier:
            return (cls.query(cls.serial_number == unknown_identifier).get()
                    or cls.query(cls.asset_tag == unknown_identifier).get())
        else:
            raise DeviceIdentifierError(
                'No identifier supplied to get device.')

    def calculate_return_dates(self):
        """Calculates maximum and default return dates for a loan.

    Returns:
      A ReturnDates NamedTuple of datetimes.

    Raises:
      ReturnDatesCalculationError: When trying to calculate return dates for a
          device that has not been assigned.
    """
        if not self.is_assigned:
            raise ReturnDatesCalculationError(_NOT_ASSIGNED_MSG)
        loan_duration = config_model.Config.get('loan_duration')
        max_loan_duration = config_model.Config.get('maximum_loan_duration')
        default_date = self.assignment_date + datetime.timedelta(
            days=loan_duration)
        max_loan_date = self.assignment_date + datetime.timedelta(
            days=max_loan_duration)

        return ReturnDates(max_loan_date, default_date)

    def lock(self, user_email):
        """Disables a device via the Directory API.

    Args:
      user_email: string, email address of the user making the request.
    """
        logging.info('Contacting Directory to lock (disable) Device %s.',
                     self.identifier)
        client = directory.DirectoryApiClient(user_email)
        try:
            client.disable_chrome_device(self.chrome_device_id)
        except directory.DeviceAlreadyDisabledError as err:
            logging.error(_ALREADY_DISABLED_MSG, err)
        else:
            self.stream_to_bq(user_email, 'Disabling device.')
        self.locked = True
        self.put()

    def unlock(self, user_email):
        """Re-enables a device via the Directory API.

    Args:
      user_email: str, email address of the user making the request.
    """
        logging.info('Contacting Directory to unlock (re-enable) Device %s.',
                     self.identifier)
        client = directory.DirectoryApiClient(user_email)
        client.reenable_chrome_device(self.chrome_device_id)
        if self.lost:
            self.lost = False
        self.locked = False
        self.move_to_default_ou(user_email=user_email)
        self.stream_to_bq(user_email, 'Re-enabling disabled device.')
        self.put()

    def loan_assign(self, user_email):
        """Assigns a device to a user.

    Args:
      user_email: str, email address of the user to whom the device should be
          assigned.

    Returns:
      The key of the datastore record.

    Raises:
      AssignmentError: if the device is not enrolled.
    """
        if not self.enrolled:
            raise AssignmentError('Cannot assign an unenrolled device.')

        if self.assigned_user and self.assigned_user != user_email:
            self._loan_return(user_email)

        self.assigned_user = user_email
        self.assignment_date = datetime.datetime.utcnow()
        self.mark_pending_return_date = None
        self.shelf = None
        self.due_date = self.calculate_return_dates().default
        self.move_to_default_ou(user_email=user_email)
        self = events.raise_event('device_loan_assign', device=self)
        self.put()
        self.stream_to_bq(user_email, 'Beginning new loan.')
        return self.key

    def resume_loan(self, user_email, message='Resuming loan.'):
        """Resumes a loan if it has been marked pending return.

    Args:
      user_email: str, email address of the user initiating the resume.
      message: str, the optional string to stream to bigquery.
    """
        if self.mark_pending_return_date:
            self.mark_pending_return_date = None
            self.put()
            self.stream_to_bq(user_email, message)

    def loan_resumes_if_late(self, user_email):
        """Resumes a loan on a device if it was marked returned, but later used.

    This allows a user who has marked their device returned to keep using it
    for the return_grace_period, but beyond that it restores the loan, with any
    ongoing reminders and consequences that entails.

    Args:
      user_email: str, email address of the user initiating the return.
    """
        if self.mark_pending_return_date:
            time_since = (datetime.datetime.utcnow() -
                          self.mark_pending_return_date)
            if time_since.total_seconds() / 60.0 > config_model.Config.get(
                    'return_grace_period'):
                self.resume_loan(user_email,
                                 message='Resuming loan, since use continued.')

    @validate_assignee_or_admin
    def loan_extend(self, user_email, extend_date_time):
        """Requests an extension to the provided date.

    Args:
      user_email: str, user_email of the user requesting the extension.
      extend_date_time: DateTime, the requested date to extend to.

    Raises:
      ExtendError: If the date is out of an acceptable range.
      UnassignedDeviceError: if the device is not assigned.
    """
        if not self.is_assigned:
            raise UnassignedDeviceError(_UNASSIGNED_DEVICE)
        extend_date = extend_date_time.date()
        if extend_date < datetime.date.today():
            raise ExtendError('Extension date cannot be in the past.')
        return_dates = self.calculate_return_dates()
        if extend_date <= return_dates.max.date():
            self.due_date = datetime.datetime.combine(
                extend_date, return_dates.default.time())
        else:
            raise ExtendError('Extension date outside allowable date range.')
        self.put()
        self.stream_to_bq(user_email, 'Extending loan.')

    def _loan_return(self, user_email):
        """Returns a device in a loan.

    Args:
      user_email: str, user_email of the user initiating the return.

    Returns:
      The key of the datastore record.
    """
        if self.lost:
            self.lost = False
        if self.locked:
            self.unlock(user_email)
        self.assigned_user = None
        self.assignment_date = None
        self.due_date = None
        self.mark_pending_return_date = None
        self.move_to_default_ou(user_email=user_email)
        self.last_reminder = None
        self.next_reminder = None
        self = events.raise_event('device_loan_return', device=self)
        self.put()
        self.stream_to_bq(user_email, 'Marking device as returned.')
        return self.key

    def record_heartbeat(self):
        """Records a heartbeat for a device."""
        now = datetime.datetime.utcnow()
        self.last_heartbeat = now
        self.last_known_healthy = now
        self.put()

    @validate_assignee_or_admin
    def mark_pending_return(self, user_email):
        """Marks a device as returned, as reported by the user.

    Args:
      user_email: str, The email of the acting user.

    Raises:
      UnassignedDeviceError: if the device is not assigned.
    """
        if not self.is_assigned:
            raise UnassignedDeviceError(_UNASSIGNED_DEVICE)
        self.mark_pending_return_date = datetime.datetime.utcnow()
        self.move_to_default_ou(user_email=user_email)
        self.stream_to_bq(user_email, 'Marking device as Pending Return.')
        self.put()

    def set_last_reminder(self, reminder_level):
        """Records the last_reminder for a loaned device, overwriting existing one.

    Args:
      reminder_level: int, the level of the reminder, matching the reminder
          rule's reminder_level.
    """
        count = 0
        if self.last_reminder and self.last_reminder.level == reminder_level:
            count = self.last_reminder.count or 0
        self.last_reminder = Reminder(level=reminder_level,
                                      time=datetime.datetime.utcnow(),
                                      count=count + 1)
        self.put()

    def set_next_reminder(self, reminder_level, delay_delta):
        """Sets the next_reminder for a loaned device, overwriting existing one.

    Args:
      reminder_level: int, the level of the reminder, matching the reminder
          rule's reminder_level.
      delay_delta: datetime.timedelta, the time to wait until the reminder
          should happen, which this method records as a UTC datetime.
    """
        reminder_time = datetime.datetime.utcnow() + delay_delta
        self.next_reminder = Reminder(level=reminder_level, time=reminder_time)
        self.put()

    @validate_assignee_or_admin
    def mark_damaged(self, user_email, damaged_reason=None):
        """Marks a device as damaged.

    Args:
      user_email: string, the user that marked the device as damaged.
      damaged_reason: string, the reason the device is considered damaged.
    """
        if not damaged_reason:
            damaged_reason = 'No reason provided'
        self.damaged = True
        self.damaged_reason = damaged_reason
        self.move_to_default_ou(user_email=user_email)
        self.stream_to_bq(
            user_email, 'Marking device as damaged, reason: {reason}'.format(
                reason=damaged_reason))
        self.put()

    @validate_assignee_or_admin
    def mark_undamaged(self, user_email):
        """Resets a device's damaged state.

    Args:
      user_email: string, the user that is marking a device as undamaged.
    """
        self.damaged = False
        self.stream_to_bq(user_email, "Clearning the device's damaged state.")
        self.put()

    @validate_assignee_or_admin
    def mark_lost(self, user_email):
        """Marks a device as lost.

    Args:
      user_email: str, The email of the acting user.
    """
        self.lost = True
        self.assigned_user = None
        self.assignment_date = None
        self.due_date = None
        self.last_reminder = None
        self.next_reminder = None
        self.move_to_default_ou(user_email=user_email)
        self.lock(user_email)
        self.stream_to_bq(user_email, 'Marking device lost and locking it.')

    @validate_assignee_or_admin
    def enable_guest_mode(self, user_email):
        """Moves a device into guest mode if allowed.

    Args:
      user_email: str, The email of the acting user.

    Raises:
      GuestNotAllowedError: when the allow_guest_mode config is not True.
      EnableGuestError: if there is an RPC error in the Directory API, or the
          allow_guest_mode setting is not True.
      UnassignedDeviceError: if the device is not assigned; guest mode should
          not be allowed for unassigned devices.
    """
        if not self.is_assigned:
            raise UnassignedDeviceError(_UNASSIGNED_DEVICE)
        if config_model.Config.get('allow_guest_mode'):
            directory_client = directory.DirectoryApiClient(user_email)
            guest_ou = constants.ORG_UNIT_DICT['GUEST']

            try:
                directory_client.move_chrome_device_org_unit(
                    device_id=self.chrome_device_id, org_unit_path=guest_ou)
            except directory.DirectoryRPCError as err:
                raise EnableGuestError(str(err))
            else:
                self.current_ou = guest_ou
                self.ou_changed_date = datetime.datetime.utcnow()
                self.stream_to_bq(user_email, 'Moving device into Guest Mode.')
                self.put()
                if config_model.Config.get('timeout_guest_mode'):
                    countdown = datetime.timedelta(
                        hours=config_model.Config.get(
                            'guest_mode_timeout_in_hours')).total_seconds()
                    deferred.defer(self._disable_guest_mode,
                                   user_email,
                                   _countdown=countdown)
        else:
            raise GuestNotAllowedError(_GUEST_MODE_DISABLED_MSG)

    def _disable_guest_mode(self, user_email):
        """Moves a device back to the default OU if still assigned.

    Args:
      user_email: str, The email of the acting user.
    """
        if self.assigned_user == user_email:
            self.move_to_default_ou(user_email=user_email)
            self.put()

    def move_to_default_ou(self, user_email):
        """Corrects the current ou to be default during user actions.

    Args:
      user_email: str, The email of the acting user.
    Raises:
      UnableToMoveToDefaultOUError: when the directory api call fails to move
          the device into the default OU.
    """
        if self.current_ou != constants.ORG_UNIT_DICT['DEFAULT']:
            directory_client = directory.DirectoryApiClient(
                user_email=user_email)

            try:
                directory_client.move_chrome_device_org_unit(
                    device_id=self.chrome_device_id,
                    org_unit_path=constants.ORG_UNIT_DICT['DEFAULT'])
            except directory.DirectoryRPCError as err:
                raise UnableToMoveToDefaultOUError(
                    _FAILED_TO_MOVE_DEVICE_MSG %
                    (self.identifier, constants.ORG_UNIT_DICT['DEFAULT'],
                     str(err)))
            else:
                self.current_ou = constants.ORG_UNIT_DICT['DEFAULT']
                self.ou_changed_date = datetime.datetime.utcnow()

    def device_audit_check(self):
        """Checks a device to make sure it passes all prechecks for audit.

    Raises:
      DeviceNotEnrolledError: when a device is not enrolled in the application.
      UnableToMoveToShelfError: when a device cannot be checked into a shelf.
    """
        if not self.enrolled:
            raise DeviceNotEnrolledError(DEVICE_NOT_ENROLLED_MSG %
                                         self.identifier)
        if self.damaged:
            raise UnableToMoveToShelfError(_DEVICE_DAMAGED_MSG %
                                           self.identifier)

    def move_to_shelf(self, shelf, user_email):
        """Checks a device into a shelf.

    Args:
      shelf: shelf_model.Shelf obj, the shelf to check device into.
      user_email: str, the email of the user taking the action.

    Raises:
      UnableToMoveToShelfError: when a device cannot be checked into a shelf.
    """
        if not shelf.enabled:
            raise UnableToMoveToShelfError(
                'Unable to check device {} into shelf. Shelf {} is not '
                'active.'.format(self.identifier, shelf.location))
        logging.info('Checking device %s into shelf %s.', self.identifier,
                     shelf.location)
        self.shelf = shelf.key
        self.last_known_healthy = datetime.datetime.utcnow()
        self._loan_return(user_email=user_email)
        self.stream_to_bq(
            user_email,
            'Placing device: {} on shelf: {}'.format(self.identifier,
                                                     shelf.location))

    def remove_from_shelf(self, shelf, user_email):
        """Removes a device's associated shelf.

    Args:
      shelf: shelf_model.Shelf obj, the shelf to remove device from.
      user_email: str, the email of the user taking the action.
    """
        if self.shelf:
            # Compare locations by value; `is` would test object identity.
            if self.shelf.get().location == shelf.location:
                self.shelf = None
                self.put()
                self.stream_to_bq(
                    user_email, 'Removing device: {} from shelf: {}'.format(
                        self.identifier, shelf.location))
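A hedged sketch of the loan lifecycle these methods implement, assuming an already-enrolled device and working Directory API credentials:

import datetime

device = Device.get(asset_tag='12345')

# Start a loan; this also computes the default due date.
device.loan_assign('user@example.com')

# The user asks for more time, up to the maximum loan duration.
device.loan_extend(
    'user@example.com',
    datetime.datetime.utcnow() + datetime.timedelta(days=3))

# The user reports the device returned; loan_resumes_if_late() would undo
# this if the device keeps checking in past the grace period.
device.mark_pending_return('user@example.com')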
Code example #22
class BigQueryRow(base_model.BaseModel):
  """Datastore model representing a single row in BigQuery.

  Attributes:
    ndb_key: ndb.key, The key of the ndb entity being streamed to BigQuery.
    model_type: str, the model type being streamed to BigQuery.
    timestamp: datetime, the timestamp of when the action occurred.
    actor: str, the acting user of the action.
    method: str, method name performing the action.
    summary: str, Human-readable summary of what is occurring.
    entity: json, a flattened representation of the entity.
    streamed: bool, indicates if the data was streamed successfully.
  """
  ndb_key = ndb.KeyProperty(required=True)
  model_type = ndb.StringProperty(required=True)
  timestamp = ndb.DateTimeProperty(required=True)
  actor = ndb.StringProperty(required=True)
  method = ndb.StringProperty(required=True)
  summary = ndb.StringProperty(required=True)
  entity = ndb.JsonProperty(required=True)
  streamed = ndb.BooleanProperty(default=False)

  @classmethod
  def add(cls, model_instance, timestamp, actor, method, summary):
    """Adds a row to the queue to be submitted to BigQuery.

    Args:
      model_instance: ndb model, the instance of the affected model.
      timestamp: datetime, a timestamp of when the change occurred.
      actor: str, user performing the action.
      method: str, the method name performing the action.
      summary: str, human-readable summary of what is occurring.

    Returns:
      The created row entity.
    """
    row = cls(
        ndb_key=model_instance.key,
        model_type=type(model_instance).__name__,
        timestamp=timestamp,
        actor=actor,
        method=method,
        summary=summary,
        entity=model_instance.to_json_dict())
    row.put()
    return row

  @classmethod
  def _fetch_unstreamed_rows(cls):
    """Retrieves all rows that have not been streamed."""
    return cls.query(cls.streamed == False).fetch(  # pylint: disable=g-explicit-bool-comparison,singleton-comparison
        limit=constants.BIGQUERY_ROW_MAX_BATCH_SIZE)

  @classmethod
  def _get_last_unstreamed_row(cls):
    """Retrieves the last row that was not streamed."""
    return cls.query(cls.streamed == False).order(  # pylint: disable=g-explicit-bool-comparison,singleton-comparison
        cls.streamed, cls.timestamp).get()

  @classmethod
  def _time_threshold_reached(cls):
    """Checks if the time threshold for a BigQuery stream was met."""
    threshold = datetime.datetime.utcnow() - datetime.timedelta(
        minutes=constants.BIGQUERY_ROW_TIME_THRESHOLD)
    return cls._get_last_unstreamed_row().timestamp <= threshold

  @classmethod
  def _row_threshold_reached(cls):
    """Checks if the unstreamed row threshold for a BigQuery stream was met."""
    return (cls.query(cls.streamed == False).count(  # pylint: disable=g-explicit-bool-comparison,singleton-comparison
        limit=constants.BIGQUERY_ROW_MAX_BATCH_SIZE) >=
            constants.BIGQUERY_ROW_SIZE_THRESHOLD)

  @classmethod
  def threshold_reached(cls):
    """Determines whether or not entities should be streamed to BigQuery."""
    return cls._time_threshold_reached() or cls._row_threshold_reached()

  @classmethod
  def stream_rows(cls):
    """Streams all unstreamed rows if a threshold has been reached."""
    logging.info('Streaming rows to BigQuery.')
    if not cls.threshold_reached():
      logging.info('Not streaming rows, thresholds not met.')
      return
    bq_client = bigquery.BigQueryClient()
    rows = cls._fetch_unstreamed_rows()
    tables = _format_for_bq(rows)
    try:
      for table_name in tables:
        bq_client.stream_table(table_name, tables[table_name])
    except bigquery.InsertError:
      logging.error('Unable to stream rows.')
      return
    _set_streamed(rows)
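A short sketch of the intended flow, where `device` stands in for any model instance with a to_json_dict() method (for example the Device model above):

import datetime

# Queue a row describing an action that just occurred.
BigQueryRow.add(
    model_instance=device,
    timestamp=datetime.datetime.utcnow(),
    actor='user@example.com',
    method='loan_assign',
    summary='Beginning new loan.')

# A cron job would then periodically try to flush the queue; stream_rows()
# is a no-op unless the time or row-count threshold has been reached.
BigQueryRow.stream_rows()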
Code example #23
class Event(ndb.Model):
    """
    Events represent FIRST Robotics Competition events, both official and unofficial.
    key_name is like '2010ct'
    """
    name = ndb.StringProperty()
    event_type_enum = ndb.IntegerProperty(required=True)
    short_name = ndb.StringProperty(
        indexed=False
    )  # Should not contain "Regional" or "Division", like "Hartford"
    event_short = ndb.StringProperty(
        required=True, indexed=False)  # Smaller abbreviation like "CT"
    first_code = ndb.StringProperty(
    )  # Event code used in FIRST's API, if different from event_short
    year = ndb.IntegerProperty(required=True)
    event_district_enum = ndb.IntegerProperty(
        default=DistrictType.NO_DISTRICT
    )  # Deprecated, use district_key instead
    district_key = ndb.KeyProperty(kind=District)
    start_date = ndb.DateTimeProperty()
    end_date = ndb.DateTimeProperty()
    playoff_type = ndb.IntegerProperty()

    # venue, venue_address, city, state_prov, country, and postalcode are from FIRST
    venue = ndb.StringProperty(indexed=False)  # Name of the event venue
    venue_address = ndb.StringProperty(
        indexed=False
    )  # Most detailed venue address (includes venue, street, and location separated by \n)
    city = ndb.StringProperty()  # Equivalent to locality. From FRCAPI
    state_prov = ndb.StringProperty()  # Equivalent to region. From FRCAPI
    country = ndb.StringProperty()  # From FRCAPI
    postalcode = ndb.StringProperty(
    )  # From ElasticSearch only. String because it can be like "95126-1215"
    # Normalized address from the Google Maps API, constructed using the above
    normalized_location = ndb.StructuredProperty(Location)

    timezone_id = ndb.StringProperty(
    )  # such as 'America/Los_Angeles' or 'Asia/Jerusalem'
    official = ndb.BooleanProperty(
        default=False)  # Is the event FIRST-official?
    first_eid = ndb.StringProperty()  # from USFIRST
    parent_event = ndb.KeyProperty(
    )  # This is the division -> event champs relationship
    divisions = ndb.KeyProperty(repeated=True)  # event champs -> all divisions
    facebook_eid = ndb.StringProperty(indexed=False)  # from Facebook
    custom_hashtag = ndb.StringProperty(indexed=False)  # Custom HashTag
    website = ndb.StringProperty(indexed=False)
    webcast_json = ndb.TextProperty(
        indexed=False
    )  # list of dicts, valid keys include 'type' and 'channel'
    enable_predictions = ndb.BooleanProperty(default=False)
    remap_teams = ndb.JsonProperty(
    )  # Map of temporary team numbers to pre-rookie and B teams

    created = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
    updated = ndb.DateTimeProperty(auto_now=True, indexed=False)

    def __init__(self, *args, **kw):
        # store the set of affected reference keys for cache clearing
        # keys must be model properties
        self._affected_references = {
            'key': set(),
            'year': set(),
            'district_key': set()
        }
        self._awards = None
        self._details = None
        self._location = None
        self._city_state_country = None
        self._matches = None
        self._teams = None
        self._venue_address_safe = None
        self._webcast = None
        self._updated_attrs = []  # Used in EventManipulator to track changes
        self._week = None
        super(Event, self).__init__(*args, **kw)

    @ndb.tasklet
    def get_awards_async(self):
        from database import award_query
        self._awards = yield award_query.EventAwardsQuery(
            self.key_name).fetch_async()

    @property
    def alliance_selections(self):
        if self.details is None:
            return None
        else:
            return self.details.alliance_selections

    @property
    def alliance_teams(self):
        """
        Load a list of team keys playing in elims
        """
        alliances = self.alliance_selections
        if alliances is None:
            return []
        teams = []
        for alliance in alliances:
            for pick in alliance['picks']:
                teams.append(pick)
        return teams

    @property
    def awards(self):
        if self._awards is None:
            self.get_awards_async().wait()
        return self._awards

    @property
    def details(self):
        if self._details is None:
            self._details = EventDetails.get_by_id(self.key.id())
        elif isinstance(self._details, Future):
            self._details = self._details.get_result()
        return self._details

    def prep_details(self):
        if self._details is None:
            self._details = ndb.Key(EventDetails, self.key.id()).get_async()

    @property
    def district_points(self):
        if self.details is None:
            return None
        else:
            return self.details.district_points

    @ndb.tasklet
    def get_matches_async(self):
        if self._matches is None:
            from database import match_query
            self._matches = yield match_query.EventMatchesQuery(
                self.key_name).fetch_async()

    def prep_matches(self):
        if self._matches is None:
            from database import match_query
            self._matches = match_query.EventMatchesQuery(
                self.key_name).fetch_async()

    @property
    def matches(self):
        if self._matches is None:
            self.get_matches_async().wait()
        elif isinstance(self._matches, Future):
            self._matches = self._matches.get_result()
        return self._matches

    def local_time(self):
        import pytz
        now = datetime.datetime.now()
        if self.timezone_id is not None:
            tz = pytz.timezone(self.timezone_id)
            try:
                now = now + tz.utcoffset(now)
            except (pytz.NonExistentTimeError,
                    pytz.AmbiguousTimeError):  # may happen during DST
                now = now + tz.utcoffset(now + datetime.timedelta(
                    hours=1))  # add offset to get out of the non-existent time
        return now

    def withinDays(self, negative_days_before, days_after):
        if not self.start_date or not self.end_date:
            return False
        now = self.local_time()
        after_start = self.start_date.date() + datetime.timedelta(
            days=negative_days_before) <= now.date()
        before_end = self.end_date.date() + datetime.timedelta(
            days=days_after) >= now.date()

        return (after_start and before_end)

    @property
    def now(self):
        if self.timezone_id is not None:
            return self.withinDays(0, 0)
        else:
            return self.within_a_day  # overestimate what is "now" if no timezone

    @property
    def within_a_day(self):
        return self.withinDays(-1, 1)

    @property
    def past(self):
        return self.end_date.date() < self.local_time().date() and not self.now

    @property
    def future(self):
        return self.start_date.date() > self.local_time().date(
        ) and not self.now

    @property
    def starts_today(self):
        return self.start_date.date() == self.local_time().date()

    @property
    def ends_today(self):
        return self.end_date.date() == self.local_time().date()

    @property
    def week(self):
        """
        Returns the week of the event relative to the first official season event as an integer
        Returns None if the event is not of type NON_CMP_EVENT_TYPES or is not official
        """
        if self.event_type_enum not in EventType.NON_CMP_EVENT_TYPES or not self.official:
            return None

        # Cache week_start for the same context
        cache_key = '{}_week_start:{}'.format(self.year,
                                              ndb.get_context().__hash__())
        week_start = context_cache.get(cache_key)
        if week_start is None:
            e = Event.query(
                Event.year == self.year,
                Event.event_type_enum.IN(EventType.NON_CMP_EVENT_TYPES),
                Event.start_date != None).order(Event.start_date).fetch(
                    1, projection=[Event.start_date])
            if e:
                first_start_date = e[0].start_date
                diff_from_wed = (first_start_date.weekday() -
                                 2) % 7  # 2 is Wednesday
                week_start = first_start_date - datetime.timedelta(
                    days=diff_from_wed)
            else:
                week_start = None
        context_cache.set(cache_key, week_start)

        if self._week is None and week_start is not None:
            days = (self.start_date - week_start).days
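            # Note: Python 2 integer division, so this yields a whole-week index.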
            self._week = days / 7

        return self._week

    @property
    def is_season_event(self):
        return self.event_type_enum in EventType.SEASON_EVENT_TYPES

    @ndb.tasklet
    def get_teams_async(self):
        from database import team_query
        self._teams = yield team_query.EventTeamsQuery(
            self.key_name).fetch_async()

    @property
    def teams(self):
        if self._teams is None:
            self.get_teams_async().wait()
        return self._teams

    @ndb.toplevel
    def prepAwardsMatchesTeams(self):
        yield self.get_awards_async(), self.get_matches_async(
        ), self.get_teams_async()

    @ndb.toplevel
    def prepTeams(self):
        yield self.get_teams_async()

    @ndb.toplevel
    def prepTeamsMatches(self):
        yield self.get_matches_async(), self.get_teams_async()

    @property
    def matchstats(self):
        if self.details is None:
            return None
        else:
            return self.details.matchstats

    @property
    def rankings(self):
        if self.details is None:
            return None
        else:
            return self.details.rankings

    @property
    def location(self):
        if self._location is None:
            split_location = []
            if self.city:
                split_location.append(self.city)
            if self.state_prov:
                if self.postalcode:
                    split_location.append(self.state_prov + ' ' +
                                          self.postalcode)
                else:
                    split_location.append(self.state_prov)
            if self.country:
                split_location.append(self.country)
            self._location = ', '.join(split_location)
        return self._location

    @property
    def city_state_country(self):
        if not self._city_state_country and self.nl:
            self._city_state_country = self.nl.city_state_country

        if not self._city_state_country:
            location_parts = []
            if self.city:
                location_parts.append(self.city)
            if self.state_prov:
                location_parts.append(self.state_prov)
            if self.country:
                country = self.country
                if self.country == 'US':
                    country = 'USA'
                location_parts.append(country)
            self._city_state_country = ', '.join(location_parts)
        return self._city_state_country

    @property
    def nl(self):
        return self.normalized_location

    @property
    def venue_or_venue_from_address(self):
        if self.venue:
            return self.venue
        else:
            try:
                return self.venue_address.split('\r\n')[0]
            except AttributeError:  # venue_address is None
                return None

    @property
    def venue_address_safe(self):
        """
        Construct (not detailed) venue address if detailed venue address doesn't exist
        """
        if not self.venue_address:
            if not self.venue or not self.location:
                self._venue_address_safe = None
            else:
                self._venue_address_safe = "{}\n{}".format(
                    self.venue.encode('utf-8'), self.location.encode('utf-8'))
        else:
            self._venue_address_safe = self.venue_address.replace('\r\n', '\n')
        return self._venue_address_safe

    @property
    def webcast(self):
        """
        Lazy load parsing webcast JSON
        """
        if self._webcast is None:
            try:
                self._webcast = json.loads(self.webcast_json)
            except Exception:  # invalid or missing JSON
                self._webcast = None
        return self._webcast
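A hedged usage sketch for this Event model follows; EventType.REGIONAL, the field values, and the key id shown are illustrative assumptions, not taken from the source.

import datetime

# Hypothetical usage sketch; assumes EventType.REGIONAL exists in the codebase.
event = Event(
    id='2010ct',  # the id follows the '{year}{event_short}' key convention
    name='Hartford Regional',
    event_type_enum=EventType.REGIONAL,
    event_short='ct',
    year=2010,
    start_date=datetime.datetime(2010, 3, 4),
    end_date=datetime.datetime(2010, 3, 6),
    official=True,
    timezone_id='America/New_York',
)
event.put()

if event.within_a_day:
    print 'Event is happening around now'
print event.week  # zero-indexed week relative to the first official event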
コード例 #24
0
class Token(internal_only_model.InternalOnlyModel):
    """Token is used to get state of request.

  Token can contain multiple Measurement. One per each histogram in the
  request. States of nested Measurements affect state of the Token.
  """
    _use_memcache = True
    _memcache_timeout = _MEMCACHE_TIMEOUT

    internal_only = ndb.BooleanProperty(default=True, indexed=False)

    state_ = ndb.IntegerProperty(name='state',
                                 default=State.PENDING,
                                 indexed=False)

    error_message = ndb.StringProperty(indexed=False, default=None)

    creation_time = ndb.DateTimeProperty(auto_now_add=True, indexed=False)

    update_time = ndb.DateTimeProperty(auto_now=True, indexed=False)

    temporary_staging_file_path = ndb.StringProperty(indexed=False,
                                                     default=None)

    @property
    def state(self):
        measurements = self.GetMeasurements()
        if not measurements:
            return self.state_

        all_states = [
            child.state for child in measurements if child is not None
        ]
        all_states.append(self.state_)
        if all(s == State.PENDING for s in all_states):
            return State.PENDING
        if any(s in (State.PROCESSING, State.PENDING) for s in all_states):
            return State.PROCESSING
        if any(s == State.FAILED for s in all_states):
            return State.FAILED
        return State.COMPLETED

    def _LogStateChanged(self):
        logging.info(
            'Upload completion token updated. Token id: %s, state: %s',
            self.key.id(), StateToString(self.state))

    @classmethod
    @ndb.tasklet
    def UpdateObjectStateAsync(cls, obj, state, error_message=None):
        if obj is None:
            return
        yield obj.UpdateStateAsync(state, error_message)

    @ndb.tasklet
    def UpdateStateAsync(self, state, error_message=None):
        assert error_message is None or state == State.FAILED

        self.state_ = state
        self.error_message = error_message
        yield self.put_async()
        self._LogStateChanged()

    @ndb.tasklet
    def AddMeasurement(self, test_path, is_monitored):
        """Creates measurement, associated to the current token."""

        measurement = Measurement(id=test_path,
                                  parent=self.key,
                                  monitored=is_monitored)
        yield measurement.put_async()

        logging.info(
            'Upload completion token measurement created. Token id: %s, '
            'measurement id: %r', self.key.id(), measurement.key.id())
        raise ndb.Return(measurement)

    def GetMeasurements(self):
        return Measurement.query(ancestor=self.key).fetch()
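A sketch of how a caller might drive this Token; it assumes Measurement exposes the same UpdateStateAsync interface and that the State constants used above are importable.

# Hypothetical flow: one token per upload request, one measurement per
# histogram; Token.state then aggregates the measurement states.
@ndb.synctasklet
def ProcessUpload(token_id, test_paths):
    token = Token(id=token_id)
    yield token.put_async()
    for path in test_paths:
        measurement = yield token.AddMeasurement(path, is_monitored=True)
        # ... process the histogram, then mark the measurement done:
        yield Token.UpdateObjectStateAsync(measurement, State.COMPLETED)
    raise ndb.Return(token.state)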
コード例 #25
0
class CollectionRightsModel(base_models.VersionedModel):
    """Storage model for rights related to a collection.

    The id of each instance is the id of the corresponding collection.
    """
    SNAPSHOT_METADATA_CLASS = CollectionRightsSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = CollectionRightsSnapshotContentModel
    ALLOW_REVERT = False

    # The user_ids of owners of this collection.
    owner_ids = ndb.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who are allowed to edit this collection.
    editor_ids = ndb.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who are allowed to view this collection.
    viewer_ids = ndb.StringProperty(indexed=True, repeated=True)

    # Whether this collection is owned by the community.
    community_owned = ndb.BooleanProperty(indexed=True, default=False)
    # For private collections, whether this collection can be viewed
    # by anyone who has the URL. If the collection is not private, this
    # setting is ignored.
    viewable_if_private = ndb.BooleanProperty(indexed=True, default=False)
    # Time, in milliseconds, when the collection was first published.
    first_published_msec = ndb.FloatProperty(indexed=True, default=None)

    # The publication status of this collection.
    status = ndb.StringProperty(
        default=feconf.ACTIVITY_STATUS_PRIVATE, indexed=True,
        choices=[
            feconf.ACTIVITY_STATUS_PRIVATE,
            feconf.ACTIVITY_STATUS_PUBLIC
        ]
    )

    def save(self, committer_id, commit_message, commit_cmds):
        """Updates the collection rights model by applying the given
        commit_cmds, then saves it.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information to
                reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and additional arguments for that command.
        """
        super(CollectionRightsModel, self).commit(
            committer_id, commit_message, commit_cmds)

    def _trusted_commit(
            self, committer_id, commit_type, commit_message, commit_cmds):
        """Record the event to the commit log after the model commit.

        Note that this overrides the superclass method.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_type: str. The type of commit. Possible values are in
                core.storage.base_models.COMMIT_TYPE_CHOICES.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, should give sufficient information to
                reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and then additional arguments for that command.
        """
        super(CollectionRightsModel, self)._trusted_commit(
            committer_id, commit_type, commit_message, commit_cmds)

        # Create and delete events will already be recorded in the
        # CollectionModel.
        if commit_type not in ['create', 'delete']:
            committer_user_settings_model = (
                user_models.UserSettingsModel.get_by_id(committer_id))
            committer_username = (
                committer_user_settings_model.username
                if committer_user_settings_model else '')
            # TODO(msl): test if put_async() leads to any problems (make
            # sure summary dicts get updated correctly when collections
            # are changed).
            CollectionCommitLogEntryModel(
                id=('rights-%s-%s' % (self.id, self.version)),
                user_id=committer_id,
                username=committer_username,
                collection_id=self.id,
                commit_type=commit_type,
                commit_message=commit_message,
                commit_cmds=commit_cmds,
                version=None,
                post_commit_status=self.status,
                post_commit_community_owned=self.community_owned,
                post_commit_is_private=(
                    self.status == feconf.ACTIVITY_STATUS_PRIVATE)
            ).put_async()
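A sketch of committing a rights change through save(); the command name and ids below are illustrative, not the actual command schema.

# Hypothetical caller: grant a new editor and record the commit.
rights = CollectionRightsModel.get_by_id('collection_1')
rights.editor_ids.append('user_42')
rights.save(
    committer_id='admin_7',
    commit_message='Added user_42 as an editor.',
    commit_cmds=[{'cmd': 'change_role',  # illustrative command name
                  'assignee_id': 'user_42',
                  'new_role': 'editor'}])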
コード例 #26
0
class SkillModel(base_models.VersionedModel):
    """Model for storing Skills.

    This class should only be imported by the skill services file
    and the skill model test file.
    """

    SNAPSHOT_METADATA_CLASS = SkillSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = SkillSnapshotContentModel
    ALLOW_REVERT = False

    # The description of the skill.
    description = ndb.StringProperty(required=True, indexed=True)
    # The schema version for each of the misconception dicts.
    misconceptions_schema_version = ndb.IntegerProperty(required=True,
                                                        indexed=True)
    # The schema version for each of the rubric dicts.
    rubric_schema_version = ndb.IntegerProperty(required=True, indexed=True)
    # A list of misconceptions associated with the skill, in which each
    # element is a dict.
    misconceptions = ndb.JsonProperty(repeated=True, indexed=False)
    # The rubrics for the skill that explain each difficulty level.
    rubrics = ndb.JsonProperty(repeated=True, indexed=False)
    # The ISO 639-1 code for the language this skill is written in.
    language_code = ndb.StringProperty(required=True, indexed=True)
    # The schema version for the skill_contents.
    skill_contents_schema_version = ndb.IntegerProperty(required=True,
                                                        indexed=True)
    # A dict representing the skill contents.
    skill_contents = ndb.JsonProperty(indexed=False)
    # The prerequisite skills for the skill.
    prerequisite_skill_ids = ndb.StringProperty(repeated=True, indexed=False)
    # The id to be used by the next misconception added.
    next_misconception_id = ndb.IntegerProperty(required=True, indexed=False)
    # The id that the skill is merged into, in case the skill has been
    # marked as duplicate to another one and needs to be merged.
    # This is an optional field.
    superseding_skill_id = ndb.StringProperty(indexed=True)
    # A flag indicating whether deduplication is complete for this skill.
    # It will initially be False, and set to true only when there is a value
    # for superseding_skill_id and the merge was completed.
    all_questions_merged = ndb.BooleanProperty(indexed=True, required=True)

    @staticmethod
    def get_deletion_policy():
        """Skill should be kept if it is published."""
        return base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE

    @classmethod
    def has_reference_to_user_id(cls, user_id):
        """Check whether SkillModel snapshots references the given user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        return cls.SNAPSHOT_METADATA_CLASS.exists_for_user_id(user_id)

    @classmethod
    def get_merged_skills(cls):
        """Returns the skill models which have been merged.

        Returns:
            list(SkillModel). List of skill models which have been merged.
        """

        return [
            skill for skill in cls.query()
            if (skill.superseding_skill_id is not None and (
                len(skill.superseding_skill_id) > 0))
        ]

    def _trusted_commit(self, committer_id, commit_type, commit_message,
                        commit_cmds):
        """Record the event to the commit log after the model commit.

        Note that this extends the superclass method.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_type: str. The type of commit. Possible values are in
                core.storage.base_models.COMMIT_TYPE_CHOICES.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information to
                reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and then additional arguments for that command.
        """
        super(SkillModel, self)._trusted_commit(committer_id, commit_type,
                                                commit_message, commit_cmds)

        skill_commit_log_entry = SkillCommitLogEntryModel.create(
            self.id, self.version, committer_id, commit_type, commit_message,
            commit_cmds, constants.ACTIVITY_STATUS_PUBLIC, False)
        skill_commit_log_entry.skill_id = self.id
        skill_commit_log_entry.put()

    @staticmethod
    def get_export_policy():
        """Model does not contain user data."""
        return base_models.EXPORT_POLICY.NOT_APPLICABLE
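A brief consumption sketch for get_merged_skills(); the reporting logic is illustrative.

# Hypothetical: report merged skills whose question migration is pending.
for skill in SkillModel.get_merged_skills():
    if not skill.all_questions_merged:
        print 'Skill %s still merging into %s' % (
            skill.id, skill.superseding_skill_id)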
コード例 #27
0
class CollectionSummaryModel(base_models.BaseModel):
    """Summary model for an Oppia collection.

    This should be used whenever the content blob of the collection is not
    needed (e.g. search results, etc).

    A CollectionSummaryModel instance stores the following information:

        id, title, category, objective, language_code, tags,
        last_updated, created_on, status (private, public),
        community_owned, owner_ids, editor_ids,
        viewer_ids, version.

    The key of each instance is the collection id.
    """

    # What this collection is called.
    title = ndb.StringProperty(required=True)
    # The category this collection belongs to.
    category = ndb.StringProperty(required=True, indexed=True)
    # The objective of this collection.
    objective = ndb.TextProperty(required=True, indexed=False)
    # The ISO 639-1 code for the language this collection is written in.
    language_code = ndb.StringProperty(required=True, indexed=True)
    # Tags associated with this collection.
    tags = ndb.StringProperty(repeated=True, indexed=True)

    # Aggregate user-assigned ratings of the collection.
    ratings = ndb.JsonProperty(default=None, indexed=False)

    # Time when the collection model was last updated (not to be
    # confused with last_updated, which is the time when the
    # collection *summary* model was last updated).
    collection_model_last_updated = ndb.DateTimeProperty(indexed=True)
    # Time when the collection model was created (not to be confused
    # with created_on, which is the time when the collection *summary*
    # model was created).
    collection_model_created_on = ndb.DateTimeProperty(indexed=True)

    # The publication status of this collection.
    status = ndb.StringProperty(
        default=feconf.ACTIVITY_STATUS_PRIVATE, indexed=True,
        choices=[
            feconf.ACTIVITY_STATUS_PRIVATE,
            feconf.ACTIVITY_STATUS_PUBLIC
        ]
    )

    # Whether this collection is owned by the community.
    community_owned = ndb.BooleanProperty(required=True, indexed=True)

    # The user_ids of owners of this collection.
    owner_ids = ndb.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who are allowed to edit this collection.
    editor_ids = ndb.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who are allowed to view this collection.
    viewer_ids = ndb.StringProperty(indexed=True, repeated=True)
    # The user_ids of users who have contributed (humans who have made a
    # positive (not just a revert) change to the collection's content).
    contributor_ids = ndb.StringProperty(indexed=True, repeated=True)
    # A dict representing the contributors of non-trivial commits to this
    # collection. Each key of this dict is a user_id, and the corresponding
    # value is the number of non-trivial commits that the user has made.
    contributors_summary = ndb.JsonProperty(default={}, indexed=False)
    # The version number of the collection after this commit. Only populated
    # for commits to a collection (as opposed to its rights, etc.).
    version = ndb.IntegerProperty()
    # The number of nodes (explorations) within this collection.
    node_count = ndb.IntegerProperty()

    @classmethod
    def get_non_private(cls):
        """Returns an iterable with non-private collection summary models.

        Returns:
            iterable. An iterable with non-private collection summary models.
        """
        return CollectionSummaryModel.query().filter(
            CollectionSummaryModel.status != feconf.ACTIVITY_STATUS_PRIVATE
        ).filter(
            CollectionSummaryModel.deleted == False  # pylint: disable=singleton-comparison
        ).fetch(feconf.DEFAULT_QUERY_LIMIT)

    @classmethod
    def get_private_at_least_viewable(cls, user_id):
        """Returns an iterable with private collection summary models that are
        at least viewable by the given user.

        Args:
            user_id: The id of the given user.

        Returns:
            iterable. An iterable with private collection summary models that
            are at least viewable by the given user.
        """
        return CollectionSummaryModel.query().filter(
            CollectionSummaryModel.status == feconf.ACTIVITY_STATUS_PRIVATE
        ).filter(
            ndb.OR(CollectionSummaryModel.owner_ids == user_id,
                   CollectionSummaryModel.editor_ids == user_id,
                   CollectionSummaryModel.viewer_ids == user_id)
        ).filter(
            CollectionSummaryModel.deleted == False  # pylint: disable=singleton-comparison
        ).fetch(feconf.DEFAULT_QUERY_LIMIT)

    @classmethod
    def get_at_least_editable(cls, user_id):
        """Returns an iterable with collection summary models that are at least
        editable by the given user.

        Args:
            user_id: The id of the given user.

        Returns:
            iterable. An iterable with collection summary models that are at
            least editable by the given user.
        """
        return CollectionSummaryModel.query().filter(
            ndb.OR(CollectionSummaryModel.owner_ids == user_id,
                   CollectionSummaryModel.editor_ids == user_id)
        ).filter(
            CollectionSummaryModel.deleted == False  # pylint: disable=singleton-comparison
        ).fetch(feconf.DEFAULT_QUERY_LIMIT)
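A sketch of combining these query helpers for a per-user listing; the user id is illustrative.

# Hypothetical dashboard listing for one user.
editable = CollectionSummaryModel.get_at_least_editable('user_42')
private_viewable = CollectionSummaryModel.get_private_at_least_viewable(
    'user_42')
titles = [summary.title for summary in editable]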
コード例 #28
0
class Subject(ndb.Model):
    """Subject -- Conference object"""
    name = ndb.StringProperty(required=True)
    code = ndb.StringProperty()
    book = ndb.StringProperty()
    mandatory = ndb.BooleanProperty(default=True)
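A minimal instantiation sketch; the field values are illustrative.

# Hypothetical: `mandatory` falls back to its default of True.
subject = Subject(name='Algorithms', code='CS301', book='CLRS')
subject_key = subject.put()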
コード例 #29
0
class Anomaly(internal_only_model.InternalOnlyModel):
    """Represents a change-point or step found in the data series for a test.

  An Anomaly can be an upward or downward change, and can represent an
  improvement or a regression.
  """
    # Whether the alert should only be viewable by internal users.
    internal_only = ndb.BooleanProperty(indexed=True, default=False)

    # The time the alert fired.
    timestamp = ndb.DateTimeProperty(indexed=True, auto_now_add=True)

    # Note: -1 denotes an invalid alert and -2 an ignored alert.
    # By default, this is None, which denotes a non-triaged alert.
    bug_id = ndb.IntegerProperty(indexed=True)

    # The sheriff rotation that should handle this alert.
    sheriff = ndb.KeyProperty(kind=sheriff_module.Sheriff, indexed=True)

    # Each Alert is related to one Test.
    test = ndb.KeyProperty(indexed=True)
    statistic = ndb.StringProperty(indexed=True)

    # We'd like to be able to query Alerts by Master, Bot, and Benchmark names.
    master_name = ndb.ComputedProperty(
        lambda self: utils.TestPath(self.test).split('/')[0], indexed=True)
    bot_name = ndb.ComputedProperty(
        lambda self: utils.TestPath(self.test).split('/')[1], indexed=True)
    benchmark_name = ndb.ComputedProperty(
        lambda self: utils.TestPath(self.test).split('/')[2], indexed=True)

    # Each Alert has a revision range it's associated with; however,
    # start_revision and end_revision could be the same.
    start_revision = ndb.IntegerProperty(indexed=True)
    end_revision = ndb.IntegerProperty(indexed=True)

    # The revisions to use for display, if different than point id.
    display_start = ndb.IntegerProperty(indexed=False)
    display_end = ndb.IntegerProperty(indexed=False)

    # Ownership data, mapping 'emails' to the benchmark owners' e-mail
    # addresses and 'component' to the benchmark's Monorail component.
    ownership = ndb.JsonProperty()

    # The number of points before and after this anomaly that were looked at
    # when finding this anomaly.
    segment_size_before = ndb.IntegerProperty(indexed=False)
    segment_size_after = ndb.IntegerProperty(indexed=False)

    # The medians of the segments before and after the anomaly.
    median_before_anomaly = ndb.FloatProperty(indexed=False)
    median_after_anomaly = ndb.FloatProperty(indexed=False)

    # The standard deviation of the segments before the anomaly.
    std_dev_before_anomaly = ndb.FloatProperty(indexed=False)

    # The end revision of the window of points that was examined when this
    # anomaly was found. This is also returned by FindAnomalies.
    window_end_revision = ndb.IntegerProperty(indexed=False)

    # In order to estimate how likely it is that this anomaly is due to noise,
    # a t-test may be performed on the points before and after. The
    # t-statistic, degrees of freedom, and p-value are potentially useful
    # intermediate results.
    t_statistic = ndb.FloatProperty(indexed=False)
    degrees_of_freedom = ndb.FloatProperty(indexed=False)
    p_value = ndb.FloatProperty(indexed=False)

    # Whether this anomaly represents an improvement; if false, this anomaly is
    # considered to be a regression.
    is_improvement = ndb.BooleanProperty(indexed=True, default=False)

    # Whether this anomaly recovered (i.e. if this is a step down, whether there
    # is a corresponding step up later on, or vice versa.)
    recovered = ndb.BooleanProperty(indexed=True, default=False)

    # If the TestMetadata alerted upon has a ref build, store the ref build.
    ref_test = ndb.KeyProperty(indexed=False)

    # The corresponding units from the TestMetaData entity.
    units = ndb.StringProperty(indexed=False)

    recipe_bisects = ndb.KeyProperty(repeated=True, indexed=False)
    pinpoint_bisects = ndb.StringProperty(repeated=True, indexed=False)

    @property
    def percent_changed(self):
        """The percent change from before the anomaly to after."""
        if self.median_before_anomaly == 0.0:
            return sys.float_info.max
        difference = self.median_after_anomaly - self.median_before_anomaly
        return 100 * difference / self.median_before_anomaly

    @property
    def absolute_delta(self):
        """The absolute change from before the anomaly to after."""
        return self.median_after_anomaly - self.median_before_anomaly

    @property
    def direction(self):
        """Whether the change is numerically an increase or decrease."""
        if self.median_before_anomaly < self.median_after_anomaly:
            return UP
        return DOWN

    def GetDisplayPercentChanged(self):
        """Gets a string showing the percent change."""
        if abs(self.percent_changed) == sys.float_info.max:
            return FREAKIN_HUGE
        else:
            return '%.1f%%' % abs(self.percent_changed)

    def GetDisplayAbsoluteChanged(self):
        """Gets a string showing the absolute change."""
        if abs(self.absolute_delta) == sys.float_info.max:
            return FREAKIN_HUGE
        else:
            return '%f' % abs(self.absolute_delta)

    def GetRefTestPath(self):
        if not self.ref_test:
            return None
        return utils.TestPath(self.ref_test)

    def SetIsImprovement(self, test=None):
        """Sets whether the alert is an improvement for the given test."""
        if not test:
            test = self.GetTestMetadataKey().get()
        # |self.direction| is never equal to |UNKNOWN| (see the definition above)
        # so when the test improvement direction is |UNKNOWN|, |self.is_improvement|
        # will be False.
        self.is_improvement = (self.direction == test.improvement_direction)

    def GetTestMetadataKey(self):
        """Get the key for the TestMetadata entity of this alert.

    We are in the process of converting from Test entities to TestMetadata.
    Until this is done, it's possible that an alert may store either Test
    or TestMetadata in the 'test' KeyProperty. This gets the TestMetadata key
    regardless of what's stored.
    """
        return utils.TestMetadataKey(self.test)

    @classmethod
    @ndb.tasklet
    def QueryAsync(cls,
                   bot_name=None,
                   bug_id=None,
                   count_limit=0,
                   deadline_seconds=50,
                   inequality_property=None,
                   is_improvement=None,
                   key=None,
                   keys_only=False,
                   limit=100,
                   master_name=None,
                   max_end_revision=None,
                   max_start_revision=None,
                   max_timestamp=None,
                   min_end_revision=None,
                   min_start_revision=None,
                   min_timestamp=None,
                   recovered=None,
                   sheriff=None,
                   start_cursor=None,
                   test=None,
                   test_keys=None,
                   test_suite_name=None):
        if key:
            # This tasklet isn't allowed to catch the internal_only AssertionError.
            alert = yield ndb.Key(urlsafe=key).get_async()
            raise ndb.Return(([alert], None, 1))

        # post_filters can cause results to be empty, depending on the shape of the
        # data and which filters are applied in the query and which filters are
        # applied after the query. Automatically chase cursors until some results
        # are found, but stay under the request timeout.
        results = []
        deadline = time.time() + deadline_seconds
        while not results and time.time() < deadline:
            query = cls.query()
            if sheriff is not None:
                sheriff_key = ndb.Key('Sheriff', sheriff)
                sheriff_entity = yield sheriff_key.get_async()
                if sheriff_entity:
                    query = query.filter(cls.sheriff == sheriff_key)
            if is_improvement is not None:
                query = query.filter(cls.is_improvement == is_improvement)
            if bug_id is not None:
                if bug_id == '':
                    query = query.filter(cls.bug_id == None)
                elif bug_id != '*':
                    query = query.filter(cls.bug_id == int(bug_id))
                # bug_id='*' translates to bug_id != None, which is handled with the
                # other inequality filters.
            if recovered is not None:
                query = query.filter(cls.recovered == recovered)
            if test or test_keys:
                if not test_keys:
                    test_keys = []
                if test:
                    test_keys += [
                        utils.OldStyleTestKey(test),
                        utils.TestMetadataKey(test)
                    ]
                query = query.filter(cls.test.IN(test_keys))
                query = query.order(cls.key)
                inequality_property = 'key'
            if master_name:
                query = query.filter(cls.master_name == master_name)
            if bot_name:
                query = query.filter(cls.bot_name == bot_name)
            if test_suite_name:
                query = query.filter(cls.benchmark_name == test_suite_name)

            query, post_filters = cls._InequalityFilters(
                query, inequality_property, bug_id, min_end_revision,
                max_end_revision, min_start_revision, max_start_revision,
                min_timestamp, max_timestamp)
            if post_filters:
                keys_only = False
            query = query.order(-cls.timestamp)

            if start_cursor:
                # "BadArgumentError: _MultiQuery with cursors requires __key__ order"
                query = query.order(cls.key)

            futures = [
                query.fetch_page_async(limit,
                                       start_cursor=start_cursor,
                                       keys_only=keys_only)
            ]
            if count_limit:
                futures.append(query.count_async(count_limit))
            query_duration = timing.WallTimeLogger('query_duration')
            with query_duration:
                yield futures
            results, start_cursor, more = futures[0].get_result()
            if count_limit:
                count = futures[1].get_result()
            else:
                count = len(results)
            logging.info('query_results_count=%d', len(results))
            if results:
                logging.info('duration_per_result=%f',
                             query_duration.seconds / len(results))
            if post_filters:
                results = [
                    alert for alert in results if all(
                        post_filter(alert) for post_filter in post_filters)
                ]
            if not more:
                start_cursor = None
            if not start_cursor:
                break
        raise ndb.Return((results, start_cursor, count))

    @classmethod
    def _InequalityFilters(cls, query, inequality_property, bug_id,
                           min_end_revision, max_end_revision,
                           min_start_revision, max_start_revision,
                           min_timestamp, max_timestamp):
        # A query cannot have more than one inequality filter.
        # inequality_property allows users to decide which property to filter in the
        # query, which can significantly affect performance. If other inequalities
        # are specified, they will be handled by post_filters.

        # If callers set inequality_property without actually specifying a
        # corresponding inequality filter, then reset the inequality_property and
        # compute it automatically as if it were not specified.
        if inequality_property == 'start_revision':
            if min_start_revision is None and max_start_revision is None:
                inequality_property = None
        elif inequality_property == 'end_revision':
            if min_end_revision is None and max_end_revision is None:
                inequality_property = None
        elif inequality_property == 'timestamp':
            if min_timestamp is None and max_timestamp is None:
                inequality_property = None
        elif inequality_property == 'bug_id':
            if bug_id != '*':
                inequality_property = None
        elif inequality_property != 'key':
            inequality_property = None

        if inequality_property is None:
            # Compute a default inequality_property.
            if min_start_revision or max_start_revision:
                inequality_property = 'start_revision'
            elif min_end_revision or max_end_revision:
                inequality_property = 'end_revision'
            elif min_timestamp or max_timestamp:
                inequality_property = 'timestamp'
            elif bug_id == '*':
                inequality_property = 'bug_id'

        post_filters = []
        if not inequality_property:
            return query, post_filters

        if not datastore_hooks.IsUnalteredQueryPermitted():
            # _DatastorePreHook will filter internal_only=False. index.yaml does not
            # specify indexes for `internal_only, $inequality_property, -timestamp`.
            # Use post_filters for all inequality properties.
            inequality_property = ''

        if bug_id == '*':
            if inequality_property == 'bug_id':
                query = query.filter(cls.bug_id != None).order(cls.bug_id)
            else:
                post_filters.append(lambda a: a.bug_id != None)

        if min_start_revision:
            min_start_revision = int(min_start_revision)
            if inequality_property == 'start_revision':
                logging.info('filter:min_start_revision=%d',
                             min_start_revision)
                query = query.filter(cls.start_revision >= min_start_revision)
                query = query.order(cls.start_revision)
            else:
                post_filters.append(
                    lambda a: a.start_revision >= min_start_revision)

        if max_start_revision:
            max_start_revision = int(max_start_revision)
            if inequality_property == 'start_revision':
                logging.info('filter:max_start_revision=%d',
                             max_start_revision)
                query = query.filter(cls.start_revision <= max_start_revision)
                query = query.order(-cls.start_revision)
            else:
                post_filters.append(
                    lambda a: a.start_revision <= max_start_revision)

        if min_end_revision:
            min_end_revision = int(min_end_revision)
            if inequality_property == 'end_revision':
                logging.info('filter:min_end_revision=%d', min_end_revision)
                query = query.filter(cls.end_revision >= min_end_revision)
                query = query.order(cls.end_revision)
            else:
                post_filters.append(
                    lambda a: a.end_revision >= min_end_revision)

        if max_end_revision:
            max_end_revision = int(max_end_revision)
            if inequality_property == 'end_revision':
                logging.info('filter:max_end_revision=%d', max_end_revision)
                query = query.filter(cls.end_revision <= max_end_revision)
                query = query.order(-cls.end_revision)
            else:
                post_filters.append(
                    lambda a: a.end_revision <= max_end_revision)

        if min_timestamp:
            if inequality_property == 'timestamp':
                logging.info('filter:min_timestamp=%d',
                             time.mktime(min_timestamp.utctimetuple()))
                query = query.filter(cls.timestamp >= min_timestamp)
            else:
                post_filters.append(lambda a: a.timestamp >= min_timestamp)

        if max_timestamp:
            if inequality_property == 'timestamp':
                logging.info('filter:max_timestamp=%d',
                             time.mktime(max_timestamp.utctimetuple()))
                query = query.filter(cls.timestamp <= max_timestamp)
            else:
                post_filters.append(lambda a: a.timestamp <= max_timestamp)

        return query, post_filters
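A hedged sketch of calling QueryAsync from synchronous code; the benchmark name and filter values are illustrative.

# Hypothetical: recent un-triaged regressions for one benchmark.
anomalies, cursor, count = Anomaly.QueryAsync(
    test_suite_name='speedometer2',
    is_improvement=False,
    bug_id='',  # '' filters for bug_id == None, i.e. not yet triaged
    limit=50).get_result()
for anomaly in anomalies:
    print anomaly.GetDisplayPercentChanged()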
コード例 #30
0
class Boat(ndb.Model):
    # boat_id = ndb.StringProperty(indexed = True)
    name = ndb.StringProperty(required=True)
    type = ndb.StringProperty(required=True)
    length = ndb.IntegerProperty(required=True)
    at_sea = ndb.BooleanProperty(default=True)
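A minimal usage sketch; the field values are illustrative.

# Hypothetical: create a boat; `at_sea` defaults to True.
boat = Boat(name='Sea Breeze', type='sloop', length=28)
boat_key = boat.put()
fetched = boat_key.get()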