class Player(db.Model):
    name = db.StringProperty()

class Submission(BaseEntity):
    """Datastore model for a student work submission."""

    # Contents of the student submission. Max size is 1MB.
    contents = db.TextProperty()
    # Submission date.
    updated_on = db.DateTimeProperty(indexed=True)
    # Key of the Student who wrote this submission.
    reviewee_key = KeyProperty(kind=models.Student.kind())
    # Identifier of the unit this submission is a part of.
    unit_id = db.StringProperty(required=True)
    # Optional identifier of the component which submitted this data.
    instance_id = db.StringProperty(required=False)

    def __init__(self, *args, **kwargs):
        """Constructs a new Submission."""
        assert not kwargs.get('key_name'), (
            'Setting key_name manually is not supported')
        reviewee_key = kwargs.get('reviewee_key')
        unit_id = kwargs.get('unit_id')
        instance_id = kwargs.get('instance_id')
        assert reviewee_key, 'Missing required property: reviewee_key'
        assert unit_id, 'Missing required property: unit_id'
        kwargs['key_name'] = self.key_name(
            unit_id, reviewee_key, instance_id=instance_id)
        super(Submission, self).__init__(*args, **kwargs)

    @classmethod
    def _get_student_key(cls, value):
        return db.Key.from_path(models.Student.kind(), value)

    @classmethod
    def delete_by_reviewee_id(cls, user_id):
        student_key = cls._get_student_key(user_id)
        query = Submission.all(keys_only=True).filter(
            'reviewee_key =', student_key)
        db.delete(query.run())

    @classmethod
    def key_name(cls, unit_id, reviewee_key, instance_id=None):
        """Creates a key_name string for datastore operations.

        In order to work with the review subsystem, entities must have a key
        name populated from this method.

        Args:
            unit_id: string. The id of the unit this submission belongs to.
            reviewee_key: db.Key of models.models.Student. The author of the
                submission.
            instance_id: string. The instance id of a component (e.g., file
                upload) which submitted the content.

        Returns:
            String.
        """
        if instance_id:
            return '(submission:%s:%s:%s)' % (
                unit_id, instance_id, reviewee_key.id_or_name())
        else:
            return '(submission:%s:%s)' % (unit_id, reviewee_key.id_or_name())

    @classmethod
    def get_key(cls, unit_id, reviewee_key, instance_id=None):
        """Returns a db.Key for a submission."""
        return db.Key.from_path(
            cls.kind(),
            cls.key_name(unit_id, reviewee_key, instance_id=instance_id))

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        split_key = cls._split_key(db_key.name())
        if len(split_key) == 3:
            _, unit_id, student_key_str = split_key
            instance_id = None
        else:
            _, unit_id, instance_id, student_key_str = split_key
        student_key = db.Key.from_path(models.Student.kind(), student_key_str)
        safe_student_key = models.Student.safe_key(student_key, transform_fn)
        return db.Key.from_path(
            cls.kind(),
            cls.key_name(unit_id, safe_student_key, instance_id=instance_id))

    @classmethod
    def write(cls, unit_id, reviewee_key, contents, instance_id=None):
        """Updates or creates a student submission, and returns the key.

        Args:
            unit_id: string. The id of the unit this submission belongs to.
            reviewee_key: db.Key of models.models.Student. The author of the
                submission.
            contents: object. The contents of the submission, as a Python
                object. This will be JSON-transformed before it is stored.
            instance_id: string. The instance id of a component (e.g., file
                upload) which submitted the content.

        Returns:
            db.Key of Submission.
        """
        return cls(
            unit_id=str(unit_id), reviewee_key=reviewee_key,
            contents=transforms.dumps(contents), instance_id=instance_id,
            updated_on=datetime.datetime.utcnow()).put()

    @classmethod
    def get(cls, unit_id, reviewee_key, instance_id=None):
        submission_key = cls.get_key(
            unit_id, reviewee_key, instance_id=instance_id)
        submission = entities.get(submission_key)
        # For backward compatibility, if no entry is found with the
        # instance_id in the key, also look for an entry with no instance_id
        # used.
        if submission is None and instance_id:
            submission = entities.get(cls.get_key(unit_id, reviewee_key))
        return submission

    @classmethod
    def get_contents(cls, unit_id, reviewee_key, instance_id=None):
        """Returns the de-JSONified contents of a submission."""
        submission_key = cls.get_key(
            unit_id, reviewee_key, instance_id=instance_id)
        contents = cls.get_contents_by_key(submission_key)
        # For backward compatibility, if no entry is found with the
        # instance_id in the key, also look for an entry with no instance_id
        # used.
        if contents is None and instance_id:
            contents = cls.get_contents_by_key(
                cls.get_key(unit_id, reviewee_key))
        return contents

    @classmethod
    def get_contents_by_key(cls, submission_key):
        """Returns the contents of a submission, given a db.Key."""
        submission = entities.get(submission_key)
        return transforms.loads(submission.contents) if submission else None

    def for_export(self, transform_fn):
        model = super(Submission, self).for_export(transform_fn)
        model.reviewee_key = models.Student.safe_key(
            model.reviewee_key, transform_fn)
        return model

class StemmedIndex(SearchIndex):
    """Index model for stemmed (inflected) search phrases."""

    parent_kind = db.StringProperty(required=True)
    phrases = db.StringListProperty(required=True)

class ShardState(db.Model):
    """Single shard execution state.

    The shard state is stored in the datastore and is later aggregated by the
    controller task. Shard key_name is equal to shard_id.

    Properties:
        active: if we have this shard still running as boolean.
        counters_map: shard's counters map as CountersMap. Mirrors
            counters_map_json.
        mapreduce_id: unique id of the mapreduce.
        shard_id: unique id of this shard as string.
        shard_number: ordered number for this shard.
        result_status: If not None, the final status of this shard.
        update_time: The last time this shard state was updated.
        shard_description: A string description of the work this shard will do.
        last_work_item: A string description of the last work item processed.
    """

    RESULT_SUCCESS = "success"
    RESULT_FAILED = "failed"
    RESULT_ABORTED = "aborted"

    _RESULTS = frozenset([RESULT_SUCCESS, RESULT_FAILED, RESULT_ABORTED])

    # Functional properties.
    active = db.BooleanProperty(default=True, indexed=False)
    counters_map = JsonProperty(CountersMap, default=CountersMap(),
                                indexed=False)
    result_status = db.StringProperty(choices=_RESULTS, indexed=False)

    # For UI purposes only.
    mapreduce_id = db.StringProperty(required=True)
    update_time = db.DateTimeProperty(auto_now=True, indexed=False)
    shard_description = db.TextProperty(default="")
    last_work_item = db.TextProperty(default="")

    def get_shard_number(self):
        """Gets the shard number from the key name."""
        return int(self.key().name().split("-")[-1])

    shard_number = property(get_shard_number)

    def get_shard_id(self):
        """Returns the shard ID."""
        return self.key().name()

    shard_id = property(get_shard_id)

    @classmethod
    def shard_id_from_number(cls, mapreduce_id, shard_number):
        """Get shard id by mapreduce id and shard number.

        Args:
            mapreduce_id: mapreduce id as string.
            shard_number: shard number to compute id for as int.

        Returns:
            shard id as string.
        """
        return "%s-%d" % (mapreduce_id, shard_number)

    @classmethod
    def get_key_by_shard_id(cls, shard_id):
        """Retrieves the Key for this ShardState.

        Args:
            shard_id: The shard ID to fetch.

        Returns:
            The Datastore key to use to retrieve this ShardState.
        """
        return db.Key.from_path(cls.kind(), shard_id)

    @classmethod
    def get_by_shard_id(cls, shard_id):
        """Get shard state from datastore by shard_id.

        Args:
            shard_id: shard id as string.

        Returns:
            ShardState for given shard id or None if it's not found.
        """
        return cls.get_by_key_name(shard_id)

    @classmethod
    def find_by_mapreduce_id(cls, mapreduce_id):
        """Find all shard states for given mapreduce.

        Args:
            mapreduce_id: mapreduce id.

        Returns:
            iterable of all ShardState for given mapreduce id.
        """
        return cls.all().filter("mapreduce_id =", mapreduce_id).fetch(99999)

    @classmethod
    def create_new(cls, mapreduce_id, shard_number):
        """Create new shard state.

        Args:
            mapreduce_id: unique mapreduce id as string.
            shard_number: shard number for which to create shard state.

        Returns:
            new instance of ShardState ready to put into datastore.
        """
        shard_id = cls.shard_id_from_number(mapreduce_id, shard_number)
        state = cls(key_name=shard_id, mapreduce_id=mapreduce_id)
        return state

class ShardState(db.Model):
    """Single shard execution state.

    The shard state is stored in the datastore and is later aggregated by the
    controller task. ShardState key_name is equal to shard_id.

    Shard state contains critical state to ensure the correctness of shard
    execution. It is the single source of truth about a shard's progress.
    For example:
    1. A slice is allowed to run only if its payload matches shard state's
       expectation.
    2. A slice is considered running only if it has acquired the shard's lock.
    3. A slice is considered done only if it has successfully committed shard
       state to db.

    Properties about the shard:
        active: if we have this shard still running as boolean.
        counters_map: shard's counters map as CountersMap. All counters yielded
            within mapreduce are stored here.
        mapreduce_id: unique id of the mapreduce.
        shard_id: unique id of this shard as string.
        shard_number: ordered number for this shard.
        retries: the number of times this shard has been retried.
        result_status: If not None, the final status of this shard.
        update_time: The last time this shard state was updated.
        shard_description: A string description of the work this shard will do.
        last_work_item: A string description of the last work item processed.
        writer_state: writer state for this shard. The shard's output writer
            instance can save in-memory output references to this field in its
            "finalize" method.

    Properties about slice management:
        slice_id: slice id of the currently executing slice. A slice's task
            will not run unless its slice_id matches this. Initial value is 0.
            By the end of slice execution, this number is incremented by 1.
        slice_start_time: a slice updates this to now at the beginning of
            execution. If the transaction succeeds, the current task holds a
            lease of slice duration + some grace period. During this time, no
            other task with the same slice_id will execute. Upon slice failure,
            the task should try to unset this value to allow retries to carry
            on ASAP.
        slice_request_id: the request id that holds/held the lease. When the
            lease has expired, a new request needs to verify that said request
            has indeed ended according to the logs API. Do this only when the
            lease has expired because the logs API is expensive. This field
            should always be set/unset with slice_start_time. It is possible
            the logs API doesn't log a request at all or doesn't log the end
            of a request, so a new request can proceed after a long
            conservative timeout.
        slice_retries: the number of times a slice has been retried due to
            processing data while the lock is held. Taskqueue/datastore errors
            related to slice/shard management are not counted. This count is
            only a lower bound and is used to determine when to fail a slice
            completely.
        acquired_once: whether the lock for this slice has been acquired at
            least once. When this is True, duplicates in outputs are possible.
    """

    RESULT_SUCCESS = "success"
    RESULT_FAILED = "failed"
    RESULT_ABORTED = "aborted"

    _RESULTS = frozenset([RESULT_SUCCESS, RESULT_FAILED, RESULT_ABORTED])

    # Maximum number of shard states to load into memory at a time.
    _MAX_STATES_IN_MEMORY = 10

    # Functional properties.
    mapreduce_id = db.StringProperty(required=True)
    active = db.BooleanProperty(default=True, indexed=False)
    input_finished = db.BooleanProperty(default=False, indexed=False)
    counters_map = json_util.JsonProperty(
        CountersMap, default=CountersMap(), indexed=False)
    result_status = db.StringProperty(choices=_RESULTS, indexed=False)
    retries = db.IntegerProperty(default=0, indexed=False)
    writer_state = json_util.JsonProperty(dict, indexed=False)
    slice_id = db.IntegerProperty(default=0, indexed=False)
    slice_start_time = db.DateTimeProperty(indexed=False)
    slice_request_id = db.ByteStringProperty(indexed=False)
    slice_retries = db.IntegerProperty(default=0, indexed=False)
    acquired_once = db.BooleanProperty(default=False, indexed=False)

    # For UI purposes only.
    update_time = db.DateTimeProperty(auto_now=True, indexed=False)
    shard_description = db.TextProperty(default="")
    last_work_item = db.TextProperty(default="")

    def __str__(self):
        kv = {"active": self.active,
              "slice_id": self.slice_id,
              "last_work_item": self.last_work_item,
              "update_time": self.update_time}
        if self.result_status:
            kv["result_status"] = self.result_status
        if self.retries:
            kv["retries"] = self.retries
        if self.slice_start_time:
            kv["slice_start_time"] = self.slice_start_time
        if self.slice_retries:
            kv["slice_retries"] = self.slice_retries
        if self.slice_request_id:
            kv["slice_request_id"] = self.slice_request_id
        if self.acquired_once:
            kv["acquired_once"] = self.acquired_once
        keys = list(kv.keys())
        keys.sort()

        result = "ShardState is {"
        for k in keys:
            result += k + ":" + str(kv[k]) + ","
        result += "}"
        return result

    def reset_for_retry(self):
        """Reset self for shard retry."""
        self.retries += 1
        self.last_work_item = ""
        self.active = True
        self.result_status = None
        self.input_finished = False
        self.counters_map = CountersMap()
        self.slice_id = 0
        self.slice_start_time = None
        self.slice_request_id = None
        self.slice_retries = 0
        self.acquired_once = False

    def advance_for_next_slice(self, recovery_slice=False):
        """Advance self for next slice.

        Args:
            recovery_slice: True if this slice is running recovery logic.
                See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
                for more info.
        """
        self.slice_start_time = None
        self.slice_request_id = None
        self.slice_retries = 0
        self.acquired_once = False
        if recovery_slice:
            self.slice_id += 2
        else:
            self.slice_id += 1

    def set_for_failure(self):
        self.active = False
        self.result_status = self.RESULT_FAILED

    def set_for_abort(self):
        self.active = False
        self.result_status = self.RESULT_ABORTED

    def set_input_finished(self):
        self.input_finished = True

    def is_input_finished(self):
        return self.input_finished

    def set_for_success(self):
        self.active = False
        self.result_status = self.RESULT_SUCCESS
        self.slice_start_time = None
        self.slice_request_id = None
        self.slice_retries = 0
        self.acquired_once = False

    def copy_from(self, other_state):
        """Copy data from another shard state entity to self."""
        for prop in list(self.properties().values()):
            setattr(self, prop.name, getattr(other_state, prop.name))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.properties() == other.properties()

    def get_shard_number(self):
        """Gets the shard number from the key name."""
        return int(self.key().name().split("-")[-1])

    shard_number = property(get_shard_number)

    def get_shard_id(self):
        """Returns the shard ID."""
        return self.key().name()

    shard_id = property(get_shard_id)

    @classmethod
    def kind(cls):
        """Returns entity kind."""
        return "_GAE_MR_ShardState"

    @classmethod
    def shard_id_from_number(cls, mapreduce_id, shard_number):
        """Get shard id by mapreduce id and shard number.

        Args:
            mapreduce_id: mapreduce id as string.
            shard_number: shard number to compute id for as int.

        Returns:
            shard id as string.
        """
        return "%s-%d" % (mapreduce_id, shard_number)

    @classmethod
    def get_key_by_shard_id(cls, shard_id):
        """Retrieves the Key for this ShardState.

        Args:
            shard_id: The shard ID to fetch.

        Returns:
            The Datastore key to use to retrieve this ShardState.
        """
        return db.Key.from_path(cls.kind(), shard_id)

    @classmethod
    def get_by_shard_id(cls, shard_id):
        """Get shard state from datastore by shard_id.

        Args:
            shard_id: shard id as string.

        Returns:
            ShardState for given shard id or None if it's not found.
        """
        return cls.get_by_key_name(shard_id)

    @classmethod
    def find_by_mapreduce_state(cls, mapreduce_state):
        """Find all shard states for given mapreduce.

        Deprecated. Use find_all_by_mapreduce_state. This will be removed
        after the 1.8.9 release.

        Args:
            mapreduce_state: MapreduceState instance.

        Returns:
            A list of ShardStates.
        """
        return list(cls.find_all_by_mapreduce_state(mapreduce_state))

    @classmethod
    def find_all_by_mapreduce_state(cls, mapreduce_state):
        """Find all shard states for given mapreduce.

        Args:
            mapreduce_state: MapreduceState instance.

        Yields:
            shard states sorted by shard id.
        """
        keys = cls.calculate_keys_by_mapreduce_state(mapreduce_state)
        i = 0
        while i < len(keys):
            @db.non_transactional
            def no_tx_get(i):
                # Fetch a batch of shard states outside any enclosing
                # transaction to avoid transaction entity-group limits.
                return db.get(keys[i:i + cls._MAX_STATES_IN_MEMORY])

            states = no_tx_get(i)
            for s in states:
                i += 1
                if s is not None:
                    yield s

    @classmethod
    def calculate_keys_by_mapreduce_state(cls, mapreduce_state):
        """Calculate all shard states keys for given mapreduce.

        Args:
            mapreduce_state: MapreduceState instance.

        Returns:
            A list of keys for shard states, sorted by shard id.
            The corresponding shard states may not exist.
        """
        if mapreduce_state is None:
            return []

        keys = []
        for i in range(mapreduce_state.mapreduce_spec.mapper.shard_count):
            shard_id = cls.shard_id_from_number(mapreduce_state.key().name(), i)
            keys.append(cls.get_key_by_shard_id(shard_id))
        return keys

    @classmethod
    def create_new(cls, mapreduce_id, shard_number):
        """Create new shard state.

        Args:
            mapreduce_id: unique mapreduce id as string.
            shard_number: shard number for which to create shard state.

        Returns:
            new instance of ShardState ready to put into datastore.
        """
        shard_id = cls.shard_id_from_number(mapreduce_id, shard_number)
        state = cls(key_name=shard_id, mapreduce_id=mapreduce_id)
        return state

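# Hedged usage sketch (not part of the original library): shows how shard ids
# are derived and how a fresh ShardState would be created, advanced, and looked
# up. The mapreduce id string below is a hypothetical placeholder.
def _example_shard_state_lifecycle():
    mapreduce_id = '15700286390002117794'
    shard_id = ShardState.shard_id_from_number(mapreduce_id, 0)  # '<id>-0'
    state = ShardState.create_new(mapreduce_id, 0)
    state.put()
    # advance_for_next_slice() clears the slice lease fields and bumps
    # slice_id by 1 (or by 2 for a recovery slice).
    state.advance_for_next_slice()
    return ShardState.get_by_shard_id(shard_id)
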
class Users(db.Model):
    user_name = db.StringProperty(required=True)
    password = db.StringProperty(required=True)
    created = db.DateTimeProperty(auto_now_add=True)
    email = db.StringProperty()

class Item(db.Model):
    name = db.StringProperty(required=True)

class ART(db.Model):
    title = db.StringProperty()
    arttype = db.TextProperty()
    created = db.DateTimeProperty(auto_now_add=True)

class Vote(db.Model):
    user = db.StringProperty(required=True)
    issue = db.ReferenceProperty(Issue, required=True)
    timestamp = db.DateTimeProperty(auto_now_add=True)

class UserEvent(db.Expando):
    date = db.DateTimeProperty(auto_now_add=True)
    ev_type = db.IntegerProperty(required=True, indexed=True)
    requester = db.StringProperty(required=True, indexed=True)

class Client(db.Model):
    room_id = db.StringProperty()
    client_id = db.StringProperty()
    messages = db.ListProperty(db.Text)
    is_initiator = db.BooleanProperty()

class NewEntry(db.Model):
    title = db.StringProperty(required=True)
    entry = db.TextProperty(required=True)
    created = db.DateTimeProperty(auto_now_add=True)
    modified = db.DateTimeProperty(auto_now_add=True)

class Word(db.Model):
    word = db.StringProperty()
    exists = db.BooleanProperty()

class Guild(db.Model):
    name = db.StringProperty()

class StoredToken(db.Model):
    user_email = db.StringProperty(required=True)
    session_token = db.StringProperty(required=True)
    target_url = db.StringProperty(required=True)

class Post(db.Model):
    subject = db.StringProperty(required=True)
    content = db.TextProperty(required=True)
    created = db.DateTimeProperty(auto_now_add=True)

class Posts(db.Model):
    title = db.StringProperty(required=True)
    post = db.TextProperty(required=True)
    created = db.DateTimeProperty(auto_now_add=True)

class FlowerbedLog(db.Model):
    flowerbed = db.ReferenceProperty(indexed=False, reference_class=Flowerbed)
    owner = db.StringProperty()
    attackPlants = db.IntegerProperty(indexed=False)
    defensePlants = db.IntegerProperty(indexed=False)
    timestamp = db.DateTimeProperty(auto_now=True)

class ResourceStorage(db.Model):
    name = db.StringProperty(multiline=False)
    data = db.StringProperty(multiline=True)

class Bookmark(db.Model):
    owner = db.StringProperty(indexed=False)
    flowerbed = db.ReferenceProperty(indexed=False, reference_class=Flowerbed)
    timestamp = db.DateTimeProperty(indexed=False, auto_now=True)

class MapreduceState(db.Model):
    """Holds accumulated state of mapreduce execution.

    MapreduceState is stored in datastore with a key name equal to the
    mapreduce ID. Only controller tasks can write to MapreduceState.

    Properties:
        mapreduce_spec: cached deserialized MapreduceSpec instance. read-only
        active: if we have this mapreduce running right now.
        last_poll_time: last time controller job has polled this mapreduce.
        counters_map: shard's counters map as CountersMap. Mirrors
            counters_map_json.
        chart_url: last computed mapreduce status chart url. This chart
            displays the progress of all the shards the best way it can.
        sparkline_url: last computed mapreduce status chart url in small
            format.
        result_status: If not None, the final status of the job.
        active_shards: How many shards are still processing.
        start_time: When the job started.
    """

    RESULT_SUCCESS = "success"
    RESULT_FAILED = "failed"
    RESULT_ABORTED = "aborted"

    _RESULTS = frozenset([RESULT_SUCCESS, RESULT_FAILED, RESULT_ABORTED])

    # Functional properties.
    mapreduce_spec = JsonProperty(MapreduceSpec, indexed=False)
    active = db.BooleanProperty(default=True, indexed=False)
    last_poll_time = db.DateTimeProperty(required=True)
    counters_map = JsonProperty(CountersMap, default=CountersMap(),
                                indexed=False)
    app_id = db.StringProperty(required=False, indexed=True)

    # For UI purposes only.
    chart_url = db.TextProperty(default="")
    sparkline_url = db.TextProperty(default="")
    result_status = db.StringProperty(required=False, choices=_RESULTS)
    active_shards = db.IntegerProperty(default=0, indexed=False)
    failed_shards = db.IntegerProperty(default=0, indexed=False)
    aborted_shards = db.IntegerProperty(default=0, indexed=False)
    start_time = db.DateTimeProperty(auto_now_add=True)

    @classmethod
    def get_key_by_job_id(cls, mapreduce_id):
        """Retrieves the Key for a Job.

        Args:
            mapreduce_id: The job to retrieve.

        Returns:
            Datastore Key that can be used to fetch the MapreduceState.
        """
        return db.Key.from_path(cls.kind(), mapreduce_id)

    def set_processed_counts(self, shards_processed):
        """Updates a chart url to display processed count for each shard.

        Args:
            shards_processed: list of integers with number of processed
                entities in each shard.
        """
        chart = google_chart_api.BarChart(shards_processed)
        if self.mapreduce_spec and shards_processed:
            chart.bottom.labels = [
                str(x) for x in xrange(self.mapreduce_spec.mapper.shard_count)]
            chart.left.labels = ['0', str(max(shards_processed))]
            chart.left.min = 0
        self.chart_url = chart.display.Url(300, 200)

    def get_processed(self):
        """Number of processed entities.

        Returns:
            The total number of processed entities as int.
        """
        return self.counters_map.get(context.COUNTER_MAPPER_CALLS)

    processed = property(get_processed)

    @staticmethod
    def create_new(getkeyname=_get_descending_key,
                   gettime=datetime.datetime.now):
        """Create a new MapreduceState.

        Args:
            getkeyname: Used for testing.
            gettime: Used for testing.
        """
        state = MapreduceState(key_name=getkeyname(),
                               last_poll_time=gettime())
        state.set_processed_counts([])
        return state

class Rule(db.Model):
    name = db.StringProperty(indexed=False)
    value = db.IntegerProperty(indexed=False)
    timestamp = db.DateTimeProperty(indexed=False, auto_now=True)

class MapreduceState(db.Model):
    """Holds accumulated state of mapreduce execution.

    MapreduceState is stored in datastore with a key name equal to the
    mapreduce ID. Only controller tasks can write to MapreduceState.

    Properties:
        mapreduce_spec: cached deserialized MapreduceSpec instance. read-only
        active: if this MR is still running.
        last_poll_time: last time controller job has polled this mapreduce.
        counters_map: shard's counters map as CountersMap. Mirrors
            counters_map_json.
        chart_url: last computed mapreduce status chart url. This chart
            displays the progress of all the shards the best way it can.
        sparkline_url: last computed mapreduce status chart url in small
            format.
        result_status: If not None, the final status of the job.
        active_shards: How many shards are still processing. This starts as 0,
            is then set by the KickOffJob handler to the actual number of
            input readers after input splitting, and is updated by the
            Controller task as shards finish.
        start_time: When the job started.
        writer_state: Json property to be used by the writer to store its
            state. This is filled only when there is a single output per job.
            Will be deprecated. Use OutputWriter.get_filenames instead.
    """

    RESULT_SUCCESS = "success"
    RESULT_FAILED = "failed"
    RESULT_ABORTED = "aborted"

    _RESULTS = frozenset([RESULT_SUCCESS, RESULT_FAILED, RESULT_ABORTED])

    # Functional properties.
    mapreduce_spec = json_util.JsonProperty(MapreduceSpec, indexed=False)
    active = db.BooleanProperty(default=True, indexed=False)
    last_poll_time = db.DateTimeProperty(required=True)
    counters_map = json_util.JsonProperty(
        CountersMap, default=CountersMap(), indexed=False)
    app_id = db.StringProperty(required=False, indexed=True)
    writer_state = json_util.JsonProperty(dict, indexed=False)
    active_shards = db.IntegerProperty(default=0, indexed=False)
    failed_shards = db.IntegerProperty(default=0, indexed=False)
    aborted_shards = db.IntegerProperty(default=0, indexed=False)
    result_status = db.StringProperty(required=False, choices=_RESULTS)

    # For UI purposes only.
    chart_url = db.TextProperty(default="")
    chart_width = db.IntegerProperty(default=300, indexed=False)
    sparkline_url = db.TextProperty(default="")
    start_time = db.DateTimeProperty(auto_now_add=True)

    @classmethod
    def kind(cls):
        """Returns entity kind."""
        return "_GAE_MR_MapreduceState"

    @classmethod
    def get_key_by_job_id(cls, mapreduce_id):
        """Retrieves the Key for a Job.

        Args:
            mapreduce_id: The job to retrieve.

        Returns:
            Datastore Key that can be used to fetch the MapreduceState.
        """
        return db.Key.from_path(cls.kind(), str(mapreduce_id))

    @classmethod
    def get_by_job_id(cls, mapreduce_id):
        """Retrieves the instance of state for a Job.

        Args:
            mapreduce_id: The mapreduce job to retrieve.

        Returns:
            instance of MapreduceState for passed id.
        """
        return db.get(cls.get_key_by_job_id(mapreduce_id))

    def set_processed_counts(self, shards_processed):
        """Updates a chart url to display processed count for each shard.

        Args:
            shards_processed: list of integers with number of processed
                entities in each shard.
        """
        chart = google_chart_api.BarChart(shards_processed)
        shard_count = len(shards_processed)

        if shards_processed:
            # Only label every stride_length-th shard (plus the last one) so
            # the x-axis stays readable when there are many shards.
            stride_length = max(1, shard_count / 16)
            chart.bottom.labels = []
            for x in range(shard_count):
                if (x % stride_length == 0 or
                        x == shard_count - 1):
                    chart.bottom.labels.append(x)
                else:
                    chart.bottom.labels.append("")
            chart.left.labels = ["0", str(max(shards_processed))]
            chart.left.min = 0

        self.chart_width = min(700, max(300, shard_count * 20))
        self.chart_url = chart.display.Url(self.chart_width, 200)

    def get_processed(self):
        """Number of processed entities.

        Returns:
            The total number of processed entities as int.
        """
        return self.counters_map.get(context.COUNTER_MAPPER_CALLS)

    processed = property(get_processed)

    @staticmethod
    def create_new(mapreduce_id=None,
                   gettime=datetime.datetime.now):
        """Create a new MapreduceState.

        Args:
            mapreduce_id: Mapreduce id as string.
            gettime: Used for testing.
        """
        if not mapreduce_id:
            mapreduce_id = MapreduceState.new_mapreduce_id()
        state = MapreduceState(key_name=mapreduce_id,
                               last_poll_time=gettime())
        state.set_processed_counts([])
        return state

    @staticmethod
    def new_mapreduce_id():
        """Generate new mapreduce id."""
        return util._get_descending_key()

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.properties() == other.properties()

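# Hedged usage sketch (not part of the original library): illustrates how a
# MapreduceState is typically created, persisted, and looked up again by its
# job id. The shard count assignment is a hypothetical example value.
def _example_mapreduce_state_lookup():
    state = MapreduceState.create_new()  # key_name becomes a new mapreduce id
    state.active_shards = 8
    state.put()
    mapreduce_id = state.key().name()
    # get_by_job_id() round-trips through get_key_by_job_id().
    return MapreduceState.get_by_job_id(mapreduce_id)
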
class Item(db.Model):
    name = db.StringProperty()  # flower, flowerbed
    moneyPrice = db.IntegerProperty(indexed=False)
    flowerPrice = db.IntegerProperty(indexed=False)

class Review(BaseEntity):
    """Datastore model for a student review of a Submission."""

    # Contents of the student's review. Max size is 1MB.
    contents = db.TextProperty()
    # Key of the student whose work is being reviewed.
    reviewee_key = KeyProperty(kind=models.Student.kind())
    # Key of the Student who wrote this review.
    reviewer_key = KeyProperty(kind=models.Student.kind())
    # Identifier of the unit this review is a part of.
    unit_id = db.StringProperty(required=True)

    def __init__(self, *args, **kwargs):
        """Constructs a new Review."""
        assert not kwargs.get('key_name'), (
            'Setting key_name manually is not supported')
        reviewee_key = kwargs.get('reviewee_key')
        reviewer_key = kwargs.get('reviewer_key')
        unit_id = kwargs.get('unit_id')
        assert reviewee_key, 'Missing required property: reviewee_key'
        assert reviewer_key, 'Missing required property: reviewer_key'
        assert unit_id, 'Missing required property: unit_id'
        kwargs['key_name'] = self.key_name(unit_id, reviewee_key, reviewer_key)
        super(Review, self).__init__(*args, **kwargs)

    @classmethod
    def key_name(cls, unit_id, reviewee_key, reviewer_key):
        """Creates a key_name string for datastore operations.

        In order to work with the review subsystem, entities must have a key
        name populated from this method.

        Args:
            unit_id: string. The id of the unit this review belongs to.
            reviewee_key: db.Key of models.models.Student. The student whose
                work is being reviewed.
            reviewer_key: db.Key of models.models.Student. The author of the
                review.

        Returns:
            String.
        """
        return '(review:%s:%s:%s)' % (unit_id, reviewee_key, reviewer_key)

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        _, unit_id, reviewee_key_str, reviewer_key_str = cls._split_key(
            db_key.name())
        reviewee_key = db.Key(encoded=reviewee_key_str)
        reviewer_key = db.Key(encoded=reviewer_key_str)
        safe_reviewee_key = models.Student.safe_key(reviewee_key, transform_fn)
        safe_reviewer_key = models.Student.safe_key(reviewer_key, transform_fn)
        return db.Key.from_path(
            cls.kind(),
            cls.key_name(unit_id, safe_reviewee_key, safe_reviewer_key))

    def for_export(self, transform_fn):
        model = super(Review, self).for_export(transform_fn)
        model.reviewee_key = models.Student.safe_key(
            model.reviewee_key, transform_fn)
        model.reviewer_key = models.Student.safe_key(
            model.reviewer_key, transform_fn)
        return model

    @classmethod
    def _get_student_key(cls, value):
        return db.Key.from_path(models.Student.kind(), value)

    @classmethod
    def delete_by_reviewee_id(cls, user_id):
        student_key = cls._get_student_key(user_id)
        query = Review.all(keys_only=True).filter(
            'reviewee_key =', student_key)
        db.delete(query.run())

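# Hedged usage sketch (not part of the original module): shows how a Review's
# key name is derived from the two Student keys by the constructor above. The
# email addresses and unit id are hypothetical placeholders.
def _example_review_key():
    reviewee_key = db.Key.from_path(models.Student.kind(), 'alice@example.com')
    reviewer_key = db.Key.from_path(models.Student.kind(), 'bob@example.com')
    review = Review(unit_id='5', reviewee_key=reviewee_key,
                    reviewer_key=reviewer_key)
    # The constructor sets key_name to
    # '(review:<unit_id>:<reviewee_key>:<reviewer_key>)'.
    return review.put()
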
class Backpack(db.Model):
    owner = db.StringProperty(indexed=False)
    name = db.StringProperty()  # Item.name
    amount = db.IntegerProperty(indexed=False)
    timestamp = db.DateTimeProperty(indexed=False, auto_now=True)

class LiteralIndex(SearchIndex):
    """Index model for non-inflected search phrases."""

    parent_kind = db.StringProperty(required=True)
    phrases = db.StringListProperty(required=True)

class BaseVolume(db.Model):
    """Base model for various types of volumes."""

    ESCROW_TYPE_NAME = 'base_volume'
    SECRET_PROPERTY_NAME = 'undefined'
    ALLOW_OWNER_CHANGE = False

    # True for only the most recently escrowed, unique volume_uuid.
    active = db.BooleanProperty(default=True)
    created = db.DateTimeProperty(auto_now_add=True)
    created_by = AutoUpdatingUserProperty()  # user that created the object.
    hostname = db.StringProperty()  # name of the machine with the volume.
    owner = db.StringProperty()
    volume_uuid = db.StringProperty()  # Volume UUID of the encrypted volume.
    tag = db.StringProperty(default='default')  # Key Slot

    def __eq__(self, other):
        for p in self.properties():
            if getattr(self, p) != getattr(other, p):
                return False
        return True

    def ToDict(self, skip_secret=False):
        volume = {p: str(getattr(self, p)) for p in self.properties()
                  if not skip_secret or p != self.SECRET_PROPERTY_NAME}
        volume['id'] = str(self.key())
        volume['active'] = self.active  # store the bool, not string, value
        return volume

    def __ne__(self, other):
        return not self.__eq__(other)

    @classmethod
    def GetLatestByUuid(cls, volume_uuid, tag='default'):
        entity = cls.all().filter('tag =', tag).filter(
            'volume_uuid =', volume_uuid).order('-created').fetch(1)
        if not entity:
            return None
        return entity[0]

    def Clone(self):
        items = {p: getattr(self, p) for p in self.properties()}
        del items['created_by']
        del items['created']
        return self.__class__(**items)

    @db.transactional(xg=True)
    def _PutNewVolume(self, ancestor_key, *args, **kwargs):
        ancestor = self.get(ancestor_key)
        if not ancestor.active:
            raise self.ACCESS_ERR_CLS(
                'parent entity is inactive: %s.' % self.volume_uuid)
        ancestor.active = False
        super(BaseVolume, ancestor).put(*args, **kwargs)
        return super(BaseVolume, self).put(*args, **kwargs)

    def put(self, parent=None, *args, **kwargs):  # pylint: disable=g-bad-name
        """Disallow updating an existing entity, and enforce key_name.

        Args:
            parent: Optional. A Volume of the same type as the current
                instance. If passed then it is used as the parent entity for
                this instance.
            *args: Positional arguments to be passed to the parent class'
                put method.
            **kwargs: Keyword arguments to be passed to the parent class'
                put method.

        Returns:
            The key of the instance (either the existing key or a new key).

        Raises:
            DuplicateEntity: The new entity is a duplicate of an active volume
                with the same uuid.
            AccessError: A required property was empty or not set.
        """
        model_name = self.__class__.__name__
        for prop_name in self.REQUIRED_PROPERTIES:
            if not getattr(self, prop_name, None):
                raise self.ACCESS_ERR_CLS(
                    'Required property empty: %s' % prop_name)

        if not self.active:
            raise self.ACCESS_ERR_CLS(
                'New entity is not active: %s' % self.volume_uuid)

        if self.has_key():
            raise self.ACCESS_ERR_CLS(
                'Key should be auto generated for %s.' % model_name)

        existing_entity = parent
        if not existing_entity:
            existing_entity = self.__class__.GetLatestByUuid(
                self.volume_uuid, tag=self.tag)

        if existing_entity:
            if not existing_entity.active:
                raise self.ACCESS_ERR_CLS(
                    'parent entity is inactive: %s.' % self.volume_uuid)

            different_properties = []
            for prop in self.properties():
                if getattr(self, prop) != getattr(existing_entity, prop):
                    different_properties.append(prop)

            if not different_properties or different_properties == ['created']:
                raise DuplicateEntity()

            if self.created > existing_entity.created:
                return self._PutNewVolume(existing_entity.key())
            else:
                logging.warning('entity from past')
                self.active = False

        return super(BaseVolume, self).put(*args, **kwargs)

    @property
    def secret(self):
        return getattr(self, self.SECRET_PROPERTY_NAME)

    @property
    def checksum(self):
        return hashlib.md5(self.secret).hexdigest()

    @classmethod
    def NormalizeHostname(cls, hostname, strip_fqdn=False):
        """Sanitizes a hostname for consistent search functionality.

        Args:
            hostname: str hostname to sanitize.
            strip_fqdn: boolean, if True removes the fully qualified portion
                of the hostname.

        Returns:
            str hostname.
        """
        # TODO(user): call this during escrow create, to sanitize before
        # storage.
        if strip_fqdn:
            hostname = hostname.partition('.')[0]
        return hostname.lower()

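# Hedged usage sketch (not part of the original module): NormalizeHostname is
# the one method above that can be exercised without a concrete subclass (the
# put() path depends on subclass-defined REQUIRED_PROPERTIES and
# ACCESS_ERR_CLS). The hostname values are hypothetical examples.
def _example_normalize_hostname():
    full = BaseVolume.NormalizeHostname('Laptop-01.EXAMPLE.COM')
    short = BaseVolume.NormalizeHostname('Laptop-01.example.com',
                                         strip_fqdn=True)
    return full == 'laptop-01.example.com' and short == 'laptop-01'
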
class Entry(db.Model):
    title = db.StringProperty(required=True)
    body = db.TextProperty(required=True)
    created = db.DateTimeProperty(auto_now_add=True)

class BlogPost(db.Model):
    title = db.StringProperty(required=True)
    thoughts = db.TextProperty(required=True)
    created = db.DateTimeProperty(auto_now_add=True)