def __init__(self, request, response):
    super(BaseHandler, self).__init__(request, response)
    user = users.get_current_user()
    if user:
        logging.debug('User authenticated via SACSID cookie: ' + user.email())
    else:
        try:
            user = oauth.get_current_user()
            logging.debug('User authenticated via OAuth token: ' + user.email())
        except oauth.InvalidOAuthTokenError:
            logging.info('User provided an invalid OAuth token')
            self.abort(401, explanation='Invalid OAuth token')
        except oauth.InvalidOAuthParametersError:
            pass
    if not user:
        logging.info('No valid user authentication credentials supplied')
        self.user = None
        return
    key = models.User.get_memcache_key(user.user_id())
    self.user = memcache.get(key)
    if self.user is None:
        self.user = models.User.get_or_insert(user.user_id(), email=user.email())
        memcache.set(key, self.user)
    if self.user.email != user.email():
        self.user.email = user.email()
        memcache.set(key, self.user)
    self.user.last_visit_at = datetime.datetime.now()
    db.put_async(self.user)
def trans(mobile):
    debug_request = StartDebuggingRequest(key=StartDebuggingRequest.create_key(app_user, mobile_id),
                                          timestamp=now())
    db.put_async(debug_request)
    start_log_forwarding(app_user, target_jid, mobile=mobile)
    deferred.defer(stop_debugging, app_user, mobile_id, debug_request=debug_request,
                   _countdown=30 * 60, _transactional=True, _queue=SCHEDULED_QUEUE)
def __restore_user_events(self, user):
    # event_query = db.GqlQuery("SELECT * FROM Event WHERE member = :1"
    #                           " AND original_status != NULL", user)
    events = Event.get_future_suspended_events_by_member(member=user)
    future_puts = []
    for event in events.run():
        logging.debug("Restoring event '%s'." % (event.name))
        event.status = event.original_status
        event.original_status = None
        event.owner_suspended_time = None
        event_future = db.put_async(event)
        future_puts.append(event_future)

        # Write a log of it.
        log_entry = HDLog(event=event, description="Restoring event because "
                          "owner is now active.")
        log_entry_future = db.put_async(log_entry)
        future_puts.append(log_entry_future)

    # Wait for all the writes to finish.
    logging.debug("Waiting for all writes to finish...")
    for future_put in future_puts:
        future_put.get_result()
def __hold_user_events(self, user):
    events = Event.get_future_events_by_member(member=user)
    # event_query = db.GqlQuery("SELECT * FROM Event WHERE member = :1"
    #                           " AND status IN :2 AND start_time > :3",
    #                           user, ["pending", "approved"], local_today())
    future_puts = []
    for event in events:
        logging.debug("Suspending event '%s'." % (event.name))
        event.original_status = event.status
        event.status = "suspended"
        event.owner_suspended_time = datetime.datetime.now()
        event_future = db.put_async(event)
        future_puts.append(event_future)

        # Write a log of it.
        log_entry = HDLog(event=event, description="Suspended event "
                          "because owner was suspended.")
        log_entry_future = db.put_async(log_entry)
        future_puts.append(log_entry_future)

    # Wait for all the writes to finish.
    logging.debug("Waiting for all writes to finish...")
    for future_put in future_puts:
        future_put.get_result()
def update_userprofile_last_login():
    # We are read-only when the datastore write capability is disabled
    # (the original assigned is_enabled() directly, which inverted the check
    # and skipped the write exactly when writes were possible).
    g.readonly = not CapabilitySet('datastore_v3', capabilities=['write']).is_enabled()
    if not g.readonly:
        profile = UserProfile.get_current_profile()
        if profile:
            profile.last_access = datetime.datetime.now()
            db.put_async(profile)
def get_execute(self, task):
    """Handle calling of execute_task()."""
    # We are being called by the task queue machinery and thus have 10
    # minutes to do our work.
    logging.info("executing %s", task)
    if task.status not in ['ready', 'error']:
        # Strange internal error.
        raise RuntimeError("task %s is not 'ready'" % task)

    # First note that the task has started. We do an async write, so we
    # don't have to wait for the datastore. Since execute_task() is
    # idempotent, this should never result in anything messy.
    task.status = 'started'
    task.starttime = time.time()
    db.put_async(task)
    self.task = task

    # Decode parameters from the datastore and execute the actual task.
    parameters = pickle.loads(task.parameters_blob)
    try:
        result = self.execute_task(parameters)
        task = gaetk_LongTask.get(self.request.get('_longtaskid'))
        task.result_blob = pickle.dumps(result)
        task.status = 'done'
        task.endtime = time.time()
        task.put()
    except Exception, msg:
        # If an exception occurred, note that in the datastore and re-raise.
        # We could probably add some fancy error logging one day.
        logging.error(msg)
        task = gaetk_LongTask.get(self.request.get('_longtaskid'))
        task.status = 'error'
        task.put()
        raise
def _tx(user):
    settings = UserSettings(key_name='%s_settings' % user.key().name(),
                            parent=user, language=language)
    profile = UserProfile(key_name='%s_profile' % user.key().name(),
                          parent=user, nickname=user.username)
    followings = UserFollowingIndex(parent=user)
    counters = UserCounter(key_name='%s_counters' % user.key().name(), parent=user)
    sociallinks = UserSocialLinks(parent=profile)
    db.put_async([settings, profile, followings, counters, sociallinks])
def _change_counter(self, prop, value):
    obj = ListCounter.get(self.key())
    oldValue = getattr(obj, prop)  # get the current value
    value = oldValue + value
    setattr(obj, prop, value)
    db.put_async(obj)
    return value
def Parse(self):
    rows = 0
    reader = csv.reader(self.ticket_file)
    header = reader.next()
    self._rowlen = len(header)
    self.event.descriptor_format = json.dumps(header)
    self.event.put()
    for row in reader:
        if len(row) != self._rowlen:
            raise InvalidRowError('Expected %s columns but found %s. Row: %s'
                                  % (self._rowlen, len(row), row))
        key = row[self.key_position]
        if key in self._keys:
            self._dupkeys.append(key)
            continue
        self._keys.append(key)
        ticket = model.Ticket(code=key, event=self.event, claim_count=0,
                              descriptor=json.dumps(row))
        db.put_async(ticket)
    logging.info('Created %s tickets' % (len(self._keys)))
    if self._dupkeys:
        logging.warning('Found the following duplicate keys: %s' % (self._dupkeys))
def get(self):
    status = {'success': True}
    self.response.headers['Content-Type'] = "application/json"
    try:
        employee1 = Employee(name="Raj")
        employee1_future = db.put_async(employee1)
        employee2 = Employee(name="Tyler")
        employee2_future = db.put_async(employee2)
        employee1_future.get_result()
        employee2_future.get_result()
        time.sleep(1)
        count1 = Employee.all().count(limit=5, deadline=60)
        if count1 != 2:
            raise Exception('Did not retrieve 2 Employees, got ' + str(count1))
        employee3 = Employee(name="Brian")
        employee3_future = db.put_async(employee3)
        employee3_future.get_result()
        time.sleep(1)
        count2 = Employee.all().count(limit=5, deadline=60)
        if count2 != 3:
            raise Exception('Did not retrieve 3 Employees, got ' + str(count2))
    except Exception:
        status = {'success': False}
        self.response.out.write(json.dumps(status))
        raise
    finally:
        delete_future = db.delete_async(Employee.all())
        delete_future.get_result()
def persist_to_datastore(self):
    """Persist current state of experiment and alternative models to datastore.

    Their sums might be slightly out-of-date during any given persist,
    but not by much.
    """
    experiments_to_put = []
    for experiment_name in self.experiments:
        experiment_model = self.get_experiment(experiment_name)
        if experiment_model:
            experiments_to_put.append(experiment_model)

    alternatives_to_put = []
    for experiment_name in self.alternatives:
        alternative_models = self.get_alternatives(experiment_name)
        for alternative_model in alternative_models:
            # When persisting to datastore, we want to store the most
            # recent value we've got.
            alternative_model.load_latest_counts()
            alternatives_to_put.append(alternative_model)
            self.update_alternative(alternative_model)

    # When periodically persisting to datastore, first make sure memcache
    # has relatively up-to-date participant/conversion counts for each
    # alternative.
    self.dirty = True
    self.store_if_dirty()

    # Once memcache is done, put both experiments and alternatives.
    async_experiments = db.put_async(experiments_to_put)
    async_alternatives = db.put_async(alternatives_to_put)
    async_experiments.get_result()
    async_alternatives.get_result()
def __init__(self, request, response):
    super(BaseHandler, self).__init__(request, response)
    user = users.get_current_user()
    if user:
        logging.debug('User authenticated via SACSID cookie: ' + user.email())
    else:
        try:
            user = oauth.get_current_user()
            logging.debug('User authenticated via OAuth token: ' + user.email())
        except oauth.InvalidOAuthParametersError:
            pass
    if not user:
        logging.info('No valid user authentication credentials supplied')
        self.user = None
        return
    key = models.User.get_memcache_key(user.user_id())
    self.user = memcache.get(key)
    if self.user is None:
        self.user = models.User.get_or_insert(user.user_id(), email=user.email())
        memcache.set(key, self.user)
    if self.user.email != user.email():
        self.user.email = user.email()
        memcache.set(key, self.user)
    self.user.last_visit_at = datetime.datetime.now()
    db.put_async(self.user)
def _use(obj_key):
    key_str = str(obj_key)
    now = _current_date()
    u = LastUse.get_or_insert(key_str, obj=obj_key)
    if now > u.last_use:
        u.last_use = now
        db.put_async(u)
def trans():
    debug_request = StartDebuggingRequest(key=StartDebuggingRequest.create_key(app_user, jid),
                                          timestamp=now())
    db.put_async(debug_request)
    deferred.defer(stop_debugging, app_user, jid, debug_request=debug_request,
                   notify_user=False, _countdown=timeout * 60, _transactional=True,
                   _queue=SCHEDULED_QUEUE)
    return start_log_forwarding(app_user, jid, xmpp_target_password=password, type_=type_)
def __hold_user_events(self, user):
    event_query = db.GqlQuery("SELECT * FROM Event WHERE member = :1"
                              " AND status IN :2",
                              user, ["pending", "approved"])
    future_puts = []
    for event in event_query.run():
        logging.debug("Holding event '%s'." % (event.name))
        event.original_status = event.status
        event.status = "onhold"
        event.owner_suspended_time = datetime.datetime.now()
        event_future = db.put_async(event)
        future_puts.append(event_future)

        # Write a log of it.
        log_entry = HDLog(event=event, description="Put event on hold "
                          "because owner was suspended.")
        log_entry_future = db.put_async(log_entry)
        future_puts.append(log_entry_future)

    # Wait for all the writes to finish.
    logging.debug("Waiting for all writes to finish...")
    for future_put in future_puts:
        future_put.get_result()
def __restore_user_events(self, user):
    event_query = db.GqlQuery("SELECT * FROM Event WHERE member = :1"
                              " AND original_status != NULL", user)
    future_puts = []
    for event in event_query.run():
        logging.debug("Restoring event '%s'." % (event.name))
        event.status = event.original_status
        event.original_status = None
        event.owner_suspended_time = None
        event_future = db.put_async(event)
        future_puts.append(event_future)

        # Write a log of it.
        log_entry = HDLog(event=event, description="Restoring event because "
                          "owner is now active.")
        log_entry_future = db.put_async(log_entry)
        future_puts.append(log_entry_future)

    # Wait for all the writes to finish.
    logging.debug("Waiting for all writes to finish...")
    for future_put in future_puts:
        future_put.get_result()
def put_async_prod(entities):
    if on_dev_server():
        # Sync - enables tests to observe the write immediately.
        db.put(entities)
    else:
        # Async (prod).
        db.put_async(entities)
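A minimal usage sketch for put_async_prod(), with Employee standing in as an assumed db.Model (it appears in other examples on this page): on the dev server the write has completed by the time the call returns, while in production the RPC is still in flight, so an immediate read-back may not see it.

def save_batch():
    # Employee is a hypothetical model borrowed from the examples above.
    employees = [Employee(name='A'), Employee(name='B')]
    put_async_prod(employees)  # blocking on the dev server, fire-and-forget in prod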
def put_multi_async(self, filedata_list):
    """Initiate an async put of the given files.

    This method initiates an asynchronous put of a list of file data
    (presented as pairs of the form (filename, data_source)). It is not
    transactional, and does not block; instead it immediately returns a
    callback function. When this function is called it will block until
    the puts are confirmed to have completed. At this point it will also
    clear stale information out of the memcache.

    For maximum efficiency it's advisable to defer calling the callback
    until all other request handling has completed, but in any event, it
    MUST be called before the request handler can exit successfully.

    Args:
        filedata_list: list. A list of tuples. The first entry of each
            tuple is the file name, the second is a filelike object
            holding the file data.

    Returns:
        callable. Returns a wait-and-finalize function. This function
        must be called at some point before the request handler exits,
        in order to confirm that the puts have succeeded and to purge
        old values from the memcache.
    """
    filename_list = []
    data_list = []
    metadata_list = []
    for filename, stream in filedata_list:
        filename = self._logical_to_physical(filename)
        filename_list.append(filename)

        metadata = FileMetadataEntity.get_by_key_name(filename)
        if not metadata:
            metadata = FileMetadataEntity(key_name=filename)
        metadata_list.append(metadata)
        metadata.updated_on = datetime.datetime.now()

        # We operate with raw bytes. The consumer must deal with encoding.
        raw_bytes = stream.read()
        metadata.size = len(raw_bytes)

        data = FileDataEntity(key_name=filename)
        data_list.append(data)
        data.data = raw_bytes

    data_future = db.put_async(data_list)
    metadata_future = db.put_async(metadata_list)

    def wait_and_finalize():
        data_future.check_success()
        metadata_future.check_success()
        MemcacheManager.delete_multi(
            [self.make_key(filename) for filename in filename_list],
            namespace=self._ns)
    return wait_and_finalize
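A sketch of the calling pattern the docstring prescribes, assuming `file_store` is an instance of the class above and using StringIO payloads; the wait-and-finalize callback is held until the rest of the request work is done, then invoked exactly once before the handler exits.

import StringIO

def handle_upload(file_store):
    pairs = [('a.txt', StringIO.StringIO('alpha')),
             ('b.txt', StringIO.StringIO('beta'))]
    finalize = file_store.put_multi_async(pairs)
    # ... other request handling runs while the puts are in flight ...
    finalize()  # blocks until both puts succeed, then purges stale memcache keys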
def update_new_subscribers():
    new_subscribers = memcache.get('new_subscribers')
    if new_subscribers:
        subscribers = [Subscriber(key_name=user_agent.decode('iso-8859-1', 'ignore'),
                                  count=count)
                       for user_agent, count in new_subscribers.iteritems()]
        # The datastore caps batch puts at 500 entities, so write in chunks.
        while subscribers:
            db.put_async(subscribers[:500])
            subscribers = subscribers[500:]
        memcache.delete('new_subscribers')
def write_data(self, stream, stream_name, picture):
    stream.totalPicture = stream.totalPicture + 1
    user_picture = PictureModel(parent=db.Key.from_path('StreamModel', stream_name))
    user_picture.id = str(stream.totalPicture)
    picture = images.resize(picture, 320, 400)
    user_picture.picture = db.Blob(picture)
    stream.lastUpdated = user_picture.uploadDate
    db.put_async(user_picture)
def trans():
    capi_calls = getLocation(get_location_response_handler,
                             get_location_response_error_handler, friend,
                             request=request, DO_NOT_SAVE_RPCCALL_OBJECTS=True)
    lr = LocationRequest(parent=parent_key(friend), key_name=app_user.email(),
                         timestamp=now())
    db.put_async(lr)
    for capi_call in capi_calls:
        capi_call.lr = lr.key()
        capi_call.target = target
    db.put(capi_calls)
    deferred.defer(_cancel_location_request, lr, None, target, None,
                   _countdown=17 * 60, _transactional=True, _queue=SCHEDULED_QUEUE)
def _change_counter(self, prop, value):
    obj = UserCounter.get(self.key())
    oldValue = getattr(obj, prop)
    value = oldValue + value
    if value < 0:
        raise ValueError
    setattr(obj, prop, value)
    db.put_async(obj)
    return value
def post(self):
    status_data = json.loads(self.request.get('content'))
    log.debug('Game on: %s', status_data)
    s = GameStatus(W, status_data)
    g = s.get_game()

    if not g.is_running:
        outcome = 0
        if g.last_move.get('move_type') == 'resign':
            outcome = 1 if g.last_move.get('user_id') != g.me.id else -1
        else:
            opponents_best_score = max(map(lambda x: x.score, g.opponents))
            outcome = cmp(g.me.score, opponents_best_score)
        log.debug('Game is dead. %s',
                  ['I lost :(', 'We tied :|', 'I won :)'][outcome + 1])
        if outcome == 0:
            stats.increment_tie_count()
        elif outcome == 1:
            stats.increment_win_count()
        else:
            stats.increment_lose_count()

        # Store game.
        db.put_async(FinishedGame(key_name=str(g.id),
                                  id=g.id,
                                  updated=g.updated,
                                  created=g.created,
                                  players=g.players,
                                  move_count=g.move_count,
                                  board=g.board,
                                  ruleset=g.ruleset,
                                  tiles=g.tiles,
                                  end_game=g.end_game,
                                  outcome=outcome))
        for player in g.players:
            db.put_async(User(key_name=str(player.get('id')),
                              id=player.get('id'),
                              username=player.get('username')))
        return

    if not g.is_my_turn():
        log.debug("Not my turn")
        return

    player = Wordfeusk()
    for move in itertools.islice(player.get_moves(g), 3):
        log.debug("Wordfeusk suggests '%s' at (%d, %d) %s",
                  move.word, move.x0, move.y0,
                  ("across" if move.direction == Word.ACROSS else "down"))
        try:
            g.play(move.word, move.x0, move.y0, move.direction)
            return
        except WordfeudError, e:
            if e.message == 'not_your_turn':
                log.info('Apparently not my turn')
                return
            log.exception(e)
def post(self):
    wordlist = self.request.get('word').split("\n")
    entities = list()
    for query in wordlist:
        query = query.split()
        word = query[0]
        entity = speller.lexicon0(word=word, key_name=word)
        entity.known = query[1:]
        entities.append(entity)
    db.put_async(entities).get_result()
def set_up_data(self):
    company = Company(name="AppScale")
    put_future = db.put_async(company)
    put_future.get_result()

    employee1 = Employee(name="A", parent=company)
    employee1_future = db.put_async(employee1)
    employee2 = Employee(name="B", parent=company)
    employee2_future = db.put_async(employee2)
    employee3 = Employee(name="C", parent=company)
    employee3_future = db.put_async(employee3)
    employee4 = Employee(name="D", parent=company)
    employee4_future = db.put_async(employee4)
    employee1_future.get_result()
    employee2_future.get_result()
    employee3_future.get_result()
    employee4_future.get_result()

    pn1 = PhoneNumber(work="1111111111", parent=employee1)
    pn1_future = db.put_async(pn1)
    pn2 = PhoneNumber(work="2222222222", parent=employee2)
    pn2_future = db.put_async(pn2)
    pn3 = PhoneNumber(work="3333333333", parent=employee3)
    pn3_future = db.put_async(pn3)
    pn4 = PhoneNumber(work="4444444444", parent=employee4)
    pn4_future = db.put_async(pn4)
    pn1_future.get_result()
    pn2_future.get_result()
    pn3_future.get_result()
    pn4_future.get_result()
def trans():
    key_name = fb_friend_user.email()
    parent = parent_key(new_user)
    invite = FacebookDiscoveryInvite.get_by_key_name(key_name, parent)
    if invite:
        return
    db.put_async(FacebookDiscoveryInvite(key_name=key_name, parent=parent))
    friend_map = get_friends_map(new_user)
    if fb_friend_user in friend_map.friends:
        return
    deferred.defer(_send_message_to_inform_user_about_a_new_join_step_2,
                   fb_friend_user, new_user, _transactional=True)
def _tx(user):
    try:
        from models_acc import UserFollowingIndex, UserSettings, UserProfile, UserCounter
        settings = UserSettings(key_name='settings_%s' % user.id, parent=user,
                                language=language)
        profile = UserProfile(key_name='profile_%s' % user.id, parent=user,
                              username=user.username, email=user.email)
        followings = UserFollowingIndex(parent=user)
        counters = UserCounter(key_name='counters_%s' % user.id, parent=user)
        db.put_async([settings, profile, followings, counters])
        return True
    except:
        return False
def set_article_id(cls, article_id):
    from google.appengine.ext import deferred
    # Store up to 5000 ids per entity; defer the remainder to another task.
    if article_id[:5000]:
        instance = cls(
            article_id=article_id[:5000],
            count=len(article_id[:5000]),
        )
        db.put_async(instance)
        del article_id[:5000]
    if article_id:
        deferred.defer(cls.set_article_id, article_id)
def trans(): key_name = "log_analysis_instance" parent = parent_key(users.User(u"*****@*****.**")) la = LogAnalysis.get_by_key_name(key_name, parent) if not la: la = LogAnalysis(key_name=key_name, parent=parent, analyzed_until=now() - 10 * 60) start = la.analyzed_until end = now() la.analyzed_until = end db.put_async(la) deferred.defer(_analyze, start, end, _transactional=True)
def post(self, eid, format, sensor_kn):
    success = False
    message = None
    data = {}
    eid = int(eid)
    error = 0
    if sensor_kn:
        ekey = db.Key.from_path('Enterprise', eid)
        s = Sensor.get_by_key_name(sensor_kn, parent=ekey)
        if not s:
            default_sensortype_id = Enterprise.CachedDefaultSensorType(eid)
            if default_sensortype_id:
                # Create on the fly only if we have a default sensortype.
                s = Sensor.Create(ekey, sensor_kn, default_sensortype_id)
        if s:
            body = self.request.body
            records = None
            if format == 'json':
                parser = JSONDataParser()
                success, message, records = parser.attemptParse(body)
                if success:
                    data['count'] = len(records)
            elif format == 'smssync':
                parser = SMSSyncDataParser()
                success, message, records = parser.attemptParse(body)
                if success:
                    data['count'] = len(records)
            elif format == 'params':
                # Standard form post params.
                parser = ParamsDataParser()
                success, message, records = parser.attemptParse(body)
                if success:
                    data['count'] = len(records)
            else:
                logging.error("Unsupported format: %s" % format)
            n_records = s.saveRecords(records)
            if n_records:
                s.dt_updated = datetime.now()
                s.put()
                if s.target:
                    s.target.dt_updated = s.dt_updated
                    db.put_async(s.target)
                s.schedule_next_processing()
                success = True
            else:
                message = "No records saved"
        else:
            message = ("Sensor not found and could not be created without "
                       "a default type defined - %s" % sensor_kn)
            error = ERROR.SENSOR_NOT_FOUND
    else:
        message = "Malformed - sensor key"
    self.json_out(data, success=success, message=message, error=error, debug=True)
def save_async(self, parent):
    """Persist a DME to the datastore.

    The root must be specified as the parent so that the store can happen
    as part of an asynchronous transaction.
    """
    if not self._entity:
        self._entity = DrilldownMatrixEntry(parent=parent)
    self._entity.metrics = list(self._data[0])
    self._entity.configs = list(self._data[1])
    self._entity.files = list(self._data[2])
    self._entity.commits = list(self._data[3])
    db.put_async(self._entity)
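A sketch of the transactional use the docstring describes, assuming `entries` is a list of these wrapper objects and `root` is their already-saved common ancestor; because every async put targets the root's entity group, db.run_in_transaction can commit them as a unit.

def _persist_all(root, entries):
    # Hypothetical transaction body: queue one async put per entry.
    for entry in entries:
        entry.save_async(parent=root)

db.run_in_transaction(_persist_all, root, entries)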
def put(self, from_comment=False):
    self.name = self.name.strip()
    if from_comment:  # don't write to the timeline when called from a comment
        super(Suggestion, self).put()
        return self
    from django.template.defaultfilters import slugify
    if self.is_saved():  # modifying an existing suggestion
        super(Suggestion, self).put()
        suggestion_modified.send(sender=self)
    else:  # new suggestion
        if self.slug is None:
            name = self.name.lower()[:32]
            self.slug = unicode(slugify('%s' % (name)))
            p = Suggestion.all().filter('slug =', self.slug).get()
            if p is not None:
                # Try '-1', '-2', ... until a free slug is found.
                i = 1
                slug = self.slug + '-%s' % i
                p = Suggestion.all().filter('slug =', slug).get()
                while p is not None:
                    i += 1
                    slug = self.slug + '-%s' % i
                    p = Suggestion.all().filter('slug =', slug).get()
                self.slug = slug
        super(Suggestion, self).put()
        counter = SuggestionCounter(parent=self)
        put = db.put_async(counter)
        self._get_short_url()
        put.get_result()
        suggestion_new.send(sender=self)
def update(self, name=None, description=None, instances_add=None, instances_del=None):
    '''
    Updates a list of users

    :param user: user
    :type user: :class:`geouser.models.User`
    :param name: name of the list
    :type name: :class:`string`
    :param description: description of the list
    :type description: :class:`string`
    :param instances: objects to add to the list
    :type instances: :class:`geouser.models.User`
    '''
    if name is not None:
        self.name = name
    if description is not None:
        self.description = description
    for instance in instances_del or []:
        try:
            self.keys.remove(instance.key())
        except ValueError:
            pass
    keys = set(self.keys)
    keys |= set([instance.key() for instance in instances_add or []])
    self.keys = [k for k in keys]
    timeline = UserTimelineSystem(user=self.user.key(), msg_id=151, instance=self)
    put = db.put_async([timeline, self])
    put.get_result()
def messaging_poke(sik, id_, **kwargs):
    logging.info("Incoming poke call for sik %s" % sik)
    account = get_account_by_sik(sik)
    if not account:
        logging.info("Sik not recognized")
        return
    tag = kwargs['tag']
    email = kwargs["email"]
    app_id = kwargs["user_details"][0]["app_id"]
    result_key = kwargs["result_key"]
    service_identity = kwargs["service_identity"]
    rpc = db.put_async(Poke(key_name=result_key, parent=account, email=email,
                            app_id=app_id, tag=tag, timestamp=now(),
                            service_identity=service_identity))
    key_name = "tag:"
    if tag is not None:
        key_name += tag
    link = PokeTagMessageFlowLink.get_by_key_name(key_name, parent=account)
    if not link:
        return
    flow = link.message_flow
    _try_or_defer(_store_mfr, email, tag, link, result_key, account,
                  kwargs["service_identity"])
    rpc.get_result()  # Prevent warnings.
    return dict(type='flow', value=dict(flow=flow))
def get(self):
    timeframes = {'today': hours_ago(24),
                  'last7days': hours_ago(24 * 7),
                  'last30days': hours_ago(24 * 30)}
    puts = []
    outdict = {}
    # Loop over today, last7days, last30days.
    for ttag in timeframes:
        # Remove stale tags.
        q = Score.all()
        # Is the score older than the timeframe?
        q.filter("date < ", timeframes[ttag])
        q.filter("timeframes = ", ttag)
        # TODO: use cursors etc. here.
        # fetch(1000) will currently cause the leaderboard to not properly
        # update if the app receives more than 1000 highscores in an hour.
        # However, it should catch up in subsequent hours, so it's not
        # really a showstopper.
        scores = q.fetch(1000)
        for s in scores:
            s.timeframes.remove(ttag)
            puts.append(db.put_async(s))
            outdict[str(s.key()) + "_" + ttag] = "removed"
    # Check that the puts succeeded; if a put failed,
    # an exception will be thrown.
    for p in puts:
        p.get_result()
    # On success.
    outdict["result"] = "success"
def new_follower(sender, **kwargs):
    """
    Catches the new-follower signal

    Writes to the timelines of both users.
    Sends a notification email.
    Writes to the notifications timeline.
    """
    from google.appengine.ext import db
    from google.appengine.ext.deferred import defer
    from models_acc import UserTimelineSystem, UserTimeline, UserSettings
    if not isinstance(kwargs['following'], db.Key):
        raise AttributeError
    if kwargs['following'].id() == 962005 or sender.username == 'georemindme':
        return
    defer(UserTimeline.add_timelines_to_follower, kwargs['following'], sender.key())
    settings = UserSettings.objects.get_by_id(kwargs['following'].id())
    timeline = UserTimelineSystem(parent=sender, user=sender,
                                  instance=kwargs['following'], msg_id=100,
                                  visible=False)
    put = db.put_async([timeline])
    if settings.show_followings:
        timelinePublic = UserTimeline(parent=sender, user=sender,
                                      instance=kwargs['following'], msg_id=100)
        timelinePublic.put()
    defer(settings.notify_follower, sender.key())
    put.get_result()
    if sender.key() != kwargs['following']:
        from geouser.models_utils import _Notification
        notification = _Notification(parent=kwargs['following'],
                                     owner=kwargs['following'], timeline=timeline)
        notification.put()
def get(self):
    '''Spoof a bunch of users.'''
    assert False, 'dont be here'
    cam = models.Camera(camera_id='1').put()
    cam_list = [cam]
    futs = []
    for i in range(1, 50):
        users = []
        for c in string.ascii_letters:
            s = c * i
            users.append(models.User(
                cameras=cam_list,
                email=s,
                pw=s,
                tz=s,
                name=s,
                addr1=s,
                city=s,
                state='MA',
                stripe_customer_id=s,
                camsbought=random.choice(range(1, 10))
            ))
        futs.append(db.put_async(users))
    # Complete the datastore transactions.
    l = [f.get_result() for f in futs]
    self.response.out.write('Done!')
def create_from_count(cls, count):
    snap = cls()
    snap.play_count = count.play_count
    snap.artist_name = count.artist_name
    snap.album_title = count.album_title
    snap.label = count.label
    snap.track_id = str(count.key())
    return db.put_async(snap)
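create_from_count() hands back the put RPC instead of blocking, so a caller can fan out one snapshot per count and wait once at the end. A hedged sketch; TrackSnapshot is a hypothetical name for the class above and `counts` an assumed list of existing count entities:

rpcs = [TrackSnapshot.create_from_count(count) for count in counts]
for rpc in rpcs:
    rpc.get_result()  # block once, after all snapshot puts are in flight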
def put(models, **kwargs):
    """Store one or more Model instances; every stored model is also
    pushed into memcache.

    TODO(sahid): Needs a better doc.
    """
    memclient = memcache.Client()
    for retry in xrange(DATASTORE_NB_RETRY):
        try:
            models, multiple = datastore.NormalizeAndTypeCheck(models, db.Model)
            if not any(models):
                return multiple and [] or None  # Nothing to do.
            async = db.put_async(models, **kwargs)
            try:
                debug("Needs to put models=%s" %
                      ','.join(m.__class__.__name__ for m in models))
                # TODO(sahid): Needs factorization.
                k = [unicode(x.key()) for x in models]
                v = serialize(models)
                memclient.set_multi(dict(zip(k, v)),
                                    time=MEMCACHE_TIME,
                                    key_prefix=MEMCACHE_PREFIX)
                ret = async.get_result()
            except datastore_errors.BadKeyError:
                # An incomplete key was passed, so we can't store in memcache
                # before the put in the datastore. It's better to use key_name.
                debug("Incomplete key passed, "
                      "can't store in memcache before put in the datastore.")
                ret = async.get_result()
                if ret:
                    k = map(unicode, ret)
                    v = serialize(models)
                    memclient.set_multi(dict(zip(k, v)),
                                        time=MEMCACHE_TIME,
                                        key_prefix=MEMCACHE_PREFIX)
            if multiple:
                return ret
            return ret[0]
        except (db.Timeout,
                db.TransactionFailedError,
                apiproxy_errors.ApplicationError,
                apiproxy_errors.DeadlineExceededError), e:
            logging.warn("Error during the put process, "
                         "retry %d in %.2fs", retry, DATASTORE_TIME_RETRY)
            logging.debug(e.message)
            time.sleep(DATASTORE_TIME_RETRY)
    logging.exception(e)
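A hedged usage sketch for the memcache-backed put() above; Note is a hypothetical model used only for illustration. As with db.put(), a single instance yields a single key and a list yields a list of keys; entities with complete keys are cached before the datastore RPC resolves, entities with incomplete keys only after their ids are assigned.

class Note(db.Model):
    text = db.StringProperty()

single_key = put(Note(key_name='n1', text='hello'))  # complete key: cached first
many_keys = put([Note(text='a'), Note(text='b')])    # ids assigned: cached after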
def create(user, action, object):
    a = Activity(user=user.name,
                 img=user.gravatar('30'),
                 action=action,
                 object=object)
    ar = db.put_async(a)
    receivers = cache.get_followers(user.name)
    receivers.append(user.name)
    ar.get_result()  # The Activity must be saved before it can be a parent.
    ai = ActivityIndex(parent=a, receivers=receivers)
    ai.put()
def save_data(self, data):
    if not data:
        logging.warning("No data to save.")
        return False

    if len(data) > (MAX_DATA_ENTRY_PER_FILE * MAX_ENTRY_LEN):
        logging.error("File too big, can't save to datastore: %dK",
                      len(data) / 1024)
        return False

    # Use the new_data_keys to store new data. If all new data are saved
    # successfully, swap new_data_keys and data_keys so we can reuse the
    # data_keys entries in next run. If unable to save new data for any
    # reason, only the data pointed by new_data_keys may be corrupted,
    # the existing data_keys data remains untouched. The corrupted data
    # in new_data_keys will be overwritten in next update.
    keys = self._convert_blob_keys(self.new_data_keys)
    self.new_data_keys = []

    chunk_indices = self._get_chunk_indices(len(data))
    logging.info('Saving file in %s chunks', len(chunk_indices))

    chunk_data = []
    for chunk_index in chunk_indices:
        chunk = ChunkData()
        chunk.index = chunk_index
        if keys:
            chunk.reused_key = keys.pop()
            chunk.entry_future = DataEntry.get_async(chunk.reused_key)
        else:
            chunk.data_entry = DataEntry()
        chunk_data.append(chunk)

    put_futures = []
    for chunk in chunk_data:
        if chunk.entry_future:
            data_entry = chunk.entry_future.get_result()
            if not data_entry:  # pragma: no cover
                logging.warning("Found key, but no data entry: %s",
                                chunk.reused_key)
                data_entry = DataEntry()
            chunk.data_entry = data_entry
        chunk.data_entry.data = db.Blob(
            data[chunk.index:chunk.index + MAX_ENTRY_LEN])
        put_futures.append(db.put_async(chunk.data_entry))

    for future in put_futures:
        key = None
        try:
            key = future.get_result()
            self.new_data_keys.append(key)
        except Exception, err:  # pragma: no cover
            logging.error("Failed to save data store entry: %s", err)
            self.delete_data(keys)
            return False

    # All chunks saved: swap the key lists, as described above, so the
    # old entries can be reused on the next run.
    self.data_keys, self.new_data_keys = self.new_data_keys, self.data_keys
    return True
def SetMulti(self, mapping):
    entities = [PersistentObjectStoreItem.CreateItem(self._namespace, key, value)
                for key, value in mapping.iteritems()]
    # Some entities may be None if they were too large to insert. Skip those.
    rpcs = [db.put_async([entity for entity in entities if entity])]
    # If running the dev server, the futures don't complete until the server
    # is *quitting*. This is annoying. Flush now.
    if IsDevServer():
        [rpc.wait() for rpc in rpcs]
    # Bind rpc at definition time so each callback keeps its own RPC.
    return All(Future(callback=lambda rpc=rpc: rpc.get_result())
               for rpc in rpcs)
def post(self):
    key1 = self.request.get('key1')
    val1 = self.request.get('val1')
    key2 = self.request.get('key2')
    val2 = self.request.get('val2')
    text1 = Text(key_name=key1, text=val1)
    text2 = Text(key_name=key2, text=val2)
    async_put = db.put_async([text1, text2])
    async_put.get_result()
def __init__(self, sensorprocess, batch_size=50):
    self.sensorprocess = sensorprocess
    self.batch_size = batch_size
    self.cursor = None
    self.worker_start = datetime.now()
    self.start = self.worker_start
    self.sensor = sensorprocess.sensor
    self.ent = sensorprocess.enterprise
    self.process = sensorprocess.process
    self.dt_last_run = sensorprocess.dt_last_run
    self.dt_last_record = sensorprocess.dt_last_record
    self.query = self._get_query()
    self.processers = self.process.get_processers()  # JSON array of <processer>
    self.ep = None
    self.analyses = {}
    self.last_record = None
    self.sensorprocess.start(self.worker_start)
    db.put_async(self.sensorprocess)
    self.records_processed = 0
    self.continuations = 0
def increment_counters(self, key, amount):
    backup = key + '_backup'
    counter1_future = db.get_async(db.Key.from_path('Counter', key))
    counter2_future = db.get_async(db.Key.from_path('Counter', backup))

    counter1 = counter1_future.get_result()
    counter2 = counter2_future.get_result()
    if counter1 is None:
        counter1 = Counter(key_name=key, counter=0)
        counter2 = Counter(key_name=backup, counter=0)

    for i in range(0, amount):
        counter1.counter += 1
        counter2.counter += 1
        if counter1.counter == 5:
            raise Exception('Mock Exception')

    counter1_future = db.put_async(counter1)
    counter2_future = db.put_async(counter2)
    counter1_future.get_result()
    counter2_future.get_result()
def increment_counter(self, key, amount):
    get_future = db.get_async(db.Key.from_path('Counter', key))
    counter = get_future.get_result()
    if counter is None:
        counter = Counter(key_name=key, counter=0)
    for i in range(0, amount):
        counter.counter += 1
        if counter.counter == 5:
            raise Exception('Mock Exception')
    put_future = db.put_async(counter)
    put_future.get_result()
def testAsyncPutGetDelete(self):
    """Tests asynchronously putting, getting and deleting entities."""
    class Person(db.Model):
        name = db.StringProperty()

    person = Person(name="Arthur")
    async = db.put_async(person)
    key = async.get_result()
    self.assertEqual(key, async.get_result())

    async = db.get_async(key)
    person = async.get_result()
    self.assertEqual("Arthur", person.name)

    async = db.delete_async(key)
    async.get_result()
    self.assertRaises(datastore_errors.EntityNotFoundError,
                      datastore.Get, key)
def post(self):
    project_id = str(uuid.uuid1())
    project_name = self.request.get('name')
    project = Project(project_id=project_id,
                      name=project_name,
                      rating=int(self.request.get('rating')),
                      description=self.request.get('description'),
                      license=self.request.get('license'),
                      key_name=project_name)
    put_future = db.put_async(project)
    put_future.get_result()
    self.response.headers['Content-Type'] = "application/json"
    self.response.set_status(201)
    self.response.out.write(
        json.dumps({'success': True, 'project_id': project_id}))
def put(self):
    if self.sender.key() == self.to.key():
        return
    if not self.is_saved():
        super(Invitation, self).put()
        from geouser.models_acc import UserTimelineSystem
        timeline = UserTimelineSystem(parent=self.sender, user=self.sender,
                                      msg_id=110, instance=self)
        from geouser.models_utils import _Notification
        notification = _Notification(parent=self.to, owner=self.to,
                                     timeline=timeline)
        put = db.put_async([timeline, notification])
        if self.to.settings.notification_invitation:
            from geomail import send_notification_invitation
            send_notification_invitation(self.to.email, self.sender, self)
        put.get_result()
    else:
        super(Invitation, self).put()
def register(cls, language='en', confirmed=False, **kwargs):
    '''
    Registers a new user and creates all the child instances it needs

    :returns: :class:`geouser.models.User`
    :raises: :class:`UniqueEmailConstraint` if the email is already in use
    :raises: :class:`UniqueUsernameConstraint` if the username is already in use
    '''
    def _tx(user):
        try:
            from models_acc import UserFollowingIndex, UserSettings, UserProfile, UserCounter
            settings = UserSettings(key_name='settings_%s' % user.id, parent=user,
                                    language=language)
            profile = UserProfile(key_name='profile_%s' % user.id, parent=user,
                                  username=user.username, email=user.email)
            followings = UserFollowingIndex(parent=user)
            counters = UserCounter(key_name='counters_%s' % user.id, parent=user)
            db.put_async([settings, profile, followings, counters])
            return True
        except:
            return False

    from django.core.validators import validate_email
    if 'email' in kwargs:
        validate_email(kwargs['email'].decode('utf8'))
    user = User(**kwargs)
    if confirmed:
        user.toggle_confirmed()
    user.put()
    trans = db.run_in_transaction(_tx, user)
    if not trans:
        user.delete()
    else:
        from models_acc import UserSocialLinks
        sociallinks = UserSocialLinks(parent=user.profile,
                                      key_name='sociallinks_%s' % user.id)
        save = db.put_async(sociallinks)
        from signals import user_new
        from watchers import new_user_registered
        user_new.send(sender=user, status=trans)
        save.get_result()
    return user
def put(self, from_comment=False):
    if self.is_saved():
        super(List, self).put()
        if from_comment:
            return self
        from watchers import list_modified, list_deleted
        if not self.active:
            list_deleted.send(sender=self)
        else:
            list_modified.send(sender=self)
    else:
        super(List, self).put()
        counter = ListCounter(parent=self)
        a = db.put_async(counter)
        from watchers import list_new
        list_new.send(sender=self)
        a.get_result()
def post(self):
    project_id = self.request.get('project_id')
    # Bind the parameter instead of interpolating user input into the query.
    query = db.GqlQuery("SELECT * FROM Project WHERE project_id = :1",
                        str(project_id))
    module_id = str(uuid.uuid1())
    module_name = self.request.get('name')
    module = Module(module_id=module_id,
                    name=module_name,
                    description=self.request.get('description'),
                    parent=query[0],
                    key_name=module_name)
    put_future = db.put_async(module)
    put_future.get_result()
    self.response.headers['Content-Type'] = "application/json"
    self.response.set_status(201)
    self.response.out.write(
        json.dumps({'success': True, 'module_id': module_id}))
def __restore_user_events(self, user):
    event_query = db.GqlQuery("SELECT * FROM Event WHERE member = :1"
                              " AND original_status != NULL", user)
    future_puts = []
    for event in event_query.run():
        logging.debug("Restoring event '%s'." % (event.name))
        event.status = event.original_status
        event.original_status = None
        event.owner_suspended_time = None
        event_future = db.put_async(event)
        future_puts.append(event_future)

    # Wait for all the writes to finish.
    logging.debug("Waiting for all writes to finish...")
    for future_put in future_puts:
        future_put.get_result()