def post(self):
    """Handles account create/update/delete, all inside one ndb transaction."""
    # TODO: XSRF protection.

    @ndb.tasklet
    def helper():
        current_user = users.get_current_user()
        if not current_user:
            # Not signed in: bounce through the login flow and bail out.
            self.redirect(users.create_login_url('/account'))
            return

        account = yield get_account(current_user.user_id())

        if self.request.get('delete'):
            # Deletion requested; remove the account if it exists.
            if account:
                yield account.key.delete_async()
            self.redirect('/account')
            return

        if not account:
            # First visit: create a fresh Account entity for this user.
            account = Account(key=account_key(current_user.user_id()),
                              email=current_user.email(),
                              userid=current_user.user_id())

        new_nickname = self.request.get('nickname')
        if new_nickname:
            account.nickname = new_nickname

        yield account.put_async()
        self.redirect('/account')

    yield ndb.transaction_async(helper)
def insert_post(shard, **kwargs):
    """Inserts a post at the present time, returning its key.

    If the post_id keyword argument is not supplied, a new post ID will be
    auto assigned.
    """
    # Create the posting and insert it.
    post_id = kwargs.pop('post_id', None) or models.human_uuid()
    new_topic = kwargs.get('new_topic', None)
    kwargs['post_time'] = datetime.datetime.now()

    post_key = ndb.Key(models.Post._get_kind(), post_id)
    post = models.Post(key=post_key, **kwargs)

    @ndb.tasklet
    def txn():
        existing = yield post_key.get_async(use_memcache=False,
                                            use_cache=False)
        if existing:
            logging.warning('Post already exists for shard=%r, post_id=%r',
                            shard, post_id)
            raise ndb.Rollback()

        yield post.put_async(use_memcache=False, use_cache=False)

        # Pull task that indicates the post to apply. This must encode the
        # new_topic data for this post so the apply_posts() function doesn't
        # need the models.Post entity in order to make progress.
        enqueue_post_task(shard, [post_id], new_topic=new_topic)

    # Notify all users of the post.
    futures = [
        ndb.transaction_async(txn),
        notify_posts(shard, [post]),
    ]

    # Set the dirty bit for this shard. This causes apply_posts to run a
    # second time if the Post transaction above completed while apply_posts
    # was already in flight.
    dirty_bit(shard, set=True)

    # Enqueue an apply task to sequence and notify the new post.
    futures.append(enqueue_apply_task(shard, post_id=post_id))

    # Wait on futures in case they raise errors.
    ndb.Future.wait_all(futures)

    return post_key
def get_topic_info(root_shard_id, email_address):
    """Gets detail about topics for a root shard, updates read state.

    Args:
        root_shard_id: Shard ID to list topics for.
        email_address: Address of the user whose read state should be
            updated after getting info.

    Returns:
        Dictionary with the members:
            topic_list: List of dictionaries, one for each topic, suitable
                for rendering email digest updates.
            total_updates: Total number of updates for this shard.
            total_topics: Total number of topics that were updated.
    """
    user_id = '%s:%s' % (root_shard_id, email_address)
    _, shard_and_state_list = yield topics.list_topics(
        root_shard_id, user_id)
    shard_and_state_list.sort(key=lambda pair: pair[0].update_time)

    topic_list = []
    update_dict = {}
    total_updates = 0

    for topic_shard, read_state in shard_and_state_list:
        # Do not include the root shard in the list of topics if any other
        # topics exist, since it will always include all of the updates for
        # related topics. We only want it to be in the list for the very
        # first email digest for each user if a topic was never started.
        if len(shard_and_state_list) > 1 and not topic_shard.root_shard:
            continue

        end_sequence = topic_shard.sequence_number
        start_sequence = (
            read_state.last_read_sequence if read_state else 1)

        updates_count = end_sequence - start_sequence
        if updates_count <= 0:
            # Nothing new with this topic, so leave it out of the digest.
            continue
        total_updates += updates_count

        # TODO(bslatkin): Fetch all of the new posts for this topic and
        # extract the nicknames and/or gravatars of the users who have
        # contributed.

        topic_list.append(dict(
            topic_id=topic_shard.shard_id,
            last_update_time=topic_shard.update_time,
            title=topic_shard.title,
            description=topic_shard.description,
            creation_nickname=topic_shard.creation_nickname,
            start_sequence=start_sequence,
            end_sequence=end_sequence,
            updates_count=updates_count))
        update_dict[topic_shard.shard_id] = end_sequence

    # TODO(bslatkin): Split this flow into two parts: One to generate the
    # parameters and save them somewhere, another to actually update the
    # read state. We'd want to do this to make sure that if errors happen
    # we will still always send email, but we don't risk sending the same
    # email twice. Right now we risk never sending an email at all.
    if update_dict:
        yield ndb.transaction_async(
            lambda: topics.update_read_state(update_dict, user_id))

    raise ndb.Return(dict(
        shard_id=root_shard_id,
        shard_url=config.shard_url_template % root_shard_id,
        total_updates=total_updates,
        total_topics=len(topic_list),
        topic_list=topic_list))