Example #1
    def run(self, auth_pair, photos, locs=None):
        if not photos:
            return

        photoGroup = photos[0]

        if not locs:
            locs = self.getGroupLocs(auth_pair, photoGroup)
            # this took some time, schedule it for new run
            deferred.defer(self.run, auth_pair, photos, locs)
            return

        photo = photoGroup[0]
        loc = self.getClosestLoc(locs, photo.exif.time.text)
        if loc:
            #logging.info("updating photo: "+photo.title.text)
            self.updatePhoto(auth_pair, photo, loc)
            msg = {
                    'title': photo.title.text,
                    'thumbnail': photo.media.thumbnail[0].url,
                    'lat': loc['latitude'],
                    'lng': loc['longitude']
            }
            channel.send_message(auth_pair.token, json.dumps(msg))

        photoGroup.pop(0)
        if len(photoGroup) == 0:
            photos.pop(0)
            locs = None

        if photos:
            deferred.defer(self.run, auth_pair, photos, locs)
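
Each run handles a single photo and then re-defers itself with the remaining work, keeping every task well inside the request deadline. A minimal sketch of that self-rescheduling pattern (process_one is a hypothetical work function):

from google.appengine.ext import deferred

def process_items(items):
    # Handle one item per task, then hand the rest to a fresh task.
    if not items:
        return
    process_one(items[0])  # process_one is an assumed, application-specific function
    deferred.defer(process_items, items[1:])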
Example #2
    def trans():
        login_state = PaymentOauthLoginState.create_key(state).get()
        if not login_state:
            return False
        if login_state.completed:
            return False
        login_state.completed = True
        login_state.put()

        payment_user_key = get_payment_user_key(login_state.app_user)
        payment_user = payment_user_key.get()
        if not payment_user:
            payment_user = PaymentUser(key=payment_user_key)
            payment_user.providers = []
            payment_user.assets = []

        pup = payment_user.get_provider(login_state.provider_id)
        if pup:
            pup.token = token
        else:
            payment_user.providers.append(PaymentUserProvider(provider_id=login_state.provider_id, token=token))
        payment_user.put()

        deferred.defer(sync_payment_assets, login_state.app_user, login_state.provider_id, True, _transactional=True)

        return True
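
The _transactional=True flag ties the task to the surrounding datastore transaction, so it is only enqueued if the transaction commits. A minimal ndb sketch of the same shape (sync_entity is a hypothetical callable):

from google.appengine.ext import deferred, ndb

@ndb.transactional
def complete_and_sync(key):
    entity = key.get()
    if entity.completed:
        return False
    entity.completed = True
    entity.put()
    # Enqueued only if the transaction commits.
    deferred.defer(sync_entity, key, _transactional=True)
    return True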
Example #3
    def _continue(self, start_key, batch_size):
        """Processes a batch of entities."""
        q = self.get_query()
        # If we're resuming, pick up where we left off last time.
        if start_key:
            q.filter('__key__ >', start_key)

        # Keep updating records until we run out of time.
        try:
            # Steps over the results, returning each entity and its index.
            for i, entity in enumerate(q):
                map_updates, map_deletes = self.map(entity)
                self.to_put.extend(map_updates)
                self.to_delete.extend(map_deletes)

                # Record the last entity we processed.
                start_key = entity.key()

                # Do updates and deletes in batches.
                if (i + 1) % batch_size == 0:
                    self._batch_write()

        except DeadlineExceededError:
            # Write any unfinished updates to the datastore.
            self._batch_write()
            # Queue a new task to pick up where we left off.
            defer(self._continue, start_key, batch_size)
            return

        # Write any remaining updates to the datastore, since the loop may
        # have ended between batch writes.
        self._batch_write()

        self.finish()
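
The core moves of this mapper — remember the last processed key, catch DeadlineExceededError, and re-defer from that point — also work standalone. A rough sketch, with MyModel and handle() as placeholders:

from google.appengine.ext import db, deferred
from google.appengine.runtime import DeadlineExceededError

def process_all(start_key=None):
    q = MyModel.all()  # MyModel is a placeholder db.Model kind
    if start_key:
        q.filter('__key__ >', start_key)
    try:
        for entity in q:
            handle(entity)  # handle() is an assumed work function
            start_key = entity.key()
    except DeadlineExceededError:
        # Resume from the last processed entity in a fresh task.
        deferred.defer(process_all, start_key)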
Example #4
def _delete_non_ancestor_models(parent_service_user, service_user):
    sp = get_service_profile(service_user)
    if sp:
        def get_service_identity_based_keys():
            keys = list()
            si_users = list()
            for si in get_service_identities(service_user):
                keys.append(ProfilePointer.create_key(si.service_identity_user))
                si_users.append(si.service_identity_user)
            for qr in ServiceInteractionDef.all().ancestor(parent_key(service_user)):
                keys.append(db.Key.from_path(ShortURL.kind(), ServiceInteractionDef.shortUrl.get_value_for_datastore(qr).id()))
            return keys, si_users

        keys = [db.Key.from_path(Avatar.kind(), sp.avatarId)]
        keys.extend(TrialServiceAccount.all(keys_only=True).filter("service", service_user))
        more_keys, service_identity_users = db.run_in_transaction(get_service_identity_based_keys)
        keys.extend(more_keys)
        keys.extend(MessageFlowRunRecord.all(keys_only=True).filter("service_identity >=", service_user.email() + '/').filter("service_identity <", service_user.email() + u"/\ufffd"))
        keys.extend(Branding.all(keys_only=True).filter("user", service_user))
        keys.extend(SIKKey.all(keys_only=True).filter("user", service_user))
        keys.extend(APIKey.all(keys_only=True).filter("user", service_user))
        logging.info(keys)
        db.delete(keys)

        delete_service_tasks = DeleteServiceTasks(key_name=service_user.email())
        delete_service_tasks.tasks = 3
        delete_service_tasks.put()

        deferred.defer(_delete_sessions, parent_service_user, service_user)
        deferred.defer(_delete_service_log, parent_service_user, service_user)
        deferred.defer(_delete_service_models, parent_service_user, service_user)
        deferred.defer(_cleanup_service_identities, service_identity_users)
    else:
        if parent_service_user and parent_service_user != service_user:
            deferred.defer(delete_service_finished, parent_service_user, service_user.email(), True)
Example #5
def run_deploy_task():
  """Attempts to run the per-version deploy task."""
  task_name = 'deploy-%s' % os.environ['CURRENT_VERSION_ID'].replace('.', '-')
  try:
    deferred.defer(try_post_deploy, _name=task_name, _countdown=10)
  except (taskqueue.TaskAlreadyExistsError, taskqueue.TombstonedTaskError):
    pass
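
Giving the task a deterministic _name makes the defer idempotent: the queue rejects a second task with the same name, and names stay tombstoned for a while after the task runs. A sketch of the same guard (do_work is an assumed callable):

from google.appengine.api import taskqueue
from google.appengine.ext import deferred

def schedule_once(tag):
    try:
        deferred.defer(do_work, _name='work-%s' % tag, _countdown=10)
    except (taskqueue.TaskAlreadyExistsError, taskqueue.TombstonedTaskError):
        pass  # already queued, or ran recently; nothing to do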
Example #6
def submit():
    game = get_current_game()
    player = Player.get_by_id(session['username'])
    sequence = map(int, request.form.getlist('sequence[]'))
    g.sequence = sequence
    if not player.played_this_round:
        json_result = None
        player.played_this_round = True
        player.put()
        game.players_played += 1
        if session['username'] == game.leader:
            game.sequence = g.sequence
            session['score'] += 1
            send_channel_update('copysequence', [x.name for x in ndb.get_multi(game.players) if x.name != game.leader])
            json_result = jsonify(message='Waiting on other players to copy your sequence',
                                    result='leader')
        else:
            if sequence == game.sequence:
                json_result = jsonify(message='Very good! Wait for the next round.',
                                        result='match')
                session['score'] += 1
                #send_channel_update('sequencematch', [session['username']])
            else:
                json_result = jsonify(message='You got it wrong :( oh well',
                                        result='mismatch')
                #send_channel_update('sequencemismatch', [session['username']])
        if game.players_played == len(game.players):
            # next round!
            deferred.defer(start_round, _countdown=3)
        return json_result
    return '', 204
Example #7
def send_mail_notification(subject, body, to=None, **kwargs):
  if not config.CONFIG_DB.feedback_email:
    return
  brand_name = config.CONFIG_DB.brand_name
  sender = '%s <%s>' % (brand_name, config.CONFIG_DB.feedback_email)
  subject = '[%s] %s' % (brand_name, subject)
  deferred.defer(mail.send_mail, sender, to or sender, subject, body, **kwargs)
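
deferred.defer pickles a reference to the callable plus its arguments, so a module-level library function such as mail.send_mail can be deferred directly, with no wrapper. An illustrative call (the addresses are made up):

from google.appengine.api import mail
from google.appengine.ext import deferred

deferred.defer(mail.send_mail,
               'Example <noreply@example.com>',  # sender (illustrative)
               'user@example.com',               # recipient (illustrative)
               'Subject line',
               'Message body')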
Example #8
def sendGcmToUserDeferred(irssiuser, message):
    logging.info("queuing deferred task for sending message to user %s" % irssiuser.email)
    key = irssiuser.key()
    try:
        deferred.defer(_sendGcmToUser, key, message, _queue='gcmqueue')
    except TransientError as e:
        logging.warn("Transient error: %s" % e)
Example #9
File: rerender.py Project: cklzqw/gaeaib
def do_render_cache(cursor=None):
  thq = Thread.all()

  if cursor:
    thq.with_cursor(cursor)

  thread = thq.get()

  if not thread:
    logging.info("stop thread clean")
    return

  board = thread.parent_key().name()
  render = Render(board=board, thread = thread.key().id())

  for idx,post in enumerate(thread.posts):
    post['text_html'] = markup(
          board=board, postid=post.get("post"),
          data=escape(post.get('text', '')),
    )

    if idx == 0:
      render.create(post)
    else:
      render.append(post)

  if len(thread.posts) > 1:
    thread.put()
  else:
    thread.delete()

  render.save()

  deferred.defer(do_render_cache, thq.cursor())
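
Fetching one entity, doing the work, and re-deferring with q.cursor() serializes an unbounded scan into many small tasks. A condensed sketch of the same chain (rebuild() stands in for the render work):

from google.appengine.ext import deferred

def walk_threads(cursor=None):
    q = Thread.all()
    if cursor:
        q.with_cursor(cursor)
    thread = q.get()
    if not thread:
        return  # end of the kind; the chain stops here
    rebuild(thread)  # rebuild() is an assumed stand-in for the per-thread work
    deferred.defer(walk_threads, q.cursor())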
Example #10
def set(path, body, content_type, indexed=True, **kwargs):
  """Sets the StaticContent for the provided path.
  
  Args:
    path: The path to store the content against.
    body: The data to serve for that path.
    content_type: The MIME type to serve the content as.
    indexed: Index this page in the sitemap?
    **kwargs: Additional arguments to be passed to the StaticContent constructor
  Returns:
    A StaticContent object.
  """
  content = StaticContent(
      key_name=path,
      body=body,
      content_type=content_type,
      indexed=indexed,
      **kwargs)
  content.put()
  memcache.replace(path, db.model_to_protobuf(content).Encode())
  try:
    now = datetime.datetime.now().replace(second=0, microsecond=0)
    eta = now + datetime.timedelta(seconds=65)
    if indexed:
      deferred.defer(
          utils._regenerate_sitemap,
          _name='sitemap-%s' % (now.strftime('%Y%m%d%H%M'),),
          _eta=eta)
  except (taskqueue.TaskAlreadyExistsError, taskqueue.TombstonedTaskError):
    pass
Example #11
def sendGcmToTokenDeferred(token, message):
    logging.info("queuing deferred task for sending message to token %s" % token.gcm_token)
    key = token.key()
    try:
        deferred.defer(_sendGcmToToken, key, message, _queue='gcmqueue')
    except TransientError as e:
        logging.warn("Transient error: %s" % e)
Example #12
    def delete_shouts(self, request):

        # This sample runs it in the mappers backend, with 100 batches, the more things you do in map
        # the smaller the batch should be to avoid duplicate runs on failures
        deferred.defer(demo.DeleteAllShout().run, 100, _target='mappers')

        return message_types.VoidMessage()
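
_target routes the task to a named backend or module instance so heavy mapper work stays off user-facing instances, and _queue picks the queue. A minimal call shape (the function, module, and queue names here are assumptions):

from google.appengine.ext import deferred

deferred.defer(long_running_job,        # long_running_job is hypothetical
               _target='mappers',       # module/backend named in the app config
               _queue='mapper-queue')   # queue assumed to exist in queue.yaml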
Example #13
 def handle_upload(self):
   results = []
   blob_keys = []
   for name, fieldStorage in self.request.POST.items():
       if type(fieldStorage) is unicode:
           continue
       result = {}
       result['name'] = re.sub(r'^.*\\', '', fieldStorage.filename)
       result['type'] = fieldStorage.type
       result['size'] = self.get_file_size(fieldStorage.file)
       if self.validate(result):
           blob_key = str(self.write_blob(fieldStorage.value, result))
           blob_keys.append(blob_key)
           result['deleteType'] = 'DELETE'
           result['deleteUrl'] = self.request.host_url +\
               '/?key=' + urllib.quote(blob_key, '')
           if (IMAGE_TYPES.match(result['type'])):
               try:
                   result['url'] = images.get_serving_url(
                       blob_key,
                       secure_url=self.request.host_url.startswith(
                           'https'
                       )
                   )
                   result['thumbnailUrl'] = result['url'] +\
                       THUMBNAIL_MODIFICATOR
               except:  # Could not get an image serving url
                   pass
           if 'url' not in result:
               result['url'] = self.request.host_url +\
                   '/' + blob_key + '/' + urllib.quote(
                       result['name'].encode('utf-8'), '')
       results.append(result)
   deferred.defer(cleanup, blob_keys, _countdown=EXPIRATION_TIME)
   return results
Example #14
def _put(models, countdown=0):
  batch_size = 50
  to_put = []
  keys = []
  try:
    last_index = 0
    for i, model in enumerate(models):
      to_put.append(model)
      last_index = i
      if (i + 1) % batch_size == 0:
        keys.extend(db.put(to_put))
        to_put = []
    keys.extend(db.put(to_put))
    return keys

  except apiproxy_errors.DeadlineExceededError:
    keys.extend(db.put(to_put))
    deferred.defer(_put, models[last_index + 1:], _countdown=10)
    return keys

  except apiproxy_errors.CapabilityDisabledError:
    if not countdown:
      countdown = 30
    else:
      countdown *= 2
    deferred.defer(_put, models, countdown, _countdown=countdown)
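
The CapabilityDisabledError branch doubles the countdown on every attempt, giving a simple exponential backoff. The same idea in isolation, as a sketch (work must be a picklable top-level callable):

from google.appengine.ext import deferred

def retry_with_backoff(work, countdown=0):
    try:
        work()
    except Exception:
        # First retry after 30s, then 60s, 120s, ...
        countdown = countdown * 2 if countdown else 30
        deferred.defer(retry_with_backoff, work, countdown,
                       _countdown=countdown)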
Example #15
def AddTestForBrowserVersions(mapping, browser_versions):
  """Adds tests for the given RunSiteMap the list of browser versions."""
  for browser_version in browser_versions:
    deferred.defer(AddTestForMapping,
                   mapping=mapping,
                   browser_version=browser_version,
                   _queue='tests-queue')
Example #16
    def post(self):
        self._require_admin()

        user_id = self.user_bundle.account.key.id()
        clients = self.request.get_all('client_types')
        title = self.request.get('title')
        message = self.request.get('message')
        url = self.request.get('url')
        app_version = self.request.get('app_version')

        error = ""
        if not clients:
            error = "clients"
        elif not title:
            error = "title"
        elif not message:
            error = "message"
        if error:
            self.redirect('/admin/mobile/broadcast?error={}'.format(error))
            return

        try:
            clients = [int(c) for c in clients]
            deferred.defer(NotificationHelper.send_broadcast, clients, title, message, url, app_version, _queue="admin")
            logging.info('User {} sent broadcast'.format(user_id))
        except Exception, e:
            logging.error("Error sending broadcast: {}".format(str(e)))
            logging.error("Trace: {}".format(traceback.format_exc()))
            self.redirect('/admin/mobile/broadcast?error=sending')
            return
Example #17
File: src_lb.py Project: JamesValero/yjl
  def get(self):

    try:
      deferred.defer(src_lb_update)
      self.response.out.write('Task added')
    except TaskAlreadyExistsError:
      self.response.out.write('Task existed')
Example #18
        def txn():
            try:
                marker = rpc.Get(self.key())
                marker.__class__ = ShardedTaskMarker

                queued_shards = marker[ShardedTaskMarker.QUEUED_KEY]
                processing_shards = marker[ShardedTaskMarker.RUNNING_KEY]
                queued_count = len(queued_shards)

                for j in range(min(BATCH_SIZE, queued_count)):
                    pickled_shard = queued_shards.pop()
                    processing_shards.append(pickled_shard)
                    shard = cPickle.loads(str(pickled_shard))
                    deferred.defer(
                        self.run_shard,
                        query,
                        shard,
                        operation,
                        operation_method,
                        entities_per_task=entities_per_task,
                        # Defer this task onto the correct queue with `_queue`, passing the `queue`
                        # parameter back to the function again so that it can do the same next time
                        queue=queue,
                        _queue=queue,
                        _transactional=True,
                    )

                marker.put()
            except datastore_errors.EntityNotFoundError:
                logging.error(
                    "Unable to start task %s as marker is missing",
                    self.key().id_or_name()
                )
                return
Example #19
    def txn(shards):
        marker_key = ShardedTaskMarker.get_key(identifier, query._Query__namespace)
        try:
            rpc.Get(marker_key)

            # If the marker already exists, don't do anything - just return
            return
        except datastore_errors.EntityNotFoundError:
            pass

        marker = ShardedTaskMarker(identifier, query, namespace=query._Query__namespace)

        if shards:
            for shard in shards:
                marker["shards_queued"].append(cPickle.dumps(shard))
        else:
            # No shards, then there is nothing to do!
            marker["is_finished"] = True
        marker["time_started"] = datetime.utcnow()
        marker.put()
        if not marker["is_finished"]:
            deferred.defer(
                marker.begin_processing, operation, operation_method, entities_per_task, queue,
                _transactional=True, _queue=queue
            )

        return marker_key
Example #20
  def start_instance(pool):
    """Start a new instance with a given configuration."""
    # start instance
    # defer an update load call
    instance_class = string_to_class(pool.instance_type)
    name = rfc1035(instance_class.__name__)
    # max length of a name is 63
    name = "{}-{}".format(name, uuid.uuid4())[:63]
    instance = instance_class(id=name)
    instance.pool = pool.key
    instance.zone = random.choice(LOCATIONS[pool.region]["zones"])
    instance.put()

    compute = compute_api()
    if compute:
      def update_zone(s):
        return re.sub(r"\/zones\/([^/]*)",
                      "/zones/{}".format(instance.zone),
                      s)
      instance.PARAMS.update({
        "name": name,
        "zone": update_zone(instance.PARAMS.get("zone")),
        "machineType": update_zone(instance.PARAMS.get("machineType")),
      })
      operation = compute.instances().insert(
        project=PROJECT_ID, zone=instance.zone, body=instance.PARAMS).execute()
      logging.info("Create instance operation {}".format(operation))
      instance.status = operation.get("status")
      name = "update_instance_status_{}_{}".format(instance.key.urlsafe(), int(time.time()))
      deferred.defer(update_instance_status, instance.key.urlsafe(), _countdown=STARTING_STATUS_DELAY, _name=name)
    else:
      logging.warn("No compute api defined.")
      raise AppError("No compute api defined.")
Example #21
 def fill_pool(pool):
   compute = compute_api()
   if compute:
     # find existing instances
     instance_class = string_to_class(pool.instance_type)
     name_match = ".*{}.*".format(rfc1035(instance_class.__name__))
     name_filter = "name eq {}".format(name_match)
     size = 0
     for zone in LOCATIONS[pool.region]["zones"]:
       resp = compute.instances().list(project=PROJECT_ID,
                                       zone=zone,
                                       filter=name_filter).execute()
       logging.info("List of instances {}".format(resp))
       items = resp.get("items", [])
       for info in items:
         status = info.get("status")
         # if instance is new or running add it to the pool
         if status in [InstanceStatus.RUNNING, InstanceStatus.PENDING, InstanceStatus.STAGING]:
           logging.info("instance {}".format(info))
           instance = instance_class(id=info.get("name"))
           instance.zone = info.get("zone").split("/")[-1]
           instance.status = status
           instance.address = info["networkInterfaces"][0]["accessConfigs"][0]["natIP"]
           instance.pool = pool.key
           instance.put()
           name = "update_instance_status_{}_{}".format(instance.key.urlsafe(), int(time.time()))
           deferred.defer(update_instance_status, instance.key.urlsafe(), _countdown=STARTING_STATUS_DELAY, _name=name)
           size += 1
     # start any additional instances need to meet pool min_size
     for i in range(pool.min_size - size):
       LoadBalancer.start_instance(pool)
Example #22
  def ProposalMailer(self, action):
    """Notifies admins of proposed changes and changed proposals.

    Args:
      action: string, defines what message will be sent.
    """
    current_user = users.get_current_user()
    current_user_nick = current_user.nickname()

    body = self._BuildProposalBody(os.environ.get('DEFAULT_VERSION_HOSTNAME'),
                                   self.filename)

    if action == 'proposal':
      subject = 'Proposal for %s by %s' % (self.filename, current_user_nick)
    elif action == 'approval':
      subject = 'Proposal Approved for %s by %s' % (
          self.filename, current_user_nick)
    elif action == 'rejection':
      subject = 'Proposal Rejected for %s by %s' % (
          self.filename, current_user_nick)
    else:
      logging.warning('Unknown action in ProposalMailer: %s', action)
      return

    recipient, _ = settings.Settings.GetItem('email_admin_list')
    recipient_list = [recipient, self.user]
    return_address, _ = settings.Settings.GetItem('email_sender')
    message = mail_tool.EmailMessage(to=recipient_list, sender=return_address,
                                     subject=subject, body=body)
    try:
      deferred.defer(message.send)
    except (deferred.Error, taskqueue.Error, apiproxy_errors.Error):
      logging.exception('Notification failed to send.')
Example #23
    def trans():
        to_put = list()
        service_profile = get_service_profile(service_user)
        service_profile.expiredAt = now()
        service_profile.enabled = False
        to_put.append(service_profile)
        service_identity_keys = get_service_identities_query(service_user, True)
        search_configs = db.get(
                [SearchConfig.create_key(create_service_identity_user(users.User(key.parent().name()), key.name())) for
                 key in service_identity_keys])

        svc_index = search.Index(name=SERVICE_INDEX)
        loc_index = search.Index(name=SERVICE_LOCATION_INDEX)

        for search_config in search_configs:
            if search_config:
                search_config.enabled = False
                to_put.append(search_config)
                on_trans_committed(_cleanup_search_index, search_config.service_identity_user.email(), svc_index,
                                   loc_index)

        for objects_to_put in chunks(to_put, 200):
            put_and_invalidate_cache(*objects_to_put)

        deferred.defer(cleanup_sessions, service_user, _transactional=True)
        deferred.defer(cleanup_friend_connections, service_user, _transactional=True)
Example #24
    def trans_create():
        rogerthat_profile = get_service_or_user_profile(users.User(email))
        if rogerthat_profile and isinstance(rogerthat_profile, ServiceProfile):
            from rogerthat.bizz.service import AppFailedToCreateUserProfileWithExistingServiceException
            raise AppFailedToCreateUserProfileWithExistingServiceException(email)

        user_profile = get_user_profile(app_user, cached=False)
        is_new_profile = False
        if not user_profile:
            deactivated_user_profile = get_deactivated_user_profile(app_user)
            if deactivated_user_profile:
                deferred.defer(reactivate_user_profile, deactivated_user_profile, app_user, _transactional=True)
                ActivationLog(timestamp=now(), email=app_user.email(), mobile=None, description="Reactivate user account by registering a paper loyalty card").put()
            else:
                is_new_profile = True
                avatar, image = _create_new_avatar(app_user, add_trial_overlay=False)

                user_profile = UserProfile(parent=parent_key(app_user), key_name=app_user.email())
                user_profile.name = name
                user_profile.language = language
                user_profile.avatarId = avatar.key().id()
                user_profile.app_id = app_id
                _calculateAndSetAvatarHash(user_profile, image)

        pp = ProfilePointer(key=db.Key.from_path(ProfilePointer.kind(), user_code))
        pp.user = app_user
        pp.short_url_id = short_url_id

        if is_new_profile:
            put_and_invalidate_cache(user_profile, pp, ProfilePointer.create(app_user))
        else:
            pp.put()
Example #25
    def trans_create(avatar, image, share_sid_key):
        azzert(not get_service_profile(service_user, cached=False))
        azzert(not get_default_service_identity_not_cached(service_user))

        profile = ServiceProfile(parent=parent_key(service_user), key_name=service_user.email())
        profile.avatarId = avatar.key().id()
        _calculateAndSetAvatarHash(profile, image)

        service_identity_user = create_service_identity_user(service_user, ServiceIdentity.DEFAULT)
        service_identity = ServiceIdentity(key=ServiceIdentity.keyFromUser(service_identity_user))
        service_identity.inheritanceFlags = 0
        service_identity.name = name
        service_identity.description = "%s (%s)" % (name, service_user.email())
        service_identity.shareSIDKey = share_sid_key
        service_identity.shareEnabled = False
        service_identity.creationTimestamp = now()
        service_identity.appIds = supported_app_ids

        update_result = update_func(profile, service_identity) if update_func else None

        put_and_invalidate_cache(profile, service_identity,
                                 ProfilePointer.create(service_user),
                                 ProfileHashIndex.create(service_user))

        deferred.defer(create_default_qr_templates, service_user, _transactional=True)

        return profile, service_identity, update_result
Example #26
  def _QueueChangeTestPatternsAndEmail(
      self, entity, new_patterns, old_patterns):
    deferred.defer(
        _QueueChangeTestPatternsTasks, old_patterns, new_patterns)

    user_email = users.get_current_user().email()
    subject = 'Added or updated %s: %s by %s' % (
        self._model_class.__name__, entity.key.string_id(), user_email)
    email_key = entity.key.string_id()

    email_body = _NOTIFICATION_EMAIL_BODY % {
        'key': email_key,
        'new_test_path_patterns': json.dumps(
            list(new_patterns), indent=2, sort_keys=True,
            separators=(',', ': ')),
        'old_test_path_patterns': json.dumps(
            list(old_patterns), indent=2, sort_keys=True,
            separators=(',', ': ')),
        'hostname': app_identity.get_default_version_hostname(),
        'user': user_email,
    }
    mail.send_mail(
        sender=_SENDER_ADDRESS,
        to=_NOTIFICATION_ADDRESS,
        subject=subject,
        body=email_body)
Example #27
def _QueueChangeTestPatternsTasks(old_patterns, new_patterns):
  """Updates tests that are different between old_patterns and new_patterns.

  The two arguments both represent sets of test paths (i.e. sets of data
  series). Any tests that are different between these two sets need to be
  updated.

  Some properties of TestMetadata entities are updated when they are put in the
  |_pre_put_hook| method of TestMetadata, so any TestMetadata entity that might
  need to be updated should be re-put.

  Args:
    old_patterns: An iterable of test path pattern strings.
    new_patterns: Another iterable of test path pattern strings.

  Returns:
    A pair (added_test_paths, removed_test_paths), which are, respectively,
    the test paths that are in the new set but not the old, and those that
    are in the old set but not the new.
  """
  added_patterns, removed_patterns = _ComputeDeltas(old_patterns, new_patterns)
  patterns = list(added_patterns) + list(removed_patterns)

  for i in xrange(0, len(patterns), _NUM_PATTERNS_PER_TASK):
    pattern_sublist = patterns[i:i+_NUM_PATTERNS_PER_TASK]

    deferred.defer(_GetTestPathsAndAddTask, pattern_sublist)
Example #28
File: clean.py Project: cklzqw/gaeaib
def do_clean(cursor=None):

  bq = BlobInfo.all()

  if cursor:
    bq.with_cursor(cursor)

  blob = bq.get()

  if not blob:
    return

  key = str(blob.key())

  thq = Thread.all(keys_only=True)
  thq.filter("images", key)

  th = thq.get()

  if th:
    logging.info("thread: %r" % th)
  else:
    logging.info("no thread for image %r" % key)

    blob.delete()

  deferred.defer(do_clean, bq.cursor(), _countdown=30)
Example #29
def _delete_service_log(parent_service_user, service_user):
    keys = ServiceLog.all(keys_only=True).filter("user", service_user).fetch(1000)
    if keys:
        db.delete(keys)
        deferred.defer(_delete_service_log, parent_service_user, service_user, _countdown=5)
    else:
        _decrease_and_verify(parent_service_user, service_user)
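
Deleting in fixed-size slices and re-deferring until the query comes back empty keeps each task short. A generic sketch of that loop (LogEntry is a placeholder kind):

from google.appengine.ext import db, deferred

def purge_logs():
    keys = LogEntry.all(keys_only=True).fetch(1000)  # LogEntry is a placeholder
    if keys:
        db.delete(keys)
        deferred.defer(purge_logs, _countdown=5)  # small pause between batches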
Example #30
def view_that_defers(request):
    from google.appengine.ext.deferred import defer
    from django.http import HttpResponse

    defer(do_something)

    return HttpResponse("OK")
Example #31
 def get(self):
     deferred.defer(controller.delete_all_step_records,
                    _queue='summary-delete',
                    _target='stats-backend')
     self.response.out.write('deleted all')
Example #32
                                     youtube_id=youtube_id,
                                     language=lang,
                                     json=json)
                new.put()
                report['writes'] += 1
            else:
                logging.info('%s content already up-to-date' % key_name)
        except Exception, e:
            logging.error('%s subtitles fetch failed: %s' % (key_name, e))
            report['errors'] += 1

    # Generate a report if there is nothing left to process

    if len(videos) < BATCH_SIZE:
        deferred.defer(_task_report_handler,
                       uid,
                       report,
                       _name='%s_report' % uid,
                       _queue=TASK_QUEUE)
    else:
        next_id = task_id + 1
        cursor = query.cursor()
        deferred.defer(_task_handler,
                       uid,
                       next_id,
                       cursor,
                       report,
                       _name='%s_%s' % (uid, next_id),
                       _queue=TASK_QUEUE,
                       _countdown=DEFER_SECONDS)
Example #33
 def start(self):
     """ Don't do anything in a time constrained view. Kick of the first
     deferred task. """
     deferred.defer(self.map, _queue=self.queue)
Example #34
def process_notification(notification, now, stats):
    notification_key = notification.key()
    policy = None
    stats.started += 1

    # Treat as module-protected. pylint: disable-msg=protected-access
    if notification._done_date:
        _LOG.info(
            'Skipping offline processing of notification with key %s; already '
            'done at %s', notification_key, notification._done_date)
        stats.skipped_already_done += 1
        return

    if notifications.Manager._is_still_enqueued(notification, now):
        _LOG.info(
            'Skipping offline processing of notification with key %s; still on '
            'queue (last enqueued: %s)', notification_key,
            notification._last_enqueue_date)
        stats.skipped_still_enqueued += 1
        return

    payload_key = db.Key.from_path(
        notifications.Payload.kind(),
        notifications.Payload.key_name(notification.to, notification.intent,
                                       notification.enqueue_date))
    payload = db.get(payload_key)

    if not payload:
        _LOG.error(
            'Could not process notification with key %s; associated payload with '
            'key %s not found', notification_key, payload_key)
        stats.missing_payload += 1
        return

    if notifications.Manager._is_too_old_to_reenqueue(
            notification.enqueue_date, now):
        stats.too_old += 1
        exception = notifications.NotificationTooOldError((
            'Notification %s with enqueue_date %s too old to re-enqueue at %s; '
            'limit is %s days') % (
                notification_key,
                notification.enqueue_date,
                now,
                notifications._MAX_RETRY_DAYS,
            ))
        notifications.Manager._mark_failed(notification,
                                           now,
                                           exception,
                                           permanent=True)

    if notification._fail_date or notification._send_date:
        policy = notifications._RETENTION_POLICIES.get(
            notification._retention_policy)
        notifications.Manager._mark_done(notification, now)

        if policy:
            policy.run(notification, payload)
            stats.policy_run += 1
        else:
            _LOG.warning(
                'Cannot apply retention policy %s to notification %s and payload '
                '%s; policy not found. Existing policies are: %s',
                notification._retention_policy, notification_key, payload_key,
                ', '.join(sorted(notifications._RETENTION_POLICIES.keys())))
            stats.missing_policy += 1
        db.put([notification, payload])
    else:
        notifications.Manager._mark_enqueued(notification, now)
        db.put(notification)
        deferred.defer(
            notifications.Manager._transactional_send_mail_task,
            notification_key,
            payload_key,
            _retry_options=notifications.Manager._get_retry_options())
        stats.reenqueued += 1
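
_retry_options attaches an explicit taskqueue retry policy to the deferred task instead of the queue default. A sketch with illustrative limits (send_one is hypothetical):

from google.appengine.api.taskqueue import TaskRetryOptions
from google.appengine.ext import deferred

opts = TaskRetryOptions(task_retry_limit=5, min_backoff_seconds=10)
deferred.defer(send_one, notification_key, _retry_options=opts)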
Example #35
    def post(self):
        data = json.loads(self.request.body)
        name, phone, email = map(data['info'].get, ('name', 'phone', 'email'))
        phone = '7' + ''.join(c for c in phone if '0' <= c <= '9')

        namespace = self._find_namespace(name)
        password = "******" % random.randint(0, 10000)
        CompanyUser.create_user(namespace,
                                namespace=namespace,
                                password_raw=password,
                                login=namespace,
                                rights=CompanyUser.RIGHTS_MASK_ADMIN)

        namespace_manager.set_namespace(namespace)
        cfg = Config(id=1)
        cfg.APP_NAME = name
        cfg.DELIVERY_PHONES = [phone]
        cfg.DELIVERY_EMAILS = ['*****@*****.**', email]
        cfg.SUPPORT_EMAILS = [email]
        cfg.ACTION_COLOR = "FF25B8CD"
        cfg.put()

        delivery_slot_keys = [
            DeliverySlot(name=u'Сейчас', slot_type=0, value=0).put(),
            DeliverySlot(name=u'Через 5 минут', slot_type=0, value=5).put(),
            DeliverySlot(name=u'Через 10 минут', slot_type=0, value=10).put(),
            DeliverySlot(name=u'Через 15 минут', slot_type=0, value=15).put(),
            DeliverySlot(name=u'Через 20 минут', slot_type=0, value=20).put(),
            DeliverySlot(name=u'Через 25 минут', slot_type=0, value=25).put(),
            DeliverySlot(name=u'Через 30 минут', slot_type=0, value=30).put()
        ]

        menu = data['menu']
        init = MenuCategory(category=None, title="Init")
        init.put()
        for i, category_dict in enumerate(menu):
            MenuCategory.generate_category_sequence_number(
            )  # only to increase counter
            category = MenuCategory(title=category_dict["title"],
                                    sequence_number=i,
                                    category=init.key)
            category.put()
            for j, item in enumerate(category_dict["items"]):
                MenuItem(category=category.key,
                         title=item["title"],
                         description=item["description"],
                         picture=item["imageUrl"],
                         price=int(round(float(item["price"]) * 100)),
                         sequence_number=j).put()
            for _ in category.get_items():
                category.generate_sequence_number()  # only to increase counter

        venue_dict = data['venue']
        venue = Venue(title=venue_dict['title'],
                      description=venue_dict['address'],
                      coordinates=ndb.GeoPt(venue_dict['lat'],
                                            venue_dict['lng']),
                      active=True)
        venue.update_address()
        venue.schedule = Schedule(days=[
            DaySchedule(
                weekday=i, start=datetime.time(0, 0), end=datetime.time(0, 0))
            for i in xrange(1, 8)
        ])

        for delivery_type in (SELF, IN_CAFE):
            delivery = DeliveryType.create(delivery_type)
            delivery.put()
            delivery.status = STATUS_AVAILABLE
            delivery.max_time = DAY_SECONDS + HOUR_SECONDS  # extra hour allows ordering for tomorrow
            delivery.delivery_slots = delivery_slot_keys
            venue.delivery_types.append(delivery)

        venue.put()

        PaymentType(id=str(CASH_PAYMENT_TYPE),
                    title="cash",
                    status=STATUS_AVAILABLE).put()

        deferred.defer(_notify_sms, phone, namespace, password)
        deferred.defer(_notify_email, email, phone, name, namespace, password)

        self.render_json({'login': namespace, 'password': password})
Example #36
 def get(self):
     deferred.defer(controller.step_cleanup,
                    _queue='step-operations',
                    _target='stats-backend')
     self.response.out.write('steps cleaned')
Example #37
 def get(self):
     deferred.defer(controller.get_steps,
                    disable_cache_lookup=True,
                    _queue='step-operations',
                    _target='stats-backend')
     self.response.out.write('steps cached')
Example #38
def update_user_data(sik, service_identity, email, app_id, address, notifications, collections):
    deferred.defer(_update_user_data, sik, service_identity, email, app_id, address, notifications, collections, _transactional=ndb.in_transaction())
Example #39
File: connexus.py Project: oel92/APT
    def handle_upload(self):
        results = []
        blob_keys = []
        for name, fieldStorage in self.request.POST.items():
            if type(fieldStorage) is unicode:
                continue
            result = {}
            result['name'] = re.sub(r'^.*\\', '', fieldStorage.filename)
            result['type'] = fieldStorage.type
            result['size'] = self.get_file_size(fieldStorage.file)
            if self.validate(result):
                blob_key = str(self.write_blob(fieldStorage.value, result))
                blob_keys.append(blob_key)
                result['deleteType'] = 'DELETE'
                result['deleteUrl'] = self.request.host_url +\
                    '/?key=' + urllib.quote(blob_key, '')
                if IMAGE_TYPES.match(result['type']):
                    try:
                        result['url'] = images.get_serving_url(
                            blob_key,
                            secure_url=self.request.host_url.startswith('https')
                        )
                        result['thumbnailUrl'] = result['url'] +\
                            THUMBNAIL_MODIFICATOR

                        # Store an Image entity under the stream this upload
                        # belongs to, with a random location attached.
                        urlString = self.request.get("stream_key")
                        streamKey = ndb.Key(urlsafe=urlString)
                        image = Image(parent=streamKey)
                        image.blob_url = result['url']
                        image.blob_key = BlobKey(blob_key)
                        image.comment = ""
                        image.longitude = uniform(-180, 180)
                        image.latitude = uniform(-70, 70)
                        image.put()

                        # Update the parent stream's bookkeeping fields.
                        stream_obj = streamKey.get()
                        stream_obj.date_updated = datetime.datetime.now()
                        stream_obj.date_only_updated = datetime.datetime.now().date()
                        stream_obj.num_pics = stream_obj.num_pics + 1
                        stream_obj.put()
                    except:  # Could not get an image serving url
                        pass
                if 'url' not in result:
                    result['url'] = self.request.host_url +\
                        '/' + blob_key + '/' + urllib.quote(
                            result['name'].encode('utf-8'), '')
            results.append(result)
        deferred.defer(
            cleanup,
            blob_keys,
            _countdown=EXPIRATION_TIME
        )
        return results
Example #40
def send_collection_message(sik, service_identity, email, app_id, message):
    json_rpc_id = guid()
    deferred.defer(_send_collection_message, sik, service_identity, email, app_id, message, json_rpc_id, _transactional=ndb.in_transaction())
Example #41
def check_bq_job(job_id, item_ids, suggestions_key, page_token):
    bq = BigQueryClient()
    logging.info("Polling suggestion job {}.".format(job_id))
    # TODO: catch 404 errors for jobs created 24+ hours ago, retry with new jobid
    try:
        bq_json = bq.get_async_job_results(job_id, page_token,
                                           MAX_RESULTS_PER_SUGGESTIONS_QUERY)
    except Exception as e:
        logging.error("Error from BigQuery with item_id={}.".format(item_ids))
        raise deferred.PermanentTaskFailure(e)
    if not bq_json['jobComplete']:
        logging.info("- job not completed yet.")
        deferred.defer(check_bq_job,
                       job_id,
                       item_ids,
                       suggestions_key,
                       "",
                       _countdown=5)
        return
    if 'rows' not in bq_json:
        logging.error(
            u"Invalid json for BigQueryTable. Job for {} is probably "
            u"invalid (bad item_id?).\n"
            u"JSON:\n"
            u"{}".format(item_ids, bq_json))
        raise deferred.PermanentTaskFailure("No rows in BigQuery response for "
                                            "{}.".format(item_ids))
    table = BigQueryTable(bq_json)
    item_ids_array = item_ids.split('|')
    # Get the consolidated book for each item_id
    suggestions = suggestions_key.get()
    assert isinstance(suggestions, SuggestionsRecord)
    for row in table.data:
        item_id = row[0]
        prediction = float(row[1])
        if item_id in item_ids_array:
            continue  # Original book.
        consolidated_book_key = BookRecord.query(
            BookRecord.item_id_array == item_id).get(keys_only=True)
        if not consolidated_book_key:
            logging.info(
                "No consolidated book with item_id '{}' found.".format(
                    item_id))
            continue
        if consolidated_book_key not in suggestions.books:
            suggestions.books.append(consolidated_book_key)
            suggestions.books_prediction.append(prediction)
        if len(suggestions.books) >= 1000:
            break
    next_page_token = bq_json.get('pageToken', "")
    if next_page_token != "" and len(suggestions.books) < 1000:
        suggestions.put()
        deferred.defer(check_bq_job, job_id, item_ids, suggestions_key,
                       next_page_token)
        logging.info("Suggestions for item_ids '{}' partly fetched. "
                     "Running again.".format(item_ids))
    else:
        suggestions.completed = True
        suggestions.json_generation_started = True
        suggestions.put()
        logging.info(
            "Suggestions for item_ids '{}' completed and saved.".format(
                item_ids))
        deferred.defer(create_suggestions_json, suggestions.key)
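
Two deferred idioms combine here: re-deferring with a short _countdown to poll an unfinished job, and raising deferred.PermanentTaskFailure so the queue stops retrying a task that cannot succeed. A stripped-down sketch (fetch_status and handle_results are assumed helpers):

from google.appengine.ext import deferred

def poll_job(job_id):
    status = fetch_status(job_id)  # assumed external status lookup
    if status == 'RUNNING':
        deferred.defer(poll_job, job_id, _countdown=5)
        return
    if status == 'FAILED':
        # PermanentTaskFailure marks the task done instead of retrying it.
        raise deferred.PermanentTaskFailure('job %s failed' % job_id)
    handle_results(job_id)  # assumed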
Example #42
 def get(self):
     """Rollup logs into weeks"""
     deferred.defer(rollup_events, _queue='rollup')
Example #43
    def change_view(self, handler, object_id, extra_context=None):
        """View zum Bearbeiten eines vorhandenen Objekts"""

        obj = self.get_object(object_id)
        if obj is None:
            raise gaetk.handler.HTTP404_NotFound

        model_class = type(obj)
        form_class = self.get_form()

        if handler.request.get('delete') == 'yesiwant':
            # The user has asked to delete this object.
            key = compat.xdb_key(obj)
            data = compat.xdb_to_protobuf(obj)
            dblayer = 'ndb' if compat.xdb_is_ndb(obj) else 'db'
            archived = DeletedObject(key_name=str(key),
                                     model_class=model_class.__name__,
                                     old_key=str(key),
                                     dblayer=dblayer,
                                     data=data)
            archived.put()
            # Indexing for the admin full-text search
            from gaetk.admin.search import remove_from_index
            if compat.xdb_is_ndb(obj):
                obj.key.delete()
                deferred.defer(remove_from_index, obj.key)
            else:
                obj.delete()
                deferred.defer(remove_from_index, obj.key())

            handler.add_message(
                'warning',
                u'<strong>{} {}</strong> wurde gelöscht. <a href="{}">Objekt wiederherstellen!</a>'
                .format(compat.xdb_kind(self.model),
                        compat.xdb_id_or_name(key), archived.undelete_url()))
            raise gaetk.handler.HTTP302_Found(
                location='/admin/%s/%s/' %
                (util.get_app_name(model_class), compat.xdb_kind(model_class)))

        # If the form was submitted and is valid, save the modified object
        # and redirect to the overview page.
        if handler.request.method == 'POST':
            form = form_class(handler.request.POST)
            if form.validate():
                key_name = compat.xdb_id_or_name(compat.xdb_key(obj))
                self.handle_blobstore_fields(handler, obj, key_name)
                if hasattr(obj, 'update'):
                    obj.update(form.data)
                else:
                    form.populate_obj(obj)
                key = obj.put()
                handler.add_message(
                    'success',
                    u'<strong><a href="/admin/{}/{}/{}/">{} {}</a></strong> wurde gespeichert.'
                    .format(util.get_app_name(self.model),
                            compat.xdb_kind(self.model),
                            compat.xdb_str_key(key),
                            compat.xdb_kind(self.model),
                            compat.xdb_id_or_name(key)))

                # Indexing for the admin full-text search
                from gaetk.admin.search import add_to_index
                deferred.defer(add_to_index, key)
                raise gaetk.handler.HTTP302_Found(
                    location='/admin/%s/%s/' % (util.get_app_name(model_class),
                                                compat.xdb_kind(model_class)))
        else:
            form = form_class(obj=obj)

        template_values = {
            'object': obj,
            'form': form,
            'field_args': self.field_args,
            'admin_class': self
        }
        if extra_context is not None:
            template_values.update(extra_context)
        handler.render(template_values, self.get_template('change'))
Example #44
 def put(self):
     isNew = not self.is_saved()
     db.Model.put(self)
     self.putSearchResult()
     if isNew:
         deferred.defer(SiteCounters.incrementCounter, 'campaigns')
Example #45
 def get(self):
     event = self.request.get('event', 'test')
     deferred.defer(test_stream.stream.populate, 'timedtext.xml', event)
Example #46
    def add_view(self, handler, extra_context=None):
        """View zum Hinzufügen eines neuen Objekts"""
        form_class = self.get_form()

        # By default we let App Engine generate a key for the model
        # automatically. The admin class can, however, specify a field via
        # 'db_key_field=[propertyname]' whose form value is then used as the
        # key when the instance is created.
        admin_class = site.get_admin_class(self.model)
        key_field = None
        if admin_class and hasattr(admin_class, 'db_key_field'):
            key_field = admin_class.db_key_field

        # If the form was submitted and is valid, save the new object
        # and redirect to the overview page.
        if handler.request.method == 'POST':
            form = form_class(handler.request.POST)

            if form.validate():
                form_data = self._convert_property_data(form.data)
                key_name = form_data.get(key_field) if key_field else None

                # TODO: use util.create_instance or remove this
                if hasattr(self.model, 'create'):
                    factory = self.model.create
                else:
                    factory = self.model

                if issubclass(self.model, ndb.Model):
                    obj = factory(id=key_name, **form_data)
                else:
                    obj = factory(key_name=key_name, **form_data)

                # On creation we have to save once anyway,
                # otherwise the ID is not yet known.
                if self.blob_upload_fields and key_name is None:
                    key_name = compat.xdb_id_or_name(obj.put())
                    self.handle_blobstore_fields(handler, obj, key_name)

                key = obj.put()
                handler.add_message(
                    'success',
                    u'<strong><a href="/admin/{}/{}/{}/">{} {}</a></strong> wurde angelegt.'
                    .format(util.get_app_name(self.model),
                            compat.xdb_kind(self.model),
                            compat.xdb_str_key(key),
                            compat.xdb_kind(self.model),
                            compat.xdb_id_or_name(key)))

                # Indexing for the admin full-text search
                from gaetk.admin.search import add_to_index
                deferred.defer(add_to_index, key)

                # Call post-create-hooks
                if isinstance(self.post_create_hooks, collections.Iterable):
                    for hook in self.post_create_hooks:
                        deferred.defer(util.call_hook, hook,
                                       compat.xdb_str_key(key))

                raise gaetk.handler.HTTP302_Found(location='..')
        else:
            form = form_class()

        template_values = {
            'form': form,
            'field_args': self.field_args,
            'admin_class': self
        }
        if extra_context is not None:
            template_values.update(extra_context)
        handler.render(template_values, self.get_template('add'))
Example #47
def _FindAndRestartJobs():
  jobs = _FindFrozenJobs()
  opts = TaskRetryOptions(task_retry_limit=1)

  for j in jobs:
    deferred.defer(_ProcessFrozenJob, j.job_id, _retry_options=opts)
Example #48
 def Send(self, to, subject, text_body, html_body, reply_to=None):
     if self.defer:
         deferred.defer(_send_mail, to, subject, text_body, html_body,
                        reply_to)
     else:
         _send_mail(to, subject, text_body, html_body, reply_to)
Example #49
 def get(self):
     for bm in Bookmarks.query():
         deferred.defer(add_domain, bm.key, _target="worker", _queue="admin")
Example #50
def defer(callable, *args, **kwargs):
    if ndb.in_transaction() and kwargs.get('_transactional') is not False:
        kwargs['_transactional'] = True
    deferred.defer(callable, *args, **kwargs)
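
A usage sketch for this wrapper: inside an ndb transaction it silently upgrades the call to a transactional task; outside one it behaves exactly like deferred.defer (notify_watchers is hypothetical):

from google.appengine.ext import ndb

@ndb.transactional
def update_and_notify(key):
    entity = key.get()
    entity.touched = True
    entity.put()
    defer(notify_watchers, key)  # picks up _transactional=True automatically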
Example #51
 def get(self): 
     for feed in Feeds.query(): 
         if feed.notify == 'digest': 
             deferred.defer(utils.feed_digest, feed.key, _target="worker", _queue="admin")
Example #52
 def get(self):
     feed = Feeds.get_by_id(int(self.request.get('feed')))
     deferred.defer(utils.pop_feed, feed.key, _queue="admin")
Example #53
def DeferCommitBlockableChangeSet(
    blockable_key, tail_defer=True, tail_defer_count=0, countdown=0):
  deferred.defer(
      _CommitBlockableChangeSet, blockable_key, tail_defer=tail_defer,
      tail_defer_count=tail_defer_count,
      _queue=constants.TASK_QUEUE.BIT9_COMMIT_CHANGE, _countdown=countdown)
Example #54
 def get(self): 
     for ui in UserInfo.query(): 
         if ui.daily: 
             deferred.defer(utils.daily_digest, ui.user, _target="worker", _queue="admin")
Example #55
 def post(self):
     deferred.defer(self._get_cmd().run)
     self.response.write('Command started.')
Example #56
 def get(self): 
     for feed in Feeds.query(): 
         deferred.defer(utils.pop_feed, feed.key, _target="worker", _queue="admin")
Example #57
    def Midpoint(cls, commit_a, commit_b):
        """Return a Commit halfway between the two given Commits.

    Uses Gitiles to look up the commit range.

    Args:
      commit_a: The first Commit in the range.
      commit_b: The last Commit in the range.

    Returns:
      A new Commit representing the midpoint.
      The commit before the midpoint if the range has an even number of commits.
      commit_a if the Commits are the same or adjacent.

    Raises:
      NonLinearError: The Commits are in different repositories or commit_a does
        not come before commit_b.
    """
        if commit_a == commit_b:
            return commit_a

        if commit_a.repository != commit_b.repository:
            raise NonLinearError(
                'Repositories differ between Commits: %s vs %s' %
                (commit_a.repository, commit_b.repository))

        # We need to get the full list of commits in between two git hashes, and
        # only look into the chain formed by following the first parents of each
        # commit. This gives us a linear view of the log even in the presence of
        # merge commits.
        commits = []

        # The commit_range by default is in reverse-chronological (latest commit
        # first) order. This means we should keep following the first parent to get
        # the linear history for a branch that we're exploring.
        expected_parent = commit_b.git_hash
        commit_range = gitiles_service.CommitRange(commit_a.repository_url,
                                                   commit_a.git_hash,
                                                   commit_b.git_hash)
        for commit in commit_range:
            # Skip commits until we find the parent we're looking for.
            if commit['commit'] == expected_parent:
                commits.append(commit)
                if 'parents' in commit and len(commit['parents']):
                    expected_parent = commit['parents'][0]

        # We don't handle NotFoundErrors because we assume that all Commits either
        # came from this method or were already validated elsewhere.
        if len(commits) == 0:
            raise NonLinearError(
                'Commit "%s" does not come before commit "%s".' %
                (commit_a, commit_b))

        if len(commits) == 1:
            return commit_a

        commits.pop(0)  # Remove commit_b from the range.

        deferred.defer(_CacheCommitDetails, commit_a.repository,
                       commits[len(commits) / 2]['commit'])

        return cls(commit_a.repository, commits[len(commits) / 2]['commit'])
Example #58
def tell_queue(chat_id, msg, kb=None, markdown=True, inlineKeyboardMarkup=False, one_time_keyboard=True):
    deferred.defer(tell, chat_id, msg, kb=kb, markdown=markdown,
                   inlineKeyboardMarkup=inlineKeyboardMarkup, one_time_keyboard=one_time_keyboard,
                   _queue="messages-queue")
Example #59
def pull_data(user, third_party_user):
    deferred.defer(pull_shots, user, third_party_user)
Example #60
def close_channel(channel_id):
    # Sets /channels/<channel_id>/_meta/status to "closed", and schedules a
    # deferred task to remove_channel.
    result = fb_put("/channels/" + channel_id + "/_meta/status", "closed")
    deferred.defer(remove_channel, channel_id, _countdown=30)