Code example #1
def WaitUntilDownloadAllowed(master_name,
                             timeout_seconds=90):  # pragma: no cover
    """Waits until next download from the specified master is allowed.

  Returns:
    True if download is allowed to proceed.
    False if download is not still allowed when the given timeout occurs.
  """
    client = memcache.Client()
    key = _MEMCACHE_MASTER_DOWNLOAD_LOCK % master_name
    deadline = time.time() + timeout_seconds
    download_interval_seconds = (waterfall_config.GetDownloadBuildDataSettings(
    ).get('download_interval_seconds'))
    memcache_master_download_expiration_seconds = (
        waterfall_config.GetDownloadBuildDataSettings().get(
            'memcache_master_download_expiration_seconds'))

    while True:
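        # gets() records a CAS id for the key; the cas() below succeeds only
        # if no other process has updated the key since that read.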
        info = client.gets(key)
        if not info or time.time() - info['time'] >= download_interval_seconds:
            new_info = {'time': time.time()}
            if not info:
                success = client.add(
                    key,
                    new_info,
                    time=memcache_master_download_expiration_seconds)
            else:
                success = client.cas(
                    key,
                    new_info,
                    time=memcache_master_download_expiration_seconds)

            if success:
                logging.info('Download from %s is allowed. Waited %s seconds.',
                             master_name,
                             (time.time() + timeout_seconds - deadline))
                return True

        if time.time() > deadline:
            logging.info('Download from %s is not allowed. Waited %s seconds.',
                         master_name, timeout_seconds)
            return False

        logging.info('Waiting to download from %s', master_name)
        time.sleep(download_interval_seconds + random.random())
Code example #2
def put(models, **kwargs):
    """Store one or more Model instance, every stored
    models are pushed also into memcache.

    TODO(sahid): Needs a better doc.
    """
    memclient = memcache.Client()
    for retry in xrange(DATASTORE_NB_RETRY):
        try:
            models, multiple = datastore.NormalizeAndTypeCheck(models, db.Model)
            if not any(models):
                return [] if multiple else None  # Nothing to do.
            async = db.put_async(models, **kwargs)
            try:
                debug("Needs to put models=%s" % ','.join(m.__class__.__name__ for m in models))
                #TODO(sahid): Needs factorization.
                k = [unicode(x.key()) for x in models]
                v = serialize(models)
                memclient.set_multi(dict(zip(k, v)),
                                    time=MEMCACHE_TIME,
                                    key_prefix=MEMCACHE_PREFIX)
                ret = async.get_result()
            except datastore_errors.BadKeyError:
                debug("Incomplete key passed, "
                      "can't store in memcached before put in the datastore.")
                # Incomplete key
                # It's better to use key_name with mp.
                ret = async.get_result()
                if ret:
                    k = map(unicode, ret)
                    v = serialize(models)
                    memclient.set_multi(dict(zip(k, v)),
                                        time=MEMCACHE_TIME,
                                        key_prefix=MEMCACHE_PREFIX)
            if multiple:
                return ret
            return ret[0]
        except (db.Timeout,
                db.TransactionFailedError,
                apiproxy_errors.ApplicationError,
                apiproxy_errors.DeadlineExceededError), e:
            logging.warn("Error during the put process, "
                         "retry %d in %.2fs", retry, DATASTORE_TIME_RETRY)
            logging.debug(e.message)
            time.sleep(DATASTORE_TIME_RETRY)
    # All retries failed; log the last exception with its traceback.
    logging.exception(e)
Code example #3
def __init__(self, conn=None, auto_batcher_class=AutoBatcher, config=None):
    # NOTE: If conn is not None, config is only used to get the
    # auto-batcher limits.
    if conn is None:
        conn = model.make_connection(config)
    self._conn = conn
    self._auto_batcher_class = auto_batcher_class
    # Get the get/put/delete limits (defaults 1000, 500, 500).
    # Note that the explicit config passed in overrides the config
    # attached to the connection, if it was passed in.
    max_get = (datastore_rpc.Configuration.max_get_keys(
        config, conn.config) or datastore_rpc.Connection.MAX_GET_KEYS)
    max_put = (datastore_rpc.Configuration.max_put_entities(
        config, conn.config) or datastore_rpc.Connection.MAX_PUT_ENTITIES)
    max_delete = (datastore_rpc.Configuration.max_delete_keys(
        config, conn.config) or datastore_rpc.Connection.MAX_DELETE_KEYS)
    # Create the get/put/delete auto-batchers.
    self._get_batcher = auto_batcher_class(self._get_tasklet, max_get)
    self._put_batcher = auto_batcher_class(self._put_tasklet, max_put)
    self._delete_batcher = auto_batcher_class(self._delete_tasklet,
                                              max_delete)
    # We only have a single limit for memcache (default 1000).
    max_memcache = (ContextOptions.max_memcache_items(config, conn.config)
                    or datastore_rpc.Connection.MAX_GET_KEYS)
    # Create the memcache auto-batchers.
    self._memcache_get_batcher = auto_batcher_class(
        self._memcache_get_tasklet, max_memcache)
    self._memcache_set_batcher = auto_batcher_class(
        self._memcache_set_tasklet, max_memcache)
    self._memcache_del_batcher = auto_batcher_class(
        self._memcache_del_tasklet, max_memcache)
    self._memcache_off_batcher = auto_batcher_class(
        self._memcache_off_tasklet, max_memcache)
    # Create a list of batchers for flush().
    self._batchers = [
        self._get_batcher,
        self._put_batcher,
        self._delete_batcher,
        self._memcache_get_batcher,
        self._memcache_set_batcher,
        self._memcache_del_batcher,
        self._memcache_off_batcher,
    ]
    self._cache = {}
    self._memcache = memcache.Client()
Code example #4
File: lock_util.py Project: yildizzorlu/engblog
def memcache_util_get_multi_async_with_deadline(
        keys,
        key_prefix='',
        namespace=None,
        deadline=DEFAULT_MEMCACHE_GET_DEADLINE):
    """Like get_multi_async(), but fails if it takes longer than deadline.

    Asynchronously looks up multiple keys from memcache in one
    operation.  Deadline is in seconds and defaults to a reasonable
    value unless set explicitly.

    See memcache.Client().get_multi_async documentation for details.
    """
    rpc = memcache.create_rpc(deadline=deadline)
    return memcache.Client().get_multi_async(keys,
                                             key_prefix=key_prefix,
                                             namespace=namespace,
                                             rpc=rpc)
Code example #5
def update_active_tas(ta=None):
    client = memcache.Client()
    if client.get(ACTIVE_TAS_KEY) is None:
        client.add(ACTIVE_TAS_KEY, {})
    while True:
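        # gets()/cas() retry loop: cas() only succeeds if no other process
        # has updated the key since our gets(), otherwise we start over.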
        d = client.gets(ACTIVE_TAS_KEY)
        now = datetime.utcnow()
        if ta is not None:
            d[ta] = now
        to_remove = []
        for k, v in d.iteritems():
            if now - v > MAX_INACTIVITY_TIME:
                to_remove.append(k)
        for k in to_remove:
            del d[k]
        if client.cas(ACTIVE_TAS_KEY, d):
            break
    return len(d)
Code example #6
def __init__(self, tasks=None, time=0, namespace=None, memcache=None, runner_type=None):
    """
    Constructs a caching multi task runner.

    @tasks a list of tasks to provide caching over
    @time expiration time in seconds, as defined by memcache.set_multi()
    @namespace a memcache namespace, as defined by memcache.set_multi()
    @memcache the memcache implementation to use, defaults to google.appengine.api.memcache.Client
    @runner_type the runner to use for tasks that are not found in cache, defaults to AsyncMultiTask
    """
    if tasks is None:
        super(CachedMultiTask, self).__init__()
    else:
        super(CachedMultiTask, self).__init__(tasks)
    self.time = time
    self.namespace = namespace
    self.memcache = memcache or memcache_builtin.Client()
    self.runner_type = runner_type or AsyncMultiTask
Code example #7
    def __init__(self, resource, client, max_requests, expire=None):
        """
        Class initialization method checks if the Rate Limit algorithm is
        actually supported by the installed Redis version and sets some
        useful properties.

        If Rate Limit is not supported, it raises an Exception.

        :param resource: resource identifier string (i.e. ‘user_pictures’)
        :param client: client identifier string (i.e. ‘192.168.0.10’)
        :param max_requests: integer (i.e. ‘10’)
        :param expire: seconds to wait before resetting counters (i.e. ‘60’)
        """
        self._memcached = memcache.Client()

        self._rate_limit_key = "rate_limit:{0}_{1}".format(resource, client)
        self._max_requests = max_requests
        self._expire = expire or 1
Code example #8
def get_userprefs(user_id=None):
    if not user_id:
        user = users.get_current_user()
        if not user:
            return None

        user_id = user.user_id()

    userprefs = memcache.Client().get(user_id, namespace='UserPrefs')
    if not userprefs:
        key = db.Key.from_path('UserPrefs', user_id)
        userprefs = db.get(key)
        if userprefs:
            userprefs.cache_set()
        else:
            userprefs = UserPrefs(key_name=user_id)

    return userprefs
Code example #9
def release_repo_scan_lock(project, repo, pipeline_id):  # pragma: no cover
    client = memcache.Client()
    key = MEMCACHE_REPO_SCAN_LOCK % models.Repo.repo_id(project, repo)
    while True:
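        # Re-read with gets() each iteration so cas() can detect and retry
        # concurrent updates to the lock counter.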
        counter = client.gets(key)
        if not counter:
            logging.info('tried to release %s but it doesn\'t exist' % key)
            return
        if counter['counter'] == 0 or counter['pipeline_id'] != pipeline_id:
            logging.info('counter is 0 or pipeline %s doesn\'t match: %s' %
                         (pipeline_id, counter))
            return
        new_counter = {
            'pipeline_id': pipeline_id,
            'counter': 0,
        }
        if client.cas(key, new_counter, time=MEMCACHE_REPO_SCAN_EXPIRATION):
            logging.info('cas succeeded %s' % new_counter)
            return
Code example #10
File: stored_object.py Project: wanghui0720/catapult
  def GetAsync(cls, key):
    """Gets value in memcache."""
    keys = cls._GetCacheKeyList(key)
    head_key = cls._GetCacheKey(key)
    client = memcache.Client()
    cache_values = yield client.get_multi_async(keys)
    # Check that we have all the memcache values.
    if len(keys) != len(cache_values) or head_key not in cache_values:
      raise ndb.Return(None)

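    # The head key stores the number of chunks; the pickled value was split
    # across the remaining keys and is reassembled below.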
    serialized = ''
    cache_size = cache_values[head_key]
    keys.remove(head_key)
    for key in keys[:cache_size]:
      if key not in cache_values:
        raise ndb.Return(None)
      if cache_values[key] is not None:
        serialized += cache_values[key]
    raise ndb.Return(pickle.loads(serialized))
Code example #11
def _UpdateCachedItemIds(feed_url, time_period_in_days):
    oldest_date = (datetime.now() - timedelta(days=time_period_in_days) -
                   TIME_BETWEEN_UPDATES)
    feed_items = yield FeedItem.query(
        FeedItem.feed_url == feed_url, FeedItem.published_date >=
        oldest_date).order(-FeedItem.published_date).fetch_async(
            100, projection=['item_id', 'published_date', 'retrieved_date'])
    items_proto = cache_pb2.FeedItems()
    for i in feed_items:
        item_proto = items_proto.feed_item.add()
        item_proto.item_id = i.item_id
        item_proto.published_timestamp_millis = _TimeToMillis(i.published_date)
        item_proto.retrieved_timestamp_millis = _TimeToMillis(
            i.retrieved_date or i.published_date)
    client = memcache.Client()
    client.set(
        ITEM_ID_CACHE_PREFIX + str(time_period_in_days) + ':' + feed_url,
        items_proto.SerializeToString())
    raise ndb.Return(items_proto)
Code example #12
def remove_channel_token(self, token_id):
    client = memcache.Client()
    user_token_list = client.gets(key=self.key.id(),
                                  namespace='user_tokens')
    if user_token_list:
        for token in user_token_list:
            if token.id == token_id:
                while True:  # Retry Loop
                    user_token_list = client.gets(key=self.key.id(),
                                                  namespace='user_tokens')
                    if token in user_token_list:  # Check to make sure nothing else removed it
                        user_token_list.remove(token)
                        if client.cas(key=self.key.id(),
                                      value=user_token_list,
                                      namespace='user_tokens'):
                            break
                    else:
                        break
                break
Code example #13
    def get(self):
        self.response.headers['Content-Type'] = 'text/plain'
        cache = memcache.Client()

        # check if there's already a match for this player
        player_id = self.request.get("player_id")
        match = cache.get("matches:" + player_id)
        if match:
            self.response.write(json.dumps(match))
            return

        # otherwise, check to see if there's another player waiting for a match
        while True:
            other_player_id = cache.gets("matchmaker")
            if other_player_id and other_player_id != player_id:
                # remove this match from cache to indicate that we're taking it
                if not cache.cas("matchmaker", False):
                    continue
                match_mdl = Match()
                match_mdl.player_one_id = player_id
                match_mdl.player_two_id = other_player_id
                match_mdl.put()

                match = {
                    'match_id': str(match_mdl.key().id()),
                    'player_one_id': player_id,
                    'player_two_id': other_player_id
                }
                memcache.set('matches:' + player_id, match)
                memcache.set('matches:' + other_player_id, match)
                memcache.set('matches:' + str(match_mdl.key().id()), match)
                break
            else:
                if not cache.add("matchmaker", player_id, time=10):
                    if not cache.cas("matchmaker", player_id, time=10):
                        continue
                break

        if not match:
            self.response.write("ERR:NO-OPPONENT")
        else:
            self.response.write(json.dumps(match))
Code example #14
    def update_all_posts_cache(self, update_with_post):
        """update_with_post is a BlogPost to be added to the posts cache."""
        key = 'post_list'
        client = memcache.Client()

        for k in xrange(100):
            previous_posts = client.gets(key)
            if previous_posts is None:
                previous_posts = list(
                    db.GqlQuery(
                        "SELECT * FROM BlogPost ORDER BY created DESC"))
                if log_db:
                    logging.warning("DATABASE READ: All posts!")

            # we don't append, because the posts must stay ordered as newest first
            all_posts = [update_with_post] + previous_posts

            if client.cas(key, all_posts):
                break
Code example #15
File: memcache.py Project: sjones4/hawkeye
    def get(self):
        self.response.headers['Content-Type'] = "application/json"
        key = str(uuid.uuid1())
        timeout = 36000
        client = memcache.Client()

        memcache.set(key, 1, int(timeout))
        gets_val = client.gets(key)
        if not client.cas(key, 2):
            self.response.out.write(json.dumps(
                {'success': False,
                 'error': 'cas returned False, should have returned True'}))

        else:
            gets_val = client.gets(key)
            memcache.set(key, 1, int(timeout))
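            # A plain set() after gets() changes the value, invalidating the
            # CAS token, so the cas() below is expected to fail.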
            if client.cas(key, 2):
                self.response.out.write(json.dumps(
                    {'success': False,
                     'error': 'cas returned True, should have returned False'}))
            else:
                self.response.out.write(json.dumps({'success': True}))
Code example #16
def __init__(self,
             client_namespace="channel-clients",
             message_namespace="channel-buckets",
             channels=None,
             max_message_backlog=200,
             pull_retries=37,
             pull_sleep=1.5,
             default_cas_ttl=60 * 30):
    """
    Creates a channel manager.
    """
    assert channels
    self.client_namespace = client_namespace
    self.message_namespace = message_namespace
    self.client = memcache.Client()
    self.default_cas_ttl = default_cas_ttl
    self.max_message_backlog = max_message_backlog
    self.channels = channels
    self.pull_retries = pull_retries
    self.pull_sleep = pull_sleep
Code example #17
def increase_counter(cls, instance, count):
    '''
        Increment the counter for the given key.
    '''
    memclient = memcache.Client()

    def increase():
        import random
        index = random.randint(0, SHARDS - 1)  # select a random shard to increase
        shard_key = str(instance) + str(index)  # build the key_name
        counter = cls.get_by_key_name(shard_key)
        if not counter:  # if the counter doesn't exist, create a new one
            counter = cls(key_name=shard_key, instance_key=instance)
        counter.count += count
        counter.put()

    db.run_in_transaction(increase)
    # Keep the cached total in step with the sharded datastore counter.
    if count > 0:
        memclient.incr(str(instance), delta=count, initial_value=0)
    else:
        memclient.decr(str(instance), delta=-count, initial_value=0)
Code example #18
    def get(self, section_id=None):
        if not section_id: section_id = 'all'

        client = memcache.Client()

        ophan_json = client.get(section_id)

        if not ophan_json:
            refresh_data(section_id)
            ophan_json = "[]"

        last_read = client.get(section_id + ".epoch_seconds")

        if last_read and not fresh(last_read):
            refresh_data(section_id)

        headers.json(self.response)
        headers.set_cache_headers(self.response, 60)
        headers.set_cors_headers(self.response)
        self.response.out.write(formats.jsonp(self.request, ophan_json))
Code example #19
File: probers.py Project: wangz5/apprtc
def store_instance_state(self, probing_results):
    # Store an active collider host to memcache to be served to clients.
    # If the currently active host is still up, keep it. If not, pick a
    # new active host that is up.
    memcache_client = memcache.Client()
    for retries in xrange(constants.MEMCACHE_RETRY_LIMIT):
        active_host = memcache_client.gets(
            constants.WSS_HOST_ACTIVE_HOST_KEY)
        if active_host is None:
            memcache_client.set(constants.WSS_HOST_ACTIVE_HOST_KEY, '')
            active_host = memcache_client.gets(
                constants.WSS_HOST_ACTIVE_HOST_KEY)
        active_host = self.create_collider_active_host(
            active_host, probing_results)
        if memcache_client.cas(constants.WSS_HOST_ACTIVE_HOST_KEY,
                               active_host):
            logging.info('collider active host saved to memcache: ' +
                         str(active_host))
            break
        logging.warning('retry # ' + str(retries) +
                        ' to set collider status')
Code example #20
File: main.py Project: emlynoregan/im_critsec
    def completion():
        lmemcacheClient2 = memcache.Client()
        try:
            lreentry = _get_memcount(lmemcacheClient1, "reentry")
            logging.info("reentry=%s" % lreentry)
            if (lreentry or 0) > 0:
                raise PermanentTaskFailure("Reentry == %s, should be 0" %
                                           lreentry)
            _set_memcount(lmemcacheClient2, "reentry", 1)

            time.sleep(2)
            lnumCalls = _get_memcount(lmemcacheClient1, "numcalls")
            lnumCalls = (lnumCalls or 0) + 1
            logging.info("lnumCalls=%s" % lnumCalls)
            if lnumCalls == 2:
                fut = GetFutureAndCheckReady(futurekey)
                fut.set_success("called completion twice")

            _set_memcount(lmemcacheClient1, "numcalls", lnumCalls)
        finally:
            _set_memcount(lmemcacheClient2, "reentry", 0)
Code example #21
    def store_for_identity_if_dirty(self, ident):
        if not self.dirty:
            return

        # No longer dirty
        self.dirty = False

        # memcache.set_async isn't exposed; make a Client so we can use it
        client = memcache.Client()
        future = client.set_multi_async(
            {BingoIdentityCache.key_for_identity(ident): self})

        # Always fire off a task queue to persist bingo identity cache
        # since there's no cron job persisting these objects like BingoCache.
        self.persist_to_datastore(ident)
        # TODO(alpert): If persist_to_datastore has more than 50 identities and
        # creates a deferred task AND that task runs before the above memcache
        # set finishes then we could lose a tiny bit of data for a user, but
        # that's extremely unlikely to happen.

        future.get_result()
Code example #22
def updateHacker(secret, fields):
    memcachedKey = memcachedBase + secret
    client = memcache.Client()
    retries = 5
    success = False
    while retries > 0 and not success:
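        # gets() records a CAS id for the key; cas() below succeeds only if
        # the cached value is unchanged since that read, otherwise we retry.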
        hacker = client.gets(memcachedKey)
        if hacker is None:
            hacker = registration.Hacker.WithSecret(secret)
            client.set(memcachedKey, hacker)

        for k, v in fields.iteritems():
            setattr(hacker, k, v)

        if client.cas(memcachedKey, hacker):
            success = True
            hacker.put()

        retries -= 1

    return success
Code example #23
def WaitUntilDownloadAllowed(master_name,
                             timeout_seconds=90):  # pragma: no cover
    """Waits until next download from the specified master is allowed.

  Returns:
    True if download is allowed to proceed.
    False if download is not still allowed when the given timeout occurs.
  """
    client = memcache.Client()
    key = _MEMCACHE_MASTER_DOWNLOAD_LOCK % master_name

    deadline = time.time() + timeout_seconds
    while True:
        info = client.gets(key)
        if not info or time.time() - info['time'] >= _DOWNLOAD_INTERVAL_SECONDS:
            new_info = {'time': time.time()}
            if not info:
                success = client.add(
                    key,
                    new_info,
                    time=_MEMCACHE_MASTER_DOWNLOAD_EXPIRATION_SECONDS)
            else:
                success = client.cas(
                    key,
                    new_info,
                    time=_MEMCACHE_MASTER_DOWNLOAD_EXPIRATION_SECONDS)

            if success:
                logging.info('Download from %s is allowed. Waited %s seconds.',
                             master_name,
                             (time.time() + timeout_seconds - deadline))
                return True

        if time.time() > deadline:
            logging.info('Download from %s is not allowed. Waited %s seconds.',
                         master_name, timeout_seconds)
            return False

        logging.info('Waiting to download from %s', master_name)
        time.sleep(_DOWNLOAD_INTERVAL_SECONDS + random.random())
Code example #24
File: load_info.py Project: jamesw6811/code-mmo-web
    def AddServer(cls, grid, resp):
        newserv = SingleServer(key_name=grid)
        newserv.gridstr = grid
        newserv.statusresp = str(resp)
        newserv.put()

        memcache_client = memcache.Client()
        while True:
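            # Optimistic concurrency: cas() fails if another instance changed
            # the server list after our gets(), so we loop and retry.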
            servers = memcache_client.gets(cls.ALL_SERVERS)
            if servers is None:
                logging.error('all_servers entry in Memcache is None.')
                memcache.set(cls.ALL_SERVERS, [grid])
                break
            if grid in servers:
                logging.error('adding same server twice!')
                break
            servers.append(grid)
            if memcache_client.cas(cls.ALL_SERVERS, servers):
                break

        info = {cls.STATUS: cls.STATUS_LOADING, cls.LAST_RESP: str(resp)}
        memcache.set(cls.SERVER_INFO_PREFIX + grid, info)
Code example #25
File: onlinecollaborators.py Project: meedan/montage
    def __init__(
            self,
            object_id,
            prefix="project",
            namespace="collab",
            collaborator_expiry=90,
            online_event_kind=EventKind.PROJECTCOLLABORATORONLINE,
            offline_event_kind=EventKind.PROJECTCOLLABORATOROFFLINE
            ):
        """
            Creates an online collaborators manager

            object_id: the object on which users are collaborating
        """
        self.object_id = object_id
        self.prefix = prefix
        self.namespace = namespace
        self.client = memcache.Client()
        self.collaborator_expiry = datetime.timedelta(
            seconds=collaborator_expiry)
        self.online_event_kind = online_event_kind
        self.offline_event_kind = offline_event_kind
Code example #26
File: RawData.py Project: TakashiSasaki/odenkiapi
    def GET(self, jrequest, jresponse):
        assert isinstance(jresponse, JsonRpcResponse)
        jresponse.setId()

        client = memcache.Client()
        keys = client.get(self.MEMCACHE_KEY)
        if keys:
            jresponse.setExtraValue("memcache", "hit")
        else:
            jresponse.setExtraValue("memcache", "missed and reloaded")
            keys = RawData.fetchRecent()
            client.set(self.MEMCACHE_KEY, keys, time=70)

        for key in keys:
            raw_data = key.get()
            assert isinstance(raw_data, RawData)
            jresponse.addResult(raw_data)
        jresponse.setColumns(RawDataColumns())

        jresponse.setExtraValue("__name__", __name__)
        jresponse.setExtraValue("__package__", __package__)
        jresponse.setExtraValue("__file__", __file__)
Code example #27
File: load_info.py Project: jamesw6811/code-mmo-web
    def RemoveServer(cls, grid):
        memcache_client = memcache.Client()
        while True:
            servers = memcache_client.gets(cls.ALL_SERVERS)
            if not servers:
                break
            try:
                servers.remove(grid)
            except ValueError:
                logging.error(
                    'Attempted to remove server that does not exist %s', grid)
                break
            if memcache_client.cas(cls.ALL_SERVERS, servers):
                break

        datastore_single_server = SingleServer.GetByName(grid)
        if datastore_single_server:
            memcache.delete(cls.SERVER_INFO_PREFIX + grid)
            datastore_single_server.delete()
        else:
            logging.error('Trying to remove server with no datastore entry %s',
                          grid)
Code example #28
File: lock_util.py Project: yildizzorlu/engblog
def memcache_util_add_multi_async_with_deadline(
        mapping,
        time=0,
        key_prefix='',
        min_compress_len=0,
        namespace=None,
        deadline=DEFAULT_MEMCACHE_SET_DEADLINE):
    """Like add_multi_async(), but fails if it takes longer than deadline.

    Asynchronously adds multiple keys' values at once.  Deadline is in
    seconds and defaults to a reasonable value unless set explicitly.

    See memcache.Client().add_multi_async documentation for details.
    """
    rpc = memcache.create_rpc(deadline=deadline)
    return memcache.Client().add_multi_async(mapping,
                                             time=time,
                                             key_prefix=key_prefix,
                                             min_compress_len=min_compress_len,
                                             namespace=namespace,
                                             rpc=rpc)
Code example #29
def add_channel_token(self):
    token_id = str(self.username) + str(server.create_uuid())
    token = ChannelToken(id=token_id,
                         token=channel.create_channel(token_id))
    client = memcache.Client()
    while True:  # Retry Loop
        user_token_list = client.gets(key=self.key.id(),
                                      namespace='user_tokens')
        if user_token_list is None:
            if not memcache.add(key=self.key.id(),
                                value=[token],
                                namespace='user_tokens'):
                memcache.set(key=self.key.id(),
                             value=[token],
                             namespace='user_tokens')
            break
        user_token_list.append(token)
        if client.cas(key=self.key.id(),
                      value=user_token_list,
                      namespace='user_tokens'):
            break
    return token
Code example #30
def get_by_keys(keys, kind=None):
    if kind:
        keys = [str(db.Key.from_path(kind, i)) for i in keys]

    client = memcache.Client()
    values = client.get_multi(keys)
    data = [values.get(i) for i in keys]

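    # Backfill misses from the datastore and write them back to the cache.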
    if None in data:
        to_fetch = [i for i, value in enumerate(data) if value is None]

        fetch_keys = [keys[i] for i in to_fetch]
        fetched = db.get(fetch_keys)
        set_multi(dict(zip(fetch_keys, fetched)))

        for i in to_fetch:
            data[i] = fetched.pop(0)

    return data