def primeDesireCache(step, startKey = None, stopKey = None):
    """Prime memcache and cachepy with the most recent desire step seen
    for each (cueKey, stckID) pair, scanned from recent desire entities.

    Args:
        step: Current step; desires from up to 405 steps back are scanned.
        startKey: Low end of the tradeCue key range, or None.
        stopKey: High end of the tradeCue key range, or None.  Either both
            keys are given, or both are None (full range is then used).

    Raises:
        ValueError: If exactly one of startKey/stopKey is supplied.
    """
    import princeFunc
    
    if stopKey is None and startKey is None:
        startKey = 1
        # Highest existing tradeCue key name defines the top of the range.
        stopKey = int(meSchema.tradeCue.all(keys_only=True).order('-__key__').get().name())
    elif stopKey is None or startKey is None:
        # ValueError (a BaseException subclass) is the idiomatic choice for
        # bad arguments; raising bare BaseException is discouraged.
        raise ValueError('Must define both startKey and stopKey, or both must be None!')
    
    memdict = {}
    clockKeyStep = step  # Preserve the caller's step; `step` is reused in the loop below.
    queryStr = princeFunc.getDesireQueryStr(max(step-405,0),step)
    desires = db.GqlQuery(queryStr).fetch(20000)
    for desire in desires:
        desirekey = desire.key().name()
        stckID = meTools.getStckID(desire.Symbol)
        cueKey = desirekey.split("_")[-2]      # Extract cueKey from middle.
        memkey = 'desire_' + cueKey + '_' + str(stckID)
        step = int(desirekey.split("_")[0])    # Extract step from front part of desirekey.
        # Keep only the most recent (largest) step seen per memkey.
        if memkey not in memdict or memdict[memkey] < step:
            memdict[memkey] = step
    memcache.set_multi(memdict)
    cachepy.set_multi(memdict, priority = 1)
    # Manually syncing stepclock until we get a saner method.
    cachepy.set('stepclock_' + str(startKey) + '_' + str(stopKey), clockKeyStep)
Exemple #2
0
    def fetch(self,limit,offset=0,
              _cache=None,
              _local_expiration = QUERY_EXPIRATION,
              _memcache_expiration = QUERY_EXPIRATION):
      '''By default this method runs the query on datastore.
      
      If additional parameters are supplied, it tries to retrieve query
      results for current parameters and fetch & offset limits.
      
      It also does a cascaded cache refresh if no match for 
      current arguments are found in given cache layers.
      
      Arguments:
        
        limit: Number of model entities to be fetched      
        offset: The number of results to skip.
        _cache: Cache layers to retrieve the results. If no match is found
          the query is run on datastore and these layers are refreshed.         
        _local_expiration: Expiration in seconds for local cache layer, if 
          a cache refresh operation is run.         
        _memcache_expiration: Expiration in seconds for memcache,
          if a cache refresh operation is run.
        
      Returns:
        The return value is a list of model instances, possibly an empty list.
      
      Raises:
        CacheLayerError: If an invalid cache layer name is supplied
      '''
      klass = self.__class__
      # _cache defaults to None instead of [] to avoid the shared
      # mutable-default-argument pitfall; None means "no cache layers".
      if _cache is None:
        _cache = []
      else:
        _cache = _to_list(_cache)
        _validate_cache(_cache)
      result = None
      
      local_flag = LOCAL in _cache
      memcache_flag = MEMCACHE in _cache
        
      # Rebuild the key name for this limit/offset combination.
      self._clear_keyname(klass.offset_key)
      self._clear_keyname(klass.limit_key)
      self._concat_keyname(klass.limit_key+str(limit))
      if offset != 0:
        self._concat_keyname(klass.offset_key+str(offset))

      # Layer 1: in-process cache.
      if local_flag:
        result = cachepy.get(self.key_name)

      # Layer 2: memcache; back-fill the local layer on a hit.
      if memcache_flag and result is None:
        result = _deserialize(memcache.get(self.key_name))
        if local_flag and result is not None:
          cachepy.set(self.key_name,result,_local_expiration)
      
      # Fallback: datastore; refresh the requested cache layers.
      if result is None:
        result = self.query.fetch(limit,offset)
        if memcache_flag:
          memcache.set(self.key_name,_serialize(result),_memcache_expiration)
        if local_flag:
          cachepy.set(self.key_name,result,_local_expiration)
      
      return result
Exemple #3
0
    def fetch_user_data(self, auth_token, memcache_key):
        """Fetch the current token's user data from app.net.

        On HTTP 200 the response body is cached under memcache_key for
        five minutes and returned; any other status yields None.
        """
        response = urlfetch.fetch(
            url='https://alpha-api.app.net/stream/0/token',
            method='GET',
            headers={'Authorization': 'Bearer %s' % auth_token},
        )
        if response.status_code != 200:
            return None
        cachepy.set(memcache_key, response.content, 5 * 60)  # Expire in 5 min
        return response.content
Exemple #4
0
def layer_cache_check_set_return(target, d_layer, d_expiration, d_bust_cache,
                                 *args, **kwargs):
    """Read-through layered cache around target(*args, **kwargs).

    The d_* arguments take precedence; when a d_* value is None the
    corresponding kwarg is consulted (falling back to a global default).
    Cache layers are read cheapest-first; a memcache hit back-fills the
    in-app layer.  bust_cache skips the read phase entirely.
    """
    key = kwargs.get("key", "")
    layer = (kwargs.get("layer",
                        Globals.DUAL_LAYER_MEMCACHE_AND_IN_APP_MEMORY_CACHE)
             if d_layer is None else d_layer)
    expiration = (kwargs.get("expiration", Globals._1_WEEK)
                  if d_expiration is None else d_expiration)
    bust_cache = (kwargs.get("bust_cache", False)
                  if d_bust_cache is None else d_bust_cache)

    if not bust_cache:
        # In-app memory layer first (cheapest lookup).
        if layer != Globals.SINGLE_LAYER_MEMCACHE_ONLY:
            cached = cachepy.get(key)
            if cached is not None:
                return cached

        # Then memcache; promote a hit into the in-app layer.
        if layer != Globals.SINGLE_LAYER_IN_APP_MEMORY_CACHE_ONLY:
            cached = memcache.Client().get(key)
            if cached is not None:
                cachepy.set(key, cached)
                return cached

    result = target(*args, **kwargs)

    # In case the key's value has been changed by target's execution
    key = kwargs.get("key", "")

    if layer != Globals.SINGLE_LAYER_MEMCACHE_ONLY:
        cachepy.set(key, result, expiry=expiration)

    if layer != Globals.SINGLE_LAYER_IN_APP_MEMORY_CACHE_ONLY:
        if not memcache.Client().set(key, result, time=expiration):
            logging.error("Memcache set failed for %s" % key)

    return result
Exemple #5
0
def cache(key, func, expire=3600):
    """Two-layer read-through cache: cachepy, then memcache, then func(key)."""
    skey = str(key)

    # Cheapest layer first: in-process cache.
    hit = cachepy.get(skey)
    if hit is not None:
        return hit

    # Shared memcache layer; promote a hit into the local layer.
    hit = memcache.get(skey)
    if hit is not None:
        cachepy.set(skey, hit)
        return hit

    # Miss everywhere: compute once and populate both layers.
    value = func(key)
    cachepy.set(skey, value, expiry=expire)
    memcache.set(skey, value, time=expire)
    return value
def syncProcessCache(step, startKey, stopKey):
    """Sync cachepy's recent-desire entries from memcache when stale.

    If the cachepy step clock for this key range is more than one step
    behind `step`, reload every desire recency key in the range from
    memcache into cachepy, then stamp the clock with the current step.

    Args:
        step: Current processing step.
        startKey: Low end (inclusive) of the tradeCue key range.
        stopKey: High end (inclusive) of the tradeCue key range.
    """
    clockKey = 'stepclock_' + str(startKey) + '_' + str(stopKey)
    stepclock = cachepy.get(clockKey, priority=1)
    if stepclock not in [step - 1, step]:
        memkeylist = []
        for i in range(startKey, stopKey + 1):
            # cuekey depends only on i — hoisted out of the stckID loop.
            cuekey = meTools.buildTradeCueKey(i)
            for stckID in [1, 2, 3, 4]:
                memkeylist.append('desire_' + cuekey + '_' + str(stckID))
        recentDesires = memcache.get_multi(memkeylist)
        cachepy.set_multi(recentDesires, priority=1)
    # Set cachepy clockKey to current step since synced with Memcache.
    cachepy.set(clockKey, step, priority=1)
def _cachepy_put(models,time = 0):
  '''Put given models to the local (cachepy) cache in serialized form,
  with expiration in seconds.

  Args:
    models: List of models to be saved to local cache
    time: Expiration time in seconds for each model instance
      (0 means cache without expiration)

  Returns:
    List of db.Keys of the models that were put
  '''
  to_put = _to_dict(models)
  if time == 0: #cachepy uses None as unlimited caching flag
    time = None
  
  for key, model in to_put.iteritems():
    cachepy.set(key,model,time)
  return [model.key() for model in models]
Exemple #8
0
def _cachepy_put(models, time=0):
    '''Store the given models in the local (cachepy) cache.

  Args:
    models: List of models to be saved to local cache
    time: Expiration in seconds per model instance; 0 caches forever

  Returns:
    List of db.Keys of the models that were put
  '''
    # cachepy uses None as its "no expiration" flag; callers pass 0 for that.
    expiry = None if time == 0 else time

    for cache_key, serialized in _to_dict(models).iteritems():
        cachepy.set(cache_key, serialized, expiry)
    return [m.key() for m in models]
Exemple #9
0
def layer_cache_check_set_return(
        target, 
        key_fxn, 
        expiration = DEFAULT_LAYER_CACHE_EXPIRATION_SECONDS, 
        layer = DUAL_LAYER_MEMCACHE_AND_IN_APP_MEMORY_CACHE,
        persist_across_app_versions = False,
        *args, 
        **kwargs):
    """Read-through layered cache around target(*args, **kwargs).

    Results are keyed by key_fxn(*args, **kwargs) and namespaced by the
    app version unless persist_across_app_versions is set.  Passing
    bust_cache=True in kwargs skips the read phase and forces a refresh.
    """
    key = key_fxn(*args, **kwargs)
    namespace = None if persist_across_app_versions else App.version

    bust_cache = kwargs.get("bust_cache", False)

    if not bust_cache:
        # In-app memory layer first (cheapest lookup).
        if layer != SINGLE_LAYER_MEMCACHE_ONLY:
            hit = cachepy.get(key)
            if hit is not None:
                return hit

        # Then memcache; promote a hit into the in-app layer.
        if layer != SINGLE_LAYER_IN_APP_MEMORY_CACHE_ONLY:
            hit = memcache.get(key, namespace=namespace)
            if hit is not None:
                cachepy.set(key, hit)
                return hit

    result = target(*args, **kwargs)

    # Recompute: target's execution may have changed the key's value.
    key = key_fxn(*args, **kwargs)

    if layer != SINGLE_LAYER_MEMCACHE_ONLY:
        cachepy.set(key, result, expiry=expiration)

    if layer != SINGLE_LAYER_IN_APP_MEMORY_CACHE_ONLY:
        if not memcache.set(key, result, time=expiration, namespace=namespace):
            logging.error("Memcache set failed for %s" % key)

    return result
Exemple #10
0
def layer_cache_check_set_return(
        target,
        key_fxn,
        expiration=DEFAULT_LAYER_CACHE_EXPIRATION_SECONDS,
        layer=DUAL_LAYER_MEMCACHE_AND_IN_APP_MEMORY_CACHE,
        persist_across_app_versions=False,
        *args,
        **kwargs):
    """Check cache layers for key_fxn's key; on a miss run target and refill.

    Args:
        target: Callable whose result is being cached.
        key_fxn: Builds the cache key from *args/**kwargs.
        expiration: Cache lifetime in seconds for refreshed entries.
        layer: Which layer combination to use (memcache and/or in-app).
        persist_across_app_versions: When True, skip the per-app-version
            memcache namespace so entries survive deployments.

    Passing bust_cache=True in kwargs skips the read phase entirely.
    """
    key = key_fxn(*args, **kwargs)
    namespace = App.version  # Memcache entries are versioned by default.

    if persist_across_app_versions:
        namespace = None
    bust_cache = kwargs.get("bust_cache", False)

    if not bust_cache:
        # In-app memory layer first (cheapest lookup).
        if layer != SINGLE_LAYER_MEMCACHE_ONLY:
            result = cachepy.get(key)
            if result is not None:
                return result

        # Then memcache; a hit is promoted into the in-app layer.
        if layer != SINGLE_LAYER_IN_APP_MEMORY_CACHE_ONLY:
            result = memcache.get(key, namespace=namespace)
            if result is not None:
                cachepy.set(key, result)
                return result

    result = target(*args, **kwargs)

    # In case the key's value has been changed by target's execution
    key = key_fxn(*args, **kwargs)

    if layer != SINGLE_LAYER_MEMCACHE_ONLY:
        cachepy.set(key, result, expiry=expiration)

    if layer != SINGLE_LAYER_IN_APP_MEMORY_CACHE_ONLY:
        if not memcache.set(key, result, time=expiration, namespace=namespace):
            logging.error("Memcache set failed for %s" % key)

    return result
Exemple #11
0
    def display_page_or_304(self, get_page, expiry=PAGE_CACHING_TIME_304, request_path=None):
        """Serve the page content or a 304 Not Modified based on an ETag match.

        Only ETag is used because Last-Modified is an HTTP/1.0-era mechanism
        that browsers no longer rely on.

        Args:
            get_page: Zero-argument callable producing the page content.
            expiry: Cache lifetime in seconds for the ETag (0 = no expiry).
            request_path: Optional override for self.request.path, for when
                different request paths share the same content.
        """
        import mc
        content = None
        request_path = request_path or self.request.path
        # Layer 1: try the in-process cachepy store for the etag.
        current_etag = cachepy.get(constants.cachepy.ETAG_KEY % request_path)
        if current_etag is None:
            # Layer 2: try memcache.
            current_etag = mc.get(constants.mc.ETAG_KEY % request_path)
            if current_etag is None:
                # Miss in both caches: generate the content and its etag now.
                content = get_page()
                current_etag = self._get_content_etag(content)
                mc.add(constants.mc.ETAG_KEY % request_path, current_etag, expiry)
                cachepy_expiry = expiry if expiry != 0 else None  # cachepy treats None as "no expiry".
                cachepy.set(constants.cachepy.ETAG_KEY % request_path, current_etag, cachepy_expiry)
            else:
                # Memcache had the etag: promote it into cachepy and continue.
                cachepy.set(constants.cachepy.ETAG_KEY % request_path, current_etag)

        browser_etag = self.request.headers['If-None-Match'] if 'If-None-Match' in self.request.headers else None

        # browser_etag may be None, but current_etag is always set by now
        # (generated above or retrieved from one of the caches).
        if browser_etag == current_etag:
            # The user already has this content cached in the browser.
            self.response.headers['ETag'] = current_etag
            self.error(304)
        else:
            # No etag match, so the content must be sent to the client.
            if content is None:
                # Content was not generated above; do it now.
                content = get_page()
            # No need to regenerate the etag: it was generated or fetched above.
            self.response.headers['ETag'] = current_etag
            self.response.out.write(content)
def doDesire(step, cuekey):
    """Check whether the tradeCue for cuekey produces new desires at this step.

    Args:
        step: Current step to evaluate.
        cuekey: Key name of the tradeCue entity to test.

    Returns:
        List of desire actions created (possibly empty).
    """
    desires = []
    tradecue = meTools.memGet(meSchema.tradeCue, cuekey)
    for stckID in [1, 2, 3, 4]:
        deltakey = str(stckID) + '_' + str(step)
        cval = cvalDict[deltakey]
        # Not enough delta history for this cue's time window: nothing fires.
        if cval is None or len(cval) < tradecue.TimeDelta + 1:
            return desires
        cue = cval[tradecue.TimeDelta]
        qDelta = tradecue.QuoteDelta
        # Fire when the observed delta exceeds the cue threshold in the
        # threshold's own direction.  (Direct comparisons replace the old
        # Python-2-only cmp(cue, qDelta) == +/-1 checks; equivalent for numbers.)
        if (qDelta > 0 and cue > qDelta) or (qDelta < 0 and cue < qDelta):
            recent = recency(tradecue, step, stckID)
            if not recent:
                action = makeDesire(stckID, cuekey, step)
                recency_key = 'desire_' + tradecue.key().name() + '_' + str(stckID)
                # Record the most recent desire step in both cache layers.
                # Maybe combine this into function?
                memcache.set(recency_key, step)
                cachepy.set(recency_key, step, priority=1)

                desires.append(action)
    return desires
Exemple #13
0
 def wrapper(*args, **kwargs):
     """Memoizing wrapper: caches fxn's result in cachepy and memcache.

     Closure variables from the enclosing decorator (not visible in this
     block): fxn, ignore_args, version_aware, force_cache, time, NoneVal,
     ACTIVE_ON_DEV_SERVER, Debug.
     """
     # Build a cache key from the pickled argument values.
     arg_list = []
     for i, arg in enumerate(args):
         try:
             if not i in ignore_args: 
                 arg_list.append(pickle.dumps(arg))
         except PicklingError:
             raise UnsupportedArgumentError(arg)
     for kwarg in kwargs.items():
         try:
             # Only the kwarg VALUE is pickled, not its name —
             # NOTE(review): f(a=1) and f(b=1) would produce the same key;
             # confirm this is intended.
             arg_list.append(pickle.dumps(kwarg[1]))
         except PicklingError:
             raise UnsupportedArgumentError(kwarg)
     key = fxn.__name__ + '(' + ','.join(arg_list) + ')'
     if version_aware:
         # Scope the key to the deployed app version.
         key = os.environ['CURRENT_VERSION_ID'] + '/' + key
     #logging.debug('caching key: %s' % key)
     # Local cachepy layer first, then memcache.  NOTE(review): `or` treats
     # any falsy cached value (0, '', []) as a miss and re-runs fxn.
     data = cachepy.get(key) or memcache.get(key)
     if Debug(): 
         # On the dev server, bypass caching unless explicitly enabled.
         if not ACTIVE_ON_DEV_SERVER and not force_cache: 
             return fxn(*args, **kwargs)
     if kwargs.get('_force_run'):
         #logging.debug("forced execution of %s" % fxn.__name__)
         pass
     elif data:
         #logging.debug('cache hit for key: %s' % key)
         # NoneVal is a sentinel that wraps a cached None result.
         if data.__class__ == NoneVal: 
             data = None
         return data
     data = fxn(*args, **kwargs)
     data_to_save = data
     if data is None:
         data_to_save = NoneVal() 
     cachepy.set(key, data_to_save, time / 24) #cachepy expiry time must be much shorter
     memcache.set(key, data_to_save, time)
     return data
Exemple #14
0
        def fetch(self,
                  limit,
                  offset=0,
                  _cache=None,
                  _local_expiration=QUERY_EXPIRATION,
                  _memcache_expiration=QUERY_EXPIRATION):
            '''Run the query, consulting the requested cache layers first.

            With no _cache argument the query goes straight to datastore.
            Otherwise the given layers are checked in order (local, then
            memcache) and refreshed in cascade when the query falls
            through to datastore.

            Arguments:
              limit: Number of model entities to be fetched.
              offset: The number of results to skip.
              _cache: Cache layers to retrieve the results from; these are
                refreshed if no match is found.
              _local_expiration: Expiration (seconds) for the local layer
                on a cache refresh.
              _memcache_expiration: Expiration (seconds) for memcache on a
                cache refresh.

            Returns:
              A list of model instances, possibly an empty list.

            Raises:
              CacheLayerError: If an invalid cache layer name is supplied.
            '''
            klass = self.__class__
            if _cache is None:
                layers = []
            else:
                layers = _to_list(_cache)
                _validate_cache(layers)

            use_local = LOCAL in layers
            use_memcache = MEMCACHE in layers

            # Rebuild the key name for this limit/offset combination.
            self._clear_keyname(klass.offset_key)
            self._clear_keyname(klass.limit_key)
            self._concat_keyname(klass.limit_key + str(limit))
            if offset != 0:
                self._concat_keyname(klass.offset_key + str(offset))

            # Layer 1: in-process cache.
            result = cachepy.get(self.key_name) if use_local else None

            # Layer 2: memcache; back-fill the local layer on a hit.
            if result is None and use_memcache:
                result = _deserialize(memcache.get(self.key_name))
                if use_local and result is not None:
                    cachepy.set(self.key_name, result, _local_expiration)

            # Fallback: datastore; refresh the requested cache layers.
            if result is None:
                result = self.query.fetch(limit, offset)
                if use_memcache:
                    memcache.set(self.key_name, _serialize(result),
                                 _memcache_expiration)
                if use_local:
                    cachepy.set(self.key_name, result, _local_expiration)

            return result
Exemple #15
0
def layer_cache_check_set_return(
        target, 
        key_fxn, 
        expiration = DEFAULT_LAYER_CACHE_EXPIRATION_SECONDS, 
        layer = Layers.Default,
        persist_across_app_versions = False,
        *args, 
        **kwargs):
    """Read-through cache over target() across the bitmask-selected layers.

    Layers are consulted cheapest-first (in-app memory, memcache,
    datastore); a hit in a lower layer back-fills the layers above it.
    Passing bust_cache=True in kwargs skips the read phase.  A result
    wrapped in UncachedResult is unwrapped and returned without caching.
    """
    key = key_fxn(*args, **kwargs)
    namespace = None if persist_across_app_versions else App.version

    bust_cache = kwargs.get("bust_cache", False)

    if not bust_cache:

        if layer & Layers.InAppMemory:
            hit = cachepy.get(key)
            if hit is not None:
                return hit

        if layer & Layers.Memcache:
            hit = memcache.get(key, namespace=namespace)
            if hit is not None:
                # Found in memcache, fill upward layers
                if layer & Layers.InAppMemory:
                    cachepy.set(key, hit, expiry=expiration)
                return hit

        if layer & Layers.Datastore:
            hit = KeyValueCache.get(key, namespace=namespace)
            if hit is not None:
                # Found in datastore, fill upward layers
                if layer & Layers.InAppMemory:
                    cachepy.set(key, hit, expiry=expiration)
                if layer & Layers.Memcache:
                    memcache.set(key, hit, time=expiration, namespace=namespace)
                return hit

    result = target(*args, **kwargs)

    # Recompute: the key's value may have changed during target's execution.
    key = key_fxn(*args, **kwargs)

    if isinstance(result, UncachedResult):
        # Explicitly marked do-not-cache; unwrap and return as-is.
        result = result.result
    else:
        # Cache the result in every selected layer.
        if layer & Layers.InAppMemory:
            cachepy.set(key, result, expiry=expiration)

        if layer & Layers.Memcache:
            if not memcache.set(key, result, time=expiration, namespace=namespace):
                logging.error("Memcache set failed for %s" % key)

        if layer & Layers.Datastore:
            KeyValueCache.set(key, result, time=expiration, namespace=namespace)

    return result