Example #1
def recency(tradecue, step, stckID):
    '''Return True if a desire for this trade cue/stock was cached within the last TimeDelta steps (excluding the current step).'''
    recent = False
    recency_key = 'desire_' + tradecue.key().name() + '_' + str(stckID)
    lastStep = cachepy.get(recency_key, priority=1)
    # Guard against a cache miss (None) before comparing step numbers.
    if lastStep is not None and lastStep >= step - tradecue.TimeDelta and lastStep != step:
        recent = True
    return recent
Example #2
    def fetch(self,limit,offset=0,
              _cache=[],
              _local_expiration = QUERY_EXPIRATION,
              _memcache_expiration = QUERY_EXPIRATION):
      '''By default this method runs the query on datastore.
      
      If additional parameters are supplied, it tries to retrieve query
      results for the current parameters and fetch/offset limits.

      It also does a cascaded cache refresh if no match for the
      current arguments is found in the given cache layers.
      
      Arguments:
        
        limit: Number of model entities to be fetched      
        offset: The number of results to skip.
        _cache: Cache layers to retrieve the results. If no match is found
          the query is run on datastore and these layers are refreshed.         
        _local_expiration: Expiration in seconds for local cache layer, if 
          a cache refresh operation is run.         
        _memcache_expiration: Expiration in seconds for memcache,
          if a cache refresh operation is run.
        
      Returns:
        The return value is a list of model instances, possibly an empty list.
      
      Raises:
        CacheLayerError: If an invalid cache layer name is supplied
      '''
      klass = self.__class__        
      _cache = _to_list(_cache)
      _validate_cache(_cache)
      result = None
      
      local_flag = True if LOCAL in _cache else False
      memcache_flag = True if MEMCACHE in _cache else False
        
      self._clear_keyname(klass.offset_key)
      self._clear_keyname(klass.limit_key)
      self._concat_keyname(klass.limit_key+str(limit))
      if offset != 0:
        self._concat_keyname(klass.offset_key+str(offset))

      if local_flag:
        result = cachepy.get(self.key_name)

      if memcache_flag and result is None:
        result = _deserialize(memcache.get(self.key_name))
        if local_flag and result is not None:
          cachepy.set(self.key_name,result,_local_expiration)
      
      if result is None:
        result = self.query.fetch(limit,offset)
        if memcache_flag:
          memcache.set(self.key_name,_serialize(result),_memcache_expiration)
        if local_flag:
          cachepy.set(self.key_name,result,_local_expiration)
      
      return result
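A minimal caller sketch for the cached fetch() above, assuming the surrounding query-wrapper class (called CachedQuery here purely for illustration) and the LOCAL/MEMCACHE constants come from the same module; the Article model and filter are made up.

query = CachedQuery(Article.all().filter('published =', True))  # wrapper class name is assumed
articles = query.fetch(20,
                       _cache=[LOCAL, MEMCACHE],
                       _local_expiration=60,
                       _memcache_expiration=600)
# Cache hit: results come from cachepy first, then memcache.
# Cache miss: the query runs on the datastore and both layers are refreshed.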
Example #3
    def before_request(self):
        '''Try to set up the user for this request.'''

        if request.method == 'OPTIONS':
            # This is a cors-preflight don't do any auth work on this one
            return

        adn_user = None
        is_app_token = False

        authorization_header = request.headers.get('Authorization')
        if authorization_header:
            method, access_token = authorization_header.split(' ', 1)
            if access_token:
                memcache_key, token_hash = hash_for_token(access_token)
                user_data = cachepy.get(memcache_key) or self.fetch_user_data(access_token, memcache_key)
                if user_data:
                    token = json.loads(user_data).get('data', {})
                    if token and token['app']['client_id'] == current_app.config['CLIENT_ID']:
                        if token.get('is_app_token'):
                            is_app_token = True
                        else:
                            try:
                                adn_user = EasyDict(token['user'])
                            except Exception:
                                # Token payload had no usable user dict; stay unauthenticated.
                                pass

        view_func = self.app.view_functions.get(request.endpoint)
        login_required = getattr(view_func, 'login_required', True)
        # logger.info('Login Required: %s', login_required)
        if login_required and not adn_user:
            abort(401)

        app_token_required = getattr(view_func, 'app_token_required', False)
        # logger.info('app_token_required: %s', app_token_required)
        if app_token_required and not is_app_token:
            abort(401)

        if adn_user:
            user = User.get_or_insert(User.key_from_adn_user(adn_user), access_token=access_token)
            if user.access_token != access_token:
                user.access_token = access_token
                user.put()
                Feed.reauthorize(user)

            g.adn_user = adn_user
            g.user = user
Example #4
def layer_cache_check_set_return(target, d_layer, d_expiration, d_bust_cache,
                                 *args, **kwargs):

    key = kwargs.get("key", "")
    if d_layer is None:
        layer = kwargs.get("layer",
                           Globals.DUAL_LAYER_MEMCACHE_AND_IN_APP_MEMORY_CACHE)
    else:
        layer = d_layer
    if d_expiration is None:
        expiration = kwargs.get("expiration", Globals._1_WEEK)
    else:
        expiration = d_expiration
    if d_bust_cache is None:
        bust_cache = kwargs.get("bust_cache", False)
    else:
        bust_cache = d_bust_cache

    #logging.info("read key: %s, layer: %s, bust_cache: %s, expiration: %s", key, layer, bust_cache, expiration)

    if not bust_cache:
        if layer != Globals.SINGLE_LAYER_MEMCACHE_ONLY:
            result = cachepy.get(key)
            if result is not None:
                return result

        if layer != Globals.SINGLE_LAYER_IN_APP_MEMORY_CACHE_ONLY:
            result = memcache.Client().get(key)
            if result is not None:
                cachepy.set(key, result)
                return result

    result = target(*args, **kwargs)

    # In case the key's value has been changed by target's execution
    key = kwargs.get("key", "")

    #logging.info("write key: %s, layer: %s, bust_cache: %s, expiration: %s", key, layer, bust_cache, expiration)

    if layer != Globals.SINGLE_LAYER_MEMCACHE_ONLY:
        cachepy.set(key, result, expiry=expiration)

    if layer != Globals.SINGLE_LAYER_IN_APP_MEMORY_CACHE_ONLY:
        if not memcache.Client().set(key, result, time=expiration):
            logging.error("Memcache set failed for %s" % key)

    return result
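A hedged sketch of how the helper above might be wrapped into a decorator; the cache_with_key name, the way the d_* defaults are threaded through, and build_topic_tree are assumptions rather than part of the original code.

import functools

def cache_with_key(layer=None, expiration=None, bust_cache=None):
    # Hypothetical decorator factory built on layer_cache_check_set_return; the
    # decorated function must be called with a key= kwarg, which is the cache key.
    def decorator(target):
        @functools.wraps(target)
        def wrapped(*args, **kwargs):
            return layer_cache_check_set_return(
                target, layer, expiration, bust_cache, *args, **kwargs)
        return wrapped
    return decorator

@cache_with_key(expiration=Globals._1_WEEK)
def topic_tree(key=""):
    return build_topic_tree()  # hypothetical expensive call

tree = topic_tree(key="topic_tree:v1")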
Example #5
def cache(key, func, expire=3600):
    skey = str(key)

    result = cachepy.get(skey)
    if result is not None:
        return result

    result = memcache.get(skey)
    if result is not None:
        cachepy.set(skey, result)
        return result

    result = func(key)

    cachepy.set(skey, result, expiry=expire)
    memcache.set(skey, result, time=expire)
    return result
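A minimal caller sketch for the two-level helper above; the loader function and the UserProfile model are made up.

def load_profile(user_id):
    # Hypothetical loader; only runs when both cachepy and memcache miss.
    return UserProfile.get_by_id(user_id)

profile = cache(42, load_profile, expire=600)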
Example #6
def syncProcessCache(step,startKey,stopKey):
    '''Sync recent-desire keys for trade cues startKey..stopKey from memcache into cachepy when the local step clock is stale.'''
    clockKey = 'stepclock_' + str(startKey) + '_' + str(stopKey)
    stepclock = cachepy.get(clockKey, priority=1)
    memkeylist = []
    if stepclock not in [step-1, step]:
        for i in range(startKey,stopKey + 1):
            for stckID in [1,2,3,4]:
                cuekey = meTools.buildTradeCueKey(i)
                recency_key = 'desire_' + cuekey + '_' + str(stckID)
                memkeylist.append(recency_key)
        recentDesires = memcache.get_multi(memkeylist)
        cachepy.set_multi(recentDesires, priority = 1)
        '''
        for des in recentDesires:
            cachepy.set(des, recentDesires[des], priority=1)
        '''
    cachepy.set(clockKey,step,priority=1)   # Set cachepy clockKey to current step since synced with Memcache.
Example #7
def layer_cache_check_set_return(
        target, 
        key_fxn, 
        expiration = DEFAULT_LAYER_CACHE_EXPIRATION_SECONDS, 
        layer = DUAL_LAYER_MEMCACHE_AND_IN_APP_MEMORY_CACHE,
        persist_across_app_versions = False,
        *args, 
        **kwargs):

    key = key_fxn(*args, **kwargs)
    namespace = App.version

    if persist_across_app_versions:
        namespace = None
    bust_cache = kwargs.get("bust_cache", False)

    if not bust_cache:
        if layer != SINGLE_LAYER_MEMCACHE_ONLY:
            result = cachepy.get(key)
            if result is not None:
                return result

        if layer != SINGLE_LAYER_IN_APP_MEMORY_CACHE_ONLY:
            result = memcache.get(key, namespace=namespace)
            if result is not None:
                cachepy.set(key, result)
                return result

    result = target(*args, **kwargs)

    # In case the key's value has been changed by target's execution
    key = key_fxn(*args, **kwargs)

    if layer != SINGLE_LAYER_MEMCACHE_ONLY:
        cachepy.set(key, result, expiry=expiration)

    if layer != SINGLE_LAYER_IN_APP_MEMORY_CACHE_ONLY:
        if not memcache.set(key, result, time=expiration, namespace=namespace):
            logging.error("Memcache set failed for %s" % key)

    return result
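A hedged call sketch for the key_fxn variant above; the key format and the target are made up, and passing bust_cache=True in kwargs is how a caller would force regeneration.

def playlist_key(video_id):
    # Key builder: it receives the same kwargs as the target.
    return "video_playlists:%s" % video_id

def get_playlists_for_video(video_id):
    # Hypothetical expensive target standing in for a real datastore query.
    return ["playlist-for-%s" % video_id]

playlists = layer_cache_check_set_return(
    get_playlists_for_video,
    playlist_key,
    expiration=60 * 60,
    video_id=42)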
Example #8
def layer_cache_check_set_return(
        target,
        key_fxn,
        expiration=DEFAULT_LAYER_CACHE_EXPIRATION_SECONDS,
        layer=DUAL_LAYER_MEMCACHE_AND_IN_APP_MEMORY_CACHE,
        persist_across_app_versions=False,
        *args,
        **kwargs):

    key = key_fxn(*args, **kwargs)
    namespace = App.version

    if persist_across_app_versions:
        namespace = None
    bust_cache = kwargs.get("bust_cache", False)

    if not bust_cache:
        if layer != SINGLE_LAYER_MEMCACHE_ONLY:
            result = cachepy.get(key)
            if result is not None:
                return result

        if layer != SINGLE_LAYER_IN_APP_MEMORY_CACHE_ONLY:
            result = memcache.get(key, namespace=namespace)
            if result is not None:
                cachepy.set(key, result)
                return result

    result = target(*args, **kwargs)

    # In case the key's value has been changed by target's execution
    key = key_fxn(*args, **kwargs)

    if layer != SINGLE_LAYER_MEMCACHE_ONLY:
        cachepy.set(key, result, expiry=expiration)

    if layer != SINGLE_LAYER_IN_APP_MEMORY_CACHE_ONLY:
        if not memcache.set(key, result, time=expiration, namespace=namespace):
            logging.error("Memcache set failed for %s" % key)

    return result
Example #9
    def display_page_or_304(self, get_page, expiry=PAGE_CACHING_TIME_304, request_path=None):
        """ It just uses etag because last-modified is for http 1.0 and nowadays no browser uses it """
        import mc
        content = None
        #- It's possible to customize the request path in case different request paths have the same content -#
        request_path = request_path or self.request.path
        #- Trying to get the etag from cachepy -#
        current_etag = cachepy.get(constants.cachepy.ETAG_KEY % request_path)
        #- There is no etag stored in cachepy -#
        if current_etag is None:
            #- Trying to get the etag from mc -#
            current_etag = mc.get(constants.mc.ETAG_KEY % request_path)
            if current_etag is None:
                #- MC doesn't have the etag either -#
                content = get_page() # Getting the page content.
                current_etag = self._get_content_etag(content) # Generating etag for the content.
                mc.add(constants.mc.ETAG_KEY % request_path, current_etag, expiry)
                cachepy_expiry = expiry if expiry != 0 else None
                cachepy.set(constants.cachepy.ETAG_KEY % request_path, current_etag, cachepy_expiry)
            else:
                #- MC has the etag let's cache it on cachepy and go on -#
                cachepy.set(constants.cachepy.ETAG_KEY % request_path, current_etag)

        browser_etag = self.request.headers.get('If-None-Match')

        # browser_etag may be None, but current_etag is always either fetched
        # from one of the cache layers or generated above.

        if browser_etag == current_etag:
            # The user already has the current content cached in the browser.
            self.response.headers['ETag'] = current_etag
            self.error(304)
        else:
            # No ETag match, so the content has to be generated and transferred to the client.
            if content is None:
                # The content wasn't generated above, so do it now.
                content = get_page()
            # No need to compute the ETag again: it was either taken from
            # cachepy/memcache or generated from the fresh content above.
            self.response.headers['ETag'] = current_etag
            self.response.out.write(content)
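A minimal handler sketch using the ETag helper above (webapp-style); BaseHandler is assumed to be the class that defines display_page_or_304, and the page body is a stand-in.

class AboutHandler(BaseHandler):  # BaseHandler assumed to provide display_page_or_304
    def get(self):
        # get_page only runs when the body actually has to be produced: either
        # no ETag was cached anywhere, or the browser's If-None-Match is stale.
        self.display_page_or_304(lambda: '<h1>About</h1>')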
Example #10
 def wrapper(*args, **kwargs):
     arg_list = []
     for i, arg in enumerate(args):
         try:
             if i not in ignore_args:
                 arg_list.append(pickle.dumps(arg))
         except PicklingError:
             raise UnsupportedArgumentError(arg)
     for kwarg in kwargs.items():
         try:
             arg_list.append(pickle.dumps(kwarg[1]))
         except PicklingError:
             raise UnsupportedArgumentError(kwarg)
     key = fxn.__name__ + '(' + ','.join(arg_list) + ')'
     if version_aware:
         key = os.environ['CURRENT_VERSION_ID'] + '/' + key
     #logging.debug('caching key: %s' % key)
     data = cachepy.get(key) or memcache.get(key)
     if Debug(): 
         if not ACTIVE_ON_DEV_SERVER and not force_cache: 
             return fxn(*args, **kwargs)
     if kwargs.get('_force_run'):
         #logging.debug("forced execution of %s" % fxn.__name__)
         pass
     elif data:
         #logging.debug('cache hit for key: %s' % key)
         if data.__class__ == NoneVal: 
             data = None
         return data
     data = fxn(*args, **kwargs)
     data_to_save = data
     if data is None:
         data_to_save = NoneVal() 
     cachepy.set(key, data_to_save, time / 24) #cachepy expiry time must be much shorter
     memcache.set(key, data_to_save, time)
     return data
Example #11
def _cachepy_get(keys):
  '''Get items with given keys from local cache'''
  result = {}
  for key in keys:
    result[key] = cachepy.get(key)
  return result
Example #12
        def fetch(self,
                  limit,
                  offset=0,
                  _cache=None,
                  _local_expiration=QUERY_EXPIRATION,
                  _memcache_expiration=QUERY_EXPIRATION):
            '''By default this method runs the query on datastore.

            If additional parameters are supplied, it tries to retrieve query
            results for the current parameters and fetch/offset limits.

            It also does a cascaded cache refresh if no match for the
            current arguments is found in the given cache layers.

            Arguments:

              limit: Number of model entities to be fetched
              offset: The number of results to skip.
              _cache: Cache layers to retrieve the results. If no match is found
                the query is run on datastore and these layers are refreshed.
              _local_expiration: Expiration in seconds for local cache layer, if
                a cache refresh operation is run.
              _memcache_expiration: Expiration in seconds for memcache,
                if a cache refresh operation is run.

            Returns:
              The return value is a list of model instances, possibly an empty list.

            Raises:
              CacheLayerError: If an invalid cache layer name is supplied
            '''
            klass = self.__class__
            if _cache is None:
                _cache = []
            else:
                _cache = _to_list(_cache)
                _validate_cache(_cache)

            result = None

            local_flag = True if LOCAL in _cache else False
            memcache_flag = True if MEMCACHE in _cache else False

            self._clear_keyname(klass.offset_key)
            self._clear_keyname(klass.limit_key)
            self._concat_keyname(klass.limit_key + str(limit))
            if offset != 0:
                self._concat_keyname(klass.offset_key + str(offset))

            if local_flag:
                result = cachepy.get(self.key_name)

            if memcache_flag and result is None:
                result = _deserialize(memcache.get(self.key_name))
                if local_flag and result is not None:
                    cachepy.set(self.key_name, result, _local_expiration)

            if result is None:
                result = self.query.fetch(limit, offset)
                if memcache_flag:
                    memcache.set(self.key_name, _serialize(result),
                                 _memcache_expiration)
                if local_flag:
                    cachepy.set(self.key_name, result, _local_expiration)

            return result
Example #13
def _cachepy_get(keys):
    '''Get items with given keys from local cache'''
    result = {}
    for key in keys:
        result[key] = cachepy.get(key)
    return result
Example #14
def layer_cache_check_set_return(
        target, 
        key_fxn, 
        expiration = DEFAULT_LAYER_CACHE_EXPIRATION_SECONDS, 
        layer = Layers.Default,
        persist_across_app_versions = False,
        *args, 
        **kwargs):

    key = key_fxn(*args, **kwargs)
    namespace = App.version

    if persist_across_app_versions:
        namespace = None
    bust_cache = kwargs.get("bust_cache", False)

    if not bust_cache:

        if layer & Layers.InAppMemory:
            result = cachepy.get(key)
            if result is not None:
                return result

        if layer & Layers.Memcache:
            result = memcache.get(key, namespace=namespace)
            if result is not None:
                # Found in memcache, fill upward layers
                if layer & Layers.InAppMemory:
                    cachepy.set(key, result, expiry=expiration)
                return result

        if layer & Layers.Datastore:
            result = KeyValueCache.get(key, namespace=namespace)
            if result is not None:
                # Found in datastore, fill upward layers
                if layer & Layers.InAppMemory:
                    cachepy.set(key, result, expiry=expiration)
                if layer & Layers.Memcache:
                    memcache.set(key, result, time=expiration, namespace=namespace)
                return result

    result = target(*args, **kwargs)

    # In case the key's value has been changed by target's execution
    key = key_fxn(*args, **kwargs)

    if isinstance(result, UncachedResult):
        # Don't cache this result, just return it
        result = result.result
    else:
        # Cache the result
        if layer & Layers.InAppMemory:
            cachepy.set(key, result, expiry=expiration)

        if layer & Layers.Memcache:
            if not memcache.set(key, result, time=expiration, namespace=namespace):
                logging.error("Memcache set failed for %s" % key)

        if layer & Layers.Datastore:
            KeyValueCache.set(key, result, time=expiration, namespace=namespace)

    return result
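A hedged call sketch for the bitmask variant above: cache in memcache and the datastore while skipping the in-app layer. The target, key format, and user id are made up; returning an UncachedResult from the target would skip the write-back entirely.

def compute_exercise_graph(user_id):
    # Hypothetical expensive computation standing in for the real target.
    return {"user_id": user_id, "edges": []}

graph = layer_cache_check_set_return(
    compute_exercise_graph,
    lambda user_id: "exercise_graph:%s" % user_id,
    expiration=60 * 60 * 24,
    layer=Layers.Memcache | Layers.Datastore,   # no Layers.InAppMemory bit set
    user_id="ash")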