예제 #1
0
 def thread_func(thread_output):
     """Run on a worker thread: set a cache key, then flush and re-read it.

     Records the value of 'key' before and after request_cache.flush()
     into the shared thread_output dict.
     """
     request_cache.set('key', 'thread')
     thread_output['value_of_get_before_flush'] = (
         request_cache.get('key'))
     # flush() clears this thread's request cache, so the next get should
     # come back None (see the assertions in the enclosing test).
     request_cache.flush()
     thread_output['value_of_get_after_flush'] = (
         request_cache.get('key'))
예제 #2
0
    def get_cached_result(key, namespace, expiration, layer):
        """Look up key in each enabled cache layer, fastest first.

        On a hit in a slower layer, the value is copied back into any
        faster enabled layers so subsequent lookups are cheap. Returns
        None when no enabled layer has the value.
        """
        # Fastest layer: per-instance in-process cache.
        if layer & Layers.InAppMemory:
            value = cachepy.get(key)
            if value is not None:
                return value

        # Shared memcache layer.
        if layer & Layers.Memcache:
            value = get_from_memcache(key, namespace=namespace)
            if value is not None:
                # Promote the hit into the in-process cache.
                if layer & Layers.InAppMemory:
                    cachepy.set(key, value, expiry=expiration)
                return value

        # Durable datastore layer.
        if layer & Layers.Datastore:
            value = KeyValueCache.get(key, namespace=namespace)
            if value is not None:
                # Promote the hit into all faster enabled layers.
                if layer & Layers.InAppMemory:
                    cachepy.set(key, value, expiry=expiration)
                if layer & Layers.Memcache:
                    set_to_memcache(key, value, time=expiration,
                                    namespace=namespace)
                return value

        # Slowest layer: blobstore.
        if layer & Layers.Blobstore:
            # TODO: fill upward layers if the dumped result would stay under
            # 1MB (figuring that out might be too costly).
            return BlobCache.get(key, namespace=namespace)
예제 #3
0
    def get_cached_result(key, namespace, expiration, layer):
        """Read key from the enabled cache layers, cheapest layer first.

        A value found in a slower layer is written into every faster
        enabled layer before it is returned; a miss everywhere yields
        None.
        """
        in_memory = layer & Layers.InAppMemory
        use_memcache = layer & Layers.Memcache

        # Cheapest: per-instance memory.
        if in_memory:
            found = cachepy.get(key)
            if found is not None:
                return found

        # Next: shared memcache.
        if use_memcache:
            found = get_from_memcache(key, namespace=namespace)
            if found is not None:
                if in_memory:
                    cachepy.set(key, found, expiry=expiration)
                return found

        # Next: datastore-backed key/value cache.
        if layer & Layers.Datastore:
            found = KeyValueCache.get(key, namespace=namespace)
            if found is not None:
                if in_memory:
                    cachepy.set(key, found, expiry=expiration)
                if use_memcache:
                    set_to_memcache(key, found, time=expiration,
                                    namespace=namespace)
                return found

        # Slowest: blobstore.
        if layer & Layers.Blobstore:
            # TODO: fill upward layers when the dumped result would fit under
            # the 1MB limit (sizing it may be too costly to be worthwhile).
            return BlobCache.get(key, namespace=namespace)
예제 #4
0
    def get_cached_result(key, namespace, expiration, layer):
        """Return the cached value for key, or None if no layer has it.

        Layers are consulted fastest-first; a hit in a slower layer is
        written back to the faster enabled layers.
        """
        # In-process cache.
        if layer & Layers.InAppMemory:
            cached = cachepy.get(key)
            if cached is not None:
                return cached

        # Shared memcache.
        if layer & Layers.Memcache:
            cached = memcache.get(key, namespace=namespace)
            if cached is not None:
                if layer & Layers.InAppMemory:
                    # Promote into the in-process cache.
                    cachepy.set(key, cached, expiry=expiration)
                return cached

        # Datastore-backed cache.
        if layer & Layers.Datastore:
            cached = KeyValueCache.get(key, namespace=namespace)
            if cached is None:
                return None
            # Promote into every faster enabled layer.
            if layer & Layers.InAppMemory:
                cachepy.set(key, cached, expiry=expiration)
            if layer & Layers.Memcache:
                memcache.set(key, cached, time=expiration,
                             namespace=namespace)
            return cached
예제 #5
0
    def test_write_thread_storage_in_flush(self):
        """Verify request_cache.flush() only clears the calling thread's data.

        A value set on the main thread must survive a flush() performed on
        a second thread, while the second thread's own value is gone after
        its flush().
        """
        request_cache.set('key', 'main')
        self.assertTrue(request_cache.has('key'))
        self.assertEqual('main', request_cache.get('key'))

        thread_output = {}

        def thread_func(thread_output):
            # Runs on the second thread: set, read, flush, read again.
            request_cache.set('key', 'thread')
            thread_output['value_of_get_before_flush'] = (
                request_cache.get('key'))
            request_cache.flush()
            thread_output['value_of_get_after_flush'] = (
                request_cache.get('key'))

        thread = threading.Thread(target=thread_func, args=[thread_output])
        thread.start()
        thread.join()

        # The main thread should not see changes made by the second
        # thread.
        self.assertTrue(request_cache.has('key'))
        self.assertEqual('main', request_cache.get('key'))
        self.assertEqual('thread', thread_output['value_of_get_before_flush'])
        self.assertIsNone(thread_output['value_of_get_after_flush'])
예제 #6
0
    def set_cached_result(key, namespace, expiration, layer, result, 
                          use_chunks, compress_chunks):
        """Write result into every cache layer enabled in the layer bitmask.

        For memcache, values may be split across multiple entries via
        ChunkedResult: either pre-emptively (use_chunks truthy) or as a
        fallback when a plain memcache.set raises the value-too-large
        ValueError.
        """
        # Cache the result
        if layer & Layers.InAppMemory:
            instance_cache.set(key, result, expiry=expiration)

        if layer & Layers.Memcache:
            
            if not use_chunks:

                try:
                    # memcache.set returns a falsy status on failure.
                    if not memcache.set(key, result, time=expiration, 
                                        namespace=namespace):
                        logging.error("Memcache set failed for %s" % key)
                except ValueError, e:
                    # Only the "value too large" ValueError is recoverable;
                    # anything else is re-raised unchanged.
                    if str(e).startswith("Values may not be more than"):
                        # The result was too big to store in memcache.  Going  
                        # to chunk it and try again
                        ChunkedResult.set(key, result, expiration, namespace, 
                                          compress=compress_chunks,
                                          cache_class=memcache)
                    else: 
                        raise

            else:
                # use_chunks parameter was explicitly set, not going to even 
                # bother trying to put it in memcache directly
                ChunkedResult.set(key, result, expiration, namespace, 
                                  compress=compress_chunks,
                                  cache_class=memcache)
예제 #7
0
    def set_cached_result(key, namespace, expiration, layer, result):
        """Write result into every cache layer enabled in the layer bitmask."""
        # In-process cache.
        if layer & Layers.InAppMemory:
            cachepy.set(key, result, expiry=expiration)

        # Shared memcache; log (but don't raise) on failure.
        if layer & Layers.Memcache:
            stored = memcache.set(key, result, time=expiration,
                                  namespace=namespace)
            if not stored:
                logging.error("Memcache set failed for %s" % key)

        # Durable datastore layer.
        if layer & Layers.Datastore:
            KeyValueCache.set(key, result, time=expiration,
                              namespace=namespace)
예제 #8
0
    def get_cached_result(key, namespace, expiration, layer):
        """Look up key in each enabled cache layer, fastest first.

        Values too large for one cache entry may have been stored as a
        ChunkedResult; such values are reassembled before being returned
        or promoted into faster layers. Returns None on a miss in every
        enabled layer.
        """
        # Fastest layer: per-instance in-process cache.
        if layer & Layers.InAppMemory:
            result = cachepy.get(key)
            if result is not None:
                return result

        if layer & Layers.Memcache:
            maybe_chunked_result = memcache.get(key, namespace=namespace)
            if maybe_chunked_result is not None:
                # Reassemble the value if it was stored in chunks.
                if isinstance(maybe_chunked_result, ChunkedResult):
                    result = maybe_chunked_result.get_result(
                        memcache, namespace=namespace)
                else:
                    result = maybe_chunked_result

                # Found in memcache, fill upward layers
                if layer & Layers.InAppMemory:
                    cachepy.set(key, result, expiry=expiration)

                return result

        if layer & Layers.Datastore:
            maybe_chunked_result = KeyValueCache.get(key, namespace=namespace)
            if maybe_chunked_result is not None:
                # Found in datastore. Unchunk results if needed, and fill upward
                # layers
                if isinstance(maybe_chunked_result, ChunkedResult):
                    result = maybe_chunked_result.get_result(
                        KeyValueCache, namespace=namespace)

                    if layer & Layers.Memcache:
                        # Since the result in the datastore needed to be chunked
                        # we will need to use ChunkedResult for memcache as well
                        ChunkedResult.set(key,
                                          result,
                                          expiration,
                                          namespace,
                                          cache_class=memcache)
                else:
                    result = maybe_chunked_result
                    if layer & Layers.Memcache:
                        # Since the datastore wasn't using a chunked result
                        # This memcache.set should succeed as well.
                        memcache.set(key,
                                     result,
                                     time=expiration,
                                     namespace=namespace)

                if layer & Layers.InAppMemory:
                    cachepy.set(key, result, expiry=expiration)

                return result
예제 #9
0
def get_user_id_from_profile(profile):
    """Return the prefixed Facebook user id for a Facebook profile dict.

    Also caches the profile's normalized name for this request so a later
    nickname lookup is free. Returns None when profile is missing or lacks
    a "name" or "id" field.
    """
    if profile is None or "name" not in profile or "id" not in profile:
        return None

    # Workaround http://code.google.com/p/googleappengine/issues/detail?id=573
    name = unicodedata.normalize('NFKD', profile["name"]).encode('utf-8',
                                                                 'ignore')

    user_id = FACEBOOK_ID_PREFIX + profile["id"]

    # Cache any future lookup of this user's facebook nickname this request.
    request_cache.set(get_facebook_nickname_key(user_id), name)

    return user_id
예제 #10
0
    def set_cached_result(key, namespace, expiration, layer, result):
        """Store result in each cache layer selected by the layer bitmask."""
        if layer & Layers.InAppMemory:
            # In-process cache.
            cachepy.set(key, result, expiry=expiration)

        if layer & Layers.Memcache:
            # memcache.set reports failure via a falsy status; log and
            # continue rather than raising.
            ok = memcache.set(
                key, result, time=expiration, namespace=namespace)
            if not ok:
                logging.error("Memcache set failed for %s" % key)

        if layer & Layers.Datastore:
            KeyValueCache.set(
                key, result, time=expiration, namespace=namespace)
예제 #11
0
def get_user_id_from_profile(profile):
    """Return the prefixed Facebook user id for a Facebook profile dict.

    As a side effect, the profile's normalized name is cached for this
    request under the nickname key. Returns None when the profile lacks a
    "name" or "id" field.
    """
    has_fields = (profile is not None
                  and "name" in profile
                  and "id" in profile)
    if not has_fields:
        return None

    # Workaround http://code.google.com/p/googleappengine/issues/detail?id=573
    nickname = unicodedata.normalize('NFKD', profile["name"]).encode(
        'utf-8', 'ignore')

    user_id = FACEBOOK_ID_PREFIX + profile["id"]

    # Cache any future lookup of this user's facebook nickname this request.
    request_cache.set(get_facebook_nickname_key(user_id), nickname)

    return user_id
예제 #12
0
    def get_cached_result(key, namespace, expiration, layer):
        """Look up key in each enabled cache layer, fastest first.

        Values too large for one cache entry may have been stored as a
        ChunkedResult; such values are reassembled before being returned
        or promoted into faster layers. Returns None on a miss in every
        enabled layer.
        """
        # Fastest layer: per-instance in-process cache.
        if layer & Layers.InAppMemory:
            result = instance_cache.get(key)
            if result is not None:
                return result

        if layer & Layers.Memcache:
            maybe_chunked_result = memcache.get(key, namespace=namespace)
            if maybe_chunked_result is not None:
                # Reassemble the value if it was stored in chunks.
                if isinstance(maybe_chunked_result, ChunkedResult):
                    result = maybe_chunked_result.get_result(memcache, 
                                                            namespace=namespace)
                else:
                    result = maybe_chunked_result

                # Found in memcache, fill upward layers
                if layer & Layers.InAppMemory:
                    instance_cache.set(key, result, expiry=expiration)

                return result

        if layer & Layers.Datastore:
            maybe_chunked_result = KeyValueCache.get(key, namespace=namespace)
            if maybe_chunked_result is not None:
                # Found in datastore. Unchunk results if needed, and fill
                # upward layers.
                if isinstance(maybe_chunked_result, ChunkedResult):
                    result = maybe_chunked_result.get_result(KeyValueCache, 
                                                            namespace=namespace)

                    if layer & Layers.Memcache:
                        # Since the result in the datastore needed to be chunked
                        # we will need to use ChunkedResult for memcache as well
                        ChunkedResult.set(key, result, expiration, namespace, 
                                          cache_class=memcache)
                else:
                    result = maybe_chunked_result
                    if layer & Layers.Memcache:
                        # Since the datastore wasn't using a chunked result
                        # This memcache.set should succeed as well.
                        memcache.set(key, result, time=expiration, 
                                     namespace=namespace)

                if layer & Layers.InAppMemory:
                    instance_cache.set(key, result, expiry=expiration)

                return result
예제 #13
0
    def test_read_thread_storage_in_get(self):
        """Verify request_cache.get() reads thread-local storage.

        A value set on the main thread must not be visible from a second
        thread.
        """
        self.assertIsNone(request_cache.get('key'))
        request_cache.set('key', 'main')
        self.assertEqual('main', request_cache.get('key'))

        thread_output = {}

        def thread_func(thread_output):
            # Runs on the second thread; records what it can see for 'key'.
            thread_output['value_of_get'] = request_cache.get('key')

        thread = threading.Thread(target=thread_func, args=[thread_output])
        thread.start()
        thread.join()

        # The second thread should see different values than the main thread.
        self.assertIsNone(thread_output['value_of_get'])
예제 #14
0
    def set_cached_result(key, namespace, expiration, layer, result,
                          use_chunks, compress_chunks):
        """Write result into every cache layer enabled in the layer bitmask.

        For memcache, values may be split across multiple entries via
        ChunkedResult: either pre-emptively (use_chunks truthy) or as a
        fallback when a plain memcache.set raises the value-too-large
        ValueError.
        """
        # Cache the result
        if layer & Layers.InAppMemory:
            cachepy.set(key, result, expiry=expiration)

        if layer & Layers.Memcache:

            if not use_chunks:

                try:
                    # memcache.set returns a falsy status on failure.
                    if not memcache.set(
                            key, result, time=expiration, namespace=namespace):
                        logging.error("Memcache set failed for %s" % key)
                except ValueError, e:
                    # Only the "value too large" ValueError is recoverable;
                    # anything else is re-raised unchanged.
                    if str(e).startswith("Values may not be more than"):
                        # The result was too big to store in memcache.  Going
                        # to chunk it and try again
                        ChunkedResult.set(key,
                                          result,
                                          expiration,
                                          namespace,
                                          compress=compress_chunks,
                                          cache_class=memcache)
                    else:
                        raise

            else:
                # use_chunks parameter was explicitly set, not going to even
                # bother trying to put it in memcache directly
                ChunkedResult.set(key,
                                  result,
                                  expiration,
                                  namespace,
                                  compress=compress_chunks,
                                  cache_class=memcache)
예제 #15
0
def get_user_id_from_profile(profile, full_user_id=True):
    """Get Facebook ID from Facebook profile data and cache Facebook nickname.

    Args:
        profile: Facebook profile dict; must contain "name" and "id".
        full_user_id: If True, return the full Khan Academy Facebook user ID
            (ex: "http://facebookid.khanacademy.org/4") and cache the user's
            Facebook nickname for this request. If False, return just the
            Facebook user ID ("4") without the FACEBOOK_ID_PREFIX (and skip
            the nickname caching, which needs the full id as its key).

    Returns:
        The user id string, or None when profile is missing a required field.
    """
    if profile is None or "name" not in profile or "id" not in profile:
        return None

    if not full_user_id:
        # Bare id requested: return early, before doing the normalization
        # and prefixing work that would otherwise be thrown away.
        return profile["id"]

    # Workaround http://code.google.com/p/googleappengine/issues/detail?id=573
    name = unicodedata.normalize('NFKD', profile["name"]).encode(
        'utf-8', 'ignore')

    user_id = FACEBOOK_ID_PREFIX + profile["id"]

    # Cache any future lookup of current user's facebook nickname in this
    # request.
    request_cache.set(get_facebook_nickname_key(user_id), name)

    return user_id
예제 #16
0
    def get_cached_result(key, namespace, expiration, layer):
        """Fetch key from the enabled cache layers, fastest layer first.

        A hit in a slower layer is copied back into each faster enabled
        layer before being returned. Returns None when every enabled
        layer misses.
        """
        # Layer 1: in-process memory.
        if layer & Layers.InAppMemory:
            value = cachepy.get(key)
            if value is not None:
                return value

        # Layer 2: memcache.
        if layer & Layers.Memcache:
            value = memcache.get(key, namespace=namespace)
            if value is not None:
                # Fill the in-process cache on the way out.
                if layer & Layers.InAppMemory:
                    cachepy.set(key, value, expiry=expiration)
                return value

        # Layer 3: datastore-backed key/value cache.
        if layer & Layers.Datastore:
            value = KeyValueCache.get(key, namespace=namespace)
            if value is not None:
                # Fill both faster layers on the way out.
                if layer & Layers.InAppMemory:
                    cachepy.set(key, value, expiry=expiration)
                if layer & Layers.Memcache:
                    memcache.set(key, value,
                                 time=expiration,
                                 namespace=namespace)
                return value
예제 #17
0
 def thread_func(thread_output):
     """Run on a worker thread: set 'key' and record what get() returns."""
     request_cache.set('key', 'thread')
     thread_output['value_of_get_after_set'] = request_cache.get('key')
예제 #18
0
def enable():
    """Re-enable the layer cache by clearing the request-scoped disable flag."""
    request_cache.set("layer_cache_disabled", False)
예제 #19
0
def disable():
    """Disable the layer cache by setting a request-scoped flag."""
    request_cache.set("layer_cache_disabled", True)
예제 #20
0
def enable():
    """Re-enable the layer cache by clearing the request-scoped disable flag."""
    request_cache.set("layer_cache_disabled", False)
예제 #21
0
def disable():
    """Disable the layer cache by setting a request-scoped flag."""
    request_cache.set("layer_cache_disabled", True)