def thread_func(thread_output):
    request_cache.set('key', 'thread')
    thread_output['value_of_get_before_flush'] = (
        request_cache.get('key'))
    request_cache.flush()
    thread_output['value_of_get_after_flush'] = (
        request_cache.get('key'))
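# A minimal sketch (driver name and assertions are illustrative, not
# from the original source) of how thread_func might be exercised: run
# it on a worker thread, join, and inspect the values it recorded. The
# expected values assume request_cache.flush() clears the key that was
# set earlier on the same thread.
import threading

def _run_thread_func_sketch():
    thread_output = {}
    worker = threading.Thread(target=thread_func, args=(thread_output,))
    worker.start()
    worker.join()
    # Before the flush, the thread sees the value it just set...
    assert thread_output['value_of_get_before_flush'] == 'thread'
    # ...and after the flush, the key is gone.
    assert thread_output['value_of_get_after_flush'] is None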
def test_repopulate_missing_inapp_cache_when_reading_from_memcache(self):
    '''Test that a missing in-app cache entry is repopulated from memcache.

    It performs the checks on large values that will make use of the
    ChunkedResult.
    '''
    @layer_cache.cache(layer=layer_cache.Layers.Memcache |
                             layer_cache.Layers.InAppMemory,
                       compress_chunks=False)
    def func(result):
        return result

    func(_BIG_STRING)

    instance_cache.flush()
    # make sure instance_cache's value is gone
    self.assertIsNone(instance_cache.get(self.key))

    # make sure we are still able to get the value from memcache
    self.assertEqualTruncateError(_BIG_STRING, func("a"))

    # make sure instance_cache has been filled again
    self.assertEqualTruncateError(_BIG_STRING,
                                  instance_cache.get(self.key))
def test_missing_inapp_and_memcache_get_repopulated_from_datastore(self):
    '''Test that a result read from the datastore is re-saved to the
    higher cache layers.

    It performs the checks on large values that will make use of the
    ChunkedResult.
    '''
    @layer_cache.cache(layer=layer_cache.Layers.Memcache |
                             layer_cache.Layers.Datastore |
                             layer_cache.Layers.InAppMemory,
                       compress_chunks=False)
    def func(result):
        return result

    func(_BIG_STRING)

    instance_cache.flush()
    # make sure instance_cache is flushed
    self.assertIsNone(instance_cache.get(self.key))

    # force removal from memcache
    memcache.delete(self.key)
    # make sure removal worked
    self.assertIsNone(memcache.get(self.key))

    # make sure we are still able to get the value from datastore
    self.assertEqualTruncateError(_BIG_STRING, func("a"))

    # make sure instance_cache has been filled again
    self.assertEqualTruncateError(_BIG_STRING,
                                  instance_cache.get(self.key))

    # make sure the memcache value has been re-added
    self.assertIsInstance(memcache.get(self.key),
                          layer_cache.ChunkedResult)
def tearDown(self):
    for p in self._patches:
        p.stop()

    request_cache.flush()

    # Sanity check to ensure we tore down things properly
    self.assertIsNone(_cur_user())

    super(PostLoginTest, self).tearDown()
def setUp(self):
    self.testbed = testbed.Testbed()
    self.testbed.activate()

    # Create a consistency policy that will simulate the High
    # Replication consistency model.
    self.policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(
        probability=0)
    self.testbed.init_datastore_v3_stub(consistency_policy=self.policy)
    self.testbed.init_user_stub()
    self.testbed.init_memcache_stub()

    cachepy.flush()
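# A minimal sketch (model name and assertions are illustrative, not
# from the original source) of what probability=0 means in practice:
# under PseudoRandomHRConsistencyPolicy(probability=0), a write is
# never visible to global queries until a commit-causing command, such
# as a get() by key, forces the pending write to apply.
from google.appengine.ext import db

class _ConsistencyMarker(db.Model):
    pass

def _demo_hr_consistency():
    key = _ConsistencyMarker().put()
    # A global (non-ancestor) query does not see the unapplied write.
    assert _ConsistencyMarker.all().count() == 0
    # get() by key is strongly consistent and applies the pending
    # write, after which the global query sees it.
    assert db.get(key) is not None
    assert _ConsistencyMarker.all().count() == 1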
def tearDown(self):
    super(RequestCacheThreadSafetyTest, self).tearDown()
    request_cache.flush()
def tearDown(self):
    for p in self._patches:
        p.stop()
    request_cache.flush()
    super(OAuthMapTests, self).tearDown()
def setUp(self, db_consistency_probability=0, use_test_db=False,
          test_db_filename='testutil/test_db.sqlite',
          queue_yaml_dir='.', app_id='dev~khan-academy'):
    """Initialize a testbed for appengine, and also initialize the cache.

    This sets up the backend state (databases, queues, etc.) to a
    known, pure state before each test.

    Arguments:
        db_consistency_probability: a number between 0 and 1
            indicating the fraction of db writes that are immediately
            visible. If set to 1, the database appears fully
            consistent. If set to 0, writes are never visible until a
            commit-causing command is run: get()/put()/delete()/
            ancestor queries. 0 is the default and provides the best
            test that the code does not make assumptions about
            immediate consistency. See
            https://developers.google.com/appengine/docs/python/datastore/overview#Datastore_Writes_and_Data_Visibility
            for details on GAE's consistency policies with the High
            Replication Datastore (HRD).
        use_test_db: if True, initialize the datastore with the
            contents of testutil/test_db.sqlite rather than leaving it
            empty. This routine makes a copy of the db file (in /tmp)
            so changes from one test won't affect another.
        test_db_filename: the file to use with use_test_db, relative
            to the project root (that is, the directory with app.yaml
            in it). It is ignored if use_test_db is False. It is
            unusual to want to change this value from the default, but
            it can be done if you have another test-db in a
            non-standard location.
        queue_yaml_dir: the directory where queue.yaml lives, relative
            to the project root. If set, we will initialize the
            taskqueue stub. This is needed if you wish to run
            mapreduces in your test. This will almost always be '.'.
        app_id: what we should pretend our app-id is. The default
            matches the app-id used to make test_db.sqlite, so
            database lookups on that file will succeed.
    """
    self.testbed = testbed.Testbed()
    # This lets us use testutil's test_db.sqlite if we want to.
    self.testbed.setup_env(app_id=app_id)
    self.testbed.activate()

    # Create a consistency policy that will simulate the High
    # Replication consistency model.
    self.policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(
        probability=db_consistency_probability)

    if use_test_db:
        root = rootdir.project_rootdir()
        (fd, self.datastore_filename) = tempfile.mkstemp(
            prefix='test_db_', suffix='.sqlite')
        _copy_to_fd(open(os.path.join(root, test_db_filename)), fd)
        os.close(fd)
    else:
        self.datastore_filename = None

    self.testbed.init_datastore_v3_stub(
        consistency_policy=self.policy,
        datastore_file=self.datastore_filename,
        use_sqlite=(self.datastore_filename is not None))
    self.testbed.init_user_stub()
    self.testbed.init_memcache_stub()

    if queue_yaml_dir:
        root = rootdir.project_rootdir()
        self.testbed.init_taskqueue_stub(
            root_path=os.path.join(root, queue_yaml_dir),
            auto_task_running=True)

    instance_cache.flush()
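# A hypothetical subclass sketch (both class names are illustrative,
# not from the original source) showing how a test might invoke this
# setUp to get a pre-populated, fully consistent datastore plus a
# taskqueue stub for running mapreduces:
class ExerciseMapreduceTest(GAETestCase):    # base class name assumed
    def setUp(self):
        super(ExerciseMapreduceTest, self).setUp(
            db_consistency_probability=1,    # writes immediately visible
            use_test_db=True,                # start from test_db.sqlite
            queue_yaml_dir='.')              # initialize taskqueue stub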