def after_put(self, *args, **kwargs):
    """Clear caches touched by a change to this org, then save the tasklist.

    Invalidates: cached name lists for each public property, cached
    properties of related projects and project cohorts, and cached
    'SuperDashboard' query results. Also queues tasks to re-warm the
    dashboard cache for each related cohort.
    """
    # On changes to any org, clear the cached list of names.
    for p in self.public_properties:
        memcache.delete(self.all_of_property_key(p))

    # Reset memcache for cached properties of related objects and queries.
    # This relationship is "down" so there may be many keys to clear so
    # don't try to actually refresh the cached values, just set up a cache
    # miss for their next read and they'll recover.
    to_delete = []

    # Projects
    p_keys = model.Project.get(n=float('inf'), organization_id=self.uid,
                               keys_only=True)
    # These keys are for individual project entities
    to_delete += [util.cached_properties_key(k.id()) for k in p_keys]

    # ProjectCohorts
    pcs = list(
        model.ProjectCohort.get(n=float('inf'), organization_id=self.uid)
    )  # force generator to store whole list in memory for re-use
    # These keys are for individual project cohort entities
    to_delete += [util.cached_properties_key(pc.uid) for pc in pcs]

    # These are for caches of whole query results.
    to_delete += [
        util.cached_query_key('SuperDashboard', organization_id=self.uid)
    ]
    for pc in pcs:
        # N.B. use a distinct local name here; the original rebound
        # `kwargs`, clobbering this method's **kwargs parameter.
        query_kwargs = {
            'program_label': pc.program_label,
            'cohort_label': pc.cohort_label,
        }
        to_delete.append(
            util.cached_query_key('SuperDashboard', **query_kwargs))
        # Queue a task to re-warm the dashboard cache for this cohort.
        taskqueue.add(
            url='/task/cache_dashboard',
            headers={'Content-Type': 'application/json; charset=utf-8'},
            payload=json.dumps(query_kwargs),
            countdown=config.task_consistency_countdown,
        )

    memcache.delete_multi(to_delete)

    # Save tasklist.
    if self.tasklist:
        # Tasklist might not always be present; it is if created via
        # create(), but not if fetched from the datastore.
        self.tasklist.put()
def after_put(self, *args, **kwargs):
    """Save the tasklist and clear caches touched by a project change.

    Invalidates cached properties of related project cohorts and cached
    'SuperDashboard' query results (per-cohort and per-organization), and
    queues tasks to re-warm the dashboard cache for each cohort.
    """
    if self.tasklist:
        # Tasklist might not always be present; it is if created via
        # create(), but not if fetched from the datastore.
        self.tasklist.put()

    # Reset memcache for cached properties of related objects.
    # This relationship is "down" so there may be many keys to clear so
    # don't try to actually refresh the cached values, just set up a cache
    # miss for their next read and they'll recover.
    to_delete = []
    for pc in model.ProjectCohort.get(n=float('inf'), project_id=self.uid):
        # These keys are for individual project cohort entities.
        to_delete.append(util.cached_properties_key(pc.uid))

        # These are for caches of whole query results.
        # N.B. use a distinct local name here; the original rebound
        # `kwargs`, clobbering this method's **kwargs parameter.
        query_kwargs = {
            'program_label': pc.program_label,
            'cohort_label': pc.cohort_label,
        }
        to_delete.append(
            util.cached_query_key('SuperDashboard', **query_kwargs))
        taskqueue.add(
            url='/task/cache_dashboard',
            headers={'Content-Type': 'application/json; charset=utf-8'},
            payload=json.dumps(query_kwargs),
            countdown=config.task_consistency_countdown,
        )

    # Also clear the dashboard's organization query.
    to_delete.append(
        util.cached_query_key(
            'SuperDashboard', organization_id=self.organization_id))

    memcache.delete_multi(to_delete)
def test_put_clears_entity_caches(self):
    """Putting an org wipes cached props of its project and cohort."""
    org, project, pc = self.create_org_with_pc()

    project_key = util.cached_properties_key(project.uid)
    cohort_key = util.cached_properties_key(pc.uid)
    for key in (project_key, cohort_key):
        memcache.set(key, {'foo': 1})

    # Re-fetch the org so it doesn't have an associated tasklist, which
    # saves checkpoints. This should clear memcache without relying on
    # those checkpoints.
    org = org.key.get()
    org.name = "Bar University"
    org.put()

    self.assertIsNone(memcache.get(project_key))
    self.assertIsNone(memcache.get(cohort_key))
def test_updating_team_caches_rel_count(self):
    """Removing a team from an org refreshes the cached team count."""
    org, team = self.test_creating_team_caches_rel_count()

    # Dropping the org membership should trigger a memcache update.
    team.organization_ids = []
    team.put()

    props = memcache.get(util.cached_properties_key(org.uid))
    self.assertEqual(props['num_teams'], 0)
    # The object's client dict should report the same count.
    self.assertEqual(org.to_client_dict()['num_teams'], 0)
def test_updating_user_caches_rel_count(self):
    """Removing a user from an org refreshes the cached user count."""
    org, user = self.test_creating_user_caches_rel_count()

    # Dropping the org membership should trigger a memcache update.
    user.owned_organizations = []
    user.put()

    props = memcache.get(util.cached_properties_key(org.uid))
    self.assertEqual(props['num_users'], 0)
    # The object's client dict should report the same count.
    self.assertEqual(org.to_client_dict()['num_users'], 0)
def test_change_contact_name(self):
    """Renaming a contact refreshes the classroom's cached contact_name."""
    cl, user = self.test_initial_contact_name()

    # Renaming the contact should trigger a memcache update.
    user.name = 'Mark Antony'
    user.put()

    props = memcache.get(util.cached_properties_key(cl.uid))
    self.assertEqual(props['contact_name'], user.name)
    # The object's client dict should report the same name.
    self.assertEqual(cl.to_client_dict()['contact_name'], user.name)
def test_updating_classroom_caches_rel_count(self):
    """Changing a classroom's size refreshes the team's cached base."""
    team, classroom = self.test_creating_classroom_caches_rel_count()

    # Change the number of students. This should trigger a memcache
    # update.
    classroom.num_students = 5
    classroom.put()

    props = memcache.get(util.cached_properties_key(team.uid))
    self.assertEqual(props['participation_base'], 5)
    # The object's client dict should report the same count.
    self.assertEqual(team.to_client_dict()['participation_base'], 5)
def clear_cached_properties(self, prop):
    """Related project cohorts need their cached properties cleared.

    This relationship is "down" so there may be many keys to clear, so
    don't try to actually refresh the cached values; just set up a cache
    miss for their next read and they'll recover.
    """
    # Materialize the generator so callers can iterate the result again.
    project_cohorts = list(model.ProjectCohort.get(
        n=float('inf'), **{prop: self.parent_id}
    ))
    memcache.delete_multi(
        [util.cached_properties_key(pc.uid) for pc in project_cohorts])
    return project_cohorts
def to_client_dict(self):
    """Decorate the org with counts of related objects; cached."""
    d = super(Organization, self).to_client_dict()
    d.update(self.default_cached_properties)

    # Prefer memcache; on a miss fall back to the db, which also
    # re-warms the cache.
    cached = memcache.get(util.cached_properties_key(self.uid))
    d.update(cached if cached
             else Organization.update_cached_properties(self.uid))
    return d
def test_put_clears_project_cohort_cache(self):
    """Putting a project wipes its cohort's cached properties."""
    project, pc = self.create_with_pc()
    mem_key = util.cached_properties_key(pc.uid)
    memcache.set(mem_key, {'foo': 1})

    # Re-fetch the project so it doesn't have an associated tasklist,
    # which saves checkpoints. This should clear memcache without relying
    # on those checkpoints.
    project = project.key.get()
    project.priority = True
    project.put()

    self.assertIsNone(memcache.get(mem_key))
def test_initial_contact_name(self):
    """Serving a classroom to the client caches its contact's name."""
    user = User.create(email="*****@*****.**", name="Cleopatra")
    cl = Classroom.create(name="Classroom", team_id="Team_foo", code="foo")
    cl.contact_id = user.uid
    cl.put()
    user.put()

    # Handling the class for the client should populate memcache.
    self.assertEqual(cl.to_client_dict()['contact_name'], user.name)

    # Should see the value in the cache directly.
    cached = memcache.get(util.cached_properties_key(cl.uid))
    self.assertEqual(cached['contact_name'], user.name)

    return (cl, user)
def test_creating_user_caches_rel_count(self):
    """Adding a user to an org refreshes the cached user count."""
    org = Organization.create(name="Organization", captain_id="User_cap",
                              program_id=self.ep_program.uid)
    user = User.create(email="*****@*****.**",
                       owned_organizations=[org.uid])
    org.put()

    # Populate memcache with the org's original value of 0 users.
    Organization.update_cached_properties(org.uid)

    # This should trigger a memcache update.
    user.put()

    props = memcache.get(util.cached_properties_key(org.uid))
    self.assertEqual(props['num_users'], 1)
    # Should see the same count on object.
    self.assertEqual(org.to_client_dict()['num_users'], 1)

    return (org, user)
def to_client_dict(self):
    """Decorate the classroom with counts of related objects; cached."""
    d = super(Classroom, self).to_client_dict()

    # If the team name is available (returned by some custom queries),
    # ndb's to_dict() will exclude it. Put it back so we can use it.
    if hasattr(self, 'team_name'):
        d['team_name'] = self.team_name

    d.update(self.default_cached_properties)

    # Prefer memcache; on a miss fall back to the db, which also
    # re-warms the cache.
    cached = memcache.get(util.cached_properties_key(self.uid))
    d.update(cached if cached
             else Classroom.update_cached_properties(self.uid))
    return d
def test_creating_user_caches_rel_count(self):
    """Adding a user to a team refreshes the cached user count."""
    team = Team.create(name="Team", captain_id="User_cap",
                       program_id=self.program.uid)
    user = User.create(email="*****@*****.**", owned_teams=[team.uid])
    team.put()

    # Populate memcache with the team's original value of 0 users.
    Team.update_cached_properties(team.uid)

    # This should trigger a memcache update.
    user.put()

    props = memcache.get(util.cached_properties_key(team.uid))
    self.assertEqual(props['num_users'], 1)
    # Should see the same count on object.
    self.assertEqual(team.to_client_dict()['num_users'], 1)

    return (team, user)
def test_creating_team_caches_rel_count(self):
    """Adding a team to an org refreshes the cached team count."""
    org = Organization.create(name="Organization", captain_id="User_cap",
                              program_id=self.ep_program.uid)
    team = Team.create(
        name="Team Foo",
        captain_id='User_cap',
        organization_ids=[org.uid],
        program_id=self.ep_program.uid,
    )
    org.put()

    # Populate memcache with the org's original value of 0 teams.
    Organization.update_cached_properties(org.uid)

    # This should trigger a memcache update.
    team.put()

    props = memcache.get(util.cached_properties_key(org.uid))
    self.assertEqual(props['num_teams'], 1)
    # Should see the same count on object.
    self.assertEqual(org.to_client_dict()['num_teams'], 1)

    return (org, team)
def after_put(self, init_kwargs, *args, **kwargs):
    """Reset memcache for related objects.

    * Include _all_ those the user is joining (e.g. on creation) as well
      as any the user is leaving.
    * Include name changes stored elsewhere.
    """
    rels = ((Team, 'owned_teams'), (Organization, 'owned_organizations'))
    # N.B. don't name the loop variable `model`: that would shadow the
    # `model` module used elsewhere in this file.
    for rel_model, attr in rels:
        original_ids = set(init_kwargs[attr])
        new_ids = set(getattr(self, attr))
        leaving_ids = original_ids.difference(new_ids)
        # Refresh everything the user now owns plus everything they left.
        for uid in new_ids.union(leaving_ids):
            rel_model.update_cached_properties(uid)

    # If this user is the contact for any classrooms, and their name has
    # changed, update the name of the classroom.
    if init_kwargs['name'] != self.name:
        for c in Classroom.get(contact_id=self.uid):
            key = util.cached_properties_key(c.uid)
            cached_props = memcache.get(key) or {}
            memcache.set(key, dict(cached_props, contact_name=self.name))
def test_creating_classroom_caches_rel_count(self):
    """Adding a classroom to a team refreshes the cached count."""
    team = Team.create(name="Team", captain_id="User_cap",
                       program_id=self.program.uid)
    classroom = Classroom.create(
        name="Class Foo",
        code='trout viper',
        team_id=team.uid,
        contact_id='User_contact',
    )
    team.put()

    # Populate memcache with the team's original value of 0 classrooms.
    Team.update_cached_properties(team.uid)

    # This should trigger a memcache update.
    classroom.put()

    props = memcache.get(util.cached_properties_key(team.uid))
    self.assertEqual(props['num_classrooms'], 1)
    # Should see the same count on object.
    self.assertEqual(team.to_client_dict()['num_classrooms'], 1)

    return (team, classroom)
def update_cached_properties(klass, team_id):
    """If we find the team in the db, query for rel counts and cache.

    Args:
        team_id: str, uid of the team to refresh.

    Returns:
        The dict of properties fetched from the db, or a falsy value if
        the team wasn't found (in which case nothing is cached).
    """
    # Use klass rather than hard-coding Team so subclasses refresh
    # through their own get_cached_properties_from_db.
    from_db = klass.get_cached_properties_from_db(team_id)
    if from_db:
        memcache.set(util.cached_properties_key(team_id), from_db)
    return from_db
def update_cached_properties(klass, org_id):
    """If we find the org in the db, query for rel counts and cache.

    Args:
        org_id: str, uid of the organization to refresh.

    Returns:
        The dict of properties fetched from the db, or a falsy value if
        the org wasn't found (in which case nothing is cached).
    """
    # Use klass rather than hard-coding Organization so subclasses
    # refresh through their own get_cached_properties_from_db.
    from_db = klass.get_cached_properties_from_db(org_id)
    if from_db:
        memcache.set(util.cached_properties_key(org_id), from_db)
    return from_db
def update_cached_properties(self):
    """Refresh this entity's cached properties in memcache.

    Returns whatever the db reports; only truthy results are cached.
    """
    fresh = self.get_cached_properties_from_db()
    if fresh:
        memcache.set(util.cached_properties_key(self.uid), fresh)
    return fresh
def get_cached_properties(self):
    """Get cached properties, defaulting to the db if necessary."""
    cached = memcache.get(util.cached_properties_key(self.uid))
    # A miss falls through to the db, which also re-warms the cache.
    return cached if cached else self.update_cached_properties()
def update_cached_properties(klass, classroom_id):
    """If we find the classroom in the db, query rel counts and cache.

    Args:
        classroom_id: str, uid of the classroom to refresh.

    Returns:
        The dict of properties fetched from the db, or a falsy value if
        the classroom wasn't found (in which case nothing is cached).
    """
    # Use klass rather than hard-coding Classroom so subclasses refresh
    # through their own get_cached_properties_from_db.
    from_db = klass.get_cached_properties_from_db(classroom_id)
    if from_db:
        memcache.set(util.cached_properties_key(classroom_id), from_db)
    return from_db
def pc_collection_resolver(root, info, **kwargs):
    """Query project cohorts with lots of associated data and agressive
    caching.

    Can be called three ways, based on what's in kwargs:

    1. 'user_id' - /api/users/X/dashboard, all pc's owned by a user
    2. 'organization_id' - /api/organizations/X/dashboard, all pc's in an
       org
    3. 'program_label' and 'cohort_label' - /api/dashboard, all pc's in
       that cohort.
    """
    # Most kwargs can be passed directly to get(), but if this is a query for
    # project cohorts owned by a certain user, there's one extra step:
    # translate the user into the set of orgs they own.
    user_id = kwargs.pop('user_id', None)
    if user_id:
        user = model.User.get_by_id(user_id)
        if not user.owned_organizations:
            # A user with no orgs can't own any project cohorts.
            return []
        # Presumably get() accepts a list here and matches any of the
        # orgs — TODO confirm against model.ProjectCohort.get.
        kwargs['organization_id'] = user.owned_organizations

    util.profiler.add_event("querying pcs")
    pcs = list(model.ProjectCohort.get(n=default_n, **kwargs))

    # Get cached properties for these project cohorts all at once. See
    # graphql_util.resolve_client_prop().
    pcs_by_memkey = {util.cached_properties_key(pc.uid): pc for pc in pcs}
    # memcache.get_multi only returns results for keys it finds; those it
    # doesn't find are missing (rather than being set to None)
    mem_results = memcache.get_multi(pcs_by_memkey.keys())
    for memkey, props in mem_results.items():
        pc = pcs_by_memkey[memkey]
        pc._cached_properties = props

    # Go to the db for pc's which didn't have memcache results. Refresh
    # memcache all at once with the results.
    uncached = {
        memkey: pc for memkey, pc in pcs_by_memkey.items()
        if memkey not in mem_results.keys()
    }
    util.profiler.add_event("batch cached pc props")
    cached_pc_props_by_id = model.ProjectCohort.batch_cached_properties_from_db(
        project_cohorts=uncached.values())
    # Inform memcache about all the properties we fetched from the db.
    to_set = {}
    for memkey, pc in uncached.items():
        props = cached_pc_props_by_id[pc.uid]
        pc._cached_properties = props
        to_set[memkey] = props
    memcache.set_multi(to_set)

    # Projects have their own cached properties which we can also batch, with
    # data that's already been fetched. By this point every pc has
    # _cached_properties set (either from memcache or the db above), so
    # the 'project'/'organization' lookups below are safe.
    util.profiler.add_event("batch cached project props")
    cached_project_props_by_id = model.Project.batch_cached_properties_from_db(
        projects=[pc._cached_properties['project'] for pc in pcs],
        organizations=[pc._cached_properties['organization'] for pc in pcs],
    )
    for pc in pcs:
        props = cached_project_props_by_id[pc.project_id]
        # Attach project-level cached props to the nested project entity.
        pc._cached_properties['project']._cached_properties = props

    util.profiler.add_event("handing off to graphql")
    return pcs