def post_delete(self, instance, **kwargs):
    """Invalidate the relay config cache when an organization option is deleted,
    then refresh the local organization-option cache."""
    schedule_update_config_cache(
        generate=False,
        organization_id=instance.organization_id,
        update_reason="organizationoption.post_delete",
    )
    self.reload_cache(instance.organization_id)
def test_generate(
    monkeypatch,
    default_project,
    default_organization,
    default_projectkey,
    task_runner,
    entire_organization,
    redis_cache,
):
    """Generating a config (per project or per organization) populates the
    redis cache with the expected project payload, including public keys."""
    assert not redis_cache.get(default_project.id)

    if entire_organization:
        kwargs = {"organization_id": default_organization.id}
    else:
        kwargs = {"project_id": default_project.id}

    with task_runner():
        schedule_update_config_cache(generate=True, **kwargs)

    cfg = redis_cache.get(default_project.id)
    assert cfg["organizationId"] == default_organization.id
    assert cfg["projectId"] == default_project.id
    assert cfg["publicKeys"] == [
        {
            "publicKey": default_projectkey.public_key,
            "isEnabled": True,
            "numericId": default_projectkey.id,
            "quotas": [],
        }
    ]
def test_no_cache(monkeypatch, default_project):
    """No task may be enqueued when scheduling is a no-op: patch
    ``apply_async`` so any call fails the test."""

    def fail_if_called(*args, **kwargs):
        assert False

    monkeypatch.setattr(
        "sentry.tasks.relay.update_config_cache.apply_async", fail_if_called
    )
    schedule_update_config_cache(generate=True, project_id=default_project.id)
def reload_cache(self, project_id, update_reason):
    """Refresh cached option values for *project_id* and return them.

    A relay config rebuild is scheduled unless the reload was triggered by a
    plain read (``projectoption.get_all_values``), which must not cause churn.
    """
    if update_reason != "projectoption.get_all_values":
        schedule_update_config_cache(
            project_id=project_id, generate=True, update_reason=update_reason
        )
    key = self._make_key(project_id)
    values = {opt.key: opt.value for opt in self.filter(project=project_id)}
    cache.set(key, values)
    self._option_cache[key] = values
    return values
def reload_cache(self, project_id, update_reason):
    """Reload option values for *project_id* into both cache layers.

    Schedules a relay config cache rebuild, then re-reads all options for the
    project from the database and stores them in the external cache and the
    in-process ``_option_cache``.

    Returns the freshly loaded ``{key: value}`` mapping.
    """
    schedule_update_config_cache(
        project_id=project_id, generate=True, update_reason=update_reason
    )
    cache_key = self._make_key(project_id)
    # Dict comprehension instead of dict(generator) — clearer and faster (C402).
    result = {i.key: i.value for i in self.filter(project=project_id)}
    cache.set(cache_key, result)
    self._option_cache[cache_key] = result
    return result
def reload_cache(self, organization_id, update_reason):
    """Refresh cached option values for *organization_id* and return them.

    A relay config cache invalidation is scheduled unless the reload came
    from a plain read (``organizationoption.get_all_values``).
    """
    if update_reason != "organizationoption.get_all_values":
        schedule_update_config_cache(
            organization_id=organization_id,
            generate=False,
            update_reason=update_reason,
        )
    key = self._make_key(organization_id)
    values = {opt.key: opt.value for opt in self.filter(organization=organization_id)}
    cache.set(key, values)
    self._option_cache[key] = values
    return values
def reload_cache(self, organization_id, update_reason):
    """Reload option values for *organization_id* into both cache layers.

    Schedules a relay config cache invalidation, then re-reads all options
    for the organization from the database and stores them in the external
    cache and the in-process ``_option_cache``.

    Returns the freshly loaded ``{key: value}`` mapping.
    """
    schedule_update_config_cache(
        organization_id=organization_id, generate=False, update_reason=update_reason
    )
    cache_key = self._make_key(organization_id)
    # Dict comprehension instead of dict(generator) — clearer and faster (C402).
    result = {i.key: i.value for i in self.filter(organization=organization_id)}
    cache.set(cache_key, result)
    self._option_cache[cache_key] = result
    return result
def test_debounce(monkeypatch, default_project, default_organization, redis_cache):
    """Back-to-back schedule calls for the same project/organization are
    debounced: only the first call per scope actually enqueues a task."""
    scheduled = []

    def capture(args, kwargs):
        assert not args
        scheduled.append(kwargs)

    monkeypatch.setattr("sentry.tasks.relay.update_config_cache.apply_async", capture)

    schedule_update_config_cache(generate=True, project_id=default_project.id)
    schedule_update_config_cache(generate=False, project_id=default_project.id)

    schedule_update_config_cache(generate=True, organization_id=default_organization.id)
    schedule_update_config_cache(generate=False, organization_id=default_organization.id)

    assert scheduled == [
        {
            "generate": True,
            "project_id": default_project.id,
            "organization_id": None,
            "update_reason": None,
        },
        {
            "generate": True,
            "project_id": None,
            "organization_id": default_organization.id,
            "update_reason": None,
        },
    ]
def test_invalidate(
    monkeypatch,
    default_project,
    default_organization,
    task_runner,
    entire_organization,
    redis_cache,
):
    """Scheduling with ``generate=False`` removes any previously cached
    config for the project (whether keyed per project or per organization)."""
    cfg = {"foo": "bar"}
    redis_cache.set_many({default_project.id: cfg})
    assert redis_cache.get(default_project.id) == cfg

    kwargs = (
        {"organization_id": default_organization.id}
        if entire_organization
        else {"project_id": default_project.id}
    )

    with task_runner():
        schedule_update_config_cache(generate=False, **kwargs)

    assert not redis_cache.get(default_project.id)
def test_generate(
    monkeypatch,
    default_project,
    default_organization,
    task_runner,
    entire_organization,
    redis_cache,
):
    """Generating a config (per project or per organization) populates the
    redis cache with the project's identifiers."""
    assert not redis_cache.get(default_project.id)

    if entire_organization:
        kwargs = {"organization_id": default_organization.id}
    else:
        kwargs = {"project_id": default_project.id}

    with task_runner():
        schedule_update_config_cache(generate=True, **kwargs)

    cfg = redis_cache.get(default_project.id)
    assert cfg["organizationId"] == default_organization.id
    assert cfg["projectId"] == default_project.id
def post_delete(self, instance, **kwargs):
    """Schedule a relay config rebuild when a project key is deleted."""
    schedule_update_config_cache(
        generate=True,
        project_id=instance.project_id,
        update_reason="projectkey.post_delete",
    )
def post_save(self, instance, **kwargs):
    """Schedule a relay config rebuild when a project key is saved.

    NOTE(review): this hook passes ``public_key=`` while the corresponding
    delete hook passes ``project_id=`` — confirm that
    ``schedule_update_config_cache`` accepts a public key in this version.
    """
    schedule_update_config_cache(
        generate=True,
        public_key=instance.public_key,
        update_reason="projectkey.post_save",
    )
def post_save(self, instance, **kwargs):
    """Schedule a relay config rebuild when a project option is saved, then
    refresh the local project-option cache."""
    schedule_update_config_cache(
        generate=True,
        project_id=instance.project_id,
        update_reason="projectoption.post_save",
    )
    self.reload_cache(instance.project_id)