def __init__(self, user, restore_id="", version=V1, state_hash="",
             items=False, stock_settings=None, domain=None, force_cache=False,
             cache_timeout=None, overwrite_cache=False):
    """Configuration for a single OTA restore request.

    Stock settings are resolved in priority order: the explicit
    ``stock_settings`` argument, then the domain's commtrack settings,
    then a default ``StockSettings`` instance.
    """
    self.user = user
    self.restore_id = restore_id
    self.version = version
    self.state_hash = state_hash
    self.items = items
    self.domain = domain
    self.force_cache = force_cache
    self.cache_timeout = cache_timeout or INITIAL_SYNC_CACHE_TIMEOUT
    self.overwrite_cache = overwrite_cache
    self.cache = get_redis_default_cache()

    if stock_settings:
        self.stock_settings = stock_settings
    elif domain and domain.commtrack_settings:
        self.stock_settings = domain.commtrack_settings.get_ota_restore_settings()
    else:
        self.stock_settings = StockSettings()

    # keep track of the number of batches (if any) for comparison in unit tests
    self.num_batches = None
def claim(request, domain):
    """
    Allows a user to claim a case that they don't own.
    """
    login_as = request.POST.get('commcare_login_as', None)
    restore_user = get_restore_user(domain, request.couch_user, login_as)
    redis_cache = get_redis_default_cache()

    case_id = request.POST.get('case_id', None)
    if case_id is None:
        return HttpResponse('A case_id is required', status=400)

    claim_key = _claim_key(restore_user.user_id)
    try:
        # a recently-claimed id in the cache short-circuits the db lookup
        already_claimed = (
            redis_cache.get(claim_key) == case_id
            or get_first_claim(domain, restore_user.user_id, case_id)
        )
        if already_claimed:
            return HttpResponse('You have already claimed that {}'.format(
                request.POST.get('case_type', 'case')), status=409)
        claim_case(
            domain, restore_user.user_id, case_id,
            host_type=request.POST.get('case_type'),
            host_name=request.POST.get('case_name'),
        )
    except CaseNotFound:
        return HttpResponse(
            'The case "{}" you are trying to claim was not found'.format(
                case_id),
            status=410)
    redis_cache.set(claim_key, case_id)
    return HttpResponse(status=200)
def tearDown(self):
    """Remove the search index, test fixtures, and any cached state."""
    ensure_index_deleted(CASE_SEARCH_INDEX)
    self.user.delete()
    self.domain.delete()
    # materialize first so we are not deleting while iterating the queryset
    for query_addition in list(CaseSearchQueryAddition.objects.all()):
        query_addition.delete()
    get_redis_default_cache().clear()
def has_cached_payload(sync_log, version, prefix=RESTORE_CACHE_KEY_PREFIX):
    """Return True when a (truthy) restore payload is cached for this sync log."""
    cache_key = restore_cache_key(
        sync_log.domain,
        prefix,
        sync_log.user_id,
        version=version,
        sync_log_id=sync_log._id,
    )
    return bool(get_redis_default_cache().get(cache_key))
def process_sms(message_id): """ message_id - _id of an SMSLog entry """ # Note that Redis error/exception notifications go out from the # run_sms_queue command, so no need to send them out here # otherwise we'd get too many emails. rcache = cache_core.get_redis_default_cache() if not isinstance(rcache, RedisCache): return try: client = rcache.raw_client except NotImplementedError: return utcnow = datetime.utcnow() # Prevent more than one task from processing this SMS, just in case # the message got enqueued twice. message_lock = get_lock(client, "sms-queue-processing-%s" % message_id) if message_lock.acquire(blocking=False): msg = SMSLog.get(message_id) if message_is_stale(msg, utcnow): set_error(msg, ERROR_MESSAGE_IS_STALE) message_lock.release() return if msg.direction == OUTGOING: domain_object = Domain.get_by_name(msg.domain, strict=True) if handle_domain_specific_delays(msg, domain_object, utcnow): message_lock.release() return requeue = False # Process inbound SMS from a single contact one at a time recipient_block = msg.direction == INCOMING if (isinstance(msg.processed, bool) and not msg.processed and not msg.error and msg.datetime_to_process < utcnow): if recipient_block: recipient_lock = get_lock(client, "sms-queue-recipient-phone-%s" % msg.phone_number) recipient_lock.acquire(blocking=True) if msg.direction == OUTGOING: requeue = handle_outgoing(msg) elif msg.direction == INCOMING: handle_incoming(msg) else: set_error(msg, ERROR_INVALID_DIRECTION) if recipient_block: recipient_lock.release() message_lock.release() if requeue: process_sms.delay(message_id)
def process_sms(message_id): """ message_id - _id of an SMSLog entry """ # Note that Redis error/exception notifications go out from the # run_sms_queue command, so no need to send them out here # otherwise we'd get too many emails. rcache = cache_core.get_redis_default_cache() if not isinstance(rcache, RedisCache): return try: client = rcache.raw_client except NotImplementedError: return utcnow = datetime.utcnow() # Prevent more than one task from processing this SMS, just in case # the message got enqueued twice. message_lock = get_lock(client, "sms-queue-processing-%s" % message_id) if message_lock.acquire(blocking=False): msg = SMSLog.get(message_id) if message_is_stale(msg, utcnow): msg.set_system_error(ERROR_MESSAGE_IS_STALE) message_lock.release() return if msg.direction == OUTGOING: domain_object = Domain.get_by_name(msg.domain, strict=True) if handle_domain_specific_delays(msg, domain_object, utcnow): message_lock.release() return requeue = False # Process inbound SMS from a single contact one at a time recipient_block = msg.direction == INCOMING if (isinstance(msg.processed, bool) and not msg.processed and not msg.error and msg.datetime_to_process < utcnow): if recipient_block: recipient_lock = get_lock(client, "sms-queue-recipient-phone-%s" % msg.phone_number) recipient_lock.acquire(blocking=True) if msg.direction == OUTGOING: requeue = handle_outgoing(msg) elif msg.direction == INCOMING: handle_incoming(msg) else: msg.set_system_error(ERROR_INVALID_DIRECTION) if recipient_block: recipient_lock.release() message_lock.release() if requeue: process_sms.delay(message_id)
def get_redis_client(self):
    """Return the raw redis client behind the default cache.

    Raises:
        RedisClientError: if the default cache is not redis-backed, or the
            raw client cannot be obtained from it.
    """
    rcache = cache_core.get_redis_default_cache()
    if not isinstance(rcache, RedisCache):
        raise RedisClientError("Could not get redis connection.")
    try:
        client = rcache.raw_client
    except Exception as err:
        # was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception lets those propagate, and chaining
        # preserves the original cause for debugging
        raise RedisClientError("Could not get redis connection.") from err
    return client
def __init__(self, user, restore_id="", version=V1, state_hash="",
             caching_enabled=False, items=False, stock_settings=None):
    """Restore configuration; falls back to a default ``StockSettings``."""
    self.user = user
    self.restore_id = restore_id
    self.version = version
    self.state_hash = state_hash
    self.caching_enabled = caching_enabled
    self.items = items
    self.stock_settings = stock_settings if stock_settings else StockSettings()
    self.cache = get_redis_default_cache()
def _restore_config(self, is_async=True, sync_log_id='', overwrite_cache=False):
    """Build a RestoreConfig for tests; the redis cache is cleared on cleanup."""
    self.addCleanup(get_redis_default_cache().clear)
    cache_settings = RestoreCacheSettings(overwrite_cache=overwrite_cache)
    return RestoreConfig(
        project=self.project,
        restore_user=self.user,
        params=RestoreParams(sync_log_id=sync_log_id, version=V2),
        cache_settings=cache_settings,
        is_async=is_async,
    )
def test_restore_caches_cleared(self):
    """Submitting a form for a user invalidates that user's restore cache."""
    cache = get_redis_default_cache()
    cache_key = restore_cache_key(RESTORE_CACHE_KEY_PREFIX, 'user_id', version="2.0")
    cache.set(cache_key, 'test-thing')
    self.assertEqual(cache.get(cache_key), 'test-thing')
    form_xml = """
        <data xmlns="http://openrosa.org/formdesigner/blah">
            <meta>
                <userID>{user_id}</userID>
            </meta>
        </data>
    """
    submit_form_locally(form_xml.format(user_id='user_id'), DOMAIN)
    self.assertIsNone(cache.get(cache_key))
def __init__(self, project=None, user=None, params=None, cache_settings=None):
    """Wire restore configuration together from project, user, and settings.

    ``params`` and ``cache_settings`` default to fresh instances when not
    supplied; the domain name is derived from the project when present.
    """
    self.project = project
    self.domain = project.name if project else ''
    self.user = user
    self.params = params or RestoreParams()
    self.cache_settings = cache_settings or RestoreCacheSettings()

    self.version = self.params.version
    self.restore_state = RestoreState(self.project, self.user, self.params)

    # caching behaviour is driven entirely by the cache settings object
    self.force_cache = self.cache_settings.force_cache
    self.cache_timeout = self.cache_settings.cache_timeout
    self.overwrite_cache = self.cache_settings.overwrite_cache
    self.cache = get_redis_default_cache()
def __init__(self, user, restore_id="", version=V1, state_hash="",
             items=False, stock_settings=None, domain=None, force_cache=False,
             cache_timeout=None, overwrite_cache=False):
    """Restore configuration with cache controls and a stock-settings default."""
    self.user = user
    self.restore_id = restore_id
    self.version = version
    self.state_hash = state_hash
    self.items = items
    self.domain = domain

    # stock settings default to a plain StockSettings instance
    self.stock_settings = stock_settings if stock_settings else StockSettings()

    # cache controls
    self.force_cache = force_cache
    self.cache_timeout = cache_timeout or INITIAL_SYNC_CACHE_TIMEOUT
    self.overwrite_cache = overwrite_cache
    self.cache = get_redis_default_cache()
def get_redis_client():
    """Return the raw client behind the default redis cache."""
    cache = cache_core.get_redis_default_cache()
    if isinstance(cache, RedisCache):
        return cache.raw_client
    raise Exception("Could not get redis client. Is redis down?")
def handle(self, *args, **options):
    """Set the celery deploy-in-progress flag, expiring after five minutes."""
    five_minutes = 5 * 60
    get_redis_default_cache().set(
        CELERY_DEPLOY_IN_PROGRESS_FLAG, True, timeout=five_minutes)
def get_value(self):
    """Fetch the value stored under this cache key."""
    logger.debug('getting {}'.format(self.debug_info))
    cache = get_redis_default_cache()
    return cache.get(self.cache_key)
def invalidate(self):
    """Delete this key from the cache."""
    logger.debug('invalidating {}'.format(self.debug_info))
    cache = get_redis_default_cache()
    cache.delete(self.cache_key)
delete_all_sync_logs() delete_all_users() super(BaseAsyncRestoreTest, cls).tearDownClass() def _restore_config(self, async=True, sync_log_id='', overwrite_cache=False): restore_config = RestoreConfig( project=self.project, restore_user=self.user, params=RestoreParams(sync_log_id=sync_log_id, version=V2), cache_settings=RestoreCacheSettings( overwrite_cache=overwrite_cache), is_async=async) self.addCleanup(get_redis_default_cache().clear) return restore_config class AsyncRestoreTestCouchOnly(BaseAsyncRestoreTest): @mock.patch('casexml.apps.phone.restore.get_async_restore_payload') def test_regular_restore_doesnt_start_task(self, task): """ when the feature flag is off, the celery task does not get called """ self._restore_config(async=False).get_payload() self.assertFalse(task.delay.called) @mock.patch('casexml.apps.phone.restore.get_async_restore_payload') def test_first_async_restore_kicks_off_task(self, task): delay = mock.MagicMock()
from redis_cache.exceptions import ConnectionInterrumped from django.conf import settings import django.core.exceptions from dimagi.utils.couch.cache import cache_core rcache = cache_core.get_redis_default_cache() ############################################################################################################ from corehq.apps.users.models import CouchUser, PublicUser, InvalidUser from corehq.apps.domain.models import Domain SESSION_USER_KEY_PREFIX = "session_user_doc_%s" class UsersMiddleware(object): def __init__(self): # Normally we'd expect this class to be pulled out of the middleware list, too, # but in case someone forgets, this will stop this class from being used. found_domain_app = False for app_name in settings.INSTALLED_APPS: if app_name == "users" or app_name.endswith(".users"): found_domain_app = True break if not found_domain_app: raise django.core.exceptions.MiddlewareNotUsed #def process_request(self, request): def process_view(self, request, view_func, view_args, view_kwargs): if 'domain' in view_kwargs: request.domain = view_kwargs['domain'] if 'org' in view_kwargs:
def handle(self, **options):
    """Set the deploy-in-progress flag, expiring after five minutes."""
    flag_timeout = 5 * 60
    get_redis_default_cache().set(
        DEPLOY_IN_PROGRESS_FLAG, True, timeout=flag_timeout)
def set_value(self, value, timeout=None):
    """Store *value* under this cache key; timeout defaults to ``self.timeout``."""
    logger.debug('setting {}'.format(self.debug_info))
    effective_timeout = self.timeout if timeout is None else timeout
    get_redis_default_cache().set(self.cache_key, value, timeout=effective_timeout)
def is_deploy_in_progress():
    """Return True while the deploy flag is present in the cache."""
    return get_redis_default_cache().get(DEPLOY_IN_PROGRESS_FLAG) is not None
def _cache(self):
    """The shared default redis cache backend."""
    return get_redis_default_cache()
delete_all_cases() delete_all_sync_logs() delete_all_users() super(BaseAsyncRestoreTest, cls).tearDownClass() def _restore_config(self, async=True, sync_log_id='', overwrite_cache=False): restore_config = RestoreConfig( project=self.project, restore_user=self.user, params=RestoreParams(sync_log_id=sync_log_id, version=V2), cache_settings=RestoreCacheSettings( overwrite_cache=overwrite_cache ), is_async=async ) self.addCleanup(get_redis_default_cache().clear) return restore_config class AsyncRestoreTestCouchOnly(BaseAsyncRestoreTest): @mock.patch('casexml.apps.phone.restore.get_async_restore_payload') def test_regular_restore_doesnt_start_task(self, task): """ when the feature flag is off, the celery task does not get called """ self._restore_config(async=False).get_payload() self.assertFalse(task.delay.called) @mock.patch('casexml.apps.phone.restore.get_async_restore_payload') def test_first_async_restore_kicks_off_task(self, task): delay = mock.MagicMock()
def exists(self):
    """Return True if this cache key is currently present."""
    logger.debug('if exists {}'.format(self.debug_info))
    cache = get_redis_default_cache()
    return self.cache_key in cache
def tearDown(self):
    """Wipe forms, cases, sync logs, and the redis cache between tests."""
    delete_all_xforms()
    delete_all_cases()
    delete_all_sync_logs()
    get_redis_default_cache().clear()
    super(BaseOtaRestoreTest, self).tearDown()
def is_deploy_in_progress():
    """Return True while the celery deploy flag is present in the cache."""
    return get_redis_default_cache().get(CELERY_DEPLOY_IN_PROGRESS_FLAG) is not None
def tearDown(self):
    """Remove the search index, test user/domain, and any cached state."""
    ensure_index_deleted(CASE_SEARCH_INDEX)
    self.user.delete(deleted_by=None)
    self.domain.delete()
    get_redis_default_cache().clear()