def process_pillow_retry(error_doc_id):
    """Retry the change recorded in a ``PillowError`` row.

    Takes a per-error Redis lock so at most one task works on the error,
    replays the change through its pillow, then either deletes the error
    row (success) or records another failed attempt on it.

    :param error_doc_id: primary key of the ``PillowError`` row to retry.
    """
    # Redis error logged in get_redis_client
    try:
        client = cache_core.get_redis_client()
    except cache_core.RedisClientError:
        # Without Redis we cannot take the lock, so give up quietly.
        return

    # Prevent more than one task from processing this error, just in case
    # it got enqueued twice.
    lock = client.lock(
        "pillow-retry-processing-%s" % error_doc_id,
        # Setting is in minutes; the lock API takes seconds.
        timeout=settings.PILLOW_RETRY_PROCESSING_LOCK_TIMEOUT*60
    )
    if lock.acquire(blocking=False):
        try:
            error_doc = PillowError.objects.get(id=error_doc_id)
        except PillowError.DoesNotExist:
            # Row already gone (e.g. processed elsewhere) — release and stop.
            release_lock(lock, True)
            return

        pillow_name_or_class = error_doc.pillow
        try:
            pillow = get_pillow_by_name(pillow_name_or_class)
        except PillowNotFoundError:
            pillow = None

        if not pillow:
            # Unknown pillow: alert, then push total_attempts past the
            # cutoff — presumably this takes the error out of the retry
            # rotation so it can be cleaned up later (TODO confirm).
            notify_error((
                "Could not find pillowtop class '%s' while attempting a retry. "
                "If this pillow was recently deleted then this will be automatically cleaned up eventually. "
                "If not, then this should be looked into."
            ) % pillow_name_or_class)
            try:
                error_doc.total_attempts = PillowError.multi_attempts_cutoff() + 1
                error_doc.save()
            finally:
                # Release even if save() raises.
                release_lock(lock, True)
            return

        change = error_doc.change_object
        try:
            # When the change carries metadata, attach a document store
            # derived from it before replaying the change.
            change_metadata = change.metadata
            if change_metadata:
                document_store = get_document_store(
                    data_source_type=change_metadata.data_source_type,
                    data_source_name=change_metadata.data_source_name,
                    domain=change_metadata.domain
                )
                change.document_store = document_store
            pillow.process_change(change)
        except Exception:
            # Any failure counts as another attempt; mark the error as no
            # longer queued so it can be picked up again later.
            # (ex_type is unpacked but unused.)
            ex_type, ex_value, ex_tb = sys.exc_info()
            error_doc.add_attempt(ex_value, ex_tb)
            error_doc.queued = False
            error_doc.save()
        else:
            # Success: the error row has served its purpose.
            error_doc.delete()
        finally:
            release_lock(lock, True)
def process_pillow_retry(error_doc_id):
    """Retry the change recorded in a ``PillowError`` row.

    Takes a per-error Redis lock so at most one task works on the error,
    replays the change through its pillow, then either deletes the error
    row (success) or records another failed attempt on it.

    :param error_doc_id: primary key of the ``PillowError`` row to retry.
    """
    # Redis error logged in get_redis_client
    try:
        client = cache_core.get_redis_client()
    except cache_core.RedisClientError:
        # Without Redis we cannot take the lock, so give up quietly.
        return

    # Prevent more than one task from processing this error, just in case
    # it got enqueued twice.
    # NOTE: the timeout setting is in minutes; the lock API takes seconds.
    lock = client.lock("pillow-retry-processing-%s" % error_doc_id, timeout=settings.PILLOW_RETRY_PROCESSING_LOCK_TIMEOUT * 60)
    if lock.acquire(blocking=False):
        try:
            error_doc = PillowError.objects.get(id=error_doc_id)
        except PillowError.DoesNotExist:
            # Row already gone (e.g. processed elsewhere) — release and stop.
            release_lock(lock, True)
            return

        pillow_name_or_class = error_doc.pillow
        try:
            pillow = get_pillow_by_name(pillow_name_or_class)
        except PillowNotFoundError:
            pillow = None

        if not pillow:
            # Unknown pillow: alert, then push total_attempts past the
            # cutoff — presumably this takes the error out of the retry
            # rotation so it can be cleaned up later (TODO confirm).
            notify_error((
                "Could not find pillowtop class '%s' while attempting a retry. "
                "If this pillow was recently deleted then this will be automatically cleaned up eventually. "
                "If not, then this should be looked into.") % pillow_name_or_class)
            try:
                error_doc.total_attempts = PillowError.multi_attempts_cutoff() + 1
                error_doc.save()
            finally:
                # Release even if save() raises.
                release_lock(lock, True)
            return

        change = error_doc.change_object
        try:
            try:
                # Retries for ConfigurableReportKafkaPillow are deliberately
                # rejected; the Exception raised here is caught by the outer
                # handler and recorded as another failed attempt.
                from corehq.apps.userreports.pillow import ConfigurableReportKafkaPillow
                if isinstance(pillow, ConfigurableReportKafkaPillow):
                    raise Exception('this is temporarily not supported!')
            except ImportError:
                # userreports app unavailable — skip the special-case check.
                pass
            pillow.process_change(change)
        except Exception:
            # Any failure counts as another attempt; mark the error as no
            # longer queued so it can be picked up again later.
            # (ex_type is unpacked but unused.)
            ex_type, ex_value, ex_tb = sys.exc_info()
            error_doc.add_attempt(ex_value, ex_tb)
            error_doc.queued = False
            error_doc.save()
        else:
            # Success: the error row has served its purpose.
            error_doc.delete()
        finally:
            release_lock(lock, True)
def test_pillow_not_found(self):
    """A retry for a pillow class that no longer exists must not raise;
    it should instead push the error past the retry cutoff."""
    missing = PillowError.objects.create(
        doc_id='missing-pillow',
        pillow='badmodule.NotARealPillow',
        date_created=datetime.utcnow(),
        date_last_attempt=datetime.utcnow(),
    )

    # make sure this doesn't error
    process_pillow_retry(missing.id)

    # and that its total_attempts was bumped above the threshold
    refreshed = PillowError.objects.get(pk=missing.pk)
    cutoff = PillowError.multi_attempts_cutoff()
    self.assertTrue(refreshed.total_attempts > cutoff)
def test_pillow_not_found(self):
    """A retry for an unknown pillow name must not raise; it should
    instead push the error past the retry cutoff."""
    missing = PillowError.objects.create(
        doc_id='missing-pillow',
        pillow='NotARealPillow',
        date_created=datetime.utcnow(),
        date_last_attempt=datetime.utcnow(),
    )

    # make sure this doesn't error
    process_pillow_retry(missing.id)

    # and that its total_attempts was bumped above the threshold
    refreshed = PillowError.objects.get(pk=missing.pk)
    cutoff = PillowError.multi_attempts_cutoff()
    self.assertTrue(refreshed.total_attempts > cutoff)
def test_bulk_reset_cutoff(self):
    """Errors pushed past the attempt cutoff become processable again
    after a bulk reset."""
    # Seed three errors; push the last two past the retry cutoff.
    for index in range(0, 3):
        err = create_error({'id': index}, attempts=1)
        if index >= 1:
            err.total_attempts = PillowError.multi_attempts_cutoff() + 1
            err.save()

    # Nothing is eligible before the reset.
    pending = PillowError.get_errors_to_process(datetime.utcnow()).all()
    self.assertEqual(len(pending), 0)

    # After the bulk reset, the two over-cutoff errors are eligible again.
    PillowError.bulk_reset_attempts(datetime.utcnow())
    pending = PillowError.get_errors_to_process(datetime.utcnow()).all()
    self.assertEqual(len(pending), 2)
def test_bulk_reset_cutoff(self):
    """Duplicate of the bulk-reset scenario: over-cutoff errors become
    processable again after ``bulk_reset_attempts``."""
    # Three errors with one attempt each; all but the first are pushed
    # past the retry cutoff.
    for n in range(0, 3):
        pillow_error = create_error({'id': n}, attempts=1)
        if n >= 1:
            pillow_error.total_attempts = PillowError.multi_attempts_cutoff() + 1
            pillow_error.save()

    # Before the reset nothing qualifies for processing.
    self.assertEqual(len(PillowError.get_errors_to_process(datetime.utcnow()).all()), 0)

    PillowError.bulk_reset_attempts(datetime.utcnow())

    # The reset makes the two over-cutoff errors eligible again.
    self.assertEqual(len(PillowError.get_errors_to_process(datetime.utcnow()).all()), 2)
def process_pillow_retry(error_doc_id):
    """Retry the change recorded in a ``PillowError`` row (lock-free variant).

    Replays the change through its pillow, then either deletes the error
    row (success) or records another failed attempt on it.

    :param error_doc_id: primary key of the ``PillowError`` row to retry.
    """
    try:
        error_doc = PillowError.objects.get(id=error_doc_id)
    except PillowError.DoesNotExist:
        # Row already gone (e.g. processed elsewhere) — nothing to do.
        return

    pillow_name_or_class = error_doc.pillow
    try:
        pillow = get_pillow_by_name(pillow_name_or_class)
    except PillowNotFoundError:
        pillow = None

    if not pillow:
        # Unknown pillow: alert, then push total_attempts past the cutoff —
        # presumably this takes the error out of the retry rotation so it
        # can be cleaned up later (TODO confirm).
        notify_error((
            "Could not find pillowtop class '%s' while attempting a retry. "
            "If this pillow was recently deleted then this will be automatically cleaned up eventually. "
            "If not, then this should be looked into.") % pillow_name_or_class)
        # BUG FIX: this was previously wrapped in ``try: ... finally: return``,
        # which silently swallowed any exception raised by save().  A plain
        # return preserves the control flow while letting failures surface.
        error_doc.total_attempts = PillowError.multi_attempts_cutoff() + 1
        error_doc.save()
        return

    change = error_doc.change_object
    try:
        # When the change carries metadata, attach a document store derived
        # from it before replaying the change.
        change_metadata = change.metadata
        if change_metadata:
            document_store = get_document_store(
                data_source_type=change_metadata.data_source_type,
                data_source_name=change_metadata.data_source_name,
                domain=change_metadata.domain)
            change.document_store = document_store
        pillow.process_change(change)
    except Exception as ex:
        # Record the failed attempt so the retry machinery can back off.
        # (Replaces an unused sys.exc_info() unpacking with the idiomatic
        # ``except ... as`` form; ex.__traceback__ is the same traceback.)
        error_doc.add_attempt(ex, ex.__traceback__)
        error_doc.save()
    else:
        # Success: the error row has served its purpose.
        error_doc.delete()
def process_pillow_retry(error_doc_id):
    """Retry the change recorded in a ``PillowError`` row.

    Takes a per-error Redis lock so at most one task works on the error,
    resolves the pillow (with a legacy-name fallback), replays the change,
    then either deletes the error row (success) or records another failed
    attempt on it.

    :param error_doc_id: primary key of the ``PillowError`` row to retry.
    """
    # Redis error logged in get_redis_client
    try:
        client = cache_core.get_redis_client()
    except cache_core.RedisClientError:
        # Without Redis we cannot take the lock, so give up quietly.
        return

    # Prevent more than one task from processing this error, just in case
    # it got enqueued twice.
    lock = client.lock(
        "pillow-retry-processing-%s" % error_doc_id,
        # Setting is in minutes; the lock API takes seconds.
        timeout=settings.PILLOW_RETRY_PROCESSING_LOCK_TIMEOUT*60
    )
    if lock.acquire(blocking=False):
        try:
            error_doc = PillowError.objects.get(id=error_doc_id)
        except PillowError.DoesNotExist:
            # Row already gone (e.g. processed elsewhere) — release and stop.
            release_lock(lock, True)
            return

        pillow_name_or_class = error_doc.pillow
        try:
            pillow = get_pillow_by_name(pillow_name_or_class)
        except PillowNotFoundError:
            # Legacy path: flag the stale class name (outside unit tests)
            # and try importing it the old way.
            if not settings.UNIT_TESTING:
                _assert = soft_assert(to='@'.join(['czue', 'dimagi.com']))
                _assert(False, 'Pillow retry {} is still using legacy class {}'.format(
                    error_doc.pk, pillow_name_or_class
                ))
            pillow = _try_legacy_import(pillow_name_or_class)

        if not pillow:
            # Unknown pillow: alert, then push total_attempts past the
            # cutoff — presumably this takes the error out of the retry
            # rotation so it can be cleaned up later (TODO confirm).
            notify_error((
                "Could not find pillowtop class '%s' while attempting a retry. "
                "If this pillow was recently deleted then this will be automatically cleaned up eventually. "
                "If not, then this should be looked into."
            ) % pillow_name_or_class)
            try:
                error_doc.total_attempts = PillowError.multi_attempts_cutoff() + 1
                error_doc.save()
            finally:
                # Release even if save() raises.
                release_lock(lock, True)
            return

        change = error_doc.change_object
        # Pillows that declare include_docs get the couch document loaded
        # onto the change; a missing doc is treated as a deletion.
        if getattr(pillow, 'include_docs', False):
            try:
                change.set_document(pillow.get_couch_db().open_doc(change.id))
            except ResourceNotFound:
                change.deleted = True

        try:
            try:
                # Retries for ConfigurableReportKafkaPillow are deliberately
                # rejected; the Exception raised here is caught by the outer
                # handler and recorded as another failed attempt.
                from corehq.apps.userreports.pillow import ConfigurableReportKafkaPillow
                if isinstance(pillow, ConfigurableReportKafkaPillow):
                    raise Exception('this is temporarily not supported!')
            except ImportError:
                # userreports app unavailable — skip the special-case check.
                pass
            pillow.process_change(change, is_retry_attempt=True)
        except Exception:
            # Any failure counts as another attempt; mark the error as no
            # longer queued so it can be picked up again later.
            # (ex_type is unpacked but unused.)
            ex_type, ex_value, ex_tb = sys.exc_info()
            error_doc.add_attempt(ex_value, ex_tb)
            error_doc.queued = False
            error_doc.save()
        else:
            # Success: the error row has served its purpose.
            error_doc.delete()
        finally:
            release_lock(lock, True)
def process_pillow_retry(error_doc_id):
    """Retry the change recorded in a ``PillowError`` row.

    Takes a per-error Redis lock so at most one task works on the error,
    resolves the pillow (with a name-based fallback for fluff pillows),
    replays the change, then either deletes the error row (success) or
    records another failed attempt on it.

    :param error_doc_id: primary key of the ``PillowError`` row to retry.
    """
    # Redis error logged in get_redis_client
    try:
        client = cache_core.get_redis_client()
    except cache_core.RedisClientError:
        # Without Redis we cannot take the lock, so give up quietly.
        return

    # Prevent more than one task from processing this error, just in case
    # it got enqueued twice.
    lock = client.lock(
        "pillow-retry-processing-%s" % error_doc_id,
        # Setting is in minutes; the lock API takes seconds.
        timeout=settings.PILLOW_RETRY_PROCESSING_LOCK_TIMEOUT*60
    )
    if lock.acquire(blocking=False):
        try:
            error_doc = PillowError.objects.get(id=error_doc_id)
        except PillowError.DoesNotExist:
            # BUG FIX: this path previously returned WITHOUT releasing the
            # lock, leaving it held until its timeout expired (every other
            # early-exit path in this function releases it).
            release_lock(lock, True)
            return

        pillow_class = error_doc.pillow
        try:
            pillow = get_pillow_instance(pillow_class)
        except ValueError:
            # all fluff pillows have module path of 'fluff' so can't be imported directly
            _, pillow_class_name = pillow_class.rsplit('.', 1)
            try:
                pillow = get_pillow_by_name(pillow_class_name)
            except PillowNotFoundError:
                pillow = None

        if not pillow:
            # Unknown pillow: alert, then push total_attempts past the
            # cutoff — presumably this takes the error out of the retry
            # rotation so it can be cleaned up later (TODO confirm).
            notify_error((
                "Could not find pillowtop class '%s' while attempting a retry. "
                "If this pillow was recently deleted then this will be automatically cleaned up eventually. "
                "If not, then this should be looked into."
            ) % pillow_class)
            try:
                error_doc.total_attempts = PillowError.multi_attempts_cutoff() + 1
                error_doc.save()
            finally:
                # Release even if save() raises.
                release_lock(lock, True)
            return

        change = error_doc.change_object
        # Pillows that declare include_docs get the couch document loaded
        # onto the change; a missing doc is treated as a deletion.
        if pillow.include_docs:
            try:
                change.set_document(pillow.get_couch_db().open_doc(change.id))
            except ResourceNotFound:
                change.deleted = True

        try:
            try:
                # Retries for ConfigurableIndicatorPillow are deliberately
                # rejected; the Exception raised here is caught by the outer
                # handler and recorded as another failed attempt.
                from corehq.apps.userreports.pillow import ConfigurableIndicatorPillow
                if isinstance(pillow, ConfigurableIndicatorPillow):
                    raise Exception('this is temporarily not supported!')
            except ImportError:
                # userreports app unavailable — skip the special-case check.
                pass
            pillow.process_change(change, is_retry_attempt=True)
        except Exception:
            # Any failure counts as another attempt; mark the error as no
            # longer queued so it can be picked up again later.
            ex_type, ex_value, ex_tb = sys.exc_info()
            error_doc.add_attempt(ex_value, ex_tb)
            error_doc.queued = False
            error_doc.save()
        else:
            # Success: the error row has served its purpose.
            error_doc.delete()
        finally:
            release_lock(lock, True)