def email_enterprise_report(domain, slug, couch_user):
    account = BillingAccount.get_account_by_domain(domain)
    report = EnterpriseReport.create(slug, account.id, couch_user)

    # Generate file
    csv_file = io.StringIO()
    writer = csv.writer(csv_file)
    writer.writerow(report.headers)
    writer.writerows(report.rows)

    # Store file in redis
    hash_id = uuid.uuid4().hex
    redis = get_redis_client()
    redis.set(hash_id, csv_file.getvalue())
    redis.expire(hash_id, 60 * 60 * 24)
    csv_file.close()

    # Send email
    url = absolute_reverse("enterprise_dashboard_download", args=[domain, report.slug, str(hash_id)])
    link = "<a href='{}'>{}</a>".format(url, url)
    subject = _("Enterprise Dashboard: {}").format(report.title)
    body = "The enterprise report you requested for the account {} is ready.<br>" \
           "You can download the data at the following link: {}<br><br>" \
           "Please remember that this link will only be active for 24 hours.".format(account.name, link)
    send_html_email_async(subject, couch_user.username, body)
def _build_indicators(indicator_config_id, relevant_ids):
    config = _get_config_by_id(indicator_config_id)
    adapter = IndicatorSqlAdapter(config)
    couchdb = _get_db(config.referenced_doc_type)
    redis_client = get_redis_client().client.get_client()
    redis_key = _get_redis_key_for_config(config)

    for doc in iter_docs(couchdb, relevant_ids, chunksize=500):
        try:
            # save is a noop if the filter doesn't match
            adapter.save(doc)
            redis_client.srem(redis_key, doc.get('_id'))
        except Exception as e:
            logging.exception('problem saving document {} to table. {}'.format(doc['_id'], e))

    if not is_static(indicator_config_id):
        redis_client.delete(redis_key)
        config.meta.build.finished = True
        try:
            config.save()
        except ResourceConflict:
            current_config = DataSourceConfiguration.get(config._id)
            # check that a new build has not yet started
            if config.meta.build.initiated == current_config.meta.build.initiated:
                current_config.meta.build.finished = True
                current_config.save()
def test_passes_trial_check(self, domain_is_on_trial_patch, set_system_error_patch, delay_patch, enqueue_patch):
    client = get_redis_client()
    sms = QueuedSMS(domain='trial-project')
    domain_is_on_trial_patch.return_value = True
    key = 'sms-sent-on-trial-for-trial-project'

    # Test when key doesn't exist yet
    self.assertIsNone(client.get(key))
    self.assertTrue(passes_trial_check(sms))
    self.assertEqual(client.get(key), 1)
    self.assertGreater(client.ttl(key), 89 * 24 * 60 * 60)

    # Test with existing key
    self.assertTrue(passes_trial_check(sms))
    self.assertEqual(client.get(key), 2)
    self.assertGreater(client.ttl(key), 89 * 24 * 60 * 60)

    # Test when limit is exceeded
    client.set(key, MAX_TRIAL_SMS)
    self.assertFalse(passes_trial_check(sms))
    set_system_error_patch.assert_called_once()

    # Test when not on trial
    domain_is_on_trial_patch.return_value = False
    self.assertTrue(passes_trial_check(sms))
def rule_progress(request, domain):
    client = get_redis_client()
    handlers = CaseReminderHandler.get_handlers(domain, reminder_type_filter=REMINDER_TYPE_DEFAULT)

    response = {}
    for handler in handlers:
        info = {}
        if handler.locked:
            info['complete'] = False
            current = None
            total = None
            try:
                current = client.get('reminder-rule-processing-current-%s' % handler._id)
                total = client.get('reminder-rule-processing-total-%s' % handler._id)
            except:
                continue
            info['current'] = int_or_none(current)
            info['total'] = int_or_none(total)
        else:
            info['complete'] = True
        response[handler._id] = info

    return HttpResponse(json.dumps(response))
def rebuild_indicators(indicator_config_id):
    config = _get_config_by_id(indicator_config_id)
    adapter = IndicatorSqlAdapter(config)
    couchdb = _get_db(config.referenced_doc_type)
    redis_client = get_redis_client().client.get_client()
    redis_key = _get_redis_key_for_config(config)

    if not is_static(indicator_config_id):
        # Save the start time now in case anything goes wrong. This way we'll be
        # able to see if the rebuild started a long time ago without finishing.
        config.meta.build.initiated = datetime.datetime.utcnow()
        config.meta.build.finished = False
        config.save()
        redis_key = _get_redis_key_for_config(config)

    adapter.rebuild_table()
    relevant_ids_chunk = []
    for relevant_id in iterate_doc_ids_in_domain_by_type(
            config.domain,
            config.referenced_doc_type,
            chunk_size=CHUNK_SIZE,
            database=couchdb):
        relevant_ids_chunk.append(relevant_id)
        if len(relevant_ids_chunk) >= CHUNK_SIZE:
            redis_client.sadd(redis_key, *relevant_ids_chunk)
            _build_indicators(indicator_config_id, relevant_ids_chunk)
            relevant_ids_chunk = []

    if relevant_ids_chunk:
        redis_client.sadd(redis_key, *relevant_ids_chunk)
        _build_indicators(indicator_config_id, relevant_ids_chunk)
def get_enqueue_lock(self, session_id, current_action_due):
    client = get_redis_client()
    key = "create-task-for-smsforms-session-%s-%s" % (
        session_id,
        current_action_due.strftime('%Y-%m-%d %H:%M:%S')
    )
    return client.lock(key, timeout=60 * 60)
def get_enqueue_lock(self, queued_sms):
    client = get_redis_client()
    key = "create-task-for-sms-%s-%s" % (
        queued_sms.pk,
        queued_sms.datetime_to_process.strftime('%Y-%m-%d %H:%M:%S')
    )
    return client.lock(key, timeout=3 * 60 * 60)
def check_repeaters():
    start = datetime.utcnow()
    cutoff = start + CHECK_REPEATERS_INTERVAL
    redis_client = get_redis_client().client.get_client()

    # Timeout for slightly less than periodic check
    check_repeater_lock = redis_client.lock(
        CHECK_REPEATERS_KEY, timeout=CHECK_REPEATERS_INTERVAL.seconds - 10
    )
    if not check_repeater_lock.acquire(blocking=False):
        return

    for record in iterate_repeat_records(start):
        now = datetime.utcnow()
        lock_key = _get_repeat_record_lock_key(record)

        if now > cutoff:
            break

        lock = redis_client.lock(lock_key, timeout=60 * 60 * 48)
        if not lock.acquire(blocking=False):
            continue

        process_repeat_record.delay(record)

    try:
        check_repeater_lock.release()
    except LockError:
        # Ignore if already released
        pass
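# Hedged sketch (not the actual helper from this codebase): check_repeaters above
# assumes _get_repeat_record_lock_key returns a stable, per-record key, so that
# concurrent runs never hand the same repeat record to two workers. Something
# along these lines would satisfy that contract; the real key format may differ.
def _get_repeat_record_lock_key(record):
    return 'repeat-record-%s' % record._id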
def rebuild_indicators(indicator_config_id):
    config = _get_config_by_id(indicator_config_id)
    adapter = IndicatorSqlAdapter(config)
    couchdb = _get_db(config.referenced_doc_type)
    redis_client = get_redis_client().client.get_client()
    redis_key = _get_redis_key_for_config(config)

    if not is_static(indicator_config_id):
        # Save the start time now in case anything goes wrong. This way we'll be
        # able to see if the rebuild started a long time ago without finishing.
        config.meta.build.initiated = datetime.datetime.utcnow()
        config.meta.build.finished = False
        config.save()
        redis_key = _get_redis_key_for_config(config)

    adapter.rebuild_table()
    relevant_ids = get_doc_ids_in_domain_by_type(
        config.domain,
        config.referenced_doc_type,
        database=couchdb,
    )
    for docs in chunked(relevant_ids, 1000):
        redis_client.sadd(redis_key, *docs)

    _build_indicators(indicator_config_id, relevant_ids)
def process_sms(queued_sms_pk):
    """
    queued_sms_pk - pk of a QueuedSMS entry
    """
    client = get_redis_client()
    utcnow = get_utcnow()
    # Prevent more than one task from processing this SMS, just in case
    # the message got enqueued twice.
    message_lock = get_lock(client, "sms-queue-processing-%s" % queued_sms_pk)

    if message_lock.acquire(blocking=False):
        try:
            msg = QueuedSMS.objects.get(pk=queued_sms_pk)
        except QueuedSMS.DoesNotExist:
            # The message was already processed and removed from the queue
            release_lock(message_lock, True)
            return

        if message_is_stale(msg, utcnow):
            msg.set_system_error(SMS.ERROR_MESSAGE_IS_STALE)
            remove_from_queue(msg)
            release_lock(message_lock, True)
            return

        if msg.direction == OUTGOING:
            if msg.domain:
                domain_object = Domain.get_by_name(msg.domain)
            else:
                domain_object = None
            if domain_object and handle_domain_specific_delays(msg, domain_object, utcnow):
                release_lock(message_lock, True)
                return

        requeue = False
        # Process inbound SMS from a single contact one at a time
        recipient_block = msg.direction == INCOMING
        if (isinstance(msg.processed, bool)
                and not msg.processed
                and not msg.error
                and msg.datetime_to_process < utcnow):
            if recipient_block:
                recipient_lock = get_lock(client, "sms-queue-recipient-phone-%s" % msg.phone_number)
                recipient_lock.acquire(blocking=True)

            if msg.direction == OUTGOING:
                requeue = handle_outgoing(msg)
            elif msg.direction == INCOMING:
                handle_incoming(msg)
            else:
                msg.set_system_error(SMS.ERROR_INVALID_DIRECTION)
                remove_from_queue(msg)

            if recipient_block:
                release_lock(recipient_lock, True)

        release_lock(message_lock, True)
        if requeue:
            process_sms.delay(queued_sms_pk)
def process_pillow_retry(error_doc_id):
    # Redis error logged in get_redis_client
    try:
        client = cache_core.get_redis_client()
    except cache_core.RedisClientError:
        return

    # Prevent more than one task from processing this error, just in case
    # it got enqueued twice.
    lock = client.lock(
        "pillow-retry-processing-%s" % error_doc_id,
        timeout=settings.PILLOW_RETRY_PROCESSING_LOCK_TIMEOUT*60
    )
    if lock.acquire(blocking=False):
        try:
            error_doc = PillowError.objects.get(id=error_doc_id)
        except PillowError.DoesNotExist:
            release_lock(lock, True)
            return

        pillow_name_or_class = error_doc.pillow
        try:
            pillow = get_pillow_by_name(pillow_name_or_class)
        except PillowNotFoundError:
            pillow = None

        if not pillow:
            notify_error((
                "Could not find pillowtop class '%s' while attempting a retry. "
                "If this pillow was recently deleted then this will be automatically cleaned up eventually. "
                "If not, then this should be looked into."
            ) % pillow_name_or_class)
            try:
                error_doc.total_attempts = PillowError.multi_attempts_cutoff() + 1
                error_doc.save()
            finally:
                release_lock(lock, True)
                return

        change = error_doc.change_object
        try:
            change_metadata = change.metadata
            if change_metadata:
                document_store = get_document_store(
                    data_source_type=change_metadata.data_source_type,
                    data_source_name=change_metadata.data_source_name,
                    domain=change_metadata.domain
                )
                change.document_store = document_store
            pillow.process_change(change)
        except Exception:
            ex_type, ex_value, ex_tb = sys.exc_info()
            error_doc.add_attempt(ex_value, ex_tb)
            error_doc.queued = False
            error_doc.save()
        else:
            error_doc.delete()
        finally:
            release_lock(lock, True)
def _store_excel_in_redis(file):
    hash_id = uuid.uuid4().hex

    r = get_redis_client()
    r.set(hash_id, file.getvalue())
    r.expire(hash_id, EXPIRE_TIME)

    return hash_id
def resume_building_indicators(indicator_config_id):
    config = _get_config_by_id(indicator_config_id)
    redis_client = get_redis_client().client.get_client()
    redis_key = _get_redis_key_for_config(config)

    if len(redis_client.smembers(redis_key)) > 0:
        relevant_ids = redis_client.smembers(redis_key)
        _build_indicators(indicator_config_id, relevant_ids)
def populate_queue(self):
    client = get_redis_client()
    utcnow = datetime.utcnow()
    entries = self.get_items_to_be_processed(utcnow)
    for entry in entries:
        item_id = entry["id"]
        process_datetime_str = entry["key"]
        self.enqueue(item_id, process_datetime_str, redis_client=client)
def get_enqueue_lock(self, cls, schedule_instance_id, next_event_due):
    client = get_redis_client()
    key = "create-task-for-%s-%s-%s" % (
        cls.__name__,
        schedule_instance_id.hex,
        next_event_due.strftime('%Y-%m-%d %H:%M:%S')
    )
    return client.lock(key, timeout=60 * 60)
def password_changed(self, password, user):
    # store password attempt and retain just RESTRICT_USED_PASSWORDS_NUM attempts
    # Also set expiry time to avoid retaining passwords in redis forever
    client = get_redis_client()
    key_name = self.redis_key_for_user(user.username)
    attempts = client.get(key_name, [])
    attempts.append(hash_password(password))
    client.set(key_name, attempts[-RESTRICT_USED_PASSWORDS_NUM:])
    client.expire(key_name, timedelta(EXPIRE_PASSWORD_ATTEMPTS_IN))
def handle_outgoing(msg):
    """
    Should return a requeue flag, so if it returns True, the message will be
    requeued and processed again immediately, and if it returns False, it will
    not be queued again.
    """
    backend = msg.outbound_backend
    sms_interval = backend.get_sms_interval()
    use_rate_limit = sms_interval is not None
    use_load_balancing = (isinstance(backend, SMSLoadBalancingMixin) and
                          len(backend.phone_numbers) > 1)

    if use_rate_limit or use_load_balancing:
        client = cache_core.get_redis_client()

    lbi = None
    orig_phone_number = None
    if use_load_balancing:
        lbi = backend.get_next_phone_number(client)
        orig_phone_number = lbi.phone_number
    elif (isinstance(backend, SMSLoadBalancingMixin) and
          len(backend.phone_numbers) == 1):
        # If there's only one phone number, we don't need to go through the
        # load balancing algorithm. But we should always pass an
        # orig_phone_number if it's an instance of SMSLoadBalancingMixin.
        orig_phone_number = backend.phone_numbers[0]

    if use_rate_limit:
        if use_load_balancing:
            lock_key = "sms-backend-%s-rate-limit-phone-%s" % (backend._id, lbi.phone_number)
        else:
            lock_key = "sms-backend-%s-rate-limit" % backend._id
        lock = client.lock(lock_key, timeout=30)

    if not use_rate_limit or (use_rate_limit and lock.acquire(blocking=False)):
        if use_load_balancing:
            lbi.finish(save_stats=True)
        result = send_message_via_backend(msg, backend=backend, orig_phone_number=orig_phone_number)
        if use_rate_limit:
            wait_and_release_lock(lock, sms_interval)

        # Only do the following if an unrecoverable error did not happen
        if not msg.error:
            if result:
                handle_successful_processing_attempt(msg)
            else:
                handle_unsuccessful_processing_attempt(msg)
        return False
    else:
        # We're using rate limiting, but couldn't acquire the lock, so
        # another thread is sending sms with this backend. Rather than wait,
        # we'll just put this message at the back of the queue.
        if use_load_balancing:
            lbi.finish(save_stats=False)
        return True
def get_raw_password(obfuscated_password, username=None):
    client = get_redis_client()

    def replay_attack():
        # Replay attack where the same obfuscated password used from previous login attempt
        key_name = obfuscated_password_redis_key_for_user(username, obfuscated_password)
        if client.get(key_name):
            return True

    def record_login_attempt():
        key_name = obfuscated_password_redis_key_for_user(username, obfuscated_password)
        client.set(key_name, True)
        client.expire(key_name, timedelta(days=EXPIRE_LOGIN_ATTEMPTS_IN))

    def _mobile_request_to_track(username):
        # To be added just for audit test and should be removed to implement for all users
        if username not in USERS_TO_TRACK_FOR_REPLAY_ATTACK:
            return False
        return resolve(request.path).url_name in MOBILE_REQUESTS_TO_TRACK_FOR_REPLAY_ATTACK

    def _decode_password():
        raw_password = extract_password(obfuscated_password)
        if raw_password is None:
            # if there was no obfuscation done, just return the raw password
            # and skip any further checks
            return obfuscated_password
        # In case of 2-step authentication for web skip by checking for auth-username which is
        # present in first step
        if username and (
                (request and request.POST.get('auth-username')) or
                _mobile_request_to_track(username)):
            if replay_attack():
                return ''
            record_login_attempt()
        return raw_password

    if settings.OBFUSCATE_PASSWORD_FOR_NIC_COMPLIANCE:
        request = get_request()
        if request:
            # 1. an attempt to decode a password should be done just once in a request for the login attempt
            # check to work correctly and not consider it a replay attack in case of multiple calls
            # 2. also there should be no need to decode a password multiple times in the same request.
            if not hasattr(request, 'decoded_password'):
                request.decoded_password = {}

            # return decoded password set on request object for the obfuscated_password
            if obfuscated_password in request.decoded_password:
                return request.decoded_password[obfuscated_password]
            else:
                # decode the password and save it on the request object for obfuscated_password
                request.decoded_password[obfuscated_password] = _decode_password()
                return request.decoded_password[obfuscated_password]
        else:
            return _decode_password()
    else:
        return obfuscated_password
def get_connection_slot_lock(phone_number, backend, max_simultaneous_connections):
    """
    There is one redis lock per connection slot, numbered from 0 to
    max_simultaneous_connections - 1.
    A slot is taken if the lock can't be acquired.
    """
    slot = get_connection_slot_from_phone_number(phone_number, max_simultaneous_connections)
    key = 'backend-%s-connection-slot-%s' % (backend.couch_id, slot)
    client = get_redis_client()
    return client.lock(key, timeout=60)
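# Hedged sketch of the slot derivation that get_connection_slot_lock above relies on.
# The assumption is that get_connection_slot_from_phone_number maps a phone number
# deterministically onto [0, max_simultaneous_connections), e.g. by hashing; the
# real implementation may differ.
import hashlib

def get_connection_slot_from_phone_number(phone_number, max_simultaneous_connections):
    # Hashing keeps the mapping stable, so the same number always competes
    # for the same slot's lock.
    hex_digest = hashlib.sha1(phone_number.encode('utf-8')).hexdigest()
    return int(hex_digest, 16) % max_simultaneous_connections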
def enqueue(self, item_id, process_datetime_str, redis_client=None):
    client = redis_client or get_redis_client()
    queue_name = self.get_queue_name()
    enqueuing_lock = self.get_enqueuing_lock(
        client, "%s-enqueuing-%s-%s" % (queue_name, item_id, process_datetime_str))
    if enqueuing_lock.acquire(blocking=False):
        try:
            self.enqueue_item(item_id)
        except:
            # We couldn't enqueue, so release the lock
            release_lock(enqueuing_lock, True)
def log_metadata_received(call):
    """
    Only temporary, for debugging.
    """
    try:
        key = "kookoo-metadata-received-%s" % call.pk
        client = get_redis_client()
        client.set(key, datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
        client.expire(key, 7 * 24 * 60 * 60)
    except:
        pass
def __enter__(self):
    try:
        client = get_redis_client()
        for key in self.keys:
            lock = client.lock(key, timeout=self.timeout)
            self.locks.append(lock)
        for lock in self.locks:
            lock.acquire(blocking=True)
    except Exception:
        if self.fail_hard:
            raise
def store_excel_in_redis(file):
    hash_id = uuid.uuid4().hex

    tmp = NamedTemporaryFile(delete=False)
    tmp.file.write(file.getvalue())

    r = get_redis_client()
    r.set(hash_id, tmp.name)
    r.expire(hash_id, EXPIRE_TIME)
    remove_temp_file.apply_async(args=[tmp.name], countdown=EXPIRE_TIME)

    return hash_id
def process_sms(message_id):
    """
    message_id - _id of an SMSLog entry
    """
    client = get_redis_client()
    utcnow = datetime.utcnow()
    # Prevent more than one task from processing this SMS, just in case
    # the message got enqueued twice.
    message_lock = get_lock(client, "sms-queue-processing-%s" % message_id)

    if message_lock.acquire(blocking=False):
        msg = SMSLog.get(message_id)

        if message_is_stale(msg, utcnow):
            msg.set_system_error(SMS.ERROR_MESSAGE_IS_STALE)
            release_lock(message_lock, True)
            return

        if msg.direction == OUTGOING:
            if msg.domain:
                domain_object = Domain.get_by_name(msg.domain, strict=True)
            else:
                domain_object = None
            if domain_object and handle_domain_specific_delays(msg, domain_object, utcnow):
                release_lock(message_lock, True)
                return

        requeue = False
        # Process inbound SMS from a single contact one at a time
        recipient_block = msg.direction == INCOMING
        if (isinstance(msg.processed, bool)
                and not msg.processed
                and not msg.error
                and msg.datetime_to_process < utcnow):
            if recipient_block:
                recipient_lock = get_lock(client, "sms-queue-recipient-phone-%s" % msg.phone_number)
                recipient_lock.acquire(blocking=True)

            if msg.direction == OUTGOING:
                requeue = handle_outgoing(msg)
            elif msg.direction == INCOMING:
                handle_incoming(msg)
            else:
                msg.set_system_error(SMS.ERROR_INVALID_DIRECTION)

            if recipient_block:
                release_lock(recipient_lock, True)

        release_lock(message_lock, True)
        if requeue:
            process_sms.delay(message_id)
def test_reset_key(self):
    key = 'my-load-balancing-reset-test-key'
    client = get_redis_client().client.get_client()
    client.set(key, 999998)
    load_balance(key, [1])
    self.assertEqual(int(client.get(key)), 999999)
    load_balance(key, [1])
    self.assertIsNone(client.get(key))
    load_balance(key, [1])
    self.assertEqual(int(client.get(key)), 1)
def send_current_event_content_to_recipients(self):
    client = get_redis_client()
    content = self.memoized_schedule.get_current_event_content(self)

    if isinstance(content, (IVRSurveyContent, SMSCallbackContent)):
        raise TypeError(
            "IVR and Callback use cases are no longer supported. "
            "How did this schedule instance end up as active?"
        )

    if isinstance(self, CaseScheduleInstanceMixin):
        content.set_context(case=self.case, schedule_instance=self)
    else:
        content.set_context(schedule_instance=self)

    logged_event = MessagingEvent.create_from_schedule_instance(self, content)

    recipient_count = 0
    for recipient in self.expand_recipients():
        recipient_count += 1

        # The framework will retry sending a non-processed schedule instance
        # once every hour.
        # If we are processing a long list of recipients here and an error
        # occurs half-way through, we don't want to reprocess the entire list
        # of recipients again when the framework retries it an hour later.
        # So we use a non-blocking lock tied to the event due time and recipient
        # to make sure that we don't try resending the same content to the same
        # recipient more than once in the event of a retry.
        # If we succeed in sending the content, we don't release the lock so
        # that it won't retry later. If we fail in sending the content, we release
        # the lock so that it will retry later.
        lock = self.get_content_send_lock(client, recipient)
        if lock.acquire(blocking=False):
            try:
                content.send(recipient, logged_event)
            except:
                # Release the lock if an error happened so that we can try sending
                # to this recipient again later.
                lock.release()
                raise

    # Update the MessagingEvent for reporting
    if recipient_count == 0:
        logged_event.error(MessagingEvent.ERROR_NO_RECIPIENT)
    else:
        logged_event.completed()
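# Hypothetical sketch of the lock described in the comments above: a key tied to the
# schedule instance, its due time, and the recipient, acquired non-blocking so that a
# retry an hour later skips recipients that already succeeded. The attribute names
# used here (schedule_instance_id, next_event_due, recipient.get_id) are assumptions
# for illustration, not the verified implementation.
def get_content_send_lock(self, client, recipient):
    key = "send-content-for-%s-%s-%s" % (
        self.schedule_instance_id.hex,
        self.next_event_due.strftime('%Y-%m-%d %H:%M:%S'),
        recipient.get_id,
    )
    # The timeout only needs to outlast the hourly retry window.
    return client.lock(key, timeout=2 * 60 * 60)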
def passes_trial_check(msg):
    if msg.domain and domain_is_on_trial(msg.domain):
        with CriticalSection(['check-sms-sent-on-trial-for-%s' % msg.domain], timeout=60):
            key = 'sms-sent-on-trial-for-%s' % msg.domain
            expiry = 90 * 24 * 60 * 60
            client = get_redis_client()
            value = client.get(key) or 0
            if value >= MAX_TRIAL_SMS:
                msg.set_system_error(SMS.ERROR_TRIAL_SMS_EXCEEDED)
                return False

            client.set(key, value + 1, timeout=expiry)

    return True
def should_log_exception_for_backend(backend):
    """
    Only returns True if an exception hasn't been logged for the given backend
    in the last hour.
    """
    client = get_redis_client()
    key = 'exception-logged-for-backend-%s' % backend.couch_id

    if client.get(key):
        return False
    else:
        client.set(key, 1)
        client.expire(key, 60 * 60)
        return True
def resume_building_indicators(indicator_config_id):
    config = _get_config_by_id(indicator_config_id)
    redis_client = get_redis_client().client.get_client()
    redis_key = _get_redis_key_for_config(config)

    # maintaining support for existing sets in redis while the
    # transition to lists occurs
    try:
        relevant_ids = redis_client.lrange(redis_key, 0, -1)
    except:
        relevant_ids = tuple(redis_client.smembers(redis_key))

    if len(relevant_ids) > 0:
        _build_indicators(indicator_config_id, relevant_ids)
        last_id = relevant_ids[-1]
        _iteratively_build_table(config, last_id)
def enterprise_dashboard_download(request, domain, slug, export_hash):
    account = _get_account_or_404(request, domain)
    report = EnterpriseReport.create(slug, account.id, request.couch_user)

    redis = get_redis_client()
    content = redis.get(export_hash)

    if content:
        file = ContentFile(content)
        response = HttpResponse(file, Format.FORMAT_DICT[Format.UNZIPPED_CSV])
        response['Content-Length'] = file.size
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(report.filename)
        return response

    return HttpResponseNotFound(_("That report was not found. Please remember that "
                                  "download links expire after 24 hours."))
def set_cached_webhook_secret(cls, request_token, webhook_secret):
    client = get_redis_client()
    key = cls.get_cache_key(request_token)
    client.set(key, webhook_secret)
    client.expire(key, 7 * 24 * 60 * 60)
def get_redis_lock(cls, key, timeout_seconds):
    client = get_redis_client()
    lock = client.lock(key, timeout=timeout_seconds)
    return lock
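# Minimal usage sketch for get_redis_lock above (caller and key names are illustrative
# only): acquire non-blocking so a second caller skips the work instead of waiting,
# and release in a finally block so the lock never leaks on error.
lock = SomeDocument.get_redis_lock('rebuild-something-lock', timeout_seconds=60)  # hypothetical caller/key
if lock.acquire(blocking=False):
    try:
        do_the_work()  # placeholder for the protected operation
    finally:
        lock.release()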
def __init__(self, domain):
    self.client = get_redis_client()
    self.domain = domain
def get_cached_webhook_secret(self, request_token):
    client = get_redis_client()
    key = self.get_cache_key(request_token)
    return client.get(key)
    NON_FDC_PRESCRIPTION_DAYS_THRESHOLD,
)
from .exceptions import EnikshayTaskException
from .data_store import AdherenceDatastore

import six

logger = get_task_logger(__name__)

DoseStatus = namedtuple('DoseStatus', 'taken missed unknown source value')
BatchStatus = namedtuple(
    'BatchStatus',
    'update_count noupdate_count success_count errors case_batches ledger_batches duration'
)

CACHE_KEY = "reconciliation-task-{}"
cache = get_redis_client()


def enikshay_task(self):
    # runs adherence and voucher calculations for all domains that have
    # `toggles.UATBC_ADHERENCE_TASK` enabled
    domains = toggles.UATBC_ADHERENCE_TASK.get_enabled_domains()
    for domain in domains:
        if toggles.DATA_MIGRATION.enabled(domain):
            # Don't run this on the india cluster anymore
            continue

        try:
            task_group = EpisodeUpdater(domain).run()
        except Exception as e:
def process_sms(queued_sms_pk):
    """
    queued_sms_pk - pk of a QueuedSMS entry
    """
    client = get_redis_client()
    utcnow = get_utcnow()
    # Prevent more than one task from processing this SMS, just in case
    # the message got enqueued twice.
    message_lock = get_lock(client, "sms-queue-processing-%s" % queued_sms_pk)

    if message_lock.acquire(blocking=False):
        try:
            msg = QueuedSMS.objects.get(pk=queued_sms_pk)
        except QueuedSMS.DoesNotExist:
            # The message was already processed and removed from the queue
            release_lock(message_lock, True)
            return

        if message_is_stale(msg, utcnow):
            msg.set_system_error(SMS.ERROR_MESSAGE_IS_STALE)
            remove_from_queue(msg)
            release_lock(message_lock, True)
            return

        if msg.direction == OUTGOING:
            if msg.domain:
                domain_object = Domain.get_by_name(msg.domain)
            else:
                domain_object = None
            if domain_object and handle_domain_specific_delays(msg, domain_object, utcnow):
                release_lock(message_lock, True)
                return

        requeue = False
        # Process inbound SMS from a single contact one at a time
        recipient_block = msg.direction == INCOMING
        if (isinstance(msg.processed, bool)
                and not msg.processed
                and not msg.error
                and msg.datetime_to_process < utcnow):
            if recipient_block:
                recipient_lock = get_lock(client, "sms-queue-recipient-phone-%s" % msg.phone_number)
                recipient_lock.acquire(blocking=True)

            if msg.direction == OUTGOING:
                if (
                    msg.domain and
                    msg.couch_recipient_doc_type and
                    msg.couch_recipient and
                    not is_contact_active(msg.domain, msg.couch_recipient_doc_type, msg.couch_recipient)
                ):
                    msg.set_system_error(SMS.ERROR_CONTACT_IS_INACTIVE)
                    remove_from_queue(msg)
                else:
                    requeue = handle_outgoing(msg)
            elif msg.direction == INCOMING:
                handle_incoming(msg)
            else:
                msg.set_system_error(SMS.ERROR_INVALID_DIRECTION)
                remove_from_queue(msg)

            if recipient_block:
                release_lock(recipient_lock, True)

        release_lock(message_lock, True)
        if requeue:
            process_sms.delay(queued_sms_pk)
def populate_queue(self):
    client = get_redis_client()
    utcnow = datetime.utcnow()
    items = self.get_items_to_be_processed(utcnow)
    for item in items:
        self.enqueue(item, redis_client=client)
def get_used_passwords(self, username):
    client = get_redis_client()
    return client.get(self.redis_key_for_user(username), [])
def get_enqueue_lock(self, queued_sms):
    client = get_redis_client()
    key = "create-task-for-sms-%s-%s" % (
        queued_sms.pk,
        queued_sms.datetime_to_process.strftime('%Y-%m-%d %H:%M:%S'))
    return client.lock(key, timeout=3 * 60 * 60)
def set_last_visited_domain(couch_user, domain):
    client = cache_core.get_redis_client()
    cache_expiration = 60 * 60 * 24 * 7
    cache_key = _last_visited_domain_cache_key(couch_user)
    client.set(cache_key, domain, timeout=cache_expiration)
def get_enqueue_lock(self, cls, schedule_instance_id, next_event_due):
    client = get_redis_client()
    key = "create-task-for-%s-%s-%s" % (
        cls.__name__,
        schedule_instance_id.hex,
        next_event_due.strftime('%Y-%m-%d %H:%M:%S'))
    return client.lock(key, timeout=60 * 60)
def process_sms(queued_sms_pk):
    """
    queued_sms_pk - pk of a QueuedSMS entry
    """
    client = get_redis_client()
    utcnow = get_utcnow()
    # Prevent more than one task from processing this SMS, just in case
    # the message got enqueued twice.
    message_lock = get_lock(client, "sms-queue-processing-%s" % queued_sms_pk)

    if message_lock.acquire(blocking=False):
        try:
            msg = QueuedSMS.objects.get(pk=queued_sms_pk)
        except QueuedSMS.DoesNotExist:
            # The message was already processed and removed from the queue
            release_lock(message_lock, True)
            return

        if message_is_stale(msg, utcnow):
            msg.set_system_error(SMS.ERROR_MESSAGE_IS_STALE)
            remove_from_queue(msg)
            release_lock(message_lock, True)
            return

        outbound_counter = None
        if msg.direction == OUTGOING:
            domain_object = Domain.get_by_name(msg.domain) if msg.domain else None

            if domain_object and handle_domain_specific_delays(msg, domain_object, utcnow):
                release_lock(message_lock, True)
                return

            outbound_counter = OutboundDailyCounter(domain_object)
            if not outbound_counter.can_send_outbound_sms(msg):
                release_lock(message_lock, True)
                return

        requeue = False
        # Process inbound SMS from a single contact one at a time
        recipient_block = msg.direction == INCOMING

        # We check datetime_to_process against utcnow plus a small amount
        # of time because timestamps can differ between machines which
        # can cause us to miss sending the message the first time and
        # result in an unnecessary delay.
        if (isinstance(msg.processed, bool)
                and not msg.processed
                and not msg.error
                and msg.datetime_to_process < (utcnow + timedelta(seconds=10))):
            if recipient_block:
                recipient_lock = get_lock(
                    client, "sms-queue-recipient-phone-%s" % msg.phone_number)
                recipient_lock.acquire(blocking=True)

            if msg.direction == OUTGOING:
                if (msg.domain
                        and msg.couch_recipient_doc_type
                        and msg.couch_recipient
                        and not is_contact_active(msg.domain, msg.couch_recipient_doc_type,
                                                  msg.couch_recipient)):
                    msg.set_system_error(SMS.ERROR_CONTACT_IS_INACTIVE)
                    remove_from_queue(msg)
                else:
                    requeue = handle_outgoing(msg)
            elif msg.direction == INCOMING:
                try:
                    handle_incoming(msg)
                except DelayProcessing:
                    process_sms.apply_async([queued_sms_pk], countdown=60)
                    if recipient_block:
                        release_lock(recipient_lock, True)
                    release_lock(message_lock, True)
            else:
                msg.set_system_error(SMS.ERROR_INVALID_DIRECTION)
                remove_from_queue(msg)

            if recipient_block:
                release_lock(recipient_lock, True)

        release_lock(message_lock, True)
        if requeue:
            if outbound_counter:
                outbound_counter.decrement()
            send_to_sms_queue(msg)
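# Small standalone illustration of the 10-second grace window noted in the comment
# inside process_sms above: with slight clock skew between the machine that enqueued
# the message and the one processing it, a strict comparison against utcnow would skip
# a message that is effectively due and force another full requeue cycle. Values here
# are made up for the example.
from datetime import datetime, timedelta

utcnow = datetime(2018, 1, 1, 12, 0, 0)               # processing machine's clock
datetime_to_process = utcnow + timedelta(seconds=3)   # enqueuing machine ran slightly ahead

assert not (datetime_to_process < utcnow)                        # strict check: message skipped
assert datetime_to_process < (utcnow + timedelta(seconds=10))    # grace window: message sent now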
def __init__(self, rule_id):
    self.rule_id = rule_id
    self.client = get_redis_client()
def get_obfuscated_passwords(username):
    client = get_redis_client()
    return client.get(obfuscated_passwords_redis_key_for_user(username), [])
def get_enqueue_lock(self, session_id, current_action_due):
    client = get_redis_client()
    key = "create-task-for-smsforms-session-%s-%s" % (
        session_id,
        current_action_due.strftime('%Y-%m-%d %H:%M:%S'))
    return client.lock(key, timeout=60 * 60)
def record_login_attempt():
    client = get_redis_client()
    key_name = obfuscated_passwords_redis_key_for_user(username)
    obfuscated_passwords = client.get(key_name, [])
    client.set(key_name, obfuscated_passwords + [hash_password(obfuscated_password)])
    client.expire(key_name, timedelta(EXPIRE_LOGIN_ATTEMPTS_IN))
def get_last_visited_domain(couch_user):
    client = cache_core.get_redis_client()
    return client.get(_last_visited_domain_cache_key(couch_user))
        order += 1


def already_randomized(case):
    any_message = FRIRandomizedMessage.view(
        "fri/randomized_message",
        startkey=[case.domain, case.case_id],
        endkey=[case.domain, case.case_id, {}],
        include_docs=True
    ).first()
    return any_message is not None


def get_randomized_message(case, order):
    if order >= 0 and order <= 279:
        client = get_redis_client()
        lock = client.lock("fri-randomization-%s" % case.case_id, timeout=300)

        lock.acquire(blocking=True)
        if not already_randomized(case):
            randomize_messages(case)
        release_lock(lock, True)

        message = FRIRandomizedMessage.view(
            "fri/randomized_message",
            key=[case.domain, case.case_id, order],
            include_docs=True
        ).one()
        return message
    else:
        return None