def page_context(self):
    context = super().page_context
    context.update({
        "name": self.rule.name,
        "case_type": self.rule.case_type,
        "match_type": self.dedupe_action.match_type,
        "case_properties": self.dedupe_action.case_properties,
        "include_closed": self.dedupe_action.include_closed,
        "properties_to_update": [
            {
                "name": prop["name"],
                "valueType": prop["value_type"],
                "value": prop["value"],
            } for prop in self.dedupe_action.properties_to_update
        ],
        "readonly": self.rule.locked_for_editing,
    })
    if self.rule.locked_for_editing:
        progress_helper = MessagingRuleProgressHelper(self.rule_id)
        context.update({
            "progress": progress_helper.get_progress_pct(),
            "complete": progress_helper.get_cases_processed(),
            "total": progress_helper.get_total_cases_to_process(),
        })
    return context
def get_restart_ajax_response(self, rule):
    helper = MessagingRuleProgressHelper(rule.pk)
    if self.limit_rule_restarts and helper.rule_initiation_key_is_set():
        minutes_remaining = helper.rule_initiation_key_minutes_remaining()
        return JsonResponse({'status': 'error', 'minutes_remaining': minutes_remaining})
    initiate_messaging_rule_run(rule.domain, rule.pk)
    return JsonResponse({'status': 'success'})
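The restart throttle above depends on `rule_initiation_key_is_set()` and `rule_initiation_key_minutes_remaining()`, whose implementation is not shown here. A minimal sketch of how such a throttle could work, assuming a Redis key with a TTL; the class name, key format, and cooldown below are illustrative assumptions, not the actual CommCare HQ code:

# Hypothetical sketch, not the actual helper: a Redis key with a TTL
# serves as the "initiation" marker that blocks rapid restarts.
from datetime import timedelta

import redis

RULE_INITIATION_COOLDOWN = timedelta(hours=2)  # assumed cooldown window


class RuleInitiationThrottle:

    def __init__(self, rule_id, client=None):
        self.client = client or redis.Redis()
        self.key = f"messaging-rule-initiation-{rule_id}"  # assumed key format

    def set_rule_initiation_key(self):
        # While this key lives, get_restart_ajax_response refuses restarts
        self.client.set(self.key, 1, ex=int(RULE_INITIATION_COOLDOWN.total_seconds()))

    def rule_initiation_key_is_set(self):
        return self.client.get(self.key) is not None

    def rule_initiation_key_minutes_remaining(self):
        # TTL is -2 when the key is gone and -1 when it has no expiry
        ttl = self.client.ttl(self.key)
        return max(ttl // 60, 1) if ttl > 0 else 0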
def run_messaging_rule(domain, rule_id):
    rule = _get_cached_rule(domain, rule_id)
    if not rule:
        return
    incr = 0
    progress_helper = MessagingRuleProgressHelper(rule_id)
    progress_helper.set_initial_progress()
    for case_id in get_case_ids_for_messaging_rule(domain, rule.case_type):
        sync_case_for_messaging_rule.delay(domain, case_id, rule_id)
        incr += 1
        if incr >= 1000:
            progress_helper.increase_total_case_count(incr)
            incr = 0
            if progress_helper.is_canceled():
                break
    progress_helper.increase_total_case_count(incr)
    # By putting this task last in the queue, the rule should be marked
    # complete at about the time that the last tasks are finishing up.
    # This beats saving the task results in the database and using a
    # celery chord which would be more taxing on system resources.
    set_rule_complete.delay(rule_id)
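Nearly every snippet here goes through `MessagingRuleProgressHelper` for its counters. The `print_status` command further down confirms the helper is Redis-backed with `client`, `current_key`, and `total_key` attributes; the rest of this sketch (class name, key format, method bodies) is an illustrative assumption:

import redis


class ProgressCounters:
    """Counter core only; the real helper also tracks cancellation,
    the initiation key, and shard bookkeeping."""

    def __init__(self, rule_id, client=None):
        self.client = client or redis.Redis()
        self.current_key = f"messaging-rule-case-count-current-{rule_id}"
        self.total_key = f"messaging-rule-case-count-total-{rule_id}"

    def set_initial_progress(self):
        self.client.set(self.current_key, 0)
        self.client.set(self.total_key, 0)

    def increment_current_case_count(self):
        # INCR is atomic, so many celery workers can bump it concurrently
        self.client.incr(self.current_key)

    def increase_total_case_count(self, count):
        self.client.incrby(self.total_key, count)

    def get_progress_pct(self):
        current = int(self.client.get(self.current_key) or 0)
        total = int(self.client.get(self.total_key) or 0)
        return int(round(100.0 * current / total)) if total else 0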
def get_restart_ajax_response(self, rule):
    helper = MessagingRuleProgressHelper(rule.pk)
    if self.limit_rule_restarts and helper.rule_initiation_key_is_set():
        minutes_remaining = helper.rule_initiation_key_minutes_remaining()
        return JsonResponse({'status': 'error', 'minutes_remaining': minutes_remaining})
    initiate_messaging_rule_run(rule.domain, rule.pk)
    return JsonResponse({
        'status': 'success',
        'rule': self._format_rule_for_json(rule),
    })
def _get_duplicates_count(self, rule):
    if rule.locked_for_editing:
        progress_helper = MessagingRuleProgressHelper(rule.id)
        return _(
            "Processing - {progress_percent}% ({cases_processed}/{total_cases} cases) complete"
        ).format(
            progress_percent=progress_helper.get_progress_pct(),
            cases_processed=progress_helper.get_cases_processed(),
            total_cases=progress_helper.get_total_cases_to_process(),
        )
    action = CaseDeduplicationActionDefinition.from_rule(rule)
    return CaseDuplicate.objects.filter(action=action).count()
def _sync_case_for_messaging_rule(domain, case_id, rule_id):
    case_load_counter("messaging_rule_sync", domain)()
    case = CaseAccessors(domain).get_case(case_id)
    rule = _get_cached_rule(domain, rule_id)
    if rule:
        rule.run_rule(case, utcnow())
        MessagingRuleProgressHelper(rule_id).increment_current_case_count()
def get_conditional_alerts_ajax_response(self):
    query = self.get_conditional_alerts_queryset()
    total_records = query.count()
    rules = query[self.display_start:self.display_start + self.display_length]
    data = []
    for rule in rules:
        schedule = rule.get_messaging_rule_schedule()
        data.append({
            'name': rule.name,
            'case_type': rule.case_type,
            'active': schedule.active,
            'editable': self.can_use_inbound_sms or not schedule.memoized_uses_sms_survey,
            'locked_for_editing': rule.locked_for_editing,
            'progress_pct': MessagingRuleProgressHelper(rule.pk).get_progress_pct(),
            'id': rule.pk,
        })
    return self.datatables_ajax_response(data, total_records)
def print_status(self, rule):
    schedule = rule.get_schedule()
    msg = MessagingRuleProgressHelper(rule.id)
    initiated = msg.rule_initiation_key_is_set()
    processed = msg.client.get(msg.current_key)
    total = msg.client.get(msg.total_key)
    print("{}: ({}) {:<25} {} / {} processed, {}m to reset{}".format(
        rule.id,
        ", ".join([
            "rule " + ("on" if rule.active else "off"),
            "schedule " + ("on" if schedule.active else "off"),
            ("lock" if rule.locked_for_editing else "edit"),
        ]),
        rule.name,
        processed,
        total,
        (msg.rule_initiation_key_minutes_remaining() if initiated else "?"),
        (", canceled" if msg.is_canceled() else ""),
    ))
def handle(self, rule_id=None, domain=None, cancel=False, **options):
    rule = None
    if rule_id and domain:
        rule = self.get_rule(domain, rule_id)
        self.print_status(rule)
        if cancel:
            confirm = input("Are you sure you want to cancel this rule? This is NOT a dry run. y/N?")
            if confirm == "y":
                msg = MessagingRuleProgressHelper(rule.id)
                if msg.is_canceled():
                    print("already canceled")
                else:
                    msg.cancel()
                    print("canceled rule", rule_id)
    else:
        print("Currently locked rules:")
        rules = AutomaticUpdateRule.objects.filter(locked_for_editing=True)
        for rule in rules:
            self.print_status(rule)
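Assuming this `handle` belongs to a Django management command, the optional positionals imply an `add_arguments` along these lines (hypothetical, not shown in the source):

def add_arguments(self, parser):
    # Both positionals are optional: with no arguments the command lists
    # every locked rule; --cancel additionally cancels the given rule.
    parser.add_argument('rule_id', nargs='?', type=int)
    parser.add_argument('domain', nargs='?')
    parser.add_argument('--cancel', action='store_true')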
def _format_rule_for_json(self, rule):
    schedule = rule.get_messaging_rule_schedule()
    return {
        'name': rule.name,
        'case_type': rule.case_type,
        'active': schedule.active,
        'editable': self.schedule_is_editable(schedule),
        'locked_for_editing': rule.locked_for_editing,
        'progress_pct': MessagingRuleProgressHelper(rule.pk).get_progress_pct(),
        'id': rule.pk,
    }
def run_messaging_rule(domain, rule_id):
    rule = _get_cached_rule(domain, rule_id)
    if not rule:
        return
    total_count = 0
    progress_helper = MessagingRuleProgressHelper(rule_id)
    for case_id in get_case_ids_for_messaging_rule(domain, rule.case_type):
        sync_case_for_messaging_rule.delay(domain, case_id, rule_id)
        total_count += 1
        if total_count % 1000 == 0:
            progress_helper.set_total_case_count(total_count)
    progress_helper.set_total_case_count(total_count)
    # By putting this task last in the queue, the rule should be marked
    # complete at about the time that the last tasks are finishing up.
    # This beats saving the task results in the database and using a
    # celery chord which would be more taxing on system resources.
    set_rule_complete.delay(rule_id)
def run_messaging_rule(domain, rule_id):
    rule = _get_cached_rule(domain, rule_id)
    if not rule:
        return
    progress_helper = MessagingRuleProgressHelper(rule_id)
    total_cases_count = CaseES().domain(domain).case_type(rule.case_type).count()
    progress_helper.set_total_cases_to_be_processed(total_cases_count)

    def _run_rule_sequentially():
        incr = 0
        progress_helper.set_initial_progress()
        for case_id in get_case_ids_for_messaging_rule(domain, rule.case_type):
            sync_case_for_messaging_rule.delay(domain, case_id, rule_id)
            incr += 1
            if incr >= 1000:
                incr = 0
                progress_helper.update_total_key_expiry()
                if progress_helper.is_canceled():
                    break
        # By putting this task last in the queue, the rule should be marked
        # complete at about the time that the last tasks are finishing up.
        # This beats saving the task results in the database and using a
        # celery chord which would be more taxing on system resources.
        set_rule_complete.delay(rule_id)

    def _run_rule_on_multiple_shards():
        db_aliases = get_db_aliases_for_partitioned_query()
        progress_helper.set_initial_progress(shard_count=len(db_aliases))
        for db_alias in db_aliases:
            run_messaging_rule_for_shard.delay(domain, rule_id, db_alias)

    if should_use_sql_backend(domain):
        _run_rule_on_multiple_shards()
    else:
        _run_rule_sequentially()
def run_messaging_rule(domain, rule_id):
    rule = _get_cached_rule(domain, rule_id)
    if not rule:
        return
    progress_helper = MessagingRuleProgressHelper(rule_id)
    total_cases_count = CaseES().domain(domain).case_type(rule.case_type).count()
    progress_helper.set_total_cases_to_be_processed(total_cases_count)
    db_aliases = get_db_aliases_for_partitioned_query()
    progress_helper.set_initial_progress(shard_count=len(db_aliases))
    for db_alias in db_aliases:
        run_messaging_rule_for_shard.delay(domain, rule_id, db_alias)
def run_messaging_rule_for_shard(domain, rule_id, db_alias):
    rule = _get_cached_rule(domain, rule_id)
    if not rule:
        return
    chunk_size = getattr(settings, 'MESSAGING_RULE_CASE_CHUNK_SIZE', 100)
    progress_helper = MessagingRuleProgressHelper(rule_id)
    if not progress_helper.is_canceled():
        for case_id_chunk in chunked(paginated_case_ids(domain, rule.case_type, db_alias), chunk_size):
            sync_case_chunk_for_messaging_rule.delay(domain, case_id_chunk, rule_id)
            progress_helper.increase_total_case_count(len(case_id_chunk))
            if progress_helper.is_canceled():
                break
    all_shards_complete = progress_helper.mark_shard_complete(db_alias)
    if all_shards_complete:
        # this should get triggered for the last shard
        set_rule_complete.delay(rule_id)
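`mark_shard_complete` must decide, across concurrent workers, whether the caller was the last shard to finish. One plausible sketch uses a Redis set of completed shard aliases compared against the shard count recorded by `set_initial_progress(shard_count=...)`; the key names and scheme are assumptions, not the actual implementation:

import redis


class ShardProgress:
    """Illustrative shard bookkeeping; key names and scheme are assumed."""

    def __init__(self, rule_id, client=None):
        self.client = client or redis.Redis()
        self.shard_count_key = f"messaging-rule-shard-count-{rule_id}"
        self.shards_complete_key = f"messaging-rule-shards-complete-{rule_id}"

    def set_initial_progress(self, shard_count=None):
        if shard_count is not None:
            self.client.set(self.shard_count_key, shard_count)
            self.client.delete(self.shards_complete_key)

    def mark_shard_complete(self, db_alias):
        # SADD is idempotent, so a retried shard task cannot double-count.
        # Two shards finishing at nearly the same instant could both see a
        # full set; that is tolerable here because set_rule_complete is
        # safe to run twice.
        self.client.sadd(self.shards_complete_key, db_alias)
        expected = int(self.client.get(self.shard_count_key) or 0)
        return expected > 0 and self.client.scard(self.shards_complete_key) == expected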
def get_conditional_alerts_ajax_response(self):
    query = self.get_conditional_alerts_queryset()
    total_records = query.count()
    rules = query[self.display_start:self.display_start + self.display_length]
    data = []
    for rule in rules:
        data.append([
            '< delete placeholder >',
            rule.name,
            rule.case_type,
            rule.get_messaging_rule_schedule().active,
            '< action placeholder >',
            rule.locked_for_editing,
            MessagingRuleProgressHelper(rule.pk).get_progress_pct(),
            rule.pk,
        ])
    return self.datatables_ajax_response(data, total_records)
def backfill_deduplicate_rule(domain, rule):
    from corehq.apps.data_interfaces.models import (
        AutomaticUpdateRule,
        CaseDeduplicationActionDefinition,
        DomainCaseRuleRun,
    )
    progress_helper = MessagingRuleProgressHelper(rule.pk)
    total_cases_count = CaseSearchES().domain(domain).case_type(rule.case_type).count()
    progress_helper.set_total_cases_to_be_processed(total_cases_count)
    now = datetime.utcnow()
    try:
        run_record = DomainCaseRuleRun.objects.create(
            domain=domain,
            started_on=now,
            status=DomainCaseRuleRun.STATUS_RUNNING,
            case_type=rule.case_type,
        )
        action = CaseDeduplicationActionDefinition.from_rule(rule)
        case_iterator = AutomaticUpdateRule.iter_cases(
            domain, rule.case_type, include_closed=action.include_closed)
        iter_cases_and_run_rules(
            domain,
            case_iterator,
            [rule],
            now,
            run_record.id,
            rule.case_type,
            progress_helper=progress_helper,
        )
    finally:
        progress_helper.set_rule_complete()
        AutomaticUpdateRule.objects.filter(pk=rule.pk).update(
            locked_for_editing=False,
            last_run=now,
        )
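For the `progress_helper=` argument to matter, `iter_cases_and_run_rules` has to tick the counter as it iterates. A heavily simplified sketch of that loop shape, with the real function's statistics bookkeeping elided and names assumed where not shown:

def iter_cases_and_run_rules(domain, case_iterator, rules, now, run_id,
                             case_type, progress_helper=None):
    # Heavily simplified: the real function also rolls rule-run statistics
    # up onto the DomainCaseRuleRun row identified by run_id.
    for case in case_iterator:
        for rule in rules:
            rule.run_rule(case, now)
        if progress_helper is not None:
            progress_helper.increment_current_case_count()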
def set_rule_complete(rule_id):
    AutomaticUpdateRule.objects.filter(pk=rule_id).update(
        locked_for_editing=False)
    MessagingRuleProgressHelper(rule_id).set_rule_complete()
def _get_cached_rule(domain, rule_id):
    rules = AutomaticUpdateRule.by_domain_cached(
        domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
    rules = [rule for rule in rules if rule.pk == rule_id]
    return rules[0] if len(rules) == 1 else None


def _sync_case_for_messaging_rule(domain, case_id, rule_id):
    case_load_counter("messaging_rule_sync", domain)()
    try:
        case = CaseAccessors(domain).get_case(case_id)
    except CaseNotFound:
        clear_messaging_for_case(domain, case_id)
        return
    rule = _get_cached_rule(domain, rule_id)
    if rule:
        rule.run_rule(case, utcnow())
        MessagingRuleProgressHelper(rule_id).increment_current_case_count()


def initiate_messaging_rule_run(rule):
    if not rule.active:
        return
    AutomaticUpdateRule.objects.filter(pk=rule.pk).update(
        locked_for_editing=True)
    transaction.on_commit(
        lambda: run_messaging_rule.delay(rule.domain, rule.pk))


def paginated_case_ids(domain, case_type):
    row_generator = paginate_query_across_partitioned_databases(
        CommCareCaseSQL,
        Q(domain=domain, type=case_type, deleted=False),
def initiate_messaging_rule_run(domain, rule_id):
    MessagingRuleProgressHelper(rule_id).set_initial_progress()
    AutomaticUpdateRule.objects.filter(pk=rule_id).update(locked_for_editing=True)
    run_messaging_rule.delay(domain, rule_id)