def bulk_update_records(cls, configs_by_docs, domain, doc_type_by_id):
    # type: (Dict[str, List[str]], str, Dict[str, str]) -> None
    """Merge new indicator config ids into existing AsyncIndicator rows
    and create rows for docs that have none.

    :param configs_by_docs: dict of doc_id -> list of config_ids
    :param domain: domain applied to newly created AsyncIndicator rows
    :param doc_type_by_id: dict of doc_id -> doc_type for new rows
    """
    if not configs_by_docs:
        return
    doc_ids = list(configs_by_docs)

    current_indicators = AsyncIndicator.objects.filter(
        doc_id__in=doc_ids).all()
    to_update = []

    for indicator in current_indicators:
        new_configs = set(configs_by_docs[indicator.doc_id])
        current_configs = set(indicator.indicator_config_ids)
        if not new_configs.issubset(current_configs):
            indicator.indicator_config_ids = sorted(
                current_configs | new_configs)
            # the config set changed, so give processing a fresh start
            indicator.unsuccessful_attempts = 0
            to_update.append(indicator)
    if to_update:
        bulk_update_helper(to_update)

    # docs with no existing AsyncIndicator row get a fresh one
    new_doc_ids = set(doc_ids) - {i.doc_id for i in current_indicators}
    AsyncIndicator.objects.bulk_create([
        AsyncIndicator(doc_id=doc_id,
                       doc_type=doc_type_by_id[doc_id],
                       domain=domain,
                       indicator_config_ids=sorted(configs_by_docs[doc_id]))
        for doc_id in new_doc_ids
    ])
def bulk_update_records(cls, configs_by_docs, domain, doc_type_by_id):
    # type: (Dict[str, List[str]], str, Dict[str, str]) -> None
    """Union incoming config ids into existing AsyncIndicator rows and
    bulk-create rows for any doc ids not yet tracked.

    :param configs_by_docs: dict of doc_id -> list of config_ids
    :param domain: domain applied to newly created AsyncIndicator rows
    :param doc_type_by_id: dict of doc_id -> doc_type for new rows
    """
    if not configs_by_docs:
        return
    doc_ids = list(configs_by_docs)
    current_indicators = AsyncIndicator.objects.filter(doc_id__in=doc_ids).all()
    to_update = []
    for indicator in current_indicators:
        new_configs = set(configs_by_docs[indicator.doc_id])
        current_configs = set(indicator.indicator_config_ids)
        if not new_configs.issubset(current_configs):
            indicator.indicator_config_ids = sorted(current_configs | new_configs)
            # reset the retry counter since the work to do has changed
            indicator.unsuccessful_attempts = 0
            to_update.append(indicator)
    if to_update:
        bulk_update_helper(to_update)
    # create rows for docs that had no AsyncIndicator yet
    new_doc_ids = set(doc_ids) - {i.doc_id for i in current_indicators}
    AsyncIndicator.objects.bulk_create([
        AsyncIndicator(doc_id=doc_id,
                       doc_type=doc_type_by_id[doc_id],
                       domain=domain,
                       indicator_config_ids=sorted(configs_by_docs[doc_id]))
        for doc_id in new_doc_ids
    ])
def bulk_update(cls, objects):
    """Persist a batch of existing LocationType objects in one update.

    Tightly coupled with .bulk_management.NewLocationImporter.bulk_commit(),
    so it cannot be used on its own.  The caller is responsible for invoking
    'sync_administrative_status' on individual objects afterwards.

    :param objects: list of existing LocationType objects to be updated
    """
    cls._pre_bulk_save(objects)
    # stamp every object with a single shared modification time
    timestamp = datetime.utcnow()
    for obj in objects:
        obj.last_modified = timestamp
    bulk_update_helper(objects)
def handle(self, user_id, date, **options):
    """Mark the matching SQL synclogs as broken so the phone gets a 412.

    :param user_id: user whose synclogs to target
    :param date: synclog date to match
    """
    # SQL
    matching_logs = SyncLogSQL.objects.filter(
        user_id=user_id,
        date=date,
        log_format=LOG_FORMAT_SIMPLIFY,
    )
    for row in matching_logs:
        wrapped = properly_wrap_sync_log(row.doc)
        # NOTE(review): this assumes the wrapped doc shares state with
        # row.doc so the change is picked up by the bulk update below —
        # confirm properly_wrap_sync_log does not copy.
        wrapped.case_ids_on_phone = {'broken to force 412'}
    bulk_update_helper(matching_logs)
def handle(self, user_id, date, **options):
    """Mark the matching synclogs (SQL and Couch) as broken to force a 412.

    :param user_id: user whose synclogs to target
    :param date: synclog date to match
    """
    # SQL
    sql_logs = SyncLogSQL.objects.filter(
        user_id=user_id, date=date, log_format=LOG_FORMAT_SIMPLIFY)
    for row in sql_logs:
        wrapped = properly_wrap_sync_log(row.doc)
        # NOTE(review): assumes the wrapped doc shares state with row.doc
        # so the change is saved by the bulk update — confirm that
        # properly_wrap_sync_log does not copy.
        wrapped.case_ids_on_phone = {'broken to force 412'}
    bulk_update_helper(sql_logs)

    # Couch - ToDo - delete after Synclog SQL migration is over
    couch_logs = SimplifiedSyncLog.view(
        "phone/sync_logs_by_user",
        startkey=[user_id, {}],
        endkey=[user_id, date],
        descending=True,
        reduce=False,
        include_docs=True,
    )
    to_save = list(couch_logs)
    for log in to_save:
        log.case_ids_on_phone = {'broken to force 412'}
    SimplifiedSyncLog.bulk_save(to_save)