def process_case_batch(self, case_batch):
    # compute the sync updates for this batch and stage each case's rendered
    # XML as a PotentialSyncElement keyed by case id
    updates = get_case_sync_updates(
        self.restore_state.domain, case_batch, self.restore_state.last_sync_log
    )

    for update in updates:
        case = update.case
        self.potential_elements_to_sync[case.case_id] = PotentialSyncElement(
            case_stub=CaseStub(case.case_id, case.type),
            sync_xml_items=get_xml_for_response(update, self.restore_state)
        )
        self._process_case_update(case)
        self._mark_case_as_checked(case)
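# Illustrative sketch, not part of the source above: process_case_batch() stages one
# PotentialSyncElement per case so a later step can decide what actually gets sent.
# Assuming CaseStub and PotentialSyncElement are plain namedtuples (a guess for
# illustration; the real definitions may carry more), they could look like this:
from collections import namedtuple

CaseStub = namedtuple('CaseStub', ['case_id', 'type'])
PotentialSyncElement = namedtuple('PotentialSyncElement', ['case_stub', 'sync_xml_items'])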
def compile_response(timing_context, restore_state, response, batches, update_progress):
    # for each batch: append its ledger payload, compute its case sync updates,
    # render them to XML, and report cumulative progress
    done = 0
    for cases in batches:
        with timing_context("get_stock_payload"):
            response.extend(get_stock_payload(
                restore_state.project,
                restore_state.stock_settings,
                cases,
            ))

        with timing_context("get_case_sync_updates (%s cases)" % len(cases)):
            updates = get_case_sync_updates(
                restore_state.domain, cases, restore_state.last_sync_log)

        with timing_context("get_xml_for_response (%s updates)" % len(updates)):
            response.extend(item
                for update in updates
                for item in get_xml_for_response(update, restore_state))

        done += len(cases)
        update_progress(done)
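# Illustrative sketch, not part of the source above: compile_response() only relies on
# timing_context(label) returning a context manager. A minimal stand-in that accumulates
# elapsed seconds per label (hypothetical helper, named only for illustration):
import time
from contextlib import contextmanager


def make_timing_context(timings):
    @contextmanager
    def timing_context(label):
        start = time.time()
        try:
            yield
        finally:
            timings[label] = timings.get(label, 0.0) + (time.time() - start)
    return timing_context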
def get_payload(self):
    response = self.restore_state.restore_class()
    case_ids_to_sync = set()
    for owner_id in self.restore_state.owner_ids:
        case_ids_to_sync = case_ids_to_sync | set(self.get_case_ids_for_owner(owner_id))

    if (not self.restore_state.is_initial and
            any([not self.is_clean(owner_id) for owner_id in self.restore_state.owner_ids])):
        # if it's a steady state sync and we have any dirty owners, then we also need to
        # include ALL cases on the phone that have been modified since the last sync as
        # possible candidates to sync (since they may have been closed or reassigned by someone else)

        # don't bother checking ones we've already decided to check
        other_ids_to_check = self.restore_state.last_sync_log.case_ids_on_phone - case_ids_to_sync
        case_ids_to_sync = case_ids_to_sync | set(filter_cases_modified_since(
            self.restore_state.domain, list(other_ids_to_check), self.restore_state.last_sync_log.date
        ))

    all_maybe_syncing = copy(case_ids_to_sync)
    all_synced = set()
    all_indices = defaultdict(set)
    all_dependencies_syncing = set()
    while case_ids_to_sync:
        ids = pop_ids(case_ids_to_sync, chunk_size)
        # todo: see if we can avoid wrapping - serialization depends on it heavily for now
        case_batch = filter(
            partial(case_needs_to_sync, last_sync_log=self.restore_state.last_sync_log),
            [CommCareCase.wrap(doc) for doc in get_docs(CommCareCase.get_db(), ids)]
        )
        updates = get_case_sync_updates(
            self.restore_state.domain, case_batch, self.restore_state.last_sync_log
        )
        for update in updates:
            case = update.case
            all_synced.add(case._id)
            append_update_to_response(response, update, self.restore_state)

            # update the indices in the new sync log
            if case.indices:
                all_indices[case._id] = {index.identifier: index.referenced_id for index in case.indices}
                # and double check footprint for non-live cases
                for index in case.indices:
                    if index.referenced_id not in all_maybe_syncing:
                        case_ids_to_sync.add(index.referenced_id)

            if not _is_live(case, self.restore_state):
                all_dependencies_syncing.add(case._id)

        # commtrack ledger sections for this batch
        commtrack_elements = get_stock_payload(
            self.restore_state.project, self.restore_state.stock_settings,
            [CaseStub(update.case._id, update.case.type) for update in updates]
        )
        response.extend(commtrack_elements)

        # add any new values to all_syncing
        all_maybe_syncing = all_maybe_syncing | case_ids_to_sync

    # update sync token - marking it as the new format
    self.restore_state.current_sync_log = SimplifiedSyncLog.wrap(
        self.restore_state.current_sync_log.to_json()
    )
    self.restore_state.current_sync_log.log_format = LOG_FORMAT_SIMPLIFIED

    index_tree = IndexTree(indices=all_indices)
    case_ids_on_phone = all_synced
    primary_cases_syncing = all_synced - all_dependencies_syncing
    if not self.restore_state.is_initial:
        case_ids_on_phone = case_ids_on_phone | self.restore_state.last_sync_log.case_ids_on_phone
        # subtract primary cases from dependencies since they must be newly primary
        all_dependencies_syncing = all_dependencies_syncing | (
            self.restore_state.last_sync_log.dependent_case_ids_on_phone - primary_cases_syncing
        )
        index_tree = self.restore_state.last_sync_log.index_tree.apply_updates(index_tree)

    self.restore_state.current_sync_log.case_ids_on_phone = case_ids_on_phone
    self.restore_state.current_sync_log.dependent_case_ids_on_phone = all_dependencies_syncing
    self.restore_state.current_sync_log.index_tree = index_tree

    return response
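# Illustrative sketch, not part of the source above: get_payload() drains case_ids_to_sync
# in chunks of chunk_size through pop_ids(). A minimal implementation consistent with that
# usage (an assumption, not necessarily the real helper):
def pop_ids(id_set, how_many):
    popped = []
    while id_set and len(popped) < how_many:
        popped.append(id_set.pop())
    return popped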
def _case_sync_updates(self, all_potential_to_sync):
    return get_case_sync_updates(self.domain, all_potential_to_sync, self.last_sync)