Example #1
def _get_case_ids(self):
    asha_location_ids = [choice.value for choice in self.value]
    accessor = CaseAccessors('icds-cas')  # figure out how to get the domain here
    return accessor.get_case_ids_by_owners(asha_location_ids)
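A minimal usage sketch of the call shape above, assuming a stand-in accessor (CaseAccessors and the 'icds-cas' domain belong to commcare-hq; the stub class and fixture data below are purely illustrative):

class StubCaseAccessors(object):
    """Illustrative stand-in for commcare-hq's CaseAccessors."""

    def __init__(self, domain):
        self.domain = domain
        # owner_id -> case ids; hypothetical fixture data
        self._cases_by_owner = {
            'loc-1': ['case-a', 'case-b'],
            'loc-2': ['case-c'],
        }

    def get_case_ids_by_owners(self, owner_ids, closed=None):
        # Mirrors the call shape used above: collect case ids across owners.
        # The closed flag is accepted but ignored in this stub.
        return [
            case_id
            for owner_id in owner_ids
            for case_id in self._cases_by_owner.get(owner_id, [])
        ]


accessor = StubCaseAccessors('icds-cas')
print(accessor.get_case_ids_by_owners(['loc-1', 'loc-2']))
# ['case-a', 'case-b', 'case-c']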
Example #2
class Dumper(object):
    def __init__(self, domain):
        """
        Dump all transitions into an Excel sheet, in a format easy for users to understand.
        One tab per location type, with changes specific to that location type.
        See TestDumper.test_dump for an example.
        """
        self.domain = domain
        self.new_site_codes = []
        self.case_accessor = CaseAccessors(domain)

    def dump(self, transitions_per_location_type):
        """
        :param transitions_per_location_type: location types mapped to transitions,
        where each transition is a dict mapping an operation (like merge or split)
        to the details for that operation.
        See Parser for the expected format of each operation.
        """
        location_types = list(transitions_per_location_type.keys())
        headers = [[location_type, DUMPER_COLUMNS]
                   for location_type in location_types]
        stream = io.BytesIO()
        self._setup_site_codes(list(transitions_per_location_type.values()))
        rows = self._rows(transitions_per_location_type).items()
        export_raw(headers, rows, stream)
        stream.seek(0)
        return stream

    def _setup_site_codes(self, transitions):
        # from the site codes of the destination locations, find site codes that
        # are not present in the system yet, and those that are present but
        # archived
        destination_site_codes = self._get_destination_site_codes(transitions)
        site_codes_present = (SQLLocation.active_objects.filter(
            site_code__in=destination_site_codes).values_list('site_code',
                                                              flat=True))
        self.new_site_codes = set(destination_site_codes) - set(
            site_codes_present)
        self.archived_sites_codes = set(
            SQLLocation.objects.filter(site_code__in=destination_site_codes,
                                       is_archived=True).values_list(
                                           'site_code', flat=True))
        self.old_site_codes = self._get_old_site_codes(transitions)

    @staticmethod
    def _get_destination_site_codes(transitions):
        # find all site codes of the destination/final locations
        new_site_codes = []
        for transition in transitions:
            for operation, details in transition.items():
                # in case of a split, the final site codes are a list, stored
                # as the value in the dict
                if operation == SPLIT_OPERATION:
                    for to_site_codes in details.values():
                        new_site_codes.extend(to_site_codes)
                else:
                    new_site_codes.extend(list(details.keys()))
        return new_site_codes

    @staticmethod
    def _get_old_site_codes(transitions):
        # find all site codes of the old/original locations
        old_site_codes = []
        for transition in transitions:
            for operation, details in transition.items():
                # in case of a split, the old site code is the key in the dict
                if operation == SPLIT_OPERATION:
                    old_site_codes.extend(list(details.keys()))
                # in case of a merge, the old site codes are a list, stored as
                # the value in the dict
                elif operation == MERGE_OPERATION:
                    for from_site_codes in details.values():
                        old_site_codes.extend(from_site_codes)
                else:
                    old_site_codes.extend(list(details.values()))
        return old_site_codes

    def _rows(self, transitions_per_location_type):
        rows = {
            location_type: []
            for location_type in transitions_per_location_type
        }
        for location_type, transitions in transitions_per_location_type.items():
            for operation, details in transitions.items():
                rows[location_type].extend(
                    self._get_rows_for_operation(operation, details))
        return rows

    def _get_rows_for_operation(self, operation, details):
        rows = []
        if operation == MERGE_OPERATION:
            rows.extend(self._get_rows_for_merge(details))
        elif operation == SPLIT_OPERATION:
            rows.extend(self._get_rows_for_split(details))
        elif operation in [MOVE_OPERATION, EXTRACT_OPERATION]:
            for destination, source in details.items():
                rows.append(self._build_row(source, operation, destination))
        return rows

    def _get_rows_for_merge(self, details):
        return [
            self._build_row(source, MERGE_OPERATION, destination)
            for destination, sources in details.items() for source in sources
        ]

    def _build_row(self, source, operation, destination):
        return [
            source, operation, destination, destination in self.new_site_codes,
            destination in self.archived_sites_codes,
            self._get_count_of_cases_owned(source)
        ]

    def _get_rows_for_split(self, details):
        return [
            self._build_row(source, SPLIT_OPERATION, destination)
            for source, destinations in details.items()
            for destination in destinations
        ]

    @memoized
    def _get_count_of_cases_owned(self, site_code):
        location_id = self._old_location_ids_by_site_code().get(site_code)
        if location_id:
            return len(self.case_accessor.get_case_ids_by_owners([location_id]))
        return "Not Found"

    @memoized
    def _old_location_ids_by_site_code(self):
        return {
            loc.site_code: loc.location_id
            for loc in SQLLocation.active_objects.filter(
                domain=self.domain, site_code__in=self.old_site_codes)
        }
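For reference, a hypothetical transitions payload matching the key/value conventions the Dumper code above relies on (the operation constants come from commcare-hq; the string values assigned here are assumptions for the sake of a runnable sketch):

SPLIT_OPERATION = 'split'  # assumed value; details map old site code -> list of new site codes
MERGE_OPERATION = 'merge'  # assumed value; details map new site code -> list of old site codes
MOVE_OPERATION = 'move'    # assumed value; details map new site code -> old site code

transitions_per_location_type = {
    'awc': {
        SPLIT_OPERATION: {'awc_old': ['awc_new_1', 'awc_new_2']},
        MERGE_OPERATION: {'awc_merged': ['awc_a', 'awc_b']},
        MOVE_OPERATION: {'awc_new': 'awc_old_moved'},
    },
}
# Dumper._rows() would emit one (source, operation, destination, ...) row per
# pair, e.g. ('awc_old', 'split', 'awc_new_1') and ('awc_a', 'merge', 'awc_merged').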
Example #3
def do_livequery(timing_context, restore_state, response, async_task=None):
    """Get case sync restore response

    This function makes no changes to external state other than updating
    the `restore_state.current_sync_log` and progress of `async_task`.
    Extends `response` with restore elements.
    """
    def index_key(index):
        return '{} {}'.format(index.case_id, index.identifier)

    def is_extension(case_id):
        """Determine if case_id is an extension case

        A case that is both a child and an extension is not an extension.
        """
        return case_id in hosts_by_extension and case_id not in parents_by_child

    def has_live_extension(case_id, cache={}):
        """Check if available case_id has a live extension case

        Do not check for live children because an available parent
        cannot cause its children to become live. This is unlike an
        available host, which can cause its available extension to
        become live through the recursive rules:

        - A case is available if
            - it is open and not an extension case (applies to host).
            - it is open and is the extension of an available case.
        - A case is live if it is owned and available.

        The result is cached to reduce recursion in subsequent calls
        and to prevent infinite recursion.
        """
        try:
            return cache[case_id]
        except KeyError:
            cache[case_id] = False
        cache[case_id] = result = any(
            ext_id in live_ids      # has live extension
            or ext_id in owned_ids  # ext is owned and available, will be live
            or has_live_extension(ext_id)
            for ext_id in extensions_by_host[case_id]
        )
        return result

    def enliven(case_id):
        """Mark the given case, its extensions and their hosts as live

        This closure mutates `live_ids` from the enclosing function.
        """
        if case_id in live_ids:
            # already live
            return
        debug('enliven(%s)', case_id)
        live_ids.add(case_id)
        # case is open and is the extension of a live case
        ext_ids = extensions_by_host.get(case_id, [])
        # case has live extension
        host_ids = hosts_by_extension.get(case_id, [])
        # case has live child
        parent_ids = parents_by_child.get(case_id, [])
        for cid in chain(ext_ids, host_ids, parent_ids):
            enliven(cid)

    def classify(index, prev_ids):
        """Classify index as either live or extension with live status pending

        This closure mutates case graph data structures from the
        enclosing function.

        :returns: Case id for next related index fetch or IGNORE
        if the related case should be ignored.
        """
        sub_id = index.case_id
        ref_id = index.referenced_id  # aka parent/host/super
        relationship = index.relationship
        ix_key = index_key(index)
        if ix_key in seen_ix[sub_id]:
            return IGNORE  # unexpected, don't process duplicate index twice
        seen_ix[sub_id].add(ix_key)
        seen_ix[ref_id].add(ix_key)
        indices[sub_id].append(index)
        debug("%s --%s--> %s", sub_id, relationship, ref_id)
        if sub_id in live_ids:
            # ref has a live child or extension
            enliven(ref_id)
            # It does not matter that sub_id -> ref_id never makes it into
            # hosts_by_extension since both are live and therefore this index
            # will not need to be traversed in other liveness calculations.
        elif relationship == EXTENSION:
            if sub_id in open_ids:
                if ref_id in live_ids:
                    # sub is open and is the extension of a live case
                    enliven(sub_id)
                else:
                    # live status pending:
                    # if ref becomes live -> sub is open extension of live case
                    # if sub becomes live -> ref has a live extension
                    extensions_by_host[ref_id].add(sub_id)
                    hosts_by_extension[sub_id].add(ref_id)
            else:
                return IGNORE  # closed extension
        elif sub_id in owned_ids:
            # sub is owned and available (open and not an extension case)
            enliven(sub_id)
            # ref has a live child
            enliven(ref_id)
        else:
            # live status pending: if sub becomes live -> ref has a live child
            parents_by_child[sub_id].add(ref_id)

        next_id = ref_id if sub_id in prev_ids else sub_id
        if next_id not in all_ids:
            return next_id
        return IGNORE  # circular reference

    def update_open_and_deleted_ids(related):
        """Update open_ids and deleted_ids with related case_ids

        TODO store referenced case (parent) deleted and closed status in
        CommCareCaseIndexSQL to reduce number of related indices fetched
        and avoid this extra query per related query.
        """
        case_ids = {
            case_id
            for index in related
            for case_id in [index.case_id, index.referenced_id]
            if case_id not in all_ids
        }
        rows = accessor.get_closed_and_deleted_ids(list(case_ids))
        for case_id, closed, deleted in rows:
            if deleted:
                deleted_ids.add(case_id)
            if closed or deleted:
                case_ids.remove(case_id)
        open_ids.update(case_ids)

    IGNORE = object()
    debug = logging.getLogger(__name__).debug
    accessor = CaseAccessors(restore_state.domain)

    # case graph data structures
    live_ids = set()
    deleted_ids = set()
    extensions_by_host = defaultdict(set)  # host_id -> (open) extension_ids
    hosts_by_extension = defaultdict(set)  # (open) extension_id -> host_ids
    parents_by_child = defaultdict(set)    # child_id -> parent_ids
    indices = defaultdict(list)  # case_id -> list of CommCareCaseIndex-like
    seen_ix = defaultdict(set)   # case_id -> set of '<index.case_id> <index.identifier>'
    owner_ids = list(restore_state.owner_ids)

    debug("sync %s for %r", restore_state.current_sync_log._id, owner_ids)
    with timing_context("livequery"):
        with timing_context("get_case_ids_by_owners"):
            owned_ids = accessor.get_case_ids_by_owners(owner_ids, closed=False)
            debug("owned: %r", owned_ids)

        next_ids = all_ids = set(owned_ids)
        owned_ids = set(owned_ids)  # owned, open case ids (may be extensions)
        open_ids = set(owned_ids)
        while next_ids:
            exclude = set(chain.from_iterable(seen_ix[id] for id in next_ids))
            with timing_context("get_related_indices({} cases, {} seen)".format(
                    len(next_ids), len(exclude))):
                related = accessor.get_related_indices(list(next_ids), exclude)
                if not related:
                    break
                update_open_and_deleted_ids(related)
                next_ids = {
                    classify(index, next_ids)
                    for index in related
                    if index.referenced_id not in deleted_ids
                    and index.case_id not in deleted_ids
                }
                next_ids.discard(IGNORE)
                all_ids.update(next_ids)
                debug('next: %r', next_ids)

        with timing_context("enliven open roots (%s cases)" % len(open_ids)):
            debug('open: %r', open_ids)
            # owned, open, not an extension -> live
            for case_id in owned_ids:
                if not is_extension(case_id):
                    enliven(case_id)

            # available case with live extension -> live
            for case_id in open_ids:
                if (case_id not in live_ids
                        and not is_extension(case_id)
                        and has_live_extension(case_id)):
                    enliven(case_id)

            debug('live: %r', live_ids)

        if restore_state.last_sync_log:
            with timing_context("discard_already_synced_cases"):
                debug('last sync: %s', restore_state.last_sync_log._id)
                sync_ids = discard_already_synced_cases(
                    live_ids, restore_state, accessor)
        else:
            sync_ids = live_ids
        restore_state.current_sync_log.case_ids_on_phone = live_ids

        with timing_context("compile_response(%s cases)" % len(sync_ids)):
            iaccessor = PrefetchIndexCaseAccessor(accessor, indices)
            compile_response(
                timing_context,
                restore_state,
                response,
                batch_cases(iaccessor, sync_ids),
                init_progress(async_task, len(sync_ids)),
            )
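The availability rules quoted in has_live_extension's docstring can be traced on a toy case graph. This is a self-contained sketch of just those two rules, not the commcare-hq implementation; all names and data are illustrative:

# Toy graph: 'ext' is an open extension of 'host'; only 'ext' is owned.
open_ids = {'host', 'ext'}
owned_ids = {'ext'}
hosts_by_extension = {'ext': {'host'}}

def is_available(case_id):
    # Available: open and not an extension, or the open extension of an
    # available case.
    if case_id not in open_ids:
        return False
    hosts = hosts_by_extension.get(case_id)
    return True if not hosts else any(is_available(h) for h in hosts)

def is_live(case_id):
    # Live: owned and available.
    return case_id in owned_ids and is_available(case_id)

print(is_live('ext'))   # True: owned, open, and its host is available
print(is_live('host'))  # False by these two rules alone, but enliven() above
                        # would also mark the host live: it has a live extension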
Example #4
class CleanOwnerSyncPayload(object):
    def __init__(self, timing_context, case_ids_to_sync, restore_state):
        self.restore_state = restore_state
        self.case_accessor = CaseAccessors(self.restore_state.domain)
        self.response = self.restore_state.restore_class()

        self.case_ids_to_sync = case_ids_to_sync
        self.all_maybe_syncing = copy(case_ids_to_sync)
        self.checked_cases = set()
        self.child_indices = defaultdict(set)
        self.extension_indices = defaultdict(set)
        self.all_dependencies_syncing = set()
        self.closed_cases = set()
        self.potential_elements_to_sync = {}

        self.timing_context = timing_context

    def get_payload(self):
        with self.timing_context('process_case_batches'):
            while self.case_ids_to_sync:
                self.process_case_batch(self._get_next_case_batch())

        with self.timing_context('update_index_trees'):
            self.update_index_trees()

        with self.timing_context('update_case_ids_on_phone'):
            self.update_case_ids_on_phone()

        with self.timing_context('move_no_longer_owned_cases_to_dependent_list_if_necessary'):
            self.move_no_longer_owned_cases_to_dependent_list_if_necessary()

        with self.timing_context('purge_and_get_irrelevant_cases'):
            irrelevant_cases = self.purge_and_get_irrelevant_cases()

        with self.timing_context('compile_response'):
            self.compile_response(irrelevant_cases)

        return self.response

    def _get_next_case_batch(self):
        ids = pop_ids(self.case_ids_to_sync, chunk_size)
        return [
            case for case in self.case_accessor.get_cases(ids)
            if not case.is_deleted and case_needs_to_sync(case, last_sync_log=self.restore_state.last_sync_log)
        ]

    def process_case_batch(self, case_batch):
        updates = get_case_sync_updates(
            self.restore_state.domain, case_batch, self.restore_state.last_sync_log
        )

        for update in updates:
            case = update.case
            self.potential_elements_to_sync[case.case_id] = PotentialSyncElement(
                case_stub=CaseStub(case.case_id, case.type),
                sync_xml_items=get_xml_for_response(update, self.restore_state)
            )
            self._process_case_update(case)
            self._mark_case_as_checked(case)

    def _process_case_update(self, case):
        if case.indices:
            self._update_indices_in_new_synclog(case)
            self._add_unchecked_indices_to_be_checked(case)

        if not _is_live(case, self.restore_state):
            self.all_dependencies_syncing.add(case.case_id)
            if case.closed:
                self.closed_cases.add(case.case_id)

    def _mark_case_as_checked(self, case):
        self.checked_cases.add(case.case_id)

    def _update_indices_in_new_synclog(self, case):
        self.extension_indices[case.case_id] = {
            index.identifier: index.referenced_id
            for index in case.indices
            if index.relationship == CASE_INDEX_EXTENSION
        }
        self.child_indices[case.case_id] = {
            index.identifier: index.referenced_id
            for index in case.indices
            if index.relationship == CASE_INDEX_CHILD
        }

    def _add_unchecked_indices_to_be_checked(self, case):
        for index in case.indices:
            if index.referenced_id not in self.all_maybe_syncing:
                self.case_ids_to_sync.add(index.referenced_id)
        self.all_maybe_syncing |= self.case_ids_to_sync

    def move_no_longer_owned_cases_to_dependent_list_if_necessary(self):
        if not self.restore_state.is_initial:
            removed_owners = (
                set(self.restore_state.last_sync_log.owner_ids_on_phone) - set(self.restore_state.owner_ids)
            )
            if removed_owners:
                # if we removed any owner ids, then any cases that belonged to those owners need
                # to be moved to the dependent list
                case_ids_to_try_purging = self.case_accessor.get_case_ids_by_owners(list(removed_owners))
                for to_purge in case_ids_to_try_purging:
                    if to_purge in self.restore_state.current_sync_log.case_ids_on_phone:
                        self.restore_state.current_sync_log.dependent_case_ids_on_phone.add(to_purge)

    def update_index_trees(self):
        index_tree = IndexTree(indices=self.child_indices)
        extension_index_tree = IndexTree(indices=self.extension_indices)
        if not self.restore_state.is_initial:
            index_tree = self.restore_state.last_sync_log.index_tree.apply_updates(index_tree)
            extension_index_tree = self.restore_state.last_sync_log.extension_index_tree.apply_updates(
                extension_index_tree
            )

        self.restore_state.current_sync_log.index_tree = index_tree
        self.restore_state.current_sync_log.extension_index_tree = extension_index_tree

    def update_case_ids_on_phone(self):
        case_ids_on_phone = self.checked_cases
        primary_cases_syncing = self.checked_cases - self.all_dependencies_syncing
        if not self.restore_state.is_initial:
            case_ids_on_phone |= self.restore_state.last_sync_log.case_ids_on_phone
            # subtract primary cases from dependencies since they must be newly primary
            self.all_dependencies_syncing |= (
                self.restore_state.last_sync_log.dependent_case_ids_on_phone -
                primary_cases_syncing
            )
        self.restore_state.current_sync_log.case_ids_on_phone = case_ids_on_phone
        self.restore_state.current_sync_log.dependent_case_ids_on_phone = self.all_dependencies_syncing
        self.restore_state.current_sync_log.closed_cases = self.closed_cases

    def purge_and_get_irrelevant_cases(self):
        original_case_ids_on_phone = self.restore_state.current_sync_log.case_ids_on_phone.copy()
        self.restore_state.current_sync_log.purge_dependent_cases()
        purged_cases = original_case_ids_on_phone - self.restore_state.current_sync_log.case_ids_on_phone
        # don't sync purged cases that were never on the phone
        if self.restore_state.is_initial:
            irrelevant_cases = purged_cases
        else:
            irrelevant_cases = purged_cases - self.restore_state.last_sync_log.case_ids_on_phone
        return irrelevant_cases

    def compile_response(self, irrelevant_cases):
        relevant_sync_elements = [
            potential_sync_element
            for syncable_case_id, potential_sync_element in self.potential_elements_to_sync.items()
            if syncable_case_id not in irrelevant_cases
        ]

        with self.timing_context('add_commtrack_elements_to_response'):
            self._add_commtrack_elements_to_response(relevant_sync_elements)

        self._add_case_elements_to_response(relevant_sync_elements)

    def _add_commtrack_elements_to_response(self, relevant_sync_elements):
        commtrack_elements = get_stock_payload(
            self.restore_state.project, self.restore_state.stock_settings,
            [
                potential_sync_element.case_stub
                for potential_sync_element in relevant_sync_elements
            ]
        )
        self.response.extend(commtrack_elements)

    def _add_case_elements_to_response(self, relevant_sync_elements):
        for relevant_case in relevant_sync_elements:
            for xml_item in relevant_case.sync_xml_items:
                self.response.append(xml_item)
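_get_next_case_batch above leans on pop_ids and chunk_size from the surrounding commcare-hq module, which are not shown here. A plausible minimal sketch of that batching helper (an assumption, not the real implementation):

from itertools import islice

def pop_ids(id_set, batch_size):
    # Remove and return up to batch_size ids from the set.
    batch = list(islice(id_set, batch_size))
    for case_id in batch:
        id_set.remove(case_id)
    return batch

case_ids_to_sync = {'a', 'b', 'c', 'd', 'e'}
while case_ids_to_sync:
    print(pop_ids(case_ids_to_sync, 2))  # batches of two; the last may be smaller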
Example #5
def explode_cases(user_id, domain, factor, task=None):
    user = CommCareUser.get_by_user_id(user_id, domain)
    messages = list()
    if task:
        DownloadBase.set_progress(explode_case_task, 0, 0)
    count = 0

    old_to_new = dict()
    child_cases = list()
    accessor = CaseAccessors(domain)

    case_ids = accessor.get_case_ids_by_owners(user.get_owner_ids(), closed=False)
    cases = accessor.iter_cases(case_ids)

    # copy parents
    for case in cases:
        # skip over user as a case
        if case.type == USERCASE_TYPE:
            continue
        # save children for later
        if case.indices:
            child_cases.append(case)
            continue
        old_to_new[case.case_id] = list()
        for i in range(factor - 1):
            new_case_id = uuid.uuid4().hex
            # add new parent ids to the old to new id mapping
            old_to_new[case.case_id].append(new_case_id)
            submit_case(case, new_case_id, domain)
            count += 1
            if task:
                DownloadBase.set_progress(explode_case_task, count, 0)

    max_iterations = len(child_cases) ** 2
    iterations = 0
    while len(child_cases) > 0:
        if iterations > max_iterations:
            raise Exception('cases had inconsistent references to each other')
        iterations += 1
        # take the first case
        case = child_cases.pop(0)
        can_process = True
        parent_ids = dict()

        for index in case.indices:
            ref_id = index.referenced_id
            # if the parent hasn't been processed
            if ref_id not in old_to_new:
                # append it to the back and break out
                child_cases.append(case)
                can_process = False
                break
            # update parent ids that this case needs
            parent_ids.update({ref_id: old_to_new[ref_id]})
        # defer this case until its parents have been processed
        if not can_process:
            continue

        old_to_new[case.case_id] = list()
        for i in range(factor - 1):
            # grab the parents for this round of exploding
            parents = {k: v[i] for k, v in parent_ids.items()}
            new_case_id = uuid.uuid4().hex
            old_to_new[case.case_id].append(new_case_id)
            submit_case(case, new_case_id, domain, parents)
            count += 1
            if task:
                DownloadBase.set_progress(explode_case_task, count, 0)

    messages.append("All of %s's cases were exploded by a factor of %d" % (user.raw_username, factor))

    return {'messages': messages}
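The copy-index bookkeeping is the subtle part: copy i of a child case is indexed against copy i of each of its parents. A small standalone trace of that mapping (uuids stand in for real case ids; names are illustrative):

import uuid

factor = 3
# Each original parent gets factor - 1 new copies, as in the first loop above.
old_to_new = {'parent-1': [uuid.uuid4().hex for _ in range(factor - 1)]}

for i in range(factor - 1):
    # Mirrors `parents = {k: v[i] for k, v in parent_ids.items()}`: child copy i
    # points at parent copy i.
    parents = {old_id: new_ids[i] for old_id, new_ids in old_to_new.items()}
    print(i, parents)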
Example #6
File: api.py  Project: xbryanc/commcare-hq
class CaseAPIHelper(object):
    """
    Simple config object for querying the APIs
    """

    def __init__(self, domain, status=CASE_STATUS_OPEN, case_type=None, ids_only=False,
                 footprint=False, strip_history=False, filters=None):
        if status not in [CASE_STATUS_ALL, CASE_STATUS_CLOSED, CASE_STATUS_OPEN]:
            raise ValueError("invalid case status %s" % status)
        self.domain = domain
        self.status = status
        self.case_type = case_type
        self.ids_only = ids_only
        self.wrap = not ids_only  # if we're just querying IDs we don't need to wrap the docs
        self.footprint = footprint
        self.strip_history = strip_history
        self.filters = filters
        self.case_accessors = CaseAccessors(self.domain)

    def _case_results(self, case_id_list):
        def _filter(res):
            if self.filters:
                for path, val in self.filters.items():
                    actual_val = safe_index(res.case_json, path.split("/"))
                    if actual_val != val:
                        # closed=false => case.closed == False
                        if val in ('null', 'true', 'false'):
                            if actual_val != json.loads(val):
                                return False
                        else:
                            return False
                return True

        if self.filters and not self.footprint:
            base_results = self._populate_results(case_id_list)
            return list(filter(_filter, base_results))

        if self.footprint:
            initial_case_ids = set(case_id_list)
            dependent_case_ids = get_dependent_case_info(self.domain, initial_case_ids).all_ids
            all_case_ids = initial_case_ids | dependent_case_ids
        else:
            all_case_ids = case_id_list

        if self.ids_only:
            return [CaseAPIResult(domain=self.domain, id=case_id, id_only=True) for case_id in all_case_ids]
        else:
            return self._populate_results(all_case_ids)

    def _populate_results(self, case_id_list):
        if should_use_sql_backend(self.domain):
            base_results = [CaseAPIResult(domain=self.domain, couch_doc=case, id_only=self.ids_only)
                            for case in self.case_accessors.iter_cases(case_id_list)]
        else:
            base_results = [CaseAPIResult(domain=self.domain, couch_doc=case, id_only=self.ids_only)
                            for case in iter_cases(case_id_list, self.strip_history, self.wrap)]
        return base_results

    def get_all(self):
        status = self.status or CASE_STATUS_ALL
        if status == CASE_STATUS_ALL:
            case_ids = self.case_accessors.get_case_ids_in_domain(self.case_type)
        elif status == CASE_STATUS_OPEN:
            case_ids = self.case_accessors.get_open_case_ids_in_domain_by_type(self.case_type)
        else:
            raise ValueError("Invalid value for 'status': '%s'" % status)

        return self._case_results(case_ids)

    def get_owned(self, user_id):
        try:
            user = CouchUser.get_by_user_id(user_id, self.domain)
        except KeyError:
            user = None
        try:
            owner_ids = user.get_owner_ids()
        except AttributeError:
            owner_ids = [user_id]

        closed = {
            CASE_STATUS_OPEN: False,
            CASE_STATUS_CLOSED: True,
            CASE_STATUS_ALL: None,
        }[self.status]

        ids = self.case_accessors.get_case_ids_by_owners(owner_ids, closed=closed)
        return self._case_results(ids)
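The coercion in _filter means query values arrive as strings, so a filter like closed=false must match a boolean False on the case. A standalone sketch of that comparison logic, with a simplified stand-in for commcare-hq's safe_index:

import json

def safe_index(doc, path_parts):
    # Simplified stand-in for safe_index: walk nested dicts by path segment.
    for part in path_parts:
        if not isinstance(doc, dict):
            return None
        doc = doc.get(part)
    return doc

def matches(case_json, path, val):
    actual_val = safe_index(case_json, path.split('/'))
    if actual_val == val:
        return True
    # 'null'/'true'/'false' are parsed, so 'false' matches closed == False
    if val in ('null', 'true', 'false'):
        return actual_val == json.loads(val)
    return False

print(matches({'closed': False}, 'closed', 'false'))  # True
print(matches({'a': {'b': 'x'}}, 'a/b', 'x'))         # True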