Example #1
    def _delete_couch_data(self):
        for doc_class, doc_ids in get_doc_ids_to_dump(self.domain_name):
            db = doc_class.get_db()
            for docs in chunked(iter_docs(db, doc_ids), 100):
                db.bulk_delete(docs)

            self.assertEqual(0, len(get_docs(db, doc_ids)))
Example #2
def iter_location_join_supply_point(all_location_ids, chunksize=100):

    # this function was copy-paste-modified from iter_docs

    database = Location.get_db()
    for location_ids in chunked(all_location_ids, chunksize):
        # sync supply point id
        locations = [row.get('doc')
                     for row in get_docs(database, keys=location_ids)
                     if row.get('doc')
                     and row.get('doc')['domain'] not in EXCLUDE_DOMAINS]

        supply_points = SupplyPointCase.view(
            'commtrack/supply_point_by_loc',
            keys=[[location['domain'], location['_id']]
                  for location in locations],
            include_docs=True,
            classes={'CommCareCase': SupplyPointCase},
        ).all()

        supply_points_index = {}

        for supply_point in supply_points:
            key = (supply_point.domain, supply_point.location_id)
            if key in supply_points_index:
                raise Exception(
                    "Multiple supply points have "
                    "domain={!r}, location_id={!r}".format(*key))
            supply_points_index[key] = supply_point

        for location in locations:
            yield (
                location,
                supply_points_index.get((location['domain'], location['_id']))
            )
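A minimal usage sketch for the generator above; the literal ids and the reporting logic are placeholders, not taken from the original code.

all_location_ids = ['loc-1', 'loc-2', 'loc-3']   # placeholder ids; real callers would pass every location id

for location, supply_point in iter_location_join_supply_point(all_location_ids, chunksize=50):
    # locations in EXCLUDE_DOMAINS are never yielded, so a None supply_point here
    # means no SupplyPointCase matched this location's (domain, _id) key
    if supply_point is None:
        print(f"no supply point for {location['_id']} in {location['domain']}")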
Example #3
def iter_bulk_delete(database,
                     ids,
                     chunksize=100,
                     doc_callback=None,
                     wait_time=None,
                     max_fetch_attempts=1):
    total_count = 0
    for doc_ids in chunked(ids, chunksize):
        for i in range(max_fetch_attempts):
            try:
                doc_dicts = get_docs(database, keys=doc_ids)
                break
            except RequestException:
                if i == (max_fetch_attempts - 1):
                    raise
                sleep(30)

        if doc_callback:
            for doc in doc_dicts:
                doc_callback(doc)

        total_count += len(doc_dicts)
        database.bulk_delete(doc_dicts)
        if wait_time:
            sleep(wait_time)

    return total_count
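A brief usage sketch for the helper above; the document class and ids are placeholders, and the callback simply logs each doc before deletion.

db = XFormInstance.get_db()                    # any couchdbkit-style database handle (placeholder choice)
stale_ids = ['form-1', 'form-2', 'form-3']     # placeholder ids to delete

deleted_count = iter_bulk_delete(
    db,
    stale_ids,
    chunksize=50,
    doc_callback=lambda doc: print('deleting', doc['_id']),  # inspect each doc before it is deleted
    wait_time=1,               # sleep 1s between chunks to ease load on couch
    max_fetch_attempts=3,      # retry transient fetch failures (30s apart) before giving up
)
print(deleted_count, 'documents deleted')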
Example #4
def get_report_configs(config_ids, domain):
    """
    Return a list of ReportConfigurations.
    config_ids may be ReportConfiguration or StaticReportConfiguration ids.
    """

    static_report_config_ids = []
    dynamic_report_config_ids = []
    for config_id in config_ids:
        if report_config_id_is_static(config_id):
            static_report_config_ids.append(config_id)
        else:
            dynamic_report_config_ids.append(config_id)
    static_report_configs = StaticReportConfiguration.by_ids(
        static_report_config_ids)
    for config in static_report_configs:
        if config.domain != domain:
            raise ReportConfigurationNotFoundError

    dynamic_report_configs = []
    if dynamic_report_config_ids:
        dynamic_report_configs = [
            ReportConfiguration.wrap(doc) for doc in get_docs(
                ReportConfiguration.get_db(), dynamic_report_config_ids)
        ]

    if len(dynamic_report_configs) != len(dynamic_report_config_ids):
        raise ReportConfigurationNotFoundError
    for config in dynamic_report_configs:
        if config.domain != domain:
            raise ReportConfigurationNotFoundError

    return dynamic_report_configs + static_report_configs
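A hedged usage sketch; the ids and domain are placeholders. Any missing id, or one belonging to another domain, raises ReportConfigurationNotFoundError.

try:
    configs = get_report_configs(
        ['static-example-report', 'dynamic-report-uuid'],   # placeholder static and dynamic ids
        'example-domain',
    )
except ReportConfigurationNotFoundError:
    configs = []

for config in configs:
    print(config.domain, config._id)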
Example #5
def _copy(config):
    # unfortunately the only couch view we have for this needs to go by domain
    # will be a bit slow
    database = Domain.get_db()
    assert database.uri == config.source_db.uri, 'can only use "copy" with the main HQ DB as the source'
    domain_names = Domain.get_all_names()
    for domain in domain_names:
        for doc_type in config.doc_types:
            ids_of_this_type = [row['id'] for row in database.view(
                'domain/docs',
                startkey=[domain, doc_type],
                endkey=[domain, doc_type, {}],
                reduce=False,
                include_docs=False,
            )]
            if ids_of_this_type:
                new_revs = dict([
                    (row['id'], row['value']['rev'])
                    for row in config.dest_db.view('_all_docs', keys=ids_of_this_type, include_docs=False)
                    if 'error' not in row
                ])
                for id_group in chunked(ids_of_this_type, 500):
                    docs = get_docs(database, id_group)
                    for doc in docs:
                        if doc['_id'] in new_revs:
                            doc['_rev'] = new_revs[doc['_id']]
                    config.dest_db.bulk_save(docs)

            print('copied {} {}s from {}'.format(len(ids_of_this_type), doc_type, domain))
    print('copy docs complete')
Example #6
def tag_forms_as_deleted_rebuild_associated_cases(user_id, domain, form_id_list, deletion_id,
                                                  deletion_date, deleted_cases=None):
    """
    Upon user deletion, mark associated forms as deleted and prep cases
    for a rebuild.
    - 2 saves/sec for cloudant slowness (rate_limit)
    """
    if deleted_cases is None:
        deleted_cases = set()

    cases_to_rebuild = set()
    forms_to_check = get_docs(XFormInstance.get_db(), form_id_list)
    forms_to_save = []
    for form in forms_to_check:
        assert form['domain'] == domain
        if not is_deleted(form):
            form['doc_type'] += DELETED_SUFFIX
            form['-deletion_id'] = deletion_id
            form['-deletion_date'] = deletion_date
            forms_to_save.append(form)

        # rebuild all cases anyway, since we can't tell whether this already ran if the task was killed
        cases_to_rebuild.update(get_case_ids_from_form(form))

    XFormInstance.get_db().bulk_save(forms_to_save)
    detail = UserArchivedRebuild(user_id=user_id)
    for case in cases_to_rebuild - deleted_cases:
        _rebuild_case_with_retries.delay(domain, case, detail)
Example #7
def get_non_archived_facilities_below(location):
    child_ids = (
        location.sql_location.get_descendants(include_self=True)
        .filter(is_archived=False, location_type__name="FACILITY")
        .values_list("location_id", flat=True)
    )
    return [Location.wrap(doc) for doc in get_docs(Location.get_db(), child_ids)]
Example #8
def get_report_configs(config_ids, domain):
    """
    Return a list of ReportConfigurations.
    config_ids may be ReportConfiguration or StaticReportConfiguration ids.
    """

    static_report_config_ids = []
    dynamic_report_config_ids = []
    for config_id in config_ids:
        if report_config_id_is_static(config_id):
            static_report_config_ids.append(config_id)
        else:
            dynamic_report_config_ids.append(config_id)
    static_report_configs = StaticReportConfiguration.by_ids(static_report_config_ids)
    for config in static_report_configs:
        if config.domain != domain:
            raise ReportConfigurationNotFoundError

    dynamic_report_configs = [
        ReportConfiguration.wrap(doc) for doc in
        get_docs(ReportConfiguration.get_db(), dynamic_report_config_ids)
    ]
    if len(dynamic_report_configs) != len(dynamic_report_config_ids):
        raise ReportConfigurationNotFoundError
    for config in dynamic_report_configs:
        if config.domain != domain:
            raise ReportConfigurationNotFoundError

    return dynamic_report_configs + static_report_configs
Example #10
def _get_non_design_docs(db):
    doc_ids = [
        result['id'] for result in db
        if not result['id'].startswith('_design/')
    ]
    docs = get_docs(db, doc_ids, attachments=True)
    _sort_by_doc_id(docs)
    return docs
Example #11
def delete_doain_couch_data_for_dump_load_test(domain_name):
    for doc_class, doc_ids in get_doc_ids_to_dump(domain_name):
        db = doc_class.get_db()
        for docs in chunked(iter_docs(db, doc_ids), 100):
            db.bulk_delete(docs)

        assert len(get_docs(db, doc_ids)) == 0, f"Some docs not deleted: {doc_class}"
Example #12
def get_non_archived_facilities_below(location):
    child_ids = location.sql_location.get_descendants(
        include_self=True).filter(is_archived=False,
                                  location_type__name='FACILITY').values_list(
                                      'location_id', flat=True)
    return [
        Location.wrap(doc) for doc in get_docs(Location.get_db(), child_ids)
    ]
Example #13
def _update_groups(user, group_ids, user_change_logger):
    groups_updated = user.set_groups(group_ids)
    if user_change_logger and groups_updated:
        groups = []
        if group_ids:
            groups = [
                Group.wrap(doc) for doc in get_docs(Group.get_db(), group_ids)
            ]
        user_change_logger.add_info(UserChangeMessage.groups_info(groups))
Example #14
def paginate_releases(request, domain, app_id):
    limit = request.GET.get('limit')
    only_show_released = json.loads(
        request.GET.get('only_show_released', 'false'))
    build_comment = request.GET.get('build_comment')
    page = int(request.GET.get('page', 1))
    page = max(page, 1)
    try:
        limit = int(limit)
    except (TypeError, ValueError):
        limit = 10

    timezone = get_timezone_for_user(request.couch_user, domain)

    app_es = (AppES().start((page - 1) * limit).size(limit).sort(
        'version', desc=True).domain(domain).is_build().app_id(app_id))
    if only_show_released:
        app_es = app_es.is_released()
    if build_comment:
        app_es = app_es.build_comment(build_comment)
    results = app_es.exclude_source().run()
    app_ids = results.doc_ids
    apps = get_docs(Application.get_db(), app_ids)
    for app in apps:
        app.pop('translations')
    saved_apps = [
        SavedAppBuild.wrap(
            app, scrap_old_conventions=False).to_saved_build_json(timezone)
        for app in apps
    ]

    j2me_enabled_configs = CommCareBuildConfig.j2me_enabled_config_labels()
    for app in saved_apps:
        app['include_media'] = app['doc_type'] != 'RemoteApp'
        app['j2me_enabled'] = app['menu_item_label'] in j2me_enabled_configs
        app['target_commcare_flavor'] = (
            SavedAppBuild.get(app['_id']).target_commcare_flavor
            if toggles.TARGET_COMMCARE_FLAVOR.enabled(domain) else 'none')

    if toggles.APPLICATION_ERROR_REPORT.enabled(request.couch_user.username):
        versions = [app['version'] for app in saved_apps]
        num_errors_dict = _get_error_counts(domain, app_id, versions)
        for app in saved_apps:
            app['num_errors'] = num_errors_dict.get(app['version'], 0)

    total_apps = results.total
    num_pages = int(ceil(total_apps / limit))

    return json_response({
        'apps': saved_apps,
        'pagination': {
            'total': total_apps,
            'num_pages': num_pages,
            'current_page': page,
        }
    })
Example #15
def _iter_skipped_forms(domain, migration_id, stopper, with_progress):
    from dimagi.utils.couch.bulk import get_docs
    couch = XFormInstance.get_db()
    with stop_at_previous_migration(domain, migration_id, stopper):
        skipped_form_ids = _iter_skipped_form_ids(domain, migration_id,
                                                  stopper, with_progress)
        for form_ids in chunked(skipped_form_ids, _iter_docs.chunk_size, list):
            for doc in get_docs(couch, form_ids):
                assert doc["domain"] == domain, doc
                yield doc
Example #16
def _iter_missing_forms(statedb, stopper):
    from dimagi.utils.couch.bulk import get_docs
    from .missingdocs import MissingIds
    couch = XFormInstance.get_db()
    domain = statedb.domain
    for doc_type in MissingIds.form_types:
        missing_ids = statedb.iter_missing_doc_ids(doc_type)
        for form_ids in chunked(missing_ids, _iter_docs.chunk_size, list):
            for doc in get_docs(couch, form_ids):
                assert doc["domain"] == domain, doc
                yield doc_type, doc
            if stopper.clean_break:
                break
Example #17
def bulk_auto_deactivate_commcare_users(user_ids, domain):
    """
    Deactivates CommCareUsers in bulk.

    Please pre-chunk ids to a reasonable size. Also please reference the
    save() method in CommCareUser when making changes.

    :param user_ids: list of user IDs
    :param domain: name of domain user IDs belong to
    """
    from corehq.apps.users.models import UserHistory, CommCareUser
    from corehq.apps.users.model_log import UserModelAction

    last_modified = json_format_datetime(datetime.datetime.utcnow())
    user_docs_to_bulk_save = []
    for user_doc in get_docs(CommCareUser.get_db(), keys=user_ids):
        if user_doc['is_active']:
            user_doc['is_active'] = False
            user_doc['last_modified'] = last_modified
            user_docs_to_bulk_save.append(user_doc)

    # bulk save django Users
    user_query = User.objects.filter(
        username__in=[u["username"] for u in user_docs_to_bulk_save])
    user_query.update(is_active=False)

    # bulk save in couch
    CommCareUser.get_db().bulk_save(user_docs_to_bulk_save)

    # bulk create all the UserHistory logs
    UserHistory.objects.bulk_create([
        UserHistory(by_domain=domain,
                    for_domain=domain,
                    user_type=CommCareUser.doc_type,
                    user_repr=u['username'].split('@')[0],
                    changed_by_repr=SYSTEM_USER_ID,
                    user_id=u['_id'],
                    changed_by=SYSTEM_USER_ID,
                    changes={'is_active': False},
                    changed_via=USER_CHANGE_VIA_AUTO_DEACTIVATE,
                    change_messages={},
                    action=UserModelAction.UPDATE.value,
                    user_upload_record_id=None) for u in user_docs_to_bulk_save
    ])

    # clear caches and fire signals
    for user_doc in user_docs_to_bulk_save:
        commcare_user = CommCareUser.wrap(user_doc)
        commcare_user.clear_quickcache_for_user()
        commcare_user.fire_signals()
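Since the docstring asks callers to pre-chunk the ids, a caller might look roughly like this; get_all_mobile_worker_ids is a hypothetical helper and the chunk size of 100 is an assumption.

all_user_ids = get_all_mobile_worker_ids('example-domain')   # hypothetical helper returning user ids
for id_chunk in chunked(all_user_ids, 100):                  # pre-chunk, as the docstring requests
    bulk_auto_deactivate_commcare_users(list(id_chunk), 'example-domain')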
Example #18
    def get_payload(self):
        response = self.restore_state.restore_class()
        case_ids_to_sync = set()
        for owner_id in self.restore_state.owner_ids:
            case_ids_to_sync = case_ids_to_sync | set(self.get_case_ids_for_owner(owner_id))

        if (not self.restore_state.is_initial and
                any([not self.is_clean(owner_id) for owner_id in self.restore_state.owner_ids])):
            # if it's a steady state sync and we have any dirty owners, then we also need to
            # include ALL cases on the phone that have been modified since the last sync as
            # possible candidates to sync (since they may have been closed or reassigned by someone else)

            # don't bother checking ones we've already decided to check
            other_ids_to_check = self.restore_state.last_sync_log.case_ids_on_phone - case_ids_to_sync
            case_ids_to_sync = case_ids_to_sync | set(filter_cases_modified_since(
                self.restore_state.domain, list(other_ids_to_check), self.restore_state.last_sync_log.date
            ))

        all_maybe_syncing = copy(case_ids_to_sync)
        all_synced = set()
        all_indices = defaultdict(set)
        all_dependencies_syncing = set()
        while case_ids_to_sync:
            ids = pop_ids(case_ids_to_sync, chunk_size)
            # todo: see if we can avoid wrapping - serialization depends on it heavily for now
            case_batch = filter(
                partial(case_needs_to_sync, last_sync_log=self.restore_state.last_sync_log),
                [CommCareCase.wrap(doc) for doc in get_docs(CommCareCase.get_db(), ids)]
            )
            updates = get_case_sync_updates(
                self.restore_state.domain, case_batch, self.restore_state.last_sync_log
            )
            for update in updates:
                case = update.case
                all_synced.add(case._id)
                append_update_to_response(response, update, self.restore_state)

                # update the indices in the new sync log
                if case.indices:
                    all_indices[case._id] = {index.identifier: index.referenced_id for index in case.indices}
                    # and double check footprint for non-live cases
                    for index in case.indices:
                        if index.referenced_id not in all_maybe_syncing:
                            case_ids_to_sync.add(index.referenced_id)

                if not _is_live(case, self.restore_state):
                    all_dependencies_syncing.add(case._id)
Example #19
def iter_docs_with_retry(database, ids, chunksize=100, max_attempts=5, **query_params):
    """
    A version of iter_docs that retries fetching documents if the connection
    to couch fails for any reason.

    This is useful for long-running migrations where you don't want a single
    failed request to make the process fail.
    """
    for doc_ids in chunked(ids, chunksize):
        for i in range(max_attempts):
            try:
                result = get_docs(database, keys=doc_ids, **query_params)
                break
            except RequestException:
                if i == (max_attempts - 1):
                    raise
                sleep(30)

        for doc in result:
            yield doc
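A short sketch of how a long-running migration might consume this generator; the db handle, ids, and transformation are placeholders, and save_doc assumes a couchdbkit-style database.

db = Application.get_db()                   # placeholder couch db handle
doc_ids = ['app-build-1', 'app-build-2']    # placeholder ids, normally read from a view or a dump file
for doc in iter_docs_with_retry(db, doc_ids, chunksize=500):
    doc['migrated'] = True                  # placeholder transformation
    db.save_doc(doc)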
Example #21
def _iter_skipped_forms(statedb, stopper, cached):
    # Datadog tag: type:find_skipped_forms
    from dimagi.utils.couch.bulk import get_docs
    from .missingdocs import MissingIds
    couch = XFormInstance.get_db()
    domain = statedb.domain
    with MissingIds.forms(statedb, stopper, tag="skipped") as skipped:
        if cached:
            doc_types = skipped.doc_types
            iter_doc_ids = statedb.iter_missing_doc_ids
        else:
            doc_types = ["XFormInstance"]
            iter_doc_ids = skipped
        for doc_type in doc_types:
            skipped_ids = iter_doc_ids(doc_type)
            for form_ids in chunked(skipped_ids, _iter_docs.chunk_size, list):
                for doc in get_docs(couch, form_ids):
                    assert doc["domain"] == domain, doc
                    yield doc_type, doc
                if stopper.clean_break:
                    break
Example #22
def iter_bulk_delete(database, ids, chunksize=100, doc_callback=None, wait_time=None,
        max_fetch_attempts=1):
    total_count = 0
    for doc_ids in chunked(ids, chunksize):
        for i in range(max_fetch_attempts):
            try:
                doc_dicts = get_docs(database, keys=doc_ids)
                break
            except RequestException:
                if i == (max_fetch_attempts - 1):
                    raise
                sleep(30)

        if doc_callback:
            for doc in doc_dicts:
                doc_callback(doc)

        total_count += len(doc_dicts)
        database.bulk_delete(doc_dicts)
        if wait_time:
            sleep(wait_time)

    return total_count
Example #23
def iter_docs(database, ids, chunksize=100):
    for doc_ids in chunked(ids, chunksize):
        for doc in get_docs(database, keys=doc_ids):
            yield doc['doc']
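Note that the iter_docs revisions in these examples disagree on whether get_docs returns raw view rows (unwrapped via doc['doc'], as here and in Example #28) or plain document dicts (yielded directly, as in Example #25); a minimal caller looks the same either way. The db handle and ids below are placeholders.

db = CommCareCase.get_db()                        # placeholder db handle
for doc in iter_docs(db, ['case-1', 'case-2']):   # placeholder ids
    print(doc['_id'], doc.get('doc_type'))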
Example #24
def iter_bulk_delete(database, ids, chunksize=100):
    for doc_ids in chunked(ids, chunksize):
        doc_dicts = get_docs(database, keys=doc_ids)
        database.bulk_delete(doc_dicts)
Example #25
def iter_docs(database, ids, chunksize=100, **query_params):
    for doc_ids in chunked(ids, chunksize):
        for doc in get_docs(database, keys=doc_ids, **query_params):
            yield doc
Example #27
def paginate_releases(request, domain, app_id):
    limit = request.GET.get('limit')
    only_show_released = json.loads(request.GET.get('only_show_released', 'false'))
    query = request.GET.get('query')
    page = int(request.GET.get('page', 1))
    page = max(page, 1)
    try:
        limit = int(limit)
    except (TypeError, ValueError):
        limit = 10
    skip = (page - 1) * limit
    timezone = get_timezone_for_user(request.couch_user, domain)

    def _get_batch(start_build=None, skip=None):
        start_build = {} if start_build is None else start_build
        return Application.get_db().view('app_manager/saved_app',
            startkey=[domain, app_id, start_build],
            endkey=[domain, app_id],
            descending=True,
            limit=limit,
            skip=skip,
            wrapper=lambda x: (
                SavedAppBuild.wrap(x['value'], scrap_old_conventions=False)
                .releases_list_json(timezone)
            ),
        ).all()

    if not bool(only_show_released or query):
        # If user is limiting builds by released status or build comment, it's much
        # harder to be performant with couch. So if they're not doing so, take shortcuts.
        total_apps = len(get_built_app_ids_for_app_id(domain, app_id))
        saved_apps = _get_batch(skip=skip)
    else:
        app_es = (
            AppES()
            .start((page - 1) * limit)
            .size(limit)
            .sort('version', desc=True)
            .domain(domain)
            .is_build()
            .app_id(app_id)
        )
        if only_show_released:
            app_es = app_es.is_released()
        if query:
            app_es = app_es.add_query(build_comment(query), queries.SHOULD)
            try:
                app_es = app_es.add_query(version(int(query)), queries.SHOULD)
            except ValueError:
                pass

        results = app_es.exclude_source().run()
        total_apps = results.total
        app_ids = results.doc_ids
        apps = get_docs(Application.get_db(), app_ids)

        saved_apps = [
            SavedAppBuild.wrap(app, scrap_old_conventions=False).releases_list_json(timezone)
            for app in apps
        ]

    if toggles.APPLICATION_ERROR_REPORT.enabled(request.couch_user.username):
        versions = [app['version'] for app in saved_apps]
        num_errors_dict = _get_error_counts(domain, app_id, versions)
        for app in saved_apps:
            app['num_errors'] = num_errors_dict.get(app['version'], 0)

    num_pages = int(ceil(total_apps / limit))

    return json_response({
        'apps': saved_apps,
        'pagination': {
            'total': total_apps,
            'num_pages': num_pages,
            'current_page': page,
            'more': page * limit < total_apps,  # needed when select2 uses this endpoint
        }
    })
Example #28
def iter_docs(database, ids, chunksize=100):
    for doc_ids in chunked(ids, chunksize):
        for doc in get_docs(database, keys=doc_ids):
            doc_dict = doc.get('doc')
            if doc_dict:
                yield doc_dict
Example #30
def iter_bulk_delete(database, ids, chunksize=100):
    for doc_ids in chunked(ids, chunksize):
        doc_dicts = [doc.get('doc') for doc in get_docs(database, keys=doc_ids) if doc.get('doc')]
        database.bulk_delete(doc_dicts)
Example #31
    def get_payload(self):
        response = self.restore_state.restore_class()
        case_ids_to_sync = set()
        for owner_id in self.restore_state.owner_ids:
            case_ids_to_sync = case_ids_to_sync | set(self.get_case_ids_for_owner(owner_id))

        if (not self.restore_state.is_initial and
                any([not self.is_clean(owner_id) for owner_id in self.restore_state.owner_ids])):
            # if it's a steady state sync and we have any dirty owners, then we also need to
            # include ALL cases on the phone that have been modified since the last sync as
            # possible candidates to sync (since they may have been closed or reassigned by someone else)

            # don't bother checking ones we've already decided to check
            other_ids_to_check = self.restore_state.last_sync_log.case_ids_on_phone - case_ids_to_sync
            case_ids_to_sync = case_ids_to_sync | set(filter_cases_modified_since(
                self.restore_state.domain, list(other_ids_to_check), self.restore_state.last_sync_log.date
            ))

        all_maybe_syncing = copy(case_ids_to_sync)
        all_synced = set()
        all_indices = defaultdict(set)
        all_dependencies_syncing = set()
        while case_ids_to_sync:
            ids = pop_ids(case_ids_to_sync, chunk_size)
            # todo: see if we can avoid wrapping - serialization depends on it heavily for now
            case_batch = filter(
                partial(case_needs_to_sync, last_sync_log=self.restore_state.last_sync_log),
                [CommCareCase.wrap(doc) for doc in get_docs(CommCareCase.get_db(), ids)]
            )
            updates = get_case_sync_updates(
                self.restore_state.domain, case_batch, self.restore_state.last_sync_log
            )
            for update in updates:
                case = update.case
                all_synced.add(case._id)
                append_update_to_response(response, update, self.restore_state)

                # update the indices in the new sync log
                if case.indices:
                    all_indices[case._id] = {index.identifier: index.referenced_id for index in case.indices}
                    # and double check footprint for non-live cases
                    for index in case.indices:
                        if index.referenced_id not in all_maybe_syncing:
                            case_ids_to_sync.add(index.referenced_id)

                if not _is_live(case, self.restore_state):
                    all_dependencies_syncing.add(case._id)

            # commtrack ledger sections for this batch
            commtrack_elements = get_stock_payload(
                self.restore_state.project, self.restore_state.stock_settings,
                [CaseStub(update.case._id, update.case.type) for update in updates]
            )
            response.extend(commtrack_elements)

            # add any new values to all_syncing
            all_maybe_syncing = all_maybe_syncing | case_ids_to_sync

        # update sync token - marking it as the new format
        self.restore_state.current_sync_log = SimplifiedSyncLog.wrap(
            self.restore_state.current_sync_log.to_json()
        )
        self.restore_state.current_sync_log.log_format = LOG_FORMAT_SIMPLIFIED
        index_tree = IndexTree(indices=all_indices)
        case_ids_on_phone = all_synced
        primary_cases_syncing = all_synced - all_dependencies_syncing
        if not self.restore_state.is_initial:
            case_ids_on_phone = case_ids_on_phone | self.restore_state.last_sync_log.case_ids_on_phone
            # subtract primary cases from dependencies since they must be newly primary
            all_dependencies_syncing = all_dependencies_syncing | (
                self.restore_state.last_sync_log.dependent_case_ids_on_phone -
                primary_cases_syncing
            )
            index_tree = self.restore_state.last_sync_log.index_tree.apply_updates(index_tree)

        self.restore_state.current_sync_log.case_ids_on_phone = case_ids_on_phone
        self.restore_state.current_sync_log.dependent_case_ids_on_phone = all_dependencies_syncing
        self.restore_state.current_sync_log.index_tree = index_tree

        return response
Example #32
def paginate_releases(request, domain, app_id):
    limit = request.GET.get('limit')
    only_show_released = json.loads(request.GET.get('only_show_released', 'false'))
    build_comment = request.GET.get('build_comment')
    page = int(request.GET.get('page', 1))
    page = max(page, 1)
    try:
        limit = int(limit)
    except (TypeError, ValueError):
        limit = 10
    skip = (page - 1) * limit
    timezone = get_timezone_for_user(request.couch_user, domain)

    def _get_batch(start_build=None, skip=None):
        start_build = {} if start_build is None else start_build
        return Application.get_db().view('app_manager/saved_app',
            startkey=[domain, app_id, start_build],
            endkey=[domain, app_id],
            descending=True,
            limit=limit,
            skip=skip,
            wrapper=lambda x: SavedAppBuild.wrap(x['value'],
                                                 scrap_old_conventions=False).releases_list_json(timezone),
        ).all()

    if not bool(only_show_released or build_comment):
        # If user is limiting builds by released status or build comment, it's much
        # harder to be performant with couch. So if they're not doing so, take shortcuts.
        total_apps = len(get_built_app_ids_for_app_id(domain, app_id))
        saved_apps = _get_batch(skip=skip)
    else:
        app_es = (
            AppES()
            .start((page - 1) * limit)
            .size(limit)
            .sort('version', desc=True)
            .domain(domain)
            .is_build()
            .app_id(app_id)
        )
        if only_show_released:
            app_es = app_es.is_released()
        if build_comment:
            app_es = app_es.build_comment(build_comment)
        results = app_es.exclude_source().run()
        app_ids = results.doc_ids
        apps = get_docs(Application.get_db(), app_ids)
        saved_apps = [
            SavedAppBuild.wrap(app, scrap_old_conventions=False).releases_list_json(timezone)
            for app in apps
        ]
        total_apps = results.total

    j2me_enabled_configs = CommCareBuildConfig.j2me_enabled_config_labels()
    for app in saved_apps:
        app['include_media'] = app['doc_type'] != 'RemoteApp'
        app['j2me_enabled'] = app['menu_item_label'] in j2me_enabled_configs
        app['target_commcare_flavor'] = (
            SavedAppBuild.get(app['_id']).target_commcare_flavor
            if toggles.TARGET_COMMCARE_FLAVOR.enabled(domain)
            else 'none'
        )

    if toggles.APPLICATION_ERROR_REPORT.enabled(request.couch_user.username):
        versions = [app['version'] for app in saved_apps]
        num_errors_dict = _get_error_counts(domain, app_id, versions)
        for app in saved_apps:
            app['num_errors'] = num_errors_dict.get(app['version'], 0)

    num_pages = int(ceil(total_apps / limit))

    return json_response({
        'apps': saved_apps,
        'pagination': {
            'total': total_apps,
            'num_pages': num_pages,
            'current_page': page,
        }
    })
Example #34
def paginate_releases(request, domain, app_id):
    limit = request.GET.get('limit')
    only_show_released = json.loads(
        request.GET.get('only_show_released', 'false'))
    query = request.GET.get('query')
    page = int(request.GET.get('page', 1))
    page = max(page, 1)
    try:
        limit = int(limit)
    except (TypeError, ValueError):
        limit = 10
    skip = (page - 1) * limit
    timezone = get_timezone_for_user(request.couch_user, domain)

    def _get_batch(start_build=None, skip=None):
        start_build = {} if start_build is None else start_build
        return Application.get_db().view(
            'app_manager/saved_app',
            startkey=[domain, app_id, start_build],
            endkey=[domain, app_id],
            descending=True,
            limit=limit,
            skip=skip,
            wrapper=lambda x:
            (SavedAppBuild.wrap(x['value'], scrap_old_conventions=False).
             releases_list_json(timezone)),
        ).all()

    if not bool(only_show_released or query):
        # If user is limiting builds by released status or build comment, it's much
        # harder to be performant with couch. So if they're not doing so, take shortcuts.
        total_apps = len(get_built_app_ids_for_app_id(domain, app_id))
        saved_apps = _get_batch(skip=skip)
    else:
        app_es = (AppES().start((page - 1) * limit).size(limit).sort(
            'version', desc=True).domain(domain).is_build().app_id(app_id))
        if only_show_released:
            app_es = app_es.is_released()
        if query:
            app_es = app_es.add_query(build_comment(query), queries.SHOULD)
            try:
                app_es = app_es.add_query(version(int(query)), queries.SHOULD)
            except ValueError:
                pass

        results = app_es.exclude_source().run()
        total_apps = results.total
        app_ids = results.doc_ids
        apps = get_docs(Application.get_db(), app_ids)

        saved_apps = [
            SavedAppBuild.wrap(
                app, scrap_old_conventions=False).releases_list_json(timezone)
            for app in apps
        ]

    if toggles.APPLICATION_ERROR_REPORT.enabled(request.couch_user.username):
        versions = [app['version'] for app in saved_apps]
        num_errors_dict = _get_error_counts(domain, app_id, versions)
        for app in saved_apps:
            app['num_errors'] = num_errors_dict.get(app['version'], 0)

    num_pages = int(ceil(total_apps / limit))

    return json_response({
        'apps': saved_apps,
        'pagination': {
            'total': total_apps,
            'num_pages': num_pages,
            'current_page': page,
            'more': page * limit < total_apps,  # needed when select2 uses this endpoint
        }
    })
Example #35
def _get_non_design_docs(db):
    docs = get_docs(db, [result['id'] for result in db
                         if not result['id'].startswith('_design/')],
                    attachments=True)
    _sort_by_doc_id(docs)
    return docs
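This reads like a test utility; a hedged sketch of how two databases might be compared with it, assuming _sort_by_doc_id sorts in place as its use above implies.

def assert_dbs_have_same_non_design_docs(db1, db2):
    # Both sides are fetched with attachments and sorted by _id, so a plain
    # equality check compares full document contents.
    assert _get_non_design_docs(db1) == _get_non_design_docs(db2)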