Example 1
def dump_locations(domain, download_id, include_consumption, headers_only, task=None):
    exporter = LocationExporter(domain, include_consumption=include_consumption,
                                headers_only=headers_only, async_task=task)

    fd, path = tempfile.mkstemp()
    writer = Excel2007ExportWriter()
    writer.open(header_table=exporter.get_headers(), file=path)
    with writer:
        exporter.write_data(writer)

    with open(path, 'rb') as file_:
        db = get_blob_db()
        expiry_mins = 60
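        # NB: the blob db timeout below is given in minutes, while expose_blob_download's
        # expiry is in seconds (hence the * 60 further down)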
        db.put(
            file_,
            domain=domain,
            parent_id=domain,
            type_code=CODES.tempfile,
            key=download_id,
            timeout=expiry_mins,
        )

        file_format = Format.from_format(Excel2007ExportWriter.format)
        expose_blob_download(
            download_id,
            expiry=expiry_mins * 60,
            mimetype=file_format.mimetype,
            content_disposition=safe_filename_header('{}_locations'.format(domain), file_format.extension),
            download_id=download_id,
        )
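
For reference, every example on this page follows the same three-step writer protocol: open with a header_table describing one header row per tab, write rows grouped by tab, then close. A minimal self-contained sketch (the import path and sample data are assumptions for illustration, not taken from the examples):

import io

from couchexport.writers import Excel2007ExportWriter  # assumed import path

outfile = io.BytesIO()
writer = Excel2007ExportWriter()
# header_table is a list of (tab_name, [header_row]) pairs, one entry per sheet
writer.open(header_table=[('people', [['name', 'age']])], file=outfile)
# write() takes a list of (tab_name, rows) pairs; each row is a list of cells
writer.write([('people', [['Alice', 30], ['Bob', 25]])])
writer.close()
xlsx_bytes = outfile.getvalue()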
Example 2
def _export_data_dictionary(domain):
    queryset = CaseType.objects.filter(domain=domain).prefetch_related(
        Prefetch('properties', queryset=CaseProperty.objects.order_by('name')))
    export_data = {}
    for case_type in queryset:
        export_data[case_type.name or _("No Name")] = [{
            _('Case Property'): prop.name,
            _('Group'): prop.group,
            _('Data Type'): prop.data_type,
            _('Description'): prop.description,
            _('Deprecated'): prop.deprecated,
        } for prop in case_type.properties.all()]
    headers = (_('Case Property'), _('Group'), _('Data Type'),
               _('Description'), _('Deprecated'))
    outfile = io.BytesIO()
    writer = Excel2007ExportWriter()
    header_table = [(tab_name, [headers]) for tab_name in export_data]
    writer.open(header_table=header_table, file=outfile)
    for tab_name, tab in export_data.items():
        tab_rows = []
        for row in tab:
            tab_rows.append([row.get(header, '') for header in headers])
        writer.write([(tab_name, tab_rows)])
    writer.close()
    return outfile
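
The BytesIO returned here still has to be turned into a download. A hypothetical Django view doing so (the view name is an assumption; the response pattern mirrors Examples 7 and 14):

from django.http import HttpResponse

def download_data_dictionary(request, domain):  # hypothetical view wiring
    outfile = _export_data_dictionary(domain)
    response = HttpResponse(
        outfile.getvalue(),
        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
    )
    response['Content-Disposition'] = 'attachment; filename="data_dictionary.xlsx"'
    return response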
Example 3
def write_to_file(locations):
    """
    locations = [
        ('loc_type1', {
             'headers': ['header1', 'header2', ...],
             'rows': [
                 {
                     'header1': val1,
                     'header2': val2,
                 },
                 {...},
             ]
        })
    ]
    """
    outfile = StringIO()
    writer = Excel2007ExportWriter()
    header_table = [(tab_name, [tab['headers']])
                    for tab_name, tab in locations]
    writer.open(header_table=header_table, file=outfile)
    for tab_name, tab in locations:
        headers = tab['headers']
        tab_rows = [[row.get(header, '') for header in headers]
                    for row in tab['rows']]
        writer.write([(tab_name, tab_rows)])
    writer.close()
    return outfile.getvalue()
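
A hypothetical call that matches the shape documented in the docstring:

locations = [
    ('village', {
        'headers': ['site_code', 'name'],
        'rows': [
            {'site_code': 'v1', 'name': 'Springfield'},
            {'site_code': 'v2', 'name': 'Shelbyville'},
        ],
    }),
]
# returns the workbook contents accumulated in the in-memory buffer
xlsx_contents = write_to_file(locations)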
Example 4
def _export_data_dictionary(domain):
    export_fhir_data = toggles.FHIR_INTEGRATION.enabled(domain)
    case_type_headers = [
        _('Case Type'),
        _('FHIR Resource Type'),
        _('Remove Resource Type(Y)')
    ]
    case_prop_headers = [
        _('Case Property'),
        _('Group'),
        _('Data Type'),
        _('Description'),
        _('Deprecated')
    ]

    case_type_data, case_prop_data = _generate_data_for_export(
        domain, export_fhir_data)

    outfile = io.BytesIO()
    writer = Excel2007ExportWriter()
    header_table = _get_headers_for_export(export_fhir_data, case_type_headers,
                                           case_prop_headers, case_prop_data)
    writer.open(header_table=header_table, file=outfile)
    if export_fhir_data:
        _export_fhir_data(writer, case_type_headers, case_type_data)
    _export_case_prop_data(writer, case_prop_headers, case_prop_data)
    writer.close()
    return outfile
Example 5
def dump_users_and_groups(response, domain):
    def _load_memoizer(domain):
        group_memoizer = GroupMemoizer(domain=domain)
        # load groups manually instead of calling group_memoizer.load_all()
        # so that we can detect blank groups
        blank_groups = set()
        for group in Group.by_domain(domain):
            if group.name:
                group_memoizer.add_group(group)
            else:
                blank_groups.add(group)
        if blank_groups:
            raise GroupNameError(blank_groups=blank_groups)

        return group_memoizer

    export_file = StringIO()
    writer = Excel2007ExportWriter()
    group_memoizer = _load_memoizer(domain)
    location_cache = LocationIdToSiteCodeCache(domain)

    user_data_model = CustomDataFieldsDefinition.get_or_create(
        domain,
        UserFieldsView.field_type
    )

    user_headers, user_rows = parse_users(
        group_memoizer,
        domain,
        user_data_model,
        location_cache
    )

    group_headers, group_rows = parse_groups(group_memoizer.groups)
    headers = [
        ('users', [user_headers]),
        ('groups', [group_headers]),
    ]
    rows = [
        ('users', user_rows),
        ('groups', group_rows),
    ]

    domain_obj = Domain.get_by_name(domain)
    # This is only for domains using the multiple locations feature flag
    if domain_obj.commtrack_enabled and domain_obj.supports_multiple_locations_per_user:
        headers.append(
            ('locations', [['username', 'location-sms-code', 'location name (optional)']])
        )
        rows.append(
            ('locations', get_location_rows(domain))
        )

    writer.open(
        header_table=headers,
        file=export_file,
    )
    writer.write(rows)
    writer.close()
    response.write(export_file.getvalue())
Example 6
def dump_locations(response, domain, include_consumption=False):
    file = StringIO()
    writer = Excel2007ExportWriter()

    location_types = defined_location_types(domain)

    if include_consumption:
        defaults = get_default_column_data(domain, location_types)
    else:
        defaults = {
            'headers': {},
            'values': {}
        }

    common_types = ['site_code', 'name', 'parent_site_code', 'latitude', 'longitude']
    writer.open(
        header_table=[
            (loc_type, [
                common_types +
                get_custom_property_names(domain, loc_type, common_types) +
                defaults['headers'].get(loc_type, [])
            ])
            for loc_type in location_types
        ],
        file=file,
    )

    for loc_type in location_types:
        tab_rows = []
        locations = Location.filter_by_type(domain, loc_type)
        for loc in locations:
            parent_site_code = loc.parent.site_code if loc.parent else ''

            custom_prop_values = []
            for prop in location_custom_properties(domain, loc.location_type):
                if prop.name not in common_types:
                    custom_prop_values.append(
                        loc[prop.name] or ''
                    )

            if loc._id in defaults['values']:
                default_column_values = defaults['values'][loc._id]
            else:
                default_column_values = []

            tab_rows.append(
                [
                    loc.site_code,
                    loc.name,
                    parent_site_code,
                    loc.latitude or '',
                    loc.longitude or ''
                ] + custom_prop_values + default_column_values
            )
        writer.write([(loc_type, tab_rows)])

    writer.close()
    response.write(file.getvalue())
Example 7
def download_products(request, domain):
    def _get_products(domain):
        for p_doc in iter_docs(Product.get_db(), Product.ids_by_domain(domain)):
            # filter out archived products from export
            if not ('is_archived' in p_doc and p_doc['is_archived']):
                yield Product.wrap(p_doc)

    def _build_row(keys, product):
        row = []
        for key in keys:
            row.append(product.get(key, '') or '')

        return row

    file = StringIO()
    writer = Excel2007ExportWriter()

    product_keys = [
        'id',
        'name',
        'unit',
        'product_id',
        'description',
        'category',
        'program_id',
        'cost',
    ]

    data_keys = set()

    products = []
    for product in _get_products(domain):
        product_dict = product.to_dict()

        custom_properties = product.custom_property_dict()
        data_keys.update(custom_properties.keys())
        product_dict.update(custom_properties)

        products.append(product_dict)

    keys = product_keys + list(data_keys)

    writer.open(
        header_table=[
            ('products', [keys])
        ],
        file=file,
    )

    for product in products:
        writer.write([('products', [_build_row(keys, product)])])

    writer.close()

    response = HttpResponse(content_type=Format.from_format('xlsx').mimetype)
    response['Content-Disposition'] = 'attachment; filename="products.xlsx"'
    response.write(file.getvalue())
    return response
Example 8
def dump_users_and_groups(domain, download_id, user_filters):
    from corehq.apps.users.views.mobile.custom_data_fields import UserFieldsView

    def _load_memoizer(domain):
        group_memoizer = GroupMemoizer(domain=domain)
        # load groups manually instead of calling group_memoizer.load_all()
        # so that we can detect blank groups
        blank_groups = set()
        for group in Group.by_domain(domain):
            if group.name:
                group_memoizer.add_group(group)
            else:
                blank_groups.add(group)
        if blank_groups:
            raise GroupNameError(blank_groups=blank_groups)

        return group_memoizer

    writer = Excel2007ExportWriter()
    group_memoizer = _load_memoizer(domain)
    location_cache = LocationIdToSiteCodeCache(domain)

    user_data_model = CustomDataFieldsDefinition.get_or_create(
        domain,
        UserFieldsView.field_type
    )

    user_headers, user_rows = parse_users(
        group_memoizer,
        domain,
        user_data_model,
        location_cache,
        user_filters
    )

    group_headers, group_rows = parse_groups(group_memoizer.groups)
    headers = [
        ('users', [user_headers]),
        ('groups', [group_headers]),
    ]
    rows = [
        ('users', user_rows),
        ('groups', group_rows),
    ]

    use_transfer = settings.SHARED_DRIVE_CONF.transfer_enabled
    filename = "user_export_{}_{}.xlsx".format(domain, uuid.uuid4().hex)
    file_path = get_download_file_path(use_transfer, filename)
    writer.open(
        header_table=headers,
        file=file_path,
    )
    writer.write(rows)
    writer.close()

    expose_download(use_transfer, file_path, filename, download_id, 'xlsx')
Example 9
def formdefs(request, domain, app_id):
    # TODO: Looks like this function is never used
    langs = [json.loads(request.GET.get('lang', '"en"'))]
    format = request.GET.get('format', 'json')
    app = get_app(domain, app_id)

    def get_questions(form):
        xform = XForm(form.source)
        prefix = '/%s/' % xform.data_node.tag_name

        def remove_prefix(string):
            if string.startswith(prefix):
                return string[len(prefix):]
            else:
                raise Exception("%s does not start with expected prefix %s" % (string, prefix))

        def transform_question(q):
            return {
                'id': remove_prefix(q['value']),
                'type': q['tag'],
                'text': q['label'] if q['tag'] != 'hidden' else ''
            }

        return [transform_question(q) for q in xform.get_questions(langs)]

    formdefs = [{
        'name': ("%s, %s" % (f['form'].get_module().name['en'], f['form'].name['en'])
                 if f['type'] == 'module_form' else 'User Registration'),
        'columns': ['id', 'type', 'text'],
        'rows': get_questions(f['form']),
    } for f in app.get_forms(bare=False)]

    if format == 'xlsx':
        f = StringIO()
        writer = Excel2007ExportWriter()
        writer.open([(sheet['name'], [FormattedRow(sheet['columns'])])
                     for sheet in formdefs], f)
        writer.write([(sheet['name'], [
            FormattedRow([
                cell for (_, cell) in sorted(
                    row.items(),
                    key=lambda item: sheet['columns'].index(item[0]))
            ]) for row in sheet['rows']
        ]) for sheet in formdefs])
        writer.close()
        response = HttpResponse(
            f.getvalue(), content_type=Format.from_format('xlsx').mimetype)
        set_file_download(response, 'formdefs.xlsx')
        return response
    else:
        return json_response(formdefs)
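
The sort in the xlsx branch only reorders each row dict to match the declared column order; in isolation:

columns = ['id', 'type', 'text']
row = {'text': 'What is your name?', 'id': 'name', 'type': 'text'}
# order the (key, value) pairs by each key's position in the column list
ordered = [cell for (_, cell) in sorted(row.items(), key=lambda item: columns.index(item[0]))]
assert ordered == ['name', 'text', 'What is your name?']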
Example 10
def _dump_xlsx_and_expose_download(filename, headers, rows, download_id, task, total_count, owner_id):
    writer = Excel2007ExportWriter(format_as_text=True)
    use_transfer = settings.SHARED_DRIVE_CONF.transfer_enabled
    file_path = get_download_file_path(use_transfer, filename)
    writer.open(
        header_table=headers,
        file=file_path,
    )
    writer.write(rows)
    writer.close()

    expose_download(use_transfer, file_path, filename, download_id, 'xlsx', owner_ids=[owner_id])
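    # passing current == total marks the task as 100% complete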
    DownloadBase.set_progress(task, total_count, total_count)
Example 11
def dump_locations(domain,
                   download_id,
                   include_consumption,
                   headers_only,
                   owner_id,
                   root_location_ids=None,
                   task=None,
                   **kwargs):
    exporter = LocationExporter(domain,
                                include_consumption=include_consumption,
                                root_location_ids=root_location_ids,
                                headers_only=headers_only,
                                async_task=task,
                                **kwargs)

    fd, path = tempfile.mkstemp()
    writer = Excel2007ExportWriter()
    writer.open(header_table=exporter.get_headers(), file=path)
    with writer:
        exporter.write_data(writer)

    with open(path, 'rb') as file_:
        db = get_blob_db()
        expiry_mins = 60
        db.put(
            file_,
            domain=domain,
            parent_id=domain,
            type_code=CODES.tempfile,
            key=download_id,
            timeout=expiry_mins,
        )

        file_format = Format.from_format(Excel2007ExportWriter.format)
        filename = '{}_locations'.format(domain)
        # root_location_ids may be None (the default), so guard before calling len()
        if root_location_ids and len(root_location_ids) == 1:
            root_location = SQLLocation.objects.get(
                location_id=root_location_ids[0])
            filename += '_{}'.format(root_location.name)
        expose_blob_download(
            download_id,
            expiry=expiry_mins * 60,
            mimetype=file_format.mimetype,
            content_disposition=safe_filename_header(filename,
                                                     file_format.extension),
            download_id=download_id,
            owner_ids=[owner_id],
        )
Example 12
def dump_locations(response, domain):
    file = StringIO()
    writer = Excel2007ExportWriter()

    location_types = defined_location_types(domain)

    defaults = get_default_column_data(domain, location_types)

    common_types = ['id', 'name', 'parent_id', 'latitude', 'longitude']
    writer.open(
        header_table=[
            (loc_type, [
                common_types +
                get_custom_property_names(domain, loc_type) +
                defaults['headers'].get(loc_type, [])
            ])
            for loc_type in location_types
        ],
        file=file,
    )

    for loc_type in location_types:
        tab_rows = []
        locations = Location.filter_by_type(domain, loc_type)
        for loc in locations:
            parent_id = loc.parent._id if loc.parent else ''

            custom_prop_values = [loc[prop.name] or '' for prop in location_custom_properties(domain, loc.location_type)]

            if loc._id in defaults['values']:
                default_column_values = defaults['values'][loc._id]
            else:
                default_column_values = []

            tab_rows.append(
                [
                    loc._id,
                    loc.name,
                    parent_id,
                    loc.latitude or '',
                    loc.longitude or ''
                ] + custom_prop_values + default_column_values
            )
        writer.write([(loc_type, tab_rows)])

    writer.close()
    response.write(file.getvalue())
Example 13
def dump_users_and_groups(response, domain):
    from corehq.apps.users.views.mobile.custom_data_fields import UserFieldsView

    def _load_memoizer(domain):
        group_memoizer = GroupMemoizer(domain=domain)
        # load groups manually instead of calling group_memoizer.load_all()
        # so that we can detect blank groups
        blank_groups = set()
        for group in Group.by_domain(domain):
            if group.name:
                group_memoizer.add_group(group)
            else:
                blank_groups.add(group)
        if blank_groups:
            raise GroupNameError(blank_groups=blank_groups)

        return group_memoizer

    export_file = StringIO()
    writer = Excel2007ExportWriter()
    group_memoizer = _load_memoizer(domain)
    location_cache = LocationIdToSiteCodeCache(domain)

    user_data_model = CustomDataFieldsDefinition.get_or_create(
        domain, UserFieldsView.field_type)

    user_headers, user_rows = parse_users(group_memoizer, domain,
                                          user_data_model, location_cache)

    group_headers, group_rows = parse_groups(group_memoizer.groups)
    headers = [
        ('users', [user_headers]),
        ('groups', [group_headers]),
    ]
    rows = [
        ('users', user_rows),
        ('groups', group_rows),
    ]

    writer.open(
        header_table=headers,
        file=export_file,
    )
    writer.write(rows)
    writer.close()
    response.write(export_file.getvalue())
Example 14
def download_products(request, domain):
    def _parse_custom_properties(product):
        product_data_model = CustomDataFieldsDefinition.get_or_create(
            domain, ProductFieldsView.field_type)
        product_data_fields = [f.slug for f in product_data_model.fields]

        model_data = {}
        uncategorized_data = {}

        for prop, val in six.iteritems(product.product_data):
            if prop in product_data_fields:
                model_data['data: ' + prop] = encode_if_needed(val)
            else:
                uncategorized_data['uncategorized_data: ' +
                                   prop] = encode_if_needed(val)

        return model_data, uncategorized_data

    def _get_products(domain):
        for p_doc in iter_docs(Product.get_db(),
                               Product.ids_by_domain(domain)):
            # filter out archived products from export
            if not ('is_archived' in p_doc and p_doc['is_archived']):
                yield Product.wrap(p_doc)

    def _build_row(keys, product):
        row = []
        for key in keys:
            row.append(product.get(key, '') or '')

        return row

    file = BytesIO()
    writer = Excel2007ExportWriter()

    product_keys = [
        'id',
        'name',
        'unit',
        'product_id',
        'description',
        'category',
        'program_id',
        'cost',
    ]

    model_data = set()
    uncategorized_data = set()
    products = []

    for product in _get_products(domain):
        product_dict = product.to_dict()

        product_model, product_uncategorized = _parse_custom_properties(
            product)

        # NB: updating a set with a dict adds only the dict's keys (the column names)
        model_data.update(product_model)
        uncategorized_data.update(product_uncategorized)

        product_dict.update(product_model)
        product_dict.update(product_uncategorized)

        products.append(product_dict)

    keys = product_keys + list(model_data) + list(uncategorized_data)

    writer.open(
        header_table=[('products', [keys])],
        file=file,
    )

    for product in products:
        writer.write([('products', [_build_row(keys, product)])])

    writer.close()

    response = HttpResponse(content_type=Format.from_format('xlsx').mimetype)
    response['Content-Disposition'] = 'attachment; filename="products.xlsx"'
    response.write(file.getvalue())
    return response
Example 15
def dump_users_and_groups(response, domain):
    file = StringIO()
    writer = Excel2007ExportWriter()

    users = CommCareUser.by_domain(domain)
    user_data_keys = set()
    user_groups_length = 0
    user_dicts = []
    group_data_keys = set()
    group_dicts = []
    group_memoizer = GroupMemoizer(domain=domain)
    # load groups manually instead of calling group_memoizer.load_all()
    # so that we can detect blank groups
    blank_groups = set()
    for group in Group.by_domain(domain):
        if group.name:
            group_memoizer.add_group(group)
        else:
            blank_groups.add(group)

    if blank_groups:
        raise GroupNameError(blank_groups=blank_groups)

    for user in users:
        data = user.user_data
        group_names = sorted(map(lambda id: group_memoizer.get(id).name,
                                 Group.by_user(user, wrap=False)),
                             key=alphanumeric_sort_key)
        # exclude password and user_id
        user_dicts.append({
            'data': data,
            'group': group_names,
            'name': user.full_name,
            # dummy display string for passwords
            'password': "******",
            'phone-number': user.phone_number,
            'email': user.email,
            'username': user.raw_username,
            'language': user.language,
            'user_id': user._id,
        })
        user_data_keys.update(user.user_data.keys() if user.user_data else {})
        user_groups_length = max(user_groups_length, len(group_names))

    sorted_groups = sorted(group_memoizer.groups,
                           key=lambda group: alphanumeric_sort_key(group.name))
    for group in sorted_groups:
        group_dicts.append({
            'id': group.get_id,
            'name': group.name,
            'case-sharing': group.case_sharing,
            'reporting': group.reporting,
            'data': group.metadata,
        })
        group_data_keys.update(group.metadata.keys() if group.metadata else {})

    # include obscured password column for adding new users
    user_headers = [
        'username', 'password', 'name', 'phone-number', 'email', 'language',
        'user_id'
    ]
    user_headers.extend(
        json_to_headers(
            {'data': dict([(key, None) for key in user_data_keys])}))
    user_headers.extend(
        json_to_headers({'group': range(1, user_groups_length + 1)}))

    group_headers = ['id', 'name', 'case-sharing?', 'reporting?']
    group_headers.extend(
        json_to_headers(
            {'data': dict([(key, None) for key in group_data_keys])}))

    headers = [
        ('users', [user_headers]),
        ('groups', [group_headers]),
    ]

    commtrack_enabled = Domain.get_by_name(domain).commtrack_enabled
    if commtrack_enabled:
        headers.append(
            ('locations',
             [['username', 'location-sms-code', 'location name (optional)']]))

    writer.open(
        header_table=headers,
        file=file,
    )

    def get_user_rows():
        for user_dict in user_dicts:
            row = dict(flatten_json(user_dict))
            yield [row.get(header) or '' for header in user_headers]

    def get_group_rows():
        for group_dict in group_dicts:
            row = dict(flatten_json(group_dict))
            yield [row.get(header) or '' for header in group_headers]

    rows = [
        ('users', get_user_rows()),
        ('groups', get_group_rows()),
    ]

    if commtrack_enabled:
        rows.append(('locations', get_location_rows(domain)))

    writer.write(rows)

    writer.close()
    response.write(file.getvalue())
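
flatten_json and json_to_headers are imported helpers not shown in these examples; judging by the 'data: <field>' and 'group 1', 'group 2' style headers, they behave roughly like this sketch (an assumption, not the actual implementation):

def flatten_json_sketch(d):
    """Rough stand-in: nested dicts become 'prefix: key' pairs, lists become numbered pairs."""
    for key, value in d.items():
        if isinstance(value, dict):
            for subkey, subvalue in value.items():
                yield '%s: %s' % (key, subkey), subvalue
        elif isinstance(value, (list, tuple, range)):
            for i, item in enumerate(value, start=1):
                yield '%s %d' % (key, i), item
        else:
            yield key, value

# dict(flatten_json_sketch({'name': 'Ada', 'data': {'dob': '1990'}, 'group': ['g1', 'g2']}))
#   -> {'name': 'Ada', 'data: dob': '1990', 'group 1': 'g1', 'group 2': 'g2'}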
Example 16
def dump_users_and_groups(domain, download_id):
    from corehq.apps.users.views.mobile.custom_data_fields import UserFieldsView

    def _load_memoizer(domain):
        group_memoizer = GroupMemoizer(domain=domain)
        # load groups manually instead of calling group_memoizer.load_all()
        # so that we can detect blank groups
        blank_groups = set()
        for group in Group.by_domain(domain):
            if group.name:
                group_memoizer.add_group(group)
            else:
                blank_groups.add(group)
        if blank_groups:
            raise GroupNameError(blank_groups=blank_groups)

        return group_memoizer

    writer = Excel2007ExportWriter()
    group_memoizer = _load_memoizer(domain)
    location_cache = LocationIdToSiteCodeCache(domain)

    user_data_model = CustomDataFieldsDefinition.get_or_create(
        domain, UserFieldsView.field_type)

    user_headers, user_rows = parse_users(group_memoizer, domain,
                                          user_data_model, location_cache)

    group_headers, group_rows = parse_groups(group_memoizer.groups)
    headers = [
        ('users', [user_headers]),
        ('groups', [group_headers]),
    ]
    rows = [
        ('users', user_rows),
        ('groups', group_rows),
    ]

    use_transfer = settings.SHARED_DRIVE_CONF.transfer_enabled
    file_path = _get_download_file_path(domain, use_transfer)
    writer.open(
        header_table=headers,
        file=file_path,
    )
    writer.write(rows)
    writer.close()

    common_kwargs = dict(
        mimetype=Format.from_format('xlsx').mimetype,
        content_disposition='attachment; filename="{fname}"'.format(
            fname='{}_users.xlsx'.format(domain)),
        download_id=download_id,
    )
    if use_transfer:
        expose_file_download(file_path,
                             use_transfer=use_transfer,
                             **common_kwargs)
    else:
        expose_cached_download(FileWrapper(open(file_path, 'rb')),
                               expiry=(1 * 60 * 60),
                               file_extension='xlsx',
                               **common_kwargs)