def populate_export_download_task(export_instances, filters, download_id, filename=None, expiry=10 * 60 * 60):
    export_file = get_export_file(
        export_instances,
        filters,
        # We don't have a great way to calculate progress if it's a bulk download,
        # so only track the progress for single instance exports.
        progress_tracker=populate_export_download_task if len(export_instances) == 1 else None
    )
    file_format = Format.from_format(export_file.format)
    filename = filename or export_instances[0].name

    with export_file as file_:
        db = get_blob_db()
        db.put(file_, download_id, timeout=expiry)

    expose_blob_download(
        download_id,
        mimetype=file_format.mimetype,
        content_disposition=safe_filename_header(filename, file_format.extension),
        download_id=download_id,
    )
def populate_export_download_task(export_instances, filters, download_id, filename=None, expiry=10 * 60 * 60):
    export_file = get_export_file(
        export_instances,
        filters,
        # We don't have a great way to calculate progress if it's a bulk download,
        # so only track the progress for single instance exports.
        progress_tracker=populate_export_download_task if len(export_instances) == 1 else None
    )
    file_format = Format.from_format(export_file.format)
    filename = filename or export_instances[0].name

    payload = export_file.file.payload
    expose_cached_download(
        payload,
        expiry,
        ".{}".format(file_format.extension),
        mimetype=file_format.mimetype,
        content_disposition=safe_filename_header(filename, file_format.extension),
        download_id=download_id,
    )
    export_file.file.delete()
def dump_locations(domain, download_id, include_consumption, headers_only, task=None):
    exporter = LocationExporter(domain, include_consumption=include_consumption,
                                headers_only=headers_only, async_task=task)

    fd, path = tempfile.mkstemp()
    writer = Excel2007ExportWriter()
    writer.open(header_table=exporter.get_headers(), file=path)
    with writer:
        exporter.write_data(writer)

    with open(path, 'rb') as file_:
        db = get_blob_db()
        expiry_mins = 60
        db.put(
            file_,
            domain=domain,
            parent_id=domain,
            type_code=CODES.tempfile,
            key=download_id,
            timeout=expiry_mins,
        )
        file_format = Format.from_format(Excel2007ExportWriter.format)
        expose_blob_download(
            download_id,
            expiry=expiry_mins * 60,
            mimetype=file_format.mimetype,
            content_disposition=safe_filename_header('{}_locations'.format(domain), file_format.extension),
            download_id=download_id,
        )
def prepare_fixture_download(table_ids, domain, task, download_id):
    """Prepare fixture data for Excel download
    """
    data_types_book, excel_sheets = _prepare_fixture(table_ids, domain, task=task)

    header_groups = [("types", excel_sheets["types"]["headers"])]
    value_groups = [("types", excel_sheets["types"]["rows"])]
    for data_type in data_types_book:
        header_groups.append((data_type.tag, excel_sheets[data_type.tag]["headers"]))
        value_groups.append((data_type.tag, excel_sheets[data_type.tag]["rows"]))

    file = StringIO()
    format = Format.XLS_2007
    export_raw(tuple(header_groups), tuple(value_groups), file, format)
    return expose_cached_download(
        file.getvalue(),
        60 * 60 * 2,
        file_extension=".xlsx",
        mimetype=Format.from_format(format).mimetype,
        content_disposition='attachment; filename="%s_lookup-tables.xlsx"' % domain,
        download_id=download_id,
    )
def cache_file_to_be_served(tmp, checkpoint, download_id, format=None, filename=None, expiry=10 * 60 * 60):
    """
    tmp can be either a path to a tempfile or a StringIO
    (the APIs for tempfiles vs StringIO are unfortunately... not similar)
    """
    if checkpoint:
        format = Format.from_format(format)
        try:
            filename = unidecode(filename)
        except Exception:
            pass
        escaped_filename = escape_quotes('%s.%s' % (filename, format.extension))

        payload = tmp.payload
        expose_cached_download(payload, expiry, ".{}".format(format.extension),
                               mimetype=format.mimetype,
                               content_disposition='attachment; filename="%s"' % escaped_filename,
                               extras={'X-CommCareHQ-Export-Token': checkpoint.get_id},
                               download_id=download_id)
        tmp.delete()
    else:
        # this just gives you a link saying there wasn't anything there
        expose_cached_download("Sorry, there wasn't any data.", expiry, None,
                               content_disposition="",
                               mimetype="text/html",
                               download_id=download_id).save(expiry)
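The snippet above leans on two small helpers for header-safe filenames. Below is a minimal, self-contained sketch of that pattern; build_content_disposition and its quote-escaping are illustrative stand-ins, not the project's actual escape_quotes helper (only the third-party unidecode import is real).

from unidecode import unidecode  # third-party ASCII transliteration, as in the snippet above

def build_content_disposition(filename, extension):
    # Transliterate non-ASCII filenames where possible, mirroring the
    # try/except unidecode fallback above.
    try:
        filename = unidecode(filename)
    except Exception:
        pass
    # Escape embedded double quotes so the header value stays well-formed;
    # this stands in for the escape_quotes helper used above.
    escaped = ('%s.%s' % (filename, extension)).replace('"', '\\"')
    return 'attachment; filename="%s"' % escaped

# build_content_disposition(u'r\xe9sum\xe9 "final"', 'xlsx')
# -> 'attachment; filename="resume \\"final\\".xlsx"'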
def location_export(request, domain):
    response = HttpResponse(mimetype=Format.from_format('xlsx').mimetype)
    response['Content-Disposition'] = 'attachment; filename=locations.xlsx'
    dump_locations(response, domain)
    return response
def download_cases(request, domain):
    include_closed = json.loads(request.GET.get('include_closed', 'false'))
    format = Format.from_format(request.GET.get('format') or Format.XLS_2007)
    group = request.GET.get('group', None)
    user_filter, _ = FilterUsersField.get_user_filter(request)

    async = request.GET.get('async') == 'true'

    kwargs = {
        'domain': domain,
        'include_closed': include_closed,
        'format': format,
        'group': group,
        'user_filter': user_filter,
    }
    payload_func = SerializableFunction(generate_case_export_payload, **kwargs)
    content_disposition = "attachment; filename={domain}_data.{ext}".format(
        domain=domain, ext=format.extension)
    mimetype = "%s" % format.mimetype

    def generate_payload(payload_func):
        if async:
            download = DownloadBase()
            a_task = prepare_download.delay(download.download_id, payload_func,
                                            content_disposition, mimetype)
            download.set_task(a_task)
            return download.get_start_response()
        else:
            payload = payload_func()
            response = HttpResponse(payload)
            response['Content-Type'] = mimetype
            response['Content-Disposition'] = content_disposition
            return response

    return generate_payload(payload_func)
def populate_export_download_task(export_instances, filters, download_id, filename=None, expiry=10 * 60 * 60):
    export_file = get_export_file(
        export_instances,
        filters,
        # We don't have a great way to calculate progress if it's a bulk download,
        # so only track the progress for single instance exports.
        progress_tracker=populate_export_download_task if len(export_instances) == 1 else None
    )
    file_format = Format.from_format(export_file.format)
    filename = filename or export_instances[0].name
    escaped_filename = escape_quotes('%s.%s' % (filename, file_format.extension))
    escaped_filename = urllib.quote(escaped_filename.encode('utf8'))

    payload = export_file.file.payload
    expose_cached_download(
        payload,
        expiry,
        ".{}".format(file_format.extension),
        mimetype=file_format.mimetype,
        content_disposition='attachment; filename="%s"' % escaped_filename,
        download_id=download_id,
    )
    export_file.file.delete()
def _render_report_configs(request, configs, domain, owner_id, couch_user, email,
                           notes=None, attach_excel=False):
    from dimagi.utils.web import get_url_base

    report_outputs = []
    excel_attachments = []
    format = Format.from_format(request.GET.get('format') or Format.XLS_2007)
    for config in configs:
        content, excel_file = config.get_report_content(attach_excel=attach_excel)
        if excel_file:
            excel_attachments.append({
                'title': config.full_name + "." + format.extension,
                'file_obj': excel_file,
                'mimetype': format.mimetype
            })
        report_outputs.append({
            'title': config.full_name,
            'url': config.url,
            'content': content
        })
        date_range = config.get_date_range()

    return render(request, "reports/report_email.html", {
        "reports": report_outputs,
        "domain": domain,
        "couch_user": owner_id,
        "DNS_name": get_url_base(),
        "owner_name": couch_user.full_name or couch_user.get_email(),
        "email": email,
        "notes": notes or getattr(config, "description", ""),
        "startdate": date_range["startdate"] if date_range else "",
        "enddate": date_range["enddate"] if date_range else "",
    }), excel_attachments
def send_monthly_sms_report():
    subject = _('Monthly SMS report')
    recipients = ['*****@*****.**', '*****@*****.**', '*****@*****.**',
                  '*****@*****.**', '*****@*****.**', '*****@*****.**']
    try:
        start_date = date.today().replace(day=1) - relativedelta(months=1)
        first_day, last_day = calendar.monthrange(start_date.year, start_date.month)
        end_date = start_date.replace(day=last_day)
        filename = call_command('get_icds_sms_usage', 'icds-cas', str(start_date), str(end_date))
        with open(filename, 'rb') as f:
            cached_download = expose_cached_download(
                f.read(), expiry=24 * 60 * 60,
                file_extension=file_extention_from_filename(filename),
                mimetype=Format.from_format(Format.XLS_2007).mimetype,
                content_disposition='attachment; filename="%s"' % filename)
            path = reverse('retrieve_download', kwargs={'download_id': cached_download.download_id})
            link = f"{web.get_url_base()}{path}?get_file"
            message = _("""
            Hi,
            Please download the sms report for last month at {link}.
            The report is available only till midnight today.
            """).format(link=link)
            send_html_email_async.delay(subject, recipients, message,
                                        email_from=settings.DEFAULT_FROM_EMAIL)
    except Exception as e:
        message = _("""
        Hi,
        Could not generate the monthly SMS report for ICDS.
        The error has been notified. Please report as an issue for quick followup
        """)
        send_html_email_async.delay(subject, recipients, message,
                                    email_from=settings.DEFAULT_FROM_EMAIL)
        raise e
def expose_download(use_transfer, file_path, filename, download_id, file_type, owner_ids=None):
    common_kwargs = {
        'mimetype': Format.from_format(file_type).mimetype,
        'content_disposition': 'attachment; filename="{fname}"'.format(fname=filename),
        'download_id': download_id,
        'expiry': (1 * 60 * 60),
        'owner_ids': owner_ids,
    }
    if use_transfer:
        expose_file_download(file_path, use_transfer=use_transfer, **common_kwargs)
    else:
        expose_cached_download(FileWrapper(open(file_path, 'rb')),
                               file_extension=file_type,
                               **common_kwargs)
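A hedged usage sketch for the expose_download wrapper above; every value here is a placeholder, and in practice download_id would come from a DownloadBase-style task context.

expose_download(
    use_transfer=False,                   # take the cached-download branch
    file_path='/tmp/example_users.xlsx',  # hypothetical temp file
    filename='example_users.xlsx',
    download_id='example-download-id',    # hypothetical id
    file_type='xlsx',
    owner_ids=None,
)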
def download_commcare_users(request, domain):
    response = HttpResponse(content_type=Format.from_format('xlsx').mimetype)
    response['Content-Disposition'] = 'attachment; filename="%s_users.xlsx"' % domain

    try:
        dump_users_and_groups(response, domain)
    except GroupNameError as e:
        group_urls = [
            reverse('group_members', args=[domain, group.get_id])
            for group in e.blank_groups
        ]

        def make_link(url, i):
            return format_html('<a href="{}" target="_blank">{}</a>',
                               url, _('Blank Group %s') % i)

        group_links = [make_link(url, i + 1) for i, url in enumerate(group_urls)]
        msg = format_html(
            _('The following groups have no name. '
              'Please name them before continuing: {}'),
            mark_safe(', '.join(group_links))
        )
        messages.error(request, msg, extra_tags='html')
        return HttpResponseRedirect(reverse('upload_commcare_users', args=[domain]))

    return response
def download_cases(request, domain):
    include_closed = json.loads(request.GET.get('include_closed', 'false'))
    format = Format.from_format(request.GET.get('format') or Format.XLS_2007)
    view_name = 'hqcase/all_cases' if include_closed else 'hqcase/open_cases'
    key = [domain, {}, {}]
    cases = CommCareCase.view(view_name, startkey=key, endkey=key + [{}],
                              reduce=False, include_docs=True)
    # group, users = util.get_group_params(domain, **json_request(request.GET))
    group = request.GET.get('group', None)
    user_filter, _ = FilterUsersField.get_user_filter(request)

    # todo deal with cached user dict here
    users = get_all_users_by_domain(domain, group=group, user_filter=user_filter)
    groups = Group.get_case_sharing_groups(domain)

    # if not group:
    #     users.extend(CommCareUser.by_domain(domain, is_active=False))

    workbook = WorkBook()
    export_cases_and_referrals(cases, workbook, users=users, groups=groups)
    export_users(users, workbook)
    response = HttpResponse(workbook.format(format.slug))
    response['Content-Type'] = "%s" % format.mimetype
    response['Content-Disposition'] = "attachment; filename={domain}_data.{ext}".format(
        domain=domain, ext=format.extension)
    return response
def export_async(download_id, export_tag, format=None, filename=None,
                 previous_export_id=None, filter=None, expiry=10 * 60 * 60):
    if not filename:
        filename = export_tag

    (tmp, checkpoint) = get_export_files(export_tag, format, previous_export_id, filter)

    if checkpoint:
        temp_id = uuid.uuid4().hex
        fd, path = tempfile.mkstemp()
        with os.fdopen(fd, 'wb') as file:
            file.write(tmp.getvalue())
        # make file globally read/writeable in case celery runs as root
        os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP |
                 stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH)
        format = Format.from_format(format)
        try:
            filename = unidecode(filename)
        except Exception:
            pass
        cache.set(download_id,
                  FileDownload(path,
                               mimetype=format.mimetype,
                               content_disposition='attachment; filename=%s.%s' %
                                                   (filename, format.extension),
                               extras={'X-CommCareHQ-Export-Token': checkpoint.get_id}),
                  expiry)
    else:
        temp_id = uuid.uuid4().hex
        cache.set(temp_id, "Sorry, there wasn't any data.", expiry)
        cache.set(download_id,
                  CachedDownload(temp_id, content_disposition="", mimetype="text/html"),
                  expiry)
def get(self, request, *args, **kwargs):
    file_id = self.kwargs.get('file_id', None)
    content_type = Format.from_format('xlsx')
    response = HttpResponse(get_file_from_blobdb(file_id).read(),
                            content_type=content_type.mimetype)
    response['Content-Disposition'] = safe_filename_header(
        'unified_beneficiary_list', content_type.extension)
    return response
def get(self, request, domain, *args, **kwargs):
    outfile = _export_data_dictionary(domain)
    response = HttpResponse(content_type=Format.from_format('xlsx').mimetype)
    response['Content-Disposition'] = 'attachment; filename="data_dictionary.xlsx"'
    response.write(outfile.getvalue())
    return response
def download_products(request, domain):
    def _get_products(domain):
        for p_doc in iter_docs(Product.get_db(), Product.ids_by_domain(domain)):
            # filter out archived products from export
            if not ('is_archived' in p_doc and p_doc['is_archived']):
                yield Product.wrap(p_doc)

    def _build_row(keys, product):
        row = []
        for key in keys:
            row.append(product.get(key, '') or '')
        return row

    file = StringIO()
    writer = Excel2007ExportWriter()

    product_keys = [
        'id',
        'name',
        'unit',
        'product_id',
        'description',
        'category',
        'program_id',
        'cost',
    ]

    data_keys = set()
    products = []
    for product in _get_products(domain):
        product_dict = product.to_dict()
        custom_properties = product.custom_property_dict()
        data_keys.update(custom_properties.keys())
        product_dict.update(custom_properties)
        products.append(product_dict)

    keys = product_keys + list(data_keys)
    writer.open(
        header_table=[('products', [keys])],
        file=file,
    )

    for product in products:
        writer.write([('products', [_build_row(keys, product)])])

    writer.close()

    response = HttpResponse(mimetype=Format.from_format('xlsx').mimetype)
    response['Content-Disposition'] = 'attachment; filename="products.xlsx"'
    response.write(file.getvalue())
    return response
def download_daily_saved_export(req, domain, export_instance_id):
    with CriticalSection(['export-last-accessed-{}'.format(export_instance_id)]):
        try:
            export_instance = get_properly_wrapped_export_instance(export_instance_id)
        except ResourceNotFound:
            raise Http404(_("Export not found"))

        assert domain == export_instance.domain

        if export_instance.export_format == "html":
            if not domain_has_privilege(domain, EXCEL_DASHBOARD):
                raise Http404
        elif export_instance.is_daily_saved_export:
            if not domain_has_privilege(domain, DAILY_SAVED_EXPORT):
                raise Http404

        if not export_instance.filters.is_location_safe_for_user(req):
            return location_restricted_response(req)

        if not can_download_daily_saved_export(export_instance, domain, req.couch_user):
            raise Http404

        if export_instance.export_format == "html":
            message = "Download Excel Dashboard"
        else:
            message = "Download Saved Export"
        track_workflow(req.couch_user.username, message, properties={
            'domain': domain,
            'is_dimagi': req.couch_user.is_dimagi
        })

        if should_update_export(export_instance.last_accessed):
            try:
                rebuild_saved_export(export_instance_id, manual=False)
            except Exception:
                notify_exception(
                    req,
                    'Failed to rebuild export during download',
                    {
                        'export_instance_id': export_instance_id,
                        'domain': domain,
                    },
                )

        export_instance.last_accessed = datetime.utcnow()
        export_instance.save()

    payload = export_instance.get_payload(stream=True)
    format = Format.from_format(export_instance.export_format)
    return get_download_response(payload, export_instance.file_size, format,
                                 export_instance.filename, req)
def populate_export_download_task(export_instances, filters, download_id, filename=None,
                                  expiry=10 * 60):
    """
    :param expiry: Time period for the export to be available for download in minutes
    """
    domain = export_instances[0].domain
    with TransientTempfile() as temp_path, datadog_track_errors('populate_export_download_task'):
        export_file = get_export_file(
            export_instances,
            filters,
            temp_path,
            # We don't have a great way to calculate progress if it's a bulk download,
            # so only track the progress for single instance exports.
            progress_tracker=populate_export_download_task if len(export_instances) == 1 else None
        )

        file_format = Format.from_format(export_file.format)
        filename = filename or export_instances[0].name

        with export_file as file_:
            db = get_blob_db()
            db.put(
                file_,
                domain=domain,
                parent_id=domain,
                type_code=CODES.data_export,
                key=download_id,
                timeout=expiry,
            )

            expose_blob_download(
                download_id,
                expiry=expiry * 60,
                mimetype=file_format.mimetype,
                content_disposition=safe_filename_header(filename, file_format.extension),
                download_id=download_id,
            )

    email_requests = EmailExportWhenDoneRequest.objects.filter(
        domain=domain,
        download_id=download_id
    )
    for email_request in email_requests:
        try:
            couch_user = CouchUser.get_by_user_id(email_request.user_id, domain=domain)
        except CouchUser.AccountTypeError:
            pass
        else:
            if couch_user is not None:
                process_email_request(domain, download_id, couch_user.get_email())
    email_requests.delete()
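One detail worth calling out in the variant above: the docstring says expiry is in minutes, db.put appears to take its timeout in minutes, and expose_blob_download appears to take seconds, hence the expiry * 60. A quick illustrative check of the default:

expiry_minutes = 10 * 60               # the default passed to db.put(timeout=...)
expiry_seconds = expiry_minutes * 60   # the value passed to expose_blob_download
assert expiry_seconds == 36000         # ten hours either way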
def location_export(request, domain):
    if not LocationType.objects.filter(domain=domain).exists():
        messages.error(request, _("You need to define location types before "
                                  "you can do a bulk import or export."))
        return HttpResponseRedirect(reverse(LocationsListView.urlname, args=[domain]))
    include_consumption = request.GET.get('include_consumption') == 'true'
    response = HttpResponse(content_type=Format.from_format('xlsx').mimetype)
    response['Content-Disposition'] = 'attachment; filename="locations.xlsx"'
    dump_locations(response, domain, include_consumption)
    return response
def location_export(request, domain):
    if not LocationType.objects.filter(domain=domain).exists():
        messages.error(request, _("You need to define organization levels before "
                                  "you can do a bulk import or export."))
        return HttpResponseRedirect(reverse(LocationsListView.urlname, args=[domain]))
    include_consumption = request.GET.get('include_consumption') == 'true'
    response = HttpResponse(content_type=Format.from_format('xlsx').mimetype)
    response['Content-Disposition'] = 'attachment; filename="{}_locations.xlsx"'.format(domain)
    dump_locations(response, domain, include_consumption=include_consumption)
    return response
def filename(self):
    file_ext = Format.from_format(self.format).extension
    filename = "%s.%s" % (self.export_id, file_ext)
    try:
        app = Application.get(self.export_id)
        if app:
            filename = "%s-%s.%s" % (app.name, app.get_id, file_ext)
    except Exception:
        pass
    return filename
def save_dump_to_blob(data_file_path, data_file_name, result_file_format):
    with open(data_file_path, 'rb') as file_:
        blob_db = get_blob_db()
        blob_db.put(file_, data_file_name, timeout=60 * 24)  # 24 hours
    file_format = Format.from_format(result_file_format)
    file_name_header = safe_filename_header(data_file_name, file_format.extension)
    blob_dl_object = expose_blob_download(
        data_file_name,
        mimetype=file_format.mimetype,
        content_disposition=file_name_header
    )
    return blob_dl_object.download_id
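A hedged usage sketch for save_dump_to_blob above; the dump path and name are placeholders.

download_id = save_dump_to_blob(
    data_file_path='/tmp/domain_dump.csv',   # hypothetical dump location
    data_file_name='domain_dump.csv',
    result_file_format=Format.CSV,
)
# The returned download_id can then be handed to the retrieve_download view.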
def superuser_table(request):
    superusers = augmented_superusers()
    f = StringIO()
    csv_writer = csv.writer(f)
    csv_writer.writerow(['Username', 'Developer', 'Superuser', 'Two Factor Enabled'])
    for user in superusers:
        csv_writer.writerow([
            user.username, user.is_staff, user.is_superuser, user.two_factor_enabled])
    response = HttpResponse(content_type=Format.from_format('csv').mimetype)
    response['Content-Disposition'] = 'attachment; filename="superuser_table.csv"'
    response.write(f.getvalue())
    return response
def formdefs(request, domain, app_id):
    # TODO: Looks like this function is never used
    langs = [json.loads(request.GET.get('lang', '"en"'))]
    format = request.GET.get('format', 'json')
    app = get_app(domain, app_id)

    def get_questions(form):
        xform = XForm(form.source)
        prefix = '/%s/' % xform.data_node.tag_name

        def remove_prefix(string):
            if string.startswith(prefix):
                return string[len(prefix):]
            else:
                raise Exception()

        def transform_question(q):
            return {
                'id': remove_prefix(q['value']),
                'type': q['tag'],
                'text': q['label'] if q['tag'] != 'hidden' else ''
            }
        return [transform_question(q) for q in xform.get_questions(langs)]

    formdefs = [{
        'name': "%s, %s" % (f['form'].get_module().name['en'], f['form'].name['en'])
                if f['type'] == 'module_form' else 'User Registration',
        'columns': ['id', 'type', 'text'],
        'rows': get_questions(f['form'])
    } for f in app.get_forms(bare=False)]

    if format == 'xlsx':
        f = StringIO()
        writer = Excel2007ExportWriter()
        writer.open([(sheet['name'], [FormattedRow(sheet['columns'])]) for sheet in formdefs], f)
        writer.write([(sheet['name'], [
            FormattedRow([
                cell for (_, cell) in sorted(row.items(),
                                             key=lambda item: sheet['columns'].index(item[0]))
            ])
            for row in sheet['rows']
        ]) for sheet in formdefs])
        writer.close()
        response = HttpResponse(f.getvalue(), content_type=Format.from_format('xlsx').mimetype)
        set_file_download(response, 'formdefs.xlsx')
        return response
    else:
        return json_response(formdefs)
def save_dump_to_blob(self, temp_path):
    with open(temp_path, 'rb') as file_:
        blob_db = get_blob_db()
        blob_db.put(file_, self.result_file_name, timeout=60 * 48)  # 48 hours
    file_format = Format.from_format(Format.CSV)
    file_name_header = safe_filename_header(self.result_file_name, file_format.extension)
    blob_dl_object = expose_blob_download(
        self.result_file_name,
        mimetype=file_format.mimetype,
        content_disposition=file_name_header
    )
    return blob_dl_object.download_id
def run_data_pull(data_pull_slug, domain, month, location_id=None, email=None):
    subject = _('Custom ICDS Data Pull')
    try:
        filename = DataExporter(data_pull_slug, "icds-ucr-citus", month=month,
                                location_id=location_id).export()
    except Exception:
        if email:
            message = _("""
            Hi,
            Could not generate the requested data pull.
            The error has been notified. Please report as an issue for quicker followup
            """)
            send_html_email_async.delay(subject, [email], message,
                                        email_from=settings.DEFAULT_FROM_EMAIL)
        raise
    else:
        if email and filename:
            db = get_blob_db()
            download_id = DownloadBase.new_id_prefix + make_uuid()
            with open(filename, 'rb') as _file:
                db.put(
                    _file,
                    domain=domain,
                    parent_id=domain,
                    type_code=CODES.data_export,
                    key=download_id,
                    timeout=24 * 60,
                )
            exposed_download = expose_blob_download(
                filename,
                expiry=24 * 60 * 60,
                mimetype=Format.from_format(Format.ZIP).mimetype,
                content_disposition=safe_filename_header(filename),
                download_id=download_id)
            os.remove(filename)
            path = reverse('retrieve_download', kwargs={'download_id': exposed_download.download_id})
            link = f"{web.get_url_base()}{path}?get_file"
            message = _("""
            Hi,
            Please download the data from {link}.
            The data is available only for 24 hours.
            """).format(link=link)
            send_html_email_async.delay(subject, [email], message,
                                        email_from=settings.DEFAULT_FROM_EMAIL)
def dump_locations(domain, download_id, include_consumption, headers_only,
                   owner_id, root_location_ids=None, task=None, **kwargs):
    exporter = LocationExporter(domain, include_consumption=include_consumption,
                                root_location_ids=root_location_ids,
                                headers_only=headers_only, async_task=task, **kwargs)

    fd, path = tempfile.mkstemp()
    writer = Excel2007ExportWriter()
    writer.open(header_table=exporter.get_headers(), file=path)
    with writer:
        exporter.write_data(writer)

    with open(path, 'rb') as file_:
        db = get_blob_db()
        expiry_mins = 60
        db.put(
            file_,
            domain=domain,
            parent_id=domain,
            type_code=CODES.tempfile,
            key=download_id,
            timeout=expiry_mins,
        )
        file_format = Format.from_format(Excel2007ExportWriter.format)
        filename = '{}_locations'.format(domain)
        if len(root_location_ids) == 1:
            root_location = SQLLocation.objects.get(location_id=root_location_ids[0])
            filename += '_{}'.format(root_location.name)
        expose_blob_download(
            download_id,
            expiry=expiry_mins * 60,
            mimetype=file_format.mimetype,
            content_disposition=safe_filename_header(filename, file_format.extension),
            download_id=download_id,
            owner_ids=[owner_id],
        )
def expose_download(use_transfer, file_path, filename, download_id, file_type):
    common_kwargs = dict(
        mimetype=Format.from_format(file_type).mimetype,
        content_disposition='attachment; filename="{fname}"'.format(fname=filename),
        download_id=download_id,
    )
    if use_transfer:
        expose_file_download(file_path, use_transfer=use_transfer, **common_kwargs)
    else:
        expose_cached_download(FileWrapper(open(file_path, 'rb')),
                               expiry=(1 * 60 * 60),
                               file_extension=file_type,
                               **common_kwargs)
def populate_export_download_task(export_instances, filters, download_id, filename=None,
                                  expiry=10 * 60 * 60):
    export_file = get_export_file(export_instances, filters)
    file_format = Format.from_format(export_file.format)
    filename = filename or export_instances[0].name
    escaped_filename = escape_quotes('%s.%s' % (filename, file_format.extension))

    payload = export_file.file.payload
    expose_cached_download(
        payload,
        expiry,
        ".{}".format(file_format.extension),
        mimetype=file_format.mimetype,
        content_disposition='attachment; filename="%s"' % escaped_filename,
        download_id=download_id,
    )
    export_file.file.delete()
def expose_download(use_transfer, file_path, filename, download_id, file_type):
    common_kwargs = dict(
        mimetype=Format.from_format(file_type).mimetype,
        content_disposition='attachment; filename="{fname}"'.format(fname=filename),
        download_id=download_id,
        expiry=(1 * 60 * 60),
    )
    if use_transfer:
        expose_file_download(
            file_path,
            use_transfer=use_transfer,
            **common_kwargs
        )
    else:
        expose_cached_download(
            FileWrapper(open(file_path, 'rb')),
            file_extension=file_type,
            **common_kwargs
        )
def expose_zipped_blob_download(data_path, filename, format, domain):
    """Expose zipped file content as a blob download

    :param data_path: Path to data file. Will be deleted.
    :param filename: File name.
    :param format: `couchexport.models.Format` constant.
    :param domain: Domain name.
    :returns: A link to download the file.
    """
    try:
        _, zip_temp_path = tempfile.mkstemp(".zip")
        with ZipFile(zip_temp_path, 'w') as zip_file_:
            zip_file_.write(data_path, filename)
    finally:
        os.remove(data_path)

    try:
        expiry_mins = 60 * 24
        file_format = Format.from_format(format)
        file_name_header = safe_filename_header(filename, file_format.extension)
        ref = expose_blob_download(
            filename,
            expiry=expiry_mins * 60,
            mimetype=file_format.mimetype,
            content_disposition=file_name_header
        )
        with open(zip_temp_path, 'rb') as file_:
            get_blob_db().put(
                file_,
                domain=domain,
                parent_id=domain,
                type_code=CODES.tempfile,
                key=ref.download_id,
                timeout=expiry_mins
            )
    finally:
        os.remove(zip_temp_path)

    return "%s%s?%s" % (
        get_url_base(),
        reverse('retrieve_download', kwargs={'download_id': ref.download_id}),
        "get_file"  # download immediately rather than rendering page
    )
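A hedged usage sketch for expose_zipped_blob_download above; the path and domain are placeholders. Note the helper consumes (deletes) data_path even on failure.

url = expose_zipped_blob_download(
    data_path='/tmp/report.csv',   # deleted by the helper
    filename='report.csv',
    format=Format.CSV,
    domain='example-domain',
)
# url points straight at the file because of the trailing ?get_file parameter.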
def populate_export_download_task(export_instances, filters, download_id, filename=None,
                                  expiry=10 * 60 * 60):
    export_file = get_export_file(
        export_instances,
        filters,
        # We don't have a great way to calculate progress if it's a bulk download,
        # so only track the progress for single instance exports.
        progress_tracker=populate_export_download_task if len(export_instances) == 1 else None
    )
    file_format = Format.from_format(export_file.format)
    filename = filename or export_instances[0].name

    with export_file as file_:
        db = get_blob_db()
        db.put(file_, download_id, timeout=expiry)

    expose_blob_download(
        download_id,
        mimetype=file_format.mimetype,
        content_disposition=safe_filename_header(filename, file_format.extension),
        download_id=download_id,
    )

    domain = export_instances[0].domain
    email_requests = EmailExportWhenDoneRequest.objects.filter(
        domain=domain,
        download_id=download_id
    )
    for email_request in email_requests:
        try:
            couch_user = CouchUser.get_by_user_id(email_request.user_id, domain=domain)
        except CouchUser.AccountTypeError:
            pass
        else:
            if couch_user is not None:
                process_email_request(domain, download_id, couch_user.get_email())
    email_requests.delete()
def send_custom_sms_report(start_date: str, end_date: str, email: str, domain: str):
    subject = _('Monthly SMS report')
    recipients = [email]
    try:
        filename = call_command('get_icds_sms_usage', 'icds-cas', start_date, end_date)
        with open(filename, 'rb') as f:
            cached_download = expose_cached_download(
                f.read(), expiry=24 * 60 * 60,
                file_extension=file_extention_from_filename(filename),
                mimetype=Format.from_format(Format.XLS_2007).mimetype,
                content_disposition='attachment; filename="%s"' % filename)
            path = reverse('retrieve_download', kwargs={'download_id': cached_download.download_id})
            link = f"{web.get_url_base()}{path}?get_file"
            message = _("""
            Hi,
            Please download the sms report for time frame {start_date} to {end_date} (inclusive) at {link}.
            The report is available only for next 24 hours.
            """).format(link=link, start_date=start_date, end_date=end_date)
            send_html_email_async.delay(subject, recipients, message,
                                        email_from=settings.DEFAULT_FROM_EMAIL)
    except Exception as e:
        message = _("""
        Hi,
        Could not generate the custom SMS report for ICDS.
        The error has been notified. Please report as an issue for quick followup
        """)
        send_html_email_async.delay(subject, recipients, message,
                                    email_from=settings.DEFAULT_FROM_EMAIL)
        raise e
    finally:
        report_tracker = CustomSMSReportTracker(domain)
        report_tracker.remove_report(start_date, end_date)
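The SMS-report tasks above share one link-building idiom: Django's reverse() supplies the path, get_url_base() the scheme and host, and ?get_file makes the view serve the file directly instead of rendering a download page. As a standalone sketch with a hypothetical download id:

path = reverse('retrieve_download', kwargs={'download_id': 'example-download-id'})
link = f"{web.get_url_base()}{path}?get_file"  # ?get_file serves the file directly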
def prepare_fixture_download(table_ids, domain, task, download_id):
    """Prepare fixture data for Excel download
    """
    data_types_book, excel_sheets = _prepare_fixture(table_ids, domain, task=task)

    header_groups = [("types", excel_sheets["types"]["headers"])]
    value_groups = [("types", excel_sheets["types"]["rows"])]
    for data_type in data_types_book:
        header_groups.append((data_type.tag, excel_sheets[data_type.tag]["headers"]))
        value_groups.append((data_type.tag, excel_sheets[data_type.tag]["rows"]))

    file = StringIO()
    format = Format.XLS_2007
    export_raw(tuple(header_groups), tuple(value_groups), file, format)
    return expose_cached_download(
        file.getvalue(),
        60 * 60 * 2,
        mimetype=Format.from_format(format).mimetype,
        content_disposition='attachment; filename="%s_fixtures.xlsx"' % domain,
        download_id=download_id,
    )
def download_commcare_users(request, domain):
    response = HttpResponse(mimetype=Format.from_format("xlsx").mimetype)
    response["Content-Disposition"] = 'attachment; filename="%s_users.xlsx"' % domain

    try:
        dump_users_and_groups(response, domain)
    except GroupNameError as e:
        group_urls = [reverse("group_members", args=[domain, group.get_id])
                      for group in e.blank_groups]

        def make_link(url, i):
            return format_html('<a href="{}" target="_blank">{}</a>',
                               url, _("Blank Group %s") % i)

        group_links = [make_link(url, i + 1) for i, url in enumerate(group_urls)]
        msg = format_html(
            _("The following groups have no name. "
              "Please name them before continuing: {}"),
            mark_safe(", ".join(group_links)),
        )
        messages.error(request, msg, extra_tags="html")
        return HttpResponseRedirect(reverse("upload_commcare_users", args=[domain]))

    return response
def download_commcare_users(request, domain):
    response = HttpResponse(mimetype=Format.from_format('xlsx').mimetype)
    response['Content-Disposition'] = 'attachment; filename=%s_users.xlsx' % domain

    try:
        dump_users_and_groups(response, domain)
    except GroupNameError as e:
        group_urls = [
            reverse('group_members', args=[domain, group.get_id])
            for group in e.blank_groups
        ]

        def make_link(url, i):
            return format_html(
                '<a href="{}">{}</a>',
                url,
                _('Blank Group %s') % i
            )

        group_links = [
            make_link(url, i + 1)
            for i, url in enumerate(group_urls)
        ]
        msg = format_html(
            _(
                'The following groups have no name. '
                'Please name them before continuing: {}'
            ),
            mark_safe(', '.join(group_links))
        )
        messages.error(request, msg, extra_tags='html')
        return HttpResponseRedirect(
            reverse('upload_commcare_users', args=[domain])
        )

    return response
def download_products(request, domain):
    def _parse_custom_properties(product):
        product_data_model = CustomDataFieldsDefinition.get_or_create(
            domain,
            ProductFieldsView.field_type
        )
        product_data_fields = [f.slug for f in product_data_model.fields]

        model_data = {}
        uncategorized_data = {}

        for prop, val in product.product_data.iteritems():
            if prop in product_data_fields:
                model_data['data: ' + prop] = encode_if_needed(val)
            else:
                uncategorized_data['uncategorized_data: ' + prop] = encode_if_needed(val)

        return model_data, uncategorized_data

    def _get_products(domain):
        for p_doc in iter_docs(Product.get_db(), Product.ids_by_domain(domain)):
            # filter out archived products from export
            if not ('is_archived' in p_doc and p_doc['is_archived']):
                yield Product.wrap(p_doc)

    def _build_row(keys, product):
        row = []
        for key in keys:
            row.append(product.get(key, '') or '')
        return row

    file = StringIO()
    writer = Excel2007ExportWriter()

    product_keys = [
        'id',
        'name',
        'unit',
        'product_id',
        'description',
        'category',
        'program_id',
        'cost',
    ]

    model_data = set()
    uncategorized_data = set()
    products = []

    for product in _get_products(domain):
        product_dict = product.to_dict()

        product_model, product_uncategorized = _parse_custom_properties(product)
        model_data.update(product_model.keys())
        uncategorized_data.update(product_uncategorized.keys())

        product_dict.update(product_model)
        product_dict.update(product_uncategorized)

        products.append(product_dict)

    keys = product_keys + list(model_data) + list(uncategorized_data)
    writer.open(
        header_table=[('products', [keys])],
        file=file,
    )

    for product in products:
        writer.write([('products', [_build_row(keys, product)])])

    writer.close()

    response = HttpResponse(mimetype=Format.from_format('xlsx').mimetype)
    response['Content-Disposition'] = 'attachment; filename="products.xlsx"'
    response.write(file.getvalue())
    return response
def download_products(request, domain):
    def _parse_custom_properties(product):
        product_data_model = CustomDataFieldsDefinition.get_or_create(
            domain,
            ProductFieldsView.field_type)
        product_data_fields = [f.slug for f in product_data_model.fields]

        model_data = {}
        uncategorized_data = {}

        for prop, val in six.iteritems(product.product_data):
            if prop in product_data_fields:
                model_data['data: ' + prop] = encode_if_needed(val)
            else:
                uncategorized_data['uncategorized_data: ' + prop] = encode_if_needed(val)

        return model_data, uncategorized_data

    def _get_products(domain):
        for p_doc in iter_docs(Product.get_db(), Product.ids_by_domain(domain)):
            # filter out archived products from export
            if not ('is_archived' in p_doc and p_doc['is_archived']):
                yield Product.wrap(p_doc)

    def _build_row(keys, product):
        row = []
        for key in keys:
            row.append(product.get(key, '') or '')
        return row

    file = BytesIO()
    writer = Excel2007ExportWriter()

    product_keys = [
        'id',
        'name',
        'unit',
        'product_id',
        'description',
        'category',
        'program_id',
        'cost',
    ]

    model_data = set()
    uncategorized_data = set()
    products = []

    for product in _get_products(domain):
        product_dict = product.to_dict()

        product_model, product_uncategorized = _parse_custom_properties(product)
        model_data.update(product_model)
        uncategorized_data.update(product_uncategorized)

        product_dict.update(product_model)
        product_dict.update(product_uncategorized)

        products.append(product_dict)

    keys = product_keys + list(model_data) + list(uncategorized_data)
    writer.open(
        header_table=[('products', [keys])],
        file=file,
    )

    for product in products:
        writer.write([('products', [_build_row(keys, product)])])

    writer.close()

    response = HttpResponse(content_type=Format.from_format('xlsx').mimetype)
    response['Content-Disposition'] = 'attachment; filename="products.xlsx"'
    response.write(file.getvalue())
    return response
def filename(self):
    return "bulk_export.%(ext)s" % {
        'ext': Format.from_format(self.format).extension,
    }
def download_item_lists(request, domain, html_response=False):
    """
    Is used to serve excel_download and html_view for view_lookup_tables
    """
    table_ids = request.GET.getlist("table_id")
    if table_ids and table_ids[0]:
        try:
            data_types_view = [FixtureDataType.get(id) for id in request.GET.getlist("table_id")]
        except ResourceNotFound as Ex:
            if html_response:
                messages.info(request, _("Sorry, we couldn't find that table. If you think this is a mistake please report an issue."))
                raise
            data_types_view = FixtureDataType.by_domain(domain)
    else:
        data_types_view = FixtureDataType.by_domain(domain)

    if html_response:
        data_types_view = list(data_types_view)[0:1]

    # book-keeping data from view_results for repeated use
    data_types_book = []
    data_items_book_by_type = {}
    item_helpers_by_type = {}
    """
    Contains all excel sheets in following format
    excel_sheets = {
        "types": {
            "headers": [],
            "rows": [(row), (row), (row)]
        }
        "next-sheet": {
            "headers": [],
            "rows": [(row), (row), (row)]
        },
        ...
    }
    """
    excel_sheets = {}

    def empty_padding_list(length):
        return ["" for x in range(0, length)]

    max_fields = 0
    max_item_attributes = 0
    """
    - Helper to generate headers like "field 2: property 1"
    - Captures max_num_of_properties for any field of any type at the list-index.
    Example values:
        [0, 1] -> "field 2: property 1" (first-field has zero-props, second has 1 property)
        [1, 1] -> "field 1: property 1" (first-field has 1 property, second has 1 property)
        [0, 2] -> "field 2: property 1", "field 2: property 2"
    """
    field_prop_count = []
    """
    captures all possible 'field-property' values for each data-type
    Example value
      {u'clinics': {'field 2 : property 1': u'lang'}, u'growth_chart': {'field 2 : property 2': u'maxWeight'}}
    """
    type_field_properties = {}
    get_field_prop_format = lambda x, y: "field " + str(x) + " : property " + str(y)

    for data_type in data_types_view:
        # Helpers to generate 'types' sheet
        type_field_properties[data_type.tag] = {}
        data_types_book.append(data_type)
        if len(data_type.fields) > max_fields:
            max_fields = len(data_type.fields)
        if len(data_type.item_attributes) > max_item_attributes:
            max_item_attributes = len(data_type.item_attributes)
        for index, field in enumerate(data_type.fields):
            if len(field_prop_count) <= index:
                field_prop_count.append(len(field.properties))
            elif field_prop_count[index] <= len(field.properties):
                field_prop_count[index] = len(field.properties)
            if len(field.properties) > 0:
                for prop_index, property in enumerate(field.properties):
                    prop_key = get_field_prop_format(index + 1, prop_index + 1)
                    type_field_properties[data_type.tag][prop_key] = property

        # Helpers to generate item-sheets
        data_items_book_by_type[data_type.tag] = []
        max_users = 0
        max_groups = 0
        max_field_prop_combos = {field_name: 0 for field_name in data_type.fields_without_attributes}
        for item_row in FixtureDataItem.by_data_type(domain, data_type.get_id):
            data_items_book_by_type[data_type.tag].append(item_row)
            group_len = len(item_row.groups)
            max_groups = group_len if group_len > max_groups else max_groups
            user_len = len(item_row.users)
            max_users = user_len if user_len > max_users else max_users
            for field_key in item_row.fields:
                if field_key in max_field_prop_combos:
                    max_combos = max_field_prop_combos[field_key]
                    cur_combo_len = len(item_row.fields[field_key].field_list)
                    max_combos = cur_combo_len if cur_combo_len > max_combos else max_combos
                    max_field_prop_combos[field_key] = max_combos
        item_helpers = {
            "max_users": max_users,
            "max_groups": max_groups,
            "max_field_prop_combos": max_field_prop_combos,
        }
        item_helpers_by_type[data_type.tag] = item_helpers

    # Prepare 'types' sheet data
    types_sheet = {"headers": [], "rows": []}
    types_sheet["headers"] = [DELETE_HEADER, "table_id", 'is_global?']
    types_sheet["headers"].extend(["field %d" % x for x in range(1, max_fields + 1)])
    types_sheet["headers"].extend(["property %d" % x for x in range(1, max_item_attributes + 1)])
    field_prop_headers = []
    for field_num, prop_num in enumerate(field_prop_count):
        if prop_num > 0:
            for c in range(0, prop_num):
                prop_key = get_field_prop_format(field_num + 1, c + 1)
                field_prop_headers.append(prop_key)
                types_sheet["headers"].append(prop_key)

    for data_type in data_types_book:
        common_vals = ["N", data_type.tag, yesno(data_type.is_global)]
        field_vals = ([field.field_name for field in data_type.fields]
                      + empty_padding_list(max_fields - len(data_type.fields)))
        item_att_vals = (data_type.item_attributes
                         + empty_padding_list(max_item_attributes - len(data_type.item_attributes)))
        prop_vals = []
        if type_field_properties.has_key(data_type.tag):
            props = type_field_properties.get(data_type.tag)
            prop_vals.extend([props.get(key, "") for key in field_prop_headers])
        row = tuple(common_vals[2 if html_response else 0:] + field_vals + item_att_vals + prop_vals)
        types_sheet["rows"].append(row)

    types_sheet["rows"] = tuple(types_sheet["rows"])
    types_sheet["headers"] = tuple(types_sheet["headers"])
    excel_sheets["types"] = types_sheet

    # Prepare 'items' sheet data for each data-type
    for data_type in data_types_book:
        item_sheet = {"headers": [], "rows": []}
        item_helpers = item_helpers_by_type[data_type.tag]
        max_users = item_helpers["max_users"]
        max_groups = item_helpers["max_groups"]
        max_field_prop_combos = item_helpers["max_field_prop_combos"]
        common_headers = ["UID", DELETE_HEADER]
        user_headers = ["user %d" % x for x in range(1, max_users + 1)]
        group_headers = ["group %d" % x for x in range(1, max_groups + 1)]
        field_headers = []
        item_att_headers = ["property: " + attribute for attribute in data_type.item_attributes]
        for field in data_type.fields:
            if len(field.properties) == 0:
                field_headers.append("field: " + field.field_name)
            else:
                prop_headers = []
                for x in range(1, max_field_prop_combos[field.field_name] + 1):
                    for property in field.properties:
                        prop_headers.append("%(name)s: %(prop)s %(count)s" % {
                            "name": field.field_name,
                            "prop": property,
                            "count": x
                        })
                    prop_headers.append("field: %(name)s %(count)s" % {
                        "name": field.field_name,
                        "count": x
                    })
                field_headers.extend(prop_headers)
        item_sheet["headers"] = tuple(
            common_headers[2 if html_response else 0:]
            + field_headers
            + item_att_headers
            + user_headers
            + group_headers
        )
        excel_sheets[data_type.tag] = item_sheet
        for item_row in data_items_book_by_type[data_type.tag]:
            common_vals = [str(_id_from_doc(item_row)), "N"]
            user_vals = ([user.raw_username for user in item_row.users]
                         + empty_padding_list(max_users - len(item_row.users)))
            group_vals = ([group.name for group in item_row.groups]
                          + empty_padding_list(max_groups - len(item_row.groups)))
            field_vals = []
            item_att_vals = [item_row.item_attributes[attribute] for attribute in data_type.item_attributes]
            for field in data_type.fields:
                if len(field.properties) == 0:
                    if any(item_row.fields.get(field.field_name).field_list):
                        value = item_row.fields.get(field.field_name).field_list[0].field_value
                    else:
                        value = ""
                    field_vals.append(value)
                else:
                    field_prop_vals = []
                    cur_combo_count = len(item_row.fields.get(field.field_name).field_list)
                    cur_prop_count = len(field.properties)
                    for count, field_prop_combo in enumerate(item_row.fields.get(field.field_name).field_list):
                        for property in field.properties:
                            field_prop_vals.append(field_prop_combo.properties.get(property, None) or "")
                        field_prop_vals.append(field_prop_combo.field_value)
                    padding_list_len = ((max_field_prop_combos[field.field_name] - cur_combo_count)
                                        * (cur_prop_count + 1))
                    field_prop_vals.extend(empty_padding_list(padding_list_len))
                    # import pdb; pdb.set_trace();
                    field_vals.extend(field_prop_vals)
            row = tuple(common_vals[2 if html_response else 0:]
                        + field_vals + item_att_vals + user_vals + group_vals)
            item_sheet["rows"].append(row)
        item_sheet["rows"] = tuple(item_sheet["rows"])
        excel_sheets[data_type.tag] = item_sheet

    if html_response:
        return excel_sheets

    header_groups = [("types", excel_sheets["types"]["headers"])]
    value_groups = [("types", excel_sheets["types"]["rows"])]
    for data_type in data_types_book:
        header_groups.append((data_type.tag, excel_sheets[data_type.tag]["headers"]))
        value_groups.append((data_type.tag, excel_sheets[data_type.tag]["rows"]))

    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as temp:
        export_raw(tuple(header_groups), tuple(value_groups), temp)
    format = Format.XLS_2007
    fl = open(path, 'r')
    fileref = expose_download(
        fl.read(),
        60 * 10,
        mimetype=Format.from_format(format).mimetype,
        content_disposition='attachment; filename="%s_fixtures.xlsx"' % domain,
    )
    return json_response({"download_id": fileref.download_id})
def download_item_lists(request, domain, html_response=False):
    """
    Is used to serve excel_download and html_view for view_lookup_tables
    """
    table_ids = request.GET.getlist("table_id")
    if table_ids and table_ids[0]:
        try:
            data_types_view = [FixtureDataType.get(id) for id in request.GET.getlist("table_id")]
        except ResourceNotFound as Ex:
            if html_response:
                messages.info(request, _("Sorry, we couldn't find that table. If you think this is a mistake please report an issue."))
                raise
            data_types_view = FixtureDataType.by_domain(domain)
    else:
        data_types_view = FixtureDataType.by_domain(domain)

    if html_response:
        data_types_view = list(data_types_view)[0:1]

    # book-keeping data from view_results for repeated use
    data_types_book = []
    data_items_book_by_type = {}
    item_helpers_by_type = {}
    """
    Contains all excel sheets in following format
    excel_sheets = {
        "types": {
            "headers": [],
            "rows": [(row), (row), (row)]
        }
        "next-sheet": {
            "headers": [],
            "rows": [(row), (row), (row)]
        },
        ...
    }
    """
    excel_sheets = {}

    def empty_padding_list(length):
        return ["" for x in range(0, length)]

    max_fields = 0
    """
    - Helper to generate headers like "field 2: property 1"
    - Captures max_num_of_properties for any field of any type at the list-index.
    Example values:
        [0, 1] -> "field 2: property 1" (first-field has zero-props, second has 1 property)
        [1, 1] -> "field 1: property 1" (first-field has 1 property, second has 1 property)
        [0, 2] -> "field 2: property 1", "field 2: property 2"
    """
    field_prop_count = []
    """
    captures all possible 'field-property' values for each data-type
    Example value
      {u'clinics': {'field 2 : property 1': u'lang'}, u'growth_chart': {'field 2 : property 2': u'maxWeight'}}
    """
    type_field_properties = {}
    get_field_prop_format = lambda x, y: "field " + str(x) + " : property " + str(y)

    for data_type in data_types_view:
        # Helpers to generate 'types' sheet
        type_field_properties[data_type.tag] = {}
        data_types_book.append(data_type)
        if len(data_type.fields) > max_fields:
            max_fields = len(data_type.fields)
        for index, field in enumerate(data_type.fields):
            if len(field_prop_count) <= index:
                field_prop_count.append(len(field.properties))
            elif field_prop_count[index] <= len(field.properties):
                field_prop_count[index] = len(field.properties)
            if len(field.properties) > 0:
                for prop_index, property in enumerate(field.properties):
                    prop_key = get_field_prop_format(index + 1, prop_index + 1)
                    type_field_properties[data_type.tag][prop_key] = property

        # Helpers to generate item-sheets
        data_items_book_by_type[data_type.tag] = []
        max_users = 0
        max_groups = 0
        max_field_prop_combos = {field_name: 0 for field_name in data_type.fields_without_attributes}
        for item_row in FixtureDataItem.by_data_type(domain, data_type.get_id):
            data_items_book_by_type[data_type.tag].append(item_row)
            group_len = len(item_row.groups)
            max_groups = group_len if group_len > max_groups else max_groups
            user_len = len(item_row.users)
            max_users = user_len if user_len > max_users else max_users
            for field_key in item_row.fields:
                if field_key in max_field_prop_combos:
                    max_combos = max_field_prop_combos[field_key]
                    cur_combo_len = len(item_row.fields[field_key].field_list)
                    max_combos = cur_combo_len if cur_combo_len > max_combos else max_combos
                    max_field_prop_combos[field_key] = max_combos
        item_helpers = {
            "max_users": max_users,
            "max_groups": max_groups,
            "max_field_prop_combos": max_field_prop_combos,
        }
        item_helpers_by_type[data_type.tag] = item_helpers

    # Prepare 'types' sheet data
    types_sheet = {"headers": [], "rows": []}
    types_sheet["headers"] = [DELETE_HEADER, "table_id", 'is_global?']
    types_sheet["headers"].extend(["field %d" % x for x in range(1, max_fields + 1)])
    field_prop_headers = []
    for field_num, prop_num in enumerate(field_prop_count):
        if prop_num > 0:
            for c in range(0, prop_num):
                prop_key = get_field_prop_format(field_num + 1, c + 1)
                field_prop_headers.append(prop_key)
                types_sheet["headers"].append(prop_key)

    for data_type in data_types_book:
        common_vals = ["N", data_type.tag, yesno(data_type.is_global)]
        field_vals = ([field.field_name for field in data_type.fields]
                      + empty_padding_list(max_fields - len(data_type.fields)))
        prop_vals = []
        if type_field_properties.has_key(data_type.tag):
            props = type_field_properties.get(data_type.tag)
            prop_vals.extend([props.get(key, "") for key in field_prop_headers])
        row = tuple(common_vals[2 if html_response else 0:] + field_vals + prop_vals)
        types_sheet["rows"].append(row)

    types_sheet["rows"] = tuple(types_sheet["rows"])
    types_sheet["headers"] = tuple(types_sheet["headers"])
    excel_sheets["types"] = types_sheet

    # Prepare 'items' sheet data for each data-type
    for data_type in data_types_book:
        item_sheet = {"headers": [], "rows": []}
        item_helpers = item_helpers_by_type[data_type.tag]
        max_users = item_helpers["max_users"]
        max_groups = item_helpers["max_groups"]
        max_field_prop_combos = item_helpers["max_field_prop_combos"]
        common_headers = ["UID", DELETE_HEADER]
        user_headers = ["user %d" % x for x in range(1, max_users + 1)]
        group_headers = ["group %d" % x for x in range(1, max_groups + 1)]
        field_headers = []
        for field in data_type.fields:
            if len(field.properties) == 0:
                field_headers.append("field: " + field.field_name)
            else:
                prop_headers = []
                for x in range(1, max_field_prop_combos[field.field_name] + 1):
                    for property in field.properties:
                        prop_headers.append("%(name)s: %(prop)s %(count)s" % {
                            "name": field.field_name,
                            "prop": property,
                            "count": x
                        })
                    prop_headers.append("field: %(name)s %(count)s" % {
                        "name": field.field_name,
                        "count": x
                    })
                field_headers.extend(prop_headers)
        item_sheet["headers"] = tuple(
            common_headers[2 if html_response else 0:]
            + field_headers
            + user_headers
            + group_headers
        )
        excel_sheets[data_type.tag] = item_sheet
        for item_row in data_items_book_by_type[data_type.tag]:
            common_vals = [str(_id_from_doc(item_row)), "N"]
            user_vals = ([user.raw_username for user in item_row.users]
                         + empty_padding_list(max_users - len(item_row.users)))
            group_vals = ([group.name for group in item_row.groups]
                          + empty_padding_list(max_groups - len(item_row.groups)))
            field_vals = []
            for field in data_type.fields:
                if len(field.properties) == 0:
                    if any(item_row.fields.get(field.field_name).field_list):
                        value = item_row.fields.get(field.field_name).field_list[0].field_value
                    else:
                        value = ""
                    field_vals.append(value)
                else:
                    field_prop_vals = []
                    cur_combo_count = len(item_row.fields.get(field.field_name).field_list)
                    cur_prop_count = len(field.properties)
                    for count, field_prop_combo in enumerate(item_row.fields.get(field.field_name).field_list):
                        for property in field.properties:
                            field_prop_vals.append(field_prop_combo.properties.get(property, None) or "")
                        field_prop_vals.append(field_prop_combo.field_value)
                    padding_list_len = ((max_field_prop_combos[field.field_name] - cur_combo_count)
                                        * (cur_prop_count + 1))
                    field_prop_vals.extend(empty_padding_list(padding_list_len))
                    # import pdb; pdb.set_trace();
                    field_vals.extend(field_prop_vals)
            row = tuple(common_vals[2 if html_response else 0:]
                        + field_vals + user_vals + group_vals)
            item_sheet["rows"].append(row)
        item_sheet["rows"] = tuple(item_sheet["rows"])
        excel_sheets[data_type.tag] = item_sheet

    if html_response:
        return excel_sheets

    header_groups = [("types", excel_sheets["types"]["headers"])]
    value_groups = [("types", excel_sheets["types"]["rows"])]
    for data_type in data_types_book:
        header_groups.append((data_type.tag, excel_sheets[data_type.tag]["headers"]))
        value_groups.append((data_type.tag, excel_sheets[data_type.tag]["rows"]))

    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as temp:
        export_raw(tuple(header_groups), tuple(value_groups), temp)
    format = Format.XLS_2007
    fl = open(path, 'r')
    fileref = expose_download(
        fl.read(),
        60 * 10,
        mimetype=Format.from_format(format).mimetype,
        content_disposition='attachment; filename="%s_fixtures.xlsx"' % domain,
    )
    return json_response({"download_id": fileref.download_id})
def filename(self):
    return "%s_custom_bulk_export.%s" % (self.domain, Format.from_format(self.format).extension)
def location_export(request, domain):
    include_consumption = request.GET.get('include_consumption') == 'true'
    response = HttpResponse(mimetype=Format.from_format('xlsx').mimetype)
    response['Content-Disposition'] = 'attachment; filename="locations.xlsx"'
    dump_locations(response, domain, include_consumption)
    return response
def filename(self):
    return "bulk_export.%s" % Format.from_format(self.format).extension