Example 1
def cache_file_to_be_served(tmp, checkpoint, download_id, format=None, filename=None, expiry=10*60*60):
    """
    tmp can be either a path to a tempfile or a StringIO
    (the APIs for tempfiles vs StringIO are unfortunately... not similar)
    """
    if checkpoint:
        format = Format.from_format(format)
        try:
            filename = unidecode(filename)
        except Exception: 
            pass

        tmp = Temp(tmp)
        payload = tmp.payload
        expose_download(payload, expiry,
                        mimetype=format.mimetype,
                        content_disposition='attachment; filename=%s.%s' % (filename, format.extension),
                        extras={'X-CommCareHQ-Export-Token': checkpoint.get_id},
                        download_id=download_id)
        
    else:
        # this just gives you a link saying there wasn't anything there
        expose_download("Sorry, there wasn't any data.", expiry, 
                        content_disposition="",
                        mimetype="text/html",
                        download_id=download_id).save(expiry)
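The except branches in Examples 4 and 13 suggest that the success path of the export task hands the (tmp, checkpoint) pair from get_export_files straight to this helper. A minimal sketch of such a caller, assuming that relationship (illustrative only, not taken from the original codebase):

def export_async(custom_export, download_id, format=None, filename=None, **kwargs):
    # illustrative success path only; the failure branch is shown in Example 4
    tmp, checkpoint = custom_export.get_export_files(format=format, **kwargs)
    cache_file_to_be_served(tmp, checkpoint, download_id,
                            format=format, filename=filename)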
Example 2
def export_ucr_async(report_export, download_id, user):
    use_transfer = settings.SHARED_DRIVE_CONF.transfer_enabled
    ascii_title = report_export.title.encode('ascii', 'replace').decode('utf-8')
    filename = '{}.xlsx'.format(ascii_title.replace('/', '?'))
    file_path = get_download_file_path(use_transfer, filename)

    report_export.create_export(file_path, Format.XLS_2007)

    expose_download(use_transfer, file_path, filename, download_id, 'xlsx')
    link = reverse("retrieve_download", args=[download_id], params={"get_file": '1'}, absolute=True)

    send_report_download_email(report_export.title, user.get_email(), link)
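Note the calling convention here: unlike the payload-based expose_download in Example 1, this variant takes a use-transfer flag, the path to a file already on disk, a filename, the download id, and a file extension, so only metadata is registered. A hedged sketch of queueing this task from a view, reusing the example's report_export and user variables and assuming export_ucr_async is registered as a celery task (both assumptions, not confirmed by these examples):

download = DownloadBase()  # assumption: a bare DownloadBase mints a fresh download_id
download.set_task(export_ucr_async.delay(report_export, download.download_id, user))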
Example 3
def build_form_multimedia_zip(
        domain,
        xmlns,
        startdate,
        enddate,
        app_id,
        export_id,
        zip_name,
        download_id,
        export_is_legacy,
        user_types=None,
        group=None):

    form_ids = get_form_ids_having_multimedia(
        domain,
        app_id,
        xmlns,
        parse(startdate),
        parse(enddate),
        group=group,
        user_types=user_types,
    )
    properties = _get_export_properties(export_id, export_is_legacy)

    if not app_id:
        zip_name = 'Unrelated Form'
    forms_info = list()
    for form in FormAccessors(domain).iter_forms(form_ids):
        if not zip_name:
            zip_name = unidecode(form.name or 'unknown form')
        forms_info.append(_extract_form_attachment_info(form, properties))

    num_forms = len(forms_info)
    DownloadBase.set_progress(build_form_multimedia_zip, 0, num_forms)

    case_id_to_name = _get_case_names(
        domain,
        set.union(*[form_info['case_ids'] for form_info in forms_info]) if forms_info else set(),
    )

    use_transfer = settings.SHARED_DRIVE_CONF.transfer_enabled
    if use_transfer:
        fpath = _get_download_file_path(xmlns, startdate, enddate, export_id, app_id, num_forms)
    else:
        _, fpath = tempfile.mkstemp()

    _write_attachments_to_file(fpath, use_transfer, num_forms, forms_info, case_id_to_name)
    filename = "{}.zip".format(zip_name)
    expose_download(use_transfer, fpath, filename, download_id, 'zip')
    DownloadBase.set_progress(build_form_multimedia_zip, num_forms, num_forms)
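The two DownloadBase.set_progress calls bracket the work: progress is pinned to 0 of num_forms before the attachments are written and forced to num_forms of num_forms once the zip is exposed. The same bracketing idiom, distilled into a placeholder task (names below are illustrative, not from the original code):

def my_export_task(items, download_id):
    # placeholder task showing the soil progress-bracketing idiom
    total = len(items)
    DownloadBase.set_progress(my_export_task, 0, total)
    for done, item in enumerate(items, 1):
        process(item)  # placeholder for the real per-item work
        DownloadBase.set_progress(my_export_task, done, total)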
Example 4
def export_async(custom_export, download_id, format=None, filename=None, **kwargs):
    try:
        tmp, checkpoint = custom_export.get_export_files(format=format, process=export_async, **kwargs)
    except SchemaMismatchException as e:
        # fire off a delayed force update to prevent this from happening again
        rebuild_schemas.delay(custom_export.index)
        expiry = 10*60*60
        expose_download(
            "Sorry, the export failed for %s, please try again later" % custom_export._id,
            expiry,
            content_disposition="",
            mimetype="text/html",
            download_id=download_id
        ).save(expiry)
Example 5
    def post(self, request, *args, **kwargs):
        upload = request.FILES.get('bulk_upload_file')
        if not upload:
            messages.error(request, _('no file uploaded'))
            return self.get(request, *args, **kwargs)
        if not args:
            messages.error(request, _('no domain specified'))
            return self.get(request, *args, **kwargs)

        domain = args[0]

        # stash this in soil to make it easier to pass to celery
        file_ref = expose_download(upload.read(),
                                   expiry=1*60*60)
        task = import_locations_async.delay(
            domain,
            file_ref.download_id,
        )
        file_ref.set_task(task)
        return HttpResponseRedirect(
            reverse(
                LocationImportStatusView.urlname,
                args=[domain, file_ref.download_id]
            )
        )
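Examples 5-7 and 20-25 all follow the same soil hand-off pattern: read the upload into expose_download, pass the resulting download_id to a celery task, attach the task to the file reference, and redirect to a status view keyed on that id. The pattern distilled, with placeholder view and task names (illustrative only, not from the original code):

def handle_upload(request, domain):
    # placeholder names; the shape mirrors the surrounding examples
    upload = request.FILES['bulk_upload_file']
    file_ref = expose_download(upload.read(), expiry=1 * 60 * 60)
    task = process_upload_async.delay(domain, file_ref.download_id)
    file_ref.set_task(task)
    return HttpResponseRedirect(
        reverse(UploadStatusView.urlname, args=[domain, file_ref.download_id]))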
Example 6
    def post(self, request, *args, **kwargs):
        upload = request.FILES.get('locs')
        if not upload:
            return HttpResponse(_('no file uploaded'))
        if not args:
            return HttpResponse(_('no domain specified'))

        domain = args[0]

        update_existing = bool(request.POST.get('update'))

        # stash this in soil to make it easier to pass to celery
        file_ref = expose_download(upload.read(),
                                   expiry=1*60*60)
        task = import_locations_async.delay(
            domain,
            file_ref.download_id,
            update_existing
        )
        file_ref.set_task(task)

        return HttpResponseRedirect(
            reverse(
                LocationImportStatusView.urlname,
                args=[domain, file_ref.download_id]
            )
        )
Example 7
    def post(self, request):
        replace = 'replace' in request.POST

        file_ref = expose_download(request.file.read(),
                                   expiry=1*60*60)

        # catch basic validation in the synchronous UI
        try:
            validate_file_format(file_ref.get_filename())
        except FixtureUploadError as e:
            messages.error(request, unicode(e))
            return HttpResponseRedirect(fixtures_home(self.domain))

        # hand off to async
        task = fixture_upload_async.delay(
            self.domain,
            file_ref.download_id,
            replace,
        )
        file_ref.set_task(task)
        return HttpResponseRedirect(
            reverse(
                FixtureUploadStatusView.urlname,
                args=[self.domain, file_ref.download_id]
            )
        )
Example 8
def excel_config(request, domain):
    if request.method == 'POST':
        if request.FILES:
            named_columns = request.POST['named_columns']
            uses_headers = named_columns == 'yes'
            uploaded_file_handle = request.FILES['file']
            
            extension = os.path.splitext(uploaded_file_handle.name)[1][1:].strip().lower()
            
            if extension in ExcelFile.ALLOWED_EXTENSIONS:
                # NOTE: this is kinda messy and needs to be cleaned up but
                # just trying to get something functional in place.
                # We may not always be able to reference files from subsequent
                # views if your worker changes, so we have to store it elsewhere
                # using the soil framework.
                
                # stash content in the default storage for subsequent views
                file_ref = expose_download(uploaded_file_handle.read(),
                                           expiry=1*60*60)
                request.session[EXCEL_SESSION_ID] = file_ref.download_id

                spreadsheet = _get_spreadsheet(file_ref, uses_headers)
                if not spreadsheet:
                    return _spreadsheet_expired(request, domain)
                columns = spreadsheet.get_header_columns()
                row_count = spreadsheet.get_num_rows()
                if row_count > MAX_ALLOWED_ROWS:
                    messages.error(request, _('Sorry, your spreadsheet is too big. '
                                              'Please reduce the number of '
                                              'rows to less than %s and try again') % MAX_ALLOWED_ROWS)
                else:
                    # get case types in this domain
                    case_types = []
                    for row in CommCareCase.view('hqcase/types_by_domain',
                                                 reduce=True, group=True,
                                                 startkey=[domain],
                                                 endkey=[domain, {}]).all():
                        if not row['key'][1] in case_types:
                            case_types.append(row['key'][1])

                    if len(case_types) > 0:
                        return render(request, "importer/excel_config.html", {
                                                    'named_columns': named_columns, 
                                                    'columns': columns,
                                                    'case_types': case_types,
                                                    'domain': domain,
                                                    'report': {
                                                        'name': 'Import: Configuration'
                                                     },
                                                    'slug': base.ImportCases.slug})
                    else:
                        messages.error(request, _('No cases have been submitted to this domain. '
                                                  'You cannot update case details from an Excel '
                                                  'file until you have existing cases.'))
            else:
                messages.error(request, _('The Excel file you chose could not be processed. '
                                          'Please check that it is saved as a Microsoft Excel '
                                          '97/2000 .xls file.'))
        else:
            messages.error(request, _('Please choose an Excel file to import.'))
    #TODO show bad/invalid file error on this page
    return HttpResponseRedirect(base.ImportCases.get_url(domain=domain))
Example 9
def _dump_xlsx_and_expose_download(filename, headers, rows, download_id, task,
                                   total_count, owner_id):
    writer = Excel2007ExportWriter(format_as_text=True)
    use_transfer = settings.SHARED_DRIVE_CONF.transfer_enabled
    file_path = get_download_file_path(use_transfer, filename)
    writer.open(
        header_table=headers,
        file=file_path,
    )
    writer.write(rows)
    writer.close()

    expose_download(use_transfer,
                    file_path,
                    filename,
                    download_id,
                    'xlsx',
                    owner_ids=[owner_id])
    DownloadBase.set_progress(task, total_count, total_count)
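This helper bundles the write-then-expose-then-finish sequence that Example 28 spells out inline. A hedged caller, with the sheet structure inferred from the headers and rows lists built in Example 28 (the data and identifiers below are made up for illustration):

headers = [('users', [('username', 'email')])]        # one sheet, one header row
rows = [('users', [('jdoe', 'jdoe@example.com')])]    # matching data rows
_dump_xlsx_and_expose_download(
    'example_users.xlsx', headers, rows,
    download_id='abc123', task=None,   # a real caller passes its celery task here
    total_count=1, owner_id='someuserid')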
Example 10
    def post(self, request, *args, **kwargs):
        upload = request.FILES.get("bulk_upload_file")
        """View's dispatch method automatically calls this"""
        try:
            self.workbook = WorkbookJSONReader(upload)
        except InvalidFileException:
            try:
                csv.DictReader(io.StringIO(upload.read().decode("ascii"), newline=None))
                return HttpResponseBadRequest(
                    "CommCare HQ no longer supports CSV upload. "
                    "Please convert to Excel 2007 or higher (.xlsx) "
                    "and try again."
                )
            except UnicodeDecodeError:
                return HttpResponseBadRequest("Unrecognized format")
        except JSONReaderError as e:
            messages.error(request, "Your upload was unsuccessful. %s" % e.message)
            return self.get(request, *args, **kwargs)
        except HeaderValueError as e:
            return HttpResponseBadRequest("Upload encountered a data type error: %s" % e.message)

        try:
            self.user_specs = self.workbook.get_worksheet(title="users")
        except WorksheetNotFound:
            try:
                self.user_specs = self.workbook.get_worksheet()
            except WorksheetNotFound:
                return HttpResponseBadRequest("Workbook has no worksheets")

        try:
            self.group_specs = self.workbook.get_worksheet(title="groups")
        except WorksheetNotFound:
            self.group_specs = []

        self.location_specs = []
        if Domain.get_by_name(self.domain).commtrack_enabled:
            try:
                self.location_specs = self.workbook.get_worksheet(title="locations")
            except WorksheetNotFound:
                # if there is no sheet for locations (since this was added
                # later and is optional) we don't error
                pass

        try:
            check_headers(self.user_specs)
        except UserUploadError as e:
            return HttpResponseBadRequest(e)

        task_ref = expose_download(None, expiry=1 * 60 * 60)
        task = bulk_upload_async.delay(
            self.domain, list(self.user_specs), list(self.group_specs), list(self.location_specs)
        )
        task_ref.set_task(task)
        return HttpResponseRedirect(reverse(UserUploadStatusView.urlname, args=[self.domain, task_ref.download_id]))
Example 11
def export_ucr_async(report_export, download_id, user):
    use_transfer = settings.SHARED_DRIVE_CONF.transfer_enabled
    ascii_title = report_export.title.encode('ascii',
                                             'replace').decode('utf-8')
    filename = '{}.xlsx'.format(ascii_title.replace('/', '?'))
    file_path = get_download_file_path(use_transfer, filename)

    report_export.create_export(file_path, Format.XLS_2007)
    expose_download(use_transfer,
                    file_path,
                    filename,
                    download_id,
                    'xlsx',
                    owner_ids=[user.get_id])
    link = reverse("retrieve_download",
                   args=[download_id],
                   params={"get_file": '1'},
                   absolute=True)

    send_report_download_email(report_export.title, user.get_email(), link)
Example 12
def import_locations_async(download_id, domain, file_ref_id, update_existing=False):
    """
    Asynchronously import locations. download_id is for showing
    the results to the user through soil. file_ref_id is also a
    download_id, but should be a pointer to the import file.
    """
    download_ref = DownloadBase.get(file_ref_id)
    with open(download_ref.get_filename(), 'rb') as f:
        results_msg = '\n'.join(import_locations(domain, f, update_existing))
    ref = expose_download(results_msg, 60*60*3)
    cache.set(download_id, ref)
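The task stores its soil reference under download_id, so a results view presumably reads it back from the same cache. A sketch of such a consumer, assuming the same cache backend and the get_filename accessor used above (hypothetical, not from the original code):

def import_status(request, domain, download_id):
    ref = cache.get(download_id)  # assumption: the same cache the task wrote to
    if ref is None:
        return HttpResponse('Import still in progress...')
    with open(ref.get_filename()) as f:
        return HttpResponse(f.read(), content_type='text/plain')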
Example 13
def export_async(custom_export,
                 download_id,
                 format=None,
                 filename=None,
                 **kwargs):
    try:
        tmp, checkpoint = custom_export.get_export_files(format=format,
                                                         process=export_async,
                                                         **kwargs)
    except SchemaMismatchException as e:
        # fire off a delayed force update to prevent this from happening again
        rebuild_schemas.delay(custom_export.index)
        expiry = 10 * 60 * 60
        expose_download(
            "Sorry, the export failed for %s, please try again later" %
            custom_export._id,
            expiry,
            content_disposition="",
            mimetype="text/html",
            download_id=download_id).save(expiry)
Example 14
def historical_import(request, domain):
    if request.method == "POST":
        file_ref = expose_download(request.FILES['history'].read(),
                                   expiry=1*60*60)
        download_id = uuid.uuid4().hex
        import_stock_reports_async.delay(download_id, domain, file_ref.download_id)
        return _async_in_progress(request, domain, download_id)

    return HttpResponse("""
<form method="post" action="" enctype="multipart/form-data">
  <div><input type="file" name="history" /></div>
  <div><button type="submit">Import historical stock reports</button></div>
</form>
""")
Example 15
def historical_import(request, domain):
    if request.method == "POST":
        file_ref = expose_download(request.FILES['history'].read(),
                                   expiry=1*60*60)
        download_id = uuid.uuid4().hex
        import_stock_reports_async.delay(download_id, domain, file_ref.download_id)
        return _async_in_progress(request, domain, download_id)

    return HttpResponse("""
<form method="post" action="" enctype="multipart/form-data">
  <div><input type="file" name="history" /></div>
  <div><button type="submit">Import historical stock reports</button></div>
</form>
""")
Example 16
def cache_file_to_be_served(tmp,
                            checkpoint,
                            download_id,
                            format=None,
                            filename=None,
                            expiry=10 * 60 * 60):
    """
    tmp can be either a path to a tempfile or a StringIO
    (the APIs for tempfiles vs StringIO are unfortunately... not similar)
    """
    if checkpoint:
        format = Format.from_format(format)
        try:
            filename = unidecode(filename)
        except Exception:
            pass

        escaped_filename = escape_quotes('%s.%s' %
                                         (filename, format.extension))

        payload = tmp.payload
        expose_download(
            payload,
            expiry,
            mimetype=format.mimetype,
            content_disposition='attachment; filename="%s"' % escaped_filename,
            extras={'X-CommCareHQ-Export-Token': checkpoint.get_id},
            download_id=download_id)

    else:
        # this just gives you a link saying there wasn't anything there
        expose_download("Sorry, there wasn't any data.",
                        expiry,
                        content_disposition="",
                        mimetype="text/html",
                        download_id=download_id).save(expiry)
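Example 16 is the hardened revision of Example 1: the filename is run through escape_quotes and wrapped in double quotes inside the Content-Disposition header, so names containing spaces or quotes can no longer break the header. escape_quotes itself is not shown in these examples; a plausible minimal implementation would be:

def escape_quotes(s):
    # assumed implementation: escape embedded double quotes so the name
    # stays safe inside a quoted Content-Disposition filename parameter
    return s.replace('"', '\\"')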
Example 17
    def process_upload(self):
        # save the file w/ soil
        self.uploaded_file.file.seek(0)
        saved_file = expose_download(self.uploaded_file.file.read(), expiry=BulkMultimediaStatusCache.cache_expiry)
        processing_id = saved_file.download_id

        status = BulkMultimediaStatusCache(processing_id)
        status.save()

        process_bulk_upload_zip.delay(processing_id, self.domain, self.app_id,
                                      username=self.request.couch_user.username if self.request.couch_user else None,
                                      share_media=self.share_media,
                                      license_name=self.license_used, author=self.author,
                                      attribution_notes=self.attribution_notes, replace_existing=self.replace_existing)
        return status.get_response()
Example 18
    def process_upload(self):
        # save the file w/ soil
        self.uploaded_file.file.seek(0)
        saved_file = expose_download(self.uploaded_file.file.read(), expiry=BulkMultimediaStatusCache.cache_expiry)
        processing_id = saved_file.download_id

        status = BulkMultimediaStatusCache(processing_id)
        status.save()

        process_bulk_upload_zip.delay(processing_id, self.domain, self.app_id,
                                      username=self.username,
                                      share_media=self.share_media,
                                      license_name=self.license_used,
                                      author=self.author,
                                      attribution_notes=self.attribution_notes,
                                      replace_existing=self.replace_existing)
        return status.get_response()
Example 19
def location_import(request, domain):
    if request.method == "POST":
        upload = request.FILES.get('locs')
        if not upload:
            return HttpResponse('no file uploaded')

        # stash this in soil to make it easier to pass to celery
        file_ref = expose_download(upload.read(),
                                   expiry=1*60*60)
        download_id = uuid.uuid4().hex
        import_locations_async.delay(download_id, domain, file_ref.download_id)
        return _async_in_progress(request, domain, download_id)

    return HttpResponse("""
<form method="post" action="" enctype="multipart/form-data">
  <div><input type="file" name="locs" /></div>
  <div><button type="submit">Import locations</button></div>
</form>
""")
Example 20
    def post(self, request, *args, **kwargs):
        upload = request.FILES.get('bulk_upload_file')
        if not upload:
            messages.error(request, _('no file uploaded'))
            return self.get(request, *args, **kwargs)
        elif not upload.name.endswith('.csv'):
            messages.error(request, _('please use csv format only'))
            return self.get(request, *args, **kwargs)

        domain = args[0]
        # stash this in soil to make it easier to pass to celery
        file_ref = expose_download(upload.read(), expiry=1 * 60 * 60)
        task = import_products_async.delay(
            domain,
            file_ref.download_id,
        )
        file_ref.set_task(task)
        return HttpResponseRedirect(
            reverse(ProductImportStatusView.urlname,
                    args=[domain, file_ref.download_id]))
Example 21
def location_import(request, domain):
    if request.method == "POST":
        upload = request.FILES.get('locs')
        if not upload:
            return HttpResponse('no file uploaded')
        update_existing = bool(request.POST.get('update'))

        # stash this in soil to make it easier to pass to celery
        file_ref = expose_download(upload.read(), expiry=1 * 60 * 60)
        download_id = uuid.uuid4().hex
        import_locations_async.delay(download_id, domain, file_ref.download_id,
                                     update_existing)
        return _async_in_progress(request, domain, download_id)

    return HttpResponse("""
<form method="post" action="" enctype="multipart/form-data">
  <div><input type="file" name="locs" /></div>
  <div><input id="update" type="checkbox" name="update" /> <label for="update">Update existing?</label></div>
  <div><button type="submit">Import locations</button></div>
</form>
""")
Example 22
def prepare_fixture_download(table_ids, domain, task, download_id):
    """Prepare fixture data for Excel download
    """
    data_types_book, excel_sheets = _prepare_fixture(table_ids, domain, task=task)

    header_groups = [("types", excel_sheets["types"]["headers"])]
    value_groups = [("types", excel_sheets["types"]["rows"])]
    for data_type in data_types_book:
        header_groups.append((data_type.tag, excel_sheets[data_type.tag]["headers"]))
        value_groups.append((data_type.tag, excel_sheets[data_type.tag]["rows"]))

    file = StringIO()
    format = Format.XLS_2007
    export_raw(tuple(header_groups), tuple(value_groups), file, format)
    return expose_download(
        file.getvalue(),
        60 * 60 * 2,
        mimetype=Format.from_format(format).mimetype,
        content_disposition='attachment; filename="%s_fixtures.xlsx"' % domain,
        download_id=download_id,
    )
Example 23
    def post(self, request, *args, **kwargs):
        upload = request.FILES.get('bulk_upload_file')
        if not upload:
            messages.error(request, _('no file uploaded'))
            return self.get(request, *args, **kwargs)
        if not args:
            messages.error(request, _('no domain specified'))
            return self.get(request, *args, **kwargs)

        domain = args[0]

        # stash this in soil to make it easier to pass to celery
        file_ref = expose_download(upload.read(), expiry=1 * 60 * 60)
        task = import_locations_async.delay(
            domain,
            file_ref.download_id,
        )
        file_ref.set_task(task)
        return HttpResponseRedirect(
            reverse(LocationImportStatusView.urlname,
                    args=[domain, file_ref.download_id]))
Example 24
    def post(self, request):
        replace = 'replace' in request.POST

        file_ref = expose_download(request.file.read(), expiry=1 * 60 * 60)

        # catch basic validation in the synchronous UI
        try:
            validate_file_format(file_ref.get_filename())
        except FixtureUploadError as e:
            messages.error(request, unicode(e))
            return HttpResponseRedirect(fixtures_home(self.domain))

        # hand off to async
        task = fixture_upload_async.delay(
            self.domain,
            file_ref.download_id,
            replace,
        )
        file_ref.set_task(task)
        return HttpResponseRedirect(
            reverse(FixtureUploadStatusView.urlname,
                    args=[self.domain, file_ref.download_id]))
Example 25
    def post(self, request, *args, **kwargs):
        upload = request.FILES.get('bulk_upload_file')
        if not upload:
            messages.error(request, _('no file uploaded'))
            return self.get(request, *args, **kwargs)
        elif not upload.name.endswith('.xlsx'):
            messages.error(request, _('please use xlsx format only'))
            return self.get(request, *args, **kwargs)

        domain = args[0]
        # stash this in soil to make it easier to pass to celery
        file_ref = expose_download(upload.read(),
                                   expiry=1*60*60)
        task = import_products_async.delay(
            domain,
            file_ref.download_id,
        )
        file_ref.set_task(task)
        return HttpResponseRedirect(
            reverse(
                ProductImportStatusView.urlname,
                args=[domain, file_ref.download_id]
            )
        )
Example 26
def download_item_lists(request, domain, html_response=False):
    """
        Is used to serve excel_download and html_view for view_lookup_tables
    """
    table_ids = request.GET.getlist("table_id")
    if table_ids and table_ids[0]:
        try:
            data_types_view = [FixtureDataType.get(id) for id in request.GET.getlist("table_id")]
        except ResourceNotFound:
            if html_response:
                messages.info(request, _("Sorry, we couldn't find that table. If you think this is a mistake please report an issue."))
                raise
            data_types_view = FixtureDataType.by_domain(domain)
    else:
        data_types_view = FixtureDataType.by_domain(domain)

    if html_response:
        data_types_view = list(data_types_view)[0:1]
    # book-keeping data from view_results for repeated use
    data_types_book = []
    data_items_book_by_type = {}
    item_helpers_by_type = {}
    """
        Contains all excel sheets in following format
        excel_sheets = {
            "types": {
                "headers": [],
                "rows": [(row), (row), (row)]
            }
            "next-sheet": {
                "headers": [],
                "rows": [(row), (row), (row)]
            },
            ...
        }
    """
    excel_sheets = {}
    
    def empty_padding_list(length):
        return ["" for x in range(0, length)]

    max_fields = 0
    """
        - Helper to generate headers like "field 2: property 1"
        - Captures max_num_of_properties for any field of any type at the list-index.
        Example values:
            [0, 1] -> "field 2: property 1" (first-field has zero-props, second has 1 property)
            [1, 1] -> "field 1: property 1" (first-field has 1 property, second has 1 property)
            [0, 2] -> "field 2: property 1", "field 2: property 2"
    """
    field_prop_count = []
    """
        captures all possible 'field-property' values for each data-type
        Example value
          {u'clinics': {'field 2 : property 1': u'lang'}, u'growth_chart': {'field 2 : property 2': u'maxWeight'}}
    """
    type_field_properties = {}
    get_field_prop_format = lambda x, y: "field " + str(x) + " : property " + str(y)
    for data_type in data_types_view:
        # Helpers to generate 'types' sheet
        type_field_properties[data_type.tag] = {}
        data_types_book.append(data_type)
        if len(data_type.fields) > max_fields:
            max_fields = len(data_type.fields)
        for index, field in enumerate(data_type.fields):
            if len(field_prop_count) <= index:
                field_prop_count.append(len(field.properties))
            elif field_prop_count[index] <= len(field.properties):
                field_prop_count[index] = len(field.properties)
            if len(field.properties) > 0:
                for prop_index, property in enumerate(field.properties):
                    prop_key = get_field_prop_format(index + 1, prop_index + 1)
                    type_field_properties[data_type.tag][prop_key] = property

        # Helpers to generate item-sheets
        data_items_book_by_type[data_type.tag] = []
        max_users = 0
        max_groups = 0
        max_field_prop_combos = {field_name: 0 for field_name in data_type.fields_without_attributes}
        for item_row in FixtureDataItem.by_data_type(domain, data_type.get_id):
            data_items_book_by_type[data_type.tag].append(item_row)
            group_len = len(item_row.groups)
            max_groups = group_len if group_len > max_groups else max_groups
            user_len = len(item_row.users)
            max_users = user_len if user_len > max_users else max_users
            for field_key in item_row.fields:
                if field_key in max_field_prop_combos:
                    max_combos = max_field_prop_combos[field_key]
                    cur_combo_len = len(item_row.fields[field_key].field_list)
                    max_combos = cur_combo_len if cur_combo_len > max_combos else max_combos
                    max_field_prop_combos[field_key] = max_combos

        item_helpers = {
            "max_users": max_users,
            "max_groups": max_groups,
            "max_field_prop_combos": max_field_prop_combos,
        }
        item_helpers_by_type[data_type.tag] = item_helpers

    # Prepare 'types' sheet data
    types_sheet = {"headers": [], "rows": []}
    types_sheet["headers"] = [DELETE_HEADER, "table_id", 'is_global?']
    types_sheet["headers"].extend(["field %d" % x for x in range(1, max_fields + 1)])
    field_prop_headers = []
    for field_num, prop_num in enumerate(field_prop_count):
        if prop_num > 0:
            for c in range(0, prop_num):
                prop_key = get_field_prop_format(field_num + 1, c + 1)
                field_prop_headers.append(prop_key)
                types_sheet["headers"].append(prop_key)

    for data_type in data_types_book:
        common_vals = ["N", data_type.tag, yesno(data_type.is_global)]
        field_vals = [field.field_name for field in data_type.fields] + empty_padding_list(max_fields - len(data_type.fields))
        prop_vals = []
        if data_type.tag in type_field_properties:
            props = type_field_properties.get(data_type.tag)
            prop_vals.extend([props.get(key, "") for key in field_prop_headers])
        row = tuple(common_vals[2 if html_response else 0:] + field_vals + prop_vals)
        types_sheet["rows"].append(row)

    types_sheet["rows"] = tuple(types_sheet["rows"])
    types_sheet["headers"] = tuple(types_sheet["headers"])
    excel_sheets["types"] = types_sheet
    
    # Prepare 'items' sheet data for each data-type
    for data_type in data_types_book:
        item_sheet = {"headers": [], "rows": []}
        item_helpers = item_helpers_by_type[data_type.tag]
        max_users = item_helpers["max_users"]
        max_groups = item_helpers["max_groups"]
        max_field_prop_combos = item_helpers["max_field_prop_combos"]
        common_headers = ["UID", DELETE_HEADER]
        user_headers = ["user %d" % x for x in range(1, max_users + 1)]
        group_headers = ["group %d" % x for x in range(1, max_groups + 1)]
        field_headers = []
        for field in data_type.fields:
            if len(field.properties) == 0:
                field_headers.append("field: " + field.field_name)
            else:
                prop_headers = []
                for x in range(1, max_field_prop_combos[field.field_name] + 1):
                    for property in field.properties:
                        prop_headers.append("%(name)s: %(prop)s %(count)s" % {
                            "name": field.field_name,
                            "prop": property,
                            "count": x
                        })
                    prop_headers.append("field: %(name)s %(count)s" % {
                        "name": field.field_name,
                        "count": x
                    })
                field_headers.extend(prop_headers)
        item_sheet["headers"] = tuple(
            common_headers[2 if html_response else 0:] + field_headers + user_headers + group_headers
        )
        excel_sheets[data_type.tag] = item_sheet
        for item_row in data_items_book_by_type[data_type.tag]:
            common_vals = [str(_id_from_doc(item_row)), "N"]
            user_vals = [user.raw_username for user in item_row.users] + empty_padding_list(max_users - len(item_row.users))
            group_vals = [group.name for group in item_row.groups] + empty_padding_list(max_groups - len(item_row.groups))
            field_vals = []
            for field in data_type.fields:
                if len(field.properties) == 0:
                    if any(item_row.fields.get(field.field_name).field_list):
                        value = item_row.fields.get(field.field_name).field_list[0].field_value
                    else:
                        value = ""
                    field_vals.append(value)
                else:
                    field_prop_vals = []
                    cur_combo_count = len(item_row.fields.get(field.field_name).field_list)
                    cur_prop_count = len(field.properties)
                    for count, field_prop_combo in enumerate(item_row.fields.get(field.field_name).field_list):
                        for property in field.properties:
                            field_prop_vals.append(field_prop_combo.properties.get(property, None) or "")
                        field_prop_vals.append(field_prop_combo.field_value)
                    padding_list_len = (max_field_prop_combos[field.field_name] - cur_combo_count) * (cur_prop_count + 1)
                    field_prop_vals.extend(empty_padding_list(padding_list_len))
                    field_vals.extend(field_prop_vals)
            row = tuple(
                common_vals[2 if html_response else 0:] + field_vals + user_vals + group_vals
            )
            item_sheet["rows"].append(row)
        item_sheet["rows"] = tuple(item_sheet["rows"])
        excel_sheets[data_type.tag] = item_sheet

    if html_response:
        return excel_sheets

    header_groups = [("types", excel_sheets["types"]["headers"])]
    value_groups = [("types", excel_sheets["types"]["rows"])]
    for data_type in data_types_book:
        header_groups.append((data_type.tag, excel_sheets[data_type.tag]["headers"]))
        value_groups.append((data_type.tag, excel_sheets[data_type.tag]["rows"]))

    fd, path = tempfile.mkstemp()
    # write and read the workbook in binary mode so the xlsx payload
    # survives on all platforms, and close the handles when done
    with os.fdopen(fd, 'wb') as temp:
        export_raw(tuple(header_groups), tuple(value_groups), temp)
    format = Format.XLS_2007

    with open(path, 'rb') as fl:
        fileref = expose_download(
            fl.read(),
            60 * 10,
            mimetype=Format.from_format(format).mimetype,
            content_disposition='attachment; filename="%s_fixtures.xlsx"' % domain,
        )
    return json_response({"download_id": fileref.download_id})
Example 27
    def post(self, request, *args, **kwargs):
        """View's dispatch method automatically calls this"""
        upload = request.FILES.get('bulk_upload_file')
        try:
            self.workbook = WorkbookJSONReader(upload)
        except InvalidFileException:
            try:
                csv.DictReader(io.StringIO(upload.read().decode('ascii'),
                                           newline=None))
                return HttpResponseBadRequest(
                    "CommCare HQ no longer supports CSV upload. "
                    "Please convert to Excel 2007 or higher (.xlsx) "
                    "and try again."
                )
            except UnicodeDecodeError:
                return HttpResponseBadRequest("Unrecognized format")
        except JSONReaderError as e:
            messages.error(request,
                           'Your upload was unsuccessful. %s' % e.message)
            return self.get(request, *args, **kwargs)
        except HeaderValueError as e:
            return HttpResponseBadRequest("Upload encountered a data type error: %s"
                                          % e.message)

        try:
            self.user_specs = self.workbook.get_worksheet(title='users')
        except WorksheetNotFound:
            try:
                self.user_specs = self.workbook.get_worksheet()
            except WorksheetNotFound:
                return HttpResponseBadRequest("Workbook has no worksheets")

        try:
            self.group_specs = self.workbook.get_worksheet(title='groups')
        except WorksheetNotFound:
            self.group_specs = []

        self.location_specs = []
        if Domain.get_by_name(self.domain).commtrack_enabled:
            try:
                self.location_specs = self.workbook.get_worksheet(title='locations')
            except WorksheetNotFound:
                # if there is no sheet for locations (since this was added
                # later and is optional) we don't error
                pass

        try:
            check_headers(self.user_specs)
        except UserUploadError as e:
            return HttpResponseBadRequest(e)

        task_ref = expose_download(None, expiry=1*60*60)
        task = bulk_upload_async.delay(
            self.domain,
            list(self.user_specs),
            list(self.group_specs),
            list(self.location_specs)
        )
        task_ref.set_task(task)
        return HttpResponseRedirect(
            reverse(
                UserUploadStatusView.urlname,
                args=[self.domain, task_ref.download_id]
            )
        )
Example 28
def dump_users_and_groups(domain, download_id, user_filters, task):
    from corehq.apps.users.views.mobile.custom_data_fields import UserFieldsView

    def _load_memoizer(domain):
        group_memoizer = GroupMemoizer(domain=domain)
        # load groups manually instead of calling group_memoizer.load_all()
        # so that we can detect blank groups
        blank_groups = set()
        for group in Group.by_domain(domain):
            if group.name:
                group_memoizer.add_group(group)
            else:
                blank_groups.add(group)
        if blank_groups:
            raise GroupNameError(blank_groups=blank_groups)

        return group_memoizer

    writer = Excel2007ExportWriter(format_as_text=True)
    group_memoizer = _load_memoizer(domain)
    location_cache = LocationIdToSiteCodeCache(domain)

    users_groups_count = count_users_and_groups(domain, user_filters,
                                                group_memoizer)
    DownloadBase.set_progress(task, 0, users_groups_count)

    user_data_model = CustomDataFieldsDefinition.get_or_create(
        domain, UserFieldsView.field_type)

    user_headers, user_rows = parse_users(
        group_memoizer,
        domain,
        user_data_model,
        location_cache,
        user_filters,
        task,
        users_groups_count,
    )

    group_headers, group_rows = parse_groups(group_memoizer.groups)
    headers = [
        ('users', [user_headers]),
        ('groups', [group_headers]),
    ]
    rows = [
        ('users', user_rows),
        ('groups', group_rows),
    ]

    use_transfer = settings.SHARED_DRIVE_CONF.transfer_enabled
    filename = "{}_users_{}.xlsx".format(domain, uuid.uuid4().hex)
    file_path = get_download_file_path(use_transfer, filename)
    writer.open(
        header_table=headers,
        file=file_path,
    )
    writer.write(rows)
    writer.close()

    expose_download(use_transfer, file_path, filename, download_id, 'xlsx')
    DownloadBase.set_progress(task, users_groups_count, users_groups_count)
Example 29
def download_item_lists(request, domain, html_response=False):
    """
        Is used to serve excel_download and html_view for view_lookup_tables
    """
    table_ids = request.GET.getlist("table_id")
    if table_ids and table_ids[0]:
        try:
            data_types_view = [
                FixtureDataType.get(id)
                for id in request.GET.getlist("table_id")
            ]
        except ResourceNotFound:
            if html_response:
                messages.info(
                    request,
                    _("Sorry, we couldn't find that table. If you think this is a mistake please report an issue."
                      ))
                raise
            data_types_view = FixtureDataType.by_domain(domain)
    else:
        data_types_view = FixtureDataType.by_domain(domain)

    if html_response:
        data_types_view = list(data_types_view)[0:1]
    # book-keeping data from view_results for repeated use
    data_types_book = []
    data_items_book_by_type = {}
    item_helpers_by_type = {}
    """
        Contains all excel sheets in following format
        excel_sheets = {
            "types": {
                "headers": [],
                "rows": [(row), (row), (row)]
            }
            "next-sheet": {
                "headers": [],
                "rows": [(row), (row), (row)]
            },
            ...
        }
    """
    excel_sheets = {}

    def empty_padding_list(length):
        return ["" for x in range(0, length)]

    max_fields = 0
    max_item_attributes = 0
    """
        - Helper to generate headers like "field 2: property 1"
        - Captures max_num_of_properties for any field of any type at the list-index.
        Example values:
            [0, 1] -> "field 2: property 1" (first-field has zero-props, second has 1 property)
            [1, 1] -> "field 1: property 1" (first-field has 1 property, second has 1 property)
            [0, 2] -> "field 2: property 1", "field 2: property 2"
    """
    field_prop_count = []
    """
        captures all possible 'field-property' values for each data-type
        Example value
          {u'clinics': {'field 2 : property 1': u'lang'}, u'growth_chart': {'field 2 : property 2': u'maxWeight'}}
    """
    type_field_properties = {}
    get_field_prop_format = lambda x, y: "field " + str(
        x) + " : property " + str(y)
    for data_type in data_types_view:
        # Helpers to generate 'types' sheet
        type_field_properties[data_type.tag] = {}
        data_types_book.append(data_type)
        if len(data_type.fields) > max_fields:
            max_fields = len(data_type.fields)
        if len(data_type.item_attributes) > max_item_attributes:
            max_item_attributes = len(data_type.item_attributes)
        for index, field in enumerate(data_type.fields):
            if len(field_prop_count) <= index:
                field_prop_count.append(len(field.properties))
            elif field_prop_count[index] <= len(field.properties):
                field_prop_count[index] = len(field.properties)
            if len(field.properties) > 0:
                for prop_index, property in enumerate(field.properties):
                    prop_key = get_field_prop_format(index + 1, prop_index + 1)
                    type_field_properties[data_type.tag][prop_key] = property

        # Helpers to generate item-sheets
        data_items_book_by_type[data_type.tag] = []
        max_users = 0
        max_groups = 0
        max_field_prop_combos = {
            field_name: 0
            for field_name in data_type.fields_without_attributes
        }
        for item_row in FixtureDataItem.by_data_type(domain, data_type.get_id):
            data_items_book_by_type[data_type.tag].append(item_row)
            group_len = len(item_row.groups)
            max_groups = group_len if group_len > max_groups else max_groups
            user_len = len(item_row.users)
            max_users = user_len if user_len > max_users else max_users
            for field_key in item_row.fields:
                if field_key in max_field_prop_combos:
                    max_combos = max_field_prop_combos[field_key]
                    cur_combo_len = len(item_row.fields[field_key].field_list)
                    max_combos = cur_combo_len if cur_combo_len > max_combos else max_combos
                    max_field_prop_combos[field_key] = max_combos

        item_helpers = {
            "max_users": max_users,
            "max_groups": max_groups,
            "max_field_prop_combos": max_field_prop_combos,
        }
        item_helpers_by_type[data_type.tag] = item_helpers

    # Prepare 'types' sheet data
    types_sheet = {"headers": [], "rows": []}
    types_sheet["headers"] = [DELETE_HEADER, "table_id", 'is_global?']
    types_sheet["headers"].extend(
        ["field %d" % x for x in range(1, max_fields + 1)])
    types_sheet["headers"].extend(
        ["property %d" % x for x in range(1, max_item_attributes + 1)])
    field_prop_headers = []
    for field_num, prop_num in enumerate(field_prop_count):
        if prop_num > 0:
            for c in range(0, prop_num):
                prop_key = get_field_prop_format(field_num + 1, c + 1)
                field_prop_headers.append(prop_key)
                types_sheet["headers"].append(prop_key)

    for data_type in data_types_book:
        common_vals = ["N", data_type.tag, yesno(data_type.is_global)]
        field_vals = [
            field.field_name for field in data_type.fields
        ] + empty_padding_list(max_fields - len(data_type.fields))
        item_att_vals = data_type.item_attributes + empty_padding_list(
            max_item_attributes - len(data_type.item_attributes))
        prop_vals = []
        if data_type.tag in type_field_properties:
            props = type_field_properties.get(data_type.tag)
            prop_vals.extend(
                [props.get(key, "") for key in field_prop_headers])
        row = tuple(common_vals[2 if html_response else 0:] + field_vals +
                    item_att_vals + prop_vals)
        types_sheet["rows"].append(row)

    types_sheet["rows"] = tuple(types_sheet["rows"])
    types_sheet["headers"] = tuple(types_sheet["headers"])
    excel_sheets["types"] = types_sheet

    # Prepare 'items' sheet data for each data-type
    for data_type in data_types_book:
        item_sheet = {"headers": [], "rows": []}
        item_helpers = item_helpers_by_type[data_type.tag]
        max_users = item_helpers["max_users"]
        max_groups = item_helpers["max_groups"]
        max_field_prop_combos = item_helpers["max_field_prop_combos"]
        common_headers = ["UID", DELETE_HEADER]
        user_headers = ["user %d" % x for x in range(1, max_users + 1)]
        group_headers = ["group %d" % x for x in range(1, max_groups + 1)]
        field_headers = []
        item_att_headers = [
            "property: " + attribute for attribute in data_type.item_attributes
        ]
        for field in data_type.fields:
            if len(field.properties) == 0:
                field_headers.append("field: " + field.field_name)
            else:
                prop_headers = []
                for x in range(1, max_field_prop_combos[field.field_name] + 1):
                    for property in field.properties:
                        prop_headers.append("%(name)s: %(prop)s %(count)s" % {
                            "name": field.field_name,
                            "prop": property,
                            "count": x
                        })
                    prop_headers.append("field: %(name)s %(count)s" % {
                        "name": field.field_name,
                        "count": x
                    })
                field_headers.extend(prop_headers)
        item_sheet["headers"] = tuple(
            common_headers[2 if html_response else 0:] + field_headers +
            item_att_headers + user_headers + group_headers)
        excel_sheets[data_type.tag] = item_sheet
        for item_row in data_items_book_by_type[data_type.tag]:
            common_vals = [str(_id_from_doc(item_row)), "N"]
            user_vals = [
                user.raw_username for user in item_row.users
            ] + empty_padding_list(max_users - len(item_row.users))
            group_vals = [
                group.name for group in item_row.groups
            ] + empty_padding_list(max_groups - len(item_row.groups))
            field_vals = []
            item_att_vals = [
                item_row.item_attributes[attribute]
                for attribute in data_type.item_attributes
            ]
            for field in data_type.fields:
                if len(field.properties) == 0:
                    if any(item_row.fields.get(field.field_name).field_list):
                        value = item_row.fields.get(
                            field.field_name).field_list[0].field_value
                    else:
                        value = ""
                    field_vals.append(value)
                else:
                    field_prop_vals = []
                    cur_combo_count = len(
                        item_row.fields.get(field.field_name).field_list)
                    cur_prop_count = len(field.properties)
                    for count, field_prop_combo in enumerate(
                            item_row.fields.get(field.field_name).field_list):
                        for property in field.properties:
                            field_prop_vals.append(
                                field_prop_combo.properties.get(
                                    property, None) or "")
                        field_prop_vals.append(field_prop_combo.field_value)
                    padding_list_len = (
                        max_field_prop_combos[field.field_name] -
                        cur_combo_count) * (cur_prop_count + 1)
                    field_prop_vals.extend(
                        empty_padding_list(padding_list_len))
                    field_vals.extend(field_prop_vals)
            row = tuple(common_vals[2 if html_response else 0:] + field_vals +
                        item_att_vals + user_vals + group_vals)
            item_sheet["rows"].append(row)
        item_sheet["rows"] = tuple(item_sheet["rows"])
        excel_sheets[data_type.tag] = item_sheet

    if html_response:
        return excel_sheets

    header_groups = [("types", excel_sheets["types"]["headers"])]
    value_groups = [("types", excel_sheets["types"]["rows"])]
    for data_type in data_types_book:
        header_groups.append(
            (data_type.tag, excel_sheets[data_type.tag]["headers"]))
        value_groups.append(
            (data_type.tag, excel_sheets[data_type.tag]["rows"]))

    fd, path = tempfile.mkstemp()
    # write and read the workbook in binary mode so the xlsx payload
    # survives on all platforms, and close the handles when done
    with os.fdopen(fd, 'wb') as temp:
        export_raw(tuple(header_groups), tuple(value_groups), temp)
    format = Format.XLS_2007

    with open(path, 'rb') as fl:
        fileref = expose_download(
            fl.read(),
            60 * 10,
            mimetype=Format.from_format(format).mimetype,
            content_disposition='attachment; filename="%s_fixtures.xlsx"' % domain,
        )
    return json_response({"download_id": fileref.download_id})
Example 30
def excel_config(request, domain):
    if request.method == 'POST':
        if request.FILES:
            named_columns = request.POST['named_columns']
            uses_headers = named_columns == 'yes'
            uploaded_file_handle = request.FILES['file']

            extension = os.path.splitext(
                uploaded_file_handle.name)[1][1:].strip().lower()

            if extension in ExcelFile.ALLOWED_EXTENSIONS:
                # NOTE: this is kinda messy and needs to be cleaned up but
                # just trying to get something functional in place.
                # We may not always be able to reference files from subsequent
                # views if your worker changes, so we have to store it elsewhere
                # using the soil framework.

                # stash content in the default storage for subsequent views
                file_ref = expose_download(uploaded_file_handle.read(),
                                           expiry=1 * 60 * 60)
                request.session[EXCEL_SESSION_ID] = file_ref.download_id

                spreadsheet = _get_spreadsheet(file_ref, uses_headers)
                if not spreadsheet:
                    return _spreadsheet_expired(request, domain)
                columns = spreadsheet.get_header_columns()
                row_count = spreadsheet.get_num_rows()
                if row_count > MAX_ALLOWED_ROWS:
                    messages.error(
                        request,
                        _('Sorry, your spreadsheet is too big. '
                          'Please reduce the number of '
                          'rows to less than %s and try again') %
                        MAX_ALLOWED_ROWS)
                elif row_count == 0:
                    messages.error(
                        request,
                        'Your spreadsheet is empty. Please try again with a different spreadsheet.'
                    )
                else:
                    # get case types in this domain
                    case_types = []
                    for row in CommCareCase.view('hqcase/types_by_domain',
                                                 reduce=True,
                                                 group=True,
                                                 startkey=[domain],
                                                 endkey=[domain, {}]).all():
                        if not row['key'][1] in case_types:
                            case_types.append(row['key'][1])

                    if len(case_types) > 0:
                        return render(
                            request, "importer/excel_config.html", {
                                'named_columns': named_columns,
                                'columns': columns,
                                'case_types': case_types,
                                'domain': domain,
                                'report': {
                                    'name': 'Import: Configuration'
                                },
                                'slug': base.ImportCases.slug
                            })
                    else:
                        messages.error(
                            request,
                            _('No cases have been submitted to this domain. '
                              'You cannot update case details from an Excel '
                              'file until you have existing cases.'))
            else:
                messages.error(
                    request,
                    _('The Excel file you chose could not be processed. '
                      'Please check that it is saved as a Microsoft Excel '
                      '97/2000 .xls file.'))
        else:
            messages.error(request,
                           _('Please choose an Excel file to import.'))
    #TODO show bad/invalid file error on this page
    return HttpResponseRedirect(base.ImportCases.get_url(domain=domain))
Example 31
def excel_config(request, domain):
    if request.method != 'POST':
        return HttpResponseRedirect(base.ImportCases.get_url(domain=domain))

    if not request.FILES:
        return render_error(request, domain,
                            'Please choose an Excel file to import.')

    named_columns = request.POST.get('named_columns') == "on"
    uploaded_file_handle = request.FILES['file']

    extension = os.path.splitext(
        uploaded_file_handle.name)[1][1:].strip().lower()

    # NOTE: We may not always be able to reference files from subsequent
    # views if your worker changes, so we have to store it elsewhere
    # using the soil framework.

    if extension not in importer_util.ExcelFile.ALLOWED_EXTENSIONS:
        return render_error(
            request, domain,
            'The Excel file you chose could not be processed. '
            'Please check that it is saved as a Microsoft '
            'Excel 97/2000 .xls file.')

    # stash content in the default storage for subsequent views
    file_ref = expose_download(uploaded_file_handle.read(), expiry=1 * 60 * 60)
    request.session[EXCEL_SESSION_ID] = file_ref.download_id
    spreadsheet = importer_util.get_spreadsheet(file_ref, named_columns)

    if not spreadsheet:
        return _spreadsheet_expired(request, domain)

    columns = spreadsheet.get_header_columns()
    row_count = spreadsheet.get_num_rows()

    if row_count == 0:
        return render_error(
            request, domain, 'Your spreadsheet is empty. '
            'Please try again with a different spreadsheet.')

    case_types_from_apps = []
    # load types from all modules
    for row in ApplicationBase.view('app_manager/types_by_module',
                                    reduce=True,
                                    group=True,
                                    startkey=[domain],
                                    endkey=[domain, {}]).all():
        if row['key'][1] not in case_types_from_apps:
            case_types_from_apps.append(row['key'][1])

    case_types_from_cases = []
    # load types from all case records
    for row in CommCareCase.view('hqcase/types_by_domain',
                                 reduce=True,
                                 group=True,
                                 startkey=[domain],
                                 endkey=[domain, {}]).all():
        if row['key'][1] and row['key'][1] not in case_types_from_cases:
            case_types_from_cases.append(row['key'][1])

    # keep only case types that have case data but are no longer used by any app
    case_types_from_cases = [case_type for case_type in case_types_from_cases
                             if case_type not in case_types_from_apps]

    if not case_types_from_apps and not case_types_from_cases:
        return render_error(
            request, domain,
            'No cases have been submitted to this domain and there are no '
            'applications yet. You cannot import case details from an Excel '
            'file until you have existing cases or applications.')

    return render(
        request, "importer/excel_config.html", {
            'named_columns': named_columns,
            'columns': columns,
            'case_types_from_cases': case_types_from_cases,
            'case_types_from_apps': case_types_from_apps,
            'domain': domain,
            'report': {
                'name': 'Import: Configuration'
            },
            'slug': base.ImportCases.slug
        })
Example n. 32
from celery.task import task
from django.core.cache import cache

from corehq.apps.locations.bulk import import_locations
from corehq.apps.commtrack.bulk import import_stock_reports
from soil import DownloadBase
from soil.util import expose_download

@task
def import_locations_async(download_id, domain, file_ref_id, update_existing=False):
    """
    Asynchronously import locations. download_id is for showing
    the results to the user through soil. file_ref_id is also a
    download_id, but should be a pointer to the import file.
    """
    download_ref = DownloadBase.get(file_ref_id)
    with open(download_ref.get_filename(), 'rb') as f:
        results_msg = '\n'.join(import_locations(domain, f, update_existing))
    ref = expose_download(results_msg, 60*60*3)
    cache.set(download_id, ref)

@task
def import_stock_reports_async(download_id, domain, file_ref_id):
    """
    Same idea but for stock reports
    """
    download_ref = DownloadBase.get(file_ref_id)
    with open(download_ref.get_filename(), 'rb') as f:
        try:
            results = import_stock_reports(domain, f)
        except Exception as e:
            results = "ERROR: %s" % e
    ref = expose_download(results, 60*60*3, mimetype='text/csv')
    cache.set(download_id, ref)
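A minimal sketch of how a caller might wire these tasks up, using the same soil helpers shown above; the view name and form field are hypothetical:

import uuid

def import_locations_view(request, domain):  # hypothetical view
    # stash the uploaded bytes through soil so the worker can read them later
    file_ref = expose_download(request.FILES['locations_file'].read(),
                               expiry=60 * 60)
    # a separate download id tracks the results shown back to the user
    download_id = uuid.uuid4().hex
    import_locations_async.delay(download_id, domain, file_ref.download_id)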
Example n. 33
def dump_users_and_groups(domain, download_id, user_filters, task):
    from corehq.apps.users.views.mobile.custom_data_fields import UserFieldsView

    def _load_memoizer(domain):
        group_memoizer = GroupMemoizer(domain=domain)
        # load groups manually instead of calling group_memoizer.load_all()
        # so that we can detect blank groups
        blank_groups = set()
        for group in Group.by_domain(domain):
            if group.name:
                group_memoizer.add_group(group)
            else:
                blank_groups.add(group)
        if blank_groups:
            raise GroupNameError(blank_groups=blank_groups)

        return group_memoizer

    writer = Excel2007ExportWriter(format_as_text=True)
    group_memoizer = _load_memoizer(domain)
    location_cache = LocationIdToSiteCodeCache(domain)

    users_groups_count = count_users_and_groups(domain, user_filters, group_memoizer)
    DownloadBase.set_progress(task, 0, users_groups_count)

    user_data_model = CustomDataFieldsDefinition.get_or_create(
        domain,
        UserFieldsView.field_type
    )

    user_headers, user_rows = parse_users(
        group_memoizer,
        domain,
        user_data_model,
        location_cache,
        user_filters,
        task,
        users_groups_count,
    )

    group_headers, group_rows = parse_groups(group_memoizer.groups)
    headers = [
        ('users', [user_headers]),
        ('groups', [group_headers]),
    ]
    rows = [
        ('users', user_rows),
        ('groups', group_rows),
    ]

    use_transfer = settings.SHARED_DRIVE_CONF.transfer_enabled
    filename = "{}_users_{}.xlsx".format(domain, uuid.uuid4().hex)
    file_path = get_download_file_path(use_transfer, filename)
    writer.open(
        header_table=headers,
        file=file_path,
    )
    writer.write(rows)
    writer.close()

    expose_download(use_transfer, file_path, filename, download_id, 'xlsx')
    DownloadBase.set_progress(task, users_groups_count, users_groups_count)
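The writer consumes parallel (sheet_name, table) pairs; a minimal sketch of that shape, restating the calls from above with toy data (the column names and rows are hypothetical):

# Illustrative only: header tables and row tables are keyed by sheet name.
headers = [
    ('users', [['username', 'group', 'location']]),  # hypothetical columns
    ('groups', [['name', 'case_sharing']]),
]
rows = [
    ('users', [['jdoe', 'field-team', 'site-1']]),   # one inner list per row
    ('groups', [['field-team', 'True']]),
]
writer.open(header_table=headers, file=file_path)
writer.write(rows)
writer.close()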
Example n. 34
def excel_config(request, domain):
    """
    Step one of three.

    This is the initial POST, made when the user uploads the Excel file.

    named_columns:
        Whether the first row of the Excel sheet contains header
        strings for the columns. This defaults to True and arguably
        should not be an option at all, since the rest of the
        importer relies on having column headers.
    """
    if request.method != "POST":
        return HttpResponseRedirect(base.ImportCases.get_url(domain=domain))

    if not request.FILES:
        return render_error(request, domain, "Please choose an Excel file to import.")

    named_columns = request.POST.get("named_columns") == "on"
    uploaded_file_handle = request.FILES["file"]

    extension = os.path.splitext(uploaded_file_handle.name)[1][1:].strip().lower()

    # NOTE: We may not always be able to reference files from subsequent
    # views if your worker changes, so we have to store it elsewhere
    # using the soil framework.

    if extension not in importer_util.ExcelFile.ALLOWED_EXTENSIONS:
        return render_error(
            request,
            domain,
            "The Excel file you chose could not be processed. "
            "Please check that it is saved as a Microsoft "
            "Excel 97/2000 .xls file.",
        )

    # stash content in the default storage for subsequent views
    file_ref = expose_download(uploaded_file_handle.read(), expiry=1 * 60 * 60)
    request.session[EXCEL_SESSION_ID] = file_ref.download_id
    spreadsheet = importer_util.get_spreadsheet(file_ref, named_columns)

    if not spreadsheet:
        return _spreadsheet_expired(request, domain)

    columns = spreadsheet.get_header_columns()
    row_count = spreadsheet.get_num_rows()

    if row_count == 0:
        return render_error(
            request, domain, "Your spreadsheet is empty. " "Please try again with a different spreadsheet."
        )

    case_types_from_apps = []
    # load types from all modules
    for row in ApplicationBase.view(
        "app_manager/types_by_module", reduce=True, group=True, startkey=[domain], endkey=[domain, {}]
    ).all():
        if not row["key"][1] in case_types_from_apps:
            case_types_from_apps.append(row["key"][1])

    case_types_from_cases = []
    # load types from all case records
    for row in CommCareCase.view(
        "hqcase/types_by_domain", reduce=True, group=True, startkey=[domain], endkey=[domain, {}]
    ).all():
        if row["key"][1] and not row["key"][1] in case_types_from_cases:
            case_types_from_cases.append(row["key"][1])

    # keep only case types that have case data but are no longer used by any app;
    # use a list comprehension so len() keeps working (filter() returns an
    # iterator on Python 3)
    case_types_from_cases = [
        case_type for case_type in case_types_from_cases if case_type not in case_types_from_apps
    ]

    if not case_types_from_apps and not case_types_from_cases:
        return render_error(
            request,
            domain,
            "No cases have been submitted to this domain and there are no "
            "applications yet. You cannot import case details from an Excel "
            "file until you have existing cases or applications.",
        )

    return render(
        request,
        "importer/excel_config.html",
        {
            "named_columns": named_columns,
            "columns": columns,
            "case_types_from_cases": case_types_from_cases,
            "case_types_from_apps": case_types_from_apps,
            "domain": domain,
            "report": {"name": "Import: Configuration"},
            "slug": base.ImportCases.slug,
        },
    )
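Since this is step one of three, a later step has to recover the stashed spreadsheet from the session; a minimal sketch of that retrieval under the same soil/session conventions shown above (the step-two view name is hypothetical):

def excel_fields(request, domain):  # hypothetical step-two view
    download_id = request.session.get(EXCEL_SESSION_ID)
    file_ref = DownloadBase.get(download_id)  # None once the stash expires
    if file_ref is None:
        return _spreadsheet_expired(request, domain)
    named_columns = request.POST.get("named_columns") == "on"
    spreadsheet = importer_util.get_spreadsheet(file_ref, named_columns)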
Example n. 35
def excel_config(request, domain):
    if request.method != 'POST':
        return HttpResponseRedirect(base.ImportCases.get_url(domain=domain))

    if not request.FILES:
        return render_error(request, domain, 'Please choose an Excel file to import.')

    named_columns = request.POST.get('named_columns') == "on"
    uploaded_file_handle = request.FILES['file']

    extension = os.path.splitext(uploaded_file_handle.name)[1][1:].strip().lower()

    # NOTE: We may not always be able to reference files from subsequent
    # views if your worker changes, so we have to store it elsewhere
    # using the soil framework.

    if extension not in ExcelFile.ALLOWED_EXTENSIONS:
        return render_error(request, domain,
                            'The Excel file you chose could not be processed. '
                            'Please check that it is saved as a Microsoft '
                            'Excel 97/2000 .xls file.')

    # stash content in the default storage for subsequent views
    file_ref = expose_download(uploaded_file_handle.read(), expiry=1*60*60)
    request.session[EXCEL_SESSION_ID] = file_ref.download_id
    spreadsheet = importer_util.get_spreadsheet(file_ref, named_columns)

    if not spreadsheet:
        return _spreadsheet_expired(request, domain)

    columns = spreadsheet.get_header_columns()
    row_count = spreadsheet.get_num_rows()

    if row_count == 0:
        return render_error(request, domain,
                            'Your spreadsheet is empty. '
                            'Please try again with a different spreadsheet.')

    case_types_from_apps = []
    # load types from all modules
    for row in ApplicationBase.view('app_manager/types_by_module',
                                    reduce=True,
                                    group=True,
                                    startkey=[domain],
                                    endkey=[domain, {}]).all():
        if row['key'][1] not in case_types_from_apps:
            case_types_from_apps.append(row['key'][1])

    case_types_from_cases = []
    # load types from all case records
    for row in CommCareCase.view('hqcase/types_by_domain',
                                 reduce=True,
                                 group=True,
                                 startkey=[domain],
                                 endkey=[domain, {}]).all():
        if row['key'][1] not in case_types_from_cases:
            case_types_from_cases.append(row['key'][1])

    # keep only case types that have case data but are no longer used by any app
    case_types_from_cases = [case_type for case_type in case_types_from_cases
                             if case_type not in case_types_from_apps]

    if not case_types_from_apps and not case_types_from_cases:
        return render_error(request, domain,
                            'No cases have been submitted to this domain and there are no '
                            'applications yet. You cannot import case details from an Excel '
                            'file until you have existing cases or applications.')

    return render(request, "importer/excel_config.html", {
        'named_columns': named_columns,
        'columns': columns,
        'case_types_from_cases': case_types_from_cases,
        'case_types_from_apps': case_types_from_apps,
        'domain': domain,
        'report': {
            'name': 'Import: Configuration'
        },
        'slug': base.ImportCases.slug,
    })