def _get_saved_export_download_data(export_instance_id):
    prefix = DownloadBase.new_id_prefix
    download_id = '{}rebuild_export_tracker.{}'.format(prefix, export_instance_id)
    download_data = DownloadBase.get(download_id)
    if download_data is None:
        download_data = DownloadBase(download_id=download_id)
    return download_data
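# A usage sketch (assumed task name): the tracker returned above is typically
# attached to a celery task with set_task, the same DownloadBase pattern the
# excel_commit views below use. `rebuild_saved_export` is an assumption, not a
# task defined in this file.
def _start_export_rebuild(export_instance_id):
    download_data = _get_saved_export_download_data(export_instance_id)
    download_data.set_task(rebuild_saved_export.delay(export_instance_id))
    return download_data.download_id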
def get_download_context(download_id, message=None, require_result=False):
    """
    :param require_result: If set to True, is_ready will not be set to True unless
    result is also available.
    """
    download_data = DownloadBase.get(download_id)
    if download_data is None:
        download_data = DownloadBase(download_id=download_id)
    task = download_data.task

    task_status = get_task_status(
        task, is_multiple_download_task=isinstance(download_data, MultipleTaskDownload))
    if task_status.failed():
        # Celery replaces exceptions with a wrapped one that we can't directly import,
        # so the best option left is to match on the exception's name, hacky as that is
        exception_name = (task.result.__class__.__name__
                          if isinstance(task.result, Exception)
                          else None)
        raise TaskFailedError(task_status.error, exception_name=exception_name)
    if require_result:
        is_ready = task_status.success() and task_status.result is not None
    else:
        is_ready = task_status.success()
    return {
        'result': task_status.result,
        'error': task_status.error,
        'is_ready': is_ready,
        'is_alive': _is_alive(),
        'progress': task_status.progress._asdict(),
        'download_id': download_id,
        'allow_dropbox_sync': isinstance(download_data, FileDownload) and download_data.use_transfer,
        'has_file': download_data is not None and download_data.has_file,
        'custom_message': message,
    }
def get_download_context(download_id, message=None, require_result=False):
    """
    :param require_result: If set to True, is_ready will not be set to True unless
    result is also available.
    """
    download_data = DownloadBase.get(download_id)
    if download_data is None:
        download_data = DownloadBase(download_id=download_id)
    task = download_data.task

    task_status = get_task_status(
        task, is_multiple_download_task=isinstance(download_data, MultipleTaskDownload))
    if task_status.failed():
        raise TaskFailedError(task_status.error)
    if require_result:
        is_ready = task_status.success() and task_status.result is not None
    else:
        is_ready = task_status.success()
    return {
        'result': task_status.result,
        'error': task_status.error,
        'is_ready': is_ready,
        'is_alive': is_alive() if heartbeat_enabled() else True,
        'progress': task_status.progress._asdict(),
        'download_id': download_id,
        'allow_dropbox_sync': isinstance(download_data, FileDownload) and download_data.use_transfer,
        'has_file': download_data is not None and download_data.has_file,
        'custom_message': message,
    }
def fixture_upload_async(domain, download_id, replace, skip_orm, user_email=None):
    task = fixture_upload_async
    DownloadBase.set_progress(task, 0, 100)
    download_ref = DownloadBase.get(download_id)
    time_start = datetime.datetime.now()
    result = upload_fixture_file(domain, download_ref.get_filename(), replace, task, skip_orm)
    time_end = datetime.datetime.now()
    DownloadBase.set_progress(task, 100, 100)
    messages = {
        'success': result.success,
        'messages': result.messages,
        'errors': result.errors,
        'number_of_fixtures': result.number_of_fixtures
    }
    if user_email:
        send_upload_fixture_complete_email(user_email, domain, time_start, time_end, messages)
    return {
        'messages': messages,
    }
def ajax_job_poll(request, download_id, template="soil/partials/dl_status.html"):
    download_data = DownloadBase.get(download_id)
    if download_data is None:
        download_data = DownloadBase(download_id=download_id)
    is_ready = False
    try:
        if download_data.task.failed():
            return HttpResponseServerError()
    except (TypeError, NotImplementedError):
        # no result backend / improperly configured
        pass
    else:
        is_ready = True

    alive = True
    if heartbeat_enabled():
        alive = is_alive()

    context = RequestContext(request)
    context['is_ready'] = is_ready
    context['is_alive'] = alive
    context['progress'] = download_data.get_progress()
    context['download_id'] = download_id
    return render_to_response(template, context_instance=context)
def fixture_upload_async(domain, download_id, replace):
    task = fixture_upload_async
    DownloadBase.set_progress(task, 0, 100)
    download_ref = DownloadBase.get(download_id)
    result = upload_fixture_file(domain, download_ref.get_filename(), replace, task)
    DownloadBase.set_progress(task, 100, 100)
    return {"messages": result}
def bulk_import_async(import_id, config, domain, excel_id):
    excel_ref = DownloadBase.get(excel_id)
    spreadsheet = importer_util.get_spreadsheet(excel_ref, config.named_columns)
    result = do_import(spreadsheet, config, domain, task=bulk_import_async)
    # return compatible with soil
    return {"messages": result}
def get_download_context(download_id, message=None, require_result=False):
    """
    :param require_result: If set to True, is_ready will not be set to True unless
    result is also available.
    """
    download_data = DownloadBase.get(download_id)
    if download_data is None:
        download_data = DownloadBase(download_id=download_id)
    task = download_data.task

    task_status = get_task_status(
        task, is_multiple_download_task=isinstance(download_data, MultipleTaskDownload))
    if task_status.failed():
        # Celery replaces exceptions with a wrapped one that we can't directly import,
        # so the best option left is to match on the exception's name, hacky as that is
        exception_name = (task.result.__class__.__name__
                          if isinstance(task.result, Exception)
                          else None)
        raise TaskFailedError(task_status.error, exception_name=exception_name)
    if require_result:
        is_ready = task_status.success() and task_status.result is not None
    else:
        is_ready = task_status.success()
    return {
        'result': task_status.result,
        'error': task_status.error,
        'is_ready': is_ready,
        'is_alive': is_alive() if heartbeat_enabled() else True,
        'progress': task_status.progress._asdict(),
        'download_id': download_id,
        'allow_dropbox_sync': isinstance(download_data, FileDownload) and download_data.use_transfer,
        'has_file': download_data is not None and download_data.has_file,
        'custom_message': message,
    }
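# A minimal polling-view sketch over the variant above, mirroring the
# ajax_job_poll pattern that appears later in this file; the view name and
# template default are assumptions.
def poll_download(request, download_id, template="soil/partials/dl_status.html"):
    try:
        context = get_download_context(download_id, message=request.GET.get('message'))
    except TaskFailedError as e:
        return HttpResponseServerError(render(request, template, {'error': e.errors}))
    return render(request, template, context)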
def import_products_async(domain, file_ref_id):
    task = import_products_async
    DownloadBase.set_progress(task, 0, 100)
    download_ref = DownloadBase.get(file_ref_id)
    results = import_products(domain, download_ref, task)
    DownloadBase.set_progress(task, 100, 100)
    return {'messages': results}
def retrieve_download(request, download_id, template="soil/file_download.html", extra_context=None):
    """
    Retrieve a download that's waiting to be generated.
    If 'get_file' is in the request, download the file directly;
    otherwise, let the ajax on the page poll.
    """
    context = RequestContext(request)
    if extra_context:
        context.update(extra_context)
    context['download_id'] = download_id
    if 'get_file' in request.GET:
        download = DownloadBase.get(download_id)
        if download is None:
            logging.error("Download file request for expired/nonexistent file requested")
            raise Http404
        if download.owner_ids and request.couch_user.get_id not in download.owner_ids:
            return HttpResponseForbidden(_(
                "You do not have access to this file. It can only be downloaded by the user who created it"
            ))
        return download.toHttpResponse()
    return render(request, template, context=context.flatten())
def location_importer_job_poll(request, domain, download_id,
                               template="locations/manage/partials/status.html"):
    download_data = DownloadBase.get(download_id)
    is_ready = False
    if download_data is None:
        download_data = DownloadBase(download_id=download_id)

    try:
        if download_data.task.failed():
            return HttpResponseServerError()
    except (TypeError, NotImplementedError):
        # no result backend / improperly configured
        pass

    alive = True
    if heartbeat_enabled():
        alive = is_alive()

    context = RequestContext(request)

    if download_data.task.state == 'SUCCESS':
        is_ready = True
        context['result'] = download_data.task.result.get('messages')

    context['is_ready'] = is_ready
    context['is_alive'] = alive
    context['progress'] = download_data.get_progress()
    context['download_id'] = download_id
    return render_to_response(template, context_instance=context)
def get_download_context(download_id, check_state=False, message=None, require_result=False):
    """
    :param require_result: If set to True, is_ready will not be set to True unless
    result is also available. If check_state=False, this is ignored.
    """
    is_ready = False
    context = {}
    download_data = DownloadBase.get(download_id)
    context['has_file'] = download_data is not None and download_data.has_file
    if download_data is None:
        download_data = DownloadBase(download_id=download_id)

    if isinstance(download_data, MultipleTaskDownload):
        if download_data.task.ready():
            context['result'], context['error'] = _get_download_context_multiple_tasks(download_data)
    else:
        try:
            if download_data.task.failed():
                raise TaskFailedError()
        except (TypeError, NotImplementedError):
            # no result backend / improperly configured
            pass
        else:
            if not check_state:
                is_ready = True
            elif download_data.task.successful():
                is_ready = True
                result = download_data.task.result
                context['result'] = result and result.get('messages')
                if result and result.get('errors'):
                    raise TaskFailedError(result.get('errors'))

    alive = True
    if heartbeat_enabled():
        alive = is_alive()
    progress = download_data.get_progress()

    def progress_complete():
        return (
            getattr(settings, 'CELERY_ALWAYS_EAGER', False) or
            (progress.get('percent', 0) == 100 and
             not progress.get('error', False))
        )

    context['is_ready'] = is_ready or progress_complete()
    if check_state and require_result:
        context['is_ready'] = context['is_ready'] and context.get('result') is not None
    context['is_alive'] = alive
    context['progress'] = progress
    context['download_id'] = download_id
    context['allow_dropbox_sync'] = isinstance(download_data, FileDownload) and download_data.use_transfer
    context['custom_message'] = message
    return context
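# A hedged usage sketch for the variant above: with check_state=True, is_ready
# tracks task success rather than progress alone, and require_result=True
# additionally demands a non-None result. The helper name is illustrative.
def _example_poll_once(download_id):
    context = get_download_context(download_id, check_state=True, require_result=True)
    if context['is_ready']:
        return context['result']
    return None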
def __init__(self, task, file_ref_id):
    self.task = task
    self.progress = 0
    if self.task:
        DownloadBase.set_progress(self.task, 0, 100)
    download_ref = DownloadBase.get(file_ref_id)
    self.workbook = WorkbookJSONReader(download_ref.get_filename())
def import_products_async(domain, file_ref_id):
    task = import_products_async
    DownloadBase.set_progress(task, 0, 100)
    download_ref = DownloadBase.get(file_ref_id)
    results = import_products(domain, download_ref, task)
    DownloadBase.set_progress(task, 100, 100)
    return {
        'messages': results
    }
def fixture_upload_async(domain, download_id, replace):
    task = fixture_upload_async
    DownloadBase.set_progress(task, 0, 100)
    download_ref = DownloadBase.get(download_id)
    result = safe_fixture_upload(domain, download_ref, replace, task)
    DownloadBase.set_progress(task, 100, 100)
    return {
        'messages': result,
    }
def import_stock_reports_async(download_id, domain, file_ref_id):
    """
    Same idea but for stock reports
    """
    download_ref = DownloadBase.get(file_ref_id)
    with open(download_ref.get_filename(), 'rb') as f:
        try:
            results = import_stock_reports(domain, f)
        except Exception as e:
            results = "ERROR: %s" % e
def import_locations_async(download_id, domain, file_ref_id, update_existing=False):
    """
    Asynchronously import locations. download_id is for showing the results to the
    user through soil. file_ref_id is also a download_id, but should be a pointer
    to the import file.
    """
    download_ref = DownloadBase.get(file_ref_id)
    with open(download_ref.get_filename(), 'rb') as f:
        results_msg = '\n'.join(import_locations(domain, f, update_existing))
        ref = expose_download(results_msg, 60 * 60 * 3)
        cache.set(download_id, ref)
def bulk_import_async(import_id, config, domain, excel_id):
    excel_ref = DownloadBase.get(excel_id)
    try:
        spreadsheet_or_error = importer_util.get_spreadsheet(excel_ref, config.named_columns)
    except ImporterError as e:
        # presumably handled downstream: do_import receives the error in place
        # of a spreadsheet. Assigning to a separate name avoids the Python 3
        # behavior of unbinding the `except ... as` target after the block.
        spreadsheet_or_error = e
    result = do_import(spreadsheet_or_error, config, domain, task=bulk_import_async)
    # return compatible with soil
    return {"messages": result}
def __init__(self, task, file_ref_id):
    self.task = task
    self.progress = 0
    if self.task:
        DownloadBase.set_progress(self.task, 0, 100)
    download_ref = DownloadBase.get(file_ref_id)
    if download_ref is None:
        raise UnknownFileRefException(
            "Could not find file with ref %s. It may have expired" % file_ref_id)
    self.workbook = WorkbookJSONReader(download_ref.get_filename())
def _get_upload_file(self):
    saved_file = StringIO.StringIO()
    try:
        saved_ref = DownloadBase.get(self.processing_id)
        data = saved_ref.get_content()
    except Exception as e:
        self.mark_with_error(_("Could not fetch cached bulk upload file. Error: %s." % e))
        return
    saved_file.write(data)
    saved_file.seek(0)
    return saved_file
def excel_commit(request, domain):
    """
    Step three of three.

    This page is submitted with the list of column to case property mappings
    for this upload.

    The config variable is an ImporterConfig object that has everything gathered
    from previous steps, with the addition of all the field data. See that class
    for more information.
    """
    config = importer_util.ImporterConfig.from_request(request)
    excel_id = request.session.get(EXCEL_SESSION_ID)
    excel_ref = DownloadBase.get(excel_id)
    spreadsheet = importer_util.get_spreadsheet(excel_ref, config.named_columns)
    if not spreadsheet:
        return _spreadsheet_expired(request, domain)
    if spreadsheet.has_errors:
        messages.error(request, _('The session containing the file you '
                                  'uploaded has expired - please upload '
                                  'a new one.'))
        return HttpResponseRedirect(base.ImportCases.get_url(domain=domain) + "?error=cache")

    download = DownloadBase()
    download.set_task(bulk_import_async.delay(
        download.download_id,
        config,
        domain,
        excel_id,
    ))
    try:
        del request.session[EXCEL_SESSION_ID]
    except KeyError:
        pass

    return render(
        request,
        "importer/excel_commit.html", {
            'download_id': download.download_id,
            'template': 'importer/partials/import_status.html',
            'domain': domain,
            'report': {
                'name': 'Import: Completed'
            },
            'slug': base.ImportCases.slug
        }
    )
def _get_upload_file(self):
    saved_file = io.BytesIO()
    try:
        saved_ref = DownloadBase.get(self.processing_id)
        data = saved_ref.get_content()
    except Exception as e:
        self.mark_with_error(_("Could not fetch cached bulk upload file. Error: %s." % e))
        return
    saved_file.write(data)
    saved_file.seek(0)
    return saved_file
def import_locations_async(domain, file_ref_id):
    task = import_locations_async
    DownloadBase.set_progress(task, 0, 100)
    download_ref = DownloadBase.get(file_ref_id)
    workbook = WorkbookJSONReader(download_ref.get_filename())
    worksheets = workbook.worksheets
    results = list(import_locations(domain, worksheets, task))
    DownloadBase.set_progress(task, 100, 100)
    return {'messages': results}
def fixture_upload_async(domain, download_id, replace, skip_orm):
    task = fixture_upload_async
    DownloadBase.set_progress(task, 0, 100)
    download_ref = DownloadBase.get(download_id)
    result = upload_fixture_file(domain, download_ref.get_filename(), replace, task, skip_orm)
    DownloadBase.set_progress(task, 100, 100)
    return {
        'messages': {
            'success': result.success,
            'messages': result.messages,
            'errors': result.errors,
            'number_of_fixtures': result.number_of_fixtures,
        },
    }
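# View-side wiring sketch for the task above, assuming soil's expose_download
# helper as used by import_locations_async elsewhere in this file (content plus
# a positional expiry in seconds); the function name and argument flow are
# illustrative, not from the original code.
def _start_fixture_upload(domain, uploaded_file, replace, skip_orm):
    file_ref = expose_download(uploaded_file.read(), 60 * 60)
    download = DownloadBase()
    download.set_task(fixture_upload_async.delay(
        domain,
        file_ref.download_id,
        replace,
        skip_orm,
    ))
    return download.download_id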
def bulk_import_async(config, domain, excel_id):
    excel_ref = DownloadBase.get(excel_id)
    try:
        spreadsheet = importer_util.get_spreadsheet(excel_ref, config.named_columns)
    except Exception as e:
        return {'errors': get_importer_error_message(e)}
    try:
        result = do_import(spreadsheet, config, domain, task=bulk_import_async)
    except Exception as e:
        return {'errors': 'Error: ' + str(e)}
    # return compatible with soil
    return {'messages': result}
def import_locations_async(domain, file_ref_id):
    task = import_locations_async
    DownloadBase.set_progress(task, 0, 100)
    download_ref = DownloadBase.get(file_ref_id)
    workbook = WorkbookJSONReader(download_ref.get_filename())
    worksheets = workbook.worksheets
    results = list(import_locations(domain, worksheets, task))
    DownloadBase.set_progress(task, 100, 100)
    return {
        'messages': results
    }
def get_download_context(download_id, check_state=False, message=None):
    is_ready = False
    context = {}
    download_data = DownloadBase.get(download_id)
    context['has_file'] = download_data is not None and download_data.has_file
    if download_data is None:
        download_data = DownloadBase(download_id=download_id)

    if isinstance(download_data, MultipleTaskDownload):
        if download_data.task.ready():
            context['result'], context['error'] = _get_download_context_multiple_tasks(download_data)
    else:
        try:
            if download_data.task.failed():
                raise TaskFailedError()
        except (TypeError, NotImplementedError):
            # no result backend / improperly configured
            pass
        else:
            if not check_state:
                is_ready = True
            elif download_data.task.successful():
                is_ready = True
                result = download_data.task.result
                context['result'] = result and result.get('messages')
                if result and result.get('errors'):
                    raise TaskFailedError(result.get('errors'))

    alive = True
    if heartbeat_enabled():
        alive = is_alive()
    progress = download_data.get_progress()

    def progress_complete():
        return (
            getattr(settings, 'CELERY_ALWAYS_EAGER', False) or
            (progress.get('percent', 0) == 100 and
             not progress.get('error', False))
        )

    context['is_ready'] = is_ready or progress_complete()
    context['is_alive'] = alive
    context['progress'] = progress
    context['download_id'] = download_id
    context['allow_dropbox_sync'] = isinstance(download_data, FileDownload) and download_data.use_transfer
    context['custom_message'] = message
    return context
def direct_ccz(request, domain):
    if 'app_id' in request.GET:
        app = get_app(domain, request.GET['app_id'])
        app.set_media_versions(None)
        download = DownloadBase()
        build_application_zip(
            include_multimedia_files=False,
            include_index_files=True,
            app=app,
            download_id=download.download_id,
            compress_zip=True,
            filename='{}.ccz'.format(slugify(app.name)),
        )
        return DownloadBase.get(download.download_id).toHttpResponse()
    msg = "You must specify `app_id` in your GET parameters"
    return json_response({'status': 'error', 'message': msg}, status_code=400)
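# Example request for the view above (hypothetical domain and app id):
#
#   GET /a/demo/direct_ccz/?app_id=0123abc
#
# Without app_id, the view returns the 400 JSON error body built above.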
def excel_commit(request, domain):
    """
    Step three of three.

    This page is submitted with the list of column to case property mappings
    for this upload.

    The config variable is an ImporterConfig object that has everything gathered
    from previous steps, with the addition of all the field data. See that class
    for more information.
    """
    config = importer_util.ImporterConfig.from_request(request)
    excel_id = request.session.get(EXCEL_SESSION_ID)
    excel_ref = DownloadBase.get(excel_id)
    try:
        importer_util.get_spreadsheet(excel_ref, config.named_columns)
    except ImporterError as e:
        return render_error(request, domain, _get_importer_error_message(e))

    download = DownloadBase()
    download.set_task(bulk_import_async.delay(
        download.download_id,
        config,
        domain,
        excel_id,
    ))
    try:
        del request.session[EXCEL_SESSION_ID]
    except KeyError:
        pass

    return render(
        request,
        "importer/excel_commit.html", {
            'download_id': download.download_id,
            'template': 'importer/partials/import_status.html',
            'domain': domain,
            'report': {
                'name': 'Import: Completed'
            },
            'slug': base.ImportCases.slug
        }
    )
def retrieve_download(request, download_id, template="soil/file_download.html"):
    """
    Retrieve a download that's waiting to be generated.
    If 'get_file' is in the request, download the file directly;
    otherwise, let the ajax on the page poll.
    """
    context = RequestContext(request)
    context['download_id'] = download_id

    do_download = 'get_file' in request.GET
    if do_download:
        download = DownloadBase.get(download_id)
        if download is None:
            logging.error("Download file request for expired/nonexistent file requested")
            raise Http404
        else:
            return download.toHttpResponse()

    return render_to_response(template, context_instance=context)
def excel_commit(request, domain):
    """
    Step three of three.

    This page is submitted with the list of column to case property mappings
    for this upload.

    The config variable is an ImporterConfig object that has everything gathered
    from previous steps, with the addition of all the field data. See that class
    for more information.
    """
    config = importer_util.ImporterConfig.from_request(request)
    excel_id = request.session.get(EXCEL_SESSION_ID)
    excel_ref = DownloadBase.get(excel_id)
    try:
        importer_util.get_spreadsheet(excel_ref, config.named_columns)
    except ImporterError as e:
        return render_error(request, domain, get_importer_error_message(e))

    download = DownloadBase()
    download.set_task(bulk_import_async.delay(
        config,
        domain,
        excel_id,
    ))
    try:
        del request.session[EXCEL_SESSION_ID]
    except KeyError:
        pass

    return render(
        request,
        "importer/excel_commit.html", {
            'download_id': download.download_id,
            'template': 'importer/partials/import_status.html',
            'domain': domain,
            'report': {
                'name': 'Import: Completed'
            },
            'slug': base.ImportCases.slug
        }
    )
def dropbox_upload(request, download_id):
    download = DownloadBase.get(download_id)
    if download is None:
        logging.error("Download file request for expired/nonexistent file requested")
        raise Http404
    if download.owner_ids and request.couch_user.get_id not in download.owner_ids:
        return no_permissions(request, message=_(
            "You do not have access to this file. It can only be uploaded to dropbox "
            "by the user who created it"
        ))
    filename = download.get_filename()
    # Hack to get target filename from content disposition
    match = re.search('filename="([^"]*)"', download.content_disposition)
    dest = match.group(1) if match else 'download.txt'
    try:
        uploader = DropboxUploadHelper.create(
            request.session.get(DROPBOX_ACCESS_TOKEN),
            src=filename,
            dest=dest,
            download_id=download_id,
            user=request.user,
        )
    except DropboxInvalidToken:
        return HttpResponseRedirect(reverse(DropboxAuthInitiate.slug))
    except DropboxUploadAlreadyInProgress:
        uploader = DropboxUploadHelper.objects.get(download_id=download_id)
        messages.warning(
            request,
            'The file is in the process of being synced to dropbox! It is {0:.2f}% '
            'complete.'.format(uploader.progress * 100)
        )
        return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))

    uploader.upload()

    messages.success(
        request,
        _("Apps/{app}/{dest} is queued to sync to dropbox! You will receive an email when it"
          " completes.".format(app=settings.DROPBOX_APP_NAME, dest=dest))
    )
    return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
def excel_commit(request, domain):
    config = importer_util.ImporterConfig.from_request(request)
    excel_id = request.session.get(EXCEL_SESSION_ID)
    excel_ref = DownloadBase.get(excel_id)
    spreadsheet = importer_util.get_spreadsheet(excel_ref, config.named_columns)
    if not spreadsheet:
        return _spreadsheet_expired(request, domain)
    if spreadsheet.has_errors:
        messages.error(request, _('The session containing the file you '
                                  'uploaded has expired - please upload '
                                  'a new one.'))
        return HttpResponseRedirect(base.ImportCases.get_url(domain=domain) + "?error=cache")

    download = DownloadBase()
    download.set_task(bulk_import_async.delay(
        download.download_id,
        config,
        domain,
        excel_id,
    ))
    try:
        del request.session[EXCEL_SESSION_ID]
    except KeyError:
        pass

    return render(
        request,
        "importer/excel_commit.html", {
            'download_id': download.download_id,
            'template': 'importer/partials/import_status.html',
            'domain': domain,
            'report': {
                'name': 'Import: Completed'
            },
            'slug': base.ImportCases.slug
        })
def get(self, request, *args, **kwargs):
    download_id = request.GET.get('download_id')
    download = DownloadBase.get(download_id)

    if download is None:
        return json_response({
            'download_id': download_id,
            'progress': None,
        })

    status = get_task_status(download.task)
    return json_response({
        'download_id': download_id,
        'success': status.success(),
        'failed': status.failed(),
        'missing': status.missing(),
        'not_started': status.not_started(),
        'progress': status.progress._asdict(),
    })
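# A client-side polling sketch against the status view above, using the JSON
# shape it returns. The endpoint URL is an assumption, and `requests` is a
# third-party dependency not otherwise used in this file.
import time

import requests


def wait_for_task(status_url, download_id, poll_seconds=2):
    # Poll until the download is missing, succeeds, or fails, then return the payload.
    while True:
        payload = requests.get(status_url, params={'download_id': download_id}).json()
        if payload['progress'] is None or payload['success'] or payload['failed']:
            return payload
        time.sleep(poll_seconds)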
def retrieve_download(request, download_id, template="soil/file_download.html", extra_context=None):
    """
    Retrieve a download that's waiting to be generated.
    If 'get_file' is in the request, download the file directly;
    otherwise, let the ajax on the page poll.
    """
    context = RequestContext(request)
    if extra_context:
        context.update(extra_context)
    context['download_id'] = download_id
    if 'get_file' in request.GET:
        download = DownloadBase.get(download_id)
        if download is None:
            logging.error("Download file request for expired/nonexistent file requested")
            raise Http404
        return download.toHttpResponse()
    return render_to_response(template, context=context.flatten())
def importer_job_poll(request, domain, download_id,
                      template="importer/partials/import_status.html"):
    download_data = DownloadBase.get(download_id)
    is_ready = False

    if download_data is None:
        download_data = DownloadBase(download_id=download_id)

    try:
        if download_data.task.failed():
            return HttpResponseServerError()
    except (TypeError, NotImplementedError):
        # no result backend / improperly configured
        pass

    alive = True
    if heartbeat_enabled():
        alive = is_alive()

    context = RequestContext(request)

    if download_data.task.result and 'error' in download_data.task.result:
        error = download_data.task.result['error']
        if error == 'EXPIRED':
            return _spreadsheet_expired(request, domain)
        elif error == 'HAS_ERRORS':
            messages.error(request, _('The session containing the file you '
                                      'uploaded has expired - please upload '
                                      'a new one.'))
            return HttpResponseRedirect(base.ImportCases.get_url(domain=domain) + "?error=cache")

    if download_data.task.state == 'SUCCESS':
        is_ready = True
        context['result'] = download_data.task.result

    context['is_ready'] = is_ready
    context['is_alive'] = alive
    context['progress'] = download_data.get_progress()
    context['download_id'] = download_id
    return render_to_response(template, context_instance=context)
def ajax_job_poll(request, download_id, template="soil/partials/dl_status.html"):
    message = request.GET['message'] if 'message' in request.GET else None
    try:
        context = get_download_context(download_id, message=message)
    except TaskFailedError as e:
        context = {
            'error': list(e.errors) if e.errors else [_("An error occurred during the download.")]
        }
        return HttpResponseServerError(render(request, template, context))

    download = DownloadBase.get(download_id)
    if download and download.owner_ids and request.couch_user.get_id not in download.owner_ids:
        return HttpResponseForbidden(_("You do not have access to this file"))

    return render(request, template, context)
def __init__(self, task, file_ref_id):
    self.start = self.last_update = datetime.now()
    self.task = task
    self.progress = 0
    self.total_rows = 100

    if getattr(settings, 'CELERY_ALWAYS_EAGER', False):
        # Log progress since tasks are executed synchronously when
        # CELERY_ALWAYS_EAGER is true
        self.log = logging.getLogger(__name__).info
    else:
        self.log = lambda *a, **k: None

    if self.task:
        DownloadBase.set_progress(self.task, 0, 100)

    download_ref = DownloadBase.get(file_ref_id)
    if download_ref is None:
        raise UnknownFileRefException(
            "Could not find file with ref %s. It may have expired" % file_ref_id)
    self.workbook = WorkbookJSONReader(download_ref.get_filename())
def __init__(self, task, file_ref_id):
    self.start = self.last_update = datetime.now()
    self.task = task
    self.progress = 0
    self.total_rows = 100

    if getattr(settings, 'CELERY_TASK_ALWAYS_EAGER', False):
        # Log progress since tasks are executed synchronously when
        # CELERY_TASK_ALWAYS_EAGER is true
        self.log = logging.getLogger(__name__).info
    else:
        self.log = lambda *a, **k: None

    if self.task:
        DownloadBase.set_progress(self.task, 0, 100)

    download_ref = DownloadBase.get(file_ref_id)
    if download_ref is None:
        raise UnknownFileRefException(
            "Could not find file with ref %s. It may have expired" % file_ref_id)
    self.workbook = WorkbookJSONReader(download_ref.get_filename())
def get_download_context(download_id, check_state=False):
    is_ready = False
    context = {}
    download_data = DownloadBase.get(download_id)
    context['has_file'] = bool(download_data)
    if download_data is None:
        download_data = DownloadBase(download_id=download_id)
    try:
        if download_data.task.failed():
            raise TaskFailedError()
    except (TypeError, NotImplementedError):
        # no result backend / improperly configured
        pass
    else:
        if not check_state:
            is_ready = True
        elif download_data.task.state == 'SUCCESS':
            is_ready = True
            result = download_data.task.result
            context['result'] = result and result.get('messages')
            if result and result.get('errors'):
                raise TaskFailedError(result.get('errors'))

    alive = True
    if heartbeat_enabled():
        alive = is_alive()
    progress = download_data.get_progress()

    def progress_complete():
        return (
            getattr(settings, 'CELERY_ALWAYS_EAGER', False) and
            progress.get('percent', 0) == 100 and
            not progress.get('error', False)
        )

    context['is_ready'] = is_ready or progress_complete()
    context['is_alive'] = alive
    context['progress'] = progress
    context['download_id'] = download_id
    return context
def retrieve_download(request, download_id, template="soil/file_download.html"):
    """
    Retrieve a download that's waiting to be generated.
    If 'get_file' is in the request, download the file directly;
    otherwise, let the ajax on the page poll.
    """
    context = RequestContext(request)
    context['download_id'] = download_id

    do_download = 'get_file' in request.GET
    if do_download:
        download = DownloadBase.get(download_id)
        if download is None:
            logging.error("Download file request for expired/nonexistent file requested")
            raise Http404
        else:
            return download.toHttpResponse()

    return render_to_response(template, context_instance=context)
def importer_job_poll(request, domain, download_id,
                      template="importer/partials/import_status.html"):
    download_data = DownloadBase.get(download_id)
    is_ready = False

    if download_data is None:
        download_data = DownloadBase(download_id=download_id)

    try:
        if download_data.task.failed():
            return HttpResponseServerError()
    except (TypeError, NotImplementedError):
        # no result backend / improperly configured
        pass

    alive = True
    if heartbeat_enabled():
        alive = is_alive()

    context = RequestContext(request)

    if download_data.task.result and 'error' in download_data.task.result:
        error = download_data.task.result['error']
        if error == 'EXPIRED':
            return _spreadsheet_expired(request, domain)
        elif error == 'HAS_ERRORS':
            messages.error(request, _('The session containing the file you '
                                      'uploaded has expired - please upload '
                                      'a new one.'))
            return HttpResponseRedirect(base.ImportCases.get_url(domain=domain) + "?error=cache")

    if download_data.task.state == 'SUCCESS':
        is_ready = True
        context['result'] = download_data.task.result

    context['is_ready'] = is_ready
    context['is_alive'] = alive
    context['progress'] = download_data.get_progress()
    context['download_id'] = download_id
    context['url'] = base.ImportCases.get_url(domain=domain)
    return render_to_response(template, context_instance=context)
def dropbox_upload(request, download_id):
    download = DownloadBase.get(download_id)
    if download is None:
        logging.error("Download file request for expired/nonexistent file requested")
        raise Http404
    else:
        filename = download.get_filename()
        # Hack to get target filename from content disposition
        match = re.search('filename="([^"]*)"', download.content_disposition)
        dest = match.group(1) if match else 'download.txt'
        try:
            uploader = DropboxUploadHelper.create(
                request.session.get(DROPBOX_ACCESS_TOKEN),
                src=filename,
                dest=dest,
                download_id=download_id,
                user=request.user,
            )
        except DropboxInvalidToken:
            return HttpResponseRedirect(reverse(DropboxAuthInitiate.slug))
        except DropboxUploadAlreadyInProgress:
            uploader = DropboxUploadHelper.objects.get(download_id=download_id)
            messages.warning(
                request,
                'The file is in the process of being synced to dropbox! It is {0:.2f}% '
                'complete.'.format(uploader.progress * 100)
            )
            return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))

        uploader.upload()

        messages.success(
            request,
            _("Apps/{app}/{dest} is queued to sync to dropbox! You will receive an email when it"
              " completes.".format(app=settings.DROPBOX_APP_NAME, dest=dest))
        )
        return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
def excel_commit(request, domain):
    config = ImporterConfig(request)
    excel_id = request.session.get(EXCEL_SESSION_ID)
    excel_ref = DownloadBase.get(excel_id)
    spreadsheet = importer_util.get_spreadsheet(excel_ref, config.named_columns)
    if not spreadsheet:
        return _spreadsheet_expired(request, domain)
    if spreadsheet.has_errors:
        messages.error(request, _('The session containing the file you '
                                  'uploaded has expired - please upload '
                                  'a new one.'))
        return HttpResponseRedirect(base.ImportCases.get_url(domain=domain) + "?error=cache")

    download = DownloadBase()
    download.set_task(bulk_import_async.delay(
        download.download_id,
        config,
        domain,
        excel_id,
    ))
    try:
        del request.session[EXCEL_SESSION_ID]
    except KeyError:
        pass

    return render(request, "importer/excel_commit.html", {
        'download_id': download.download_id,
        'template': 'importer/partials/import_status.html',
        'domain': domain,
        'report': {
            'name': 'Import: Completed'
        },
        'slug': base.ImportCases.slug})
def dropbox_upload(request, download_id):
    download = DownloadBase.get(download_id)
    if download is None:
        logging.error("Download file request for expired/nonexistent file requested")
        raise Http404
    else:
        filename = download.get_filename()
        # Hack to get target filename from content disposition
        match = re.search('filename="([^"]*)"', download.content_disposition)
        dest = match.group(1) if match else 'download.txt'
        try:
            uploader = DropboxUploadHelper.create(
                request.session.get(DROPBOX_ACCESS_TOKEN),
                src=filename,
                dest=dest,
                download_id=download_id,
                user=request.user,
            )
        except DropboxUploadAlreadyInProgress:
            uploader = DropboxUploadHelper.objects.get(download_id=download_id)
            messages.warning(
                request,
                u'The file is in the process of being synced to dropbox! It is {0:.2f}% '
                u'complete.'.format(uploader.progress * 100)
            )
            return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))

        uploader.upload()

        messages.success(
            request,
            _(u"Apps/{app}/{dest} is queued to sync to dropbox! You will receive an email when it"
              u" completes.".format(app=settings.DROPBOX_APP_NAME, dest=dest))
        )
        return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
def excel_fields(request, domain):
    """
    Step two of three.

    Important values that are grabbed from the POST or defined by the user on
    this page:

    named_columns:
        Passed through from last step, see that for documentation.

    case_type:
        The type of case we are matching to. When creating new cases, this is
        the type they will be created as. When updating existing cases, this is
        the type that we will search for. If the wrong case type is used when
        looking up existing cases, we will not update them.

    create_new_cases:
        A boolean that controls whether or not the user wanted to create new
        cases for any case that doesn't have a matching case id in the upload.

    search_column:
        Which column of the excel file we are using to specify either case ids
        or external ids. This is, strangely, required. If creating new cases
        only you would expect these to be blank with the create_new_cases flag
        set.

    search_field:
        Either case id or external id, determines which type of identification
        we are using to match to cases.

    key_column/value_column:
        These correspond to an advanced feature allowing a user to modify a
        single case with multiple rows.
    """
    named_columns = request.POST['named_columns']
    case_type = request.POST['case_type']
    try:
        search_column = request.POST['search_column']
    except MultiValueDictKeyError:
        # this is only true if your configuration is messed up in an irreparable way
        messages.error(request, _('It looks like you may have accessed this page from a '
                                  'stale page. Please start over.'))
        return _spreadsheet_expired(request, domain)

    search_field = request.POST['search_field']
    create_new_cases = request.POST.get('create_new_cases') == 'on'
    key_value_columns = request.POST.get('key_value_columns') == 'on'
    key_column = ''
    value_column = ''

    download_ref = DownloadBase.get(request.session.get(EXCEL_SESSION_ID))
    spreadsheet = importer_util.get_spreadsheet(download_ref, named_columns)
    if not spreadsheet:
        return _spreadsheet_expired(request, domain)
    columns = spreadsheet.get_header_columns()

    if key_value_columns:
        key_column = request.POST['key_column']
        value_column = request.POST['value_column']

        excel_fields = []
        key_column_index = columns.index(key_column)

        # if key/value columns were specified, get all the unique keys listed
        if key_column_index:
            excel_fields = spreadsheet.get_unique_column_values(key_column_index)

        # concatenate unique key fields with the rest of the columns
        excel_fields = columns + excel_fields
        # remove key/value column names from list
        excel_fields.remove(key_column)
        if value_column in excel_fields:
            excel_fields.remove(value_column)
    else:
        excel_fields = columns

    case_fields = get_case_properties(domain, case_type)

    # hide search column and matching case fields from the update list
    # (list.remove raises ValueError when the item is absent)
    try:
        excel_fields.remove(search_column)
    except ValueError:
        pass
    try:
        case_fields.remove(search_field)
    except ValueError:
        pass

    # we can't actually update this so don't show it
    try:
        case_fields.remove('type')
    except ValueError:
        pass

    return render(
        request,
        "importer/excel_fields.html", {
            'named_columns': named_columns,
            'case_type': case_type,
            'search_column': search_column,
            'search_field': search_field,
            'create_new_cases': create_new_cases,
            'key_column': key_column,
            'value_column': value_column,
            'columns': columns,
            'excel_fields': excel_fields,
            'case_fields': case_fields,
            'domain': domain,
            'report': {
                'name': 'Import: Match columns to fields'
            },
            'slug': base.ImportCases.slug
        }
    )
def bulk_import_async(import_id, config, domain, excel_id):
    task = bulk_import_async
    excel_ref = DownloadBase.get(excel_id)
    spreadsheet = importer_util.get_spreadsheet(excel_ref, config.named_columns)

    if not spreadsheet:
        return {'error': 'EXPIRED'}
    if spreadsheet.has_errors:
        return {'error': 'HAS_ERRORS'}

    row_count = spreadsheet.get_num_rows()
    columns = spreadsheet.get_header_columns()
    match_count = created_count = too_many_matches = 0
    blank_external_ids = []
    invalid_dates = []
    prime_offset = 1  # used to prevent back-to-back priming

    user = CouchUser.get_by_user_id(config.couch_user_id, domain)
    username = user.username
    user_id = user._id

    for i in range(row_count):
        DownloadBase.set_progress(task, i, row_count)

        # skip first row if it is a header field
        if i == 0 and config.named_columns:
            continue

        priming_progress = match_count + created_count + prime_offset
        if priming_progress % PRIME_VIEW_FREQUENCY == 0:
            prime_views(POOL_SIZE)
            # increment so we can't possibly prime on next iteration
            prime_offset += 1

        row = spreadsheet.get_row(i)
        search_id = importer_util.parse_search_id(config, columns, row)
        if config.search_field == 'external_id' and not search_id:
            # do not allow blank external id since we save this
            blank_external_ids.append(i + 1)
            continue

        case, error = importer_util.lookup_case(config.search_field, search_id, domain)
        try:
            fields_to_update = importer_util.populate_updated_fields(config, columns, row)
        except importer_util.InvalidDateException:
            invalid_dates.append(i + 1)
            continue

        if case:
            match_count += 1
        elif error == LookupErrors.NotFound:
            if not config.create_new_cases:
                continue
            created_count += 1
        elif error == LookupErrors.MultipleResults:
            too_many_matches += 1
            continue

        if 'owner_id' in fields_to_update:
            owner_id = fields_to_update['owner_id']
            del fields_to_update['owner_id']
        else:
            owner_id = user_id

        if not case:
            id = uuid.uuid4().hex
            caseblock = CaseBlock(
                create=True,
                case_id=id,
                version=V2,
                user_id=user_id,
                owner_id=owner_id,
                case_type=config.case_type,
                external_id=search_id if config.search_field == 'external_id' else '',
                update=fields_to_update
            )
            submit_case_block(caseblock, domain, username, user_id)
        elif case and case.type == config.case_type:
            caseblock = CaseBlock(
                create=False,
                case_id=case._id,
                owner_id=owner_id,
                version=V2,
                update=fields_to_update
            )
            submit_case_block(caseblock, domain, username, user_id)

    return {
        'created_count': created_count,
        'match_count': match_count,
        'too_many_matches': too_many_matches,
        'blank_externals': blank_external_ids,
        'invalid_dates': invalid_dates,
    }
def process_bulk_upload_zip(processing_id, domain, app_id, username=None, share_media=False,
                            license_name=None, author=None, attribution_notes=None,
                            replace_existing=False):
    """
    Responsible for processing the uploaded zip from Bulk Upload.
    """
    status = BulkMultimediaStatusCache.get(processing_id)
    if not status:
        # no download data available, abort
        return
    app = get_app(domain, app_id)

    status.in_celery = True
    status.save()

    try:
        saved_file = StringIO.StringIO()
        saved_ref = DownloadBase.get(processing_id)
        data = saved_ref.get_content()
        saved_file.write(data)
    except Exception as e:
        status.mark_with_error(_("Could not fetch cached bulk upload file. Error: %s." % e))
        return

    try:
        saved_file.seek(0)
        uploaded_zip = zipfile.ZipFile(saved_file)
    except Exception as e:
        status.mark_with_error(_("Error opening file as zip file: %s" % e))
        return

    if uploaded_zip.testzip():
        status.mark_with_error(_("Error encountered processing Zip File. File doesn't look valid."))
        return

    zipped_files = uploaded_zip.namelist()
    status.total_files = len(zipped_files)
    checked_paths = []

    try:
        for index, path in enumerate(zipped_files):
            status.update_progress(len(checked_paths))
            checked_paths.append(path)
            file_name = os.path.basename(path)
            try:
                data = uploaded_zip.read(path)
            except Exception as e:
                status.add_unmatched_path(path, _("Error reading file: %s" % e))
                continue
            media_class = CommCareMultimedia.get_class_by_data(data)
            if not media_class:
                # skip these...
                continue
            app_paths = app.get_all_paths_of_type(media_class.__name__)
            form_path = media_class.get_form_path(path)
            if form_path not in app_paths:
                status.add_unmatched_path(
                    path, _("Did not match any %s paths in application." % media_class.get_nice_name()))
                continue
            multimedia = media_class.get_by_data(data)
            if not multimedia:
                status.add_unmatched_path(
                    path, _("Matching path found, but could not save the data to couch."))
                continue
            is_updated = multimedia.attach_data(data,
                                                original_filename=file_name,
                                                username=username,
                                                replace_attachment=replace_existing)
            if not is_updated and not getattr(multimedia, '_id'):
                status.add_unmatched_path(
                    form_path, _("Matching path found, but didn't save new multimedia correctly."))
                continue
            if is_updated:
                multimedia.add_domain(domain, owner=True)
                if share_media:
                    multimedia.update_or_add_license(domain, type=license_name, author=author,
                                                     attribution_notes=attribution_notes)
                app.create_mapping(multimedia, form_path)
            media_info = multimedia.get_media_info(form_path, is_updated=is_updated, original_path=path)
            status.add_matched_path(media_class, media_info)
        status.update_progress(len(checked_paths))
    except Exception as e:
        status.mark_with_error(_("Error while processing zip: %s" % e))
    uploaded_zip.close()

    status.complete = True
    status.save()
def process_bulk_upload_zip(processing_id, domain, app_id, username=None, share_media=False,
                            license_name=None, author=None, attribution_notes=None,
                            replace_existing=False):
    """
    Responsible for processing the uploaded zip from Bulk Upload.
    """
    status = BulkMultimediaStatusCache.get(processing_id)
    if not status:
        # no download data available, abort
        return
    app = get_app(domain, app_id)

    status.in_celery = True
    status.save()

    try:
        saved_file = StringIO.StringIO()
        saved_ref = DownloadBase.get(processing_id)
        data = saved_ref.get_content()
        saved_file.write(data)
    except Exception as e:
        status.mark_with_error(_("Could not fetch cached bulk upload file. Error: %s." % e))
        return

    try:
        saved_file.seek(0)
        uploaded_zip = zipfile.ZipFile(saved_file)
    except Exception as e:
        status.mark_with_error(_("Error opening file as zip file: %s" % e))
        return

    if uploaded_zip.testzip():
        status.mark_with_error(_("Error encountered processing Zip File. File doesn't look valid."))
        return

    zipped_files = uploaded_zip.namelist()
    status.total_files = len(zipped_files)
    checked_paths = []

    try:
        for index, path in enumerate(zipped_files):
            status.update_progress(len(checked_paths))
            checked_paths.append(path)
            file_name = os.path.basename(path)
            try:
                data = uploaded_zip.read(path)
            except Exception as e:
                status.add_unmatched_path(path, _("Error reading file: %s" % e))
                continue
            media_class = CommCareMultimedia.get_class_by_data(data, filename=path)
            if not media_class:
                status.add_skipped_path(path, CommCareMultimedia.get_mime_type(data))
                continue
            app_paths = list(app.get_all_paths_of_type(media_class.__name__))
            app_paths_lower = [p.lower() for p in app_paths]
            form_path = media_class.get_form_path(path, lowercase=True)
            if form_path not in app_paths_lower:
                status.add_unmatched_path(
                    path, _("Did not match any %s paths in application." % media_class.get_nice_name()))
                continue
            index_of_path = app_paths_lower.index(form_path)
            form_path = app_paths[index_of_path]  # this is the correct capitalization as specified in the form
            multimedia = media_class.get_by_data(data)
            if not multimedia:
                status.add_unmatched_path(
                    path, _("Matching path found, but could not save the data to couch."))
                continue
            is_new = form_path not in app.multimedia_map
            is_updated = multimedia.attach_data(data,
                                                original_filename=file_name,
                                                username=username,
                                                replace_attachment=replace_existing)
            if not is_updated and not getattr(multimedia, '_id'):
                status.add_unmatched_path(
                    form_path, _("Matching path found, but didn't save new multimedia correctly."))
                continue
            if is_updated or is_new:
                multimedia.add_domain(domain, owner=True)
                if share_media:
                    multimedia.update_or_add_license(domain, type=license_name, author=author,
                                                     attribution_notes=attribution_notes)
                app.create_mapping(multimedia, form_path)
            media_info = multimedia.get_media_info(form_path, is_updated=is_updated, original_path=path)
            status.add_matched_path(media_class, media_info)
        status.update_progress(len(checked_paths))
    except Exception as e:
        status.mark_with_error(_("Error while processing zip: %s" % e))
    uploaded_zip.close()

    status.complete = True
    status.save()
def _get_saved_export_download_data(export_instance_id):
    download_id = 'rebuild_export_tracker.{}'.format(export_instance_id)
    download_data = DownloadBase.get(download_id)
    if download_data is None:
        download_data = DownloadBase(download_id=download_id)
    return download_data
def bulk_import_async(import_id, config, domain, excel_id):
    excel_ref = DownloadBase.get(excel_id)
    spreadsheet = importer_util.get_spreadsheet(excel_ref, config.named_columns)
    return do_import(spreadsheet, config, domain, task=bulk_import_async)