Beispiel #1
0
def export_results(request):
    """Export search results, immediately or via a background celery task.

    Result sets at or under SEARCH_EXPORT_IMMEDIATE_DOWNLOAD_THRESHOLD are
    exported synchronously and returned as a zip download; larger sets are
    handed off to a celery worker and the user is told to watch for a
    notification. Returns a JSONResponse (queued/refused) or a zip response.
    """
    total = int(request.GET.get("total", 0))
    # Local alias for the "format" query parameter to avoid shadowing the
    # builtin of the same name.
    export_format = request.GET.get("format", "tilecsv")
    download_limit = settings.SEARCH_EXPORT_IMMEDIATE_DOWNLOAD_THRESHOLD
    if total > download_limit:
        celery_worker_running = task_management.check_if_celery_available()
        if celery_worker_running:
            request_values = dict(request.GET)
            request_values["path"] = request.get_full_path()
            # Fire-and-forget: outcome is reported via the task links.
            tasks.export_search_results.apply_async(
                (request.user.id, request_values, export_format),
                link=tasks.update_user_task_record.s(),
                link_error=tasks.log_error.s())
            # Interpolate with .format() AFTER _() so gettext can extract a
            # literal msgid; an f-string is interpolated before the
            # translation lookup and can never match a catalog entry.
            message = _("{total} instances have been submitted for export. \
                Click the Bell icon to check for a link to download your data"
                        ).format(total=total)
            return JSONResponse({"success": True, "message": message})
        else:
            message = _(
                "Your search exceeds the {download_limit} instance download limit. Please refine your search"
            ).format(download_limit=download_limit)
            return JSONResponse({"success": False, "message": message})
    else:
        exporter = SearchResultsExporter(search_request=request)
        export_files, export_info = exporter.export(export_format)
        # Shapefile export can legitimately produce nothing; ship an
        # explanatory error file instead of an empty archive.
        if len(export_files) == 0 and export_format == "shp":
            message = _(
                "Either no instances were identified for export or no resources have exportable geometry nodes\
                Please confirm that the models of instances you would like to export have geometry nodes and that\
                those nodes are set as exportable")
            dest = StringIO()
            dest.write(message)
            export_files.append({"name": "error.txt", "outputfile": dest})
        return zip_utils.zip_response(
            export_files, zip_file_name=f"{settings.APP_NAME}_export.zip")
Beispiel #2
0
def export_results(request):
    """Export search results: queue a celery task for result sets over the
    immediate-download threshold, otherwise return the zip download directly.
    """
    total = int(request.GET.get("total", 0))
    # Local alias for the "format" query parameter to avoid shadowing the
    # builtin of the same name.
    export_format = request.GET.get("format", "tilecsv")
    download_limit = settings.SEARCH_EXPORT_IMMEDIATE_DOWNLOAD_THRESHOLD
    if total > download_limit:
        celery_worker_running = task_management.check_if_celery_available()
        if celery_worker_running:
            req_dict = dict(request.GET)
            # Fire-and-forget: progress is reported through the task links.
            tasks.export_search_results.apply_async(
                (request.user.id, req_dict, export_format),
                link=tasks.update_user_task_record.s(),
                link_error=tasks.log_error.s())
            # Interpolate with .format() AFTER _() so gettext can extract a
            # literal msgid; an f-string is interpolated before the
            # translation lookup and can never match a catalog entry.
            message = _("{total} instances have been submitted for export. \
                Click the bell icon to check for a notification once your export is completed and ready for download"
                        ).format(total=total)
            return JSONResponse({"success": True, "message": message})
        else:
            message = _(
                "Your search exceeds the {download_limit} instance download limit. Please refine your search"
            ).format(download_limit=download_limit)
            return JSONResponse({"success": False, "message": message})
    else:
        exporter = SearchResultsExporter(search_request=request)
        export_files = exporter.export(export_format)
        return zip_utils.zip_response(
            export_files, zip_file_name=f"{settings.APP_NAME}_export.zip")
Beispiel #3
0
    def get(self, request, surveyid=None):
        """Start a mobile-survey sync for the requesting user.

        Delegates to celery when a worker is reachable, otherwise falls back
        to the synchronous "mobile" management command. Responds 403 when the
        user may not access the survey.
        """
        can_sync = userCanAccessMobileSurvey(request, surveyid)
        if not can_sync:
            return JSONResponse(_("Sync Failed"), status=403)
        try:
            logger.info("Starting sync for user {0}".format(
                request.user.username))
            celery_worker_running = task_management.check_if_celery_available()
            if celery_worker_running:
                tasks.sync.apply_async(
                    (surveyid, request.user.id),
                    link=tasks.update_user_task_record.s(),
                    link_error=tasks.log_error.s())
            else:
                management.call_command("mobile",
                                        operation="sync_survey",
                                        id=surveyid,
                                        user=request.user.id)
            logger.info("Sync complete for user {0}".format(
                request.user.username))
        except Exception:
            logger.exception(_("Sync Failed"))
            return JSONResponse(_("Sync Failed"))
        # BUG FIX: the success path previously also returned "Sync Failed",
        # contradicting the "Sync complete" log line just above.
        return JSONResponse(_("Sync Complete"))
Beispiel #4
0
    def sync(self, userid=None, use_celery=True):
        """Create a sync-log row and start a sync of this survey.

        When ``use_celery`` is True and a worker is reachable, the sync is
        delegated to the ``tasks.sync`` celery task; when celery is requested
        but unavailable, the log is marked failed with an explanatory
        message. Otherwise ``_sync`` runs directly. Returns the
        ``MobileSyncLog`` created for this attempt.
        """
        synclog = MobileSyncLog(userid=userid,
                                survey=self,
                                status="PROCESSING")
        synclog.save()
        logger.info("Starting sync for userid {0}".format(userid))
        if use_celery:
            if task_management.check_if_celery_available():
                import arches.app.tasks as tasks

                # Fire-and-forget: completion/failure is recorded through
                # the task links, not through a return value here.
                tasks.sync.apply_async((self.id, userid, synclog.pk),
                                       link=tasks.update_user_task_record.s(),
                                       link_error=tasks.log_error.s())
            else:
                self._sync_failed(synclog, userid)
                synclog.message = _(
                    "Celery appears not to be running, you need to have celery running in order to sync from Arches Collector."
                )
                synclog.save()
        else:
            self._sync(synclog.pk, userid=userid)
        return synclog
Beispiel #5
0
    def get(self, request):
        """Render the search page with map, graph, and component context."""
        # Resource graphs to expose to the client: skip the system-settings
        # model and anything that is not an active resource model.
        graph_models = (models.GraphModel.objects.exclude(
            pk=settings.SYSTEM_SETTINGS_RESOURCE_MODEL_ID).exclude(
                isresource=False).exclude(isactive=False))

        context = self.get_context_data(
            map_layers=models.MapLayer.objects.all(),
            map_markers=models.MapMarker.objects.all(),
            map_sources=models.MapSource.objects.all(),
            geocoding_providers=models.Geocoder.objects.all(),
            search_components=models.SearchComponent.objects.all(),
            widgets=models.Widget.objects.all(),
            report_templates=models.ReportTemplate.objects.all(),
            card_components=models.CardComponent.objects.all(),
            main_script="views/search",
            resource_graphs=graph_models,
            datatypes=models.DDataType.objects.all(),
            user_is_reviewer=user_is_resource_reviewer(request.user),
        )

        # Serialize the graphs without heavyweight/unused fields.
        context["graphs"] = JSONSerializer().serialize(
            context["resource_graphs"],
            exclude=[
                "functions",
                "author",
                "deploymentdate",
                "deploymentfile",
                "version",
                "subtitle",
                "description",
                "disable_instance_creation",
                "ontology_id",
            ],
        )
        context["nav"].update({
            "title": _("Search"),
            "icon": "fa-search",
            "search": False,
            "help": {
                "title": _("Searching the Database"),
                "template": "search-help",
            },
        })
        context["celery_running"] = task_management.check_if_celery_available()
        context["export_html_templates"] = (
            HtmlWriter.get_graphids_with_export_template())

        return render(request, "views/search.htm", context)
Beispiel #6
0
    def get(self, request, surveyid=None):
        """Start a mobile-survey sync for the requesting user.

        Uses the celery task when a worker is reachable, otherwise the
        synchronous "mobile" management command. Responds 403 when the user
        may not access the survey.
        """
        can_sync = userCanAccessMobileSurvey(request, surveyid)
        if not can_sync:
            return JSONResponse(_('Sync Failed'), status=403)
        try:
            logger.info("Starting sync for user {0}".format(request.user.username))
            celery_worker_running = task_management.check_if_celery_available()
            if celery_worker_running:
                tasks.sync.delay(surveyid=surveyid, userid=request.user.id)
            else:
                management.call_command('mobile', operation='sync_survey', id=surveyid, user=request.user.id)
            logger.info("Sync complete for user {0}".format(request.user.username))
        except Exception:
            logger.exception(_('Sync Failed'))
            return JSONResponse(_('Sync Failed'))
        # BUG FIX: the success path previously also returned "Sync Failed",
        # contradicting the "Sync complete" log line just above.
        return JSONResponse(_('Sync Complete'))
Beispiel #7
0
    def import_business_data(
        self,
        file_format=None,
        business_data=None,
        mapping=None,
        overwrite="append",
        bulk=False,
        create_concepts=False,
        create_collections=False,
        use_multiprocessing=False,
    ):
        """Import business data, delegating to celery when a worker is up.

        Arguments left as None fall back to the values captured on ``self``.
        "json", "csv", "shp" and "zip" imports are queued as a celery task
        when a worker is available; "jsonl" is always processed locally,
        optionally with a multiprocessing pool. Datatype ``after_update_all``
        hooks always run in ``finally``, even if the import raises.
        """
        import arches.app.utils.task_management as task_management
        import arches.app.tasks as tasks
        reader = None
        start = time()
        # NOTE(review): this cursor is never used below; kept because
        # creating it opens the DB connection as a side effect — confirm
        # before removing.
        cursor = connection.cursor()
        celery_worker_running = task_management.check_if_celery_available()

        try:
            if file_format is None:
                file_format = self.file_format
            if business_data is None:
                business_data = self.business_data
            if mapping is None:
                mapping = self.mapping
            if file_format == "json":
                if celery_worker_running:
                    tasks.import_resource_instances.apply_async(
                        (file_format, business_data, mapping), link_error=tasks.log_error.s()
                    )
                else:
                    reader = ArchesFileReader()
                    reader.import_business_data(business_data, mapping)
            elif file_format == "jsonl":
                # "rU" (universal-newlines flag) was deprecated and removed
                # in Python 3.11; plain "r" already translates newlines.
                with open(self.file[0], "r") as openf:
                    lines = openf.readlines()
                    if use_multiprocessing:
                        pool = Pool(cpu_count())
                        pool.map(import_one_resource, lines)
                        connections.close_all()
                        reader = ArchesFileReader()
                    else:
                        reader = ArchesFileReader()
                        for line in lines:
                            archesresource = JSONDeserializer().deserialize(line)
                            reader.import_business_data({"resources": [archesresource]})
            elif file_format in ("csv", "shp", "zip"):
                if mapping is not None:
                    if celery_worker_running:
                        tasks.import_resource_instances.apply_async(
                            (file_format, business_data, mapping, overwrite, bulk, create_concepts, create_collections),
                            link_error=tasks.log_error.s(),
                        )
                    else:
                        reader = CsvReader()
                        reader.import_business_data(
                            business_data=business_data,
                            mapping=mapping,
                            overwrite=overwrite,
                            bulk=bulk,
                            create_concepts=create_concepts,
                            create_collections=create_collections,
                        )
                else:
                    print("*" * 80)
                    print(
                        f"ERROR: No mapping file detected for {self.file[0]}. Please indicate one \
                        with the '-c' parameter or place one in the same directory as your business data."
                    )
                    print("*" * 80)
                    sys.exit()

            elapsed = time() - start
            print("Time to import_business_data = {0}".format(datetime.timedelta(seconds=elapsed)))

            if reader is not None:
                reader.report_errors()

        finally:
            # Always run datatype post-processing, even on failure.
            datatype_factory = DataTypeFactory()
            datatypes = DDataType.objects.all()
            for datatype in datatypes:
                try:
                    datatype_instance = datatype_factory.get_instance(datatype.datatype)
                    datatype_instance.after_update_all()
                except BrokenPipeError:
                    logger = logging.getLogger(__name__)
                    logger.info("Celery not working: tasks unavailable during import.")
Beispiel #8
0
def export_results(request):
    """Export search results in the requested format.

    Large, non-geojson result sets are queued as a celery export task (and
    refused for anonymous users when RESTRICT_CELERY_EXPORT_FOR_ANONYMOUS_USER
    is set). "tilexl" workbooks are written through a temp file; everything
    else is exported synchronously. Returns a JSONResponse (queued/refused)
    or a zip download response.
    """
    total = int(request.GET.get("total", 0))
    # Local alias for the "format" query parameter to avoid shadowing the
    # builtin of the same name.
    export_format = request.GET.get("format", "tilecsv")
    report_link = request.GET.get("reportlink", False)
    app_name = settings.APP_NAME
    # HTML exports have their own (typically lower) immediate threshold.
    if export_format == "html":
        download_limit = settings.SEARCH_EXPORT_IMMEDIATE_DOWNLOAD_THRESHOLD_HTML_FORMAT
    else:
        download_limit = settings.SEARCH_EXPORT_IMMEDIATE_DOWNLOAD_THRESHOLD

    if total > download_limit and export_format != "geojson":
        if settings.RESTRICT_CELERY_EXPORT_FOR_ANONYMOUS_USER and request.user.username == "anonymous":
            message = _(
                "Your search exceeds the {download_limit} instance download limit.  \
                Anonymous users cannot run an export exceeding this limit.  \
                Please sign in with your {app_name} account or refine your search"
            ).format(download_limit=download_limit, app_name=app_name)
            return JSONResponse({"success": False, "message": message})
        else:
            celery_worker_running = task_management.check_if_celery_available()
            if celery_worker_running:
                request_values = dict(request.GET)
                request_values["path"] = request.get_full_path()
                # Fire-and-forget: outcome is reported via the task links.
                tasks.export_search_results.apply_async(
                    (request.user.id, request_values, export_format, report_link),
                    link=tasks.update_user_task_record.s(),
                    link_error=tasks.log_error.s(),
                )
                message = _(
                    "{total} instances have been submitted for export. \
                    Click the Bell icon to check for a link to download your data"
                ).format(total=total)
                return JSONResponse({"success": True, "message": message})
            else:
                message = _(
                    "Your search exceeds the {download_limit} instance download limit. Please refine your search"
                ).format(download_limit=download_limit)
                return JSONResponse({"success": False, "message": message})

    elif export_format == "tilexl":
        exporter = SearchResultsExporter(search_request=request)
        export_files, export_info = exporter.export(export_format, report_link)
        wb = export_files[0]["outputfile"]
        with NamedTemporaryFile() as tmp:
            wb.save(tmp.name)
            # Rewind after saving so downstream consumers read the workbook
            # bytes from the start; the previous code read the whole file
            # into an unused variable, handing over a handle positioned at
            # EOF.
            tmp.seek(0)
            export_files[0]["outputfile"] = tmp
            return zip_utils.zip_response(
                export_files, zip_file_name=f"{settings.APP_NAME}_export.zip")
    else:
        exporter = SearchResultsExporter(search_request=request)
        export_files, export_info = exporter.export(export_format, report_link)

        # Shapefile export can legitimately produce nothing; ship an
        # explanatory error file instead of an empty archive.
        if len(export_files) == 0 and export_format == "shp":
            message = _(
                "Either no instances were identified for export or no resources have exportable geometry nodes\
                Please confirm that the models of instances you would like to export have geometry nodes and that\
                those nodes are set as exportable")
            dest = StringIO()
            dest.write(message)
            export_files.append({"name": "error.txt", "outputfile": dest})
        return zip_utils.zip_response(
            export_files, zip_file_name=f"{settings.APP_NAME}_export.zip")