def impact_function_filter(request):
    """Ajax request returning the impact functions usable for a layer pair.

    Expects ``exposure_id`` and ``hazard_id`` GET parameters; responds with
    a JSON list of applicable impact functions.
    """
    if request.method != 'GET':
        return HttpResponseBadRequest()

    exposure_id = request.GET.get('exposure_id')
    hazard_id = request.GET.get('hazard_id')

    # Both layer ids are required; short-circuit with an empty JSON list.
    if not (exposure_id and hazard_id):
        return HttpResponse(json.dumps([]), content_type="application/json")

    try:
        # Resolve layer models, then their InaSAFE-accessible URIs.
        hazard_uri = get_layer_path(Layer.objects.get(id=hazard_id))
        exposure_uri = get_layer_path(Layer.objects.get(id=exposure_id))

        # Ask the headless worker which impact functions apply; block
        # until the celery task result is available.
        task = filter_impact_function.delay(hazard_uri, exposure_uri)
        functions = task.get()
        return HttpResponse(
            json.dumps(functions), content_type="application/json")
    except Exception as e:
        LOGGER.exception(e)
        return HttpResponseServerError()
def test_get_layer_path(self):
    """Test return file://schema if using direct file access.

    Also verifies the fallback to an http URL when
    ``INASAFE_LAYER_DIRECTORY`` is unset.
    """
    data_helper = InaSAFETestData()
    filename = data_helper.hazard('flood_data.geojson')
    # hazard: geonode.layers.models.Layer
    hazard = file_upload(filename)
    wait_metadata(hazard)

    # Remember the original setting so it can be restored afterwards.
    inasafe_layer_dir = settings.INASAFE_LAYER_DIRECTORY
    try:
        layer_uri = get_layer_path(hazard)
        parsed_uri = urlparse.urlparse(layer_uri)

        # Use direct access by default
        self.assertTrue(
            not parsed_uri.scheme or parsed_uri.scheme == 'file')

        settings.set('INASAFE_LAYER_DIRECTORY', None)

        layer_uri = get_layer_path(hazard)
        parsed_uri = urlparse.urlparse(layer_uri)

        # Use http access when lack configuration
        self.assertEqual(parsed_uri.scheme, 'http')
    finally:
        # Cleanup must run even when an assertion fails, otherwise the
        # uploaded layer and the patched setting leak into other tests.
        hazard.delete()
        settings.set('INASAFE_LAYER_DIRECTORY', inasafe_layer_dir)
def prepare_analysis(analysis_id):
    """Prepare and run analysis

    :param analysis_id: analysis id of the object
    :type analysis_id: int

    :return: Celery Async Result
    :rtype: celery.result.AsyncResult
    """
    analysis = Analysis.objects.get(id=analysis_id)

    # Record when this analysis run was kicked off.
    Analysis.objects.filter(id=analysis_id).update(
        start_time=datetime.now())
    analysis.refresh_from_db()

    hazard_uri = get_layer_path(analysis.hazard_layer)
    exposure_uri = get_layer_path(analysis.exposure_layer)

    aggregation_uri = None
    if analysis.aggregation_layer:
        aggregation_uri = get_layer_path(analysis.aggregation_layer)

    # When an aggregation filter exists, a temporary filtered copy
    # replaces the raw aggregation layer.
    if aggregation_uri:
        aggregation_uri = prepare_aggregation_filter(analysis_id)

    # Build the task chain:
    # - Run analysis
    # - Process analysis result
    # - Clean up the temporary aggregation layer
    run_sig = run_analysis.s(
        hazard_uri, exposure_uri, aggregation_uri,
        locale=analysis.language_code)
    run_sig = run_sig.set(queue=run_analysis.queue)
    run_sig = run_sig.set(
        time_limit=settings.INASAFE_ANALYSIS_RUN_TIME_LIMIT)

    process_sig = process_impact_result.s(analysis_id).set(
        queue=process_impact_result.queue)
    cleanup_sig = clean_up_temp_aggregation.s(analysis_id).set(
        queue=clean_up_temp_aggregation.queue)

    result = chain(run_sig, process_sig, cleanup_sig).delay()

    # Parent information will be lost later, so walk back to the root of
    # the chain (the run_analysis task) and return that result instead.
    while result.parent:
        result = result.parent
    return result
def aggregation_field_name(self):
    """Return the aggregation name field read from InaSAFE keywords.

    Returns None when no aggregation layer is attached.
    """
    if not self.aggregation_layer:
        return None

    from geosafe.tasks.headless.analysis import get_keywords
    from geosafe.helpers.utils import get_layer_path

    # Ask the headless worker for the layer's InaSAFE keywords (blocking).
    layer_uri = get_layer_path(self.aggregation_layer)
    keywords = get_keywords.delay(layer_uri).get()
    return keywords['inasafe_fields']['aggregation_name_field']
def impact_function_name(self):
    """Return the impact function name from the impact layer's provenance.

    Falls back to the default impact title when no impact layer exists.
    """
    if not self.impact_layer:
        return self.get_default_impact_title()

    from geosafe.tasks.headless.analysis import get_keywords
    from geosafe.helpers.utils import get_layer_path

    # Ask the headless worker for the layer's InaSAFE keywords (blocking).
    layer_uri = get_layer_path(self.impact_layer)
    keywords = get_keywords.delay(layer_uri).get()
    return keywords['provenance_data']['impact_function_name']
def prepare_analysis(analysis_id):
    """Prepare and run analysis

    :param analysis_id: analysis id of the object
    :type analysis_id: int

    :return: Celery Async Result
    :rtype: celery.result.AsyncResult
    """
    analysis = Analysis.objects.get(id=analysis_id)

    hazard_uri = get_layer_path(analysis.hazard_layer)
    exposure_uri = get_layer_path(analysis.exposure_layer)
    function_id = analysis.impact_function_id

    extent = analysis.user_extent
    if extent:
        # The stored extent is a comma-separated string; parse it into a
        # list of floats without evaluating arbitrary code.
        extent = ast.literal_eval('[{0}]'.format(extent))

    # Build the task chain:
    # - Run analysis
    # - Process analysis result
    run_sig = run_analysis.s(
        hazard_uri, exposure_uri, function_id,
        generate_report=True,
        requested_extent=extent,
        archive_impact=False)
    run_sig = run_sig.set(queue='inasafe-headless-analysis')
    run_sig = run_sig.set(
        time_limit=settings.INASAFE_ANALYSIS_RUN_TIME_LIMIT)

    process_sig = process_impact_result.s(analysis_id).set(queue='geosafe')

    result = chain(run_sig, process_sig).delay()

    # Parent information will be lost later.
    # What we should save is the run_analysis task result as this is the
    # chain's parent
    return result.parent
def create_metadata_object(self, layer_id):
    """Create metadata object of a given layer

    :param self: Celery task instance
    :type self: celery.app.task.Task

    :param layer_id: layer ID
    :type layer_id: int

    :return: True if success
    :rtype: bool
    """
    try:
        layer = Layer.objects.get(id=layer_id)

        # Now that layer exists, get InaSAFE keywords
        # Direct disk access is only usable when the setting exists, is
        # non-empty, and the layer's files live on this host.
        using_direct_access = (
            hasattr(settings, 'INASAFE_LAYER_DIRECTORY') and
            settings.INASAFE_LAYER_DIRECTORY)
        if using_direct_access and not layer.is_remote:
            # If direct disk access were configured, then use it.
            # Point at the sidecar ISO metadata XML next to the base file.
            base_file_path = get_layer_path(layer)
            base_file_path, _ = os.path.splitext(base_file_path)
            xml_file_path = base_file_path + '.xml'
            layer_url = urlparse.urljoin('file://', xml_file_path)
        else:
            # InaSAFE Headless celery will download metadata from url
            layer_url = reverse(
                'geosafe:layer-metadata',
                kwargs={'layer_id': layer.id})
            layer_url = urlparse.urljoin(
                settings.GEONODE_BASE_URL, layer_url)

        # Execute in chain:
        # - Get InaSAFE keywords from InaSAFE worker
        # - Set Layer metadata according to InaSAFE keywords
        read_keywords_iso_metadata_queue = read_keywords_iso_metadata.queue
        set_layer_purpose_queue = set_layer_purpose.queue

        tasks_chain = chain(
            read_keywords_iso_metadata.s(
                layer_url, ('layer_purpose', 'hazard', 'exposure')).set(
                queue=read_keywords_iso_metadata_queue),
            set_layer_purpose.s(layer_id).set(
                queue=set_layer_purpose_queue)
        )
        # Fire-and-forget: the chain's result is not awaited here.
        tasks_chain.delay()
    except Layer.DoesNotExist as e:
        # Perhaps layer wasn't saved yet.
        # Retry later
        self.retry(exc=e, countdown=5)
    except AttributeError as e:
        # This signal is called too early
        # We can't get layer file
        pass
    return True
def layer_keywords(request):
    """Ajax request returning InaSAFE keywords for a given layer as JSON."""
    if request.method != 'GET':
        return HttpResponseBadRequest()

    layer_id = request.GET.get('layer_id')
    if not layer_id:
        return HttpResponseBadRequest()

    try:
        from geosafe.tasks.headless.analysis import get_keywords
        from geosafe.helpers.utils import get_layer_path

        target_layer = Layer.objects.get(id=layer_id)
        # Fetch keywords from the headless worker (blocking).
        keywords = get_keywords.delay(get_layer_path(target_layer)).get()
        return HttpResponse(
            json.dumps(keywords), content_type="application/json")
    except Exception as e:
        LOGGER.exception(e)
        return HttpResponseServerError()
def prepare_context_layer_order(analysis, impact_url):
    """Prepare the context layer order used for report generation.

    :param analysis: Analysis object to prepare
    :type analysis: Analysis

    :param impact_url: The impact URI returned by Headless
    :type impact_url: basestring

    :return: Ordered list of QGIS data source URIs to load — file paths
        or any valid QGIS datasource URI, e.g.:

        [
            '/path/to/impact',
            '/path/to/hazard',
            'type=xyz&url=http://basemap.url/z/x/y.png|qgis_provider=wms'
        ]
    :rtype: list(basestring)
    """
    # Start from the configured layer order template, when there is one.
    layer_order = None
    if settings.REPORT_LAYER_ORDER:
        layer_order = list(settings.REPORT_LAYER_ORDER)

    if not layer_order:
        return layer_order

    # Map each placeholder to its concrete data source.
    layer_source = {
        'impact': impact_url,
        'hazard': get_layer_path(analysis.hazard_layer),
        'exposure': get_layer_path(analysis.exposure_layer),
    }

    # Aggregation layer is optional; drop its placeholder when absent.
    if analysis.aggregation_layer:
        layer_source['aggregation'] = get_layer_path(
            analysis.aggregation_layer)
    else:
        try:
            layer_order.remove('@aggregation')
        except ValueError:
            pass

    # Basemap, best-effort: any failure just means no basemap entry.
    if settings.REPORT_DEFAULT_BASEMAP:
        try:
            layer_source['basemap'] = (
                'type=xyz&url={0}|qgis_provider=wms'.format(
                    settings.REPORT_DEFAULT_BASEMAP))
        except BaseException:
            pass
    if 'basemap' not in layer_source:
        try:
            layer_order.remove('@basemap')
        except ValueError:
            pass

    # Prepare styles.
    # The impact layer already has a default style defined, so it is
    # skipped; GeoNode layers need their QGISServerStyle default
    # extracted to a QML sidecar.
    context_layers = [analysis.hazard_layer, analysis.exposure_layer]
    if analysis.aggregation_layer:
        context_layers.append(analysis.aggregation_layer)
    for context_layer in context_layers:
        context_layer.qgis_layer.extract_default_style_to_qml()

    return substitute_layer_order(layer_order, layer_source)
def prepare_aggregation_filter(analysis_id):
    """Filter current aggregation layer.

    :param analysis_id: analysis id of the object
    :type analysis_id: int

    :return: uri path of filtered aggregation layer
    :rtype: basestring
    """
    analysis = Analysis.objects.get(id=analysis_id)
    if not analysis.aggregation_layer:
        return None

    aggregation_layer = analysis.aggregation_layer.qgis_layer
    endpoint = qgis_server_endpoint(internal=True)

    # construct WFS filter query_params
    query_string = {
        'MAP': aggregation_layer.qgis_project_path,
        'SERVICE': 'WFS',
        'REQUEST': 'GetFeature',
        'TYPENAME': aggregation_layer.layer.name,
        'OUTPUTFORMAT': 'GeoJSON'
    }

    # Build an OGC <Filter> from the stored aggregation filter, when any.
    filter_string = None
    if analysis.aggregation_filter:
        try:
            filter_dict = json.loads(analysis.aggregation_filter)
            property_name = filter_dict['property_name']
            property_values = filter_dict['values']
            like_statement = []
            for val in property_values:
                like_statement.append(
                    '<PropertyIsLike>'
                    '<PropertyName>{name}</PropertyName>'
                    '<Literal>{value}</Literal>'
                    '</PropertyIsLike>'.format(
                        name=property_name, value=val)
                )
            filter_string = '<Filter>{filter}</Filter>'.format(
                filter=''.join(like_statement))
        except BaseException as e:
            LOGGER.error(e)
            # something happened, don't use filter
            filter_string = None

    if filter_string:
        query_string['FILTER'] = filter_string

    response = requests.get(endpoint, params=query_string)
    if response.ok:
        try:
            # try parse geojson
            geojson = response.json()

            # if successful, create temporary inasafe layer
            prefix_name = '{layer_name}_'.format(
                layer_name=aggregation_layer.qgis_layer_name)

            # the files needs to be at the same dir where aggregation
            # layer is
            dirname = os.path.dirname(aggregation_layer.base_layer_path)

            # Bug fix: mkstemp returns an OPEN OS-level file descriptor;
            # the original code discarded it with [1], leaking one fd per
            # filtered analysis. Capture and close it explicitly.
            temp_fd, temp_aggregation = tempfile.mkstemp(
                prefix=prefix_name, suffix='.geojson', dir=dirname)
            os.close(temp_fd)

            with open(temp_aggregation, mode='w+b') as f:
                # Re dump just to be safe
                f.write(json.dumps(geojson))

            filename, _ = os.path.splitext(
                os.path.basename(temp_aggregation))

            # copy metadata
            copy_inasafe_metadata(
                aggregation_layer.base_layer_path, dirname, filename)

            # Update filtered aggregation location
            Analysis.objects.filter(id=analysis_id).update(
                filtered_aggregation=temp_aggregation)

            # Return temporary path
            return get_layer_path(temp_aggregation)
        except BaseException as e:
            LOGGER.error(e)
            # Failed to filter aggregation layer somehow

    # when everything fails
    return get_layer_path(analysis.aggregation_layer)