def handle(self, request):
    """Handle a WMS 1.1 GetFeatureInfo request and return an HTTP response."""
    decoder = WMS11GetFeatureInfoDecoder(request.GET)
    bbox = decoder.bbox
    time = decoder.time
    srs = decoder.srs
    layers = decoder.layers

    # A request without any layers cannot be answered.
    if not layers:
        raise InvalidParameterException("No layers specified", "layers")

    # WMS 1.1 bounding boxes are never axis-swapped.
    x_lo, y_lo, x_hi, y_hi = bbox
    subsets = Subsets(
        (Trim("x", x_lo, x_hi), Trim("y", y_lo, y_hi)),
        crs=srs,
    )
    if time:
        subsets.append(time)

    renderer = self.renderer
    root_group = lookup_layers(layers, subsets, renderer.suffixes)
    result, _ = renderer.render(
        root_group,
        request.GET.items(),
        request,
        time=time,
        bands=decoder.dim_bands,
    )
    return to_http_response(result)
def handle(self, request):
    """Handle a WMS 1.1 GetMap request and return an HTTP response."""
    decoder = WMS11GetMapDecoder(request.GET)
    bbox = decoder.bbox
    time = decoder.time
    srs = decoder.srs
    layers = decoder.layers

    if not layers:
        raise InvalidParameterException("No layers specified", "layers")

    # validate the SRS against the known EPSG parsers
    srid = crss.parseEPSGCode(
        srs, (crss.fromShortCode, crss.fromURN, crss.fromURL)
    )
    if srid is None:
        raise InvalidCRS(srs, "srs")

    # WMS 1.1 knows no swapped axes
    x_lo, y_lo, x_hi, y_hi = bbox
    subsets = Subsets(
        (Trim("x", x_lo, x_hi), Trim("y", y_lo, y_hi)),
        crs=srs,
    )
    if time:
        subsets.append(time)

    renderer = self.renderer
    root_group = lookup_layers(layers, subsets, renderer.suffixes)
    result, _ = renderer.render(
        root_group,
        request.GET.items(),
        time=time,
        bands=decoder.dim_bands,
    )
    return to_http_response(result)
def handle(self, request):
    """Answer a WMS 1.1 GetFeatureInfo request."""
    decoder = WMS11GetFeatureInfoDecoder(request.GET)
    bbox = decoder.bbox
    time = decoder.time
    srs = decoder.srs
    layers = decoder.layers

    if not layers:
        raise InvalidParameterException("No layers specified", "layers")

    # no axis swapping in WMS 1.1: BBOX is minx, miny, maxx, maxy
    trims = (
        Trim("x", bbox[0], bbox[2]),
        Trim("y", bbox[1], bbox[3]),
    )
    subsets = Subsets(trims, crs=srs)
    if time:
        subsets.append(time)

    renderer = self.renderer
    root_group = lookup_layers(layers, subsets, renderer.suffixes)
    result, _ = renderer.render(
        root_group, request.GET.items(), request,
        time=time, bands=decoder.dim_bands
    )
    return to_http_response(result)
def handle(self, request):
    """Handle a WMS 1.3 GetMap request via the LayerQuery/Mapserver path.

    Decodes the request, validates CRS and layers, then builds a render map
    and hands it to the MapserverMapRenderer.
    """
    decoder = WMS13GetMapDecoder(request.GET)
    bbox = decoder.bbox
    time = decoder.time
    crs = decoder.crs
    layers = decoder.layers
    if not layers:
        raise InvalidParameterException("No layers specified", "layers")
    # validate the CRS identifier against the supported EPSG notations
    srid = crss.parseEPSGCode(
        crs, (crss.fromShortCode, crss.fromURN, crss.fromURL))
    if srid is None:
        raise InvalidCRS(crs, "crs")
    # WMS 1.3 BBOX follows the CRS axis order, which may be lat/lon swapped
    if crss.hasSwappedAxes(srid):
        miny, minx, maxy, maxx = bbox
    else:
        minx, miny, maxx, maxy = bbox
    # NOTE(review): `subsets` is assembled here but never used below -- the
    # raw `bbox`/`time` are passed to create_map directly. Confirm whether
    # the subset construction has a deliberate validation side effect.
    subsets = Subsets((
        Trim("x", minx, maxx),
        Trim("y", miny, maxy),
    ), crs=crs)
    if time:
        subsets.append(time)
    # TODO: adjust way to get to renderer
    styles = decoder.styles
    if styles:
        styles = styles.split(',')
    from eoxserver.services.ows.wms.layerquery import LayerQuery
    render_map = LayerQuery().create_map(
        layers=layers,
        styles=styles,
        bbox=bbox,
        crs=crs,
        width=decoder.width,
        height=decoder.height,
        format=decoder.format,
        transparent=decoder.transparent,
        bgcolor=decoder.bgcolor,
        time=time,
        range=decoder.dim_range,
        bands=None,
        wavelengths=None,
        elevation=None,
        cql=decoder.cql,
    )
    from eoxserver.render.mapserver.map_renderer import MapserverMapRenderer
    return MapserverMapRenderer().render_map(render_map)
def handle(self, request):
    """Handle a WMS 1.0 GetMap request and return an HTTP response."""
    decoder = WMS10GetMapDecoder(request.GET)
    bbox = decoder.bbox
    srs = decoder.srs
    layers = decoder.layers
    if not layers:
        raise InvalidParameterException("No layers specified", "layers")
    # validate the SRS against the supported EPSG notations
    srid = crss.parseEPSGCode(
        srs, (crss.fromShortCode, crss.fromURN, crss.fromURL)
    )
    if srid is None:
        raise InvalidCRS(srs, "srs")
    # WMS 1.0 knows no swapped axes: BBOX is always minx, miny, maxx, maxy
    minx, miny, maxx, maxy = bbox
    subsets = Subsets((
        Trim("x", minx, maxx),
        Trim("y", miny, maxy),
    ), crs=srs)
    root_group = lookup_layers(layers, subsets)
    result, _ = self.renderer.render(
        root_group, request.GET.items(),
        subsets=subsets,
        width=int(decoder.width),
        height=int(decoder.height)
    )
    return to_http_response(result)
def get_params(self, coverage, decoder, request):
    """Assemble WCS 2.0 coverage rendering parameters from a decoded request.

    Validates the scaling parameters (ScaleFactor is exclusive with the
    per-axis operations, and each axis may be scaled only once) and picks
    the encoding parameters of the matching encoding extension.
    """
    subsets = Subsets(decoder.subsets, crs=decoder.subsettingcrs)

    # take the encoding parameters of any extension supporting the format
    encoding_params = None
    for encoding_extension in get_encoding_extensions():
        if encoding_extension.supports(decoder.format, {}):
            encoding_params = encoding_extension.get_encoding_params(request)

    scalefactor = decoder.scalefactor
    scales = list(
        chain(decoder.scaleaxes, decoder.scalesize, decoder.scaleextent)
    )

    # a global scale factor cannot be combined with per-axis scaling
    if scalefactor and scales:
        raise InvalidRequestException(
            "ScaleFactor and any other scale operation are mutually "
            "exclusive.", locator="scalefactor"
        )

    # every axis may appear in at most one scale operation
    seen_axes = set()
    for scale in scales:
        if scale.axis in seen_axes:
            raise InvalidRequestException(
                "Axis '%s' is scaled multiple times." % scale.axis,
                locator=scale.axis
            )
        seen_axes.add(scale.axis)

    return WCS20CoverageRenderParams(
        coverage, subsets, decoder.rangesubset, decoder.format,
        decoder.outputcrs, decoder.mediatype, decoder.interpolation,
        scalefactor, scales, encoding_params or {}, request
    )
def handle(self, request):
    """Handle a WMS 1.3 GetMap request and return an HTTP response."""
    decoder = WMS13GetMapDecoder(request.GET)
    bbox = decoder.bbox
    time = decoder.time
    crs = decoder.crs
    layers = decoder.layers
    if not layers:
        raise InvalidParameterException("No layers specified", "layers")
    # validate the CRS against the supported EPSG notations
    srid = crss.parseEPSGCode(
        crs, (crss.fromShortCode, crss.fromURN, crss.fromURL)
    )
    if srid is None:
        raise InvalidCRS(crs, "crs")
    # WMS 1.3 BBOX follows the CRS axis order, which may be lat/lon swapped
    if crss.hasSwappedAxes(srid):
        miny, minx, maxy, maxx = bbox
    else:
        minx, miny, maxx, maxy = bbox
    subsets = Subsets((
        Trim("x", minx, maxx),
        Trim("y", miny, maxy),
    ), crs=crs)
    if time:
        subsets.append(time)
    renderer = self.renderer
    root_group = lookup_layers(layers, subsets, renderer.suffixes)
    result, _ = renderer.render(
        root_group, request.GET.items(),
        width=int(decoder.width),
        height=int(decoder.height),
        time=decoder.time,
        bands=decoder.dim_bands,
        subsets=subsets,
        elevation=decoder.elevation,
        # strip the 4-character prefix (presumably "dim_" -- confirm against
        # the decoder) from custom dimension parameter names
        dimensions=dict(
            (key[4:], values) for key, values in decoder.dimensions
        )
    )
    return to_http_response(result)
def handle(self, request):
    """Handle a WMS 1.3 GetMap request and return an HTTP response."""
    decoder = WMS13GetMapDecoder(request.GET)
    bbox = decoder.bbox
    time = decoder.time
    crs = decoder.crs
    layers = decoder.layers
    elevation = decoder.elevation
    if not layers:
        raise InvalidParameterException("No layers specified", "layers")
    # validate the CRS against the supported EPSG notations
    srid = crss.parseEPSGCode(
        crs, (crss.fromShortCode, crss.fromURN, crss.fromURL)
    )
    if srid is None:
        raise InvalidCRS(crs, "crs")
    # WMS 1.3 BBOX follows the CRS axis order, which may be lat/lon swapped
    if crss.hasSwappedAxes(srid):
        miny, minx, maxy, maxx = bbox
    else:
        minx, miny, maxx, maxy = bbox
    # NOTE(review): `subsets` is built (and `time` appended) but never
    # passed to the renderer below -- the raw bbox/time are used instead.
    # Confirm whether this is dead code or relies on a validation side
    # effect of the Trim/Subsets constructors.
    subsets = Subsets((
        Trim("x", minx, maxx, crs),
        Trim("y", miny, maxy, crs),
    ))
    if time:
        subsets.append(time)
    renderer = self.renderer
    result, _ = renderer.render(
        layers, (minx, miny, maxx, maxy), crs,
        (decoder.width, decoder.height), decoder.format,
        time, elevation, decoder.styles
    )
    return to_http_response(result)
def handle(self, request):
    """Handle a WMS 1.3 GetFeatureInfo request and return an HTTP response."""
    decoder = WMS13GetFeatureInfoDecoder(request.GET)
    bbox = decoder.bbox
    time = decoder.time
    crs = decoder.crs
    layers = decoder.layers

    if not layers:
        raise InvalidParameterException("No layers specified", "layers")

    # validate the CRS specifier
    srid = crss.parseEPSGCode(
        crs, (crss.fromShortCode, crss.fromURN, crss.fromURL)
    )
    if srid is None:
        raise InvalidParameterException("Invalid CRS specifier.", "crs")

    # WMS 1.3 honours the axis order of the CRS, so the BBOX may be swapped
    if crss.hasSwappedAxes(srid):
        y_min, x_min, y_max, x_max = bbox
    else:
        x_min, y_min, x_max, y_max = bbox

    subsets = Subsets(
        (Trim("x", x_min, x_max), Trim("y", y_min, y_max)),
        crs=crs,
    )
    if time:
        subsets.append(time)

    renderer = self.renderer
    root_group = lookup_layers(layers, subsets, renderer.suffixes)
    result, _ = renderer.render(
        root_group, request.GET.items(), request,
        time=time, bands=decoder.dim_bands
    )
    return to_http_response(result)
def handle(self, request):
    """Handle a WMS 1.3 GetMap request and return an HTTP response."""
    decoder = WMS13GetMapDecoder(request.GET)
    bbox = decoder.bbox
    time = decoder.time
    crs = decoder.crs
    layers = decoder.layers
    elevation = decoder.elevation
    if not layers:
        raise InvalidParameterException("No layers specified", "layers")
    # validate the CRS against the supported EPSG notations
    srid = crss.parseEPSGCode(
        crs, (crss.fromShortCode, crss.fromURN, crss.fromURL))
    if srid is None:
        raise InvalidCRS(crs, "crs")
    # WMS 1.3 BBOX follows the CRS axis order, which may be lat/lon swapped
    if crss.hasSwappedAxes(srid):
        miny, minx, maxy, maxx = bbox
    else:
        minx, miny, maxx, maxy = bbox
    # NOTE(review): `subsets` is built (and `time` appended) but never
    # passed to the renderer below -- confirm whether it is dead code.
    subsets = Subsets((
        Trim("x", minx, maxx, crs),
        Trim("y", miny, maxy, crs),
    ))
    if time:
        subsets.append(time)
    renderer = self.renderer
    result, _ = renderer.render(
        layers, (minx, miny, maxx, maxy), crs,
        (decoder.width, decoder.height), decoder.format,
        time, elevation, decoder.styles)
    return to_http_response(result)
def handle(self, request):
    """Answer a WMS 1.3 GetFeatureInfo request."""
    decoder = WMS13GetFeatureInfoDecoder(request.GET)
    bbox = decoder.bbox
    time = decoder.time
    crs = decoder.crs
    layers = decoder.layers

    if not layers:
        raise InvalidParameterException("No layers specified", "layers")

    srid = crss.parseEPSGCode(
        crs, (crss.fromShortCode, crss.fromURN, crss.fromURL)
    )
    if srid is None:
        raise InvalidParameterException("Invalid CRS specifier.", "crs")

    # the BBOX axis order depends on the CRS in WMS 1.3
    if crss.hasSwappedAxes(srid):
        y_min, x_min, y_max, x_max = bbox
    else:
        x_min, y_min, x_max, y_max = bbox

    trims = (Trim("x", x_min, x_max), Trim("y", y_min, y_max))
    subsets = Subsets(trims, crs=crs)
    if time:
        subsets.append(time)

    renderer = self.renderer
    root_group = lookup_layers(layers, subsets, renderer.suffixes)
    result, _ = renderer.render(
        root_group,
        request.GET.items(),
        time=time,
        bands=decoder.dim_bands,
    )
    return to_http_response(result)
def handle(self, request):
    """Decode and validate an EO coverage-set style request.

    NOTE(review): as visible here the method ends after building `subsets`
    and returns None implicitly on success -- confirm whether the remainder
    of the handler was truncated from this view.
    """
    decoder = self.get_decoder(request)
    eo_ids = decoder.eo_ids

    # default containment relation if none was requested
    containment = decoder.containment
    if not containment:
        containment = "overlaps"

    # clamp the requested count to the configured maximum, if any
    count_default = self.constraints["CountDefault"]
    count = decoder.count
    if count_default is not None:
        count = min(count, count_default)

    try:
        # subsets are interpreted in EPSG:4326; only trims are allowed
        subsets = Subsets(
            decoder.subsets,
            crs="http://www.opengis.net/def/crs/EPSG/0/4326",
            allowed_types=Trim
        )
    except ValueError, e:
        # re-raise parsing problems as a protocol-level exception
        raise InvalidSubsettingException(str(e))
def handle(self, request):
    """Answer a WMS 1.0 GetFeatureInfo request."""
    decoder = WMS10GetFeatureInfoDecoder(request.GET)
    bbox = decoder.bbox
    srs = decoder.srs
    layers = decoder.layers

    if not layers:
        raise InvalidParameterException("No layers specified", "layers")

    # WMS 1.0 BBOX is always minx, miny, maxx, maxy
    x_lo, y_lo, x_hi, y_hi = bbox
    subsets = Subsets(
        (Trim("x", x_lo, x_hi), Trim("y", y_lo, y_hi)),
        crs=srs,
    )

    root_group = lookup_layers(layers, subsets)
    result, _ = self.renderer.render(
        root_group, request.GET.items(), request
    )
    return to_http_response(result)
def handle(self, request):
    """Handle a W3DS GetScene request.

    Depending on the requested layer this returns either a canned demo
    volume file, "curtain" geometry built from CurtainCoverages (COLLADA,
    converted to glTF or OBJ), or NIfTI volumes built from Cube/Rectified
    coverages.

    NOTE(review): if none of the branches below matches, the initial
    `response = []` list is returned -- confirm that callers handle this.
    """
    # For data/metadata extraction
    import json
    from eoxserver.contrib import gdal

    # texture grey-value mapping and vertical exaggeration constants
    min_level = -40    # maps to 0 in output texture
    max_level = 50     # maps to 255 in output texture
    exaggeration = 10  # multiplier for curtain height in visualization

    # external converter command lines (DAE -> glTF, DAE -> OBJ via blender)
    dae_converter_path = "/var/vmanip/lib/collada2gltf"
    obj_converter_path = "/var/vmanip/lib/blender/blender --background --python /var/vmanip/lib/dae2obj.py -- "

    decoder = W3DSGetSceneKVPDecoder(request.GET)
    print "Layer: %s" % decoder.layer
    print "Bounding box: ", decoder.boundingBox
    print "Time from ", decoder.time.low, " to ", decoder.time.high

    base_path = '/var/vmanip/data/'
    layer = decoder.layer[0]
    try:
        bl = BrowseLayer.objects.get(pk=layer)
    except BrowseLayer.DoesNotExist:
        # no browse layer configured for this identifier
        bl = False
    #print("contains curtains: %s, contains volumes: %s, format: %s"%(bl.contains_vertical_curtains, bl.contains_volumes, decoder.format))

    # shortcut: canned demo volumes are streamed directly from disk
    if layer == 'h2o_vol_demo':
        model_filename = join(base_path, 'H2O.nii.gz')
        print '[MeshFactory] delivered h2o_vol_demo product'
        return (open(model_filename, "r"), 'text/plain')
    elif layer == 'pressure_vol':
        model_filename = join(base_path, 'Pressure.nii.gz')
        print '[MeshFactory] delivered pressure_vol_demo product'
        return (open(model_filename, "r"), 'text/plain')
    elif layer == 'temperature_vol':
        model_filename = join(base_path, 'Temperature.nii.gz')
        print '[MeshFactory] delivered temperature_vol_demo product'
        return (open(model_filename, "r"), 'text/plain')

    TextureResolutionPerTile = 256
    GeometryResolutionPerTile = 16
    MaximalCurtainsPerResponse = 32

    # all intermediate files go into a private temporary directory
    output_dir = tempfile.mkdtemp(prefix='tmp_meshfactory_')
    print "creating %s" % output_dir

    # create new collada scene
    mesh = Collada()
    geom_nodes = []  # list for all the curtain parts

    # debug response generation!
    response = []
    result_set = []
    bbox = Polygon.from_bbox(tuple(decoder.boundingBox))
    mybbox = BoundingBox(decoder.boundingBox[0], decoder.boundingBox[1], decoder.boundingBox[2], decoder.boundingBox[3])

    # use a minimal step size of (diagonal of bbox) / GeometryResolutionPerTile
    minimalStepSize = v2dp(decoder.boundingBox[0], decoder.boundingBox[1], 0.0).great_circle_distance(
        v2dp(decoder.boundingBox[2], decoder.boundingBox[3], 0.0)) / GeometryResolutionPerTile
    #response.append( "minimal step size: %6.4f<br>" % minimalStepSize )

    timesubset = Subsets([Trim("t", decoder.time.low, decoder.time.high)])  # trim to requested time interval

    if bl and bl.contains_vertical_curtains:
        print "Curtain creation"
        # iterate over all "curtain" coverages
        for l in decoder.layer:
            layer = models.DatasetSeries.objects.get(identifier=l)
            #pdb.set_trace()
            for coverage in timesubset.filter(models.CurtainCoverage.objects.filter(collections__in=[layer.pk]).filter(footprint__intersects=bbox)):
                logger.info('Creating Curtain data for coverage: %s' % (coverage.identifier,))
                # write the ID of the coverage
                response.append("%s: " % coverage.identifier)
                # retrieve the data item pointing to the raster data
                raster_item = coverage.data_items.get(
                    semantic__startswith="bands"
                )
                in_name = raster_item.location  # texture file name
                # construct the texture names for conversion
                name = str(uuid4())  # generate a REALLY unique identifier
                out_name = os.path.join(output_dir, name + '.png')
                textureImage = Image.open(in_name)
                # textureImage = Image.open('/vagrant/shares/data/UV_map.png') # debug texture
                print "Texture=" + in_name
                (width, height) = textureImage.size
                if textureImage.mode == 'F':
                    # still a float image: (we expect 8bit)
                    # map a subrange of a float image to an 8 bit PNG
                    i = np.array(list(textureImage.getdata())).reshape(textureImage.size[::-1])
                    g = np.divide(np.subtract(i, min_level), (max_level - min_level) / 255.0)
                    g[g < 0] = 0
                    textureImage = Image.fromarray(g.astype(np.uint8), 'L')
                # open it with GDAL to get the width/height of the raster
                # ds = gdal.Open(raster_item.location)
                # width=ds.RasterXSize
                # height=ds.RasterYSize

                # retrieve the data item pointing to the height values/levels
                height_values_item = coverage.data_items.get(
                    semantic__startswith="heightvalues"
                )
                # retrieve the data item pointing to the coordinates
                gcps_item = coverage.data_items.get(
                    semantic__startswith="gcps"
                )
                # load the json files to lists
                with open(height_values_item.location) as f:
                    height_values = json.load(f)
                heightLevelsList = np.array(height_values)
                with open(gcps_item.location) as f:
                    gcps = json.load(f)
                coords = np.array(gcps)
                X = coords[:, 0]
                Y = coords[:, 1]
                # write out the coordinates
                #print "%d coordinates (Xmin: %d, Xmax: %d, Ymin: %d, Ymax: %d), %d height levels (min: %d, max: %d)<br/>" % (len(gcps), X.min(), X.max(), Y.min(), Y.max(), len(height_values), heightLevelsList.min(), heightLevelsList.max())

                # now build the geometry:
                # stuff curtain piece footprint in polyline
                polyline = list()
                [x, y, u, v] = gcps[0]
                previous_position = v2dp(x, y, u)
                polyline.append(v2dp(x, y, u))  # insert first ColRow entry
                for [x, y, u, v] in gcps[1:-1]:  # loop over inner ColRows
                    position = v2dp(x, y, u)
                    if position.great_circle_distance(previous_position) >= minimalStepSize:
                        polyline.append(position)  # append only ColRows with minimal step size
                        previous_position = position
                #[u, v, x, y] = UVXY[-1]
                [x, y, u, v] = gcps[-1]
                polyline.append(v2dp(x, y, u))  # insert last ColRow entry
                #print "- %d nodes, length curtain = %6.3f" % (
                #    len(polyline), polyline[0].great_circle_distance(polyline[-1]))

                # clip curtain on bounding box
                polylist = clipPolylineBoundingBoxOnSphere(polyline, mybbox)
                u_min = sys.float_info.max
                u_max = -sys.float_info.max
                #print " width=%d"%width
                if len(polylist) > 0:
                    # create a unique material for each texture
                    matnode = make_emissive_material(mesh, "Material-" + name, name + ".png")
                    # determine texture coordinate U range
                    for pl in polylist:
                        if len(pl) > 0:
                            # now build the geometry
                            t = trianglestrip()
                            for p in pl:
                                u = p.u
                                u_min = min(u_min, u)
                                u_max = max(u_max, u)
                    #print "U: min=%f, max=%f"%(u_min, u_max)
                    u_scale = u_max - u_min
                    # convert all clipped polylines to triangle strips
                    n = 0
                    if (u_scale > sys.float_info.min):
                        for pl in polylist:
                            if len(pl) > 0:
                                # now build the geometry
                                t = trianglestrip()
                                for p in pl:
                                    x = p.x
                                    y = p.y
                                    u = ((p.u - u_min) / u_scale)  # normalize u to range [0,1]
                                    print ("U(%5.2f %5.2f) X, Y=(%5.2f,%5.2f), " % (p.u, u, x, y))
                                    # bottom and (exaggerated) top vertex of the curtain
                                    point = geocoord.fromGeoTo3D(np.array((x, y, heightLevelsList.min())))
                                    t.add_point(point, [u, 0], [0, 0, 1])
                                    point = geocoord.fromGeoTo3D(np.array((x, y, heightLevelsList.max() * exaggeration)))
                                    t.add_point(point, [u, 1], [0, 0, 1])
                                n = n + 1
                                # put everything in a geometry node
                                geomnode = t.make_geometry(
                                    mesh, "Strip-%d-" % n + name,
                                    # return time interval as meta data appended in geometry id
                                    "%s-%s_%s" % (name, coverage.begin_time.isoformat(), coverage.end_time.isoformat()),
                                    matnode)  # all these pieces have the same material
                                geom_nodes.append(geomnode)
                        # now crop the image to the resolution we need:
                        textureImage = textureImage.crop((int(round(u_min)), 0, int(round(u_max)) + 1, height))
                        # and resize it to the maximum allowed tile size
                        (width, height) = textureImage.size
                        if width > TextureResolutionPerTile:
                            height = float(height) * float(TextureResolutionPerTile) / float(width)
                            width = float(TextureResolutionPerTile)
                        if height > TextureResolutionPerTile:
                            height = float(TextureResolutionPerTile)
                            width = float(width) * float(TextureResolutionPerTile) / float(height)
                        textureImage = textureImage.resize((int(round(width)), int(round(height))), Image.ANTIALIAS)
                        textureImage.save(out_name, "PNG")
                        print 'texture %s resized to w=%5.2f h=%5.2f' % (out_name, width, height)

        # put all the geometry nodes in a scene node
        node = scene.Node("node0", children=geom_nodes)
        myscene = scene.Scene("myscene", [node])
        mesh.scenes.append(myscene)
        mesh.scene = myscene

        id = str(uuid4())
        out_file_dae = os.path.join(output_dir, id + '.dae')
        out_file_gltf = os.path.join(output_dir, id + '.json')
        out_file_obj = os.path.join(output_dir, id + '.obj')

        # now write the collada file to a temporary location
        mesh.write(out_file_dae)
        ##print("Format = '%s'"%decoder.format)
        logger.info('Creating response data for curtain in format: %s' % (decoder.format,))
        if decoder.format == "model/obj":
            # and convert it to obj
            converter_output = os.popen(obj_converter_path + out_file_dae + " " + out_file_obj).read()
            print("convert from %s to %s:" % (out_file_dae, out_file_obj))
        else:
            # or convert it to glTF
            converter_output = os.popen(dae_converter_path + " -f " + out_file_dae + " -o " + out_file_gltf).read()
            print("convert from %s to %s:" % (out_file_dae, out_file_gltf))
        print converter_output
        #pdb.set_trace()
        #response.append(converter_path+" -f "+out_file_dae+" -o "+out_file_gltf)
        #response.append("<h3>converter output</h3><pre>")
        #response.append(converter_output+"</pre>")
        os.remove(out_file_dae)  # we do not need the collada file anymore

        # now put all files generated by the converter in the multipart response
        outfiles = glob.glob(output_dir + '/*.*')
        for of in outfiles:
            print "attaching file: ", of
            contenttype = ""
            if of.endswith('.obj'):
                contenttype = "model/obj"
            elif of.endswith('.mtl'):
                contenttype = "text/plain"
            elif of.endswith('.png'):
                contenttype = "image/png"
            else:
                contenttype = "application/octet-stream"
            result_set.append(ResultFile(of, filename=os.path.split(of)[1], content_type=contenttype))
        logger.info('Returning curtain response')
        response = to_http_response(result_set)

    elif bl and bl.contains_volumes:
        print "Volumes!"
        # iterate over all "volume" coverages
        result = []
        for l in decoder.layer:
            layer = models.DatasetSeries.objects.get(identifier=l)
            #pdb.set_trace()
            for coverage in timesubset.filter(models.CubeCoverage.objects.filter(collections__in=[layer.pk]).filter(footprint__intersects=bbox)):
                #for coverage in models.CubeCoverage.objects.filter(collections__in=[layer.pk]).filter(footprint__intersects=bbox):
                # retrieve the data item pointing to the raster data
                raster_item = coverage.data_items.get(
                    semantic__startswith="bands"
                )
                in_name = raster_item.location  # texture file name
                print("ID=%s, Name=%s " % (coverage.identifier, in_name))
                id = str(uuid4())
                out_file_nii = os.path.join(output_dir, id + '.nii.gz')
                # each cube coverage becomes one NIfTI volume
                convert_GeoTIFF_2_NiFTi(coverage, in_name, out_file_nii, decoder.boundingBox, decoder.crs)
                #pdb.set_trace()
                result.append(ResultFile(out_file_nii, content_type='application/x-nifti'))
        response = to_http_response(result)

    elif decoder.format == "model/nii-gz":
        print "2D Volume creation"
        # iterate over all "volume" coverages
        result = []
        for l in decoder.layer:
            layer = models.DatasetSeries.objects.get(identifier=l)
            coverage_collection = []
            id = str(uuid4())
            out_file_nii = os.path.join(output_dir, id + '.nii.gz')
            for coverage in timesubset.filter(models.RectifiedDataset.objects.filter(collections__in=[layer.pk]).filter(footprint__intersects=bbox)):
                #retrieve the data item pointing to the raster data
                raster_item = coverage.data_items.get(
                    semantic__startswith="bands"
                )
                in_name = raster_item.location  # texture file name
                coverage_collection.append((coverage, in_name))
            # stack all matching 2D datasets of this series into one NIfTI file
            convert_collection_GeoTIFF_2_NiFTi(coverage_collection, out_file_nii, decoder.boundingBox, decoder.crs)
            result.append(ResultFile(out_file_nii, content_type='application/x-nifti'))
        response = to_http_response(result)

    print "removing %s" % output_dir
    shutil.rmtree(output_dir)  # remove temp directory
    return response  # return response
def subsets(self):
    """Build the spatial trim subsets from the stored bounding box and CRS."""
    x_trim = Trim("x", self._bbox[0], self._bbox[2])
    y_trim = Trim("y", self._bbox[1], self._bbox[3])
    return Subsets((x_trim, y_trim), crs=self._crs)
def dispatch_wcs_get_coverage(request, config_client):
    """Translate a WCS 2.0 GetCoverage request into an MDI process_image call.

    Returns a ``(result, format)`` tuple, where ``result`` is whatever the
    MDI client's ``process_image`` yields.
    """
    # KVP decoding for GET, XML decoding otherwise
    if request.method == 'GET':
        decoder = WCS20GetCoverageKVPDecoder(request.query)
    else:
        decoder = WCS20GetCoverageXMLDecoder(request.body)

    coverage_id = decoder.coverage_id
    # coverage ids follow the "<dataset>__<datestr>" convention
    dataset_name, _, datestr = coverage_id.partition('__')
    dataset = config_client.get_dataset(dataset_name)
    coverage = get_coverage(config_client, coverage_id, dataset_name, datestr)

    crs = decoder.subsettingcrs or DEFAULT_CRS
    # extract authority and code from a ".../<auth>/<version>/<code>" CRS URL
    auth, code = crs.split('/')[-3::2]
    auth = auth.upper()
    code = int(code)
    crs_short = f'{auth}:{code}'
    crs_bounds = SUPPORTED_CRSS[crs_short]

    # TODO: collect parameters
    subsets = Subsets(decoder.subsets)

    # calculate BBox
    x_bounds = None
    y_bounds = None
    if not subsets.has_x:
        raise Exception('No subset for X dimension provided')
    if not subsets.has_y:
        raise Exception('No subset for Y dimension provided')

    for subset in subsets:
        # slices carry a `value` attribute; only trims are supported
        if hasattr(subset, 'value'):
            raise Exception('Slicing is not supported')
        # open-ended trims fall back to the full CRS extent
        if subset.is_x:
            x_bounds = (
                subset.low if subset.low is not None else crs_bounds[0],
                subset.high if subset.high is not None else crs_bounds[2])
        if subset.is_y:
            y_bounds = (
                subset.low if subset.low is not None else crs_bounds[1],
                subset.high if subset.high is not None else crs_bounds[3])

    bbox = (x_bounds[0], y_bounds[0], x_bounds[1], y_bounds[1])

    # TODO: outputcrs not supported?

    # rangesubset
    all_bands = dataset['bands']
    rangesubset = decoder.rangesubset
    if rangesubset:
        # resolve band names (or (start, end) name pairs) to indices
        indices = []
        for rsub in rangesubset:
            if not isinstance(rsub, str):
                indices.append(
                    (all_bands.index(rsub[0]), all_bands.index(rsub[1])))
            else:
                indices.append(all_bands.index(rsub))
        bands = []
        for index in indices:
            if isinstance(index, int):
                bands.append(all_bands[index])
            else:
                start, end = index
                if start <= end:
                    end += 1
                else:
                    end -= 1
                # NOTE(review): for a descending interval (start > end) this
                # slice has no negative step and yields an empty list --
                # confirm whether reversed ranges are meant to be supported.
                bands.extend(all_bands[start:end])
    else:
        bands = all_bands

    # scaling
    # TODO: maybe make this optional and also support scalefactor
    width = None
    height = None
    # native size can only be derived from the grid in the default CRS
    if crs == DEFAULT_CRS:
        width = round(abs((bbox[2] - bbox[0]) / coverage.grid.offsets[0]))
        height = round(abs((bbox[3] - bbox[1]) / coverage.grid.offsets[1]))

    for scale in decoder.scalesize:
        if scale.axis in x_axes:
            width = scale.size
        elif scale.axis in y_axes:
            height = scale.size
        else:
            raise Exception('invalid scalesize axis')

    if decoder.scalefactor is not None:
        if width is not None:
            width = width * decoder.scalefactor
        if height is not None:
            height = height * decoder.scalefactor

    for scale in decoder.scaleaxes:
        if width is not None and scale.axis in x_axes:
            width = width * scale.scale
        elif height is not None and scale.axis in y_axes:
            height = height * scale.scale
        else:
            raise Exception('invalid scale axis')

    # TODO: scaleextent

    if width is None:
        raise Exception('No size for X dimension given')
    elif height is None:
        raise Exception('No size for Y dimension given')

    # get the evalscript for the given layer name and style and get the
    # defaults for the datasource
    evalscript, datasource = config_client.get_evalscript_and_defaults(
        dataset_name, None, bands, None, False, visual=False,
    )

    frmt = decoder.format or 'image/tiff'
    if frmt not in SUPPORTED_FORMATS:
        raise Exception(f'Format {frmt} is not supported')

    # send a process request to the MDI
    mdi_client = config_client.get_mdi(dataset_name)
    return mdi_client.process_image(
        sources=[datasource],
        bbox=bbox,
        crs=crs,
        width=width,
        height=height,
        format=frmt,
        evalscript=evalscript,
        time=[coverage.begin_time, coverage.end_time],
        upsample=decoder.interpolation,
        downsample=decoder.interpolation,
    ), frmt
def execute(self, collections, begin_time, end_time, coord_list, srid):
    """ The main execution function for the process.

    Samples the first band of every matching coverage at the requested
    coordinates and returns the values as CSV rows ("id", "time", "val").
    """
    eo_ids = collections.split(',')

    containment = "overlaps"

    # temporal subset covering the requested interval
    subsets = Subsets((Trim("t", begin_time, end_time),))

    # NOTE(review): str.split always yields at least one element, so this
    # branch is effectively unreachable (and a bare `raise` with no active
    # exception would itself fail).
    if len(eo_ids) == 0:
        raise

    # fetch a list of all requested EOObjects
    available_ids = models.EOObject.objects.filter(
        identifier__in=eo_ids
    ).values_list("identifier", flat=True)

    # match the requested EOIDs against the available ones. If any are
    # requested, that are not available, raise and exit.
    failed = [eo_id for eo_id in eo_ids if eo_id not in available_ids]
    if failed:
        raise NoSuchDatasetSeriesOrCoverageException(failed)

    collections_qs = subsets.filter(models.Collection.objects.filter(
        identifier__in=eo_ids
    ), containment="overlaps")

    # create a set of all indirectly referenced containers by iterating
    # recursively. The containment is set to "overlaps", to also include
    # collections that might have been excluded with "contains" but would
    # have matching coverages inserted.
    def recursive_lookup(super_collection, collection_set):
        sub_collections = models.Collection.objects.filter(
            collections__in=[super_collection.pk]
        ).exclude(
            pk__in=map(lambda c: c.pk, collection_set)
        )
        sub_collections = subsets.filter(sub_collections, "overlaps")

        # Add all to the set
        collection_set |= set(sub_collections)

        for sub_collection in sub_collections:
            recursive_lookup(sub_collection, collection_set)

    collection_set = set(collections_qs)
    for collection in set(collection_set):
        recursive_lookup(collection, collection_set)

    collection_pks = map(lambda c: c.pk, collection_set)

    # Get all either directly referenced coverages or coverages that are
    # within referenced containers. Full subsetting is applied here.
    coverages_qs = subsets.filter(models.Coverage.objects.filter(
        Q(identifier__in=eo_ids) | Q(collections__in=collection_pks)
    ), containment=containment)

    # parse the "x1,y1;x2,y2;..." coordinate list into a MultiPoint
    coordinates = coord_list.split(';')
    points = []
    for coordinate in coordinates:
        x, y = coordinate.split(',')  # parameter parsing
        point = Point(float(x), float(y))
        point.srid = srid
        points.append(point)

    points = MultiPoint(points)
    points.srid = srid

    eo_objects = coverages_qs.filter(
        footprint__intersects=points
    ).order_by('begin_time')

    output = StringIO()
    writer = csv.writer(output, quoting=csv.QUOTE_NONE)
    header = ["id", "time", "val"]
    writer.writerow(header)

    for eo_object in eo_objects:
        coverage = eo_object.cast()
        layer = eo_object.collections.all()[0]
        time = isoformat(coverage.begin_time)
        data_item = coverage.data_items.get(semantic__startswith="bands")
        filename = connect(data_item)
        ds = gdal.Open(filename)

        if ds.GetProjection():
            gt = ds.GetGeoTransform()
            sr = SpatialReference(ds.GetProjection())
            # BUGFIX: the transformed points were previously computed but
            # never used -- pixel indices must be derived from coordinates
            # in the dataset's own projection, not the request SRID.
            points_px = points.transform(sr, clone=True)
        else:
            # no projection available: derive a geotransform from the
            # WGS84 footprint extent of the coverage
            bbox = coverage.footprint.extent
            gt = [
                bbox[0], (bbox[2] - bbox[0]) / ds.RasterXSize, 0,
                bbox[3], 0, (bbox[1] - bbox[3]) / ds.RasterYSize
            ]
            points_px = points

        for index, point in enumerate(points, start=1):
            # containment test in the footprint's coordinate system
            if not coverage.footprint.contains(point):
                continue
            point_px = points_px[index - 1]
            # Works only if gt[2] and gt[4] equal zero!
            px = int((point_px[0] - gt[0]) / gt[1])  # x pixel
            py = int((point_px[1] - gt[3]) / gt[5])  # y pixel
            pixelVal = ds.GetRasterBand(1).ReadAsArray(px, py, 1, 1)[0, 0]
            # skip the nodata value
            if pixelVal != -9999:
                writer.writerow([str(layer.identifier), time, pixelVal])

    return {"processed": output.getvalue()}
def render(self, params):
    """Render a coverage with mapserver and return the result set.

    Builds a mapserver map for the coverage, dispatches the request through
    a matching layer connector and, for multipart media types, replaces the
    XML part with an EO coverage description.
    """
    # get coverage related stuff
    coverage = params.coverage

    # ReferenceableDataset are not supported in WCS < 2.0
    if issubclass(coverage.real_type, models.ReferenceableDataset):
        raise NoSuchCoverageException((coverage.identifier,))

    data_items = self.data_items_for_coverage(coverage)
    range_type = coverage.range_type
    bands = list(range_type)
    subsets = Subsets(params.subsets)

    # create and configure map object
    map_ = self.create_map()

    # configure outputformat
    native_format = self.get_native_format(coverage, data_items)
    if get_format_by_mime(native_format) is None:
        # unknown native format: fall back to GeoTIFF
        native_format = "image/tiff"
    frmt = params.format or native_format
    if frmt is None:
        raise Exception("format could not be determined")
    mime_type, frmt = split_format(frmt)

    # TODO: imagemode
    imagemode = ms.gdalconst_to_imagemode(bands[0].data_type)
    # timestamped basename keeps concurrent outputs distinct
    time_stamp = datetime.now().strftime("%Y%m%d%H%M%S")
    basename = "%s_%s" % (coverage.identifier, time_stamp)
    of = create_outputformat(mime_type, frmt, imagemode, basename)
    map_.appendOutputFormat(of)
    map_.setOutputFormat(of)

    # TODO: use layer factory here
    layer = self.layer_for_coverage(coverage, native_format, params.version)
    map_.insertLayer(layer)

    # find the first connector that can attach these data items to the layer
    for connector in self.connectors:
        if connector.supports(data_items):
            break
    else:
        raise Exception("Could not find applicable layer connector.")

    try:
        connector.connect(coverage, data_items, layer)
        # create request object and dispatch it agains the map
        request = ms.create_request(params)
        request.setParameter("format", mime_type)
        raw_result = ms.dispatch(map_, request)
    finally:
        # perform any required layer related cleanup
        connector.disconnect(coverage, data_items, layer)

    result_set = result_set_from_raw_data(raw_result)

    # for multipart responses, replace the XML part with an EO description
    if getattr(params, "mediatype", None) in ("multipart/mixed", "multipart/related"):
        encoder = WCS20EOXMLEncoder()
        result_set[0] = ResultBuffer(
            encoder.serialize(
                encoder.alter_rectified_dataset(
                    coverage,
                    getattr(params, "http_request", None),
                    etree.parse(result_set[0].data_file).getroot(),
                    subsets.bounding_polygon(coverage),
                )
            ),
            encoder.content_type,
        )

    # "default" response
    return result_set
def execute(self, collection, begin_time, end_time):
    """The main execution function for the process.

    Collects every coverage of *collection* (including coverages of any
    recursively nested sub-collection) whose time span overlaps the
    interval [*begin_time*, *end_time*] and returns a CSV document with
    the columns ``starttime``, ``endtime``, ``bbox`` and ``identifier``.
    """
    eo_ids = [collection]

    containment = "overlaps"
    subsets = Subsets((Trim("t", begin_time, end_time),))

    if len(eo_ids) == 0:
        # NOTE(review): dead branch (eo_ids always has one element) and a
        # bare `raise` outside an except block would raise RuntimeError;
        # kept to preserve the original behavior.
        raise

    # fetch a list of all requested EOObjects; materialized into a set so
    # the membership tests below run in memory instead of issuing one
    # database query per requested identifier
    available_ids = set(
        models.EOObject.objects.filter(
            identifier__in=eo_ids
        ).values_list("identifier", flat=True)
    )

    # match the requested EOIDs against the available ones. If any are
    # requested, that are not available, raise and exit.
    failed = [eo_id for eo_id in eo_ids if eo_id not in available_ids]
    if failed:
        raise NoSuchDatasetSeriesOrCoverageException(failed)

    collections_qs = subsets.filter(models.Collection.objects.filter(
        identifier__in=eo_ids
    ), containment="overlaps")

    # create a set of all indirectly referenced containers by iterating
    # recursively. The containment is set to "overlaps", to also include
    # collections that might have been excluded with "contains" but would
    # have matching coverages inserted.
    def recursive_lookup(super_collection, collection_set):
        sub_collections = models.Collection.objects.filter(
            collections__in=[super_collection.pk]
        ).exclude(
            pk__in=[c.pk for c in collection_set]
        )
        sub_collections = subsets.filter(sub_collections, "overlaps")

        # Add all to the set
        collection_set |= set(sub_collections)

        for sub_collection in sub_collections:
            recursive_lookup(sub_collection, collection_set)

    collection_set = set(collections_qs)
    for collection in set(collection_set):
        recursive_lookup(collection, collection_set)

    collection_pks = [c.pk for c in collection_set]

    # Get all either directly referenced coverages or coverages that are
    # within referenced containers. Full subsetting is applied here.
    coverages_qs = subsets.filter(models.Coverage.objects.filter(
        Q(identifier__in=eo_ids) | Q(collections__in=collection_pks)
    ), containment=containment)

    output = StringIO()
    writer = csv.writer(output, quoting=csv.QUOTE_ALL)
    header = ["starttime", "endtime", "bbox", "identifier"]
    writer.writerow(header)

    for coverage in coverages_qs:
        starttime = coverage.begin_time
        endtime = coverage.end_time
        identifier = coverage.identifier
        bbox = coverage.extent_wgs84
        writer.writerow(
            [isoformat(starttime), isoformat(endtime), bbox, identifier]
        )

    return output.getvalue()
def execute(self, collections, begin_time, end_time, bbox, crs, unit):
    """The main execution function for the process.

    Looks up all coverages of the requested *collections* (and any
    recursively nested sub-collections) that overlap the given time
    interval and intersect *bbox* (a comma-separated ``minx,miny,maxx,maxy``
    string), then builds a difference label between the newest and oldest
    matching coverage.

    Returns:
        dict with a single ``"processed"`` key holding the diff label.
    """
    eo_ids = collections.split(',')

    containment = "overlaps"
    subsets = Subsets((Trim("t", begin_time, end_time),))

    if len(eo_ids) == 0:
        # NOTE(review): bare `raise` outside an except block raises
        # RuntimeError; kept to preserve the original behavior.
        raise

    # fetch a list of all requested EOObjects; materialized into a set so
    # the membership tests below run in memory instead of issuing one
    # database query per requested identifier
    available_ids = set(
        models.EOObject.objects.filter(
            identifier__in=eo_ids
        ).values_list("identifier", flat=True)
    )

    # match the requested EOIDs against the available ones. If any are
    # requested, that are not available, raise and exit.
    failed = [eo_id for eo_id in eo_ids if eo_id not in available_ids]
    if failed:
        raise NoSuchDatasetSeriesOrCoverageException(failed)

    collections_qs = subsets.filter(models.Collection.objects.filter(
        identifier__in=eo_ids
    ), containment="overlaps")

    # create a set of all indirectly referenced containers by iterating
    # recursively. The containment is set to "overlaps", to also include
    # collections that might have been excluded with "contains" but would
    # have matching coverages inserted.
    def recursive_lookup(super_collection, collection_set):
        sub_collections = models.Collection.objects.filter(
            collections__in=[super_collection.pk]
        ).exclude(
            pk__in=[c.pk for c in collection_set]
        )
        sub_collections = subsets.filter(sub_collections, "overlaps")

        # Add all to the set
        collection_set |= set(sub_collections)

        for sub_collection in sub_collections:
            recursive_lookup(sub_collection, collection_set)

    collection_set = set(collections_qs)
    for collection in set(collection_set):
        recursive_lookup(collection, collection_set)

    collection_pks = [c.pk for c in collection_set]

    # Get all either directly referenced coverages or coverages that are
    # within referenced containers. Full subsetting is applied here.
    coverages_qs = subsets.filter(models.Coverage.objects.filter(
        Q(identifier__in=eo_ids) | Q(collections__in=collection_pks)
    ), containment=containment)

    # BUG FIX: the original used `bbox = map(float, ...)` — a one-shot
    # iterator in Python 3 that was exhausted by `tuple(bbox)` below, so an
    # *empty* iterator was then passed on to `create_diff_label`.
    # Materialize the coordinates once instead.
    bbox = [float(value) for value in bbox.split(',')]
    bbox_poly = Polygon.from_bbox(tuple(bbox))

    coverages_qs = coverages_qs.filter(
        footprint__intersects=bbox_poly
    ).order_by('-begin_time')

    if len(coverages_qs) < 2:
        # NOTE(review): bare `raise` -> RuntimeError; preserved as-is.
        raise

    # newest coverage first (descending begin_time), oldest last
    return {
        "processed": create_diff_label(
            self,
            coverages_qs[0].identifier,
            coverages_qs[len(coverages_qs) - 1].identifier,
            bbox, 1, crs, unit
        )
    }
def handle(self, request):
    """Handle a WCS 2.0 DescribeEOCoverageSet request.

    Resolves the requested EO ids into Collections, Mosaics, Products and
    Coverages, applies the trim subsets, limits the result to the allowed
    count and returns the serialized EO coverage set description together
    with its content type.

    Raises:
        InvalidSubsettingException: when the subset parameters are invalid.
        NoSuchDatasetSeriesOrCoverageException: when any requested EO id
            does not exist.
    """
    decoder = self.get_decoder(request)
    eo_ids = decoder.eo_ids

    containment = decoder.containment
    if not containment:
        containment = "overlaps"

    # cap the requested count by the service-wide default constraint
    count_default = self.constraints["CountDefault"]
    count = decoder.count
    if count_default is not None:
        count = min(count, count_default)

    try:
        subsets = Subsets(
            decoder.subsets,
            crs="http://www.opengis.net/def/crs/EPSG/0/4326",
            allowed_types=Trim
        )
    except ValueError as e:
        raise InvalidSubsettingException(str(e))

    # check whether the DatasetSeries and CoverageDescriptions sections are
    # included
    inc_dss_section = decoder.section_included("DatasetSeriesDescriptions")
    inc_cov_section = decoder.section_included("CoverageDescriptions")

    if len(eo_ids) == 0:
        # NOTE(review): bare `raise` outside an except block raises
        # RuntimeError; presumably a placeholder for a missing-parameter
        # exception — confirm intended behavior.
        raise

    # fetch the objects directly referenced by EOID
    eo_objects = models.EOObject.objects.filter(
        identifier__in=eo_ids
    ).select_subclasses()

    # check if all EOIDs are available
    available_ids = set(eo_object.identifier for eo_object in eo_objects)
    failed = [
        eo_id for eo_id in eo_ids if eo_id not in available_ids
    ]

    # fail when some objects are not available
    if failed:
        raise NoSuchDatasetSeriesOrCoverageException(failed)

    # split list of objects into Collections, Products and Coverages
    collections = []
    mosaics = []
    products = []
    coverages = []
    for eo_object in eo_objects:
        if isinstance(eo_object, models.Collection):
            collections.append(eo_object)
        elif isinstance(eo_object, models.Mosaic):
            mosaics.append(eo_object)
        elif isinstance(eo_object, models.Product):
            products.append(eo_object)
        elif isinstance(eo_object, models.Coverage):
            coverages.append(eo_object)

    filters = subsets.get_filters(containment=containment)

    # get a QuerySet of all dataset series, directly or indirectly referenced
    all_dataset_series_qs = models.EOObject.objects.filter(
        Q(  # directly referenced Collections
            collection__isnull=False,
            identifier__in=[
                collection.identifier for collection in collections
            ],
        ) |
        Q(  # directly referenced Products
            product__isnull=False,
            identifier__in=[product.identifier for product in products],
        ) |
        Q(  # Products within Collections
            product__isnull=False,
            product__collections__in=collections,
            **filters
        )
    )

    if inc_dss_section:
        dataset_series_qs = all_dataset_series_qs[:count]
    else:
        dataset_series_qs = models.EOObject.objects.none()

    # Allow metadata queries on coverage itself or on the
    # parent product if available: for each subset filter, match either the
    # coverage's own field or — when that field is null — the same field on
    # the parent product
    parent_product_filters = []
    for key, value in filters.items():
        prop = key.partition('__')[0]
        parent_product_filters.append(
            Q(**{
                key: value
            }) | Q(**{
                '%s__isnull' % prop: True,
                'coverage__parent_product__%s' % key: value
            })
        )

    # get a QuerySet for all Coverages, directly or indirectly referenced
    all_coverages_qs = models.EOObject.objects.filter(
        *parent_product_filters
    ).filter(
        Q(  # directly referenced Coverages
            identifier__in=[
                coverage.identifier for coverage in coverages
            ]
        ) |
        Q(  # Coverages within directly referenced Products
            coverage__parent_product__in=products,
        ) |
        Q(  # Coverages within indirectly referenced Products
            coverage__parent_product__collections__in=collections
        ) |
        Q(  # Coverages within directly referenced Collections
            coverage__collections__in=collections
        ) |
        Q(  # Coverages within directly referenced Collections
            coverage__mosaics__in=mosaics
        ) |
        Q(  # directly referenced Mosaics
            identifier__in=[
                mosaic.identifier for mosaic in mosaics
            ]
        ) |
        Q(  # Mosaics within directly referenced Collections
            mosaic__collections__in=collections
        )
    ).select_subclasses(models.Coverage, models.Mosaic)

    all_coverages_qs = all_coverages_qs.order_by('identifier')

    # check if the CoverageDescriptions section is included. If not, use an
    # empty queryset
    if inc_cov_section:
        coverages_qs = all_coverages_qs
    else:
        coverages_qs = models.Coverage.objects.none()

    # limit coverages according to the number of dataset series
    coverages_qs = coverages_qs[:max(
        0, count - dataset_series_qs.count() - len(mosaics)
    )]

    # compute the number of all items that would match
    number_matched = all_coverages_qs.count() + all_dataset_series_qs.count()

    # create an encoder and encode the result
    encoder = WCS20EOXMLEncoder()
    return (
        encoder.serialize(
            encoder.encode_eo_coverage_set_description(
                dataset_series_set=[
                    objects.DatasetSeries.from_model(eo_object)
                    for eo_object in dataset_series_qs
                ],
                coverages=[
                    objects.from_model(coverage)
                    for coverage in coverages_qs
                ],
                number_matched=number_matched
            ),
            pretty_print=True
        ),
        encoder.content_type
    )
def get_subsets(self, decoder):
    """Build the ``Subsets`` container from the decoded request,
    using the requested subsetting CRS."""
    requested = decoder.subsets
    subsetting_crs = decoder.subsettingcrs
    return Subsets(requested, crs=subsetting_crs)
def handle(self, request):
    """Handle a WCS 2.0 coverage-set download request.

    Resolves the requested EO ids (including recursively nested
    collections), renders every matched coverage and streams all results
    back as a single package file (e.g. a ZIP/TAR, depending on the
    package writer chosen for the requested format).

    Raises:
        InvalidSubsettingException: when the subset parameters are invalid.
        NoSuchDatasetSeriesOrCoverageException: when any requested EO id
            does not exist.
    """
    decoder = self.get_decoder(request)
    eo_ids = decoder.eo_ids

    format, format_params = decoder.format
    # (sic) method name with typo is defined elsewhere in the project
    writer = self.get_pacakge_writer(format, format_params)

    containment = decoder.containment

    # NOTE(review): `count` is no longer used since the limiting logic
    # below was disabled (the commented-out block was removed); the
    # constraint lookup is kept so behavior is unchanged.
    count_default = self.constraints["CountDefault"]
    count = decoder.count
    if count_default is not None:
        count = min(count, count_default)

    try:
        subsets = Subsets(
            decoder.subsets,
            crs="http://www.opengis.net/def/crs/EPSG/0/4326",
            allowed_types=Trim
        )
    except ValueError as e:
        raise InvalidSubsettingException(str(e))

    if len(eo_ids) == 0:
        # NOTE(review): bare `raise` outside an except block raises
        # RuntimeError; kept to preserve the original behavior.
        raise

    # fetch a list of all requested EOObjects; materialized into a set so
    # the membership tests below run in memory instead of issuing one
    # database query per requested identifier
    available_ids = set(
        models.EOObject.objects.filter(
            identifier__in=eo_ids
        ).values_list("identifier", flat=True)
    )

    # match the requested EOIDs against the available ones. If any are
    # requested, that are not available, raise and exit.
    failed = [eo_id for eo_id in eo_ids if eo_id not in available_ids]
    if failed:
        raise NoSuchDatasetSeriesOrCoverageException(failed)

    collections_qs = subsets.filter(
        models.Collection.objects.filter(identifier__in=eo_ids),
        containment="overlaps")

    # create a set of all indirectly referenced containers by iterating
    # recursively. The containment is set to "overlaps", to also include
    # collections that might have been excluded with "contains" but would
    # have matching coverages inserted.
    def recursive_lookup(super_collection, collection_set):
        sub_collections = models.Collection.objects.filter(
            collections__in=[super_collection.pk]).exclude(
            pk__in=[c.pk for c in collection_set])
        sub_collections = subsets.filter(sub_collections, "overlaps")

        # Add all to the set
        collection_set |= set(sub_collections)

        for sub_collection in sub_collections:
            recursive_lookup(sub_collection, collection_set)

    collection_set = set(collections_qs)
    for collection in set(collection_set):
        recursive_lookup(collection, collection_set)

    collection_pks = [c.pk for c in collection_set]

    # Get all either directly referenced coverages or coverages that are
    # within referenced containers. Full subsetting is applied here.
    coverages_qs = models.Coverage.objects.filter(
        Q(identifier__in=eo_ids) | Q(collections__in=collection_pks))
    coverages_qs = subsets.filter(coverages_qs, containment=containment)

    # (removed: a large block of commented-out count/limit logic together
    # with the unused `coverages_no_limit_qs` alias it referred to)

    # TODO: if containment is "within" we need to check all collections
    # again
    if containment == "within":
        collection_set = filter(lambda c: subsets.matches(c), collection_set)

    # iterate over everything that has been retrieved and collect the
    # coverages to be rendered into the package
    coverages = []
    for eo_object in chain(coverages_qs, collection_set):
        if issubclass(eo_object.real_type, models.Coverage):
            coverages.append(eo_object.cast())

    # create a temporary file for the package; close the descriptor right
    # away — the package writer reopens the path itself
    fd, pkg_filename = tempfile.mkstemp()
    os.close(fd)

    package = writer.create_package(pkg_filename, format, format_params)

    for coverage in coverages:
        params = self.get_params(coverage, decoder, request)
        renderer = self.get_renderer(params)
        result_set = renderer.render(params)

        # guard against duplicate file names within one coverage
        all_filenames = set()
        for result_item in result_set:
            if not result_item.filename:
                ext = mimetypes.guess_extension(result_item.content_type)
                filename = coverage.identifier + ext
            else:
                filename = result_item.filename

            if filename in all_filenames:
                continue  # TODO: create new filename

            all_filenames.add(filename)
            location = "%s/%s" % (coverage.identifier, filename)
            writer.add_to_package(
                package, result_item.data_file, result_item.size, location
            )

    mime_type = writer.get_mime_type(package, format, format_params)
    ext = writer.get_file_extension(package, format, format_params)
    writer.cleanup(package)

    # stream the finished package back to the client
    response = StreamingHttpResponse(
        tempfile_iterator(pkg_filename), mime_type
    )
    response["Content-Disposition"] = 'inline; filename="ows%s"' % ext
    response["Content-Length"] = str(os.path.getsize(pkg_filename))

    return response
def get_params(self, coverage, decoder, request):
    """Assemble the WCS 2.0 render parameters for a single coverage."""
    subsets = Subsets(decoder.subsets)
    return WCS20CoverageRenderParams(
        coverage, subsets, http_request=request
    )
def make_subsets(self, begin, end=None):
    """Build a temporal ``Subsets`` object.

    With only *begin* given, a point-in-time ``Slice`` is produced;
    with both *begin* and *end*, a ``Trim`` interval is produced.
    Both arguments are ISO 8601 strings.
    """
    start = parse_iso8601(begin)
    if end is None:
        subset = Slice("t", start)
    else:
        subset = Trim("t", start, parse_iso8601(end))
    return Subsets([subset])