# Shared imports for the snippets below. The app-specific names used in the
# function bodies (models, Subsets, Trim, connect, isoformat, CVDepartment,
# CEDepartment) are assumed to come from the surrounding project and are not
# imported here.
import csv
from StringIO import StringIO  # Python 2; on Python 3 use: from io import StringIO

from django.db.models import Q
from django.contrib.gis.geos import Point, MultiPoint
from django.contrib.gis.gdal import SpatialReference
from osgeo import gdal


def fnGetVDepartment(keyword):
    result = []
    if keyword == "":
        return result
    # Most frequently hit departments first.
    v_depart_list = CVDepartment.objects.filter(
        vname__contains=keyword
    ).order_by('-count')
    for i in v_depart_list:
        # Bump the hit counter for every match.
        i.count = i.count + 1
        i.save()
        vd_entry = {
            'type': 'department',
            'name': i.vname,
            'id': i.pk,
        }
        # Build an empty MultiPoint to collect the child locations
        # (created with a dummy point that is removed immediately).
        p = Point(0, 0)
        mp = MultiPoint(p)
        mp.remove(p)
        attributes_entry = {'url': i.vlink}
        children_list = []
        e_depart_list = CEDepartment.objects.filter(evid__pk__exact=i.pk)
        for j in e_depart_list:
            j.count = j.count + 1
            j.save()
            mp.append(j.ebid.loc)
            children_list.append({'id': j.pk, 'name': j.ename})
        attributes_entry['children'] = children_list
        vd_entry['footprint'] = mp.wkt
        vd_entry['attributes'] = attributes_entry
        result.append(vd_entry)
    return result
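# A minimal usage sketch for fnGetVDepartment, assuming it backs an
# autocomplete-style endpoint. The view name "department_search" and the "q"
# GET parameter are hypothetical, not part of the original code.
import json
from django.http import HttpResponse


def department_search(request):
    keyword = request.GET.get('q', '')
    # Each entry carries a WKT MULTIPOINT footprint plus its child entries.
    entries = fnGetVDepartment(keyword)
    return HttpResponse(json.dumps(entries), content_type='application/json')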
def get_near(self, json, p_lat, p_lng, p_r):
    central_point = Point(p_lng, p_lat)
    # The radius apparently arrives in metres; dividing by 100000.0
    # approximates degrees (1 degree is roughly 100 km), which gets
    # increasingly crude away from the equator.
    circle = central_point.buffer(p_r / 100000.0)
    points = []
    for o in json:
        lat = o['geo']['lat']
        lng = o['geo']['lng']
        if lat is None:
            continue
        points.append(Point(lng, lat))
    mp = MultiPoint(points)
    mp_results = mp.intersection(circle)
    # intersection() collapses to a single Point when only one input point
    # lies inside the circle; normalise both cases to a list of coordinates.
    if mp_results.geom_type == 'Point':
        return [mp_results.coords]
    return [coord for coord in mp_results.coords]
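# A standalone sketch of the buffer-and-intersect technique used by get_near,
# with hardcoded sample records; the /100000.0 degree approximation is the
# same rough metres-to-degrees conversion as above.
records = [
    {'geo': {'lat': 48.20, 'lng': 16.37}},   # near the centre -> kept
    {'geo': {'lat': 40.71, 'lng': -74.00}},  # far away -> dropped
    {'geo': {'lat': None, 'lng': None}},     # no coordinates -> skipped
]
circle = Point(16.35, 48.21).buffer(5000 / 100000.0)  # ~5 km radius
candidates = MultiPoint([
    Point(r['geo']['lng'], r['geo']['lat'])
    for r in records if r['geo']['lat'] is not None
])
print(candidates.intersection(circle).wkt)  # single hit -> POINT (16.37 48.2)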
def execute(self, collections, begin_time, end_time, coord_list, srid):
    """ The main execution function for the process. """
    eo_ids = collections.split(',')
    containment = "overlaps"

    subsets = Subsets((Trim("t", begin_time, end_time),))

    if len(eo_ids) == 0:
        raise ValueError("No collection identifiers given.")

    # fetch a list of all requested EOObjects
    available_ids = models.EOObject.objects.filter(
        identifier__in=eo_ids
    ).values_list("identifier", flat=True)

    # match the requested EO IDs against the available ones; if any are
    # requested that are not available, raise and exit.
    failed = [eo_id for eo_id in eo_ids if eo_id not in available_ids]
    if failed:
        raise NoSuchDatasetSeriesOrCoverageException(failed)

    collections_qs = subsets.filter(models.Collection.objects.filter(
        identifier__in=eo_ids
    ), containment="overlaps")

    # create a set of all indirectly referenced containers by iterating
    # recursively. The containment is set to "overlaps" to also include
    # collections that might have been excluded with "contains" but would
    # have matching coverages inserted.
    def recursive_lookup(super_collection, collection_set):
        sub_collections = models.Collection.objects.filter(
            collections__in=[super_collection.pk]
        ).exclude(
            pk__in=[c.pk for c in collection_set]
        )
        sub_collections = subsets.filter(sub_collections, "overlaps")

        # add all to the set
        collection_set |= set(sub_collections)

        for sub_collection in sub_collections:
            recursive_lookup(sub_collection, collection_set)

    collection_set = set(collections_qs)
    for collection in set(collection_set):
        recursive_lookup(collection, collection_set)

    collection_pks = [c.pk for c in collection_set]

    # get all either directly referenced coverages or coverages that are
    # within referenced containers. Full subsetting is applied here.
    coverages_qs = subsets.filter(models.Coverage.objects.filter(
        Q(identifier__in=eo_ids) | Q(collections__in=collection_pks)
    ), containment=containment)

    # parse the semicolon-separated list of "x,y" coordinate pairs
    points = []
    for coordinate in coord_list.split(';'):
        x, y = coordinate.split(',')
        point = Point(float(x), float(y))
        point.srid = srid
        points.append(point)

    points = MultiPoint(points)
    points.srid = srid

    eo_objects = coverages_qs.filter(
        footprint__intersects=points
    ).order_by('begin_time')

    output = StringIO()
    writer = csv.writer(output, quoting=csv.QUOTE_NONE)
    writer.writerow(["id", "time", "val"])

    for eo_object in eo_objects:
        coverage = eo_object.cast()
        layer = eo_object.collections.all()[0]
        time = isoformat(coverage.begin_time)

        data_item = coverage.data_items.get(semantic__startswith="bands")
        filename = connect(data_item)
        ds = gdal.Open(filename)

        if ds.GetProjection():
            gt = ds.GetGeoTransform()
            sr = SpatialReference(ds.GetProjection())
            # transform a copy of the points into the raster's projection
            points_t = points.transform(sr, clone=True)
        else:
            # no projection: derive a geotransform from the footprint extent
            bbox = coverage.footprint.extent
            gt = [
                bbox[0], (bbox[2] - bbox[0]) / ds.RasterXSize, 0,
                bbox[3], 0, (bbox[1] - bbox[3]) / ds.RasterYSize
            ]
            points_t = points

        for index, (point, point_t) in enumerate(zip(points, points_t), start=1):
            # containment is tested in the footprint's CRS, pixel
            # coordinates are computed in the raster's CRS
            if not coverage.footprint.contains(point):
                continue

            # works only if gt[2] and gt[4] equal zero!
            px = int((point_t[0] - gt[0]) / gt[1])  # x pixel
            py = int((point_t[1] - gt[3]) / gt[5])  # y pixel

            pixel_val = ds.GetRasterBand(1).ReadAsArray(px, py, 1, 1)[0, 0]
            if pixel_val != -9999:  # skip the nodata value
                writer.writerow([str(layer.identifier), time, pixel_val])

    return {
        "processed": output.getvalue()
    }
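# A worked sketch of the geotransform inversion used in execute(): mapping a
# georeferenced coordinate to integer pixel indices. The sample geotransform
# is hypothetical (10 m pixels, north-up, origin at the top-left corner) and,
# as noted above, the formula assumes the rotation terms gt[2] and gt[4] are
# zero.
def to_pixel(x, y, gt):
    # gt = (origin_x, pixel_width, 0, origin_y, 0, -pixel_height)
    px = int((x - gt[0]) / gt[1])
    py = int((y - gt[3]) / gt[5])
    return px, py


sample_gt = [500000.0, 10.0, 0.0, 4600000.0, 0.0, -10.0]
print(to_pixel(500125.0, 4599975.0, sample_gt))  # -> (12, 2)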
def execute(self, collection, begin_time, end_time, coord_list, srid):
    """ The main execution function for the process. """
    # keep the identifier string; `collection` is rebound to the model below
    col_name = collection
    collection = models.Collection.objects.get(identifier=collection)

    # all EO objects of the collection overlapping the time interval
    eo_objects = collection.eo_objects.filter(
        begin_time__lte=end_time, end_time__gte=begin_time
    )

    # parse the semicolon-separated list of "x,y" coordinate pairs
    points = []
    for coordinate in coord_list.split(';'):
        x, y = coordinate.split(',')
        point = Point(float(x), float(y))
        point.srid = srid
        points.append(point)

    points = MultiPoint(points)
    points.srid = srid

    eo_objects = eo_objects.filter(footprint__intersects=points)

    output = StringIO()
    writer = csv.writer(output, quoting=csv.QUOTE_ALL)
    writer.writerow(["id", "Green", "Red", "NIR", "MIR"])

    for eo_object in eo_objects:
        coverage = eo_object.cast()
        data_item = coverage.data_items.get(semantic__startswith="bands")
        filename = connect(data_item)
        ds = gdal.Open(filename)
        sr = SpatialReference(ds.GetProjection())
        gt = ds.GetGeoTransform()

        for index, point in enumerate(points, start=1):
            if not coverage.footprint.contains(point):
                continue

            # transform a copy so the original point keeps its CRS for the
            # footprint test of the next coverage
            point_t = point.transform(sr, clone=True)

            # works only if gt[2] and gt[4] equal zero!
            px = int((point_t[0] - gt[0]) / gt[1])  # x pixel
            py = int((point_t[1] - gt[3]) / gt[5])  # y pixel

            # TODO: check the range type to adapt the GDAL buffer type!
            # ReadAsArray yields shape (bands, 1, 1); take one value per band
            pixel_val = ds.ReadAsArray(px, py, 1, 1)[:, 0, 0]

            writer.writerow([
                "P_" + str(index),
                pixel_val[0], pixel_val[1], pixel_val[2], pixel_val[3]
            ])

    return {
        "processed": output.getvalue()
    }
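# A small self-contained sketch of the per-point band readout from the second
# execute(), run against an in-memory GDAL raster so no collection is needed;
# the four bands stand in for Green/Red/NIR/MIR.
import numpy as np

drv = gdal.GetDriverByName('MEM')
mem_ds = drv.Create('', 4, 4, 4, gdal.GDT_Int16)  # 4x4 raster, 4 bands
for b in range(1, 5):
    mem_ds.GetRasterBand(b).WriteArray(
        np.full((4, 4), b * 100, dtype=np.int16)
    )

# ReadAsArray(x, y, 1, 1) returns shape (bands, 1, 1); [:, 0, 0] flattens it
# to one value per band, exactly as in the loop above.
print(list(mem_ds.ReadAsArray(2, 1, 1, 1)[:, 0, 0]))  # -> [100, 200, 300, 400]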
def get_points(self):
    return MultiPoint([x.point for x in self.postcode_points.all()])
def get(value):
    return MultiPoint(value)
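# A usage sketch for get(): GeoDjango's MultiPoint accepts Point instances
# (or a single list/tuple of them), not raw coordinate pairs.
mp = get([Point(0, 0), Point(1, 1)])
print(mp.wkt)  # e.g. MULTIPOINT (0 0, 1 1)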