Example #1
def vector_catalog_save_layer(tenant, layer, vector_layer, features):
    connection.close()
    connection.set_schema(tenant)

    features = VECTOR_LAYERS[vector_layer]['geometries_by_id'](features)

    with transaction.atomic():
        union = GEOSGeometry('POINT EMPTY')
        keys = None
        for g, props in features:
            if not keys:
                keys = props.keys()

            union = union.union(g)
            g.transform(3857)

            s = hashlib.sha1()
            s.update(GeometryCollection(g).ewkb)
            props['shaid'] = s.hexdigest()
            f = Feature(layer=layer,
                        geometry=GeometryCollection(g),
                        properties=props)
            f.save()

        envelope = union.envelope.coords[0]
        layer.bounds = envelope[2] + envelope[0]
        layer.status = 0
        layer.field_names = list(set(layer.field_names).union(set(keys)))
        layer.schema['properties'] = {n: "str" for n in layer.field_names}
        layer.save()
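
Most examples on this page share this accumulate-and-union pattern. Below is a minimal standalone sketch of the same idea, assuming only that GeoDjango's GEOS bindings are importable; the polygons are illustrative.

from functools import reduce

from django.contrib.gis.geos import GEOSGeometry

geoms = [
    GEOSGeometry('POLYGON((0 0, 1 0, 1 1, 0 1, 0 0))'),
    GEOSGeometry('POLYGON((2 2, 3 2, 3 3, 2 3, 2 2))'),
]

# Fold the list into one geometry, then read its bounding box.
union = reduce(lambda acc, g: acc.union(g), geoms)
print(union.extent)  # (xmin, ymin, xmax, ymax) -> (0.0, 0.0, 3.0, 3.0)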
Example #2
    def to_internal_value(self, data):
        ret = {}

        # Update ret to include passed in data
        for field, val in data.items():
            if field == "entity":
                ret["entity"] = PoliticalEntity.objects.get(pk=val)
            if field != "geo" and field != "entity":
                ret[field] = val

        # Convert geo field to MultiPolygon if it is a FeatureCollection
        geojson = loads(data["geo"])
        if geojson["type"] == "FeatureCollection":
            features = geojson["features"]
            features_union = GEOSGeometry(dumps(features[0]["geometry"]))
            features = features[1:]

            for feature in features:
                if feature["geometry"]["type"] == "Polygon":
                    features_union = features_union.union(
                        GEOSGeometry(dumps(feature["geometry"])))

            ret["geo"] = features_union
        else:
            ret["geo"] = data["geo"]

        return ret
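
A self-contained sketch of the FeatureCollection dissolve performed above; loads and dumps are assumed to come from the standard json module, and GEOSGeometry's GeoJSON parsing assumes GDAL is installed.

from json import dumps, loads

from django.contrib.gis.geos import GEOSGeometry

fc = loads('''{"type": "FeatureCollection", "features": [
    {"type": "Feature", "geometry":
        {"type": "Polygon", "coordinates": [[[0, 0], [1, 0], [1, 1], [0, 0]]]}},
    {"type": "Feature", "geometry":
        {"type": "Polygon", "coordinates": [[[2, 2], [3, 2], [3, 3], [2, 2]]]}}
]}''')
union = GEOSGeometry(dumps(fc["features"][0]["geometry"]))
for feature in fc["features"][1:]:
    union = union.union(GEOSGeometry(dumps(feature["geometry"])))
print(union.geom_type)  # 'MultiPolygon': the two triangles are disjoint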
Example #3
    def augment_cities(self):
        # Add in county subdivisions, deleting from their shapes any area
        # already covered by a "proper" city.
        fkey = 'cousub'
        starter_cities = Location.objects.filter(location_type=self.city_type)
        within_cities = GEOSGeometry('MULTIPOLYGON EMPTY')
        for city in starter_cities:
            within_cities = within_cities.union(city.location)
        city_pks = [loc.pk for loc in starter_cities]
        layer = DataSource('%s/%s.shp' % (self.zip_dir, self.datafiles[fkey]['file_name']))[0]
        loc_importer = LocationImporter(layer,
            self.city_type,
            source=self.datafiles[fkey].get('source', 'Unknown'),
            filter_bounds=False,
            verbose=True)
        loc_created_count = loc_importer.save(self.datafiles[fkey]['name_field'])
        townships = Location.objects.filter(location_type=self.city_type).exclude(pk__in=city_pks)
        city_names = Location.objects.filter(location_type=self.city_type,
            pk__in=city_pks).values_list('name', flat=True)
        city_names = [name.lower() for name in city_names]
        for township in townships:
            # If a same-named city already exists, then rename the township to "Cityname area."
            if township.name.lower() in city_names:
                township.name = '%s area' % capwords(township.name)
            else:
                township.name = capwords(township.name)
            township.slug = slugify(township.name)
            township.location = township.location.difference(within_cities)
            township.save()
        return loc_created_count
Example #4
def union_geometries(geometries: Iterator[MultiPolygon]) -> Optional[MultiPolygon]:
    total_area = GEOSGeometry("POINT EMPTY", srid=4326)
    for geom in geometries:
        total_area = total_area.union(geom)
    if isinstance(total_area, Point):
        return None
    return to_multipolygon(total_area)
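
Hypothetical usage of union_geometries; note that to_multipolygon is a project helper (not shown here), assumed to promote a plain Polygon result to a MultiPolygon.

from django.contrib.gis.geos import MultiPolygon, Polygon

parts = iter([
    MultiPolygon(Polygon(((0, 0), (1, 0), (1, 1), (0, 0))), srid=4326),
    MultiPolygon(Polygon(((5, 5), (6, 5), (6, 6), (5, 5))), srid=4326),
])
merged = union_geometries(parts)           # MultiPolygon covering both triangles
assert union_geometries(iter([])) is None  # empty input: still a Point, so None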
Example #5
def _validate_geometry(geom):
    if geom.srid != 4326:
        try:
            geom.transform(4326)
        except GEOSException:
            raise ValidationError("Geometry SRID must be 4326")
    if geom.geom_type == "GeometryCollection":
        # TODO find a better way to convert Geometry Collection to MultiPolygon
        tmp_geom = GEOSGeometry("POINT EMPTY", srid=4326)
        for i in geom:
            try:
                tmp_geom = tmp_geom.union(i.buffer(0))
            except GEOSException:
                raise ValidationError("Invalid geometry")
        geom = tmp_geom
    if geom.geom_type not in ["Polygon", "MultiPolygon"]:
        raise ValidationError("Invalid geometry type")
    if not geom.valid:
        geom = geom.buffer(0)
    if not geom.within(
            GEOSGeometry(
                "POLYGON ((-180 -90, -180 90, 180 90, 180 -90, -180 -90))")):
        raise ValidationError(
            "Not bounded by EPSG:4326 coordinates, check file projection")
    return geom
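
The buffer(0) calls above are the usual GEOS trick for repairing invalid geometry; a quick illustration of why it works:

from django.contrib.gis.geos import GEOSGeometry

# A self-intersecting "bow-tie" ring is not a valid polygon...
bowtie = GEOSGeometry('POLYGON((0 0, 2 2, 2 0, 0 2, 0 0))', srid=4326)
print(bowtie.valid)            # False
# ...but a zero-width buffer rebuilds it as valid geometry.
print(bowtie.buffer(0).valid)  # True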
Example #6
    def save(self, tile, request):
        """ Finds the PLSS for the centroid of the envelope (extent) of the geometry,
            and saves that value to the plssid_node_id of the tile.
        """
        def get_plss(x, y):
            # Query the BLM PLSS ArcGIS REST endpoint for the township label
            # at the given point (WGS84 lon/lat).
            url = (
                'https://gis.blm.gov/arcgis/rest/services/Cadastral/'
                'BLM_Natl_PLSS_CadNSDI/MapServer/3/query'
                '?where=1%3D1'
                '&geometry=%7B%22x%22%3A{0}%2C%22y%22%3A{1}%2C'
                '%22spatialReference%22%3A%7B%22wkid%22%3A4326%7D%7D'
                '&geometryType=esriGeometryPoint'
                '&spatialRel=esriSpatialRelIntersects'
                '&outFields=*&returnGeometry=false&f=pjson'
            ).format(x, y)
            resp = requests.get(url)
            data = resp.json()

            try:
                plssid = data['features'][0]['attributes']['TWNSHPLAB']
            except (KeyError, IndexError):
                plssid = "not available at this location"

            return plssid

        # First let's check if this call is as a result of an inbound request (user action) or
        # as a result of the complementary BNGPointToGeoJSON function saving a new GeoJson.
        if request is None:
            return

        srid_LatLong = 4326

        geom_node = self.config[u"geom_node_id"]
        plss_node = self.config[u"plss_node_id"]

        geom = tile.data[geom_node]

        if geom is not None:

            #Grab a copy of the Geometry collection.
            geoJsFeatures = geom[u'features']

            # Get the first feature as a GeosGeometry.
            geosGeom_union = GEOSGeometry(json.dumps(geoJsFeatures[0]['geometry']))

            # update list.
            geoJsFeatures = geoJsFeatures[1:]

            # loop through list of geoJsFeatures.
            for item in geoJsFeatures:
                # .union seems to generate 'GEOS_ERROR: IllegalArgumentException:'
                # exceptions, but they seem spurious and are automatically ignored.
                geosGeom_union = geosGeom_union.union(GEOSGeometry(json.dumps(item['geometry'])))

            # find the centroid of the envelope for the resultant Geometry Collection.
            centroidPoint = geosGeom_union.envelope.centroid

            # Explicitly declare the SRID for the current lat/long.
            centroidPoint = GEOSGeometry(centroidPoint, srid=srid_LatLong)

            tile.data[plss_node] = get_plss(centroidPoint.x, centroidPoint.y)

        return
Example #7
    def union_geometry(self, target_geometry):
        # The target could have no geometry.
        if not target_geometry:
            return True

        # A composite place could have no geometry (especially at the beginning).
        if not self.geometry:
            self.geometry = target_geometry
            return True

        place_geom = GEOSGeometry(json.dumps(self.geometry))
        target_geom = GEOSGeometry(json.dumps(target_geometry))
        union = place_geom.union(target_geom)
        self.geometry = json.loads(union.json)
        return True
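
A compact round trip of the dict to GEOSGeometry and back conversion used above (GeoJSON input assumes GDAL is available):

import json

from django.contrib.gis.geos import GEOSGeometry

a = GEOSGeometry(json.dumps({"type": "Point", "coordinates": [0, 0]}))
b = GEOSGeometry(json.dumps({"type": "Point", "coordinates": [1, 1]}))
geometry = json.loads(a.union(b).json)  # back to a plain GeoJSON dict
print(geometry["type"])                 # MultiPoint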
Example #8
def _parse_geometry(request):
    content = request.data["territory"].read().decode("utf-8")
    try:
        try:
            # Parse GeoJSON FeatureCollection
            geom = GEOSGeometry("POINT EMPTY", srid=4326)
            features = json.loads(content)
            for feature in features["features"]:
                feature_geom = feature["geometry"]
                if "crs" in features:  # Copy SRID data if present
                    feature_geom["crs"] = features["crs"]
                tmp_geom = GEOSGeometry(json.dumps(feature_geom))
                if tmp_geom.srid != geom.srid:
                    tmp_geom.transform(geom.srid)
                geom = geom.union(tmp_geom)
        except (GDALException, KeyError, JSONDecodeError):
            geom = GEOSGeometry(content)
    except GDALException:
        raise ValidationError("Geometry is not recognized")
    return geom
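
The WKT fallback in the outer try works because GEOSGeometry auto-detects its input format; a quick sketch of the accepted forms (the GeoJSON one assumes GDAL is installed):

from django.contrib.gis.geos import GEOSGeometry

pnt = GEOSGeometry('POINT(5 23)')                                 # WKT
pnt = GEOSGeometry('010100000000000000000014400000000000003740')  # HEXEWKB
pnt = GEOSGeometry('{"type": "Point", "coordinates": [5, 23]}')   # GeoJSON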
Example #9
def create_geom_from_curie_list(curie_list):
    obj_list = []
    gss_list = [x.split(':')[1] for x in curie_list if x.startswith('gss:')]
    obj_list += Organisation.objects.filter(gss__in=gss_list)
    obj_list += OrganisationDivision.objects.filter(
        geography_curie__in=curie_list)
    name = obj_list[0].name
    print(name)
    print("Joining…")

    obj_list = set(obj_list)

    geo_list = [x.geography.geography for x in obj_list]

    mp = GEOSGeometry(geo_list[0])

    for x in geo_list[1:]:
        # print("Adding {}".format(x.name))
        # x = x.buffer(0)
        # print(x.valid)
        # import ipdb; ipdb.set_trace()
        mp = mp.union(x)

    return mp
Example #10
def handle_te_time(entity):
    """ Prepare changes in list of Spacetimve Volumes for provided Territorial Entity """
    stvs = SpacetimeVolume.objects.filter(entity=entity)
    dates = []
    to_remove = []
    to_create = []

    for stv in stvs:
        dates.extend([stv.start_date, stv.end_date + 1])
    dates = sorted(set(dates))

    for start, next_start in pairwise(dates):
        end = next_start - 1
        if int(end - start) <= 1:
            continue

        overlaps = SpacetimeVolume.objects.filter(
            entity=entity, start_date__lte=start, end_date__gte=end
        )
        if overlaps.count() == 1:
            stv = overlaps[0]
            if not (stv.start_date == start and stv.end_date == end):
                print(
                    "[{} - {}]: Using geometry from STV #{} [{} - {}]".format(
                        start, end, stv.id, stv.start_date, stv.end_date
                    )
                )
                to_create.append(
                    {
                        "entity": entity,
                        "start_date": start,
                        "end_date": end,
                        "territory": stv.territory,
                    }
                )
                to_remove.append(stv.id)
        if overlaps.count() > 1:
            print(
                "[{} - {}]: Creating union of {} STVs".format(
                    start, end, overlaps.count()
                )
            )
            geom = GEOSGeometry("POINT EMPTY", srid=4326)
            for stv in overlaps:
                print(
                    "\t...STV #{} [{} - {}]".format(
                        stv.id, stv.start_date, stv.end_date
                    )
                )
                geom = geom.union(stv.territory)
                to_remove.append(stv.id)
            to_create.append(
                {
                    "entity": entity,
                    "start_date": start,
                    "end_date": end,
                    "territory": geom,
                }
            )
    to_remove = sorted(set(to_remove))
    if len(to_create) > 0 or len(to_remove) > 0:
        print("> Unique dates {}".format(len(dates)))
        print("+ Total STVs to create count({})".format(len(to_create)))
        print("- Total STVs to remove count({})".format(len(to_remove)))
        recreate_stvs(to_create, to_remove)
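
pairwise() is not defined in this snippet; it is assumed to be the classic itertools recipe (shipped as itertools.pairwise since Python 3.10):

from itertools import tee

def pairwise(iterable):
    """s -> (s0, s1), (s1, s2), (s2, s3), ..."""
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)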
Example #11
    def update_bbox_extent(self, trigger, geometry, original_geometry, item):
        '''Updates the collection's spatial extent if needed when an item changes.

        This function generates a new extent over all the items that share the
        same collection foreign key. If there is no spatial bbox yet, the
        geometry of the item is used.

        Args:
            trigger: str
                Item trigger event, one of 'insert', 'update' or 'delete'
            geometry: GeometryField
                the geometry of the item
            original_geometry:
                the original geometry during an update, or None
            item: Item
                the item being treated

        Returns:
            bool: True if the collection spatial extent has been updated, false otherwise
        '''
        updated = False
        try:
            # insert (as item_id is None)
            if trigger == 'insert':
                # the first item of this collection
                if self.extent_geometry is None:
                    logger.info(
                        'Set collections extent_geometry with geometry %s, '
                        'triggered by the first item insertion',
                        GEOSGeometry(geometry).extent,
                        extra={
                            'collection': self.name,
                            'item': item.name,
                            'trigger': 'item-insert'
                        },
                    )
                    self.extent_geometry = Polygon.from_bbox(
                        GEOSGeometry(geometry).extent)
                # there is already a geometry in the collection, so build the union of the geometries
                else:
                    logger.info(
                        'Updating collections extent_geometry with geometry %s, '
                        'triggered by an item insertion',
                        GEOSGeometry(geometry).extent,
                        extra={
                            'collection': self.name,
                            'item': item.name,
                            'trigger': 'item-insert'
                        },
                    )
                    self.extent_geometry = Polygon.from_bbox(
                        GEOSGeometry(self.extent_geometry).union(
                            GEOSGeometry(geometry)).extent)
                updated |= True

            # update
            if trigger == 'update' and geometry != original_geometry:
                # is the new bbox larger than (and covering) the existing
                if Polygon.from_bbox(GEOSGeometry(geometry).extent).covers(
                        self.extent_geometry):
                    logger.info(
                        'Updating collections extent_geometry with item geometry changed '
                        'from %s to %s, (larger and covering bbox)',
                        GEOSGeometry(original_geometry).extent,
                        GEOSGeometry(geometry).extent,
                        extra={
                            'collection': self.name,
                            'item': item.name,
                            'trigger': 'item-update'
                        },
                    )
                    self.extent_geometry = Polygon.from_bbox(
                        GEOSGeometry(geometry).extent)
                # we need to iterate through the items
                else:
                    logger.warning(
                        'Updating collections extent_geometry with item geometry changed '
                        'from %s to %s. We need to loop over all items of the collection, '
                        'this may take a while!',
                        GEOSGeometry(original_geometry).extent,
                        GEOSGeometry(geometry).extent,
                        extra={
                            'collection': self.name,
                            'item': item.name,
                            'trigger': 'item-update'
                        },
                    )
                    start = time.time()
                    bbox_other_items = type(item).objects.filter(
                        collection_id=self.pk).exclude(id=item.pk).only(
                            'geometry',
                            'collection').aggregate(Extent('geometry'))
                    geometry_updated_item = GEOSGeometry(geometry)

                    if bool(bbox_other_items['geometry__extent']):
                        geometry_other_items = GEOSGeometry(
                            Polygon.from_bbox(
                                bbox_other_items['geometry__extent']))
                        self.extent_geometry = Polygon.from_bbox(
                            geometry_updated_item.union(
                                geometry_other_items).extent)
                    else:
                        self.extent_geometry = geometry_updated_item
                    logger.info(
                        'Collection extent_geometry updated to %s in %ss, after item update',
                        self.extent_geometry.extent,
                        time.time() - start,
                        extra={
                            'collection': self.name,
                            'item': item.name,
                            'trigger': 'item-update'
                        },
                    )
                updated |= True

            # delete, we need to iterate through the items
            if trigger == 'delete':
                logger.warning(
                    'Updating collections extent_geometry with removal of item geometry %s. '
                    'We need to loop over all items of the collection, this may take a while!',
                    GEOSGeometry(geometry).extent,
                    extra={
                        'collection': self.name,
                        'item': item.name,
                        'trigger': 'item-delete'
                    },
                )
                start = time.time()
                bbox_other_items = type(item).objects.filter(
                    collection_id=self.pk).exclude(id=item.pk).only(
                        'geometry', 'collection').aggregate(Extent('geometry'))
                if bool(bbox_other_items['geometry__extent']):
                    self.extent_geometry = GEOSGeometry(
                        Polygon.from_bbox(
                            bbox_other_items['geometry__extent']))
                else:
                    self.extent_geometry = None
                logger.info(
                    'Collection extent_geometry updated to %s in %ss, after item deletion',
                    self.extent_geometry.extent
                    if self.extent_geometry else None,
                    time.time() - start,
                    extra={
                        'collection': self.name,
                        'item': item.name,
                        'trigger': 'item-delete'
                    },
                )
                updated |= True
        except GEOSException as error:
            logger.error(
                'Failed to update spatial extent in collection %s with item %s, trigger=%s, '
                'current-extent=%s, new-geometry=%s, old-geometry=%s: %s',
                self.name,
                item.name,
                trigger,
                self.extent_geometry,
                GEOSGeometry(geometry).extent,
                GEOSGeometry(original_geometry).extent,
                error,
                extra={
                    'collection': self.name,
                    'item': item.name,
                    'trigger': f'item-{trigger}'
                },
            )
            raise GEOSException(
                f'Failed to update spatial extent in collection {self.name} with item '
                f'{item.name}: {error}')
        return updated
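
A minimal sketch of the extent / Polygon.from_bbox round trip this method leans on:

from django.contrib.gis.geos import GEOSGeometry, Polygon

geom = GEOSGeometry('LINESTRING(0 0, 2 1)')
print(geom.extent)                     # (0.0, 0.0, 2.0, 1.0)
bbox = Polygon.from_bbox(geom.extent)  # the extent as an axis-aligned Polygon
print(bbox.extent == geom.extent)      # True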
Example #12
class CSVLoader(BaseDataLoader):
    name = 'csv'

    def __init__(self, *args, **kwargs):
        '''
        Note if fieldnames is passed in it is assumed to be a list of field
        names for this file (normally would be the header line of the CSV -
        first line of data.)  If skip_header is passed in, the assumption is
        that the first line of the file should be skipped.  Note that the default
        is to use the first line for the header, so the setting only makes sense
        when fieldnames is passed in.

        Note that if this module is used to produce spatial data, the SRID of
        the data is ALWAYS EPSG:4326, this format only supports that single
        projection type.
        '''
        logger.debug('CSVLoader: %s, %s', args, kwargs)
        self.fieldnames = kwargs.pop('fieldnames', None)
        # Passed in if we provide fieldnames and a header is present.
        self.skip_header = kwargs.pop('skip_header', False)
        if not self.fieldnames:
            self.skip_header = False
        super(CSVLoader, self).__init__(*args, **kwargs)
        self.process_file()

    def __iter__(self):
        if not hasattr(self, '_feature_count'):
            self.feature_counter = 0
        if self.filename:
            self.csv = csvkit.CSVKitDictReader(open(self.filename, 'r'),
                                               self.fieldnames,
                                               dialect=self.dialect)
            if self.skip_header:
                self.csv.next()
        return self

    def is_valid_range(self, v, max):
        '''
        Used for determining if a lat or long passed in via a delimited
        file is a valid set of values or not.  Our test in this case
        is to see if it is between the range +/-90 for lat and
        +/-180 for long (the caller passes in the range as the max argument.)
        '''
        try:
            v = decimal.Decimal(v)
        except (decimal.InvalidOperation, TypeError, ValueError):
            return None
        if abs(v) <= max:
            return v
        return None

    @property
    def extent(self):
        # Geosgeometry extent is xmin, ymin, xmax, ymax
        # ogr extent is ymin, ymax, xmin, xmax
        # We need to return the ogr one..
        if self.spatial:
            if not hasattr(self, '_extent'):
                for v in self:
                    pass
            return (
                self._extent[0],
                self._extent[1],
                self._extent[2],
                self._extent[3],
            )

    def next(self):
        '''
        Here we iterate over the results/values from this set of data.  If
        the data is spatial, we will return a tuple of the fields and the
        geometry data.  Otherwise a single value is returned that is
        a dictionary of the field data.
        '''
        try:
            data = self.csv.next()
            # We found the types for the fields in our first pass through the
            # csv file, but when we get the data back out, we need to be sure
            # to cast it to the expected type.  Otherwise it won't work
            # properly...
            for field_name, expected_type in self._fields:
                if field_name in data:
                    # if the expected type is one of these we use
                    # dateutil.parser.parse to parse
                    if expected_type in (datetime.date, datetime.time,
                                         datetime.datetime):
                        # Get a value of datetime.datetime.
                        v = parse(data[field_name])
                        # Pare it down to date if needed.
                        if expected_type == datetime.date:
                            v = v.date()
                        # pare it down to time if needed.
                        elif expected_type == datetime.time:
                            v = v.time()
                        data[field_name] = v
                    elif data[field_name]:
                        data[field_name] = expected_type(data[field_name])
                    else:
                        data[field_name] = None
            if not hasattr(self, '_feature_count'):
                self.feature_counter += 1
        except StopIteration:
            self._feature_count = self.feature_counter
            if hasattr(self, '_union_geom'):
                self._extent = self._union_geom.extent
            raise StopIteration
        if getattr(self, 'spatial', False):
            lat = self.is_valid_range(data.get(self.latitude_field, None), 90)
            lon = self.is_valid_range(data.get(self.longitude_field, None),
                                      180)
            if lat and lon:
                wkt = 'POINT({0} {1})'.format(
                    lon,
                    lat,
                )
                if not hasattr(self, '_extent'):
                    if not hasattr(self, '_union_geom'):
                        self._union_geom = GEOSGeometry(wkt)
                    else:
                        self._union_geom = self._union_geom.union(
                            GEOSGeometry(wkt))
            else:
                wkt = None
            return (data, wkt)
        return (data, None)

    @property
    def feature_count(self):
        '''
        Upon complete iteration through the data, we will have a feature count,
        so just loop through the data to count the features.
        '''
        if not hasattr(self, '_feature_count'):
            for v in self:
                pass
        return self._feature_count

    def process_file(self):
        '''
        Here we will see if the input file is CSV, or if it is an understood
        format that can be converted to CSV.  Assuming it's one of those two,
        we will pass the resulting CSV file over to the csv processor.
        '''
        for this_filename in self.filelist:
            logger.debug('Filename processing is %s', this_filename)
            self.format = convert.guess_format(this_filename)
            logger.debug('Guessed format of %s', self.format)
            if self.format == 'csv':
                self.filename = this_filename
                break
            elif self.format:
                # If it is not a CSV file, but some other
                # understood format, we will convert it to a CSV and
                # write it out to a temporary file.
                fh, self.temp_file = tempfile.mkstemp(suffix='.csv')
                os.close(fh)
                self.filename = self.temp_file
                try:
                    logger.debug(
                        'Attempting to convert to format CSV (from %s)',
                        self.format)
                    with open(self.temp_file, 'w') as fh:
                        fh.write(
                            convert.convert(open(this_filename, 'rb'),
                                            self.format))
                    break
                except Exception as e:
                    logger.exception('Failed to process %s to CSV: %s',
                                     self.filename, e)
                    os.unlink(self.filename)
                    self.filename = None

        if getattr(self, 'filename', None):
            return self.process_csv(self.filename)

    def fields(self):
        return [field for field, field_type in self._fields]

    def fields_types(self):
        '''
        This returns a list of tuples, with the first being a field name
        and the second element of each being the python type of the field.
        '''
        return self._fields

    def ogr_fields_types(self):
        '''
        This returns a list of tuples, with the first being a field name
        and the second element of each being the python type of the field.
        '''
        ft = []
        OGR_TYPE_MAPPINGS = {
            bool: ogr.OFTInteger,
            None: ogr.OFTString,
            int: ogr.OFTInteger,
            float: ogr.OFTReal,
            datetime.datetime: ogr.OFTDateTime,
            datetime.date: ogr.OFTDate,
            datetime.time: ogr.OFTTime,
        }
        for field, ftype in self._fields:
            ft.append((
                field,
                OGR_TYPE_MAPPINGS.get(ftype, ogr.OFTString),
            ))
        logger.debug('Field types are %s', ft)
        return ft

    def process_csv(self, filename):
        '''
        Here we have a CSV file that we need to process...
        '''
        try:
            with open(filename, 'r') as csvfile:
                data = '{0}{1}'.format(csvfile.readline(), csvfile.readline())
            logger.debug('First 2 lines of data are %s', data)
            self.dialect = csvkit.sniffer.sniff_dialect(data)
            logger.debug('Dialect is %s', self.dialect)
            if self.dialect:
                self.filename = filename
            else:
                logger.warning(
                    'Unable to determine dialect in use for CSV file (%s)',
                    filename)
        except Exception as e:
            logger.warning('Found a CSV file (%s) with an invalid format: %s',
                           filename, e)
        if self.filename:
            reader = csvkit.CSVKitDictReader(open(self.filename, 'r'),
                                             self.fieldnames,
                                             dialect=self.dialect)
            if self.skip_header:
                reader.next()
            self._fieldnames = reader.fieldnames
            # Here we will gather each column of values in the input CSV
            # to figure out what the data type is for each, so we can
            # properly generate the database, etc.
            valuelists = collections.defaultdict(list)
            self._fields = []
            for row in reader:
                for f in self._fieldnames:
                    valuelists[f].append(row[f])
            for f in self._fieldnames:
                field_type, valuelists[f] = normalize_column_type(
                    valuelists[f], blanks_as_nulls=False)
                self._fields.append((
                    f,
                    field_type,
                ))

            latitude_field_candidates = ['latitude', 'lat']
            longitude_field_candidates = ['longitude', 'long', 'lon']
            lat = long = False

            # case-insensitive check to see if lat/long is in the resulting
            # fields from the data.
            # Now that we have the types for the fields, also ensure that the
            # field we are considering for lat/long is a float or int field,
            # otherwise it won't work as a lat/long value (even int is questionable..)
            #
            # Since we also have the full range of values, we can also check to see if
            # they are within the acceptable range...
            for field in latitude_field_candidates:
                for this_field, field_type in self._fields:
                    if field == this_field.lower() and field_type in (
                            int, float) and min(
                                valuelists[this_field]) >= -90 and max(
                                    valuelists[this_field]) <= 90:
                        lat = this_field
                        break
            for field in longitude_field_candidates:
                for this_field, field_type in self._fields:
                    if field == this_field.lower() and field_type in (
                            int, float) and min(
                                valuelists[this_field]) >= -180 and max(
                                    valuelists[this_field]) <= 180:
                        long = this_field
                        break

            if lat and long:
                # Here it is assumed we have geo-data, so we will
                # convert it to a GIS format and then handle it as such
                # going forward.
                #                 self._fields.remove(lat)
                #                 self._fields.remove(long)
                self.latitude_field = lat
                self.longitude_field = long
                self.spatial = True
                self.spatial_type = ogr.wkbPoint
                # We assume this based on the lat/long values we validate
                # against.
                self.srid = 4326
                srs = osr.SpatialReference()
                epsg = str('EPSG:%s' % (self.srid, ))
                srs.SetFromUserInput(epsg)
                self.srs = srs.ExportToWkt()

    def is_supported(self):
        if getattr(self, 'filename', None):
            return True
        else:
            return False
Example #13
    def save_bngpoint(self, tile, request, is_function_save_method=True):
        """Finds the BNG alphanumeric value for the centroid of the envelope (extent) of the geometry,
        and saves that value to the bng_output_nodegroup of the tile.

        Args:
            self: GeoJSONToBNGPoint object.

            tile: Tile to attach / amend bng_output_nodegroup of.

            request: WSGI request used to verify the call is the result of a user action. N.B. the function returns if empty.

            is_function_save_method: a bool stating whether the calling function is the save function.
        """

        # First let's check if this call is as a result of an inbound request (user action) or
        # as a result of the complementary BNGPointToGeoJSON function saving a new GeoJson.
        if request is None and is_function_save_method:
            return

        srid_LatLong = 4326
        srid_BngAbs = 27700
        # Reference grid for Easting/Northing to BNG Alphas.
        os_grid = {
            "09": "NA",
            "19": "NB",
            "29": "NC",
            "39": "ND",
            "49": "NE",
            "59": "OA",
            "69": "OB",
            "08": "NF",
            "18": "NG",
            "28": "NH",
            "38": "NJ",
            "48": "NK",
            "58": "OF",
            "68": "OG",
            "07": "NL",
            "17": "NM",
            "27": "NN",
            "37": "NO",
            "47": "NP",
            "57": "OL",
            "67": "OM",
            "06": "NQ",
            "16": "NR",
            "26": "NS",
            "36": "NT",
            "46": "NU",
            "56": "OQ",
            "66": "OR",
            "05": "NV",
            "15": "NW",
            "25": "NX",
            "35": "NY",
            "45": "NZ",
            "55": "OV",
            "65": "OW",
            "04": "SA",
            "14": "SB",
            "24": "SC",
            "34": "SD",
            "44": "SE",
            "54": "TA",
            "64": "TB",
            "03": "SF",
            "13": "SG",
            "23": "SH",
            "33": "SJ",
            "43": "SK",
            "53": "TF",
            "63": "TG",
            "02": "SL",
            "12": "SM",
            "22": "SN",
            "32": "SO",
            "42": "SP",
            "52": "TL",
            "62": "TM",
            "01": "SQ",
            "11": "SR",
            "21": "SS",
            "31": "ST",
            "41": "SU",
            "51": "TQ",
            "61": "TR",
            "00": "SV",
            "10": "SW",
            "20": "SX",
            "30": "SY",
            "40": "SZ",
            "50": "TV",
            "60": "TW",
        }

        geojsonnode = self.config["geojson_input_node"]
        bngnode = self.config["bng_output_node"]

        geojsonValue = tile.data[geojsonnode]

        if geojsonValue is not None:

            # Grab a copy of the Geometry collection.
            geoJsFeatures = geojsonValue["features"]

            # Get the first feature as a GeosGeometry.
            geosGeom_union = GEOSGeometry(json.dumps(geoJsFeatures[0]["geometry"]))

            # update list.
            geoJsFeatures = geoJsFeatures[1:]

            # loop through list of geoJsFeatures.
            for item in geoJsFeatures:
                # .union seems to generate 'GEOS_ERROR: IllegalArgumentException:'
                # exceptions, but they seem spurious and are automatically ignored.
                geosGeom_union = geosGeom_union.union(GEOSGeometry(json.dumps(item["geometry"])))

            # find the centroid of the envelope for the resultant Geometry Collection.
            centroidPoint = geosGeom_union.envelope.centroid

            # Explicitly declare the SRID for the current lat/long.
            centroidPoint = GEOSGeometry(centroidPoint, srid=srid_LatLong)

            # Transform to Absolute BNG.
            centroidPoint.transform(srid_BngAbs, False)

            # Get initial Easting and Northing digits. N.B. Left Zero pad integer coords to 6 digits!
            easting = str(int(centroidPoint.coords[0])).zfill(6)
            northing = str(int(centroidPoint.coords[1])).zfill(6)
            gridref = easting[0] + northing[0]

            # Get AlphaNumeric BNG
            try:
                gridref = os_grid[gridref] + easting[1:6] + northing[1:6]
            except KeyError:
                raise Exception("Conversion Error : Coordinates outside of BNG for England.")

            if self.config["bng_output_nodegroup"] == str(tile.nodegroup_id):
                tile.data[bngnode] = gridref
            else:

                previously_saved_tiles = Tile.objects.filter(
                    nodegroup_id=self.config["bng_output_nodegroup"], resourceinstance_id=tile.resourceinstance_id
                )

                # Update pre-existing tiles, or Create new one.
                if len(previously_saved_tiles) > 0:
                    for p in previously_saved_tiles:
                        p.data[bngnode] = gridref
                        p.save()
                else:
                    new_bng_tile = Tile().get_blank_tile_from_nodegroup_id(
                        self.config["bng_output_nodegroup"], resourceid=tile.resourceinstance_id, parenttile=tile.parenttile
                    )
                    new_bng_tile.data[bngnode] = gridref
                    new_bng_tile.save()

            return

        return
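
To make the grid-reference arithmetic above concrete, here is a worked example with hypothetical coordinates, using the os_grid table defined in this method:

easting, northing = '428765', '387654'  # hypothetical 6-digit BNG coordinates
key = easting[0] + northing[0]          # leading digits pick the 100 km square
gridref = os_grid[key] + easting[1:6] + northing[1:6]
print(gridref)                          # 'SK2876587654' ('43' maps to 'SK')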
Example #15
def search_for_cave(request):
    """ Barlagok meghatározott tulajdonságai szerinti leválogatását végzem """
    kozet = request.data.get('kozet')
    vedettseg = request.data.get('vedettseg')
    lathatosag = request.data.get('lathatosag')
    np = request.data.get('illetekes_np')
    city_id = request.data.get('city_id')
    county_id = request.data.get('county_id')
    region_id = request.data.get('region_id')
    microregion_id = request.data.get('microregion_id')

    logger.info("Barlangok leválogatása paraméterek alapján")

    filter_params = {}
    if kozet:
        filter_params['kozet'] = kozet

    if vedettseg:
        filter_params['vedettseg'] = vedettseg

    if lathatosag:
        filter_params['lathatosag'] = lathatosag

    if np:
        filter_params['illetekes_np'] = np

    if city_id:
        try:
            city = City.objects.get(pk=city_id)
            filter_params['geom__coveredby'] = city.geom
        except City.DoesNotExist:
            raise NotFound(
                detail='Oops! No city exists in the database with the given id!',
                code=404)

    if county_id:
        try:
            county = County.objects.get(pk=county_id)
            filter_params['geom__coveredby'] = county.geom
        except County.DoesNotExist:
            raise NotFound(
                detail='Oops! No county exists in the database with the given id!',
                code=404)

    if region_id:
        microregions = Microregion.objects.filter(
            nagytaj_id__exact=region_id)
        if not microregions:
            # filter() never raises DoesNotExist, so check for an empty result.
            raise NotFound(
                detail='Oops! No region exists in the database with the given id!',
                code=404)

        geos_mregions = GEOSGeometry(microregions[0].geom.wkt)
        for microregion in microregions[1:]:
            geom = GEOSGeometry(microregion.geom.wkt)
            geos_mregions = geos_mregions.union(geom)

        filter_params['geom__coveredby'] = geos_mregions

    if microregion_id:
        try:
            microregion = Microregion.objects.get(pk=microregion_id)
            filter_params['geom__coveredby'] = microregion.geom
        except Microregion.DoesNotExist:
            raise NotFound(
                detail='Oops! No microregion exists in the database with the given id!',
                code=404)

    caves = Cave.objects.filter(**filter_params)
    if not caves.exists():
        # filter() returns an empty queryset rather than raising DoesNotExist.
        raise NotFound(
            detail='Oops! No cave matches the given parameters in the database!',
            code=404)

    serializer = CaveListSerializer(caves)
    return JsonResponse(serializer.data, safe=False)
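
For reference, a hypothetical direct use of the geom__coveredby lookup that all of the branches above feed into, assuming the Cave model's geometry field is named geom as in the filters:

from django.contrib.gis.geos import GEOSGeometry

# Any polygonal geometry works as the right-hand side; this boundary is illustrative.
boundary = GEOSGeometry(
    'POLYGON((16 45, 23 45, 23 49, 16 49, 16 45))', srid=4326)
caves_inside = Cave.objects.filter(geom__coveredby=boundary)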