Example #1
 def __init__(self, resolution, basename=None, overwrite=False):
     # type: (float, Optional[str], bool) -> None
     self._res = resolution
     self._cnt = 0
     if basename is None:
         self._index = Index(interleaved=True)
     else:
         p = Property(overwrite=overwrite)
         self._index = Index(basename, interleaved=True, properties=p)
Example #2
def get_rtree(geometries, fp):
    fp = fp.as_posix()
    if not os.path.exists(fp + '.idx'):
        # Populate R-tree index with bounds of geometries
        print('Populate {} tree'.format(fp))
        idx = Index(fp)
        for i, geo in enumerate(geometries):
            idx.insert(i, geo.bounds)
        idx.close()

    return {'rtree': Index(fp), 'geometries': geometries}
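
A note on the persistence pattern above (a sketch, not from the original project): a disk-backed Index(fp) stores its data in a pair of files, fp + '.idx' and fp + '.dat', which is why the existence check targets the '.idx' file. Minimal standalone demonstration, assuming a writable working directory:

from rtree.index import Index

idx = Index('demo_rtree')            # creates demo_rtree.idx and demo_rtree.dat
idx.insert(0, (0.0, 0.0, 1.0, 1.0))
idx.close()                          # flush to disk before reopening
print(list(Index('demo_rtree').intersection((0, 0, 2, 2))))  # -> [0]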
Example #3
def create_spatial_index(shape_dict):
    print('Making spatial index...', end=' ', file=sys.stderr)
    spatial_index = Index()
    for index, (blockid, shape) in enumerate(shape_dict.items()):
        spatial_index.insert(index, shape.bounds, obj=blockid)
    print('done.', file=sys.stderr)
    return spatial_index
Example #4
 def read_airspace(self, airspace):
     index = Index()
     with open(airspace, 'r') as f:
         reader = openair.Reader(f)
         for record, error in reader:
             if error:
                 logging.warning(
                     f'line {error.lineno} of {os.path.basename(airspace)} - {error}'
                 )
             else:
                 try:
                     zone = Airspace(record)
                     if not self.agl_validable and (zone.ground_floor
                                                    or zone.ground_ceiling):
                         logging.warning(
                             f'{zone.name} will not be checked because ground altitude of flight could not be retrieved.'
                         )
                     else:
                         if zone.bounds:
                             index.insert(id(zone), zone.bounds, obj=zone)
                 except KeyError:
                     logging.warning(
                         f'line {reader.reader.lineno} of {os.path.basename(airspace)} - error in previous record'
                     )
     return index
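
The obj=zone payloads stored above come back out through objects=True queries. A minimal standalone sketch of that retrieval (the data is illustrative):

from rtree.index import Index

index = Index()
index.insert(1, (0, 0, 1, 1), obj='payload')
for hit in index.intersection((0, 0, 2, 2), objects=True):
    print(hit.id, hit.object)  # -> 1 payload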
Example #5
File: test_tpr.py  Project: sthagen/rtree
    def test_tpr(self):
        # TODO : this freezes forever on some windows cloud builds
        if os.name == 'nt':
            return

        # Cartesians list for brute force
        objects = dict()
        tpr_tree = Index(properties=Property(type=RT_TPRTree))

        for operation, t_now, object_ in data_generator():
            if operation == "INSERT":
                tpr_tree.insert(object_.id, object_.get_coordinates())
                objects[object_.id] = object_
            elif operation == "DELETE":
                tpr_tree.delete(object_.id, object_.get_coordinates(t_now))
                del objects[object_.id]
            elif operation == "QUERY":
                tree_intersect = set(
                    tpr_tree.intersection(object_.get_coordinates()))

                # Brute intersect
                brute_intersect = set()
                for tree_object in objects.values():
                    x_low, y_low = tree_object.getXY(object_.start_time)
                    x_high, y_high = tree_object.getXY(object_.end_time)

                    if intersects(
                            x_low, y_low, x_high, y_high,  # Line
                            object_.x, object_.y, object_.dx, object_.dy):  # Rect
                        brute_intersect.add(tree_object.id)

                # Tree should match brute force approach
                assert tree_intersect == brute_intersect
Example #6
def get_sindex(gdf):
    """Get or build an R-Tree spatial index.

    Particularly useful for geopandas<0.2.0 (no sindex) and for
    geopandas 0.7.0-0.9.0 (PyGEOS-based sindex)
    """
    sindex = None
    if (hasattr(gdf, '_rtree_sindex')):
        return getattr(gdf, '_rtree_sindex')
    if (isinstance(gdf, geopandas.GeoDataFrame)
            and hasattr(gdf.geometry, 'sindex')):
        sindex = gdf.geometry.sindex
    elif isinstance(gdf, geopandas.GeoSeries) and hasattr(gdf, 'sindex'):
        sindex = gdf.sindex
    if sindex is not None:
        if (hasattr(sindex, "nearest")
                and sindex.__class__.__name__ != "PyGEOSSTRTreeIndex"):
            # probably rtree.index.Index
            return sindex
        else:
            # probably PyGEOSSTRTreeIndex but unfortunately, 'nearest'
            # with 'num_results' is required
            sindex = None
    if rtree and len(gdf) >= rtree_threshold:
        # Manually populate a 2D spatial index for speed
        sindex = Index()
        # slow, but reliable
        for idx, item in enumerate(gdf.bounds.itertuples()):
            sindex.add(idx, item[1:])
        # cache the index for later
        setattr(gdf, '_rtree_sindex', sindex)
    return sindex
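
The nearest() check above matters because rtree's Index.nearest accepts a plain bounds tuple plus an optional num_results, which is what callers of get_sindex rely on. A minimal sketch of that call (data is illustrative):

from rtree.index import Index

idx = Index()
idx.add(0, (0, 0, 0, 0))   # a point at (0, 0)
idx.add(1, (5, 5, 5, 5))   # a point at (5, 5)
print(list(idx.nearest((1, 1, 1, 1), num_results=1)))  # -> [0]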
Example #7
 def _construct_index(self):
     """!
     Separate the index construction from the constructor, allowing a GUI override
     """
     self.index = Index()
     for i, j, k in self.ikle:
         t = Polygon([self.points[i], self.points[j], self.points[k]])
         self.triangles[i, j, k] = t
         self.index.insert(i, t.bounds, obj=(i, j, k))
Example #8
 def _construct_index(self, iter_pbar):
     """!
     Separate the index construction from the constructor, allowing a GUI override
     @param iter_pbar: iterable progress bar
     """
     self.index = Index()
     for i, j, k in iter_pbar(self.ikle, unit='elements'):
         t = Polygon([self.points[i], self.points[j], self.points[k]])
         self.triangles[i, j, k] = t
         self.index.insert(i, t.bounds, obj=(i, j, k))
Example #9
    def __init__(self, streets_file):
        self.idx = Index()
        with open(streets_file) as f:
            for line in f.readlines():
                street = json.loads(line)
                street_id = street['properties']['id']
                street_shape = asShape(street['geometry'])
                for i in range(len(street_shape.geoms)):
                    seg_id = self.encode_seg_id(i, street_id)
                    self.idx.insert(seg_id, street_shape.geoms[i].coords[0])
                    self.idx.insert(-seg_id, street_shape.geoms[i].coords[-1])

        self.bb_idx = Index()
        with open(streets_file) as f:
            for line in f.readlines():
                street = json.loads(line)
                street_id = int(street['properties']['id'])
                street_shape = asShape(street['geometry'])
                self.bb_idx.insert(street_id, list(street_shape.bounds))
Example #10
File: gis.py  Project: willeforce/atlite
def compute_indicatormatrix(orig,
                            dest,
                            orig_proj='latlong',
                            dest_proj='latlong'):
    """
    Compute the indicatormatrix

    The indicatormatrix I[i,j] is a sparse representation of the ratio
    of the area in orig[j] lying in dest[i], where orig and dest are
    collections of polygons, i.e.

    A value of I[i,j] = 1 indicates that the shape orig[j] is fully
    contained in shape dest[i].

    Note that the polygons must be in the same crs.

    Parameters
    ----------
    orig : Collection of shapely polygons
    dest : Collection of shapely polygons

    Returns
    -------
    I : sp.sparse.lil_matrix
      Indicatormatrix
    """

    dest = reproject_shapes(dest, dest_proj, orig_proj)
    indicator = sp.sparse.lil_matrix((len(dest), len(orig)), dtype=float)

    try:
        from rtree.index import Index

        idx = Index()
        for j, o in enumerate(orig):
            idx.insert(j, o.bounds)

        for i, d in enumerate(dest):
            for j in idx.intersection(d.bounds):
                o = orig[j]
                area = d.intersection(o).area
                indicator[i, j] = area / o.area

    except ImportError:
        logger.warning(
            "Rtree is not available. Falling back to slower algorithm.")

        dest_prepped = list(map(prep, dest))

        for i, j in product(range(len(dest)), range(len(orig))):
            if dest_prepped[i].intersects(orig[j]):
                area = dest[i].intersection(orig[j]).area
                indicator[i, j] = area / orig[j].area

    return indicator
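
A quick standalone check of the ratio definition from the docstring (not part of the original module; shapely only):

from shapely.geometry import box

orig = [box(0, 0, 2, 2)]                   # one 2x2 source polygon
dest = [box(0, 0, 1, 1), box(1, 0, 2, 2)]  # two destination polygons

for i, d in enumerate(dest):
    for j, o in enumerate(orig):
        print(i, j, d.intersection(o).area / o.area)
# -> 0 0 0.25  (a quarter of orig[0] lies in dest[0])
# -> 1 0 0.5   (half of orig[0] lies in dest[1])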
Example #11
def demo_delete():
    seed = 1  # Seed for random points

    countries = get_countries()

    country_id_to_remove = 170  # United States of America
    country_uuids_to_remove = []  # Polygons' ids to remove from the index

    properties = Property()
    # properties.writethrough = True
    # properties.leaf_capacity = 1000
    # properties.fill_factor = 0.5
    index = Index(properties=properties)

    points_per_polygon = 1
    points = []

    # Inserts countries data to the index
    for i, (country_name, geometry) in enumerate(countries):
        for polygon in get_polygons(geometry):
            temp_uuid = uuid.uuid1().int
            index.insert(temp_uuid, polygon.bounds, country_name)

            if i == country_id_to_remove:
                # Saves index ids of the polygon to be removed later
                country_uuids_to_remove.append(temp_uuid)

            # Generates random points in every polygon and saves them
            random_points = gen_random_point(points_per_polygon, polygon, seed)
            points.append((country_name, random_points))

    # Checks every generated point has matches
    for (country_name, country_points) in points:
        for point in country_points:
            hits = list(index.intersection(point.bounds, objects=True))
            assert any(hit.object == country_name for hit in hits)

    # Remove geometry
    geometry = countries[country_id_to_remove][1]
    for i, polygon in enumerate(get_polygons(geometry)):
        index.delete(country_uuids_to_remove[i], polygon.bounds)

    points_missing = []

    # Checks (again) if every generated point has matches
    for (country_name, country_points) in points:
        for point in country_points:
            hits = list(index.intersection(point.bounds, objects=True))
            # Save any point without matches
            if not any(hit.object == country_name for hit in hits):
                points_missing.append(str(point) + " - " + country_name)

    # Print missing points
    for point in points_missing:
        print(point)
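
The removal step in this example relies on Index.delete taking both the id and the bounds that were supplied at insert time, which is why the polygon bounds are recomputed before deletion. A minimal standalone sketch of that contract, assuming the stock rtree API:

from rtree.index import Index

idx = Index()
idx.insert(7, (0, 0, 1, 1))
idx.delete(7, (0, 0, 1, 1))                  # bounds must match the insert
print(list(idx.intersection((0, 0, 2, 2))))  # -> []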
Example #12
    def build_index(self):
        label_candidates = []
        for p in self.points:
            label_candidates.extend(p.label_candidates)
        self.items = []
        self.items.extend(label_candidates)
        self.items.extend(self.points)
        self.items.extend(self.bounding_box.border_config)

        self.idx = Index()
        for i, item in enumerate(self.items):
            item.index = i
            self.idx.insert(i, item.box)
Example #13
 def __init__(self, input_header, construct_index=False, iter_pbar=lambda x, unit: x):
     """!
     @param input_header <slf.Serafin.SerafinHeader>: input Serafin header
     @param construct_index <bool>: perform the index construction
     @param iter_pbar: iterable progress bar
     """
     self.x, self.y = input_header.x[:input_header.nb_nodes_2d], input_header.y[:input_header.nb_nodes_2d]
     self.ikle = input_header.ikle_2d - 1  # back to 0-based indexing
     self.triangles = {}
     self.nb_points = self.x.shape[0]
     self.nb_triangles = self.ikle.shape[0]
     self.points = np.stack([self.x, self.y], axis=1)
     if not construct_index:
         self.index = Index()
     else:
         self._construct_index(iter_pbar)
Example #14
def get_sindex(gdf):
    """Helper function to get or build a spatial index

    Particularly useful for geopandas<0.2.0
    """
    assert isinstance(gdf, geopandas.GeoDataFrame)
    has_sindex = hasattr(gdf, 'sindex')
    if has_sindex:
        sindex = gdf.geometry.sindex
    elif rtree and len(gdf) >= rtree_threshold:
        # Manually populate a 2D spatial index for speed
        sindex = Index()
        # slow, but reliable
        for idx, (segnum, row) in enumerate(gdf.bounds.iterrows()):
            sindex.add(idx, tuple(row))
    else:
        sindex = None
    return sindex
Example #15
def local_search(points, bounding_box, iterations):
    labeled_points = [p for p in points if p.text]

    items = []
    items.extend([p.label for p in labeled_points])
    items.extend(points)
    items.extend(bounding_box.border_config)

    idx = Index()
    for i, item in enumerate(items):
        item.index = i
        idx.insert(item.index, item.box)

    for i in range(iterations):
        for lp in labeled_points:
            best_candidate = None
            min_penalty = None
            for lc1 in lp.label_candidates:
                penalty = POSITION_WEIGHT * lc1.position

                # Check overlap with other labels and points
                intersecting_item_ids = idx.intersection(lc1.box)
                for item_id in intersecting_item_ids:
                    item = items[item_id]
                    if hasattr(item, "point") and lc1.point == item.point:
                        continue
                    penalty += item.overlap(lc1)

                if min_penalty is None or penalty < min_penalty:
                    min_penalty = penalty
                    best_candidate = lc1

            # Remove the old label from the index
            idx.delete(lp.label.index, lp.label.box)

            # Select the new label
            best_candidate.select()

            # Add the new label to the index and item list
            idx.insert(len(items), lp.label.box)
            items.append(lp.label)
Example #16
def make_index(shapes):
    """Creates an index for fast and efficient spatial queries.

    Args:
      shapes: shapely shapes to bulk-insert bounding boxes for into the spatial index.

    Returns:
      The spatial index created from the shape's bounding boxes.
    """

    # Todo: benchmark these for our use-cases
    prop = Property()
    prop.dimension = 2
    prop.leaf_capacity = 1000
    prop.fill_factor = 0.9

    def bounded():
        for i, shape in enumerate(shapes):
            yield (i, shape.bounds, None)

    return Index(bounded(), properties=prop)
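
Hypothetical usage of make_index (the shape data is illustrative, not from the source project):

from shapely.geometry import box

idx = make_index([box(0, 0, 1, 1), box(2, 2, 3, 3)])
print(sorted(idx.intersection((0.5, 0.5, 2.5, 2.5))))  # -> [0, 1]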
Example #17
    def build_cache(self):
        label_candidates = []
        for p in self.points:
            label_candidates.extend(p.label_candidates)
        items = []
        items.extend(label_candidates)
        items.extend(self.points)
        items.extend(self.bounding_box.border_config)

        idx = Index()
        for i, item in enumerate(items):
            item.index = i
            idx.insert(i, item.box)

        for lc in label_candidates:
            lc.penalty = POSITION_WEIGHT * lc.position
            lc.label_penalties = [0 for i in range(len(label_candidates))]
            intersecting_item_ids = idx.intersection(lc.box)
            bbox_counted = False

            for item_id in intersecting_item_ids:
                item = items[item_id]

                if item == lc or item == lc.point:
                    continue

                if isinstance(item, Label):
                    if lc.point == item.point:
                        continue
                    else:
                        lc.label_penalties[item.index] = item.overlap(lc)
                        continue

                if isinstance(item, BoundingBoxBorder):
                    if bbox_counted:
                        continue
                    bbox_counted = True

                lc.penalty += item.overlap(lc)
Example #18
 def build_rtree(self):
     '''
     Construct an R-tree for the domain. This may reduce the
     computational complexity of the methods `intersection_count`,
     `contains`, `orient_simplices`, and `snap`.
     '''
     # create a bounding box for each simplex and add those
     # bounding boxes to the R-tree
     if self.rtree is not None:
         # do nothing because the R-tree already exists
         logger.debug('R-tree already exists')
         return
         
     smp_min = self.vertices[self.simplices].min(axis=1)
     smp_max = self.vertices[self.simplices].max(axis=1)
     bounds = np.hstack((smp_min, smp_max))
     
     p = Property()
     p.dimension = self.dim
     self.rtree = Index(properties=p)
     for i, bnd in enumerate(bounds):
         self.rtree.add(i, bnd)
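
Setting Property.dimension, as above, is what lets the index hold boxes in more than two dimensions. A standalone 3-D sketch (assuming interleaved coordinates, rtree's default):

from rtree.index import Index, Property

p = Property()
p.dimension = 3
idx3d = Index(properties=p)
idx3d.add(0, (0, 0, 0, 1, 1, 1))  # (xmin, ymin, zmin, xmax, ymax, zmax)
print(list(idx3d.intersection((0.5, 0.5, 0.5, 2, 2, 2))))  # -> [0]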
Example #19
def evaluate_labels(labels, points, bounding_box):
    items = []
    items.extend(labels)
    items.extend(points)
    items.extend(bounding_box.border_config)

    t1 = time.perf_counter()
    idx = Index()

    for i, item in enumerate(items):
        item.index = i
        idx.insert(i, item.box)

    t2 = time.perf_counter()
    # print(f"Index creation: {t2-t1}")

    # Update penalties for overlap with other objects
    penalties = [evaluate_label(l, items, idx) for l in labels]

    t3 = time.perf_counter()
    # print(f"Overlap checking: {t3 - t2}")

    print(f"Total time: {t3 - t1}")
    return penalties
Example #20
def parse_temperatures(database: SqliteUtil, tmin_files: List[str],
                       tmax_files: List[str], steps: int, day: int,
                       src_epsg: int, prj_epsg: int):

    log.info('Allocating tables for air temperatures.')
    create_tables(database)

    files = zip(tmax_files, tmin_files)
    profile_count = 0
    point_count = 0
    temperatures = []
    points = []
    profiles = {}
    n = 1

    transformer = Transformer.from_crs(f'epsg:{src_epsg}',
                                       f'epsg:{prj_epsg}',
                                       always_xy=True,
                                       skip_equivalent=True)
    project = transformer.transform

    def apply(id: int, temp: Callable):
        for step in range(steps):
            prop = step / steps
            row = (id, step, int(86400 * prop), temp(24 * prop))
            yield row

    log.info('Loading temperatures from netCDF4 files.')
    for tmax_file, tmin_file in files:
        tmaxnc = Dataset(tmax_file, 'r')
        tminnc = Dataset(tmin_file, 'r')

        lons = tmaxnc.variables['lon']
        lats = tmaxnc.variables['lat']
        shape = tmaxnc.variables['tmax'].shape

        tmaxs = tmaxnc.variables['tmax'][day]
        tmins = tminnc.variables['tmin'][day]

        for i in range(shape[1]):
            for j in range(shape[2]):
                tmax = tmaxs[i][j]
                tmin = tmins[i][j]

                if tmax != -9999.0:
                    x, y = project(lons[i][j], lats[i][j])
                    idx = f'{tmax}-{tmin}'

                    if idx not in profiles:
                        temp = iterpolation(tmin, tmax, 5, 15)
                        temperatures.extend(apply(profile_count, temp))
                        profiles[idx] = profile_count
                        profile_count += 1

                    profile = profiles[idx]
                    point = Point(point_count, x, y, profile)
                    points.append(point)
                    point_count += 1

                    if point_count == n:
                        log.info(
                            f'Loading air temperature reading {point_count}.')
                        n <<= 1

        tmaxnc.close()
        tminnc.close()

    if point_count != n >> 1:
        log.info(f'Loading air temperature reading {point_count}.')

    def load():
        for point in points:
            x, y = point.x, point.y
            yield (point.id, (x, y, x, y), point.profile)

    log.info('Starting network update for air temperatures.')
    log.info('Building spatial index from temperature profile locations.')
    index = Index(load())
    used = set()

    log.info('Loading network links.')
    links = load_links(database)

    log.info('Applying temperature profiles to links.')
    iter_links = counter(links, 'Applying profile to link %s.')
    for link in iter_links:
        result = index.nearest((link.x, link.y, link.x, link.y), objects=True)
        profile = next(result).object
        link.air_temperature = profile
        used.add(profile)

    def dump_links():
        for link in links:
            yield (link.id, link.air_temperature)

    log.info('Writing updated links to database.')
    database.insert_values('temp_links', dump_links(), 2)
    database.connection.commit()
    del links

    log.info('Loading network parcels.')
    parcels = load_parcels(database)

    residential = profile_count
    temperatures.extend(apply(profile_count, lambda x: 26.6667))
    profile_count += 1
    commercial = profile_count
    temperatures.extend(apply(profile_count, lambda x: 26.6667))
    profile_count += 1
    other = profile_count
    temperatures.extend(apply(profile_count, lambda x: 26.6667))
    profile_count += 1
    used.add(residential)
    used.add(commercial)
    used.add(other)

    log.info('Applying temperature profiles to parcels.')
    iter_parcels = counter(parcels, 'Applying profile to parcel %s.')
    for parcel in iter_parcels:
        if not parcel.cooling:
            x, y = xy(parcel.center)
            result = index.nearest((x, y, x, y), objects=True)
            profile = next(result).object
            parcel.air_temperature = profile
            used.add(profile)
        elif parcel.kind == 'residential':
            parcel.air_temperature = residential
        elif parcel.kind == 'commercial':
            parcel.air_temperature = commercial
        else:
            parcel.air_temperature = other

    def dump_parcels():
        for parcel in parcels:
            yield (parcel.apn, parcel.air_temperature)

    log.info('Writing updated parcels to database.')
    database.insert_values('temp_parcels', dump_parcels(), 2)
    database.connection.commit()
    del parcels

    def dump_temperatures():
        for temp in temperatures:
            if temp[0] in used:
                yield temp

    log.info('Writing parsed air temperatures to database.')
    database.insert_values('air_temperatures', dump_temperatures(), 4)
    database.connection.commit()
    del temperatures

    log.info('Merging, dropping and renaming old tables.')

    query = '''
        CREATE INDEX temp_links_link
        ON temp_links(link_id);
    '''
    database.cursor.execute(query)
    query = '''
        CREATE TABLE temp_links_merged
        AS SELECT
            links.link_id,
            links.source_node,
            links.terminal_node,
            links.length,
            links.freespeed,
            links.capacity,
            links.permlanes,
            links.oneway,
            links.modes,
            temp_links.air_temperature,
            links.mrt_temperature
        FROM links
        INNER JOIN temp_links
        USING(link_id);
    '''
    database.cursor.execute(query)
    query = '''
        CREATE INDEX temp_parcels_parcel
        ON temp_parcels(apn);
    '''
    database.cursor.execute(query)
    query = '''
        CREATE TABLE temp_parcels_merged
        AS SELECT
            parcels.apn,
            parcels.maz,
            parcels.type,
            parcels.cooling,
            temp_parcels.air_temperature,
            parcels.mrt_temperature,
            parcels.center,
            parcels.region
        FROM parcels
        INNER JOIN temp_parcels
        USING(apn);
    '''
    database.cursor.execute(query)

    original = database.count_rows('links')
    merged = database.count_rows('temp_links_merged')
    if original != merged:
        log.error('Original links and updated links tables '
                  'do not align; quitting to prevent data loss.')
        raise RuntimeError
    else:
        database.drop_table('links', 'temp_links')
        query = '''
            ALTER TABLE temp_links_merged
            RENAME TO links;
        '''
        database.cursor.execute(query)

    original = database.count_rows('parcels')
    merged = database.count_rows('temp_parcels_merged')
    if original != merged:
        log.error('Original parcels and updated parcels tables '
                  'do not align; quitting to prevent data loss.')
        raise RuntimeError
    else:
        database.drop_table('parcels', 'temp_parcels')
        query = '''
            ALTER TABLE temp_parcels_merged
            RENAME TO parcels;
        '''
        database.cursor.execute(query)

    database.connection.commit()

    log.info('Creating indexes on new tables.')
    create_indexes(database)

    log.info('Writing process metadata.')
Example #21
 def create_index_stream(self, generator):
     self.idx = Index(generator)
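
The generator handed to Index() above must yield (id, bounds, obj) triples for bulk loading. A hypothetical generator in that format (names and data are illustrative):

def generator():
    boxes = [(0, 0, 1, 1), (2, 2, 3, 3)]
    for i, b in enumerate(boxes):
        yield (i, b, None)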
Example #22
def parse_parcels(database: SqliteUtil, residence_file: str,
                  commerce_file: str, parcel_file: str, cooling_file: str,
                  src_epsg: int, prj_epsg: int):
    boundaries = {}
    cooling = {}
    parcels = []
    apns = set()

    transformer = Transformer.from_crs(f'epsg:{src_epsg}',
                                       f'epsg:{prj_epsg}',
                                       always_xy=True,
                                       skip_equivalent=True)
    project = transformer.transform

    log.info('Allocating tables for parcels.')
    create_tables(database)

    log.info('Parsing parcel boundaries from shapefile.')
    parser = shapefile.Reader(parcel_file)
    iter_boundaries = counter(iter(parser), 'Parsing parcel boundary %s.')
    for parcel in iter_boundaries:
        if len(parcel.shape.points):
            apn = parcel.record['APN']
            points = (project(*pt) for pt in parcel.shape.points)
            polygon = Polygon(points)
            boundaries[apn] = polygon
    parser.close()

    log.info('Loading cooling information from csv file.')
    with open(cooling_file, 'r') as open_file:
        lines = csv.reader(open_file, delimiter=',', quotechar='"')
        next(lines)
        for desc, _, cool in lines:
            cooling[desc] = bool(cool)

    log.info('Parsing residential parcels from database file.')
    parser = shapefile.Reader(residence_file)
    iter_parcels = counter(parser.iterRecords(),
                           'Parsing residential parcel %s.')
    for record in iter_parcels:
        apn = record['APN']
        if apn in boundaries and apn not in apns:
            cool = True
            polygon = boundaries[apn]
            parcel = Parcel(apn, 'residential', cool, polygon)
            parcels.append(parcel)
            apns.add(apn)
    parser.close()

    log.info('Parsing commercial parcels from database file.')
    parser = shapefile.Reader(commerce_file)
    iter_parcels = counter(parser.iterRecords(),
                           'Parsing commercial parcel %s.')
    for record in iter_parcels:
        apn = record['APN']
        if apn in boundaries and apn not in apns:
            desc = record['DESCRIPT']
            cool = cooling[desc]
            polygon = boundaries[apn]
            parcel = Parcel(apn, 'commercial', cool, polygon)
            parcels.append(parcel)
            apns.add(apn)
    parser.close()

    log.info('Parsing extraneous parcels from shapefile.')
    other = set(boundaries.keys()) - apns
    other = counter(other, 'Parsing extraneous parcel %s.')
    for apn in other:
        polygon = boundaries[apn]
        parcel = Parcel(apn, 'other', True, polygon)
        parcels.append(parcel)

    def load():
        for idx, parcel in enumerate(parcels):
            pt = parcel.polygon.centroid
            yield (idx, (pt.x, pt.y, pt.x, pt.y), None)

    log.info('Building spatial index from parcel data.')
    index = Index(load())

    log.info('Loading network region data.')
    regions = load_regions(database)

    log.info('Scanning regions and mapping mazs to parcels.')
    iter_regions = counter(regions, 'Scanning region %s.')
    for region in iter_regions:
        apn = f'maz-{region.maz}'
        parcel = Parcel(apn, 'default', True, region.polygon)
        parcel.maz = region.maz
        parcels.append(parcel)
        result = index.intersection(region.polygon.bounds)
        for idx in result:
            parcel = parcels[idx]
            if region.polygon.contains(parcel.polygon.centroid):
                if parcel.maz is not None:
                    warning = 'Parcel %s is in both region %s and %s' \
                        '; the latter region will be kept.'
                    log.warning(warning % (parcel.apn, parcel.maz, region.maz))
                parcel.maz = region.maz
    del regions

    def dump():
        for parcel in parcels:
            yield (parcel.apn, parcel.maz, parcel.kind, int(parcel.cooling),
                   None, None, dumps(parcel.polygon.centroid),
                   dumps(parcel.polygon))

    log.info('Writing parsed parcels to database.')
    database.insert_values('parcels', dump(), 8)
    database.connection.commit()

    log.info('Creating indexes on new tables.')
    create_indexes(database)
Example #23
class Shape:
    '''
        Base class of BlueSky shapes
    '''
    # Global counter to keep track of used shape ids
    max_area_id = 0

    # Weak-value dictionary of all Shape-derived objects by name, and id
    areas_by_id = WeakValueDictionary()
    areas_by_name = WeakValueDictionary()

    # RTree of all areas for efficient geospatial searching
    areatree = Index()

    @classmethod
    def reset(cls):
        ''' Reset shape data when simulation is reset. '''
        # Weak dicts and areatree should be cleared automatically
        # Reset max area id
        cls.max_area_id = 0

    def __init__(self, name, coordinates, top=1e9, bottom=-1e9):
        self.raw = dict(name=name, shape=self.kind(), coordinates=coordinates)
        self.name = name
        self.coordinates = coordinates
        self.top = np.maximum(bottom, top)
        self.bottom = np.minimum(bottom, top)
        lat = coordinates[::2]
        lon = coordinates[1::2]
        self.bbox = [min(lat), min(lon), max(lat), max(lon)]

        # Global weak reference and tree storage
        self.area_id = Shape.max_area_id
        Shape.max_area_id += 1
        Shape.areas_by_id[self.area_id] = self
        Shape.areas_by_name[self.name] = self
        Shape.areatree.insert(self.area_id, self.bbox)

    def __del__(self):
        # Objects are removed automatically from the weak-value dicts,
        # but need to be manually removed from the rtree
        Shape.areatree.delete(self.area_id, self.bbox)

    def checkInside(self, lat, lon, alt):
        ''' Returns True (or boolean array) if coordinate lat, lon, alt lies
            within this shape.

            Reimplement this function in the derived shape classes for this to
            work.
        '''
        return False

    def _str_vrange(self):
        if self.top < 9e8:
            if self.bottom > -9e8:
                return f' with altitude between {self.bottom} and {self.top}'
            else:
                return f' with altitude below {self.top}'
        if self.bottom > -9e8:
            return f' with altitude above {self.bottom}'
        return ''

    def __str__(self):
        return f'{self.name} is a {self.raw["shape"]} with coordinates ' + \
            ', '.join(str(c) for c in self.coordinates) + self._str_vrange()

    @classmethod
    def kind(cls):
        ''' Return a string describing what kind of shape this is. '''
        return cls.__name__.upper()
Example #24
def tpr_tree(request):
    # Create tree
    from rtree.index import Index, Property, RT_TPRTree
    return Index(properties=Property(type=RT_TPRTree))
Example #25
 def __init__(self):
     self.index = Index()
Example #26
def create_spatial_index(shape_dict):

    spatial_index = Index()
    for index, (name, shape) in enumerate(shape_dict.items()):
        spatial_index.insert(index, shape.bounds, obj=name)
    return spatial_index
Example #27
        'Newry, Mourne and Down.shp',
        'North Down and Ards.shp'
    ]
    polygons = []
    shapefile_records = []
    for shape_file in shape_files:
        print('Getting polygons from: {}'.format(shape_file))
        with fiona.open('shape files/' + shape_file) as collection:
            for shapefile_record in collection:
                shape = asShape(shapefile_record['geometry'])
                y1, x1, y2, x2 = shape.bounds
                shapefile_record['properties']['bounds'] = str((x1, y1, x2, y2))
                shapefile_records.append(shapefile_record)
                polygons.append(shape)

    index = Index()
    count = 0
    for polygon in polygons:
        index.insert(count, polygon.bounds)
        count += 1

    # recursively loop over every directory
    for root, directories, filenames in os.walk('root'):
        for filename in filenames:
            obj = None
            with open(os.path.join(root, filename), 'r') as f:
                bb = f.readline()
                tpl = eval(bb)
                r = Rect(*tpl)
                # point = Point(*r.centre_point)
                records = []
Example #28
def parse_mrt(database: SqliteUtil, path: str, src_epsg: int, prj_epsg: int,
        bounds: int = 30, steps: int = 96):
    log.info('Allocating tables for MRT temperature profiles.')
    create_tables(database)

    log.info('Loading network nodes from database.')
    nodes: Dict[str,Node]
    nodes = load_nodes(database)

    log.info('Loading network links from database.')
    links: Dict[str,Link]
    links = load_links(database, nodes)

    log.info(f'Searching for mrt files in {path}')
    csvfiles = iter(glob(f'{path}/**/*.csv', recursive=True))

    log.info('Handling initial dataset for profile construction.')
    points: List[Point]
    time: int 
    points, time = parse_points(next(csvfiles), src_epsg, prj_epsg)
    
    log.info('Building spatial index on MRT points.')
    index = Index((point.entry() for point in points))

    log.info('Scanning link bounds and building profiles.')
    mapping: Dict[FrozenSet[int],int] = {}
    count = 0
    empty = 0
    iter_links = counter(links.values(), 'Scanning link %s.')
    for link in iter_links:
        d = link.terminal_node.x * link.source_node.y - \
            link.source_node.x * link.terminal_node.y
        dx = link.terminal_node.x - link.source_node.x
        dy = link.terminal_node.y - link.source_node.y
        l = sqrt(dy * dy + dx * dx)

        nearby = index.intersection(link.bounds(bounds))
        contained = []
        for uuid in nearby:
            point = points[uuid]
            x = point.x
            y = point.y
            if l > 0:
                dist = abs(dy * x - dx * y + d ) / l
            else:
                px = point.x - link.source_node.x
                py = point.y - link.source_node.y
                dist = sqrt(px * px + py * py)
            if dist <= bounds:
                contained.append(point.id)
        
        if contained:
            profile = frozenset(contained)
            if profile in mapping:
                link.profile = mapping[profile]
            else:
                mapping[profile] = count
                link.profile = count
                count += 1
        else:
            empty += 1

    profiles: List[Tuple[int]]
    profiles = [tuple(key) for key in mapping.keys()]

    if empty:
        log.warning(f'Found {empty} links without any MRT temperature profile.')

    def dump_points():
        idx = time // (86400 // steps)
        for uuid, profile in enumerate(profiles):
            mrt, pet, utci = 0, 0, 0
            count = len(profile)
            for ptid in profile:
                point = points[ptid]
                mrt += point.mrt
                pet += point.pet
                utci += point.utci
            yield (uuid, idx, time, mrt / count, pet / count, utci / count)

    def dump_links():
        for link in links.values():
            yield (link.id, link.profile)

    log.info('Writing link updates and temperatures to database.')

    database.insert_values('mrt_temperatures', dump_points(), 6)
    database.insert_values('temp_links', dump_links(), 2)

    log.info('Merging, dropping and renaming old tables.')

    query = '''
        CREATE INDEX temp_links_link
        ON temp_links(link_id);
    '''
    database.cursor.execute(query)
    query = '''
        CREATE TABLE temp_links_merged
        AS SELECT
            links.link_id,
            links.source_node,
            links.terminal_node,
            links.length,
            links.freespeed,
            links.capacity,
            links.permlanes,
            links.oneway,
            links.modes,
            links.air_temperature,
            temp_links.mrt_temperature
        FROM links
        INNER JOIN temp_links
        USING(link_id);
    '''
    database.cursor.execute(query)

    original = database.count_rows('links')
    merged = database.count_rows('temp_links_merged')
    if original != merged:
        log.error('Original links and updated links tables '
            'do not align; quitting to prevent data loss.')
        raise RuntimeError
    
    database.drop_table('links', 'temp_links')
    query = '''
        ALTER TABLE temp_links_merged
        RENAME TO links;
    '''
    database.cursor.execute(query)

    database.connection.commit()

    del links
    del nodes
    del index
    del mapping
    del points

    log.info('Handling remaining temperatures with the defined profiles.')

    def dump_temperatures(time: int, temperatures: List[Tuple[float,float,float]]):
        idx = time // (86400 // steps)
        for uuid, profile in enumerate(profiles):
            mrt, pet, utci = 0, 0, 0
            count = len(profile)
            for tempid in profile:
                temp = temperatures[tempid]
                mrt += temp[0]
                pet += temp[1]
                utci += temp[2]
            yield (uuid, idx, time, mrt / count, pet / count, utci / count)

    for csvfile in csvfiles:
        time: int
        temperatures: List[Tuple[float,float,float]]
        temperatures, time = parse_temperatures(csvfile)

        log.info('Writing temperature data to database.')
        database.insert_values('mrt_temperatures',
            dump_temperatures(time, temperatures), 6)
        database.connection.commit()

    log.info('Creating indexes on new/updated tables.')
    create_indexes(database)
Example #29
        "name": "Irrigated agriculture",
        "color": "#14fb28"
    }
}

osm_land_use_idx = osm_land_use.sindex
# osm_roads_idx = osm_roads.sindex
# osm_waterways_idx = osm_waterways.sindex
# osm_water_idx = osm_water.sindex
# osm_traffic_idx = osm_traffic.sindex
# osm_transport_idx = osm_transport.sindex
# acled_idx = acled.sindex

idx = Index("./index/osm_land_use_idx")
print("here")
for i, k in list(osm_land_use_idx):
    idx.insert(i, k)
idx.close()
print("Done with osm_land_use_idx")

# idx = Index("./index/osm_roads_idx")
# idx.insert(osm_roads_idx)
# idx.close()
# print("Done with osm_roads_idx")

# idx = Index("./index/osm_waterways_idx")
# idx.insert(osm_waterways_idx)
# idx.close()
# print("Done with osm_waterways_idx")
Example #30
 def create_index_data(self, data):
     self.idx = Index()
     for d in data:
         self.idx.insert(d.index, d.box, d.index)