Example #1
def test_mask_pad(basic_image_2x2, basic_image_file, basic_geometry):
    """Output should be cropped to extent of data"""

    geometries = [basic_geometry]
    with rasterio.open(basic_image_file) as src:
        masked, transform = mask(src, geometries, crop=True, pad=True)

    assert masked.shape == (1, 4, 4)
    assert np.array_equal(masked[0], basic_image_2x2[1:5, 1:5])
Example #2
def test_mask_invert(basic_image, basic_image_file, basic_geometry):
    """Pixels inside the geometry are masked to nodata (0)"""

    geometries = [basic_geometry]
    basic_image[2:4, 2:4] = 0

    with rasterio.open(basic_image_file) as src:
        masked, transform = mask(src, geometries, invert=True)

    assert np.array_equal(masked[0], basic_image)
Example #3
def test_mask(basic_image_2x2, basic_image_file, basic_geometry):
    """Pixels outside the geometry are masked to nodata (0)"""

    geometries = [basic_geometry]

    with rasterio.open(basic_image_file) as src:
        masked, transform = mask(src, geometries)

    assert np.array_equal(masked[0], basic_image_2x2)
    assert (type(masked) == np.ndarray)
Example #4
def test_mask_all_touched(basic_image, basic_image_file, basic_geometry):
    """All pixels touched by geometry should be masked out as 3"""

    nodata = 3
    geometries = [basic_geometry]

    with rasterio.open(basic_image_file) as src:
        masked, transform = mask(src, geometries, nodata=nodata,
                                 invert=True, all_touched=True)

    assert np.array_equal(masked[0], basic_image * nodata)
Example #5
def test_mask_crop_all_touched(basic_image, basic_image_file, basic_geometry):
    """Output should be cropped to extent of data"""

    geometries = [basic_geometry]

    with rasterio.open(basic_image_file) as src:
        masked, transform = mask(src, geometries, crop=True,
                                 all_touched=True)

    assert masked.shape == (1, 3, 3)
    assert np.array_equal(masked[0], basic_image[2:5, 2:5])
Example #6
def test_mask_nodata(basic_image_2x2, basic_image_file, basic_geometry):
    """All pixels outside geometry should be masked out as 3"""

    nodata = 3
    geometries = [basic_geometry]

    basic_image_2x2[basic_image_2x2 == 0] = nodata

    with rasterio.open(basic_image_file) as src:
        masked, transform = mask(src, geometries, nodata=nodata)

    assert np.array_equal(masked[0], basic_image_2x2)
Example #7
def test_mask_filled(basic_image, basic_image_2x2, basic_image_file,
                     basic_geometry):
    """Should be returned as numpy.ma.MaskedArray if filled is False"""

    geometries = [basic_geometry]

    with rasterio.open(basic_image_file) as src:
        masked, transform = mask(src, geometries, filled=False)

    image = np.ma.MaskedArray(basic_image, mask=basic_image_2x2==0)

    assert (type(masked) == np.ma.MaskedArray)
    assert np.array_equal(masked[0].mask, image.mask)
    assert np.array_equal(masked[0], image)
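Taken together, Examples #1-#7 exercise the main keyword arguments of rasterio.mask.mask. A minimal standalone sketch of the call, assuming any GeoTIFF and one GeoJSON-like polygon (the path and coordinates below are hypothetical placeholders):

import rasterio
from rasterio.mask import mask

# Hypothetical polygon in the raster's coordinate space.
geometry = {
    "type": "Polygon",
    "coordinates": [[(2, 2), (2, 4.25), (4.25, 4.25), (4.25, 2), (2, 2)]],
}

with rasterio.open("example.tif") as src:
    # crop=True trims the output to the geometry's extent (pad=True would
    # add a half-pixel buffer); all_touched=True also keeps pixels merely
    # touched by the boundary; invert=True masks the inside instead of the
    # outside; filled=False returns a numpy MaskedArray instead of filling
    # masked pixels with nodata.
    out_image, out_transform = mask(src, [geometry], crop=True, nodata=0)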
Example #8
def clip_raster(rst_fn, shp_fn, out_fn):
    '''
    Take an input raster and a shapefile in the same CRS (strict) and
    write a raster clipped/cropped to the shapefile's extent. Intended
    for a shapefile with a single extent polygon, but it may also work
    with multiple polygons.

    ARGUMENTS:
    ----------
    rst_fn: [str] path to the GeoTiff to clip
    shp_fn: [str] path to the shapefile containing masking polygon(s)
    out_fn: [str] path to dump the clipped raster (will be overwritten)

    RETURNS:
    --------
    out_fn, with the side effect of writing a clipped GeoTiff file to
    disk with LZW compression.

    '''
    import fiona
    import rasterio
    from rasterio.mask import mask

    # shapefile work
    with fiona.open(shp_fn, "r") as shapefile:
        features = [feature["geometry"] for feature in shapefile]

    # input raster work
    with rasterio.open(rst_fn) as src:
        out_image, out_transform = mask(src, features, crop=True)
        out_meta = src.meta.copy()

    # output raster work
    out_meta.update({"driver": "GTiff",
                     "height": out_image.shape[1],
                     "width": out_image.shape[2],
                     "transform": out_transform,
                     "compress": "lzw"})

    with rasterio.open(out_fn, "w", **out_meta) as dst:
        dst.write(out_image)

    return out_fn
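A possible invocation of clip_raster, assuming a GeoTiff and a shapefile that share the same CRS (both paths are placeholders):

clipped_fn = clip_raster(rst_fn="dem.tif",
                         shp_fn="study_area.shp",
                         out_fn="dem_clipped.tif")
print("wrote:", clipped_fn)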
Example #9
File: GISops.py Project: aleaf/GIS_utils
def _clip_raster(inraster, features, outraster, **kwargs):

    # convert the features to geojson
    geoms = _to_geojson(features)
    rasterio = import_rasterio()  # check for rasterio
    from rasterio.mask import mask
    with rasterio.open(inraster) as src:
        print('clipping {}...'.format(inraster))

        defaults = {'crop': True,
                    'nodata': src.nodata}
        defaults.update(kwargs)

        out_image, out_transform = mask(src, geoms, **defaults)
        out_meta = src.meta.copy()

        out_meta.update({"driver": "GTiff",
                         "height": out_image.shape[1],
                         "width": out_image.shape[2],
                         "transform": out_transform})

        with rasterio.open(outraster, "w", **out_meta) as dest:
            dest.write(out_image)
            print('wrote {}'.format(outraster))
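The defaults.update(kwargs) pattern above lets callers override crop and nodata, or pass extra options straight through to mask(). A hedged sketch of such a call, assuming _to_geojson accepts shapely geometries (the paths and bounds are placeholders):

from shapely.geometry import box

# A hypothetical clip region; all_touched is forwarded to mask().
features = [box(500000, 4100000, 505000, 4105000)]
_clip_raster("input.tif", features, "clipped.tif", all_touched=True)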
Example #10
with rasterio.open(
    tiff_path,
    "w",
    driver="GTiff",
    height=HEIGHT,
    width=WIDTH,
    count=1, #????
    dtype=var_ch07.dtype,
    crs=p_crs,
    transform=new_affine,
    nodata=fill_value,
) as dst:
    dst.write(np.reshape(var_ch07,(1,HEIGHT,WIDTH)))

src = rasterio.open(tiff_path, mode='r+')
geodf = geopandas.read_file(LAND_POLYGON_SHAPE)
land_masking, other_affine = mask.mask(src, geodf[['geometry']].values.flatten(), invert=True, filled=False)
land_masking = np.ma.getmask(land_masking)
land_masking = np.reshape(land_masking, (HEIGHT,WIDTH))
src.close() # Free memory
src = None
geodf = None
############################################################

# Init multi-tracker
trackers = MultiTrackerImproved(cv2.TrackerCSRT_create)

image_list = []
BTD_list = []
refl_ch2_list = []
refl_ch6_list = []
golden_arch_list = [] #TODO: Both of these next two are temp!!!
Example #11
def extract_deforestation(inputs, def_value, dst_crs, pixel_size):
    # Creates output folder
    outputs_folder = os.path.join(inputs,"fixed")
    if not os.path.exists(outputs_folder):
        os.mkdir(outputs_folder)
    detail_folder = os.path.join(outputs_folder,"raster_detail")
    if not os.path.exists(detail_folder):
        os.mkdir(detail_folder)
    # Listing files
    pattern = inputs + os.path.sep + "content" + os.path.sep + '**' + os.path.sep + '**.tif'
    files = glob.glob(pattern, recursive=True)    
    # Parameters
    minx, miny, maxx, maxy = 0, 0, 0, 0
    crs = None
    epsg_code = None
        
    # loop for getting parameters of all raster files (*.tif)
    print("Calculating parameters for new files")
    for idx,rf in enumerate(files):
        print("file: " + rf)
        with rio.open(rf) as raster:
            raster_meta = raster.meta.copy()
            # Copying the first metadata
            if idx == 0:
                crs = raster.crs
                minx, miny, maxx, maxy = raster.bounds[0], raster.bounds[1], raster.bounds[2], raster.bounds[3]
            # Keep the largest left/bottom edges (common intersection extent)
            if raster.bounds[0] > minx:
                minx = raster.bounds[0]
            if raster.bounds[1] > miny:
                miny = raster.bounds[1]
            # Keep the smallest right/top edges (common intersection extent)
            if raster.bounds[2] < maxx:
                maxx = raster.bounds[2]
            if raster.bounds[3] < maxy:
                maxy = raster.bounds[3]
    
    # Creating polygon to crop the rasters files.
    bbox = box(minx, miny, maxx, maxy)
    print("Bounds: " + str(bbox))
    print("CRS: " + str(crs))
    geo = gpd.GeoDataFrame({'geometry': bbox}, index=[0], crs=crs.data)
    geo = geo.to_crs(crs = from_epsg(dst_crs))
    coords = getFeatures(geo)
    
    # loop for extracting, cropping and resampling raster files (*.tif)
    for rf in files:
        print("Working: " + rf)
        
        # Reproject the original raster and creates a tmp file
        rf_paths = rf.split(os.path.sep)
        rf_tmp = rf_paths[len(rf_paths)-1].replace(".tif","_tmp.tif")  
        rf_tmp = outputs_folder + os.path.sep + rf_tmp
        print("Reprojecting: " + rf_tmp)
        
        reproject_raster(rf, rf_tmp, 'EPSG:' + str(dst_crs), pixel_size)
        
        print("Opening: " + rf_tmp)
        with rio.open(rf_tmp) as raster:
            # Copy meta data from tmp file reprojected
            meta_dst = raster.meta.copy()
            
            print("Cropping raster")
            out_img, out_transform = mask(dataset=raster, shapes=coords, crop=True)            
            print("Dimention: H=" + str(out_img.shape[1]) + " W=" + str(out_img.shape[2]))
            # Extract values deforestation
            out_img[out_img != def_value] = 0
            meta_dst.update({"driver": "GTiff",
                 "height": out_img.shape[1],
                 "width": out_img.shape[2],
                 "transform": out_transform,
                 'compress': 'lzw',
                 'nodata': 0})
            
            file_name = rf_paths[len(rf_paths)-1]
            file_len = len(file_name)
            file_name = file_name[file_len-13:file_len-9]
            dest_file = os.path.join(detail_folder, file_name + ".tif")
            print("Saving: " + dest_file)
            with rio.open(dest_file, 'w', **meta_dst) as dst:
                dst.write(out_img)
        
        # Delete the tmp file
        print("Deleting tmp: " + rf_tmp)        
        os.remove(rf_tmp)
Example #12
def mask_ras(raster, buffer):
    out_img, out_transform = mask.mask(raster,
                                       shapes=buffer['geometry'],
                                       crop=True,
                                       nodata=-1)
    return out_img, out_transform
Example #13
            with rasterio.open(parent_path + '/imagery/' + year + '/' + tile +
                               '/' + tile + '_full.tif') as src:
                out_meta = src.meta.copy()
                print(src.bounds)

                tick = 0  # To be used for the iterations
                index = 0  # To be used to index the geometries, will allow us to skip geometries that raise exceptions

                # Use while so that when an exception is raised we do not produce less examples
                while tick < count * 10:
                    if True in list(
                            year_shape.contains(
                                buildings['geometry'].iloc[index])):
                        try:
                            out_image, out_transform = mask(
                                src, [buildings['geometry'].iloc[index]],
                                crop=True)

                            out_meta.update({
                                "driver": "GTiff",
                                "height": out_image.shape[1],
                                "width": out_image.shape[2],
                                "transform": out_transform
                            })

                            with rasterio.open(
                                    parent_path +
                                    "/labelled/building_mask/negative/" +
                                    year + "/" + tile + "/" +
                                    buildings.iloc[index]['TOID'] + ".tif",
                                    "w+", **out_meta) as dest:
Example #14
def get_dem(dem_root, epsg, coords, outfold, hyd_num, grid_list, work_hydAr):
    # print("extracting DEM for Op Catch")

    dtm_list = []

    for grid in grid_list:
        path = os.path.join(dem_root, grid.lower())
        ras_test = os.listdir(path)
        for x in ras_test:
            if x[-10:] == 'DTM_5m.tif':
                ras_file = os.path.join(path, x)
                dtm_list.append(ras_file)

    # src_files_to_mosaic = []
    if len(dtm_list) > 1:
        # print(">1 OS grid masking and merging rasters")
        mx, my, Mx, My = work_hydAr.geometry.total_bounds
        src_files_to_mosaic = []
        for fp in dtm_list:
            src = rasterio.open(fp)
            # out_img, out_transform = mask(dataset=src, shapes=coords, crop=True)
            src_files_to_mosaic.append(src)
        mosaic, out_trans = merge(src_files_to_mosaic, bounds=[mx, my, Mx, My])

    elif len(dtm_list) == 0:
        raise ValueError(
            "eh what's going on? looks like you've got no DTMs to merge?")
    else:
        # print("just one OS Grid - masking now")
        src = rasterio.open(dtm_list[0])
        mosaic, out_trans = mask(dataset=src, shapes=coords, crop=True)

    out_meta = src.meta.copy()

    out_meta.update({
        "driver": "GTiff",
        "height": mosaic.shape[1],
        "width": mosaic.shape[2],
        "transform": out_trans,
        "crs": CRS.from_epsg(epsg),
        "compress": "lzw"
    })

    # print("exporting output raster")
    out_ras = os.path.join(outfold, "OC{0}_DTM.tif".format(hyd_num))
    with rasterio.open(out_ras, "w", **out_meta) as dest:
        dest.write(mosaic)

    mosaic = None
    out_img = None
    src = None

    maskedRas = rasterio.open(out_ras)

    hydAr_gj = getFeatures(work_hydAr)
    mosaicb, otb = mask(dataset=maskedRas,
                        shapes=hydAr_gj,
                        crop=False,
                        nodata=(-100),
                        all_touched=False)

    maskedRas = None

    with rasterio.open(out_ras, "w", **out_meta) as dest:
        dest.write(mosaicb)
Example #15
def grid_raster_and_annotations_to_coco(src_grid,
                                        src_im,
                                        src_ann,
                                        dst_im,
                                        prefix,
                                        dst_ann,
                                        invert_y=False,
                                        category_field="Class",
                                        verbose=True):
    #Function for SegUtils
    grid = geopandas.read_file(src_grid)  #Open The Grid shp
    data = rio.open(src_im)  # Open the raster tif
    ntiles = len(grid)

    if verbose:
        print("==============================================")
        print("Preprocessing Annotation Shapefile")
    anno = geopandas.read_file(src_ann)
    anno = anno[anno["geometry"].notnull()]
    anno = anno.explode()
    anno['geometry'] = anno.geometry.buffer(0)
    anno.to_file(driver='ESRI Shapefile',
                 filename=dst_ann + "preprocessed_annotations.shp")
    if verbose:
        print("Done Preprocessing Annotation Shapefile")
        print("==============================================")
    ##create categories for coco format
    coco_info = {}
    coco_licenses = {}
    coco_images = []  #List of dict of {file_name,id, height,width}
    coco_categories = []  # List of dict of{supercategory, id, name}
    coco_annotations = []  # List of dict of {id, category_id, iscrowd, segmentation[[LISTofLISTS]], image_id, area, bbox[]}
    category_log = []  # list of classnames
    category_id_log = []
    anno_id_log = []  # list of anno ids
    im_id_log = []  #list of image ids
    if verbose:
        print("==============================================")
        print("Starting to process image and annotation files")
    for i in tqdm(range(0, ntiles),
                  desc="Processing Tiles"):  #for every object in grid
        #for i in range(0,100):
        im_id = ("{:05d}".format(prefix)) + "0123" + (
            "{:05d}".format(i)
        )  #Unique id: first 5 digits=prefix, 0123 = image ist, las 5 digits refer to id
        outfile_im = dst_im + im_id + ".tif"
        outfile_ann = dst_ann + "Tile_%d_Annotation.shp" % i
        #anno_tilename = f'COCO_train2016_000000{100000+i}'
        if verbose:
            print("Clipping tile" + str(i) + " of " + str(ntiles))
        #print("Target: "+outfile_im)
        coords = getFeatures(grid, i)  #get the polygon
        Tile, out_transform = mask(dataset=data, shapes=coords,
                                   crop=True)  #crop the raster to the polygon
        xres = abs(out_transform[0])
        yres = abs(out_transform[4])
        out_meta = data.meta.copy()  #get a copy of the metadata of the raster
        out_meta.update({
            "driver": "GTiff",
            "height": Tile.shape[1],
            "width": Tile.shape[2],
            "transform": out_transform
        })  #update the meta for the cropped raster
        #make the dir if necessary
        if not os.path.exists(dst_im):
            os.makedirs(dst_im)
        #write the image
        with rio.open(outfile_im, "w", **out_meta) as dest:
            dest.write(Tile)
        #UPDATE COCO FOR THIS IMAGE
        Image_descr = {
            "file_name": im_id + ".tif",
            "id": im_id,
            "height": out_meta["height"],
            "width": out_meta["width"]
        }
        coco_images.append(Image_descr)
        im_id_log.append(im_id)  #also register the image id in the log
        if verbose:
            print("Clipping annotation" + str(i) + "of " + str(ntiles))
        #print("Target: "+outfile_ann)
        (xmin, ymin, xmax, ymax) = grid.bounds[i:i + 1].iloc[0, :]
        subprocess.call("ogr2ogr -clipsrc %d %d %d %d " %
                        (xmin, ymin, xmax, ymax) + '"' + outfile_ann + '"' +
                        " " + '"' + dst_ann + "preprocessed_annotations.shp" +
                        '"',
                        shell=True)
        #print("Processing Annotations")
        anno = geopandas.read_file(outfile_ann)
        #drop missing geometries
        anno = anno[anno["geometry"].notnull()]
        if anno.empty:
            if verbose:
                print("No Valid Annotations found, skipping Tile ", str(i))
            continue
        #EXPLODE to remove multipart features
        anno = anno.explode()
        #anno.buffer(0)
        #simplify remaining geometries CAN CAUSE CRASHES
        #if not all(anno.geometry.is_empty):
        #    anno.anno = anno.simplify(1, preserve_topology=True)
        #drop small geometries
        anno = anno[anno.geometry.area *
                    (10 * 10) > 500]  # drop very small polys
        if anno.empty:
            if verbose:
                print("No Valid Annotations found, skipping Tile ", str(i))
            continue
        #anno.to_file(driver = 'ESRI Shapefile', filename= "result.shp")
        #print("Converting to Local Coordinates")
        #width=xmax-xmin
        height = ymax - ymin
        npoly = len(anno)
        # ADD CLASSES FOR THIS ANNO TO COCO CLASS, IF THEY ARE NOT REGISTERED THERE YET
        anno_classes = anno[
            category_field]  #get classes in current anno   (in relevant category column)
        anno_classes_uniq = list(set(anno_classes))  #get uniques
        for k in anno_classes_uniq:
            if k not in category_log:  # if this category is not registered yet
                new_category_id = len(coco_categories)
                if verbose:
                    print("creating new category: ", k, " under id: ",
                          new_category_id)

                Class_descr = {
                    "supercategory": "LandCover",
                    "id": new_category_id,
                    "name": k
                }  # create a new category (ids start at 0)
                coco_categories.append(Class_descr)  # append it
                category_log.append(k)  # register the new class in the log
                category_id_log.append(
                    new_category_id)  # register the new class id in the log


        # For each poly in the clipped shape:
        for j in range(0, npoly):

            #UPDATE THE GEOMETRY
            oldpoly = anno.exterior.iloc[j]
            poly_x, poly_y = oldpoly.coords.xy
            newpoly = shapely.geometry.Polygon([[
                (x - xmin) / xres, (y - ymin) / yres
            ] for x, y in zip(poly_x, poly_y)])
            if invert_y:
                #print("Also inverting Y Axis")
                # The coordinates are shifted to tile coordinates (subtraction),
                # the resolution converts meters to pixel values, and the y axis
                # is mirrored
                newpoly = shapely.geometry.Polygon([[
                    (x - xmin) / xres, (height - (y - ymin)) / yres
                ] for x, y in zip(poly_x, poly_y)])
            anno.geometry.iloc[j] = newpoly
            #ADD THE ANNOTATIONS TO COCO ANNOTATIONS
            anno_id = ("{:05d}".format(prefix)) + "0987" + (
                "{:05d}".format(len(anno_id_log))
            )  #Unique id: erste 5 ziffern=prefix, 0987 bedeutet dass es anno ist, letzte 5 ziffern id
            image_id = im_id
            category_name = anno[category_field].values[
                j]  #find the category name
            category_id = category_id_log[category_log.index(
                category_name
            )]  #find the corresponding id in this category name (this is where the logs come in)
            poly_x_n, poly_y_n = newpoly.exterior.coords.xy
            segmentation = [[
                round(val, 3) for pair in zip(poly_x_n, poly_y_n)
                for val in pair
            ]]  # interleave x and y coordinates (borrowed from Stack Overflow)
            bbox = newpoly.bounds
            area = newpoly.area
            iscrowd = 0
            Anno_descr = {
                "id": anno_id,
                "category_id": category_id,
                "iscrowd": iscrowd,
                "segmentation": segmentation,
                "image_id": image_id,
                "area": area,
                "bbox": bbox
            }
            coco_annotations.append(Anno_descr)
            anno_id_log.append(anno_id)  #also register the anno id in the log
    if verbose:
        print("Finished processing image and annotation files")
        print("==============================================")
        print("Creating Coco-styled Database")
    coco_style_db = {
        "info": coco_info,
        "licenses": coco_licenses,
        "images": coco_images,
        "annotations": coco_annotations,
        "categories": coco_categories
    }
    registers = [category_log, category_id_log, anno_id_log, im_id_log]
    return (coco_style_db, registers)
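A hedged sketch of how the returned COCO-style database might be consumed, assuming the directory layout implied by the function (all paths and the prefix value are placeholders):

import json

coco_db, registers = grid_raster_and_annotations_to_coco(
    src_grid="grid.shp", src_im="ortho.tif", src_ann="labels.shp",
    dst_im="tiles/", prefix=1, dst_ann="annos/")

# Persist the database in the usual COCO JSON form.
with open("annotations_coco.json", "w") as f:
    json.dump(coco_db, f)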
Example #16
# Check the dimensions and bands
print(stack.shape)  # dimensions
print(stack.count)  # bands

##############################################################################
# SHAPEFILE
##############################################################################

shapefile_fp = raw_fp + 'Lindsay_white_river_land_cover/Lindsay_white_river_land_cover.shp'
shapefile = gpd.read_file(shapefile_fp)
shapefile.crs
shapefile.bounds

geoms = shapefile.geometry.values

geometry = geoms[0]
print(type(geometry))
print(geometry)

feature = [mapping(geometry)]  # can also do this using polygon.__geo_interface__
print(type(feature))
print(feature)

out_image, out_transform = mask(stack, feature, crop=True, nodata=0)
out_image.shape
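Following the metadata-update pattern used throughout these examples, the clipped stack could then be written back out. A sketch, assuming stack is the rasterio dataset opened earlier and using a placeholder output path:

import rasterio

out_meta = stack.meta.copy()
out_meta.update({"driver": "GTiff",
                 "height": out_image.shape[1],
                 "width": out_image.shape[2],
                 "transform": out_transform})
with rasterio.open("stack_clipped.tif", "w", **out_meta) as dst:
    dst.write(out_image)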

##############################################################################
# RANDOM FOREST MODEL
##############################################################################
Example #17
    def _sample_rio_dataset(self, polygon, invert):
        """
        Internal method to sample a rasterIO dataset using
        rasterIO built ins

        Parameters
        ----------
        polygon : (shapely.geometry.Polygon or GeoJSON-like dict)
            The values should be a GeoJSON-like dict or an object
            that implements the Python geo interface protocol.

            Alternatively if the user supplies the vectors
            of a polygon in the format [(x0, y0), ..., (xn, yn)]
            a single shapely polygon will be created for
            cropping the data

        invert : bool
            Default value is False. If invert is True then the
            area inside the shapes will be masked out

        Returns
        -------
            tuple : (arr_dict, raster_crp_meta)

        """
        if rasterio is None:
            msg = 'Raster()._sample_rio_dataset(): error ' + \
                  'importing rasterio try "pip install rasterio"'
            raise ImportError(msg)
        else:
            from rasterio.mask import mask

        if shapely is None:
            msg = 'Raster()._sample_rio_dataset(): error ' + \
                  'importing shapely - try "pip install shapely"'
            raise ImportError(msg)
        else:
            from shapely import geometry

        if isinstance(polygon, list) or isinstance(polygon, np.ndarray):
            shapes = [geometry.Polygon([[x, y] for x, y in polygon])]

        else:
            shapes = [polygon]

        rstr_crp, rstr_crp_affine = mask(self._dataset,
                                         shapes,
                                         crop=True,
                                         invert=invert)

        rstr_crp_meta = self._dataset.meta.copy()
        rstr_crp_meta.update({
            "driver": "GTiff",
            "height": rstr_crp.shape[1],
            "width": rstr_crp.shape[2],
            "transform": rstr_crp_affine
        })

        arr_dict = {self.bands[b]: arr for b, arr in enumerate(rstr_crp)}

        return arr_dict, rstr_crp_meta
Example #18
def _create_non_zero_population_by_pixels_locations(geodataframe,
                                                    raster,
                                                    pop_string,
                                                    weights=None,
                                                    force_crs_match=True):
    """Function that returns the actual population of each pixel from a given
    geodataframe and variable.

    geodataframe       : a geopandas dataframe that it is contained in the raster

    raster             : the raster used from rasterio

    pop_string         : a string of the column name of the geodataframe that the estimation will be made

    weights            : vector of weights in each position of the pixel values according 'return_weights_from_regression' function. This must be provided by the user.

    force_crs_match    : bool. Default is True.
                         Wheter the Coordinate Reference System (CRS) of the polygon will be reprojected to the CRS of the raster file.
                         It is recommended to let this argument as True.
    """

    _check_presence_of_crs(geodataframe)

    if not force_crs_match:
        warnings.warn(
            "The polygon is not being reprojected. The clipping might be performed on a polygon that does not match the raster."
        )

    else:
        with rasterio.open(raster) as raster:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                geodataframe_projected = geodataframe.to_crs(
                    crs=raster.crs.data)
            result_pops_array = np.array([])
            result_lons_array = np.array([])
            result_lats_array = np.array([])

            pbar = tqdm(
                total=len(geodataframe_projected),
                desc="Estimating population per pixel",
            )

            for line_index in range(len(geodataframe_projected)):
                polygon_projected = geodataframe_projected.iloc[[line_index]]

                coords = getFeatures(polygon_projected)

                out_img, out_transform = mask(dataset=raster,
                                              shapes=coords,
                                              crop=True)
                """Calculating the population for each pixel"""
                trans_numpy = weights[
                    out_img]  # Pixel population from regression
                orig_estimate = polygon_projected[
                    pop_string]  # Original Population Value of The polygon
                correction_term = orig_estimate / trans_numpy.sum()
                final_pop_numpy_pre = trans_numpy * np.array(correction_term)

                flatten_final_pop_numpy_pre = np.ndarray.flatten(
                    final_pop_numpy_pre)

                non_zero_pop_index = np.where(flatten_final_pop_numpy_pre != 0)

                final_pop_numpy = flatten_final_pop_numpy_pre[
                    non_zero_pop_index]
                """Retrieving location of each pixel"""
                lons, lats = create_lon_lat(out_img, out_transform)

                final_lons = np.ndarray.flatten(lons)[non_zero_pop_index]
                final_lats = np.ndarray.flatten(lats)[non_zero_pop_index]
                """Append all flattens numpy arrays"""
                result_pops_array = np.append(result_pops_array,
                                              final_pop_numpy)
                result_lons_array = np.append(result_lons_array, final_lons)
                result_lats_array = np.append(result_lats_array, final_lats)

                pbar.update(1)

            data = {
                "pop_value": result_pops_array,
                "lons": result_lons_array.round().astype(int).tolist(),
                "lats": result_lats_array.round().astype(int).tolist(),
            }

            corresp = pd.DataFrame.from_dict(data)
        pbar.close()

    return corresp
Example #19
    temp_outFolder = "C:/Temp/LEI_Cities"

    inR = rasterio.open(inGHSL)
    meta = inR.meta.copy()
    inD = gpd.read_file(inShp)
    inD = inD.to_crs(inR.crs)

    #Loop through input dataset
    for idx, row in inD.iterrows():
        print(idx)
        if idx >= 0:
            outFolder = os.path.join(temp_outFolder, "%s" % idx)
            try:
                #os.mkdir(outFolder)
                coords = getFeatures(inD, idx)
                out_img, out_transform = mask(inR, shapes=coords, crop=True)
                #Compare urban expansions to existing built area for three comparisons
                #   new2014     vs existing2000
                #   new2000     vs existing1990
                #   new20002014 vs existing1990

                #Create all baseline data
                meta.update({
                    "height": out_img.shape[1],
                    "width": out_img.shape[2],
                    "transform": out_transform,
                    "driver": "GTiff"
                })
                with rasterio.open(os.path.join(outFolder, "GHSL.tif"), 'w',
                                   **meta) as outFile:
                    outFile.write(out_img)
Example #20
    projection = partial(pyproj.transform, pyproj.Proj(init="epsg:4326"), pyproj.Proj(init="epsg:32629"))
    wkt = loads(wkt_in)
    return transform(projection, wkt)
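The partial(pyproj.transform, pyproj.Proj(init=...)) idiom above is deprecated since pyproj 2; an equivalent sketch with the Transformer API, reusing the same EPSG codes (the _v2 name is hypothetical):

import pyproj
from shapely.ops import transform
from shapely.wkt import loads

def convert_wkt_to_polygon_v2(wkt_in):
    # always_xy=True keeps the (lon, lat) axis order of the legacy API.
    transformer = pyproj.Transformer.from_crs("EPSG:4326", "EPSG:32629",
                                              always_xy=True)
    return transform(transformer.transform, loads(wkt_in))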


areas_json = {}
mask_json = {}
loop_dir = classified_LC_dir
sorted_files = sorted(os.listdir(loop_dir))
for folder in sorted_files:
    logger.info(folder)
    img = rasterio.open(loop_dir + folder)
    file_name = re.sub("\\..*$", "", folder)
    date = file_name.split('_')[-2][:8]
    LC_polygon = convert_wkt_to_polygon(config.TEXANA_WKT_REDUCED)
    [prdt_arr], prdt_xy = mask.mask(dataset=img, shapes=[LC_polygon], nodata=config.NO_DATA, all_touched=True, crop=True)

    height, width = prdt_arr.shape
    binary_img = prdt_arr
    logger.info(binary_img.shape)
    msk_arr = prdt_arr.astype(np.uint8)
    msk_arr[msk_arr == config.BLACK] = config.WATER
    msk_arr[msk_arr == config.NO_DATA] = config.LAND
    msk_arr[msk_arr == config.WHITE] = config.LAND
    bool_msk = msk_arr > 0
    msk_arr = remove_small_holes(bool_msk, area_threshold=2048).astype(np.uint8)

    bool_msk = msk_arr > 0
    msk_arr = remove_small_objects(bool_msk, min_size=1024).astype(np.uint8)
    msk_arr[msk_arr == config.WATER] = config.RESERVOIR_COLOR
    msk_arr_flatten = msk_arr.flatten()
Example #21
    allTiffFiles = [f for f in listdir(folder_containing_tifffiles) if isfile(join(folder_containing_tifffiles, f))]
    jsonFileList = [json_file]

    for tiffFileName in allTiffFiles:
        for jsonFileName in jsonFileList:
            stateData = json.loads(open(jsonFileName).read())
            print('tiffFileName',tiffFileName)
            print('jsonFileName',jsonFileName)
            for currVillageFeature in stateData["features"]:
                try:
                    vCode2001=currVillageFeature["properties"]["pc01_village_id"]
                    vCode2011=currVillageFeature["properties"]["pc11_village_id"]
                    geoms=currVillageFeature["geometry"]
                    listGeom=[]
                    listGeom.append(geoms)
                    geoms=listGeom
                    with rasterio.open(folder_containing_tifffiles+'/'+tiffFileName) as src:
                        out_image, out_transform = mask(src, geoms, crop=True)

                    out_meta = src.meta.copy()
                    # save the resulting raster  
                    out_meta.update({"driver": "GTiff",
                        "height": out_image.shape[1],
                        "width": out_image.shape[2],
                    "transform": out_transform})
                    saveFileName= output_directory+'/'+tiffFileName[:-4]+"@"+str(vCode2001)+"@"+str(vCode2011)+".tif"
                    with rasterio.open(saveFileName, "w", **out_meta) as dest:
                        dest.write(out_image)
                except:
                    continue
Example #22
def VectorScanner(landuse,
                  inun_file,
                  curve_path,
                  maxdam_path,
                  landuse_col='landuse',
                  inun_col='inun_val',
                  centimeters=False,
                  save=False,
                  **kwargs):
    """
    Vector based implementation of a direct damage assessment
    
    Arguments:
        *landuse* : Shapefile, Pandas DataFrame or Geopandas GeoDataFrame
        with land-use information of the area.

        *inun_file* : GeoTiff with inundation depth per grid cell. Make sure
        that the unit of the inundation map corresponds with the unit of the
        first column of the curves file.

        *curve_path* : File with the stage-damage curves of the different
        land-use classes. Can also be a pandas DataFrame (but not a numpy Array).

        *maxdam_path* : File with the maximum damages per land-use class
        (in euro/m2). Can also be a pandas DataFrame (but not a numpy Array).

    Optional Arguments:
        *centimeters* : Set to True if the inundation map and curves are in 
        centimeters
        
        *landuse_col* : Specify the column name of the unique landuse ids.
        Default is set to **landuse**.

        *inun_col* : Specify the column name of the inundation depth.
        Default is set to **inun_val**.
        
        *save* : Set to True if you would like to save the output. Requires 
        several **kwargs**
        
    kwargs:
        *output_path* : Specify where files should be saved.
        
        *scenario_name*: Give a unique name for the files that are going to be saved.
        
        *print_tqdm*: Set to **False** when progress output is undesired.
    
    Raises:
        *ValueError* : on missing kwargs
    
    Returns:    
     *damagebin* : Table with the land-use class names (1st column) and the 
     damage for that land-use class (2nd column).
     
    """
    # load land-use map
    if isinstance(landuse, str):
        landuse = gpd.read_file(landuse)
    elif isinstance(landuse, gpd.GeoDataFrame):
        landuse = landuse.copy()
    elif isinstance(landuse, pd.DataFrame):
        landuse = gpd.GeoDataFrame(landuse, geometry='geometry')
    else:
        print(
            'ERROR: landuse should either be a shapefile, a GeoDataFrame or a pandas Dataframe with a geometry column'
        )

    if isinstance(inun_file, str):
        # try to open it first as a raster; if that doesn't work, we assume it's a shapefile or GeoPackage that can be opened by geopandas
        try:
            with rasterio.open(inun_file) as src:
                if src.crs.to_dict() != landuse.crs:
                    landuse = landuse.to_crs(src.crs.to_dict())

                geoms = [mapping(geom) for geom in landuse.geometry]

                out_image, out_transform = mask(src, geoms, crop=True)
                out_image = np.array(out_image, dtype=int)

                # if inundation map is not in centimeters (and assumed to be in meters), multiply by 100
                if not centimeters:
                    out_image = out_image * 100
                out_image[out_image > 1000] = -1
                out_image[out_image <= 0] = -1
                gdf = None
        except:
            gdf = gpd.read_file(inun_file)
            out_image = None

    elif isinstance(inun_file, gpd.GeoDataFrame):
        gdf = inun_file.copy()
    elif isinstance(inun_file, pd.DataFrame):
        gdf = gpd.GeoDataFrame(inun_file, geometry='geometry')
    else:
        raise ValueError(
            'ERROR: inundation file should be a GeoTiff,  a shapefile, a GeoDataFrame \
              or any other georeferenced format that can be read by rasterio or geopandas'
        )

    # rename inundation column, set values to centimeters if required and to integers
    if isinstance(gdf, gpd.GeoDataFrame):
        gdf = gdf.rename(columns={inun_col: 'inun_val'})
        if not centimeters:
            gdf['inun_val'] = gdf.inun_val * 100
            gdf['inun_val'] = gdf.inun_val.astype(int)

    # Load curves
    if isinstance(curve_path, pd.DataFrame):
        curves = curve_path.copy()
    elif isinstance(curve_path, np.ndarray):
        raise ValueError(
            'ERROR: for the vector-based approach we use a pandas DataFrame, not a Numpy Array'
        )
    elif curve_path.endswith('.csv'):
        curves = pd.read_csv(curve_path, index_col=[0])

    # Load maximum damages
    if isinstance(maxdam_path, str) and maxdam_path.endswith('.csv'):
        maxdam_path = pd.read_csv(maxdam_path)

    if isinstance(maxdam_path, pd.DataFrame):
        maxdam = dict(zip(maxdam_path[landuse_col], maxdam_path['damage']))
    elif isinstance(maxdam_path, np.ndarray):
        maxdam = dict(zip(maxdam_path[:, 0], maxdam_path[:, 1]))
    elif isinstance(maxdam_path, dict):
        maxdam = maxdam_path

    # convert raster to polygon
    if isinstance(out_image, np.ndarray):
        results = ({
            'properties': {
                'inun_val': v
            },
            'geometry': s
        } for i, (s, v) in enumerate(
            shapes(out_image[0, :, :], mask=None, transform=out_transform)))

        gdf = gpd.GeoDataFrame.from_features(list(results), crs=src.crs)

    # cut down to feasible area
    gdf = gdf.loc[gdf.inun_val > 0]
    gdf = gdf.loc[gdf.inun_val < 1000]

    # Check if we need to turn off tqdm:
    tqdm_print = kwargs.get('print_tqdm', True)

    # Split GeoDataFrame to make sure we have a unique shape per land use and inundation depth
    unique_df = []
    for row in tqdm(gdf.itertuples(index=False),
                    total=len(gdf),
                    desc='Get unique shapes',
                    disable=not tqdm_print):
        hits = landuse.loc[list(
            landuse.sindex.intersection(row.geometry.bounds))]
        row_buff = row.geometry.buffer(0)
        for hit in hits.itertuples(index=False):
            hit_buff = hit.geometry.buffer(0)
            if hit_buff.intersects(row_buff):
                unique_df.append([
                    row.inun_val, hit.landuse,
                    row_buff.intersection(hit_buff)
                ])

    # Create new dataframe
    tmp_df = pd.DataFrame(unique_df,
                          columns=['depth', landuse_col, 'geometry'])
    new_gdf = gpd.GeoDataFrame(tmp_df)

    # And remove empty geometries where there was no intersection in the end
    new_gdf = new_gdf.loc[new_gdf.geometry.geom_type != 'GeometryCollection']

    # Get area of shape
    new_gdf['area_m2'] = new_gdf.area

    # And estimate the losses
    if tqdm_print:
        tqdm.pandas(desc='Estimate damages')
        func = new_gdf.progress_apply
    else:
        func = new_gdf.apply

    new_gdf['damaged'] = func(lambda x: get_losses(x, curves, maxdam), axis=1)

    # Write the damages back to the original land-use shapes
    d_sindex = new_gdf.sindex

    landuse = landuse.reset_index(drop=True)
    landuse['area_m2'] = landuse.area

    loss_dict = {}
    for x in tqdm(landuse.itertuples(),
                  total=len(landuse),
                  desc='Damage per object',
                  disable=not tqdm_print):
        hits = new_gdf.iloc[list(d_sindex.intersection(x.geometry.bounds))]
        damage = 0
        area_flooded = 0
        inun_levels = []
        x_buff = x.geometry.buffer(0)
        for hit in hits.itertuples(index=False):
            hit_buff = hit.geometry.buffer(0)
            if (x_buff.intersection(hit_buff)).area / hit_buff.area > 0.95:
                damage += hit.damaged
                area_flooded += hit.area_m2
                inun_levels.append(hit.depth)

        if len(inun_levels) == 0:
            loss_dict[x.Index] = 0, 0, 0, 0, 0
        else:
            loss_dict[x.Index] = damage, area_flooded, min(inun_levels), max(
                inun_levels), np.mean(inun_levels)

    tgt_cols = ['tot_dam', 'area_flooded', 'min_inun', 'max_inun', 'mean_inun']
    loss_df = pd.DataFrame.from_dict(loss_dict,
                                     orient='index',
                                     columns=tgt_cols)
    loss_gdf = gpd.GeoDataFrame(
        landuse.merge(loss_df, left_index=True, right_index=True))

    # If save is set to True, save original land-use map with damage values per shape.
    if save:
        # requires adding output_path and scenario_name to function call
        output_path = check_output_path(kwargs)
        scenario_name = check_scenario_name(kwargs)
        loss_gdf.to_file(
            p_join(output_path, 'damages_{}.shp'.format(scenario_name)))

    # And return the GeoDataFrame with damage statistics per unique object or shape
    return loss_gdf
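A hedged example call, following the docstring above (all file paths and the scenario name are placeholders):

loss_gdf = VectorScanner(landuse="landuse.shp",
                         inun_file="inundation.tif",
                         curve_path="curves.csv",
                         maxdam_path="maxdam.csv",
                         save=True,
                         output_path="output/",
                         scenario_name="baseline")
print(loss_gdf[['landuse', 'tot_dam']].head())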
Example #23
def compute_glcm_textures(polarisations, kinds, radii):
    """Compute GLCM textures for various offsets and radiuses using
    OrfeoToolbox.
    """
    progress = tqdm(total=(len(CASE_STUDIES) * len(polarisations) *
                           len(kinds) * len(radii)))

    for city in CASE_STUDIES:

        output_dir = os.path.join(DATA_DIR, 'processed', 'sentinel-1', city.id,
                                  'textures')
        os.makedirs(output_dir, exist_ok=True)

        # Get min. and max. values by cutting 2% the histogram of the image
        # masked by a rough urban mask (smaller AOI).
        aoi = city.aoi['features'][0]['geometry']
        aoi = shape(aoi)
        aoi = reproject_geom(aoi, src_epsg=city.epsg, dst_epsg=4326)
        aoi = mapping(aoi)

        for polarisation in polarisations:

            # Load image data both masked and unmasked
            filename = f'{polarisation}-gamma0.tif'
            img_path = os.path.join(DATA_DIR, 'processed', 'sentinel-1',
                                    city.id, filename)
            with rasterio.open(img_path) as src:
                profile = src.profile
                gamma0 = src.read(1)
                gamma0_masked, _ = mask(src, [aoi], crop=True, nodata=0)
                gamma0_masked = gamma0_masked[0, :, :]

            # Get new vmin and vmax values based on the rough
            # urban mask and a 2% histogram cutting
            values = gamma0_masked[gamma0_masked != 0].ravel()
            vmin = np.percentile(values, 2)
            vmax = np.percentile(values, 98)
            gamma0[gamma0 < vmin] = vmin
            gamma0[gamma0 > vmax] = vmax

            # Rescale to UINT8 range
            gamma0 = np.interp(gamma0, (vmin, vmax), (0, 255))
            gamma0 = gamma0.astype(np.uint8)
            profile.update(dtype=gamma0.dtype.name, nodata=None)

            for kind, radius in product(kinds, radii):

                if textures_computed(output_dir, kind, radius, polarisation):
                    progress.update(1)
                    continue

                compute_textures(src_image=gamma0,
                                 src_profile=profile,
                                 dst_dir=output_dir,
                                 kind=kind,
                                 x_radius=radius,
                                 y_radius=radius,
                                 x_offset=1,
                                 y_offset=1,
                                 image_min=0,
                                 image_max=255,
                                 nb_bins=32,
                                 prefix=polarisation + '_')

                progress.update(1)

    progress.close()
Example #24
def main(mosaic, data, dest, ntl, bbox, country):

    os.makedirs(dest, exist_ok=True)
    os.makedirs(dest + '/pre-event', exist_ok=True)
    os.makedirs(dest + '/post-event', exist_ok=True)

    # create raster mosaic for rasters with same name (~ same area)
    print('creating mosaic of overlapping rasters')
    if mosaic:
        for prepost in ['pre', 'post']:
            filenames = os.listdir(os.path.join(data, prepost + '-event'))
            tuples = []
            for filename in filenames:
                name = filename.split('-')[1]
                same = sorted(
                    [x for x in filenames if x.split('-')[1] == name])
                if same not in tuples and len(same) > 1:
                    tuples.append(same)
            for tuple in tuples:
                out_file = tuple[0].split('.')[0] + '-merged.tif'
                for ix, file in enumerate(tuple):
                    if ix == 0:
                        os.system('gdalwarp -r average {} {} {}'.format(
                            os.path.join(data, prepost + '-event', file),
                            os.path.join(data, prepost + '-event',
                                         tuple[ix + 1]),
                            os.path.join(dest, prepost + '-event', out_file)))
                    elif ix == 1:
                        continue
                    else:
                        os.system('gdalwarp -r average {} {} {}'.format(
                            os.path.join(data, prepost + '-event', file),
                            os.path.join(dest, prepost + '-event', out_file),
                            os.path.join(dest, prepost + '-event', out_file)))
            # copy all the other rasters to dest
            for file in [
                    x for x in filenames
                    if x not in [item for tuple in tuples for item in tuple]
            ]:
                copyfile(os.path.join(data, prepost + '-event', file),
                         os.path.join(dest, prepost + '-event', file))

    # filter pre-event rasters

    print('filtering pre-event rasters')

    # filter by bounding box (if provided)
    if bbox != '':
        bbox_tuple = [float(x) for x in bbox.split(',')]
        bbox = box(bbox_tuple[0], bbox_tuple[1], bbox_tuple[2], bbox_tuple[3])
        geo = gpd.GeoDataFrame({'geometry': bbox},
                               index=[0],
                               crs=from_epsg(4326))
        coords = getFeatures(geo)
        print('filtering on bbox:')
        print(coords)

        # loop over images and filter
        for raster in tqdm(glob.glob(dest + '/pre-event/*.tif')):
            raster = raster.replace('\\', '/')
            raster_or = raster
            out_name = raster.split('.')[0] + '-bbox.tif'
            with rasterio.open(raster) as src:
                print('cropping on bbox')

                try:
                    out_img, out_transform = mask(dataset=src,
                                                  shapes=coords,
                                                  crop=True)
                    out_meta = src.meta.copy()
                    out_meta.update({
                        'height': out_img.shape[1],
                        'width': out_img.shape[2],
                        'transform': out_transform
                    })

                    print('saving', out_name)
                    with rasterio.open(out_name, 'w', **out_meta) as dst:
                        dst.write(out_img)
                except:
                    print('empty raster, discard')

            os.remove(raster_or)

    # filter by nighttime lights

    # load nighttime light mask
    ntl_shapefile = 'input/ntl_mask_extended.shp'
    if ntl:
        # filter mask by country (if provided)
        if country != '':
            country_ntl_shapefile = ntl_shapefile.split(
                '.')[0] + '_' + country.lower() + '.shp'
            if not os.path.exists(country_ntl_shapefile):
                ntl_world = gpd.read_file(ntl_shapefile)
                ntl_world.crs = {'init': 'epsg:4326'}
                ntl_world = ntl_world.to_crs("EPSG:4326")
                world = gpd.read_file(
                    gpd.datasets.get_path('naturalearth_lowres'))
                country_shape = world[world.name == country]
                if country_shape.empty:
                    print('WARNING: country', country, 'not found!!!')
                    print('available countries:')
                    print(world.name.unique())
                    print('proceeding with global mask')
                    country_ntl_shapefile = ntl_shapefile
                else:
                    country_shape = country_shape.reset_index()
                    country_shape.at[0, 'geometry'] = box(
                        *country_shape.at[0, 'geometry'].bounds)
                    country_shape.geometry = country_shape.geometry.scale(
                        xfact=1.1, yfact=1.1)
                    ntl_country = gpd.clip(ntl_world, country_shape)
                    ntl_country.to_file(country_ntl_shapefile)
            with fiona.open(country_ntl_shapefile, "r") as shapefile:
                shapes = [feature["geometry"] for feature in shapefile]
        else:
            with fiona.open(ntl_shapefile, "r") as shapefile:
                shapes = [feature["geometry"] for feature in shapefile]

        # loop over images and filter
        for raster in tqdm(glob.glob(dest + '/pre-event/*.tif')):
            raster = raster.replace('\\', '/')
            raster_or = raster
            out_name = raster.split('.')[0] + '-ntl.tif'
            if 'ntl' in raster:
                continue
            crop_next = True

            print('processing', raster)
            out_name_ntl = raster.split('.')[0] + '-ntl-mask.tif'
            try:
                with rasterio.open(raster) as src:
                    shapes_r = [
                        x for x in shapes
                        if not rasterio.coords.disjoint_bounds(
                            src.bounds, rasterio.features.bounds(x))
                    ]
                    if len(shapes_r) == 0:
                        print('no ntl present, discard')
                        crop_next = False
                    else:
                        print('ntl present, creating mask')
                        out_image, out_transform = rasterio.mask.mask(
                            src, shapes_r, crop=True)
                        out_meta = src.meta

                        out_meta.update({
                            "driver": "GTiff",
                            "height": out_image.shape[1],
                            "width": out_image.shape[2],
                            "transform": out_transform
                        })
                        # save temporary ntl file
                        print('saving mask', out_name_ntl)
                        with rasterio.open(out_name_ntl, "w",
                                           **out_meta) as dst:
                            dst.write(out_image)
                        crop_next = True
                    raster = out_name_ntl
                if crop_next:
                    with rasterio.open(raster) as src:
                        print('cropping nan on', raster)
                        window = get_data_window(src.read(1, masked=True))

                        kwargs = src.meta.copy()
                        kwargs.update({
                            'height':
                            window.height,
                            'width':
                            window.width,
                            'transform':
                            rasterio.windows.transform(window, src.transform)
                        })

                        print('saving', out_name)
                        try:
                            with rasterio.open(out_name, 'w', **kwargs) as dst:
                                dst.write(src.read(window=window))
                        except:
                            print('empty raster, discard')

                    # remove temporary ntl file
                    os.remove(raster)
                    # remove original raster
                    os.remove(raster_or)
            except:
                print('error loading raster, skipping')
Example #25
            transform = Affine(t.a * scale, t.b, t.c, t.d, t.e * scale, t.f)
            height = int(src.height / scale)
            width = int(src.width / scale)

            #clip the dem
            with rio.open('out.tiff') as origin:

                epsg4326_dem = origin.read(1)
                print('dem meta origin', origin.meta)

                print('planet origin', src.meta)
                #pf = src.read(1, masked=True)
                print(box(*src.bounds))

                try:
                    clipped_raster, clipped_transform = mask(
                        origin, [box(*src.bounds)], crop=True, nodata=0)
                except ValueError as err:
                    print('Handling run-time error:', err)

                print('clipped transform', clipped_transform)
                clipped_meta = origin.meta.copy()
                clipped_meta.update({
                    "driver": "GTiff",
                    "height": clipped_raster.shape[1],
                    "width": clipped_raster.shape[2],
                    "nodata": 0,
                    "transform": clipped_transform
                })
                print(src.meta, "ds")
                print(clipped_raster[0].shape)
                print(src.crs)
Example #26
minx, miny = 24.60, 60.00
maxx, maxy = 25.22, 60.35
bbox = box(minx, miny, maxx, maxy)
# create GeoDataFrame from the bounding box
crs_code = pycrs.parser.from_epsg_code(4326).to_proj4()
geo = gpd.GeoDataFrame({'geometry': bbox}, index=[0], crs=crs_code)
print(geo)

# project the polygon into same crs as image
geo = geo.to_crs(crs=raster.crs)

# convert GeoDataFrame to geometric features dictionary
coords = getFeatures(geo)

# clip the raster with the polygon
out_img, out_transform = mask(dataset=raster, shapes=coords, crop=True)

# copy the metadata from original
out_meta = raster.meta.copy()

# update the metadata, first parse epsg
epsg_code = int(raster.crs.data['init'].replace('epsg:', ''))
epsg_proj4 = pycrs.parser.from_epsg_code(epsg_code).to_proj4()
out_meta.update({
    "driver": "GTiff",
    "height": out_img.shape[1],
    "width": out_img.shape[2],
    "transform": out_transform,
    "crs": epsg_proj4
})
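The getFeatures helper is never defined in these snippets. For the single-polygon case used here, a common definition (this example appears to follow the Automating GIS Processes tutorial) converts the GeoDataFrame into the GeoJSON-like dicts that mask() expects; the two-argument calls in Examples #15 and #19 presumably index a specific row instead:

import json

def getFeatures(gdf):
    """Parse features from a GeoDataFrame into the format mask() expects."""
    return [json.loads(gdf.to_json())['features'][0]['geometry']]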
Example #27
def run(input_dir, output_dir):
    DATA_DIR_2 = os.path.join(input_dir, "ch2")
    DATA_DIR_6 = os.path.join(input_dir, "ch6")
    DATA_DIR_7 = os.path.join(input_dir, "ch7")
    DATA_DIR_14 = os.path.join(input_dir, "ch14")

    # Get contents of data dir for ch 7
    data_list_7 = os.listdir(DATA_DIR_7)
    if ".DS_Store" in data_list_7:
        data_list_7.remove(".DS_Store")  # For mac users
    data_list_7 = sorted(data_list_7)

    # Get contents of data dir for ch14
    data_list_14 = os.listdir(DATA_DIR_14)
    if ".DS_Store" in data_list_14:
        data_list_14.remove(".DS_Store")  # For mac users
    data_list_14 = sorted(data_list_14)

    # Get contents of data dir for ch 2
    data_list_2 = os.listdir(DATA_DIR_2)
    if ".DS_Store" in data_list_2:
        data_list_2.remove(".DS_Store")  # For mac users
    data_list_2 = sorted(data_list_2)

    # Get contents of data dir for ch 6
    data_list_6 = os.listdir(DATA_DIR_6)
    if ".DS_Store" in data_list_6:
        data_list_6.remove(".DS_Store")  # For mac users
    data_list_6 = sorted(data_list_6)

    # Load ch7 for projection constants
    first_ds_name = data_list_7[0]
    first_ds_path = os.path.join(DATA_DIR_7, first_ds_name)
    first_ds = GOES.open_dataset(first_ds_path)
    var_ch02, lons, lats = first_ds.image("Rad",
                                          domain=[LLLon, URLon, LLLat, URLat])
    var_ch02, lons, lats = var_ch02.data, lons.data, lats.data
    HEIGHT = var_ch02.shape[0]
    WIDTH = var_ch02.shape[1]

    # Setup projection constants used throughout the script.
    tiff_path = os.path.join(TIFF_DIR, "0.tif")
    p_crs = CRS.from_epsg(3857)
    p_latlon = CRS.from_proj4("+proj=latlon")
    crs_transform = Transformer.from_crs(p_latlon, p_crs)
    ll_x, ll_y = crs_transform.transform(LLLon, LLLat)
    ur_x, ur_y = crs_transform.transform(URLon, URLat)
    area_extent = (ll_x, ll_y, ur_x, ur_y)
    ul_x = ll_x  # upper-left corner: x from the lower-left corner
    ul_y = ur_y  # and y from the upper-right corner
    area_id = "California Coast"
    description = "See area ID"
    proj_id = "Mercator"
    pixel_size_x = (ur_x - ll_x) / (WIDTH - 1)
    pixel_size_y = (ur_y - ll_y) / (HEIGHT - 1)
    new_affine = Affine(pixel_size_x, 0.0, ul_x, 0.0, -pixel_size_y, ul_y)
    area_def = AreaDefinition(area_id, description, proj_id, p_crs, WIDTH,
                              HEIGHT, area_extent)
    fill_value = np.nan

    # Load ch7 for land masking
    first_ds_name = data_list_7[0]
    first_ds_path = os.path.join(DATA_DIR_7, first_ds_name)
    first_ds = GOES.open_dataset(first_ds_path)
    var_ch07, lons, lats = first_ds.image("Rad",
                                          domain=[LLLon, URLon, LLLat, URLat])
    var_ch07, lons, lats = var_ch07.data, lons.data, lats.data
    swath_def = SwathDefinition(lons, lats)
    first_ds = None  # Free the memory from these big datasets
    var_ch07 = kd_tree.resample_nearest(swath_def,
                                        var_ch07.ravel(),
                                        area_def,
                                        radius_of_influence=5000,
                                        nprocs=2,
                                        fill_value=fill_value)

    ###### New land masking system #######################
    with rasterio.open(
            tiff_path,
            "w",
            driver="GTiff",
            height=HEIGHT,
            width=WIDTH,
            count=1,  # single-band raster
            dtype=var_ch07.dtype,
            crs=p_crs,
            transform=new_affine,
            nodata=fill_value,
    ) as dst:
        dst.write(np.reshape(var_ch07, (1, HEIGHT, WIDTH)))

    src = rasterio.open(tiff_path, mode='r+')
    geodf = geopandas.read_file(LAND_POLYGON_SHAPE)
    land_masking, other_affine = mask.mask(src,
                                           geodf['geometry'].values,
                                           invert=True,
                                           filled=False)
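    # With filled=False, mask.mask returns a numpy MaskedArray; getmask then
    # extracts its boolean mask, which is True over the land polygons here
    # because invert=True masks the pixels inside the geometries.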
    land_masking = np.ma.getmask(land_masking)
    land_masking = np.reshape(land_masking, (HEIGHT, WIDTH))
    src.close()  # Free memory
    src = None
    geodf = None
    ############################################################

    # Init multi-tracker
    trackers = MultiTrackerImproved(cv2.TrackerCSRT_create)

    image_list = []
    # BTD_list = []
    refl_ch2_list = []
    refl_ch6_list = []

    i = 0
    for ds_name_7 in data_list_7:
        ds_name_14 = data_list_14[i]
        ds_name_2 = data_list_2[i]
        ds_name_6 = data_list_6[i]
        ds_path_7 = os.path.join(DATA_DIR_7, ds_name_7)
        ds_path_14 = os.path.join(DATA_DIR_14, ds_name_14)
        ds_path_2 = os.path.join(DATA_DIR_2, ds_name_2)
        ds_path_6 = os.path.join(DATA_DIR_6, ds_name_6)

        # Load channel 2
        ds_2 = GOES.open_dataset(ds_path_2)
        var_ch02, lons, lats = ds_2.image("Rad",
                                          domain=[LLLon, URLon, LLLat, URLat])
        var_ch02, lons, lats = var_ch02.data, lons.data, lats.data
        swath_def = SwathDefinition(lons, lats)
        var_ch02 = kd_tree.resample_nearest(swath_def,
                                            var_ch02.ravel(),
                                            area_def,
                                            radius_of_influence=5000,
                                            nprocs=2,
                                            fill_value=fill_value)

        # Load channel 2 reflectivity
        ds_2 = GOES.open_dataset(ds_path_2)
        refl_var_ch02, lons, lats = ds_2.image(
            "Rad", up_level=True, domain=[LLLon, URLon, LLLat, URLat])
        refl_var_ch02 = refl_var_ch02.refl_fact_to_refl(lons, lats).data
        swath_def = SwathDefinition(lons.data, lats.data)
        refl_var_ch02 = kd_tree.resample_nearest(swath_def,
                                                 refl_var_ch02.ravel(),
                                                 area_def,
                                                 radius_of_influence=5000,
                                                 nprocs=2,
                                                 fill_value=fill_value)

        # Load channel 6 reflectivity
        ds_6 = GOES.open_dataset(ds_path_6)
        refl_var_ch06, lons, lats = ds_6.image(
            "Rad", up_level=True, domain=[LLLon, URLon, LLLat, URLat])
        refl_var_ch06 = refl_var_ch06.refl_fact_to_refl(lons, lats).data
        swath_def = SwathDefinition(lons.data, lats.data)
        refl_var_ch06 = kd_tree.resample_nearest(swath_def,
                                                 refl_var_ch06.ravel(),
                                                 area_def,
                                                 radius_of_influence=5000,
                                                 nprocs=2,
                                                 fill_value=fill_value)

        # Load channel 7
        ds_7 = GOES.open_dataset(ds_path_7)
        var_ch07, lons, lats = ds_7.image("Rad",
                                          domain=[LLLon, URLon, LLLat, URLat])
        var_ch07, lons, lats = var_ch07.data, lons.data, lats.data
        swath_def = SwathDefinition(lons, lats)
        var_ch07 = kd_tree.resample_nearest(swath_def,
                                            var_ch07.ravel(),
                                            area_def,
                                            radius_of_influence=5000,
                                            nprocs=2,
                                            fill_value=fill_value)

        # Load channel 14
        ds_14 = GOES.open_dataset(ds_path_14)
        var_ch14, lons, lats = ds_14.image("Rad",
                                           domain=[LLLon, URLon, LLLat, URLat])
        var_ch14, lons, lats = var_ch14.data, lons.data, lats.data
        swath_def = SwathDefinition(lons, lats)
        var_ch14 = kd_tree.resample_nearest(swath_def,
                                            var_ch14.ravel(),
                                            area_def,
                                            radius_of_influence=5000,
                                            nprocs=2,
                                            fill_value=fill_value)

        # Make BTD
        var = calc_BTD.main_func(var_ch14, var_ch07, 14, 7)

        # Skip day if it has bad data
        if np.isnan(var).any():
            i = i + 1
            continue

        # Make a copy of the BTD for use as a background in cv2 image output
        # Normalizes the BTD values to the range [0, 1]
        # BTD = copy.deepcopy(var)
        BTD_img = copy.deepcopy(var)
        min_BTD = np.nanmin(BTD_img)
        if min_BTD < 0:
            BTD_img = BTD_img + np.abs(min_BTD)
        max_BTD = np.nanmax(BTD_img)
        BTD_img = BTD_img / max_BTD
        # BTD_img = cv2.cvtColor(BTD_img*255, cv2.COLOR_GRAY2BGR)
        # BTD_img_trackers = copy.deepcopy(BTD_img) # Next two lines are for new BTD data for trackers
        # BTD_img_trackers = np.array(BTD_img_trackers).astype('uint8') # Since it seems the trackers need images of type uint8

        # Filter out the land
        var[land_masking] = np.nan

        # Create mask array for the highest clouds
        high_cloud_mask = calc_BTD.bt_ch14_temp_conv(
            var_ch14) < 5  # TODO: Make this more robust

        #### Use reflectivity of channel 2 and BT of channel 14 to filter out open ocean data ###########
        BT = calc_BTD.bt_ch14_temp_conv(var_ch14)

        # Filter out the land and the high clouds, since golden arches
        # detection works best over open water
        keep = np.logical_and(np.logical_not(land_masking),
                              np.logical_not(high_cloud_mask))
        BT = BT[keep]
        var_ch02 = var_ch02[keep]

        BT_and_CH02 = np.vstack((BT, var_ch02)).T
        BT_and_CH02_sample, _ = train_test_split(BT_and_CH02, train_size=10000)

        clusterer = DBSCAN(eps=1.5,
                           min_samples=100)  # Found through extensive testing
        classifier = DecisionTreeClassifier()
        inductive_cluster = InductiveClusterer(
            clusterer, classifier).fit(BT_and_CH02_sample)
        IC_labels = inductive_cluster.predict(BT_and_CH02) + 1
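        # DBSCAN labels noise as -1; the +1 shifts the labels so that noise
        # becomes 0 and real cluster ids start at 1.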

        all_labels = np.unique(IC_labels)
        min_refl = np.inf
        open_ocean_label = 0
        for j in all_labels:
            labeled_refl_array = var_ch02[IC_labels == j]
            mean_refl = np.nanmean(labeled_refl_array)
            if mean_refl < min_refl:
                open_ocean_label = j
                min_refl = mean_refl
        golden_arch_mask_ocean = IC_labels == open_ocean_label

        golden_arch_mask = np.zeros(var.shape, dtype=bool)
        golden_arch_mask[np.logical_and(
            np.logical_not(land_masking),
            np.logical_not(high_cloud_mask))] = golden_arch_mask_ocean

        var = np.where(golden_arch_mask, np.nan, var)
        ###############################################################################################

        #Filter out the cold high altitude clouds
        var = np.where(high_cloud_mask, np.nan, var)

        var = feature.canny(var,
                            sigma=2.2,
                            low_threshold=0,
                            high_threshold=1.2)
        var = np.where(np.isnan(var), 0, var)  # NaN never compares equal; use isnan

        ## Skimage hough line transform #################################
        var = np.array(var).astype('uint8')
        img = cv2.cvtColor(var * 255, cv2.COLOR_GRAY2BGR)

        # Was 0, 30, 1
        threshold = 0
        minLineLength = 30
        maxLineGap = 2
        theta = np.linspace(-np.pi, np.pi, 1000)

        lines = transform.probabilistic_hough_line(var,
                                                   threshold=threshold,
                                                   line_length=minLineLength,
                                                   line_gap=maxLineGap,
                                                   theta=theta)
        #############################################################

        #### TRACKER #################
        trackers.update(img, i)

        if lines is not None:
            for line in lines:
                p0, p1 = line
                x1 = p0[0]
                y1 = p0[1]
                x2 = p1[0]
                y2 = p1[1]

                min_x = np.minimum(x1, x2)
                min_y = np.minimum(y1, y2)
                max_x = np.maximum(x1, x2)
                max_y = np.maximum(y1, y2)

                rect = (min_x - 2, min_y - 2, max_x - min_x + 4,
                        max_y - min_y + 4
                        )  #TODO: Maybe expand the size of the boxes a bit?
                trackers.add_tracker(img, rect, len(data_list_7))
        ###############################

        image_list.append(BTD_img)
        # BTD_list.append(BTD)
        refl_ch2_list.append(refl_var_ch02)
        refl_ch6_list.append(refl_var_ch06)

        print("Image " + str(i) + " Calculated")
        i = i + 1

    # TODO: Remove BTD_list in all areas if I am not using it for real final pngs
    for i in range(len(image_list)):
        label_name = "labels"
        data_name = "data"
        filename = str(i) + ".tif"
        data_file_path = os.path.join(output_dir, data_name, filename)
        label_file_path = os.path.join(output_dir, label_name, filename)
        boxes = trackers.get_boxes(i)

        BTD_img = image_list[i]
        # BTD = BTD_list[i]
        refl_var_ch02 = refl_ch2_list[i]
        refl_var_ch06 = refl_ch6_list[i]

        # Make box plots for trackers
        # Also make and highlight the labels
        labels = np.zeros([BTD_img.shape[0], BTD_img.shape[1]],
                          dtype=np.float32)
        for box in boxes:
            (x, y, w, h) = [int(v) for v in box]

            # keep only boxes that lie fully inside the image
            if (w > 0 and h > 0 and x >= 0 and y >= 0
                    and y + h <= BTD_img.shape[0]
                    and x + w <= BTD_img.shape[1]):
                ch2_slice = refl_var_ch02[y:y + h, x:x + w]
                ch6_slice = refl_var_ch06[y:y + h, x:x + w]

                labels_slice = labels[y:y + h, x:x + w]
                labels_slice = np.where(
                    np.logical_and(ch6_slice >= 0.28, ch2_slice >= 0.3), 1.0,
                    labels_slice)
                labels[y:y + h, x:x + w] = labels_slice  # Add red for labels

        with rasterio.open(
                data_file_path,
                "w",
                driver="GTiff",
                height=HEIGHT,
                width=WIDTH,
                count=1,  # single-band raster
                dtype=BTD_img.dtype,
                crs=p_crs,
                transform=new_affine,
                nodata=fill_value,
        ) as dst:
            dst.write(np.reshape(BTD_img, (1, HEIGHT, WIDTH)))

        with rasterio.open(
                label_file_path,
                "w",
                driver="GTiff",
                height=HEIGHT,
                width=WIDTH,
                count=1,  # single-band raster
                dtype=labels.dtype,
                crs=p_crs,
                transform=new_affine,
                nodata=fill_value,
        ) as dst:
            dst.write(np.reshape(labels, (1, HEIGHT, WIDTH)))

        # BTD_img = cv2.addWeighted(BTD_img, 1.0, labels, 0.5, 0)

        # cv2.imwrite(file_path, BTD_img)

        print("Image " + str(i) + " Complete")
Example #28
def inference_on_tif(src_grid,
                     src_im,
                     dst_im,
                     dst_pred,
                     inf_model,
                     output_image_tile=False,
                     verbose=True):
    grid = geopandas.read_file(src_grid)  #Open The Grid shp
    img = rasterio.open(src_im)  # Open the raster tif
    ntiles = len(grid)
    pred_polys = []  #collects all detected and polygonized features
    pred_classes = []  #collects their class ids
    pred_scores = []  #collects their scores
    pred_tiles = []  #collects the index of the tile they are predicted on
    # Loop over all the tiles, one iteration per object in grid
    for i in tqdm(range(0, ntiles),
                  desc="Processing and Predicting Image Tiles"):
        outfile_im = dst_im + "Tile" + ("{:04d}".format(i)) + ".tif"
        coords = SegUtils.getFeatures(grid, i)  # get geometries of the tile
        # min x / min y coords of the tile, for adjusting the coordinates of
        # the predicted polygons later
        tile_bounds = grid.bounds.iloc[i][0:2].values
        # height in map units, to later invert the y coordinates
        tile_height = grid.bounds.iloc[i][3] - grid.bounds.iloc[i][1]
        try:
            Tile, out_transform = mask(dataset=img, shapes=coords, crop=True)
        except ValueError as error:
            print(error)
            print("Skipping Tile " + str(i))
            continue

        if output_image_tile:
            out_meta = img.meta.copy()
            out_meta.update({
                "driver": "GTiff",
                "height": Tile.shape[1],
                "width": Tile.shape[2],
                "transform": out_transform
            })  #update the meta for the cropped raster
            if not os.path.exists(dst_im):
                os.makedirs(dst_im)
            if os.path.isfile(outfile_im):
                print("Image File exists already and will not be overwritten.")
            else:
                with rasterio.open(outfile_im, "w", **out_meta) as dest:
                    dest.write(Tile)
        # Switch the dimensions around so that the rgb dimension is the last
        # one, as required by the network
        Tile = Tile.transpose(1, 2, 0)
        #inference
        pred_results = inf_model.detect([Tile], verbose=0)
        # number of predicted objects to iterate over
        n_pred_obj = len(pred_results[0]["class_ids"])
        for j in range(0, n_pred_obj):  # For every predicted object:
            # collects all parts of the multipolygon created for this object
            new_poly = []
            results_mask = pred_results[0]["masks"][:, :, j]  # get the mask
            results_mask = results_mask.astype('int16')  # must not be boolean
            # visualize (not necessary)
            # plt.imshow(results_mask, interpolation='nearest')
            # plt.show()
            # polygonize and grab the raster value and the geometries
            results = ({
                'raster_val': v,
                'geometry': s
            } for i, (s, v) in enumerate(shapes(results_mask)))
            # the result can be converted to a list of dicts with keys
            # raster_val and geometry, geometry being a dict with keys type
            # and coordinates, coordinates being a list; this now gets
            # converted into shapely objects
            results = list(results)
            for result in results:
                if result['raster_val']:  # skip the background geometry
                    # list of (x, y) tuples for the polygon exterior
                    coordlist = result['geometry']['coordinates'][0]
                    # invert y and shift by the tile origin
                    coordlist_adj = [(x, (tile_height - y)) + tile_bounds
                                     for x, y in coordlist]
                    new_part = shapely.geometry.Polygon(coordlist_adj)
                    new_poly.append(new_part)
            # after all parts for an object are collected, merge to a
            # multipolygon (often there will just be one part)
            new_poly = shapely.geometry.MultiPolygon(new_poly)
            if new_poly:  # if something was detected
                # collect the polygonized feature, its class id, its score,
                # and the index of the tile it was predicted on
                pred_polys.append(new_poly)
                pred_classes.append(pred_results[0]["class_ids"][j])
                pred_scores.append(pred_results[0]["scores"][j])
                pred_tiles.append(i)

    df = pandas.DataFrame({
        "pred_class": pred_classes,
        "pred_score": pred_scores,
        "pred_tiles": pred_tiles
    })
    pred_gdf = geopandas.GeoDataFrame(df, geometry=pred_polys)
    pred_gdf.crs = grid.crs
    pred_gdf.to_file(dst_pred)
    return (pred_gdf)
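A hedged usage sketch for inference_on_tif; every path below and the inf_model object are placeholders, not values taken from the original code:

# Hypothetical call (all arguments are assumptions for illustration):
pred_gdf = inference_on_tif('tile_grid.shp', 'orthomosaic.tif', 'tiles/',
                            'predictions.shp', inf_model,
                            output_image_tile=True)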
Example #29
def get_one_sub_image_label(idx, center_polygon, class_int, polygons_all,
                            class_int_all, bufferSize, img_tile_boxes,
                            image_tile_list):
    '''
    get a sub-image and the corresponding label raster
    :param idx: the polygon index
    :param center_polygon: the polygon in the training polygons
    :param class_int: the class number of this polygon
    :param polygons_all: the full set of training polygons, for generating label images
    :param class_int_all: the class numbers for the full set of training polygons
    :param bufferSize: the buffer area to generate sub-images
    :param img_tile_boxes: the bounding boxes of all the image tiles
    :param image_tile_list: the list of image paths
    :return:
    '''

    ############# This function is not working  #############

    # center_polygon corresponds to one polygon in the full set of training polygons, so it is not necessary to check
    # get adjacent polygon
    adj_polygons, adj_polygons_class = get_adjacent_polygons(
        center_polygon, polygons_all, class_int_all, bufferSize)

    # add the center polygons to adj_polygons
    adj_polygons.extend([center_polygon])
    adj_polygons_class.extend([class_int])
    basic.outputlogMessage('get a sub image covering %d training polygons' %
                           len(adj_polygons))

    # find the images which the center polygon overlap (one or two images)
    img_index = get_overlap_image_index(adj_polygons, img_tile_boxes)
    if len(img_index) < 1:
        basic.outputlogMessage(
            'Warning, %dth polygon and the adjacent ones do not overlap any image tile, please check '
            '(1) the shape file and raster have the same projection '
            'and (2) this polygon is in the extent of the images' % idx)

    image_list = [image_tile_list[item] for item in img_index]

    # open the raster to get projection, resolution
    # with rasterio.open(image_list[0]) as src:
    #     resX = src.res[0]
    #     resY = src.res[1]
    #     src_profile = src.profile
    src = rasterio.open(image_list[0])
    resX = src.res[0]
    resY = src.res[1]
    src_profile = src.profile

    # rasterize the shapes
    burn_shapes = [
        (item_shape, item_class_int)
        for (item_shape,
             item_class_int) in zip(adj_polygons, adj_polygons_class)
    ]
    burn_boxes = get_bounds_of_polygons(adj_polygons)

    # check whether the extent is too large
    burn_boxes_width = math.ceil((burn_boxes[2] - burn_boxes[0]) / resX)
    burn_boxes_height = math.ceil((burn_boxes[3] - burn_boxes[1]) / resY)

    if burn_boxes_width * burn_boxes_height > 10000 * 10000:
        raise ValueError(
            'error, the polygons to burn cover a very large area')

    # fill as 255 for regions outside the shapes, for test purposes
    # setting all_touched=True may help with small shapes
    # GDAL-style geotransforms, (X_min, resX, 0, Y_max, 0, -resY), were deprecated in rasterio 1.0
    # affine.Affine() vs. GDAL-style geotransforms: https://rasterio.readthedocs.io/en/stable/topics/migrating-to-v1.html
    new_transform = rasterio.Affine(resX, 0, burn_boxes[0], 0, -resY,
                                    burn_boxes[3])  # (resX, 0, X_min, 0, -resY, Y_max)
    out_label = rasterize(burn_shapes,
                          out_shape=(burn_boxes_height, burn_boxes_width),
                          transform=new_transform,
                          fill=0,
                          all_touched=False,
                          dtype=rasterio.uint8)  # out_shape is (rows, cols)
    print('new_transform', new_transform)
    print('out_label', out_label.shape)

    # test, save to disk
    kwargs = src.meta
    kwargs.update(dtype=rasterio.uint8,
                  count=1,
                  width=burn_boxes_width,
                  height=burn_boxes_height,
                  transform=new_transform)
    with rasterio.open('test_6_albers.tif', 'w', **kwargs) as dst:
        dst.write_band(1, out_label.astype(rasterio.uint8))

    # mask, get pixels cover by polygons, set all_touched as True
    polygons_json = [mapping(item) for item in adj_polygons]
    out_image, out_transform = mask(src,
                                    polygons_json,
                                    nodata=0,
                                    all_touched=True,
                                    crop=True)

    # test: output information
    print('out_transform', out_transform)
    print('out_image', out_image.shape)

    # test: save it to disk
    out_meta = src.meta.copy()
    out_meta.update({
        "driver": "GTiff",
        "height": out_image.shape[1],
        "width": out_image.shape[2],
        "transform": out_transform
    })  # note: the saved image has a small offset compared to the original (~0.5 pixel)
    save_path = "masked_of_polygon_%d.tif" % (idx + 1)
    with rasterio.open(save_path, "w", **out_meta) as dest:
        dest.write(out_image)

    # return image_array, label_array
    return 1, 1
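The comments above contrast affine.Affine with GDAL-style geotransforms. A small illustration of the two orderings (the numbers are invented for the example):

from affine import Affine

# GDAL geotransform order: (X_min, resX, 0, Y_max, 0, -resY)
gdal_gt = (358485.0, 30.0, 0.0, 4265115.0, 0.0, -30.0)
# Affine order: (resX, 0, X_min, 0, -resY, Y_max)
assert Affine.from_gdal(*gdal_gt) == Affine(30.0, 0.0, 358485.0,
                                            0.0, -30.0, 4265115.0)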
Example #30
def triplify_dataset(file, parcel_file, product_id, output_folder='./rdf'):

    startEpoch = time.time()

    files = []
    triples = []
    iFiles = 0
    iTriples = 0
    village = parcel_file[-20:-15]
    lsNameSpaces, lsTriplesTemplate = readTemplate(templateFile)

    ndvi_file = rasterio.open(file)

    print("ndvi_file data read, CRS (expected 32630):", ndvi_file.crs)
    parcelFile = gpd.read_file(parcel_file)
    if '/' in file:
        image_date = file[file.rfind('/') + 1:-7]
    else:
        image_date = file[0:-7]

    project = partial(pyproj.transform, pyproj.Proj(init='epsg:4326'),
                      pyproj.Proj(init=ndvi_file.crs))

    for index, row in parcelFile.iterrows():
        clear_output()
        print(str(index) + '/' + str(len(parcelFile)))
        feat = {}
        feat['id'] = str(row['id'])
        feat['parcel'] = str(row['id'])
        feat['area_type'] = str(row['area_type'])
        feat['geojson'] = shape(row['geometry']).wkt
        try:
            geom = transform(project, row['geometry'])
            ndviParcel, ndviTransform = mask(ndvi_file, [geom],
                                             crop=True,
                                             all_touched=True,
                                             indexes=1)
        except ValueError as err:
            print(err)
            continue
        ndvi = np.float16(ndviParcel)
        total = ndvi[ndvi >= 0.2].size
        if total <= 0:
            try:
                geom = transform(project, row['geometry'])
                ndviParcel, ndviTransform = mask(ndvi_file, [geom],
                                                 crop=True,
                                                 indexes=1)
            except ValueError as err:
                print(err)
                continue
        ndvi = np.float16(ndviParcel)
        total = ndvi[ndvi >= 0.2].size
        if total > 0:
            feat['lowp'] = round(
                ndvi[(ndvi >= 0.2) & (ndvi < 0.5)].size * 100 / total, 2)
            feat['midp'] = round(
                ndvi[(ndvi >= 0.5) & (ndvi < 0.7)].size * 100 / total, 2)
            feat['highp'] = round(ndvi[(ndvi >= 0.7)].size * 100 / total, 2)

            feat['id'] = "PN" + feat['id'] + "_" + image_date
            feat['id'] = feat['id'].replace('-', '_')

            feat['image'] = product_id
            uriDummy = URIBase + 'NDVI/' + feat['id'] + ">"
            triplesRow = triplify(feat, lsTriplesTemplate, uriDummy,
                                  feat['id'], feat['parcel'])

            triples = triples + triplesRow
            if len(triples) > maxRecordsPerFile:
                # write each full chunk to its own numbered file
                file = output_folder + 'ndvi_' + str(iFiles) + '.ttl'
                files.append(file)
                writeToFile(lsNameSpaces, triples, file)
                iFiles = iFiles + 1
                iTriples = iTriples + len(triples)
                triples = []
        else:
            print("Too small!")

    file = output_folder + 'ndvi.ttl'
    writeToFile(lsNameSpaces, triples, file)
    iTriples = iTriples + len(triples)

    files.append(file)
    clear_output()
    print('Number of parcels', len(parcelFile))
    print('Number of triples', iTriples)
    endEpoch = time.time()
    elapsedTime = endEpoch - startEpoch
    if elapsedTime < 60:
        print('Elapsed time : ', elapsedTime, ' seconds')
    else:
        print('Elapsed time : ', math.floor(elapsedTime / 60), ' minutes and ',
              elapsedTime % 60, ' seconds')
    return files
Example #31
def weighted_means(z, name_list, dist_vars):
    temp_name = name_list[0][z]
    all_pts = gpd.GeoDataFrame.from_file('Movement_Shapefiles/' +
                                         ''.join([temp_name, '_Python.shp']))
    all_pts.crs = {'init': 'epsg:32733'}

    from scipy.stats import gamma
    shape = float(dist_vars[z]['gamma.shape'])
    rate = float(dist_vars[z]['gamma.rate'])
    rad = gamma.ppf(0.975, a=shape, scale=1 / rate)

    big_buffers = all_pts.geometry.buffer(rad)
    #small_buffers = all_pts.geometry.buffer(30)
    #buffers_diff = big_buffers.difference(small_buffers)

    avail_green = []
    avail_wet = []
    avail_roads = []

    all_buff = big_buffers.geometry.values
    sample_iter = range(0, len(all_buff))
    from shapely.geometry import mapping
    from rasterio.mask import mask
    for i in sample_iter:
        geoms = [mapping(big_buffers.geometry.values[i])]
        with rasterio.open("ENP_Predictors/Final_Predictors_2009.tif") as src:
            out_image, out_transform = mask(src, geoms, crop=True)

            no_data = -3.39999995e+38
            Green_band = out_image.data[0]
            Wet_band = out_image.data[1]
            Road_band = out_image.data[2]
            row, col = np.where(Green_band != no_data)
            green = np.extract(Green_band != no_data, Green_band)
            wet = np.extract(Wet_band != no_data, Wet_band)
            roads = np.extract(Road_band != no_data, Road_band)

            from rasterio import Affine  # or from affine import Affine
            T1 = out_transform * Affine.translation(
                0.5, 0.5)  # reference the pixel centre
            rc2xy = lambda r, c: (c, r) * T1
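            # e.g. rc2xy(0, 0) returns the map coordinates of the centre of
            # the upper-left pixel of the clipped window.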

            d = gpd.GeoDataFrame({
                'col': col,
                'row': row,
                'green': green,
                'wet': wet,
                'roads': roads
            })
            # coordinate transformation
            d['x'] = d.apply(lambda row: rc2xy(row.row, row.col)[0], axis=1)
            d['y'] = d.apply(lambda row: rc2xy(row.row, row.col)[1], axis=1)
            # geometry
            from shapely.geometry import Point
            d['geometry'] = d.apply(lambda row: Point(row['x'], row['y']),
                                    axis=1)

            # the gamma shape and rate parsed above are still in scope here

            pt_iter = range(0, len(d))
            temp_weights = []
            temp_green_vals = []
            temp_wet_vals = []
            temp_roads_vals = []
            for j in pt_iter:
                temp_dist = d.loc[j].geometry.distance(all_pts['geometry'][i])
                weight = gamma.pdf(temp_dist, a=shape, scale=1 / rate)
                temp_weights.append(weight)

                temp_green = d.loc[j].green
                temp_wet = d.loc[j].wet
                temp_roads = d.loc[j].roads
                temp_green_vals.append(temp_green)
                temp_wet_vals.append(temp_wet)
                temp_roads_vals.append(temp_roads)

            weighted_green = sum(
                temp_green_vals[g] * temp_weights[g]
                for g in range(len(temp_green_vals))) / sum(temp_weights)
            weighted_wet = sum(
                temp_wet_vals[g] * temp_weights[g]
                for g in range(len(temp_wet_vals))) / sum(temp_weights)
            weighted_roads = sum(
                temp_roads_vals[g] * temp_weights[g]
                for g in range(len(temp_roads_vals))) / sum(temp_weights)
            avail_green.append(weighted_green.mean())
            avail_wet.append(weighted_wet.mean())
            avail_roads.append(weighted_roads.mean())

    import pandas

    gdf = gpd.GeoDataFrame(geometry=all_pts['geometry'])
    x_test = gdf.geometry.apply(lambda p: p.x)
    y_test = gdf.geometry.apply(lambda p: p.y)
    out_df = pandas.DataFrame(
        data={
            "x": x_test,
            "y": y_test,
            "green_avail": avail_green,
            "wet_avail": avail_wet,
            "roads_avail": avail_roads
        })
    return out_df
Example #32
            xr_write_geotiff_from_ds(ds=ds, out_path=tempdir)

            # === Zonal Stats ===
            with fiona.open(sample_shape, 'r') as shp:
                features = [feature for feature in shp]
                # for i, f in enumerate(features):
                # print(f'feat dict {i} \n ', f, '\n')
                # shapes = [feature['geometry'] for feature in shp]
                with rasterio.open(tempfile) as src:
                    for f in features:
                        src_shp = [f['geometry']]
                        watershed_id = f['id']
                        # watershed_name = f['properties']['NAME']
                        # writer.writerow(["Year", "DOY", "parameter", "zon_mean_forID1", "zon_mean_forID2", "zon_mean_forID3", ...])
                        outimage, out_transform = msk.mask(src,
                                                           src_shp,
                                                           crop=True)
                        # mask out-of-range values (negative or >= 3000)
                        outimage[outimage >= 3000] = np.nan
                        outimage[outimage < 0] = np.nan

                        ws_mean = np.nanmean(outimage)
                        # calculating cubic meters of discharge for a basin
                        m3_arr = cum_mm_to_m3(raster_dim_meters, outimage)
                        basin_ro = np.nansum(m3_arr)
                        m_basin = basin_ro / basin_area
                        mm_basin = m_basin * 1000
                        ws_year = yr

                        # average mm/year across the basin. Should be pretty close to 'mm_year'?
                        sd['mm_basin'].append(mm_basin)
Example #33
minx, miny = 24.60, 60.00
maxx, maxy = 25.22, 60.35
bbox = box(minx, miny, maxx, maxy)

# Insert the bbox into a GeoDataFrame
geo = gpd.GeoDataFrame({'geometry': bbox}, index=[0], crs=from_epsg(4326))

# Re-project into the same coordinate system as the raster data
geo = geo.to_crs(crs=data.crs.data)
#shp_out = r"C:\HY-DATA\HENTENKA\KOODIT\Opetus\Automating-GIS-processes\Data\Landsat\Mask_polygon.shp"
#geo.to_file(shp_out)

# Get the geometry coordinates
coords = getFeatures(geo)

# Clip the raster with the polygon
out_img, out_transform = mask(raster=data, shapes=coords, crop=True)
# Copy the metadata
out_meta = data.meta.copy()

# Parse EPSG code
epsg_code = int(data.crs.data['init'][5:])

# Write the clipped raster to disk
out_meta.update({
    "driver": "GTiff",
    "height": out_img.shape[1],
    "width": out_img.shape[2],
    "transform": out_transform,
    "crs": pycrs.parser.from_epsg_code(epsg_code).to_proj4()
})
Example #34
def triplify_dataset(cesbio_file, parcel_file, output_folder):
    startEpoch = time.time()
    print('triplify Cadastral parcel and its land-cover')
    village = parcel_file[-20:-15]
    triples = []
    files = []
    iFiles = 0
    iTriples = 0
    lsNameSpaces, lsTriplesTemplate = readTemplate(templateFile)
    CesbioFile = rasterio.open(cesbio_file)
    print("Land cover (expected 4326):", CesbioFile.crs)
    parcelFile = gpd.read_file(parcel_file)

    print(CesbioFile.crs)
    if '.shp' in parcel_file:
        project = partial(pyproj.transform, pyproj.Proj(init='epsg:4326'),
                          pyproj.Proj(init='epsg:4326'))

    for index, row in parcelFile.iterrows():
        clear_output()
        print(str(index) + '/' + str(len(parcelFile)))
        feat = {}
        feat['id'] = row['id']
        feat['geojson'] = shape(row['geometry']).wkt
        try:
            geom = transform(project, row['geometry'])
            LCParcel, LCTransform = mask(CesbioFile, [geom],
                                         crop=True,
                                         indexes=1,
                                         nodata=0)
        except ValueError as err:
            print(err)
            continue

        if np.count_nonzero(LCParcel) <= 0:
            try:
                geom = transform(project, row['geometry'])
                LCParcel, LCTransform = mask(CesbioFile, [geom],
                                             crop=True,
                                             all_touched=True,
                                             indexes=1,
                                             nodata=0)
            except ValueError as err:
                print(err)
                continue

        if np.count_nonzero(LCParcel) > 0:
            counter = Counter(LCParcel.ravel())
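            # most_common(2) returns the two most frequent codes; if the top
            # one is the nodata value 0, fall back to the runner-up below.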
            dominantLCCode, DominantLCFreq = counter.most_common(2)[0]
            if dominantLCCode == 0:
                dominantLCCode, DominantLCFreq = counter.most_common(2)[1]

            percentage = round(
                DominantLCFreq * 100 / np.count_nonzero(LCParcel), 0)
            feat['lc'] = correspond_land_cover(dominantLCCode)
            URI = URIBase + 'Parcel/' + str(row['id']) + ">"
            triplesRow = triplify(feat, lsTriplesTemplate, URI,
                                  "p" + str(row['id']))
            triples = triples + triplesRow
        else:
            print("Too small!")

    file = output_folder + 'land_cover.ttl'
    files.append(file)
    writeToFile(lsNameSpaces, triples, file)
    iTriples = iTriples + len(triples)
    clear_output()
    print('Number of triples', iTriples)
    print('Number of parcels', len(parcelFile))
    endEpoch = time.time()
    elapsedTime = endEpoch - startEpoch

    if elapsedTime < 60:
        print('Elapsed time : ', elapsedTime, ' seconds')
    else:
        print('Elapsed time : ', math.floor(elapsedTime / 60), ' minutes and ',
              elapsedTime % 60, ' seconds')
    return files
Example #35
    driver_tiles = [DIRS.driver / tile for tile in set(gdf.driver)]

    country_geometry = countries[countries['NAME'] ==
                                 country].__geo_interface__
    country_geometry = country_geometry['features'][0]['geometry']

    print(merge_msg)
    data, transform = merge_from(driver_tiles)
    f = write(data,
              DIRS.tif / '{}.tif'.format(normalized_country),
              crs=WGS84,
              transform=transform,
              driver='GTiff',
              compress='lzw')

    del data, transform
    gc.collect()

    print(clip_msg)
    with rio.open(f, 'r') as src:
        data, transform = mask(src, [country_geometry], crop=True)

    write(data,
          DIRS.tif / 'driver_{}.tif'.format(normalized_country),
          crs=WGS84,
          transform=transform,
          driver='GTiff',
          compress='lzw')

    os.remove(str(f))
shapefile = gpd.read_file(shapefile_fp)
shapefile.crs
shapefile.bounds

geoms = shapefile.geometry.values

geometry = geoms[0]
print(type(geometry))
print(geometry)

# can also do this using polygon.__geo_interface__
feature = [mapping(geometry)]
print(type(feature))
print(feature)

out_image, out_transform = mask(raster, feature, crop=True)
out_image.shape

##############################################################################
# TRAINING & TEST DATA
##############################################################################
nsamples_class = 10000

# create training pixels matrix with corresponding classname labels for rf
# This segment of code is from the tutorial:
# http://patrickgray.me/open-geo-tutorial/chapter_5_classification.html
X = np.array([], dtype=np.int8).reshape(0, 8)  # pixels for training
y = np.array([], dtype=np.string_)  # labels for training

# extract the raster values within the polygon
with rasterio.open(raster_fp) as src:
Example #37
    r"D:\VanBovenDrive\VanBoven MT\Archive\c07_hollandbean\Joke Visser\20190527\1514\Orthomosaic\c07_hollandbean-Joke Visser-201905271514_DEM-GR_cubic.tif",
    r"D:\VanBovenDrive\VanBoven MT\Archive\c07_hollandbean\Joke Visser\20190603\1020\Orthomosaic\c07_hollandbean-Joke Visser-201906031020_DEM-GR_cubic.tif",
    r"D:\VanBovenDrive\VanBoven MT\Archive\c07_hollandbean\Joke Visser\20190619\1208\Orthomosaic\c07_hollandbean-Joke Visser-201906191208_DEM-GR_cubic.tif",
    r"D:\VanBovenDrive\VanBoven MT\Archive\c07_hollandbean\Joke Visser\20190625\0739\Orthomosaic\c07_hollandbean-Joke Visser-201906250739_DEM-GR_cubic.tif",
    r"D:\VanBovenDrive\VanBoven MT\Archive\c07_hollandbean\Joke Visser\20190701\0933\Orthomosaic\c07_hollandbean-Joke Visser-201907010933_DEM-GR_cubic.tif",
    r"D:\VanBovenDrive\VanBoven MT\Archive\c07_hollandbean\Joke Visser\20190710\1007\Orthomosaic\c07_hollandbean-Joke Visser-201907101007_DEM-GR_cubic.tif",
    r"D:\VanBovenDrive\VanBoven MT\Archive\c07_hollandbean\Joke Visser\20190802\0829\Orthomosaic\c07_hollandbean-Joke Visser-201908020829_DEM-GR_cubic.tif",
    r"D:\VanBovenDrive\VanBoven MT\Archive\c07_hollandbean\Joke Visser\20190823\1004\Orthomosaic\c07_hollandbean-Joke Visser-201908231004_DEM-GR_cubic.tif",
    r"D:\VanBovenDrive\VanBoven MT\Archive\c07_hollandbean\Joke Visser\20190830\0729\Orthomosaic\c07_hollandbean-Joke Visser-201908300729_DEM-GR_cubic.tif"
]

for j in range(len(polys_paths)):
    shp = gpd.read_file(polys_paths[j])
    shp = shp.to_crs({'init': 'epsg:4326'})

    pbar = tqdm(total=len(shp.loc[:, 'geometry']),
                desc="getting mean height",
                position=0)

    with rasterio.open(dem_paths[j]) as src:
        for i in range(len(shp.loc[:, 'geometry'])):
            if shp.loc[i, 'geometry']:
                out_image, out_transform = mask(
                    src, [mapping(shp.loc[i, 'geometry'])], crop=True)
                elev = np.extract(out_image[0, :, :] != 0, out_image[0, :, :])
                # mean of the top half of elevations; the field name is
                # truncated to 10 characters (shapefile field-name limit)
                shp.loc[i, 'mean_heigh'] = np.mean(
                    sorted(elev, reverse=True)[:int(len(elev) / 2)])
            pbar.update(1)

    shp = shp.to_crs({'init': 'epsg:28992'})
    shp.to_file(polys_paths[j])
Example #38
bbox = box(minx, miny, maxx, maxy)

# Open file
img = rasterio.open(file_name)

# Parse EPSG code of image
epsg_code = img.crs.data['init']

# Reproject into the same coordinate system as raster data
wgs84 = pyproj.CRS('epsg:4326')
img_proj = pyproj.CRS(img.crs.data['init'])
project = pyproj.Transformer.from_crs(wgs84, img_proj).transform
bbox_proj = transform(project, bbox)

# Clip mosaic to target area
clip_img, clip_transform = mask(img, [bbox_proj], crop=True)

# Copy the metadata and update it with the new dimensions
profile = img.profile
out_meta = img.meta.copy()
out_meta.update({
    "driver": "GTiff",
    "height": clip_img.shape[1],
    "width": clip_img.shape[2],
    "transform": clip_transform,
    "photometric": profile["photometric"],
    "compress": profile["compress"],
    "dtype": profile["dtype"]
Example #39
def get_sub_image(idx, selected_polygon, image_tile_list, image_tile_bounds,
                  save_path, dstnodata, brectangle):
    '''
    get a masked image based on a selected polygon; it may cross two image tiles
    :param idx: the polygon index (used in log messages)
    :param selected_polygon: the selected polygon
    :param image_tile_list: the image list
    :param image_tile_bounds: the bounding boxes of the images in the list
    :param save_path: the save path
    :param dstnodata: the nodata value for the output raster
    :param brectangle: if True, crop the raster using the bounding rectangle, else use the polygon
    :return: True if successful, False otherwise
    '''

    # find the images which the center polygon overlap (one or two images)
    img_index = get_overlap_image_index([selected_polygon], image_tile_bounds)
    if len(img_index) < 1:
        basic.outputlogMessage(
            'Warning, %dth polygon does not overlap any image tile, please check '  # and its buffer area
            '(1) the shape file and raster have the same projection'
            ' and (2) this polygon is in the extent of images' % idx)
        return False

    image_list = [image_tile_list[item] for item in img_index]

    # check it cross two or more images
    if len(image_list) == 1:
        # for the case that the polygon only overlap one raster
        with rasterio.open(image_list[0]) as src:
            polygon_json = mapping(selected_polygon)

            # not necessary
            # overlap_win = rasterio.features.geometry_window(src, [polygon_json], pad_x=0, pad_y=0, north_up=True, rotated=False,
            #                               pixel_precision=3)

            if brectangle:
                # crop using the polygon's bounding rectangle
                # polygon_box = selected_polygon.bounds
                # i.e. shapely.geometry.Polygon([polygon_box])
                polygon_json = mapping(selected_polygon.envelope)

            # crop image and saved to disk
            out_image, out_transform = mask(src, [polygon_json],
                                            nodata=dstnodata,
                                            all_touched=True,
                                            crop=True)

            # save it to disk; note that the saved image has a small offset
            # compared to the original (~0.5 pixel)
            out_meta = src.meta.copy()
            out_meta.update({
                "driver": "GTiff",
                "height": out_image.shape[1],
                "width": out_image.shape[2],
                "transform": out_transform,
                "nodata": dstnodata
            })
            with rasterio.open(save_path, "w", **out_meta) as dest:
                dest.write(out_image)
    else:
        # for the case it overlap more than one raster, need to produce a mosaic
        tmp_saved_files = []

        for k_img, image_path in enumerate(image_list):
            with rasterio.open(image_path) as src:
                polygon_json = mapping(selected_polygon)
                if brectangle:
                    # crop using the polygon's bounding rectangle
                    # polygon_box = selected_polygon.bounds
                    # i.e. shapely.geometry.Polygon([polygon_box])
                    polygon_json = mapping(selected_polygon.envelope)

                # crop image and saved to disk
                out_image, out_transform = mask(src, [polygon_json],
                                                nodata=dstnodata,
                                                all_touched=True,
                                                crop=True)

                base, ext = os.path.splitext(save_path)
                tmp_saved = base + '_%d' % k_img + ext
                # save it to disk; note that the saved image has a small
                # offset compared to the original (~0.5 pixel)
                out_meta = src.meta.copy()
                out_meta.update({
                    "driver": "GTiff",
                    "height": out_image.shape[1],
                    "width": out_image.shape[2],
                    "transform": out_transform,
                    "nodata": dstnodata
                })
                with rasterio.open(tmp_saved, "w", **out_meta) as dest:
                    dest.write(out_image)
                tmp_saved_files.append(tmp_saved)

        # mosaic files in tmp_saved_files
        mosaic_args_list = [
            'gdal_merge.py', '-o', save_path, '-n',
            str(dstnodata), '-a_nodata',
            str(dstnodata)
        ]
        mosaic_args_list.extend(tmp_saved_files)
        if basic.exec_command_args_list_one_file(mosaic_args_list,
                                                 save_path) is False:
            raise IOError('error, obtain a mosaic (%s) failed' % save_path)

        # # for test
        # if idx==13:
        #     raise ValueError('for test')

        # remove the tmp files
        for tmp_file in tmp_saved_files:
            io_function.delete_file_or_dir(tmp_file)

    # if it would output a very large image (10000 by 10000 pixels), raise an error

    return True
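A hedged usage sketch for get_sub_image; the polygon, tile lists, output path, and nodata value below are placeholders, not values from the original code:

# Hypothetical call (all arguments are assumptions for illustration):
ok = get_sub_image(0, training_polygons[0], image_tile_list,
                   image_tile_bounds, 'sub_image_0.tif',
                   dstnodata=0, brectangle=True)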