Example #1
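All seven snippets below come from the same river-network preprocessing toolkit and share a set of imports that this listing omits. A minimal preamble matching the names used throughout; `gdalutils` and `misc_utils` are project-internal helpers rather than PyPI packages, so their import paths are assumed:

import os
import sys
import subprocess

import numpy as np
import pandas as pd
import geopandas as gpd   # only needed for Example #4
import shapefile          # pyshp
from osgeo import osr

import gdalutils          # project-internal raster helpers (path assumed)
import misc_utils         # project-internal geometry helpers (path assumed)

Note that the snippets use the pyshp 1.x API (shapefile.Writer(shapefile.POINT), w.save(...)); pyshp 2.x instead takes the target filename in the Writer constructor and replaces save() with close().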
def depth_raster(w, netf, fdepth, thresh):
    """
    From a raster of depths this subroutine finds nearest depth to every river pixel in grid
    """

    # Reading river network file
    dat_net = gdalutils.get_data(netf)
    geo_net = gdalutils.get_geo(netf)
    iy, ix = np.where(dat_net > 0)
    xx = geo_net[8][ix]
    yy = geo_net[9][iy]

    # Reading depth source file
    dat = gdalutils.get_data(fdepth)
    geo = gdalutils.get_geo(fdepth)
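    # NOTE: iy/ix are deliberately rebound here for the depth raster; the
    # network indices above are no longer needed once xx/yy are computed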
    iy, ix = np.where(dat > -9999)
    xdat = geo[8][ix]
    ydat = geo[9][iy]

    depth = []
    for x, y in zip(xx, yy):
        try:
            dis, ind = misc_utils.near_euc(xdat, ydat, (x, y))
            if dis <= thresh:
                val = dat[iy[ind], ix[ind]]
                depth.append(val)
            else:
                depth.append(np.nan)
        except ValueError:
            depth.append(np.nan)

    for x, y, mydepth in zip(xx, yy, depth):
        w.point(x, y)
        w.record(x, y, mydepth)

    return w
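`misc_utils.near_euc` is not shown in this listing. From its call sites it returns a `(distance, index)` pair and raises `ValueError` on empty input (hence the `except ValueError` branches). A plausible reconstruction, offered only as a sketch:

def near_euc(xs, ys, pt):
    # Hypothetical reimplementation: Euclidean distance and index of the
    # point in (xs, ys) nearest to pt
    xs = np.asarray(xs, dtype='float64')
    ys = np.asarray(ys, dtype='float64')
    d = np.hypot(xs - pt[0], ys - pt[1])
    ind = int(np.argmin(d))  # np.argmin raises ValueError on an empty array
    return d[ind], ind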
Example #2
def getwidths(recf, netf, proj, fwidth, output, thresh):

    print("    running getwidths.py...")

    w = shapefile.Writer(shapefile.POINT)
    w.field('x')
    w.field('y')
    w.field('width')

    # Reading XXX_rec.csv file
    rec = pd.read_csv(recf)

    # Get nearest width from the data source
    # Uses Euclidean distance to find the nearest point in the source.
    # The `try` block handles the case where the width database contains
    # no data for the basin; such points get NaN here and are later
    # filled with a default 30 m width in check_width below
    width = []
    for x, y in zip(rec['lon'], rec['lat']):

        xmin = x - thresh
        ymin = y - thresh
        xmax = x + thresh
        ymax = y + thresh

        dat, geo = gdalutils.clip_raster(fwidth, xmin, ymin, xmax, ymax)
        iy, ix = np.where(dat > 30)
        xdat = geo[8][ix]
        ydat = geo[9][iy]

        try:
            dis, ind = misc_utils.near_euc(xdat, ydat, (x, y))
            val = dat[iy[ind], ix[ind]]
            width.append(val)
        except ValueError:
            width.append(np.nan)

    rec['width'] = width

    # Group river network per link
    # If NaNs outnumber real values, set the whole link to 30 m
    # Otherwise, interpolate the real values to fill the NaNs
    def check_width(a):
        b = a.copy()
        c = b.isnull()
        falses = c.sum()
        trues = c.count() - falses
        if trues >= falses:
            return a.interpolate(limit_direction='both')
        else:
            b.loc[:] = 30
            return b
    rec.loc[:, 'width'] = rec.groupby('link').width.apply(check_width)

    # Writing resulting .shp file
    for x, y, width in zip(rec['lon'], rec['lat'], rec['width']):
        w.point(x, y)
        w.record(x, y, width)
    w.save("%s.shp" % output)

    # write .prj file
    prj = open("%s.prj" % output, "w")
    srs = osr.SpatialReference()
    srs.ImportFromProj4(proj)
    prj.write(srs.ExportToWkt())
    prj.close()

    geo = gdalutils.get_geo(netf)

    fmt = "GTiff"
    nodata = -9999
    name1 = output+".shp"
    name2 = output+".tif"
    subprocess.call(["gdal_rasterize", "-a_nodata", str(nodata), "-of", fmt, "-tr", str(geo[6]), str(geo[7]),
                     "-a", "width", "-a_srs", proj, "-te", str(geo[0]), str(geo[1]), str(geo[2]), str(geo[3]), name1, name2])
Example #3
def basinsplit(ncatch, outdir, cattif, demtif, acctif, nettif, wthtif, dirtif, aretif, otltif, tretxt, cootxt):

    # Get extent and area for every catchment
    catarr = gdalutils.get_data(cattif)

    try:
        dat = catarr == ncatch
    except Exception:
        sys.exit('ERROR invalid basin number')

    catgeo = gdalutils.get_geo(cattif)
    area = gdalutils.get_data(aretif)
    outlet = gdalutils.get_data(otltif)
    direc = gdalutils.get_data(dirtif)
    row, col = np.where(dat)
    _sum = np.sum(dat*area)

    if _sum >= 100:  # make sure the basin is larger than 100 km²

        xmin = catgeo[8][min(col)]
        xmax = catgeo[8][max(col)]
        ymin = catgeo[9][max(row)]
        ymax = catgeo[9][min(row)]

        # Clip input rasters
        netarr_tmp, netgeo_tmp = gdalutils.clip_raster(
            nettif, xmin, ymin, xmax, ymax)
        catarr_tmp, catgeo_tmp = gdalutils.clip_raster(
            cattif, xmin, ymin, xmax, ymax)

        # Mask only the catchment and fill with zeros
        netarr_tmp = np.where(catarr_tmp == ncatch, netarr_tmp, 0)

        if netarr_tmp.sum() >= 35:  # be sure river network is long enough

            # Clipping tree and coord files based on nettif > 0, coordinates
            tree = misc_utils.read_tree_taudem(tretxt)
            coor = misc_utils.read_coord_taudem(cootxt)
            iy, ix = np.where(netarr_tmp > 0)
            Xrav = netgeo_tmp[8][ix]
            Yrav = netgeo_tmp[9][iy]

            # Clipping the coord file (slow; this loop could be optimised)
            lfp_coor = pd.DataFrame()
            for i in range(len(Xrav)):
                dis, ind = misc_utils.near_euc(
                    coor['lon'].values, coor['lat'].values, (Xrav[i], Yrav[i]))
                if dis <= 0.01:
                    # DataFrame.append was removed in pandas 2.0; use concat
                    lfp_coor = pd.concat([lfp_coor, coor.loc[[ind]]])
            lfp_coor = lfp_coor[['lon', 'lat',
                                 'distance', 'elev', 'contr_area']]
            lfp_coor.index.name = 'index'
            lfp_coor.sort_index(inplace=True)
            # Remove duplicates just in case
            lfp_coor.drop_duplicates(inplace=True)

            # Clipping tree file
            lfp_tree = pd.DataFrame()
            for i in tree.index:
                sta = tree.loc[i, 'start_pnt']
                end = tree.loc[i, 'end_pnt']
                lon1 = coor.loc[sta, 'lon']
                lat1 = coor.loc[sta, 'lat']
                lon2 = coor.loc[end, 'lon']
                lat2 = coor.loc[end, 'lat']
                dis1, ind1 = misc_utils.near_euc(
                    lfp_coor['lon'].values, lfp_coor['lat'].values, (lon1, lat1))
                dis2, ind2 = misc_utils.near_euc(
                    lfp_coor['lon'].values, lfp_coor['lat'].values, (lon2, lat2))
                # The default tolerance of 0.01 failed to find link number
                # 3504; it was increased to 0.012 to catch the missing link
                if (dis1 <= 0.012) & (dis2 <= 0.012):
                    # DataFrame.append was removed in pandas 2.0; use concat
                    lfp_tree = pd.concat([lfp_tree, tree.loc[[i]]])
            lfp_tree = lfp_tree[['link_no', 'start_pnt', 'end_pnt', 'frst_ds',
                                 'frst_us', 'scnd_us', 'strahler', 'mon_pnt', 'shreve']]
            lfp_tree.index.name = 'index'

            # Creating folder per basin
            ncatchstr = "%03d" % ncatch
            folder = outdir + "/" + ncatchstr
            create_out_folder(folder)

            # Writing clipped coord and tree files
            fnametre = folder + "/" + ncatchstr + "_tre.csv"
            fnamecoo = folder + "/" + ncatchstr + "_coo.csv"
            lfp_coor.to_csv(fnamecoo)
            lfp_tree.to_csv(fnametre, float_format='%i')

            # Creating rec dataframe
            rec = connections(fnametre, fnamecoo)

            #  Writing XXX_rec.csv file
            fnamerec = folder + "/" + ncatchstr + "_rec.csv"
            rec.to_csv(fnamerec)

            # Get extent from rec dataframe
            xmin = rec['lon'].min()
            xmax = rec['lon'].max()
            ymin = rec['lat'].min()
            ymax = rec['lat'].max()

            # Get fixed extent
            # _dir    = getdir(rec,dirtif)
            # _dirlet = getdirletter(_dir)
            # xmin,ymin,xmax,ymax = get_extent_outlet(_dirlet,0.1,xmin,ymin,xmax,ymax)

            # Clipping rasters
            demarrcli, demgeocli = gdalutils.clip_raster(
                demtif, xmin, ymin, xmax, ymax)
            accarrcli, accgeocli = gdalutils.clip_raster(
                acctif, xmin, ymin, xmax, ymax)
            wtharrcli, wthgeocli = gdalutils.clip_raster(
                wthtif, xmin, ymin, xmax, ymax)
            dirarrcli, dirgeocli = gdalutils.clip_raster(
                dirtif, xmin, ymin, xmax, ymax)
            netarrcli, netgeocli = gdalutils.clip_raster(
                nettif, xmin, ymin, xmax, ymax)
            catarrcli, catgeocli = gdalutils.clip_raster(
                cattif, xmin, ymin, xmax, ymax)

            # Mask only the catchment and fill with zeros
            netarrcli = np.where(catarrcli == ncatch, netarrcli, 0)
            dirarrcli = np.where(catarrcli == ncatch, dirarrcli, 0)

            # Creating output names
            fnamedem = folder + "/" + ncatchstr + "_dem.tif"
            fnameacc = folder + "/" + ncatchstr + "_acc.tif"
            fnamenet = folder + "/" + ncatchstr + "_net.tif"
            fnamewth = folder + "/" + ncatchstr + "_wth.tif"
            fnamedir = folder + "/" + ncatchstr + "_dir.tif"

            # Writing clipped arrays
            nodata = -9999
            gdalutils.write_raster(demarrcli, fnamedem,
                                   demgeocli, "Float32", nodata)
            gdalutils.write_raster(accarrcli, fnameacc,
                                   accgeocli, "Float32", nodata)
            gdalutils.write_raster(netarrcli, fnamenet,
                                   netgeocli, "Float32", nodata)
            gdalutils.write_raster(wtharrcli, fnamewth,
                                   wthgeocli, "Float32", nodata)
            gdalutils.write_raster(dirarrcli, fnamedir,
                                   dirgeocli, "Float32", nodata)

        else:
            print("NOT PROCESSED: river network has fewer than 35 pixels: " +
                  str(netarr_tmp.sum()) + " pixels in basin number " + str(ncatch))
    else:
        print("NOT PROCESSED: basin area is below 100 km²: " +
              str(_sum) + " km² in basin number " + str(ncatch))
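These examples index the `geo` tuple returned by `gdalutils.get_geo` and `gdalutils.clip_raster` positionally. The layout inferred from usage (not confirmed against the library itself):

# geo[0], geo[1], geo[2], geo[3] -> xmin, ymin, xmax, ymax (passed to -te)
# geo[6], geo[7]                 -> x and y pixel resolution (passed to -tr)
# geo[8]                         -> x coordinate of each column
# geo[9]                         -> y coordinate of each row (descending,
#                                   which is why ymin uses max(row) above)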
Example #4
def getwidths_varthresh(recf, netf, proj, fwidth, output, fbankfullq):

    # Reading XXX_net.tif file
    geo1 = gdalutils.get_geo(netf)

    bankfullq = gpd.read_file(fbankfullq)
    # bankfullq has a column named 'bankfullq'

    # Reading XXX_rec.csv file
    rec = pd.read_csv(recf)
    print('loaded data')

    # x and y resolution (degrees)
    xres = geo1[6]
    yres = geo1[7]
    print('data res', xres, yres)

    # 30 m is the default width
    width = np.ones([len(bankfullq)], dtype=np.float32) * 30.
    for row in bankfullq.itertuples():
        #print(row[0],row[1],row[2],row[3],row[4])
        i = row[0]
        x = float(row[1])
        y = float(row[2])
        bfq = max(float(row[3]), 1.)
        # Choose some threshold based on bankfull q (bfq)
        thresh = np.log(bfq) / 1000. + bfq / 1000000. + 2 * abs(
            xres) + 2 * abs(yres)

        # Come up with a minimum width to search for, based on bankfullq.
        # This is designed to prevent assigning width values from the
        # tributaries to the major river channels.
        # NOTE: minwidth is computed but never used below; it was possibly
        # intended for the `dat > 30` filter
        minwidth = bfq / 100. + 30

        # Get nearest width from the data source
        # Uses Euclidean distance to find the nearest point in the source.
        # The `try` block handles the case where the width database contains
        # no data for the basin; such points keep the default 30 m width

        xmin = x - thresh
        ymin = y - thresh
        xmax = x + thresh
        ymax = y + thresh

        dat, geo = gdalutils.clip_raster(fwidth, xmin, ymin, xmax, ymax)
        try:
            iy, ix = np.where(dat > 30)
        except Exception:
            print('Error: point', i, x, y)
            print('Vals:', bfq, thresh, dat)
            continue
        xdat = geo[8][ix]
        ydat = geo[9][iy]

        try:
            dis, ind = misc_utils.near_euc(xdat, ydat, (x, y))
            val = dat[iy[ind], ix[ind]]
            width[i] = val
        except ValueError:
            continue  # keep the default 30 m width

    # Add widths to dataframe, then copy to new dataframe
    # bankfullq['width'] = width
    # widths = bankfullq[['x', 'y', 'geometry', 'width']]
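    # NOTE: this assumes the rows of bankfullq align one-to-one with rec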

    rec['width'] = width
    #################################################################
    # Group river network per link; check_width is the helper defined
    # in Example #2's getwidths
    rec.loc[:, 'width'] = rec.groupby('link').width.apply(check_width)

    # Write out files
    print('Writing out data')
    name1 = output + '.shp'
    #widths.to_file(name1)

    w = shapefile.Writer(shapefile.POINT)
    w.field('x')
    w.field('y')
    w.field('width')
    # Writing resulting .shp file
    for x, y, width in zip(rec['lon'], rec['lat'], rec['width']):
        w.point(x, y)
        w.record(x, y, width)
    w.save("%s.shp" % output)

    # write .prj file
    prj = open("%s.prj" % output, "w")
    srs = osr.SpatialReference()
    srs.ImportFromProj4(proj)
    prj.write(srs.ExportToWkt())
    prj.close()

    nodata = -9999
    fmt = "GTiff"
    #    name1 = output
    #    name2 = os.path.dirname(output) + '/' + \
    #        os.path.basename(output).split('.')[0] + '.tif'
    name2 = output + '.tif'
    subprocess.call([
        "gdal_rasterize", "-a_nodata",
        str(nodata), "-of", fmt, "-ot", "Float32", "-co", "COMPRESS=DEFLATE",
        "-tr",
        str(geo1[6]),
        str(geo1[7]), "-a", "width", "-a_srs", proj, "-te",
        str(geo1[0]),
        str(geo1[1]),
        str(geo1[2]),
        str(geo1[3]), name1, name2
    ])
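A worked instance of the adaptive threshold and minimum width in `getwidths_varthresh` (the numbers are illustrative):

bfq = 1000.           # bankfull discharge in m3/s, floored at 1
xres = yres = 0.001   # grid resolution in degrees
thresh = np.log(bfq) / 1000. + bfq / 1000000. + 2 * abs(xres) + 2 * abs(yres)
# = 0.0069 + 0.0010 + 0.0020 + 0.0020 = 0.0119 degrees (~1.3 km)
minwidth = bfq / 100. + 30
# = 40 m (computed but unused in the function body above)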
Example #5
def fixelevs(source, output, netf, recf, proj, method):

    print("    running fixelevs.py...")

    # Reading XXX_net.tif file
    geo = gdalutils.get_geo(netf)

    # Reading XXX_rec.csv file
    rec = pd.read_csv(recf)

    # Database to fix
    elev = np.array(shapefile.Reader(source).records(), dtype='float64')

    # Initiate output shapefile
    w = shapefile.Writer(shapefile.POINT)
    w.field('x')
    w.field('y')
    w.field('elevadj')

    # Retrieving bank elevations from XXX_bnk.shp file
    # Values are stored in rec['bnk']
    bnk = []
    for i in rec.index:
        dis, ind = misc_utils.near_euc(elev[:, 0], elev[:, 1], (rec['lon'][i],
                                                                rec['lat'][i]))
        bnk.append(elev[ind, 2])
    rec['bnk'] = bnk

    # Adjusting bank values; results are stored in rec['bnk_adj']
    # Coordinates are grouped by reach number
    rec['bnk_adj'] = 0.0  # float column to receive adjusted elevations
    recgrp = rec.groupby('reach')
    for reach, df in recgrp:
        ids = df.index
        dem = df['bnk']

        # calc bank elevation
        if method == 'yamazaki':
            adjusted_dem = bank4flood(dem)
        elif method == 'lowless':
            adjusted_dem = lowless(dem)
        else:
            sys.exit('Method not recognised')
        rec.loc[ids, 'bnk_adj'] = adjusted_dem  # avoid chained assignment

    # Writing resulting .shp file
    for i in rec.index:
        w.point(rec['lon'][i], rec['lat'][i])
        w.record(rec['lon'][i], rec['lat'][i], rec['bnk_adj'][i])
    w.save("%s.shp" % output)

    # write .prj file
    prj = open("%s.prj" % output, "w")
    srs = osr.SpatialReference()
    srs.ImportFromProj4(proj)
    prj.write(srs.ExportToWkt())
    prj.close()

    nodata = -9999
    fmt = "GTiff"
    name1 = output+".shp"
    name2 = output+".tif"
    subprocess.call(["gdal_rasterize", "-a_nodata", str(nodata), "-of", fmt, "-tr",
                     str(geo[6]), str(geo[7]), "-a", "elevadj", "-a_srs", proj, "-te", str(geo[0]), str(geo[1]), str(geo[2]), str(geo[3]), name1, name2])
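A hypothetical invocation of `fixelevs`; the file names follow the per-basin naming convention used in `basinsplit`, and the PROJ string is illustrative only:

fixelevs(source='077_bnk.shp',   # bank elevation points: x, y, elev
         output='077_bnkfix',    # produces 077_bnkfix.shp, .prj and .tif
         netf='077_net.tif',
         recf='077_rec.csv',
         proj='+proj=longlat +datum=WGS84 +no_defs',
         method='yamazaki')      # or 'lowless'; anything else exits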
Example #6
def basinsplit(ncatch, outdir, cattif, demtif, acctif, nettif, wthtif, dirtif,
               aretif, ordtif, tretxt, cootxt):

    # Get extent and area for every catchment
    catarr = gdalutils.get_data(cattif)

    try:
        dat = catarr == ncatch
    except Exception:
        sys.exit('ERROR invalid basin number')

    # Use gdal to mask out basin in network and direction tifs
    nettmp = 'net_tmp.tif'
    dirtmp = 'dir_tmp.tif'
    acctmp = 'acc_tmp.tif'
    ordtmp = 'ord_tmp.tif'
    cmd = [
        'gdal_calc.py', '--calc', 'where(B==' + str(ncatch) + ',A,0)',
        '--format', 'GTiff', '--type', 'Int16', '--NoDataValue', '-9999', '-B',
        cattif, '--B_band', '1', '-A', nettif, '--A_band', '1', '--co',
        'COMPRESS=DEFLATE', '--outfile', nettmp
    ]
    subprocess.call(cmd)
    cmd = [
        'gdal_calc.py', '--calc', 'where(B==' + str(ncatch) + ',A,0)',
        '--format', 'GTiff', '--type', 'Int16', '--NoDataValue', '-9999', '-B',
        cattif, '--B_band', '1', '-A', dirtif, '--A_band', '1', '--co',
        'COMPRESS=DEFLATE', '--outfile', dirtmp
    ]
    subprocess.call(cmd)
    cmd = [
        'gdal_calc.py', '--calc', 'where(B==' + str(ncatch) + ',A,0)',
        '--format', 'GTiff', '--type', 'Float32', '--NoDataValue', '-9999',
        '-B', cattif, '--B_band', '1', '-A', acctif, '--A_band', '1', '--co',
        'COMPRESS=DEFLATE', '--outfile', acctmp
    ]
    subprocess.call(cmd)
    cmd = [
        'gdal_calc.py', '--calc', 'where(B==' + str(ncatch) + ',A,0)',
        '--format', 'GTiff', '--type', 'Int16', '--NoDataValue', '-9999', '-B',
        cattif, '--B_band', '1', '-A', ordtif, '--A_band', '1', '--co',
        'COMPRESS=DEFLATE', '--outfile', ordtmp
    ]
    subprocess.call(cmd)
    print('separated basin for nettif, dirtif, acctif, ordtif')

    catgeo = gdalutils.get_geo(cattif)
    area = gdalutils.get_data(aretif)
    #outlet = gdalutils.get_data(otltif)
    #direc = gdalutils.get_data(dirtif)
    row, col = np.where(dat)
    _sum = np.sum(dat * area)
    # clean up
    del (catarr, dat, area)

    if _sum >= 100:  # make sure the basin is larger than 100 km²

        xmin = catgeo[8][min(col)]
        xmax = catgeo[8][max(col)]
        ymin = catgeo[9][max(row)]
        ymax = catgeo[9][min(row)]
        # Clean up
        del (row, col)

        # Clip input rasters
        netarr_tmp, netgeo_tmp = gdalutils.clip_raster(nettmp, xmin, ymin,
                                                       xmax, ymax)
        net_size = (netarr_tmp > 0).sum()
        print('loaded net array')

        if net_size >= 35:  # be sure river network is long enough

            # Load tree and coord files
            tree = misc_utils.read_tree_taudem(tretxt)
            lfp_coor = misc_utils.read_coord_taudem(cootxt)
            lfp_coor.index.name = 'index'

            # Get list of x,y points in river network in basin
            iy, ix = np.where(netarr_tmp > 0)
            Xrav = netgeo_tmp[8][ix]
            Yrav = netgeo_tmp[9][iy]
            # Clean up memory
            del (netarr_tmp)

            # Clipping tree file based on segments within basin
            print('Clipping tree file')
            lfp_tree = pd.DataFrame()
            for i in tree.index:
                sta = tree.loc[i, 'start_pnt']
                end = tree.loc[i, 'end_pnt']
                lon1 = lfp_coor.loc[sta, 'lon']
                lat1 = lfp_coor.loc[sta, 'lat']
                lon2 = lfp_coor.loc[end, 'lon']
                lat2 = lfp_coor.loc[end, 'lat']
                #                dis1, ind1 = misc_utils.near_euc(
                #                    lfp_coor['lon'].values, lfp_coor['lat'].values, (lon1, lat1))
                #                dis2, ind2 = misc_utils.near_euc(
                #                    lfp_coor['lon'].values, lfp_coor['lat'].values, (lon2, lat2))
                dis1, ind1 = misc_utils.near_euc(Xrav, Yrav, (lon1, lat1))
                dis2, ind2 = misc_utils.near_euc(Xrav, Yrav, (lon2, lat2))
                # The default tolerance of 0.01 failed to find link number
                # 3504; it was increased to 0.012 to catch the missing link
                if (dis1 <= 0.012) & (dis2 <= 0.012):
                    # DataFrame.append was removed in pandas 2.0; use concat
                    lfp_tree = pd.concat([lfp_tree, tree.loc[[i]]])
            lfp_tree = lfp_tree[[
                'link_no', 'start_pnt', 'end_pnt', 'frst_ds', 'frst_us',
                'scnd_us', 'strahler', 'mon_pnt', 'shreve'
            ]]
            lfp_tree.index.name = 'index'

            # Creating folder per basin
            ncatchstr = "%03d" % ncatch
            folder = outdir + "/" + ncatchstr
            create_out_folder(folder)

            # Writing clipped coord and tree files
            print('Writing text files')
            fnametre = folder + "/" + ncatchstr + "_tre.csv"
            fnamecoo = folder + "/" + ncatchstr + "_coo.csv"
            lfp_coor.to_csv(fnamecoo)
            lfp_tree.to_csv(fnametre, float_format='%i')
            # clean up memory
            del (lfp_coor, lfp_tree)

            # Creating rec dataframe
            rec = connections(fnametre, fnamecoo)

            #  Writing XXX_rec.csv file
            fnamerec = folder + "/" + ncatchstr + "_rec.csv"
            rec.to_csv(fnamerec)

            # Get extent from rec dataframe
            xmin = rec['lon'].min()
            xmax = rec['lon'].max()
            ymin = rec['lat'].min()
            ymax = rec['lat'].max()
            # Clean up memory
            del (rec)

            # Get fixed extent
            # _dir    = getdir(rec,dirtif)
            # _dirlet = getdirletter(_dir)
            # xmin,ymin,xmax,ymax = get_extent_outlet(_dirlet,0.1,xmin,ymin,xmax,ymax)

            # Clipping rasters
            print('Loading and clipping rasters')
            nodata = -9999
            # Creating output names
            fnamedem = folder + "/" + ncatchstr + "_dem.tif"
            fnameacc = folder + "/" + ncatchstr + "_acc.tif"
            fnamenet = folder + "/" + ncatchstr + "_net.tif"
            fnamewth = folder + "/" + ncatchstr + "_wth.tif"
            fnamedir = folder + "/" + ncatchstr + "_dir.tif"
            fnameord = folder + "/" + ncatchstr + "_ord.tif"

            # Load and write each array before removing it from memory
            demarrcli, demgeocli = gdalutils.clip_raster(
                demtif, xmin, ymin, xmax, ymax)
            gdalutils.write_raster(demarrcli, fnamedem, demgeocli, "Float32",
                                   nodata)
            del (demarrcli, demgeocli)

            accarrcli, accgeocli = gdalutils.clip_raster(
                acctmp, xmin, ymin, xmax, ymax)
            gdalutils.write_raster(accarrcli, fnameacc, accgeocli, "Float32",
                                   nodata)
            del (accarrcli, accgeocli)

            wtharrcli, wthgeocli = gdalutils.clip_raster(
                wthtif, xmin, ymin, xmax, ymax)
            gdalutils.write_raster(wtharrcli, fnamewth, wthgeocli, "Float32",
                                   nodata)
            del (wtharrcli, wthgeocli)

            dirarrcli, dirgeocli = gdalutils.clip_raster(
                dirtmp, xmin, ymin, xmax, ymax)
            gdalutils.write_raster(dirarrcli, fnamedir, dirgeocli, "Int16",
                                   nodata)
            del (dirarrcli, dirgeocli)

            netarrcli, netgeocli = gdalutils.clip_raster(
                nettmp, xmin, ymin, xmax, ymax)
            gdalutils.write_raster(netarrcli, fnamenet, netgeocli, "Int16",
                                   nodata)
            del (netarrcli, netgeocli)

            ordarrcli, ordgeocli = gdalutils.clip_raster(
                ordtmp, xmin, ymin, xmax, ymax)
            gdalutils.write_raster(ordarrcli, fnameord, ordgeocli, "Int16",
                                   nodata)
            del (ordarrcli, ordgeocli)

            # Finally delete the nettmp and dirtmp files
            os.remove(nettmp)
            os.remove(dirtmp)
            os.remove(ordtmp)
            os.remove(acctmp)

        else:
            print("NOT PROCESSED: river network has fewer than 35 pixels: " +
                  str(net_size) + " pixels in basin number " + str(ncatch))
    else:
        print("NOT PROCESSED: basin area is below 100 km²: " + str(_sum) +
              " km² in basin number " + str(ncatch))
Example #7
def getslopes(source, output, netf, recf, proj, step):

    print("    runnning getslopes.py...")

    # Reading XXX_rec.csv file
    rec = pd.read_csv(recf)

    # Reading XXX_net.tif file
    geo = gdalutils.get_geo(netf)

    # Reading bank file (adjusted bank)
    elev = np.array(shapefile.Reader(source).records(), dtype='float64')

    # Initiate output shapefile
    w = shapefile.Writer(shapefile.POINT)
    w.field('x')
    w.field('y')
    w.field('slope')

    # Retrieving adjusted bank elevations from XXX_bnkfix.shp file
    # Values are stored in rec['bnkadj']
    bnkadj = []
    for i in rec.index:
        dis, ind = misc_utils.near_euc(elev[:, 0], elev[:, 1],
                                       (rec['lon'][i], rec['lat'][i]))
        bnkadj.append(elev[ind, 2])
    rec['bnkadj'] = bnkadj

    # Calculating slopes
    # coordinates are grouped by REACH number
    rec['slopes'] = 0.0  # float column to receive the slope values
    recgrp = rec.groupby('reach')
    for reach, df in recgrp:
        ids = df.index
        dem = df['bnkadj']
        # calc slopes
        slopes_vals = calc_slope_step(dem, df['lon'].values, df['lat'].values,
                                      step)
        rec.loc[ids, 'slopes'] = slopes_vals  # avoid chained assignment

    # Writing resulting .shp file
    for i in rec.index:
        w.point(rec['lon'][i], rec['lat'][i])
        w.record(rec['lon'][i], rec['lat'][i], rec['slopes'][i])
    w.save("%s.shp" % output)

    # write .prj file
    prj = open("%s.prj" % output, "w")
    srs = osr.SpatialReference()
    srs.ImportFromProj4(proj)
    prj.write(srs.ExportToWkt())
    prj.close()

    # Writing .tif file
    nodata = -9999
    fmt = "GTiff"
    name1 = output + ".shp"
    name2 = output + ".tif"
    subprocess.call([
        "gdal_rasterize", "-a_nodata",
        str(nodata), "-of", fmt, "-tr",
        str(geo[6]),
        str(geo[7]), "-a", "slope", "-a_srs", proj, "-te",
        str(geo[0]),
        str(geo[1]),
        str(geo[2]),
        str(geo[3]), name1, name2
    ])
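A hypothetical invocation of `getslopes`, mirroring the `fixelevs` example above (file names are illustrative; the meaning of `step` is assumed to be the window passed through to `calc_slope_step`):

getslopes(source='077_bnkfix.shp',  # adjusted bank elevations from fixelevs
          output='077_slp',
          netf='077_net.tif',
          recf='077_rec.csv',
          proj='+proj=longlat +datum=WGS84 +no_defs',
          step=5)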