Example #1
def calc_resampling(fname1, hrnodata, x, y, ix, iy, thresh, outlier, method):

    # Output grid, initialised with the high-resolution nodata value
    elev = np.ones([len(np.unique(y)), len(np.unique(x))]) * hrnodata

    for i in range(len(x)):

        # Clip a square kernel of half-width `thresh` around the point
        xmin = x[i] - thresh
        ymin = y[i] - thresh
        xmax = x[i] + thresh
        ymax = y[i] + thresh

        dem, dem_geo = gdalutils.clip_raster(fname1, xmin, ymin, xmax, ymax)
        ddem = np.ma.masked_where(dem == hrnodata, dem)

        # Optionally screen the kernel for outliers before aggregating
        if outlier == "yes":
            ddem = check_outlier(dem, ddem, hrnodata, 3.5)

        # Note: `method` is unused here; the mean of the kernel's mean and
        # minimum ("meanmin") is always applied
        elev[iy[i], ix[i]] = np.mean([ddem.mean(), ddem.min()])

    return elev
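check_outlier is called above but not defined in these snippets. A minimal sketch of what it could look like, assuming the common modified z-score (MAD-based) test; the 3.5 cutoff passed above matches the conventional threshold for that test, but the real implementation may differ:

import numpy as np

def check_outlier(dem, ddem, hrnodata, thresh):
    # Hypothetical reimplementation: mask kernel cells whose modified
    # z-score, based on the median absolute deviation (MAD), exceeds
    # `thresh`; already-masked cells stay masked.
    data = ddem.compressed()
    if data.size == 0:
        return ddem
    med = np.median(data)
    mad = np.median(np.abs(data - med))
    if mad == 0:
        return ddem  # constant kernel, nothing to flag
    mod_z = 0.6745 * (ddem - med) / mad
    return np.ma.masked_where(np.abs(mod_z) > thresh, ddem)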
Example #2
def calc_resampling_mp(pos, queue, fname1, hrnodata, x, y, thresh, outlier,
                       method):

    elev = np.ones([len(x)]) * hrnodata

    for i in range(len(x)):

        # print("rasterresample.py - " + str(len(x)-i))

        xmin = x[i] - thresh
        ymin = y[i] - thresh
        xmax = x[i] + thresh
        ymax = y[i] + thresh

        dem, dem_geo = gdalutils.clip_raster(fname1, xmin, ymin, xmax, ymax)
        ddem = np.ma.masked_values(dem, hrnodata)

        # Check for outliers
        if outlier == "yes":
            ddem = check_outlier(dem, ddem, hrnodata, 3.5)

        # Method applied at every kernel
        if method == "meanmin":
            elev[i] = np.mean([ddem.mean(), ddem.min()])

        elif method == "mean":
            elev[i] = ddem.mean()

        elif method == "min":
            elev[i] = ddem.min()

        else:
            sys.exit('ERROR method not specified')

    queue.put((pos, elev))
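calc_resampling_mp is written as a worker that reports its chunk through a queue rather than returning it. A minimal driver sketch; the chunking strategy and n_workers are illustrative assumptions, not part of the source:

import multiprocessing as mp
import numpy as np

def resample_parallel(fname1, hrnodata, x, y, thresh, outlier, method,
                      n_workers=4):
    # Split the points into chunks, run one worker per chunk, then stitch
    # the per-chunk elevations back together in submission order.
    queue = mp.Queue()
    chunks = np.array_split(np.arange(len(x)), n_workers)
    procs = []
    for pos, idx in enumerate(chunks):
        p = mp.Process(target=calc_resampling_mp,
                       args=(pos, queue, fname1, hrnodata,
                             x[idx], y[idx], thresh, outlier, method))
        p.start()
        procs.append(p)
    # Drain the queue before joining to avoid blocking on large payloads
    results = dict(queue.get() for _ in procs)
    for p in procs:
        p.join()
    return np.concatenate([results[pos] for pos in range(len(chunks))])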
Example #3
def getwidths(recf, netf, proj, fwidth, output, thresh):

    print("    running getwidths.py...")

    w = shapefile.Writer(shapefile.POINT)
    w.field('x')
    w.field('y')
    w.field('width')

    # Reading XXX_rec.csv file
    rec = pd.read_csv(recf)

    # Get nearest width from the datasource
    # Uses Euclidean distance to find the nearest point in the source.
    # `try` is included since the width database may not contain data in
    # the basin; such points get NaN here and are filled below
    width = []
    for x, y in zip(rec['lon'], rec['lat']):

        xmin = x - thresh
        ymin = y - thresh
        xmax = x + thresh
        ymax = y + thresh

        dat, geo = gdalutils.clip_raster(fwidth, xmin, ymin, xmax, ymax)
        iy, ix = np.where(dat > 30)
        xdat = geo[8][ix]
        ydat = geo[9][iy]

        try:
            dis, ind = misc_utils.near_euc(xdat, ydat, (x, y))
            val = dat[iy[ind], ix[ind]]
            width.append(val)
        except ValueError:
            width.append(np.nan)

    rec['width'] = width

    # Group river network per link
    # If there are more NaNs than real values, every width in the link is
    # set to 30; otherwise the real values are interpolated to fill NaNs
    def check_width(a):
        b = a.copy()
        c = b.isnull()
        nans = c.sum()              # missing widths in the link
        valid = c.count() - nans    # real widths in the link
        if valid >= nans:
            return a.interpolate(limit_direction='both')
        else:
            b.loc[:] = 30
            return b
    rec.loc[:, 'width'] = rec.groupby('link').width.apply(check_width)

    # Writing resulting .shp file
    for x, y, width in zip(rec['lon'], rec['lat'], rec['width']):
        w.point(x, y)
        w.record(x, y, width)
    w.save("%s.shp" % output)

    # write .prj file
    prj = open("%s.prj" % output, "w")
    srs = osr.SpatialReference()
    srs.ImportFromProj4(proj)
    prj.write(srs.ExportToWkt())
    prj.close()

    geo = gdalutils.get_geo(netf)

    fmt = "GTiff"
    nodata = -9999
    name1 = output+".shp"
    name2 = output+".tif"
    subprocess.call(["gdal_rasterize", "-a_nodata", str(nodata), "-of", fmt, "-tr", str(geo[6]), str(geo[7]),
                     "-a", "width", "-a_srs", proj, "-te", str(geo[0]), str(geo[1]), str(geo[2]), str(geo[3]), name1, name2])
Example #4
def basinsplit(ncatch, outdir, cattif, demtif, acctif, nettif, wthtif, dirtif, aretif, otltif, tretxt, cootxt):

    # Get extent of each catchment and its area
    catarr = gdalutils.get_data(cattif)

    try:
        dat = catarr == ncatch
    except Exception:
        sys.exit('ERROR invalid basin number')

    catgeo = gdalutils.get_geo(cattif)
    area = gdalutils.get_data(aretif)
    outlet = gdalutils.get_data(otltif)
    direc = gdalutils.get_data(dirtif)
    row, col = np.where(dat)
    _sum = np.sum(dat*area)

    if _sum >= 100:  # make sure the basin is larger than 100 km2

        xmin = catgeo[8][min(col)]
        xmax = catgeo[8][max(col)]
        ymin = catgeo[9][max(row)]
        ymax = catgeo[9][min(row)]

        # Clip input rasters
        netarr_tmp, netgeo_tmp = gdalutils.clip_raster(
            nettif, xmin, ymin, xmax, ymax)
        catarr_tmp, catgeo_tmp = gdalutils.clip_raster(
            cattif, xmin, ymin, xmax, ymax)

        # Mask only the catchment and fill with zeros
        netarr_tmp = np.where(catarr_tmp == ncatch, netarr_tmp, 0)

        if netarr_tmp.sum() >= 35:  # be sure river network is long enough

            # Clipping tree and coord files based on nettif > 0, coordinates
            tree = misc_utils.read_tree_taudem(tretxt)
            coor = misc_utils.read_coord_taudem(cootxt)
            iy, ix = np.where(netarr_tmp > 0)
            Xrav = netgeo_tmp[8][ix]
            Yrav = netgeo_tmp[9][iy]

            # Clipping coord file (this could be improved; the nearest-point
            # search takes some time)
            coor_rows = []
            for i in range(len(Xrav)):
                dis, ind = misc_utils.near_euc(
                    coor['lon'].values, coor['lat'].values, (Xrav[i], Yrav[i]))
                if dis <= 0.01:
                    coor_rows.append(coor.loc[ind, :])
            lfp_coor = pd.DataFrame(coor_rows)
            lfp_coor = lfp_coor[['lon', 'lat',
                                 'distance', 'elev', 'contr_area']]
            lfp_coor.index.name = 'index'
            lfp_coor.sort_index(inplace=True)
            # Remove duplicates just in case
            lfp_coor.drop_duplicates(inplace=True)

            # Clipping tree file
            tree_rows = []
            for i in tree.index:
                sta = tree.loc[i, 'start_pnt']
                end = tree.loc[i, 'end_pnt']
                lon1 = coor.loc[sta, 'lon']
                lat1 = coor.loc[sta, 'lat']
                lon2 = coor.loc[end, 'lon']
                lat2 = coor.loc[end, 'lat']
                dis1, ind1 = misc_utils.near_euc(
                    lfp_coor['lon'].values, lfp_coor['lat'].values, (lon1, lat1))
                dis2, ind2 = misc_utils.near_euc(
                    lfp_coor['lon'].values, lfp_coor['lat'].values, (lon2, lat2))
                # the default 0.01 failed to find link number 3504, so the
                # tolerance was increased to 0.012 to pick up the missing link
                if (dis1 <= 0.012) & (dis2 <= 0.012):
                    tree_rows.append(tree.loc[i, :])
            lfp_tree = pd.DataFrame(tree_rows)
            lfp_tree = lfp_tree[['link_no', 'start_pnt', 'end_pnt', 'frst_ds',
                                 'frst_us', 'scnd_us', 'strahler', 'mon_pnt', 'shreve']]
            lfp_tree.index.name = 'index'

            # Creating folder per basin
            ncatchstr = "%03d" % ncatch
            folder = outdir + "/" + ncatchstr
            create_out_folder(folder)

            # Writing clipped coord and tree files
            fnametre = folder + "/" + ncatchstr + "_tre.csv"
            fnamecoo = folder + "/" + ncatchstr + "_coo.csv"
            lfp_coor.to_csv(fnamecoo)
            lfp_tree.to_csv(fnametre, float_format='%i')

            # Creating rec dataframe
            rec = connections(fnametre, fnamecoo)

            #  Writing XXX_rec.csv file
            fnamerec = folder + "/" + ncatchstr + "_rec.csv"
            rec.to_csv(fnamerec)

            # Get extent from rec dataframe
            xmin = rec['lon'].min()
            xmax = rec['lon'].max()
            ymin = rec['lat'].min()
            ymax = rec['lat'].max()

            # Get fixed extent
            # _dir    = getdir(rec,dirtif)
            # _dirlet = getdirletter(_dir)
            # xmin,ymin,xmax,ymax = get_extent_outlet(_dirlet,0.1,xmin,ymin,xmax,ymax)

            # Clipping rasters
            demarrcli, demgeocli = gdalutils.clip_raster(
                demtif, xmin, ymin, xmax, ymax)
            accarrcli, accgeocli = gdalutils.clip_raster(
                acctif, xmin, ymin, xmax, ymax)
            wtharrcli, wthgeocli = gdalutils.clip_raster(
                wthtif, xmin, ymin, xmax, ymax)
            dirarrcli, dirgeocli = gdalutils.clip_raster(
                dirtif, xmin, ymin, xmax, ymax)
            netarrcli, netgeocli = gdalutils.clip_raster(
                nettif, xmin, ymin, xmax, ymax)
            catarrcli, catgeocli = gdalutils.clip_raster(
                cattif, xmin, ymin, xmax, ymax)

            # Mask only the catchment and fill with zeros
            netarrcli = np.where(catarrcli == ncatch, netarrcli, 0)
            dirarrcli = np.where(catarrcli == ncatch, dirarrcli, 0)

            # Creating output names
            fnamedem = folder + "/" + ncatchstr + "_dem.tif"
            fnameacc = folder + "/" + ncatchstr + "_acc.tif"
            fnamenet = folder + "/" + ncatchstr + "_net.tif"
            fnamewth = folder + "/" + ncatchstr + "_wth.tif"
            fnamedir = folder + "/" + ncatchstr + "_dir.tif"

            # Writing clipped arrays
            nodata = -9999
            gdalutils.write_raster(demarrcli, fnamedem,
                                   demgeocli, "Float32", nodata)
            gdalutils.write_raster(accarrcli, fnameacc,
                                   accgeocli, "Float32", nodata)
            gdalutils.write_raster(netarrcli, fnamenet,
                                   netgeocli, "Float32", nodata)
            gdalutils.write_raster(wtharrcli, fnamewth,
                                   wthgeocli, "Float32", nodata)
            gdalutils.write_raster(dirarrcli, fnamedir,
                                   dirgeocli, "Float32", nodata)

        else:
            print("NOT PROCESSED: River network shorter than 35 pixels : " +
                  str(netarr_tmp.sum()) + " pixels in basin number " + str(ncatch))
    else:
        print("NOT PROCESSED: Basin area smaller than 100 km2 : " +
              str(_sum) + " km2 in basin number " + str(ncatch))
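These functions index the tuple returned by gdalutils.get_geo and gdalutils.clip_raster positionally. The layout inferred from usage across the examples (an assumption, not confirmed by the source) is: [0..3] extent as xmin, ymin, xmax, ymax; [6]/[7] x/y resolution; [8]/[9] arrays of cell-centre x/y coordinates; [11] nodata. A sketch of how such a tuple could be built from a GDAL geotransform:

import numpy as np
from osgeo import gdal

def get_geo_sketch(path):
    # Illustrative only: builds a geo-like tuple matching the positional
    # indexing used in these examples.
    ds = gdal.Open(path)
    gt = ds.GetGeoTransform()
    nx, ny = ds.RasterXSize, ds.RasterYSize
    xres, yres = gt[1], gt[5]           # yres is negative for north-up rasters
    xmin, ymax = gt[0], gt[3]
    xmax, ymin = xmin + nx * xres, ymax + ny * yres
    xcoords = xmin + xres * (np.arange(nx) + 0.5)   # cell-centre x
    ycoords = ymax + yres * (np.arange(ny) + 0.5)   # cell-centre y
    nodata = ds.GetRasterBand(1).GetNoDataValue()
    return (xmin, ymin, xmax, ymax, nx, ny, xres, yres,
            xcoords, ycoords, ds.GetProjection(), nodata)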
Example #5
def getbankelevs(output, recf, netf, hrdemf, proj, method, hrnodata, thresh,
                 outlier):

    print("    running getbankelevs.py...")

    fname = output

    w = shapefile.Writer(shapefile.POINT)
    w.field('x')
    w.field('y')
    w.field('elev')

    # Coordinates for bank elevations are based on the Rec file
    rec = pd.read_csv(recf)

    for x, y in zip(rec['lon'], rec['lat']):

        xmin = x - thresh
        ymin = y - thresh
        xmax = x + thresh
        ymax = y + thresh

        dem, dem_geo = gdalutils.clip_raster(hrdemf, xmin, ymin, xmax, ymax)
        ddem = np.ma.masked_where(dem == hrnodata, dem)

        if method == 'near':
            nodata = dem_geo[11]
            dfdem = gdalutils.array_to_pandas(dem, dem_geo, nodata, 'gt')
            arr = haversine.haversine_array(
                np.array(dfdem['y'].values, dtype='float32'),
                np.float32(dfdem['x'].values), np.float32(y), np.float32(x))
            dfdem['dis'] = np.array(arr)
            dfdem.sort_values(by='dis', inplace=True)
            # nearest cell's value (column 2 is assumed to be the raster value)
            elev = dfdem.iloc[0, 2]

        elif method == 'meanmin':
            if outlier == "yes":
                ddem = check_outlier(dem, ddem, hrnodata, 3.5)
            elev = np.mean([ddem.mean(), ddem.min()])

        elif method == 'mean':
            if outlier == "yes":
                ddem = check_outlier(dem, ddem, hrnodata, 3.5)
            elev = ddem.mean()

        elif method == 'min':
            if outlier == "yes":
                ddem = check_outlier(dem, ddem, hrnodata, 3.5)
            elev = ddem.min()

        else:
            sys.exit('ERROR method not specified')

        # Write the record to the shapefile, skipping non-finite elevations

        if np.isfinite(elev):
            w.point(x, y)
            w.record(x, y, elev)

    w.save("%s.shp" % fname)

    # Write .prj file
    prj = open("%s.prj" % fname, "w")
    srs = osr.SpatialReference()
    srs.ImportFromProj4(proj)
    prj.write(srs.ExportToWkt())
    prj.close()

    geo = gdalutils.get_geo(netf)

    fmt = "GTiff"
    nodata = -9999
    bnkname1 = output + ".shp"
    bnkname2 = output + ".tif"
    subprocess.call([
        "gdal_rasterize", "-a_nodata",
        str(nodata), "-of", fmt, "-co", "COMPRESS=DEFLATE", "-tr",
        str(geo[6]),
        str(geo[7]), "-a", "elev", "-a_srs", proj, "-te",
        str(geo[0]),
        str(geo[1]),
        str(geo[2]),
        str(geo[3]), bnkname1, bnkname2
    ])
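haversine.haversine_array is assumed to return great-circle distances between the candidate cells and the target point (called above as lat-array, lon-array, lat, lon). A minimal sketch of the standard haversine formula; the real module's signature and units may differ:

import numpy as np

EARTH_RADIUS_M = 6371000.0  # mean Earth radius

def haversine_array(lat1, lon1, lat2, lon2):
    # Hypothetical reimplementation: element-wise great-circle distance
    # in metres between points given in decimal degrees.
    lat1, lon1, lat2, lon2 = map(np.radians, (lat1, lon1, lat2, lon2))
    a = (np.sin((lat2 - lat1) / 2) ** 2
         + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2) ** 2)
    return 2 * EARTH_RADIUS_M * np.arcsin(np.sqrt(a))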
Example #6
def getwidths_varthresh(recf, netf, proj, fwidth, output, fbankfullq):

    # Reading XXX_net.tif file
    geo1 = gdalutils.get_geo(netf)

    bankfullq = gpd.read_file(fbankfullq)
    # the bankfull discharge column is named 'bankfullq'

    # Reading XXX_rec.csv file
    rec = pd.read_csv(recf)
    print('loaded data')

    # x and y resolution (degrees)
    xres = geo1[6]
    yres = geo1[7]
    print('data res', xres, yres)

    # Widths default to 30 m
    width = np.ones([len(bankfullq)], dtype=np.float32) * 30.
    for row in bankfullq.itertuples():
        i = row[0]
        x = float(row[1])
        y = float(row[2])
        bfq = max(float(row[3]), 1.)
        # Choose some threshold based on bankfull q (bfq)
        thresh = np.log(bfq) / 1000. + bfq / 1000000. + 2 * abs(
            xres) + 2 * abs(yres)

        # Minimum width to search for, based on bankfull q; designed to
        # prevent assigning tributary widths to the major river channels.
        # (Note: minwidth is not applied in the search below, which keeps
        # the fixed 30 m cutoff.)
        minwidth = bfq / 100. + 30

        # Get nearest width from the datasource
        # Uses Euclidean distance to find the nearest point in the source.
        # `try` is included since the width database may not contain data
        # in the basin; such points keep the 30 m default

        xmin = x - thresh
        ymin = y - thresh
        xmax = x + thresh
        ymax = y + thresh

        dat, geo = gdalutils.clip_raster(fwidth, xmin, ymin, xmax, ymax)
        try:
            iy, ix = np.where(dat > 30)
        except Exception:
            print('Error: point', i, x, y)
            print('Vals:', bfq, thresh, dat)
            continue
        xdat = geo[8][ix]
        ydat = geo[9][iy]

        try:
            dis, ind = misc_utils.near_euc(xdat, ydat, (x, y))
            width[i] = dat[iy[ind], ix[ind]]
        except ValueError:
            # no candidate cells; keep the 30 m default
            continue


    rec['width'] = width
    # Group river network per link (check_width as defined in getwidths above)
    rec.loc[:, 'width'] = rec.groupby('link').width.apply(check_width)

    # Write out files
    print('Writing out data')
    name1 = output + '.shp'

    w = shapefile.Writer(shapefile.POINT)
    w.field('x')
    w.field('y')
    w.field('width')
    # Writing resulting .shp file
    for x, y, width in zip(rec['lon'], rec['lat'], rec['width']):
        w.point(x, y)
        w.record(x, y, width)
    w.save("%s.shp" % output)

    # write .prj file
    prj = open("%s.prj" % output, "w")
    srs = osr.SpatialReference()
    srs.ImportFromProj4(proj)
    prj.write(srs.ExportToWkt())
    prj.close()

    nodata = -9999
    fmt = "GTiff"
    name2 = output + '.tif'
    subprocess.call([
        "gdal_rasterize", "-a_nodata",
        str(nodata), "-of", fmt, "-ot", "Float32", "-co", "COMPRESS=DEFLATE",
        "-tr",
        str(geo1[6]),
        str(geo1[7]), "-a", "width", "-a_srs", proj, "-te",
        str(geo1[0]),
        str(geo1[1]),
        str(geo1[2]),
        str(geo1[3]), name1, name2
    ])
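For intuition about the variable threshold: the logarithmic and linear terms grow slowly, so the search radius stays near the two-cell padding except for very large rivers. A quick check of the formula (the 3 arc-second resolution is an illustrative assumption):

import numpy as np

xres = yres = 1. / 1200.  # assumed ~3 arc-second grid
for bfq in (1., 100., 10000.):
    thresh = np.log(bfq) / 1000. + bfq / 1000000. \
        + 2 * abs(xres) + 2 * abs(yres)
    print(bfq, round(thresh, 5))
# bfq = 1     -> ~0.00333 deg (just the two-cell padding)
# bfq = 100   -> ~0.00804 deg
# bfq = 10000 -> ~0.02254 deg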
Example #7
def basinsplit(ncatch, outdir, cattif, demtif, acctif, nettif, wthtif, dirtif,
               aretif, ordtif, tretxt, cootxt):

    # Get extent of each catchment and its area
    catarr = gdalutils.get_data(cattif)

    try:
        dat = catarr == ncatch
    except Exception:
        sys.exit('ERROR invalid basin number')

    # Use gdal to mask out basin in network and direction tifs
    nettmp = 'net_tmp.tif'
    dirtmp = 'dir_tmp.tif'
    acctmp = 'acc_tmp.tif'
    ordtmp = 'ord_tmp.tif'
    cmd = [
        'gdal_calc.py', '--calc', 'where(B==' + str(ncatch) + ',A,0)',
        '--format', 'GTiff', '--type', 'Int16', '--NoDataValue', '-9999', '-B',
        cattif, '--B_band', '1', '-A', nettif, '--A_band', '1', '--co',
        'COMPRESS=DEFLATE', '--outfile', nettmp
    ]
    subprocess.call(cmd)
    cmd = [
        'gdal_calc.py', '--calc', 'where(B==' + str(ncatch) + ',A,0)',
        '--format', 'GTiff', '--type', 'Int16', '--NoDataValue', '-9999', '-B',
        cattif, '--B_band', '1', '-A', dirtif, '--A_band', '1', '--co',
        'COMPRESS=DEFLATE', '--outfile', dirtmp
    ]
    subprocess.call(cmd)
    cmd = [
        'gdal_calc.py', '--calc', 'where(B==' + str(ncatch) + ',A,0)',
        '--format', 'GTiff', '--type', 'Float32', '--NoDataValue', '-9999',
        '-B', cattif, '--B_band', '1', '-A', acctif, '--A_band', '1', '--co',
        'COMPRESS=DEFLATE', '--outfile', acctmp
    ]
    subprocess.call(cmd)
    cmd = [
        'gdal_calc.py', '--calc', 'where(B==' + str(ncatch) + ',A,0)',
        '--format', 'GTiff', '--type', 'Int16', '--NoDataValue', '-9999', '-B',
        cattif, '--B_band', '1', '-A', ordtif, '--A_band', '1', '--co',
        'COMPRESS=DEFLATE', '--outfile', ordtmp
    ]
    subprocess.call(cmd)
    print('separated basin for nettif, dirtif, acctif, ordtif')

    catgeo = gdalutils.get_geo(cattif)
    area = gdalutils.get_data(aretif)
    row, col = np.where(dat)
    _sum = np.sum(dat * area)
    # clean up
    del (catarr, dat, area)

    if _sum >= 100:  # make sure the basin is larger than 100 km2

        xmin = catgeo[8][min(col)]
        xmax = catgeo[8][max(col)]
        ymin = catgeo[9][max(row)]
        ymax = catgeo[9][min(row)]
        # Clean up
        del (row, col)

        # Clip input rasters
        netarr_tmp, netgeo_tmp = gdalutils.clip_raster(nettmp, xmin, ymin,
                                                       xmax, ymax)
        net_size = (netarr_tmp > 0).sum()
        print('loaded net array')

        if net_size >= 35:  # be sure river network is long enough

            # Load tree and coord files
            tree = misc_utils.read_tree_taudem(tretxt)
            lfp_coor = misc_utils.read_coord_taudem(cootxt)
            lfp_coor.index.name = 'index'

            # Get list of x,y points in river network in basin
            iy, ix = np.where(netarr_tmp > 0)
            Xrav = netgeo_tmp[8][ix]
            Yrav = netgeo_tmp[9][iy]
            # Clean up memory
            del (netarr_tmp)

            # Clipping tree file based on segments within basin
            print('Clipping tree file')
            lfp_tree_rows = []
            for i in tree.index:
                sta = tree.loc[i, 'start_pnt']
                end = tree.loc[i, 'end_pnt']
                lon1 = lfp_coor.loc[sta, 'lon']
                lat1 = lfp_coor.loc[sta, 'lat']
                lon2 = lfp_coor.loc[end, 'lon']
                lat2 = lfp_coor.loc[end, 'lat']
                dis1, ind1 = misc_utils.near_euc(Xrav, Yrav, (lon1, lat1))
                dis2, ind2 = misc_utils.near_euc(Xrav, Yrav, (lon2, lat2))
                # the default 0.01 failed to find link number 3504, so the
                # tolerance was increased to 0.012 to pick up the missing link
                if (dis1 <= 0.012) & (dis2 <= 0.012):
                    lfp_tree_rows.append(tree.loc[i, :])
            lfp_tree = pd.DataFrame(lfp_tree_rows)[[
                'link_no', 'start_pnt', 'end_pnt', 'frst_ds', 'frst_us',
                'scnd_us', 'strahler', 'mon_pnt', 'shreve'
            ]]
            lfp_tree.index.name = 'index'

            # Creating folder per basin
            ncatchstr = "%03d" % ncatch
            folder = outdir + "/" + ncatchstr
            create_out_folder(folder)

            # Writing clipped coord and tree files
            print('Writing text files')
            fnametre = folder + "/" + ncatchstr + "_tre.csv"
            fnamecoo = folder + "/" + ncatchstr + "_coo.csv"
            lfp_coor.to_csv(fnamecoo)
            lfp_tree.to_csv(fnametre, float_format='%i')
            # clean up memory
            del (lfp_coor, lfp_tree)

            # Creating rec dataframe
            rec = connections(fnametre, fnamecoo)

            #  Writing XXX_rec.csv file
            fnamerec = folder + "/" + ncatchstr + "_rec.csv"
            rec.to_csv(fnamerec)

            # Get extent from rec dataframe
            xmin = rec['lon'].min()
            xmax = rec['lon'].max()
            ymin = rec['lat'].min()
            ymax = rec['lat'].max()
            # Clean up memory
            del (rec)

            # Get fixed extent
            # _dir    = getdir(rec,dirtif)
            # _dirlet = getdirletter(_dir)
            # xmin,ymin,xmax,ymax = get_extent_outlet(_dirlet,0.1,xmin,ymin,xmax,ymax)

            # Clipping rasters
            print('Loading and clipping rasters')
            nodata = -9999
            # Creating output names
            fnamedem = folder + "/" + ncatchstr + "_dem.tif"
            fnameacc = folder + "/" + ncatchstr + "_acc.tif"
            fnamenet = folder + "/" + ncatchstr + "_net.tif"
            fnamewth = folder + "/" + ncatchstr + "_wth.tif"
            fnamedir = folder + "/" + ncatchstr + "_dir.tif"
            fnameord = folder + "/" + ncatchstr + "_ord.tif"

            # Load and write each array before removing it from memory
            demarrcli, demgeocli = gdalutils.clip_raster(
                demtif, xmin, ymin, xmax, ymax)
            gdalutils.write_raster(demarrcli, fnamedem, demgeocli, "Float32",
                                   nodata)
            del (demarrcli, demgeocli)

            accarrcli, accgeocli = gdalutils.clip_raster(
                acctmp, xmin, ymin, xmax, ymax)
            gdalutils.write_raster(accarrcli, fnameacc, accgeocli, "Float32",
                                   nodata)
            del (accarrcli, accgeocli)

            wtharrcli, wthgeocli = gdalutils.clip_raster(
                wthtif, xmin, ymin, xmax, ymax)
            gdalutils.write_raster(wtharrcli, fnamewth, wthgeocli, "Float32",
                                   nodata)
            del (wtharrcli, wthgeocli)

            dirarrcli, dirgeocli = gdalutils.clip_raster(
                dirtmp, xmin, ymin, xmax, ymax)
            gdalutils.write_raster(dirarrcli, fnamedir, dirgeocli, "Int16",
                                   nodata)
            del (dirarrcli, dirgeocli)

            netarrcli, netgeocli = gdalutils.clip_raster(
                nettmp, xmin, ymin, xmax, ymax)
            gdalutils.write_raster(netarrcli, fnamenet, netgeocli, "Int16",
                                   nodata)
            del (netarrcli, netgeocli)

            ordarrcli, ordgeocli = gdalutils.clip_raster(
                ordtmp, xmin, ymin, xmax, ymax)
            gdalutils.write_raster(ordarrcli, fnameord, ordgeocli, "Int16",
                                   nodata)
            del (ordarrcli, ordgeocli)

            # Finally delete the nettmp and dirtmp files
            os.remove(nettmp)
            os.remove(dirtmp)
            os.remove(ordtmp)
            os.remove(acctmp)

        else:
            print("NOT PROCESSED: River network shorter than 35 pixels : " +
                  str(net_size) + " pixels in basin number " + str(ncatch))
    else:
        print("NOT PROCESSED: Basin area smaller than 100 km2 : " + str(_sum) +
              " km2 in basin number " + str(ncatch))