Example 1
    def hydrological_radius(acc_file, radius_file, storm_probability='T2'):
        """Calculate hydrological radius."""
        acc_r = RasterUtilClass.read_raster(acc_file)
        xsize = acc_r.nCols
        ysize = acc_r.nRows
        nodata_value = acc_r.noDataValue
        cellsize = acc_r.dx
        data = acc_r.data
        coe_table = {"T2": [0.05, 0.48],
                     "T10": [0.12, 0.52],
                     "T100": [0.18, 0.55]}
        ap = coe_table[storm_probability][0]
        bp = coe_table[storm_probability][1]

        def radius_cal(acc):
            """Calculate hydrological radius"""
            if abs(acc - nodata_value) < UTIL_ZERO:
                return DEFAULT_NODATA
            return numpy.power(ap * ((acc + 1) * cellsize * cellsize / 1000000.), bp)

        radius_cal_numpy = numpy.frompyfunc(radius_cal, 1, 1)
        radius = radius_cal_numpy(data)

        RasterUtilClass.write_gtiff_file(radius_file, ysize, xsize, radius,
                                         acc_r.geotrans, acc_r.srs,
                                         DEFAULT_NODATA, GDT_Float32)
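For orientation, the kernel above reduces to R = (ap * A)^bp, where A = (acc + 1) * cellsize^2 / 10^6 is the upstream drainage area in km^2. A minimal stand-alone check, assuming the T2 coefficients, a 30 m cell size, and an accumulation of 999 cells (illustrative values only, not taken from any raster):

    import numpy

    ap, bp = 0.05, 0.48      # T2 coefficients from coe_table
    cellsize = 30.0          # assumed cell size in metres
    acc = 999                # assumed flow accumulation (upstream cell count)
    area_km2 = (acc + 1) * cellsize * cellsize / 1000000.  # = 0.9 km^2
    print(round(numpy.power(ap * area_km2, bp), 3))        # ~0.226 (hydrological radius, m)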
Example 2
    def flow_velocity(slope_file, radius_file, manning_file, velocity_file):
        """velocity."""
        slp_r = RasterUtilClass.read_raster(slope_file)
        slp_data = slp_r.data
        xsize = slp_r.nCols
        ysize = slp_r.nRows
        nodata_value = slp_r.noDataValue

        rad_data = RasterUtilClass.read_raster(radius_file).data
        man_data = RasterUtilClass.read_raster(manning_file).data

        vel_max = 3.0
        vel_min = 0.0001

        def velocity_cal(rad, man, slp):
            """Calculate velocity"""
            if abs(slp - nodata_value) < UTIL_ZERO:
                return DEFAULT_NODATA
            # print(rad, man, slp)
            tmp = numpy.power(man, -1) * numpy.power(rad, 2. / 3.) * numpy.power(slp, 0.5)
            if tmp < vel_min:
                return vel_min
            if tmp > vel_max:
                return vel_max
            return tmp

        velocity_cal_numpy = numpy.frompyfunc(velocity_cal, 3, 1)
        velocity = velocity_cal_numpy(rad_data, man_data, slp_data)
        RasterUtilClass.write_gtiff_file(velocity_file, ysize, xsize, velocity, slp_r.geotrans,
                                         slp_r.srs, DEFAULT_NODATA, GDT_Float32)
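The inner function above is Manning's equation, v = R^(2/3) * S^(1/2) / n, clamped to [vel_min, vel_max]. A small stand-alone check with assumed values (not read from the rasters above):

    import numpy

    n, rad, slp = 0.1, 0.23, 0.05      # assumed Manning's n, hydraulic radius (m), slope (m/m)
    vel = numpy.power(n, -1) * numpy.power(rad, 2. / 3.) * numpy.power(slp, 0.5)
    vel = min(max(vel, 0.0001), 3.0)   # clamp to [vel_min, vel_max]
    print(round(vel, 2))               # ~0.84 m/s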
Example 3
    def generate_lat_raster(cfg):
        """Generate latitude raster"""
        dem_file = cfg.spatials.filldem
        ds = RasterUtilClass.read_raster(dem_file)
        src_srs = ds.srs
        if not src_srs.ExportToProj4():
            raise ValueError('The source raster %s has no coordinate system, '
                             'which is required!' % dem_file)
        dst_srs = osr_SpatialReference()
        dst_srs.ImportFromEPSG(4326)  # WGS84
        # dst_wkt = dst_srs.ExportToWkt()
        transform = osr_CoordinateTransformation(src_srs, dst_srs)

        point_ll = ogr_CreateGeometryFromWkt('POINT (%f %f)' % (ds.xMin, ds.yMin))
        point_ur = ogr_CreateGeometryFromWkt('POINT (%f %f)' % (ds.xMax, ds.yMax))

        point_ll.Transform(transform)
        point_ur.Transform(transform)

        lower_lat = point_ll.GetY()
        up_lat = point_ur.GetY()

        rows = ds.nRows
        cols = ds.nCols
        delta_lat = (up_lat - lower_lat) / float(rows)

        def cal_cell_lat(row, col):
            """calculate latitude of cell by row number"""
            return up_lat - (row + 0.5) * delta_lat

        data_lat = fromfunction(cal_cell_lat, (rows, cols))
        data_lat = where(ds.validZone, data_lat, ds.data)
        RasterUtilClass.write_gtiff_file(cfg.spatials.cell_lat, rows, cols, data_lat,
                                         ds.geotrans, ds.srs,
                                         ds.noDataValue, GDT_Float32)
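Note that under GDAL 3.x the EPSG:4326 target above defaults to latitude/longitude axis order, so GetY() on a transformed point may no longer return latitude as it does under GDAL 2.x. A minimal sketch, not part of the original function, of pinning the traditional lon/lat order before building the transformation (assuming the standard osgeo bindings):

    from osgeo import osr

    dst_srs = osr.SpatialReference()
    dst_srs.ImportFromEPSG(4326)  # WGS84
    if hasattr(dst_srs, 'SetAxisMappingStrategy'):  # available in GDAL >= 3.0
        dst_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
    # transform = osr.CoordinateTransformation(src_srs, dst_srs) then keeps x = lon, y = lat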
Example 4
 def ridge_without_flowin_cell(self):
     """Find the original ridge sources that have no flow-in cells."""
     for row in range(self.nrows):
         for col in range(self.ncols):
             tempdir = self.flowdir_data[row][col]
             if MathClass.floatequal(tempdir, self.nodata_flow):
                 self.rdgsrc_data[row][col] = DEFAULT_NODATA
                 continue
             if self.flowmodel == 1:  # Dinf flow model
                 temp_coor = DinfUtil.downstream_index_dinf(
                     tempdir, row, col)
                 for temprow, tempcol in temp_coor:
                     if 0 <= temprow < self.nrows and 0 <= tempcol < self.ncols:
                         self.rdgsrc_data[temprow][tempcol] = DEFAULT_NODATA
                     else:
                         self.rdgsrc_data[row][col] = DEFAULT_NODATA
             else:  # D8 flow model
                 temprow, tempcol = D8Util.downstream_index(
                     tempdir, row, col)
                 if 0 <= temprow < self.nrows and 0 <= tempcol < self.ncols:
                     self.rdgsrc_data[temprow][tempcol] = DEFAULT_NODATA
                 else:
                     self.rdgsrc_data[row][col] = DEFAULT_NODATA
     RasterUtilClass.write_gtiff_file(self.rdgorg, self.nrows, self.ncols,
                                      self.rdgsrc_data, self.geotrans,
                                      self.srs, DEFAULT_NODATA, 6)
Example 5
    def calculate_channel_width(acc_file, chwidth_file):
        """Calculate channel width."""
        acc_r = RasterUtilClass.read_raster(acc_file)
        xsize = acc_r.nCols
        ysize = acc_r.nRows
        dx = acc_r.dx
        cell_area = dx * dx

        # storm frequency   a      b
        # 2                 1      0.56
        # 10                1.2    0.56
        # 100               1.4    0.56
        a = 1.2
        b = 0.56
        # TODO: Figure out what this means, and move it to text.py or config.py. LJ

        tmp_ones = numpy.ones((ysize, xsize))
        width = tmp_ones * DEFAULT_NODATA
        valid_values = numpy.where(acc_r.validZone, acc_r.data, tmp_ones)
        width = numpy.where(
            acc_r.validZone,
            numpy.power((a * (valid_values + 1) * cell_area / 1000000.), b),
            width)
        RasterUtilClass.write_gtiff_file(chwidth_file, ysize, xsize, width,
                                         acc_r.geotrans, acc_r.srs,
                                         DEFAULT_NODATA, GDT_Float32)
        return width
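With the coefficients above (a = 1.2, b = 0.56, i.e. the 10-year storm row of the table), the width works out as W = (a * A)^b with A the drainage area in km^2. A tiny stand-alone check with an assumed area (illustrative only):

    import numpy

    a, b = 1.2, 0.56       # 10-year storm coefficients from the table above
    area_km2 = 10.0        # assumed drainage area, km^2
    print(round(numpy.power(a * area_km2, b), 2))  # ~4.02 m channel width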
Example 6
    def flow_velocity(slope_file, radius_file, manning_file, velocity_file):
        """velocity."""
        slp_r = RasterUtilClass.read_raster(slope_file)
        slp_data = slp_r.data
        xsize = slp_r.nCols
        ysize = slp_r.nRows
        nodata_value = slp_r.noDataValue

        rad_data = RasterUtilClass.read_raster(radius_file).data
        man_data = RasterUtilClass.read_raster(manning_file).data

        vel_max = 3.0
        vel_min = 0.0001

        def velocity_cal(rad, man, slp):
            """Calculate velocity"""
            if abs(slp - nodata_value) < UTIL_ZERO:
                return DEFAULT_NODATA
            # print(rad, man, slp)
            tmp = numpy.power(man, -1) * numpy.power(
                rad, 2. / 3.) * numpy.power(slp, 0.5)
            if tmp < vel_min:
                return vel_min
            if tmp > vel_max:
                return vel_max
            return tmp

        velocity_cal_numpy = numpy.frompyfunc(velocity_cal, 3, 1)
        velocity = velocity_cal_numpy(rad_data, man_data, slp_data)
        RasterUtilClass.write_gtiff_file(velocity_file, ysize, xsize, velocity,
                                         slp_r.geotrans, slp_r.srs,
                                         DEFAULT_NODATA, GDT_Float32)
Example 7
    def hydrological_radius(acc_file, radius_file, storm_probability='T2'):
        """Calculate hydrological radius."""
        acc_r = RasterUtilClass.read_raster(acc_file)
        xsize = acc_r.nCols
        ysize = acc_r.nRows
        nodata_value = acc_r.noDataValue
        cellsize = acc_r.dx
        data = acc_r.data
        coe_table = {
            "T2": [0.05, 0.48],
            "T10": [0.12, 0.52],
            "T100": [0.18, 0.55]
        }
        ap = coe_table[storm_probability][0]
        bp = coe_table[storm_probability][1]

        def radius_cal(acc):
            """Calculate hydrological radius"""
            if abs(acc - nodata_value) < UTIL_ZERO:
                return DEFAULT_NODATA
            return numpy.power(
                ap * ((acc + 1) * cellsize * cellsize / 1000000.), bp)

        radius_cal_numpy = numpy.frompyfunc(radius_cal, 1, 1)
        radius = radius_cal_numpy(data)

        RasterUtilClass.write_gtiff_file(radius_file, ysize, xsize, radius,
                                         acc_r.geotrans, acc_r.srs,
                                         DEFAULT_NODATA, GDT_Float32)
Example 8
 def output_hillslope(method_id):
     """Output hillslope according different stream cell value method."""
     for (tmp_row, tmp_col) in stream_coors:
         tmp_hillslp_ids = DelineateHillslope.cal_hs_codes(max_id,
                                                           stream_data[tmp_row][tmp_col])
         if 0 < method_id < 3:
             hillslope_mtx[tmp_row][tmp_col] = tmp_hillslp_ids[method_id]
             # is head stream cell?
             if (tmp_row, tmp_col) in headstream_coors:
                 hillslope_mtx[tmp_row][tmp_col] = tmp_hillslp_ids[0]
         elif method_id == 3:
             hillslope_mtx[tmp_row][tmp_col] = DEFAULT_NODATA
     # Output to raster file
     hillslope_out_new = hillslope_out
     dirpath = os.path.dirname(hillslope_out_new) + os.path.sep
     corename = FileClass.get_core_name_without_suffix(hillslope_out_new)
     if method_id == 1:
         hillslope_out_new = dirpath + corename + '_right.tif'
     elif method_id == 2:
         hillslope_out_new = dirpath + corename + '_left.tif'
     elif method_id == 3:
         hillslope_out_new = dirpath + corename + '_nodata.tif'
     RasterUtilClass.write_gtiff_file(hillslope_out_new, nrows, ncols,
                                      hillslope_mtx,
                                      geotrans, srs, DEFAULT_NODATA, datatype)
Example 9
    def flow_time_to_stream(streamlink,
                            velocity,
                            flow_dir_file,
                            t0_s_file,
                            flow_dir_code='TauDEM'):
        """Calculate flow time to the workflow channel from each grid cell."""
        strlk_data = RasterUtilClass.read_raster(streamlink).data

        vel_r = RasterUtilClass.read_raster(velocity)
        vel_data = vel_r.data
        xsize = vel_r.nCols
        ysize = vel_r.nRows
        # noDataValue = vel_r.noDataValue

        weight = numpy.where(strlk_data <= 0, numpy.ones((ysize, xsize)),
                             numpy.zeros((ysize, xsize)))
        traveltime = numpy.where(vel_r.validZone, numpy.zeros((ysize, xsize)),
                                 vel_data)
        flowlen = TerrainUtilClass.calculate_flow_length(
            flow_dir_file, weight, flow_dir_code)
        traveltime = numpy.where(vel_r.validZone,
                                 flowlen / (vel_data * 5. / 3.) / 3600.,
                                 traveltime)
        RasterUtilClass.write_gtiff_file(t0_s_file, ysize, xsize, traveltime,
                                         vel_r.geotrans, vel_r.srs,
                                         DEFAULT_NODATA, GDT_Float32)
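The travel time above is the flow length divided by the kinematic-wave celerity (5/3 of the flow velocity), converted from seconds to hours. A quick stand-alone check with assumed values:

    flowlen = 3000.0           # assumed flow length to the channel, m
    vel = 0.8                  # assumed flow velocity, m/s
    celerity = vel * 5. / 3.   # kinematic wave celerity, m/s
    print(round(flowlen / celerity / 3600., 3))  # ~0.625 h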
Example 10
    def generate_lat_raster(cfg):
        """Generate latitude raster"""
        dem_file = cfg.spatials.filldem
        ds = RasterUtilClass.read_raster(dem_file)
        src_srs = ds.srs
        if not src_srs.ExportToProj4():
            raise ValueError('The source raster %s has no coordinate system, '
                             'which is required!' % dem_file)
        dst_srs = osr_SpatialReference()
        dst_srs.ImportFromEPSG(4326)  # WGS84
        # dst_wkt = dst_srs.ExportToWkt()
        transform = osr_CoordinateTransformation(src_srs, dst_srs)

        point_ll = ogr_CreateGeometryFromWkt('POINT (%f %f)' % (ds.xMin, ds.yMin))
        point_ur = ogr_CreateGeometryFromWkt('POINT (%f %f)' % (ds.xMax, ds.yMax))

        point_ll.Transform(transform)
        point_ur.Transform(transform)

        lower_lat = point_ll.GetY()
        up_lat = point_ur.GetY()

        rows = ds.nRows
        cols = ds.nCols
        delta_lat = (up_lat - lower_lat) / float(rows)

        def cal_cell_lat(row, col):
            """calculate latitude of cell by row number"""
            return up_lat - (row + 0.5) * delta_lat

        data_lat = fromfunction(cal_cell_lat, (rows, cols))
        data_lat = where(ds.validZone, data_lat, ds.data)
        RasterUtilClass.write_gtiff_file(cfg.spatials.cell_lat, rows, cols, data_lat,
                                         ds.geotrans, ds.srs,
                                         ds.noDataValue, GDT_Float32)
Example 11
 def output_hillslope(method_id):
     """Output hillslope according different stream cell value method."""
     for (tmp_row, tmp_col) in stream_coors:
         tmp_hillslp_ids = Hillslopes.cal_hs_codes(max_id, stream_data[tmp_row][tmp_col])
         if 0 < method_id < 3:
             hillslope_mtx[tmp_row][tmp_col] = tmp_hillslp_ids[method_id]
             # is head stream cell?
             if (tmp_row, tmp_col) in headstream_coors:
                 hillslope_mtx[tmp_row][tmp_col] = tmp_hillslp_ids[0]
         elif method_id == 3:
             hillslope_mtx[tmp_row][tmp_col] = DEFAULT_NODATA
         elif method_id == 4:
             hillslope_mtx[tmp_row][tmp_col] = 0
     # Output to raster file
     hillslope_out_new = hillslope_out
     dirpath = os.path.dirname(hillslope_out_new) + os.path.sep
     corename = FileClass.get_core_name_without_suffix(hillslope_out_new)
     if method_id == 1:
         hillslope_out_new = dirpath + corename + '_right.tif'
     elif method_id == 2:
         hillslope_out_new = dirpath + corename + '_left.tif'
     elif method_id == 3:
         hillslope_out_new = dirpath + corename + '_nodata.tif'
     elif method_id == 4:
         hillslope_out_new = dirpath + corename + '_zero.tif'
     RasterUtilClass.write_gtiff_file(hillslope_out_new, nrows, ncols,
                                      hillslope_mtx,
                                      geotrans, srs, DEFAULT_NODATA, datatype)
Example 12
    def generate_runoff_coefficent(maindb, landuse_file, slope_file, soil_texture_file,
                                   runoff_coeff_file, landuse_shp, imper_perc=0.3):
        """Generate potential runoff coefficient."""
        # read landuselookup table from MongoDB
        prc_fields = ['PRC_ST%d' % (i,) for i in range(1, 13)]
        sc_fields = ['SC_ST%d' % (i,) for i in range(1, 13)]
        query_result = maindb['LANDUSELOOKUP'].find()
        if query_result is None:
            raise RuntimeError("LanduseLoop Collection is not existed or empty!")

        runoff_c0 = dict()
        runoff_s0 = dict()
        for row in query_result:
            tmpid = row.get('LANDUSE_ID')
            runoff_c0[tmpid] = [float(row.get(item)) for item in prc_fields]
            runoff_s0[tmpid] = [float(row.get(item)) for item in sc_fields]

        landu_raster = RasterUtilClass.read_raster(landuse_file)
        landu_data = landu_raster.data
        nodata_value1 = landu_raster.noDataValue
        xsize = landu_raster.nCols
        ysize = landu_raster.nRows
        nodata_value2 = landu_raster.noDataValue

        slo_data = RasterUtilClass.read_raster(slope_file).data
        soil_texture_array = RasterUtilClass.read_raster(soil_texture_file).data
        id_omited = []

        def coef_cal(lu_id, soil_texture, slope):
            """Calculate runoff coefficient by landuse, soil texture and slope."""
            if abs(lu_id - nodata_value1) < UTIL_ZERO or int(lu_id) < 0:
                return nodata_value2
            if int(lu_id) not in list(runoff_c0.keys()):
                if int(lu_id) not in id_omited:
                    print('The landuse ID: %d does not exist.' % int(lu_id))
                    id_omited.append(int(lu_id))
            stid = int(soil_texture) - 1
            c0 = runoff_c0[int(lu_id)][stid]
            s0 = runoff_s0[int(lu_id)][stid] / 100.
            slp = slope

            if slp + s0 < 0.0001:
                return c0
            coef1 = (1 - c0) * slp / (slp + s0)
            coef2 = c0 + coef1
            # TODO, Check if it is (lu_id >= 98), by lj
            if int(lu_id) == 106 or int(lu_id) == 107 or int(lu_id) == 105:
                return coef2 * (1 - imper_perc) + imper_perc
            else:
                return coef2

        coef_cal_numpy = np_frompyfunc(coef_cal, 3, 1)
        coef = coef_cal_numpy(landu_data, soil_texture_array, slo_data)

        RasterUtilClass.write_gtiff_file(runoff_coeff_file, ysize, xsize, coef,
                                         landu_raster.geotrans, landu_raster.srs, nodata_value2,
                                         GDT_Float32)
        runoff_co_csv = r'D:\SEIMS\data\zts\data_prepare\spatial\test\runoff_co.csv'
        RasterUtilClass.count_raster_moist(runoff_coeff_file, landuse_shp, runoff_co_csv)
Example 13
    def subbasin_boundary_cells(self, subbsn_perc):
        """Subbasin boundary cells that are potential ridge sources."""
        dir_deltas = FlowModelConst.d8delta_ag.values()
        subbsn_elevs = dict()

        def add_elev_to_subbsn_elevs(sid, elev):
            if sid not in subbsn_elevs:
                subbsn_elevs[sid] = [elev]
            else:
                subbsn_elevs[sid].append(elev)

        for row in range(self.nrows):
            for col in range(self.ncols):
                if MathClass.floatequal(self.subbsn_data[row][col],
                                        self.nodata_subbsn):
                    continue
                for r, c in dir_deltas:
                    new_row = row + r
                    new_col = col + c
                    if 0 <= new_row < self.nrows and 0 <= new_col < self.ncols:
                        if MathClass.floatequal(
                                self.subbsn_data[new_row][new_col],
                                self.nodata_subbsn):
                            subbsnid = self.subbsn_data[row][col]
                            self.rdgpot[row][col] = subbsnid
                            add_elev_to_subbsn_elevs(subbsnid,
                                                     self.elev_data[row][col])
                        elif not MathClass.floatequal(
                                self.subbsn_data[row][col],
                                self.subbsn_data[new_row][new_col]):
                            subbsnid = self.subbsn_data[row][col]
                            subbsnid2 = self.subbsn_data[new_row][new_col]
                            self.rdgpot[row][col] = subbsnid
                            self.rdgpot[new_row][new_col] = subbsnid2
                            add_elev_to_subbsn_elevs(subbsnid,
                                                     self.elev_data[row][col])
                            add_elev_to_subbsn_elevs(
                                subbsnid2, self.elev_data[new_row][new_col])

        RasterUtilClass.write_gtiff_file(self.boundsrc, self.nrows, self.ncols,
                                         self.rdgpot, self.geotrans, self.srs,
                                         DEFAULT_NODATA, 6)
        subbsn_elevs_thresh = dict()
        for sid, elevs in list(subbsn_elevs.items()):
            tmpelev = numpy.array(elevs)
            tmpelev.sort()
            subbsn_elevs_thresh[sid] = tmpelev[int(len(tmpelev) * subbsn_perc)]
        for row in range(self.nrows):
            for col in range(self.ncols):
                if MathClass.floatequal(self.rdgpot[row][col], DEFAULT_NODATA):
                    continue
                if self.elev_data[row][col] < subbsn_elevs_thresh[
                        self.subbsn_data[row][col]]:
                    self.rdgpot[row][col] = DEFAULT_NODATA
        RasterUtilClass.write_gtiff_file(self.boundsrcfilter, self.nrows,
                                         self.ncols, self.rdgpot,
                                         self.geotrans, self.srs,
                                         DEFAULT_NODATA, 6)
Example 14
    def std_of_flow_time_to_stream(streamlink,
                                   flow_dir_file,
                                   slope,
                                   radius,
                                   velocity,
                                   delta_s_file,
                                   flow_dir_code='TauDEM'):
        """Generate standard deviation of t0_s (flow time to the workflow channel from each cell).
        """
        strlk_r = RasterUtilClass.read_raster(streamlink)
        strlk_data = strlk_r.data
        rad_data = RasterUtilClass.read_raster(radius).data
        slo_data = RasterUtilClass.read_raster(slope).data

        vel_r = RasterUtilClass.read_raster(velocity)
        vel_data = vel_r.data
        xsize = vel_r.nCols
        ysize = vel_r.nRows
        nodata_value = vel_r.noDataValue

        def initial_variables(vel, strlk, slp, rad):
            """initial variables"""
            if abs(vel - nodata_value) < UTIL_ZERO:
                return DEFAULT_NODATA
            if strlk <= 0:
                tmp_weight = 1
            else:
                tmp_weight = 0
            # 0 is river
            if slp < 0.0005:
                slp = 0.0005
            # dampGrid = vel * rad / (slp / 100. * 2.) # No need to divide 100
            # in my view. By LJ
            damp_grid = vel * rad / (slp * 2.)
            celerity = vel * 5. / 3.
            tmp_weight *= damp_grid * 2. / numpy.power(celerity, 3.)
            return tmp_weight

        initial_variables_numpy = numpy.frompyfunc(initial_variables, 4, 1)
        weight = initial_variables_numpy(vel_data, strlk_data, slo_data,
                                         rad_data)

        delta_s_sqr = TerrainUtilClass.calculate_flow_length(
            flow_dir_file, weight, flow_dir_code)

        def cal_delta_s(vel, sqr):
            """Calculate delta s"""
            if abs(vel - nodata_value) < UTIL_ZERO:
                return nodata_value
            else:
                return sqrt(sqr) / 3600.

        cal_delta_s_numpy = numpy.frompyfunc(cal_delta_s, 2, 1)
        delta_s = cal_delta_s_numpy(vel_data, delta_s_sqr)

        RasterUtilClass.write_gtiff_file(delta_s_file, ysize, xsize, delta_s,
                                         strlk_r.geotrans, strlk_r.srs,
                                         DEFAULT_NODATA, GDT_Float32)
Example 15
    def calculate_latitude_dependent_parameters(lat_file, min_dayl_file,
                                                dormhr_file, dorm_hr):
        """
        Calculate latitude-dependent parameters, including:
           1. minimum daylength (daylmn), 2. day length threshold for dormancy (dormhr)
        """
        # calculate minimum daylength, from readwgn.f of SWAT
        # daylength=2*acos(-tan(sd)*tan(lat))/omega
        # where solar declination, sd, = -23.5 degrees for minimum daylength in
        # northern hemisphere and -tan(sd) = .4348
        # absolute value is taken of tan(lat) to convert southern hemisphere
        # values to northern hemisphere
        # the angular velocity of the earth's rotation, omega, = 15 deg/hr or
        # 0.2618 rad/hr and 2/0.2618 = 7.6394
        cell_lat_r = RasterUtilClass.read_raster(lat_file)
        lat_data = cell_lat_r.data
        # daylmn_data = cell_lat_r.data
        zero = numpy.zeros((cell_lat_r.nRows, cell_lat_r.nCols))
        # nodata = numpy.ones((cell_lat_r.nRows, cell_lat_r.nCols)) * cell_lat_r.noDataValue
        # convert degrees to radians (2pi/360=1/57.296)
        daylmn_data = 0.4348 * numpy.abs(numpy.tan(lat_data / 57.296))
        condition = daylmn_data < 1.
        daylmn_data = numpy.where(condition, numpy.arccos(daylmn_data), zero)
        # condition2 = lat_data != cell_lat_r.noDataValue
        daylmn_data *= 7.6394
        daylmn_data = numpy.where(cell_lat_r.validZone, daylmn_data, lat_data)
        RasterUtilClass.write_gtiff_file(min_dayl_file, cell_lat_r.nRows,
                                         cell_lat_r.nCols, daylmn_data,
                                         cell_lat_r.geotrans, cell_lat_r.srs,
                                         cell_lat_r.noDataValue, GDT_Float32)

        def cal_dorm_hr(lat):
            """calculate day length threshold for dormancy"""
            if lat == cell_lat_r.noDataValue:
                return cell_lat_r.noDataValue
            else:
                if 20. <= lat <= 40:
                    return (numpy.abs(lat - 20.)) / 20.
                elif lat > 40.:
                    return 1.
                elif lat < 20.:
                    return -1.

        cal_dorm_hr_numpy = numpy.frompyfunc(cal_dorm_hr, 1, 1)

        # dormhr_data = numpy.copy(lat_data)
        if dorm_hr < -UTIL_ZERO:
            dormhr_data = cal_dorm_hr_numpy(lat_data)
        else:
            dormhr_data = numpy.where(
                cell_lat_r.validZone,
                numpy.ones((cell_lat_r.nRows, cell_lat_r.nCols)) * dorm_hr,
                lat_data)
        RasterUtilClass.write_gtiff_file(dormhr_file, cell_lat_r.nRows,
                                         cell_lat_r.nCols, dormhr_data,
                                         cell_lat_r.geotrans, cell_lat_r.srs,
                                         cell_lat_r.noDataValue, GDT_Float32)
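As a quick stand-alone check of the minimum-daylength computation above, the same arithmetic for an assumed latitude of 40 degrees (illustrative only, not part of the original function):

    import numpy

    lat_deg = 40.0  # assumed latitude
    x = 0.4348 * numpy.abs(numpy.tan(lat_deg / 57.296))  # -tan(sd) * |tan(lat)|, with -tan(sd) = 0.4348
    daylmn = 7.6394 * numpy.arccos(x) if x < 1. else 0.
    print(round(daylmn, 1))                              # ~9.1 h minimum daylength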
Example 16
File: Util.py Project: jx-qqq/SEIMS
def slope_rad_to_deg(tanslp, slp):
    """Convert slope from radius to slope."""
    origin = RasterUtilClass.read_raster(tanslp)
    temp = origin.data == origin.noDataValue
    slpdata = numpy.where(temp, origin.noDataValue,
                          numpy.arctan(origin.data) * 180. / numpy.pi)
    RasterUtilClass.write_gtiff_file(slp, origin.nRows, origin.nCols, slpdata,
                                     origin.geotrans, origin.srs,
                                     origin.noDataValue, origin.dataType)
Example 17
 def output(self, jfile, unitraster, unitshp):
     """output json file and slope position units raster file"""
     json_updown_data = json.dumps(self.units_updwon, indent=4)
     with open(jfile, 'w') as f:
         f.write(json_updown_data)
     RasterUtilClass.write_gtiff_file(unitraster, self.nrows, self.ncols,
                                      self.slppos_ids, self.geotrans, self.srs,
                                      DEFAULT_NODATA, self.datatype)
     VectorUtilClass.raster2shp(unitraster, unitshp)
     print("Original unique spatial units ID raster saved as '%s'" % unitraster)
Example 18
File: Util.py Project: jx-qqq/SEIMS
def rpi_calculation(distdown, distup, rpi_outfile):
    """Calculate Relative Position Index (RPI)."""
    down = RasterUtilClass.read_raster(distdown)
    up = RasterUtilClass.read_raster(distup)
    temp = down.data < 0
    rpi_data = numpy.where(temp, down.noDataValue,
                           down.data / (down.data + up.data))
    RasterUtilClass.write_gtiff_file(rpi_outfile, down.nRows, down.nCols,
                                     rpi_data, down.geotrans, down.srs,
                                     down.noDataValue, down.dataType)
Example 19
 def output(self, jfile, unitraster, unitshp):
     """output json file and slope position units raster file"""
     json_updown_data = json.dumps(self.units_updwon, indent=4)
     with open(jfile, 'w', encoding='utf-8') as f:
         f.write('%s' % json_updown_data)
     RasterUtilClass.write_gtiff_file(unitraster, self.nrows, self.ncols,
                                      self.slppos_ids, self.geotrans, self.srs,
                                      DEFAULT_NODATA, self.datatype)
     VectorUtilClass.raster2shp(unitraster, unitshp)
     print("Original unique spatial units ID raster saved as '%s'" % unitraster)
Example 20
 def filter_ridge_by_subbasin_boundary(self):
     for row in range(self.nrows):
         for col in range(self.ncols):
             if MathClass.floatequal(self.rdgsrc_data[row][col],
                                     DEFAULT_NODATA):
                 continue
             if MathClass.floatequal(self.rdgpot[row][col], DEFAULT_NODATA):
                 self.rdgsrc_data[row][col] = DEFAULT_NODATA
     RasterUtilClass.write_gtiff_file(self.rdgsrc, self.nrows, self.ncols,
                                      self.rdgsrc_data, self.geotrans,
                                      self.srs, DEFAULT_NODATA, 6)
Example 21
def main():
    """Read GeoTiff raster data and perform log transformation.
    """
    input_tif = "../tests/data/Jamaica_dem.tif"
    output_tif = "../tests/data/tmp_results/log_dem.tif"
    rst = RasterUtilClass.read_raster(input_tif)
    # raster data (with noDataValue as numpy.nan) as numpy array
    rst_valid = rst.validValues
    output_data = np.log(rst_valid)
    # write output raster
    RasterUtilClass.write_gtiff_file(output_tif, rst.nRows, rst.nCols, output_data, rst.geotrans,
                                     rst.srs, rst.noDataValue, rst.dataType)
Example 22
    def calculate_latitude_dependent_parameters(lat_file, min_dayl_file, dormhr_file, dorm_hr):
        """
        Calculate latitude-dependent parameters, including:
           1. minimum daylength (daylmn), 2. day length threshold for dormancy (dormhr)
        """
        # calculate minimum daylength, from readwgn.f of SWAT
        # daylength=2*acos(-tan(sd)*tan(lat))/omega
        # where solar declination, sd, = -23.5 degrees for minimum daylength in
        # northern hemisphere and -tan(sd) = .4348
        # absolute value is taken of tan(lat) to convert southern hemisphere
        # values to northern hemisphere
        # the angular velocity of the earth's rotation, omega, = 15 deg/hr or
        # 0.2618 rad/hr and 2/0.2618 = 7.6394
        cell_lat_r = RasterUtilClass.read_raster(lat_file)
        lat_data = cell_lat_r.data
        # daylmn_data = cell_lat_r.data
        zero = numpy.zeros((cell_lat_r.nRows, cell_lat_r.nCols))
        # nodata = numpy.ones((cell_lat_r.nRows, cell_lat_r.nCols)) * cell_lat_r.noDataValue
        # convert degrees to radians (2pi/360=1/57.296)
        daylmn_data = 0.4348 * numpy.abs(numpy.tan(lat_data / 57.296))
        condition = daylmn_data < 1.
        daylmn_data = numpy.where(condition, numpy.arccos(daylmn_data), zero)
        # condition2 = lat_data != cell_lat_r.noDataValue
        daylmn_data *= 7.6394
        daylmn_data = numpy.where(cell_lat_r.validZone, daylmn_data, lat_data)
        RasterUtilClass.write_gtiff_file(min_dayl_file, cell_lat_r.nRows, cell_lat_r.nCols,
                                         daylmn_data, cell_lat_r.geotrans, cell_lat_r.srs,
                                         cell_lat_r.noDataValue, GDT_Float32)

        def cal_dorm_hr(lat):
            """calculate day length threshold for dormancy"""
            if lat == cell_lat_r.noDataValue:
                return cell_lat_r.noDataValue
            else:
                if 20. <= lat <= 40:
                    return (numpy.abs(lat - 20.)) / 20.
                elif lat > 40.:
                    return 1.
                elif lat < 20.:
                    return -1.

        cal_dorm_hr_numpy = numpy.frompyfunc(cal_dorm_hr, 1, 1)

        # dormhr_data = numpy.copy(lat_data)
        if dorm_hr < -UTIL_ZERO:
            dormhr_data = cal_dorm_hr_numpy(lat_data)
        else:
            dormhr_data = numpy.where(cell_lat_r.validZone,
                                      numpy.ones((cell_lat_r.nRows, cell_lat_r.nCols)) * dorm_hr,
                                      lat_data)
        RasterUtilClass.write_gtiff_file(dormhr_file, cell_lat_r.nRows, cell_lat_r.nCols,
                                         dormhr_data, cell_lat_r.geotrans, cell_lat_r.srs,
                                         cell_lat_r.noDataValue, GDT_Float32)
Example 23
    def std_of_flow_time_to_stream(streamlink, flow_dir_file, slope, radius, velocity, delta_s_file,
                                   flow_dir_code='TauDEM'):
        """Generate standard deviation of t0_s (flow time to the workflow channel from each cell).
        """
        strlk_r = RasterUtilClass.read_raster(streamlink)
        strlk_data = strlk_r.data
        rad_data = RasterUtilClass.read_raster(radius).data
        slo_data = RasterUtilClass.read_raster(slope).data

        vel_r = RasterUtilClass.read_raster(velocity)
        vel_data = vel_r.data
        xsize = vel_r.nCols
        ysize = vel_r.nRows
        nodata_value = vel_r.noDataValue

        def initial_variables(vel, strlk, slp, rad):
            """initial variables"""
            if abs(vel - nodata_value) < UTIL_ZERO:
                return DEFAULT_NODATA
            if strlk <= 0:
                tmp_weight = 1
            else:
                tmp_weight = 0
            # 0 is river
            if slp < 0.0005:
                slp = 0.0005
            # dampGrid = vel * rad / (slp / 100. * 2.) # No need to divide 100
            # in my view. By LJ
            damp_grid = vel * rad / (slp * 2.)
            celerity = vel * 5. / 3.
            tmp_weight *= damp_grid * 2. / numpy.power(celerity, 3.)
            return tmp_weight

        initial_variables_numpy = numpy.frompyfunc(initial_variables, 4, 1)
        weight = initial_variables_numpy(vel_data, strlk_data, slo_data, rad_data)

        delta_s_sqr = TerrainUtilClass.calculate_flow_length(flow_dir_file, weight, flow_dir_code)

        def cal_delta_s(vel, sqr):
            """Calculate delta s"""
            if abs(vel - nodata_value) < UTIL_ZERO:
                return nodata_value
            else:
                return sqrt(sqr) / 3600.

        cal_delta_s_numpy = numpy.frompyfunc(cal_delta_s, 2, 1)
        delta_s = cal_delta_s_numpy(vel_data, delta_s_sqr)

        RasterUtilClass.write_gtiff_file(delta_s_file, ysize, xsize, delta_s, strlk_r.geotrans,
                                         strlk_r.srs, DEFAULT_NODATA, GDT_Float32)
Example 24
    def generate_cn2(maindb, landuse_file, hydrogroup_file, cn2_filename,
                     landuse_shp):
        """Generate CN2 raster."""
        query_result = maindb['LANDUSELOOKUP'].find()
        if query_result is None:
            raise RuntimeError(
                "LANDUSELOOKUP collection does not exist or is empty!")
        # cn2 list for each landuse type and hydrological soil group
        cn2_map = dict()
        for row in query_result:
            lu_id = row.get('LANDUSE_ID')
            cn2_list = [
                row.get('CN2A'),
                row.get('CN2B'),
                row.get('CN2C'),
                row.get('CN2D')
            ]
            cn2_map[lu_id] = cn2_list
        # print(cn2Map)
        lu_r = RasterUtilClass.read_raster(landuse_file)
        data_landuse = lu_r.data
        xsize = lu_r.nCols
        ysize = lu_r.nRows
        nodata_value = lu_r.noDataValue

        hg_r = RasterUtilClass.read_raster(hydrogroup_file)
        data_hg = hg_r.data

        def cal_cn2(lucc_id, hg):
            """Calculate CN2 value from landuse ID and Hydro Group number."""
            lucc_id = int(lucc_id)
            if lucc_id < 0 or MathClass.floatequal(lucc_id, nodata_value):
                return DEFAULT_NODATA
            else:
                hg = int(hg) - 1
                return cn2_map[lucc_id][hg]

        cal_cn2_numpy = np_frompyfunc(cal_cn2, 2, 1)
        data_prop = cal_cn2_numpy(data_landuse, data_hg)
        print(cn2_filename)
        cn2_csv = r'D:\SEIMS\data\zts\data_prepare\spatial\test\cn2.csv'

        RasterUtilClass.write_gtiff_file(cn2_filename, ysize, xsize, data_prop,
                                         lu_r.geotrans, lu_r.srs, nodata_value,
                                         GDT_Float32)
        RasterUtilClass.count_raster_moist(cn2_filename, landuse_shp, cn2_csv)
Example 25
    def flow_time_to_stream(streamlink, velocity, flow_dir_file, t0_s_file,
                            flow_dir_code='TauDEM'):
        """Calculate flow time to the workflow channel from each grid cell."""
        strlk_data = RasterUtilClass.read_raster(streamlink).data

        vel_r = RasterUtilClass.read_raster(velocity)
        vel_data = vel_r.data
        xsize = vel_r.nCols
        ysize = vel_r.nRows
        # noDataValue = vel_r.noDataValue

        weight = numpy.where(strlk_data <= 0, numpy.ones((ysize, xsize)),
                             numpy.zeros((ysize, xsize)))
        traveltime = numpy.where(vel_r.validZone, numpy.zeros((ysize, xsize)), vel_data)
        flowlen = TerrainUtilClass.calculate_flow_length(flow_dir_file, weight, flow_dir_code)
        traveltime = numpy.where(vel_r.validZone, flowlen / (vel_data * 5. / 3.) / 3600.,
                                 traveltime)
        RasterUtilClass.write_gtiff_file(t0_s_file, ysize, xsize, traveltime, vel_r.geotrans,
                                         vel_r.srs, DEFAULT_NODATA, GDT_Float32)
Example 26
 def assign_stream_id_raster(stream_file, subbasin_file, out_stream_file):
     """Assign stream link ID according to subbasin ID.
     Args:
         stream_file: input stream raster file
         subbasin_file: subbasin raster file
         out_stream_file: output stream raster file
     """
     stream_raster = RasterUtilClass.read_raster(stream_file)
     stream_data = stream_raster.data
     nrows = stream_raster.nRows
     ncols = stream_raster.nCols
     nodata = stream_raster.noDataValue
     subbain_data = RasterUtilClass.read_raster(subbasin_file).data
     nodata_array = ones((nrows, ncols)) * DEFAULT_NODATA
     newstream_data = where((stream_data > 0) & (stream_data != nodata),
                            subbain_data, nodata_array)
     RasterUtilClass.write_gtiff_file(out_stream_file, nrows, ncols, newstream_data,
                                      stream_raster.geotrans, stream_raster.srs,
                                      DEFAULT_NODATA, GDT_Int16)
Example 28
    def calculate_channel_width_depth(acc_file, chwidth_file, chdepth_file):
        """Calculate channel width and depth according to drainage area (km^2).

        The equations used in the BASINS software to estimate channel width and depth are adopted.

        W = 1.29 * A ^ 0.6
        D = 0.13 * A ^ 0.4

        where W is bankfull channel width (m), D is bankfull channel depth (m), and A is drainage
          area (km^2)

        References:
            Ames, D.P., Rafn, E.B., Kirk, R.V., Crosby, B., 2009.
              Estimation of stream channel geometry in Idaho using GIS-derived watershed
              characteristics. Environ. Model. Softw. 24, 444–448.
              https://doi.org/10.1016/j.envsoft.2008.08.008

        """
        acc_r = RasterUtilClass.read_raster(acc_file)
        xsize = acc_r.nCols
        ysize = acc_r.nRows
        dx = acc_r.dx
        cell_area = dx * dx * 0.000001  # m^2 ==> km^2

        tmp_ones = numpy.ones((ysize, xsize))
        width = tmp_ones * DEFAULT_NODATA
        depth = tmp_ones * DEFAULT_NODATA
        valid_values = numpy.where(acc_r.validZone, acc_r.data, tmp_ones)
        width = numpy.where(
            acc_r.validZone,
            numpy.power((1.29 * (valid_values + 1) * cell_area), 0.6), width)
        depth = numpy.where(
            acc_r.validZone,
            numpy.power((0.13 * (valid_values + 1) * cell_area), 0.4), depth)

        RasterUtilClass.write_gtiff_file(chwidth_file, ysize, xsize, width,
                                         acc_r.geotrans, acc_r.srs,
                                         DEFAULT_NODATA, GDT_Float32)
        RasterUtilClass.write_gtiff_file(chdepth_file, ysize, xsize, depth,
                                         acc_r.geotrans, acc_r.srs,
                                         DEFAULT_NODATA, GDT_Float32)
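For reference, plugging an assumed drainage area of 10 km^2 into the W and D equations quoted in the docstring gives roughly a 5.1 m wide, 0.33 m deep bankfull channel:

    import numpy

    area_km2 = 10.0                          # assumed drainage area, km^2
    width = 1.29 * numpy.power(area_km2, 0.6)
    depth = 0.13 * numpy.power(area_km2, 0.4)
    print(round(width, 2), round(depth, 2))  # ~5.14 m, ~0.33 m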
Example 29
    def output_compressed_dinf(dinfflowang, compdinffile, weightfile):
        """Output compressed Dinf flow direction and weight to raster file
        Args:
            dinfflowang: Dinf flow direction raster file
            compdinffile: Compressed D8 flow code
            weightfile: The corresponding weight
        """
        dinf_r = RasterUtilClass.read_raster(dinfflowang)
        data = dinf_r.data
        xsize = dinf_r.nCols
        ysize = dinf_r.nRows
        nodata_value = dinf_r.noDataValue

        cal_dir_code = frompyfunc(DinfUtil.compress_dinf, 2, 3)
        updated_angle, dir_code, weight = cal_dir_code(data, nodata_value)

        RasterUtilClass.write_gtiff_file(dinfflowang, ysize, xsize,
                                         updated_angle, dinf_r.geotrans,
                                         dinf_r.srs, DEFAULT_NODATA,
                                         GDT_Float32)
        RasterUtilClass.write_gtiff_file(compdinffile, ysize, xsize, dir_code,
                                         dinf_r.geotrans, dinf_r.srs,
                                         DEFAULT_NODATA, GDT_Int16)
        RasterUtilClass.write_gtiff_file(weightfile, ysize, xsize, weight,
                                         dinf_r.geotrans, dinf_r.srs,
                                         DEFAULT_NODATA, GDT_Float32)
Example 30
    def calculate_channel_width_depth(acc_file, chwidth_file, chdepth_file):
        """Calculate channel width and depth according to drainage area (km^2).

        The equations used in the BASINS software to estimate channel width and depth are adopted.

        W = 1.29 * A ^ 0.6
        D = 0.13 * A ^ 0.4

        where W is bankfull channel width (m), D is bankfull channel depth (m), and A is drainage
          area (km^2)

        References:
            Ames, D.P., Rafn, E.B., Kirk, R.V., Crosby, B., 2009.
              Estimation of stream channel geometry in Idaho using GIS-derived watershed
              characteristics. Environ. Model. Softw. 24, 444–448.
              https://doi.org/10.1016/j.envsoft.2008.08.008

        """
        acc_r = RasterUtilClass.read_raster(acc_file)
        xsize = acc_r.nCols
        ysize = acc_r.nRows
        dx = acc_r.dx
        cell_area = dx * dx * 0.000001  # m^2 ==> km^2

        tmp_ones = numpy.ones((ysize, xsize))
        width = tmp_ones * DEFAULT_NODATA
        depth = tmp_ones * DEFAULT_NODATA
        valid_values = numpy.where(acc_r.validZone, acc_r.data, tmp_ones)
        width = numpy.where(acc_r.validZone,
                            numpy.power((1.29 * (valid_values + 1) * cell_area), 0.6),
                            width)
        depth = numpy.where(acc_r.validZone,
                            numpy.power((0.13 * (valid_values + 1) * cell_area), 0.4),
                            depth)

        RasterUtilClass.write_gtiff_file(chwidth_file, ysize, xsize, width, acc_r.geotrans,
                                         acc_r.srs, DEFAULT_NODATA, GDT_Float32)
        RasterUtilClass.write_gtiff_file(chdepth_file, ysize, xsize, depth, acc_r.geotrans,
                                         acc_r.srs, DEFAULT_NODATA, GDT_Float32)
Example 31
    def generate_cn2(maindb, landuse_file, hydrogroup_file, cn2_filename, landuse_shp):
        """Generate CN2 raster."""
        query_result = maindb['LANDUSELOOKUP'].find()
        if query_result is None:
            raise RuntimeError("LanduseLoop Collection is not existed or empty!")
        # cn2 list for each landuse type and hydrological soil group
        cn2_map = dict()
        for row in query_result:
            lu_id = row.get('LANDUSE_ID')
            cn2_list = [row.get('CN2A'), row.get('CN2B'), row.get('CN2C'), row.get('CN2D')]
            cn2_map[lu_id] = cn2_list
        # print(cn2Map)
        lu_r = RasterUtilClass.read_raster(landuse_file)
        data_landuse = lu_r.data
        xsize = lu_r.nCols
        ysize = lu_r.nRows
        nodata_value = lu_r.noDataValue

        hg_r = RasterUtilClass.read_raster(hydrogroup_file)
        data_hg = hg_r.data

        def cal_cn2(lucc_id, hg):
            """Calculate CN2 value from landuse ID and Hydro Group number."""
            lucc_id = int(lucc_id)
            if lucc_id < 0 or MathClass.floatequal(lucc_id, nodata_value):
                return DEFAULT_NODATA
            else:
                hg = int(hg) - 1
                return cn2_map[lucc_id][hg]

        cal_cn2_numpy = np_frompyfunc(cal_cn2, 2, 1)
        data_prop = cal_cn2_numpy(data_landuse, data_hg)
        print (cn2_filename)
        cn2_csv = r'D:\SEIMS\data\zts\data_prepare\spatial\test\cn2.csv'

        RasterUtilClass.write_gtiff_file(cn2_filename, ysize, xsize, data_prop, lu_r.geotrans,
                                         lu_r.srs, nodata_value, GDT_Float32)
        RasterUtilClass.count_raster_moist(cn2_filename, landuse_shp, cn2_csv)
Example 32
    def calculate_channel_width(acc_file, chwidth_file):
        """Calculate channel width."""
        acc_r = RasterUtilClass.read_raster(acc_file)
        xsize = acc_r.nCols
        ysize = acc_r.nRows
        dx = acc_r.dx
        cell_area = dx * dx

        # storm frequency   a      b
        # 2                 1      0.56
        # 10                1.2    0.56
        # 100               1.4    0.56
        a = 1.2
        b = 0.56
        # TODO: Figure out what this means, and move it to text.py or config.py. LJ

        tmp_ones = numpy.ones((ysize, xsize))
        width = tmp_ones * DEFAULT_NODATA
        valid_values = numpy.where(acc_r.validZone, acc_r.data, tmp_ones)
        width = numpy.where(acc_r.validZone, numpy.power((a * (valid_values + 1)
                                                          * cell_area / 1000000.), b), width)
        RasterUtilClass.write_gtiff_file(chwidth_file, ysize, xsize, width, acc_r.geotrans,
                                         acc_r.srs, DEFAULT_NODATA, GDT_Float32)
        return width
Example 33
    def output_compressed_dinf(dinfflowang, compdinffile, weightfile):
        """Output compressed Dinf flow direction and weight to raster file
        Args:
            dinfflowang: Dinf flow direction raster file
            compdinffile: Compressed D8 flow code
            weightfile: The corresponding weight
        """
        dinf_r = RasterUtilClass.read_raster(dinfflowang)
        data = dinf_r.data
        xsize = dinf_r.nCols
        ysize = dinf_r.nRows
        nodata_value = dinf_r.noDataValue

        cal_dir_code = frompyfunc(DinfUtil.compress_dinf, 2, 3)
        updated_angle, dir_code, weight = cal_dir_code(data, nodata_value)

        RasterUtilClass.write_gtiff_file(dinfflowang, ysize, xsize, updated_angle,
                                         dinf_r.geotrans, dinf_r.srs, DEFAULT_NODATA, GDT_Float32)
        RasterUtilClass.write_gtiff_file(compdinffile, ysize, xsize, dir_code,
                                         dinf_r.geotrans, dinf_r.srs, DEFAULT_NODATA, GDT_Int16)
        RasterUtilClass.write_gtiff_file(weightfile, ysize, xsize, weight,
                                         dinf_r.geotrans, dinf_r.srs, DEFAULT_NODATA, GDT_Float32)
Example 34
    def output_compressed_dinf(dinfflowang,  # input
                               compdinffile, weightfile,  # outputs
                               minfraction=0.01, subbasin=None, stream=None,  # optional inputs
                               upddinffile=None  # optional output
                               ):
        """Output updated Dinf, compressed flow directions, and flow fractions to raster files
        Args:
            dinfflowang: Dinf flow direction raster file
            compdinffile: Compressed D8 flow code
            weightfile: The corresponding weight of the first flow direction
            minfraction: Minimum flow fraction accounted for, e.g., 0.01
            subbasin: Subbasin raster to ensure that a cell only flows downstream within its subbasin
            stream: Stream raster to ensure that a river cell only flows into one downstream cell
            upddinffile: Updated Dinf flow direction raster file
        """
        dinf_r = RasterUtilClass.read_raster(dinfflowang)
        data = dinf_r.data
        xsize = dinf_r.nCols
        ysize = dinf_r.nRows
        nodata_value = dinf_r.noDataValue

        use_subbsn = False
        use_stream = False
        if subbasin is not None:
            subbsn_r = RasterUtilClass.read_raster(subbasin)
            if xsize == subbsn_r.nCols and ysize == subbsn_r.nRows:
                use_subbsn = True
        if stream is not None:
            stream_r = RasterUtilClass.read_raster(stream)
            if xsize == stream_r.nCols and ysize == stream_r.nRows:
                use_stream = True

        # if use_stream:
        #     for i in range(ysize):
        #         for j in range(xsize):
        #             strm_v = stream_r.get_value_by_row_col(i, j)
        #             if strm_v is None or strm_v <= 0 or dinf_r.is_nodata(i, j):
        #                 continue
        #             dinf_v = dinf_r.get_value_by_row_col(i, j)
        #             dinf_upd, flowdir, weight = DinfUtil.compress_dinf(dinf_v,
        #                                                                dinf_r.noDataValue,
        #                                                                minfrac=minfraction)
        #             if flowdir in FlowModelConst.d8dir_ag:
        #                 continue
        #
        #
        #             down_cs = DinfUtil.downstream_index_dinf(dinf_v, i, j)
        #             weight = list()
        #             strm_l = list()
        #             for [ci, cj] in down_cs:
        #                 strm_tmp = stream_r.get_value_by_row_col(ci, cj)
        #                 if strm_v == strm_tmp:


        if use_subbsn or use_stream:
            for i in range(ysize):
                for j in range(xsize):
                    if dinf_r.is_nodata(i, j):
                        continue
                    # dinf_v = dinf_r.get_value_by_row_col(i, j)
                    # down_cs = DinfUtil.downstream_index_dinf(dinf_v, i, j)
                    # if use_subbsn:
                    #     sid = subbsn_r.get_value_by_row_col(i, j)
                    #
                    #     for [ci, cj] in down_cs:
                    #         sid_tmp = subbsn_r.get_value_by_row_col(ci, cj)
                    #         if
                    # if use_stream:
                    #     strmid = stream_r.get_value_by_row_col(i, j)

        cal_dir_code = frompyfunc(DinfUtil.compress_dinf, 3, 3)
        updated_angle, dir_code, weight = cal_dir_code(data, nodata_value, minfraction)

        if upddinffile is None:
            upddinffile = dinfflowang
        RasterUtilClass.write_gtiff_file(upddinffile, ysize, xsize, updated_angle,
                                         dinf_r.geotrans, dinf_r.srs, DEFAULT_NODATA, GDT_Float32)
        RasterUtilClass.write_gtiff_file(compdinffile, ysize, xsize, dir_code,
                                         dinf_r.geotrans, dinf_r.srs, DEFAULT_NODATA, GDT_Int16)
        RasterUtilClass.write_gtiff_file(weightfile, ysize, xsize, weight,
                                         dinf_r.geotrans, dinf_r.srs, DEFAULT_NODATA, GDT_Float32)
Example 35
    def downstream_method_whitebox(stream_raster, flow_dir_raster, hillslope_out, d8alg="taudem",
                                   stream_value_method=-1):
        """Algorithm modified from Whitebox GAT v3.4.0.
           source code: https://github.com/jblindsay/whitebox-geospatial-analysis-tools/
                                blob/master/HydroTools/src/plugins/Hillslopes.java
        Args:
            stream_raster: Stream raster in which cells with values greater than 0 are streams.
                              The input stream is recommended to be sequenced as 1, 2, 3...
            flow_dir_raster: D8 flow direction in TauDEM code
            hillslope_out: With the sequenced stream IDs, the output hillslope will be numbered:
                                  - Header hillslope: MaxStreamID + (current_id - 1) * 3 + 1
                                  - Right hillslope: MaxStreamID + (current_id - 1) * 3 + 2
                                  - Left hillslope: MaxStreamID + (current_id - 1) * 3 + 3
            d8alg: Currently, "TauDEM", "ArcGIS", and "Whitebox" are supported.
            stream_value_method: stream value assignment method; depending on this parameter,
                              the output hillslope will be named as follows:
               -1 - all the four files will be output.
                0 - keep stream link code, which has the default file name
                1 - Set to the value of right hillslope and head hillslope, <name>_r.tif
                2 - Set to the value of left hillslope and head hillslope, <name>_l.tif
                3 - Set stream cell to NoData, <name>_n.tif
        """
        print('Delineating hillslopes (header, left, and right hillslope)...')
        streamr = RasterUtilClass.read_raster(stream_raster)
        stream_data = streamr.data
        stream_nodata = streamr.noDataValue
        geotrans = streamr.geotrans
        srs = streamr.srs
        nrows = streamr.nRows
        ncols = streamr.nCols
        datatype = streamr.dataType

        flowd8r = RasterUtilClass.read_raster(flow_dir_raster)
        flowd8_data = flowd8r.data
        flowd8_nodata = flowd8r.noDataValue
        if flowd8r.nRows != nrows or flowd8r.nCols != ncols:
            raise ValueError("The input extent of D8 flow direction is not "
                             "consistent with stream data!")

        # definition of utility functions

        def inflow_stream_number(vrow, vcol, flowmodel="taudem"):
            """
            Count the inflow stream cell number and coordinates of all inflow cells
            Args:
                vrow: row number
                vcol: col number
                flowmodel: D8 flow direction algorithm.
            Returns:
                neighb_stream_cell_num: inflow cells number that is stream
                cell_coors: inflow cell coordinates; the size is equal to or greater
                            than neighb_stream_cell_num
            """
            neighb_stream_cell_num = 0
            cell_coors = []
            for c in range(8):
                newrow = vrow + FlowModelConst.ccw_drow[c]
                newcol = vcol + FlowModelConst.ccw_dcol[c]
                if newrow < 0 or newrow >= nrows or newcol < 0 or newcol >= ncols:
                    continue
                if flowd8_data[newrow][newcol] == FlowModelConst.d8_inflows.get(flowmodel)[c]:
                    cell_coors.append((newrow, newcol))
                    if stream_data[newrow][newcol] > 0 \
                            and stream_data[newrow][newcol] != stream_nodata:
                        neighb_stream_cell_num += 1
            return neighb_stream_cell_num, cell_coors

        def assign_sequenced_stream_ids(c_id, vrow, vcol, flowmodel="taudem"):
            """set sequenced stream IDs"""
            in_strm_num, in_coors = inflow_stream_number(vrow, vcol, flowmodel)
            if in_strm_num == 0:
                # it's a headwater location so start a downstream flowpath
                c_id += 1
                tmp_row = vrow
                tmp_col = vcol
                sequenced_stream_d[tmp_row][tmp_col] = c_id
                searched_flag = True
                while searched_flag:
                    # find the downslope neighbour
                    tmpflowd8 = flowd8_data[tmp_row][tmp_col]
                    if tmpflowd8 < 0 or tmpflowd8 == flowd8_nodata:
                        if stream_data[tmp_row][tmp_col] > 0 \
                                and stream_data[tmp_row][tmp_col] != stream_nodata:
                            # it is a valid stream cell and probably just has no downslope
                            # neighbour (e.g. at the edge of the grid)
                            sequenced_stream_d[tmp_row][tmp_col] = c_id
                        break
                    tmp_row, tmp_col = D8Util.downstream_index(tmpflowd8, tmp_row,
                                                               tmp_col, flowmodel)
                    if tmp_row < 0 or tmp_row >= nrows or tmp_col < 0 or tmp_col >= ncols:
                        break
                    if stream_data[tmp_row][tmp_col] <= 0:
                        searched_flag = False  # it is not a stream cell
                    else:
                        if sequenced_stream_d[tmp_row][tmp_col] > 0:
                            # run into a larger stream, end the downstream search
                            break
                        # is it a confluence (conjunction node)
                        in_strm_num, in_coors = inflow_stream_number(tmp_row, tmp_col, flowmodel)
                        if in_strm_num >= 2:
                            c_id += 1
                        sequenced_stream_d[tmp_row][tmp_col] = c_id
            return c_id

        def assign_hillslope_code_of_neighbors(vrow, vcol, flowmodel="taudem"):
            """set hillslope code for neighbors of current stream cell."""
            stream_coors.append((vrow, vcol))
            in_strm_num, in_coors = inflow_stream_number(vrow, vcol, flowmodel)
            strm_id = stream_data[vrow][vcol]
            # print('Assign hillslope code for stream cell, r: %d, c: %d, ID: %d' % (vrow, vcol,
            #                                                                        int(strm_id)))
            # set hillslope IDs
            hillslp_ids = DelineateHillslope.cal_hs_codes(max_id, strm_id)
            cur_d8_value = flowd8_data[vrow][vcol]
            if in_strm_num == 0:  # it is a one-order stream head
                headstream_coors.append((vrow, vcol))
                for (in_nostrm_row, in_nostrm_col) in in_coors:
                    hillslope_mtx[in_nostrm_row][in_nostrm_col] = hillslp_ids[0]
            else:  # search the 3*3 neighbors by clockwise and counterclockwise separately
                if cur_d8_value <= 0 or cur_d8_value == flowd8_nodata:
                    return
                dirv = int(cur_d8_value)  # direction code
                d_idx = FlowModelConst.d8_dirs.get(flowmodel).index(dirv)  # direction index
                # look to the right side, i.e. clockwise
                d_idx_r = d_idx
                while len(in_coors) > 0:
                    d_idx_r -= 1
                    if d_idx_r > 7:
                        d_idx_r = 0
                    if d_idx_r < 0:
                        d_idx_r = 7
                    tmp_row = vrow + FlowModelConst.ccw_drow[d_idx_r]
                    tmp_col = vcol + FlowModelConst.ccw_dcol[d_idx_r]
                    if (tmp_row, tmp_col) not in in_coors:  # not inflow to this cell
                        continue
                    tmpstream = stream_data[tmp_row][tmp_col]
                    in_coors.remove((tmp_row, tmp_col))
                    if tmpstream <= 0 or tmpstream == stream_nodata:
                        hillslope_mtx[tmp_row][tmp_col] = hillslp_ids[1]  # right hillslope
                    else:  # encountered another inflowing stream
                        break

                # look to the left side, i.e. counterclockwise
                d_idx_l = d_idx
                while len(in_coors) > 0:
                    d_idx_l += 1
                    if d_idx_l > 7:
                        d_idx_l = 0
                    if d_idx_l < 0:
                        d_idx_l = 7
                    tmp_row = vrow + FlowModelConst.ccw_drow[d_idx_l]
                    tmp_col = vcol + FlowModelConst.ccw_dcol[d_idx_l]
                    if (tmp_row, tmp_col) not in in_coors:  # not inflow to this cell
                        continue
                    tmpstream = stream_data[tmp_row][tmp_col]
                    in_coors.remove((tmp_row, tmp_col))
                    if tmpstream <= 0 or tmpstream == stream_nodata:
                        hillslope_mtx[tmp_row][tmp_col] = hillslp_ids[2]  # left hillslope
                    else:  # encountered another inflowing stream
                        break
                # are there any inflow cells left?
                if len(in_coors) > 0:
                    for (tmp_row, tmp_col) in in_coors:
                        tmpstream = stream_data[tmp_row][tmp_col]
                        if tmpstream <= 0 or tmpstream == stream_nodata:
                            hillslope_mtx[tmp_row][tmp_col] = hillslp_ids[0]
                            # add the current cell as head stream
                            headstream_coors.append((vrow, vcol))

        def output_hillslope(method_id):
            """Output hillslope according different stream cell value method."""
            for (tmp_row, tmp_col) in stream_coors:
                tmp_hillslp_ids = DelineateHillslope.cal_hs_codes(max_id,
                                                                  stream_data[tmp_row][tmp_col])
                if 0 < method_id < 3:
                    hillslope_mtx[tmp_row][tmp_col] = tmp_hillslp_ids[method_id]
                    # is head stream cell?
                    if (tmp_row, tmp_col) in headstream_coors:
                        hillslope_mtx[tmp_row][tmp_col] = tmp_hillslp_ids[0]
                elif method_id == 3:
                    hillslope_mtx[tmp_row][tmp_col] = DEFAULT_NODATA
            # Output to raster file
            hillslope_out_new = hillslope_out
            dirpath = os.path.dirname(hillslope_out_new) + os.path.sep
            corename = FileClass.get_core_name_without_suffix(hillslope_out_new)
            if method_id == 1:
                hillslope_out_new = dirpath + corename + '_right.tif'
            elif method_id == 2:
                hillslope_out_new = dirpath + corename + '_left.tif'
            elif method_id == 3:
                hillslope_out_new = dirpath + corename + '_nodata.tif'
            RasterUtilClass.write_gtiff_file(hillslope_out_new, nrows, ncols,
                                             hillslope_mtx,
                                             geotrans, srs, DEFAULT_NODATA, datatype)

        # 1. assign a unique id to each link in the stream network if needed
        assign_stream_id = False
        tmp = numpy.where((stream_data > 0) & (stream_data != stream_nodata),
                          stream_data, numpy.nan)
        max_id = int(numpy.nanmax(tmp))  # i.e., stream link number
        min_id = int(numpy.nanmin(tmp))
        for i in range(min_id, max_id + 1):
            if i not in tmp:
                assign_stream_id = True
                break
        if max_id == min_id:
            assign_stream_id = True
        current_id = 0
        if assign_stream_id:
            # calculate and output sequenced stream raster
            sequenced_stream_d = numpy.ones((nrows, ncols)) * DEFAULT_NODATA
            for row in range(nrows):
                for col in range(ncols):
                    # if the cell is not a stream, or has been assigned an ID
                    if stream_data[row][col] <= 0 or stream_data[row][col] == stream_nodata \
                            or sequenced_stream_d[row][col] > 0:
                        continue
                    current_id = assign_sequenced_stream_ids(current_id, row, col, d8alg)
            stream_data = numpy.copy(sequenced_stream_d)
            stream_nodata = DEFAULT_NODATA
            stream_core = FileClass.get_core_name_without_suffix(stream_raster)
            stream_seq_file = os.path.dirname(stream_raster) + os.path.sep + stream_core + '_seq.tif'
            RasterUtilClass.write_gtiff_file(stream_seq_file, nrows, ncols, sequenced_stream_d,
                                             geotrans, srs, DEFAULT_NODATA, datatype)
            max_id = current_id
        # 2. assign hillslope code according to the 3*3 neighbors of stream cells
        hillslope_mtx = numpy.copy(stream_data)
        hillslope_mtx[stream_data == stream_nodata] = DEFAULT_NODATA
        headstream_coors = []  # head stream cells
        stream_coors = []  # all stream cells, including head stream cells
        for row in range(nrows):
            for col in range(ncols):
                # if not a stream cell, or hillslope code has been assigned
                if stream_data[row][col] <= 0 or stream_data[row][col] == stream_nodata \
                        or hillslope_mtx[row][col] < 0:
                    continue
                assign_hillslope_code_of_neighbors(row, col, d8alg)

        # 3. From each unassigned cell, search downstream until an assigned hillslope is found
        for row in range(nrows):
            for col in range(ncols):
                if hillslope_mtx[row][col] > 0 or flowd8_data[row][col] == flowd8_nodata:
                    continue
                flag = False
                tmprow = row
                tmpcol = col
                tmpcoors = [(row, col)]
                hillslp_id = DEFAULT_NODATA
                while not flag:
                    # find its downslope neighbour
                    curflowdir = flowd8_data[tmprow][tmpcol]
                    if curflowdir <= 0 or curflowdir == flowd8_nodata:
                        break
                    curflowdir = int(curflowdir)
                    tmprow, tmpcol = D8Util.downstream_index(curflowdir, tmprow, tmpcol, d8alg)
                    if tmprow < 0 or tmprow >= nrows or tmpcol < 0 or tmpcol >= ncols:
                        break
                    # if the new cell already has a hillslope value, use that
                    if hillslope_mtx[tmprow][tmpcol] > 0:
                        hillslp_id = hillslope_mtx[tmprow][tmpcol]
                        flag = True
                    if not flag:
                        tmpcoors.append((tmprow, tmpcol))
                # set the source cells
                for (crow, ccol) in tmpcoors:
                    hillslope_mtx[crow][ccol] = hillslp_id

        # 4. reassign stream cell's value according to stream_value_method, and output
        if stream_value_method < 0:  # output
            output_hillslope(0)
            output_hillslope(1)
            output_hillslope(2)
            output_hillslope(3)
        else:
            output_hillslope(stream_value_method)
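Note: the helper DelineateHillslope.cal_hs_codes used above is not included in this snippet. Based on the numbering scheme described in the docstring (header/right/left hillslope = MaxStreamID + (current_id - 1) * 3 + 1/2/3) and on how hillslp_ids is indexed above, a minimal sketch of what it likely computes is:

def cal_hs_codes_sketch(max_id, stream_id):
    """Sketch (not the library implementation) of the hillslope ID scheme:
    the header/right/left hillslopes of stream link `stream_id` are numbered
    max_id + (stream_id - 1) * 3 + 1/2/3, returned in that order."""
    base = int(max_id) + (int(stream_id) - 1) * 3
    return [base + 1, base + 2, base + 3]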
Esempio n. 36
0
    def initial_soil_moisture(acc_file, slope_file, out_file, landuse_file):
        """Initialize soil moisture fraction of field capacity, based on TWI"""
        acc_r = RasterUtilClass.read_raster(acc_file)
        data_acc = acc_r.data
        xsize = acc_r.nCols
        ysize = acc_r.nRows
        nodata_value = acc_r.noDataValue
        srs = acc_r.srs
        geotrans = acc_r.geotrans
        dx = acc_r.dx
        data_slope = RasterUtilClass.read_raster(slope_file).data
        cell_area = dx * dx

        def wi_grid_cal(accvalue, slpvalue):
            """TWI, ln(acc_file/tan(slp))"""
            if abs(accvalue - nodata_value) < UTIL_ZERO:
                return DEFAULT_NODATA
            else:
                if abs(slpvalue) < MINI_SLOPE:
                    slpvalue = MINI_SLOPE
                return math.log((accvalue + 1.) * cell_area / slpvalue)

        wi_grid_cal_numpy = numpy.frompyfunc(wi_grid_cal, 2, 1)
        wi_grid = wi_grid_cal_numpy(data_acc, data_slope)
        # wiGrid_valid = numpy.where(acc_r.validZone, wi_grid, numpy.nan)
        # wi_max = numpy.nanmax(wiGrid_valid)
        # wi_min = numpy.nanmin(wiGrid_valid)
        # WARNING: numpy.nanmax and numpy.nanmin are unstable on Linux, so they
        # are replaced by the for loops below. By LJ
        wi_max = -numpy.inf
        wi_min = numpy.inf
        for i in range(0, ysize):
            for j in range(0, xsize):
                if wi_max < wi_grid[i][j]:
                    wi_max = wi_grid[i][j]
                if DEFAULT_NODATA != wi_grid[i][j] < wi_min:
                    wi_min = wi_grid[i][j]
        # print('TWIMax:%f, TWIMin:%f' % (wi_max, wi_min))
        soil_mois_fr_min = 0.6  # minimum relative saturation
        soil_mois_fr_max = 1.0

        wi_uplimit = wi_max
        a = (soil_mois_fr_max - soil_mois_fr_min) / (wi_uplimit - wi_min)
        b = soil_mois_fr_min - a * wi_min

        def moisture_cal(acc, wigrid):
            """calculate soil moisture"""
            if abs(acc - nodata_value) < UTIL_ZERO:
                return DEFAULT_NODATA
            else:
                tmp = a * wigrid + b
                if tmp > soil_mois_fr_max:
                    return soil_mois_fr_max
                elif tmp < soil_mois_fr_min:
                    return soil_mois_fr_min
                else:
                    return tmp

        moisture_cal_numpy = numpy.frompyfunc(moisture_cal, 2, 1)
        moisture = moisture_cal_numpy(data_acc, wi_grid)
        RasterUtilClass.write_gtiff_file(out_file, ysize, xsize, moisture, geotrans, srs,
                                         DEFAULT_NODATA, GDT_Float32)
        weight_csv = os.path.join(os.path.dirname(out_file), 'weight_field.csv')
        if not os.path.exists(weight_csv):
            RasterUtilClass.generate_field_weight(weight_csv, landuse_file, out_file)
        print('Field weight file has been generated: %s' % weight_csv)
        RasterUtilClass.count_raster_field(out_file, landuse_file)
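The per-cell TWI calculation and the linear rescaling above can also be written with vectorized NumPy operations. A minimal sketch, assuming acc and slope are float arrays from which NoData cells have already been masked out, using the same 0.6-1.0 relative saturation range:

import numpy as np

def initial_moisture_sketch(acc, slope, cell_area, mini_slope=0.0001):
    """Vectorized sketch of the TWI-based initial soil moisture fraction."""
    slope = np.maximum(np.abs(slope), mini_slope)
    twi = np.log((acc + 1.0) * cell_area / slope)
    fr_min, fr_max = 0.6, 1.0  # relative saturation range used above
    a = (fr_max - fr_min) / (twi.max() - twi.min())
    b = fr_min - a * twi.min()
    return np.clip(a * twi + b, fr_min, fr_max)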
Esempio n. 37
0
    def initial_soil_moisture(acc_file, slope_file, out_file):
        """Initialize soil moisture fraction of field capacity, based on TWI"""
        acc_r = RasterUtilClass.read_raster(acc_file)
        data_acc = acc_r.data
        xsize = acc_r.nCols
        ysize = acc_r.nRows
        nodata_value = acc_r.noDataValue
        srs = acc_r.srs
        geotrans = acc_r.geotrans
        dx = acc_r.dx
        data_slope = RasterUtilClass.read_raster(slope_file).data
        cell_area = dx * dx

        def wi_grid_cal(accvalue, slpvalue):
            """TWI, ln(acc_file/tan(slp))"""
            if abs(accvalue - nodata_value) < UTIL_ZERO:
                return DEFAULT_NODATA
            else:
                if abs(slpvalue) < MINI_SLOPE:
                    slpvalue = MINI_SLOPE
                return math.log((accvalue + 1.) * cell_area / slpvalue)

        wi_grid_cal_numpy = numpy.frompyfunc(wi_grid_cal, 2, 1)
        wi_grid = wi_grid_cal_numpy(data_acc, data_slope)
        # wiGrid_valid = numpy.where(acc_r.validZone, wi_grid, numpy.nan)
        # wi_max = numpy.nanmax(wiGrid_valid)
        # wi_min = numpy.nanmin(wiGrid_valid)
        # WARNING: numpy.nanmax and numpy.nanmin are unstable on Linux, so they
        # are replaced by the for loops below. By LJ
        wi_max = -numpy.inf
        wi_min = numpy.inf
        for i in range(0, ysize):
            for j in range(0, xsize):
                if wi_max < wi_grid[i][j]:
                    wi_max = wi_grid[i][j]
                if DEFAULT_NODATA != wi_grid[i][j] < wi_min:
                    wi_min = wi_grid[i][j]
        # print('TWIMax:%f, TWIMin:%f' % (wi_max, wi_min))
        soil_mois_fr_min = 0.6  # minimum relative saturation
        soil_mois_fr_max = 1.0

        wi_uplimit = wi_max
        a = (soil_mois_fr_max - soil_mois_fr_min) / (wi_uplimit - wi_min)
        b = soil_mois_fr_min - a * wi_min

        def moisture_cal(acc, wigrid):
            """calculate soil moisture"""
            if abs(acc - nodata_value) < UTIL_ZERO:
                return DEFAULT_NODATA
            else:
                tmp = a * wigrid + b
                if tmp > soil_mois_fr_max:
                    return soil_mois_fr_max
                elif tmp < soil_mois_fr_min:
                    return soil_mois_fr_min
                else:
                    return tmp

        moisture_cal_numpy = numpy.frompyfunc(moisture_cal, 2, 1)
        moisture = moisture_cal_numpy(data_acc, wi_grid)
        RasterUtilClass.write_gtiff_file(out_file, ysize, xsize, moisture,
                                         geotrans, srs, DEFAULT_NODATA,
                                         GDT_Float32)
Esempio n. 38
0
    def depression_capacity(maindb,
                            landuse_file,
                            slope_file,
                            soil_texture_file,
                            depression_file,
                            imper_perc=0.3):
        """Initialize depression capacity according to landuse, soil, and slope.
        Args:
            maindb: main MongoDatabase
            landuse_file: landuse raster file
            slope_file: slope raster file
            soil_texture_file: soil texture file
            depression_file: resulted depression raster file
            imper_perc: impervious percent in urban cell, 0.3 as default
        """
        # read landuselookup table from MongoDB
        st_fields = ['DSC_ST%d' % (i, ) for i in range(1, 13)]
        query_result = maindb['LANDUSELOOKUP'].find()
        if query_result is None:
            raise RuntimeError(
                'The LANDUSELOOKUP collection does not exist or is empty!')
        dep_sd0 = dict()
        for row in query_result:
            tmpid = row.get('LANDUSE_ID')
            dep_sd0[tmpid] = [float(row.get(item)) for item in st_fields]

        landu_r = RasterUtilClass.read_raster(landuse_file)
        landu_data = landu_r.data
        geotrans = landu_r.geotrans
        srs = landu_r.srs
        xsize = landu_r.nCols
        ysize = landu_r.nRows
        landu_nodata = landu_r.noDataValue

        slo_data = RasterUtilClass.read_raster(slope_file).data
        soil_texture_array = RasterUtilClass.read_raster(
            soil_texture_file).data

        id_omited = []

        def cal_dep(landu, soil_texture, slp):
            """Calculate depression"""
            last_stid = 0
            if abs(landu - landu_nodata) < UTIL_ZERO:
                return DEFAULT_NODATA
            landu_id = int(landu)
            if landu_id not in dep_sd0:
                if landu_id not in id_omited:
                    print('The landuse ID: %d does not exist in the lookup table.' % (landu_id, ))
                    id_omited.append(landu_id)
                # avoid a KeyError below when the landuse ID is unknown
                return DEFAULT_NODATA
            stid = int(soil_texture) - 1
            try:
                depression_grid0 = dep_sd0[landu_id][stid]
                last_stid = stid
            except Exception:
                depression_grid0 = dep_sd0[landu_id][last_stid]

            depression_grid = exp(
                numpy.log(depression_grid0 + 0.0001) + slp * (-9.5))
            # TODO, check if it is  (landu_id >= 98)? By LJ
            if landu_id == 106 or landu_id == 107 or landu_id == 105:
                return 0.5 * imper_perc + (1. - imper_perc) * depression_grid
            else:
                return depression_grid

        cal_dep_numpy = numpy.frompyfunc(cal_dep, 3, 1)
        dep_storage_cap = cal_dep_numpy(landu_data, soil_texture_array,
                                        slo_data)

        RasterUtilClass.write_gtiff_file(depression_file, ysize, xsize,
                                         dep_storage_cap, geotrans, srs,
                                         DEFAULT_NODATA, GDT_Float32)
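For a single cell, the depression storage computed above reduces to an exponential decay with slope, optionally blended with an impervious fraction for urban land uses. A standalone sketch of that formula with illustrative values (not tied to the MongoDB lookup):

import math

def depression_storage_sketch(sd0, slope, impervious_frac=None):
    """Sketch of the per-cell formula used above:
    exp(ln(sd0 + 0.0001) - 9.5 * slope), blended with the impervious
    fraction for urban cells."""
    dep = math.exp(math.log(sd0 + 0.0001) - 9.5 * slope)
    if impervious_frac is not None:  # e.g. 0.3 for urban land uses
        return 0.5 * impervious_frac + (1.0 - impervious_frac) * dep
    return dep

# depression_storage_sketch(2.0, 0.05) is roughly 1.24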
Esempio n. 39
0
    RasterData = RasterUtilClass.read_raster(inputRaster)
    # Get the initial set: givenvalue_pixels_positions
    CratersCells = np.where(RasterData.data == 1)
    CratersCells = np.array(CratersCells)
    CratersCells = CratersCells.T

    # Initiate the first ID
    ID = 1
    # Initiate the drawID_raster
    CratersIDData = np.zeros((RasterData.nRows, RasterData.nCols))

    # Label connected components among the crater pixels
    while CratersCells.size > 0:
        cell = CratersCells[0]
        idx = [[cell[0], cell[1]]]
        # Collect the pixel and its 8-neighborhood connected pixels
        pixels_connectivity_compute(RasterData.data, cell[0], cell[1], idx)
        # Positions of all pixels belonging to the current component
        idxArray = np.array(idx)
        # Remove the current component's pixels from the remaining set
        CratersCells = remove_existID_pixels(CratersCells, idxArray)
        # Assign the current ID to the component's pixels
        CratersIDData = draw_ID(ID, idxArray, CratersIDData)
        # Increment ID for the next component
        ID = ID + 1

    RasterUtilClass.write_gtiff_file("../tests/data/tmp_results/OldTest1CratersID.tif",
                                     RasterData.nRows, RasterData.nCols,
                                     CratersIDData, RasterData.geotrans,
                                     RasterData.srs, -9999, GDT_Float32)
Esempio n. 40
0
    index = (dataline[0]) * (TstDEM.nCols) + (dataline[1])
    TstSplsLE[index] = dataline
TstSplsLE = TstSplsLE[:, 2:r]

# Detect the crater candidates in application area by random forest classifier.
TstRst = clf.predict(TstSplsLE)
TstProb = clf.predict_proba(TstSplsLE)

# Save the result as a image.
# Convert the 1-d result into 2-d result.
TstCrtCddts = np.zeros((TstDEM.nRows, TstDEM.nCols))
k = 0
for i in range(TstDEM.nRows):
    for j in range(TstDEM.nCols):
        TstCrtCddts[i, j] = TstRst[k]
        k = k+1
# Write the result into TIFF file.
Time = datetime.datetime.now()
ClcTstRstName = (os.getcwd() + "/OutputData/" + "CraterCandidatesPixels_" +
                 str(Time.year) + "-" + str(Time.month) + "-" + str(Time.day)
                 + ".tif")
RasterUtilClass.write_gtiff_file(ClcTstRstName, TstDEM.nRows, TstDEM.nCols,
                                 TstCrtCddts, TstDEM.geotrans, TstDEM.srs,
                                 32767, TstDEM.dataType)

# Record the time of detecting crater candidates.
FinishTime = time.time()
print("The time of detecting crater candidates is " +
      str(FinishTime - RFTime) + " s.")

print "OK"
Esempio n. 41
0
    def depression_capacity(maindb, landuse_file, slope_file, soil_texture_file,
                            depression_file, imper_perc=0.3):
        """Initialize depression capacity according to landuse, soil, and slope.
        Args:
            maindb: main MongoDatabase
            landuse_file: landuse raster file
            slope_file: slope raster file
            soil_texture_file: soil texture file
            depression_file: resulted depression raster file
            imper_perc: impervious percent in urban cell, 0.3 as default
        """
        # read landuselookup table from MongoDB
        st_fields = ['DSC_ST%d' % (i,) for i in range(1, 13)]
        query_result = maindb['LANDUSELOOKUP'].find()
        if query_result is None:
            raise RuntimeError('The LANDUSELOOKUP collection does not exist or is empty!')
        dep_sd0 = dict()
        for row in query_result:
            tmpid = row.get('LANDUSE_ID')
            dep_sd0[tmpid] = [float(row.get(item)) for item in st_fields]

        landu_r = RasterUtilClass.read_raster(landuse_file)
        landu_data = landu_r.data
        geotrans = landu_r.geotrans
        srs = landu_r.srs
        xsize = landu_r.nCols
        ysize = landu_r.nRows
        landu_nodata = landu_r.noDataValue

        slo_data = RasterUtilClass.read_raster(slope_file).data
        soil_texture_array = RasterUtilClass.read_raster(soil_texture_file).data

        id_omited = []

        def cal_dep(landu, soil_texture, slp):
            """Calculate depression"""
            last_stid = 0
            if abs(landu - landu_nodata) < UTIL_ZERO:
                return DEFAULT_NODATA
            landu_id = int(landu)
            if landu_id not in dep_sd0:
                if landu_id not in id_omited:
                    print('The landuse ID: %d does not exist in the lookup table.' % (landu_id,))
                    id_omited.append(landu_id)
                # avoid a KeyError below when the landuse ID is unknown
                return DEFAULT_NODATA
            stid = int(soil_texture) - 1
            try:
                depression_grid0 = dep_sd0[landu_id][stid]
                last_stid = stid
            except Exception:
                depression_grid0 = dep_sd0[landu_id][last_stid]

            depression_grid = exp(numpy.log(depression_grid0 + 0.0001) + slp * (-9.5))
            # TODO, check if it is  (landu_id >= 98)? By LJ
            if landu_id == 106 or landu_id == 107 or landu_id == 105:
                return 0.5 * imper_perc + (1. - imper_perc) * depression_grid
            else:
                return depression_grid

        cal_dep_numpy = numpy.frompyfunc(cal_dep, 3, 1)
        dep_storage_cap = cal_dep_numpy(landu_data, soil_texture_array, slo_data)

        RasterUtilClass.write_gtiff_file(depression_file, ysize, xsize, dep_storage_cap,
                                         geotrans, srs, DEFAULT_NODATA, GDT_Float32)
Esempio n. 42
0
                CenterCol + k * skl_lines[LineNum][1]
            ]
            OutProfiles[temppoint[0], temppoint[1]] = IsCrater
            OutProfilesProb[temppoint[0], temppoint[1]] = float(Prob)
    # Then draw other lines.
    else:
        LineNum = LineNum - 4
        theta = line_theta[LineNum]
        for k in range(1, r + 1):
            temppoint = [
                CenterRow + int(k * math.sin(theta)),
                CenterCol + int(k * math.cos(theta))
            ]
            OutProfiles[temppoint[0], temppoint[1]] = IsCrater
            OutProfilesProb[temppoint[0], temppoint[1]] = float(Prob)

TstPrflsRstFileName1 = (os.getcwd() + "/OutputData/" + "TstPrflsRst_binary" +
                        str(Time.year) + "-" + str(Time.month) + "-" +
                        str(Time.day) + ".tif")
RasterUtilClass.write_gtiff_file(TstPrflsRstFileName1, DEM.nRows, DEM.nCols,
                                 OutProfiles, DEM.geotrans, DEM.srs, 32767,
                                 DEM.dataType)

TstPrflsRstFileName2 = (os.getcwd() + "/OutputData/" +
                        "TstPrflsRst_probability" + str(Time.year) + "-" +
                        str(Time.month) + "-" + str(Time.day) + ".tif")
RasterUtilClass.write_gtiff_file(TstPrflsRstFileName2, DEM.nRows, DEM.nCols,
                                 OutProfilesProb, DEM.geotrans, DEM.srs, 32767,
                                 DEM.dataType)

print "OK"
Esempio n. 43
0
    def downstream_method_whitebox(stream_raster, flow_dir_raster, hillslope_out, d8alg="taudem",
                                   stream_value_method=4):
        """Algorithm modified from Whitebox GAT v3.4.0.
           source code: https://github.com/jblindsay/whitebox-geospatial-analysis-tools/blob/
                                master/HydroTools/plugins/Hillslopes.java

        Args:
            stream_raster: Raster in which cell values greater than 0 identify stream cells.
                              The input streams are recommended to be sequenced as 1, 2, 3...
            flow_dir_raster: D8 flow direction in TauDEM code
            hillslope_out: With the sequenced stream IDs, the output hillslope will be numbered:
                                  - Header hillslope: MaxStreamID + (current_id - 1) * 3 + 1
                                  - Right hillslope: MaxStreamID + (current_id - 1) * 3 + 2
                                  - Left hillslope: MaxStreamID + (current_id - 1) * 3 + 3
            d8alg: Currently, "TauDEM", "ArcGIS", and "Whitebox" are supported.
            stream_value_method: stream value assignment method; depending on this parameter,
                              the output hillslope file will be suffixed as follows:
               -1 - all five files will be output.
                0 - keep stream link code, which has the default file name
                1 - Set to the value of right hillslope and head hillslope, <name>_right.tif
                2 - Set to the value of left hillslope and head hillslope, <name>_left.tif
                3 - Set stream cell to NoData, <name>_nodata.tif
                4 (Default) - Set stream cell to 0, <name>_zero.tif
        """
        print('Delineating hillslopes (header, left, and right hillslopes)...')
        streamr = RasterUtilClass.read_raster(stream_raster)
        stream_data = streamr.data
        stream_nodata = streamr.noDataValue
        geotrans = streamr.geotrans
        srs = streamr.srs
        nrows = streamr.nRows
        ncols = streamr.nCols
        datatype = streamr.dataType

        flowd8r = RasterUtilClass.read_raster(flow_dir_raster)
        flowd8_data = flowd8r.data
        flowd8_nodata = flowd8r.noDataValue
        if flowd8r.nRows != nrows or flowd8r.nCols != ncols:
            raise ValueError("The input extent of D8 flow direction is not "
                             "consistent with stream data!")

        # definition of utility functions

        def inflow_stream_number(vrow, vcol, flowmodel="taudem"):
            """
            Count the inflow stream cell number and coordinates of all inflow cells

            Args:
                vrow: row number
                vcol: col number
                flowmodel: D8 flow direction algorithm.

            Returns:
                neighb_stream_cell_num: number of inflow cells that are stream cells
                cell_coors: inflow cell coordinates; its size is equal to or greater
                            than neighb_stream_cell_num
            """
            neighb_stream_cell_num = 0
            cell_coors = list()
            for c in list(range(8)):
                newrow = vrow + FlowModelConst.ccw_drow[c]
                newcol = vcol + FlowModelConst.ccw_dcol[c]
                if newrow < 0 or newrow >= nrows or newcol < 0 or newcol >= ncols:
                    continue
                if flowd8_data[newrow][newcol] == \
                        FlowModelConst.d8_inflows.get(flowmodel.lower())[c]:
                    cell_coors.append((newrow, newcol))
                    if stream_data[newrow][newcol] > 0 \
                            and stream_data[newrow][newcol] != stream_nodata:
                        neighb_stream_cell_num += 1
            return neighb_stream_cell_num, cell_coors

        def assign_sequenced_stream_ids(c_id, vrow, vcol, flowmodel="taudem"):
            """set sequenced stream IDs"""
            in_strm_num, in_coors = inflow_stream_number(vrow, vcol, flowmodel)
            if in_strm_num == 0:
                # it's a headwater location so start a downstream flowpath
                c_id += 1
                tmp_row = vrow
                tmp_col = vcol
                sequenced_stream_d[tmp_row][tmp_col] = c_id
                searched_flag = True
                while searched_flag:
                    # find the downslope neighbour
                    tmpflowd8 = flowd8_data[tmp_row][tmp_col]
                    if tmpflowd8 < 0 or tmpflowd8 == flowd8_nodata:
                        if stream_data[tmp_row][tmp_col] > 0 \
                                and stream_data[tmp_row][tmp_col] != stream_nodata:
                            # it is a valid stream cell and probably just has no downslope
                            # neighbour (e.g. at the edge of the grid)
                            sequenced_stream_d[tmp_row][tmp_col] = c_id
                        break
                    tmp_row, tmp_col = D8Util.downstream_index(tmpflowd8, tmp_row,
                                                               tmp_col, flowmodel)
                    if tmp_row < 0 or tmp_row >= nrows or tmp_col < 0 or tmp_col >= ncols:
                        break
                    if stream_data[tmp_row][tmp_col] <= 0:
                        searched_flag = False  # it is not a stream cell
                    else:
                        if sequenced_stream_d[tmp_row][tmp_col] > 0:
                            # run into a larger stream, end the downstream search
                            break
                        # is it a confluence (conjunction node)
                        in_strm_num, in_coors = inflow_stream_number(tmp_row, tmp_col, flowmodel)
                        if in_strm_num >= 2:
                            c_id += 1
                        sequenced_stream_d[tmp_row][tmp_col] = c_id
            return c_id

        def assign_hillslope_code_of_neighbors(vrow, vcol, flowmodel="taudem"):
            """set hillslope code for neighbors of current stream cell."""
            stream_coors.append((vrow, vcol))
            in_strm_num, in_coors = inflow_stream_number(vrow, vcol, flowmodel)
            strm_id = stream_data[vrow][vcol]
            # print('Assign hillslope code for stream cell, r: %d, c: %d, ID: %d' % (vrow, vcol,
            #                                                                        int(strm_id)))
            # set hillslope IDs
            hillslp_ids = Hillslopes.cal_hs_codes(max_id, strm_id)
            cur_d8_value = flowd8_data[vrow][vcol]
            if in_strm_num == 0:  # it is a one-order stream head
                headstream_coors.append((vrow, vcol))
                for (in_nostrm_row, in_nostrm_col) in in_coors:
                    hillslope_mtx[in_nostrm_row][in_nostrm_col] = hillslp_ids[0]
            else:  # search the 3*3 neighbors by clockwise and counterclockwise separately
                if cur_d8_value <= 0 or cur_d8_value == flowd8_nodata:
                    return
                dirv = int(cur_d8_value)  # direction code
                d_idx = FlowModelConst.d8_dirs.get(flowmodel).index(dirv)  # direction index
                # look to the right side, i.e. clockwise
                d_idx_r = d_idx
                while len(in_coors) > 0:
                    d_idx_r -= 1
                    if d_idx_r > 7:
                        d_idx_r = 0
                    if d_idx_r < 0:
                        d_idx_r = 7
                    tmp_row = vrow + FlowModelConst.ccw_drow[d_idx_r]
                    tmp_col = vcol + FlowModelConst.ccw_dcol[d_idx_r]
                    if (tmp_row, tmp_col) not in in_coors:  # not inflow to this cell
                        continue
                    tmpstream = stream_data[tmp_row][tmp_col]
                    in_coors.remove((tmp_row, tmp_col))
                    if tmpstream <= 0 or tmpstream == stream_nodata:
                        hillslope_mtx[tmp_row][tmp_col] = hillslp_ids[1]  # right hillslope
                    else:  # encountered another inflowing stream
                        break

                # look to the left side, i.e. counterclockwise
                d_idx_l = d_idx
                while len(in_coors) > 0:
                    d_idx_l += 1
                    if d_idx_l > 7:
                        d_idx_l = 0
                    if d_idx_l < 0:
                        d_idx_l = 7
                    tmp_row = vrow + FlowModelConst.ccw_drow[d_idx_l]
                    tmp_col = vcol + FlowModelConst.ccw_dcol[d_idx_l]
                    if (tmp_row, tmp_col) not in in_coors:  # not inflow to this cell
                        continue
                    tmpstream = stream_data[tmp_row][tmp_col]
                    in_coors.remove((tmp_row, tmp_col))
                    if tmpstream <= 0 or tmpstream == stream_nodata:
                        hillslope_mtx[tmp_row][tmp_col] = hillslp_ids[2]  # left hillslope
                    else:  # encountered another inflowing stream
                        break
                # are there any inflow cells left?
                if len(in_coors) > 0:
                    for (tmp_row, tmp_col) in in_coors:
                        tmpstream = stream_data[tmp_row][tmp_col]
                        if tmpstream <= 0 or tmpstream == stream_nodata:
                            hillslope_mtx[tmp_row][tmp_col] = hillslp_ids[0]
                            # add the current cell as head stream
                            headstream_coors.append((vrow, vcol))

        def output_hillslope(method_id):
            """Output hillslope according different stream cell value method."""
            for (tmp_row, tmp_col) in stream_coors:
                tmp_hillslp_ids = Hillslopes.cal_hs_codes(max_id, stream_data[tmp_row][tmp_col])
                if 0 < method_id < 3:
                    hillslope_mtx[tmp_row][tmp_col] = tmp_hillslp_ids[method_id]
                    # is head stream cell?
                    if (tmp_row, tmp_col) in headstream_coors:
                        hillslope_mtx[tmp_row][tmp_col] = tmp_hillslp_ids[0]
                elif method_id == 3:
                    hillslope_mtx[tmp_row][tmp_col] = DEFAULT_NODATA
                elif method_id == 4:
                    hillslope_mtx[tmp_row][tmp_col] = 0
            # Output to raster file
            hillslope_out_new = hillslope_out
            dirpath = os.path.dirname(hillslope_out_new) + os.path.sep
            corename = FileClass.get_core_name_without_suffix(hillslope_out_new)
            if method_id == 1:
                hillslope_out_new = dirpath + corename + '_right.tif'
            elif method_id == 2:
                hillslope_out_new = dirpath + corename + '_left.tif'
            elif method_id == 3:
                hillslope_out_new = dirpath + corename + '_nodata.tif'
            elif method_id == 4:
                hillslope_out_new = dirpath + corename + '_zero.tif'
            RasterUtilClass.write_gtiff_file(hillslope_out_new, nrows, ncols,
                                             hillslope_mtx,
                                             geotrans, srs, DEFAULT_NODATA, datatype)

        # 1. assign a unique id to each link in the stream network if needed
        assign_stream_id = False
        tmp = numpy.where((stream_data > 0) & (stream_data != stream_nodata),
                          stream_data, numpy.nan)
        max_id = int(numpy.nanmax(tmp))  # i.e., stream link number
        min_id = int(numpy.nanmin(tmp))
        for i in range(min_id, max_id + 1):
            if i not in tmp:
                assign_stream_id = True
                break
        if max_id == min_id:
            assign_stream_id = True
        current_id = 0
        if assign_stream_id:
            # calculate and output sequenced stream raster
            sequenced_stream_d = numpy.ones((nrows, ncols)) * DEFAULT_NODATA
            for row in range(nrows):
                for col in range(ncols):
                    # if the cell is not a stream, or has been assigned an ID
                    if stream_data[row][col] <= 0 or stream_data[row][col] == stream_nodata \
                            or sequenced_stream_d[row][col] > 0:
                        continue
                    current_id = assign_sequenced_stream_ids(current_id, row, col, d8alg)
            stream_data = numpy.copy(sequenced_stream_d)
            stream_nodata = DEFAULT_NODATA
            stream_core = FileClass.get_core_name_without_suffix(stream_raster)
            stream_seq_file = os.path.dirname(stream_raster) + os.path.sep + \
                              stream_core + '_seq.tif'
            RasterUtilClass.write_gtiff_file(stream_seq_file, nrows, ncols, sequenced_stream_d,
                                             geotrans, srs, DEFAULT_NODATA, datatype)
            max_id = current_id
        # 2. assign hillslope code according to the 3*3 neighbors of stream cells
        hillslope_mtx = numpy.copy(stream_data)
        hillslope_mtx[stream_data == stream_nodata] = DEFAULT_NODATA
        headstream_coors = list()  # head stream cells
        stream_coors = list()  # all stream cells, including head stream cells
        for row in list(range(nrows)):
            for col in list(range(ncols)):
                # if not a stream cell, or hillslope code has been assigned
                if stream_data[row][col] <= 0 or stream_data[row][col] == stream_nodata \
                        or hillslope_mtx[row][col] < 0:
                    continue
                assign_hillslope_code_of_neighbors(row, col, d8alg)

        # 3. From each unassigned cell, search downstream until an assigned hillslope is found
        for row in list(range(nrows)):
            for col in list(range(ncols)):
                if hillslope_mtx[row][col] > 0 or flowd8_data[row][col] == flowd8_nodata:
                    continue
                flag = False
                tmprow = row
                tmpcol = col
                tmpcoors = [(row, col)]
                hillslp_id = DEFAULT_NODATA
                while not flag:
                    # find its downslope neighbour
                    curflowdir = flowd8_data[tmprow][tmpcol]
                    if curflowdir <= 0 or curflowdir == flowd8_nodata:
                        break
                    curflowdir = int(curflowdir)
                    tmprow, tmpcol = D8Util.downstream_index(curflowdir, tmprow, tmpcol, d8alg)
                    if tmprow < 0 or tmprow >= nrows or tmpcol < 0 or tmpcol >= ncols:
                        break
                    # if the new cell already has a hillslope value, use that
                    if hillslope_mtx[tmprow][tmpcol] > 0:
                        hillslp_id = hillslope_mtx[tmprow][tmpcol]
                        flag = True
                    if not flag:
                        tmpcoors.append((tmprow, tmpcol))
                # set the source cells
                for (crow, ccol) in tmpcoors:
                    hillslope_mtx[crow][ccol] = hillslp_id

        # 4. reassign stream cell's value according to stream_value_method, and output
        if stream_value_method < 0:  # output
            output_hillslope(0)
            output_hillslope(1)
            output_hillslope(2)
            output_hillslope(3)
            output_hillslope(4)
        else:
            output_hillslope(stream_value_method)
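A possible call of downstream_method_whitebox, with hypothetical file paths; the class name Hillslopes is inferred from the Hillslopes.cal_hs_codes calls above and may differ in the actual module:

# Hedged usage sketch; all paths are hypothetical.
stream_tif = '/path/to/stream_link.tif'
flowdir_tif = '/path/to/flow_dir_taudem.tif'
hillslope_tif = '/path/to/hillslope.tif'
Hillslopes.downstream_method_whitebox(stream_tif, flowdir_tif, hillslope_tif,
                                      d8alg='taudem', stream_value_method=-1)
# stream_value_method=-1 writes every variant: the default stream-coded raster
# plus <name>_right.tif, <name>_left.tif, <name>_nodata.tif, and <name>_zero.tif.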
Esempio n. 44
0
    RasterData = RasterUtilClass.read_raster(inputRaster)
    # Get the initial set: givenvalue_pixels_positions
    CratersCells = np.where(RasterData.data == 1)
    CratersCells = np.array(CratersCells)
    CratersCells = CratersCells.T

    # Initiate the first ID
    ID = 1
    # Initiate the drawID_raster
    CratersIDData = np.zeros((RasterData.nRows, RasterData.nCols))

    # Label connected components among the crater pixels
    while CratersCells.size > 0:
        cell = CratersCells[0]
        idx = [[cell[0], cell[1]]]
        # Collect the pixel and its 8-neighborhood connected pixels
        pixels_connectivity_compute(RasterData.data, cell[0], cell[1], idx)
        # Positions of all pixels belonging to the current component
        idxArray = np.array(idx)
        # Remove the current component's pixels from the remaining set
        CratersCells = remove_existID_pixels(CratersCells, idxArray)
        # Assign the current ID to the component's pixels
        CratersIDData = draw_ID(ID, idxArray, CratersIDData)
        # Increment ID for the next component
        ID = ID + 1

    RasterUtilClass.write_gtiff_file(
        "../tests/data/tmp_results/OldTest1CratersID.tif", RasterData.nRows,
        RasterData.nCols, CratersIDData, RasterData.geotrans, RasterData.srs,
        -9999, GDT_Float32)
Esempio n. 45
0
    def export_scenario_to_gtiff(self, outpath=None):
        # type: (Optional[str]) -> None
        """Export scenario to GTiff.

        TODO: Read Raster from MongoDB should be extracted to pygeoc.
        """
        if not self.export_sce_tif:
            return
        dist = self.bmps_info[self.cfg.bmpid]['DISTRIBUTION']
        dist_list = StringClass.split_string(dist, '|')
        if len(dist_list) >= 2 and dist_list[0] == 'RASTER':
            dist_name = '0_' + dist_list[1]  # prefix 0_ means the whole basin
            # read dist_name from MongoDB
            # client = ConnectMongoDB(self.modelcfg.host, self.modelcfg.port)
            # conn = client.get_conn()
            conn = MongoDBObj.client
            maindb = conn[self.modelcfg.db_name]
            spatial_gfs = GridFS(maindb, DBTableNames.gridfs_spatial)
            # read file from mongodb
            if not spatial_gfs.exists(filename=dist_name):
                print('WARNING: %s does not exist, exporting scenario failed!' %
                      dist_name)
                return
            try:
                slpposf = maindb[DBTableNames.gridfs_spatial].files.find(
                    {'filename': dist_name}, no_cursor_timeout=True)[0]
            except (NetworkTimeout, Exception):
                # In case of unexpected raise
                # client.close()
                return

            ysize = int(slpposf['metadata'][RasterMetadata.nrows])
            xsize = int(slpposf['metadata'][RasterMetadata.ncols])
            xll = slpposf['metadata'][RasterMetadata.xll]
            yll = slpposf['metadata'][RasterMetadata.yll]
            cellsize = slpposf['metadata'][RasterMetadata.cellsize]
            nodata_value = slpposf['metadata'][RasterMetadata.nodata]
            srs = slpposf['metadata'][RasterMetadata.srs]
            if is_string(srs):
                srs = str(srs)
            srs = osr.GetUserInputAsWKT(srs)
            geotransform = [0] * 6
            geotransform[0] = xll - 0.5 * cellsize
            geotransform[1] = cellsize
            geotransform[3] = yll + (ysize - 0.5) * cellsize  # yMax
            geotransform[5] = -cellsize

            slppos_data = spatial_gfs.get(slpposf['_id'])
            total_len = xsize * ysize
            fmt = '%df' % (total_len, )
            slppos_data = unpack(fmt, slppos_data.read())
            slppos_data = numpy.reshape(slppos_data, (ysize, xsize))

            v_dict = dict()
            for unitidx, geneidx in viewitems(self.cfg.unit_to_gene):
                v_dict[unitidx] = self.gene_values[geneidx]
            # Deprecated and replaced by using self.cfg.unit_to_gene. 03/14/2019. ljzhu.
            # for idx, gene_v in enumerate(self.gene_values):
            #     v_dict[self.cfg.gene_to_unit[idx]] = gene_v

            for k, v in v_dict.items():
                slppos_data[slppos_data == k] = v
            if outpath is None:
                outpath = self.scenario_dir + os.path.sep + 'Scenario_%d.tif' % self.ID
            RasterUtilClass.write_gtiff_file(outpath, ysize, xsize,
                                             slppos_data, geotransform, srs,
                                             nodata_value)
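The six-element GDAL geotransform built above from a lower-left cell-center origin can be factored into a small helper; a minimal sketch assuming a north-up raster with square cells:

def geotransform_from_lower_left(xll, yll, cellsize, nrows):
    """Build a GDAL geotransform (north-up, square cells) from the
    lower-left cell-center coordinates, mirroring the construction above."""
    x_min = xll - 0.5 * cellsize            # west edge of the raster
    y_max = yll + (nrows - 0.5) * cellsize  # north edge of the raster
    return [x_min, cellsize, 0.0, y_max, 0.0, -cellsize]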
Esempio n. 46
0
StartTime = time.time()

# Read the initial craters data.
InitCrtsFileName = (os.getcwd() + "/InputData/" +
                    "CraterCndidatesPixels_2018-11-27.tif")

InitCrts = RasterUtilClass.read_raster(InitCrtsFileName)

# Apply morphological opening to remove noise pixels.
OpnCrtsData = RasterUtilClass.openning(InitCrtsFileName, 1)
Time = datetime.datetime.now()
OpnCrtsFileName = (os.getcwd() + "/OutputData/" +
                   "CraterCandidatesPixelsOpenning_" + str(Time.year) + "-" +
                   str(Time.month) + "-" + str(Time.day) + ".tif")
RasterUtilClass.write_gtiff_file(OpnCrtsFileName, InitCrts.nRows,
                                 InitCrts.nCols, OpnCrtsData, InitCrts.geotrans,
                                 InitCrts.srs, 32767, InitCrts.dataType)

# Record the time of the opening step.
OpnTime = time.time()
print("The time of opening is " + str(OpnTime - StartTime) + " s.")

# Run DBSCAN to distinguish individual crater candidates.
# Get the coordinates of the crater candidates' pixels after opening.
OpnCrtsPxls = np.where(OpnCrtsData == 1, 1, OpnCrtsData)
PxlsCoord = np.where(OpnCrtsPxls == 1)
PxlsCoord = np.array([PxlsCoord[0], PxlsCoord[1]]).swapaxes(0,1)
# Set the parameters of DBSCAN
eps = 5
min_samples = 10
# Run DBSCAN clustering.
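The clustering call itself is not part of this snippet; with scikit-learn (an assumption, since the import is not shown here), the DBSCAN run on the coordinates prepared above might look like:

from sklearn.cluster import DBSCAN

# Sketch only: cluster the (row, col) coordinates of the opened candidates.
db = DBSCAN(eps=eps, min_samples=min_samples).fit(PxlsCoord)
labels = db.labels_  # -1 marks noise; other labels mark individual craters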
Esempio n. 47
0
    def export_scenario_to_gtiff(self, outpath=None):
        """Export scenario to GTiff.

        TODO: Read Raster from MongoDB should be extracted to pygeoc.
        """
        if not self.export_sce_tif:
            return
        dist = self.bmps_info['DISTRIBUTION']
        dist_list = StringClass.split_string(dist, '|')
        if len(dist_list) >= 2 and dist_list[0] == 'RASTER':
            dist_name = '0_' + dist_list[1]  # prefix 0_ means the whole basin
            # read dist_name from MongoDB
            client = ConnectMongoDB(self.hostname, self.port)
            conn = client.get_conn()
            maindb = conn[self.main_db]
            spatial_gfs = GridFS(maindb, DBTableNames.gridfs_spatial)
            # read file from mongodb
            if not spatial_gfs.exists(filename=dist_name):
                print('WARNING: %s does not exist, exporting scenario failed!' % dist_name)
                return
            try:
                slpposf = maindb[DBTableNames.gridfs_spatial].files.find({'filename': dist_name},
                                                                         no_cursor_timeout=True)[0]
            except (NetworkTimeout, Exception):
                # In case of unexpected raise
                client.close()
                return

            ysize = int(slpposf['metadata'][RasterMetadata.nrows])
            xsize = int(slpposf['metadata'][RasterMetadata.ncols])
            xll = slpposf['metadata'][RasterMetadata.xll]
            yll = slpposf['metadata'][RasterMetadata.yll]
            cellsize = slpposf['metadata'][RasterMetadata.cellsize]
            nodata_value = slpposf['metadata'][RasterMetadata.nodata]
            srs = slpposf['metadata'][RasterMetadata.srs]
            if isinstance(srs, text_type):
                srs = str(srs)
            srs = osr.GetUserInputAsWKT(srs)
            geotransform = [0] * 6
            geotransform[0] = xll - 0.5 * cellsize
            geotransform[1] = cellsize
            geotransform[3] = yll + (ysize - 0.5) * cellsize  # yMax
            geotransform[5] = -cellsize

            slppos_data = spatial_gfs.get(slpposf['_id'])
            total_len = xsize * ysize
            fmt = '%df' % (total_len,)
            slppos_data = unpack(fmt, slppos_data.read())
            slppos_data = numpy.reshape(slppos_data, (ysize, xsize))

            v_dict = dict()
            for idx, gene_v in enumerate(self.gene_values):
                v_dict[self.gene_to_unit[idx]] = gene_v

            for k, v in v_dict.items():
                slppos_data[slppos_data == k] = v
            if outpath is None:
                outpath = self.scenario_dir + os.path.sep + 'Scenario_%d.tif' % self.ID
            RasterUtilClass.write_gtiff_file(outpath, ysize, xsize, slppos_data, geotransform,
                                             srs, nodata_value)
            client.close()