Example #1
            datetime_idx1 = header1.index('datetime')
            tz_idx1 = header1.index('tz_cd')
            height_idx1 = [
                i for i, x in enumerate(header1) if x.split('_')[-1] == '00065'
            ][0]
            line = line.replace('\n', '').split('\t')
            if line[height_idx1]:
                #convert height from feet to meters
                height1 = (bed_elev + float(line[height_idx1])) * 0.3048
                fo.write(line[datetime_idx1] + ' ' + line[tz_idx1] + '\t' +
                         str(height1) + '\n')
    return 'gauge/' + site_no1 + '_height.csv'


#read high-resolution DEM data
ds_dem0 = gdal.Open('gmted_mean075_reprojected.tif')
geom0 = ds_dem0.GetGeoTransform()
dem_arr0 = ds_dem0.ReadAsArray()
header0 = "ncols     %s\n" % dem_arr0.shape[1]
header0 += "nrows    %s\n" % dem_arr0.shape[0]
header0 += "xllcorner %.3f\n" % geom0[0]
header0 += "yllcorner %.3f\n" % (geom0[3] + geom0[5] * dem_arr0.shape[0])
header0 += "cellsize %.2f\n" % geom0[1]
header0 += "NODATA_value -9999\n"

with open('lower_mississippi_high_res.dem.asc', 'w') as fo:
    fo.write(header0)
    np.savetxt(fo, dem_arr0, fmt="%d")
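The hand-built header above follows the ESRI ASCII grid layout; GDAL's AAIGrid driver can write the same file in one call. A minimal sketch, assuming integer output as in fmt="%d" (output name chosen so the file written above is not clobbered):

# Equivalent export via GDAL's AAIGrid driver (sketch, not part of the original script)
gdal.Translate('lower_mississippi_high_res_gdal.dem.asc', ds_dem0,
               format='AAIGrid', creationOptions=['DECIMAL_PRECISION=0'])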

#read dem data
ds_dem = gdal.Open('gmted_mean075_reprojected_resampled.tif')
Example #2
def georeference_images_using_glitter(imageDataRows,
                                      imageDirectory,
                                      outputDirectory,
                                      droneParamsLogPath,
                                      cameraPitch,
                                      threshold=90,
                                      suffix="",
                                      cameraYaw=90):
    #Prepare the output directory
    if not path.exists(outputDirectory):
        os.makedirs(outputDirectory)

    #Read drone parameters log variables
    droneParams = pd.read_csv(droneParamsLogPath)

    #For each image, georeference and calculate longitude and latitude for the centre of each pixel.
    for imageDataRow in imageDataRows:
        #read drone state when image was taken
        dronePitch = imageDataRow["ATT_Pitch"]
        droneRoll = imageDataRow["ATT_Roll"]
        droneLonLat = (imageDataRow["GPS_Longitude"],
                       imageDataRow["GPS_Latitude"])
        droneTimeMS = imageDataRow["droneTime_MS"]
        droneAltitude = imageDataRow["GPS_Altitude"] / 1000.0
        #Convert from metres to kilometres
        droneNSats = imageDataRow["GPS_NSats"]
        droneHDop = imageDataRow["GPS_HDop"]
        imageFilename = imageDataRow["filename"]

        #calculate Yaw from sun glitter
        imagePath = path.join(imageDirectory, imageFilename)
        imageDate = imageDataRow["imageDate"].to_pydatetime().astimezone()
        droneYaw = yaw_from_glitter.calc_yaw_from_ellipse(imagePath,
                                                          imageDate,
                                                          droneLonLat[0],
                                                          droneLonLat[1],
                                                          threshold=threshold)

        #Some images may not have data; an altitude should always be present if anything is, so skip this image if altitude is missing.
        if not np.isfinite(droneAltitude):
            continue
        if droneYaw is None:  #Also ignore images where the yaw couldn't be determined from the glitter (e.g. when over land)
            print(
                "Stationary image (%s) skipped because no glitter could be detected."
                % imageDataRow["filename"])
            continue

        print("Processing image ", imageFilename)
        lons, lats, refPoints = do_georeference(droneLonLat,
                                                droneAltitude,
                                                droneRoll,
                                                dronePitch,
                                                droneYaw,
                                                cameraPitch,
                                                cameraYaw=cameraYaw)

        #Extract image metadata and append drone position orientation, image file
        imagePath = path.join(imageDirectory, imageFilename)
        imageDataset = gdal.Open(imagePath, gdal.GA_ReadOnly)
        metaData = imageDataset.GetMetadata()
        metaData["image_filename"] = imageFilename
        #Add filename (helps with debugging)
        metaData["drone_pitch"] = dronePitch
        metaData["drone_roll"] = droneRoll
        metaData["drone_yaw"] = droneYaw
        metaData["drone_longitude"] = droneLonLat[0]
        metaData["drone_latitude"] = droneLonLat[1]
        metaData["drone_altitude"] = droneAltitude
        metaData["drone_time_ms"] = droneTimeMS
        metaData["gps_n_satellites"] = droneNSats
        metaData["gps_HDop"] = droneHDop
        metaData["camera_pitch"] = cameraPitch
        metaData["camera_pitch"] = cameraYaw
        metaData["camera_horizontal_field_of_view"] = HORIZONTAL_FOV
        metaData["camera_aspect_ratio"] = ASPECT_RATIO
        metaData["num_pixels_x"] = N_PIXELS_X
        metaData["num_pixels_y"] = N_PIXELS_Y
        metaData["refpoint_centre_lon"] = refPoints[0][0]
        metaData["refpoint_centre_lat"] = refPoints[0][1]
        metaData["refpoint_topleft_lon"] = refPoints[1][0]
        metaData["refpoint_topleft_lat"] = refPoints[1][1]
        metaData["refpoint_topright_lon"] = refPoints[2][0]
        metaData["refpoint_topright_lat"] = refPoints[2][1]
        metaData["refpoint_bottomleft_lon"] = refPoints[3][0]
        metaData["refpoint_bottomleft_lat"] = refPoints[3][1]
        metaData["refpoint_bottomright_lon"] = refPoints[4][0]
        metaData["refpoint_bottomright_lat"] = refPoints[4][1]

        #Append all the drone parameters.
        for irow in range(len(droneParams)):
            key, value = droneParams.iloc[irow]["Name"], droneParams.iloc[
                irow]["Value"]
            metaData["drone_param_" + key] = value

        #Extract image data
        imageData = imageDataset.GetRasterBand(1).ReadAsArray()
        #First band is brightest if NIR

        outputPath = path.join(outputDirectory, imageFilename + suffix + ".nc")
        write_netcdf(outputPath, lons, lats, metaData, imageData)

        georeferencedImagePathTemplate = Template(
            path.join(
                outputDirectory,
                metaData["image_filename"][:-4] + suffix + ".${EXTENSION}"))
        do_image_geotransform(
            path.join(imageDirectory, metaData["image_filename"]), metaData,
            georeferencedImagePathTemplate)
Example #3
	outColInt='OutClass_'+str(i) # define output class column
	print('')
	print('......processing: ' + outColInt)
	print('')
	classesIntCol = 'ClassInt'
	rsgislib.classification.classratutils.balanceSampleTrainingRandom(outputClumps, classesIntCol, 'classesIntColBal', 50, 5000) # rebalance the training data
	classesIntCol='classesIntColBal'
	# run the classifier
	classratutils.classifyWithinRAT(outputClumps, classesIntCol, classesNameCol, variables, classifier=classifier, classColours=classColours,preProcessor=MaxAbsScaler(),outColInt=outColInt)

###########################################################################################
# Read all results from RAT and extract mode, providing final result
# Also, mask out nan values from the classification where vvMin==-999

inRatFile = outputClumps
ratDataset = gdal.Open(inRatFile, gdal.GA_Update) # Open RAT

vvMin_val = numpy.asarray(rat.readColumn(ratDataset, 'VVMin')) # read in VV minimum column

guf_val = numpy.asarray(rat.readColumn(ratDataset, 'gufMax')) # read in urban footprint column

# define column names for output classifications
x_col_names = []
for i in runs:
	# define output class column
	col_name='OutClass_'+str(i)
	x_col_names.append(col_name)
Example #4
    print(max_prob, Max)
    for m in range(0,img.shape[0]):
        for n in range(0,img.shape[1]):
            if (img[m,n]>Max):
                img_new[m,n]=255
            elif(img[m,n]<Min):
                img_new[m,n]=0
            else:
                img_new[m,n]=(img[m,n]-Min)/(Max-Min)*255
    return img_new
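The per-pixel double loop above can be collapsed into one vectorized expression with identical clipping semantics (values above Max map to 255, below Min to 0); a sketch assuming img is a NumPy array:

# Vectorized equivalent of the loop in the function above
img_new = np.clip((img - Min) / (Max - Min), 0.0, 1.0) * 255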

if __name__ == "__main__":
    rasterOrigin = (-123.25745,45.43013)
    for i in range(1,66):
        newMul='C:/Users/USER-1/Documents/Peijuan/article/fromHocam/psgan/PSGan-master/data/psgan/data/WV2/test_real/datalist/%d_pan.tif'%i
        dataset=gdal.Open('C:/Users/USER-1/Documents/Peijuan/article/fromHocam/psgan/PSGan-master/data/psgan/data/WV2/test_real/MS/%d_pan.tif'%i)
        
        # newMul='C:/Users/USER-1/Documents/Peijuan/article/fromHocam/psgan/PSGan-master/data/psgan/data/test_real/%d_pan.tif'%i
        # dataset=gdal.Open('C:/Users/USER-1/Documents/Peijuan/article/fromHocam/psgan/PSGan-master/data/pansharpening/dataset/test_real/%d_pan_t.tif'%i)
        
        #newMul='C:/Users/USER-1/Documents/Peijuan/article/fromHocam/psgan/PSGan-master/data/psgan/data/%d_pan.tif'%i
        #dataset=gdal.Open('C:/Users/USER-1/Documents/Peijuan/article/fromHocam/psgan/PSGan-master/data/pansharpening/dataset/%d_pan.tif'%i)

        img=dataset.ReadAsArray()
    
        img=img.transpose(1,0)
        
        #  downsampling
Example #5
REFLECTANCE_ADD_BAND_8 = -0.100000
REFLECTANCE_ADD_BAND_9 = -0.100000

CORNER_UL_LAT_PRODUCT = 29.91127
CORNER_UL_LON_PRODUCT = -17.55620
CORNER_UR_LAT_PRODUCT = 29.93596
CORNER_UR_LON_PRODUCT = -15.16061
CORNER_LL_LAT_PRODUCT = 27.78498
CORNER_LL_LON_PRODUCT = -17.50470
CORNER_LR_LAT_PRODUCT = 27.80760
CORNER_LR_LON_PRODUCT = -15.15737


print "Opening B4..."
# gdalwarp LC82070402017107LGN00_B5.TIF LC82070402017107LGN00_B5_longlat.TIF -t_srs "+proj=longlat +ellps=WGS84"
ds = gdal.Open('../LandsatData/LC82070402017107LGN00/LC82070402017107LGN00_B4_longlat.TIF')


bandR = ds.GetRasterBand(1)
dataR = bandR.ReadAsArray()
dataR = REFLECTANCE_MULT_BAND_4*dataR + REFLECTANCE_ADD_BAND_4

ds = None

print "Opening B5..."
ds = gdal.Open('../LandsatData/LC82070402017107LGN00/LC82070402017107LGN00_B5_longlat.TIF')
bandNIR = ds.GetRasterBand(1)
dataNIR = bandNIR.ReadAsArray()
dataNIR = REFLECTANCE_MULT_BAND_5*dataNIR + REFLECTANCE_ADD_BAND_5
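With red (B4) and NIR (B5) scaled to reflectance, the natural next step is an NDVI; a hedged sketch (this step is not shown in the original excerpt):

# NDVI from the scaled arrays, guarding against division by zero
import numpy as np
ndvi = np.where((dataNIR + dataR) == 0, 0.0, (dataNIR - dataR) / (dataNIR + dataR))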

Example #6
#ones = np.ones(np.shape(d[0]))
#tm = np.tensordot(t, ones, axes=0)
#need time matrix, although the problem is that this takes too long given the memory needs

#trapz_out = sp.cumtrapz(d, tm, axis = 0, initial= 0)

#try to perform this iteratively? use the speed up with numba? for now lets just time 100x100
#remembering that all we really care about is the index of when the cumtrapz = 1 so first calculate the
#cumtrapz for the particular cell, then save the index in a matrix

#shp = np.shape(d[0])
#holder = np.zeros(shp)

#apply a mask
mask_file = 'E:\\WBP_model\\output\\prob\\WBP2010_binary.tif'
mask = gdal.Open(mask_file).ReadAsArray()
bool_mask = np.where(mask == 0, True, False)


#masked_d = np.ma.array(d, mask=d*mask[np.newaxis,:,:])
def solver(d, t):
    z, m, n = d.shape
    result = np.zeros((m, n)).astype(int)
    for i in range(m):
        for j in range(n):
            trapz_out = sp.cumtrapz(d[:, i, j] * 24 / 1e6, t, initial=0)
            comp_index = np.argmax(trapz_out > 1)
            result[i, j] = int(comp_index)
    return result
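Since cumtrapz accepts an axis argument with a shared 1-D time vector, the per-cell loop can be collapsed without building the large time matrix the comments above worry about. A sketch with the same semantics (index of the first time step where the cumulative integral exceeds 1):

# Vectorized alternative to solver() (sketch; d has shape (z, m, n), t has length z)
def solver_vectorized(d, t):
    trapz_all = sp.cumtrapz(d * 24 / 1e6, t, axis=0, initial=0)
    return np.argmax(trapz_all > 1, axis=0).astype(int)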

Example #7
def Nearest_Interpolate(Dir_in, Startdate, Enddate, Dir_out=None):
    """
    This function calculates monthly tiff files from the 16-daily tiff files (it computes a weighted average).

    Parameters
    ----------
    Dir_in : str
        Path to the input data
    Startdate : str
        Contains the start date of the model 'yyyy-mm-dd'    
    Enddate : str
        Contains the end date of the model 'yyyy-mm-dd' 
    Dir_out : str
        Path to the output data, default is same as Dir_in

    """
    # import WA+ modules
    import wa.General.data_conversions as DC
    import wa.General.raster_conversions as RC

    # Change working directory
    os.chdir(Dir_in)

    # Find all 16-daily files
    files = glob.glob('*16-daily*.tif')

    # Create array with filename and keys (DOY and year) of all the 16-daily files
    i = 0
    DOY_Year = np.zeros([len(files), 3])
    for File in files:

        # Get the time characteristics from the filename
        year = File.split('.')[-4][-4:]
        month = File.split('.')[-3]
        day = File.split('.')[-2]

        # Create pandas Timestamp
        date_file = '%s-%02d-%02d' % (year, int(month), int(day))
        Datum = pd.Timestamp(date_file)

        # Get day of year
        DOY = Datum.strftime('%j')

        # Save data in array
        DOY_Year[i, 0] = i
        DOY_Year[i, 1] = DOY
        DOY_Year[i, 2] = year

        # Advance the file index
        i += 1

    # Check enddate:
    Enddate_split = Enddate.split('-')
    month_range = calendar.monthrange(int(Enddate_split[0]),
                                      int(Enddate_split[1]))[1]
    Enddate = '%d-%02d-%02d' % (int(Enddate_split[0]), int(
        Enddate_split[1]), month_range)

    # Check startdate:
    Startdate_split = Startdate.split('-')
    Startdate = '%d-%02d-01' % (int(Startdate_split[0]), int(
        Startdate_split[1]))

    # Define end and start date
    Dates = pd.date_range(Startdate, Enddate, freq='MS')
    DatesEnd = pd.date_range(Startdate, Enddate, freq='M')

    # Get array information and define projection
    geo_out, proj, size_X, size_Y = RC.Open_array_info(files[0])
    if int(proj.split('"')[-2]) == 4326:
        proj = "WGS84"

    # Get the No Data Value
    dest = gdal.Open(files[0])
    NDV = dest.GetRasterBand(1).GetNoDataValue()

    # Loop over months and create monthly tiff files
    i = 0
    for date in Dates:
        # Get Start and end DOY of the current month
        DOY_month_start = date.strftime('%j')
        DOY_month_end = DatesEnd[i].strftime('%j')

        # Search for the files that are between those DOYs
        year = date.year
        DOYs = DOY_Year[DOY_Year[:, 2] == year]
        DOYs_oneMonth = DOYs[np.logical_and(
            (DOYs[:, 1] + 16) >= int(DOY_month_start),
            DOYs[:, 1] <= int(DOY_month_end))]

        # Create empty arrays
        Monthly = np.zeros([size_Y, size_X])
        Weight_tot = np.zeros([size_Y, size_X])
        Data_one_month = np.ones([size_Y, size_X]) * np.nan

        # Loop over the files that are within the DOYs
        for EightDays in DOYs_oneMonth[:, 0]:

            # Calculate the amount of days in this month of each file
            Weight = np.ones([size_Y, size_X])

            # For start of month
            if EightDays == DOYs_oneMonth[:, 0][0]:
                Weight = Weight * int(DOYs_oneMonth[:, 1][0] + 16 -
                                      int(DOY_month_start))

            # For end of month
            elif EightDays == DOYs_oneMonth[:, 0][-1]:
                Weight = Weight * (int(DOY_month_end) -
                                   DOYs_oneMonth[:, 1][-1] + 1)

            # For the middle of the month
            else:
                Weight = Weight * 16

            # Open the array of current file
            input_name = os.path.join(Dir_in, files[int(EightDays)])
            Data = RC.Open_tiff_array(input_name)

            # Remove NDV
            Weight[Data == NDV] = 0
            Data[Data == NDV] = np.nan

            # Multiply weight time data (per day)
            Data = Data * Weight

            # Calculate the total weight and data
            Weight_tot += Weight
            Monthly[~np.isnan(Data)] += Data[~np.isnan(Data)]

        # Go to next month
        i += 1

        # Calculate the average
        Data_one_month[Weight_tot != 0.] = Monthly[
            Weight_tot != 0.] / Weight_tot[Weight_tot != 0.]

        # Define output directory
        if Dir_out is None:
            Dir_out = Dir_in

        # Define output name
        output_name = os.path.join(
            Dir_out, files[int(EightDays)].replace('16-daily', 'monthly'))
        output_name = output_name[:-6] + '01.tif'

        # Save tiff file
        DC.Save_as_tiff(output_name, Data_one_month, geo_out, proj)

    return
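A minimal call sketch (directory and dates are hypothetical):

# Average the 16-daily composites in a folder into monthly tiffs for 2016
Nearest_Interpolate(r'C:\data\NDVI_16daily', '2016-01-01', '2016-12-31')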
Example #8
    print "+geocode.pl " + geomapf + " " + outfile + " " + geooutfile
    r = subprocess.call("geocode.pl " + geomapf + " " + outfile + " " +
                        geooutfile,
                        shell=True)
    if r != 0:
        raise Exception("geocode.pl failed")

    print "+gdal_translate -ot Float32 -b 2 -co COMPRESS=DEFLATE -co COMPRESS=PREDICTOR " + geooutfile + " " + tiffoutfile
    r = subprocess.call(
        "gdal_translate -ot Float32 -b 2 -co COMPRESS=DEFLATE -co COMPRESS=PREDICTOR "
        + geooutfile + " " + tiffoutfile,
        shell=True)
    if r != 0:
        raise Exception("gdal_translate failed")

    ds = gdal.Open(geooutfile, gdal.GA_ReadOnly)
    ds_band2 = ds.GetRasterBand(2)
    los = ds_band2.ReadAsArray(0, 0, ds.RasterXSize, ds.RasterYSize)
    print('Nlign:{}, Ncol:{}, geodate:{}:'.format(ds.RasterYSize,
                                                  ds.RasterXSize, l))
    nlign, ncol = ds.RasterYSize, ds.RasterXSize
    # if l==0:
    #     geomaps=np.zeros((nlign,ncol,N))

    # geomaps[:,:,l] = los

    del ds, ds_band2

fid = open('lect_geo.in', 'w')
np.savetxt(fid, (nlign, ncol, N), fmt='%6i', newline='\t')
fid.close()
Example #9
def plot(pred_img, dictionary):
    #open dictionary with predictions
    with open(dictionary) as json_file:
        dictTiles = json.load(json_file)
    # for the worldpop file, open the dictionary holding worldpop values
    with open(
            "/exports/csce/datastore/geos/groups/MSCGIS/s1937352/Eddiebackup/dictionary_pWorldPop.json"
    ) as json_file:
        dictTileswp = json.load(json_file)

    #open boundsfile (Raster Uganda) and get the extent of the raster as well as its columns and rows
    ds = gdal.Open(boundsfileP)
    minx, miny, maxx, maxy, rY, rX = get_bounds(ds)
    print(minx, miny, maxx, maxy, rY, rX)
    band = ds.GetRasterBand(1)
    uganda = band.ReadAsArray()
    total = 0
    columns = 0
    for x in range(minx, maxx, 1000):
        columns += 1
        for y in range(miny, maxy, 1000):
            total += 1
    rows = total / columns
    print(rows)
    #create empty list
    ugandapopList = []
    print(total)
    print(columns)

    with tqdm.tqdm(total=total) as bar:
        bar.set_description(f"Write Prediction: ")
        # loop over the extent in 1000 m steps
        for y in range(miny, maxy, 1000):
            for x in range(minx, maxx, 1000):
                #check if the boundsfile holds data
                v = max(
                    0,
                    int(
                        gdal.Translate('',
                                       ds,
                                       projWin=[x, y, x + 1000, y - 1000],
                                       format='VRT').ReadAsArray()[0, 0]))

                if v > 0:
                    #create the key of the dictionary with the extent
                    xystring = f'{x}_{y}'
                    #check if the key is in the dictionary and if a prediction value is saved
                    if xystring in dictTiles:
                        if dictTiles[xystring].get("pred") != None:
                            # get the prediction value
                            pred = dictTiles[xystring].get("pred")
                            #pred = dictTileswp[xystring].get("pred")

                            #append prediction value to list
                            ugandapopList.append(pred)
                        # else append np.nan to the list to get the same extent of the boundsfile
                        else:
                            ugandapopList.append(np.nan)
                    else:
                        ugandapopList.append(np.nan)
                else:
                    ugandapopList.append(np.nan)
                bar.update()

    print(len(ugandapopList))
    #reshape list to array with specified rows and columns
    ugandaarray = np.array(ugandapopList)
    ugandaarray = ugandaarray.reshape(int(rows), int(columns))
    ugandaarray = np.flip(ugandaarray, axis=0)
    #write array to raster
    array2raster(boundsfileP, pred_img, ugandaarray, int(rows), int(columns))
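The nested counting loop near the top of plot() only derives the grid dimensions; they can be computed directly from the extent. A sketch with the same 1000 m step:

# Direct computation of the iteration grid size
columns = len(range(minx, maxx, 1000))
rows = len(range(miny, maxy, 1000))
total = rows * columns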
Example #10
def load_hdf4_array(datafile, meta, band_specs=None):
    '''Return an ElmStore where each subdataset is a DataArray

    Parameters:
        :datafile: filename
        :meta:     meta from earthio.load_hdf4_meta
        :band_specs: list of earthio.BandSpec objects,
                    defaulting to reading all subdatasets
                    as bands

    Returns:
        :Elmstore: ElmStore of the hdf4 data
    '''
    from earthio import ElmStore
    from earthio.metadata_selection import match_meta
    logger.debug('load_hdf4_array: {}'.format(datafile))
    f = gdal.Open(datafile, GA_ReadOnly)

    sds = meta['sub_datasets']
    band_metas = meta['band_meta']
    band_order_info = []
    if band_specs:
        for band_meta, s in zip(band_metas, sds):
            for idx, band_spec in enumerate(band_specs):
                if match_meta(band_meta, band_spec):
                    band_order_info.append((idx, band_meta, s, band_spec))
                    break

        band_order_info.sort(key=lambda x: x[0])
        if not len(band_order_info):
            raise ValueError('No matching bands with '
                             'band_specs {}'.format(band_specs))
    else:
        band_order_info = [(idx, band_meta, s, 'band_{}'.format(idx))
                           for idx, (band_meta,
                                     s) in enumerate(zip(band_metas, sds))]
    native_dims = ('y', 'x')
    elm_store_data = OrderedDict()

    band_order = []
    for _, band_meta, s, band_spec in band_order_info:
        attrs = copy.deepcopy(meta)
        attrs.update(copy.deepcopy(band_meta))
        if isinstance(band_spec, BandSpec):
            name = band_spec.name
            reader_kwargs = {
                k: getattr(band_spec, k)
                for k in READ_ARRAY_KWARGS if getattr(band_spec, k)
            }
            geo_transform = take_geo_transform_from_meta(band_spec, **attrs)
        else:
            reader_kwargs = {}
            name = band_spec
            geo_transform = None
        reader_kwargs = window_to_gdal_read_kwargs(**reader_kwargs)
        dat0 = gdal.Open(s[0], GA_ReadOnly)
        band_meta.update(reader_kwargs)
        raster = raster_as_2d(dat0.ReadAsArray(**reader_kwargs))
        if geo_transform is None:
            geo_transform = dat0.GetGeoTransform()
        attrs['geo_transform'] = geo_transform
        if hasattr(band_spec, 'stored_coords_order'):
            if band_spec.stored_coords_order[0] == 'y':
                rows, cols = raster.shape
            else:
                rows, cols = raster.T.shape
        else:
            rows, cols = raster.shape
        coord_x, coord_y = geotransform_to_coords(cols, rows, geo_transform)

        canvas = Canvas(geo_transform=geo_transform,
                        buf_xsize=cols,
                        buf_ysize=rows,
                        dims=native_dims,
                        ravel_order='C',
                        bounds=geotransform_to_bounds(cols, rows,
                                                      geo_transform))
        attrs['canvas'] = canvas
        elm_store_data[name] = xr.DataArray(raster,
                                            coords=[('y', coord_y),
                                                    ('x', coord_x)],
                                            dims=native_dims,
                                            attrs=attrs)

        band_order.append(name)
    del dat0
    attrs = copy.deepcopy(attrs)
    attrs['band_order'] = band_order
    gc.collect()
    return ElmStore(elm_store_data, attrs=attrs)
Example #11
                metedata = glob.glob(os.path.join(outname, "*.xml"))[0]

            elif GFType == 'PMS':
                # tiffFile = glob.glob(outname + "/*mss*.tiff")[0]
                # metedata = glob.glob(outname+"/*mss*.xml")[0]
                tiffFile = glob.glob(os.path.join(outname, "*MSS*.tiff"))[0]
                metedata = glob.glob(os.path.join(outname, "*MSS*.xml"))[0]

            try:
                os.mkdir(atcfiles)
            except Exception as e:
                pass
            print(filename + "解压缩完成")

            try:
                IDataSet = gdal.Open(tiffFile)
            except Exception as e:
                print("文件%S打开失败" % tiffFile)

            cols = IDataSet.RasterXSize
            rows = IDataSet.RasterYSize

            # SatelliteID = filename[0:3]
            # SensorID = filename[4:8]
            # Year = filename[22:26]
            SatelliteID = filename_split[0]
            SensorID = filename_split[1]
            Year = filename_split[4][:4]
            ImageType = os.path.basename(tiffFile)[-9:-6]

            Block(IDataSet)
Example #12
        self.raiz.destroy()

    def escreveNome(self):
        global nome
        nome = askopenfilename()
        Label(self.raiz, text=nome).grid(row=12, column=1, sticky=W, pady=5)


inst1 = Tk()
Griding(inst1)
inst1.mainloop()

driver = gdal.GetDriverByName('GTiff')
driver.Register()

entrada = gdal.Open(nomeFile, GA_ReadOnly)
if entrada is None:
    print('Error opening file: ' + nomeFile)
    sys.exit(1)

linhas = entrada.RasterYSize
colunas = entrada.RasterXSize
NBandas = entrada.RasterCount
driverEntrada = entrada.GetDriver()

print('linhas:', linhas, ' colunas:', colunas, 'bandas:', NBandas, 'driver:', driverEntrada.ShortName)

#----------

pi = math.pi
#cosZ = math.cos((90 - 56.98100422)*pi/180) # taken from the image metadata - SUN_ELEVATION
Example #13
## transfile = "../GPS/GPS_all_clean_spec_segments_edited_raster.tif"
srfile = "/scratch/dknapp4/Western_Hawaii/Moorea/20190604_moorea_tile_sr.tif"
rbfile = "/scratch/dknapp4/Western_Hawaii/Moorea/20190604_moorea_tile_sr_rb.tif"
depthfile = "/scratch/dknapp4/Western_Hawaii/Moorea/20190604_moorea_tile_sr_depth.tif"
ptsfile = "/scratch/dknapp4/ASD/GPS/GPS_all_clean_asd_points.shp"
## csvfile = "../Spectroscopy/Moorea_all_points_asd_bgr.csv"
outfile = "Moorea_dove_rb_sr_transects_20190710.csv"
asdcsvfile = "/scratch/dknapp4/ASD/Spectroscopy/Moorea_Dove_BGR_from_ASD.csv"

asddata = np.genfromtxt(asdcsvfile,
                        dtype=[('fname', 'S43'), ('blue', 'f8'),
                               ('green', 'f8'), ('red', 'f8')],
                        delimiter=',',
                        autostrip=True)

RbDS = gdal.Open(rbfile, gdal.GA_ReadOnly)
Rbblue = RbDS.GetRasterBand(1).ReadAsArray()
Rbgreen = RbDS.GetRasterBand(2).ReadAsArray()
Rbred = RbDS.GetRasterBand(3).ReadAsArray()

SrDS = gdal.Open(srfile, gdal.GA_ReadOnly)
Srblue = SrDS.GetRasterBand(1).ReadAsArray()
Srgreen = SrDS.GetRasterBand(2).ReadAsArray()
Srred = SrDS.GetRasterBand(3).ReadAsArray()
Srnir = SrDS.GetRasterBand(4).ReadAsArray()

depDS = gdal.Open(depthfile, gdal.GA_ReadOnly)
depth = depDS.GetRasterBand(1).ReadAsArray()

gt = SrDS.GetGeoTransform()
gt2 = RbDS.GetGeoTransform()
Example #14
File: s1_otsu.py  Project: dalgeun/water
otsu_img = img.copy()  # copy so the original image is not modified in place
otsu_img[~binary] = np.nan
otsu_img[binary] = 1

plt.imshow(img)
plt.imshow(otsu_img)
plt.imshow(binary)

## compute water area
water_pixels = np.count_nonzero(otsu_img == 1)
water_area = water_pixels * 0.0001
print("water area is ", water_area, "km^2")

## output image
outfile = "d:\\dg_work\\water\\water_area_bi_otsu_log.tif"
ds = gdal.Open(path_to_img_data)
band = ds.GetRasterBand(1)
arr = band.ReadAsArray()
rows, cols = arr.shape

driver = gdal.GetDriverByName("GTiff")
outdata = driver.Create(outfile, cols, rows, 1, gdal.GDT_Float32)  # Create takes (xsize, ysize)
outdata.SetGeoTransform(ds.GetGeoTransform())  # sets same geotransform as input
outdata.SetProjection(ds.GetProjection())  # sets same projection as input
outdata.GetRasterBand(1).WriteArray(otsu_img)
# outdata.GetRasterBand(1).SetNoDataValue()  # if you want these values transparent
outdata.FlushCache()  # saves to disk
outdata = None
band = None
ds = None
Example #15
        for tif_file in conus_pf_1k_tifs:
            os.system('iget -K ' + avra_path_tif + tif_file + ' .')

    if not os.path.isfile(region_shp):
        print(region_shp + ' does not exist... downloading from avra')
        auth = os.system('iinit')
        if auth != 0:
            print('Authentication failed...exit')
            sys.exit()

        for shp_component_file in region_shps:
            os.system('iget -K ' + avra_path_shp + shp_component_file + ' .')

    # read domain raster
    ds_ref = gdal.Open(conus_pf_1k_mask)
    arr_ref = ds_ref.ReadAsArray()
    geom_ref = ds_ref.GetGeoTransform()

    # makes an empty spatial ref object
    inSRS_converter = osr.SpatialReference()

    # populates the spatial ref object with our WKT SRS
    inSRS_converter.ImportFromWkt(ds_ref.GetProjection())

    # Exports an SRS ref as a Proj4 string usable by PyProj
    inSRS_forPyProj = inSRS_converter.ExportToProj4()
    inProj = Proj(inSRS_forPyProj)
    outProj = Proj(init='epsg:4326')

    # rasterize region shapefile
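The excerpt ends before the rasterization itself; a hedged sketch of that step with gdal.Rasterize, assuming the 1 km grid of the domain raster and a burn value of 1:

# Burn the region shapefile onto a 1 km grid (sketch; burn value assumed)
region_mask_ds = gdal.Rasterize('region_mask.tif', region_shp, format='GTiff',
                                xRes=1000, yRes=1000,
                                burnValues=[1], outputType=gdal.GDT_Byte)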
Example #16
def reproject(path,
              crs="EPSG:4326",
              out_path=os.path.join(path_apls, 'img.tif')):
    src = gdal.Open(path)
    gdal.Warp(out_path, src, dstSRS=crs)
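A usage sketch (input path hypothetical; out_path defaults to img.tif under path_apls):

# Warp a GeoTIFF to WGS84
reproject('scene_utm.tif', crs='EPSG:4326', out_path='scene_wgs84.tif')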
Example #17
def main():
    # Set Up Arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("input_dir",
                        help='''directory path containing date directories of 
                        images to be processed''')
    parser.add_argument("image_type",
                        type=str,
                        choices=["srgb", "wv02_ms", "pan"],
                        help="image type: 'srgb', 'wv02_ms', 'pan'")
    parser.add_argument("training_dataset", help="training data file")
    parser.add_argument("--training_label",
                        type=str,
                        default=None,
                        help="name of training classification list")
    parser.add_argument("-o",
                        "--output_dir",
                        type=str,
                        default="default",
                        help="directory to place output results.")
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="display text information and progress")
    parser.add_argument("-c",
                        "--stretch",
                        type=str,
                        choices=["hist", "pansh", "none"],
                        default='hist',
                        help='''Apply image correction/stretch to input: \n
                               hist: Histogram stretch \n
                               pansh: Orthorectify / Pansharpen for MS WV images \n
                               none: No correction''')
    parser.add_argument("--pgc_script",
                        type=str,
                        default=None,
                        help="Path for the pansharpening script if needed")
    parser.add_argument("-t",
                        "--threads",
                        type=int,
                        default=1,
                        help="Number of subprocesses to start")

    # Parse Arguments
    args = parser.parse_args()

    # System filepath that contains the directories or files for batch processing
    user_input = args.input_dir
    if os.path.isdir(user_input):
        src_dir = user_input
        src_file = ''
    elif os.path.isfile(user_input):
        src_dir, src_file = os.path.split(user_input)
    else:
        raise IOError('Invalid input')
    # Image type, choices are 'srgb', 'pan', or 'wv02_ms'
    image_type = args.image_type
    # File with the training data
    tds_file = args.training_dataset
    # Default tds label is the image type
    if args.training_label is None:
        tds_label = image_type
    else:
        tds_label = args.training_label
    # Default output directory
    #   (if not provided this gets set when the tasks are created)
    dst_dir = args.output_dir
    threads = args.threads
    verbose = args.verbose
    stretch = args.stretch

    # Use the given pansh script path, otherwise search for the correct folder
    #   in the same directory as this script.
    if args.pgc_script:
        pansh_script_path = args.pgc_script
    else:
        current_path = os.path.dirname(os.path.realpath(__file__))
        pansh_script_path = os.path.join(
            os.path.split(current_path)[0], 'imagery_utils')

    # For Ames OIB Processing:
    # White balance flag (To add as user option in future, presently only used on oib imagery)
    if image_type == 'srgb':
        assess_quality = True
        white_balance = True
    else:
        assess_quality = False
        white_balance = False
    # Set a default quality score until this value is calculated
    quality_score = 1.

    # Prepare a list of images to be processed based on the user input
    #   list of task objects based on the files in the input directory.
    #   Each task is an image to process, and has a subtask for each split
    #   of that image.
    task_list = utils.create_task_list(os.path.join(src_dir, src_file),
                                       dst_dir)

    for task in task_list:

        # ASP: Restrict processing to the frame range
        # try:
        #     frameNum = getFrameNumberFromFilename(file)
        # except Exception, e:
        #     continue
        # if (frameNum < args.min_frame) or (frameNum > args.max_frame):
        #     continue

        # Skip this task if it is already marked as complete
        if task.is_complete():
            continue

        # Make the output directory if it doesn't already exist
        if not os.path.isdir(task.get_dst_dir()):
            os.makedirs(task.get_dst_dir())

        # Run Ortho/Pan scripts if necessary
        if stretch == 'pansh':
            if verbose:
                print("Orthorectifying and Pansharpening image...")

            full_image_name = os.path.join(task.get_src_dir(), task.get_id())
            pansh_filepath = pp.run_pgc_pansharpen(pansh_script_path,
                                                   full_image_name,
                                                   task.get_dst_dir())

            # Set the image name/dir to the pan output name/dir
            task.set_src_dir(task.get_dst_dir())
            task.change_id(pansh_filepath)

        # Open the image dataset with gdal
        full_image_name = os.path.join(task.get_src_dir(), task.get_id())
        if os.path.isfile(full_image_name):
            if verbose:
                print("Loading image {}...".format(task.get_id()))
            src_ds = gdal.Open(full_image_name, gdal.GA_ReadOnly)
        else:
            print("File not found: {}".format(full_image_name))
            continue

        # Read metadata to get image date and keep only the metadata we need
        metadata = src_ds.GetMetadata()
        image_date = pp.parse_metadata(metadata, image_type)
        metadata = [image_type, image_date]

        # For processing icebridge imagery:
        if image_type == 'srgb':
            if image_date <= 150:
                tds_label = 'spring'
                white_balance = True
            else:
                tds_label = 'summer'

        # Load Training Data
        tds = utils.load_tds(tds_file, tds_label, image_type)
        # tds = utils.load_tds(tds_file, 'srgb', image_type)

        if verbose:
            print("Size of training set: {}".format(len(tds[1])))

        # Set necessary parameters for reading image 1 block at a time
        x_dim = src_ds.RasterXSize
        y_dim = src_ds.RasterYSize
        desired_block_size = 6400

        src_dtype = gdal.GetDataTypeSize(src_ds.GetRasterBand(1).DataType)
        # Analyze input image histogram (if applying correction)
        if stretch == 'hist':
            stretch_params = pp.histogram_threshold(src_ds, src_dtype)
        else:  # stretch == 'none':
            # WV Images are actually 11bit stored in 16bit files
            if src_dtype > 12:
                src_dtype = 11
            stretch_params = [
                1, 2**src_dtype - 1,
                [2**src_dtype - 1 for _ in range(src_ds.RasterCount)],
                [1 for _ in range(src_ds.RasterCount)]
            ]

        # Create a blank output image dataset
        # Save the classified image output as a geotiff
        fileformat = "GTiff"
        image_name_noext = os.path.splitext(task.get_id())[0]
        dst_filename = os.path.join(task.get_dst_dir(),
                                    image_name_noext + '_classified.tif')
        driver = gdal.GetDriverByName(fileformat)
        dst_ds = driver.Create(dst_filename,
                               xsize=x_dim,
                               ysize=y_dim,
                               bands=1,
                               eType=gdal.GDT_Byte,
                               options=["TILED=YES", "COMPRESS=LZW"])

        # Transfer the metadata from input image
        # dst_ds.SetMetadata(src_ds.GetMetadata())
        # Transfer the input projection and geotransform if they are different than the default
        if src_ds.GetGeoTransform() != (0, 1, 0, 0, 0, 1):
            dst_ds.SetGeoTransform(
                src_ds.GetGeoTransform())  # sets same geotransform as input
        if src_ds.GetProjection() != '':
            dst_ds.SetProjection(
                src_ds.GetProjection())  # sets same projection as input

        # Find the appropriate image block read size
        block_size_x, block_size_y = utils.find_blocksize(
            x_dim, y_dim, desired_block_size)
        if verbose:
            print("block size: [{},{}]".format(block_size_x, block_size_y))

        # close the source dataset so that it can be loaded by each thread separately
        src_ds = None
        lock = RLock()
        block_queue, qsize = construct_block_queue(block_size_x, block_size_y,
                                                   x_dim, y_dim)
        dst_queue = Queue()

        # Display a progress bar
        if verbose:
            try:
                from tqdm import tqdm
            except ImportError:
                print("Install tqdm to display progress bar.")
                verbose = False
            else:
                pbar = tqdm(total=qsize, unit='block')

        # Set an empty value for the pixel counter
        pixel_counts = [0, 0, 0, 0, 0]

        NUMBER_OF_PROCESSES = threads
        block_procs = [
            Process(target=process_block_queue,
                    args=(lock, block_queue, dst_queue, full_image_name,
                          assess_quality, stretch_params, white_balance, tds,
                          metadata)) for _ in range(NUMBER_OF_PROCESSES)
        ]

        for proc in block_procs:
            # Add a stop command to the end of the queue for each of the
            #   processes started. This will signal for the process to stop.
            block_queue.put('STOP')
            # Start the process
            proc.start()

        # Collect data from processes as they complete tasks
        finished_threads = 0
        while finished_threads < NUMBER_OF_PROCESSES:

            if not dst_queue.empty():
                val = dst_queue.get()
                if val is None:
                    finished_threads += 1
                else:
                    # Keep only the lowest quality score found
                    quality_score_block = val[0]
                    if quality_score_block < quality_score:
                        quality_score = quality_score_block
                    # Add the pixel counts to the master list
                    pixel_counts_block = val[1]
                    for i in range(len(pixel_counts)):
                        pixel_counts[i] += pixel_counts_block[i]
                    # Write image data to output dataset
                    x = val[2]
                    y = val[3]
                    classified_block = val[4]
                    dst_ds.GetRasterBand(1).WriteArray(classified_block,
                                                       xoff=x,
                                                       yoff=y)
                    dst_ds.FlushCache()
                    # Update the progress bar
                    if verbose: pbar.update()
            # Give the other threads some time to finish their tasks.
            else:
                time.sleep(10)

        # Update the progress bar
        if verbose: pbar.update()

        # Join all of the processes back together
        for proc in block_procs:
            proc.join()

        # Close dataset and write to disk
        dst_ds = None

        # Write extra data (total pixel counts and quality score) to the database (or csv)
        output_csv = os.path.join(task.get_dst_dir(),
                                  image_name_noext + '_md.csv')
        with open(output_csv, "w") as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow([
                "Quality Score", "White Ice", "Gray Ice", "Melt Ponds",
                "Open Water", "Shadow"
            ])
            writer.writerow([
                quality_score, pixel_counts[0], pixel_counts[1],
                pixel_counts[2], pixel_counts[3], pixel_counts[4]
            ])

        # Close the progress bar
        if verbose:
            pbar.close()
            print("Finished Processing.")
Example #18
def get_FYBImage_FromShp(shp_path, save_folder):
    sf = shapefile.Reader(shp_path)  # read the shp file of unidentified targets
    dataset = gdal.Open(r"J:\山西整幅\影像\山西全省20200508.img")
    im_width = dataset.RasterXSize  # number of columns in the raster
    im_height = dataset.RasterYSize  # number of rows in the raster

    in_band1 = dataset.GetRasterBand(1)
    in_band2 = dataset.GetRasterBand(2)
    in_band3 = dataset.GetRasterBand(3)

    fyb_savefolder = os.path.join(save_folder, 'fyb')
    label_savefolder = os.path.join(save_folder, 'label')
    num = len(os.listdir(fyb_savefolder))  # count the number of existing negative samples
    num += 1
    cell = 1024

    shapes = sf.shapes()
    for i in tqdm.tqdm(range(len(shapes))):
        shp = shapes[i]  # get each shape in the shp file

        point = shp.points  # get the four corner points of each minimum bounding rectangle
        x_list = [ii[0] for ii in point]
        y_list = [ii[1] for ii in point]

        # if (isOutOfRaster(x_list, y_list, raster_x_min, raster_x_max, raster_y_min, raster_y_max)):
        #     continue

        x_min = min(x_list)
        y_min = min(y_list)
        x_max = max(x_list)
        y_max = max(y_list)

        x_cen = (x_min + x_max) / 2
        y_cen = (y_max + y_min) / 2

        # if (not use_proj_coord):
        #     coords = lonlat2imagexy(dataset, x_cen, y_cen)
        # else:
        coords = geo2imagexy(dataset, x_cen, y_cen)
        coords = (int(round(abs(coords[0]))), int(round(abs(coords[1]))))

        offset_x = coords[0] - cell // 2
        offset_y = coords[1] - cell // 2

        out_band1 = in_band1.ReadAsArray(offset_x, offset_y, cell, cell)
        out_band2 = in_band2.ReadAsArray(offset_x, offset_y, cell, cell)
        out_band3 = in_band3.ReadAsArray(offset_x, offset_y, cell, cell)
        # out_bandmask = mask_band.ReadAsArray(offset_x, offset_y, cell, cell)

        # out_bandmask[np.where(out_bandmask > 0)] = 255

        out_band1 = np.reshape(out_band1, [out_band1.shape[0], out_band1.shape[1], 1])
        out_band2 = np.reshape(out_band2, [out_band2.shape[0], out_band2.shape[1], 1])
        out_band3 = np.reshape(out_band3, [out_band3.shape[0], out_band3.shape[1], 1])
        image = np.concatenate([out_band3, out_band2, out_band1], axis=2)

        # if(np.where(out_band1==0)[0].shape[0]+np.where(out_band1==255)[0].shape[0]!=cell**2):
        cv2.imwrite(os.path.join(fyb_savefolder, 'fyb{0}.png'.format(num)), image)
        label = np.zeros([cell, cell, 1])
        cv2.imwrite(os.path.join(label_savefolder, 'fyb{0}.png'.format(num)), label)
        num += 1
Example #19
def getShape(file):

    dataset = gdal.Open(file, GA_ReadOnly)
    return dataset.RasterYSize, dataset.RasterXSize
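Usage sketch (file name hypothetical):

rows, cols = getShape('scene.tif')  # (RasterYSize, RasterXSize)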
Example #20
def array2point(array,rasterfn):
    raster = gdal.Open(rasterfn)  # opened but unused in this excerpt
    return array
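If the intent is mapping array cells to point coordinates, a sketch using the geotransform (helper name hypothetical):

def array2point_coords(array, rasterfn):
    # Centre coordinates of every cell, derived from the GDAL geotransform
    raster = gdal.Open(rasterfn)
    gt = raster.GetGeoTransform()
    rows, cols = array.shape
    xs = gt[0] + (np.arange(cols) + 0.5) * gt[1]
    ys = gt[3] + (np.arange(rows) + 0.5) * gt[5]
    return np.meshgrid(xs, ys)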
Example #21
def tifgenerator(outfile,
                 raster_path,
                 df,
                 value='yhat',
                 aggregate_factor=1,
                 crop=False,
                 minimum_pop=0.3):
    """
    Given a filepath (.tif), a reference raster, and a dataset with i, j and yhat columns,
    it generates a raster.
    :param outfile:
    :param raster_path:
    :param df:
    :return:
    """
    from rasterio.mask import mask

    # Aggregate raster per aggregation factor
    if aggregate_factor > 1:
        print('INFO: aggregating raster ...')
        local_raster_path = "../tmp/local_raster.tif"
        aggregate(raster_path, local_raster_path, aggregate_factor)
        #change raster path
        raster_path = local_raster_path

    #Crop to data
    if crop:
        minlat, maxlat, minlon, maxlon = df_boundaries(df,
                                                       buffer=0.05,
                                                       lat_col="gpsLatitude",
                                                       lon_col="gpsLongitude")
        area = points_to_polygon(minlon, minlat, maxlon, maxlat)

        # crop raster
        with rasterio.open(raster_path) as src:
            out_image, out_transform = mask(src, [area], crop=True)
            out_meta = src.meta.copy()

        # save the resulting raster
        out_meta.update({
            "driver": "GTiff",
            "height": out_image.shape[1],
            "width": out_image.shape[2],
            "transform": out_transform
        })

        final_raster = "../tmp/final_raster.tif"
        with rasterio.open(final_raster, "w", **out_meta) as dest:
            out_image[out_image < minimum_pop] = dest.nodata
            dest.write(out_image)
            list_j, list_i = np.where(dest.read()[0] != dest.nodata)
        # change raster path
        raster_path = final_raster

    print('-> writing: ', outfile)
    # create empty raster from the original one
    ds = gdal.Open(raster_path)
    band = ds.GetRasterBand(1)
    arr = band.ReadAsArray()
    rows, cols = arr.shape
    arr_out = np.zeros(arr.shape) - 99
    arr_out[df['j'], df['i']] = df[value]
    driver = gdal.GetDriverByName("GTiff")
    outdata = driver.Create(outfile, cols, rows, 1, gdal.GDT_Float32)  # Create takes (xsize, ysize)

    outdata.SetGeoTransform(
        ds.GetGeoTransform())  # sets same geotransform as input
    outdata.SetProjection(ds.GetProjection())  # sets same projection as input

    outdata.GetRasterBand(1).SetNoDataValue(-99)
    outdata.GetRasterBand(1).WriteArray(arr_out)

    outdata.FlushCache()  # saves to disk!!
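A hypothetical call, given a DataFrame df with i, j and yhat columns:

# Rasterise predictions onto the grid of a reference raster
tifgenerator('predictions.tif', 'population.tif', df, value='yhat')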
Example #22
def raster2array(rasterfn):
    raster = gdal.Open(rasterfn)
    band = raster.GetRasterBand(1)
    array = band.ReadAsArray()
    return array
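Usage sketch (path hypothetical):

arr = raster2array('dem.tif')  # band 1 as a 2-D NumPy array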
Example #23
# ==================================================================================================== #
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Part I ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ==================================================================================================== #
# load point shapefile and get list with strings of all tiles in rasterfolder
pointfiles = ListFiles(pointdir, '.shp', 1)
pointshp = ogr.Open(pointfiles[0])
point_lyr = pointshp.GetLayer()
pointlyr_names = [field.name for field in point_lyr.schema]

rasterfiles = sorted(ListFiles(rasterdir, '.tif', 1))

# get Projections of Layers to check coordinate systems.
# In this case both rasters and the pointshapefile are projected using EPSG 4326/WGS84.
pointcrs = point_lyr.GetSpatialRef()
rastercrs = gdal.Open(rasterfiles[0]).GetProjection()

# ------------------------ Part 1.1) & 1.2)  --------------------------------#
# create dataFrame with necessary columns (excluding bands 13-21)
tc_landsat_metrics = pd.DataFrame(
    columns={
        'ID': [],
        'tree_fraction': [],
        'Band_01': [],
        'Band_02': [],
        'Band_03': [],
        'Band_04': [],
        'Band_05': [],
        'Band_06': [],
        'Band_07': [],
        'Band_08': [],
Example #24
##            '20200615_to_20200622', '20200622_to_20200629']

drv = gdal.GetDriverByName('GTiff')

dslist = []
baseimg = np.zeros((4096, 4096, 2)) - 9999
## open the data sets for this group
for k, theweek in enumerate(blweeks):
    rbtile = theweek + '/' + ascdesc + '/' + tile + '_br_comp.tif'
    if not os.path.exists(rbtile):
        rbtile2 = theweek + '/' + ascdesc + '/' + tile + '_br_comp_masked.tif'
        if os.path.exists(rbtile2):
            rbtile = rbtile2
        else:
            continue
    dslist.append(gdal.Open(rbtile, gdal.GA_ReadOnly))

if (len(dslist) < 2):
    print('Not enough data (only %d) for %s for tile %s' %
          (len(dslist), ascdesc, tile))
    dslist = None  # release the GDAL datasets
    sys.exit(0)

gt = dslist[0].GetGeoTransform()
proj = dslist[0].GetProjection()
outtile = 'BaseFiles2019/' + tile + '_base.tif'
outDS = drv.Create(outtile, 4096, 4096, 5, eType=gdal.GDT_Float32, \
  options=['COMPRESS=LZW', 'TILED=YES'])
outDS.SetGeoTransform(gt)
outDS.SetProjection(proj)
Example #25
def georeference_images(imageData,
                        imageDirectory,
                        outputDirectory,
                        droneParmsLogPath,
                        cameraPitch=30.0,
                        suffix="",
                        cameraYaw=90):
    #Prepare the output directory
    if not path.exists(outputDirectory):
        os.makedirs(outputDirectory)

    #Read drone parameters log variables
    droneParams = pd.read_csv(droneParmsLogPath, sep=",")

    #For each image, georeference and calculate longitude and latitude for the centre of each pixel.
    for r in range(len(imageData)):
        #read drone state when image was taken
        if isinstance(imageData, pd.DataFrame):
            imageDataRow = imageData.iloc[r]
        else:
            imageDataRow = imageData[r]

        dronePitch = imageDataRow["ATT_Pitch"]
        droneRoll = imageDataRow["ATT_Roll"]
        droneYaw = imageDataRow["ATT_Yaw"]
        droneLonLat = (imageDataRow["GPS_Longitude"],
                       imageDataRow["GPS_Latitude"])
        droneTimeMS = imageDataRow["droneTime_MS"]
        droneAltitude = imageDataRow["GPS_Altitude"] / 1000.0
        #Convert from metres to kilometres
        droneNSats = imageDataRow["GPS_NSats"]
        droneHDop = imageDataRow["GPS_HDop"]
        imageFilename = imageDataRow["filename"]

        print("Processing image ", imageFilename)
        outputPathNC = path.join(outputDirectory,
                                 imageFilename + suffix + ".nc")
        if path.exists(outputPathNC):
            print("WARNING: Path already exists and will not be overwritten:",
                  outputPathNC)
            continue

        #Some images may not have data; an altitude should always be present if anything is, so skip this image if altitude is missing.
        if not np.isfinite(droneAltitude):
            continue

        #Do the georeferencing calculations
        lons, lats, refPoints = do_georeference(droneLonLat,
                                                droneAltitude,
                                                droneRoll,
                                                dronePitch,
                                                droneYaw,
                                                cameraPitch,
                                                cameraYaw=cameraYaw)

        #Extract image metadata and append drone position orientation, image file
        imagePath = path.join(imageDirectory, imageFilename)
        imageDataset = gdal.Open(imagePath, gdal.GA_ReadOnly)
        metaData = imageDataset.GetMetadata()
        metaData["image_filename"] = imageFilename
        #Add filename (helps with debugging)
        metaData["drone_pitch"] = dronePitch
        metaData["drone_roll"] = droneRoll
        metaData["drone_yaw"] = droneYaw
        metaData["drone_longitude"] = droneLonLat[0]
        metaData["drone_latitude"] = droneLonLat[1]
        metaData["drone_altitude"] = droneAltitude
        metaData["drone_time_ms"] = droneTimeMS
        metaData["gps_n_satellites"] = droneNSats
        metaData["gps_HDop"] = droneHDop
        metaData["camera_pitch"] = cameraPitch
        metaData["camera_pitch"] = cameraYaw
        metaData["camera_horizontal_field_of_view"] = HORIZONTAL_FOV
        metaData["camera_aspect_ratio"] = ASPECT_RATIO
        metaData["num_pixels_x"] = N_PIXELS_X
        metaData["num_pixels_y"] = N_PIXELS_Y
        metaData["refpoint_centre_lon"] = refPoints[0][0]
        metaData["refpoint_centre_lat"] = refPoints[0][1]
        metaData["refpoint_topleft_lon"] = refPoints[1][0]
        metaData["refpoint_topleft_lat"] = refPoints[1][1]
        metaData["refpoint_topright_lon"] = refPoints[2][0]
        metaData["refpoint_topright_lat"] = refPoints[2][1]
        metaData["refpoint_bottomleft_lon"] = refPoints[3][0]
        metaData["refpoint_bottomleft_lat"] = refPoints[3][1]
        metaData["refpoint_bottomright_lon"] = refPoints[4][0]
        metaData["refpoint_bottomright_lat"] = refPoints[4][1]

        #Append all the drone parameters.
        for irow in range(len(droneParams)):
            key, value = droneParams.iloc[irow]["Name"], droneParams.iloc[
                irow]["Value"]
            metaData["drone_param_" + key] = value

        #Extract image data (new name so the imageData argument is not clobbered)
        imagePixels = imageDataset.GetRasterBand(1).ReadAsArray()
        #First band is brightest if NIR

        write_netcdf(outputPathNC, lons, lats, metaData, imagePixels)

        georeferencedImagePathTemplate = Template(
            path.join(
                outputDirectory,
                metaData["image_filename"][:-4] + suffix + ".${EXTENSION}"))
        do_image_geotransform(
            path.join(imageDirectory, metaData["image_filename"]), metaData,
            georeferencedImagePathTemplate)
Example #26
    # each single step in mask is ~120ft
    # this draws a ~ 1000x1000ft box over the lat/lon pair
    threshold = 30
    boxOfInterest = dbBand.ReadAsArray(px - 5, py - 5, 10, 10)
    totalCount = 0
    isItDevelopedArea = False
    for i in boxOfInterest.flatten():
        if i in maskingValues:
            totalCount += 1
    if totalCount >= threshold:
        isItDevelopedArea = True
    return isItDevelopedArea


if __name__ == "__main__":

    filename = "/home/kyle/Downloads/landcoverData/convertedCover"
    db = gdal.Open(filename)
    dbBand = db.GetRasterBand(1)
    geotrans = db.GetGeoTransform()

    latLons = "/home/kyle/git/final/DataCollection/IDs/FloridaIDs.csv"
    with open(latLons) as f:
        header = f.readline()
        print(header.strip())
        for line in f:
            lat, lon = [float(x.strip()) for x in line.split(',')]
            if getMyMask(dbBand, geotrans, lat, lon):
                print(line.strip())
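getMyMask presumably derives px and py by inverting the geotransform; a common sketch for a north-up raster (helper name hypothetical):

def latlon_to_pixel(geotrans, lat, lon):
    # Assumes no rotation terms in the geotransform
    px = int((lon - geotrans[0]) / geotrans[1])
    py = int((lat - geotrans[3]) / geotrans[5])
    return px, py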
Example #27
    #%%
    xmlfile = glob.glob(fp + '/*_meta.xml')[0]
    DOMTree = xml.dom.minidom.parse(xmlfile)
    collection = DOMTree.documentElement
    TDI = collection.getElementsByTagName("TDIStages")[0].childNodes[0].data
    TDI = TDI.split(',')
    TDI = np.uint8(TDI)
    #%%
    esun = pd.read_excel('E:/esun_obt.xlsx')
    sensor = 'ESUN_' + mypdf[0][-25] + 'cmos' + mypdf[0][-19]
    #esun = np.array(esun['ESUN_Acmos2'])
    esun = np.array(esun[sensor])

    count = 0
    name = 'C:/' + fp[-36:]
    im = gdal.Open(fp1[0], gdal.GA_ReadOnly)
    imx = im.RasterXSize
    imy = im.RasterYSize
    outdata = gdal.GetDriverByName('GTiff').Create(name + '.tif', imx, imy, 4,
                                                   gdal.GDT_UInt16)
    bandcount = 0
    for _fp in fp1:
        print(_fp)
        if count + 1 not in [1, 13, 26, 28]:
            count += 1
            continue

        im = gdal.Open(_fp, gdal.GA_ReadOnly)
        proj = im.GetProjection()
        geo = im.GetGeoTransform()
        im = im.ReadAsArray()
Example #28
    def S2_PSF_optimization(self, ):

        # open the created vrt file with 10 meter, 20 meter and 60 meter
        # grouped together and use gdal memory map to open it
        g = gdal.Open(self.s2_dir + '10meter.vrt')
        data = g.GetVirtualMemArray()
        b2, b3, b4, b8 = data
        g1 = gdal.Open(self.s2_dir + '20meter.vrt')
        data1 = g1.GetVirtualMemArray()
        b8a, b11, b12 = data1[-3:, :, :]
        img = dict(zip(self.bands, [b2, b3, b4, b8, b8a, b11, b12]))

        if not glob(self.s2_dir + 'cloud.tiff'):
            cl = classification(img=img)
            cl.Get_cm_p()
            g = None
            g1 = None
            self.cloud = cl.cm
            g = gdal.Open(self.s2_dir + 'B04.jp2')
            driver = gdal.GetDriverByName('GTiff')
            g1 = driver.Create(self.s2_dir + 'cloud.tiff', g.RasterXSize,
                               g.RasterYSize, 1, gdal.GDT_Byte)

            projection = g.GetProjection()
            geotransform = g.GetGeoTransform()
            g1.SetGeoTransform(geotransform)
            g1.SetProjection(projection)
            gcps = g.GetGCPs()
            if gcps:
                g1.SetGCPs(gcps, g.GetGCPProjection())
            g1.GetRasterBand(1).WriteArray(self.cloud)
            g1 = None
            g = None
            del cl
        else:
            self.cloud = cloud = gdal.Open(
                self.s2_dir + 'cloud.tiff').ReadAsArray().astype(bool)
        cloud_cover = 1. * self.cloud.sum() / self.cloud.size
        if cloud_cover > 0.2:
            print('Too much cloud, cloud proportion: %.03f !!' % cloud_cover)
            return []
        else:

            mete = readxml('%smetadata.xml' % self.s2_dir)
            self.sza = np.zeros(7)
            self.sza[:] = mete['mSz']
            self.saa = self.sza.copy()
            self.saa[:] = mete['mSa']
            try:
                self.vza = (mete['mVz'])[[1, 2, 3, 7, 11, 12, 8], ]
                self.vaa = (mete['mVa'])[[1, 2, 3, 7, 11, 12, 8], ]
            except Exception:
                self.vza = np.repeat(np.nanmean(mete['mVz']), 7)
                self.vaa = np.repeat(np.nanmean(mete['mVa']), 7)
            angles = (self.sza[-2], self.vza[-2], (self.vaa - self.saa)[-2])

            tiles = Find_corresponding_pixels(self.s2_dir + 'B04.jp2',
                                              destination_res=500)

            self.h, self.v = int(self.Lfile.split('.')[-4][1:3]), int(
                self.Lfile.split('.')[-4][4:])

            self.H_inds, self.L_inds = tiles['h%02dv%02d' % (self.h, self.v)]

            self.Lx, self.Ly = self.L_inds
            self.Hx, self.Hy = self.H_inds

            self.brdf, self.qa = get_brdf_six(self.Lfile,
                                              angles=angles,
                                              bands=(7, ),
                                              Linds=list(self.L_inds))
            self.brdf, self.qa = self.brdf.flatten(), self.qa.flatten()

            # convolve band 12 using the generally used PSF value
            self.H_data = np.repeat(np.repeat(b12, 2, axis=1), 2, axis=0)
            size = 2 * int(round(max(
                1.96 * 50, 1.96 * 50)))  # set the largest possible PSF size
            self.H_data[0, :] = self.H_data[
                -1, :] = self.H_data[:, 0] = self.H_data[:, -1] = 0
            self.bad_pixs = cloud_dilation((self.H_data <= 0) | self.cloud,
                                           iteration=size // 2)
            xstd, ystd = 29.75, 39
            ker = self.gaussian(xstd, ystd, 0)
            self.conved = signal.fftconvolve(self.H_data, ker, mode='same')

            in_patch_m = np.logical_and.reduce(
                ((self.Hx > self.patch[1]), (self.Hx <
                                             (self.patch[1] + self.patch[3])),
                 (self.Hy > self.patch[0]), (self.Hy <
                                             (self.patch[0] + self.patch[2]))))

            self.patch_s2_ind = self.Hx[in_patch_m] - self.patch[1], self.Hy[
                in_patch_m] - self.patch[0]
            self.patch_mod = self.brdf[in_patch_m]
            self.patch_qa = self.qa[in_patch_m]
Example #29
    def readTif(self, fileName):
        dataset = gdal.Open(fileName)
        if dataset is None:
            print(fileName + " could not be opened")
        return dataset
Example #30
UB = np.loadtxt(CalibPath + "/UB - tot.txt", usecols=0)
LB = np.loadtxt(CalibPath + "/LB - tot.txt", usecols=0)
Coello.ReadParametersBounds(UB, LB, Snow)
#%% spatial variability function
"""
define how generated parameters are going to be distributed spatially
totaly distributed or totally distributed with some parameters are lumped
for the whole catchment or HRUs or HRUs with some lumped parameters
for muskingum parameters k & x include the upper and lower bound in both
UB & LB with the order of Klb then kub
function inside the calibration algorithm is written as following
par_dist=SpatialVarFun(par,*SpatialVarArgs,kub=kub,klb=klb)

"""
raster = gdal.Open(FlowAccPath)
#-------------
# for lumped catchment parameters
no_parameters = 12
klb = 0.5
kub = 1
#------------
no_lumped_par = 1
lumped_par_pos = [7]

SpatialVarFun = DP(raster,
                   no_parameters,
                   no_lumped_par=no_lumped_par,
                   lumped_par_pos=lumped_par_pos,
                   Function=2,
                   Klb=klb,