import sys

import numpy as np
import georasters as gr
from PIL import Image
from sklearn import tree
from sklearn.ensemble import RandomForestRegressor


def RFChangeDetection(TiffA, TiffB, savefile, filename_mask):
    treeChange = treeAtoB(TiffA, TiffB, filename_mask)
    TiffDataA = gr.load_tiff(TiffA).astype(float)
    TiffDataB = gr.load_tiff(TiffB)
    if filename_mask == 'none':
        arr_mask = np.ones(np.shape(TiffDataB))
    else:
        arr_mask = gr.load_tiff(filename_mask)

    # Single-channel input as filename_before
    if len(TiffDataA.shape) == 2:
        TiffDataA[arr_mask == 0] = 0
        valid = TiffDataA != 0
        dataA = TiffDataA[valid]
        # scikit-learn expects a 2-D feature matrix, hence the reshape
        dataB_predicted = treeChange.predict(dataA.reshape(-1, 1))
        TiffDataA[valid] = dataB_predicted.astype(float)
        TiffDataB[arr_mask == 0] = 0
        TiffData_diff = np.asarray(TiffDataB - TiffDataA, dtype='float')
        TiffData_diff[arr_mask == 0] = -2

    # Multi-channel input as filename_before
    if len(TiffDataA.shape) == 3:
        TiffDataA[:, arr_mask == 0] = 0
        dataA = TiffDataA[:, arr_mask != 0]
        dataB_predicted = treeChange.predict(dataA.transpose())
        dataB_predicted_block = TiffDataA[0, :, :].astype(float)
        dataB_predicted_block[arr_mask != 0] = dataB_predicted.astype(float)
        TiffDataB[arr_mask == 0] = 0
        TiffData_diff = np.asarray(TiffDataB - dataB_predicted_block, dtype='float')
        TiffData_diff[arr_mask == 0] = -2

    im = Image.fromarray(TiffData_diff)
    im.save(savefile)
def treeAtoB(TiffA, TiffB, filename_mask):
    arr_A = gr.load_tiff(TiffA)
    arr_B = gr.load_tiff(TiffB)
    if filename_mask == 'none':
        arr_mask = np.ones(np.shape(arr_B))
    else:
        arr_mask = gr.load_tiff(filename_mask)

    if len(arr_A.shape) == 3:
        arr_A_0 = arr_A[0, :, :]
    else:
        arr_A_0 = arr_A

    # Train only on pixels that are non-zero in both images and inside the mask
    z = (arr_B != 0).astype(int) + (arr_A_0 != 0).astype(int)
    mask0 = (z == 2)
    mask = ((mask0.astype(int) + (arr_mask > 0).astype(int)) == 2)
    del z

    # `method` is expected to be set at module level.
    # For speed, use the decision tree; for best results, use random forest (or xgboost).
    if method == 'Decision Tree':
        treeReg = tree.DecisionTreeRegressor()
        print('Using Decision Tree Estimator')
    elif method == 'Random Forest':
        treeReg = RandomForestRegressor()
        print('Using Random Forest Estimator')
    else:
        raise ValueError('Unknown method: {}'.format(method))

    # Build a (n_bands, n_pixels) feature matrix, even for single-band input
    if len(arr_A.shape) == 3:
        dataset = arr_A[:, mask]
    else:
        dataset = arr_A[mask][np.newaxis, :]
    y_var = arr_B[mask]
    del mask, arr_A, arr_B

    sample_ratio = np.max([np.ceil(dataset.shape[1] / 5000000), 1]).astype(int)
    print('Subsampling ratio for training: ', sample_ratio)
    treeReg.fit(dataset[:, ::sample_ratio].transpose(), y_var[::sample_ratio])
    return treeReg
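# A minimal usage sketch for the two functions above. The file names below are
# hypothetical placeholders, and `method` is the module-level switch that
# treeAtoB reads to pick its estimator.
method = 'Random Forest'

before_tif = 'before_NDVI.tif'   # hypothetical "before" raster
after_tif = 'after_NDVI.tif'     # hypothetical "after" raster

# Pass 'none' to skip masking, or the path to a mask GeoTIFF (0 = ignore pixel)
RFChangeDetection(before_tif, after_tif, 'change_map.tif', 'none')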
def nightlights_values(self, df, lon_col='gpsLongitude', lat_col='gpsLatitude'):
    """
    Given a dataset with latitude and longitude columns, return the
    nightlights value at each point.

    :param df: DataFrame
    :param lon_col: column name for longitude
    :param lat_col: column name for latitude
    :return: Series
    """
    import georasters as gr
    try:
        pop = gr.load_tiff(self.file)
    except MemoryError:
        print('Landuse Raster too big!')
        raise

    # Find location of point (x, y) on raster, e.g. to extract info at that location
    NDV, xsize, ysize, GeoT, Projection, DataType = gr.get_geo_info(self.file)

    def lu_extract(row):
        try:
            c, r = gr.map_pixel(row[lon_col], row[lat_col],
                                GeoT[1], GeoT[-1], GeoT[0], GeoT[3])
            lu = pop[c, r]
            return lu
        except IndexError:
            pass

    return df.apply(lu_extract, axis=1)
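# A hedged usage sketch for nightlights_values. The NightlightSource stand-in and
# the sample coordinates are assumptions for illustration; in the original code
# the method lives on a class whose `.file` attribute points at the nightlights
# GeoTIFF, so a minimal holder plays that role here.
import pandas as pd


class NightlightSource:
    """Minimal hypothetical stand-in exposing the `.file` attribute the method reads."""

    def __init__(self, file):
        self.file = file


surveys = pd.DataFrame({'gpsLongitude': [32.58, 30.06],   # illustrative points
                        'gpsLatitude': [0.32, -1.95]})
src = NightlightSource('data/nightlights.tif')
surveys['nightlights'] = nightlights_values(src, surveys)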
def getRastervalue(df, pop_raster, lat_col="gpsLatitude", lon_col="gpsLongitude", filter=1):
    """
    Given a DataFrame with latitude and longitude columns, return the
    population value of the raster at each of these locations.

    It merges on the closest coordinates between the raster and the dataset.
    Uses the WorldPop population layers: http://www.worldpop.org.uk/data/data_sources/

    use:
    data = getRastervalue(data, path_to_raster)

    Parameters
    ----------
    df : dataframe
    pop_raster : string
        filepath to the population raster
    lat_col, lon_col : str
        column names for the coordinates
    filter : what threshold to consider valid population.
    """
    print('-> finding landuse for {} points'.format(df.shape[0]))

    import georasters as gr
    try:
        pop = gr.load_tiff(pop_raster)
    except MemoryError:
        print('Landuse Raster too big!')
        raise

    # Find location of point (x, y) on raster, e.g. to extract info at that location
    NDV, xsize, ysize, GeoT, Projection, DataType = gr.get_geo_info(pop_raster)

    def lu_extract(row):
        try:
            c, r = gr.map_pixel(row[lon_col], row[lat_col],
                                GeoT[1], GeoT[-1], GeoT[0], GeoT[3])
            lu = pop[c, r]
            return lu
        except IndexError:
            pass

    df['landuse'] = df.apply(lu_extract, axis=1)

    # keep only points with population density greater than `filter`
    df = df[df.landuse > filter]

    return df
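# A short hedged example of how getRastervalue is meant to be called. The CSV and
# raster paths are placeholders, not files that ship with this code.
import pandas as pd

clusters = pd.read_csv('survey_clusters.csv')   # must contain gpsLatitude / gpsLongitude
clusters = getRastervalue(clusters, 'data/worldpop_population.tif', filter=1)
print(clusters[['gpsLatitude', 'gpsLongitude', 'landuse']].head())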
if len(sys.argv) < 4:
    print('How to use this change detection tool: \n'
          ' python RF_change.py filename_before filename_after filename_result filename_mask')
    sys.exit()

#print(sys.argv)

#the first input value is the filename of the before image
#filename_before = '/home/kuan/Downloads/shouxian/SX_828_NDVI.tif'
#filename_before = '/home/kuan/Downloads/modis_NDVI/stack_12345.tif'
filename_before = sys.argv[1]
#data_before = TiffRead(filename_before)
data_before = gr.load_tiff(filename_before)  # use the georasters library for simpler operation

#the second input value is the filename of the after image
#filename_after = '/home/kuan/Downloads/shouxian/SX_1007_NDVI.tif'
#filename_after = '/home/kuan/Downloads/modis_NDVI/ndvi201.tiff'
filename_after = sys.argv[2]
#data_after = TiffRead(filename_after)
data_after = gr.load_tiff(filename_after)

#the third input parameter is the filename of the output

if len(sys.argv) == 5:
    #the fourth input value, if present, is a mask with values 0 and >1
    #filename_mask = '/home/kuan/Downloads/shouxian/rice.tif'
    filename_mask = sys.argv[4]
    print(filename_mask)
else:
    filename_mask = 'none'
afr_cities_buf.plot(ax=ax, linewidth=0)
afr_cities.plot(ax=ax, markersize=.2, color='yellow')
ax.set_title('1 decimal degree buffer \n Major cities in Africa', fontsize=12)
ax.set_axis_off()

# %% [markdown] Collapsed="false"
# # Raster Data

# %% Collapsed="false"
raster = 'data/res03_crav6190h_sihr_cer.tif'

# %% Collapsed="false"
# Get info on raster
NDV, xsize, ysize, GeoT, Projection, DataType = gr.get_geo_info(raster)
grow = gr.load_tiff(raster)
grow = gr.GeoRaster(grow, GeoT, projection=Projection)

# %% Collapsed="false"
f, ax = plt.subplots(1, figsize=(13, 11))
grow.plot(ax=ax, cmap='YlGn_r')
ax.set_title('GAEZ Crop Suitability Measures')
ax.set_axis_off()

# %% [markdown] Collapsed="false"
# ## Clipping Raster

# %% Collapsed="false"
brazil = countries.query('ADMIN == "Brazil"')

# %% Collapsed="false"
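# %% [markdown] Collapsed="false"
# A hedged aside (not from the original notebook): `GeoRaster.map_pixel(lon, lat)`
# returns the raster value at a coordinate, which is a quick way to spot-check the
# suitability surface loaded above. The point below is an arbitrary location in Brazil.

# %% Collapsed="false"
grow.map_pixel(-55.0, -10.0)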
### Remember to delete the asset in Google Drive and the local directory now
print("The uploaded Document ID of imagery scene in MongoDB: " + str(mongo_doc_id))

S4_endTime = currentSecondsTime()
showPyMessage(" -- Step completed successfully. Step took {}. ".format(
    timeTaken(S4_startTime, S4_endTime)))

# ---------------------------STEP 5: Apply ML to predict Wildlife movement based on LULC---------------------------
print("\nStep 5: Apply ML to predict Wildlife movement based on LULC.")
S5_startTime = currentSecondsTime()

# The training table
mongo_ndvi = fs.get(mongo_doc_id).read()
gr_ndvi = gr.load_tiff(mongo_ndvi)
training_table = response_df[:]
training_table['NDVI'] = gr_ndvi.map_pixel(response_df['longitude'],
                                           response_df['latitude'])

# ML
'''NDVI vs PRESENCE(1) OR NOT (0)'''
'''LULC vs PRESENCE(1) OR NOT (0)'''
'''Relief vs PRESENCE(1) OR NOT (0)'''
'''Temp vs PRESENCE(1) OR NOT (0)'''
'''Moisture vs PRESENCE(1) OR NOT (0)'''
'''Other tracked species vs PRESENCE(1) OR NOT (0)'''
'''Reported incident vs PRESENCE(1) OR NOT (0)'''
''''''
'''Combined variables vs PRESENCE(1) OR NOT (0)'''
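# A hedged sketch of the presence/absence model outlined by the block comments
# above. The 'presence' label column, the feature list and the choice of
# scikit-learn's RandomForestClassifier are illustrative assumptions, not the
# project's confirmed modelling code.
from sklearn.ensemble import RandomForestClassifier

feature_cols = ['NDVI']                     # extend with LULC, relief, temp, moisture, ...
X_train = training_table[feature_cols]
y_train = training_table['presence']        # assumed label: 1 = animal observed, 0 = background

presence_model = RandomForestClassifier(n_estimators=200, random_state=0)
presence_model.fit(X_train, y_train)
training_table['presence_prob'] = presence_model.predict_proba(X_train)[:, 1]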
import numpy as np
import pandas as pd

for filename, indicator in zip(
        ['../HRM/HRM/Data/datasets/WFP_ENSAN_Senegal_2013_cluster.csv',
         '../HRM/HRM/Data/datasets/WB_Uganda_2011_cluster.csv'],
        ['FCS_mean', 'cons']):

    dataset = pd.read_csv(filename)

    # Filter on whichever indicator column this survey provides
    try:
        dataset = dataset[dataset.cons <= 5]
    except AttributeError:
        dataset = dataset[dataset.FCS_mean <= 30]

    import georasters as gr
    nightlights = 'data/nightlights.tif'
    esa = gr.load_tiff(nightlights)

    # Find location of point (x, y) on raster, e.g. to extract info at that location
    NDV, xsize, ysize, GeoT, Projection, DataType = gr.get_geo_info(nightlights)

    def lu_extract(row):
        try:
            c, r = gr.map_pixel(row['gpsLongitude'], row['gpsLatitude'],
                                GeoT[1], GeoT[-1], GeoT[0], GeoT[3])
            lu = esa[c, r]
            return lu
        except IndexError:
            print('coordinates {} {} at sea!'.format(row['gpsLongitude'], row['gpsLatitude']))

    dataset['nightlights'] = dataset.apply(lu_extract, axis=1)
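    # A hedged follow-up (not in the original script): a quick check of how the
    # extracted nightlights track the survey indicator for each dataset.
    print('{}: corr(nightlights, {}) = {:.3f}'.format(
        filename, indicator, dataset['nightlights'].corr(dataset[indicator])))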