def set_AF_date(feat):
    return feat.set("af_date", ee.Date(feat.get("ACQ_DATE")).format().slice(0, 19))
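# A minimal usage sketch, assuming `fires` is an ee.FeatureCollection whose
# features carry an 'ACQ_DATE' property (e.g. ingested FIRMS active-fire
# points); the asset ID below is hypothetical.
fires = ee.FeatureCollection('users/example/active_fire_points')
fires_with_date = fires.map(set_AF_date)
print(fires_with_date.first().get('af_date').getInfo())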
# count = fromFT.size().getInfo()
# print(count)
polys = fromFT.geometry()
centroid = polys.centroid()
lng, lat = centroid.getInfo()['coordinates']
# print("lng = {}, lat = {}".format(lng, lat))

values = fromFT.reduceColumns(
    ee.Reducer.toList(2), ['system:index', 'name']).getInfo()['list']
# print(values)
# Map.setCenter(lng, lat, 10)

vis = {'bands': ['N', 'R', 'G']}

for year in years:
    # year = 2015
    startTime = ee.Date(str(year) + '-01-01')
    endTime = ee.Date(str(year) + '-12-31')
    # year = startTime.get('year').getInfo()
    # print(year)
    for (id, name) in values:
        watershed = fromFT.filter(ee.Filter.eq('system:index', str(id)))
        filename = "Y" + str(year) + "_" + str(id) + "_" + str(name).replace(" ", "_")
        print(filename)
        image = subsetNAIP(collection, startTime, endTime, watershed)
        ndwi = calNDWI(image, threshold)
        vector = rasterToVector(ndwi, watershed)
        exportToDrive(vector, filename)
        # Map.addLayer(image, vis)
        # Map.addLayer(vector)
def l8Correction(img):
    # tile geometry
    l8Footprint = img.geometry()

    # dem
    DEM_OLI = ee.Image('USGS/SRTMGL1_003').clip(l8Footprint)

    # ozone
    DU_OLI = ee.Image(
        ozone.filterDate(iniDate, endDate).filterBounds(l8Footprint).mean())

    # Julian Day
    imgDate_OLI = ee.Date(img.get('system:time_start'))
    FOY_OLI = ee.Date.fromYMD(imgDate_OLI.get('year'), 1, 1)
    JD_OLI = imgDate_OLI.difference(FOY_OLI, 'day').int().add(1)

    # Earth-Sun distance
    d_OLI = ee.Image.constant(img.get('EARTH_SUN_DISTANCE'))

    # Sun elevation
    SunEl_OLI = ee.Image.constant(img.get('SUN_ELEVATION'))

    # Sun azimuth
    SunAz_OLI = ee.Image.constant(img.get('SUN_AZIMUTH'))

    # Satellite zenith
    SatZe_OLI = ee.Image(0.0)
    cosdSatZe_OLI = (SatZe_OLI).multiply(pi.divide(ee.Image(180))).cos()
    sindSatZe_OLI = (SatZe_OLI).multiply(pi.divide(ee.Image(180))).sin()

    # Satellite azimuth
    SatAz_OLI = ee.Image(0.0).clip(l8Footprint)

    # Sun zenith
    SunZe_OLI = ee.Image(90).subtract(SunEl_OLI)
    cosdSunZe_OLI = SunZe_OLI.multiply(pi.divide(ee.Image.constant(180))).cos()  # in degrees
    sindSunZe_OLI = SunZe_OLI.multiply(pi.divide(ee.Image(180))).sin()  # in degrees

    # Relative azimuth
    RelAz_OLI = ee.Image(SunAz_OLI)
    cosdRelAz_OLI = RelAz_OLI.multiply(pi.divide(ee.Image(180))).cos()

    # Pressure calculation
    P_OLI = ee.Image(101325).multiply(
        ee.Image(1).subtract(ee.Image(0.0000225577).multiply(DEM_OLI))
        .pow(5.25588)).multiply(0.01)
    Po_OLI = ee.Image(1013.25)

    # Radiometric Calibration #
    # define bands to be converted to radiance
    bands_OLI = ['B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7']

    # radiance_mult_bands
    rad_mult_OLI = ee.Image(
        ee.Array([
            ee.Image(img.get('RADIANCE_MULT_BAND_1')),
            ee.Image(img.get('RADIANCE_MULT_BAND_2')),
            ee.Image(img.get('RADIANCE_MULT_BAND_3')),
            ee.Image(img.get('RADIANCE_MULT_BAND_4')),
            ee.Image(img.get('RADIANCE_MULT_BAND_5')),
            ee.Image(img.get('RADIANCE_MULT_BAND_6')),
            ee.Image(img.get('RADIANCE_MULT_BAND_7'))
        ])).toArray(1)

    # radiance add band
    rad_add_OLI = ee.Image(
        ee.Array([
            ee.Image(img.get('RADIANCE_ADD_BAND_1')),
            ee.Image(img.get('RADIANCE_ADD_BAND_2')),
            ee.Image(img.get('RADIANCE_ADD_BAND_3')),
            ee.Image(img.get('RADIANCE_ADD_BAND_4')),
            ee.Image(img.get('RADIANCE_ADD_BAND_5')),
            ee.Image(img.get('RADIANCE_ADD_BAND_6')),
            ee.Image(img.get('RADIANCE_ADD_BAND_7'))
        ])).toArray(1)

    # create an empty image to save new radiance bands to
    imgArr_OLI = img.select(bands_OLI).toArray().toArray(1)
    Ltoa_OLI = imgArr_OLI.multiply(rad_mult_OLI).add(rad_add_OLI)

    # esun
    ESUN_OLI = ee.Image.constant(197.24790954589844) \
        .addBands(ee.Image.constant(201.98426818847656)) \
        .addBands(ee.Image.constant(186.12677001953125)) \
        .addBands(ee.Image.constant(156.95257568359375)) \
        .addBands(ee.Image.constant(96.04714965820312)) \
        .addBands(ee.Image.constant(23.8833221450863)) \
        .addBands(ee.Image.constant(8.04995873449635)).toArray().toArray(1)
    ESUN_OLI = ESUN_OLI.multiply(ee.Image(1))
    ESUNImg_OLI = ESUN_OLI.arrayProject([0]).arrayFlatten([bands_OLI])

    # Ozone Correction #
    # Ozone coefficients
    koz_OLI = ee.Image.constant(0.0039).addBands(ee.Image.constant(0.0218)) \
        .addBands(ee.Image.constant(0.1078)) \
        .addBands(ee.Image.constant(0.0608)) \
        .addBands(ee.Image.constant(0.0019)) \
        .addBands(ee.Image.constant(0)) \
        .addBands(ee.Image.constant(0)) \
        .toArray().toArray(1)

    # Calculate ozone optical thickness
    Toz_OLI = koz_OLI.multiply(DU_OLI).divide(ee.Image.constant(1000))

    # Calculate TOA radiance in the absence of ozone
    Lt_OLI = Ltoa_OLI.multiply(((Toz_OLI)).multiply(
        (ee.Image.constant(1).divide(cosdSunZe_OLI)).add(
            ee.Image.constant(1).divide(cosdSatZe_OLI))).exp())

    # Rayleigh optical thickness
    # (band centers in micrometers; the B7 term was an ee.Number in the
    # original, which addBands cannot accept, so it is an ee.Image here
    # like the other bands)
    bandCenter_OLI = ee.Image(443).divide(1000).addBands(ee.Image(483).divide(1000)) \
        .addBands(ee.Image(561).divide(1000)) \
        .addBands(ee.Image(655).divide(1000)) \
        .addBands(ee.Image(865).divide(1000)) \
        .addBands(ee.Image(1609).divide(1000)) \
        .addBands(ee.Image(2201).divide(1000)) \
        .toArray().toArray(1)

    # create an empty image to save new Tr values to
    Tr_OLI = (P_OLI.divide(Po_OLI)).multiply(
        ee.Image(0.008569).multiply(bandCenter_OLI.pow(-4))).multiply(
        (ee.Image(1).add(ee.Image(0.0113).multiply(bandCenter_OLI.pow(-2))).add(
            ee.Image(0.00013).multiply(bandCenter_OLI.pow(-4)))))

    # Fresnel Reflection #
    # Specular reflection (s- and p- polarization states)
    theta_V_OLI = ee.Image(0.0000000001)
    sin_theta_j_OLI = sindSunZe_OLI.divide(ee.Image(1.333))

    theta_j_OLI = sin_theta_j_OLI.asin().multiply(ee.Image(180).divide(pi))

    theta_SZ_OLI = SunZe_OLI

    R_theta_SZ_s_OLI = (
        ((theta_SZ_OLI.multiply(pi.divide(ee.Image(180)))).subtract(
            theta_j_OLI.multiply(pi.divide(ee.Image(180))))).sin().pow(2)
    ).divide(
        ((theta_SZ_OLI.multiply(pi.divide(ee.Image(180)))).add(
            theta_j_OLI.multiply(pi.divide(ee.Image(180))))).sin().pow(2))

    R_theta_V_s_OLI = ee.Image(0.0000000001)

    R_theta_SZ_p_OLI = (
        ((theta_SZ_OLI.multiply(pi.divide(180))).subtract(
            theta_j_OLI.multiply(pi.divide(180)))).tan().pow(2)
    ).divide(
        ((theta_SZ_OLI.multiply(pi.divide(180))).add(
            theta_j_OLI.multiply(pi.divide(180)))).tan().pow(2))

    R_theta_V_p_OLI = ee.Image(0.0000000001)

    R_theta_SZ_OLI = ee.Image(0.5).multiply(R_theta_SZ_s_OLI.add(R_theta_SZ_p_OLI))
    R_theta_V_OLI = ee.Image(0.5).multiply(R_theta_V_s_OLI.add(R_theta_V_p_OLI))

    # Rayleigh scattering phase function #
    # Sun-sensor geometry
    theta_neg_OLI = ((cosdSunZe_OLI.multiply(ee.Image(-1))).multiply(cosdSatZe_OLI)) \
        .subtract((sindSunZe_OLI).multiply(sindSatZe_OLI).multiply(cosdRelAz_OLI))

    theta_neg_inv_OLI = theta_neg_OLI.acos().multiply(ee.Image(180).divide(pi))

    theta_pos_OLI = (cosdSunZe_OLI.multiply(cosdSatZe_OLI)) \
        .subtract(sindSunZe_OLI.multiply(sindSatZe_OLI).multiply(cosdRelAz_OLI))

    theta_pos_inv_OLI = theta_pos_OLI.acos().multiply(ee.Image(180).divide(pi))

    cosd_tni_OLI = theta_neg_inv_OLI.multiply(pi.divide(180)).cos()  # in degrees
    cosd_tpi_OLI = theta_pos_inv_OLI.multiply(pi.divide(180)).cos()  # in degrees

    Pr_neg_OLI = ee.Image(0.75).multiply((ee.Image(1).add(cosd_tni_OLI.pow(2))))
    Pr_pos_OLI = ee.Image(0.75).multiply((ee.Image(1).add(cosd_tpi_OLI.pow(2))))

    # Rayleigh scattering phase function
    Pr_OLI = Pr_neg_OLI.add((R_theta_SZ_OLI.add(R_theta_V_OLI)).multiply(Pr_pos_OLI))

    # Calculate Lr
    denom_OLI = ee.Image(4).multiply(pi).multiply(cosdSatZe_OLI)
    Lr_OLI = (ESUN_OLI.multiply(Tr_OLI)).multiply(Pr_OLI.divide(denom_OLI))

    # Rayleigh corrected radiance
    Lrc_OLI = (Lt_OLI.divide(ee.Image(10))).subtract(Lr_OLI)
    LrcImg_OLI = Lrc_OLI.arrayProject([0]).arrayFlatten([bands_OLI])

    # Rayleigh corrected reflectance
    prc_OLI = Lrc_OLI.multiply(pi).multiply(d_OLI.pow(2)).divide(
        ESUN_OLI.multiply(cosdSunZe_OLI))
    prcImg_OLI = prc_OLI.arrayProject([0]).arrayFlatten([bands_OLI])

    # Aerosol Correction #
    # Bands in nm
    bands_nm_OLI = ee.Image(443).addBands(ee.Image(483)) \
        .addBands(ee.Image(561)) \
        .addBands(ee.Image(655)) \
        .addBands(ee.Image(865)) \
        .addBands(ee.Image(0)) \
        .addBands(ee.Image(0)) \
        .toArray().toArray(1)

    # Lam in SWIR bands
    Lam_6_OLI = LrcImg_OLI.select('B6')
    Lam_7_OLI = LrcImg_OLI.select('B7')

    # Calculate aerosol type
    eps_OLI = (((((Lam_7_OLI).divide(ESUNImg_OLI.select('B7'))).log()).subtract(
        ((Lam_6_OLI).divide(ESUNImg_OLI.select('B6'))).log())).divide(
        ee.Image(2201).subtract(ee.Image(1609)))).multiply(mask)

    # Calculate multiple scattering of aerosols for each band
    Lam_OLI = (Lam_7_OLI).multiply(
        ((ESUN_OLI).divide(ESUNImg_OLI.select('B7')))).multiply(
        (eps_OLI.multiply(ee.Image(-1))).multiply(
            (bands_nm_OLI.divide(ee.Image(2201)))).exp())

    # diffuse transmittance
    trans_OLI = Tr_OLI.multiply(ee.Image(-1)).divide(ee.Image(2)).multiply(
        ee.Image(1).divide(cosdSatZe_OLI)).exp()

    # Compute water-leaving radiance
    Lw_OLI = Lrc_OLI.subtract(Lam_OLI).divide(trans_OLI)

    # water-leaving reflectance
    pw_OLI = (Lw_OLI.multiply(pi).multiply(d_OLI.pow(2)).divide(
        ESUN_OLI.multiply(cosdSunZe_OLI)))
    pwImg_OLI = pw_OLI.arrayProject([0]).arrayFlatten([bands_OLI])

    # Rrs
    Rrs_coll = (pw_OLI.divide(pi).arrayProject([0]).arrayFlatten(
        [bands_OLI]).slice(0, 5))

    return (Rrs_coll.set('system:time_start', img.get('system:time_start')))
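# A minimal usage sketch. l8Correction relies on several globals defined
# elsewhere in the source script and assumed here: `pi` (an ee.Image
# constant), `ozone` (an ozone ImageCollection), `iniDate`/`endDate`
# (date strings), and `mask` (a water/ROI mask image). The `roi` geometry
# below is a hypothetical placeholder.
l8_toa = (ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA')
          .filterDate(iniDate, endDate)
          .filterBounds(roi))
rrs_coll = l8_toa.map(l8Correction)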
def get_flood_PopbyCountryCIESEN(floodImage):
    """
    Args:
        floodImage: the standard Earth Engine Image object output by the
            map_DFO_event function (the region of interest is taken from
            its geometry)

    Returns:
        A feature collection with one feature per country touched by the
        flood event, each with a population and area count and properties
        including:
            'Index': the event index ID
            'Year': start year of the event map
            'Country': the country name
            'Exposed': the number of people in the mapped flood from the
                GPWv4 population data
            'Area': total area of detected flood
    """
    import ee
    ee.Initialize()

    roiGEO = floodImage.geometry()
    permWater = ee.Image("JRC/GSW1_0/GlobalSurfaceWater").select("transition").eq(1)

    def maskImages(image):
        nonFlood = image.select("flooded")
        waterMask = nonFlood.multiply(permWater.neq(1))
        return image.select("flooded").mask(waterMask)

    # Extract the final flood extent image data as its own variable for analysis
    floodExtent = maskImages(ee.Image(floodImage.select("flooded")))

    # Check that there are actually flooded pixels; some images are only 0 and
    # masked, which will make this function fail. If there are none, return a
    # feature with 0 for population and area affected.
    # maxVal = floodExtent.reduceRegion(reducer=ee.Reducer.max(), maxPixels=1e9)

    # Import the GPWv4 image collection, clip it to the study area, and get
    # the UN-adjusted data
    popAll = ee.ImageCollection(
        "CIESIN/GPWv4/unwpp-adjusted-population-count").filterBounds(roiGEO)

    # Get the event year and available population years to find the population
    # dataset closest in time to the event
    eventYear = ee.Date(floodImage.get('Began')).get('year')

    def year_diff(popImg):
        popYear = ee.Date(popImg.get('system:index')).get('year')
        diff = ee.Number(eventYear).subtract(ee.Number(popYear)).abs()
        return popImg.set({"year_diff": diff})

    withYearDiffs = popAll.map(year_diff).sort('year_diff')
    closestYear = withYearDiffs.first().get('system:index')
    popImg = ee.Image(
        popAll.filterMetadata('system:index', 'equals', closestYear).first())

    # Mask the population dataset using the flood extent layer
    popScale = popImg.projection().nominalScale()
    popImageMasked = popImg.updateMask(floodExtent)

    # Now only select the countries the flood touches
    countries = ee.FeatureCollection('ft:1tdSwUL7MVpOauSgRzqVTOwdfy17KDbw-1d9omPw')
    getcountries = countries.filterBounds(floodExtent.geometry().bounds())

    # Get the area of the flood at the scale of the flood map
    floodAreaImg = floodExtent.multiply(ee.Image.pixelArea())
    map_scale = floodExtent.projection().nominalScale()

    index = ee.Image(floodImage).get("Index")
    # Renamed from `began` so the name matches its use below
    began_year = ee.Date(floodImage.get("Began")).get("year")

    def countrieswithpop(feature):
        popsum = popImageMasked.reduceRegion(reducer=ee.Reducer.sum(),
                                             geometry=feature.geometry(),
                                             scale=popScale,
                                             maxPixels=1e9)
        pop = popsum.get("population-count")
        areasum = floodAreaImg.reduceRegion(reducer=ee.Reducer.sum(),
                                            geometry=feature.geometry(),
                                            scale=map_scale,
                                            maxPixels=1e9)
        area = areasum.get("flooded")
        return ee.Feature(None, {
            "Index": index,
            "Year": began_year,
            "Country": feature.get("Country"),
            "Exposed": pop,
            "Area": area
        })

    stat = ee.FeatureCollection(getcountries).map(countrieswithpop)
    return ee.FeatureCollection(stat).set({"Index": index})
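# A minimal usage sketch, assuming `flood_img` was produced upstream by
# map_DFO_event and carries the 'Index' and 'Began' properties the function
# reads; the export call is illustrative.
per_country = get_flood_PopbyCountryCIESEN(flood_img)
task = ee.batch.Export.table.toDrive(collection=per_country,
                                     description='flood_pop_by_country',
                                     fileFormat='CSV')
task.start()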
def get_flood_PopArea_WP(floodImage):
    """Compute the estimated affected population and flooded area of a flood
    event given a computed flood image.

    Args:
        floodImage: the standard Earth Engine Image object output by the
            map_DFO_event function (the region of interest is taken from
            its geometry)

    Returns:
        An ee.Feature with properties including:
            'Area_flooded_(km2)': total area of detected flood in km²
            'Pop_Exposed': the number of people in the mapped flood from
                WorldPop data
        plus the properties copied from floodImage (e.g. 'Index', 'Began',
        'Ended')
    """
    import ee
    ee.Initialize()

    roiGEO = floodImage.geometry()

    # Extract the final flood extent image data as its own variable for analysis
    floodExtent = ee.Image(floodImage.select("flooded"))

    # Check that there are actually flooded pixels; some images are only 0 and
    # masked, which will make this function fail. If there are none, return a
    # feature with 0 for population and area affected.
    # maxVal = floodExtent.reduceRegion(reducer=ee.Reducer.max(), maxPixels=1e9)

    # Import the WorldPop image collection, clip it to the study area, and get
    # the UN-adjusted data
    worldPopAll = ee.ImageCollection("WorldPop/POP").filterBounds(
        roiGEO).filterMetadata('UNadj', 'equals', 'yes')
    valid = ee.Algorithms.If(worldPopAll.first(), True, False)

    # Get the event year and available population years to find the population
    # dataset closest in time to the event
    eventYear = ee.Date(floodImage.get('Began')).get('year')

    def year_diff(popImg):
        diff = ee.Number(eventYear).subtract(ee.Number(popImg.get('year'))).abs()
        return popImg.set({"year_diff": diff})

    withYearDiffs = worldPopAll.map(year_diff).sort('year_diff')
    closestYear = withYearDiffs.first().get('year')
    worldPop = worldPopAll.filterMetadata('year', 'equals', closestYear).mosaic()

    # Mask the population dataset using the flood extent layer
    popImageColl = worldPop.updateMask(floodExtent)
    wpScale = ee.Image(worldPopAll.first()).projection().nominalScale()

    # Calculate the affected population; the result is a dictionary
    popAffected = popImageColl.reduceRegion(reducer=ee.Reducer.sum(),
                                            geometry=roiGEO,
                                            scale=wpScale,
                                            maxPixels=1e9,
                                            bestEffort=True)

    # Get the area of the flood at the scale of the flood map
    floodAreaImg = floodExtent.multiply(ee.Image.pixelArea())

    # map scale
    map_scale = floodExtent.projection().nominalScale()
    floodArea = floodAreaImg.reduceRegion(reducer=ee.Reducer.sum(),
                                          geometry=roiGEO,
                                          scale=map_scale,
                                          maxPixels=1e9,
                                          bestEffort=True)

    results = {
        'Area_flooded_(km2)': ee.Number(floodArea.get('flooded')).divide(1e6),
        'Pop_Exposed': ee.Number(popAffected.get('population')).round()
    }
    return ee.Feature(ee.Geometry.Point([100, 100]),
                      results).copyProperties(floodImage)
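# A companion sketch for the WorldPop variant above, under the same
# assumption that `flood_img` comes from map_DFO_event.
summary = get_flood_PopArea_WP(flood_img)
print(summary.getInfo()['properties'])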
# %%
# setOptions assumes a geemap-style folium Map wrapper; vanilla folium.Map
# has no such method.
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')

# %%
'''
## Add Earth Engine Python script
'''

# %%
collection = ee.ImageCollection('LANDSAT/LC08/C01/T1')
point = ee.Geometry.Point(-122.262, 37.8719)
start = ee.Date('2014-06-01')
finish = ee.Date('2014-10-01')

filteredCollection = ee.ImageCollection('LANDSAT/LC08/C01/T1') \
    .filterBounds(point) \
    .filterDate(start, finish) \
    .sort('CLOUD_COVER', True)

first = filteredCollection.first()

# Define visualization parameters in an object literal.
vizParams = {
    'bands': ['B5', 'B4', 'B3'],
    'min': 5000,
    'max': 15000,
    'gamma': 1.3
}
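# %%
# A minimal sketch of displaying `first` on the folium map. folium has no
# native Earth Engine support, so this uses the common add_ee_layer helper
# pattern; the tile-URL format below is the standard ee.Image.getMapId result.
import folium

def add_ee_layer(fmap, ee_image, vis_params, name):
    """Add an Earth Engine image as a folium tile layer."""
    map_id_dict = ee_image.getMapId(vis_params)
    folium.raster_layers.TileLayer(
        tiles=map_id_dict['tile_fetcher'].url_format,
        attr='Map Data &copy; Google Earth Engine',
        name=name,
        overlay=True,
        control=True,
    ).add_to(fmap)

add_ee_layer(Map, first, vizParams, 'Least cloudy Landsat 8')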
def get(self):
    """Sets up the request to Earth Engine and returns the map information."""
    do_add_DOY_band = True  # ***PG Edit***
    do_add_nOBS_band = True  # ***PG Edit***
    # If use_asap_pbc is True, user-defined compositing procedures are used;
    # if False, the mosaic() reducer is used instead. Request parameters
    # arrive as strings, so parse to a bool rather than testing identity
    # with True/False.
    use_asap_pbc = self.request.get('use_asap_pbc') == 'true'

    ee.Initialize(config.EE_CREDENTIALS)  # todo PG reactivate
    ee.data.setDeadline(120000)
    try:  # todo PG edit
        urlfetch.set_default_fetch_deadline(120000)
    except:
        pass

    gaul1 = ee.FeatureCollection('users/gglemoine62/asap_level1')

    asap1_id = int(self.request.get('asap1_id'))
    cloud_pcent = int(self.request.get('cloudpcent'))
    deltad = int(self.request.get('deltad'))

    end_date = ee.Date(self.request.get('end_period'))
    start_date = end_date.advance(-deltad, 'day')

    roi = ee.Feature(gaul1.filterMetadata('asap1_id', 'equals', asap1_id).first())
    # start_date = ee.Date('2017-06-20')  # ee.Date(datetime.datetime.now()).advance(-40, 'day')
    # end_date = ee.Date('2017-07-20')  # ee.Date(datetime.datetime.now()).advance(-10, 'day')

    start_date_1Y = start_date.advance(-1, 'year')
    end_date_1Y = end_date.advance(-1, 'year')

    layers = []

    # Select S2
    s2_visualization_options = {
        'bands': ','.join(['B8', 'B11', 'B4']),
        'min': 0,
        'max': ','.join(['6000', '6000', '4000']),
        'format': 'png'
    }

    if use_asap_pbc:
        weights = asap_highres.pbc_weights_selector(interval=deltad)
        s2now = asap_highres.get_single_asap_pbc_layer(
            start_date, end_date, region=roi.geometry(), weights=weights,
            add_DOY=do_add_DOY_band, add_nOBS=do_add_nOBS_band)  # ***PG Edit***
    else:
        s2nowCol = ee.ImageCollection('COPERNICUS/S2') \
            .filterDate(start_date, end_date) \
            .filterMetadata('CLOUDY_PIXEL_PERCENTAGE', 'less_than', cloud_pcent) \
            .map(asap_highres.s2_cloudmasking) \
            .map(asap_highres.add_DOY_band) \
            .filterBounds(roi.geometry())
        s2now = ee.Image(s2nowCol.mosaic()).clip(roi.geometry())
        s2now = s2now.addBands([asap_highres.get_nObs_band(s2nowCol).clip(roi.geometry())])  # ***PG Edit***

    s2now_visualization = s2now.getMapId(s2_visualization_options)
    s2now_nb = int(s2now.bandNames().size().getInfo())

    # Add the recent Sentinel-2 composite.
    if s2now_nb > 0:
        layers.append({
            'mapid': s2now_visualization['mapid'],
            'label': "Sentinel-2 ({} to {})".format(
                ee.Date.format(start_date, 'd MMM Y').getInfo(),
                ee.Date.format(end_date, 'd MMM Y').getInfo()),
            'token': s2now_visualization['token']
        })

    if use_asap_pbc:
        s2lastyear = asap_highres.get_single_asap_pbc_layer(
            start_date_1Y, end_date_1Y, region=roi.geometry(), weights=weights,
            add_DOY=do_add_DOY_band, add_nOBS=do_add_nOBS_band)  # ***PG Edit***
    else:
        s2lastCol = ee.ImageCollection('COPERNICUS/S2') \
            .filterDate(start_date_1Y, end_date_1Y) \
            .filterMetadata('CLOUDY_PIXEL_PERCENTAGE', 'less_than', cloud_pcent) \
            .map(asap_highres.s2_cloudmasking) \
            .map(asap_highres.add_DOY_band) \
            .filterBounds(roi.geometry())
        s2lastyear = ee.Image(s2lastCol.mosaic()).clip(roi.geometry())
        s2lastyear = s2lastyear.addBands([asap_highres.get_nObs_band(s2lastCol).clip(roi.geometry())])  # ***PG Edit***

    # Add the last year's Sentinel-2 composite.
    s2lastyear_visualization = s2lastyear.getMapId(s2_visualization_options)
    s2ly_nb = int(s2lastyear.bandNames().size().getInfo())
    if s2ly_nb > 0:
        layers.append({
            'mapid': s2lastyear_visualization['mapid'],
            'label': "Sentinel-2 ({} to {})".format(
                ee.Date.format(start_date_1Y, 'd MMM Y').getInfo(),
                ee.Date.format(end_date_1Y, 'd MMM Y').getInfo()),
            'token': s2lastyear_visualization['token']
        })

    # ***PG Edit***
    if do_add_DOY_band:
        # Prepare inputs for visualization of the observation DOY band;
        # currently get the min and max DOY from the date range.
        minmax_doy_1 = [start_date.getRelative('day', 'year').add(1).getInfo(),
                        end_date.getRelative('day', 'year').add(1).getInfo()]
        minmax_doy_2 = [start_date_1Y.getRelative('day', 'year').add(1).getInfo(),
                        end_date_1Y.getRelative('day', 'year').add(1).getInfo()]
        s2_DOY_visopts1 = {'bands': 'DOY', 'min': str(minmax_doy_1[0]),
                           'max': str(minmax_doy_1[1]), 'palette': "08ff1d, ff1306"}
        s2_DOY_visopts2 = {'bands': 'DOY', 'min': str(minmax_doy_2[0]),
                           'max': str(minmax_doy_2[1]), 'palette': "08ff1d, ff1306"}
        # Compute visualization:
        s2now_DOY_vis = s2now.getMapId(s2_DOY_visopts1)
        s2last_DOY_vis = s2lastyear.getMapId(s2_DOY_visopts2)
        # Compile layer-specific information
        doy_layer_info_this = {
            'mapid': s2now_DOY_vis['mapid'],
            'label': 'S2 Observation DOY (min={0},max={1})'.format(minmax_doy_1[0], minmax_doy_1[1]),
            'token': s2now_DOY_vis['token']}
        doy_layer_info_last = {
            'mapid': s2last_DOY_vis['mapid'],
            'label': 'S2 Observation DOY (min={0},max={1})'.format(minmax_doy_2[0], minmax_doy_2[1]),
            'token': s2last_DOY_vis['token']}
        # Append layer-specific information to the result list
        layers.append(doy_layer_info_this)
        layers.append(doy_layer_info_last)

    # ***PG Edit***
    if do_add_nOBS_band:
        # Prepare inputs for visualization of the number-of-observations
        # (nObs) band; currently get min/max values for the stretch as
        # [0, nDaysInterval].
        minmax_nObs_1 = [0, minmax_doy_1[1] - minmax_doy_1[0]]
        minmax_nObs_2 = [0, minmax_doy_2[1] - minmax_doy_2[0]]
        s2_NOBS_visopts1 = {'bands': 'nObs', 'min': str(minmax_nObs_1[0]),
                            'max': str(minmax_nObs_1[1]),
                            'palette': '000000, 2892C7, FAFA64, E81014'}
        s2_NOBS_visopts2 = {'bands': 'nObs', 'min': str(minmax_nObs_2[0]),
                            'max': str(minmax_nObs_2[1]),
                            'palette': '000000, 2892C7, FAFA64, E81014'}
        # Compute visualization:
        s2now_NOBS_vis = s2now.getMapId(s2_NOBS_visopts1)
        s2last_NOBS_vis = s2lastyear.getMapId(s2_NOBS_visopts2)
        # Compile layer-specific information
        nObs_layer_info_this = {
            'mapid': s2now_NOBS_vis['mapid'],
            'label': 'S2 Number of clear observations (min={0},max={1})'.format(minmax_nObs_1[0], minmax_nObs_1[1]),
            'token': s2now_NOBS_vis['token']}
        nObs_layer_info_last = {
            'mapid': s2last_NOBS_vis['mapid'],
            'label': 'S2 Number of clear observations (min={0},max={1})'.format(minmax_nObs_2[0], minmax_nObs_2[1]),
            'token': s2last_NOBS_vis['token']}
        # Append layer-specific information to the result list
        layers.append(nObs_layer_info_this)
        layers.append(nObs_layer_info_last)

    if (s2now_nb > 0) and (s2ly_nb > 0):
        s2diff = s2now.normalizedDifference(['B8', 'B4']).subtract(
            s2lastyear.normalizedDifference(['B8', 'B4']))
        red2green = ['ffffff', 'a50026', 'd73027', 'f46d43', 'fdae61', 'fee08b',
                     'ffffbf', 'd9ef8b', 'a6d96a', '66bd63', '1a9850', '006837']
        s2diff_visualization_options = {
            'palette': ','.join(red2green),
            'min': -0.6,
            'max': 0.6,
            'format': 'png'
        }
        s2diff_visualization = ee.Image(s2diff).getMapId(s2diff_visualization_options)
        layers.append({
            'mapid': s2diff_visualization['mapid'],
            'label': 'Sentinel-2 NDVI difference',
            'token': s2diff_visualization['token']
        })

    if not use_asap_pbc:
        L8NowCol = ee.ImageCollection('LANDSAT/LC08/C01/T1_RT_TOA') \
            .filterDate(start_date, end_date) \
            .filterMetadata('CLOUD_COVER_LAND', 'less_than', cloud_pcent) \
            .map(asap_highres.L8_TOA_masking) \
            .map(asap_highres.add_DOY_band) \
            .filterBounds(roi.geometry())
        l8now = ee.Image(L8NowCol.mosaic()).clip(roi.geometry())
        l8now = l8now.addBands([asap_highres.get_nObs_band(L8NowCol).clip(roi.geometry())])  # ***PG Edit***
    else:
        l8now = asap_highres.get_single_asap_pbc_layer(
            start_date, end_date, sensor='L8', region=roi.geometry(),
            weights=weights, add_DOY=do_add_DOY_band,
            add_nOBS=do_add_nOBS_band)  # ***PG Edit***

    # A set of visualization parameters using the landcover palette.
    l8_visualization_options = {
        'bands': ','.join(['B5', 'B6', 'B4']),
        'min': 0,
        'max': ','.join(['0.6', '0.6', '0.4']),
        'format': 'png'
    }

    l8now_visualization = l8now.getMapId(l8_visualization_options)
    l8now_nb = int(l8now.bandNames().size().getInfo())
    if l8now_nb > 0:
        layers.append({
            'mapid': l8now_visualization['mapid'],
            'label': "Landsat-8 ({} to {})".format(
                ee.Date.format(start_date, 'd MMM Y').getInfo(),
                ee.Date.format(end_date, 'd MMM Y').getInfo()),
            'token': l8now_visualization['token']
        })

    if not use_asap_pbc:
        # mosaic() based composite
        L8LastCol = ee.ImageCollection('LANDSAT/LC08/C01/T1_RT_TOA') \
            .filterDate(start_date_1Y, end_date_1Y) \
            .filterMetadata('CLOUD_COVER_LAND', 'less_than', cloud_pcent) \
            .map(asap_highres.L8_TOA_masking) \
            .map(asap_highres.add_DOY_band) \
            .filterBounds(roi.geometry())
        l8lastyear = ee.Image(L8LastCol.mosaic()).clip(roi.geometry())
        l8lastyear = l8lastyear.addBands([asap_highres.get_nObs_band(L8LastCol).clip(roi.geometry())])  # ***PG Edit***
    else:  # ***PG Edit***
        l8lastyear = asap_highres.get_single_asap_pbc_layer(
            start_date_1Y, end_date_1Y, sensor='L8', region=roi.geometry(),
            weights=weights, add_DOY=do_add_DOY_band, add_nOBS=do_add_nOBS_band)

    l8lastyear_visualization = l8lastyear.getMapId(l8_visualization_options)
    l8ly_nb = int(l8lastyear.bandNames().size().getInfo())

    # ***PG Edit***
    if do_add_DOY_band:
        # Prepare inputs for visualization of the observation DOY band,
        # assuming minmax_doy_1 and minmax_doy_2 have already been computed
        # for S2; s2_DOY_visopts1/2 are also valid for L8.
        # Compute visualization (the original called getMapId on the S2
        # composites here, which looks like a copy-paste slip; the L8
        # composites are used instead):
        l8now_DOY_vis = l8now.getMapId(s2_DOY_visopts1)
        l8lastyear_DOY_vis = l8lastyear.getMapId(s2_DOY_visopts2)
        # Compile layer-specific information
        l8_doy_layer_info_this = {
            'mapid': l8now_DOY_vis['mapid'],
            'label': 'L8 Observation DOY (min={0},max={1})'.format(minmax_doy_1[0], minmax_doy_1[1]),
            'token': l8now_DOY_vis['token']}
        l8_doy_layer_info_last = {
            'mapid': l8lastyear_DOY_vis['mapid'],
            'label': 'L8 Observation DOY (min={0},max={1})'.format(minmax_doy_2[0], minmax_doy_2[1]),
            'token': l8lastyear_DOY_vis['token']}
        # Append layer-specific information to the result list
        layers.append(l8_doy_layer_info_this)
        layers.append(l8_doy_layer_info_last)

    # ***PG Edit***
    if do_add_nOBS_band:
        # Prepare inputs for visualization of the number-of-observations
        # (nObs) band. Compute visualization:
        l8now_NOBS_vis = l8now.getMapId(s2_NOBS_visopts1)
        l8lastyear_NOBS_vis = l8lastyear.getMapId(s2_NOBS_visopts2)
        # Compile layer-specific information
        l8_nObs_layer_info_this = {
            'mapid': l8now_NOBS_vis['mapid'],
            'label': 'L8 Number of clear observations (min={0},max={1})'.format(minmax_nObs_1[0], minmax_nObs_1[1]),
            'token': l8now_NOBS_vis['token']}
        l8_nObs_layer_info_last = {
            'mapid': l8lastyear_NOBS_vis['mapid'],
            'label': 'L8 Number of clear observations (min={0},max={1})'.format(minmax_nObs_2[0], minmax_nObs_2[1]),
            'token': l8lastyear_NOBS_vis['token']}
        # Append layer-specific information to the result list
        layers.append(l8_nObs_layer_info_this)
        layers.append(l8_nObs_layer_info_last)

    if l8ly_nb > 0:
        layers.append({
            'mapid': l8lastyear_visualization['mapid'],
            'label': "Landsat-8 ({} to {})".format(
                ee.Date.format(start_date_1Y, 'd MMM Y').getInfo(),
                ee.Date.format(end_date_1Y, 'd MMM Y').getInfo()),
            'token': l8lastyear_visualization['token']
        })

    if (l8now_nb > 0) and (l8ly_nb > 0):
        l8diff = l8now.normalizedDifference(['B5', 'B4']).subtract(
            l8lastyear.normalizedDifference(['B5', 'B4']))
        red2green = ['ffffff', 'a50026', 'd73027', 'f46d43', 'fdae61', 'fee08b',
                     'ffffbf', 'd9ef8b', 'a6d96a', '66bd63', '1a9850', '006837']
        l8diff_visualization_options = {
            'palette': ','.join(red2green),
            'min': -0.6,
            'max': 0.6,
            'format': 'png'
        }
        l8diff_visualization = l8diff.getMapId(l8diff_visualization_options)
        layers.append({
            'mapid': l8diff_visualization['mapid'],
            'label': 'Landsat 8 NDVI difference',
            'token': l8diff_visualization['token']
        })

    if not layers:
        layers.append({'mapid': None, 'label': 'No S2, L8 found for periods', 'token': None})
    else:
        background = ee.Image().getMapId({})
        layers.append({'mapid': background['mapid'],
                       'label': 'Google Background',
                       'token': background['token']})

    self.response.out.write(json.dumps(layers))
def from_scene_et_fraction(scene_coll, start_date, end_date, variables,
                           model_args, t_interval='custom',
                           interp_method='linear', interp_days=32,
                           _interp_vars=['et_fraction', 'ndvi'],
                           use_joins=False):
    """
    Parameters
    ----------
    scene_coll : ee.ImageCollection
    start_date : str
    end_date : str
    variables : list
        List of variables that will be returned in the Image Collection.
    model_args : dict
    t_interval : {'daily', 'monthly', 'annual', 'custom'}, optional
        Time interval over which to interpolate and aggregate values.
        The default is 'custom', which means the aggregation time period
        will be controlled by the start and end date parameters.
    interp_method : {'linear'}, optional
        Interpolation method. The default is 'linear'.
    interp_days : int, str, optional
        Number of extra days before the start date and after the end date
        to include in the interpolation calculation. The default is 32.
    _interp_vars : list, optional
        The variables that can be interpolated to daily timesteps.
        The default is to interpolate the 'et_fraction' and 'ndvi' bands.

    Returns
    -------
    ee.ImageCollection

    Raises
    ------
    ValueError

    """
    # Check that the input parameters are valid
    if t_interval.lower() not in ['daily', 'monthly', 'annual', 'custom']:
        raise ValueError('unsupported t_interval: {}'.format(t_interval))
    elif interp_method.lower() not in ['linear']:
        raise ValueError('unsupported interp_method: {}'.format(interp_method))

    if type(interp_days) is str and utils.is_number(interp_days):
        interp_days = int(interp_days)
    elif not type(interp_days) is int:
        raise TypeError('interp_days must be an integer')
    elif interp_days <= 0:
        raise ValueError('interp_days must be a positive integer')

    if not variables:
        raise ValueError('variables parameter must be set')

    # Adjust start/end dates based on t_interval
    # Increase the date range to fully include the time interval
    start_dt = datetime.datetime.strptime(start_date, '%Y-%m-%d')
    end_dt = datetime.datetime.strptime(end_date, '%Y-%m-%d')
    if t_interval.lower() == 'annual':
        start_dt = datetime.datetime(start_dt.year, 1, 1)
        # Convert end date to inclusive, flatten to beginning of year,
        # then add a year which will make it exclusive
        end_dt -= relativedelta(days=+1)
        end_dt = datetime.datetime(end_dt.year, 1, 1)
        end_dt += relativedelta(years=+1)
    elif t_interval.lower() == 'monthly':
        start_dt = datetime.datetime(start_dt.year, start_dt.month, 1)
        end_dt -= relativedelta(days=+1)
        end_dt = datetime.datetime(end_dt.year, end_dt.month, 1)
        end_dt += relativedelta(months=+1)
    start_date = start_dt.strftime('%Y-%m-%d')
    end_date = end_dt.strftime('%Y-%m-%d')

    # The start/end date for the interpolation include more days
    # (+/- interp_days) than are included in the ETr collection
    interp_start_dt = start_dt - datetime.timedelta(days=interp_days)
    interp_end_dt = end_dt + datetime.timedelta(days=interp_days)
    interp_start_date = interp_start_dt.date().isoformat()
    interp_end_date = interp_end_dt.date().isoformat()

    # Get reference ET source
    if 'et_reference_source' in model_args.keys():
        et_reference_source = model_args['et_reference_source']
    else:
        raise ValueError('et_reference_source was not set')

    # Get reference ET band name
    if 'et_reference_band' in model_args.keys():
        et_reference_band = model_args['et_reference_band']
    else:
        raise ValueError('et_reference_band was not set')

    # Get reference ET factor
    if 'et_reference_factor' in model_args.keys():
        et_reference_factor = model_args['et_reference_factor']
    else:
        et_reference_factor = 1.0
        logging.debug('et_reference_factor was not set, default to 1.0')
        # raise ValueError('et_reference_factor was not set')

    # Get reference ET resample
    if 'et_reference_resample' in model_args.keys():
        et_reference_resample = model_args['et_reference_resample']
    else:
        et_reference_resample = 'nearest'
        logging.debug('et_reference_resample was not set, default to nearest')
        # raise ValueError('et_reference_resample was not set')

    if type(et_reference_source) is str:
        # Assume a string source is a single image collection ID,
        # not a list of collection IDs or an ee.ImageCollection
        daily_et_reference_coll = ee.ImageCollection(et_reference_source) \
            .filterDate(start_date, end_date) \
            .select([et_reference_band], ['et_reference'])
    # elif isinstance(et_reference_source, computedobject.ComputedObject):
    #     # Interpret computed objects as image collections
    #     daily_et_reference_coll = ee.ImageCollection(et_reference_source)\
    #         .select([et_reference_band])\
    #         .filterDate(self.start_date, self.end_date)
    else:
        raise ValueError(
            'unsupported et_reference_source: {}'.format(et_reference_source))

    # TODO: Need to add time and mask to the scene collection
    # The time band is always needed for interpolation
    interp_vars = _interp_vars + ['time']

    # DEADBEEF - I don't think this is needed since interp_vars is hardcoded
    # # Initialize variable list to only variables that can be interpolated
    # interp_vars = list(set(interp_vars) & set(variables))
    #
    # # To return ET, the ETf must be interpolated
    # if 'et' in variables and 'et_fraction' not in interp_vars:
    #     interp_vars.append('et_fraction')
    #
    # # With the current interpolate.daily() function,
    # # something has to be interpolated in order to return et_reference
    # if 'et_reference' in variables and 'et_fraction' not in interp_vars:
    #     interp_vars.append('et_fraction')

    # Filter scene collection to the interpolation range
    # This probably isn't needed since scene_coll was built to this range
    # scene_coll = scene_coll.filterDate(interp_start_date, interp_end_date)

    # For count, compute the composite/mosaic image for the mask band only
    if 'count' in variables:
        aggregate_coll = openet.core.interpolate.aggregate_daily(
            image_coll=scene_coll.select(['mask']),
            start_date=start_date, end_date=end_date)
        # The following is needed because the aggregate collection can be
        # empty if there are no scenes in the target date range but there
        # are scenes in the interpolation date range.
        # Without this the count image will not be built but the other
        # bands will be, which causes a non-homogeneous image collection.
        aggregate_coll = aggregate_coll.merge(
            ee.Image.constant(0).rename(['mask']).set(
                {'system:time_start': ee.Date(start_date).millis()}))

    # Interpolate to a daily time step
    # NOTE: the daily function is not computing ET (ETf x ETr)
    #   but is returning the target (ETr) band
    daily_coll = openet.core.interpolate.daily(
        target_coll=daily_et_reference_coll,
        source_coll=scene_coll.select(interp_vars),
        interp_method=interp_method, interp_days=interp_days,
        use_joins=use_joins,
    )

    # Compute ET from ETf and ETr (if necessary)
    # The check for et_fraction is needed since it is back computed from ET and ETr
    # if 'et' in variables or 'et_fraction' in variables:
    def compute_et(img):
        """This function assumes ETr and ETf are present"""
        et_img = img.select(['et_fraction']).multiply(img.select(['et_reference']))
        return img.addBands(et_img.double().rename('et'))

    daily_coll = daily_coll.map(compute_et)

    def aggregate_image(agg_start_date, agg_end_date, date_format):
        """Aggregate the daily images within the target date range

        Parameters
        ----------
        agg_start_date : str
            Start date (inclusive).
        agg_end_date : str
            End date (exclusive).
        date_format : str
            Date format for system:index (uses EE JODA format).

        Returns
        -------
        ee.Image

        Notes
        -----
        Since this function takes multiple inputs, it is being called
        for each time interval by separate mappable functions

        """
        # if 'et' in variables or 'et_fraction' in variables:
        et_img = daily_coll.filterDate(agg_start_date, agg_end_date) \
            .select(['et']).sum()
        # if 'et_reference' in variables or 'et_fraction' in variables:
        et_reference_img = daily_coll.filterDate(agg_start_date, agg_end_date) \
            .select(['et_reference']).sum()
        if et_reference_factor:
            et_img = et_img.multiply(et_reference_factor)
            et_reference_img = et_reference_img.multiply(et_reference_factor)

        # DEADBEEF - This doesn't seem to be doing anything
        if et_reference_resample in ['bilinear', 'bicubic']:
            et_reference_img = et_reference_img.resample(et_reference_resample)

        image_list = []
        if 'et' in variables:
            image_list.append(et_img.float())
        if 'et_reference' in variables:
            image_list.append(et_reference_img.float())
        if 'et_fraction' in variables:
            # Compute average et fraction over the aggregation period
            image_list.append(
                et_img.divide(et_reference_img).rename(['et_fraction']).float())
        if 'ndvi' in variables:
            # Compute average ndvi over the aggregation period
            ndvi_img = daily_coll \
                .filterDate(agg_start_date, agg_end_date) \
                .mean().select(['ndvi']).float()
            image_list.append(ndvi_img)
        if 'count' in variables:
            count_img = aggregate_coll \
                .filterDate(agg_start_date, agg_end_date) \
                .select(['mask']).sum().rename('count').uint8()
            image_list.append(count_img)

        return ee.Image(image_list) \
            .set({
                'system:index': ee.Date(agg_start_date).format(date_format),
                'system:time_start': ee.Date(agg_start_date).millis()})
        #     .set(interp_properties) \

    # Combine input, interpolated, and derived values
    if t_interval.lower() == 'daily':
        def agg_daily(daily_img):
            # CGM - Double check that this time_start is a 0 UTC time.
            # It should be since it is coming from the interpolate source
            #   collection, but what if source is GRIDMET (+6 UTC)?
            agg_start_date = ee.Date(daily_img.get('system:time_start'))
            # CGM - This calls .sum() on collections with only one image
            return aggregate_image(
                agg_start_date=agg_start_date,
                agg_end_date=ee.Date(agg_start_date).advance(1, 'day'),
                date_format='YYYYMMdd')

        return ee.ImageCollection(daily_coll.map(agg_daily))

    elif t_interval.lower() == 'monthly':
        def month_gen(iter_start_dt, iter_end_dt):
            iter_dt = iter_start_dt
            # Conditional is "less than" because end date is exclusive
            while iter_dt < iter_end_dt:
                yield iter_dt.strftime('%Y-%m-%d')
                iter_dt += relativedelta(months=+1)

        month_list = ee.List(list(month_gen(start_dt, end_dt)))

        def agg_monthly(agg_start_date):
            return aggregate_image(
                agg_start_date=agg_start_date,
                agg_end_date=ee.Date(agg_start_date).advance(1, 'month'),
                date_format='YYYYMM')

        return ee.ImageCollection(month_list.map(agg_monthly))

    elif t_interval.lower() == 'annual':
        def year_gen(iter_start_dt, iter_end_dt):
            iter_dt = iter_start_dt
            while iter_dt < iter_end_dt:
                yield iter_dt.strftime('%Y-%m-%d')
                iter_dt += relativedelta(years=+1)

        year_list = ee.List(list(year_gen(start_dt, end_dt)))

        def agg_annual(agg_start_date):
            return aggregate_image(
                agg_start_date=agg_start_date,
                agg_end_date=ee.Date(agg_start_date).advance(1, 'year'),
                date_format='YYYY')

        return ee.ImageCollection(year_list.map(agg_annual))

    elif t_interval.lower() == 'custom':
        # Returning an ImageCollection to be consistent
        return ee.ImageCollection(aggregate_image(
            agg_start_date=start_date, agg_end_date=end_date,
            date_format='YYYYMMdd'))
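# A minimal usage sketch. `scene_coll` is assumed to be an ee.ImageCollection
# of per-scene images carrying 'et_fraction', 'ndvi', 'time', and 'mask'
# bands (e.g. built by a model's Collection class); the GRIDMET reference ET
# settings below are illustrative, not prescribed by this function.
monthly = from_scene_et_fraction(
    scene_coll,
    start_date='2017-04-01',
    end_date='2017-10-01',
    variables=['et', 'et_reference', 'et_fraction', 'count'],
    model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET',
                'et_reference_band': 'etr',
                'et_reference_factor': 0.85,
                'et_reference_resample': 'nearest'},
    t_interval='monthly')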
# don't use more than 50 images (the original comment said 30, but the
# check below uses 50)
if i < 50:
    # save the name of the image in a list
    p = features[i]['properties']['system:index']
    print(p)
    listID.append(p)

print("Appended all to the list")

output_list = []

# create a new image collection:
# loop over all images of the list and apply atmospheric correction
counter = 0
for m in listID:
    S2 = ee.Image(f'COPERNICUS/S2/{m}')
    toa = S2.divide(10000)
    info = S2.getInfo()['properties']
    date = ee.Date(datetime.datetime.utcfromtimestamp(info['system:time_start'] / 1000))
    # print(date)

    ### Credits to SAM MURPHY ###
    # i.e. Python uses seconds, EE uses milliseconds
    scene_date = datetime.datetime.utcfromtimestamp(info['system:time_start'] / 1000)
    # print(scene_date)
    solar_z = info['MEAN_SOLAR_ZENITH_ANGLE']

    h2o = Atmospheric.water(geom, date).getInfo()
    o3 = Atmospheric.ozone(geom, date).getInfo()
    aot = Atmospheric.aerosol(geom, date).getInfo()

    SRTM = ee.Image('CGIAR/SRTM90_V4')  # Shuttle Radar Topography Mission covers *most* of the Earth
    alt = SRTM.reduceRegion(reducer=ee.Reducer.mean(),
                            geometry=geom.centroid()).get('elevation').getInfo()
    km = alt / 1000  # i.e. Py6S uses units of kilometers

    s = SixS()
def get_image_collection_gif(
    ee_ic,
    out_dir,
    out_gif,
    vis_params,
    region,
    cmap=None,
    proj=None,
    fps=10,
    mp4=False,
    grid_interval=None,
    plot_title="",
    date_format="YYYY-MM-dd",
    fig_size=(10, 10),
    dpi_plot=100,
    file_format="png",
    north_arrow_dict={},
    scale_bar_dict={},
    verbose=True,
):
    """Download all the images in an image collection and use them to generate a gif/video.

    Args:
        ee_ic (object): ee.ImageCollection
        out_dir (str): The output directory of images and video.
        out_gif (str): The name of the gif file.
        vis_params (dict): Visualization parameters as a dictionary.
        region (list | tuple): Geospatial region of the image to render in format [E, S, W, N].
        fps (int, optional): Video frames per second. Defaults to 10.
        mp4 (bool, optional): Whether to create an mp4 video. Defaults to False.
        grid_interval (float | tuple[float]): Float specifying an interval at which
            to create gridlines; units are decimal degrees. Lists will be
            interpreted as (x_interval, y_interval), such as (0.1, 0.1). Defaults to None.
        plot_title (str): Plot title. Defaults to "".
        date_format (str, optional): A pattern, as described at
            http://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html.
            Defaults to "YYYY-MM-dd".
        fig_size (tuple, optional): Size of the figure.
        dpi_plot (int, optional): The resolution in dots per inch of the plot.
        file_format (str, optional): Either 'png' or 'jpg'.
        north_arrow_dict (dict, optional): Parameters for the north arrow.
            See https://geemap.org/cartoee/#geemap.cartoee.add_north_arrow. Defaults to {}.
        scale_bar_dict (dict, optional): Parameters for the scale bar.
            See https://geemap.org/cartoee/#geemap.cartoee.add_scale_bar. Defaults to {}.
        verbose (bool, optional): Whether or not to print text when the program is running. Defaults to True.
    """
    from .geemap import png_to_gif

    import matplotlib.pyplot as plt

    out_dir = os.path.abspath(out_dir)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    out_gif = os.path.join(out_dir, out_gif)

    count = int(ee_ic.size().getInfo())
    names = ee_ic.aggregate_array("system:index").getInfo()
    images = ee_ic.toList(count)

    dates = ee_ic.aggregate_array("system:time_start")
    dates = dates.map(lambda d: ee.Date(d).format(date_format)).getInfo()

    # list of file names
    img_list = []

    for i, date in enumerate(dates):
        image = ee.Image(images.get(i))
        name = str(names[i])
        name = name + "." + file_format
        out_img = os.path.join(out_dir, name)
        img_list.append(out_img)

        if verbose:
            print(f"Downloading {i+1}/{count}: {name} ...")

        # Size plot
        plt.figure(figsize=fig_size)

        # Plot image
        ax = get_map(image, region=region, vis_params=vis_params, cmap=cmap, proj=proj)

        # Add grid
        if grid_interval is not None:
            add_gridlines(ax, interval=grid_interval, linestyle=":")

        # Add title
        if len(plot_title) > 0:
            ax.set_title(label=plot_title + " " + date + "\n", fontsize=15)

        # Add scale bar
        if len(scale_bar_dict) > 0:
            add_scale_bar(ax, **scale_bar_dict)

        # Add north arrow
        if len(north_arrow_dict) > 0:
            add_north_arrow(ax, **north_arrow_dict)

        # Save plot
        plt.savefig(fname=out_img, dpi=dpi_plot)

        plt.clf()
        plt.close()

    out_gif = os.path.abspath(out_gif)
    png_to_gif(out_dir, out_gif, fps)
    if verbose:
        print(f"GIF saved to {out_gif}")

    if mp4:
        video_filename = out_gif.replace(".gif", ".mp4")

        try:
            import cv2
        except ImportError:
            print("Installing opencv-python ...")
            subprocess.check_call(["python", "-m", "pip", "install", "opencv-python"])
            import cv2

        # Video file name
        output_video_file_name = os.path.join(out_dir, video_filename)

        frame = cv2.imread(img_list[0])
        height, width, _ = frame.shape
        frame_size = (width, height)
        fps_video = fps

        # Make mp4
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")

        # Function
        def convert_frames_to_video(input_list, output_video_file_name,
                                    fps_video, frame_size):
            """Convert frames to video

            Args:
                input_list (list): Downloaded image name list.
                output_video_file_name (str): The name of the video file in the image directory.
                fps_video (int): Video frames per second.
                frame_size (tuple): Frame size.
            """
            out = cv2.VideoWriter(output_video_file_name, fourcc, fps_video, frame_size)
            num_frames = len(input_list)

            for i in range(num_frames):
                img_path = input_list[i]
                img = cv2.imread(img_path)
                out.write(img)

            out.release()
            cv2.destroyAllWindows()

        # Use function
        convert_frames_to_video(
            input_list=img_list,
            output_video_file_name=output_video_file_name,
            fps_video=fps_video,
            frame_size=frame_size,
        )

        if verbose:
            print(f"MP4 saved to {output_video_file_name}")
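# A minimal usage sketch, assuming the cartoee-style helpers referenced above
# (get_map, add_gridlines, etc.) are importable as in geemap.cartoee. The
# collection, region, and visualization values are illustrative only.
ndvi_ic = (ee.ImageCollection('MODIS/006/MOD13A2')
           .filterDate('2019-01-01', '2019-12-31')
           .select('NDVI'))
get_image_collection_gif(
    ee_ic=ndvi_ic,
    out_dir='ndvi_frames',
    out_gif='ndvi.gif',
    vis_params={'min': 0, 'max': 9000, 'palette': ['white', 'green']},
    region=[180, -60, -180, 85],  # [E, S, W, N]
    fps=4,
    plot_title='MOD13A2 NDVI',
)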
ndvi = ee.ImageCollection('MODIS/006/MOD13A2')

# choose dates
ndvi = ndvi.filterDate('2014-12-04', '2015-01-04')
ndvi = ndvi.select(['NDVI', 'EVI'])

# setting the Area of Interest (AOI); mwi_aoi is assumed to be defined
# elsewhere as an ee.Geometry
# moz_aoi = ee.Geometry.Rectangle(aoi)
aoi_bound = ndvi.filterBounds(mwi_aoi)

# the least cloudy image (note: MOD13A2 images do not carry a 'CLOUD_COVER'
# property, so this sort is borrowed from the Landsat workflow)
least_cloudy = ee.Image(aoi_bound.sort('CLOUD_COVER').first())

# how cloudy is it?
print('Cloud Cover (%):', least_cloudy.get('CLOUD_COVER').getInfo())

# when was this image taken?
date = ee.Date(least_cloudy.get('system:time_start'))
time = date.getInfo()['value'] / 1000.
dt.utcfromtimestamp(time).strftime('%Y-%m-%d %H:%M:%S')

task = ee.batch.Export.image.toDrive(image=least_cloudy,
                                     region=mwi_aoi.getInfo()['coordinates'],
                                     description='train' + '_ndvi',
                                     folder='train',
                                     fileNamePrefix='ndvi',
                                     scale=30,
                                     crs='EPSG:4326')
task.start()

## Evapotranspiration
evap = ee.ImageCollection("MODIS/006/MOD16A2")
def main(ini_path=None, overwrite_flag=False, delay_time=0, gee_key_file=None,
         max_ready=-1, reverse_flag=False):
    """Compute annual Tcorr images by WRS2 tile

    Parameters
    ----------
    ini_path : str
        Input file path.
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).
    delay_time : float, optional
        Delay time in seconds between starting export tasks (or checking the
        number of queued tasks, see "max_ready" parameter). The default is 0.
    gee_key_file : str, None, optional
        Earth Engine service account JSON key file (the default is None).
    max_ready : int, optional
        Maximum number of queued "READY" tasks. The default is -1, which
        implies no limit to the number of tasks that will be submitted.
    reverse_flag : bool, optional
        If True, process WRS2 tiles in reverse order.

    """
    logging.info('\nCompute annual Tcorr images by WRS2 tile')

    ini = utils.read_ini(ini_path)

    model_name = 'SSEBOP'
    # model_name = ini['INPUTS']['et_model'].upper()

    tmax_name = ini[model_name]['tmax_source']

    export_id_fmt = 'tcorr_scene_{product}_{wrs2}_annual'
    asset_id_fmt = '{coll_id}/{wrs2}'

    tcorr_annual_coll_id = '{}/{}_annual'.format(
        ini['EXPORT']['export_coll'], tmax_name.lower())

    wrs2_coll_id = 'projects/earthengine-legacy/assets/' \
                   'projects/usgs-ssebop/wrs2_descending_custom'
    wrs2_tile_field = 'WRS2_TILE'
    # wrs2_path_field = 'PATH'
    # wrs2_row_field = 'ROW'

    try:
        wrs2_tiles = str(ini['INPUTS']['wrs2_tiles'])
        wrs2_tiles = [x.strip() for x in wrs2_tiles.split(',')]
        wrs2_tiles = sorted([x.lower() for x in wrs2_tiles if x])
    except KeyError:
        wrs2_tiles = []
        logging.debug('  wrs2_tiles: not set in INI, defaulting to []')
    except Exception as e:
        raise e

    try:
        study_area_extent = str(ini['INPUTS']['study_area_extent']) \
            .replace('[', '').replace(']', '').split(',')
        study_area_extent = [float(x.strip()) for x in study_area_extent]
    except KeyError:
        study_area_extent = None
        logging.debug('  study_area_extent: not set in INI')
    except Exception as e:
        raise e

    # TODO: Add try/except blocks and default values?
    collections = [x.strip() for x in ini['INPUTS']['collections'].split(',')]
    cloud_cover = float(ini['INPUTS']['cloud_cover'])
    min_pixel_count = float(ini['TCORR']['min_pixel_count'])
    min_scene_count = float(ini['TCORR']['min_scene_count'])

    if (tmax_name.upper() == 'CIMIS' and
            ini['INPUTS']['end_date'] < '2003-10-01'):
        logging.error(
            '\nCIMIS is not currently available before 2003-10-01, exiting\n')
        sys.exit()
    elif (tmax_name.upper() == 'DAYMET' and
            ini['INPUTS']['end_date'] > '2018-12-31'):
        logging.warning('\nDAYMET is not currently available past 2018-12-31, '
                        'using median Tmax values\n')
        # sys.exit()
    # elif (tmax_name.upper() == 'TOPOWX' and
    #         ini['INPUTS']['end_date'] > '2017-12-31'):
    #     logging.warning(
    #         '\nDAYMET is not currently available past 2017-12-31, '
    #         'using median Tmax values\n')
    #     # sys.exit()

    # Extract the model keyword arguments from the INI
    # Set the property name to lower case and try to cast values to numbers
    model_args = {
        k.lower(): float(v) if utils.is_number(v) else v
        for k, v in dict(ini[model_name]).items()}
    # et_reference_args = {
    #     k: model_args.pop(k)
    #     for k in [k for k in model_args.keys() if k.startswith('et_reference_')]}

    logging.info('\nInitializing Earth Engine')
    if gee_key_file:
        logging.info('  Using service account key file: {}'.format(gee_key_file))
        # The "EE_ACCOUNT" parameter is not used if the key file is valid
        ee.Initialize(ee.ServiceAccountCredentials('x', key_file=gee_key_file),
                      use_cloud_api=True)
    else:
        ee.Initialize(use_cloud_api=True)

    logging.debug('\nTmax properties')
    tmax_source = tmax_name.split('_', 1)[0]
    tmax_version = tmax_name.split('_', 1)[1]
    tmax_coll_id = 'projects/earthengine-legacy/assets/' \
                   'projects/usgs-ssebop/tmax/{}'.format(tmax_name.lower())
    tmax_coll = ee.ImageCollection(tmax_coll_id)
    tmax_mask = ee.Image(tmax_coll.first()).select([0]).multiply(0)
    logging.debug('  Collection: {}'.format(tmax_coll_id))
    logging.debug('  Source: {}'.format(tmax_source))
    logging.debug('  Version: {}'.format(tmax_version))

    logging.debug('\nExport properties')
    export_info = utils.get_info(ee.Image(tmax_mask))
    if 'daymet' in tmax_name.lower():
        # Custom smaller extent for DAYMET focused on CONUS
        export_extent = [-1999750, -1890500, 2500250, 1109500]
        export_shape = [4500, 3000]
        export_geo = [1000, 0, -1999750, 0, -1000, 1109500]
        # Custom medium extent for DAYMET of CONUS, Mexico, and southern Canada
        # export_extent = [-2099750, -3090500, 2900250, 1909500]
        # export_shape = [5000, 5000]
        # export_geo = [1000, 0, -2099750, 0, -1000, 1909500]
        export_crs = export_info['bands'][0]['crs']
    else:
        export_crs = export_info['bands'][0]['crs']
        export_geo = export_info['bands'][0]['crs_transform']
        export_shape = export_info['bands'][0]['dimensions']
        # export_geo = ee.Image(tmax_mask).projection().getInfo()['transform']
        # export_crs = ee.Image(tmax_mask).projection().getInfo()['crs']
        # export_shape = ee.Image(tmax_mask).getInfo()['bands'][0]['dimensions']
        export_extent = [
            export_geo[2], export_geo[5] + export_shape[1] * export_geo[4],
            export_geo[2] + export_shape[0] * export_geo[0], export_geo[5]]
    export_geom = ee.Geometry.Rectangle(
        export_extent, proj=export_crs, geodesic=False)
    logging.debug('  CRS: {}'.format(export_crs))
    logging.debug('  Extent: {}'.format(export_extent))
    logging.debug('  Geo: {}'.format(export_geo))
    logging.debug('  Shape: {}'.format(export_shape))

    if study_area_extent is None:
        if 'daymet' in tmax_name.lower():
            # CGM - For now force DAYMET to a slightly smaller "CONUS" extent
            study_area_extent = [-125, 25, -65, 49]
            # study_area_extent = [-125, 25, -65, 52]
        elif 'cimis' in tmax_name.lower():
            study_area_extent = [-124, 35, -119, 42]
        else:
            # TODO: Make sure output from bounds is in WGS84
            study_area_extent = tmax_mask.geometry().bounds().getInfo()
        logging.debug(f'\nStudy area extent not set in INI, '
                      f'default to {study_area_extent}')
    study_area_geom = ee.Geometry.Rectangle(
        study_area_extent, proj='EPSG:4326', geodesic=False)

    if not ee.data.getInfo(tcorr_annual_coll_id):
        logging.info('\nExport collection does not exist and will be built'
                     '\n  {}'.format(tcorr_annual_coll_id))
        input('Press ENTER to continue')
        ee.data.createAsset({'type': 'IMAGE_COLLECTION'}, tcorr_annual_coll_id)

    # Get current asset list
    logging.debug('\nGetting GEE asset list')
    asset_list = utils.get_ee_assets(tcorr_annual_coll_id)
    # if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
    #     pprint.pprint(asset_list[:10])

    # Get current running tasks
    tasks = utils.get_ee_tasks()
    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
        logging.debug('  Tasks: {}\n'.format(len(tasks)))
        input('ENTER')

    # if cron_flag:
    #     # CGM - This seems like a silly way of getting the date as a datetime
    #     # Why am I doing this and not using the commented out line?
    #     end_dt = datetime.date.today().strftime('%Y-%m-%d')
    #     end_dt = datetime.datetime.strptime(end_dt, '%Y-%m-%d')
    #     end_dt = end_dt + datetime.timedelta(days=-4)
    #     # end_dt = datetime.datetime.today() + datetime.timedelta(days=-1)
    #     start_dt = end_dt + datetime.timedelta(days=-64)
    # else:
    start_dt = datetime.datetime.strptime(
        ini['INPUTS']['start_date'], '%Y-%m-%d')
    end_dt = datetime.datetime.strptime(
        ini['INPUTS']['end_date'], '%Y-%m-%d')
    start_date = start_dt.strftime('%Y-%m-%d')
    end_date = end_dt.strftime('%Y-%m-%d')
    next_date = (end_dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
    logging.debug('Start Date: {}'.format(start_date))
    logging.debug('End Date:   {}\n'.format(end_date))

    # Limit by year and month
    # try:
    #     month_list = sorted(list(utils.parse_int_set(ini['TCORR']['months'])))
    # except:
    #     logging.info('\nTCORR "months" parameter not set in the INI,'
    #                  '\n  Defaulting to all months (1-12)\n')
    #     month_list = list(range(1, 13))
    try:
        year_list = sorted(list(utils.parse_int_set(ini['TCORR']['years'])))
    except:
        logging.info('\nTCORR "years" parameter not set in the INI,'
                     '\n  Defaulting to all available years\n')
        year_list = []

    # Get the list of WRS2 tiles that intersect the data area and study area
    wrs2_coll = ee.FeatureCollection(wrs2_coll_id) \
        .filterBounds(export_geom) \
        .filterBounds(study_area_geom)
    if wrs2_tiles:
        wrs2_coll = wrs2_coll.filter(ee.Filter.inList(wrs2_tile_field, wrs2_tiles))
    wrs2_info = wrs2_coll.getInfo()['features']

    for wrs2_ftr in sorted(wrs2_info,
                           key=lambda k: k['properties']['WRS2_TILE'],
                           reverse=reverse_flag):
        wrs2_tile = wrs2_ftr['properties'][wrs2_tile_field]
        logging.info('{}'.format(wrs2_tile))

        wrs2_path = int(wrs2_tile[1:4])
        wrs2_row = int(wrs2_tile[5:8])
        # wrs2_path = wrs2_ftr['properties']['PATH']
        # wrs2_row = wrs2_ftr['properties']['ROW']

        export_id = export_id_fmt.format(product=tmax_name.lower(), wrs2=wrs2_tile)
        logging.debug('  Export ID: {}'.format(export_id))

        asset_id = asset_id_fmt.format(coll_id=tcorr_annual_coll_id, wrs2=wrs2_tile)
        logging.debug('  Asset ID: {}'.format(asset_id))

        if overwrite_flag:
            if export_id in tasks.keys():
                logging.debug('  Task already submitted, cancelling')
                ee.data.cancelTask(tasks[export_id]['id'])
            # This is intentionally not an "elif" so that a task can be
            # cancelled and an existing image/file/asset can be removed
            if asset_id in asset_list:
                logging.debug('  Asset already exists, removing')
                ee.data.deleteAsset(asset_id)
        else:
            if export_id in tasks.keys():
                logging.debug('  Task already submitted, exiting')
                continue
            elif asset_id in asset_list:
                logging.debug('  Asset already exists, skipping')
                continue

        # CGM: I couldn't find a way to build this from the Collection class
        # TODO: Will need to be changed/updated for SR collection
        # TODO: Add code to handle real time collections
        landsat_coll = ee.ImageCollection([])
        if 'LANDSAT/LC08/C01/T1_TOA' in collections:
            l8_coll = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
                .filterMetadata('WRS_PATH', 'equals', wrs2_path) \
                .filterMetadata('WRS_ROW', 'equals', wrs2_row) \
                .filterMetadata('CLOUD_COVER_LAND', 'less_than', cloud_cover) \
                .filterMetadata('DATA_TYPE', 'equals', 'L1TP') \
                .filter(ee.Filter.gt('system:time_start',
                                     ee.Date('2013-03-24').millis()))
            #     .filterDate(start_date, next_date)
            landsat_coll = landsat_coll.merge(l8_coll)
        if 'LANDSAT/LE07/C01/T1_TOA' in collections:
            l7_coll = ee.ImageCollection('LANDSAT/LE07/C01/T1_TOA') \
                .filterMetadata('WRS_PATH', 'equals', wrs2_path) \
                .filterMetadata('WRS_ROW', 'equals', wrs2_row) \
                .filterMetadata('CLOUD_COVER_LAND', 'less_than', cloud_cover) \
                .filterMetadata('DATA_TYPE', 'equals', 'L1TP')
            #     .filterDate(start_date, next_date)
            landsat_coll = landsat_coll.merge(l7_coll)
        if 'LANDSAT/LT05/C01/T1_TOA' in collections:
            l5_coll = ee.ImageCollection('LANDSAT/LT05/C01/T1_TOA') \
                .filterMetadata('WRS_PATH', 'equals', wrs2_path) \
                .filterMetadata('WRS_ROW', 'equals', wrs2_row) \
                .filterMetadata('CLOUD_COVER_LAND', 'less_than', cloud_cover) \
                .filterMetadata('DATA_TYPE', 'equals', 'L1TP') \
                .filter(ee.Filter.lt('system:time_start',
                                     ee.Date('2011-12-31').millis()))
            #     .filterDate(start_date, next_date)
            landsat_coll = landsat_coll.merge(l5_coll)
        # if 'LANDSAT/LT04/C01/T1_TOA' in collections:
        #     l4_coll = ee.ImageCollection('LANDSAT/LT04/C01/T1_TOA') \
        #         .filterMetadata('WRS_PATH', 'equals', wrs2_path) \
        #         .filterMetadata('WRS_ROW', 'equals', wrs2_row) \
        #         .filterMetadata('CLOUD_COVER_LAND', 'less_than', cloud_cover) \
        #         .filterMetadata('DATA_TYPE', 'equals', 'L1TP')
        #     #     .filterDate(start_date, next_date)
        #     landsat_coll = landsat_coll.merge(l4_coll)

        def tcorr_img_func(landsat_img):
            # TODO: Will need to be changed for SR
            t_obj = ssebop.Image.from_landsat_c1_toa(landsat_img, **model_args)
            t_stats = ee.Dictionary(t_obj.tcorr_stats) \
                .combine({'tcorr_p5': 0, 'tcorr_count': 0}, overwrite=False)
            tcorr = ee.Number(t_stats.get('tcorr_p5'))
            count = ee.Number(t_stats.get('tcorr_count'))
            return tmax_mask.add(tcorr) \
                .rename(['tcorr']) \
                .set({
                    'system:time_start': ee.Image(landsat_img).get('system:time_start'),
                    'tcorr_value': tcorr,
                    'tcorr_pixel_count': count,
                })

        # Filter the Tcorr image collection based on the pixel counts
        tcorr_coll = ee.ImageCollection(landsat_coll.map(tcorr_img_func)) \
            .filterMetadata('tcorr_pixel_count', 'not_less_than', min_pixel_count)

        # Use a common reducer for the image and property stats
        reducer = ee.Reducer.median() \
            .combine(ee.Reducer.count(), sharedInputs=True)

        # Compute stats from the collection images
        # This might be used when Tcorr is spatial
        # tcorr_img = tcorr_coll.reduce(reducer).rename(['tcorr', 'count'])

        # Compute stats from the image properties
        tcorr_stats = ee.List(tcorr_coll.aggregate_array('tcorr_value')) \
            .reduce(reducer)
        tcorr_stats = ee.Dictionary(tcorr_stats) \
            .combine({'median': 0, 'count': 0}, overwrite=False)
        tcorr = ee.Number(tcorr_stats.get('median'))
        count = ee.Number(tcorr_stats.get('count'))
        index = count.lt(min_scene_count).multiply(7).add(2)
count.lt(min_scene_count).multiply(7).add(2) # index = ee.Algorithms.If(count.gte(min_scene_count), 2, 9) # Clip the mask image to the Landsat footprint # Change mask values to 1 if count >= threshold # Mask values of 0 will be set to nodata mask_img = tmax_mask.add(count.gte(min_scene_count)) \ .clip(ee.Geometry(wrs2_ftr['geometry'])) output_img = ee.Image( [mask_img.multiply(tcorr), mask_img.multiply(count)]) \ .rename(['tcorr', 'count']) \ .updateMask(mask_img.unmask(0)) # # Write an empty image if the pixel count is too low # # CGM: Check/test if this can be combined into a single If() # tcorr_img = ee.Algorithms.If( # count.gte(min_scene_count), # tmax_mask.add(tcorr), tmax_mask.updateMask(0)) # count_img = ee.Algorithms.If( # count.gte(min_scene_count), # tmax_mask.add(count), tmax_mask.updateMask(0)) # # # Clip to the Landsat image footprint # output_img = ee.Image([tcorr_img, count_img]) \ # .rename(['tcorr', 'count']) \ # .clip(ee.Geometry(wrs2_ftr['geometry'])) # # Clear the transparency mask # output_img = output_img.updateMask(output_img.unmask(0)) output_img = output_img.set({ 'date_ingested': datetime.datetime.today().strftime('%Y-%m-%d'), 'model_name': model_name, 'model_version': ssebop.__version__, # 'system:time_start': utils.millis(start_dt), 'tcorr_value': tcorr, 'tcorr_index': index, 'tcorr_scene_count': count, 'tmax_source': tmax_source.upper(), 'tmax_version': tmax_version.upper(), 'wrs2_path': wrs2_path, 'wrs2_row': wrs2_row, 'wrs2_tile': wrs2_tile, 'years': ','.join(map(str, year_list)), # 'year_start': year_list[0], # 'year_end': year_list[-1], }) # pprint.pprint(output_img.getInfo()) # input('ENTER') logging.debug(' Building export task') task = ee.batch.Export.image.toAsset( image=output_img, description=export_id, assetId=asset_id, crs=export_crs, crsTransform='[' + ','.join(list(map(str, export_geo))) + ']', dimensions='{0}x{1}'.format(*export_shape), ) logging.info(' Starting export task') utils.ee_task_start(task) # Pause before starting the next export task utils.delay_task(delay_time, max_ready) logging.debug('')
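# Hedged illustration (not part of the script above): the server-side
# expression count.lt(min_scene_count).multiply(7).add(2) reproduces the
# commented-out ee.Algorithms.If() line, encoding the Tcorr index as 2 when
# enough scenes are available and 9 otherwise. A plain-Python sketch:
def tcorr_index_sketch(count, min_scene_count):
    # count >= min_scene_count -> 0 * 7 + 2 = 2
    # count <  min_scene_count -> 1 * 7 + 2 = 9
    return int(count < min_scene_count) * 7 + 2

assert tcorr_index_sketch(10, 5) == 2
assert tcorr_index_sketch(3, 5) == 9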
def createTimeBand(img): year = img.date().difference(ee.Date('1990-01-01'), 'year') return ee.Image(year).float().addBands(img)
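# Hedged usage sketch (not from the source) for createTimeBand above: the
# fractional-year 'constant' band feeds ee.Reducer.linearFit(), which expects
# the independent variable (time) before the dependent variable. The MODIS
# collection and 'NDVI' band are assumptions for illustration; assumes ee is
# imported and initialized as elsewhere in this file.
modis_ndvi = ee.ImageCollection('MODIS/006/MOD13A2').select('NDVI')
ndvi_trend = modis_ndvi.map(createTimeBand) \
    .select(['constant', 'NDVI']) \
    .reduce(ee.Reducer.linearFit())  # 'scale' = slope per year, plus 'offset'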
def aggregate_monthly(agg_start_date): return aggregate_image( agg_start_date=agg_start_date, agg_end_date=ee.Date(agg_start_date).advance(1, 'month'), date_format='YYYYMM')
def createTimeBand(image):
    return image.addBands(image.metadata('system:time_start').divide(1e18))

# createTimeBand = function(image) {
#   # Scale milliseconds by a large constant to avoid very small slopes
#   # in the linear regression output.
#   return image.addBands(image.metadata('system:time_start').divide(1e18))
# }

# Load the input image 'collection': projected climate data.
collection = ee.ImageCollection('NASA/NEX-DCP30_ENSEMBLE_STATS') \
    .filter(ee.Filter.eq('scenario', 'rcp85')) \
    .filterDate(ee.Date('2006-01-01'), ee.Date('2050-01-01')) \
    .map(createTimeBand)

# Reduce the collection with the linear fit reducer.
# The independent variable is followed by the dependent variables.
linearFit = collection.select(['system:time_start', 'pr_mean']) \
    .reduce(ee.Reducer.linearFit())

# Display the results.
Map.setCenter(-100.11, 40.38, 5)
Map.addLayer(linearFit, {
    'min': 0,
    'max': [-0.9, 8e-5, 1],
    'bands': ['scale', 'offset', 'scale']
}, 'fit')
def aggregate_annual(agg_start_date): return aggregate_image( agg_start_date=agg_start_date, agg_end_date=ee.Date(agg_start_date).advance(1, 'year'), date_format='YYYY')
def process(self, product, date, skipPreprocessing=False):
    try:
        ee.Initialize()
    except EEException as e:
        print(e)

    self._parse_config()

    if product in ['sentinel1', 'atms', 'viirs', 'modis']:
        dt = utils.decode_date(date)
        tomorrow = (dt + datetime.timedelta(1)).strftime('%Y-%m-%d')

        dateDir = os.path.join(self.workdir, dt.strftime('%Y%m%d'))
        prodDir = os.path.join(dateDir, product)
        geom = ee.Geometry.Rectangle(self.region)
        hand = ee.Image(self.hand)

        if product == 'atms':
            params = self.atmsParams
            paramKeys = list(params.keys())
            collId = self.atmsParams['waterFractionAsset']
            worker = Atms(geom, date, tomorrow, collectionid=collId)
            if not skipPreprocessing:
                if not os.path.exists(dateDir):
                    os.mkdir(dateDir)
                if not os.path.exists(prodDir):
                    os.mkdir(prodDir)
                geotiffs = worker.extract(
                    dt, self.region, credentials=self.earthdataLogin,
                    outDir=prodDir, gridding_radius=50000)
                worker.load(geotiffs, self.stagingBucket, collId)

            if 'seed' in paramKeys:
                permanentWater = ee.Image(self.atmsParams['seed'])
            else:
                permanentWater = None

            if 'probablistic' in paramKeys:
                runProbs = params['probablistic']
            else:
                runProbs = False

            waterImage = worker.waterMap(
                hand, permanent=permanentWater, probablistic=runProbs)
            waterImage = waterImage.set(
                {'system:time_start': ee.Date(date).millis(),
                 'sensor': product})
            assetTarget = self.targetAsset + '{0}_bathtub_{1}'.format(
                product, date.replace('-', ''))

        elif (product == 'viirs') or (product == 'modis'):
            today = datetime.datetime.now()
            if (today - dt).days < 5:
                avail = today - datetime.timedelta(5)
                raise NotImplementedError(
                    'NRT processing for VIIRS or MODIS has not been '
                    'implemented, please select a date prior to '
                    '{}'.format(avail))
            else:
                minDate = (dt - datetime.timedelta(45)).strftime('%Y-%m-%d')
                maxDate = (dt + datetime.timedelta(1)).strftime('%Y-%m-%d')
                if product == 'modis':
                    worker = Modis(geom, minDate, maxDate,
                                   collectionid='MODIS/006/MOD09GA')
                    params = self.viirsParams
                else:
                    worker = Viirs(geom, minDate, maxDate,
                                   collectionid='NOAA/VIIRS/001/VNP09GA')
                    params = self.viirsParams
                paramKeys = list(params.keys())

                ls = Landsat(geom, minDate, maxDate,
                             collectionid='LANDSAT/LC08/C01/T1_SR')
                s2 = Sentinel2(geom, minDate, maxDate,
                               collectionid='COPERNICUS/S2_SR')
                highRes = ee.ImageCollection(
                    ls.collection.merge(s2.collection))
                worker.downscale(highRes, target_date=date,
                                 windowSize=33, A=0.5)

                if 'probablistic' in paramKeys:
                    runProbs = params['probablistic']
                    params.pop('probablistic')
                else:
                    runProbs = False

                # Defined but not passed through; retained from the original
                nIters = 100
                probThreshold = 0.75
                waterImage = worker.waterMap(
                    date, hand, probablistic=runProbs, **params)
                waterImage = waterImage \
                    .set({'system:time_start': ee.Date(date).millis(),
                          'sensor': product})
                assetTarget = self.targetAsset + \
                    '{0}_downscaled_globalOtsu_{1}'.format(
                        product, date.replace('-', ''))

        elif product == 'sentinel1':
            previous = (dt + datetime.timedelta(0)).strftime('%Y-%m-%d')
            worker = Sentinel1(geom, previous, tomorrow,
                               collectionid='COPERNICUS/S1_GRD')
            waterImage = worker.waterMap(date).And(hand.lt(30))
            waterImage = waterImage.rename('water') \
                .set({'system:time_start': ee.Date(date).millis(),
                      'sensor': product})
            assetTarget = self.targetAsset + '{0}_bootstrapOtsu_{1}'.format(
                product, date.replace('-', ''))

        else:
            raise NotImplementedError(
                'the selected product is not currently implemented, '
                'please check back with later versions')

        description = '{0}_water_{1}'.format(product, date)
        geeutils.exportImage(waterImage, geom, assetTarget,
                             description=description)

    else:
        raise NotImplementedError(
            'the selected product is not currently implemented, '
            'please check back with later versions')

    return
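# Hedged usage sketch: process() is a method, so it would be called on a
# configured instance. The constructor shown here is hypothetical.
# pipeline = Pipeline('config.yml')
# pipeline.process('sentinel1', '2019-07-15')
# pipeline.process('viirs', '2019-07-10', skipPreprocessing=True)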
def CreateTimeBand(img): year = ee.Date(img.get('system:time_start')).get('year').subtract(1991) return ee.Image(year).byte().addBands(img)
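# Hedged usage sketch (not from the source): the 1991 epoch above suggests
# the DMSP-OLS nighttime lights trend example, where the byte year band is
# regressed against 'stable_lights' with ee.Reducer.linearFit(). Assumes ee
# is imported and initialized as elsewhere in this file.
lights_trend = ee.ImageCollection('NOAA/DMSP-OLS/NIGHTTIME_LIGHTS') \
    .select('stable_lights') \
    .map(CreateTimeBand) \
    .reduce(ee.Reducer.linearFit())  # bands: 'scale' (slope), 'offset'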
def time_start(self, year): """ Get time start property """ return ee.Date('{}-{}-{}'.format(year, 1, 1))
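# Hedged usage sketch: the unpadded '{}-{}-{}' format above yields a string
# like '2015-1-1', which ee.Date() accepts in practice.
# jan1 = self.time_start(2015)
# print(jan1.format('YYYY-MM-dd').getInfo())  # expected: '2015-01-01'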
def getFloodPopbyCountry_LandScan(flood_img):
    """
    Args:
        flood_img : the standard Earth Engine Image object outputted by the
            map_DFO_event function; the region of interest is taken from
            the image geometry
    Returns:
        - a feature collection of all countries for each flood event
        - with a pop and area count
        - an ee feature with properties including
            'Index': the event index ID
            'Began': start date of event map
            'Ended': end date of event map
            'Flood_Area': total area of detected flood
            'Pop_Exposed': the number of people in the mapped flood from
                LandScan data
    """
    import ee
    ee.Initialize()

    roi_geo = flood_img.geometry()

    # Import the LandScan image collection & permanent water mask
    pop_all = ee.ImageCollection("projects/global-flood-db/landscan")
    perm_water = ee.Image("JRC/GSW1_0/GlobalSurfaceWater").select(
        "transition").eq(1).unmask()

    def maskImages(img):
        non_flood = img.select("flooded")
        water_mask = non_flood.multiply(perm_water.neq(1))
        return img.select("flooded").mask(water_mask)

    # Extract the final flood extent image data as its own variable for analysis
    flood_extent = maskImages(ee.Image(flood_img.select("flooded")))

    # Get event year, match with the population year and clip to study area
    event_year = ee.Date(flood_img.get('began')).get('year')
    pop_img = ee.Image(pop_all.filterMetadata('year', 'equals', event_year)
                       .first()).clip(roi_geo)
    pop_img = pop_img.updateMask(
        pop_img.gte(0))  # mask out bad data with negative values

    # Mask the world population dataset using the flood extent layer
    pop_scale = pop_img.projection().nominalScale()
    pop_masked = pop_img.updateMask(flood_extent)

    # Select the countries which the flood touches
    countries = ee.FeatureCollection(
        'ft:1tdSwUL7MVpOauSgRzqVTOwdfy17KDbw-1d9omPw')
    flood_countries = countries.filterBounds(flood_extent.geometry().bounds())

    # Get area of flood in the scale of the flood map
    flood_area_img = flood_extent.multiply(ee.Image.pixelArea())
    map_scale = flood_extent.projection().nominalScale()

    index = ee.Image(flood_img).get("id")
    began_year = ee.Date(flood_img.get("began")).get("year")
    began_month = ee.Date(flood_img.get("began")).get("month")
    began_day = ee.Date(flood_img.get("began")).get("day")
    # Resolve the system:index once, client-side, rather than calling
    # getInfo() on every iteration of the mapped function below
    sys_id = ee.String(flood_img.get('system:index')).getInfo()

    def getCountriesPop(ft):
        pop_sum = pop_masked.reduceRegion(reducer=ee.Reducer.sum(),
                                          geometry=ft.geometry(),
                                          scale=pop_scale,
                                          maxPixels=1e9)
        pop = pop_sum.get("b1")
        area_sum = flood_area_img.reduceRegion(reducer=ee.Reducer.sum(),
                                               geometry=ft.geometry(),
                                               scale=map_scale,
                                               maxPixels=1e9)
        area = area_sum.get("flooded")
        return ee.Feature(
            None, {
                "system:index": sys_id,
                "id": index,
                "Year": began_year,
                "Month": began_month,
                "Day": began_day,
                "Country": ft.get("Country"),
                "Exposed": pop,
                "Area": area
            })

    country_stats = ee.FeatureCollection(flood_countries).map(getCountriesPop)

    return ee.FeatureCollection(country_stats).set({"id": index})
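# Hedged usage sketch (the asset ID is hypothetical): the function is called
# per event image client-side, since it resolves system:index with getInfo().
# event_img = ee.Image('projects/global-flood-db/gfd_v3/DFO_1234')
# exposure_fc = getFloodPopbyCountry_LandScan(event_img)
# print(exposure_fc.aggregate_array('Country').getInfo())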
def getS2collection(): return ee.ImageCollection('COPERNICUS/S2') \ .filterBounds(poly) \ .filterDate(ee.Date(w_startdate.value),ee.Date(w_enddate.value)) \ .sort('CLOUDY_PIXEL_PERCENTAGE',True)
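# Hedged usage sketch: `poly`, `w_startdate`, and `w_enddate` are assumed to
# be defined elsewhere (an ROI geometry and two ipywidgets date fields).
# Sorting ascending on CLOUDY_PIXEL_PERCENTAGE puts the clearest scene first.
# s2 = getS2collection()
# least_cloudy = ee.Image(s2.first())
# print(least_cloudy.get('CLOUDY_PIXEL_PERCENTAGE').getInfo())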
def year_diff(popImg): popYear = ee.Date(popImg.get('system:index')).get('year') diff = ee.Number(eventYear).subtract(ee.Number(popYear)).abs() return popImg.set({"year_diff": diff})
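# Hedged usage sketch: tag each population image with its distance to the
# event year, then sort so the closest year comes first. `pop_coll` and
# `eventYear` are assumed from the surrounding scope.
# closest_pop = ee.Image(pop_coll.map(year_diff).sort('year_diff').first())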
def getYear(date): return ee.Date(date).get('year')
def wrap(img): return ee.Date(ee.Image(img).date().format("YYYY-MM-dd"))
def _build(self, variables=None, start_date=None, end_date=None): """Build a merged model variable image collection Parameters ---------- variables : list Set a variable list that is different than the class variable list. start_date : str, optional Set a start_date that is different than the class start_date. This is needed when defining the scene collection to have extra images for interpolation. end_date : str, optional Set an exclusive end_date that is different than the class end_date. Returns ------- ee.ImageCollection Raises ------ ValueError if collection IDs are invalid. ValueError if variables is not set here and in class init. """ # Override the class parameters if necessary if not variables: if self.variables: variables = self.variables else: raise ValueError('variables parameter must be set') if not start_date: start_date = self.start_date if not end_date: end_date = self.end_date # Build the variable image collection variable_coll = ee.ImageCollection([]) for coll_id in self.collections: # TODO: Move to separate methods/functions for each collection type if coll_id in self._landsat_c2_sr_collections: input_coll = ee.ImageCollection(coll_id)\ .filterDate(start_date, end_date)\ .filterBounds(self.geometry)\ .filterMetadata('CLOUD_COVER_LAND', 'less_than', self.cloud_cover_max)\ .filterMetadata('CLOUD_COVER_LAND', 'greater_than', -0.5) # TODO: Check if PROCESSING_LEVEL needs to be filtered on # .filterMetadata('PROCESSING_LEVEL', 'equals', 'L2SP') # TODO: Need to come up with a system for applying # generic filter arguments to the collections if coll_id in self.filter_args.keys(): for f in copy.deepcopy(self.filter_args[coll_id]): try: filter_type = f.pop('type') except KeyError: continue if filter_type.lower() == 'equals': input_coll = input_coll.filter( ee.Filter.equals(**f)) # TODO: Check if these bad images are in collection 2 # Time filters are to remove bad (L5) and pre-op (L8) images if 'LT05' in coll_id: input_coll = input_coll.filter( ee.Filter.lt('system:time_start', ee.Date('2011-12-31').millis())) elif 'LC08' in coll_id: input_coll = input_coll.filter( ee.Filter.gt('system:time_start', ee.Date('2013-03-24').millis())) def compute_lsr(image): model_obj = Image.from_landsat_c2_sr( sr_image=ee.Image(image), **self.model_args) return model_obj.calculate(variables) variable_coll = variable_coll.merge( ee.ImageCollection(input_coll.map(compute_lsr))) elif coll_id in self._landsat_c1_toa_collections: input_coll = ee.ImageCollection(coll_id)\ .filterDate(start_date, end_date)\ .filterBounds(self.geometry)\ .filterMetadata('DATA_TYPE', 'equals', 'L1TP')\ .filterMetadata('CLOUD_COVER_LAND', 'less_than', self.cloud_cover_max)\ .filterMetadata('CLOUD_COVER_LAND', 'greater_than', -0.5) # TODO: Need to come up with a system for applying # generic filter arguments to the collections if coll_id in self.filter_args.keys(): for f in copy.deepcopy(self.filter_args[coll_id]): try: filter_type = f.pop('type') except KeyError: continue if filter_type.lower() == 'equals': input_coll = input_coll.filter( ee.Filter.equals(**f)) # TODO: Check if these bad images are in collection 1 SR # Time filters are to remove bad (L5) and pre-op (L8) images if 'LT05' in coll_id: input_coll = input_coll.filter( ee.Filter.lt('system:time_start', ee.Date('2011-12-31').millis())) elif 'LC08' in coll_id: input_coll = input_coll.filter( ee.Filter.gt('system:time_start', ee.Date('2013-03-24').millis())) def compute_ltoa(image): model_obj = Image.from_landsat_c1_toa( toa_image=ee.Image(image), **self.model_args) 
return model_obj.calculate(variables) variable_coll = variable_coll.merge( ee.ImageCollection(input_coll.map(compute_ltoa))) elif coll_id in self._landsat_c1_sr_collections: input_coll = ee.ImageCollection(coll_id)\ .filterDate(start_date, end_date)\ .filterBounds(self.geometry)\ .filterMetadata('CLOUD_COVER_LAND', 'less_than', self.cloud_cover_max)\ .filterMetadata('CLOUD_COVER_LAND', 'greater_than', -0.5) # TODO: Need to come up with a system for applying # generic filter arguments to the collections if coll_id in self.filter_args.keys(): for f in copy.deepcopy(self.filter_args[coll_id]): try: filter_type = f.pop('type') except KeyError: continue if filter_type.lower() == 'equals': input_coll = input_coll.filter( ee.Filter.equals(**f)) # Time filters are to remove bad (L5) and pre-op (L8) images if 'LT05' in coll_id: input_coll = input_coll.filter( ee.Filter.lt('system:time_start', ee.Date('2011-12-31').millis())) elif 'LC08' in coll_id: input_coll = input_coll.filter( ee.Filter.gt('system:time_start', ee.Date('2013-03-24').millis())) def compute_lsr(image): model_obj = Image.from_landsat_c1_sr( sr_image=ee.Image(image), **self.model_args) return model_obj.calculate(variables) variable_coll = variable_coll.merge( ee.ImageCollection(input_coll.map(compute_lsr))) else: raise ValueError('unsupported collection: {}'.format(coll_id)) return variable_coll
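# Hedged usage sketch for _build(): interpolate() below calls it with a
# padded date range so extra scenes are available for interpolation.
# Parameter values here are illustrative only.
# scene_coll = self._build(
#     variables=['et_fraction', 'ndvi', 'time'],
#     start_date='2017-06-01',  # start_date minus interp_days
#     end_date='2017-09-01')    # end_date plus interp_days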
def find_NAIP(region, add_NDVI=True, add_NDWI=True): """Create annual NAIP mosaic for a given region. Args: region (object): ee.Geometry add_NDVI (bool, optional): Whether to add the NDVI band. Defaults to True. add_NDWI (bool, optional): Whether to add the NDWI band. Defaults to True. Returns: object: ee.ImageCollection """ init_collection = ee.ImageCollection('USDA/NAIP/DOQQ') \ .filterBounds(region) \ .filterDate('2009-01-01', '2019-12-31') \ .filter(ee.Filter.listContains("system:band_names", "N")) yearList = ee.List( init_collection.distinct(['system:time_start' ]).aggregate_array('system:time_start')) init_years = yearList.map(lambda y: ee.Date(y).get('year')) # remove duplicates init_years = ee.Dictionary( init_years.reduce(ee.Reducer.frequencyHistogram())).keys() years = init_years.map(lambda x: ee.Number.parse(x)) # years = init_years.map(lambda x: x) # Available NAIP years with NIR band def NAIPAnnual(year): start_date = ee.Date.fromYMD(year, 1, 1) end_date = ee.Date.fromYMD(year, 12, 31) collection = init_collection.filterDate(start_date, end_date) # .filterBounds(geometry) # .filter(ee.Filter.listContains("system:band_names", "N")) time_start = ee.Date( ee.List(collection.aggregate_array( 'system:time_start')).sort().get(0)).format('YYYY-MM-dd') time_end = ee.Date( ee.List(collection.aggregate_array('system:time_end')).sort().get( -1)).format('YYYY-MM-dd') col_size = collection.size() image = ee.Image(collection.mosaic().clip(region)) if add_NDVI: NDVI = ee.Image(image).normalizedDifference(['N', 'R']).select(['nd'], ['ndvi']) image = image.addBands(NDVI) if add_NDWI: NDWI = ee.Image(image).normalizedDifference(['G', 'N']).select(['nd'], ['ndwi']) image = image.addBands(NDWI) return image.set({ 'system:time_start': time_start, 'system:time_end': time_end, 'tiles': col_size }) # remove years with incomplete coverage naip = ee.ImageCollection(years.map(NAIPAnnual)) mean_size = ee.Number(naip.aggregate_mean('tiles')) total_sd = ee.Number(naip.aggregate_total_sd('tiles')) threshold = mean_size.subtract(total_sd.multiply(1)) naip = naip.filter( ee.Filter.Or(ee.Filter.gte('tiles', threshold), ee.Filter.gte('tiles', 15))) naip = naip.filter(ee.Filter.gte('tiles', 7)) naip_count = naip.size() naip_seq = ee.List.sequence(0, naip_count.subtract(1)) def set_index(index): img = ee.Image(naip.toList(naip_count).get(index)) return img.set({'system:uid': ee.Number(index).toUint8()}) naip = naip_seq.map(set_index) return ee.ImageCollection(naip)
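# Hedged usage sketch for find_NAIP (the ROI coordinates are illustrative):
# roi = ee.Geometry.Polygon(
#     [[[-99.0, 47.0], [-99.0, 47.5], [-98.5, 47.5], [-98.5, 47.0]]])
# naip = find_NAIP(roi)
# print(naip.size().getInfo())
# print(naip.aggregate_array('system:time_start').getInfo())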
def interpolate(self, variables=None, t_interval='custom',
                interp_method='linear', interp_days=32, **kwargs):
    """

    Parameters
    ----------
    variables : list, optional
        List of variables that will be returned in the Image Collection.
        If variables is not set here it must be specified in the class
        instantiation call.
    t_interval : {'daily', 'monthly', 'annual', 'custom'}, optional
        Time interval over which to interpolate and aggregate values
        (the default is 'custom').
    interp_method : {'linear'}, optional
        Interpolation method (the default is 'linear').
    interp_days : int, str, optional
        Number of extra days before the start date and after the end date
        to include in the interpolation calculation. (the default is 32).
    kwargs : dict, optional

    Returns
    -------
    ee.ImageCollection

    Raises
    ------
    ValueError for unsupported input parameters
    ValueError for non-positive interp_days values
    TypeError for non-integer interp_days

    Notes
    -----
    Not all variables can be interpolated to new time steps.
    Variables like reference ET are simply summed whereas ET fraction
    is computed from the interpolated/aggregated values.

    """
    # Check that the input parameters are valid
    if t_interval.lower() not in ['daily', 'monthly', 'annual', 'custom']:
        raise ValueError('unsupported t_interval: {}'.format(t_interval))
    elif interp_method.lower() not in ['linear']:
        raise ValueError(
            'unsupported interp_method: {}'.format(interp_method))

    if type(interp_days) is str and utils.is_number(interp_days):
        interp_days = int(interp_days)
    elif not type(interp_days) is int:
        raise TypeError('interp_days must be an integer')
    elif interp_days <= 0:
        raise ValueError('interp_days must be a positive integer')

    # Does it make sense to use the class variable list if not set?
    if not variables:
        if self.variables:
            variables = self.variables
        else:
            raise ValueError('variables parameter must be set')

    # Adjust start/end dates based on t_interval
    # Increase the date range to fully include the time interval
    start_dt = datetime.datetime.strptime(self.start_date, '%Y-%m-%d')
    end_dt = datetime.datetime.strptime(self.end_date, '%Y-%m-%d')
    if t_interval.lower() == 'annual':
        start_dt = datetime.datetime(start_dt.year, 1, 1)
        # Convert end date to inclusive, flatten to beginning of year,
        # then add a year which will make it exclusive
        end_dt -= relativedelta(days=+1)
        end_dt = datetime.datetime(end_dt.year, 1, 1)
        end_dt += relativedelta(years=+1)
    elif t_interval.lower() == 'monthly':
        start_dt = datetime.datetime(start_dt.year, start_dt.month, 1)
        end_dt -= relativedelta(days=+1)
        end_dt = datetime.datetime(end_dt.year, end_dt.month, 1)
        end_dt += relativedelta(months=+1)
    start_date = start_dt.strftime('%Y-%m-%d')
    end_date = end_dt.strftime('%Y-%m-%d')

    # The start/end date for the interpolation include more days
    # (+/- interp_days) than are included in the reference ET collection
    interp_start_dt = start_dt - datetime.timedelta(days=interp_days)
    interp_end_dt = end_dt + datetime.timedelta(days=interp_days)
    interp_start_date = interp_start_dt.date().isoformat()
    interp_end_date = interp_end_dt.date().isoformat()

    # Update model_args if reference ET parameters were passed to interpolate
    # Intentionally using model_args (instead of self.et_reference_source,
    # etc.) in this function since model_args is passed to Image class
    # in _build()
    # if 'et' in variables or 'et_reference' in variables:
    if ('et_reference_source' in kwargs.keys() and
            kwargs['et_reference_source'] is not None):
        self.model_args['et_reference_source'] = kwargs['et_reference_source']
    if ('et_reference_band' in kwargs.keys() and
            kwargs['et_reference_band'] is not None):
        self.model_args['et_reference_band'] = kwargs['et_reference_band']
    if ('et_reference_factor' in kwargs.keys() and
            kwargs['et_reference_factor'] is not None):
        self.model_args['et_reference_factor'] = kwargs['et_reference_factor']
    if ('et_reference_resample' in kwargs.keys() and
            kwargs['et_reference_resample'] is not None):
        self.model_args['et_reference_resample'] = \
            kwargs['et_reference_resample']

    # Check that all reference ET parameters were set
    # print(self.model_args)
    for et_reference_param in [
            'et_reference_source', 'et_reference_band',
            'et_reference_factor']:
        if et_reference_param not in self.model_args.keys():
            raise ValueError('{} was not set'.format(et_reference_param))
        elif not self.model_args[et_reference_param]:
            raise ValueError('{} was not set'.format(et_reference_param))

    if type(self.model_args['et_reference_source']) is str:
        # Assume a string source is a single image collection ID,
        # not a list of collection IDs or an ee.ImageCollection
        daily_et_ref_coll_id = self.model_args['et_reference_source']
        daily_et_ref_coll = ee.ImageCollection(daily_et_ref_coll_id) \
            .filterDate(start_date, end_date) \
            .select([self.model_args['et_reference_band']], ['et_reference'])
    # elif isinstance(self.model_args['et_reference_source'],
    #                 computedobject.ComputedObject):
    #     # Interpret computed objects as image collections
    #     daily_et_ref_coll = self.model_args['et_reference_source'] \
    #         .filterDate(self.start_date, self.end_date) \
    #         .select([self.model_args['et_reference_band']])
    else:
        raise ValueError('unsupported et_reference_source: {}'.format(
            self.model_args['et_reference_source']))

    # Scale reference ET images (if necessary)
    # CGM - Resampling is not working correctly so not including for now
    if (self.model_args['et_reference_factor'] and
            self.model_args['et_reference_factor'] != 1):
        def et_reference_adjust(input_img):
            return input_img \
                .multiply(self.model_args['et_reference_factor']) \
                .copyProperties(input_img) \
                .set({'system:time_start':
                          input_img.get('system:time_start')})
        daily_et_ref_coll = daily_et_ref_coll.map(et_reference_adjust)

    # Initialize variable list to only variables that can be interpolated
    interp_vars = list(set(self._interp_vars) & set(variables))

    # To return ET, the ET fraction must be interpolated
    if 'et' in variables and 'et_fraction' not in interp_vars:
        interp_vars.append('et_fraction')

    # With the current interpolate.daily() function,
    # something has to be interpolated in order to return et_reference
    if 'et_reference' in variables and 'et_fraction' not in interp_vars:
        interp_vars.append('et_fraction')

    # The time band is always needed for interpolation
    interp_vars.append('time')

    # Count will be determined using the aggregate_coll image masks
    if 'count' in variables:
        interp_vars.append('mask')
        # interp_vars.remove('count')

    # Build initial scene image collection
    scene_coll = self._build(variables=interp_vars,
                             start_date=interp_start_date,
                             end_date=interp_end_date)

    # For count, compute the composite/mosaic image for the mask band only
    if 'count' in variables:
        aggregate_coll = openet.core.interpolate.aggregate_to_daily(
            image_coll=scene_coll.select(['mask']),
            start_date=start_date,
            end_date=end_date)
        # The following is needed because the aggregate collection can be
        # empty if there are no scenes in the target date range but there
        # are scenes in the interpolation date range.
        # Without this the count image will not be built but the other
        # bands will be which causes a non-homogeneous image collection.
        aggregate_coll = aggregate_coll.merge(
            ee.Image.constant(0).rename(['mask']).set(
                {'system:time_start': ee.Date(start_date).millis()}))

    # Including count/mask causes problems in interpolate.daily() function.
    # Issues with mask being an int but the values need to be double.
    # Casting the mask band to a double would fix this problem also.
    if 'mask' in interp_vars:
        interp_vars.remove('mask')

    # Interpolate to a daily time step
    # NOTE: the daily function is not computing ET (ETf x ETr)
    #   but is returning the target (ETr) band
    daily_coll = openet.core.interpolate.daily(
        target_coll=daily_et_ref_coll,
        source_coll=scene_coll.select(interp_vars),
        interp_method=interp_method,
        interp_days=interp_days,
    )

    # Compute ET from ET fraction and reference ET (if necessary)
    # if 'et' in variables or 'et_fraction' in variables:
    def compute_et(img):
        """This function assumes reference ET and ET fraction are present"""
        et_img = img.select(['et_fraction']) \
            .multiply(img.select(['et_reference']))
        return img.addBands(et_img.rename('et'))

    daily_coll = daily_coll.map(compute_et)

    interp_properties = {
        'cloud_cover_max': self.cloud_cover_max,
        'collections': ', '.join(self.collections),
        'interp_days': interp_days,
        'interp_method': interp_method,
        'model_name': openet.ssebop.MODEL_NAME,
        'model_version': openet.ssebop.__version__,
    }
    interp_properties.update(self.model_args)

    def aggregate_image(agg_start_date, agg_end_date, date_format):
        """Aggregate the daily images within the target date range

        Parameters
        ----------
        agg_start_date: str
            Start date (inclusive).
        agg_end_date : str
            End date (exclusive).
        date_format : str
            Date format for system:index (uses EE Joda-Time format).

        Returns
        -------
        ee.Image

        Notes
        -----
        Since this function takes multiple inputs it is being called
        for each time interval by separate mappable functions

        """
        if 'et' in variables or 'et_fraction' in variables:
            et_img = daily_coll.filterDate(agg_start_date, agg_end_date) \
                .select(['et']).sum()
        if 'et_reference' in variables or 'et_fraction' in variables:
            # et_reference_img = daily_et_ref_coll \
            et_reference_img = daily_coll \
                .filterDate(agg_start_date, agg_end_date) \
                .select(['et_reference']).sum()

        image_list = []
        if 'et' in variables:
            image_list.append(et_img.float())
        if 'et_reference' in variables:
            image_list.append(et_reference_img.float())
        if 'et_fraction' in variables:
            # Compute average et fraction over the aggregation period
            image_list.append(
                et_img.divide(et_reference_img)
                    .rename(['et_fraction']).float())
        if 'ndvi' in variables:
            # Compute average ndvi over the aggregation period
            ndvi_img = daily_coll \
                .filterDate(agg_start_date, agg_end_date) \
                .mean().select(['ndvi']).float()
            image_list.append(ndvi_img)
        if 'count' in variables:
            count_img = aggregate_coll \
                .filterDate(agg_start_date, agg_end_date) \
                .select(['mask']).count().rename('count').uint8()
            image_list.append(count_img)

        return ee.Image(image_list) \
            .set(interp_properties) \
            .set({
                'system:index': ee.Date(agg_start_date).format(date_format),
                'system:time_start': ee.Date(agg_start_date).millis(),
            })

    # Combine input, interpolated, and derived values
    if t_interval.lower() == 'daily':
        def aggregate_daily(daily_img):
            # CGM - Double check that this time_start is a 0 UTC time.
# It should be since it is coming from the interpolate source # collection, but what if source is GRIDMET (+6 UTC)? agg_start_date = ee.Date(daily_img.get('system:time_start')) # CGM - This calls .sum() on collections with only one image return aggregate_image( agg_start_date=agg_start_date, agg_end_date=ee.Date(agg_start_date).advance(1, 'day'), date_format='YYYYMMdd') return ee.ImageCollection(daily_coll.map(aggregate_daily)) elif t_interval.lower() == 'monthly': def month_gen(iter_start_dt, iter_end_dt): iter_dt = iter_start_dt # Conditional is "less than" because end date is exclusive while iter_dt < iter_end_dt: yield iter_dt.strftime('%Y-%m-%d') iter_dt += relativedelta(months=+1) month_list = ee.List(list(month_gen(start_dt, end_dt))) def aggregate_monthly(agg_start_date): return aggregate_image( agg_start_date=agg_start_date, agg_end_date=ee.Date(agg_start_date).advance(1, 'month'), date_format='YYYYMM') return ee.ImageCollection(month_list.map(aggregate_monthly)) elif t_interval.lower() == 'annual': def year_gen(iter_start_dt, iter_end_dt): iter_dt = iter_start_dt while iter_dt < iter_end_dt: yield iter_dt.strftime('%Y-%m-%d') iter_dt += relativedelta(years=+1) year_list = ee.List(list(year_gen(start_dt, end_dt))) def aggregate_annual(agg_start_date): return aggregate_image( agg_start_date=agg_start_date, agg_end_date=ee.Date(agg_start_date).advance(1, 'year'), date_format='YYYY') return ee.ImageCollection(year_list.map(aggregate_annual)) elif t_interval.lower() == 'custom': # Returning an ImageCollection to be consistent return ee.ImageCollection( aggregate_image(agg_start_date=start_date, agg_end_date=end_date, date_format='YYYYMMdd'))
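# Hedged usage sketch for interpolate(); the collection object and
# parameter values are illustrative, not from the source.
# et_monthly = model_coll.interpolate(
#     variables=['et', 'et_reference', 'et_fraction', 'count'],
#     t_interval='monthly',
#     interp_method='linear',
#     interp_days=32)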
''' # %% roi = ee.Geometry.Point([-99.2182, 46.7824]) # find images acquired during June and July collection = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \ .filterBounds(roi) \ .filter(ee.Filter.calendarRange(6, 7, 'month')) \ .sort('DATE_ACQUIRED') print(collection.size().getInfo()) first = collection.first() propertyNames = first.propertyNames() print(propertyNames.getInfo()) time_start = ee.Date(first.get('system:time_start')).format("YYYY-MM-dd") print(time_start.getInfo()) # %% ''' ## Display Earth Engine data layers ''' # %% Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True) Map
            print(outarr[0])
            print('--Writing reflectance to csv for ', roiID)
            with open('Processed/Reflectance/reflectL8_' + roiID + '.csv',
                      "a", newline="") as f:
                writer = csv.writer(f)
                # writer.writerow(['date', 'blue', 'red', 'nir', 'green', 'LID'])  # only for a new file
                for item in outarr:
                    # Write item to outcsv
                    writer.writerow([int(item[0]), item[1], item[2],
                                     item[3], item[4], item[5]])
        else:
            print('No new imagery found in last 20 days for', roiID)
    else:
        print('No new imagery found in last 20 days for', roiID)

#### Options ####
edate = datetime.datetime.now()
sdate = datetime.datetime.now() + datetime.timedelta(days=-20)

endDate = ee.Date(edate)
startDate = ee.Date(sdate)  # ee.Date('1987-01-01')

print('Checking for new imagery from: {} to {}'.format(
    sdate.strftime('%Y-%m-%d'), edate.strftime('%Y-%m-%d')))

downloadL8(baruria, startDate, endDate, 'baruria')
downloadL8(bahadurabad, startDate, endDate, 'bahadurabad')
downloadL8(hardinge_bridge, startDate, endDate, 'hardinge_bridge')
downloadL8(mawa, startDate, endDate, 'mawa')
downloadL8(naria, startDate, endDate, 'naria')