def testDepthLimit_withDictionaries(self):
    x = ee.Dictionary({0: 0})
    for i in range(100):
        x = ee.Dictionary({i: x})
    encoded = serializer.encode(x, for_cloud_api=True)
    self.assertLess(_max_depth(encoded), 60)
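A minimal sketch of the _max_depth helper referenced above (the real test module's implementation is not shown; this version is an assumption based on the assertion):

def _max_depth(obj):
    # Recursively measure the dict/list nesting depth of the encoded graph
    if isinstance(obj, dict):
        return 1 + max((_max_depth(v) for v in obj.values()), default=0)
    if isinstance(obj, (list, tuple)):
        return 1 + max((_max_depth(v) for v in obj), default=0)
    return 0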
Example n. 2
def mapList(results, key):
    newResult = results.map(lambda x: ee.Dictionary(x).get(key))
    return newResult
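A brief usage sketch, assuming results is an ee.List of dictionaries (for example, the output of a zonal-statistics reduction):

results = ee.List([{'mean': 1.5, 'name': 'a'}, {'mean': 2.0, 'name': 'b'}])
means = mapList(results, 'mean')
print(means.getInfo())  # [1.5, 2.0]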
Example n. 3

# %%
# Visualization for WRI/GPPD/power_plants
# https://code.earthengine.google.com/9efbd726e4a8ba9b8b56ba94f1267678

table = ee.FeatureCollection("WRI/GPPD/power_plants")

# Get a color from a fuel
fuelColor = ee.Dictionary({
    'Coal': '000000',
    'Oil': '593704',
    'Gas': 'BC80BD',
    'Hydro': '0565A6',
    'Nuclear': 'E31A1C',
    'Solar': 'FF7F00',
    'Waste': '6A3D9A',
    'Wind': '5CA2D1',
    'Geothermal': 'FDBF6F',
    'Biomass': '229A00'
})

# List of fuels to add to the map
fuels = [
    'Coal', 'Oil', 'Gas', 'Hydro', 'Nuclear', 'Solar', 'Waste', 'Wind',
    'Geothermal', 'Biomass'
]


# Computes size from capacity and color from fuel type.
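The snippet breaks off at the comment above; a hedged sketch of how fuelColor and fuels might drive per-fuel layers (Map.addLayer and the 'fuel1' property name are assumptions about the original script):

for fuel in fuels:
    color = fuelColor.get(fuel).getInfo()              # hex color string
    subset = table.filter(ee.Filter.eq('fuel1', fuel))
    # Map.addLayer(subset, {'color': color}, fuel)     # Code Editor style layer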
Example n. 4
def listval(img, it):
    img_id = ee.String(img.id())
    values = img.reduceRegion(ee.Reducer.first(), point, scale)
    return ee.Dictionary(it).set(img_id, ee.Dictionary(values))
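A hedged driver sketch that folds this accumulator over a collection (the point, scale, and collection below are illustrative; the fragment assumes they exist in the enclosing scope):

point = ee.Geometry.Point([-70.0, -33.0])
scale = 30
collection = (ee.ImageCollection('LANDSAT/LC08/C01/T1_SR')
              .filterBounds(point).limit(3))
values = ee.Dictionary(collection.iterate(listval, ee.Dictionary({})))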
Example n. 5
def imad(current, prev):
    done = ee.Number(ee.Dictionary(prev).get('done'))
    return ee.Algorithms.If(done, prev, imad1(current, prev))
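imad is an iterate callback that short-circuits once 'done' is set; a hedged driver sketch (the remaining seed keys depend on imad1, which is not shown, so the call is left commented):

# first = ee.Dictionary({'done': 0})  # plus whatever state imad1 expects
# result = ee.Dictionary(inputlist.iterate(imad, first))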
Example n. 6
def rescale(image, col, collection_to_match, reference='all', renamed=False,
            drop=False):
    """ Re-scale the values of image which must belong to collection so the
        values match the ones from collection_from

    :param collection: The Collection to which belongs the image
    :type collection: Collection
    :param collection_to_match: the Collection to get the range from
    :type collection_to_match: Collection
    :param reference: optical, thermal, bits or all
    :type reference: str
    """
    # Create comparative collection
    # bands = ee.Dictionary(col.bands)
    common_bands = getCommonBands(col, collection_to_match,
                                  reference=reference, match='name')
    # keep only bands with min and max values
    new_common = []

    ranges = {}
    ranges_other = {}

    def setrange(band, range_dict):
        if not renamed:
            name = band.id
        else:
            name = band.name

        range_dict[name] = {'min': band.min, 'max': band.max}

    precisions = {}
    for band in common_bands:
        b = col.getBand(band, 'name')
        b_proxy = collection_to_match.getBand(band, 'name')
        if (b.min is not None and b.max is not None
                and b_proxy.min is not None and b_proxy.max is not None):
            if not renamed:
                name = b.id
            else:
                name = b.name
            new_common.append(name)
            setrange(b, ranges)
            setrange(b_proxy, ranges_other)
            precisions[name] = b_proxy.precision

    new_common = ee.List(new_common)
    ranges_this = ee.Dictionary(ranges)
    ranges_proxy = ee.Dictionary(ranges_other)
    precisions = ee.Dictionary(precisions)

    def iteration(band, ini):
        ini = ee.Image(ini)
        band = ee.String(band)
        ranges_this_band = ee.Dictionary(ranges_this.get(band))
        ranges_proxy_band = ee.Dictionary(ranges_proxy.get(band))
        min_this = ee.Number(ranges_this_band.get('min'))
        min_proxy = ee.Number(ranges_proxy_band.get('min'))
        max_this = ee.Number(ranges_this_band.get('max'))
        max_proxy = ee.Number(ranges_proxy_band.get('max'))

        equal_min = min_this.eq(min_proxy)
        equal_max = max_this.eq(max_proxy)
        equal = equal_min.And(equal_max)

        def true(ini):
            return ini

        def false(ini, band, min_this, max_this, min_proxy, max_proxy):
            return tools.image.parametrize(ini,
                                           (min_this, max_this),
                                           (min_proxy, max_proxy),
                                           bands=[band])

        return ee.Image(ee.Algorithms.If(
            equal, true(ini),
            false(ini, band, min_this, max_this, min_proxy, max_proxy)))

    final = ee.Image(new_common.iterate(iteration, image))
    final = convertPrecisions(final, precisions)
    if drop:
        final = final.select(new_common)

    return final
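A hedged usage sketch; the Collection wrappers this helper expects come from geetools, so the names below are illustrative and unverified:

# l8 = collection.Landsat8SR()            # hypothetical Collection wrapper
# s2 = collection.Sentinel2()             # hypothetical Collection wrapper
# img = ee.Image(l8.collection.first())
# matched = rescale(img, l8, s2, reference='optical')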
Example n. 7
def main(request):
    """Compute scene Tcorr images by date

    Parameters
    ----------
    start_date : str, optional
    end_date : str, optional

    Returns
    -------
    flask.Response
        Plain text summary of the submitted export tasks.

    """
    logging.info('\nCompute scene Tcorr images by date')

    export_id_fmt = 'tcorr_scene_{product}_{scene_id}'
    asset_id_fmt = '{coll_id}/{scene_id}'
    tcorr_scene_coll_id = '{}/{}_scene'.format(EXPORT_COLL,
                                               TMAX_SOURCE.lower())
    model_args = {'tmax_source': 'DAYMET_MEDIAN_V2'}

    # Default start and end date to None if not set
    try:
        start_date = request.args['start']
    except KeyError:
        start_date = None
    try:
        end_date = request.args['end']
    except KeyError:
        end_date = None

    if not start_date and not end_date:
        # Process the last 60 days by default
        start_dt = datetime.datetime.today() - datetime.timedelta(days=60)
        end_dt = datetime.datetime.today() - datetime.timedelta(days=1)
    elif start_date and end_date:
        # Only process custom range if start and end are both set
        # Limit the end date to the current date
        try:
            start_dt = datetime.datetime.strptime(start_date, '%Y-%m-%d')
            end_dt = min(
                datetime.datetime.strptime(end_date, '%Y-%m-%d'),
                datetime.datetime.today() - datetime.timedelta(days=1))
            # end_dt = end_dt + datetime.timedelta(days=1)
        except ValueError as e:
            response = 'Error parsing start and end dates\n'
            response += str(e)
            abort(404, description=response)
        # if start_dt < datetime.datetime(1984, 3, 23):
        #     logging.debug('Start Date: {} - no Landsat 5+ images before '
        #                   '1984-03-23'.format(start_dt.strftime('%Y-%m-%d')))
        #     start_dt = datetime.datetime(1984, 3, 23)
        if start_dt > end_dt:
            return abort(404, description='Start date must be before end date')
    else:
        response = 'Both start and end date must be specified'
        abort(404, description=response)
    logging.info('Start Date: {}'.format(start_dt.strftime('%Y-%m-%d')))
    logging.info('End Date:   {}'.format(end_dt.strftime('%Y-%m-%d')))

    # if (TMAX_SOURCE.upper() == 'CIMIS' and end_date < '2003-10-01'):
    #     logging.error(
    #         '\nCIMIS is not currently available before 2003-10-01, exiting\n')
    #     abort()
    # elif (TMAX_SOURCE.upper() == 'DAYMET' and end_date > '2018-12-31'):
    #     logging.warning(
    #         '\nDAYMET is not currently available past 2018-12-31, '
    #         'using median Tmax values\n')

    logging.debug('\nInitializing Earth Engine')
    ee.Initialize(ee.ServiceAccountCredentials('', key_file='privatekey.json'),
                  use_cloud_api=True)

    if not ee.data.getInfo(tcorr_scene_coll_id):
        return abort(404, description='Export collection does not exist')

    # Get a Tmax image to set the Tcorr values to
    logging.debug('\nTmax properties')
    tmax_source = TMAX_SOURCE.split('_', 1)[0]
    tmax_version = TMAX_SOURCE.split('_', 1)[1]
    if 'MEDIAN' in TMAX_SOURCE.upper():
        tmax_coll_id = 'projects/earthengine-legacy/assets/' \
                       'projects/usgs-ssebop/tmax/{}'.format(TMAX_SOURCE.lower())
        tmax_coll = ee.ImageCollection(tmax_coll_id)
        tmax_mask = ee.Image(tmax_coll.first()).select([0]).multiply(0)
    else:
        # TODO: Add support for non-median tmax sources
        raise ValueError('unsupported tmax_source: {}'.format(TMAX_SOURCE))
    logging.debug('  Collection: {}'.format(tmax_coll_id))
    logging.debug('  Source:  {}'.format(tmax_source))
    logging.debug('  Version: {}'.format(tmax_version))

    logging.debug('\nExport properties')
    export_info = get_info(ee.Image(tmax_mask))
    if 'daymet' in TMAX_SOURCE.lower():
        # Custom smaller extent for DAYMET focused on CONUS
        export_extent = [-1999750, -1890500, 2500250, 1109500]
        export_shape = [4500, 3000]
        export_geo = [1000, 0, -1999750, 0, -1000, 1109500]
        # Custom medium extent for DAYMET of CONUS, Mexico, and southern Canada
        # export_extent = [-2099750, -3090500, 2900250, 1909500]
        # export_shape = [5000, 5000]
        # export_geo = [1000, 0, -2099750, 0, -1000, 1909500]
        export_crs = export_info['bands'][0]['crs']
    else:
        export_crs = export_info['bands'][0]['crs']
        export_geo = export_info['bands'][0]['crs_transform']
        export_shape = export_info['bands'][0]['dimensions']
        # export_geo = ee.Image(tmax_mask).projection().getInfo()['transform']
        # export_crs = ee.Image(tmax_mask).projection().getInfo()['crs']
        # export_shape = ee.Image(tmax_mask).getInfo()['bands'][0]['dimensions']
        export_extent = [
            export_geo[2], export_geo[5] + export_shape[1] * export_geo[4],
            export_geo[2] + export_shape[0] * export_geo[0], export_geo[5]
        ]
    export_geom = ee.Geometry.Rectangle(export_extent,
                                        proj=export_crs,
                                        geodesic=False)
    logging.debug('  CRS: {}'.format(export_crs))
    logging.debug('  Extent: {}'.format(export_extent))
    logging.debug('  Geo: {}'.format(export_geo))
    logging.debug('  Shape: {}'.format(export_shape))

    if STUDY_AREA_EXTENT is None:
        if 'daymet' in TMAX_SOURCE.lower():
            # CGM - For now force DAYMET to a slightly smaller "CONUS" extent
            study_area_extent = [-125, 25, -65, 49]
            # study_area_extent =  [-125, 25, -65, 52]
        elif 'cimis' in TMAX_SOURCE.lower():
            study_area_extent = [-124, 35, -119, 42]
        else:
            # TODO: Make sure output from bounds is in WGS84
            study_area_extent = tmax_mask.geometry().bounds().getInfo()
        logging.debug(f'\nStudy area extent not set, '
                      f'defaulting to {study_area_extent}')
    else:
        study_area_extent = STUDY_AREA_EXTENT
    study_area_geom = ee.Geometry.Rectangle(study_area_extent,
                                            proj='EPSG:4326',
                                            geodesic=False)

    # Intersect study area with export extent
    export_geom = export_geom.intersection(study_area_geom, 1)
    # logging.debug('Extent: {}'.format(export_geom.bounds().getInfo()))

    # # If cell_size parameter is set in the INI,
    # # adjust the output cellsize and recompute the transform and shape
    # try:
    #     export_cs = CELL_SIZE
    #     export_shape = [
    #         int(math.ceil(abs((export_shape[0] * export_geo[0]) / export_cs))),
    #         int(math.ceil(abs((export_shape[1] * export_geo[4]) / export_cs)))]
    #     export_geo = [export_cs, 0.0, export_geo[2], 0.0, -export_cs, export_geo[5]]
    #     logging.debug('  Custom export cell size: {}'.format(export_cs))
    #     logging.debug('  Geo: {}'.format(export_geo))
    #     logging.debug('  Shape: {}'.format(export_shape))
    # except KeyError:
    #     pass

    # Get current asset list
    logging.debug('\nGetting GEE asset list')
    asset_list = get_ee_assets(tcorr_scene_coll_id)

    # Get current running tasks
    logging.debug('\nGetting GEE task list')
    tasks = get_ee_tasks()

    # if update_flag:
    #     assets_info = utils.get_info(ee.ImageCollection(
    #         tcorr_scene_coll_id).filterDate(start_date, end_date))
    #     asset_props = {f'{scene_coll_id}/{x["properties"]["system:index"]}':
    #                        x['properties']
    #                    for x in assets_info['features']}
    # else:
    #     asset_props = {}

    response = 'Tcorr scene export tasks\n'

    for export_dt in sorted(date_range(start_dt, end_dt)):
        export_date = export_dt.strftime('%Y-%m-%d')
        next_date = (export_dt +
                     datetime.timedelta(days=1)).strftime('%Y-%m-%d')

        logging.debug(f'Date: {export_date}')

        model_obj = ssebop.Collection(
            collections=COLLECTIONS,
            start_date=export_date,
            end_date=next_date,
            cloud_cover_max=CLOUD_COVER,
            geometry=export_geom,
            model_args=model_args,
            # filter_args=filter_args,
        )
        landsat_coll = model_obj.overpass(variables=['ndvi'])

        try:
            image_id_list = landsat_coll.aggregate_array('system:id').getInfo()
        except Exception as e:
            logging.warning('  Error getting image ID list, skipping date')
            logging.debug(f'  {e}')
            continue

        # Sort by path/row
        for image_id in sorted(image_id_list,
                               key=lambda k: k.split('/')[-1].split('_')[-2],
                               reverse=True):
            scene_id = image_id.split('/')[-1]

            wrs2_path = int(scene_id[5:8])
            wrs2_row = int(scene_id[8:11])
            wrs2_tile = 'p{:03d}r{:03d}'.format(wrs2_path, wrs2_row)
            logging.debug(f'{scene_id}')

            export_id = export_id_fmt.format(product=TMAX_SOURCE.lower(),
                                             scene_id=scene_id)
            logging.debug(f'  Export ID: {export_id}')

            asset_id = asset_id_fmt.format(coll_id=tcorr_scene_coll_id,
                                           scene_id=scene_id)
            logging.debug(f'  Asset ID: {asset_id}')

            if export_id in tasks.keys():
                logging.debug('  Task already submitted, skipping')
                continue
            elif asset_id in asset_list:
                logging.debug('  Asset already exists, skipping')
                continue

            image = ee.Image(image_id)
            # TODO: Will need to be changed for SR or use from_image_id()
            t_obj = ssebop.Image.from_landsat_c1_toa(image_id, **model_args)
            t_stats = ee.Dictionary(t_obj.tcorr_stats) \
                .combine({'tcorr_p5': 0, 'tcorr_count': 0}, overwrite=False)
            tcorr = ee.Number(t_stats.get('tcorr_p5'))
            count = ee.Number(t_stats.get('tcorr_count'))
            index = ee.Algorithms.If(count.gte(MIN_PIXEL_COUNT), 0, 9)

            # Write an empty image if the pixel count is too low
            tcorr_img = ee.Algorithms.If(count.gte(MIN_PIXEL_COUNT),
                                         tmax_mask.add(tcorr),
                                         tmax_mask.updateMask(0))

            # Clip to the Landsat image footprint
            output_img = ee.Image(tcorr_img).clip(image.geometry())

            # Clear the transparency mask
            output_img = output_img.updateMask(output_img.unmask(0)) \
                .rename(['tcorr']) \
                .set({
                    'CLOUD_COVER': image.get('CLOUD_COVER'),
                    'CLOUD_COVER_LAND': image.get('CLOUD_COVER_LAND'),
                    # 'SPACECRAFT_ID': image.get('SPACECRAFT_ID'),
                    'coll_id': image_id.split('/')[0],
                    # 'cycle_day': ((export_dt - cycle_base_dt).days % 8) + 1,
                    'date_ingested': datetime.datetime.today().strftime('%Y-%m-%d'),
                    'date': export_dt.strftime('%Y-%m-%d'),
                    'doy': int(export_dt.strftime('%j')),
                    'model_name': MODEL_NAME,
                    'model_version': ssebop.__version__,
                    'month': int(export_dt.month),
                    'scene_id': image_id.split('/')[-1],
                    'system:time_start': image.get('system:time_start'),
                    'tcorr_value': tcorr,
                    'tcorr_index': index,
                    'tcorr_pixel_count': count,
                    'tmax_source': tmax_source.upper(),
                    'tmax_version': tmax_version.upper(),
                    'wrs2_path': wrs2_path,
                    'wrs2_row': wrs2_row,
                    'wrs2_tile': wrs2_tile,
                    'year': int(export_dt.year),
                })

            # logging.debug('  Building export task')
            task = ee.batch.Export.image.toAsset(
                image=output_img,
                description=export_id,
                assetId=asset_id,
                crs=export_crs,
                crsTransform='[' + ','.join(list(map(str, export_geo))) + ']',
                dimensions='{0}x{1}'.format(*export_shape),
                # crsTransform=list(map(str, export_geo)),
                # dimensions=export_shape,
            )

            # logging.debug('  Starting export task')
            ee_task_start(task)

            response += '{}\n'.format(export_id)

    response += 'End\n'
    return Response(response, mimetype='text/plain')
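main follows the Google Cloud Functions HTTP handler signature (a single request argument); one way to exercise it locally, assuming the functions-framework tooling is the deployment target:

# pip install functions-framework
# functions-framework --target=main --port=8080
# curl "http://localhost:8080/?start=2020-01-01&end=2020-02-01"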
Example n. 8
def unique_properties(coll, prop):
    return ee.String(ee.List(ee.Dictionary(
        coll.aggregate_histogram(prop)).keys()).join(','))
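Usage sketch: the helper returns a comma-joined string of the distinct values of a property across a collection (the collection and property below are illustrative):

coll = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR').limit(10)
print(unique_properties(coll, 'SPACECRAFT_ID').getInfo())  # e.g. 'LANDSAT_8'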
Example n. 9
def map_list(results, key):
    """ helper function for zonal stats"""
    new_result = results.map(lambda x: ee.Dictionary(x).get(key))
    return new_result
Example n. 10
def leesigma(image, KERNEL_SIZE):
    """
    Implements the improved lee sigma filter to one image. 
    It is implemented as described in, Lee, J.-S. Wen, J.-H. Ainsworth, T.L. Chen, K.-S. Chen, A.J. 
    Improved sigma filter for speckle filtering of SAR imagery. 
    IEEE Trans. Geosci. Remote Sens. 2009, 47, 202–213.

    Parameters
    ----------
    image : ee.Image
        Image to be filtered
    KERNEL_SIZE : positive odd integer
        Neighbourhood window size

    Returns
    -------
    ee.Image
        Filtered Image

    """

    #parameters
    Tk = ee.Image.constant(7)  #number of bright pixels in a 3x3 window
    sigma = 0.9
    enl = 4
    target_kernel = 3
    bandNames = image.bandNames().remove('angle')

    # compute the 98th percentile intensity
    z98 = ee.Dictionary(image.select(bandNames).reduceRegion(
        reducer=ee.Reducer.percentile([98]),
        geometry=image.geometry(),
        scale=10,
        maxPixels=1e13)).toImage()

    #select the strong scatterers to retain
    brightPixel = image.select(bandNames).gte(z98)
    K = brightPixel.reduceNeighborhood(ee.Reducer.countDistinctNonNull(),
                                       ee.Kernel.square(target_kernel / 2))
    retainPixel = K.gte(Tk)

    # compute the a priori mean within a 3x3 local window
    # original noise standard deviation for multi-looked data (enl = 4)
    eta = 1.0 / math.sqrt(enl)
    eta = ee.Image.constant(eta)
    # MMSE applied to estimate the a priori mean
    reducers = ee.Reducer.mean().combine(
        reducer2=ee.Reducer.variance(),
        sharedInputs=True)
    stats = image.select(bandNames).reduceNeighborhood(
        reducer=reducers,
        kernel=ee.Kernel.square(target_kernel / 2, 'pixels'),
        optimization='window')
    meanBand = bandNames.map(lambda bandName: ee.String(bandName).cat('_mean'))
    varBand = bandNames.map(
        lambda bandName: ee.String(bandName).cat('_variance'))

    z_bar = stats.select(meanBand)
    varz = stats.select(varBand)

    oneImg = ee.Image.constant(1)
    varx = (varz.subtract(z_bar.abs().pow(2).multiply(eta.pow(2)))).divide(
        oneImg.add(eta.pow(2)))
    b = varx.divide(varz)
    xTilde = oneImg.subtract(b).multiply(z_bar.abs()).add(
        b.multiply(image.select(bandNames)))

    # step 2: compute the sigma range
    # Lookup table (J.S. Lee et al. 2009) of range and eta values for
    # intensity (only the 4-look case is shown here)
    LUT = ee.Dictionary({
        0.5:
        ee.Dictionary({
            'I1': 0.694,
            'I2': 1.385,
            'eta': 0.1921
        }),
        0.6:
        ee.Dictionary({
            'I1': 0.630,
            'I2': 1.495,
            'eta': 0.2348
        }),
        0.7:
        ee.Dictionary({
            'I1': 0.560,
            'I2': 1.627,
            'eta': 0.2825
        }),
        0.8:
        ee.Dictionary({
            'I1': 0.480,
            'I2': 1.804,
            'eta': 0.3354
        }),
        0.9:
        ee.Dictionary({
            'I1': 0.378,
            'I2': 2.094,
            'eta': 0.3991
        }),
        0.95:
        ee.Dictionary({
            'I1': 0.302,
            'I2': 2.360,
            'eta': 0.4391
        })
    })

    #extract data from lookup
    sigmaImage = ee.Dictionary(LUT.get(str(sigma))).toImage()
    I1 = sigmaImage.select('I1')
    I2 = sigmaImage.select('I2')
    #new speckle sigma
    nEta = sigmaImage.select('eta')
    #establish the sigma ranges
    I1 = I1.multiply(xTilde)
    I2 = I2.multiply(xTilde)

    # step 3: apply the MMSE filter to pixels within the sigma range
    # MMSE estimator: keep only pixels inside the [I1, I2] range
    mask = image.select(bandNames).gte(I1).And(image.select(bandNames).lte(I2))
    z = image.select(bandNames).updateMask(mask)

    stats = z.reduceNeighborhood(
        reducer=reducers,
        kernel=ee.Kernel.square(KERNEL_SIZE / 2, 'pixels'),
        optimization='window')

    z_bar = stats.select(meanBand)
    varz = stats.select(varBand)

    varx = (varz.subtract(z_bar.abs().pow(2).multiply(nEta.pow(2)))).divide(
        oneImg.add(nEta.pow(2)))
    b = varx.divide(varz)
    #if b is negative set it to zero
    new_b = b.where(b.lt(0), 0)
    xHat = oneImg.subtract(new_b).multiply(z_bar.abs()).add(new_b.multiply(z))

    #remove the applied masks and merge the retained pixels and the filtered pixels
    xHat = image.select(bandNames).updateMask(retainPixel).unmask(xHat)
    output = ee.Image(xHat).rename(bandNames)
    return image.addBands(output, None, True)
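A hedged usage sketch over Sentinel-1 (the function expects power-scale data with an 'angle' band, plus import math and an initialized ee session):

s1 = (ee.ImageCollection('COPERNICUS/S1_GRD_FLOAT')
      .filterDate('2020-01-01', '2020-01-15'))
filtered = s1.map(lambda img: leesigma(img, 7))  # 7x7 filtering window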
Example n. 11
    def __init__(self):
        """Initialize the environment."""

        # Initialize the Earth Engine object, using the authentication credentials.
        ee.Initialize()

        self.dem = ee.Image("JAXA/ALOS/AW3D30_V1_1").select(["AVE"])
        self.epsg = "EPSG:32717"

        ##########################################
        # variable for the landsat data request #
        ##########################################
        self.metadataCloudCoverMax = 80

        ##########################################
        # Export variables                       #
        ##########################################

        self.assetId = "projects/Sacha/PreprocessedData/L8_Biweekly_V6/"
        self.name = "LS_BW_"

        self.exportScale = 20

        ##########################################
        # variable for the shadowMask  algorithm #
        ##########################################

        # zScoreThresh: Threshold for cloud shadow masking- lower number masks out
        # less. Between -0.8 and -1.2 generally works well
        self.zScoreThresh = -0.9

        # shadowSumThresh: Sum of IR bands to include as shadows within TDOM and the
        # shadow shift method (lower number masks out less)
        self.shadowSumThresh = 0.4

        # contractPixels: The radius (in pixels) to contract (negative buffer)
        # clouds and cloud shadows by. Intended to eliminate smaller cloud
        # patches that are likely errors.
        # (1.5 results in a -1 pixel buffer; 0.5 results in a -0 pixel buffer)
        # (1.5 or 2.5 generally is sufficient)
        self.contractPixels = 1.5

        # dilatePixels: The radius of the number of pixels to dilate (buffer) clouds
        # and cloud shadows by. Intended to include edges of clouds/cloud shadows
        # that are often missed (1.5 results in a 1 pixel buffer)(0.5 results in a 0 pixel buffer)
        # (2.5 or 3.5 generally is sufficient)
        self.dilatePixels = 3.25

        ##########################################
        # variable for cloudScore  algorithm     #
        ##########################################

        # Cloud and cloud shadow masking parameters.
        # If cloudScoreTDOM is chosen
        # cloudScoreThresh: If using the cloudScoreTDOMShift method-Threshold for cloud
        #    masking (lower number masks more clouds.  Between 10 and 30 generally works best)
        self.cloudScoreThresh = 1

        # Percentile of cloud score to pull from time series to represent a minimum for
        # the cloud score over time for a given pixel. Reduces commission errors over
        # cool bright surfaces. Generally between 5 and 10 works well. 0 generally is a bit noisy
        self.cloudScorePctl = 8
        self.hazeThresh = 195

        ##########################################
        # variable for terrain  algorithm        #
        ##########################################

        self.terrainScale = 600

        ##########################################
        # variable band selection                #
        ##########################################
        self.percentiles = [25, 75]
        self.medianPercentileBands = ee.List([
            'blue', 'green', 'red', 'nir', 'swir1', 'swir2', 'date',
            'pixel_qa', 'cloudScore'
        ])

        self.divideBands = ee.List(
            ['blue', 'green', 'red', 'nir', 'swir1', 'swir2'])
        self.medoidBands = ee.List(
            ['blue', 'green', 'red', 'nir', 'swir1', 'swir2'])
        self.medoidIncludeBands = ee.List(
            ['blue', 'green', 'red', 'nir', 'swir1', 'swir2', 'pixel_qa'])

        self.noScaleBands = ee.List([
            'date', 'year', 'cloudMask', 'count', 'TDOMMask', 'pixel_qa',
            'cloudScore'
        ])

        self.bandNamesLandsat = ee.List([
            'blue', 'green', 'red', 'nir', 'swir1', 'thermal', 'swir2',
            'sr_atmos_opacity', 'pixel_qa', 'radsat_qa'
        ])
        self.sensorBandDictLandsatSR = ee.Dictionary({
            'L8': ee.List([1, 2, 3, 4, 5, 7, 6, 9, 10, 11]),
            'L7': ee.List([0, 1, 2, 3, 4, 5, 6, 7, 9, 10]),
            'L5': ee.List([0, 1, 2, 3, 4, 5, 6, 7, 9, 10]),
            'L4': ee.List([0, 1, 2, 3, 4, 5, 6, 7, 9, 10])})

        ##########################################
        # enable / disable modules               #
        ##########################################
        self.maskSR = True
        self.cloudMask = False
        self.hazeMask = False
        self.shadowMask = False
        self.brdfCorrect = True
        self.terrainCorrection = True
        self.includePercentiles = True
        self.compositingMethod = 'Medoid'
Example n. 12
def leeSigma(collection,
             window=9,
             sigma=0.9,
             looks=4,
             Tk=7):

    def applyFilter(img):
        img = dbToPower(img)

        # MMSE estimator
        mmseMask = img.gte(a1).Or(img.lte(a2))
        mmseIn = img.updateMask(mmseMask)
        oneImg = ee.Image(1)
        z = mmseIn.reduceNeighborhood(ee.Reducer.mean(), kernel, None, True)
        varz = mmseIn.reduceNeighborhood(ee.Reducer.variance(), kernel)
        varx = (varz.subtract(z.abs().pow(2).multiply(eta))).divide(oneImg.add(eta))
        b = varx.divide(varz)
        mmse = oneImg.subtract(b).multiply(z.abs()).add(b.multiply(mmseIn))

        # workflow
        z99 = ee.Dictionary(img.reduceRegion(
            reducer=ee.Reducer.percentile([99], None, 255, 0.001, 1e6),
            geometry=img.geometry(),
            scale=10,
            bestEffort=True
        )).toImage()

        overThresh = img.gte(z99)

        K = overThresh.reduceNeighborhood(ee.Reducer.sum(), targetKernel, None, True)

        retainPixel = K.gte(Tk)
        xHat = powerToDb(img.updateMask(retainPixel).unmask(mmse))

        return ee.Image(xHat).rename(bandNames).copyProperties(img)

    bandNames = ee.Image(collection.first()).bandNames()

    midPt = (window//2)+1 if (window%2)!=0 else window//2
    kernelWeights = ee.List.repeat(ee.List.repeat(1, window), window)
    kernel = ee.Kernel.fixed(window,window, kernelWeights, midPt,midPt)

    targetWeights = ee.List.repeat(ee.List.repeat(1, 3), 3)
    targetKernel = ee.Kernel.fixed(3, 3, targetWeights, 1, 1)

    # Lookup table for range and eta values for intensity
    sigmaLookup = ee.Dictionary({
        1: ee.Dictionary({
            0.5: ee.Dictionary({
                'A1': 0.436,
                'A2': 1.92,
                'η': 0.4057
            }),
            0.6: ee.Dictionary({
                'A1': 0.343,
                'A2': 2.21,
                'η': 0.4954
            }),
            0.7: ee.Dictionary({
                'A1': 0.254,
                'A2': 2.582,
                'η': 0.5911
            }),
            0.8: ee.Dictionary({
                'A1': 0.168,
                'A2': 3.094,
                'η': 0.6966
            }),
            0.9: ee.Dictionary({
                'A1': 0.084,
                'A2': 3.941,
                'η': 0.8191
            }),
            0.95: ee.Dictionary({
                'A1': 0.043,
                'A2': 4.840,
                'η': 0.8599
            })
        }),
        2: ee.Dictionary({
            0.5: ee.Dictionary({
                'A1': 0.582,
                'A2': 1.584,
                'η': 0.2763
            }),
            0.6: ee.Dictionary({
                'A1': 0.501,
                'A2': 1.755,
                'η': 0.3388
            }),
            0.7: ee.Dictionary({
                'A1': 0.418,
                'A2': 1.972,
                'η': 0.4062
            }),
            0.8: ee.Dictionary({
                'A1': 0.327,
                'A2': 2.260,
                'η': 0.4819
            }),
            0.9: ee.Dictionary({
                'A1': 0.221,
                'A2': 2.744,
                'η': 0.5699
            }),
            0.95: ee.Dictionary({
                'A1': 0.152,
                'A2': 3.206,
                'η': 0.6254
            }),
        }),
        3: ee.Dictionary({
            0.5: ee.Dictionary({
                'A1': 0.652,
                'A2': 1.458,
                'η': 0.2222
            }),
            0.6: ee.Dictionary({
                'A1': 0.580,
                'A2': 1.586,
                'η': 0.2736
            }),
            0.7: ee.Dictionary({
                'A1': 0.505,
                'A2': 1.751,
                'η': 0.3280
            }),
            0.8: ee.Dictionary({
                'A1': 0.419,
                'A2': 1.865,
                'η': 0.3892
            }),
            0.9: ee.Dictionary({
                'A1': 0.313,
                'A2': 2.320,
                'η': 0.4624
            }),
            0.95: ee.Dictionary({
                'A1': 0.238,
                'A2': 2.656,
                'η': 0.5084
            }),
        }),
        4: ee.Dictionary({
            0.5: ee.Dictionary({
                'A1': 0.694,
                'A2': 1.385,
                'η': 0.1921
            }),
            0.6: ee.Dictionary({
                'A1': 0.630,
                'A2': 1.495,
                'η': 0.2348
            }),
            0.7: ee.Dictionary({
                'A1': 0.560,
                'A2': 1.627,
                'η': 0.2825
            }),
            0.8: ee.Dictionary({
                'A1': 0.480,
                'A2': 1.804,
                'η': 0.3354
            }),
            0.9: ee.Dictionary({
                'A1': 0.378,
                'A2': 2.094,
                'η': 0.3991
            }),
            0.95: ee.Dictionary({
                'A1': 0.302,
                'A2': 2.360,
                'η': 0.4391
            }),
        })
    })

    # extract data from lookup
    looksDict = ee.Dictionary(sigmaLookup.get(ee.String(str(looks))))
    sigmaImage = ee.Dictionary(looksDict.get(ee.String(str(sigma)))).toImage()
    a1 = sigmaImage.select('A1')
    a2 = sigmaImage.select('A2')
    aRange = a2.subtract(a1)
    eta = sigmaImage.select('η').pow(2)

    return collection.map(applyFilter)
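Usage sketch for the collection-level variant (dbToPower and powerToDb are sibling helpers from the same module, assumed to be in scope):

s1 = (ee.ImageCollection('COPERNICUS/S1_GRD')
      .filterDate('2020-01-01', '2020-01-15'))
smoothed = leeSigma(s1, window=9, sigma=0.9, looks=4)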
Example n. 13

def get_properties(image):
    properties = {}
    for property in image.propertyNames().getInfo():
        properties[property] = image.get(property)
    return ee.Dictionary(properties).getInfo()
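Usage sketch (two getInfo round-trips: one for the property names, one for the values):

props = get_properties(ee.Image('LANDSAT/LC08/C01/T1_SR/LC08_044034_20170614'))
print(props['CLOUD_COVER'])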
Example n. 14
def CheckMultiProperties(result):
    self.assertEqual(ee.ApiFunction.lookup('Element.setMulti'), result.func)
    self.assertEqual(
        {'object': image, 'properties': ee.Dictionary(computed_arg)},
        result.args)
Example n. 15
def getValues(collection,
              geometry,
              scale=None,
              reducer=None,
              id='system:index',
              properties=None,
              side='server',
              maxPixels=1e9):
    """ Return all values of all bands of an image collection in the
        specified geometry

    :param collection: the collection from which to get the values
    :type collection: ee.ImageCollection
    :param geometry: the geometry from which to get the values
    :type geometry: ee.Geometry
    :param scale: the scale to use in the reducer. It defaults to 1
        (the native resolution)
    :type scale: int
    :param reducer: the reducer to apply. Defaults to ee.Reducer.mean()
    :type reducer: ee.Reducer
    :param id: image property that will be the key in the result dict
    :type id: str
    :param properties: image properties that will be added to the resulting
        dict
    :type properties: list
    :param side: 'server' or 'client' side
    :type side: str
    :param maxPixels: the maximum number of pixels to reduce
    :type maxPixels: int
    :return: values of all bands in the geometry
    :rtype: dict
    """
    if reducer is None:
        reducer = ee.Reducer.mean()

    if not scale:
        scale = 1
    else:
        scale = int(scale)

    if not properties:
        properties = []
    properties = ee.List(properties)

    def listval(img, it):
        theid = ee.Algorithms.String(img.get(id))
        values = img.reduceRegion(reducer,
                                  geometry,
                                  scale,
                                  maxPixels=maxPixels)
        values = ee.Dictionary(values)
        img_props = img.propertyNames()

        def add_properties(prop, ini):
            ini = ee.Dictionary(ini)
            condition = img_props.contains(prop)

            def true():
                value = img.get(prop)
                return ini.set(prop, value)

            return ee.Algorithms.If(condition, true(), ini)

        with_prop = ee.Dictionary(properties.iterate(add_properties, values))
        return ee.Dictionary(it).set(theid, with_prop)

    result = collection.iterate(listval, ee.Dictionary({}))
    result = ee.Dictionary(
        ee.Algorithms.If(collection.size().neq(0), result, {}))

    if side == 'server':
        return result
    elif side == 'client':
        return result.getInfo()
    else:
        raise ValueError("side parameter must be 'server' or 'client'")
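A usage sketch at a single point (the collection and location are illustrative):

point = ee.Geometry.Point([-71.5, -42.5])
col = ee.ImageCollection('COPERNICUS/S2').filterBounds(point).limit(3)
vals = getValues(col, point, scale=10, side='client')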
Example n. 16
def lee_sigma(img, window=9, sigma=0.9, looks=4, tk=7, keep_bands="angle"):
    """Lee Sigma speckle filtering algorithm.
    Implemented from interpreting https://doi.org/10.1109/TGRS.2008.2002881

    args:
        img (ee.Image): Earth engine image object. Expects that imagery is a SAR image
        window (int, optional): moving window size to apply filter (i.e. a value of 9 == 9x9 window). default = 9
        sigma (float, optional): sigma lookup value from table 1 in paper. default = 0.9
        looks (int, optional): look intensity value from table 1 in paper. default = 4
        tk (int, optional): threshold value to determine values in window as point targets. default = 7
        keep_bands (str | list[str], optional): regex name or list of band names to drop during filtering and include in the result
            default = "angle"

    returns:
        ee.Image: filtered SAR image using the Lee Sigma algorithm
    """
    band_names = img.bandNames()
    proc_bands = band_names.remove(keep_bands)
    keep_img = img.select(keep_bands)
    img = img.select(proc_bands)

    midPt = (window // 2) + 1 if (window % 2) != 0 else window // 2
    kernelWeights = ee.List.repeat(ee.List.repeat(1, window), window)
    kernel = ee.Kernel.fixed(window, window, kernelWeights, midPt, midPt)

    targetWeights = ee.List.repeat(ee.List.repeat(1, 3), 3)
    targetkernel = ee.Kernel.fixed(3, 3, targetWeights, 1, 1)

    # Lookup table for range and eta values for intensity
    sigmaLookup = ee.Dictionary({
        1:
        ee.Dictionary({
            0.5:
            ee.Dictionary({
                "A1": 0.436,
                "A2": 1.92,
                "η": 0.4057
            }),
            0.6:
            ee.Dictionary({
                "A1": 0.343,
                "A2": 2.21,
                "η": 0.4954
            }),
            0.7:
            ee.Dictionary({
                "A1": 0.254,
                "A2": 2.582,
                "η": 0.5911
            }),
            0.8:
            ee.Dictionary({
                "A1": 0.168,
                "A2": 3.094,
                "η": 0.6966
            }),
            0.9:
            ee.Dictionary({
                "A1": 0.084,
                "A2": 3.941,
                "η": 0.8191
            }),
            0.95:
            ee.Dictionary({
                "A1": 0.043,
                "A2": 4.840,
                "η": 0.8599
            }),
        }),
        2:
        ee.Dictionary({
            0.5:
            ee.Dictionary({
                "A1": 0.582,
                "A2": 1.584,
                "η": 0.2763
            }),
            0.6:
            ee.Dictionary({
                "A1": 0.501,
                "A2": 1.755,
                "η": 0.3388
            }),
            0.7:
            ee.Dictionary({
                "A1": 0.418,
                "A2": 1.972,
                "η": 0.4062
            }),
            0.8:
            ee.Dictionary({
                "A1": 0.327,
                "A2": 2.260,
                "η": 0.4819
            }),
            0.9:
            ee.Dictionary({
                "A1": 0.221,
                "A2": 2.744,
                "η": 0.5699
            }),
            0.95:
            ee.Dictionary({
                "A1": 0.152,
                "A2": 3.206,
                "η": 0.6254
            }),
        }),
        3:
        ee.Dictionary({
            0.5:
            ee.Dictionary({
                "A1": 0.652,
                "A2": 1.458,
                "η": 0.2222
            }),
            0.6:
            ee.Dictionary({
                "A1": 0.580,
                "A2": 1.586,
                "η": 0.2736
            }),
            0.7:
            ee.Dictionary({
                "A1": 0.505,
                "A2": 1.751,
                "η": 0.3280
            }),
            0.8:
            ee.Dictionary({
                "A1": 0.419,
                "A2": 1.865,
                "η": 0.3892
            }),
            0.9:
            ee.Dictionary({
                "A1": 0.313,
                "A2": 2.320,
                "η": 0.4624
            }),
            0.95:
            ee.Dictionary({
                "A1": 0.238,
                "A2": 2.656,
                "η": 0.5084
            }),
        }),
        4:
        ee.Dictionary({
            0.5:
            ee.Dictionary({
                "A1": 0.694,
                "A2": 1.385,
                "η": 0.1921
            }),
            0.6:
            ee.Dictionary({
                "A1": 0.630,
                "A2": 1.495,
                "η": 0.2348
            }),
            0.7:
            ee.Dictionary({
                "A1": 0.560,
                "A2": 1.627,
                "η": 0.2825
            }),
            0.8:
            ee.Dictionary({
                "A1": 0.480,
                "A2": 1.804,
                "η": 0.3354
            }),
            0.9:
            ee.Dictionary({
                "A1": 0.378,
                "A2": 2.094,
                "η": 0.3991
            }),
            0.95:
            ee.Dictionary({
                "A1": 0.302,
                "A2": 2.360,
                "η": 0.4391
            }),
        }),
    })

    # extract data from lookup
    looksDict = ee.Dictionary(sigmaLookup.get(ee.String(str(looks))))
    sigmaImage = ee.Dictionary(looksDict.get(ee.String(str(sigma)))).toImage()
    a1 = sigmaImage.select("A1")
    a2 = sigmaImage.select("A2")
    aRange = a2.subtract(a1)
    eta = sigmaImage.select("η").pow(2)

    img = geeutils.db_to_power(img)

    # MMSE estimator
    mmseMask = img.gte(a1).Or(img.lte(a2))
    mmseIn = img.updateMask(mmseMask)
    oneImg = ee.Image(1)
    z = mmseIn.reduceNeighborhood(ee.Reducer.mean(), kernel, None, True)
    varz = mmseIn.reduceNeighborhood(ee.Reducer.variance(), kernel)
    varx = (varz.subtract(z.abs().pow(2).multiply(eta))).divide(
        oneImg.add(eta))
    b = varx.divide(varz)
    mmse = oneImg.subtract(b).multiply(z.abs()).add(b.multiply(mmseIn))

    # workflow
    z99 = ee.Dictionary(
        img.reduceRegion(
            reducer=ee.Reducer.percentile([99], None, 255, 0.001, 1e6),
            geometry=img.geometry(),
            scale=10,
            bestEffort=True,
        )).toImage()

    overThresh = img.gte(z99)

    K = overThresh.reduceNeighborhood(ee.Reducer.sum(), targetkernel, None,
                                      True)

    retainPixel = K.gte(tk)
    xHat = geeutils.power_to_db(img.updateMask(retainPixel).unmask(mmse))

    return ee.Image(xHat).rename(proc_bands).addBands(keep_img)
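Usage sketch for this variant (geeutils.db_to_power and power_to_db are helpers from the same package, assumed importable):

s1_img = ee.Image(ee.ImageCollection('COPERNICUS/S1_GRD').first())
filtered = lee_sigma(s1_img, window=9, sigma=0.9, looks=4)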
Example n. 17

def main(ini_path=None,
         overwrite_flag=False,
         delay_time=0,
         gee_key_file=None,
         max_ready=-1,
         cron_flag=False,
         reverse_flag=False,
         update_flag=False):
    """Compute scene Tcorr images by WRS2 tile

    Parameters
    ----------
    ini_path : str
        Input file path.
    overwrite_flag : bool, optional
        If True, overwrite existing files if the export dates are the same and
        generate new images (but with different export dates) even if the tile
        lists are the same.  The default is False.
    delay_time : float, optional
        Delay time in seconds between starting export tasks (or checking the
        number of queued tasks, see "max_ready" parameter).  The default is 0.
    gee_key_file : str, None, optional
        Earth Engine service account JSON key file (the default is None).
    max_ready: int, optional
        Maximum number of queued "READY" tasks.  The default is -1, which
        implies no limit to the number of tasks that will be submitted.
    cron_flag: bool, optional
        Not currently implemented.
    reverse_flag : bool, optional
        If True, process WRS2 tiles and dates in reverse order.
    update_flag : bool, optional
        If True, only overwrite scenes with an older model version.

    """
    logging.info('\nCompute scene Tcorr images by WRS2 tile')

    ini = utils.read_ini(ini_path)

    model_name = 'SSEBOP'
    # model_name = ini['INPUTS']['et_model'].upper()

    tmax_name = ini[model_name]['tmax_source']

    export_id_fmt = 'tcorr_scene_{product}_{scene_id}'
    asset_id_fmt = '{coll_id}/{scene_id}'

    tcorr_scene_coll_id = '{}/{}_scene'.format(ini['EXPORT']['export_coll'],
                                               tmax_name.lower())

    wrs2_coll_id = 'projects/earthengine-legacy/assets/' \
                   'projects/usgs-ssebop/wrs2_descending_custom'
    wrs2_tile_field = 'WRS2_TILE'
    wrs2_path_field = 'PATH'
    wrs2_row_field = 'ROW'

    try:
        wrs2_tiles = str(ini['INPUTS']['wrs2_tiles'])
        wrs2_tiles = sorted([x.strip() for x in wrs2_tiles.split(',')])
    except KeyError:
        wrs2_tiles = []
        logging.debug('  wrs2_tiles: not set in INI, defaulting to []')
    except Exception as e:
        raise e

    try:
        study_area_extent = str(ini['INPUTS']['study_area_extent']) \
            .replace('[', '').replace(']', '').split(',')
        study_area_extent = [float(x.strip()) for x in study_area_extent]
    except KeyError:
        study_area_extent = None
        logging.debug('  study_area_extent: not set in INI')
    except Exception as e:
        raise e

    # TODO: Add try/except blocks and default values?
    collections = [x.strip() for x in ini['INPUTS']['collections'].split(',')]
    cloud_cover = float(ini['INPUTS']['cloud_cover'])
    min_pixel_count = float(ini['TCORR']['min_pixel_count'])
    # min_scene_count = float(ini['TCORR']['min_scene_count'])

    if (tmax_name.upper() == 'CIMIS'
            and ini['INPUTS']['end_date'] < '2003-10-01'):
        logging.error(
            '\nCIMIS is not currently available before 2003-10-01, exiting\n')
        sys.exit()
    elif (tmax_name.upper() == 'DAYMET'
          and ini['INPUTS']['end_date'] > '2018-12-31'):
        logging.warning('\nDAYMET is not currently available past 2018-12-31, '
                        'using median Tmax values\n')
        # sys.exit()
    # elif (tmax_name.upper() == 'TOPOWX' and
    #         ini['INPUTS']['end_date'] > '2017-12-31'):
    #     logging.warning(
    #         '\nDAYMET is not currently available past 2017-12-31, '
    #         'using median Tmax values\n')
    #     # sys.exit()

    # Extract the model keyword arguments from the INI
    # Set the property name to lower case and try to cast values to numbers
    model_args = {
        k.lower(): float(v) if utils.is_number(v) else v
        for k, v in dict(ini[model_name]).items()
    }
    # et_reference_args = {
    #     k: model_args.pop(k)
    #     for k in [k for k in model_args.keys() if k.startswith('et_reference_')]}

    logging.info('\nInitializing Earth Engine')
    if gee_key_file:
        logging.info(
            '  Using service account key file: {}'.format(gee_key_file))
        # The "EE_ACCOUNT" parameter is not used if the key file is valid
        ee.Initialize(ee.ServiceAccountCredentials('x', key_file=gee_key_file),
                      use_cloud_api=True)
    else:
        ee.Initialize(use_cloud_api=True)

    # Get a Tmax image to set the Tcorr values to
    logging.debug('\nTmax properties')
    tmax_source = tmax_name.split('_', 1)[0]
    tmax_version = tmax_name.split('_', 1)[1]
    if 'MEDIAN' in tmax_name.upper():
        tmax_coll_id = 'projects/earthengine-legacy/assets/' \
                       'projects/usgs-ssebop/tmax/{}'.format(tmax_name.lower())
        tmax_coll = ee.ImageCollection(tmax_coll_id)
        tmax_mask = ee.Image(tmax_coll.first()).select([0]).multiply(0)
    else:
        # TODO: Add support for non-median tmax sources
        raise ValueError('unsupported tmax_source: {}'.format(tmax_name))
    logging.debug('  Collection: {}'.format(tmax_coll_id))
    logging.debug('  Source:  {}'.format(tmax_source))
    logging.debug('  Version: {}'.format(tmax_version))

    logging.debug('\nExport properties')
    export_info = utils.get_info(ee.Image(tmax_mask))
    if 'daymet' in tmax_name.lower():
        # Custom smaller extent for DAYMET focused on CONUS
        export_extent = [-1999750, -1890500, 2500250, 1109500]
        export_shape = [4500, 3000]
        export_geo = [1000, 0, -1999750, 0, -1000, 1109500]
        # Custom medium extent for DAYMET of CONUS, Mexico, and southern Canada
        # export_extent = [-2099750, -3090500, 2900250, 1909500]
        # export_shape = [5000, 5000]
        # export_geo = [1000, 0, -2099750, 0, -1000, 1909500]
        export_crs = export_info['bands'][0]['crs']
    else:
        export_crs = export_info['bands'][0]['crs']
        export_geo = export_info['bands'][0]['crs_transform']
        export_shape = export_info['bands'][0]['dimensions']
        # export_geo = ee.Image(tmax_mask).projection().getInfo()['transform']
        # export_crs = ee.Image(tmax_mask).projection().getInfo()['crs']
        # export_shape = ee.Image(tmax_mask).getInfo()['bands'][0]['dimensions']
        export_extent = [
            export_geo[2], export_geo[5] + export_shape[1] * export_geo[4],
            export_geo[2] + export_shape[0] * export_geo[0], export_geo[5]
        ]
    export_geom = ee.Geometry.Rectangle(export_extent,
                                        proj=export_crs,
                                        geodesic=False)
    logging.debug('  CRS: {}'.format(export_crs))
    logging.debug('  Extent: {}'.format(export_extent))
    logging.debug('  Geo: {}'.format(export_geo))
    logging.debug('  Shape: {}'.format(export_shape))

    if study_area_extent is None:
        if 'daymet' in tmax_name.lower():
            # CGM - For now force DAYMET to a slightly smaller "CONUS" extent
            study_area_extent = [-125, 25, -65, 49]
            # study_area_extent =  [-125, 25, -65, 52]
        elif 'cimis' in tmax_name.lower():
            study_area_extent = [-124, 35, -119, 42]
        else:
            # TODO: Make sure output from bounds is in WGS84
            study_area_extent = tmax_mask.geometry().bounds().getInfo()
        logging.debug(f'\nStudy area extent not set in INI, '
                      f'default to {study_area_extent}')
    # For now define the study area from an extent
    study_area_geom = ee.Geometry.Rectangle(study_area_extent,
                                            proj='EPSG:4326',
                                            geodesic=False)

    # Intersect the study area with the export extent
    export_geom = export_geom.intersection(study_area_geom, 1)
    # logging.debug('  Extent: {}'.format(export_geom.bounds().getInfo()))

    # If cell_size parameter is set in the INI,
    # adjust the output cellsize and recompute the transform and shape
    try:
        export_cs = float(ini['EXPORT']['cell_size'])
        export_shape = [
            int(math.ceil(abs((export_shape[0] * export_geo[0]) / export_cs))),
            int(math.ceil(abs((export_shape[1] * export_geo[4]) / export_cs)))
        ]
        export_geo = [
            export_cs, 0.0, export_geo[2], 0.0, -export_cs, export_geo[5]
        ]
        logging.debug('  Custom export cell size: {}'.format(export_cs))
        logging.debug('  Geo: {}'.format(export_geo))
        logging.debug('  Shape: {}'.format(export_shape))
    except KeyError:
        pass

    if not ee.data.getInfo(tcorr_scene_coll_id):
        logging.info('\nExport collection does not exist and will be built'
                     '\n  {}'.format(tcorr_scene_coll_id))
        input('Press ENTER to continue')
        ee.data.createAsset({'type': 'IMAGE_COLLECTION'}, tcorr_scene_coll_id)

    # Get current asset list
    logging.debug('\nGetting GEE asset list')
    asset_list = utils.get_ee_assets(tcorr_scene_coll_id)
    # if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
    #     pprint.pprint(asset_list[:10])

    # Get current running tasks
    tasks = utils.get_ee_tasks()
    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
        logging.debug('  Tasks: {}\n'.format(len(tasks)))
        input('ENTER')

    # TODO: Decide if month and year lists should be applied to scene exports
    # # Limit by year and month
    # try:
    #     month_list = sorted(list(utils.parse_int_set(ini['TCORR']['months'])))
    # except:
    #     logging.info('\nTCORR "months" parameter not set in the INI,'
    #                  '\n  Defaulting to all months (1-12)\n')
    #     month_list = list(range(1, 13))
    # try:
    #     year_list = sorted(list(utils.parse_int_set(ini['TCORR']['years'])))
    # except:
    #     logging.info('\nTCORR "years" parameter not set in the INI,'
    #                  '\n  Defaulting to all available years\n')
    #     year_list = []

    if cron_flag:
        # CGM - This seems like a silly way of getting the date as a datetime
        #   Why am I doing this and not using the commented out line?
        end_dt = datetime.date.today().strftime('%Y-%m-%d')
        end_dt = datetime.datetime.strptime(end_dt, '%Y-%m-%d')
        end_dt = end_dt + datetime.timedelta(days=-4)
        # end_dt = datetime.datetime.today() + datetime.timedelta(days=-1)
        start_dt = end_dt + datetime.timedelta(days=-64)
    else:
        start_dt = datetime.datetime.strptime(ini['INPUTS']['start_date'],
                                              '%Y-%m-%d')
        end_dt = datetime.datetime.strptime(ini['INPUTS']['end_date'],
                                            '%Y-%m-%d')
    if end_dt >= datetime.datetime.today():
        logging.debug('End Date:   {} - setting end date to current '
                      'date'.format(end_dt.strftime('%Y-%m-%d')))
        end_dt = datetime.datetime.today()
    if start_dt < datetime.datetime(1984, 3, 23):
        logging.debug('Start Date: {} - no Landsat 5+ images before '
                      '1984-03-23'.format(start_dt.strftime('%Y-%m-%d')))
        start_dt = datetime.datetime(1984, 3, 23)
    start_date = start_dt.strftime('%Y-%m-%d')
    end_date = end_dt.strftime('%Y-%m-%d')
    # next_date = (start_dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
    logging.debug('Start Date: {}'.format(start_date))
    logging.debug('End Date:   {}\n'.format(end_date))
    if start_dt > end_dt:
        raise ValueError('start date must be before end date')

    # Get the list of WRS2 tiles that intersect the data area and study area
    wrs2_coll = ee.FeatureCollection(wrs2_coll_id) \
        .filterBounds(export_geom) \
        .filterBounds(study_area_geom)
    if wrs2_tiles:
        wrs2_coll = wrs2_coll.filter(
            ee.Filter.inList(wrs2_tile_field, wrs2_tiles))
    wrs2_info = wrs2_coll.getInfo()['features']
    # pprint.pprint(wrs2_info)
    # input('ENTER')

    # Iterate over WRS2 tiles (default is from west to east)
    for wrs2_ftr in sorted(wrs2_info,
                           key=lambda k: k['properties']['WRS2_TILE'],
                           reverse=not (reverse_flag)):
        wrs2_tile = wrs2_ftr['properties'][wrs2_tile_field]
        logging.info('{}'.format(wrs2_tile))

        wrs2_path = int(wrs2_tile[1:4])
        wrs2_row = int(wrs2_tile[5:8])
        # wrs2_path = wrs2_ftr['properties']['PATH']
        # wrs2_row = wrs2_ftr['properties']['ROW']

        wrs2_filter = [{
            'type': 'equals',
            'leftField': 'WRS_PATH',
            'rightValue': wrs2_path
        }, {
            'type': 'equals',
            'leftField': 'WRS_ROW',
            'rightValue': wrs2_row
        }]
        filter_args = {c: wrs2_filter for c in collections}

        # Build and merge the Landsat collections
        model_obj = ssebop.Collection(
            collections=collections,
            start_date=start_date,
            end_date=end_date,
            cloud_cover_max=cloud_cover,
            geometry=ee.Geometry(wrs2_ftr['geometry']),
            model_args=model_args,
            filter_args=filter_args,
        )
        landsat_coll = model_obj.overpass(variables=['ndvi'])
        # pprint.pprint(landsat_coll.aggregate_array('system:id').getInfo())
        # input('ENTER')

        try:
            image_id_list = landsat_coll.aggregate_array('system:id').getInfo()
        except Exception as e:
            logging.warning('  Error getting image ID list, skipping tile')
            logging.debug(f'  {e}')
            continue

        if update_flag:
            assets_info = utils.get_info(
                ee.ImageCollection(tcorr_scene_coll_id).filterMetadata(
                    'wrs2_tile', 'equals',
                    wrs2_tile).filterDate(start_date, end_date))
            asset_props = {
                f'{tcorr_scene_coll_id}/{x["properties"]["system:index"]}':
                x['properties']
                for x in assets_info['features']
            }
        else:
            asset_props = {}

        # Sort by date
        for image_id in sorted(image_id_list,
                               key=lambda k: k.split('/')[-1].split('_')[-1],
                               reverse=reverse_flag):
            coll_id, scene_id = image_id.rsplit('/', 1)
            logging.info(f'{scene_id}')

            export_dt = datetime.datetime.strptime(
                scene_id.split('_')[-1], '%Y%m%d')
            export_date = export_dt.strftime('%Y-%m-%d')
            # next_date = (export_dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')

            # # Uncomment to apply month and year list filtering
            # if month_list and export_dt.month not in month_list:
            #     logging.debug(f'  Date: {export_date} - month not in INI - skipping')
            #     continue
            # elif year_list and export_dt.year not in year_list:
            #     logging.debug(f'  Date: {export_date} - year not in INI - skipping')
            #     continue

            logging.debug(f'  Date: {export_date}')

            export_id = export_id_fmt.format(product=tmax_name.lower(),
                                             scene_id=scene_id)
            logging.debug(f'  Export ID: {export_id}')

            asset_id = asset_id_fmt.format(coll_id=tcorr_scene_coll_id,
                                           scene_id=scene_id)
            logging.debug(f'  Asset ID: {asset_id}')

            if update_flag:

                def version_number(version_str):
                    return list(map(int, version_str.split('.')))

                if export_id in tasks.keys():
                    logging.info('  Task already submitted, skipping')
                    continue
                # In update mode only overwrite if the version is old
                if asset_props and asset_id in asset_props.keys():
                    model_ver = version_number(ssebop.__version__)
                    asset_ver = version_number(
                        asset_props[asset_id]['model_version'])

                    if asset_ver < model_ver:
                        logging.info('  Asset model version is old, removing')
                        try:
                            ee.data.deleteAsset(asset_id)
                        except Exception:
                            logging.info('  Error removing asset, skipping')
                            continue
                    else:
                        logging.info('  Asset is up to date, skipping')
                        continue
            elif overwrite_flag:
                if export_id in tasks.keys():
                    logging.debug('  Task already submitted, cancelling')
                    ee.data.cancelTask(tasks[export_id]['id'])
                # This is intentionally not an "elif" so that a task can be
                # cancelled and an existing image/file/asset can be removed
                if asset_id in asset_list:
                    logging.debug('  Asset already exists, removing')
                    ee.data.deleteAsset(asset_id)
            else:
                if export_id in tasks.keys():
                    logging.debug('  Task already submitted, exiting')
                    continue
                elif asset_id in asset_list:
                    logging.debug('  Asset already exists, skipping')
                    continue

            image = ee.Image(image_id)
            # TODO: Will need to be changed for SR or use from_image_id()
            t_obj = ssebop.Image.from_landsat_c1_toa(image_id, **model_args)
            t_stats = ee.Dictionary(t_obj.tcorr_stats) \
                .combine({'tcorr_p5': 0, 'tcorr_count': 0}, overwrite=False)
            tcorr = ee.Number(t_stats.get('tcorr_p5'))
            count = ee.Number(t_stats.get('tcorr_count'))
            index = ee.Algorithms.If(count.gte(min_pixel_count), 0, 9)

            # Write an empty image if the pixel count is too low
            tcorr_img = ee.Algorithms.If(count.gte(min_pixel_count),
                                         tmax_mask.add(tcorr),
                                         tmax_mask.updateMask(0))

            # Clip to the Landsat image footprint
            output_img = ee.Image(tcorr_img).clip(image.geometry())

            # Clear the transparency mask
            output_img = output_img.updateMask(output_img.unmask(0)) \
                .rename(['tcorr']) \
                .set({
                    'CLOUD_COVER': image.get('CLOUD_COVER'),
                    'CLOUD_COVER_LAND': image.get('CLOUD_COVER_LAND'),
                    # 'SPACECRAFT_ID': image.get('SPACECRAFT_ID'),
                    'coll_id': coll_id,
                    'date_ingested': datetime.datetime.today().strftime('%Y-%m-%d'),
                    'date': export_dt.strftime('%Y-%m-%d'),
                    'doy': int(export_dt.strftime('%j')),
                    'image_id': image_id,
                    'model_name': model_name,
                    'model_version': ssebop.__version__,
                    'month': int(export_dt.month),
                    'scene_id': scene_id,
                    'system:time_start': image.get('system:time_start'),
                    'tcorr_value': tcorr,
                    'tcorr_index': index,
                    'tcorr_pixel_count': count,
                    'tmax_source': tmax_source.upper(),
                    'tmax_version': tmax_version.upper(),
                    'wrs2_path': wrs2_path,
                    'wrs2_row': wrs2_row,
                    'wrs2_tile': wrs2_tile,
                    'year': int(export_dt.year),
                })
            # pprint.pprint(output_img.getInfo()['properties'])
            # input('ENTER')

            logging.debug('  Building export task')
            task = ee.batch.Export.image.toAsset(
                image=output_img,
                description=export_id,
                assetId=asset_id,
                crs=export_crs,
                crsTransform='[' + ','.join(list(map(str, export_geo))) + ']',
                dimensions='{0}x{1}'.format(*export_shape),
            )

            logging.info('  Starting export task')
            utils.ee_task_start(task)

        # Pause before starting the next date (not export task)
        utils.delay_task(delay_time, max_ready)
        logging.debug('')
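The t_stats pattern above, ee.Dictionary.combine(defaults, overwrite=False), fills in keys that the percentile reducer may omit (e.g. when every pixel is masked) without clobbering values that are present. A minimal standalone sketch, assuming an initialized Earth Engine session:

import ee

ee.Initialize()

# Hypothetical stats dictionary; 'tcorr_p5' is missing, as it would be
# when the reducer saw only masked pixels.
stats = ee.Dictionary({'tcorr_count': 42})

# overwrite=False keeps the existing keys and only fills the gaps
stats = stats.combine({'tcorr_p5': 0, 'tcorr_count': 0}, overwrite=False)

print(stats.getInfo())  # {'tcorr_count': 42, 'tcorr_p5': 0}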
Esempio n. 18
def on_run_button_clicked(b):
    global result,m,collection,count,timestamplist1, \
           w_startdate,w_enddate,w_orbitpass,w_changemap, \
           w_relativeorbitnumber,w_significance,w_median,w_Q, \
           mean_incidence,collectionmean,coords
    try:
        if w_collection.value == 'COPERNICUS/S1_GRD':
            w_text.value = 'running on GEE archive ...'
            coords = ee.List(poly.bounds().coordinates().get(0))
            collection = ee.ImageCollection(w_collection.value) \
                      .filterBounds(ee.Geometry.Point(coords.get(0))) \
                      .filterBounds(ee.Geometry.Point(coords.get(1))) \
                      .filterBounds(ee.Geometry.Point(coords.get(2))) \
                      .filterBounds(ee.Geometry.Point(coords.get(3))) \
                      .filterDate(ee.Date(w_startdate.value), ee.Date(w_enddate.value)) \
                      .filter(ee.Filter.eq('transmitterReceiverPolarisation', ['VV','VH'])) \
                      .filter(ee.Filter.eq('resolution_meters', 10)) \
                      .filter(ee.Filter.eq('instrumentMode', 'IW')) \
                      .filter(ee.Filter.eq('orbitProperties_pass', w_orbitpass.value))
            if w_relativeorbitnumber.value > 0:
                collection = collection.filter(
                    ee.Filter.eq('relativeOrbitNumber_start',
                                 int(w_relativeorbitnumber.value)))
            if w_platform.value != 'Both':
                collection = collection.filter(
                    ee.Filter.eq('platform_number', w_platform.value))
            collection = collection.sort('system:time_start')
            acquisition_times = ee.List(
                collection.aggregate_array('system:time_start')).getInfo()
            count = len(acquisition_times)
            if count < 2:
                raise ValueError('Less than 2 images found')
            timestamplist = []
            for timestamp in acquisition_times:
                tmp = time.gmtime(int(timestamp) / 1000)
                timestamplist.append(time.strftime('%x', tmp))
    #      make timestamps in YYYYMMDD format
            timestamplist = [x.replace('/', '') for x in timestamplist]
            timestamplist = ['T20' + x[4:] + x[0:4] for x in timestamplist]
            #      in case of duplicates add running integer
            timestamplist1 = [
                timestamplist[i] + '_' + str(i + 1)
                for i in range(len(timestamplist))
            ]
            relativeorbitnumbers = map(
                int,
                ee.List(collection.aggregate_array(
                    'relativeOrbitNumber_start')).getInfo())
            rons = list(set(relativeorbitnumbers))
            txt = 'running on GEE archive ...\n'
            txt += 'Images found: %i, platform: %s \n' % (count,
                                                          w_platform.value)
            txt += 'Acquisition dates: %s\n' % str(timestamplist)
            txt += 'Relative orbit numbers: ' + str(rons) + '\n'
            if len(rons) == 1:
                mean_incidence = get_incidence_angle(collection.first())
                txt += 'Mean incidence angle: %f' % mean_incidence
            else:
                txt += 'Mean incidence angle: (select one rel. orbit)'
            w_text.value = txt
            pcollection = collection.map(get_vvvh)
            pList = pcollection.toList(100)
            first = ee.Dictionary({
                'imlist': ee.List([]),
                'poly': poly,
                'enl': ee.Number(w_enl.value)
            })
            imList = ee.Dictionary(pList.iterate(clipList,
                                                 first)).get('imlist')
            collectionmean = collection.mean().select(0).clip(poly)
            collmeanviz = collectionmean.visualize(min=-15,
                                                   max=5,
                                                   opacity=w_opacity.value)
        else:
            txt = 'running on local collection ...\n'
            collection = ee.ImageCollection(w_collection.value)
            count = collection.size().getInfo()
            w_exportscale.value = str(
                collection.first().projection().nominalScale().getInfo())
            txt += 'Images found: %i' % count
            timestamplist1 = ['T%i' % (i + 1) for i in range(count)]
            w_text.value = txt
            imList = collection.toList(100)
            collectionmean = collection.mean().select(0)
            collmeanviz = collectionmean.log().visualize(
                min=5, max=15, opacity=w_opacity.value)


#      run the algorithm
        result = ee.Dictionary(
            omnibus(imList, w_significance.value, w_enl.value, w_median.value,
                    w_Q.value))
        w_preview.disabled = False
        #      display mean of the full collection
        if len(m.layers) > 1:
            m.remove_layer(m.layers[1])
        m.add_layer(TileLayer(url=GetTileLayerUrl(collmeanviz)))
    except Exception as e:
        w_text.value = 'Error: %s' % e
Esempio n. 19
def get_values(collection,
               geometry,
               scale=None,
               reducer=ee.Reducer.mean(),
               id='system:index',
               properties=None,
               side='server',
               maxPixels=1e9):
    """ Return all values of all bands of an image collection in the
        specified geometry

    :param collection: collection from which to extract the values
    :type collection: ee.ImageCollection
    :param geometry: geometry (e.g. a point) over which to reduce the values
    :type geometry: ee.Geometry
    :param scale: the scale (in meters) to use in the reducer. Defaults to 1
    :type scale: int
    :param id: image property that will be the key in the result dict
    :type id: str
    :param properties: image properties that will be added to the resulting
        dict
    :type properties: list
    :param side: 'server' or 'client' side
    :type side: str
    :return: values of all bands in the geometry
    :rtype: dict
    """
    if not scale:
        # scale = minscale(ee.Image(self.first()))
        scale = 1
    else:
        scale = int(scale)

    propid = ee.Image(collection.first()).get(id).getInfo()

    def transform(eeobject):
        try:  # Py2
            isstr = isinstance(propid, (str, unicode))
        except NameError:  # Py3 has no 'unicode'
            isstr = isinstance(propid, str)

        if isinstance(propid, (int, float)):
            return ee.Number(eeobject).format()
        elif isstr:
            return ee.String(eeobject)
        else:
            msg = 'property must be a number or string, found {}'
            raise ValueError(msg.format(type(propid)))

    if not properties:
        properties = []
    properties = ee.List(properties)

    def listval(img, it):
        theid = ee.String(transform(img.get(id)))
        values = img.reduceRegion(reducer,
                                  geometry,
                                  scale,
                                  maxPixels=maxPixels)
        values = ee.Dictionary(values)
        img_props = img.propertyNames()

        def add_properties(prop, ini):
            ini = ee.Dictionary(ini)
            condition = img_props.contains(prop)

            def true():
                value = img.get(prop)
                return ini.set(prop, value)

            # value = img.get(prop)
            # return ini.set(prop, value)
            return ee.Algorithms.If(condition, true(), ini)

        with_prop = ee.Dictionary(properties.iterate(add_properties, values))
        return ee.Dictionary(it).set(theid, with_prop)

    result = collection.iterate(listval, ee.Dictionary({}))
    result = ee.Dictionary(result)

    if side == 'server':
        return result
    elif side == 'client':
        return result.getInfo()
    else:
        raise ValueError("side parameter must be 'server' or 'client'")
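A minimal usage sketch for get_values(); the location, collection and property name are hypothetical:

import ee

ee.Initialize()

point = ee.Geometry.Point([-71.06, 42.36])  # hypothetical location
col = ee.ImageCollection('COPERNICUS/S2') \
    .filterBounds(point) \
    .filterDate('2019-01-01', '2019-02-01') \
    .limit(3)

# One entry per image, keyed by 'system:index', holding the band values
# reduced at the point plus the requested image property
values = get_values(col, point, scale=10,
                    properties=['CLOUDY_PIXEL_PERCENTAGE'], side='client')
print(list(values.keys()))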
def productivity_performance(year_start, year_end, ndvi_gee_dataset, geojson,
                             EXECUTION_ID, logger):
    logger.debug("Entering productivity_performance function.")

    ndvi_1yr = ee.Image(ndvi_gee_dataset)
    ndvi_1yr = ndvi_1yr.where(ndvi_1yr.eq(9999), -32768)
    ndvi_1yr = ndvi_1yr.updateMask(ndvi_1yr.neq(-32768))

    # land cover data from esa cci
    lc = ee.Image(
        "users/geflanddegradation/toolbox_datasets/lcov_esacc_1992_2015")
    lc = lc.where(lc.eq(9999), -32768)
    lc = lc.updateMask(lc.neq(-32768))

    # global agroecological zones from IIASA
    soil_tax_usda = ee.Image(
        "users/geflanddegradation/toolbox_datasets/soil_tax_usda_sgrid")

    # Make sure the bounding box of the poly is used, and not the geodesic
    # version, for the clipping
    poly = ee.Geometry(geojson, opt_geodesic=False)

    # compute mean ndvi for the period
    ndvi_avg = ndvi_1yr.select(ee.List(['y{}'.format(i) for i in range(year_start, year_end + 1)])) \
        .reduce(ee.Reducer.mean()).rename(['ndvi']).clip(poly)

    # Handle case of year_start that isn't included in the CCI data
    if year_start > 2015:
        lc_year_start = 2015
    elif year_start < 1992:
        lc_year_start = 1992
    else:
        lc_year_start = year_start
    # reclassify lc to ipcc classes
    lc_t0 = lc.select('y{}'.format(lc_year_start)) \
        .remap([10, 11, 12, 20, 30, 40, 50, 60, 61, 62, 70, 71, 72, 80, 81, 82, 90, 100, 160, 170, 110, 130, 180, 190, 120, 121, 122, 140, 150, 151, 152, 153, 200, 201, 202, 210],
               [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36])

    # create a binary mask.
    mask = ndvi_avg.neq(0)

    # define modis projection attributes
    modis_proj = ee.Image(
        "users/geflanddegradation/toolbox_datasets/ndvi_modis_2001_2016"
    ).projection()

    # reproject land cover, soil_tax_usda and avhrr to modis resolution
    lc_proj = lc_t0.reproject(crs=modis_proj)
    soil_tax_usda_proj = soil_tax_usda.reproject(crs=modis_proj)
    ndvi_avg_proj = ndvi_avg.reproject(crs=modis_proj)

    # define unit of analysis as the intersect of soil_tax_usda and land cover
    units = soil_tax_usda_proj.multiply(100).add(lc_proj)

    # create a 2 band raster to compute 90th percentile per unit (analysis restricted by mask and study area)
    ndvi_id = ndvi_avg_proj.addBands(units).updateMask(mask)

    # compute 90th percentile by unit
    perc90 = ndvi_id.reduceRegion(
        reducer=ee.Reducer.percentile([90]).group(groupField=1,
                                                  groupName='code'),
        geometry=poly,
        scale=ee.Number(modis_proj.nominalScale()).getInfo(),
        maxPixels=1e15)

    # Extract the cluster IDs and the 90th percentile
    groups = ee.List(perc90.get("groups"))
    ids = groups.map(lambda d: ee.Dictionary(d).get('code'))
    perc = groups.map(lambda d: ee.Dictionary(d).get('p90'))

    # remap the units raster using their 90th percentile value
    raster_perc = units.remap(ids, perc)

    # compute the ratio of observed ndvi to the 90th percentile for that class
    obs_ratio = ndvi_avg_proj.divide(raster_perc)

    # aggregate obs_ratio to original NDVI data resolution (for modis this step does not change anything)
    obs_ratio_2 = obs_ratio.reduceResolution(reducer=ee.Reducer.mean(), maxPixels=2000) \
        .reproject(crs=ndvi_1yr.projection())

    # create final degradation output layer (-32768 is background, 0 is not
    # degraded, -1 is degraded)
    lp_perf_deg = ee.Image(-32768).where(obs_ratio_2.gte(0.5), 0) \
        .where(obs_ratio_2.lte(0.5), -1)

    lp_perf = lp_perf_deg.addBands(obs_ratio_2.multiply(10000)) \
        .addBands(units)

    task = util.export_to_cloudstorage(
        lp_perf.unmask(-32768).int16(), ndvi_1yr.projection(), geojson,
        'prod_performance', logger, EXECUTION_ID)
    task.join()

    logger.debug("Setting up results JSON.")
    d = [
        BandInfo("Productivity performance (degradation)",
                 1,
                 no_data_value=-32768,
                 add_to_map=True,
                 metadata={
                     'year_start': year_start,
                     'year_end': year_end
                 }),
        BandInfo("Productivity performance (ratio)",
                 2,
                 no_data_value=-32768,
                 add_to_map=False,
                 metadata={
                     'year_start': year_start,
                     'year_end': year_end
                 }),
        BandInfo("Productivity performance (units)",
                 3,
                 no_data_value=-32768,
                 add_to_map=False,
                 metadata={'year_start': year_start})
    ]
    u = URLList(task.get_URL_base(), task.get_files())
    gee_results = CloudResults('prod_performance', __version__, d, u)
    results_schema = CloudResultsSchema()
    json_results = results_schema.dump(gee_results)

    return json_results
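The grouped-percentile / remap pattern above (reduce per unit, then paint every pixel of a unit with that unit's 90th percentile) also works in isolation. A sketch with synthetic stand-ins for the NDVI and units bands:

import ee

ee.Initialize()

# Synthetic value band and integer class band standing in for
# ndvi_avg_proj and the soil/land-cover units
value_img = ee.Image.random(seed=1).rename('value')
units_img = ee.Image.random(seed=2).multiply(3).toInt().rename('units')
region = ee.Geometry.Rectangle([0, 0, 1, 1])

perc90 = value_img.addBands(units_img).reduceRegion(
    reducer=ee.Reducer.percentile([90]).group(groupField=1, groupName='code'),
    geometry=region,
    scale=1000,
    maxPixels=1e9)

groups = ee.List(perc90.get('groups'))
ids = groups.map(lambda d: ee.Dictionary(d).get('code'))
perc = groups.map(lambda d: ee.Dictionary(d).get('p90'))

# every pixel of a class is replaced by that class's 90th percentile
per_class_p90 = units_img.remap(ids, perc)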
Esempio n. 21
def on_run_button_clicked(b):
    global result,m,collection,poly,imList,count,timestamplist1,timestamps2, \
           w_startdate,w_enddate,w_orbitpass,w_changemap,s2_image, \
           w_relativeorbitnumber,w_significance,w_median,w_Q, \
           mean_incidence,collectionmean,archive_crs,coords
    try:
        if w_collection.value == 'COPERNICUS/S1_GRD':
            w_text.value = 'running on GEE archive ...'
            coords = ee.List(poly.bounds().coordinates().get(0))
            collection = getS1collection(coords)
            if w_relativeorbitnumber.value > 0:
                collection = collection.filter(
                    ee.Filter.eq('relativeOrbitNumber_start',
                                 int(w_relativeorbitnumber.value)))
            if w_platform.value != 'Both':
                collection = collection.filter(
                    ee.Filter.eq('platform_number', w_platform.value))
            collection = collection.sort('system:time_start')
            acquisition_times = ee.List(
                collection.aggregate_array('system:time_start')).getInfo()
            count = len(acquisition_times)
            if count < 2:
                raise ValueError('Less than 2 images found')
            timestamplist = []
            for timestamp in acquisition_times:
                tmp = time.gmtime(int(timestamp) / 1000)
                timestamplist.append(time.strftime('%x', tmp))
    #      make timestamps in YYYYMMDD format
            timestamplist = [x.replace('/', '') for x in timestamplist]
            timestamplist = ['T20' + x[4:] + x[0:4] for x in timestamplist]
            #      in case of duplicates add running integer
            timestamplist1 = [
                timestamplist[i] + '_' + str(i + 1)
                for i in range(len(timestamplist))
            ]
            relativeorbitnumbers = map(
                int,
                ee.List(collection.aggregate_array(
                    'relativeOrbitNumber_start')).getInfo())
            rons = list(set(relativeorbitnumbers))
            txt = 'running on GEE archive ...\n'
            txt += 'Images found: %i, platform: %s \n' % (count,
                                                          w_platform.value)
            txt += 'Number of 10m pixels contained: %i \n' % math.floor(
                poly.area().getInfo() / 100.0)
            txt += 'Acquisition dates: %s\n' % str(timestamplist)
            txt += 'Relative orbit numbers: ' + str(rons) + '\n'
            if len(rons) == 1:
                mean_incidence = get_incidence_angle(collection.first())
                txt += 'Mean incidence angle: %f' % mean_incidence
            else:
                txt += 'Mean incidence angle: (select one rel. orbit)'
            pcollection = collection.map(get_vvvh)
            pList = pcollection.toList(100)
            first = ee.Dictionary({
                'imlist': ee.List([]),
                'poly': poly,
                'enl': ee.Number(w_enl.value)
            })
            imList = ee.Dictionary(pList.iterate(clipList,
                                                 first)).get('imlist')
            collectionmean = collection.mean().select(0).clip(poly).rename(
                'b0')
            percentiles = collectionmean.reduceRegion(
                ee.Reducer.percentile([2, 98]),
                scale=float(w_exportscale.value),
                maxPixels=10e9)
            mn = ee.Number(percentiles.get('b0_p2'))
            mx = ee.Number(percentiles.get('b0_p98'))
            vorschau = collectionmean.visualize(min=mn,
                                                max=mx,
                                                opacity=w_opacity.value)
        else:
            txt = 'running on local collection %s ...\n' % w_collection.value
            collection = ee.ImageCollection(w_collection.value)
            count = collection.size().getInfo()
            txt += 'Images found: %i\n' % count
            collectionfirst = ee.Image(collection.first())
            poly = collectionfirst.geometry()
            coords = ee.List(poly.bounds().coordinates().get(0))
            center = poly.centroid().coordinates().getInfo()
            center.reverse()
            m.center = center
            w_exportscale.value = str(
                collectionfirst.projection().nominalScale().getInfo())
            if collectionfirst.get('system:time_start').getInfo() is not None:
                acquisition_times = ee.List(
                    collection.aggregate_array('system:time_start')).getInfo()
                timestamplist1 = []
                for timestamp in acquisition_times:
                    tmp = time.gmtime(int(timestamp) / 1000)
                    timestamplist1.append(time.strftime('%x', tmp))
                timestamplist1 = [x.replace('/', '') for x in timestamplist1]
                timestamplist1 = [
                    'T20' + x[4:] + x[0:4] for x in timestamplist1
                ]
                txt += 'Acquisition dates: %s' % str(timestamplist1)
            else:
                timestamplist1 = ['T%i' % (i + 1) for i in range(count)]
                txt += 'No time property available: acquisitions: %s' % str(
                    timestamplist1)
            collectionmean = collection.mean().select(0)
            percentiles = collectionfirst.select(0).rename('b0').reduceRegion(
                ee.Reducer.percentile([0, 98]), maxPixels=10e9)
            mn = ee.Number(percentiles.get('b0_p0'))
            mx = ee.Number(percentiles.get('b0_p98'))
            vorschau = collectionmean.visualize(min=mn,
                                                max=mx,
                                                opacity=w_opacity.value)
            imList = collection.toList(100)


#      get GEE S1 archive crs for eventual image series export
        archive_crs = ee.Image(getS1collection(coords).first()).select(
            0).projection().crs().getInfo()
        #      run the algorithm
        result = ee.Dictionary(
            omnibus(imList, w_significance.value, w_enl.value, w_median.value))
        w_preview.disabled = False
        s2_image = None
        #      display collection or S2
        if len(m.layers) > 1:
            m.remove_layer(m.layers[1])
        if w_S2.value:
            #          display sentinel-2 if available
            collection2 = getS2collection(coords)
            count1 = collection2.size().getInfo()
            if count1 > 0:
                s2_image = ee.Image(collection2.first()).select(
                    ['B8', 'B4', 'B3']).clip(poly)
                percentiles = s2_image.reduceRegion(
                    ee.Reducer.percentile([2, 98]),
                    scale=float(w_exportscale.value),
                    maxPixels=10e9)
                mn = percentiles.values(['B8_p2', 'B4_p2', 'B3_p2'])
                mx = percentiles.values(['B8_p98', 'B4_p98', 'B3_p98'])
                vorschau = s2_image.visualize(min=mn,
                                              max=mx,
                                              opacity=w_opacity.value)
                timestamp = s2_image.get('system:time_start').getInfo()
                timestamp = time.gmtime(int(timestamp) / 1000)
                timestamp = time.strftime('%x', timestamp).replace('/', '')
                timestamps2 = '20' + timestamp[4:] + timestamp[0:4]
                txt += '\nSentinel-2 from %s' % timestamps2
        w_text.value = txt
        m.add_layer(TileLayer(url=GetTileLayerUrl(vorschau)))
    except Exception as e:
        w_text.value = 'Error: %s' % e
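The 2-98 percentile stretch used to build the previews above is a self-contained pattern; a sketch with a synthetic single-band image:

import ee

ee.Initialize()

img = ee.Image.random(seed=0).rename('b0')  # stand-in for the collection mean
region = ee.Geometry.Rectangle([0, 0, 1, 1])

percentiles = img.reduceRegion(ee.Reducer.percentile([2, 98]),
                               geometry=region, scale=1000, maxPixels=1e9)
mn = ee.Number(percentiles.get('b0_p2'))
mx = ee.Number(percentiles.get('b0_p98'))

# visualize() accepts server-side numbers for min/max
preview = img.visualize(min=mn, max=mx)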
Esempio n. 22
def iteration(key, first):
    new = ee.Dictionary(first)
    val = dictionary.get(key)
    return new.set(key, val)
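iteration() reads `dictionary` from an enclosing scope that is not shown here. A hypothetical driver that supplies it and copies the dictionary key by key:

import ee

ee.Initialize()

dictionary = ee.Dictionary({'a': 1, 'b': 2})  # assumed enclosing-scope object

copied = ee.Dictionary(dictionary.keys().iterate(iteration, ee.Dictionary({})))
print(copied.getInfo())  # {'a': 1, 'b': 2}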
Esempio n. 23
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     self.watermask = ee.Image(self.inputs["watermask"]["ee_path"])
     self.quantiles = ee.Dictionary(self.quantiles)
Esempio n. 24
def on_collect_button_clicked(b):
    global result,collection,count,imList,poly,timestamplist1,timestamps2, \
           s2_image,rons,mean_incidence,collectionmosaic,collectionfirst,archive_crs,coords,wc 
    with w_out:
        try:
            if (w_collection.value == 'COPERNICUS/S1_GRD') or (w_collection.value == ''): 
                w_out.clear_output()
                print('running on GEE archive COPERNICUS/S1_GRD (please wait for raster overlay) ...')
                coords = ee.List(poly.bounds().coordinates().get(0))
                collection = getS1collection()
                if w_relativeorbitnumber.value > 0:
                    collection = collection.filter(ee.Filter.eq('relativeOrbitNumber_start', int(w_relativeorbitnumber.value)))   
                if w_platform.value != 'Both':
                    collection = collection.filter(ee.Filter.eq('platform_number', w_platform.value))         
                collection = collection.sort('system:time_start') 
                acquisition_times = ee.List(collection.aggregate_array('system:time_start')).getInfo()
                count = len(acquisition_times) 
                if count<2:
                    raise ValueError('Less than 2 images found')
                timestamplist = []
                for timestamp in acquisition_times:
                    tmp = time.gmtime(int(timestamp)/1000)
                    timestamplist.append(time.strftime('%x', tmp))  
#              make timestamps in YYYYMMDD format            
                timestamplist = [x.replace('/','') for x in timestamplist]  
                timestamplist = ['T20'+x[4:]+x[0:4] for x in timestamplist]         
                timestamplist = timestamplist[::int(w_stride.value)]
#              in case of duplicates add running integer
                timestamplist1 = [timestamplist[i] + '_' + str(i+1) for i in range(len(timestamplist))]     
                count = len(timestamplist)
                if count<2:
                    raise ValueError('Less than 2 images found, decrease stride')            
                relativeorbitnumbers = map(int,ee.List(collection.aggregate_array('relativeOrbitNumber_start')).getInfo())
                rons = list(set(relativeorbitnumbers))
                print('Images found: %i, platform: %s'%(count,w_platform.value))
                print('Number of 10m pixels contained: %i'%math.floor(poly.area().getInfo()/100.0))
                print('Acquisition dates: %s to %s'%(str(timestamplist[0]),str(timestamplist[-1])))
                print('Relative orbit numbers: '+str(rons))
                if len(rons)==1:
                    mean_incidence = get_incidence_angle(collection.first())
                    print('Mean incidence angle: %f'%mean_incidence)
                else:
                    mean_incidence = 'undefined'
                    print('Mean incidence angle: (select one rel. orbit)')
                pcollection = collection.map(get_vvvh)            
                collectionfirst = ee.Image(pcollection.first())
                w_exportscale.value = collectionfirst.projection().nominalScale().getInfo()          
                pList = pcollection.toList(500)   
                first = ee.Dictionary({'imlist':ee.List([]),'poly':poly,'enl':ee.Number(w_enl.value),'ctr':ee.Number(0),'stride':ee.Number(int(w_stride.value))}) 
                imList = ee.List(ee.Dictionary(pList.iterate(clipList,first)).get('imlist'))              
#              get a vorschau as collection mean                                           
                collectionmosaic = collection.mosaic().select(0,1).rename('b0','b1')
                percentiles = collectionmosaic.reduceRegion(ee.Reducer.percentile([2,98]),geometry=poly,scale=w_exportscale.value,maxPixels=10e9)
                mn = ee.Number(percentiles.get('b0_p2'))
                mx = ee.Number(percentiles.get('b0_p98'))        
                vorschau = collectionmosaic.select(0).visualize(min=mn, max=mx, opacity=w_opacity.value) 
            else:
                w_out.clear_output()
                collection = ee.ImageCollection(w_collection.value)
                print('running on local collection %s \n ignoring start and end dates (please wait for raster overlay) ...'%w_collection.value)  
                count = collection.size().getInfo()  
                print('Images found: %i'%count )          
                collectionfirst = ee.Image(collection.first())  
                poly = collectionfirst.geometry()   
                coords = ee.List(poly.bounds().coordinates().get(0))   
                center = poly.centroid().coordinates().getInfo()
                center.reverse()
                m.center = center                
                w_exportscale.value = collectionfirst.projection().nominalScale().getInfo()
                if collectionfirst.get('system:time_start').getInfo() is not None:
                    acquisition_times = ee.List(collection.aggregate_array('system:time_start')).getInfo()  
                    timestamplist1 = []
                    for timestamp in acquisition_times:
                        tmp = time.gmtime(int(timestamp)/1000)
                        timestamplist1.append(time.strftime('%x', tmp))            
                    timestamplist1 = [x.replace('/','') for x in timestamplist1]  
                    timestamplist1 = ['T20'+x[4:]+x[0:4] for x in timestamplist1]    
                    print('Acquisition dates: %s'%str(timestamplist1))    
                else:
                    timestamplist1 = ['T%i'%(i+1) for i in range(count)]
                    print('No time property available: acquisitions: %s'%str(timestamplist1))         
#              get a vorschau from collection mean                 
                collectionmosaic = collection.mosaic().clip(poly)
                percentiles = collectionmosaic.select(0).rename('b0').reduceRegion(ee.Reducer.percentile([2,98]),scale=w_exportscale.value,maxPixels=10e9)
                mn = ee.Number(percentiles.get('b0_p2'))
                mx = ee.Number(percentiles.get('b0_p98'))        
                vorschau = collectionmosaic.select(0).visualize(min=mn, max=mx, opacity=w_opacity.value)       
                imList = collection.toList(100)
#          get GEE S1 archive crs for eventual image series export               
#            archive_crs = ee.Image(getS1collection(coords).first()).select(0).projection().crs().getInfo()
            archive_crs = ee.Image(getS1collection().first()).select(0).projection().crs().getInfo()
#          run the algorithm        
            result = omnibus(imList,w_significance.value,w_enl.value,w_median.value)         
            w_preview.disabled = False
            w_ENL.disabled = False
            w_export_atsf.disabled = True
            s2_image = None
#          display collection or S2 
            if len(m.layers)>3:
                m.remove_layer(m.layers[3])
            if w_S2.value:
#              display sentinel-2 if available              
                collection2 = getS2collection() 
                count1 = collection2.size().getInfo()
                if count1>0:    
                    s2_image =  ee.Image(collection2.first()).select(['B8','B4','B3']).clip(poly)        
                    percentiles = s2_image.reduceRegion(ee.Reducer.percentile([2,98]),scale=w_exportscale.value,maxPixels=10e9)         
                    mn = percentiles.values(['B8_p2','B4_p2','B3_p2'])
                    mx = percentiles.values(['B8_p98','B4_p98','B3_p98'])
                    vorschau = s2_image.visualize(min=mn,max=mx,opacity=w_opacity.value)           
                    timestamp = s2_image.get('system:time_start').getInfo() 
                    timestamp = time.gmtime(int(timestamp)/1000)
                    timestamp = time.strftime('%x', timestamp).replace('/','')
                    timestamps2 = '20'+timestamp[4:]+timestamp[0:4]
                    print('Sentinel-2 from %s'%timestamps2) 
            m.add_layer(TileLayer(url=GetTileLayerUrl(vorschau)))
          
        except Exception as e:
            print('Error: %s'%e)       
Esempio n. 25
def imad1(current, prev):
    ''' Iteratively re-weighted MAD '''
    image = ee.Image(ee.Dictionary(prev).get('image'))
    chi2 = ee.Image(ee.Dictionary(prev).get('chi2'))
    allrhos = ee.List(ee.Dictionary(prev).get('allrhos'))
    region = image.geometry()
    nBands = image.bandNames().length().divide(2)
    weights = chi2cdf(chi2, nBands).subtract(1).multiply(-1)
    centeredImage, covarArray = covarw(image, weights)
    bNames = centeredImage.bandNames()
    bNames1 = bNames.slice(0, nBands)
    bNames2 = bNames.slice(nBands)
    centeredImage1 = centeredImage.select(bNames1)
    centeredImage2 = centeredImage.select(bNames2)
    s11 = covarArray.slice(0, 0, nBands).slice(1, 0, nBands)
    s22 = covarArray.slice(0, nBands).slice(1, nBands)
    s12 = covarArray.slice(0, 0, nBands).slice(1, nBands)
    s21 = covarArray.slice(0, nBands).slice(1, 0, nBands)
    c1 = s12.matrixMultiply(s22.matrixInverse()).matrixMultiply(s21)
    b1 = s11
    c2 = s21.matrixMultiply(s11.matrixInverse()).matrixMultiply(s12)
    b2 = s22
    #  solution of generalized eigenproblems
    lambdas, A = geneiv(c1, b1)
    _, B = geneiv(c2, b2)
    rhos = lambdas.sqrt().project(ee.List([1]))
    #  sort in increasing order
    keys = ee.List.sequence(nBands, 1, -1)
    A = A.sort([keys])
    B = B.sort([keys])
    rhos = rhos.sort(keys)
    #  test for convergence
    lastrhos = ee.Array(allrhos.get(-1))
    done = rhos.subtract(lastrhos) \
               .abs() \
               .reduce(ee.Reducer.max(),ee.List([0])) \
               .lt(ee.Number(0.001)) \
               .toList() \
               .get(0)
    allrhos = allrhos.cat([rhos.toList()])
    #  MAD variances
    sigma2s = rhos.subtract(1).multiply(-2).toList()
    sigma2s = ee.Image.constant(sigma2s)
    #  ensure sum of positive correlations between X and U is positive
    tmp = s11.matrixDiagonal().sqrt()
    ones = tmp.multiply(0).add(1)
    tmp = ones.divide(tmp).matrixToDiag()
    s = tmp.matrixMultiply(s11).matrixMultiply(A).reduce(
        ee.Reducer.sum(), [0]).transpose()
    A = A.matrixMultiply(s.divide(s.abs()).matrixToDiag())
    #  ensure positive correlation
    tmp = A.transpose().matrixMultiply(s12).matrixMultiply(B).matrixDiagonal()
    tmp = tmp.divide(tmp.abs()).matrixToDiag()
    B = B.matrixMultiply(tmp)
    #  canonical and MAD variates
    centeredImage1Array = centeredImage1.toArray().toArray(1)
    centeredImage2Array = centeredImage2.toArray().toArray(1)
    U = ee.Image(A.transpose()).matrixMultiply(centeredImage1Array) \
                   .arrayProject([0]) \
                   .arrayFlatten([bNames1])
    V = ee.Image(B.transpose()).matrixMultiply(centeredImage2Array) \
                   .arrayProject([0]) \
                   .arrayFlatten([bNames2])
    MAD = U.subtract(V)
    #  chi square image
    chi2 = MAD.pow(2) \
              .divide(sigma2s) \
              .reduce(ee.Reducer.sum()) \
              .clip(region)
    return ee.Dictionary({
        'done': done,
        'image': image,
        'allrhos': allrhos,
        'chi2': chi2,
        'MAD': MAD
    })
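imad1() depends on chi2cdf(), covarw() and geneiv(), none of which are part of this snippet. For the chi-square CDF, a common Earth Engine formulation (an assumption here, not necessarily the author's exact helper) uses the regularized lower incomplete gamma function:

import ee

def chi2cdf(chi2, df):
    """Chi-square CDF of the chi2 image with df degrees of freedom.

    Sketch based on the identity P(x; df) = gammainc(df / 2, x / 2);
    assumed to match the helper called by imad1() above.
    """
    return ee.Image(chi2.divide(2)).gammainc(ee.Number(df).divide(2))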
Esempio n. 26
def find_NAIP(region, add_NDVI=True, add_NDWI=True):
    """Create annual NAIP mosaic for a given region.

    Args:
        region (object): ee.Geometry
        add_NDVI (bool, optional): Whether to add the NDVI band. Defaults to True.
        add_NDWI (bool, optional): Whether to add the NDWI band. Defaults to True.

    Returns:
        object: ee.ImageCollection
    """

    init_collection = ee.ImageCollection('USDA/NAIP/DOQQ') \
        .filterBounds(region) \
        .filterDate('2009-01-01', '2019-12-31') \
        .filter(ee.Filter.listContains("system:band_names", "N"))

    yearList = ee.List(
        init_collection.distinct(['system:time_start'
                                  ]).aggregate_array('system:time_start'))
    init_years = yearList.map(lambda y: ee.Date(y).get('year'))

    # remove duplicates
    init_years = ee.Dictionary(
        init_years.reduce(ee.Reducer.frequencyHistogram())).keys()
    years = init_years.map(lambda x: ee.Number.parse(x))

    # years = init_years.map(lambda x: x)

    # Available NAIP years with NIR band
    def NAIPAnnual(year):
        start_date = ee.Date.fromYMD(year, 1, 1)
        end_date = ee.Date.fromYMD(year, 12, 31)
        collection = init_collection.filterDate(start_date, end_date)
        # .filterBounds(geometry)
        # .filter(ee.Filter.listContains("system:band_names", "N"))
        time_start = ee.Date(
            ee.List(collection.aggregate_array(
                'system:time_start')).sort().get(0)).format('YYYY-MM-dd')
        time_end = ee.Date(
            ee.List(collection.aggregate_array('system:time_end')).sort().get(
                -1)).format('YYYY-MM-dd')
        col_size = collection.size()
        image = ee.Image(collection.mosaic().clip(region))

        if add_NDVI:
            NDVI = ee.Image(image).normalizedDifference(['N',
                                                         'R']).select(['nd'],
                                                                      ['ndvi'])
            image = image.addBands(NDVI)

        if add_NDWI:
            NDWI = ee.Image(image).normalizedDifference(['G',
                                                         'N']).select(['nd'],
                                                                      ['ndwi'])
            image = image.addBands(NDWI)

        return image.set({
            'system:time_start': time_start,
            'system:time_end': time_end,
            'tiles': col_size
        })

    # remove years with incomplete coverage
    naip = ee.ImageCollection(years.map(NAIPAnnual))
    mean_size = ee.Number(naip.aggregate_mean('tiles'))
    total_sd = ee.Number(naip.aggregate_total_sd('tiles'))
    threshold = mean_size.subtract(total_sd.multiply(1))
    naip = naip.filter(
        ee.Filter.Or(ee.Filter.gte('tiles', threshold),
                     ee.Filter.gte('tiles', 15)))
    naip = naip.filter(ee.Filter.gte('tiles', 7))

    naip_count = naip.size()
    naip_seq = ee.List.sequence(0, naip_count.subtract(1))

    def set_index(index):
        img = ee.Image(naip.toList(naip_count).get(index))
        return img.set({'system:uid': ee.Number(index).toUint8()})

    naip = naip_seq.map(set_index)

    return ee.ImageCollection(naip)
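A minimal usage sketch for find_NAIP(), with a hypothetical area of interest:

import ee

ee.Initialize()

region = ee.Geometry.Rectangle([-90.08, 29.90, -90.06, 29.92])  # hypothetical AOI
naip = find_NAIP(region)
print(naip.size().getInfo())               # annual mosaics that passed the filters
print(naip.first().bandNames().getInfo())  # e.g. ['R', 'G', 'B', 'N', 'ndvi', 'ndwi']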
Esempio n. 27
def ensure_default_properties(obj):
    obj = ee.Dictionary(obj)
    default_properties = ee.Dictionary({"mean": -9999, "count": -9999})
    return default_properties.combine(obj)
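A small sketch of how ensure_default_properties() guards a reduceRegion result before its keys are read:

import ee

ee.Initialize()

stats = ee.Dictionary({'count': 120})  # 'mean' missing, e.g. a fully masked region
stats = ee.Dictionary(ensure_default_properties(stats))
print(stats.getInfo())  # {'count': 120, 'mean': -9999}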
def main(ini_path=None,
         overwrite_flag=False,
         delay_time=0,
         gee_key_file=None,
         max_ready=-1,
         reverse_flag=False):
    """Compute annual Tcorr images from scene images

    Parameters
    ----------
    ini_path : str
        Input file path.
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).
    delay_time : float, optional
        Delay time in seconds between starting export tasks (or checking the
        number of queued tasks, see "max_ready" parameter).  The default is 0.
    gee_key_file : str, None, optional
        Earth Engine service account JSON key file (the default is None).
    max_ready: int, optional
        Maximum number of queued "READY" tasks.  The default is -1, which
        implies no limit to the number of tasks that will be submitted.
    reverse_flag : bool, optional
        If True, process WRS2 tiles in reverse order.

    """
    logging.info('\nCompute annual Tcorr images from scene images')

    ini = utils.read_ini(ini_path)

    model_name = 'SSEBOP'
    # model_name = ini['INPUTS']['et_model'].upper()

    tmax_name = ini[model_name]['tmax_source']

    export_id_fmt = 'tcorr_scene_{product}_{wrs2}_annual_from_scene'
    asset_id_fmt = '{coll_id}/{wrs2}'

    tcorr_annual_coll_id = '{}/{}_annual_from_scene'.format(
        ini['EXPORT']['export_coll'], tmax_name.lower())

    wrs2_coll_id = 'projects/earthengine-legacy/assets/' \
                   'projects/usgs-ssebop/wrs2_descending_custom'
    wrs2_tile_field = 'WRS2_TILE'
    # wrs2_path_field = 'PATH'
    # wrs2_row_field = 'ROW'

    try:
        wrs2_tiles = str(ini['INPUTS']['wrs2_tiles'])
        wrs2_tiles = [x.strip() for x in wrs2_tiles.split(',')]
        wrs2_tiles = sorted([x.lower() for x in wrs2_tiles if x])
    except KeyError:
        wrs2_tiles = []
        logging.debug('  wrs2_tiles: not set in INI, defaulting to []')
    except Exception as e:
        raise e

    try:
        study_area_extent = str(ini['INPUTS']['study_area_extent']) \
            .replace('[', '').replace(']', '').split(',')
        study_area_extent = [float(x.strip()) for x in study_area_extent]
    except KeyError:
        study_area_extent = None
        logging.debug(
            '  study_area_extent: not set in INI, defaulting to None')
    except Exception as e:
        raise e

    # TODO: Add try/except blocks and default values?
    # TODO: Filter Tcorr scene collection based on collections parameter
    # collections = [x.strip() for x in ini['INPUTS']['collections'].split(',')]
    cloud_cover = float(ini['INPUTS']['cloud_cover'])
    min_pixel_count = float(ini['TCORR']['min_pixel_count'])
    min_scene_count = float(ini['TCORR']['min_scene_count'])

    if (tmax_name.upper() == 'CIMIS'
            and ini['INPUTS']['end_date'] < '2003-10-01'):
        logging.error(
            '\nCIMIS is not currently available before 2003-10-01, exiting\n')
        sys.exit()
    elif (tmax_name.upper() == 'DAYMET'
          and ini['INPUTS']['end_date'] > '2018-12-31'):
        logging.warning('\nDAYMET is not currently available past 2018-12-31, '
                        'using median Tmax values\n')
        # sys.exit()
    # elif (tmax_name.upper() == 'TOPOWX' and
    #         ini['INPUTS']['end_date'] > '2017-12-31'):
    #     logging.warning(
    #         '\nTOPOWX is not currently available past 2017-12-31, '
    #         'using median Tmax values\n')
    #     # sys.exit()

    logging.info('\nInitializing Earth Engine')
    if gee_key_file:
        logging.info(
            '  Using service account key file: {}'.format(gee_key_file))
        # The "EE_ACCOUNT" parameter is not used if the key file is valid
        ee.Initialize(ee.ServiceAccountCredentials('x', key_file=gee_key_file))
    else:
        ee.Initialize()

    logging.debug('\nTmax properties')
    tmax_source = tmax_name.split('_', 1)[0]
    tmax_version = tmax_name.split('_', 1)[1]
    tmax_coll_id = 'projects/earthengine-legacy/assets/' \
                   'projects/usgs-ssebop/tmax/{}'.format(tmax_name.lower())
    tmax_coll = ee.ImageCollection(tmax_coll_id)
    tmax_mask = ee.Image(tmax_coll.first()).select([0]).multiply(0)
    logging.debug('  Collection: {}'.format(tmax_coll_id))
    logging.debug('  Source: {}'.format(tmax_source))
    logging.debug('  Version: {}'.format(tmax_version))

    # Get the Tcorr scene image collection properties
    logging.debug('\nTcorr scene collection')
    tcorr_scene_coll_id = '{}/{}_scene'.format(ini['EXPORT']['export_coll'],
                                               tmax_name.lower())

    logging.debug('\nExport properties')
    export_info = utils.get_info(ee.Image(tmax_mask))
    if 'daymet' in tmax_name.lower():
        # Custom smaller extent for DAYMET focused on CONUS
        export_extent = [-1999750, -1890500, 2500250, 1109500]
        export_shape = [4500, 3000]
        export_geo = [1000, 0, -1999750, 0, -1000, 1109500]
        # Custom medium extent for DAYMET of CONUS, Mexico, and southern Canada
        # export_extent = [-2099750, -3090500, 2900250, 1909500]
        # export_shape = [5000, 5000]
        # export_geo = [1000, 0, -2099750, 0, -1000, 1909500]
        export_crs = export_info['bands'][0]['crs']
    else:
        export_crs = export_info['bands'][0]['crs']
        export_geo = export_info['bands'][0]['crs_transform']
        export_shape = export_info['bands'][0]['dimensions']
        # export_geo = ee.Image(tmax_mask).projection().getInfo()['transform']
        # export_crs = ee.Image(tmax_mask).projection().getInfo()['crs']
        # export_shape = ee.Image(tmax_mask).getInfo()['bands'][0]['dimensions']
        export_extent = [
            export_geo[2], export_geo[5] + export_shape[1] * export_geo[4],
            export_geo[2] + export_shape[0] * export_geo[0], export_geo[5]
        ]
    export_geom = ee.Geometry.Rectangle(export_extent,
                                        proj=export_crs,
                                        geodesic=False)
    logging.debug('  CRS: {}'.format(export_crs))
    logging.debug('  Extent: {}'.format(export_extent))
    logging.debug('  Geo: {}'.format(export_geo))
    logging.debug('  Shape: {}'.format(export_shape))

    if study_area_extent is None:
        if 'daymet' in tmax_name.lower():
            # CGM - For now force DAYMET to a slightly smaller "CONUS" extent
            study_area_extent = [-125, 25, -65, 49]
            # study_area_extent =  [-125, 25, -65, 52]
        elif 'cimis' in tmax_name.lower():
            study_area_extent = [-124, 35, -119, 42]
        else:
            # TODO: Make sure output from bounds is in WGS84
            study_area_extent = tmax_mask.geometry().bounds().getInfo()
        logging.debug(f'\nStudy area extent not set in INI, '
                      f'default to {study_area_extent}')
    study_area_geom = ee.Geometry.Rectangle(study_area_extent,
                                            proj='EPSG:4326',
                                            geodesic=False)

    if not ee.data.getInfo(tcorr_annual_coll_id):
        logging.info('\nExport collection does not exist and will be built'
                     '\n  {}'.format(tcorr_annual_coll_id))
        input('Press ENTER to continue')
        ee.data.createAsset({'type': 'IMAGE_COLLECTION'}, tcorr_annual_coll_id)

    # Get current asset list
    logging.debug('\nGetting GEE asset list')
    asset_list = utils.get_ee_assets(tcorr_annual_coll_id)
    # if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
    #     pprint.pprint(asset_list[:10])

    # Get current running tasks
    tasks = utils.get_ee_tasks()
    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
        logging.debug('  Tasks: {}\n'.format(len(tasks)))
        input('ENTER')

    # Limit by month and year
    month_list = list(range(1, 13))
    # try:
    #     month_list = sorted(list(utils.parse_int_set(ini['TCORR']['months'])))
    # except:
    #     logging.info('\nTCORR "months" parameter not set in the INI,'
    #                  '\n  Defaulting to all months (1-12)\n')
    #     month_list = list(range(1, 13))
    try:
        year_list = sorted(list(utils.parse_int_set(ini['TCORR']['years'])))
    except Exception:
        logging.info('\nTCORR "years" parameter not set in the INI,'
                     '\n  Defaulting to all available years\n')
        year_list = []

    # Get the list of WRS2 tiles that intersect the data area and study area
    wrs2_coll = ee.FeatureCollection(wrs2_coll_id) \
        .filterBounds(export_geom) \
        .filterBounds(study_area_geom)
    if wrs2_tiles:
        wrs2_coll = wrs2_coll.filter(
            ee.Filter.inList(wrs2_tile_field, wrs2_tiles))
    wrs2_info = wrs2_coll.getInfo()['features']

    # Iterate over date ranges
    for wrs2_ftr in sorted(wrs2_info,
                           key=lambda k: k['properties']['WRS2_TILE'],
                           reverse=reverse_flag):
        wrs2_tile = wrs2_ftr['properties'][wrs2_tile_field]
        logging.info('{}'.format(wrs2_tile))

        wrs2_path = int(wrs2_tile[1:4])
        wrs2_row = int(wrs2_tile[5:8])
        # wrs2_path = wrs2_ftr['properties'][wrs2_path_field]
        # wrs2_row = wrs2_ftr['properties'][wrs2_row_field]

        export_id = export_id_fmt.format(product=tmax_name.lower(),
                                         wrs2=wrs2_tile)
        logging.debug('  Export ID: {}'.format(export_id))

        asset_id = asset_id_fmt.format(coll_id=tcorr_annual_coll_id,
                                       wrs2=wrs2_tile)
        logging.debug('  Asset ID: {}'.format(asset_id))

        if overwrite_flag:
            if export_id in tasks.keys():
                logging.debug('  Task already submitted, cancelling')
                ee.data.cancelTask(tasks[export_id]['id'])
            # This is intentionally not an "elif" so that a task can be
            # cancelled and an existing image/file/asset can be removed
            if asset_id in asset_list:
                logging.debug('  Asset already exists, removing')
                ee.data.deleteAsset(asset_id)
        else:
            if export_id in tasks.keys():
                logging.debug('  Task already submitted, exiting')
                continue
            elif asset_id in asset_list:
                logging.debug('  Asset already exists, skipping')
                continue

        tcorr_coll = ee.ImageCollection(tcorr_scene_coll_id) \
            .filterMetadata('wrs2_tile', 'equals', wrs2_tile) \
            .filterMetadata('tcorr_pixel_count', 'not_less_than', min_pixel_count) \
            .filter(ee.Filter.inList('year', year_list))
        # TODO: Should the CLOUD_COVER_LAND filter be re-applied here?
        #     .filterMetadata('CLOUD_COVER_LAND', 'less_than', cloud_cover)
        #     .filterDate(start_date, end_date)
        #     .filterBounds(ee.Geometry(wrs2_ftr['geometry']))

        # Use a common reducer for the images and property stats
        reducer = ee.Reducer.median() \
            .combine(ee.Reducer.count(), sharedInputs=True)

        # Compute stats from the collection images
        # This might be used when Tcorr is spatial
        # tcorr_img = tcorr_coll.reduce(reducer).rename(['tcorr', 'count'])

        # Compute stats from the image properties
        tcorr_stats = ee.List(tcorr_coll.aggregate_array('tcorr_value')) \
            .reduce(reducer)
        tcorr_stats = ee.Dictionary(tcorr_stats) \
            .combine({'median': 0, 'count': 0}, overwrite=False)
        tcorr = ee.Number(tcorr_stats.get('median'))
        count = ee.Number(tcorr_stats.get('count'))
        index = count.lt(min_scene_count)\
            .multiply(NODATA_TCORR_INDEX - ANNUAL_TCORR_INDEX)\
            .add(ANNUAL_TCORR_INDEX)
        # index = ee.Algorithms.If(count.gte(min_scene_count), 6, 9)

        # Clip the mask image to the Landsat footprint
        # Change mask values to 1 if count >= threshold
        # Mask values of 0 will be set to nodata
        mask_img = tmax_mask.add(count.gte(min_scene_count)) \
            .clip(ee.Geometry(wrs2_ftr['geometry']))
        output_img = ee.Image(
                [mask_img.multiply(tcorr), mask_img.multiply(count)]) \
            .rename(['tcorr', 'count']) \
            .updateMask(mask_img.unmask(0))

        # # Write an empty image if the pixel count is too low
        # # CGM: Check/test if this can be combined into a single If()
        # tcorr_img = ee.Algorithms.If(
        #     count.gte(min_scene_count),
        #     tmax_mask.add(tcorr), tmax_mask.updateMask(0))
        # count_img = ee.Algorithms.If(
        #     count.gte(min_scene_count),
        #     tmax_mask.add(count), tmax_mask.updateMask(0))
        #
        # # Clip to the Landsat image footprint
        # output_img = ee.Image([tcorr_img, count_img]) \
        #     .rename(['tcorr', 'count']) \
        #     .clip(ee.Geometry(wrs2_ftr['geometry']))
        # # Clear the transparency mask
        # output_img = output_img.updateMask(output_img.unmask(0))

        output_img = output_img.set({
            'date_ingested': datetime.datetime.today().strftime('%Y-%m-%d'),
            'model_name': model_name,
            'model_version': ssebop.__version__,
            # 'system:time_start': utils.millis(start_dt),
            'tcorr_value': tcorr,
            'tcorr_index': index,
            'tcorr_scene_count': count,
            'tmax_source': tmax_source.upper(),
            'tmax_version': tmax_version.upper(),
            'wrs2_path': wrs2_path,
            'wrs2_row': wrs2_row,
            'wrs2_tile': wrs2_tile,
            'years': ','.join(map(str, year_list)),
            # 'year_start': year_list[0],
            # 'year_end': year_list[-1],
        })
        # pprint.pprint(output_img.getInfo())
        # input('ENTER')

        logging.debug('  Building export task')
        task = ee.batch.Export.image.toAsset(
            image=output_img,
            description=export_id,
            assetId=asset_id,
            crs=export_crs,
            crsTransform='[' + ','.join(list(map(str, export_geo))) + ']',
            dimensions='{0}x{1}'.format(*export_shape),
        )

        logging.info('  Starting export task')
        utils.ee_task_start(task)

        # Pause before starting the next export task
        utils.delay_task(delay_time, max_ready)
        logging.debug('')
        def wrs2_tcorr(ftr):
            # Build & merge the Landsat collections for the target path/row
            # Time filters are to remove bad (L5) and pre-op (L8) images
            path = ee.Number(ee.Feature(ftr).get('PATH'))
            row = ee.Number(ee.Feature(ftr).get('ROW'))

            def tcorr_img_func(image):
                tcorr_daily_img = image \
                    .addBands(image.multiply(0).add(
                        int(ini['TCORR']['min_pixel_count']))) \
                    .rename(['tcorr', 'count'])

                # Get Tcorr from the WRS2 centroid of the daily images
                t_stats = tcorr_daily_img.reduceRegion(
                    reducer=ee.Reducer.first(),
                    scale=30,
                    geometry=ee.Feature(ftr).geometry().centroid(),
                    bestEffort=False,
                    tileScale=1)
                # Add a 0 tcorr value for any image that can be computed
                t_stats = ee.Dictionary(t_stats) \
                    .combine({'tcorr': 0}, overwrite=False)

                tcorr = ee.Number(t_stats.get('tcorr'))
                # Use a dummy pixel count (0 or Tcorr * 2000)
                count = ee.Number(t_stats.get('tcorr')) \
                    .multiply(2 * int(ini['TCORR']['min_pixel_count']))
                # count = ee.Number(t_stats.get('count'))

                return tmax_mask.add(ee.Image.constant(tcorr)) \
                    .rename(['tcorr']) \
                    .set({
                        'system:time_start': image.get('system:time_start'),
                        'tcorr': tcorr,
                        'count': count,
                    })

            # Use a common reducer for the images and property stats
            reducer = ee.Reducer.median() \
                .combine(ee.Reducer.count(), sharedInputs=True)

            # Compute median annual value for all images in the WRS2 tile
            wrs2_tcorr_coll = ee.ImageCollection(
                    tcorr_daily_coll.map(tcorr_img_func)) \
                .filterMetadata('count', 'not_less_than',
                                int(ini['TCORR']['min_pixel_count']))

            wrs2_tcorr_img = wrs2_tcorr_coll.reduce(reducer) \
                .rename(['tcorr', 'count'])

            # Compute stats from the properties also
            wrs2_tcorr_stats = ee.Dictionary(
                ee.List(
                    wrs2_tcorr_coll.aggregate_array('tcorr')).reduce(reducer))
            wrs2_tcorr_stats = wrs2_tcorr_stats \
                .combine({'median': 0, 'count': 0}, overwrite=False)

            return wrs2_tcorr_img \
                .clip(ee.Feature(ftr).geometry()) \
                .set({
                    'wrs2_tile': path.format('%03d').cat(row.format('%03d')),
                    # 'wrs2_tile': ftr.get('WRS2_TILE'),
                    'tcorr': ee.Number(wrs2_tcorr_stats.get('median')),
                    'count': ee.Number(wrs2_tcorr_stats.get('count')),
                    'index': 2,
                })
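The tcorr_index arithmetic in main() above replaces an ee.Algorithms.If with pure server-side arithmetic. A standalone sketch, assuming the module-level constants match the commented If(count.gte(min_scene_count), 6, 9):

import ee

ee.Initialize()

ANNUAL_TCORR_INDEX = 6   # assumed values, from the commented If() above
NODATA_TCORR_INDEX = 9

count = ee.Number(3)
min_scene_count = 5

# count.lt(threshold) is 0 or 1, scaled and shifted onto the two indices
index = count.lt(min_scene_count) \
    .multiply(NODATA_TCORR_INDEX - ANNUAL_TCORR_INDEX) \
    .add(ANNUAL_TCORR_INDEX)
print(index.getInfo())  # 9 here; 6 once count >= min_scene_count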
Esempio n. 30
def clipList(current, prev):
    ''' clip a list of images '''
    imlist = ee.List(ee.Dictionary(prev).get('imlist'))
    poly = ee.Dictionary(prev).get('poly')
    imlist = imlist.add(ee.Image(current).clip(poly))
    return ee.Dictionary({'imlist': imlist, 'poly': poly})
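A minimal driver for clipList(), with a hypothetical region; iterate() threads the accumulator dictionary through the image list:

import ee

ee.Initialize()

poly = ee.Geometry.Rectangle([8.47, 49.98, 8.55, 50.05])  # hypothetical AOI
images = ee.ImageCollection('COPERNICUS/S1_GRD') \
    .filterBounds(poly) \
    .filterDate('2020-01-01', '2020-02-01') \
    .toList(100)

first = ee.Dictionary({'imlist': ee.List([]), 'poly': poly})
imList = ee.List(ee.Dictionary(images.iterate(clipList, first)).get('imlist'))
print(imList.size().getInfo())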