def test_Collection_init_duplicate_collections_exception():
    """Duplicate Landsat sensor types in the collections list raise ValueError"""
    args = default_coll_args()
    duplicate_lists = [
        ['LANDSAT/LC08/C01/T1_RT_TOA', 'LANDSAT/LC08/C01/T1_TOA'],
        ['LANDSAT/LC08/C01/T1_SR', 'LANDSAT/LC08/C01/T1_TOA'],
    ]
    for coll_list in duplicate_lists:
        args['collections'] = coll_list
        with pytest.raises(ValueError):
            model.Collection(**args)
def test_Collection_init_cloud_cover_exception():
    """Invalid cloud_cover_max values raise TypeError or ValueError"""
    args = default_coll_args()
    # Non-numeric -> TypeError; out of the [0, 100] range -> ValueError
    for cloud_cover_max, exception in [
            ('A', TypeError), (-1, ValueError), (101, ValueError)]:
        args['cloud_cover_max'] = cloud_cover_max
        with pytest.raises(exception):
            model.Collection(**args)
def test_Collection_overpass_class_variables():
    """Test that custom class variables are passed through to build function"""
    args = default_coll_args()
    args['variables'] = ['et']
    output = utils.getinfo(model.Collection(**args).overpass())
    band_ids = {band['id'] for ftr in output['features']
                for band in ftr['bands']}
    assert sorted(band_ids) == args['variables']
def test_Collection_init_enddate_exception():
    """Non ISO-format end date strings raise ValueError"""
    args = default_coll_args()
    args.update(start_date='2000-01-01', end_date='1/2/2000')
    with pytest.raises(ValueError):
        model.Collection(**args)
def test_Collection_build_cloud_cover():
    """Scenes above the cloud cover maximum should be filtered out"""
    # CGM - The filtered images should probably be looked up programmatically
    args = default_coll_args()
    args['cloud_cover_max'] = 0.5
    coll_obj = model.Collection(**args)
    scene_ids = parse_scene_id(utils.getinfo(coll_obj._build(variables=['et'])))
    assert 'LE07_044033_20170724' not in scene_ids
def test_Collection_interpolate_etr_source_not_set():
    """Test if Exception is raised if etr_source is not set"""
    args = default_coll_args()
    args.pop('etr_source')
    # args.pop('etr_band')
    with pytest.raises(ValueError):
        utils.getinfo(model.Collection(**args).interpolate())
def test_Collection_build_dates():
    """Dates passed to _build() should override the dates set at init"""
    args = default_coll_args()
    args['start_date'] = '2017-07-24'
    coll_obj = model.Collection(**args)
    output = utils.getinfo(
        coll_obj._build(start_date='2017-07-16', end_date='2017-07-17'))
    assert parse_scene_id(output) == ['LC08_044033_20170716']
def test_Collection_init_swapped_date_exception():
    """Test if Exception is raised when start_date == end_date"""
    args = default_coll_args()
    args.update(start_date='2017-01-01', end_date='2017-01-01')
    with pytest.raises(ValueError):
        model.Collection(**args)
def test_Collection_init_collection_filter(coll_id, start_date, end_date):
    """Test that collection IDs are filtered based on start/end dates"""
    # The target collection ID should be removed from the collections lists
    args = default_coll_args()
    args.update(collections=[coll_id], start_date=start_date,
                end_date=end_date)
    assert model.Collection(**args).collections == []
def test_Collection_build_landsat_sr():
    """Test if the Landsat SR collections can be built"""
    args = default_coll_args()
    args['collections'] = ['LANDSAT/LC08/C01/T1_SR', 'LANDSAT/LE07/C01/T1_SR']
    output = utils.getinfo(model.Collection(**args)._build())
    assert parse_scene_id(output) == SCENE_ID_LIST
    band_ids = {band['id'] for ftr in output['features']
                for band in ftr['bands']}
    assert sorted(band_ids) == VARIABLES
def test_Collection_interpolate_default():
    """Default t_interval should be custom"""
    output = utils.getinfo(
        model.Collection(**default_coll_args()).interpolate())
    assert output['type'] == 'ImageCollection'
    assert parse_scene_id(output) == ['20170701']
    band_ids = {band['id'] for ftr in output['features']
                for band in ftr['bands']}
    assert sorted(band_ids) == VARIABLES
def test_Collection_interpolate_t_interval_custom():
    """Test if the custom time interval parameter works"""
    coll_obj = model.Collection(**default_coll_args())
    output = utils.getinfo(coll_obj.interpolate(t_interval='custom'))
    assert output['type'] == 'ImageCollection'
    assert parse_scene_id(output) == ['20170701']
    band_ids = {band['id'] for ftr in output['features']
                for band in ftr['bands']}
    assert sorted(band_ids) == VARIABLES
def test_Collection_interpolate_etr_source_method():
    """Test setting etr_source in the interpolate call"""
    args = default_coll_args()
    args.pop('etr_source')
    args.pop('etr_band')
    output = utils.getinfo(model.Collection(**args).interpolate(
        etr_source='IDAHO_EPSCOR/GRIDMET', etr_band='etr', etr_factor=0.85))
    band_ids = {band['id'] for ftr in output['features']
                for band in ftr['bands']}
    assert sorted(band_ids) == VARIABLES
def test_Collection_build_filter_args():
    """Custom filter_args should restrict the built collection"""
    coll_id = 'LANDSAT/LC08/C01/T1_SR'
    args = default_coll_args()
    args['collections'] = [coll_id]
    args['geometry'] = ee.Geometry.Rectangle(-125, 35, -120, 40)
    # Limit the collection to a single WRS2 path/row via metadata filters
    args['filter_args'] = {coll_id: [
        {'type': 'equals', 'leftField': 'WRS_PATH', 'rightValue': 44},
        {'type': 'equals', 'leftField': 'WRS_ROW', 'rightValue': 33},
    ]}
    output = utils.getinfo(model.Collection(**args)._build(variables=['et']))
    assert {x[5:11] for x in parse_scene_id(output)} == {'044033'}
def test_Collection_init_default_parameters():
    """Test if init sets default parameters"""
    args = default_coll_args.copy()
    # These values are being set above but have defaults that need to be checked
    del args['etr_source']
    del args['etr_band']
    del args['etr_factor']
    del args['etr_resample']
    del args['variables']
    m = ssebop.Collection(**args)
    # Use "is None" (identity) instead of "== None" for None checks (PEP 8)
    assert m.variables is None
    assert m.etr_source is None
    assert m.etr_band is None
    assert m.etr_factor is None
    assert m.etr_resample is None
    assert m.cloud_cover_max == 70
    assert m.model_args == {}
    assert m.filter_args == {}
    assert m._interp_vars == ['ndvi', 'etf']
def test_Collection_interpolate_interp_days_exception():
    """Test if Exception is raised for an invalid interp_days parameter"""
    coll_obj = model.Collection(**default_coll_args())
    with pytest.raises(ValueError):
        utils.getinfo(coll_obj.interpolate(interp_days=0))
def test_Collection_interpolate_no_variables_exception():
    """Test if Exception is raised if variables is not set in init or method"""
    args = default_coll_args()
    args.pop('variables')
    with pytest.raises(ValueError):
        utils.getinfo(model.Collection(**args).interpolate())
def test_Collection_build_exclusive_enddate():
    """Test if the end_date is exclusive"""
    args = default_coll_args()
    args['end_date'] = '2017-07-24'
    scene_ids = parse_scene_id(utils.getinfo(model.Collection(**args)._build()))
    # No scene acquired on or after the end date should be present
    assert [x for x in scene_ids if int(x[-8:]) >= 20170724] == []
def test_Collection_interpolate_t_interval_exception():
    """Test if Exception is raised for an invalid t_interval parameter"""
    coll_obj = model.Collection(**default_coll_args())
    with pytest.raises(ValueError):
        utils.getinfo(coll_obj.interpolate(t_interval='DEADBEEF'))
def test_Collection_build_variable_valueerror():
    """Test if Exception is raised for an invalid variable"""
    coll_obj = model.Collection(**default_coll_args())
    with pytest.raises(ValueError):
        utils.getinfo(coll_obj._build(variables=['FOO']))
def test_Collection_overpass_default():
    """Test overpass method with default values (variables from Class init)"""
    output = utils.getinfo(model.Collection(**default_coll_args()).overpass())
    band_ids = {band['id'] for ftr in output['features']
                for band in ftr['bands']}
    assert sorted(band_ids) == VARIABLES
    assert parse_scene_id(output) == SCENE_ID_LIST
def test_Collection_interpolate_variables_custom():
    """Variables passed to interpolate() should override the init variables"""
    output = utils.getinfo(
        model.Collection(**default_coll_args()).interpolate(variables=['et']))
    band_ids = {band['id'] for ftr in output['features']
                for band in ftr['bands']}
    assert sorted(band_ids) == ['et']
def test_Collection_init_collection_str(coll_id='LANDSAT/LC08/C01/T1_TOA'):
    """Test if a single coll_id str is converted to a single item list"""
    args = default_coll_args()
    args.update(collections=coll_id)
    coll_obj = model.Collection(**args)
    assert coll_obj.collections == [coll_id]
def test_Collection_build_variables():
    """Variables passed to _build() should override the init variables"""
    output = utils.getinfo(
        model.Collection(**default_coll_args())._build(variables=['ndvi']))
    band_ids = {band['id'] for ftr in output['features']
                for band in ftr['bands']}
    assert sorted(band_ids) == ['ndvi']
def test_Image_init_cloud_cover_max_str():
    """Test if cloud_cover_max strings are converted to float"""
    # NOTE(review): named test_Image_* but exercises Collection - confirm intent
    args = default_coll_args()
    args['cloud_cover_max'] = '70'
    coll_obj = model.Collection(**args)
    assert coll_obj.cloud_cover_max == 70
def test_Collection_build_default():
    """_build() with no arguments should use the values set at init"""
    output = utils.getinfo(model.Collection(**default_coll_args())._build())
    assert output['type'] == 'ImageCollection'
    assert parse_scene_id(output) == SCENE_ID_LIST
    band_ids = {band['id'] for ftr in output['features']
                for band in ftr['bands']}
    assert sorted(band_ids) == VARIABLES
def default_coll_obj(**kwargs):
    """Return a Collection built from the default args with overrides applied"""
    return ssebop.Collection(**{**default_coll_args, **kwargs})
def test_Collection_overpass_method_variables():
    """Test that custom method variables are passed through to build function"""
    output = utils.getinfo(
        model.Collection(**default_coll_args()).overpass(variables=['et']))
    band_ids = {band['id'] for ftr in output['features']
                for band in ftr['bands']}
    assert sorted(band_ids) == ['et']
def main(ini_path=None, overwrite_flag=False, delay_time=0, gee_key_file=None,
         max_ready=-1, cron_flag=False, reverse_flag=False, update_flag=False):
    """Compute scene Tcorr images by WRS2 tile

    Parameters
    ----------
    ini_path : str
        Input file path.
    overwrite_flag : bool, optional
        If True, overwrite existing files if the export dates are the same and
        generate new images (but with different export dates) even if the tile
        lists are the same.  The default is False.
    delay_time : float, optional
        Delay time in seconds between starting export tasks (or checking the
        number of queued tasks, see "max_ready" parameter).  The default is 0.
    gee_key_file : str, None, optional
        Earth Engine service account JSON key file (the default is None).
    max_ready: int, optional
        Maximum number of queued "READY" tasks.  The default is -1 which is
        implies no limit to the number of tasks that will be submitted.
    cron_flag: bool, optional
        Not currently implemented.
    reverse_flag : bool, optional
        If True, process WRS2 tiles and dates in reverse order.
    update_flag : bool, optional
        If True, only overwrite scenes with an older model version.

    """
    logging.info('\nCompute scene Tcorr images by WRS2 tile')

    ini = utils.read_ini(ini_path)

    model_name = 'SSEBOP'
    # model_name = ini['INPUTS']['et_model'].upper()

    tmax_name = ini[model_name]['tmax_source']

    export_id_fmt = 'tcorr_scene_{product}_{scene_id}'
    asset_id_fmt = '{coll_id}/{scene_id}'

    tcorr_scene_coll_id = '{}/{}_scene'.format(
        ini['EXPORT']['export_coll'], tmax_name.lower())

    wrs2_coll_id = 'projects/earthengine-legacy/assets/' \
                   'projects/usgs-ssebop/wrs2_descending_custom'
    wrs2_tile_field = 'WRS2_TILE'
    # Bug fix: the path/row field names were swapped
    #   (path was 'ROW' and row was 'PATH')
    wrs2_path_field = 'PATH'
    wrs2_row_field = 'ROW'

    # Optional comma separated list of WRS2 tiles to limit processing to
    try:
        wrs2_tiles = str(ini['INPUTS']['wrs2_tiles'])
        wrs2_tiles = sorted([x.strip() for x in wrs2_tiles.split(',')])
    except KeyError:
        wrs2_tiles = []
        logging.debug(' wrs2_tiles: not set in INI, defaulting to []')

    # Optional study area bounding box as [xmin, ymin, xmax, ymax]
    try:
        study_area_extent = str(ini['INPUTS']['study_area_extent']) \
            .replace('[', '').replace(']', '').split(',')
        study_area_extent = [float(x.strip()) for x in study_area_extent]
    except KeyError:
        study_area_extent = None
        logging.debug(' study_area_extent: not set in INI')

    # TODO: Add try/except blocks and default values?
    collections = [x.strip() for x in ini['INPUTS']['collections'].split(',')]
    cloud_cover = float(ini['INPUTS']['cloud_cover'])
    min_pixel_count = float(ini['TCORR']['min_pixel_count'])
    # min_scene_count = float(ini['TCORR']['min_scene_count'])

    # Check the requested date range against the Tmax source availability
    if (tmax_name.upper() == 'CIMIS' and
            ini['INPUTS']['end_date'] < '2003-10-01'):
        logging.error(
            '\nCIMIS is not currently available before 2003-10-01, exiting\n')
        sys.exit()
    elif (tmax_name.upper() == 'DAYMET' and
            ini['INPUTS']['end_date'] > '2018-12-31'):
        logging.warning('\nDAYMET is not currently available past 2018-12-31, '
                        'using median Tmax values\n')
        # sys.exit()

    # Extract the model keyword arguments from the INI
    # Set the property name to lower case and try to cast values to numbers
    model_args = {
        k.lower(): float(v) if utils.is_number(v) else v
        for k, v in dict(ini[model_name]).items()}

    logging.info('\nInitializing Earth Engine')
    if gee_key_file:
        logging.info(
            ' Using service account key file: {}'.format(gee_key_file))
        # The "EE_ACCOUNT" parameter is not used if the key file is valid
        ee.Initialize(ee.ServiceAccountCredentials('x', key_file=gee_key_file),
                      use_cloud_api=True)
    else:
        ee.Initialize(use_cloud_api=True)

    # Get a Tmax image to set the Tcorr values to
    logging.debug('\nTmax properties')
    tmax_source = tmax_name.split('_', 1)[0]
    tmax_version = tmax_name.split('_', 1)[1]
    if 'MEDIAN' in tmax_name.upper():
        tmax_coll_id = 'projects/earthengine-legacy/assets/' \
                       'projects/usgs-ssebop/tmax/{}'.format(tmax_name.lower())
        tmax_coll = ee.ImageCollection(tmax_coll_id)
        tmax_mask = ee.Image(tmax_coll.first()).select([0]).multiply(0)
    else:
        # TODO: Add support for non-median tmax sources
        raise ValueError('unsupported tmax_source: {}'.format(tmax_name))
    logging.debug(' Collection: {}'.format(tmax_coll_id))
    logging.debug(' Source: {}'.format(tmax_source))
    logging.debug(' Version: {}'.format(tmax_version))

    # Build the export CRS/transform/shape/extent from the Tmax mask image
    logging.debug('\nExport properties')
    export_info = utils.get_info(ee.Image(tmax_mask))
    if 'daymet' in tmax_name.lower():
        # Custom smaller extent for DAYMET focused on CONUS
        export_extent = [-1999750, -1890500, 2500250, 1109500]
        export_shape = [4500, 3000]
        export_geo = [1000, 0, -1999750, 0, -1000, 1109500]
        # Custom medium extent for DAYMET of CONUS, Mexico, and southern Canada
        # export_extent = [-2099750, -3090500, 2900250, 1909500]
        # export_shape = [5000, 5000]
        # export_geo = [1000, 0, -2099750, 0, -1000, 1909500]
        export_crs = export_info['bands'][0]['crs']
    else:
        export_crs = export_info['bands'][0]['crs']
        export_geo = export_info['bands'][0]['crs_transform']
        export_shape = export_info['bands'][0]['dimensions']
        # Extent as [xmin, ymin, xmax, ymax] derived from the geo transform
        export_extent = [
            export_geo[2], export_geo[5] + export_shape[1] * export_geo[4],
            export_geo[2] + export_shape[0] * export_geo[0], export_geo[5]]
    export_geom = ee.Geometry.Rectangle(
        export_extent, proj=export_crs, geodesic=False)
    logging.debug(' CRS: {}'.format(export_crs))
    logging.debug(' Extent: {}'.format(export_extent))
    logging.debug(' Geo: {}'.format(export_geo))
    logging.debug(' Shape: {}'.format(export_shape))

    if study_area_extent is None:
        if 'daymet' in tmax_name.lower():
            # CGM - For now force DAYMET to a slightly smaller "CONUS" extent
            study_area_extent = [-125, 25, -65, 49]
            # study_area_extent = [-125, 25, -65, 52]
        elif 'cimis' in tmax_name.lower():
            study_area_extent = [-124, 35, -119, 42]
        else:
            # TODO: Make sure output from bounds is in WGS84
            study_area_extent = tmax_mask.geometry().bounds().getInfo()
        logging.debug(f'\nStudy area extent not set in INI, '
                      f'default to {study_area_extent}')
    study_area_geom = ee.Geometry.Rectangle(
        study_area_extent, proj='EPSG:4326', geodesic=False)

    # For now define the study area from an extent
    if study_area_extent:
        study_area_geom = ee.Geometry.Rectangle(
            study_area_extent, proj='EPSG:4326', geodesic=False)
        export_geom = export_geom.intersection(study_area_geom, 1)

    # If cell_size parameter is set in the INI,
    # adjust the output cellsize and recompute the transform and shape
    try:
        export_cs = float(ini['EXPORT']['cell_size'])
        export_shape = [
            int(math.ceil(abs((export_shape[0] * export_geo[0]) / export_cs))),
            int(math.ceil(abs((export_shape[1] * export_geo[4]) / export_cs)))]
        export_geo = [
            export_cs, 0.0, export_geo[2], 0.0, -export_cs, export_geo[5]]
        logging.debug(' Custom export cell size: {}'.format(export_cs))
        logging.debug(' Geo: {}'.format(export_geo))
        logging.debug(' Shape: {}'.format(export_shape))
    except KeyError:
        pass

    # Create the output image collection if it doesn't exist yet
    if not ee.data.getInfo(tcorr_scene_coll_id):
        logging.info('\nExport collection does not exist and will be built'
                     '\n {}'.format(tcorr_scene_coll_id))
        input('Press ENTER to continue')
        ee.data.createAsset({'type': 'IMAGE_COLLECTION'}, tcorr_scene_coll_id)

    # Get current asset list
    logging.debug('\nGetting GEE asset list')
    asset_list = utils.get_ee_assets(tcorr_scene_coll_id)

    # Get current running tasks
    tasks = utils.get_ee_tasks()
    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
        logging.debug(' Tasks: {}\n'.format(len(tasks)))
        input('ENTER')

    # TODO: Decide if month and year lists should be applied to scene exports
    #   and whether cron mode should compute a rolling date window
    #   (previously sketched here as commented-out code)
    start_dt = datetime.datetime.strptime(
        ini['INPUTS']['start_date'], '%Y-%m-%d')
    end_dt = datetime.datetime.strptime(
        ini['INPUTS']['end_date'], '%Y-%m-%d')
    # Clamp the date range to [1984-03-23 (Landsat 5 launch), today]
    if end_dt >= datetime.datetime.today():
        logging.debug('End Date: {} - setting end date to current '
                      'date'.format(end_dt.strftime('%Y-%m-%d')))
        end_dt = datetime.datetime.today()
    if start_dt < datetime.datetime(1984, 3, 23):
        logging.debug('Start Date: {} - no Landsat 5+ images before '
                      '1984-03-23'.format(start_dt.strftime('%Y-%m-%d')))
        start_dt = datetime.datetime(1984, 3, 23)
    start_date = start_dt.strftime('%Y-%m-%d')
    end_date = end_dt.strftime('%Y-%m-%d')
    logging.debug('Start Date: {}'.format(start_date))
    logging.debug('End Date: {}\n'.format(end_date))
    if start_dt > end_dt:
        raise ValueError('start date must be before end date')

    # Get the list of WRS2 tiles that intersect the data area and study area
    wrs2_coll = ee.FeatureCollection(wrs2_coll_id) \
        .filterBounds(export_geom) \
        .filterBounds(study_area_geom)
    if wrs2_tiles:
        wrs2_coll = wrs2_coll.filter(
            ee.Filter.inList(wrs2_tile_field, wrs2_tiles))
    wrs2_info = wrs2_coll.getInfo()['features']

    # Iterate over WRS2 tiles (default is from west to east)
    for wrs2_ftr in sorted(wrs2_info,
                           key=lambda k: k['properties']['WRS2_TILE'],
                           reverse=not reverse_flag):
        wrs2_tile = wrs2_ftr['properties'][wrs2_tile_field]
        logging.info('{}'.format(wrs2_tile))

        # Parse path/row from the "pPPPrRRR" tile string
        wrs2_path = int(wrs2_tile[1:4])
        wrs2_row = int(wrs2_tile[5:8])

        # Limit each source collection to the current path/row
        wrs2_filter = [
            {'type': 'equals', 'leftField': 'WRS_PATH',
             'rightValue': wrs2_path},
            {'type': 'equals', 'leftField': 'WRS_ROW',
             'rightValue': wrs2_row}]
        filter_args = {c: wrs2_filter for c in collections}

        # Build and merge the Landsat collections
        model_obj = ssebop.Collection(
            collections=collections,
            start_date=start_date,
            end_date=end_date,
            cloud_cover_max=cloud_cover,
            geometry=ee.Geometry(wrs2_ftr['geometry']),
            model_args=model_args,
            filter_args=filter_args,
        )
        landsat_coll = model_obj.overpass(variables=['ndvi'])

        try:
            image_id_list = landsat_coll.aggregate_array('system:id').getInfo()
        except Exception as e:
            logging.warning(' Error getting image ID list, skipping tile')
            logging.debug(f' {e}')
            continue

        # In update mode look up the model version of the existing assets
        if update_flag:
            assets_info = utils.get_info(
                ee.ImageCollection(tcorr_scene_coll_id)
                    .filterMetadata('wrs2_tile', 'equals', wrs2_tile)
                    .filterDate(start_date, end_date))
            asset_props = {
                f'{tcorr_scene_coll_id}/{x["properties"]["system:index"]}':
                    x['properties']
                for x in assets_info['features']}
        else:
            asset_props = {}

        # Sort by date
        for image_id in sorted(image_id_list,
                               key=lambda k: k.split('/')[-1].split('_')[-1],
                               reverse=reverse_flag):
            scene_id = image_id.split('/')[-1]
            logging.info(f'{scene_id}')

            export_dt = datetime.datetime.strptime(
                scene_id.split('_')[-1], '%Y%m%d')
            export_date = export_dt.strftime('%Y-%m-%d')
            logging.debug(f' Date: {export_date}')

            export_id = export_id_fmt.format(
                product=tmax_name.lower(), scene_id=scene_id)
            logging.debug(f' Export ID: {export_id}')
            asset_id = asset_id_fmt.format(
                coll_id=tcorr_scene_coll_id, scene_id=scene_id)
            logging.debug(f' Asset ID: {asset_id}')

            if update_flag:
                def version_number(version_str):
                    return list(map(int, version_str.split('.')))

                if export_id in tasks.keys():
                    logging.info(' Task already submitted, skipping')
                    continue

                # In update mode only overwrite if the version is old
                if asset_props and asset_id in asset_props.keys():
                    model_ver = version_number(ssebop.__version__)
                    asset_ver = version_number(
                        asset_props[asset_id]['model_version'])
                    if asset_ver < model_ver:
                        logging.info(' Asset model version is old, removing')
                        try:
                            ee.data.deleteAsset(asset_id)
                        # Bug fix: was a bare "except:" clause
                        except Exception:
                            logging.info(' Error removing asset, skipping')
                            continue
                    else:
                        logging.info(' Asset is up to date, skipping')
                        continue
            elif overwrite_flag:
                if export_id in tasks.keys():
                    logging.debug(' Task already submitted, cancelling')
                    ee.data.cancelTask(tasks[export_id]['id'])
                # This is intentionally not an "elif" so that a task can be
                # cancelled and an existing image/file/asset can be removed
                if asset_id in asset_list:
                    logging.debug(' Asset already exists, removing')
                    ee.data.deleteAsset(asset_id)
            else:
                if export_id in tasks.keys():
                    logging.debug(' Task already submitted, exiting')
                    continue
                elif asset_id in asset_list:
                    logging.debug(' Asset already exists, skipping')
                    continue

            image = ee.Image(image_id)
            # TODO: Will need to be changed for SR or use from_image_id()
            t_obj = ssebop.Image.from_landsat_c1_toa(image_id, **model_args)

            # Default the Tcorr stats in case the reducers return nothing
            t_stats = ee.Dictionary(t_obj.tcorr_stats) \
                .combine({'tcorr_p5': 0, 'tcorr_count': 0}, overwrite=False)
            tcorr = ee.Number(t_stats.get('tcorr_p5'))
            count = ee.Number(t_stats.get('tcorr_count'))
            index = ee.Algorithms.If(count.gte(min_pixel_count), 0, 9)

            # Write an empty image if the pixel count is too low
            # Bug fix: use gte() so the comparison matches the "index" value
            #   above (was gt(), which wrote an empty image when the count
            #   exactly equaled min_pixel_count but still set index to 0)
            tcorr_img = ee.Algorithms.If(count.gte(min_pixel_count),
                                         tmax_mask.add(tcorr),
                                         tmax_mask.updateMask(0))

            # Clip to the Landsat image footprint
            output_img = ee.Image(tcorr_img).clip(image.geometry())

            # Clear the transparency mask
            output_img = output_img.updateMask(output_img.unmask(0)) \
                .rename(['tcorr']) \
                .set({
                    'CLOUD_COVER': image.get('CLOUD_COVER'),
                    'CLOUD_COVER_LAND': image.get('CLOUD_COVER_LAND'),
                    # NOTE(review): takes only the first ID component
                    #   (e.g. 'LANDSAT') - confirm intended
                    'coll_id': image_id.split('/')[0],
                    'date_ingested':
                        datetime.datetime.today().strftime('%Y-%m-%d'),
                    'date': export_dt.strftime('%Y-%m-%d'),
                    'doy': int(export_dt.strftime('%j')),
                    'model_name': model_name,
                    'model_version': ssebop.__version__,
                    'month': int(export_dt.month),
                    'scene_id': image_id.split('/')[-1],
                    'system:time_start': image.get('system:time_start'),
                    'tcorr_value': tcorr,
                    'tcorr_index': index,
                    'tcorr_pixel_count': count,
                    'tmax_source': tmax_source.upper(),
                    'tmax_version': tmax_version.upper(),
                    'wrs2_path': wrs2_path,
                    'wrs2_row': wrs2_row,
                    'wrs2_tile': wrs2_tile,
                    'year': int(export_dt.year),
                })

            logging.debug(' Building export task')
            task = ee.batch.Export.image.toAsset(
                image=output_img,
                description=export_id,
                assetId=asset_id,
                crs=export_crs,
                crsTransform='[' + ','.join(list(map(str, export_geo))) + ']',
                dimensions='{0}x{1}'.format(*export_shape),
            )

            logging.info(' Starting export task')
            utils.ee_task_start(task)

            # Pause before starting the next date (not export task)
            utils.delay_task(delay_time, max_ready)
            logging.debug('')
def test_Collection_overpass_no_variables_valueerror():
    """Test if Exception is raised if variables is not set in init or method"""
    args = default_coll_args()
    del args['variables']
    with pytest.raises(ValueError):
        # Consistency fix: use the utils.getinfo() wrapper like every other
        # test in this file (was a direct .getInfo() call)
        utils.getinfo(model.Collection(**args).overpass())