Example #1
def test_icor_timeseries():
    cloudmask = connection.load_collection("SENTINEL2_L2A_SENTINELHUB", temporal_extent=["2019-02-01", "2019-11-01"], bands=["CLM"])

    l1c = connection.load_collection("SENTINEL2_L1C_SENTINELHUB",
                                     temporal_extent=["2019-02-01", "2019-11-01"],
                                     bands=['B08', 'B04', 'B8A', 'B09', 'B11', 'sunAzimuthAngles',
                                            'sunZenithAngles', 'viewAzimuthMean', 'viewZenithMean'])

    start_time = time.time()
    # iCor: mask clouds, apply atmospheric correction on the L1C data, compute NDVI and average over the polygon
    icor_json = l1c.mask(cloudmask).atmospheric_correction().ndvi(nir='B08', red='B04').polygonal_mean_timeseries(polygon).execute()
    elapsed_time = time.time() - start_time
    print("seconds: " + str(elapsed_time))
    from openeo.rest.conversions import timeseries_json_to_pandas
    df = timeseries_json_to_pandas(icor_json)

    # Sen2Cor reference: the L2A collection is already atmospherically corrected
    l2a = connection.load_collection("SENTINEL2_L2A_SENTINELHUB",
                                     temporal_extent=["2019-02-01", "2019-11-01"], bands=['B08', 'B04'])
    start_time = time.time()
    l2a_json = l2a.mask(cloudmask).ndvi(nir='B08', red='B04').polygonal_mean_timeseries(polygon).execute()
    elapsed_time = time.time() - start_time
    print("seconds: " + str(elapsed_time))
    df_l2a = timeseries_json_to_pandas(l2a_json)

    import pandas as pd
    df.index = pd.to_datetime(df.index)
    df_l2a.index = pd.to_datetime(df_l2a.index)

    df.name = "iCor"
    df_l2a.name = "Sen2Cor"
    joined = pd.concat([df, df_l2a], axis=1)
    print(joined)
    joined.to_csv('timeseries_icor_masked.csv')
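With both time series in a single DataFrame, a quick visual check of iCor against Sen2Cor is straightforward. A minimal follow-up sketch, assuming matplotlib is installed and using the joined DataFrame from the test above (the output filename is illustrative):

import matplotlib.pyplot as plt

# plot the joined iCor vs Sen2Cor NDVI time series and save the figure
joined.plot(title="NDVI: iCor vs Sen2Cor")
plt.savefig("timeseries_icor_vs_sen2cor.png")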
Example #2
            def get_angle(geo, start, end):
                scale = 0.0005
                offset = 29
                orbit_passes = [r'ASCENDING', r'DESCENDING']
                dict_df_angles_fields = dict()
                for orbit_pass in orbit_passes:
                    angle = self._eoconn.load_collection('S1_GRD_SIGMA0_{}'.format(orbit_pass), bands=['angle']).band('angle')
                    try:
                        angle_fields = angle.filter_temporal(start,end).polygonal_mean_timeseries(geo).send_job().start_and_wait().get_result().load_json()
                        df_angle_fields = timeseries_json_to_pandas(angle_fields)
                    except Exception:
                        print('DIRECT RESULT RETRIEVAL FAILED ... FALLING BACK TO A BATCH JOB WITH FILE OUTPUT')
                        angle.polygonal_mean_timeseries(geo).filter_temporal(start, end).execute_batch('angle_{}.json'.format(orbit_pass))
                        with open('angle_{}.json'.format(orbit_pass), 'r') as angle_file:
                            angle_fields_ts = json.load(angle_file)
                            df_angle_fields = timeseries_json_to_pandas(angle_fields_ts)
                            df_angle_fields.index = pd.to_datetime(df_angle_fields.index).date
                        # the context manager already closes the file; clean up the temporary JSON afterwards
                        os.remove(os.path.join(os.getcwd(), 'angle_{}.json'.format(orbit_pass)))

                    new_columns = [str(item) + '_angle' for item in list(df_angle_fields.columns.values)]
                    df_angle_fields.rename(columns=dict(zip(list(df_angle_fields.columns.values), new_columns)), inplace=True)
                    df_angle_fields = df_angle_fields * scale + offset  # apply the band's scale and offset to the raw values
                    dict_df_angles_fields.update({'{}'.format(orbit_pass): df_angle_fields})
                return dict_df_angles_fields
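A hypothetical call of this helper could look as follows (note that in the original code get_angle is nested inside a class method, and field_geometry is a placeholder geometry):

# hypothetical usage sketch; field_geometry and the date range are placeholders
angles_per_pass = get_angle(field_geometry, '2019-01-01', '2019-12-31')
df_ascending = angles_per_pass['ASCENDING']
df_descending = angles_per_pass['DESCENDING']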
Example #3
def test_timeseries_json_to_pandas_auto_collapse(timeseries, index,
                                                 with_auto_collapse,
                                                 without_auto_collapse):
    df = timeseries_json_to_pandas(timeseries, index=index, auto_collapse=True)
    if isinstance(with_auto_collapse, pd.Series):
        assert_series_equal(df.sort_index(), with_auto_collapse)
    else:
        assert_frame_equal(df, with_auto_collapse)
    df = timeseries_json_to_pandas(timeseries,
                                   index=index,
                                   auto_collapse=False)
    assert_frame_equal(df, without_auto_collapse)
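The timeseries fixtures for this parametrized test are not shown here. As a rough illustration of what auto_collapse does (a sketch based on the single-polygon, single-band case, where the degenerate column levels are collapsed):

ts = {DATE1: [[5]], DATE2: [[6]]}
series = timeseries_json_to_pandas(ts, auto_collapse=True)   # collapses to a pd.Series indexed by date
frame = timeseries_json_to_pandas(ts, auto_collapse=False)   # keeps the single-column DataFrame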
Example #4
def test_timeseries_json_to_pandas_basic():
    # 2 dates, 2 polygons, 3 bands
    timeseries = {
        DATE1: [[1, 2, 3], [4, 5, 6]],
        DATE2: [[7, 8, 9], [10, 11, 12]],
    }
    df = timeseries_json_to_pandas(timeseries)
    expected = pd.DataFrame(data=[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
                            index=pd.Index([DATE1, DATE2], name="date"),
                            columns=pd.MultiIndex.from_tuples([(0, 0), (0, 1),
                                                               (0, 2), (1, 0),
                                                               (1, 1), (1, 2)],
                                                              names=("polygon",
                                                                     "band")))
    assert_frame_equal(df, expected)
Example #5
def main():
    url = "https://openeo.vito.be"

    conn = openeo.connect(url)

    polygon = shapely.geometry.Polygon([(3.71, 51.01), (3.72, 51.02),
                                        (3.73, 51.01)])
    bbox = polygon.bounds

    result = (conn.load_collection(
        "TERRASCOPE_S2_TOC_V2",
        temporal_extent=["2020-01-01", "2020-03-10"],
        spatial_extent=dict(zip(["west", "south", "east", "north"], bbox)),
        bands=["B04",
               "B08"]).ndvi().polygonal_mean_timeseries(polygon).execute())

    pprint(timeseries_json_to_pandas(result))
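An optional follow-up (a sketch, not part of the original example) that could be appended inside main() converts the result into a date-indexed pandas object for further analysis:

    # optional sketch: convert the date strings into a DatetimeIndex
    import pandas as pd
    df = timeseries_json_to_pandas(result)
    df.index = pd.to_datetime(df.index)
    print(df.describe())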
Example #6
def test_timeseries_json_to_pandas_index_polygon():
    # 2 dates, 2 polygons, 3 bands
    timeseries = {
        DATE1: [[1, 2, 3], [4, 5, 6]],
        DATE2: [[7, 8, 9], [10, 11, 12]],
    }
    df = timeseries_json_to_pandas(timeseries, index="polygon")
    expected = pd.DataFrame(data=[[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]],
                            index=pd.Index([0, 1], name="polygon"),
                            columns=pd.MultiIndex.from_tuples([(DATE1, 0),
                                                               (DATE1, 1),
                                                               (DATE1, 2),
                                                               (DATE2, 0),
                                                               (DATE2, 1),
                                                               (DATE2, 2)],
                                                              names=("date",
                                                                     "band")))
    assert_frame_equal(df, expected)
Example #7
def test_timeseries_json_to_pandas_none_nan_empty_handling():
    timeseries = {
        DATE1: [[1, 2], [3, 4]],
        DATE2: [[5, 6], [None, None]],
        DATE3: [[], []],
        DATE4: [[], [7, 8]],
    }
    df = timeseries_json_to_pandas(timeseries)
    expected = pd.DataFrame(data=[
        [1, 2, 3, 4],
        [5, 6, np.nan, np.nan],
        [np.nan, np.nan, np.nan, np.nan],
        [np.nan, np.nan, 7, 8],
    ],
                            dtype=float,
                            index=pd.Index([DATE1, DATE2, DATE3, DATE4],
                                           name="date"),
                            columns=pd.MultiIndex.from_tuples([(0, 0), (0, 1),
                                                               (1, 0), (1, 1)],
                                                              names=("polygon",
                                                                     "band")))
    assert_frame_equal(df, expected)
Example #8
datacube = connection.load_collection("TERRASCOPE_S2_NDVI_V2")

polygons_file = r'../data/BRP_Gewaspercelen_2017_subset.geojson'
import geopandas as gpd
# read the field polygons locally with geopandas
local_polygons = gpd.read_file(polygons_file)


def load_udf(relative_path):
    # read a UDF script from a path relative to this script
    script_dir = Path(os.path.dirname(os.path.realpath(__file__)))
    with open(script_dir / relative_path, 'r') as f:
        return f.read()


smoothing_udf = load_udf('./smooth_savitzky_golay.py')

# for zonal stats on large shapefiles: the file can be stored on Terrascope, and batch jobs should be used
# (scl_mask is a datacube defined earlier in the full script and not shown in this snippet)
ndvi_zonal_statistics = datacube\
    .mask(scl_mask)\
    .apply_dimension(smoothing_udf, dimension='t', runtime='Python')\
    .filter_temporal('2017-01-01', '2018-01-01')\
    .polygonal_mean_timeseries(GeometryCollection(list(local_polygons.geometry)))

print(json.dumps(ndvi_zonal_statistics.graph, indent=2))
# Can the backend derive the bbox from the polygons passed to aggregate_spatial? If so, the explicit filter_bbox below is not needed:
#.filter_bbox(west=5.251809,east=5.462144,north=51.838069,south=51.705835)\
result = ndvi_zonal_statistics.execute()

df = timeseries_json_to_pandas(result)
df.index = pd.to_datetime(df.index)
df.interpolate().plot()
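As the comment above notes, for large polygon files a batch job is preferable to the synchronous execute() call. A minimal sketch under that assumption, using the openeo client's execute_batch helper (the filename and the JSON output format are illustrative and backend-specific):

# sketch: run the same zonal statistics as a batch job and load the downloaded result
job_output = 'ndvi_zonal_statistics.json'
ndvi_zonal_statistics.execute_batch(job_output, out_format='JSON')

with open(job_output) as f:
    batch_result = json.load(f)
df = timeseries_json_to_pandas(batch_result)
df.index = pd.to_datetime(df.index)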
Example #9
def udf_cropcalendars(udf_data: UdfData):
    context_param_var = udf_data.user_context
    print(context_param_var)
    ts_dict = udf_data.get_structured_data_list()[0].data
    if not ts_dict:  # workaround in case ts_dict is empty
        return
    ts_df = timeseries_json_to_pandas(ts_dict)
    ts_df.index = pd.to_datetime(ts_df.index).date

    # function to calculate the cropsar curve
    ts_df_cropsar = get_cropsar_TS(
        ts_df, context_param_var.get('unique_ids_fields'),
        context_param_var.get('metrics_order'),
        context_param_var.get('fAPAR_rescale_Openeo'))
    # rescale cropsar values
    ts_df_cropsar = rescale_cropSAR(
        ts_df_cropsar, context_param_var.get('fAPAR_range_normalization'),
        context_param_var.get('unique_ids_fields'), 'cropSAR')

    # function to rescale the metrics based
    # on the rescaling factor of the metric
    def rescale_metrics(df, rescale_factor, fAPAR_range, unique_ids_fields,
                        metric_suffix):
        df[[
            item + '_{}'.format(str(metric_suffix))
            for item in unique_ids_fields
        ]] = df.loc[:,
                    ts_df.columns.isin([
                        item + '_{}'.format(str(metric_suffix))
                        for item in unique_ids_fields
                    ])] * rescale_factor
        df[[
            item + '_{}'.format(str(metric_suffix))
            for item in unique_ids_fields
        ]] = 2 * (df[[
            item + '_{}'.format(str(metric_suffix))
            for item in unique_ids_fields
        ]] - fAPAR_range[0]) / (fAPAR_range[1] - fAPAR_range[0]) - 1
        return df

    #### USE THE FUNCTIONS TO DETERMINE THE CROP CALENDAR DATES

    ### EVENT 1: HARVEST DETECTION
    NN_model_dir = context_param_var.get('path_harvest_model')
    amount_metrics_model = len(context_param_var.get(
        'metrics_crop_event')) * context_param_var.get('window_values')

    #### PREPARE THE DATAFRAMES (REFORMATTING AND RESCALING) IN THE
    # RIGHT FORMAT TO ALLOW THE USE OF THE TRAINED NN
    ts_df_prepro = rename_df_columns(
        ts_df, context_param_var.get('unique_ids_fields'),
        context_param_var.get('metrics_order'))

    ts_df_prepro = VHVV_calc_rescale(
        ts_df_prepro, context_param_var.get('unique_ids_fields'),
        context_param_var.get('VH_VV_range_normalization'))

    #### rescale the fAPAR to 0 and 1 and convert
    # it to values between -1 and 1
    ts_df_prepro = rescale_metrics(
        ts_df_prepro, context_param_var.get('fAPAR_rescale_Openeo'),
        context_param_var.get('fAPAR_range_normalization'),
        context_param_var.get('unique_ids_fields'), 'fAPAR')

    ro_s = {
        'ascending': context_param_var.get('RO_ascending_selection_per_field'),
        'descending':
        context_param_var.get('RO_descending_selection_per_field')
    }

    #### now merge the cropsar ts file with the other
    # df containing the S1 metrics
    date_range = pd.date_range(ts_df_cropsar.index[0],
                               ts_df_cropsar.index[-1]).date
    ts_df_prepro = ts_df_prepro.reindex(
        date_range)  # need to set the index axis on the same frequency
    ts_df_prepro = pd.concat(
        [ts_df_cropsar, ts_df_prepro], axis=1
    )  # the columns of the cropsar df need to be the first ones in the new df to ensure the correct position for applying the NN model

    ### create windows in the time series to extract the metrics
    # and store each window in a separate row in the dataframe
    ts_df_input_NN = prepare_df_NN_model(
        ts_df_prepro, context_param_var.get('window_values'),
        context_param_var.get('unique_ids_fields'), ro_s,
        context_param_var.get('metrics_crop_event'))

    ### apply the trained NN model on the window extracts
    df_NN_prediction = apply_NN_model_crop_calendars(
        ts_df_input_NN, amount_metrics_model,
        context_param_var.get('thr_detection'),
        context_param_var.get('crop_calendar_event'), NN_model_dir)
    df_crop_calendars_result = create_crop_calendars_fields(
        df_NN_prediction, context_param_var.get('unique_ids_fields'),
        context_param_var.get('index_window_above_thr'))
    print(df_crop_calendars_result)
    # return the predicted crop calendar events as a dict  (json format)
    udf_data.set_structured_data_list([
        StructuredData(description="crop calendar json",
                       data=df_crop_calendars_result.to_dict(),
                       type="dict")
    ])
    return udf_data
Example #10
def test_timeseries_json_to_pandas_invalid_polygon_and_band_counts(error, ts):
    with pytest.raises(InvalidTimeSeriesException, match=error):
        timeseries_json_to_pandas(ts)
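For illustration, an input of the kind this test guards against might look as follows (a hypothetical case; the actual error/ts pairs are supplied by a pytest.mark.parametrize decorator that is not shown here):

# hypothetical invalid time series: the two dates report a different number of polygons
bad_ts = {
    DATE1: [[1, 2], [3, 4]],
    DATE2: [[5, 6]],
}
with pytest.raises(InvalidTimeSeriesException):
    timeseries_json_to_pandas(bad_ts)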