# NOTE(review): this chunk begins mid-statement — the call whose keyword
# arguments end on the next token was opened before this view.
c='k', marker='*')

# Overlay the fitted spline curve and its detected maxima on the axes `ax`.
ax.plot(X, spline_pred, 'r--', label="Spline")
ax.scatter(spline_max_DoYs_series, spline_max_series, s=100, c='r', marker='*')

################################################
#
#  bare soil indices plots
#
an_EE_TS_BSI = rc.initial_clean(df=curr_field, column_to_be_cleaned='BSI')
# an_EE_TS_NDWI = rc.initial_clean(df = curr_field, column_to_be_cleaned='NDWI')
an_EE_TS_PSRI = rc.initial_clean(df=curr_field, column_to_be_cleaned='PSRI')
an_EE_TS_LSWI = rc.initial_clean(df=curr_field, column_to_be_cleaned='LSWI')

# Plot each cleaned index series against day-of-year; the NDWI plot is
# commented out (intentionally disabled, it appears — confirm with author).
ax.plot(an_EE_TS_BSI['doy'], an_EE_TS_BSI['BSI'], label="BSI")
# ax.plot(x_NDWI, y_NDWI, label="NWDI")
ax.plot(an_EE_TS_PSRI['doy'], an_EE_TS_PSRI['PSRI'], label="PSRI")
ax.plot(an_EE_TS_LSWI['doy'], an_EE_TS_LSWI['LSWI'], label="LSWI")
################################################
ax.legend(loc="best")
### process data ### ######################################################################################## f_name = "00_noOutlier_" + county + "_SF_" + str( SF_year) + "_" + indeks + ".csv" an_EE_TS = pd.read_csv(data_dir + f_name, low_memory=False) ######################################################################################## output_dir = data_base + "/01_jumps_removed/" os.makedirs(output_dir, exist_ok=True) ######################################################################################## an_EE_TS = rc.initial_clean(df=an_EE_TS, column_to_be_cleaned=indeks) an_EE_TS.head(2) ### ### List of unique polygons ### polygon_list = an_EE_TS['ID'].unique() print(len(polygon_list)) ######################################################################################## ### ### initialize output data. ### output_df = pd.DataFrame(data=None, index=np.arange(an_EE_TS.shape[0]),
# Restrict the series to Grant county; print the county set before and after
# as a sanity check.
print(an_EE_TS.county.unique())
an_EE_TS = an_EE_TS[an_EE_TS['county'] == "Grant"]
print(an_EE_TS.county.unique())
#
# The following columns do not exist in the old data
#
# NOTE(review): a_df is not defined in this chunk — presumably bound earlier
# in the file; verify before running this fragment standalone.
if not ('DataSrc' in a_df.columns):
    print("Data source is being set to NA")
    a_df['DataSrc'] = "NA"
if not ('CovrCrp' in a_df.columns):
    # NOTE(review): message says "Data source" but this branch backfills
    # CovrCrp — looks like a copy-paste slip; confirm before changing.
    print("Data source is being set to NA")
    a_df['CovrCrp'] = "NA"
# Clean each spectral-index column independently.
an_EE_TS_NDVI = rc.initial_clean(df=an_EE_TS, column_to_be_cleaned='NDVI')
an_EE_TS_EVI = rc.initial_clean(df=an_EE_TS, column_to_be_cleaned='EVI')
an_EE_TS_BSI = rc.initial_clean(df=an_EE_TS, column_to_be_cleaned='BSI')
an_EE_TS_NDWI = rc.initial_clean(df=an_EE_TS, column_to_be_cleaned='NDWI')
an_EE_TS_PSRI = rc.initial_clean(df=an_EE_TS, column_to_be_cleaned='PSRI')
an_EE_TS_LSWI = rc.initial_clean(df=an_EE_TS, column_to_be_cleaned='LSWI')
an_EE_TS_NDVI.head(2)  # NOTE(review): no-op outside a notebook
### List of unique polygons
polygon_list = an_EE_TS_NDVI['geo'].unique()
print(len(polygon_list))
counter = 0
# NOTE(review): the loop body continues beyond this view.
for a_poly in polygon_list:
# if (filter_lastSurDate == True): # a_df = rc.filter_out_NASS(dt_df_NASS = a_df) # print ("After filtering out NASS, a_df is of dimension {fileShape}.".format(fileShape=a_df.shape)) ###################### # a_df['year'] = SF_year # # The following columns do not exist in the old data # if not('DataSrc' in a_df.columns): print ("_________________________________________________________") print ("Data source is being set to NA") a_df['DataSrc'] = "NA" a_df = rc.initial_clean(df = a_df, column_to_be_cleaned = indeks) a_df = a_df.copy() ### List of unique polygons polygon_list = a_df['ID'].unique() print ("_________________________________________________________") print("polygon_list is of length {}.".format(len(polygon_list))) # # 25 columns # SEOS_output_columns = ['ID', 'Acres', 'county', 'CropGrp', 'CropTyp', 'DataSrc', 'ExctAcr', 'IntlSrD', 'Irrigtn', 'LstSrvD', 'Notes', 'RtCrpTy', 'Shap_Ar', 'Shp_Lng', 'TRS', 'image_year', 'SF_year', 'doy', 'EVI', 'human_system_start_time', 'Date',
# Ensure the output and plot directories exist before anything is written.
os.makedirs(output_dir, exist_ok=True)
os.makedirs(plot_dir_base, exist_ok=True)

######################
# The CovrCrp column does not exist in the old data; backfill it so
# downstream code can rely on its presence.
if 'CovrCrp' not in a_df.columns:
    # Bug fix: the original message said "Data source is being set to NA",
    # a copy-paste of the DataSrc backfill message — this branch sets CovrCrp.
    print("CovrCrp is being set to NA")
    a_df['CovrCrp'] = "NA"

####################################################################################
# Clean each spectral-index column and attach a human-readable timestamp.
# Kept as six separately named frames (not a dict) because downstream code
# references these exact variable names.
an_EE_TS_NDVI = rc.add_human_start_time(rc.initial_clean(df=a_df, column_to_be_cleaned='NDVI'))
an_EE_TS_EVI = rc.add_human_start_time(rc.initial_clean(df=a_df, column_to_be_cleaned='EVI'))
an_EE_TS_BSI = rc.add_human_start_time(rc.initial_clean(df=a_df, column_to_be_cleaned='BSI'))
an_EE_TS_NDWI = rc.add_human_start_time(rc.initial_clean(df=a_df, column_to_be_cleaned='NDWI'))
an_EE_TS_PSRI = rc.add_human_start_time(rc.initial_clean(df=a_df, column_to_be_cleaned='PSRI'))
an_EE_TS_LSWI = rc.add_human_start_time(rc.initial_clean(df=a_df, column_to_be_cleaned='LSWI'))
####################################################################################