Example #1
def test_update_rapid_input_file():
    """
    Checks RAPID input file update with valid input
    """
    print("TEST 2: UPDATE NAMELIST FILE")
    rapid_manager = RAPID(
        rapid_executable_location=RAPID_EXE_PATH,
        cygwin_bin_location=CYGWIN_BIN_PATH,
        use_all_processors=True,
    )
    rapid_manager.update_parameters(rapid_connect_file='rapid_connect.csv',
                                    Vlat_file='m3_riv.nc',
                                    riv_bas_id_file='riv_bas_id.csv',
                                    k_file='k.csv',
                                    x_file='x.csv',
                                    Qout_file='Qout.nc')

    original_input_file = os.path.join(INPUT_DATA_PATH, "rapid_namelist_valid")

    updated_input_file = os.path.join(OUTPUT_DATA_PATH,
                                      "rapid_namelist-UPDATE")

    copy(original_input_file, updated_input_file)
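    # update the copied namelist in place with the parameters set above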
    rapid_manager.update_namelist_file(updated_input_file)
    updated_input_file_solution = os.path.join(COMPARE_DATA_PATH,
                                               "rapid_namelist-UPDATE")

    assert (fcmp(updated_input_file, updated_input_file_solution))

    remove_files(updated_input_file)
Example #2
def test_generate_rapid_input_file():
    """
    Checks RAPID input file generation with valid input
    """
    print("TEST 1: GENERATE NAMELIST FILE")
    rapid_manager = RAPID(rapid_executable_location=RAPID_EXE_PATH,
                          cygwin_bin_location=CYGWIN_BIN_PATH,
                          use_all_processors=True,
                          ZS_TauR=24 * 3600,  # duration of routing procedure (time step of runoff data)
                          ZS_dtR=15 * 60,  # internal routing time step
                          ZS_TauM=12 * 24 * 3600,  # total simulation time
                          ZS_dtM=24 * 3600)  # input time step
    rapid_manager.update_parameters(rapid_connect_file='rapid_connect.csv',
                                    Vlat_file='m3_riv.nc',
                                    riv_bas_id_file='riv_bas_id.csv',
                                    k_file='k.csv',
                                    x_file='x.csv',
                                    Qout_file='Qout.nc'
                                    )
    generated_input_file = os.path.join(OUTPUT_DATA_PATH, 
                                        "rapid_namelist-GENERATE")
    rapid_manager.generate_namelist_file(generated_input_file)
    generated_input_file_solution = os.path.join(COMPARE_DATA_PATH, 
                                                 "rapid_namelist-GENERATE")


    ok_(fcmp(generated_input_file, generated_input_file_solution))
    
    remove_files(generated_input_file)
Example #3
def test_download_usgs_daily_avg():
    """
    This tests downloading USGS daily avg data
    """
    print("TEST 12: TEST DOWNLOAD USGS DAILY AVERAGE DATA")

    out_streamflow_file = os.path.join(OUTPUT_DATA_PATH, "gage_streamflow.csv")
    out_stream_id_file = os.path.join(OUTPUT_DATA_PATH, "gage_rivid.csv")
    
    rapid_manager = RAPID(rapid_executable_location=RAPID_EXE_PATH,
                          cygwin_bin_location=CYGWIN_BIN_PATH)
    
    rapid_manager.generate_usgs_avg_daily_flows_opt(reach_id_gage_id_file=os.path.join(INPUT_DATA_PATH,"usgs_gage_id_rivid.csv"),
                                                    start_datetime=datetime(2000,1,1),
                                                    end_datetime=datetime(2000,1,3),
                                                    out_streamflow_file=out_streamflow_file, 
                                                    out_stream_id_file=out_stream_id_file)
                
    compare_streamflow_file = os.path.join(COMPARE_DATA_PATH, "gage_streamflow.csv")
    ok_(compare_csv_decimal_files(out_streamflow_file, compare_streamflow_file, header=False))

    compare_stream_id_file = os.path.join(COMPARE_DATA_PATH, "gage_rivid.csv")
    ok_(compare_csv_decimal_files(out_stream_id_file, compare_stream_id_file, header=False))
    
    remove_files(out_streamflow_file,
                 out_stream_id_file)
Example #4
def test_gen_weight_table_lis_no_intersect():
    """
    Checks generating weight table for LIS grid with no intersect
    """
    print("TEST 8: TEST GENERATE WEIGTH TABLE FOR LIS GRIDS WITH NO INTERSECT")
    generated_weight_table_file = os.path.join(OUTPUT_DATA_PATH, 
                                               "weight_lis_no_intersect.csv")
    #rapid_connect
    rapid_connect_file = os.path.join(GIS_INPUT_DATA_PATH, "uk-no_intersect",
                                      "rapid_connect_45390.csv")

    lsm_grid = os.path.join(LSM_INPUT_DATA_PATH, "lis", "LIS_HIST_201101210000.d01.nc")
    CreateWeightTableLDAS(in_ldas_nc=lsm_grid,
                          in_nc_lon_var="lon",
                          in_nc_lat_var="lat", 
                          in_catchment_shapefile=os.path.join(GIS_INPUT_DATA_PATH, 'uk-no_intersect', 'Catchment_thames_drainID45390.shp'), 
                          river_id="DrainLnID", 
                          in_connectivity_file=rapid_connect_file, 
                          out_weight_table=generated_weight_table_file)
                                                         
    generated_weight_table_file_solution = os.path.join(COMPARE_DATA_PATH, "uk-no_intersect",
                                                        "weight_lis_no_intersect.csv")
    ok_(compare_csv_decimal_files(generated_weight_table_file, 
                                  generated_weight_table_file_solution))

    remove_files(generated_weight_table_file)
Example #5
def test_gen_weight_table_gldas2():
    """
    Checks generating weight table for GLDAS V2 grid
    """
    print("TEST 6: TEST GENERATE WEIGTH TABLE FOR GLDAS V2 GRIDS")
    generated_weight_table_file = os.path.join(OUTPUT_DATA_PATH, 
                                               "weight_gldas2.csv")
    #rapid_connect
    rapid_connect_file = os.path.join(COMPARE_DATA_PATH, "x-x",
                                      "rapid_connect.csv")

    lsm_grid = os.path.join(LSM_INPUT_DATA_PATH, "gldas2", "GLDAS_NOAH025_3H.A20101231.0000.020.nc4")
    CreateWeightTableLDAS(in_ldas_nc=lsm_grid,
                          in_nc_lon_var="lon",
                          in_nc_lat_var="lat", 
                          in_catchment_shapefile=os.path.join(GIS_INPUT_DATA_PATH, 'catchment.shp'), 
                          river_id="FEATUREID", 
                          in_connectivity_file=rapid_connect_file, 
                          out_weight_table=generated_weight_table_file)
                                                         
    generated_weight_table_file_solution = os.path.join(COMPARE_DATA_PATH, "x-x",
                                                        "weight_gldas2.csv")
    ok_(compare_csv_decimal_files(generated_weight_table_file, 
                                  generated_weight_table_file_solution))

    remove_files(generated_weight_table_file)
Example #6
def test_gen_weight_table_joules():
    """
    Checks generating weight table for Joules grid
    """
    print("TEST 9: TEST GENERATE WEIGTH TABLE FOR Joules GRIDS")
    generated_weight_table_file = os.path.join(OUTPUT_DATA_PATH, 
                                               "weight_joules.csv")
    #rapid_connect
    rapid_connect_file = os.path.join(COMPARE_DATA_PATH, "u-k",
                                      "rapid_connect.csv")

    lsm_grid = os.path.join(LSM_INPUT_DATA_PATH, "joules", "ukv_test.runoff.20080803_00.nc")
    CreateWeightTableLDAS(in_ldas_nc=lsm_grid,
                          in_nc_lon_var="east_west",
                          in_nc_lat_var="north_south", 
                          in_catchment_shapefile=os.path.join(GIS_INPUT_DATA_PATH, 'u-k', 'CatchmentSubset.shp'), 
                          river_id="DrainLnID", 
                          in_connectivity_file=rapid_connect_file, 
                          out_weight_table=generated_weight_table_file)
                                                         
    generated_weight_table_file_solution = os.path.join(COMPARE_DATA_PATH, "u-k",
                                                        "weight_joules.csv")
    ok_(compare_csv_decimal_files(generated_weight_table_file, 
                                  generated_weight_table_file_solution))

    remove_files(generated_weight_table_file)
Example #7
def test_update_rapid_numbers_input_file():
    """
    Checks RAPID input file update with number validation
    """
    print("TEST 4: GENERATE NUMBERS FOR NAMELIST FILE")
    rapid_manager = RAPID(rapid_executable_location=RAPID_EXE_PATH,
                          cygwin_bin_location=CYGWIN_BIN_PATH,
                          use_all_processors=True,
                          rapid_connect_file=os.path.join(INPUT_DATA_PATH, 'rapid_connect.csv'),
                          riv_bas_id_file=os.path.join(INPUT_DATA_PATH, 'riv_bas_id.csv'),
                         )
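    # rows in rapid_connect.csv and riv_bas_id.csv set IS_riv_tot / IS_riv_bas (assumed behavior)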
    rapid_manager.update_reach_number_data()
                          
    rapid_manager.update_parameters(rapid_connect_file='rapid_connect.csv',
                                    Vlat_file='m3_nasa_lis_3hr_20020830.nc',
                                    riv_bas_id_file='riv_bas_id.csv',
                                    k_file='k.csv',
                                    x_file='x.csv',
                                    Qout_file='Qout.nc'
                                    )

    generated_input_file = os.path.join(OUTPUT_DATA_PATH,
                                        "rapid_namelist-GENERATE-NUMBERS")

    rapid_manager.generate_namelist_file(generated_input_file)
                          
    generated_input_file_solution = os.path.join(COMPARE_DATA_PATH, 
                                                 "rapid_namelist-GENERATE-NUMBERS")


    ok_(fcmp(generated_input_file, generated_input_file_solution))
    
    remove_files(generated_input_file)
Example #8
def test_gen_muskingum_kfac1():
    """
    Checks generating Muskingum Kfac option 1
    """
    print("TEST 14: TEST GENERATE MUSKINGUM KFAC OPTION 1")
    generated_kfac_file = os.path.join(OUTPUT_DATA_PATH, 
                                       "kfac1.csv")
    #rapid_connect
    rapid_connect_file = os.path.join(COMPARE_DATA_PATH, "x-x",
                                      "rapid_connect.csv")
    CreateMuskingumKfacFile(in_drainage_line=os.path.join(GIS_INPUT_DATA_PATH, 'flowline.shp'),
                            river_id="COMID",
                            length_id="LENGTHKM",
                            slope_id="Slope",
                            celerity=1000.0/3600.0,
                            formula_type=1,
                            in_connectivity_file=rapid_connect_file,
                            out_kfac_file=generated_kfac_file)
                            
    #CHECK OUTPUT   
    #kfac
    generated_kfac_file_solution = os.path.join(COMPARE_DATA_PATH, "x-x",
                                                "kfac1.csv")
    ok_(compare_csv_decimal_files(generated_kfac_file, 
                                  generated_kfac_file_solution))
    remove_files(generated_kfac_file)
Example #9
def test_download_usgs_daily_avg():
    """
    This tests downloading USGS daily avg data
    """
    print("TEST 12: TEST DOWNLOAD USGS DAILY AVERAGE DATA")

    out_streamflow_file = os.path.join(OUTPUT_DATA_PATH, "gage_streamflow.csv")
    out_stream_id_file = os.path.join(OUTPUT_DATA_PATH, "gage_rivid.csv")

    rapid_manager = RAPID(rapid_executable_location=RAPID_EXE_PATH,
                          cygwin_bin_location=CYGWIN_BIN_PATH)

    rapid_manager.generate_usgs_avg_daily_flows_opt(
        reach_id_gage_id_file=os.path.join(INPUT_DATA_PATH,
                                           "usgs_gage_id_rivid.csv"),
        start_datetime=datetime(2000, 1, 1),
        end_datetime=datetime(2000, 1, 3),
        out_streamflow_file=out_streamflow_file,
        out_stream_id_file=out_stream_id_file)

    compare_streamflow_file = os.path.join(COMPARE_DATA_PATH,
                                           "gage_streamflow.csv")
    assert (compare_csv_decimal_files(out_streamflow_file,
                                      compare_streamflow_file,
                                      header=False))

    compare_stream_id_file = os.path.join(COMPARE_DATA_PATH, "gage_rivid.csv")
    assert (compare_csv_decimal_files(out_stream_id_file,
                                      compare_stream_id_file,
                                      header=False))

    remove_files(out_streamflow_file, out_stream_id_file)
Example #10
def test_generate_rapid_input_file():
    """
    Checks RAPID input file generation with valid input
    """
    print("TEST 1: GENERATE NAMELIST FILE")
    rapid_manager = RAPID(
        rapid_executable_location=RAPID_EXE_PATH,
        cygwin_bin_location=CYGWIN_BIN_PATH,
        use_all_processors=True,
        ZS_TauR=24 * 3600,  # duration of routing procedure (time step of runoff data)
        ZS_dtR=15 * 60,  # internal routing time step
        ZS_TauM=12 * 24 * 3600,  # total simulation time
        ZS_dtM=24 * 3600  # input time step
    )
    rapid_manager.update_parameters(rapid_connect_file='rapid_connect.csv',
                                    Vlat_file='m3_riv.nc',
                                    riv_bas_id_file='riv_bas_id.csv',
                                    k_file='k.csv',
                                    x_file='x.csv',
                                    Qout_file='Qout.nc')
    generated_input_file = os.path.join(OUTPUT_DATA_PATH,
                                        "rapid_namelist-GENERATE")
    rapid_manager.generate_namelist_file(generated_input_file)
    generated_input_file_solution = os.path.join(COMPARE_DATA_PATH,
                                                 "rapid_namelist-GENERATE")

    assert (fcmp(generated_input_file, generated_input_file_solution))

    remove_files(generated_input_file)
Example #11
def test_gen_weight_table_era5_land_mask():
    """
    Checks generating weight table for ERA5 grid with land mask.
    """
    print("TEST 18: TEST GENERATE WEIGHT TABLE FOR ERA5 GRID WITH LAND MASK.")
    generated_weight_table_file = os.path.join(
        OUTPUT_DATA_PATH, "weight_mendocino_era5_land_mask.csv")

    #rapid_connect
    rapid_connect_file = os.path.join(
        COMPARE_DATA_PATH, "mendocino_nhdplus_catchment",
        "rapid_connectivity_mendocino_sample.csv")

    lsm_grid = os.path.join(LSM_INPUT_DATA_PATH, "era5_land_mask",
                            "era5_land-sea_mask_mendocino_subset.nc")

    CreateWeightTableECMWF(
        in_ecmwf_nc=lsm_grid,
        in_catchment_shapefile=os.path.join(
            GIS_INPUT_DATA_PATH, 'mendocino_nhdplus_catchment',
            'NHDCat_mendocino_watershed_hopland_sample.shp'),
        river_id="FEATUREID",
        in_connectivity_file=rapid_connect_file,
        out_weight_table=generated_weight_table_file,
        in_ecmwf_mask_var='lsm')

    generated_weight_table_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'mendocino_nhdplus_catchment',
        'weight_mendocino_era5_land_mask.csv')

    assert (compare_csv_decimal_files(generated_weight_table_file,
                                      generated_weight_table_file_solution))

    remove_files(generated_weight_table_file)
Example #12
def test_gen_weight_table_gldas2():
    """
    Checks generating weight table for GLDAS V2 grid
    """
    print("TEST 6: TEST GENERATE WEIGHT TABLE FOR GLDAS V2 GRIDS")
    generated_weight_table_file = os.path.join(OUTPUT_DATA_PATH,
                                               "weight_gldas2.csv")
    #rapid_connect
    rapid_connect_file = os.path.join(COMPARE_DATA_PATH, "x-x",
                                      "rapid_connect.csv")

    lsm_grid = os.path.join(LSM_INPUT_DATA_PATH, "gldas2",
                            "GLDAS_NOAH025_3H.A20101231.0000.020.nc4")
    CreateWeightTableLDAS(in_ldas_nc=lsm_grid,
                          in_nc_lon_var="lon",
                          in_nc_lat_var="lat",
                          in_catchment_shapefile=os.path.join(
                              GIS_INPUT_DATA_PATH, 'catchment.shp'),
                          river_id="FEATUREID",
                          in_connectivity_file=rapid_connect_file,
                          out_weight_table=generated_weight_table_file)

    generated_weight_table_file_solution = os.path.join(
        COMPARE_DATA_PATH, "x-x", "weight_gldas2.csv")
    assert (compare_csv_decimal_files(generated_weight_table_file,
                                      generated_weight_table_file_solution))

    remove_files(generated_weight_table_file)
Example #13
def test_gen_weight_table_lis():
    """
    Checks generating weight table for LIS grid
    """
    print("TEST 7: TEST GENERATE WEIGTH TABLE FOR LIS GRIDS")
    generated_weight_table_file = os.path.join(OUTPUT_DATA_PATH, 
                                               "weight_lis.csv")
    #rapid_connect
    rapid_connect_file = os.path.join(COMPARE_DATA_PATH, "u-k",
                                      "rapid_connect.csv")

    lsm_grid = os.path.join(LSM_INPUT_DATA_PATH, "lis", "LIS_HIST_201101210000.d01.nc")
    CreateWeightTableLDAS(in_ldas_nc=lsm_grid,
                          in_nc_lon_var="lon",
                          in_nc_lat_var="lat", 
                          in_catchment_shapefile=os.path.join(GIS_INPUT_DATA_PATH, 'u-k', 'CatchmentSubset.shp'), 
                          river_id="DrainLnID", 
                          in_connectivity_file=rapid_connect_file, 
                          out_weight_table=generated_weight_table_file)
                                                         
    generated_weight_table_file_solution = os.path.join(COMPARE_DATA_PATH, "u-k",
                                                        "weight_lis.csv")
    ok_(compare_csv_decimal_files(generated_weight_table_file, 
                                  generated_weight_table_file_solution))

    remove_files(generated_weight_table_file)
Example #14
def test_gen_muskingum_kfac1():
    """
    Checks generating Muskingum Kfac option 1
    """
    print("TEST 14: TEST GENERATE MUSKINGUM KFAC OPTION 1")
    generated_kfac_file = os.path.join(OUTPUT_DATA_PATH, "kfac1.csv")
    #rapid_connect
    rapid_connect_file = os.path.join(COMPARE_DATA_PATH, "x-x",
                                      "rapid_connect.csv")
    CreateMuskingumKfacFile(in_drainage_line=os.path.join(
        GIS_INPUT_DATA_PATH, 'flowline.shp'),
                            river_id="COMID",
                            length_id="LENGTHKM",
                            slope_id="Slope",
                            celerity=1000.0 / 3600.0,
                            formula_type=1,
                            in_connectivity_file=rapid_connect_file,
                            out_kfac_file=generated_kfac_file)

    #CHECK OUTPUT
    #kfac
    generated_kfac_file_solution = os.path.join(COMPARE_DATA_PATH, "x-x",
                                                "kfac1.csv")

    kfac = np.genfromtxt(generated_kfac_file, delimiter=',')
    kfac_solution = np.genfromtxt(generated_kfac_file_solution, delimiter=',')
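    # compare within a 1% relative tolerance instead of exact decimal equality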
    np.testing.assert_allclose(kfac, kfac_solution, rtol=1.0e-2)

    remove_files(generated_kfac_file)
Example #15
def test_gen_weight_table_lis_no_intersect():
    """
    Checks generating weight table for LIS grid with no intersect
    """
    print("TEST 8: TEST GENERATE WEIGHT TABLE FOR LIS GRIDS WITH NO INTERSECT")
    generated_weight_table_file = os.path.join(OUTPUT_DATA_PATH,
                                               "weight_lis_no_intersect.csv")
    #rapid_connect
    rapid_connect_file = os.path.join(GIS_INPUT_DATA_PATH, "uk-no_intersect",
                                      "rapid_connect_45390.csv")

    lsm_grid = os.path.join(LSM_INPUT_DATA_PATH, "lis",
                            "LIS_HIST_201101210000.d01.nc")
    CreateWeightTableLDAS(in_ldas_nc=lsm_grid,
                          in_nc_lon_var="lon",
                          in_nc_lat_var="lat",
                          in_catchment_shapefile=os.path.join(
                              GIS_INPUT_DATA_PATH, 'uk-no_intersect',
                              'Catchment_thames_drainID45390.shp'),
                          river_id="DrainLnID",
                          in_connectivity_file=rapid_connect_file,
                          out_weight_table=generated_weight_table_file)

    generated_weight_table_file_solution = os.path.join(
        COMPARE_DATA_PATH, "uk-no_intersect", "weight_lis_no_intersect.csv")
    assert (compare_csv_decimal_files(generated_weight_table_file,
                                      generated_weight_table_file_solution))

    remove_files(generated_weight_table_file)
Example #16
def test_gen_weight_table_era_t511_24hr():
    """
    Checks generating weight table for ERA T511 24hr grid
    """
    print("TEST 5: TEST GENERATE WEIGHT TABLE FOR ERA T511 24hr GRIDS")
    generated_weight_table_file = os.path.join(OUTPUT_DATA_PATH,
                                               "weight_era_t511.csv")
    #rapid_connect
    rapid_connect_file = os.path.join(COMPARE_DATA_PATH, "x-x",
                                      "rapid_connect.csv")

    lsm_grid = os.path.join(LSM_INPUT_DATA_PATH, "erai24",
                            "19990109_erai_runoff.grib.nc")
    CreateWeightTableECMWF(in_ecmwf_nc=lsm_grid,
                           in_catchment_shapefile=os.path.join(
                               GIS_INPUT_DATA_PATH, 'catchment.shp'),
                           river_id="FEATUREID",
                           in_connectivity_file=rapid_connect_file,
                           out_weight_table=generated_weight_table_file)

    generated_weight_table_file_solution = os.path.join(
        COMPARE_DATA_PATH, "x-x", "weight_era_t511.csv")
    assert (compare_csv_decimal_files(generated_weight_table_file,
                                      generated_weight_table_file_solution))

    remove_files(generated_weight_table_file)
Example #17
def test_gen_muskingum_kfac1():
    """
    Checks generating Muskingum Kfac option 1
    """
    print("TEST 14: TEST GENERATE MUSKINGUM KFAC OPTION 1")
    generated_kfac_file = os.path.join(OUTPUT_DATA_PATH, "kfac1.csv")
    #rapid_connect
    rapid_connect_file = os.path.join(COMPARE_DATA_PATH, "x-x",
                                      "rapid_connect.csv")
    CreateMuskingumKfacFile(in_drainage_line=os.path.join(
        GIS_INPUT_DATA_PATH, 'flowline.shp'),
                            river_id="COMID",
                            length_id="LENGTHKM",
                            slope_id="Slope",
                            celerity=1000.0 / 3600.0,
                            formula_type=1,
                            in_connectivity_file=rapid_connect_file,
                            out_kfac_file=generated_kfac_file)

    #CHECK OUTPUT
    #kfac
    generated_kfac_file_solution = os.path.join(COMPARE_DATA_PATH, "x-x",
                                                "kfac1.csv")
    assert (compare_csv_decimal_files(generated_kfac_file,
                                      generated_kfac_file_solution))
    remove_files(generated_kfac_file)
Example #18
def test_gen_weight_table_joules():
    """
    Checks generating weight table for Joules grid
    """
    print("TEST 9: TEST GENERATE WEIGHT TABLE FOR Joules GRIDS")
    generated_weight_table_file = os.path.join(OUTPUT_DATA_PATH,
                                               "weight_joules.csv")
    #rapid_connect
    rapid_connect_file = os.path.join(COMPARE_DATA_PATH, "u-k",
                                      "rapid_connect.csv")

    lsm_grid = os.path.join(LSM_INPUT_DATA_PATH, "joules",
                            "ukv_test.runoff.20080803_00.nc")
    CreateWeightTableLDAS(in_ldas_nc=lsm_grid,
                          in_nc_lon_var="east_west",
                          in_nc_lat_var="north_south",
                          in_catchment_shapefile=os.path.join(
                              GIS_INPUT_DATA_PATH, 'u-k',
                              'CatchmentSubset.shp'),
                          river_id="DrainLnID",
                          in_connectivity_file=rapid_connect_file,
                          out_weight_table=generated_weight_table_file)

    generated_weight_table_file_solution = os.path.join(
        COMPARE_DATA_PATH, "u-k", "weight_joules.csv")
    assert (compare_csv_decimal_files(generated_weight_table_file,
                                      generated_weight_table_file_solution))

    remove_files(generated_weight_table_file)
Example #19
def test_add_length_to_network_taudem():
    """
    Checks adding length to network
    """
    print("TEST 11: TEST ADD LENGTH TO NETWORK")
    td = TauDEM()

    subset_network_file = os.path.join(OUTPUT_DATA_PATH,
                                       "DrainageLineSubset2.shp")
    #to extract a specific network
    td.extractSubNetwork(
        network_file=os.path.join(GIS_INPUT_DATA_PATH, 'u-k',
                                  "DrainageLineSubset.shp"),
        out_subset_network_file=subset_network_file,
        outlet_ids=[42911],  #list of outlet ids
        river_id_field="HydroID",
        next_down_id_field="NextDownID",
        river_magnitude_field="HydroID",
        safe_mode=False,
    )

    #add length m field
    td.addLengthMeters(subset_network_file)

    #Test results
    subset_network_shapefile = ogr.Open(subset_network_file)
    subset_network_layer = subset_network_shapefile.GetLayer()

    #make sure all fields are there
    subset_network_layer_defn = subset_network_layer.GetLayerDefn()
    num_network_fields = subset_network_layer_defn.GetFieldCount()

    network_field_names = [
        'arcid', 'from_node', 'to_node', 'HydroID', 'GridID', 'NextDownID',
        'SLength', 'Avg_Slope', 'LENGTHKM', 'Shape_Leng', 'Musk_x',
        'watershed', 'subbasin', 'LENGTH_M'
    ]

    assert (num_network_fields == len(network_field_names))

    for i in range(num_network_fields):
        assert (subset_network_layer_defn.GetFieldDefn(i).GetNameRef()
                in network_field_names)

    #make sure values are OK
    length_m_list = array([
        194.440898134, 601.443392962, 1306.53179652, 1501.27444279,
        3437.46584922, 5579.56507836, 6347.04650903
    ])
    generated_list = []
    for network_feature in subset_network_layer:
        generated_list.append(network_feature.GetField('LENGTH_M'))

    assert_almost_equal(length_m_list,
                        array(sorted(generated_list)),
                        decimal=2)

    #cleanup
    remove_files(
        *glob(os.path.join(OUTPUT_DATA_PATH, "DrainageLineSubset2.*")))
Example #20
def test_weight_table_with_area_id():
    """
    Checks generating weight table with area id
    """
    print("TEST 17: TEST GENERATE WEIGHT TABLE WITH INVALID POLYGON")
    generated_weight_table_file = os.path.join(OUTPUT_DATA_PATH,
                                               "weight_area.csv")
    # rapid_connect
    rapid_connect_file = os.path.join(COMPARE_DATA_PATH, "x-x",
                                      "rapid_connect.csv")

    lsm_grid = os.path.join(LSM_INPUT_DATA_PATH, "gldas2",
                            "GLDAS_NOAH025_3H.A20101231.0000.020.nc4")
    CreateWeightTableLDAS(in_ldas_nc=lsm_grid,
                          in_nc_lon_var="lon",
                          in_nc_lat_var="lat",
                          in_catchment_shapefile=os.path.join(
                              GIS_INPUT_DATA_PATH, 'test_catchments.shp'),
                          river_id="DrainLnID",
                          area_id="Shape_Area",
                          in_connectivity_file=rapid_connect_file,
                          out_weight_table=generated_weight_table_file)

    generated_weight_table_file_solution = os.path.join(
        COMPARE_DATA_PATH, "x-x", "weight_area.csv")
    assert (compare_csv_decimal_files(generated_weight_table_file,
                                      generated_weight_table_file_solution))

    remove_files(generated_weight_table_file)
Example #21
def test_update_rapid_input_file():
    """
    Checks RAPID input file update with valid input
    """
    print("TEST 2: UPDATE NAMELIST FILE")
    rapid_manager = RAPID(rapid_executable_location=RAPID_EXE_PATH,
                          cygwin_bin_location=CYGWIN_BIN_PATH,
                          use_all_processors=True)
    rapid_manager.update_parameters(rapid_connect_file='rapid_connect.csv',
                                    Vlat_file='m3_riv.nc',
                                    riv_bas_id_file='riv_bas_id.csv',
                                    k_file='k.csv',
                                    x_file='x.csv',
                                    Qout_file='Qout.nc'
                                    )

    original_input_file = os.path.join(INPUT_DATA_PATH, 
                                      "rapid_namelist_valid")
    
    updated_input_file = os.path.join(OUTPUT_DATA_PATH, 
                                      "rapid_namelist-UPDATE")

    copy(original_input_file, updated_input_file)
    rapid_manager.update_namelist_file(updated_input_file)
    updated_input_file_solution = os.path.join(COMPARE_DATA_PATH, 
                                               "rapid_namelist-UPDATE")


    ok_(fcmp(updated_input_file, updated_input_file_solution))
    
    remove_files(updated_input_file)
Example #22
def test_update_rapid_numbers_input_file():
    """
    Checks RAPID input file update with number validation
    """
    print("TEST 4: GENERATE NUMBERS FOR NAMELIST FILE")
    rapid_manager = RAPID(
        rapid_executable_location=RAPID_EXE_PATH,
        cygwin_bin_location=CYGWIN_BIN_PATH,
        use_all_processors=True,
        rapid_connect_file=os.path.join(INPUT_DATA_PATH, 'rapid_connect.csv'),
        riv_bas_id_file=os.path.join(INPUT_DATA_PATH, 'riv_bas_id.csv'),
    )
    rapid_manager.update_reach_number_data()

    rapid_manager.update_parameters(rapid_connect_file='rapid_connect.csv',
                                    Vlat_file='m3_nasa_lis_3hr_20020830.nc',
                                    riv_bas_id_file='riv_bas_id.csv',
                                    k_file='k.csv',
                                    x_file='x.csv',
                                    Qout_file='Qout.nc')

    generated_input_file = os.path.join(OUTPUT_DATA_PATH,
                                        "rapid_namelist-GENERATE-NUMBERS")

    rapid_manager.generate_namelist_file(generated_input_file)

    generated_input_file_solution = os.path.join(
        COMPARE_DATA_PATH, "rapid_namelist-GENERATE-NUMBERS")

    assert (fcmp(generated_input_file, generated_input_file_solution))

    remove_files(generated_input_file)
Example #23
def test_goodness_of_fit():
    """
    This tests the goodness of fit functions
    """
    print("TEST 14: TEST GOODNESS OF FIT FUNCTIONS")

    reach_id_file = os.path.join(INPUT_DATA_PATH, 'obs_reach_id.csv')
    observed_file = os.path.join(INPUT_DATA_PATH, 'obs_flow.csv')
    #using CF-compliant file
    cf_input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                      'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_out_analysis_file = os.path.join(
        OUTPUT_DATA_PATH, 'cf_goodness_of_fit_results-daily.csv')
    find_goodness_of_fit(cf_input_qout_file,
                         reach_id_file,
                         observed_file,
                         cf_out_analysis_file,
                         daily=True)

    cf_goodness_of_fit_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_goodness_of_fit_analysis.csv')
    ok_(
        compare_csv_decimal_files(cf_out_analysis_file,
                                  cf_goodness_of_fit_file_solution))
    #using original RAPID file
    raw_goodness_of_fit_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'raw_goodness_of_fit_analysis.csv')
    original_input_qout_file = os.path.join(
        COMPARE_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_original.nc')
    original_out_analysis_file = os.path.join(
        OUTPUT_DATA_PATH, 'original_goodness_of_fit_results-daily.csv')
    find_goodness_of_fit(original_input_qout_file,
                         reach_id_file,
                         observed_file,
                         original_out_analysis_file,
                         steps_per_group=8)
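    # 8 three-hourly steps per group aggregates the 3-hour series to daily values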

    ok_(
        compare_csv_decimal_files(original_out_analysis_file,
                                  raw_goodness_of_fit_file_solution))

    #using new RAPID file
    new_input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                       'Qout_nasa_lis_3hr_20020830.nc')
    new_out_analysis_file = os.path.join(OUTPUT_DATA_PATH,
                                         'goodness_of_fit_results-daily.csv')
    find_goodness_of_fit(new_input_qout_file,
                         reach_id_file,
                         observed_file,
                         new_out_analysis_file,
                         steps_per_group=8)

    ok_(
        compare_csv_decimal_files(new_out_analysis_file,
                                  raw_goodness_of_fit_file_solution))

    remove_files(cf_out_analysis_file, original_out_analysis_file,
                 new_out_analysis_file)
Example #24
    def test_run_era_interim_inflow(self):
        """
        Checks generating inflow file from ERA Interim LSM
        """
        print("TEST 1: TEST GENERATE INFLOW FILE FROM ERA INTERIM DATA")
        
        rapid_input_path, rapid_output_path = self._setup_automated("x-x")

        #run main process    
        run_lsm_rapid_process(
            rapid_executable_location=self.RAPID_EXE_PATH,
            cygwin_bin_location=self.CYGWIN_BIN_PATH,
            rapid_io_files_location=self.OUTPUT_DATA_PATH,
            lsm_data_location=os.path.join(self.LSM_INPUT_DATA_PATH, 'erai3'), 
            simulation_start_datetime=datetime(1980, 1, 1),
            simulation_end_datetime=datetime(2014, 1, 31),
            generate_rapid_namelist_file=False,
            run_rapid_simulation=True,
            generate_return_periods_file=False,
            generate_seasonal_initialization_file=False,
            generate_initialization_file=True,
            use_all_processors=True,
        )
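        # the simulation window here is deliberately wide; the run is assumed to
        # be bounded by the LSM files actually present in 'erai3' (note the 2003
        # dates in the output file names below)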
        
        #CHECK OUTPUT    
        #m3_riv
        m3_file_name = "m3_riv_bas_erai_t511_3hr_20030121to20030122.nc"
        generated_m3_file = os.path.join(rapid_output_path, m3_file_name)
        generated_m3_file_solution = os.path.join(self.INFLOW_COMPARE_DATA_PATH, m3_file_name)
        self._compare_m3(generated_m3_file, generated_m3_file_solution)
        
        #qout file
        qout_file_name = "Qout_erai_t511_3hr_20030121to20030122.nc"
        generated_qout_file = os.path.join(rapid_output_path, qout_file_name)
        generated_qout_file_solution = os.path.join(self.INFLOW_COMPARE_DATA_PATH, qout_file_name)
        d1 = Dataset(generated_qout_file)
        d2 = Dataset(generated_qout_file_solution)
        assert_almost_equal(d1.variables['Qout'][:], d2.variables['Qout'][:], decimal=5)
        ok_((d1.variables['rivid'][:] == d2.variables['rivid'][:]).all())
        ok_((d1.variables['time'][:] == d2.variables['time'][:]).all())
        if 'lat' in d2.variables.keys():
            ok_((d1.variables['lat'][:] == d2.variables['lat'][:]).all())
        if 'lon' in d2.variables.keys():
            ok_((d1.variables['lon'][:] == d2.variables['lon'][:]).all())
        d1.close()
        d2.close()
                                                     
        #initialization file
        qinit_file_name = "qinit_erai_t511_3hr_20030121to20030122.csv"
        generated_qinit_file = os.path.join(rapid_input_path, qinit_file_name)
        generated_qinit_file_solution = os.path.join(self.INFLOW_COMPARE_DATA_PATH, qinit_file_name)
    
        ok_(compare_csv_decimal_files(generated_qinit_file, generated_qinit_file_solution))
        
        #additional cleanup
        remove_files(generated_qinit_file)
Example #25
def test_goodness_of_fit():
    """
    This tests the goodness of fit functions
    """
    print("TEST 14: TEST GOODNESS OF FIT FUNCTIONS")

    reach_id_file = os.path.join(INPUT_DATA_PATH, 'obs_reach_id.csv')
    observed_file = os.path.join(INPUT_DATA_PATH, 'obs_flow.csv')
    #using CF-compliant file
    cf_input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                      'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_out_analysis_file = os.path.join(
        OUTPUT_DATA_PATH, 'cf_goodness_of_fit_results-daily.csv')
    find_goodness_of_fit(cf_input_qout_file,
                         reach_id_file,
                         observed_file,
                         cf_out_analysis_file,
                         daily=True)

    cf_goodness_of_fit_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_goodness_of_fit_analysis.csv')
    assert (compare_csv_decimal_files(cf_out_analysis_file,
                                      cf_goodness_of_fit_file_solution))

    reach_id_file = os.path.join(INPUT_DATA_PATH, 'obs_reach_id_1.csv')
    observed_file = os.path.join(INPUT_DATA_PATH, 'obs_flow_1.csv')
    #using CF-compliant file single input
    cf_out_analysis_file_1 = os.path.join(
        OUTPUT_DATA_PATH, 'cf_goodness_of_fit_results_1-daily.csv')
    find_goodness_of_fit(cf_input_qout_file,
                         reach_id_file,
                         observed_file,
                         cf_out_analysis_file_1,
                         daily=True)

    cf_goodness_of_fit_file_solution_1 = os.path.join(
        COMPARE_DATA_PATH, 'cf_goodness_of_fit_analysis_1.csv')
    assert (compare_csv_decimal_files(cf_out_analysis_file_1,
                                      cf_goodness_of_fit_file_solution_1))

    observed_simulated_file = os.path.join(COMPARE_DATA_PATH,
                                           'goodness_of_fit_obs_sim.csv')
    goodness_obs_sim_solution = os.path.join(OUTPUT_DATA_PATH,
                                             'goodness_of_fit_obs_sim.txt')
    # test print goodness of fit to file
    find_goodness_of_fit_csv(observed_simulated_file,
                             out_file=goodness_obs_sim_solution)
    goodness_obs_sim = os.path.join(COMPARE_DATA_PATH,
                                    'goodness_of_fit_obs_sim.txt')
    assert (fcmp(goodness_obs_sim, goodness_obs_sim_solution))
    # test print goodness of fit to console
    find_goodness_of_fit_csv(observed_simulated_file)

    remove_files(cf_out_analysis_file, cf_out_analysis_file_1)
Example #26
def test_convert_file_to_be_cf_compliant_new_format_comid_lat_lon_z():
    """
    Test Convert RAPID Output to be CF Compliant for new format with COMID_LAT_LON_Z
    """
    print(
        "TEST 8: TEST CONVERT RAPID OUTPUT TO CF COMPLIANT (COMID_LAT_LON_Z)")

    input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                   'Qout_nasa_lis_3hr_20020830.nc')
    temp_qout_file = os.path.join(
        OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_test_cf_lat_lon_z.nc')
    copy(input_qout_file, temp_qout_file)

    rapid_manager = RAPID(rapid_executable_location=RAPID_EXE_PATH,
                          cygwin_bin_location=CYGWIN_BIN_PATH,
                          Qout_file=temp_qout_file,
                          rapid_connect_file=os.path.join(
                              INPUT_DATA_PATH, 'rapid_connect.csv'),
                          ZS_TauR=3 * 3600)
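    # make_output_cf_compliant (below) rewrites the Qout file in place, adding a
    # CF time variable, lat/lon from comid_lat_lon_z.csv, and a crs variable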

    rapid_manager.make_output_cf_compliant(
        simulation_start_datetime=datetime(2002, 8, 30),
        comid_lat_lon_z_file=os.path.join(INPUT_DATA_PATH,
                                          'comid_lat_lon_z.csv'),
        project_name=
        "ERA Interim (T511 Grid) 3 Hourly Runoff Based Historical flows by US Army ERDC"
    )

    cf_qout_file_solution = os.path.join(COMPARE_DATA_PATH,
                                         'Qout_nasa_lis_3hr_20020830_CF.nc')

    #check Qout
    assert (compare_qout_files(temp_qout_file, cf_qout_file_solution))

    #check other info in netcdf file
    d1 = Dataset(temp_qout_file)
    d2 = Dataset(cf_qout_file_solution)
    # MPG: new dimensions have been introduced in RAPID. We only test for those
    # included in the original benchmarks.
    for dim in ['time', 'rivid']:
        assert (dim in d1.dimensions.keys())
    # MPG: new variables have been introduced in RAPID. We only test for those
    # included in the original benchmarks.
    for v in [u'Qout', u'rivid', u'time', u'lon', u'lat', u'crs']:
        assert (v in d1.variables.keys())
    assert ((d1.variables['time'][:] == d2.variables['time'][:]).all())
    assert ((d1.variables['rivid'][:] == d2.variables['rivid'][:]).all())
    assert ((d1.variables['lat'][:] == d2.variables['lat'][:]).all())
    assert ((d1.variables['lon'][:] == d2.variables['lon'][:]).all())
    d1.close()
    d2.close()

    remove_files(temp_qout_file)
Example #27
def test_generate_qinit_file():
    """
    This tests the qinit file function to create an input qinit file for RAPID
    """
    print("TEST 11: TEST GENERATE QINIT FILE")
    rapid_manager = RAPID(rapid_executable_location=RAPID_EXE_PATH,
                          cygwin_bin_location=CYGWIN_BIN_PATH,
                          rapid_connect_file=os.path.join(
                              INPUT_DATA_PATH, 'rapid_connect.csv'))

    #test with original rapid output
    input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                   'Qout_nasa_lis_3hr_20020830.nc')
    original_qout_file = os.path.join(OUTPUT_DATA_PATH,
                                      'Qout_nasa_lis_3hr_20020830.nc')
    copy(input_qout_file, original_qout_file)

    qinit_original_rapid_qout = os.path.join(OUTPUT_DATA_PATH,
                                             'qinit_original_rapid_qout.csv')
    rapid_manager.update_parameters(Qout_file=original_qout_file)
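    # no time_index given: the last time step of the Qout series is assumed to
    # seed the qinit values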
    rapid_manager.generate_qinit_from_past_qout(
        qinit_file=qinit_original_rapid_qout)

    qinit_original_rapid_qout_solution = os.path.join(
        COMPARE_DATA_PATH, 'qinit_original_rapid_qout.csv')
    ok_(
        compare_csv_decimal_files(qinit_original_rapid_qout,
                                  qinit_original_rapid_qout_solution,
                                  header=False))

    #test with CF rapid output and alternate time index
    cf_input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                      'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_qout_file = os.path.join(OUTPUT_DATA_PATH,
                                'Qout_nasa_lis_3hr_20020830_CF.nc')
    copy(cf_input_qout_file, cf_qout_file)

    qinit_cf_rapid_qout = os.path.join(OUTPUT_DATA_PATH,
                                       'qinit_cf_rapid_qout.csv')
    rapid_manager.update_parameters(Qout_file=cf_qout_file)
    rapid_manager.generate_qinit_from_past_qout(qinit_file=qinit_cf_rapid_qout,
                                                time_index=5)

    qinit_cf_rapid_qout_solution = os.path.join(COMPARE_DATA_PATH,
                                                'qinit_cf_rapid_qout.csv')
    ok_(
        compare_csv_decimal_files(qinit_cf_rapid_qout,
                                  qinit_cf_rapid_qout_solution,
                                  header=False))

    remove_files(original_qout_file, qinit_original_rapid_qout, cf_qout_file,
                 qinit_cf_rapid_qout)
Example #28
def test_add_length_to_network_taudem():
    """
    Checks adding length to network
    """
    print("TEST 11: TEST ADD LENGTH TO NETWORK")
    td = TauDEM()
    
    subset_network_file = os.path.join(OUTPUT_DATA_PATH, "DrainageLineSubset2.shp")
    #to extract a specific network
    td.extractSubNetwork(network_file=os.path.join(GIS_INPUT_DATA_PATH, 'u-k', "DrainageLineSubset.shp"),
                         out_subset_network_file=subset_network_file,
                         outlet_ids=[42911], #list of outlet ids
                         river_id_field="HydroID",
                         next_down_id_field="NextDownID",
                         river_magnitude_field="HydroID",
                         safe_mode=False,
                         )
    
    #add length m field
    td.addLengthMeters(subset_network_file)                                 
                                                         
    #Test results
    subset_network_shapefile = ogr.Open(subset_network_file)
    subset_network_layer = subset_network_shapefile.GetLayer()

    #make sure all fields are there
    subset_network_layer_defn = subset_network_layer.GetLayerDefn()
    num_network_fields = subset_network_layer_defn.GetFieldCount()

    network_field_names = ['arcid', 'from_node', 'to_node', 'HydroID', 'GridID',
                           'NextDownID', 'SLength', 'Avg_Slope', 'LENGTHKM',
                           'Shape_Leng', 'Musk_x', 'watershed', 'subbasin', 'LENGTH_M']
                           
    ok_(num_network_fields == len(network_field_names))

    for i in range(num_network_fields):
        ok_(subset_network_layer_defn.GetFieldDefn(i).GetNameRef() in network_field_names)


    #make sure values are OK
    length_m_list = array([194.440898134, 601.443392962, 1306.53179652, 1501.27444279,  
                           3437.46584922, 5579.56507836, 6347.04650903])
    generated_list = []
    for network_feature in subset_network_layer:
        generated_list.append(network_feature.GetField('LENGTH_M'))
        
    assert_almost_equal(length_m_list, array(sorted(generated_list)), decimal=2)
    
    #cleanup
    remove_files(*glob(os.path.join(OUTPUT_DATA_PATH, "DrainageLineSubset2.*")))
Example #29
def test_run_rapid_simulation():
    """
    Test Running RAPID Simulation
    """

    print("TEST 7: TEST RUNNING RAPID SIMULATION")
    generated_qout_file = os.path.join(
        OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_generated.nc')

    rapid_manager = RAPID(
        rapid_executable_location=RAPID_EXE_PATH,
        cygwin_bin_location=CYGWIN_BIN_PATH,
        num_processors=1,
        rapid_connect_file=os.path.join(INPUT_DATA_PATH, 'rapid_connect.csv'),
        riv_bas_id_file=os.path.join(INPUT_DATA_PATH, 'riv_bas_id.csv'),
        Vlat_file=os.path.join(INPUT_DATA_PATH, 'm3_nasa_lis_3hr_20020830.nc'),
        k_file=os.path.join(INPUT_DATA_PATH, 'k.csv'),
        x_file=os.path.join(INPUT_DATA_PATH, 'x.csv'),
        ZS_dtM=10800,
        ZS_dtR=900,
        ZS_TauM=2 * 86400,
        ZS_TauR=10800,
        Qout_file=generated_qout_file)
    rapid_manager.update_reach_number_data()
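    # run() writes a namelist from the current parameters and invokes the RAPID executable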
    rapid_manager.run()

    generated_qout_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'Qout_nasa_lis_3hr_20020830.nc')

    #check Qout
    assert (compare_qout_files(generated_qout_file,
                               generated_qout_file_solution))

    #check other info in netcdf file
    d1 = Dataset(generated_qout_file)
    d2 = Dataset(generated_qout_file_solution)
    # MPG: new dimensions have been introduced in RAPID. We only test for those
    # included in the original benchmarks.
    for dim in ['time', 'rivid']:
        assert (dim in d1.dimensions.keys())
    # MPG: new variables have been introduced in RAPID. We only test for those
    # included in the original benchmarks.
    for v in [u'Qout', u'rivid', u'time', u'lon', u'lat', u'crs']:
        assert (v in d1.variables.keys())
    assert ((d1.variables['rivid'][:] == d2.variables['rivid'][:]).all())
    d1.close()
    d2.close()

    remove_files(generated_qout_file)
Example #30
def test_convert_file_to_be_cf_compliant_original_format():
    """
    Test Convert RAPID Output to be CF Compliant for original format
    """
    print(
        "TEST 10: TEST CONVERT RAPID OUTPUT TO CF COMPLIANT - ORIGINAL (COMID_LAT_LON_Z)"
    )

    input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                   'Qout_nasa_lis_3hr_20020830_original.nc')
    temp_qout_file = os.path.join(
        OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_original_test_cf.nc')
    copy(input_qout_file, temp_qout_file)

    rapid_manager = RAPID(rapid_executable_location=RAPID_EXE_PATH,
                          cygwin_bin_location=CYGWIN_BIN_PATH,
                          Qout_file=temp_qout_file,
                          rapid_connect_file=os.path.join(
                              INPUT_DATA_PATH, 'rapid_connect.csv'),
                          ZS_TauR=3 * 3600)

    rapid_manager.make_output_CF_compliant(
        simulation_start_datetime=datetime(2002, 8, 30),
        comid_lat_lon_z_file=os.path.join(INPUT_DATA_PATH,
                                          'comid_lat_lon_z.csv'),
        project_name=
        "ERA Interim (T511 Grid) 3 Hourly Runoff Based Historical flows by US Army ERDC"
    )

    cf_qout_file_solution = os.path.join(COMPARE_DATA_PATH,
                                         'Qout_nasa_lis_3hr_20020830_CF.nc')

    #check Qout
    assert (compare_qout_files(temp_qout_file, cf_qout_file_solution))

    #check other info in netcdf file
    d1 = Dataset(temp_qout_file)
    d2 = Dataset(cf_qout_file_solution)
    assert (d1.dimensions.keys() == d2.dimensions.keys())
    assert (d1.variables.keys() == d2.variables.keys())
    assert ((d1.variables['time'][:] == d2.variables['time'][:]).all())
    assert ((d1.variables['rivid'][:] == d2.variables['rivid'][:]).all())
    assert ((d1.variables['lat'][:] == d2.variables['lat'][:]).all())
    assert ((d1.variables['lon'][:] == d2.variables['lon'][:]).all())
    d1.close()
    d2.close()

    remove_files(temp_qout_file)
Example #31
def test_goodness_of_fit():
    """
    This tests the goodness of fit functions
    """
    print("TEST 14: TEST GOODNESS OF FIT FUNCTIONS")

    reach_id_file = os.path.join(INPUT_DATA_PATH, 'obs_reach_id.csv') 
    observed_file = os.path.join(INPUT_DATA_PATH, 'obs_flow.csv') 
    #using CF-compliant file
    cf_input_qout_file = os.path.join(COMPARE_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_out_analysis_file = os.path.join(OUTPUT_DATA_PATH, 'cf_goodness_of_fit_results-daily.csv') 
    find_goodness_of_fit(cf_input_qout_file, reach_id_file, observed_file,
                         cf_out_analysis_file, daily=True)

    cf_goodness_of_fit_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_goodness_of_fit_analysis.csv') 
    ok_(compare_csv_decimal_files(cf_out_analysis_file, cf_goodness_of_fit_file_solution))
    #using original RAPID file
    raw_goodness_of_fit_file_solution = os.path.join(COMPARE_DATA_PATH, 'raw_goodness_of_fit_analysis.csv') 
    original_input_qout_file = os.path.join(COMPARE_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_original.nc')
    original_out_analysis_file = os.path.join(OUTPUT_DATA_PATH, 'original_goodness_of_fit_results-daily.csv') 
    find_goodness_of_fit(original_input_qout_file, reach_id_file, observed_file,
                         original_out_analysis_file, steps_per_group=8)

    ok_(compare_csv_decimal_files(original_out_analysis_file, raw_goodness_of_fit_file_solution))

    #using new RAPID file
    new_input_qout_file = os.path.join(COMPARE_DATA_PATH, 'Qout_nasa_lis_3hr_20020830.nc')
    new_out_analysis_file = os.path.join(OUTPUT_DATA_PATH, 'goodness_of_fit_results-daily.csv') 
    find_goodness_of_fit(new_input_qout_file, reach_id_file, observed_file,
                         new_out_analysis_file, steps_per_group=8)

    ok_(compare_csv_decimal_files(new_out_analysis_file, raw_goodness_of_fit_file_solution))

    reach_id_file = os.path.join(INPUT_DATA_PATH, 'obs_reach_id_1.csv') 
    observed_file = os.path.join(INPUT_DATA_PATH, 'obs_flow_1.csv') 
    #using CF-compliant file single input
    cf_out_analysis_file_1 = os.path.join(OUTPUT_DATA_PATH, 'cf_goodness_of_fit_results_1-daily.csv') 
    find_goodness_of_fit(cf_input_qout_file, reach_id_file, observed_file,
                         cf_out_analysis_file_1, daily=True)

    cf_goodness_of_fit_file_solution_1 = os.path.join(COMPARE_DATA_PATH, 'cf_goodness_of_fit_analysis_1.csv') 
    ok_(compare_csv_decimal_files(cf_out_analysis_file_1, cf_goodness_of_fit_file_solution_1))

    remove_files(cf_out_analysis_file,
                 original_out_analysis_file,
                 new_out_analysis_file)
Example #32
def test_gen_static_nhd_connect_rapid_input():
    """
    Checks generating static NHDPlus connect RAPID input
    """
    print("TEST 2: TEST GENERATE STATIC NHDPlus CONNECT RAPID INPUT DATA")
    generated_rapid_connect_file = os.path.join(OUTPUT_DATA_PATH, 
                                                "rapid_connect_nhd.csv")
    CreateNetworkConnectivityNHDPlus(in_drainage_line=os.path.join(GIS_INPUT_DATA_PATH, 'flowline.shp'),
                                     out_connectivity_file=generated_rapid_connect_file)
    #rapid_connect
    generated_rapid_connect_file_solution = os.path.join(COMPARE_DATA_PATH, "x-x",
                                                         "rapid_connect.csv")
                                                         
    ok_(compare_csv_decimal_files(generated_rapid_connect_file, 
                                  generated_rapid_connect_file_solution))

    remove_files(generated_rapid_connect_file)
Example #33
def test_cf_merge():
    """
    This tests merging two qout files
    """
    print("TEST 15: TEST MERGE QOUT")

    orig_qout_1 = os.path.join(INPUT_DATA_PATH, 'Qout_merge_3hr.nc')
    orig_qout_2 = os.path.join(INPUT_DATA_PATH, 'Qout_merge_6hr.nc')

    qout_1 = os.path.join(OUTPUT_DATA_PATH, 'Qout_merge_3hr.nc')
    qout_2 = os.path.join(OUTPUT_DATA_PATH, 'Qout_merge_6hr.nc')

    copy(orig_qout_1, qout_1)
    copy(orig_qout_2, qout_2)
    #Merge all files together at the end
    cv = ConvertRAPIDOutputToCF(
        rapid_output_file=[qout_1, qout_2],
        start_datetime=datetime(2016, 2, 12),
        time_step=[3 * 3600, 6 * 3600],
        qinit_file="",
        comid_lat_lon_z_file="",
        rapid_connect_file="",
        project_name="ECMWF-RAPID Predicted flows by US Army ERDC",
        output_id_dim_name='rivid',
        output_flow_var_name='Qout',
        print_debug=False)
    cv.convert()
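    # after convert(), qout_1 holds the merged, CF-compliant series (compared below)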

    cf_merge_qout_file_solution = os.path.join(COMPARE_DATA_PATH,
                                               'Qout_merge.nc')

    #check Qout
    assert (compare_qout_files(qout_1, cf_merge_qout_file_solution))

    #check other info in netcdf file
    d1 = Dataset(qout_1)
    d2 = Dataset(cf_merge_qout_file_solution)
    assert (d1.dimensions.keys() == d2.dimensions.keys())
    assert (d1.variables.keys() == d2.variables.keys())
    assert ((d1.variables['time'][:] == d2.variables['time'][:]).all())
    assert ((d1.variables['rivid'][:] == d2.variables['rivid'][:]).all())
    d1.close()
    d2.close()

    remove_files(qout_1, qout_2)
Example #34
def test_cf_merge():
    """
    This tests merging two qout files
    """
    print("TEST 15: TEST MERGE QOUT")

    orig_qout_1 = os.path.join(INPUT_DATA_PATH, 'Qout_merge_3hr.nc')
    orig_qout_2 = os.path.join(INPUT_DATA_PATH, 'Qout_merge_6hr.nc')

    qout_1 = os.path.join(OUTPUT_DATA_PATH, 'Qout_merge_3hr.nc')
    qout_2 = os.path.join(OUTPUT_DATA_PATH, 'Qout_merge_6hr.nc')

    copy(orig_qout_1, qout_1)
    copy(orig_qout_2, qout_2)
    #Merge all files together at the end
    cv = ConvertRAPIDOutputToCF(rapid_output_file=[qout_1, qout_2], 
                                start_datetime=datetime(2016, 2, 12), 
                                time_step=[3*3600, 6*3600], 
                                qinit_file="", 
                                comid_lat_lon_z_file="",
                                rapid_connect_file="", 
                                project_name="ECMWF-RAPID Predicted flows by US Army ERDC", 
                                output_id_dim_name='rivid',
                                output_flow_var_name='Qout',
                                print_debug=False)
    cv.convert()

    cf_merge_qout_file_solution = os.path.join(COMPARE_DATA_PATH,
                                               'Qout_merge.nc')

    #check Qout    
    ok_(compare_qout_files(qout_1, cf_merge_qout_file_solution))

    #check other info in netcdf file
    d1 = Dataset(qout_1)
    d2 = Dataset(cf_merge_qout_file_solution)
    ok_(d1.dimensions.keys() == d2.dimensions.keys())
    ok_(d1.variables.keys() == d2.variables.keys())
    ok_((d1.variables['time'][:] == d2.variables['time'][:]).all())
    ok_((d1.variables['rivid'][:] == d2.variables['rivid'][:]).all())
    d1.close()
    d2.close()
    
    remove_files(qout_1,
                 qout_2)
Example #36
def test_generate_network_taudem_dinf():
    """
    Checks generate TauDEM network dinf
    """
    print("TEST 13: TEST GENERATE TauDEM NETWORK DINF")
    TAUDEM_EXE_PATH = os.path.join(MAIN_TESTS_FOLDER,
                                   "..", "..", "TauDEM")
    td = TauDEM(TAUDEM_EXE_PATH)
    
    elevation_dem = os.path.join(GIS_INPUT_DATA_PATH, 'jamaica_dem.tif')

    td.demToStreamNetwork(OUTPUT_DATA_PATH,
                          pit_filled_elevation_grid=elevation_dem,
                          threshold=1000,
                          use_dinf=True)
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'flow_dir_grid_d8.tif')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'flow_dir_grid_d8.prj')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'flow_dir_grid_dinf.tif')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'flow_dir_grid_dinf.prj')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'contributing_area_grid_d8.tif')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'contributing_area_grid_d8.prj')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'contributing_area_grid_dinf.tif')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'contributing_area_grid_dinf.prj')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'slope_grid_d8.tif')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'slope_grid_d8.prj')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'slope_grid_dinf.tif')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'slope_grid_dinf.prj')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'stream_raster_grid.tif')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'stream_raster_grid.prj')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'stream_order_grid.tif')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'stream_order_grid.prj')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'network_connectivity_tree.txt')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'network_coordinates.txt')))
#    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'stream_reach_file.shp')))
#    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'stream_reach_file.shx')))
#    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'stream_reach_file.dbf')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'stream_reach_file.prj')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'watershed_grid.tif')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'watershed_grid.prj')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'watershed_shapefile.shp')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'watershed_shapefile.shx')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'watershed_shapefile.dbf')))
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, 'watershed_shapefile.prj')))
    #cleanup
    remove_files(*[f for f in glob(os.path.join(OUTPUT_DATA_PATH,"*")) if not f.endswith(".gitignore")])
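
A refactoring note rather than new behavior: the long run of existence checks in Example #36 collapses naturally into a list plus a loop (file names copied verbatim from the assertions above, behavior unchanged):

expected_outputs = [
    'flow_dir_grid_d8.tif', 'flow_dir_grid_d8.prj',
    'flow_dir_grid_dinf.tif', 'flow_dir_grid_dinf.prj',
    'contributing_area_grid_d8.tif', 'contributing_area_grid_d8.prj',
    'contributing_area_grid_dinf.tif', 'contributing_area_grid_dinf.prj',
    'slope_grid_d8.tif', 'slope_grid_d8.prj',
    'slope_grid_dinf.tif', 'slope_grid_dinf.prj',
    'stream_raster_grid.tif', 'stream_raster_grid.prj',
    'stream_order_grid.tif', 'stream_order_grid.prj',
    'network_connectivity_tree.txt', 'network_coordinates.txt',
    'stream_reach_file.prj',
    'watershed_grid.tif', 'watershed_grid.prj',
    'watershed_shapefile.shp', 'watershed_shapefile.shx',
    'watershed_shapefile.dbf', 'watershed_shapefile.prj',
]
for file_name in expected_outputs:
    ok_(os.path.exists(os.path.join(OUTPUT_DATA_PATH, file_name)))
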
Example #37
def test_gen_muskingum_x_drainage():
    """
    Checks generating Muskingum X from drainage line
    """
    print("TEST 15: TEST GENERATE MUSKINGUM X FROM DRAINAGE LINE")
    generated_x_file = os.path.join(OUTPUT_DATA_PATH, "x_drain.csv")

    CreateMuskingumXFileFromDranageLine(in_drainage_line=os.path.join(
        GIS_INPUT_DATA_PATH, 'u-k', "DrainageLineSubset.shp"),
                                        x_id="Musk_x",
                                        out_x_file=generated_x_file)

    #CHECK OUTPUT
    generated_x_file_solution = os.path.join(COMPARE_DATA_PATH, "u-k",
                                             "x_drain.csv")
    assert (compare_csv_decimal_files(generated_x_file,
                                      generated_x_file_solution))
    remove_files(generated_x_file)
Example #38
def test_gen_muskingum_x_drainage():
    """
    Checks generating Muskingum X from drainage line
    """
    print("TEST 15: TEST GENERATE MUSKINGUM X FROM DRAINAGE LINE")
    generated_x_file = os.path.join(OUTPUT_DATA_PATH, 
                                    "x_drain.csv")
                                    
    CreateMuskingumXFileFromDranageLine(in_drainage_line=os.path.join(GIS_INPUT_DATA_PATH, 'u-k', "DrainageLineSubset.shp"),
                                        x_id="Musk_x",
                                        out_x_file=generated_x_file)
                            
    #CHECK OUTPUT   
    generated_x_file_solution = os.path.join(COMPARE_DATA_PATH, "u-k",
                                             "x_drain.csv")
    ok_(compare_csv_decimal_files(generated_x_file, 
                                  generated_x_file_solution))
    remove_files(generated_x_file)
Example #39
def test_update_rapid_numbers_forcing_input_file():
    """
    Checks RAPID input file update with forcing data and number validation
    """
    rapid_manager = RAPID(rapid_executable_location=RAPID_EXE_PATH,
                          cygwin_bin_location=CYGWIN_BIN_PATH,
                          use_all_processors=True,
                          rapid_connect_file=os.path.join(INPUT_DATA_PATH,
                                                          'rapid_connect.csv'),
                          riv_bas_id_file=os.path.join(INPUT_DATA_PATH,
                                                       'riv_bas_id.csv'),
                          for_tot_id_file=os.path.join(INPUT_DATA_PATH,
                                                       'for_tot_id.csv'),
                          for_use_id_file=os.path.join(INPUT_DATA_PATH,
                                                       'for_use_id.csv'),
                          ZS_dtF=3 * 60 * 60,
                          BS_opt_for=True
                          )

    rapid_manager.update_reach_number_data()

    rapid_manager.update_parameters(rapid_connect_file='rapid_connect.csv',
                                    Vlat_file='m3_nasa_lis_3hr_20020830.nc',
                                    riv_bas_id_file='riv_bas_id.csv',
                                    k_file='k.csv',
                                    x_file='x.csv',
                                    Qout_file='Qout.nc',
                                    Qfor_file='qfor.csv',
                                    for_tot_id_file='for_tot_id.csv',
                                    for_use_id_file='for_use_id.csv',
                                    )

    generated_input_file = os.path.join(OUTPUT_DATA_PATH,
                                      "rapid_namelist-GENERATE-NUMBERS-FORCING")

    rapid_manager.generate_namelist_file(generated_input_file)

    generated_input_file_solution = os.path.join(COMPARE_DATA_PATH,
                                                 "rapid_namelist-GENERATE-NUMBERS-FORCING")


    assert (fcmp(generated_input_file, generated_input_file_solution))

    remove_files(generated_input_file)
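
A reading aid for the numeric ZS_* options used here and in Example #40 below; these options are durations in seconds, and the interpretations are assumptions based on how the same names are commented elsewhere in these snippets:

ZS_TauR = 10800        #3 h: time step of the runoff (Vlat) input data
ZS_dtR = 900           #15 min: internal routing time step
ZS_TauM = 2 * 86400    #2 days: total simulation length
ZS_dtM = 10800         #3 h: input time step
ZS_dtF = 3 * 60 * 60   #3 h: forcing time step, paired with BS_opt_for=True above
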
Example #40
def test_run_rapid_simulation():
    """
    Test Running RAPID Simulation
    """
    
    print("TEST 7: TEST RUNNING RAPID SIMULATION")
    generated_qout_file = os.path.join(OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_generated.nc')



    rapid_manager = RAPID(rapid_executable_location=RAPID_EXE_PATH,
                          cygwin_bin_location=CYGWIN_BIN_PATH,
                          num_processors=1,
                          rapid_connect_file=os.path.join(INPUT_DATA_PATH, 'rapid_connect.csv'),
                          riv_bas_id_file=os.path.join(INPUT_DATA_PATH, 'riv_bas_id.csv'),
                          Vlat_file=os.path.join(INPUT_DATA_PATH, 'm3_nasa_lis_3hr_20020830.nc'),
                          k_file=os.path.join(INPUT_DATA_PATH, 'k.csv'),
                          x_file=os.path.join(INPUT_DATA_PATH, 'x.csv'),
                          ZS_dtM=10800,
                          ZS_dtR=900,
                          ZS_TauM=2*86400,
                          ZS_TauR=10800,
                          Qout_file=generated_qout_file
                         )
    rapid_manager.update_reach_number_data()
    rapid_manager.run()

    generated_qout_file_solution = os.path.join(COMPARE_DATA_PATH,
                                                'Qout_nasa_lis_3hr_20020830.nc')

    #check Qout    
    ok_(compare_qout_files(generated_qout_file, generated_qout_file_solution))

    #check other info in netcdf file
    d1 = Dataset(generated_qout_file)
    d2 = Dataset(generated_qout_file_solution)
    ok_(d1.dimensions.keys() == d2.dimensions.keys())
    ok_(d1.variables.keys() == d2.variables.keys())
    ok_((d1.variables['rivid'][:] == d2.variables['rivid'][:]).all())
    d1.close()
    d2.close()

    remove_files(generated_qout_file)
Example #41
def test_run_rapid_simulation():
    """
    Test Running RAPID Simulation
    """

    print("TEST 7: TEST RUNNING RAPID SIMULATION")
    generated_qout_file = os.path.join(
        OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_generated.nc')

    rapid_manager = RAPID(
        rapid_executable_location=RAPID_EXE_PATH,
        cygwin_bin_location=CYGWIN_BIN_PATH,
        num_processors=1,
        rapid_connect_file=os.path.join(INPUT_DATA_PATH, 'rapid_connect.csv'),
        riv_bas_id_file=os.path.join(INPUT_DATA_PATH, 'riv_bas_id.csv'),
        Vlat_file=os.path.join(INPUT_DATA_PATH, 'm3_nasa_lis_3hr_20020830.nc'),
        k_file=os.path.join(INPUT_DATA_PATH, 'k.csv'),
        x_file=os.path.join(INPUT_DATA_PATH, 'x.csv'),
        ZS_dtM=10800,
        ZS_dtR=900,
        ZS_TauM=2 * 86400,
        ZS_TauR=10800,
        Qout_file=generated_qout_file)
    rapid_manager.update_reach_number_data()
    rapid_manager.run()

    generated_qout_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'Qout_nasa_lis_3hr_20020830.nc')

    #check Qout
    ok_(compare_qout_files(generated_qout_file, generated_qout_file_solution))

    #check other info in netcdf file
    d1 = Dataset(generated_qout_file)
    d2 = Dataset(generated_qout_file_solution)
    ok_(d1.dimensions.keys() == d2.dimensions.keys())
    ok_(d1.variables.keys() == d2.variables.keys())
    ok_((d1.variables['rivid'][:] == d2.variables['rivid'][:]).all())
    d1.close()
    d2.close()

    remove_files(generated_qout_file)
Example #42
def test_generate_qinit_file():
    """
    This tests the qinit file function to create an input qinit file for RAPID
    """
    print("TEST 11: TEST GENERATE QINIT FILE")
    rapid_manager = RAPID(rapid_executable_location=RAPID_EXE_PATH,
                          cygwin_bin_location=CYGWIN_BIN_PATH,
                          rapid_connect_file=os.path.join(INPUT_DATA_PATH, 'rapid_connect.csv')
                         )

    #test with original rapid output
    input_qout_file = os.path.join(COMPARE_DATA_PATH, 'Qout_nasa_lis_3hr_20020830.nc')
    original_qout_file = os.path.join(OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830.nc')
    copy(input_qout_file, original_qout_file)

    qinit_original_rapid_qout = os.path.join(OUTPUT_DATA_PATH, 'qinit_original_rapid_qout.csv')
    rapid_manager.update_parameters(Qout_file=original_qout_file)
    rapid_manager.generate_qinit_from_past_qout(qinit_file=qinit_original_rapid_qout)
    
    qinit_original_rapid_qout_solution = os.path.join(COMPARE_DATA_PATH, 'qinit_original_rapid_qout.csv')
    ok_(compare_csv_decimal_files(qinit_original_rapid_qout, qinit_original_rapid_qout_solution, header=False))

    #test with CF rapid output and alternate time index
    cf_input_qout_file = os.path.join(COMPARE_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_qout_file = os.path.join(OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_CF.nc')
    copy(cf_input_qout_file, cf_qout_file)

    qinit_cf_rapid_qout = os.path.join(OUTPUT_DATA_PATH, 'qinit_cf_rapid_qout.csv')
    rapid_manager.update_parameters(Qout_file=cf_qout_file)
    rapid_manager.generate_qinit_from_past_qout(qinit_file=qinit_cf_rapid_qout,
                                                time_index=5)
                                                
    qinit_cf_rapid_qout_solution = os.path.join(COMPARE_DATA_PATH, 'qinit_cf_rapid_qout.csv')
    ok_(compare_csv_decimal_files(qinit_cf_rapid_qout, qinit_cf_rapid_qout_solution, header=False))

    remove_files(original_qout_file, 
                 qinit_original_rapid_qout,
                 cf_qout_file,
                 qinit_cf_rapid_qout
                 )
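
generate_qinit_from_past_qout is shown here only through its call site. As a hedged illustration of the underlying idea (not RAPIDpy's actual implementation), taking one time slice of Qout and writing it as a column looks like this; the (time, rivid) layout and the last-step default are assumptions:

import numpy as np
from netCDF4 import Dataset

def write_qinit_sketch(qout_file, qinit_csv, time_index=-1):
    """Illustrative only: dump the flows at one time step as a one-column CSV.
    The real helper presumably also aligns row order with rapid_connect.csv,
    which is why the RAPID manager above is built with rapid_connect_file set."""
    with Dataset(qout_file) as nc:
        flows = np.asarray(nc.variables['Qout'][time_index, :])  #assumes (time, rivid)
    np.savetxt(qinit_csv, flows, fmt='%.6f')
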
Example #43
def test_convert_file_to_be_cf_compliant_original_format():
    """
    Test Convert RAPID Output to be CF Compliant for original format
    """
    print("TEST 10: TEST CONVERT RAPID OUTPUT TO CF COMPLIANT - ORIGINAL (COMID_LAT_LON_Z)")

    input_qout_file = os.path.join(COMPARE_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_original.nc')
    temp_qout_file = os.path.join(OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_original_test_cf.nc')
    copy(input_qout_file, temp_qout_file)
    
    rapid_manager = RAPID(rapid_executable_location=RAPID_EXE_PATH,
                          cygwin_bin_location=CYGWIN_BIN_PATH,
                          Qout_file=temp_qout_file,
                          rapid_connect_file=os.path.join(INPUT_DATA_PATH, 'rapid_connect.csv'),
                          ZS_TauR=3*3600)

    rapid_manager.make_output_CF_compliant(simulation_start_datetime=datetime(2002, 8, 30),
                                           comid_lat_lon_z_file=os.path.join(INPUT_DATA_PATH, 'comid_lat_lon_z.csv'),
                                           project_name="ERA Interim (T511 Grid) 3 Hourly Runoff Based Historical flows by US Army ERDC")

    cf_qout_file_solution = os.path.join(COMPARE_DATA_PATH,
                                         'Qout_nasa_lis_3hr_20020830_CF.nc')

    #check Qout    
    ok_(compare_qout_files(temp_qout_file, cf_qout_file_solution))

    #check other info in netcdf file
    d1 = Dataset(temp_qout_file)
    d2 = Dataset(cf_qout_file_solution)
    ok_(d1.dimensions.keys() == d2.dimensions.keys())
    ok_(d1.variables.keys() == d2.variables.keys())
    ok_((d1.variables['time'][:] == d2.variables['time'][:]).all())
    ok_((d1.variables['rivid'][:] == d2.variables['rivid'][:]).all())
    ok_((d1.variables['lat'][:] == d2.variables['lat'][:]).all())
    ok_((d1.variables['lon'][:] == d2.variables['lon'][:]).all())
    d1.close()
    d2.close()
    
    remove_files(temp_qout_file)
Example #44
def test_gen_weight_table_era_t511_24hr():
    """
    Checks generating weight table for ERA T511 24hr grid
    """
    print("TEST 5: TEST GENERATE WEIGTH TABLE FOR ERA T511 24hr GRIDS")
    generated_weight_table_file = os.path.join(OUTPUT_DATA_PATH, 
                                               "weight_era_t511.csv")
    #rapid_connect
    rapid_connect_file = os.path.join(COMPARE_DATA_PATH,"x-x",
                                      "rapid_connect.csv")

    lsm_grid = os.path.join(LSM_INPUT_DATA_PATH, "erai24", "19990109_erai_runoff.grib.nc")
    CreateWeightTableECMWF(in_ecmwf_nc=lsm_grid, 
                           in_catchment_shapefile=os.path.join(GIS_INPUT_DATA_PATH, 'catchment.shp'), 
                           river_id="FEATUREID", 
                           in_connectivity_file=rapid_connect_file, 
                           out_weight_table=generated_weight_table_file)
                                                         
    generated_weight_table_file_solution = os.path.join(COMPARE_DATA_PATH, "x-x",
                                                        "weight_era_t511.csv")
    ok_(compare_csv_decimal_files(generated_weight_table_file, 
                                  generated_weight_table_file_solution))

    remove_files(generated_weight_table_file)
Example #45
def test_gen_weight_table_lis_land_mask():
    """
    Checks generating weight table for LIS grid with land mask.
    """
    print("TEST 20: TEST GENERATE WEIGHT TABLE FOR LIS GRID WITH LAND MASK.")
    generated_weight_table_file = os.path.join(
        OUTPUT_DATA_PATH, "weight_lis_land_fraction_mendocino_subset.csv")

    #rapid_connect
    rapid_connect_file = os.path.join(
        COMPARE_DATA_PATH, "mendocino_nhdplus_catchment",
        "rapid_connectivity_mendocino_sample.csv")

    lsm_grid = os.path.join(LSM_INPUT_DATA_PATH, "lis_land_mask",
                            "lisglobalmask557ww_mendocino_subset.nc")

    CreateWeightTableLDAS(in_ldas_nc=lsm_grid,
                          in_nc_lon_var='lon',
                          in_nc_lat_var='lat',
                          in_catchment_shapefile=os.path.join(
                              GIS_INPUT_DATA_PATH,
                              'mendocino_nhdplus_catchment',
                              'NHDCat_mendocino_watershed_hopland_sample.shp'),
                          river_id='FEATUREID',
                          in_connectivity_file=rapid_connect_file,
                          out_weight_table=generated_weight_table_file,
                          in_land_area_fraction_var='LANDMASK')

    generated_weight_table_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'mendocino_nhdplus_catchment',
        'weight_lis_land_fraction_mendocino_subset.csv')

    assert (compare_csv_decimal_files(generated_weight_table_file,
                                      generated_weight_table_file_solution))

    remove_files(generated_weight_table_file)
Example #46
def test_gen_weight_table_era_t255():
    """
    Checks generating weight table for ERA T255 grid
    """
    print("TEST 4: TEST GENERATE WEIGTH TABLE FOR ERA T255 GRIDS")
    generated_weight_table_file = os.path.join(OUTPUT_DATA_PATH, 
                                               "weight_era_t255.csv")
    #rapid_connect
    rapid_connect_file = os.path.join(COMPARE_DATA_PATH, "x-x",
                                      "rapid_connect.csv")

    lsm_grid = os.path.join(LSM_INPUT_DATA_PATH, "erai3t255", "era_interim_runoff_20140820.nc")
    CreateWeightTableECMWF(in_ecmwf_nc=lsm_grid, 
                           in_catchment_shapefile=os.path.join(GIS_INPUT_DATA_PATH, 'catchment.shp'), 
                           river_id="FEATUREID", 
                           in_connectivity_file=rapid_connect_file, 
                           out_weight_table=generated_weight_table_file)
                                                         
    generated_weight_table_file_solution = os.path.join(COMPARE_DATA_PATH, "x-x",
                                                        "weight_era_t255.csv")
    ok_(compare_csv_decimal_files(generated_weight_table_file, 
                                  generated_weight_table_file_solution))

    remove_files(generated_weight_table_file)
Example #47
def test_extract_timeseries_to_gssha_xys():
    """
    This tests extracting a timeseries from RAPID Qout file to GSSHA xys file
    """
    print("TEST 16: TEST EXTRACT TIMESERIES FROM Qout file to GSSHA xys file")
    
    cf_input_qout_file = os.path.join(COMPARE_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_qout_file = os.path.join(OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_CF.nc')
    copy(cf_input_qout_file, cf_qout_file)

    #if file is CF compliant, you can write out daily average
    cf_timeseries_daily_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_daily.xys')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_xys(cf_timeseries_daily_file,
                                                     series_name="RAPID_TO_GSSHA",
                                                     series_id=25,
                                                     river_index=20,
                                                     daily=True)

    cf_timeseries_daily_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_daily.xys')    
    ok_(compare_csv_timeseries_files(cf_timeseries_daily_file, cf_timeseries_daily_file_solution))
    
    #if file is CF compliant, check write out timeseries
    cf_timeseries_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries.xys')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_xys(cf_timeseries_file,
                                                     series_name="RAPID_TO_GSSHA",
                                                     series_id=25,
                                                     river_index=20)

    cf_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries.xys')    
    ok_(compare_csv_timeseries_files(cf_timeseries_file, cf_timeseries_file_solution, header=True))

    #if file is CF compliant, you can write out daily average, filter by date, and use max mode
    cf_timeseries_daily_date_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_daily_date.xys')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_xys(cf_timeseries_daily_date_file,
                                                     series_name="RAPID_TO_GSSHA",
                                                     series_id=25,
                                                     river_id=75224,
                                                     date_search_start=datetime(2002, 8, 31),
                                                     date_search_end=datetime(2002, 8, 31, 23, 59, 59),
                                                     daily=True,
                                                     mode='max')

    cf_timeseries_daily_date_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_daily_date.xys')    
    ok_(compare_csv_timeseries_files(cf_timeseries_daily_date_file, cf_timeseries_daily_date_file_solution))
    
    #if file is CF compliant, check write out timeseries and filter by date
    cf_timeseries_date_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_date.xys')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_xys(cf_timeseries_date_file,
                                                     series_name="RAPID_TO_GSSHA",
                                                     series_id=25,
                                                     date_search_start=datetime(2002, 8, 31),
                                                     #date_search_end=None,
                                                     river_id=75224)

    cf_timeseries_date_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_date.xys')    
    ok_(compare_csv_timeseries_files(cf_timeseries_date_file, cf_timeseries_date_file_solution))

    remove_files(cf_timeseries_file,
                 cf_qout_file,
                 cf_timeseries_daily_file,
                 cf_timeseries_daily_date_file,
                 cf_timeseries_date_file,
                 )
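
These calls address rivers in two ways: river_index (the position within the file) and river_id (the value stored in the 'rivid' variable). A small sketch of the mapping, using only calls already shown; note that whether index 20 corresponds to reach 75224 in this dataset is not established by the snippets:

import numpy as np
from netCDF4 import Dataset

def river_index_for_id(qout_file, river_id):
    """Return the position of river_id within the 'rivid' variable
    (assumes the id is present exactly once)."""
    with Dataset(qout_file) as nc:
        rivids = np.asarray(nc.variables['rivid'][:])
    matches = np.where(rivids == river_id)[0]
    if matches.size != 1:
        raise ValueError("river_id %s not found exactly once" % river_id)
    return int(matches[0])
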
Example #48
def test_extract_sub_network_taudem():
    """
    Checks extracting sub network from larger network
    """
    print("TEST 10: TEST EXTRACTING SUB NETWORK FROM LARGER NETWORK")
    td = TauDEM()
    
    subset_network_file = os.path.join(OUTPUT_DATA_PATH, "DrainageLineSubset2.shp")
    #to extract a specific network
    td.extractSubNetwork(network_file=os.path.join(GIS_INPUT_DATA_PATH, 'u-k', "DrainageLineSubset.shp"),
                         out_subset_network_file=subset_network_file,
                         outlet_ids=[42911], #list of outlet ids
                         river_id_field="HydroID",
                         next_down_id_field="NextDownID",
                         river_magnitude_field="HydroID",
                         safe_mode=False,
                         )
    
    #to extract the subset watersheds using subset river network
    subset_watershed_file = os.path.join(OUTPUT_DATA_PATH,"CatchmentSubset2.shp")
    td.extractSubsetFromWatershed(subset_network_file=subset_network_file,
                                  subset_network_river_id_field="HydroID",
                                  watershed_file=os.path.join(GIS_INPUT_DATA_PATH, 'u-k', 'CatchmentSubset.shp'),
                                  watershed_network_river_id_field="DrainLnID",
                                  out_watershed_subset_file=subset_watershed_file)
                                  
                                  
    #Test results
    subset_network_shapefile = ogr.Open(subset_network_file)
    subset_network_layer = subset_network_shapefile.GetLayer()

    ogr_watershed_shapefile = ogr.Open(subset_watershed_file)
    ogr_watershed_shapefile_lyr = ogr_watershed_shapefile.GetLayer()

    number_of_network_features = subset_network_layer.GetFeatureCount()
    number_of_watershed_features = ogr_watershed_shapefile_lyr.GetFeatureCount()
    
    #count number of features
    ok_(number_of_network_features==7)
    ok_(number_of_watershed_features==7)
    
    #make sure IDs correct
    network_id_list = [42911,42891,42747,42748,42892,42841,42846]    
    for feature_idx, network_feature in enumerate(subset_network_layer):
        ok_(network_feature.GetField("HydroID") in network_id_list)
    for feature_idx, watershed_feature in enumerate(ogr_watershed_shapefile_lyr):
        ok_(watershed_feature.GetField("DrainLnID") in network_id_list)
     
    #make sure all fields are there
     
    #TEST WATERSHED
    subset_watershed_layer_defn = ogr_watershed_shapefile_lyr.GetLayerDefn()
    num_watershed_fields = subset_watershed_layer_defn.GetFieldCount()

    watershed_field_names = ['Shape_Leng','Shape_Area','HydroID','GridID','DrainLnID']
    ok_(num_watershed_fields==len(watershed_field_names))    
    for i in range(num_watershed_fields):
        ok_(subset_watershed_layer_defn.GetFieldDefn(i).GetNameRef() in watershed_field_names)
          
    #TEST NETWORK                                         
    subset_network_layer_defn = subset_network_layer.GetLayerDefn()
    num_network_fields = subset_network_layer_defn.GetFieldCount()

    network_field_names = ['arcid','from_node','to_node','HydroID','GridID',
                           'NextDownID','SLength','Avg_Slope','LENGTHKM',
                           'Shape_Leng','Musk_x','watershed','subbasin']
                           
    ok_(num_network_fields==len(network_field_names))    
    
    for i in range(num_network_fields):
        ok_(subset_network_layer_defn.GetFieldDefn(i).GetNameRef() in network_field_names)
    
    #cleanup
    remove_files(*glob(os.path.join(OUTPUT_DATA_PATH,"DrainageLineSubset2.*")))
    remove_files(*glob(os.path.join(OUTPUT_DATA_PATH,"CatchmentSubset2.*")))
Example #49
def test_extract_timeseries_to_gssha_ihg_tzinfo():
    """
    This tests extracting a timeseries from RAPID Qout file to GSSHA ihg file
    with different time zone output
    """
    print("TEST 17: TEST EXTRACT TIMESERIES FROM Qout file to GSSHA ihg file tzinfo")
    
    CENTRAL_TZ = timezone('US/Central')
    
    cf_input_qout_file = os.path.join(COMPARE_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_qout_file = os.path.join(OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_CF.nc')
    copy(cf_input_qout_file, cf_qout_file)


    #if file is CF compliant, you can write out daily average
    connection_list_file = os.path.join(INPUT_DATA_PATH, 'rapid_gssha_connect_file3.csv')
    cf_timeseries_daily_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_daily_tz.ihg')

    with RAPIDDataset(cf_qout_file, out_tzinfo=CENTRAL_TZ) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(cf_timeseries_daily_file,
                                                     connection_list_file=connection_list_file,
                                                     daily=True)

    cf_timeseries_daily_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_daily_tz.ihg')    
    ok_(compare_csv_timeseries_files(cf_timeseries_daily_file, cf_timeseries_daily_file_solution, header=False))
    
    #if file is CF compliant, check write out timeseries
    connection_list_file = os.path.join(INPUT_DATA_PATH, 'rapid_gssha_connect_file1.csv')
    cf_timeseries_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_tz.ihg')
    with RAPIDDataset(cf_qout_file, out_tzinfo=CENTRAL_TZ) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(cf_timeseries_file,
                                                     connection_list_file=connection_list_file,
                                                     )

    cf_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_tz.ihg')    
    ok_(compare_csv_timeseries_files(cf_timeseries_file, cf_timeseries_file_solution, header=False))

    #if file is CF compliant, you can write out daily average, filter by date, and use max mode
    connection_list_file = os.path.join(INPUT_DATA_PATH, 'rapid_gssha_connect_file1.csv')
    cf_timeseries_daily_date_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_daily_date_tz.ihg')

    with RAPIDDataset(cf_qout_file, out_tzinfo=CENTRAL_TZ) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(cf_timeseries_daily_date_file,
                                                     connection_list_file=connection_list_file,
                                                     date_search_start=datetime(2002, 8, 31),
                                                     date_search_end=datetime(2002, 8, 31, 23, 59, 59),
                                                     daily=True,
                                                     mode='max')

    cf_timeseries_daily_date_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_daily_date_tz.ihg')    
    ok_(compare_csv_timeseries_files(cf_timeseries_daily_date_file, cf_timeseries_daily_date_file_solution, header=False))
    
    #if file is CF compliant, check write out timeseries and filter by date
    connection_list_file = os.path.join(INPUT_DATA_PATH, 'rapid_gssha_connect_file3.csv')
    cf_timeseries_date_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_date_tz.ihg')
    with RAPIDDataset(cf_qout_file, out_tzinfo=CENTRAL_TZ) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(cf_timeseries_date_file,
                                                     connection_list_file=connection_list_file,
                                                     date_search_start=datetime(2002, 8, 31),
                                                     )

    cf_timeseries_date_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_date_tz.ihg')    
    ok_(compare_csv_timeseries_files(cf_timeseries_date_file, cf_timeseries_date_file_solution, header=False))

    remove_files(cf_timeseries_file,
                 cf_qout_file,
                 cf_timeseries_daily_file,
                 cf_timeseries_daily_date_file,
                 cf_timeseries_date_file,
                 )
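
out_tzinfo shifts the exported timestamps out of the dataset's native time (assumed UTC here) into the given zone. The conversion that pytz performs, shown in isolation:

from datetime import datetime
from pytz import timezone, utc

CENTRAL_TZ = timezone('US/Central')
utc_time = utc.localize(datetime(2002, 8, 31, 12, 0))  #a stored timestamp, assumed UTC
local_time = utc_time.astimezone(CENTRAL_TZ)           #what an out_tzinfo export would carry
print(local_time.strftime('%Y-%m-%d %H:%M %Z'))        #2002-08-31 07:00 CDT
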
Example #50
def test_gen_static_rapid_input():
    """
    Checks generating static RAPID input
    """
    print("TEST 1: TEST GENERATE STATIC RAPID INPUT DATA")
    CreateAllStaticECMWFRAPIDFiles(in_drainage_line=os.path.join(GIS_INPUT_DATA_PATH, 'flowline.shp'),
                                   river_id="COMID",
                                   length_id="LENGTHKM",
                                   slope_id="Slope",
                                   next_down_id="NextDownID",
                                   in_catchment=os.path.join(GIS_INPUT_DATA_PATH, 'catchment.shp'),
                                   catchment_river_id="FEATUREID",
                                   rapid_output_folder=OUTPUT_DATA_PATH,
                                   kfac_length_units="km",
                                   )
    
    #CHECK OUTPUT   
    #comid_lat_lon_z
    generated_comid_lat_lon_z_file = os.path.join(OUTPUT_DATA_PATH, 
                                                  "comid_lat_lon_z.csv")
    generated_comid_lat_lon_z_file_solution = os.path.join(COMPARE_DATA_PATH, "x-x",
                                                           "comid_lat_lon_z.csv")
    ok_(compare_csv_decimal_files(generated_comid_lat_lon_z_file, 
                                  generated_comid_lat_lon_z_file_solution))

    #rapid_connect
    generated_rapid_connect_file = os.path.join(OUTPUT_DATA_PATH, 
                                                "rapid_connect.csv")
    generated_rapid_connect_file_solution = os.path.join(COMPARE_DATA_PATH, "x-x",
                                                         "rapid_connect.csv")
    ok_(compare_csv_decimal_files(generated_rapid_connect_file, 
                                  generated_rapid_connect_file_solution))

    #riv_bas_id
    generated_riv_bas_id_file = os.path.join(OUTPUT_DATA_PATH, 
                                             "riv_bas_id.csv")
    generated_riv_bas_id_file_solution = os.path.join(COMPARE_DATA_PATH, "x-x",
                                                      "riv_bas_id.csv")
    ok_(compare_csv_decimal_files(generated_riv_bas_id_file, 
                                  generated_riv_bas_id_file_solution))

    #kfac
    generated_kfac_file = os.path.join(OUTPUT_DATA_PATH, 
                                       "kfac.csv")
    generated_kfac_file_solution = os.path.join(COMPARE_DATA_PATH, "x-x",
                                                "kfac.csv")
    ok_(compare_csv_decimal_files(generated_kfac_file, 
                                  generated_kfac_file_solution))
    
    #k
    generated_k_file = os.path.join(OUTPUT_DATA_PATH, 
                                    "k.csv")
    generated_k_file_solution = os.path.join(COMPARE_DATA_PATH, "x-x",
                                             "k.csv")
    ok_(compare_csv_decimal_files(generated_k_file, 
                                  generated_k_file_solution))

    #x
    generated_x_file = os.path.join(OUTPUT_DATA_PATH, 
                                    "x.csv")
    generated_x_file_solution = os.path.join(COMPARE_DATA_PATH, "x-x",
                                             "x.csv")
    ok_(compare_csv_decimal_files(generated_x_file, 
                                  generated_x_file_solution))

    #weight_ecmwf_t1279
    generated_weight_ecmwf_t1279_file = os.path.join(OUTPUT_DATA_PATH, 
                                                     "weight_ecmwf_t1279.csv")
    generated_weight_ecmwf_t1279_file_solution = os.path.join(COMPARE_DATA_PATH, "x-x",
                                                              "weight_ecmwf_t1279.csv")
    ok_(compare_csv_decimal_files(generated_weight_ecmwf_t1279_file, 
                                  generated_weight_ecmwf_t1279_file_solution))

    #weight_ecmwf_tco639
    generated_weight_ecmwf_tco639_file = os.path.join(OUTPUT_DATA_PATH, 
                                                      "weight_ecmwf_tco639.csv")
    generated_weight_ecmwf_tco639_file_solution = os.path.join(COMPARE_DATA_PATH, "x-x",
                                                               "weight_ecmwf_tco639.csv")
    ok_(compare_csv_decimal_files(generated_weight_ecmwf_tco639_file, 
                                  generated_weight_ecmwf_tco639_file_solution))

    #weight_era_t511
    generated_weight_era_t511_file = os.path.join(OUTPUT_DATA_PATH, 
                                                  "weight_era_t511.csv")
    generated_weight_era_t511_file_solution = os.path.join(COMPARE_DATA_PATH, "x-x",
                                                           "weight_era_t511.csv")
    ok_(compare_csv_decimal_files(generated_weight_era_t511_file, 
                                  generated_weight_era_t511_file_solution))

    remove_files(generated_comid_lat_lon_z_file,
                 generated_rapid_connect_file,
                 generated_riv_bas_id_file,
                 generated_kfac_file,
                 generated_k_file,
                 generated_x_file,
                 generated_weight_ecmwf_t1279_file,
                 generated_weight_ecmwf_tco639_file,
                 generated_weight_era_t511_file)
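
The nine compare-and-collect stanzas in Example #50 follow one pattern; the same checks expressed as data plus a loop (file names copied verbatim from the assertions above, behavior unchanged):

static_files = ["comid_lat_lon_z.csv", "rapid_connect.csv", "riv_bas_id.csv",
                "kfac.csv", "k.csv", "x.csv", "weight_ecmwf_t1279.csv",
                "weight_ecmwf_tco639.csv", "weight_era_t511.csv"]
generated_files = [os.path.join(OUTPUT_DATA_PATH, name) for name in static_files]
for generated_file, name in zip(generated_files, static_files):
    ok_(compare_csv_decimal_files(generated_file,
                                  os.path.join(COMPARE_DATA_PATH, "x-x", name)))
remove_files(*generated_files)
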
Example #51
def test_extract_timeseries():
    """
    This tests extracting a timeseries from RAPID Qout file
    """
    print("TEST 13: TEST EXTRACT TIMESERIES FROM QINIT FILE")

    #for writing entire time series to file from new rapid output
    input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                   'Qout_nasa_lis_3hr_20020830.nc')
    new_qout_file = os.path.join(OUTPUT_DATA_PATH,
                                 'Qout_nasa_lis_3hr_20020830.nc')
    copy(input_qout_file, new_qout_file)
    new_timeseries_file = os.path.join(OUTPUT_DATA_PATH,
                                       'new_timeseries_file.csv')

    with RAPIDDataset(new_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(new_timeseries_file, river_id=75224)

        if qout_nc.is_time_variable_valid():
            original_timeseries_file_solution = os.path.join(
                COMPARE_DATA_PATH, 'original_timeseries.csv')
        else:
            original_timeseries_file_solution = os.path.join(
                COMPARE_DATA_PATH, 'original_timeseries-notime.csv')

    assert (compare_csv_timeseries_files(new_timeseries_file,
                                         original_timeseries_file_solution,
                                         header=False))

    #for writing entire time series to file from original rapid output
    input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                   'Qout_nasa_lis_3hr_20020830_original.nc')
    original_qout_file = os.path.join(
        OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_original.nc')
    copy(input_qout_file, original_qout_file)
    original_timeseries_file = os.path.join(OUTPUT_DATA_PATH,
                                            'original_timeseries.csv')

    with RAPIDDataset(original_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(original_timeseries_file, river_id=75224)
    original_timeseries_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'original_timeseries-notime.csv')

    assert (compare_csv_timeseries_files(original_timeseries_file,
                                         original_timeseries_file_solution,
                                         header=False))

    #if file is CF compliant, you can write out daily average
    cf_input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                      'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_qout_file = os.path.join(OUTPUT_DATA_PATH,
                                'Qout_nasa_lis_3hr_20020830_CF.nc')
    copy(cf_input_qout_file, cf_qout_file)
    cf_timeseries_daily_file = os.path.join(OUTPUT_DATA_PATH,
                                            'cf_timeseries_daily.csv')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(cf_timeseries_daily_file,
                                   river_index=20,
                                   daily=True)

    cf_timeseries_daily_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_timeseries_daily.csv')
    assert (compare_csv_timeseries_files(cf_timeseries_daily_file,
                                         cf_timeseries_daily_file_solution,
                                         header=False))

    #if file is CF compliant, check write out timeseries
    cf_timeseries_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries.csv')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(cf_timeseries_file, river_index=20)

    cf_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH,
                                               'cf_timeseries.csv')
    assert (compare_csv_timeseries_files(cf_timeseries_file,
                                         cf_timeseries_file_solution,
                                         header=False))

    #if file is CF compliant, you can write out daily average, filter by date, and use max mode
    cf_timeseries_daily_date_file = os.path.join(
        OUTPUT_DATA_PATH, 'cf_timeseries_daily_date.csv')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(cf_timeseries_daily_date_file,
                                   river_id=75224,
                                   date_search_start=datetime(2002, 8, 31),
                                   date_search_end=datetime(
                                       2002, 8, 31, 23, 59, 59),
                                   daily=True,
                                   mode='max')

    cf_timeseries_daily_date_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_timeseries_daily_date.csv')
    assert (compare_csv_timeseries_files(
        cf_timeseries_daily_date_file,
        cf_timeseries_daily_date_file_solution,
        header=False))

    #if file is CF compliant, check write out timeseries and filter by date
    cf_timeseries_date_file = os.path.join(OUTPUT_DATA_PATH,
                                           'cf_timeseries_date.csv')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(
            cf_timeseries_date_file,
            date_search_start=datetime(2002, 8, 31),
            #date_search_end=None,
            river_id=75224)

    cf_timeseries_date_file_solution = os.path.join(COMPARE_DATA_PATH,
                                                    'cf_timeseries_date.csv')
    assert (compare_csv_timeseries_files(cf_timeseries_date_file,
                                         cf_timeseries_date_file_solution,
                                         header=False))

    remove_files(
        new_timeseries_file,
        new_qout_file,
        original_timeseries_file,
        original_qout_file,
        cf_timeseries_file,
        cf_timeseries_date_file,
        cf_timeseries_daily_file,
        cf_timeseries_daily_date_file,
        cf_qout_file,
    )
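
daily=True in write_flows_to_csv produces one value per calendar day. A hedged sketch of the grouping idea with plain numpy and netCDF4 (RAPIDpy's own implementation may differ, e.g. in time zone handling or the choice of mean versus max):

import numpy as np
from netCDF4 import Dataset, num2date

def daily_means_sketch(qout_file, river_index):
    """Group one river's flows by calendar day and average them (illustration only)."""
    with Dataset(qout_file) as nc:
        time_var = nc.variables['time']
        stamps = num2date(time_var[:], time_var.units)
        flows = np.asarray(nc.variables['Qout'][:, river_index])  #assumes (time, rivid)
    daily = {}
    for stamp, flow in zip(stamps, flows):
        day = (stamp.year, stamp.month, stamp.day)
        daily.setdefault(day, []).append(flow)
    return {day: float(np.mean(values)) for day, values in sorted(daily.items())}
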
Example #52
def test_extract_timeseries_to_gssha_xys():
    """
    This tests extracting a timeseries from RAPID Qout file to GSSHA xys file
    """
    print("TEST 16: TEST EXTRACT TIMESERIES FROM Qout file to GSSHA xys file")

    cf_input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                      'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_qout_file = os.path.join(OUTPUT_DATA_PATH,
                                'Qout_nasa_lis_3hr_20020830_CF.nc')
    copy(cf_input_qout_file, cf_qout_file)

    #if file is CF compliant, you can write out daily average
    cf_timeseries_daily_file = os.path.join(OUTPUT_DATA_PATH,
                                            'cf_timeseries_daily.xys')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_xys(
            cf_timeseries_daily_file,
            series_name="RAPID_TO_GSSHA",
            series_id=25,
            river_index=20,
            daily=True)

    cf_timeseries_daily_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_timeseries_daily.xys')
    assert (compare_csv_timeseries_files(cf_timeseries_daily_file,
                                         cf_timeseries_daily_file_solution))

    #if file is CF compliant, check write out timeseries
    cf_timeseries_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries.xys')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_xys(
            cf_timeseries_file,
            series_name="RAPID_TO_GSSHA",
            series_id=25,
            river_index=20)

    cf_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH,
                                               'cf_timeseries.xys')
    assert (compare_csv_timeseries_files(cf_timeseries_file,
                                         cf_timeseries_file_solution,
                                         header=True))

    #if file is CF compliant, you can write out daily average, filter by date, and use max mode
    cf_timeseries_daily_date_file = os.path.join(
        OUTPUT_DATA_PATH, 'cf_timeseries_daily_date.xys')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_xys(
            cf_timeseries_daily_date_file,
            series_name="RAPID_TO_GSSHA",
            series_id=25,
            river_id=75224,
            date_search_start=datetime(2002, 8, 31),
            date_search_end=datetime(2002, 8, 31, 23, 59, 59),
            daily=True,
            mode='max')

    cf_timeseries_daily_date_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_timeseries_daily_date.xys')
    assert (compare_csv_timeseries_files(
        cf_timeseries_daily_date_file, cf_timeseries_daily_date_file_solution))

    #if file is CF compliant, check write out timeseries and filter by date
    cf_timeseries_date_file = os.path.join(OUTPUT_DATA_PATH,
                                           'cf_timeseries_date.xys')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_xys(
            cf_timeseries_date_file,
            series_name="RAPID_TO_GSSHA",
            series_id=25,
            date_search_start=datetime(2002, 8, 31),
            #date_search_end=None,
            river_id=75224)

    cf_timeseries_date_file_solution = os.path.join(COMPARE_DATA_PATH,
                                                    'cf_timeseries_date.xys')
    assert (compare_csv_timeseries_files(cf_timeseries_date_file,
                                         cf_timeseries_date_file_solution))

    remove_files(
        cf_timeseries_file,
        cf_qout_file,
        cf_timeseries_daily_file,
        cf_timeseries_daily_date_file,
        cf_timeseries_date_file,
    )
Example #53
def test_extract_timeseries_to_gssha_ihg_tzinfo():
    """
    This tests extracting a timeseries from RAPID Qout file to GSSHA ihg file
    with different time zone output
    """
    print(
        "TEST 17: TEST EXTRACT TIMESERIES FROM Qout file to GSSHA ihg file tzinfo"
    )

    CENTRAL_TZ = timezone('US/Central')

    cf_input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                      'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_qout_file = os.path.join(OUTPUT_DATA_PATH,
                                'Qout_nasa_lis_3hr_20020830_CF.nc')
    copy(cf_input_qout_file, cf_qout_file)

    #if file is CF compliant, you can write out daily average
    connection_list_file = os.path.join(INPUT_DATA_PATH,
                                        'rapid_gssha_connect_file3.csv')
    cf_timeseries_daily_file = os.path.join(OUTPUT_DATA_PATH,
                                            'cf_timeseries_daily_tz.ihg')

    with RAPIDDataset(cf_qout_file, out_tzinfo=CENTRAL_TZ) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(
            cf_timeseries_daily_file,
            connection_list_file=connection_list_file,
            daily=True)

    cf_timeseries_daily_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_timeseries_daily_tz.ihg')
    assert (compare_csv_timeseries_files(cf_timeseries_daily_file,
                                         cf_timeseries_daily_file_solution,
                                         header=False))

    #if file is CF compliant, check write out timeseries
    connection_list_file = os.path.join(INPUT_DATA_PATH,
                                        'rapid_gssha_connect_file1.csv')
    cf_timeseries_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_tz.ihg')
    with RAPIDDataset(cf_qout_file, out_tzinfo=CENTRAL_TZ) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(
            cf_timeseries_file,
            connection_list_file=connection_list_file,
        )

    cf_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH,
                                               'cf_timeseries_tz.ihg')
    assert (compare_csv_timeseries_files(cf_timeseries_file,
                                         cf_timeseries_file_solution,
                                         header=False))

    #if file is CF compliant, you can write out daily average, filter by date, and use max mode
    connection_list_file = os.path.join(INPUT_DATA_PATH,
                                        'rapid_gssha_connect_file1.csv')
    cf_timeseries_daily_date_file = os.path.join(
        OUTPUT_DATA_PATH, 'cf_timeseries_daily_date_tz.ihg')

    with RAPIDDataset(cf_qout_file, out_tzinfo=CENTRAL_TZ) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(
            cf_timeseries_daily_date_file,
            connection_list_file=connection_list_file,
            date_search_start=datetime(2002, 8, 31),
            date_search_end=datetime(2002, 8, 31, 23, 59, 59),
            daily=True,
            mode='max')

    cf_timeseries_daily_date_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_timeseries_daily_date_tz.ihg')
    assert (compare_csv_timeseries_files(
        cf_timeseries_daily_date_file,
        cf_timeseries_daily_date_file_solution,
        header=False))

    #if file is CF compliant, check write out timeseries and filter by date
    connection_list_file = os.path.join(INPUT_DATA_PATH,
                                        'rapid_gssha_connect_file3.csv')
    cf_timeseries_date_file = os.path.join(OUTPUT_DATA_PATH,
                                           'cf_timeseries_date_tz.ihg')
    with RAPIDDataset(cf_qout_file, out_tzinfo=CENTRAL_TZ) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(
            cf_timeseries_date_file,
            connection_list_file=connection_list_file,
            date_search_start=datetime(2002, 8, 31),
        )

    cf_timeseries_date_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_timeseries_date_tz.ihg')
    assert (compare_csv_timeseries_files(cf_timeseries_date_file,
                                         cf_timeseries_date_file_solution,
                                         header=False))

    remove_files(
        cf_timeseries_file,
        cf_qout_file,
        cf_timeseries_daily_file,
        cf_timeseries_daily_date_file,
        cf_timeseries_date_file,
    )
Example #54
def test_extract_timeseries():
    """
    This tests extracting a timeseries from RAPID Qout file
    """
    print("TEST 13: TEST EXTRACT TIMESERIES FROM QINIT FILE")
    
    #for writing entire time series to file from new rapid output
    input_qout_file = os.path.join(COMPARE_DATA_PATH, 'Qout_nasa_lis_3hr_20020830.nc')
    new_qout_file = os.path.join(OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830.nc')
    copy(input_qout_file, new_qout_file)
    new_timeseries_file = os.path.join(OUTPUT_DATA_PATH, 'new_timeseries_file.csv')
    
    with RAPIDDataset(new_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(new_timeseries_file,
                                   river_id=75224)
                                   
        if qout_nc.is_time_variable_valid():
            original_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH, 'original_timeseries.csv')
        else:
            original_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH, 'original_timeseries-notime.csv')
        
    ok_(compare_csv_timeseries_files(new_timeseries_file, original_timeseries_file_solution, header=False))
    
    #for writing entire time series to file from original rapid output
    input_qout_file = os.path.join(COMPARE_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_original.nc')
    original_qout_file = os.path.join(OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_original.nc')
    copy(input_qout_file, original_qout_file)
    original_timeseries_file = os.path.join(OUTPUT_DATA_PATH, 'original_timeseries.csv')
    
    with RAPIDDataset(original_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(original_timeseries_file,
                                   river_id=75224)
    original_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH, 'original_timeseries-notime.csv')
        
    ok_(compare_csv_timeseries_files(original_timeseries_file, original_timeseries_file_solution, header=False))

    #if file is CF compliant, you can write out daily average
    cf_input_qout_file = os.path.join(COMPARE_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_qout_file = os.path.join(OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_CF.nc')
    copy(cf_input_qout_file, cf_qout_file)
    cf_timeseries_daily_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_daily.csv')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(cf_timeseries_daily_file,
                                   river_index=20,
                                   daily=True)

    cf_timeseries_daily_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_daily.csv')    
    ok_(compare_csv_timeseries_files(cf_timeseries_daily_file, cf_timeseries_daily_file_solution, header=False))
    
    #if file is CF compliant, check write out timeseries
    cf_timeseries_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries.csv')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(cf_timeseries_file,
                                   river_index=20)

    cf_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries.csv')    
    ok_(compare_csv_timeseries_files(cf_timeseries_file, cf_timeseries_file_solution, header=False))

    #if file is CF compliant, you can write out daily average, filter by date, and use max mode
    cf_timeseries_daily_date_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_daily_date.csv')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(cf_timeseries_daily_date_file,
                                   river_id=75224,
                                   date_search_start=datetime(2002, 8, 31),
                                   date_search_end=datetime(2002, 8, 31, 23, 59, 59),
                                   daily=True,
                                   mode='max')

    cf_timeseries_daily_date_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_daily_date.csv')    
    ok_(compare_csv_timeseries_files(cf_timeseries_daily_date_file, cf_timeseries_daily_date_file_solution, header=False))
    
    #if file is CF compliant, check write out timeseries and filter by date
    cf_timeseries_date_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_date.csv')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(cf_timeseries_date_file,
                                   date_search_start=datetime(2002, 8, 31),
                                   #date_search_end=None,
                                   river_id=75224)

    cf_timeseries_date_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_date.csv')    
    ok_(compare_csv_timeseries_files(cf_timeseries_date_file, cf_timeseries_date_file_solution, header=False))

    remove_files(new_timeseries_file, 
                 new_qout_file,
                 original_timeseries_file, 
                 original_qout_file,
                 cf_timeseries_file,
                 cf_timeseries_date_file,
                 cf_timeseries_daily_file,
                 cf_timeseries_daily_date_file,
                 cf_qout_file,
                 )
Example #55
def tearDown(self):
    #remove unused data
    remove_files(*[f for f in glob(os.path.join(self.OUTPUT_DATA_PATH, "*"))
                   if not f.endswith(".gitignore")])