示例#1
0
文件: event.py 项目: waternk/gsshapy
    def prepare_rapid_streamflow(self, path_to_rapid_qout,
                                 connection_list_file):
        """
        Prepares RAPID streamflow for GSSHA simulation.

        Writes the streamflow found in the simulation time window to an
        IHG file and updates the GSSHA project cards accordingly; if no
        streamflow is found, any stale IHG file and the
        CHAN_POINT_INPUT card are removed instead.

        Parameters
        ----------
        path_to_rapid_qout: str
            Path to the RAPID Qout netCDF file.
        connection_list_file: str
            Path to the file connecting RAPID river IDs to the GSSHA
            stream network.
        """
        ihg_filename = '{0}.ihg'.format(self.project_manager.name)
        with tmp_chdir(self.project_manager.project_directory):
            # write out IHG file
            time_index_range = []
            with RAPIDDataset(path_to_rapid_qout,
                              out_tzinfo=self.tz) as qout_nc:

                # indices of the Qout time steps inside the simulation window
                time_index_range = qout_nc.get_time_index_range(
                    date_search_start=self.simulation_start,
                    date_search_end=self.simulation_end)

                if len(time_index_range) > 0:
                    time_array = qout_nc.get_time_array(
                        return_datetime=True,
                        time_index_array=time_index_range)

                    # GSSHA STARTS INGESTING STREAMFLOW AT SECOND TIME STEP
                    if self.simulation_start is not None:
                        if self.simulation_start == time_array[0]:
                            log.warning(
                                "First timestep of streamflow skipped "
                                "in order for GSSHA to capture the streamflow."
                            )
                            time_index_range = time_index_range[1:]
                            time_array = time_array[1:]

                if len(time_index_range) > 0:
                    start_datetime = time_array[0]

                    if self.simulation_start is None:
                        self._update_simulation_start(start_datetime)

                    if self.simulation_end is None:
                        self.simulation_end = time_array[-1]

                    qout_nc.write_flows_to_gssha_time_series_ihg(
                        ihg_filename,
                        connection_list_file,
                        date_search_start=start_datetime,
                        date_search_end=self.simulation_end,
                    )
                else:
                    log.warning("No streamflow values found in time range ...")

            if len(time_index_range) > 0:
                # update cards
                self._update_simulation_start_cards()

                self._update_card(
                    "END_TIME", self.simulation_end.strftime("%Y %m %d %H %M"))
                self._update_card("CHAN_POINT_INPUT", ihg_filename, True)

                # update duration
                self.set_simulation_duration(self.simulation_end -
                                             self.simulation_start)

                # UPDATE GMT CARD
                self._update_gmt()
            else:
                # cleanup: no IHG file was written in this branch, so only
                # remove a stale copy from a previous run if one exists
                # (an unconditional os.remove would raise FileNotFoundError)
                if os.path.exists(ihg_filename):
                    os.remove(ihg_filename)
                self.project_manager.deleteCard('CHAN_POINT_INPUT',
                                                self.db_session)
示例#2
0
def test_extract_timeseries_to_gssha_ihg_tzinfo():
    """
    This tests extracting a timeseries from RAPID Qout file to GSHHA ihg file
    with different time zone output
    """
    print(
        "TEST 17: TEST EXTRACT TIMESERIES FROM Qout file to GSSHA ihg file tzinfo"
    )

    central_tz = timezone('US/Central')

    source_qout = os.path.join(COMPARE_DATA_PATH,
                               'Qout_nasa_lis_3hr_20020830_CF.nc')
    working_qout = os.path.join(OUTPUT_DATA_PATH,
                                'Qout_nasa_lis_3hr_20020830_CF.nc')
    copy(source_qout, working_qout)

    def _write_ihg_and_compare(out_name, connect_name, header, **ihg_kwargs):
        # write the ihg file from the Qout copy and compare it against the
        # solution file of the same name in COMPARE_DATA_PATH
        connect_file = os.path.join(INPUT_DATA_PATH, connect_name)
        out_file = os.path.join(OUTPUT_DATA_PATH, out_name)
        with RAPIDDataset(working_qout, out_tzinfo=central_tz) as qout_nc:
            qout_nc.write_flows_to_gssha_time_series_ihg(
                out_file,
                connection_list_file=connect_file,
                **ihg_kwargs)
        solution_file = os.path.join(COMPARE_DATA_PATH, out_name)
        assert compare_csv_timeseries_files(out_file, solution_file,
                                            header=header)
        return out_file

    #if file is CF compliant, you can write out daily average
    daily_file = _write_ihg_and_compare('cf_timeseries_daily_tz.ihg',
                                        'rapid_gssha_connect_file3.csv',
                                        header=False,
                                        daily=True)

    #if file is CF compliant, check write out timeseries
    series_file = _write_ihg_and_compare('cf_timeseries_tz.ihg',
                                         'rapid_gssha_connect_file1.csv',
                                         header=False)

    #if file is CF compliant, you can write out daily average, filter by date, and use max mode
    daily_date_file = _write_ihg_and_compare(
        'cf_timeseries_daily_date_tz.ihg',
        'rapid_gssha_connect_file1.csv',
        header=False,
        date_search_start=datetime(2002, 8, 31),
        date_search_end=datetime(2002, 8, 31, 23, 59, 59),
        daily=True,
        mode='max')

    #if file is CF compliant, check write out timeseries and filter by date
    date_file = _write_ihg_and_compare(
        'cf_timeseries_date_tz.ihg',
        'rapid_gssha_connect_file3.csv',
        header=False,
        date_search_start=datetime(2002, 8, 31))

    remove_files(
        series_file,
        working_qout,
        daily_file,
        daily_date_file,
        date_file,
    )
示例#3
0
def test_extract_timeseries_to_gssha_xys():
    """
    This tests extracting a timeseries from RAPID Qout file to GSHHA xys file
    """
    print("TEST 16: TEST EXTRACT TIMESERIES FROM Qout file to GSSHA xys file")

    source_qout = os.path.join(COMPARE_DATA_PATH,
                               'Qout_nasa_lis_3hr_20020830_CF.nc')
    working_qout = os.path.join(OUTPUT_DATA_PATH,
                                'Qout_nasa_lis_3hr_20020830_CF.nc')
    copy(source_qout, working_qout)

    def _write_xys(out_name, **xys_kwargs):
        # write one xys series for the copied Qout file and return its path
        out_file = os.path.join(OUTPUT_DATA_PATH, out_name)
        with RAPIDDataset(working_qout) as qout_nc:
            qout_nc.write_flows_to_gssha_time_series_xys(
                out_file,
                series_name="RAPID_TO_GSSHA",
                series_id=25,
                **xys_kwargs)
        return out_file

    #if file is CF compliant, you can write out daily average
    daily_file = _write_xys('cf_timeseries_daily.xys',
                            river_index=20,
                            daily=True)
    daily_solution = os.path.join(COMPARE_DATA_PATH,
                                  'cf_timeseries_daily.xys')
    assert compare_csv_timeseries_files(daily_file, daily_solution)

    #if file is CF compliant, check write out timeseries
    series_file = _write_xys('cf_timeseries.xys', river_index=20)
    series_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries.xys')
    assert compare_csv_timeseries_files(series_file, series_solution,
                                        header=True)

    #if file is CF compliant, you can write out daily average, filter by date, and use max mode
    daily_date_file = _write_xys(
        'cf_timeseries_daily_date.xys',
        river_id=75224,
        date_search_start=datetime(2002, 8, 31),
        date_search_end=datetime(2002, 8, 31, 23, 59, 59),
        daily=True,
        mode='max')
    daily_date_solution = os.path.join(COMPARE_DATA_PATH,
                                       'cf_timeseries_daily_date.xys')
    assert compare_csv_timeseries_files(daily_date_file, daily_date_solution)

    #if file is CF compliant, check write out timeseries and filter by date
    date_file = _write_xys('cf_timeseries_date.xys',
                           date_search_start=datetime(2002, 8, 31),
                           river_id=75224)
    date_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_date.xys')
    assert compare_csv_timeseries_files(date_file, date_solution)

    remove_files(
        series_file,
        working_qout,
        daily_file,
        daily_date_file,
        date_file,
    )
示例#4
0
def test_extract_timeseries():
    """
    This tests extracting a timeseries from RAPID Qout file
    """
    # NOTE: message previously said "QINIT FILE" which was a copy-paste
    # mistake -- this test reads Qout files, not a Qinit file
    print("TEST 13: TEST EXTRACT TIMESERIES FROM QOUT FILE")

    #for writing entire time series to file from new rapid output
    input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                   'Qout_nasa_lis_3hr_20020830.nc')
    new_qout_file = os.path.join(OUTPUT_DATA_PATH,
                                 'Qout_nasa_lis_3hr_20020830.nc')
    copy(input_qout_file, new_qout_file)
    new_timeseries_file = os.path.join(OUTPUT_DATA_PATH,
                                       'new_timeseries_file.csv')

    with RAPIDDataset(new_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(new_timeseries_file, river_id=75224)

        # solution file differs depending on whether the time variable
        # is usable in this dataset
        if qout_nc.is_time_variable_valid():
            original_timeseries_file_solution = os.path.join(
                COMPARE_DATA_PATH, 'original_timeseries.csv')
        else:
            original_timeseries_file_solution = os.path.join(
                COMPARE_DATA_PATH, 'original_timeseries-notime.csv')

    assert (compare_csv_timeseries_files(new_timeseries_file,
                                         original_timeseries_file_solution,
                                         header=False))

    #for writing entire time series to file from original rapid output
    input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                   'Qout_nasa_lis_3hr_20020830_original.nc')
    original_qout_file = os.path.join(
        OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_original.nc')
    copy(input_qout_file, original_qout_file)
    original_timeseries_file = os.path.join(OUTPUT_DATA_PATH,
                                            'original_timeseries.csv')

    with RAPIDDataset(original_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(original_timeseries_file, river_id=75224)
    original_timeseries_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'original_timeseries-notime.csv')

    assert (compare_csv_timeseries_files(original_timeseries_file,
                                         original_timeseries_file_solution,
                                         header=False))

    #if file is CF compliant, you can write out daily average
    cf_input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                      'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_qout_file = os.path.join(OUTPUT_DATA_PATH,
                                'Qout_nasa_lis_3hr_20020830_CF.nc')
    copy(cf_input_qout_file, cf_qout_file)
    cf_timeseries_daily_file = os.path.join(OUTPUT_DATA_PATH,
                                            'cf_timeseries_daily.csv')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(cf_timeseries_daily_file,
                                   river_index=20,
                                   daily=True)

    cf_timeseries_daily_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_timeseries_daily.csv')
    assert (compare_csv_timeseries_files(cf_timeseries_daily_file,
                                         cf_timeseries_daily_file_solution,
                                         header=False))

    #if file is CF compliant, check write out timeseries
    cf_timeseries_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries.csv')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(cf_timeseries_file, river_index=20)

    cf_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH,
                                               'cf_timeseries.csv')
    assert (compare_csv_timeseries_files(cf_timeseries_file,
                                         cf_timeseries_file_solution,
                                         header=False))

    #if file is CF compliant, you can write out daily average, filter by date, and use max mode
    cf_timeseries_daily_date_file = os.path.join(
        OUTPUT_DATA_PATH, 'cf_timeseries_daily_date.csv')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(cf_timeseries_daily_date_file,
                                   river_id=75224,
                                   date_search_start=datetime(2002, 8, 31),
                                   date_search_end=datetime(
                                       2002, 8, 31, 23, 59, 59),
                                   daily=True,
                                   mode='max')

    cf_timeseries_daily_date_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_timeseries_daily_date.csv')
    assert (compare_csv_timeseries_files(
        cf_timeseries_daily_date_file,
        cf_timeseries_daily_date_file_solution,
        header=False))

    #if file is CF compliant, check write out timeseries and filter by date
    cf_timeseries_date_file = os.path.join(OUTPUT_DATA_PATH,
                                           'cf_timeseries_date.csv')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(
            cf_timeseries_date_file,
            date_search_start=datetime(2002, 8, 31),
            river_id=75224)

    cf_timeseries_date_file_solution = os.path.join(COMPARE_DATA_PATH,
                                                    'cf_timeseries_date.csv')
    assert (compare_csv_timeseries_files(cf_timeseries_date_file,
                                         cf_timeseries_date_file_solution,
                                         header=False))

    remove_files(
        new_timeseries_file,
        new_qout_file,
        original_timeseries_file,
        original_qout_file,
        cf_timeseries_file,
        cf_timeseries_date_file,
        cf_timeseries_daily_file,
        cf_timeseries_daily_date_file,
        cf_qout_file,
    )
示例#5
0
def test_dataset_exceptions():
    """This tests RAPIDDataset exceptions"""
    dummy_file = os.path.join(OUTPUT_DATA_PATH, 'dummy_file.txt')
    cf_source = os.path.join(COMPARE_DATA_PATH,
                             'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_qout_file = os.path.join(OUTPUT_DATA_PATH,
                                'Qout_nasa_lis_3hr_20020830_CF.nc')
    copy(cf_source, cf_qout_file)

    # nonexistent river id dimension raises
    with pytest.raises(IndexError):
        with RAPIDDataset(cf_qout_file,
                          river_id_dimension='fake_rivid') as qout_nc:
            print(qout_nc)

    # this only prints a warning
    with RAPIDDataset(cf_qout_file,
                      river_id_variable='fake_rivid') as qout_nc:
        print(qout_nc)

    # nonexistent streamflow variable raises
    with pytest.raises(IndexError):
        with RAPIDDataset(cf_qout_file,
                          streamflow_variable='fake_qout') as qout_nc:
            print(qout_nc)

    # unknown river id raises
    with pytest.raises(IndexError):
        with RAPIDDataset(cf_qout_file) as qout_nc:
            print(qout_nc.get_qout(49876539))

    # subsetting by an unknown river id returns it in the third element only
    with RAPIDDataset(cf_qout_file) as qout_nc:
        res_a, res_b, res_c = \
            qout_nc.get_subset_riverid_index_list([49876539])
        assert not res_a
        assert not res_b
        assert res_c[0] == 49876539

    # xys export without identifying a river raises
    with pytest.raises(ValueError):
        with RAPIDDataset(cf_qout_file) as qout_nc:
            qout_nc.write_flows_to_gssha_time_series_xys(
                dummy_file,
                series_name="RAPID_TO_GSSHA",
                series_id=34)

    # csv export without identifying a river raises
    with pytest.raises(ValueError):
        with RAPIDDataset(cf_qout_file) as qout_nc:
            qout_nc.write_flows_to_csv(dummy_file)

    # for writing entire time series to file from original rapid output
    original_source = os.path.join(COMPARE_DATA_PATH,
                                   'Qout_nasa_lis_3hr_20020830_original.nc')
    original_qout_file = os.path.join(OUTPUT_DATA_PATH,
                                      'Qout_nasa_lis_3hr_20020830_original.nc')
    copy(original_source, original_qout_file)

    # original-format output has no valid time variable
    with pytest.raises(ValueError):
        with RAPIDDataset(original_qout_file) as qout_nc:
            print(qout_nc.get_time_array())

    with pytest.raises(IndexError):
        with RAPIDDataset(original_qout_file) as qout_nc:
            qout_nc.write_flows_to_gssha_time_series_xys(
                dummy_file,
                series_name="RAPID_TO_GSSHA",
                series_id=34,
                river_index=0)

    with pytest.raises(IndexError):
        with RAPIDDataset(original_qout_file) as qout_nc:
            qout_nc.write_flows_to_gssha_time_series_ihg(
                dummy_file,
                dummy_file)
示例#6
0
def test_extract_timeseries_to_gssha_ihg():
    """
    This tests extracting a timeseries from RAPID Qout file to GSHHA ihg file
    """
    print("TEST 16: TEST EXTRACT TIMESERIES FROM Qout file to GSSHA ihg file")

    cf_input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                      'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_qout_file = os.path.join(OUTPUT_DATA_PATH,
                                'Qout_nasa_lis_3hr_20020830_CF.nc')
    copy(cf_input_qout_file, cf_qout_file)
    # in-memory connection list (instead of a CSV connection file)
    # mapping RAPID river IDs to GSSHA link/node IDs
    connection_list = [
        {
            'node_id': 1,
            'link_id': 599,
            'baseflow': 0.0,
            'rapid_rivid': 75224,
        },
        {
            'node_id': 1,
            'link_id': 603,
            'baseflow': 0.0,
            'rapid_rivid': 75225,
        },
        {
            'node_id': 1,
            'link_id': 605,
            'baseflow': 0.0,
            'rapid_rivid': 75226,
        },
    ]

    #if file is CF compliant, you can write out daily average
    cf_timeseries_daily_file = os.path.join(OUTPUT_DATA_PATH,
                                            'cf_timeseries_daily.ihg')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(
            cf_timeseries_daily_file,
            connection_list=connection_list,
            daily=True)

    cf_timeseries_daily_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_timeseries_daily.ihg')
    # NOTE: converted from nose's ok_() to plain assert for consistency
    # with the other tests in this file (same truthiness check)
    assert (compare_csv_timeseries_files(cf_timeseries_daily_file,
                                         cf_timeseries_daily_file_solution,
                                         header=True))

    #if file is CF compliant, check write out timeseries
    cf_timeseries_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries.ihg')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(
            cf_timeseries_file,
            connection_list=connection_list[:1],
        )

    cf_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH,
                                               'cf_timeseries.ihg')
    assert (compare_csv_timeseries_files(cf_timeseries_file,
                                         cf_timeseries_file_solution,
                                         header=False))

    #if file is CF compliant, you can write out daily average, filter by date, and use max mode
    cf_timeseries_daily_date_file = os.path.join(
        OUTPUT_DATA_PATH, 'cf_timeseries_daily_date.ihg')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(
            cf_timeseries_daily_date_file,
            connection_list=connection_list[:1],
            date_search_start=datetime(2002, 8, 31),
            date_search_end=datetime(2002, 8, 31, 23, 59, 59),
            daily=True,
            mode='max')

    cf_timeseries_daily_date_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_timeseries_daily_date.ihg')
    assert (compare_csv_timeseries_files(cf_timeseries_daily_date_file,
                                         cf_timeseries_daily_date_file_solution,
                                         header=True))

    #if file is CF compliant, check write out timeseries and filter by date
    cf_timeseries_date_file = os.path.join(OUTPUT_DATA_PATH,
                                           'cf_timeseries_date.ihg')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(
            cf_timeseries_date_file,
            connection_list=connection_list,
            date_search_start=datetime(2002, 8, 31),
        )

    cf_timeseries_date_file_solution = os.path.join(COMPARE_DATA_PATH,
                                                    'cf_timeseries_date.ihg')
    assert (compare_csv_timeseries_files(cf_timeseries_date_file,
                                         cf_timeseries_date_file_solution,
                                         header=False))

    remove_files(
        cf_timeseries_file,
        cf_qout_file,
        cf_timeseries_daily_file,
        cf_timeseries_daily_date_file,
        cf_timeseries_date_file,
    )