Example #1
    def test_rapid_to_gssha_date_range(self):
        """
        Test RAPID to GSSHA functionality with date filters
        """
        # INITIALIZE CLASS AND RUN
        gr = GSSHAWRFFramework(gssha_executable="",
                               gssha_directory=self.gssha_project_directory,
                               project_filename=self.gssha_project_file,
                               path_to_rapid_qout=self.path_to_rapid_qout,
                               connection_list_file=self.connection_list_file,
                               gssha_simulation_start=datetime(2002,8,30),
                               gssha_simulation_end=datetime(2002,8,30,23,59),
                               read_hotstart=True,  # SHOULD NOT CHANGE ANYTHING
                               )

        gr.run_forecast()

        # COMPARE FILES
        # grid_standard.prj
        compare_prj_file = os.path.join(self.readDirectory, 'framework',
                                        'grid_standard_rapid_200208300000to200208302359_{0}.prj'.format(self.os_name))
        self._compare_files(self._generated_file_path('run_200208300000to200208302359'), compare_prj_file)
        # grid_standard.ihg
        compare_ihg_file = os.path.join(self.readDirectory, "framework",
                                        "grid_standard_rapid_200208300000to200208302359.ihg")
        assert compare_csv_timeseries_files(self._generated_file_path('run_200208300000to200208302359', extension='ihg'),
                                            compare_ihg_file, header=False)
        # grid_standard.cmt
        # 1 file in main directory not modified
        self._compare_files(os.path.join(self.readDirectory, "gssha_project", "grid_standard.cmt"),
                            os.path.join(self.gssha_project_directory, "grid_standard.cmt"))
        # 2 file in working directory exists
        assert os.path.exists(self._generated_file_path('run_200208300000to200208302359', extension="cmt"))
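
Outside the test harness, the same calls can drive a forecast directly. Below is a minimal sketch mirroring the constructor arguments exercised above; the import path and all file paths are placeholders and assumptions, not values taken from the tests.

from datetime import datetime

# Assumed import location -- adjust to wherever GSSHAWRFFramework
# lives in your gsshapy install.
from gsshapy.modeling import GSSHAWRFFramework

# Placeholder paths: point these at a real GSSHA project, RAPID Qout
# file, and RAPID-to-GSSHA connection CSV.
gr = GSSHAWRFFramework(gssha_executable="gssha",
                       gssha_directory="/path/to/gssha_project",
                       project_filename="grid_standard.prj",
                       path_to_rapid_qout="/path/to/Qout_rapid.nc",
                       connection_list_file="/path/to/rapid_to_gssha_connect.csv",
                       gssha_simulation_start=datetime(2002, 8, 30),
                       gssha_simulation_end=datetime(2002, 8, 30, 23, 59),
                       )
gr.run_forecast()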
Example #2
    def test_rapid_to_gssha_date_range(self):
        """
        Test RAPID to GSSHA functionality with date filters
        """
        # INITIALIZE CLASS AND RUN
        gr = GSSHAWRFFramework(gssha_executable="",
                               gssha_directory=self.gssha_project_directory,
                               project_filename=self.gssha_project_file,
                               path_to_rapid_qout=self.path_to_rapid_qout,
                               connection_list_file=self.connection_list_file,
                               gssha_simulation_start=datetime(2002,8,30),
                               gssha_simulation_end=datetime(2002,8,30,23,59),
                               read_hotstart=True,  # SHOULD NOT CHANGE ANYTHING
                               )

        with pytest.raises(ValueError):
            gr.run_forecast()

        # COMPARE FILES
        # grid_standard.prj
        compare_prj_file = os.path.join(self.readDirectory, 'framework',
                                        'grid_standard_rapid_200208300000to200208302359_{0}.prj'.format(self.os_name))
        self._compare_files(self._generated_file_path('run_200208300000to200208302359'), compare_prj_file)
        # grid_standard.ihg
        compare_ihg_file = os.path.join(self.readDirectory, "framework",
                                        "grid_standard_rapid_200208300000to200208302359.ihg")
        assert compare_csv_timeseries_files(self._generated_file_path('run_200208300000to200208302359', extension='ihg'),
                                            compare_ihg_file, header=False)
        # grid_standard.cmt
        # 1 file in main directory not modified
        self._compare_files(os.path.join(self.readDirectory, "gssha_project", "grid_standard.cmt"),
                            os.path.join(self.gssha_project_directory, "grid_standard.cmt"))
        # 2 file in working directory correct
        self._compare_files(os.path.join(self.readDirectory, "gssha_project", "grid_standard_compare_run_{0}.cmt".format(self.os_name)),
                            self._generated_file_path('run_200208300000to200208302359', extension="cmt"))
Example #3
    def test_rapid_to_gssha_read_hotstart(self):
        """
        Test RAPID to GSSHA functionality read hotstart
        """
        # INITIALIZE CLASS AND RUN
        gr = GSSHAWRFFramework(gssha_executable='',
                               gssha_directory=self.gssha_project_directory,
                               project_filename=self.gssha_project_file,
                               path_to_rapid_qout=self.path_to_rapid_qout,
                               connection_list_file=self.connection_list_file,
                               read_hotstart=True,
                               )
        gr.run_forecast()

        # COMPARE FILES
        # grid_standard.prj
        compare_prj_file = os.path.join(self.readDirectory, 'framework',
                                        'grid_standard_rapid_read_hotstart_200208291800to200208311800_{0}.prj'.format(self.os_name))
        self._compare_files(self._generated_file_path('run_200208291800to200208311800'), compare_prj_file)
        # grid_standard.ihg
        compare_ihg_file = os.path.join(self.readDirectory, "framework",
                                        "grid_standard_rapid_200208291800to200208311800.ihg")
        assert compare_csv_timeseries_files(self._generated_file_path('run_200208291800to200208311800', extension='ihg'),
                                            compare_ihg_file, header=False)

        # grid_standard.cmt
        # 1 file in main directory not modified
        self._compare_files(os.path.join(self.readDirectory, "gssha_project", "grid_standard.cmt"),
                            os.path.join(self.gssha_project_directory, "grid_standard.cmt"))
        # 2 file in working directory exists
        assert os.path.exists(self._generated_file_path('run_200208291800to200208311800', extension="cmt"))
Example #4
    def test_rapid_to_gssha_read_hotstart(self):
        """
        Test RAPID to GSSHA functionality read hotstart
        """
        # INITIALIZE CLASS AND RUN
        gr = GSSHAWRFFramework(gssha_executable='',
                               gssha_directory=self.gssha_project_directory,
                               project_filename=self.gssha_project_file,
                               path_to_rapid_qout=self.path_to_rapid_qout,
                               connection_list_file=self.connection_list_file,
                               read_hotstart=True,
                               )
        with pytest.raises(ValueError):
            gr.run_forecast()

        # COMPARE FILES
        # grid_standard.prj
        compare_prj_file = os.path.join(self.readDirectory, 'framework',
                                        'grid_standard_rapid_read_hotstart_200208291800to200208311800_{0}.prj'.format(self.os_name))
        self._compare_files(self._generated_file_path('run_200208291800to200208311800'), compare_prj_file)
        # grid_standard.ihg
        compare_ihg_file = os.path.join(self.readDirectory, "framework",
                                        "grid_standard_rapid_200208291800to200208311800.ihg")
        assert compare_csv_timeseries_files(self._generated_file_path('run_200208291800to200208311800', extension='ihg'),
                                            compare_ihg_file, header=False)

        # grid_standard.cmt
        # 1 file in main directory not modified
        self._compare_files(os.path.join(self.readDirectory, "gssha_project", "grid_standard.cmt"),
                            os.path.join(self.gssha_project_directory, "grid_standard.cmt"))
        # 2 file in working directory correct
        self._compare_files(os.path.join(self.readDirectory, "gssha_project", "grid_standard_compare_run_{0}.cmt".format(self.os_name)),
                            self._generated_file_path('run_200208291800to200208311800', extension="cmt"))
Example #5
    def test_rapid_to_gssha(self):
        """
        Test RAPID to GSSHA functionality
        """
        # INITIALIZE CLASS AND RUN
        gr = GSSHAWRFFramework(gssha_executable='',
                               gssha_directory=self.gssha_project_directory,
                               project_filename=self.gssha_project_file,
                               path_to_rapid_qout=self.path_to_rapid_qout,
                               connection_list_file=self.connection_list_file)
        with pytest.raises(ValueError):
            gr.run_forecast()

        # COMPARE FILES
        # grid_standard.prj
        compare_prj_file = os.path.join(
            self.readDirectory, 'framework',
            'grid_standard_rapid_200208291800to200208311800_{0}.prj'.format(
                self.os_name))
        self._compare_files(
            self._generated_file_path('run_200208291800to200208311800'),
            compare_prj_file)
        # grid_standard.ihg
        compare_ihg_file = os.path.join(
            self.readDirectory, "framework",
            "grid_standard_rapid_200208291800to200208311800.ihg")
        assert compare_csv_timeseries_files(self._generated_file_path(
            'run_200208291800to200208311800', extension='ihg'),
                                            compare_ihg_file,
                                            header=False)

        # compare yml files
        self._compare_files(
            os.path.join(self.readDirectory, 'framework',
                         'gssha_event_rapid_200208291800to200208311800.yml'),
            os.path.join(self.gssha_project_directory, 'gsshapy_event.yml'))

        # grid_standard.cmt
        # 1 file in main directory not modified
        self._compare_files(
            os.path.join(self.readDirectory, "gssha_project",
                         "grid_standard.cmt"),
            os.path.join(self.gssha_project_directory, "grid_standard.cmt"))
        # 2 file in working directory correct
        self._compare_files(
            os.path.join(
                self.readDirectory, "gssha_project",
                "grid_standard_compare_run_{0}.cmt".format(self.os_name)),
            self._generated_file_path('run_200208291800to200208311800',
                                      extension="cmt"))
Example #6
    def test_rapid_to_gssha_min_hotstart(self):
        """
        Test RAPID to GSSHA functionality with minimal mode hotstart generation
        """
        # INITIALIZE CLASS AND RUN
        gr = GSSHAWRFFramework(gssha_executable="",
                               gssha_directory=self.gssha_project_directory,
                               project_filename=self.gssha_project_file,
                               path_to_rapid_qout=self.path_to_rapid_qout,
                               connection_list_file=self.connection_list_file,
                               gssha_simulation_duration=timedelta(seconds=6*3600),
                               write_hotstart=True,
                               hotstart_minimal_mode=True,
                               )
        with pytest.raises(ValueError):
            gr.run_forecast()

        # check folder exists
        assert os.path.exists(os.path.join(self.gssha_project_directory, "hotstart"))

        # COMPARE FILES
        # grid_standard.prj
        compare_prj_file = os.path.join(self.readDirectory, 'framework',
                                        'grid_standard_rapid_minimal_hotstart_200208291800to200208300000_{0}.prj'.format(self.os_name))
        self._compare_files(self._generated_file_path('minimal_hotstart_run_200208291800to200208300000'), compare_prj_file)
        # grid_standard.ihg
        compare_ihg_file = os.path.join(self.readDirectory, "framework",
                                        "grid_standard_rapid_hotstart_200208291800to2002083000.ihg")
        assert compare_csv_timeseries_files(self._generated_file_path('minimal_hotstart_run_200208291800to200208300000', extension='ihg'),
                                            compare_ihg_file, header=False)

        # compare yml files
        self._compare_files(os.path.join(self.readDirectory, 'framework', 'gssha_event_rapid_hotstart_200208291800to200208300000.yml'),
                            os.path.join(self.gssha_project_directory, 'gsshapy_event.yml'))
        # grid_standard.cmt
        # 1 file in main directory not modified
        self._compare_files(os.path.join(self.readDirectory, "gssha_project", "grid_standard.cmt"),
                            os.path.join(self.gssha_project_directory, "grid_standard.cmt"))
        # 2 file in working directory correct
        self._compare_files(os.path.join(self.readDirectory, "gssha_project", "grid_standard_compare_run_{0}.cmt".format(self.os_name)),
                            self._generated_file_path('minimal_hotstart_run_200208291800to200208300000', extension="cmt"))
Example #7
    def test_rapid_to_gssha_min_hotstart(self):
        """
        Test RAPID to GSSHA functionality with minimal mode hotstart generation
        """
        # INITIALIZE CLASS AND RUN
        gr = GSSHAWRFFramework(gssha_executable="",
                               gssha_directory=self.gssha_project_directory,
                               project_filename=self.gssha_project_file,
                               path_to_rapid_qout=self.path_to_rapid_qout,
                               connection_list_file=self.connection_list_file,
                               gssha_simulation_duration=timedelta(seconds=6*3600),
                               write_hotstart=True,
                               hotstart_minimal_mode=True,
                               )
        gr.run_forecast()

        # check folder exists
        assert os.path.exists(os.path.join(self.gssha_project_directory, "hotstart"))

        # COMPARE FILES
        # grid_standard.prj
        compare_prj_file = os.path.join(self.readDirectory, 'framework',
                                        'grid_standard_rapid_minimal_hotstart_200208291800to200208300000_{0}.prj'.format(self.os_name))
        self._compare_files(self._generated_file_path('minimal_hotstart_run_200208291800to200208300000'), compare_prj_file)
        # grid_standard.ihg
        compare_ihg_file = os.path.join(self.readDirectory, "framework",
                                        "grid_standard_rapid_hotstart_200208291800to2002083000.ihg")
        assert compare_csv_timeseries_files(self._generated_file_path('minimal_hotstart_run_200208291800to200208300000', extension='ihg'),
                                            compare_ihg_file, header=False)

        # compare yml files
        self._compare_files(os.path.join(self.readDirectory, 'framework', 'gssha_event_rapid_hotstart_200208291800to200208300000.yml'),
                            os.path.join(self.gssha_project_directory, 'gsshapy_event.yml'))
        # grid_standard.cmt
        # 1 file in main directory not modified
        self._compare_files(os.path.join(self.readDirectory, "gssha_project", "grid_standard.cmt"),
                            os.path.join(self.gssha_project_directory, "grid_standard.cmt"))
        # 2 file in working directory exists
        assert os.path.exists(self._generated_file_path('minimal_hotstart_run_200208291800to200208300000', extension="cmt"))
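
Taken together, the hotstart tests exercise a two-step workflow: a short minimal-mode run that only writes the hotstart files, followed by a forecast run that reads them. Below is a sketch of that sequence with placeholder paths; the import location is an assumption, as in the earlier sketch.

from datetime import timedelta

# Assumed import location; see the note in the earlier sketch.
from gsshapy.modeling import GSSHAWRFFramework

# Step 1: six-hour minimal-mode run that only generates hotstart files.
hotstart_run = GSSHAWRFFramework(gssha_executable="gssha",
                                 gssha_directory="/path/to/gssha_project",
                                 project_filename="grid_standard.prj",
                                 path_to_rapid_qout="/path/to/Qout_rapid.nc",
                                 connection_list_file="/path/to/rapid_to_gssha_connect.csv",
                                 gssha_simulation_duration=timedelta(hours=6),
                                 write_hotstart=True,
                                 hotstart_minimal_mode=True,
                                 )
hotstart_run.run_forecast()

# Step 2: forecast run that starts from the hotstart written in step 1.
forecast_run = GSSHAWRFFramework(gssha_executable="gssha",
                                 gssha_directory="/path/to/gssha_project",
                                 project_filename="grid_standard.prj",
                                 path_to_rapid_qout="/path/to/Qout_rapid.nc",
                                 connection_list_file="/path/to/rapid_to_gssha_connect.csv",
                                 read_hotstart=True,
                                 )
forecast_run.run_forecast()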
Example #8
def test_extract_timeseries_to_gssha_ihg_tzinfo():
    """
    This tests extracting a timeseries from a RAPID Qout file to a GSSHA ihg file
    with output in a different time zone
    """
    print(
        "TEST 17: TEST EXTRACT TIMESERIES FROM Qout file to GSSHA ihg file tzinfo"
    )

    CENTRAL_TZ = timezone('US/Central')

    cf_input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                      'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_qout_file = os.path.join(OUTPUT_DATA_PATH,
                                'Qout_nasa_lis_3hr_20020830_CF.nc')
    copy(cf_input_qout_file, cf_qout_file)

    #if file is CF compliant, you can write out daily average
    connection_list_file = os.path.join(INPUT_DATA_PATH,
                                        'rapid_gssha_connect_file3.csv')
    cf_timeseries_daily_file = os.path.join(OUTPUT_DATA_PATH,
                                            'cf_timeseries_daily_tz.ihg')

    with RAPIDDataset(cf_qout_file, out_tzinfo=CENTRAL_TZ) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(
            cf_timeseries_daily_file,
            connection_list_file=connection_list_file,
            daily=True)

    cf_timeseries_daily_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_timeseries_daily_tz.ihg')
    assert (compare_csv_timeseries_files(cf_timeseries_daily_file,
                                         cf_timeseries_daily_file_solution,
                                         header=False))

    #if file is CF compliant, check write out timeseries
    connection_list_file = os.path.join(INPUT_DATA_PATH,
                                        'rapid_gssha_connect_file1.csv')
    cf_timeseries_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_tz.ihg')
    with RAPIDDataset(cf_qout_file, out_tzinfo=CENTRAL_TZ) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(
            cf_timeseries_file,
            connection_list_file=connection_list_file,
        )

    cf_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH,
                                               'cf_timeseries_tz.ihg')
    assert (compare_csv_timeseries_files(cf_timeseries_file,
                                         cf_timeseries_file_solution,
                                         header=False))

    #if file is CF compliant, you can write out daily average, filter by date, and use max mode
    connection_list_file = os.path.join(INPUT_DATA_PATH,
                                        'rapid_gssha_connect_file1.csv')
    cf_timeseries_daily_date_file = os.path.join(
        OUTPUT_DATA_PATH, 'cf_timeseries_daily_date_tz.ihg')

    with RAPIDDataset(cf_qout_file, out_tzinfo=CENTRAL_TZ) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(
            cf_timeseries_daily_date_file,
            connection_list_file=connection_list_file,
            date_search_start=datetime(2002, 8, 31),
            date_search_end=datetime(2002, 8, 31, 23, 59, 59),
            daily=True,
            mode='max')

    cf_timeseries_daily_date_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_timeseries_daily_date_tz.ihg')
    assert (compare_csv_timeseries_files(
        cf_timeseries_daily_date_file,
        cf_timeseries_daily_date_file_solution,
        header=False))

    #if file is CF compliant, check write out timeseries and filter by date
    connection_list_file = os.path.join(INPUT_DATA_PATH,
                                        'rapid_gssha_connect_file3.csv')
    cf_timeseries_date_file = os.path.join(OUTPUT_DATA_PATH,
                                           'cf_timeseries_date_tz.ihg')
    with RAPIDDataset(cf_qout_file, out_tzinfo=CENTRAL_TZ) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(
            cf_timeseries_date_file,
            connection_list_file=connection_list_file,
            date_search_start=datetime(2002, 8, 31),
        )

    cf_timeseries_date_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_timeseries_date_tz.ihg')
    assert (compare_csv_timeseries_files(cf_timeseries_date_file,
                                         cf_timeseries_date_file_solution,
                                         header=False))

    remove_files(
        cf_timeseries_file,
        cf_qout_file,
        cf_timeseries_daily_file,
        cf_timeseries_daily_date_file,
        cf_timeseries_date_file,
    )
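
The module-level tests above rely on imports and data-path constants defined elsewhere in the test module. Below is a plausible header; the helper-function import location and the directory layout are assumptions about the test setup, not facts taken from the tests.

import os
from datetime import datetime
from shutil import copy

from pytz import timezone

# RAPIDDataset ships with RAPIDpy; the comparison/cleanup helpers are
# assumed to live in RAPIDpy's helper_functions module.
from RAPIDpy import RAPIDDataset
from RAPIDpy.helper_functions import (compare_csv_timeseries_files,
                                      remove_files)

# Placeholder data directories used throughout the tests.
MAIN_TESTS_FOLDER = os.path.dirname(os.path.abspath(__file__))
INPUT_DATA_PATH = os.path.join(MAIN_TESTS_FOLDER, 'input')
COMPARE_DATA_PATH = os.path.join(MAIN_TESTS_FOLDER, 'compare')
OUTPUT_DATA_PATH = os.path.join(MAIN_TESTS_FOLDER, 'output')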
Example #9
def test_extract_timeseries_to_gssha_xys():
    """
    This tests extracting a timeseries from a RAPID Qout file to a GSSHA xys file
    """
    print("TEST 16: TEST EXTRACT TIMESERIES FROM Qout file to GSSHA xys file")

    cf_input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                      'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_qout_file = os.path.join(OUTPUT_DATA_PATH,
                                'Qout_nasa_lis_3hr_20020830_CF.nc')
    copy(cf_input_qout_file, cf_qout_file)

    #if file is CF compliant, you can write out daily average
    cf_timeseries_daily_file = os.path.join(OUTPUT_DATA_PATH,
                                            'cf_timeseries_daily.xys')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_xys(
            cf_timeseries_daily_file,
            series_name="RAPID_TO_GSSHA",
            series_id=25,
            river_index=20,
            daily=True)

    cf_timeseries_daily_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_timeseries_daily.xys')
    assert (compare_csv_timeseries_files(cf_timeseries_daily_file,
                                         cf_timeseries_daily_file_solution))

    #if file is CF compliant, check write out timeseries
    cf_timeseries_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries.xys')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_xys(
            cf_timeseries_file,
            series_name="RAPID_TO_GSSHA",
            series_id=25,
            river_index=20)

    cf_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH,
                                               'cf_timeseries.xys')
    assert (compare_csv_timeseries_files(cf_timeseries_file,
                                         cf_timeseries_file_solution,
                                         header=True))

    #if file is CF compliant, you can write out daily average, filter by date, and use max mode
    cf_timeseries_daily_date_file = os.path.join(
        OUTPUT_DATA_PATH, 'cf_timeseries_daily_date.xys')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_xys(
            cf_timeseries_daily_date_file,
            series_name="RAPID_TO_GSSHA",
            series_id=25,
            river_id=75224,
            date_search_start=datetime(2002, 8, 31),
            date_search_end=datetime(2002, 8, 31, 23, 59, 59),
            daily=True,
            mode='max')

    cf_timeseries_daily_date_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_timeseries_daily_date.xys')
    assert (compare_csv_timeseries_files(
        cf_timeseries_daily_date_file, cf_timeseries_daily_date_file_solution))

    #if file is CF compliant, check write out timeseries and filter by date
    cf_timeseries_date_file = os.path.join(OUTPUT_DATA_PATH,
                                           'cf_timeseries_date.xys')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_xys(
            cf_timeseries_date_file,
            series_name="RAPID_TO_GSSHA",
            series_id=25,
            date_search_start=datetime(2002, 8, 31),
            #date_search_end=None,
            river_id=75224)

    cf_timeseries_date_file_solution = os.path.join(COMPARE_DATA_PATH,
                                                    'cf_timeseries_date.xys')
    assert (compare_csv_timeseries_files(cf_timeseries_date_file,
                                         cf_timeseries_date_file_solution))

    remove_files(
        cf_timeseries_file,
        cf_qout_file,
        cf_timeseries_daily_file,
        cf_timeseries_daily_date_file,
        cf_timeseries_date_file,
    )
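
Note that write_flows_to_gssha_time_series_xys identifies the stream either by its position in the Qout file's rivid array (river_index) or by the rivid value itself (river_id); both styles appear above. Below is a minimal sketch of the two call forms, assuming a CF-compliant Qout file at a placeholder path.

from RAPIDpy import RAPIDDataset

cf_qout_file = '/path/to/Qout_nasa_lis_3hr_20020830_CF.nc'  # placeholder

with RAPIDDataset(cf_qout_file) as qout_nc:
    # Select the reach by its position in the rivid array ...
    qout_nc.write_flows_to_gssha_time_series_xys('timeseries_by_index.xys',
                                                 series_name="RAPID_TO_GSSHA",
                                                 series_id=25,
                                                 river_index=20)
    # ... or by the rivid value itself (not necessarily the same reach).
    qout_nc.write_flows_to_gssha_time_series_xys('timeseries_by_id.xys',
                                                 series_name="RAPID_TO_GSSHA",
                                                 series_id=25,
                                                 river_id=75224)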
Example #10
def test_extract_timeseries():
    """
    This tests extracting a timeseries from a RAPID Qout file
    """
    print("TEST 13: TEST EXTRACT TIMESERIES FROM QINIT FILE")

    #for writing entire time series to file from new rapid output
    input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                   'Qout_nasa_lis_3hr_20020830.nc')
    new_qout_file = os.path.join(OUTPUT_DATA_PATH,
                                 'Qout_nasa_lis_3hr_20020830.nc')
    copy(input_qout_file, new_qout_file)
    new_timeseries_file = os.path.join(OUTPUT_DATA_PATH,
                                       'new_timeseries_file.csv')

    with RAPIDDataset(new_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(new_timeseries_file, river_id=75224)

        if qout_nc.is_time_variable_valid():
            original_timeseries_file_solution = os.path.join(
                COMPARE_DATA_PATH, 'original_timeseries.csv')
        else:
            original_timeseries_file_solution = os.path.join(
                COMPARE_DATA_PATH, 'original_timeseries-notime.csv')

    assert (compare_csv_timeseries_files(new_timeseries_file,
                                         original_timeseries_file_solution,
                                         header=False))

    #for writing entire time series to file from original rapid output
    input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                   'Qout_nasa_lis_3hr_20020830_original.nc')
    original_qout_file = os.path.join(
        OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_original.nc')
    copy(input_qout_file, original_qout_file)
    original_timeseries_file = os.path.join(OUTPUT_DATA_PATH,
                                            'original_timeseries.csv')

    with RAPIDDataset(original_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(original_timeseries_file, river_id=75224)
    original_timeseries_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'original_timeseries-notime.csv')

    assert (compare_csv_timeseries_files(original_timeseries_file,
                                         original_timeseries_file_solution,
                                         header=False))

    #if file is CF compliant, you can write out daily average
    cf_input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                      'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_qout_file = os.path.join(OUTPUT_DATA_PATH,
                                'Qout_nasa_lis_3hr_20020830_CF.nc')
    copy(cf_input_qout_file, cf_qout_file)
    cf_timeseries_daily_file = os.path.join(OUTPUT_DATA_PATH,
                                            'cf_timeseries_daily.csv')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(cf_timeseries_daily_file,
                                   river_index=20,
                                   daily=True)

    cf_timeseries_daily_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_timeseries_daily.csv')
    assert (compare_csv_timeseries_files(cf_timeseries_daily_file,
                                         cf_timeseries_daily_file_solution,
                                         header=False))

    #if file is CF compliant, check write out timeseries
    cf_timeseries_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries.csv')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(cf_timeseries_file, river_index=20)

    cf_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH,
                                               'cf_timeseries.csv')
    assert (compare_csv_timeseries_files(cf_timeseries_file,
                                         cf_timeseries_file_solution,
                                         header=False))

    #if file is CF compliant, you can write out daily average, filter by date, and use max mode
    cf_timeseries_daily_date_file = os.path.join(
        OUTPUT_DATA_PATH, 'cf_timeseries_daily_date.csv')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(cf_timeseries_daily_date_file,
                                   river_id=75224,
                                   date_search_start=datetime(2002, 8, 31),
                                   date_search_end=datetime(
                                       2002, 8, 31, 23, 59, 59),
                                   daily=True,
                                   mode='max')

    cf_timeseries_daily_date_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_timeseries_daily_date.csv')
    assert (compare_csv_timeseries_files(
        cf_timeseries_daily_date_file,
        cf_timeseries_daily_date_file_solution,
        header=False))

    #if file is CF compliant, check write out timeseries and filter by date
    cf_timeseries_date_file = os.path.join(OUTPUT_DATA_PATH,
                                           'cf_timeseries_date.csv')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(
            cf_timeseries_date_file,
            date_search_start=datetime(2002, 8, 31),
            #date_search_end=None,
            river_id=75224)

    cf_timeseries_date_file_solution = os.path.join(COMPARE_DATA_PATH,
                                                    'cf_timeseries_date.csv')
    assert (compare_csv_timeseries_files(cf_timeseries_date_file,
                                         cf_timeseries_date_file_solution,
                                         header=False))

    remove_files(
        new_timeseries_file,
        new_qout_file,
        original_timeseries_file,
        original_qout_file,
        cf_timeseries_file,
        cf_timeseries_date_file,
        cf_timeseries_daily_file,
        cf_timeseries_daily_date_file,
        cf_qout_file,
    )
Example #11
def test_extract_timeseries_to_gssha_ihg():
    """
    This tests extracting a timeseries from a RAPID Qout file to a GSSHA ihg file
    """
    print("TEST 16: TEST EXTRACT TIMESERIES FROM Qout file to GSSHA ihg file")

    cf_input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                      'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_qout_file = os.path.join(OUTPUT_DATA_PATH,
                                'Qout_nasa_lis_3hr_20020830_CF.nc')
    copy(cf_input_qout_file, cf_qout_file)
    connection_list = [
        {
            'node_id': 1,
            'link_id': 599,
            'baseflow': 0.0,
            'rapid_rivid': 75224,
        },
        {
            'node_id': 1,
            'link_id': 603,
            'baseflow': 0.0,
            'rapid_rivid': 75225,
        },
        {
            'node_id': 1,
            'link_id': 605,
            'baseflow': 0.0,
            'rapid_rivid': 75226,
        },
    ]

    #if file is CF compliant, you can write out daily average
    cf_timeseries_daily_file = os.path.join(OUTPUT_DATA_PATH,
                                            'cf_timeseries_daily.ihg')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(
            cf_timeseries_daily_file,
            connection_list=connection_list,
            daily=True)

    cf_timeseries_daily_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_timeseries_daily.ihg')
    ok_(
        compare_csv_timeseries_files(cf_timeseries_daily_file,
                                     cf_timeseries_daily_file_solution,
                                     header=True))

    #if file is CF compliant, check write out timeseries
    cf_timeseries_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries.ihg')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(
            cf_timeseries_file,
            connection_list=connection_list[:1],
        )

    cf_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH,
                                               'cf_timeseries.ihg')
    ok_(
        compare_csv_timeseries_files(cf_timeseries_file,
                                     cf_timeseries_file_solution,
                                     header=False))

    #if file is CF compliant, you can write out daily average, filter by date, and use max mode
    cf_timeseries_daily_date_file = os.path.join(
        OUTPUT_DATA_PATH, 'cf_timeseries_daily_date.ihg')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(
            cf_timeseries_daily_date_file,
            connection_list=connection_list[:1],
            date_search_start=datetime(2002, 8, 31),
            date_search_end=datetime(2002, 8, 31, 23, 59, 59),
            daily=True,
            mode='max')

    cf_timeseries_daily_date_file_solution = os.path.join(
        COMPARE_DATA_PATH, 'cf_timeseries_daily_date.ihg')
    ok_(
        compare_csv_timeseries_files(cf_timeseries_daily_date_file,
                                     cf_timeseries_daily_date_file_solution,
                                     header=True))

    #if file is CF compliant, check write out timeseries and filter by date
    cf_timeseries_date_file = os.path.join(OUTPUT_DATA_PATH,
                                           'cf_timeseries_date.ihg')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(
            cf_timeseries_date_file,
            connection_list=connection_list,
            date_search_start=datetime(2002, 8, 31),
            #date_search_end=None,
        )

    cf_timeseries_date_file_solution = os.path.join(COMPARE_DATA_PATH,
                                                    'cf_timeseries_date.ihg')
    ok_(
        compare_csv_timeseries_files(cf_timeseries_date_file,
                                     cf_timeseries_date_file_solution,
                                     header=False))

    remove_files(
        cf_timeseries_file,
        cf_qout_file,
        cf_timeseries_daily_file,
        cf_timeseries_daily_date_file,
        cf_timeseries_date_file,
    )
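
As this example shows, write_flows_to_gssha_time_series_ihg accepts the connections either as an in-memory connection_list of dictionaries or, as in the other ihg tests, as a connection_list_file CSV on disk. Below is a minimal sketch of both call styles, with the Qout and connection-CSV paths as placeholders.

from RAPIDpy import RAPIDDataset

cf_qout_file = '/path/to/Qout_nasa_lis_3hr_20020830_CF.nc'  # placeholder

# In-memory connection list (keys as in the example above).
connection_list = [
    {'node_id': 1, 'link_id': 599, 'baseflow': 0.0, 'rapid_rivid': 75224},
]

with RAPIDDataset(cf_qout_file) as qout_nc:
    qout_nc.write_flows_to_gssha_time_series_ihg(
        'timeseries_from_list.ihg',
        connection_list=connection_list)

# Same call driven by a connection CSV on disk (placeholder path;
# the file's column layout is defined by RAPIDpy).
with RAPIDDataset(cf_qout_file) as qout_nc:
    qout_nc.write_flows_to_gssha_time_series_ihg(
        'timeseries_from_file.ihg',
        connection_list_file='/path/to/rapid_gssha_connect.csv')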
Example #12
def test_extract_timeseries_to_gssha_ihg_tzinfo():
    """
    This tests extracting a timeseries from a RAPID Qout file to a GSSHA ihg file
    with output in a different time zone
    """
    print("TEST 17: TEST EXTRACT TIMESERIES FROM Qout file to GSSHA ihg file tzinfo")
    
    CENTRAL_TZ = timezone('US/Central')
    
    cf_input_qout_file = os.path.join(COMPARE_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_qout_file = os.path.join(OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_CF.nc')
    copy(cf_input_qout_file, cf_qout_file)


    #if file is CF compliant, you can write out daily average
    connection_list_file = os.path.join(INPUT_DATA_PATH, 'rapid_gssha_connect_file3.csv')
    cf_timeseries_daily_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_daily_tz.ihg')

    with RAPIDDataset(cf_qout_file, out_tzinfo=CENTRAL_TZ) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(cf_timeseries_daily_file,
                                                     connection_list_file=connection_list_file,
                                                     daily=True)

    cf_timeseries_daily_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_daily_tz.ihg')    
    ok_(compare_csv_timeseries_files(cf_timeseries_daily_file, cf_timeseries_daily_file_solution, header=False))
    
    #if file is CF compliant, check write out timeseries
    connection_list_file = os.path.join(INPUT_DATA_PATH, 'rapid_gssha_connect_file1.csv')
    cf_timeseries_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_tz.ihg')
    with RAPIDDataset(cf_qout_file, out_tzinfo=CENTRAL_TZ) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(cf_timeseries_file,
                                                     connection_list_file=connection_list_file,
                                                     )

    cf_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_tz.ihg')    
    ok_(compare_csv_timeseries_files(cf_timeseries_file, cf_timeseries_file_solution, header=False))

    #if file is CF compliant, you can write out daily average, filter by date, and use max mode
    connection_list_file = os.path.join(INPUT_DATA_PATH, 'rapid_gssha_connect_file1.csv')
    cf_timeseries_daily_date_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_daily_date_tz.ihg')

    with RAPIDDataset(cf_qout_file, out_tzinfo=CENTRAL_TZ) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(cf_timeseries_daily_date_file,
                                                     connection_list_file=connection_list_file,
                                                     date_search_start=datetime(2002, 8, 31),
                                                     date_search_end=datetime(2002, 8, 31, 23, 59, 59),
                                                     daily=True,
                                                     mode='max')

    cf_timeseries_daily_date_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_daily_date_tz.ihg')    
    ok_(compare_csv_timeseries_files(cf_timeseries_daily_date_file, cf_timeseries_daily_date_file_solution, header=False))
    
    #if file is CF compliant, check write out timeseries and filter by date
    connection_list_file = os.path.join(INPUT_DATA_PATH, 'rapid_gssha_connect_file3.csv')
    cf_timeseries_date_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_date_tz.ihg')
    with RAPIDDataset(cf_qout_file, out_tzinfo=CENTRAL_TZ) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_ihg(cf_timeseries_date_file,
                                                     connection_list_file=connection_list_file,
                                                     date_search_start=datetime(2002, 8, 31),
                                                     )

    cf_timeseries_date_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_date_tz.ihg')    
    ok_(compare_csv_timeseries_files(cf_timeseries_date_file, cf_timeseries_date_file_solution, header=False))

    remove_files(cf_timeseries_file,
                 cf_qout_file,
                 cf_timeseries_daily_file,
                 cf_timeseries_daily_date_file,
                 cf_timeseries_date_file,
                 )
Example #13
def test_extract_timeseries_to_gssha_xys():
    """
    This tests extracting a timeseries from a RAPID Qout file to a GSSHA xys file
    """
    print("TEST 16: TEST EXTRACT TIMESERIES FROM Qout file to GSSHA xys file")
    
    cf_input_qout_file = os.path.join(COMPARE_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_qout_file = os.path.join(OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_CF.nc')
    copy(cf_input_qout_file, cf_qout_file)

    #if file is CF compliant, you can write out daily average
    cf_timeseries_daily_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_daily.xys')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_xys(cf_timeseries_daily_file,
                                                     series_name="RAPID_TO_GSSHA",
                                                     series_id=25,
                                                     river_index=20,
                                                     daily=True)

    cf_timeseries_daily_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_daily.xys')    
    ok_(compare_csv_timeseries_files(cf_timeseries_daily_file, cf_timeseries_daily_file_solution))
    
    #if file is CF compliant, check write out timeseries
    cf_timeseries_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries.xys')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_xys(cf_timeseries_file,
                                                     series_name="RAPID_TO_GSSHA",
                                                     series_id=25,
                                                     river_index=20)

    cf_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries.xys')    
    ok_(compare_csv_timeseries_files(cf_timeseries_file, cf_timeseries_file_solution, header=True))

    #if file is CF compliant, you can write out daily average, filter by date, and use max mode
    cf_timeseries_daily_date_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_daily_date.xys')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_xys(cf_timeseries_daily_date_file,
                                                     series_name="RAPID_TO_GSSHA",
                                                     series_id=25,
                                                     river_id=75224,
                                                     date_search_start=datetime(2002, 8, 31),
                                                     date_search_end=datetime(2002, 8, 31, 23, 59, 59),
                                                     daily=True,
                                                     mode='max')

    cf_timeseries_daily_date_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_daily_date.xys')    
    ok_(compare_csv_timeseries_files(cf_timeseries_daily_date_file, cf_timeseries_daily_date_file_solution))
    
    #if file is CF compliant, check write out timeseries and filter by date
    cf_timeseries_date_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_date.xys')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_gssha_time_series_xys(cf_timeseries_date_file,
                                                     series_name="RAPID_TO_GSSHA",
                                                     series_id=25,
                                                     date_search_start=datetime(2002, 8, 31),
                                                     #date_search_end=None,
                                                     river_id=75224)

    cf_timeseries_date_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_date.xys')    
    ok_(compare_csv_timeseries_files(cf_timeseries_date_file, cf_timeseries_date_file_solution))

    remove_files(cf_timeseries_file,
                 cf_qout_file,
                 cf_timeseries_daily_file,
                 cf_timeseries_daily_date_file,
                 cf_timeseries_date_file,
                 )
Example #14
def test_extract_timeseries():
    """
    This tests extracting a timeseries from a RAPID Qout file
    """
    print("TEST 13: TEST EXTRACT TIMESERIES FROM QINIT FILE")
    
    #for writing entire time series to file from new rapid output
    input_qout_file = os.path.join(COMPARE_DATA_PATH, 'Qout_nasa_lis_3hr_20020830.nc')
    new_qout_file = os.path.join(OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830.nc')
    copy(input_qout_file, new_qout_file)
    new_timeseries_file = os.path.join(OUTPUT_DATA_PATH, 'new_timeseries_file.csv')
    
    with RAPIDDataset(new_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(new_timeseries_file,
                                   river_id=75224)
                                   
        if qout_nc.is_time_variable_valid():
            original_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH, 'original_timeseries.csv')
        else:
            original_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH, 'original_timeseries-notime.csv')
        
    ok_(compare_csv_timeseries_files(new_timeseries_file, original_timeseries_file_solution, header=False))
    
    #for writing entire time series to file from original rapid output
    input_qout_file = os.path.join(COMPARE_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_original.nc')
    original_qout_file = os.path.join(OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_original.nc')
    copy(input_qout_file, original_qout_file)
    original_timeseries_file = os.path.join(OUTPUT_DATA_PATH, 'original_timeseries.csv')
    
    with RAPIDDataset(original_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(original_timeseries_file,
                                   river_id=75224)
    original_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH, 'original_timeseries-notime.csv')
        
    ok_(compare_csv_timeseries_files(original_timeseries_file, original_timeseries_file_solution, header=False))

    #if file is CF compliant, you can write out daily average
    cf_input_qout_file = os.path.join(COMPARE_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_CF.nc')
    cf_qout_file = os.path.join(OUTPUT_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_CF.nc')
    copy(cf_input_qout_file, cf_qout_file)
    cf_timeseries_daily_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_daily.csv')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(cf_timeseries_daily_file,
                                   river_index=20,
                                   daily=True)

    cf_timeseries_daily_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_daily.csv')    
    ok_(compare_csv_timeseries_files(cf_timeseries_daily_file, cf_timeseries_daily_file_solution, header=False))
    
    #if file is CF compliant, check write out timeseries
    cf_timeseries_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries.csv')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(cf_timeseries_file,
                                   river_index=20)

    cf_timeseries_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries.csv')    
    ok_(compare_csv_timeseries_files(cf_timeseries_file, cf_timeseries_file_solution, header=False))

    #if file is CF compliant, you can write out daily average, filter by date, and use max mode
    cf_timeseries_daily_date_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_daily_date.csv')

    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(cf_timeseries_daily_date_file,
                                   river_id=75224,
                                   date_search_start=datetime(2002, 8, 31),
                                   date_search_end=datetime(2002, 8, 31, 23, 59, 59),
                                   daily=True,
                                   mode='max')

    cf_timeseries_daily_date_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_daily_date.csv')    
    ok_(compare_csv_timeseries_files(cf_timeseries_daily_date_file, cf_timeseries_daily_date_file_solution, header=False))
    
    #if file is CF compliant, check write out timeseries and filter by date
    cf_timeseries_date_file = os.path.join(OUTPUT_DATA_PATH, 'cf_timeseries_date.csv')
    with RAPIDDataset(cf_qout_file) as qout_nc:
        qout_nc.write_flows_to_csv(cf_timeseries_date_file,
                                   date_search_start=datetime(2002, 8, 31),
                                   #date_search_end=None,
                                   river_id=75224)

    cf_timeseries_date_file_solution = os.path.join(COMPARE_DATA_PATH, 'cf_timeseries_date.csv')    
    ok_(compare_csv_timeseries_files(cf_timeseries_date_file, cf_timeseries_date_file_solution, header=False))

    remove_files(new_timeseries_file, 
                 new_qout_file,
                 original_timeseries_file, 
                 original_qout_file,
                 cf_timeseries_file,
                 cf_timeseries_date_file,
                 cf_timeseries_daily_file,
                 cf_timeseries_daily_date_file,
                 cf_qout_file,
                 )