Exemplo n.º 1
0
    def test_when_given_invalid_parameters_then_does_not_raise(self):
        """Exporting a cross-section built from dummy (invalid) parameters
        must not raise any exception."""
        # 1. Set up test data
        runner = Fm2ProfRunner(None)
        output_dir = "dummy_dir"
        input_params = {}
        css_name = "dummy_name"
        css_length = 0
        css_location = (0, 0)
        test_css = CS(input_params, css_name, css_length, css_location)
        cross_sections = [test_css]

        # 2. Set initial expectations
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        assert os.path.exists(output_dir)

        # 3. Run test. The rmtree call was previously duplicated in the
        # except branch and after the try block; `finally` guarantees the
        # directory is removed exactly once on every path.
        try:
            runner._export_cross_sections(cross_sections, output_dir)
        except Exception as e:
            pytest.fail(
                "No exception expected but was thrown {}".format(str(e)))
        finally:
            # 4. Clean up directory
            shutil.rmtree(output_dir)
Exemplo n.º 2
0
    def test_when_given_invalid_parameters_then_no_exception_risen(self):
        """Setting FM data onto a css built from dummy parameters must not
        raise."""
        # 1. Set up test data
        runner = Fm2ProfRunner(None)
        input_param_dict = {"dummy_key": "dummy_value"}
        cross_section = CS(input_param_dict, "dummy_name", 4.2, (4, 2))
        fm_args = (None, None, None, None, None)
        fm_model_data = FMD(fm_args)
        # BUG FIX: `datetime.datetime.now` was passed as a function object
        # (missing call parentheses); start_time is now an actual datetime.
        start_time = datetime.datetime.now()

        # 2. Set expectations
        assert runner is not None
        assert input_param_dict is not None
        assert cross_section is not None
        assert fm_model_data is not None
        assert start_time is not None

        # 3. Run test
        try:
            runner._set_fm_data_to_cross_section(
                cross_section=cross_section,
                input_param_dict=input_param_dict,
                fm_model_data=fm_model_data,
                start_time=start_time,
            )
        except Exception as e_info:
            pytest.fail("No expected exception but was thrown: {}".format(
                str(e_info)))
Exemplo n.º 3
0
    def test_when_given_valid_parameters_then_css_are_exported(self):
        """Exporting a valid cross-section list writes the expected
        definition/location/geometry files into the output directory."""
        # 1. Set up test data
        runner = Fm2ProfRunner(None)
        output_dir = "dummy_dir"
        input_params = {}
        css_name = "dummy_name"
        css_length = 0
        css_location = (0, 0)
        test_css = CS(input_params, css_name, css_length, css_location)
        cross_sections = [test_css]

        # 2. Set initial expectations
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        assert os.path.exists(output_dir)
        expected_files = [
            "CrossSectionDefinitions.ini",
            "CrossSectionLocations.ini",
            "geometry.csv",
        ]

        # 3. run test
        try:
            runner._export_cross_sections(cross_sections, output_dir)
        except Exception as e:
            e_message = str(e)
            shutil.rmtree(output_dir)
            pytest.fail(
                "No exception expected but was thrown {}".format(e_message))

        # 4. Verify final expectations, then always remove the directory.
        # BUG FIX: the original never cleaned up 'dummy_dir' on a
        # successful run, leaking state into subsequent tests.
        try:
            data_in_dir = os.listdir(output_dir)
            assert data_in_dir is not None
            for expected_file in expected_files:
                assert expected_file in data_in_dir
        finally:
            shutil.rmtree(output_dir)
Exemplo n.º 4
0
    def test_when_all_parameters_are_correct_then_calculates_css_correction(
            self):
        """Running the 2D volume correction on a css with valid volume
        arrays must set its 'corrected' flag."""
        # 1. Set up test data
        runner = Fm2ProfRunner(None)

        # BUG FIX: input_param_dict was previously undefined (NameError);
        # an empty dict is sufficient to construct the cross-section.
        input_param_dict = {}
        css_name = "dummy_name"
        css_length = 0
        css_location = (0, 0)
        test_css = CS(input_param_dict, css_name, css_length, css_location)

        # 2. Set up / verify initial expectations
        assert runner is not None
        assert test_css is not None
        assert not test_css._css_is_corrected

        # 2.1. values required for correction.
        test_css._css_total_volume = np.array([2, 3, 1, 0])
        test_css._fm_total_volume = np.array([2, 3, 1, 0])
        test_css._css_flow_volume = np.array([2, 3, 1, 0])
        test_css._fm_flow_volume = np.array([2, 3, 1, 0])
        test_css._css_z = np.array([0, 1, 2, 3])

        # 3. Run test. Catch Exception rather than using a bare except so
        # KeyboardInterrupt/SystemExit still propagate.
        try:
            runner._perform_2D_volume_correction(test_css)
        except Exception:
            pytest.fail(
                "Unexpected exception while calculating css correction.")

        # 4. Verify final expectations.
        assert test_css._css_is_corrected, (
            "" +
            "The calculation did not set the flag 'is corrected ' to True")
Exemplo n.º 5
0
    def test_when_all_parameters_are_correct_then_reduce_points(self):
        """Reducing css points on a css with valid width/z arrays must not
        raise."""
        # set up test data
        new_number_of_css_points = 25
        old_number_of_css_points = 30

        runner = Fm2ProfRunner(None)
        input_param_dict = {
            "number_of_css_points": str(new_number_of_css_points)
        }

        css_name = "dummy_name"
        css_length = 0
        css_location = (0, 0)
        test_css = CS(input_param_dict, css_name, css_length, css_location)

        # initial expectation
        assert runner is not None
        assert test_css is not None
        assert test_css._css_is_reduced is False
        test_css._css_total_width = np.linspace(10, 20,
                                                old_number_of_css_points)
        test_css._css_z = np.linspace(0, 10, old_number_of_css_points)
        test_css._css_flow_width = np.linspace(5, 15, old_number_of_css_points)

        # run. Catch Exception rather than using a bare except so
        # KeyboardInterrupt/SystemExit still propagate.
        try:
            runner._reduce_css_points(input_param_dict, test_css)
        except Exception:
            pytest.fail("Unexpected exception while reducing css points.")
Exemplo n.º 6
0
    def test_when_all_parameters_are_correct_then_returns_expected_css(self):
        """_generate_cross_section returns a css whose name, length,
        location, branch and chainage match the entries in css_data."""
        # 1. Set up test data
        runner = Fm2ProfRunner(None)
        test_css_name = "dummy_css"
        input_param_dict = {"dummyKey": "dummyValue"}

        css_data_length = 42
        css_data_location = (4, 2)
        css_data_branch_id = 420
        css_data_chainage = 4.2
        # NOTE: a redundant earlier `css_data = {"id": test_css_name}`
        # assignment (immediately overwritten below) was removed.
        css_data = {
            "id": test_css_name,
            "length": css_data_length,
            "xy": css_data_location,
            "branchid": css_data_branch_id,
            "chainage": css_data_chainage,
        }
        css_data_dict = {
            "id": [test_css_name],
            "length": [css_data_length],
            "xy": [css_data_location],
            "branchid": [css_data_branch_id],
            "chainage": [css_data_chainage],
        }
        fmd_arg_list = (None, None, None, None, css_data_dict)
        fm_model_data = FMD(fmd_arg_list)

        # 2. Expectations
        return_css = None

        # 3. Run test
        try:
            return_css = runner._generate_cross_section(
                css_data=css_data,
                input_param_dict=input_param_dict,
                fm_model_data=fm_model_data,
            )
        except Exception as e_info:
            pytest.fail("No expected exception but was risen:" +
                        " {}".format(str(e_info)))

        # 4. Verify final expectations
        assert return_css is not None
        assert (return_css.name == test_css_name
                ), "" + "Expected name {} but was {}".format(
                    test_css_name, return_css.name)
        assert (return_css.length == css_data_length
                ), "" + "Expected length {} but was {}".format(
                    css_data_length, return_css.length)
        assert (return_css.location == css_data_location
                ), "" + "Expected location {} but was {}".format(
                    css_data_location, return_css.location)
        assert (return_css.branch == css_data_branch_id
                ), "" + "Expected branch {} but was {}".format(
                    css_data_branch_id, return_css.branch)
        assert (return_css.chainage == css_data_chainage
                ), "" + "Expected chainage {} but was {}".format(
                    css_data_chainage, return_css.chainage)
Exemplo n.º 7
0
    def test_when_waal_case_then_performance_is_slow(self):
        """Smoke-test the full Waal case: running fm2prof with this
        configuration must complete without raising."""
        # 1. Set up test model.
        case_name = "case_08_waal"
        local_test_dir = TestUtils.get_local_test_data_dir("performance_waal")
        ini_file = str(local_test_dir / "fm2prof_08.ini")
        json_file = str(local_test_dir / "SectionPolygonDissolved.json")
        external_test_dir = TestUtils.get_external_test_data_subdir(case_name)
        # BUG FIX: paths were built with embedded Windows backslashes
        # ("Data\\FM\\..."), which do not resolve on POSIX systems; build
        # them with the pathlib '/' operator instead.
        map_file = str(
            external_test_dir / "Data" / "FM" / "FlowFM_fm2prof_map.nc")
        css_file = str(
            external_test_dir / "Data" / "cross_section_locations.xyz")

        # 1.1. Create ini file.
        ini_file_path = None
        test_ini_file = IniFile(ini_file_path)
        base_output_dir = _get_base_output_dir()
        test_ini_file._output_dir = str(
            _check_and_create_test_case_output_dir(base_output_dir, case_name))

        test_ini_file._input_file_paths = {
            "fm_netcdfile": map_file,
            "crosssectionlocationfile": css_file,
            "regionpolygonfile": None,
            "sectionpolygonfile": json_file,
        }
        test_ini_file._input_parameters = {
            "number_of_css_points": 20,
            "transitionheight_sd": 0.25,
            "velocity_threshold": 0.01,
            "relative_threshold": 0.03,
            "min_depth_storage": 0.02,
            "plassen_timesteps": 10,
            "storagemethod_wli": 1,
            "bedlevelcriterium": 0.1,
            "sdstorage": 1,
            "frictionweighing": 0,
            "sectionsmethod": 1,
            "sdoptimisationmethod": 0,
            "exportmapfiles": 1,
        }

        # 2. Verify initial expectations.
        assert os.path.exists(ini_file), "Ini (test) file was not found."
        assert os.path.exists(json_file), "Json (test) file was not found."
        assert os.path.exists(map_file), "Map (test) file was not found."
        assert os.path.exists(
            css_file), "" + "CrossSection (test) file was not found"

        # 3. Run test.
        try:
            runner = Fm2ProfRunner(iniFilePath=None)
            runner.run_inifile(iniFile=test_ini_file)
        except Exception as e_error:
            pytest.fail("No exception expected but was thrown {}.".format(
                str(e_error)))
Exemplo n.º 8
0
    def test_when_no_file_path_then_no_exception_is_risen(self):
        """A Fm2ProfRunner constructed without a file path must not raise
        and must yield a usable instance."""
        created_runner = None

        # Exercise: the constructor itself is the unit under test.
        try:
            created_runner = Fm2ProfRunner()
        except Exception as error:
            pytest.fail(
                "No exception expected, but thrown: {}".format(str(error)))

        # Verify a runner instance was actually produced.
        assert created_runner is not None
Exemplo n.º 9
0
    def test_when_no_cross_sections_then_does_not_raise(self, cross_sections):
        """Exporting an empty/None cross-section collection must not
        raise (cross_sections is supplied by a fixture)."""
        sut = Fm2ProfRunner(None)
        target_dir = "dummy_dir"

        # Exercise and guard against any unexpected exception.
        try:
            sut._export_cross_sections(cross_sections, target_dir)
        except Exception as error:
            pytest.fail(
                "No exception was expected, but given: {}".format(str(error)))
Exemplo n.º 10
0
    def test_when_given_correct_parameters_then_returns_list_with_only_valid_css(
            self):
        """A single valid css entry yields exactly one generated css whose
        attributes mirror the input data."""
        sut = Fm2ProfRunner(None)
        expected_name = "dummy_css"
        expected_length = 42
        expected_location = (4, 2)
        expected_branch = 420
        expected_chainage = 4.2
        css_data = {
            "id": [expected_name],
            "length": [expected_length],
            "xy": [expected_location],
            "branchid": [expected_branch],
            "chainage": [expected_chainage],
        }
        fm_model_data = FMD((None, None, None, None, css_data))
        input_param_dict = {"dummyKey": "dummyValue"}

        # Preconditions.
        assert sut is not None

        # Exercise.
        try:
            generated = sut._generate_cross_section_list(
                input_param_dict, fm_model_data)
        except Exception as error:
            pytest.fail("Exception {}".format(str(error)) +
                        " was given while generating cross sections")

        # Verify: exactly one css with every expected attribute value.
        assert generated is not None
        assert len(generated) == 1
        css = generated[0]
        assert css is not None
        attribute_expectations = (
            ("name", expected_name),
            ("length", expected_length),
            ("location", expected_location),
            ("branch", expected_branch),
            ("chainage", expected_chainage),
        )
        for attribute, wanted in attribute_expectations:
            actual = getattr(css, attribute)
            assert actual == wanted, (
                "" + "Expected {} {} but was {}".format(
                    attribute, wanted, actual))
Exemplo n.º 11
0
    def test_when_cross_section_not_given_then_no_exception_risen(self):
        """Passing None as the cross-section to the 2D volume correction
        must not raise."""
        # 1. Set up test data. The unused input_param_dict and starttime
        # locals from the original were removed.
        runner = Fm2ProfRunner(None)
        test_css = None

        # 2. Set up / verify initial expectations
        assert runner is not None

        # 3. Run test. Catch Exception rather than using a bare except so
        # KeyboardInterrupt/SystemExit still propagate.
        try:
            runner._perform_2D_volume_correction(test_css)
        except Exception:
            pytest.fail(
                "Unexpected exception while calculating css correction.")
Exemplo n.º 12
0
    def test_when_given_input_data_then_output_is_generated(
            self, case_name, map_file, css_file, region_file, section_file):
        """Running a full case from its ini configuration produces files
        in the case's output directory (parameters come from fixtures)."""
        # Build an IniFile and point it at the case's external test data.
        iniFilePath = None
        iniFile = IniFile(iniFilePath)
        test_data_dir = TestUtils.get_external_test_data_subdir(case_name)
        if not test_data_dir.is_dir():
            pytest.fail(f"Test dir not found at {test_data_dir}")
        base_output_dir = _get_base_output_dir()
        case_output_dir = _check_and_create_test_case_output_dir(
            base_output_dir, case_name)
        iniFile._set_output_directory_no_validation(str(case_output_dir))

        # Optional polygon files: resolve against the data dir when given,
        # otherwise pass the (falsy) fixture value through unchanged.
        region_file_path = (
            test_data_dir / region_file if region_file else region_file)
        section_file_path = (
            test_data_dir / section_file if section_file else section_file)

        iniFile.set_parameter("skipmaps", 6)
        iniFile.set_input_file("2dmapoutput", str(test_data_dir / map_file))
        iniFile.set_input_file("crosssectionlocationfile",
                               str(test_data_dir / css_file))
        iniFile.set_input_file("regionpolygonfile", region_file_path)
        iniFile.set_input_file("sectionpolygonfile", section_file_path)

        # Feed the configuration to the runner via an in-memory buffer.
        buf = io.StringIO(iniFile.print_configuration())
        runner = Fm2ProfRunner(buf)

        # Precondition: the output directory exists and holds at most one
        # entry (i.e. no prior output).
        assert (os.path.exists(iniFile.get_output_directory())
                and not len(os.listdir(iniFile.get_output_directory())) > 1)

        # Exercise.
        runner.run()

        # Verify output was produced for this case.
        assert os.listdir(iniFile.get_output_directory(
        )), "" + "There is no output generated for {0}".format(case_name)
Exemplo n.º 13
0
    def test_when_no_output_dir_then_does_not_raise(self, output_dir):
        """Exporting to a missing/None output directory must not raise
        (output_dir is supplied by a fixture)."""
        sut = Fm2ProfRunner(None)
        dummy_css = CS({}, "dummy_name", 0, (0, 0))

        # Exercise and guard against any unexpected exception.
        try:
            sut._export_cross_sections([dummy_css], output_dir)
        except Exception as error:
            pytest.fail(
                "No exception was expected, but given: {}".format(str(error)))
Exemplo n.º 14
0
    def test_when_not_given_css_data_then_returns_none(self, css_data):
        """_get_new_cross_section returns None for missing css data
        (css_data is supplied by a fixture)."""
        sut = Fm2ProfRunner(None)
        result = None

        # Exercise without expecting any exception.
        try:
            result = sut._get_new_cross_section(css_data=css_data,
                                                input_param_dict=None)
        except Exception as error:
            pytest.fail("No expected exception but was risen: {}".format(
                str(error)))

        # Verify nothing was created.
        assert result is None
Exemplo n.º 15
0
    def test_when_given_valid_arguments_then_returns_expected_css(self):
        """_get_new_cross_section maps each css_data key onto the matching
        attribute of the returned cross-section."""
        sut = Fm2ProfRunner(None)
        expected_name = "dummy_css"
        expected_length = 42
        expected_location = (4, 2)
        expected_branch = 420
        expected_chainage = 4.2
        css_data = {
            "id": expected_name,
            "length": expected_length,
            "xy": expected_location,
            "branchid": expected_branch,
            "chainage": expected_chainage,
        }

        # Exercise without expecting any exception.
        result = None
        try:
            result = sut._get_new_cross_section(
                css_data=css_data,
                input_param_dict={"dummyKey": "dummyValue"})
        except Exception as error:
            pytest.fail("No expected exception but was risen: {}".format(
                str(error)))

        # Verify every attribute was taken over from css_data.
        assert result is not None
        assert result.name == expected_name, (
            "" + "Expected name {} but was {}".format(
                expected_name, result.name))
        assert result.length == expected_length, (
            "" + "Expected length {} but was {}".format(
                expected_length, result.length))
        assert result.location == expected_location, (
            "" + "Expected location {} but was {}".format(
                expected_location, result.location))
        assert result.branch == expected_branch, (
            "" + "Expected branch {} but was {}".format(
                expected_branch, result.branch))
        assert result.chainage == expected_chainage, (
            "" + "Expected chainage {} but was {}".format(
                expected_chainage, result.chainage))
Exemplo n.º 16
0
    def test_when_no_css_data_is_given_then_expected_exception_risen(self):
        """Generating a cross-section without data raises with a specific
        message."""
        sut = Fm2ProfRunner(None)
        expected_error = "No data was given to create a Cross Section"

        # Exercise: all-None arguments must trigger the guard clause.
        with pytest.raises(Exception) as e_info:
            sut._generate_cross_section(css_data=None,
                                        input_param_dict=None,
                                        fm_model_data=None)

        # Verify the exact error message.
        actual_error = str(e_info.value)
        assert actual_error == expected_error, (
            "" + "Expected exception message {},".format(expected_error) +
            " retrieved {}".format(actual_error))
Exemplo n.º 17
0
    def test_when_css_data_misses_rest_of_key_values_then_returns_none(self):
        """Incomplete css data (only an 'id' key) must yield None instead
        of raising."""
        sut = Fm2ProfRunner(None)
        result = None

        # Exercise with a dict that is missing all non-id keys.
        try:
            result = sut._get_new_cross_section(
                css_data={"id": []},
                input_param_dict={"dummyKey": "dummyValue"})
        except Exception as error:
            pytest.fail("No expected exception but was risen: {}".format(
                str(error)))

        # Verify nothing was created.
        assert result is None
Exemplo n.º 18
0
    def test_when_not_given_FmModelData_then_returns_empty_list(self):
        """Without FM model data the generated css list is empty, not
        None and not an exception."""
        sut = Fm2ProfRunner(None)
        assert sut is not None

        # Exercise with a valid parameter dict but no model data.
        result = None
        try:
            result = sut._generate_cross_section_list(
                {"DummyKey": "dummyValue"}, None)
        except Exception as error:
            pytest.fail("Exception {}".format(str(error)) +
                        " was given while generating cross sections")

        # Verify an empty list came back.
        assert result is not None
        assert len(result) == 0
Exemplo n.º 19
0
    def test_when_not_given_input_param_dict_then_returns_empty_list(self):
        """Without input parameters the generated css list is empty, not
        None and not an exception."""
        sut = Fm2ProfRunner(None)
        model_data = FMD((0, 1, 2, 3, {}))
        assert sut is not None

        # Exercise with valid model data but no parameter dict.
        result = None
        try:
            result = sut._generate_cross_section_list(None, model_data)
        except Exception as error:
            pytest.fail("Exception {}".format(str(error)) +
                        " was given while generating cross sections")

        # Verify an empty list came back.
        assert result is not None
        assert len(result) == 0
Exemplo n.º 20
0
    def test_given_inifile_then_no_exception_is_risen(self):
        """Constructing a runner from an existing, valid ini file must not
        raise."""
        # Locate the ini file inside the local test-data directory.
        test_data_dir = TestUtils.get_local_test_data_dir("IniFile")
        ini_file_path = os.path.join(test_data_dir, "valid_ini_file.ini")
        created_runner = None

        # The file has to exist before it can be used as input.
        assert os.path.exists(ini_file_path), (
            "" "Test File {} was not found".format(ini_file_path))

        # Exercise the constructor.
        try:
            created_runner = Fm2ProfRunner(ini_file_path)
        except Exception as error:
            pytest.fail(
                "No exception expected, but thrown: {}".format(str(error)))

        # Verify a runner instance was produced.
        assert created_runner is not None
Exemplo n.º 21
0
    def test_when_no_input_param_dict_is_given_then_expected_exception_risen(
            self):
        """Missing input parameters raise with a message that names the
        cross-section being created."""
        sut = Fm2ProfRunner(None)
        css_name = "dummy_css"
        expected_error = (
            "No input parameters (from ini file)" +
            " given for new cross section {}".format(css_name))

        # Exercise: css data without any input parameters.
        with pytest.raises(Exception) as e_info:
            sut._generate_cross_section(css_data={"id": css_name},
                                        input_param_dict=None,
                                        fm_model_data=None)

        # Verify the exact error message.
        actual_error = str(e_info.value)
        assert actual_error == expected_error, (
            "" + "Expected exception message {},".format(expected_error) +
            " retrieved {}".format(actual_error))
Exemplo n.º 22
0
    def test_when_given_correct_parameters_then_returns_list_with_expected_css(
            self):
        """Two valid css entries yield two generated css objects whose
        attributes scale with their (1-based) index."""
        sut = Fm2ProfRunner(None)
        input_param_dict = {"dummyKey": "dummyValue"}
        number_of_css = 2
        base_name = "dummy_css"
        base_length = 42
        base_location = (4, 2)
        base_branch = 420
        base_chainage = 4.2

        # Expected (name, length, location, branch, chainage) per entry:
        # every numeric value is multiplied by index + 1.
        expectations = []
        for index in range(number_of_css):
            multiplier = index + 1
            expectations.append((
                base_name + "_" + str(index),
                base_length * multiplier,
                tuple(multiplier * coord for coord in base_location),
                base_branch * multiplier,
                base_chainage * multiplier,
            ))

        css_data = {
            "id": [entry[0] for entry in expectations],
            "length": [entry[1] for entry in expectations],
            "xy": [entry[2] for entry in expectations],
            "branchid": [entry[3] for entry in expectations],
            "chainage": [entry[4] for entry in expectations],
        }
        fm_model_data = FMD((None, None, None, None, css_data))

        # Preconditions.
        assert sut is not None

        # Exercise.
        try:
            generated = sut._generate_cross_section_list(
                input_param_dict, fm_model_data)
        except Exception as error:
            pytest.fail("Exception {}".format(str(error)) +
                        " was given while generating cross sections")

        # Verify every generated css against its expectation tuple.
        assert generated is not None
        assert len(generated) == number_of_css
        for css, (name, length, location, branch, chainage) in zip(
                generated, expectations):
            assert css.name == name, (
                "" + "Expected name {} but was {}".format(name, css.name))
            assert css.length == length, (
                "" + "Expected length {} but was {}".format(
                    length, css.length))
            assert css.location == location, (
                "" + "Expected location {} but was {}".format(
                    location, css.location))
            assert css.branch == branch, (
                "" + "Expected branch {} but was {}".format(
                    branch, css.branch))
            assert css.chainage == chainage, (
                "" + "Expected chainage {} but was {}".format(
                    chainage, css.chainage))