    def test_plot_sensitivity(self):
        """Test plot sensitivity indices first order"""

        exp_unc, _, haz_unc = make_imp_uncs()
        samples = pd.DataFrame({
            'x_exp': [1, 2, 3, 4],
            'x_haz': [0.1, 0.2, 0.3, 0.4]
        })
        metrics = {
            'freq_curve':
            pd.DataFrame({
                'rp100': [9, 10, 11, 12],
                'rp250': [100, 110, 120, 130]
            })
        }

        unc = Uncertainty(unc_vars={
            'exp': exp_unc,
            'haz': haz_unc
        },
                          samples=samples,
                          metrics=metrics)

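        # Default SALib method, with second-order indices disabled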
        unc.calc_sensitivity(method_kwargs={'calc_second_order': False})
        unc.plot_sensitivity()
        plt.close()

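        # RBD-FAST sensitivity; M is the number of harmonics used by SALib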
        unc.calc_sensitivity(salib_method='rbd_fast', method_kwargs={'M': 8})
        unc.plot_sensitivity()
        plt.close()

    def test_plot_distribution(self):
        """Test plot metrics distribution"""

        exp_unc, _, haz_unc = make_imp_uncs()
        samples = pd.DataFrame({
            'x_exp': [1, 2, 3, 4],
            'x_haz': [0.1, 0.2, 0.3, 0.4]
        })
        metrics = {
            'freq_curve':
            pd.DataFrame({
                'rp100': [9, 10, 11, 12],
                'rp250': [100, 110, 120, 130]
            })
        }

        unc = Uncertainty(unc_vars={
            'exp': exp_unc,
            'haz': haz_unc
        },
                          samples=samples,
                          metrics=metrics)

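        # Plot the distribution of each metric column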
        unc.plot_distribution()
        plt.close()

    def test_calc_sensitivity_pass(self):
        """Test compute sensitivity with default settings"""

        exp_unc, _, haz_unc = make_imp_uncs()
        samples = pd.DataFrame({
            'x_exp': [1, 2, 3, 4],
            'x_haz': [0.1, 0.2, 0.3, 0.4]
        })
        metrics = {
            'rp':
            pd.DataFrame({
                'rp100': [9, 10, 11, 12],
                'rp250': [100, 110, 120, 130]
            })
        }

        unc = Uncertainty(unc_vars={
            'exp': exp_unc,
            'haz': haz_unc
        },
                          samples=samples,
                          metrics=metrics)

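        # Sensitivity indices are returned as a nested dict:
        # metric name -> column -> SALib result entries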
        sens = unc.calc_sensitivity(method_kwargs={'calc_second_order': False})
        self.assertSetEqual(set(sens.keys()), {'rp'})
        self.assertSetEqual(set(sens['rp'].keys()), {'rp100', 'rp250'})
        self.assertSetEqual(set(sens['rp']['rp100'].keys()),
                            {'S1', 'S1_conf', 'ST', 'ST_conf'})
        self.assertTrue(
            np.allclose(sens['rp']['rp100']['S1'],
                        np.array([0.66666667, 1.33333333])))

    def test_calc_sensitivity_XY_pass(self):
        """Test compute sensitivity with method rbd_fast (variable names
        different from default)"""

        exp_unc, _, haz_unc = make_imp_uncs()
        samples = pd.DataFrame({
            'x_exp': [1, 2, 3, 4],
            'x_haz': [0.1, 0.2, 0.3, 0.4]
        })
        metrics = {
            'rp':
            pd.DataFrame({
                'rp100': [9.0, 10.0, 11.0, 12.0],
                'rp250': [100.0, 110.0, 120.0, 130.0]
            })
        }

        unc = Uncertainty(unc_vars={
            'exp': exp_unc,
            'haz': haz_unc
        },
                          samples=samples,
                          metrics=metrics)

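        # RBD-FAST yields only first-order indices ('S1') and parameter names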
        sens = unc.calc_sensitivity(salib_method='rbd_fast',
                                    method_kwargs={'M': 8})
        self.assertSetEqual(set(sens.keys()), {'rp'})
        self.assertSetEqual(set(sens['rp'].keys()), {'rp100', 'rp250'})
        self.assertSetEqual(set(sens['rp']['rp100'].keys()), {'S1', 'names'})
        self.assertTrue(
            np.allclose(sens['rp']['rp100']['S1'], np.array([1.0, 1.0])))

    def test_init_pass(self):
        """Test initialization of Uncertainty"""

        exp_unc, impf_unc, haz_unc = make_imp_uncs()

        unc = Uncertainty({'exp': exp_unc, 'impf': impf_unc, 'haz': haz_unc})
        self.assertDictEqual(unc.metrics, {})
        self.assertDictEqual(unc.sensitivity, {})

        self.assertEqual(unc.n_samples, 0)
        self.assertSetEqual(set(unc.param_labels),
                            {'x_exp', 'x_haz', 'x_paa', 'x_mdd'})
        self.assertSetEqual(set(unc.problem_sa['names']),
                            {'x_exp', 'x_haz', 'x_paa', 'x_mdd'})
        self.assertSetEqual(set(unc.distr_dict.keys()),
                            {"x_exp", "x_paa", "x_mdd", "x_haz"})

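        # Initialization with pre-computed samples and metrics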
        unc = Uncertainty(
            {
                'exp': exp_unc,
                'impf': impf_unc
            },
            samples=pd.DataFrame({
                'x_exp': [1, 2],
                'x_paa': [3, 4],
                'x_mdd': [1, 2]
            }),
            metrics={'aai_agg': pd.DataFrame({'aai_agg': [100, 200]})})
        self.assertEqual(unc.n_samples, 2)
        self.assertSetEqual(set(unc.param_labels), {'x_exp', 'x_paa', 'x_mdd'})
        self.assertListEqual(list(unc.metrics['aai_agg']['aai_agg']),
                             [100, 200])
        self.assertDictEqual(unc.sensitivity, {})

    def test_save_pass(self):
        """Test save and reload of samples"""

        exp_unc, impf_unc, haz_unc = make_imp_uncs()

        unc = Uncertainty({'exp': exp_unc, 'impf': impf_unc, 'haz': haz_unc})
        unc.make_sample(1)
        filename = unc.save_samples_df()

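        # Reload the saved samples into a fresh UncImpact and rerun the analyses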
        unc_imp = UncImpact(exp_unc, impf_unc, haz_unc)
        unc_imp.load_samples_df(filename)

        unc_imp.calc_distribution()
        unc_imp.calc_sensitivity()

    def test_make_sample_pass(self):
        """Test generate sample"""

        exp_unc, _, haz_unc = make_imp_uncs()

        unc = Uncertainty({'exp': exp_unc, 'haz': haz_unc})

        # default Saltelli sampling
        unc.make_sample(N=1, sampling_kwargs={'calc_second_order': True})
        self.assertEqual(unc.n_samples, 1 * (2 * 2 + 2))  # N * (2 * D + 2)
        self.assertTrue(isinstance(unc.samples_df, pd.DataFrame))
        self.assertTrue(
            np.allclose(unc.samples_df['x_exp'],
                        np.array([
                            1.239453, 1.837109, 1.239453, 1.239453, 1.837109,
                            1.837109
                        ]),
                        rtol=1e-05))
        self.assertListEqual(list(unc.samples_df['x_haz']),
                             [0.0, 0.0, 1.0, 1.0, 0.0, 1.0])

        # Latin hypercube sampling
        unc.make_sample(N=1,
                        sampling_method='latin',
                        sampling_kwargs={'seed': 11245})
        self.assertEqual(unc.n_samples, 1)
        self.assertTrue(isinstance(unc.samples_df, pd.DataFrame))
        self.assertTrue(
            np.allclose(unc.samples_df['x_exp'],
                        np.array([2.58309]),
                        rtol=1e-05))
        self.assertListEqual(list(unc.samples_df['x_haz']), [2.0])

    def test_plot_sample_pass(self):
        """Test plot of the sample"""

        exp_unc, _, haz_unc = make_imp_uncs()

        unc = Uncertainty({'exp': exp_unc, 'haz': haz_unc})

        unc.make_sample(N=1)
        unc.plot_sample()
        plt.close()

    def test_est_comp_time_pass(self):
        """Test estimate of computation time"""

        exp_unc, _, haz_unc = make_imp_uncs()

        unc = Uncertainty({'exp': exp_unc, 'haz': haz_unc})

        unc.make_sample(N=1, sampling_kwargs={'calc_second_order': False})
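        # The single-run time 0.12345 is expected to be rounded to 0.123
        # before multiplication by the number of runs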
        est = unc.est_comp_time(0.12345)
        self.assertEqual(est, 1 * (2 + 2) * 0.123)  # N * (D + 2)

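        # With a process pool the estimate is divided by the number of nodes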
        pool = Pool(nodes=4)
        est = unc.est_comp_time(0.12345, pool)
        self.assertEqual(est, 1 * (2 + 2) * 0.123 / 4)  # N * (D + 2)
        pool.close()
        pool.join()
        pool.clear()