Example #1
 def test_cumtri_2_cumtri(self):
     # Convert cumulative DataFrame tri to cum triangle.
     tri = trikit.totri(self.cumtri, type_="cum", data_format="cum", data_shape="triangle")
     self.assertTrue(
         isinstance(tri, trikit.triangle.CumTriangle),
         "Error converting cumtri data to cumtri."
         )
Example #2
 def test_incrtri_2_cumtri(self):
     # Convert incremental DataFrame tri to cum triangle.
     tri = trikit.totri(self.incrtri, type_="cum", data_format="incr", data_shape="triangle")
     self.assertTrue(
         isinstance(tri, trikit.triangle.CumTriangle),
         "Error converting incr tri data to cum tri."
         )
Example #3
 def test_incrtab_2_incrtri(self):
     # Convert incremental tabular data to incr triangle.
     tri = trikit.totri(self.incrtab, type_="incr", data_format="incr", data_shape="tabular")
     self.assertTrue(
         isinstance(tri, trikit.triangle.IncrTriangle),
         "Error converting incr tabular data to incr tri."
         )
Example #4
 def test_cumtab_2_incrtri(self):
     # Convert cumulative tabular data to incr triangle.
     tri = trikit.totri(self.cumtab, type_="incr", data_format="cum", data_shape="tabular")
     self.assertTrue(
         isinstance(tri, trikit.triangle.IncrTriangle),
         "Error converting cum tabular data to incr tri."
         )
Example #5
    def setUp(self):
        # Modify origin and development periods to test non-sequential periods.
        df = trikit.load(dataset="ta83")
        df["dev"] = df["dev"] * 12
        df["origin"] = df["origin"] + 2000
        tri = trikit.totri(df,
                           tri_type="cum",
                           data_shape="tabular",
                           data_format="incr")
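        # Fit the Mack chain ladder and produce results under both lognormal and normal assumptions.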
        mcl = mack.MackChainLadder(cumtri=tri)
        r_lognorm = mcl(alpha=1, dist="lognorm")
        r_norm = mcl(alpha=1, dist="norm")

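        # Reference sums for the ta83 dataset used to validate the quantities computed below.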
        dactual_ta83 = {
            "norm_mu_sum": 18680869.054532073,
            "norm_sigma_sum": 4771773.155719111,
            "norm_75_sum": 21899381.138325423,
            "norm_95_sum": 26529737.436706323,
            "lognorm_mu_sum": 125.8539998696597,
            "lognorm_sigma_sum": 2.6740386407158327,
            "lognorm_75_sum": 21420867.75494642,
            "lognorm_95_sum": 27371140.20920447,
            "mse_sum": 4156154300629.1504,
            "std_error_sum": 4771773.155719111,
            "cv_sum": 2.80203003051732,
            "process_error_sum": 3527957849338.302,
            "parameter_error_sum": 628196451290.8485,
            "ldfs_sum": 14.207460332760107,
            "ultimates_sum": 53038959.05453208,
            "reserves_sum": 18680869.054532073,
            "devpvar_sum": 279118.8961841563,
            "ldfvar_sum": 0.05702584091389985,
        }

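        # Sum the 75th and 95th percentiles of the fitted per-origin distributions (rvs), dropping NaN entries.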
        with np.errstate(invalid="ignore"):
            self.norm_75_sum = pd.Series([
                r_norm.rvs[ii].ppf(.75) for ii in r_norm.tri.index
            ]).dropna().sum()
            self.norm_95_sum = pd.Series([
                r_norm.rvs[ii].ppf(.95) for ii in r_norm.tri.index
            ]).dropna().sum()
            self.lognorm_75_sum = pd.Series([
                r_lognorm.rvs[ii].ppf(.75) for ii in r_lognorm.tri.index
            ]).dropna().sum()
            self.lognorm_95_sum = pd.Series([
                r_lognorm.rvs[ii].ppf(.95) for ii in r_lognorm.tri.index
            ]).dropna().sum()

        self.mse_sum = r_lognorm.mse.drop("total").dropna().sum()
        self.std_error_sum = r_lognorm.std_error.drop("total").dropna().sum()
        self.cv_sum = r_lognorm.cv.drop("total").dropna().sum()
        self.process_error_sum = r_lognorm.process_error.dropna().sum()
        self.parameter_error_sum = r_lognorm.parameter_error.dropna().sum()
        self.ldfs_sum = r_lognorm.ldfs.dropna().sum()
        self.ultimates_sum = r_lognorm.ultimate.drop("total").dropna().sum()
        self.reserves_sum = r_lognorm.reserve.drop("total").dropna().sum()
        self.devpvar_sum = r_lognorm.devpvar.dropna().sum()
        self.ldfvar_sum = r_lognorm.ldfvar.dropna().sum()
        self.dactual_ta83 = dactual_ta83
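
A minimal sketch of how the sums stored above might be compared against dactual_ta83; the method name and tolerance are assumptions, not part of the original suite:

    def test_reserves_sum(self):
        # Hypothetical check: computed reserve total vs. the ta83 reference value.
        self.assertTrue(
            np.allclose(self.reserves_sum, self.dactual_ta83["reserves_sum"]),
            "reserves_sum does not match the ta83 reference."
            )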
Example #6
 def test_cy_effects_test(self):
     ref_tt0 = (8.965613354894957, 16.78438664510504)
     ref_tt1 = 14.
     tri = trikit.totri(trikit.load("raa"))
     tt = tri.mack_cl().cy_effects_test()
     test1 = all([np.allclose(ii, jj) for ii, jj in zip(ref_tt0, tt[0])])
     test2 = np.allclose(ref_tt1, tt[-1])
     self.assertTrue(
         test1 and test2,
         "Non-equality between computed vs. reference cy effects test.")
Example #7
 def test_devp_corr_test(self):
     ref_tt0 = (-0.12746658149149367, 0.12746658149149367)
     ref_tt1 = 0.0695578231292517
     tri = trikit.totri(trikit.load("raa"))
     tt = tri.mack_cl().devp_corr_test()
     test1 = all([np.allclose(ii, jj) for ii, jj in zip(ref_tt0, tt[0])])
     test2 = np.allclose(ref_tt1, tt[-1])
     self.assertTrue(
         test1 and test2,
         "Non-equality between computed vs. reference devp correlation test."
     )
Example #8
 def setUp(self):
     data = trikit.load(dataset="raa")
     self.tri = trikit.totri(data=data, type_="incremental")
     self.latest_ref = pd.DataFrame({
         "origin":list(range(1981, 1991, 1)), "maturity":list(range(10, 0, -1)),
         "dev":list(range(10, 0, -1)),
         "latest":[
             172.0, 535.0, 603.0, 984.0, 225.0, 2917.0, 1368.0,
             6165.0, 2262.0, 2063.0
             ],
         }, index=list(range(0, 10, 1))
         )
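
A minimal sketch of a test that could consume the reference frame above; the tri.latest accessor and its column layout are assumptions inferred from latest_ref, not verified against the trikit API:

 def test_latest(self):
     # Hypothetical check: latest diagonal of the incremental triangle vs. the reference frame.
     self.assertTrue(
         np.allclose(self.tri.latest["latest"].values, self.latest_ref["latest"].values),
         "Latest diagonal does not match reference."
         )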
Example #9
 def test_alt_colnames(self):
     # Create triangle with different origin, dev and value names.
     dfrnm = self.incrtab.rename(
         {"origin":"ay", "dev":"devp", "value":"loss_amt"}, axis=1
         )
     tri = trikit.totri(
         dfrnm, type_="cum", data_format="incr", data_shape="tabular",
         origin="ay", dev="devp", value="loss_amt"
         )
     self.assertTrue(
         isinstance(tri, trikit.triangle.CumTriangle),
         "Error converting cumtri data to cumtri."
         )
Example #10
    def setUp(self):
        data = trikit.load(dataset="raa")
        self.tri = trikit.totri(data=data, tri_type="incremental")
        self.latest_ref = pd.DataFrame({
            "origin": list(range(1981, 1991, 1)),
            "maturity": list(range(10, 0, -1)),
            "dev": list(range(10, 0, -1)),
            "latest": [
                172.0, 535.0, 603.0, 984.0, 225.0, 2917.0, 1368.0, 6165.0,
                2262.0, 2063.0
            ],
        }, index=list(range(0, 10, 1)))

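        # Off-diagonal reference values of the incremental RAA triangle, offset 1, 2 and 7 periods from the latest diagonal.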
        self.offset_1 = np.asarray(
            [54., 673., 649., 2658., 3786., 1233., 6926., 5596., 3133.])
        self.offset_2 = np.asarray(
            [599., -103., 3479., 2159., 6333., 5257., 3463., 1351.])
        self.offset_7 = np.asarray([2638., 4179., 3410.])
Example #11
    def setUp(self):

        raa = trikit.load(dataset="raa")
        self.tri = trikit.totri(raa, type_="cumulative")

        self.latest_ref = pd.DataFrame({
            "origin":list(range(1981, 1991, 1)), "maturity":list(range(10, 0, -1)),
            "dev":list(range(10, 0, -1)),
            "latest":[18834.0, 16704.0, 23466.0, 27067.0, 26180.0, 15852.0, 12314.0, 13112.0, 5395.0, 2063.0],
            }, index=list(range(0, 10, 1))
            )

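        # Reference age-to-age (a2a) factors computed from the cumulative RAA triangle.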
        self.a2aref = pd.DataFrame({
            1: [1.64984, 40.42453, 2.63695, 2.04332, 8.75916, 4.25975, 7.21724, 5.14212, 1.72199],
            2: [1.31902, 1.25928, 1.54282, 1.36443, 1.65562, 1.81567, 2.72289, 1.88743, np.nan],
            3: [1.08233, 1.97665, 1.16348, 1.34885, 1.39991, 1.10537, 1.12498, np.nan, np.nan],
            4: [1.14689, 1.29214, 1.16071, 1.10152, 1.17078, 1.22551, np.nan, np.nan, np.nan],
            5: [1.19514, 1.13184, 1.18570, 1.11347, 1.00867, np.nan, np.nan, np.nan, np.nan],
            6: [1.11297, 0.99340, 1.02922, 1.03773, np.nan, np.nan, np.nan, np.nan, np.nan],
            7: [1.03326, 1.04343, 1.02637, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
            8: [1.00290, 1.03309, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
            9: [1.00922, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
            }, index=list(range(1981, 1990))
            )
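
A minimal sketch of a comparison against the a2a reference; the tri.a2a attribute and the rounding convention are assumptions, not verified against the trikit API:

    def test_a2a_factors(self):
        # Hypothetical check: computed age-to-age factors vs. the rounded reference frame.
        self.assertTrue(
            np.allclose(self.tri.a2a.round(5), self.a2aref, equal_nan=True),
            "Age-to-age factors do not match reference."
            )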
Example #12
    def setUp(self):
        data = trikit.load(dataset="raa")
        tri = trikit.totri(data,
                           type_="cum",
                           data_shape="tabular",
                           data_format="incr")
        self.cl = trikit.chainladder.BaseChainLadder(cumtri=tri).__call__(
            sel="all-weighted", tail=1.)

        raa_cl_ref = pd.DataFrame({
            "origin": [
                1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990,
                ],
            "maturity": [
                '10', '9', '8', '7', '6', '5', '4', '3', '2', '1',
                ],
            "cldf": [
                1., 1.00922, 1.02631, 1.06045, 1.10492, 1.2302, 1.44139,
                1.83185, 2.97405, 8.92023,
                ],
            "latest": [
                18834., 16704., 23466., 27067., 26180., 15852., 12314.,
                13112., 5395., 2063.,
                ],
            "ultimate": [
                18834., 16857.95392, 24083.37092, 28703.14216, 28926.73634,
                19501.10318, 17749.30259, 24019.19251, 16044.9841, 18402.44253,
                ],
            "reserve": [
                0., 153.95392, 617.37092, 1636.14216, 2746.73634, 3649.10318,
                5435.30259, 10907.19251, 10649.9841, 16339.44253,
                ]
            })

        ref_ldfs = pd.Series([
            2.99936, 1.62352, 1.27089, 1.17167, 1.11338, 1.04193, 1.03326,
            1.01694, 1.00922, 1.,
            ], dtype=np.float64)

        ref_cldfs = np.asarray([
            8.92023, 2.97405, 1.83185, 1.44139, 1.2302, 1.10492, 1.06045,
            1.02631, 1.00922, 1.,
            ], dtype=np.float64)

        self.raa_cl_ref = raa_cl_ref  #[raa_cl_ref.index!="total"]
        self.ref_ldfs = ref_ldfs
        self.ref_cldfs = ref_cldfs
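
A quick consistency sketch on the reference frame itself: each origin's reserve should equal ultimate minus latest. This uses only the values defined above; the method name is an assumption, not part of the original suite:

    def test_ref_reserve_identity(self):
        # Sanity check: reserve == ultimate - latest, row by row, in the reference frame.
        diffs = self.raa_cl_ref["ultimate"] - self.raa_cl_ref["latest"]
        self.assertTrue(
            np.allclose(diffs.values, self.raa_cl_ref["reserve"].values),
            "Reference reserves are not consistent with ultimate - latest."
            )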