def setUp(self):
    """Build tabular and triangle versions of the RAA dataset plus reference frames."""
    ocol, dcol, vcol = "origin", "dev", "value"
    incrtab = trikit.load(dataset="raa")

    # Cumulative tabular data: running total of value within each origin year.
    cumtab = incrtab.copy(deep=True).sort_values(
        by=["origin", "dev"]).reset_index(drop=True)
    cumtab["cum"] = cumtab.groupby(["origin"], as_index=False)["value"].cumsum()
    cumtab = cumtab.drop("value", axis=1).rename({"cum": "value"}, axis=1)

    def _as_wide(df):
        # Pivot tabular data into a wide (origin x dev) triangle layout.
        wide = df[[ocol, dcol, vcol]].groupby([ocol, dcol], as_index=False).sum()
        wide = wide.sort_values(by=[ocol, dcol]).pivot(
            index=ocol, columns=dcol).rename_axis(None)
        wide.columns = wide.columns.droplevel(0)
        return wide

    # NOTE(review): both triangles are pivoted from the same incremental
    # table, exactly as in the original fixture.
    incrtri = _as_wide(incrtab)
    cumtri = _as_wide(incrtab)

    self.incrtab = incrtab
    self.cumtab = cumtab
    self.incrtri = incrtri
    self.cumtri = cumtri

    origins = list(range(1981, 1991))
    devs = list(range(10, 0, -1))
    idx = list(range(10))

    # Expected latest diagonal of the incremental triangle.
    self.incr_latest_ref = pd.DataFrame(
        {
            "origin": origins,
            "maturity": devs,
            "dev": devs,
            "latest": [
                172.0, 535.0, 603.0, 984.0, 225.0, 2917.0, 1368.0, 6165.0,
                2262.0, 2063.0
            ],
        }, index=idx)

    # Expected latest diagonal of the cumulative triangle.
    self.cum_latest_ref = pd.DataFrame(
        {
            "origin": origins,
            "maturity": devs,
            "dev": devs,
            "latest": [
                18834.0, 16704.0, 23466.0, 27067.0, 26180.0, 15852.0,
                12314.0, 13112.0, 5395.0, 2063.0
            ],
        }, index=idx)
def setUp(self):
    """Run the base chain ladder on the cumulative RAA triangle."""
    tri = trikit.load(dataset="raa", tri_type="cum")
    result = tri.base_cl()

    # Reference aggregates for the RAA dataset.
    self.dactual_raa = {
        "ldfs_sum": 13.28018030198903,
        "cldfs_sum": 21.59861048771567,
        "latest_sum": 321974.0,
        "ultimates_sum": 213122.22826121017,
        "reserves_sum": 52135.228261210155,
    }

    # Computed aggregates; "total" rows are excluded before summing.
    self.ldfs_sum = result.ldfs.dropna().sum()
    self.cldfs_sum = result.cldfs.dropna().sum()
    self.latest_sum = result.latest.dropna().sum()
    self.ultimates_sum = result.ultimate.drop("total").dropna().sum()
    self.reserves_sum = result.reserve.drop("total").dropna().sum()
    self.tri = tri

    # User-supplied development factors with known reserve total.
    self.custom_ldfs = np.asarray(
        [2.75, 1.55, 1.50, 1.25, 1.15, 1.075, 1.03, 1.02, 1.01])
    self.custom_ldfs_reserves_total = 145820.33941776195
def setUp(self):
    """Fit the Mack chain ladder on TA83 with shifted, non-sequential periods."""
    # Rescale origin/dev so periods are valid but not sequential from 1.
    df = trikit.load(dataset="ta83")
    df["dev"] = df["dev"] * 12
    df["origin"] = df["origin"] + 2000
    tri = trikit.totri(df, tri_type="cum", data_shape="tabular", data_format="incr")
    model = mack.MackChainLadder(cumtri=tri)
    res_ln = model(alpha=1, dist="lognorm")
    res_n = model(alpha=1, dist="norm")

    # Reference aggregates for the shifted TA83 dataset.
    self.dactual_ta83 = {
        "norm_mu_sum": 18680869.054532073,
        "norm_sigma_sum": 4771773.155719111,
        "norm_75_sum": 21899381.138325423,
        "norm_95_sum": 26529737.436706323,
        "lognorm_mu_sum": 125.8539998696597,
        "lognorm_sigma_sum": 2.6740386407158327,
        "lognorm_75_sum": 21420867.75494642,
        "lognorm_95_sum": 27371140.20920447,
        "mse_sum": 4156154300629.1504,
        "std_error_sum": 4771773.155719111,
        "cv_sum": 2.80203003051732,
        "process_error_sum": 3527957849338.302,
        "parameter_error_sum": 628196451290.8485,
        "ldfs_sum": 14.207460332760107,
        "ultimates_sum": 53038959.05453208,
        "reserves_sum": 18680869.054532073,
        "devpvar_sum": 279118.8961841563,
        "ldfvar_sum": 0.05702584091389985,
    }

    def ppf_sum(res, q):
        # Sum of quantile q across each origin period's distribution.
        return pd.Series(
            [res.rvs[ii].ppf(q) for ii in res.tri.index]).dropna().sum()

    # Ignore invalid floating point operations raised while evaluating
    # quantiles on the padded (NaN) cells.
    with np.errstate(invalid="ignore"):
        self.norm_75_sum = ppf_sum(res_n, .75)
        self.norm_95_sum = ppf_sum(res_n, .95)
        self.lognorm_75_sum = ppf_sum(res_ln, .75)
        self.lognorm_95_sum = ppf_sum(res_ln, .95)
        self.mse_sum = res_ln.mse.drop("total").dropna().sum()
        self.std_error_sum = res_ln.std_error.drop("total").dropna().sum()
        self.cv_sum = res_ln.cv.drop("total").dropna().sum()
        self.process_error_sum = res_ln.process_error.dropna().sum()
        self.parameter_error_sum = res_ln.parameter_error.dropna().sum()
        self.ldfs_sum = res_ln.ldfs.dropna().sum()
        self.ultimates_sum = res_ln.ultimate.drop("total").dropna().sum()
        self.reserves_sum = res_ln.reserve.drop("total").dropna().sum()
        self.devpvar_sum = res_ln.devpvar.dropna().sum()
        self.ldfvar_sum = res_ln.ldfvar.dropna().sum()
def test_cy_effects_test(self):
    """Calendar-year effects test should match the stored reference values."""
    expected_interval = (8.965613354894957, 16.78438664510504)
    expected_stat = 14.
    tri = trikit.totri(trikit.load("raa"))
    result = tri.mack_cl().cy_effects_test()
    interval_ok = all(
        np.allclose(ref, got) for ref, got in zip(expected_interval, result[0]))
    stat_ok = np.allclose(expected_stat, result[-1])
    self.assertTrue(
        interval_ok and stat_ok,
        "Non-equality between computed vs. reference cy effects test.")
def test_devp_corr_test(self):
    """Development-period correlation test should match the stored reference."""
    expected_interval = (-0.12746658149149367, 0.12746658149149367)
    expected_stat = 0.0695578231292517
    tri = trikit.totri(trikit.load("raa"))
    result = tri.mack_cl().devp_corr_test()
    interval_ok = all(
        np.allclose(ref, got) for ref, got in zip(expected_interval, result[0]))
    stat_ok = np.allclose(expected_stat, result[-1])
    self.assertTrue(
        interval_ok and stat_ok,
        "Non-equality between computed vs. reference devp correlation test."
        )
def setUp(self):
    """Create an incremental RAA triangle and its latest-diagonal reference.

    Fix: the ``type_`` keyword is replaced with ``tri_type``, matching the
    keyword used by the equivalent fixtures elsewhere in this module.
    """
    data = trikit.load(dataset="raa")
    self.tri = trikit.totri(data=data, tri_type="incremental")
    # Expected latest diagonal of the incremental triangle.
    self.latest_ref = pd.DataFrame({
        "origin": list(range(1981, 1991, 1)),
        "maturity": list(range(10, 0, -1)),
        "dev": list(range(10, 0, -1)),
        "latest": [
            172.0, 535.0, 603.0, 984.0, 225.0, 2917.0, 1368.0, 6165.0,
            2262.0, 2063.0
        ],
        }, index=list(range(0, 10, 1))
    )
def setUp(self):
    """Run the bootstrap chain ladder on RAA and capture intermediate artifacts."""
    tri = trikit.load(dataset="raa", tri_type="cum")
    bcl = bootstrap.BootstrapChainLadder(tri)
    res = bcl(random_state=516)

    # Reference aggregates for the RAA dataset.
    self.dactual_raa = {
        "ldfs_sum": 13.28018030198903,
        "cldfs_sum": 21.59861048771567,
        "latest_sum": 321974.0,
        "ultimates_sum": 214989.29310411066,
        "reserves_sum": 54002.29310411069,
        "se_sum": 44279.591512336876,
        "dof": 56,
        "scale_param": 632.3368030912758,
        "fitted_cum_sum": 707622.0,
        "fitted_incr_sum": 160987.0,
        "resid_us_sum": 4.7274165831925234,
        "resid_adj_sum": 4.68501737172304,
        "sampling_dist_sum": 4.68501737172304,
        "bs_samples_sum": 16198534.554200275,
        "bs_ldfs_sum": 1245.7029695928195,
        "bs_forecasts_sum": 21528898.436414644,
        "bs_process_error_sum": 156451006.23791158,
        "bs_reserves_sum": 5227886.663640983,
    }

    # Computed aggregates; "total" rows are excluded before summing.
    self.ldfs_sum = res.ldfs.dropna().sum()
    self.cldfs_sum = res.cldfs.dropna().sum()
    self.latest_sum = res.latest.dropna().sum()
    self.ultimates_sum = res.ultimate.drop("total").dropna().sum()
    self.reserves_sum = res.reserve.drop("total").dropna().sum()
    self.se_sum = res.std_error.dropna().sum()
    self.dof = res.dof

    # Exercise each private bootstrap stage in order; every step consumes
    # the output of the previous one, so the sequence must not be reordered.
    self.tri_fit_cum = bcl._tri_fit_cum(res.ldfs)
    self.tri_fit_incr = bcl._tri_fit_incr(self.tri_fit_cum)
    self.resid_us = bcl._resid_us(self.tri_fit_incr)
    self.scale_param = bcl._scale_param(self.resid_us)
    self.resid_adj = bcl._resid_adj(self.resid_us)
    self.sampling_dist = bcl._sampling_dist(self.resid_adj)
    self.bs_samples = bcl._bs_samples(
        self.sampling_dist, self.tri_fit_incr, sims=100, random_state=516)
    self.bs_ldfs = bcl._bs_ldfs(self.bs_samples)
    self.bs_forecasts = bcl._bs_forecasts(
        self.bs_samples, self.bs_ldfs, self.scale_param)
    self.bs_process_error = bcl._bs_process_error(
        self.bs_forecasts, self.scale_param, procdist="gamma", random_state=516)
    self.bs_reserves = bcl._bs_reserves(self.bs_process_error)
def setUp(self):
    """Create an incremental RAA triangle plus latest-diagonal and offset references."""
    raa = trikit.load(dataset="raa")
    self.tri = trikit.totri(data=raa, tri_type="incremental")
    devs = list(range(10, 0, -1))
    # Expected latest diagonal of the incremental triangle.
    self.latest_ref = pd.DataFrame(
        {
            "origin": list(range(1981, 1991)),
            "maturity": devs,
            "dev": devs,
            "latest": [
                172.0, 535.0, 603.0, 984.0, 225.0, 2917.0, 1368.0, 6165.0,
                2262.0, 2063.0
            ],
        }, index=list(range(10)))
    # Expected diagonal offsets at lags 1, 2 and 7.
    self.offset_1 = np.asarray(
        [54., 673., 649., 2658., 3786., 1233., 6926., 5596., 3133.])
    self.offset_2 = np.asarray(
        [599., -103., 3479., 2159., 6333., 5257., 3463., 1351.])
    self.offset_7 = np.asarray([2638., 4179., 3410.])
def setUp(self):
    """Create a cumulative RAA triangle plus latest-diagonal and a2a references.

    Fixes:
    - ``np.NaN`` (removed in NumPy 2.0) replaced with ``np.nan``.
    - ``type_`` keyword replaced with ``tri_type``, matching the keyword used
      by the other fixtures in this module.
    """
    raa = trikit.load(dataset="raa")
    self.tri = trikit.totri(raa, tri_type="cumulative")
    # Expected latest diagonal of the cumulative triangle.
    self.latest_ref = pd.DataFrame({
        "origin": list(range(1981, 1991, 1)),
        "maturity": list(range(10, 0, -1)),
        "dev": list(range(10, 0, -1)),
        "latest": [
            18834.0, 16704.0, 23466.0, 27067.0, 26180.0, 15852.0, 12314.0,
            13112.0, 5395.0, 2063.0
        ],
        }, index=list(range(0, 10, 1))
    )
    # Expected age-to-age factors; np.nan pads the lower-right of the triangle.
    self.a2aref = pd.DataFrame({
        1: [1.64984, 40.42453, 2.63695, 2.04332, 8.75916, 4.25975, 7.21724,
            5.14212, 1.72199],
        2: [1.31902, 1.25928, 1.54282, 1.36443, 1.65562, 1.81567, 2.72289,
            1.88743, np.nan],
        3: [1.08233, 1.97665, 1.16348, 1.34885, 1.39991, 1.10537, 1.12498,
            np.nan, np.nan],
        4: [1.14689, 1.29214, 1.16071, 1.10152, 1.17078, 1.22551, np.nan,
            np.nan, np.nan],
        5: [1.19514, 1.13184, 1.1857, 1.11347, 1.00867, np.nan, np.nan,
            np.nan, np.nan],
        6: [1.11297, 0.9934, 1.02922, 1.03773, np.nan, np.nan, np.nan,
            np.nan, np.nan],
        7: [1.03326, 1.04343, 1.02637, np.nan, np.nan, np.nan, np.nan,
            np.nan, np.nan],
        8: [1.0029, 1.03309, np.nan, np.nan, np.nan, np.nan, np.nan,
            np.nan, np.nan],
        9: [1.00922, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
            np.nan, np.nan],
        }, index=list(range(1981, 1990))
    )
def test_raa2incr(self):
    """Loading RAA with tri_type="incr" should yield an IncrTriangle.

    Fix: use ``assertIsInstance`` (unittest idiom) instead of
    ``assertTrue(isinstance(...))`` for clearer failure output.
    """
    self.assertIsInstance(
        trikit.load("raa", tri_type="incr"),
        trikit.triangle.IncrTriangle,
        "RAA dataset not coerced to incremental triangle object.")
def test_raa2cum(self):
    """Loading RAA with tri_type="cum" should yield a CumTriangle.

    Fix: use ``assertIsInstance`` (unittest idiom) instead of
    ``assertTrue(isinstance(...))`` for clearer failure output.
    """
    self.assertIsInstance(
        trikit.load("raa", tri_type="cum"),
        trikit.triangle.CumTriangle,
        "RAA dataset not coerced to cumulative triangle object.")
def test_amw09(self):
    """Total losses in the amw09 sample should match the stored reference."""
    computed = trikit.load("amw09").value.sum()
    self.assertEqual(
        computed, self.dactual["amw09"],
        "Issue detected with amw09 sample dataset.")
def test_raa2df(self):
    """Loading RAA with tri_type=None should return tabular data with known total.

    Fix: ``assertTrue(a == b)`` replaced with ``assertEqual(a, b)`` for better
    failure diagnostics, consistent with the other sample-dataset tests.
    """
    self.assertEqual(
        trikit.load("raa", tri_type=None).value.sum(),
        self.raa_incr_sum,
        "Non-equality between computed vs. raa losses.")
def test_singproperty(self):
    """Total losses in the singproperty sample should match the stored reference."""
    computed = trikit.load("singproperty").value.sum()
    self.assertEqual(
        computed, self.dactual["singproperty"],
        "Issue detected with singproperty sample dataset.")
def test_raa(self):
    """Total losses in the raa sample should match the stored reference."""
    computed = trikit.load("raa").value.sum()
    self.assertEqual(
        computed, self.dactual["raa"],
        "Issue detected with raa sample dataset.")
def test_glre(self):
    """Total losses in the glre sample should match the stored reference."""
    computed = trikit.load("glre").value.sum()
    self.assertEqual(
        computed, self.dactual["glre"],
        "Issue detected with glre sample dataset.")
def test_autoliab(self):
    """Total losses in the autoliab sample should match the stored reference."""
    computed = trikit.load("autoliab").value.sum()
    self.assertEqual(
        computed, self.dactual["autoliab"],
        "Issue detected with autoliab sample dataset.")
def test_ta83(self):
    """Total losses in the ta83 sample should match the stored reference."""
    computed = trikit.load("ta83").value.sum()
    self.assertEqual(
        computed, self.dactual["ta83"],
        "Issue detected with ta83 sample dataset.")
def setUp(self):
    """Run the base chain ladder on RAA and store reference results.

    Fixes:
    - ``np.float`` (deprecated in NumPy 1.20 and removed in 1.24, where it
      raises AttributeError) replaced with the builtin ``float``.
    - ``type_`` keyword replaced with ``tri_type``, matching the keyword used
      by the other fixtures in this module.
    """
    data = trikit.load(dataset="raa")
    tri = trikit.totri(data, tri_type="cum", data_shape="tabular", data_format="incr")
    self.cl = trikit.chainladder.BaseChainLadder(cumtri=tri)(
        sel="all-weighted", tail=1.)
    # Reference chain-ladder summary for RAA.
    raa_cl_ref = pd.DataFrame({
        "origin": [
            1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990,
        ],
        "maturity": [
            '10', '9', '8', '7', '6', '5', '4', '3', '2', '1',
        ],
        "cldf": [
            1., 1.00922, 1.02631, 1.06045, 1.10492, 1.2302, 1.44139,
            1.83185, 2.97405, 8.92023,
        ],
        "latest": [
            18834., 16704., 23466., 27067., 26180., 15852., 12314.,
            13112., 5395., 2063.,
        ],
        "ultimate": [
            18834., 16857.95392, 24083.37092, 28703.14216, 28926.73634,
            19501.10318, 17749.30259, 24019.19251, 16044.9841, 18402.44253,
        ],
        "reserve": [
            0., 153.95392, 617.37092, 1636.14216, 2746.73634, 3649.10318,
            5435.30259, 10907.19251, 10649.9841, 16339.44253,
        ]
    })
    # Reference loss development factors (ldfs) and cumulative ldfs.
    ref_ldfs = pd.Series([
        2.99936, 1.62352, 1.27089, 1.17167, 1.11338, 1.04193, 1.03326,
        1.01694, 1.00922, 1.,
    ], dtype=float)
    ref_cldfs = np.asarray([
        8.92023, 2.97405, 1.83185, 1.44139, 1.2302, 1.10492, 1.06045,
        1.02631, 1.00922, 1.
    ], dtype=float)
    self.raa_cl_ref = raa_cl_ref
    self.ref_ldfs = ref_ldfs
    self.ref_cldfs = ref_cldfs
# NOTE(review): scratch/driver statements. `t1`, `i1`, `ds0`, `_CumTriangle`,
# `_IncrTriangle` and `_Triangle` are defined elsewhere in this file (not
# visible here) — TODO confirm these names still exist before running.
c1 = t1.cum
# Construct cumulative triangles via keyword arguments only.
c2 = _CumTriangle(trisize=10, incr=False)
c3 = _CumTriangle(incr=False)
# Convert incremental triangle to cumulative.
i2c = _CumTriangle(i1)
# Convert cumulative triangle to incremental.
c2i = _IncrTriangle(c1)
# Build a triangle from tabular data with explicit column mapping.
ctri = _Triangle(ds0, origin='ORIGIN',dev='DEV',value='VALUE').cumulative
# Load sample datasets; `sf` supports per-line-of-business selection.
ta = trikit.load("ta83")
sf = trikit.load("sf")
sf1 = trikit.load("sf",lob="com_auto")
# ChainLadder tests =>
# cl = trikit.ChainLadder(ds0,origin='ORIGIN',dev='DEV',value='VALUE', tail_fact=1.05)
# # # Get age to ultimate factors
# cl.age2ult
# # cl.squared_tri