def test_grid():
    """Grid-searching a Benktander pipeline must match the same fit done by hand."""
    # Load data: medical-malpractice paid losses and premium diagonal
    clrd = cl.load_sample("clrd")
    medmal = clrd.groupby("LOB").sum().loc["medmal"]
    medmal_paid = medmal["CumPaidLoss"]
    medmal_prem = medmal["EarnedPremDIR"].latest_diagonal
    medmal_prem.rename("development", ["premium"])

    # Pipeline: development factors -> tail curve -> Benktander IBNR
    steps = [
        ("dev", cl.Development()),
        ("tail", cl.TailCurve()),
        ("benk", cl.Benktander()),
    ]
    pipe = cl.Pipeline(steps)

    # Single-point grid with an IBNR scoring callable
    param_grid = dict(benk__n_iters=[250], benk__apriori=[1.00])
    scoring = {"IBNR": lambda x: x.named_steps.benk.ibnr_.sum()}
    grid = cl.GridSearch(pipe, param_grid, scoring=scoring)
    grid.fit(medmal_paid, benk__sample_weight=medmal_prem)

    # The grid's scored IBNR must equal the manually chained estimators
    manual = cl.Benktander(n_iters=250, apriori=1).fit(
        cl.TailCurve().fit_transform(
            cl.Development().fit_transform(medmal_paid)),
        sample_weight=medmal_prem,
    ).ibnr_.sum()
    assert grid.results_["IBNR"][0] == manual
def test_grid():
    """Legacy variant of the grid-search test (scalar ``[0]`` scoring)."""
    # NOTE(review): `load_dataset` looks like the older name for `load_sample`
    # used by the sibling variant — confirm it is still available here.
    clrd = cl.load_dataset('clrd')
    lob = clrd.groupby('LOB').sum().loc['medmal']
    medmal_paid = lob['CumPaidLoss']
    medmal_prem = lob['EarnedPremDIR'].latest_diagonal
    medmal_prem.rename('development', ['premium'])

    # Pipeline: development factors -> tail curve -> Benktander IBNR
    steps = [
        ('dev', cl.Development()),
        ('tail', cl.TailCurve()),
        ('benk', cl.Benktander()),
    ]
    pipe = cl.Pipeline(steps)

    # Single-point grid; scorer reduces the IBNR sum to its first element
    param_grid = dict(benk__n_iters=[250], benk__apriori=[1.00])
    scoring = {'IBNR': lambda x: x.named_steps.benk.ibnr_.sum()[0]}
    grid = cl.GridSearch(pipe, param_grid, scoring=scoring)
    grid.fit(medmal_paid, benk__sample_weight=medmal_prem)

    # Grid result must match the same estimator chain fit manually
    manual = cl.Benktander(n_iters=250, apriori=1).fit(
        cl.TailCurve().fit_transform(
            cl.Development().fit_transform(medmal_paid)),
        sample_weight=medmal_prem,
    ).ibnr_.sum()[0]
    assert grid.results_['IBNR'][0] == manual
def test_pipeline():
    """Smoke-test every (development, tail, ibnr) estimator combination.

    Each combination is assembled into a Pipeline and fit end-to-end; the
    test passes if no combination raises.
    """
    tri = cl.load_sample('clrd').groupby('LOB').sum()[[
        'CumPaidLoss', 'IncurLoss', 'EarnedPremDIR'
    ]]
    # Derive case-incurred losses from incurred minus paid
    tri['CaseIncurredLoss'] = tri['IncurLoss'] - tri['CumPaidLoss']
    X = tri[['CumPaidLoss', 'CaseIncurredLoss']]
    sample_weight = tri['EarnedPremDIR'].latest_diagonal

    dev = [
        cl.Development(),
        cl.ClarkLDF(),
        cl.Trend(),
        cl.IncrementalAdditive(),
        cl.MunichAdjustment(
            paid_to_incurred=('CumPaidLoss', 'CaseIncurredLoss')),
        cl.CaseOutstanding(
            paid_to_incurred=('CumPaidLoss', 'CaseIncurredLoss')),
    ]
    tail = [cl.TailCurve(), cl.TailConstant(), cl.TailBondy(), cl.TailClark()]
    ibnr = [
        cl.Chainladder(),
        cl.BornhuetterFerguson(),
        cl.Benktander(n_iters=2),
        cl.CapeCod(),
    ]

    # Iterate the product lazily (no throwaway list) and unpack the triple
    # instead of indexing; the leftover debug print is removed.
    for dev_est, tail_est, ibnr_est in itertools.product(dev, tail, ibnr):
        cl.Pipeline(
            steps=[('dev', dev_est), ('tail', tail_est), ('ibnr', ibnr_est)]
        ).fit_predict(X, sample_weight=sample_weight).ibnr_.sum(
            'origin').sum('columns').sum()
def test_benktander_to_chainladder(data, atol):
    """A high-iteration Benktander converges to the chainladder IBNR."""
    tri = cl.load_sample(data)
    cl_ibnr = cl.Chainladder().fit(tri).ibnr_
    bk_ibnr = cl.Benktander(apriori=.8, n_iters=255).fit(
        tri, sample_weight=cl_ibnr).ibnr_
    # Compare with the triangle's own backend array module; NaNs are zeroed
    # so the element-wise comparison only sees populated cells.
    xp = tri.get_array_module()
    assert xp.allclose(xp.nan_to_num(cl_ibnr.values),
                       xp.nan_to_num(bk_ibnr.values),
                       atol=atol)
def test_misaligned_index2(clrd):
    """Predicting on index subsets must reproduce the fitted ultimates.

    Fits Chainladder, Benktander and CapeCod on the full CLRD triangle
    (grouped by LOB) and checks, for several index slices, that slicing
    the fitted ``ultimate_`` agrees with predicting on the same slice of
    the raw data. The original body repeated the same assert triplet for
    every slice; the repetition is factored into ``_check``.
    """
    clrd = clrd['CumPaidLoss']
    w = cl.load_sample('clrd')['EarnedPremDIR'].latest_diagonal

    # fit_transform is deterministic, so transform once and reuse
    X = cl.Development(groupby=['LOB']).fit_transform(clrd)
    bcl = cl.Chainladder().fit(X)
    bbk = cl.Benktander().fit(X, sample_weight=w)
    bcc = cl.CapeCod().fit(X, sample_weight=w)

    def _check(subset):
        # Chainladder needs no sample weight on predict
        a = bcl.ultimate_.iloc[subset].sum().sum()
        b = bcl.predict(clrd.iloc[subset]).ultimate_.sum().sum()
        assert abs(a - b) < 1e-5
        # Benktander and CapeCod need the matching weight slice
        for model in (bbk, bcc):
            a = model.ultimate_.iloc[subset].sum().sum()
            b = model.predict(
                clrd.iloc[subset],
                sample_weight=w.iloc[subset]).ultimate_.sum().sum()
            assert abs(a - b) < 1e-5

    # Head slice, two interior slices, and a single-row integer index —
    # exactly the subsets the original asserts covered.
    for subset in (slice(None, 10), slice(150, 153), slice(150, 152), 150):
        _check(subset)
def test_benktander_to_chainladder(data, atol):
    """Legacy convergence test comparing raw ``triangle`` arrays."""
    # NOTE(review): `load_dataset` and the `.triangle` attribute belong to an
    # older API (newer variants use `load_sample` / `.values`) — confirm.
    tri = cl.load_dataset(data)
    cl_ibnr = cl.Chainladder().fit(tri).ibnr_
    bk_ibnr = cl.Benktander(apriori=.8, n_iters=255).fit(
        tri, sample_weight=cl_ibnr).ibnr_
    assert_allclose(cl_ibnr.triangle, bk_ibnr.triangle, atol=atol)
and as ``n_iters`` approaches infinity yields the chainladder. As ``n_iters`` increases the apriori selection becomes less relevant regardless of initial choice. """ import chainladder as cl # Load Data clrd = cl.load_sample('clrd') medmal_paid = clrd.groupby('LOB').sum().loc['medmal', 'CumPaidLoss'] medmal_prem = clrd.groupby('LOB').sum().loc['medmal', 'EarnedPremDIR'].latest_diagonal medmal_prem.rename('development', ['premium']) # Generate LDFs and Tail Factor medmal_paid = cl.Development().fit_transform(medmal_paid) medmal_paid = cl.TailCurve().fit_transform(medmal_paid) # Benktander Model benk = cl.Benktander() # Prep Benktander Grid Search with various assumptions, and a scoring function param_grid = dict(n_iters=list(range(1,100,2)), apriori=[0.50, 0.75, 1.00]) scoring = {'IBNR':lambda x: x.ibnr_.sum()} grid = cl.GridSearch(benk, param_grid, scoring=scoring) # Perform Grid Search grid.fit(medmal_paid, sample_weight=medmal_prem) # Plot data grid.results_.pivot(index='n_iters', columns='apriori', values='IBNR').plot( title='Benktander convergence to Chainladder', grid=True).set(ylabel='IBNR')
tri = clrd.groupby('LOB').sum()[[ 'CumPaidLoss', 'IncurLoss', 'EarnedPremDIR' ]] tri['CaseIncurredLoss'] = tri['IncurLoss'] - tri['CumPaidLoss'] return tri dev = [ cl.Development, cl.ClarkLDF, cl.Trend, cl.IncrementalAdditive, lambda: cl.MunichAdjustment(paid_to_incurred=( 'CumPaidLoss', 'CaseIncurredLoss')), lambda: cl.CaseOutstanding( paid_to_incurred=('CumPaidLoss', 'CaseIncurredLoss')) ] tail = [cl.TailCurve, cl.TailConstant, cl.TailBondy, cl.TailClark] ibnr = [ cl.Chainladder, cl.BornhuetterFerguson, lambda: cl.Benktander(n_iters=2), cl.CapeCod ] @pytest.mark.parametrize('dev', dev) @pytest.mark.parametrize('tail', tail) @pytest.mark.parametrize('ibnr', ibnr) def test_pipeline(tri, dev, tail, ibnr): X = tri[['CumPaidLoss', 'CaseIncurredLoss']] sample_weight = tri['EarnedPremDIR'].latest_diagonal cl.Pipeline(steps=[('dev', dev()), ('tail', tail()), ('ibnr', ibnr())]).fit_predict( X, sample_weight=sample_weight).ibnr_.sum( 'origin').sum('columns').sum()
def test_benktander_to_chainladder(data, atol):
    """Legacy backend-agnostic variant of the convergence test."""
    tri = cl.load_dataset(data)
    cl_ibnr = cl.Chainladder().fit(tri).ibnr_
    bk_ibnr = cl.Benktander(apriori=.8, n_iters=255).fit(
        tri, sample_weight=cl_ibnr).ibnr_
    # NOTE(review): `cp` is presumably cupy; get_array_module dispatches to
    # numpy or cupy depending on where the values live — confirm import.
    xp = cp.get_array_module(cl_ibnr.values)
    xp.testing.assert_allclose(cl_ibnr.values, bk_ibnr.values, atol=atol)