Example #1
0
def test_base_minimum_exposure_triangle():
    """A constant-valued latest-diagonal frame must build into a Triangle."""
    # Replace every diagonal cell with the constant 50000, then flatten.
    frame = (cl.load_dataset('raa').latest_diagonal * 0 +
             50000).to_frame().reset_index()
    frame['index'] = frame['index'].astype(str)
    # Constructing the Triangle is the test; no assertion needed.
    cl.Triangle(frame,
                origin='index',
                columns=list(cl.load_dataset('raa').columns))
Example #2
0
def test_constant_cdf():
    """DevelopmentConstant seeded with fitted LDFs reproduces the CDFs."""
    fitted = cl.Development().fit(cl.load_dataset('raa'))
    # Map each development age (12, 24, ...) to its fitted link ratio.
    patterns = {}
    for num, ldf in enumerate(fitted.ldf_.values[0, 0, 0, :]):
        patterns[(num + 1) * 12] = ldf
    constant = cl.DevelopmentConstant(patterns=patterns,
                                      style='ldf').fit(cl.load_dataset('raa'))
    assert_allclose(fitted.cdf_.values, constant.cdf_.values, atol=1e-5)
def test_to_frame_unusual():
    """Summing then taking the diagonal equals diagonal then summing."""
    clrd = cl.load_dataset('clrd')
    a = clrd.groupby(
        ['LOB']).sum().latest_diagonal['CumPaidLoss'].to_frame().values
    b = clrd.latest_diagonal['CumPaidLoss'].groupby(
        ['LOB']).sum().to_frame().values
    xp = cp.get_array_module(a)
    xp.testing.assert_array_equal(a, b)
Example #4
0
def test_bf_eq_cl_when_using_cl_apriori():
    """BF seeded with the chainladder ultimate reproduces chainladder."""
    quarterly = cl.load_dataset('quarterly')
    cl_ult = cl.Chainladder().fit(quarterly).ultimate_
    # Relabel so the ultimate can serve as a sample-weight apriori.
    cl_ult.rename('development', ['apriori'])
    bf_ult = cl.BornhuetterFerguson().fit(cl.load_dataset('quarterly'),
                                          sample_weight=cl_ult).ultimate_
    xp = cp.get_array_module(cl_ult.values)
    xp.testing.assert_allclose(cl_ult.values, bf_ult.values, atol=1e-5)
Example #5
0
def test_struhuss():
    """CapeCod IBNR on cc_sample matches the known reference figure."""
    losses = cl.load_dataset('cc_sample')['loss']
    # Attach a constant tail on top of fitted development factors.
    losses = cl.TailConstant(tail=1 / 0.85).fit_transform(
        cl.Development().fit_transform(losses))
    exposure = cl.load_dataset('cc_sample')['exposure'].latest_diagonal
    model = cl.CapeCod(trend=0.07, decay=0.75).fit(losses,
                                                   sample_weight=exposure)
    assert int(model.ibnr_.sum()) == 17052
def test_origin_and_value_setters():
    """Round-tripping columns/origin through plain lists is a no-op."""
    mutated = cl.load_dataset('raa')
    pristine = cl.load_dataset('raa')
    mutated.columns = list(mutated.columns)
    mutated.origin = list(mutated.origin)
    # Every axis must survive the setter round-trip unchanged.
    assert np.all(pristine.origin == mutated.origin)
    assert np.all(pristine.development == mutated.development)
    assert np.all(pristine.odims == mutated.odims)
    assert np.all(pristine.vdims == mutated.vdims)
def test_exposure_tri():
    """A single-development slice survives a frame round-trip."""
    auto = cl.load_dataset('auto')
    frame = auto[auto.development == 12]
    # Long-format frame: one row per (LOB, origin) pair.
    frame = frame['paid'].to_frame().T.unstack().reset_index()
    frame.columns = ['LOB', 'origin', 'paid']
    frame.origin = frame.origin.astype(str)
    rebuilt = cl.Triangle(frame, origin='origin', index='LOB', columns='paid')
    expected = cl.load_dataset('auto')['paid']
    expected = expected[expected.development == 12]
    assert expected == rebuilt
Example #8
0
def test_simple_exhibit():
    """Deleting one sheet leaves only the remaining sheet's data."""
    exhibits = cl.Exhibits()
    for sheet in ('Sheet1', 'Sheet2'):
        exhibits.add_exhibit(data=cl.load_dataset('raa'),
                             col_nums=False,
                             sheet_name=sheet)
    exhibits.del_exhibit('Sheet1')
    exhibits.to_excel('test_excel.xlsx')
    # The workbook written to disk must match the raw triangle frame.
    np.testing.assert_equal(
        pd.read_excel('test_excel.xlsx', index_col=0).values,
        cl.load_dataset('raa').to_frame().values)
Example #9
0
def mack_p(data, average, est_sigma, tail):
    """Fit a MackChainladder to *data*, optionally with an exponential tail.

    The development transform is identical either way; the tail flag only
    controls whether a TailCurve is layered on top before fitting.
    """
    dev = cl.Development(average=average, sigma_interpolation=est_sigma)
    transformed = dev.fit_transform(cl.load_dataset(data))
    if tail:
        transformed = cl.TailCurve(curve='exponential').fit_transform(
            transformed)
    return cl.MackChainladder().fit(transformed)
Example #10
0
def test_constant_balances():
    """A 1.05 tail with decay 0.8 still multiplies out to exactly 1.05."""
    quarterly = cl.load_dataset('quarterly')
    tail_ldfs = cl.TailConstant(1.05, decay=0.8).fit(quarterly).ldf_
    # The last five decayed factors must compound back to the constant.
    product = np.prod(tail_ldfs.iloc[0, 1].values[0, 0, 0, -5:])
    assert product == 1.05
def test_link_ratio():
    """link_ratio times the earlier diagonal reproduces the later one."""
    tri = cl.load_dataset('RAA')
    xp = cp.get_array_module(tri.values)
    earlier = tri.values[:, :, :-1, :-1]
    later = tri.values[:, :, :-1, 1:]
    xp.testing.assert_allclose(tri.link_ratio.values * earlier, later,
                               atol=1e-5)
Example #12
0
def test_grid():
    """GridSearch over a Benktander pipeline matches the hand-built fit."""
    # Data: medical malpractice paid losses and earned premium from CLRD.
    clrd = cl.load_dataset('clrd')
    paid = clrd.groupby('LOB').sum().loc['medmal']['CumPaidLoss']
    premium = clrd.groupby(
        'LOB').sum().loc['medmal']['EarnedPremDIR'].latest_diagonal
    premium.rename('development', ['premium'])

    # Pipeline: development factors -> tail curve -> Benktander.
    pipe = cl.Pipeline([('dev', cl.Development()),
                        ('tail', cl.TailCurve()),
                        ('benk', cl.Benktander())])

    # Single-point grid plus an IBNR scoring function.
    grid = cl.GridSearch(
        pipe,
        dict(benk__n_iters=[250], benk__apriori=[1.00]),
        scoring={'IBNR': lambda x: x.named_steps.benk.ibnr_.sum()[0]})
    grid.fit(paid, benk__sample_weight=premium)

    # Rebuild the same model by hand and compare the scored IBNR.
    manual = cl.Benktander(n_iters=250, apriori=1).fit(
        cl.TailCurve().fit_transform(cl.Development().fit_transform(paid)),
        sample_weight=premium).ibnr_.sum()[0]
    assert grid.results_['IBNR'][0] == manual
def test_tail_doesnt_mutate_ldf_(data, averages, est_sigma):
    # Attaching a tail must leave the pre-tail LDFs untouched: the tailed
    # model's ldf_ truncated to the original development ages must equal
    # the tail-free model's ldf_.
    # NOTE(review): this calls mack_p with three arguments, but the
    # mack_p defined earlier in this file takes four (data, average,
    # est_sigma, tail) — presumably this test belongs with the
    # three-argument mack_p defined further below; verify which is in scope.
    p = mack_p(
        data, averages[0],
        est_sigma[0]).ldf_.values[..., :len(cl.load_dataset(data).ddims) - 1]
    xp = cp.get_array_module(p)
    p_no_tail = mack_p_no_tail(data, averages[0], est_sigma[0]).ldf_.values
    xp.testing.assert_array_equal(p_no_tail, p)
Example #14
0
def test_n_periods():
    """n_periods=3 volume-weighted LDFs must match the known values.

    Bug fix: the original *returned* the boolean check instead of
    asserting it.  pytest ignores a test's return value, so the test
    could never fail; the condition is now asserted.
    """
    d = cl.load_dataset('usauto')['incurred']
    ldf = cl.Development(n_periods=3, average='volume').fit(d).ldf_.values
    expected = np.array(
        [1.164, 1.056, 1.027, 1.012, 1.005, 1.003, 1.002, 1.001, 1.0])
    # Collapse the origin axis (factors are constant across origins) and
    # compare at three decimal places.
    assert np.all(np.round(np.unique(ldf, axis=-2), 3).flatten() == expected)
def test_fit_period():
    """Restricting TailCurve to the last six periods gives the known CDF."""
    dev = cl.Development(average='simple').fit_transform(
        cl.load_dataset('tail_sample'))
    tail = cl.TailCurve(fit_period=slice(-6, None, None),
                        extrap_periods=10).fit(dev)
    # Second-to-last paid CDF, rounded to three places.
    assert round(tail.cdf_['paid'].values[0, 0, 0, -2], 3) == 1.044
Example #16
0
def test_triangle_json_io():
    """A Triangle survives a JSON round-trip on every axis."""
    original = cl.load_dataset('clrd')
    rebuilt = cl.read_json(original.to_json())
    # Each array-valued attribute must round-trip exactly.
    for attr in ('values', 'kdims', 'vdims', 'odims', 'ddims'):
        np.testing.assert_equal(getattr(original, attr),
                                getattr(rebuilt, attr))
    assert np.all(original.valuation == rebuilt.valuation)
Example #17
0
def test_schmidt():
    """IncrementalAdditive reproduces the known cumulative ultimates."""
    tri = cl.load_dataset('ia_sample')
    model = cl.IncrementalAdditive().fit_transform(
        tri.iloc[0, 1], sample_weight=tri.iloc[0, 0].latest_diagonal)
    # Last cumulative column after converting increments to cumulatives.
    answer = model.incremental_.incr_to_cum().values[0, 0, :, -1]
    expected = np.array([3483., 4007.84795031, 4654.36196862, 5492.00685523,
                         6198.10197128, 7152.82539296])
    assert_allclose(answer, expected, atol=1e-5)
Example #18
0
def test_mcl_paid():
    # Compare MunichAdjustment's full paid triangle against the output of
    # R's MunichChainLadder on the same data.
    # NOTE(review): `r` is presumably an rpy2 bridge into the R
    # ChainLadder package, defined at module level — confirm against the
    # test module's imports.
    df = r('MunichChainLadder(MCLpaid, MCLincurred)').rx('MCLPaid')
    p = cl.MunichAdjustment(paid_to_incurred={
        'paid': 'incurred'
    }).fit(
        cl.Development(sigma_interpolation='mack').fit_transform(
            cl.load_dataset('mcl'))).munich_full_triangle_[0, 0, 0, :, :]
    arr = np.array(df[0])
    assert_allclose(arr, p, atol=1e-5)
Example #19
0
def test_commutative():
    # Grain changes, val_to_dev, and cum/incr conversions must commute:
    # applying them in either order yields the same triangle.
    tri = cl.load_dataset('quarterly')
    xp = cp.get_array_module(tri.values)
    full = cl.Chainladder().fit(tri).full_expectation_
    # Commutativity on the raw triangle.
    assert tri.grain('OYDY').val_to_dev() == tri.val_to_dev().grain('OYDY')
    assert tri.cum_to_incr().grain('OYDY').val_to_dev() == tri.val_to_dev().cum_to_incr().grain('OYDY')
    assert tri.grain('OYDY').cum_to_incr().val_to_dev().incr_to_cum() == tri.val_to_dev().grain('OYDY')
    # Same identities on the full (squared) expectation.
    assert full.grain('OYDY').val_to_dev() == full.val_to_dev().grain('OYDY')
    assert full.cum_to_incr().grain('OYDY').val_to_dev() == full.val_to_dev().cum_to_incr().grain('OYDY')
    # Float comparison with NaNs zeroed out needs a tolerance.
    assert xp.allclose(xp.nan_to_num(full.grain('OYDY').cum_to_incr().val_to_dev().incr_to_cum().values),
            xp.nan_to_num(full.val_to_dev().grain('OYDY').values), atol=1e-5)
def test_bs_random_state_predict():
    """predict() on the fitting data reproduces the fitted IBNR."""
    wkcomp = cl.load_dataset('clrd').groupby('LOB').sum().loc[
        'wkcomp', ['CumPaidLoss', 'EarnedPremNet']]
    X = cl.BootstrapODPSample(random_state=100).fit_transform(
        wkcomp['CumPaidLoss'])
    bf = cl.BornhuetterFerguson(
        apriori=0.6, apriori_sigma=0.1, random_state=42).fit(
            X, sample_weight=wkcomp['EarnedPremNet'].latest_diagonal)
    refit = bf.predict(
        X, sample_weight=wkcomp['EarnedPremNet'].latest_diagonal)
    assert refit.ibnr_ == bf.ibnr_
def test_slicers_honor_order():
    # iloc/loc with explicit lists must honor the requested order rather
    # than the triangle's native order.
    clrd = cl.load_dataset('clrd').groupby('LOB').sum()
    # Reversed row list: position 0 is the original row 1.
    assert clrd.iloc[[1, 0], :].iloc[0, 1] == clrd.iloc[1, 1]  #row
    # Reversed rows and columns: (0, 0) is the original (1, 1).
    assert clrd.iloc[[1, 0], [1, 0]].iloc[0, 0] == clrd.iloc[1, 1]  #col
    # Column selection by name keeps the listed order.
    assert clrd.loc[:, ['CumPaidLoss', 'IncurLoss']].iloc[0, 0] == clrd.iloc[0,
                                                                             1]
    # Row selection by label keeps the listed order too.
    assert clrd.loc[['ppauto', 'medmal'], ['CumPaidLoss', 'IncurLoss']].iloc[
        0, 0] == clrd.iloc[3]['CumPaidLoss']
    # Boolean-mask row selection with reordered columns equals an
    # equivalent mask-then-iloc column reorder.
    assert clrd.loc[clrd['LOB'] == 'comauto',
                    ['CumPaidLoss', 'IncurLoss']] == clrd[
                        clrd['LOB'] == 'comauto'].iloc[:, [1, 0]]
Example #22
0
def test_simple_exhibit():
    """Compose Column/Row exhibit objects and write them to a workbook.

    NOTE(review): a ``test_simple_exhibit`` with a different body also
    appears earlier in this file; in one pytest module the later
    definition shadows the earlier, so only one runs — consider renaming.
    """
    raa = cl.load_dataset('raa')

    # Two stacked DataFrame exhibits in one column.
    col = cl.Column(cl.DataFrame(raa, margin=(0, 0, 1, 0)),
                    cl.DataFrame(raa.link_ratio, formats={'italic': True}))
    composite = cl.Row(col,
                       col,
                       title=['This title spans both Column Objects'],
                       margin=(0, 1, 0, 0))
    # to_excel is called for its side effect; binding its return value to
    # an unused local (as the original did) is flagged by linters (F841).
    cl.Tabs(('a_sheet', composite),
            ('another_sheet', composite)).to_excel('workbook.xlsx')
def test_constant_balances():
    """The decayed tail factors multiply back to the 1.05 constant."""
    quarterly = cl.load_dataset('quarterly')
    xp = cp.get_array_module(quarterly.values)
    tail_ldfs = cl.TailConstant(1.05, decay=0.8).fit(quarterly).ldf_
    # Compound the last five decayed factors and compare at 3 decimals.
    balance = float(xp.prod(tail_ldfs.iloc[0, 1].values[0, 0, 0, -5:]))
    assert round(balance, 3) == 1.050
def test_tail_doesnt_mutate_ldf_(data, averages, est_sigma):
    """Adding a tail must not change the pre-tail development factors."""
    n_dev = len(cl.load_dataset(data).ddims) - 1
    with_tail = mack_p(data, averages[0], est_sigma[0]).ldf_.triangle
    without_tail = mack_p_no_tail(data, averages[0],
                                  est_sigma[0]).ldf_.triangle
    assert_equal(without_tail, with_tail[..., :n_dev])
def mack_p_no_tail(data, average, est_sigma):
    """Return the development-transformed dataset with no tail attached."""
    dev = cl.Development(average=average, sigma_interpolation=est_sigma)
    return dev.fit_transform(cl.load_dataset(data))
def mack_p(data, average, est_sigma):
    """Return the development-transformed dataset with an exponential tail."""
    dev = cl.Development(average=average, sigma_interpolation=est_sigma)
    transformed = dev.fit_transform(cl.load_dataset(data))
    return cl.TailCurve(curve='exponential').fit_transform(transformed)
def test_bf_eq_cl_when_using_cl_apriori():
    """BF with the chainladder ultimate as apriori equals chainladder."""
    quarterly = cl.load_dataset('quarterly')
    cl_ult = cl.Chainladder().fit(quarterly).ultimate_
    # Relabel so the ultimate can serve as a sample-weight apriori.
    cl_ult.rename('development', ['apriori'])
    bf_ult = cl.BornhuetterFerguson().fit(cl.load_dataset('quarterly'),
                                          sample_weight=cl_ult).ultimate_
    assert_allclose(cl_ult.triangle, bf_ult.triangle, atol=1e-5)
def test_benktander_to_chainladder(data, atol):
    """With many iterations, Benktander converges to plain chainladder."""
    tri = cl.load_dataset(data)
    cl_ibnr = cl.Chainladder().fit(tri).ibnr_
    bk_ibnr = cl.Benktander(apriori=.8,
                            n_iters=255).fit(tri,
                                             sample_weight=cl_ibnr).ibnr_
    assert_allclose(cl_ibnr.triangle, bk_ibnr.triangle, atol=atol)
"""
======================
Value at Risk example
======================

This example uses the `BootstrapODPSample` to simulate new triangles that
are then used to simulate an IBNR distribution from which we can do
Value at Risk percentile lookups.
"""

import chainladder as cl
import seaborn as sns
sns.set_style('whitegrid')

# Load triangle
triangle = cl.load_dataset('genins')

# Create 1000 bootstrap samples of the triangle
resampled_triangles = cl.BootstrapODPSample().fit_transform(triangle)

# Create 1000 IBNR estimates
sim_ibnr = cl.Chainladder().fit(resampled_triangles).ibnr_.sum('origin')

# X - mu
sim_ibnr = (sim_ibnr - sim_ibnr.mean()).to_frame().sort_values()

# Plot data
sim_ibnr.index = [item / 1000 for item in range(1000)]
sim_ibnr.loc[0.90:].plot(title='Bootstrap VaR (90% and above)',
                         color='red').set(xlabel='VaR')
"""
====================================
Picking Bornhuetter-Ferguson Apriori
====================================

This example demonstrates how you can can use the output of one method as the
apriori selection for the Bornhuetter-Ferguson Method.
"""
import chainladder as cl
import pandas as pd
import seaborn as sns
sns.set_style('whitegrid')

# Create Aprioris as the mean AY chainladder ultimate
raa = cl.load_dataset('RAA')
cl_ult = cl.Chainladder().fit(raa).ultimate_  # Chainladder Ultimate
apriori = cl_ult * 0 + (cl_ult.sum() / 10)  # Mean Chainladder Ultimate
bf_ult = cl.BornhuetterFerguson(apriori=1).fit(raa,
                                               sample_weight=apriori).ultimate_

# Plot of Ultimates
plot_data = cl_ult.to_frame().rename({'values': 'Chainladder'}, axis=1)
plot_data['BornhuetterFerguson'] = bf_ult.to_frame()
plot_data = plot_data.stack().reset_index()
plot_data.columns = ['Accident Year', 'Method', 'Ultimate']
plot_data['Accident Year'] = plot_data['Accident Year'].dt.year
pd.pivot_table(plot_data,
               index='Accident Year',
               columns='Method',
               values='Ultimate').plot()