Example #1
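All of the examples that follow are test functions excerpted from the chainladder package and from projects that use it, so their module-level imports are not shown. A minimal setup sketch covering the names most snippets assume (a few additionally rely on cupy as cp, numpy.testing.assert_allclose, an R bridge r such as rpy2's, and pytest fixtures like raa, clrd, and prism that supply pre-loaded sample triangles):

import itertools

import numpy as np
import pandas as pd
import chainladder as cl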
def test_grid():
    # Load Data
    clrd = cl.load_sample("clrd")
    medmal_paid = clrd.groupby("LOB").sum().loc["medmal"]["CumPaidLoss"]
    medmal_prem = (clrd.groupby("LOB").sum().loc["medmal"]
                   ["EarnedPremDIR"].latest_diagonal)
    medmal_prem.rename("development", ["premium"])

    # Pipeline
    dev = cl.Development()
    tail = cl.TailCurve()
    benk = cl.Benktander()

    steps = [("dev", dev), ("tail", tail), ("benk", benk)]
    pipe = cl.Pipeline(steps)

    # Prep Benktander Grid Search with various assumptions, and a scoring function
    param_grid = dict(benk__n_iters=[250], benk__apriori=[1.00])
    scoring = {"IBNR": lambda x: x.named_steps.benk.ibnr_.sum()}

    grid = cl.GridSearch(pipe, param_grid, scoring=scoring)
    # Perform Grid Search
    grid.fit(medmal_paid, benk__sample_weight=medmal_prem)
    assert (grid.results_["IBNR"][0] == cl.Benktander(
        n_iters=250, apriori=1).fit(
            cl.TailCurve().fit_transform(
                cl.Development().fit_transform(medmal_paid)),
            sample_weight=medmal_prem,
        ).ibnr_.sum())
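
The benk__n_iters and benk__apriori keys follow the scikit-learn convention of <step name>__<parameter>, so the grid may list several candidate values per hyperparameter; a hypothetical wider grid could look like:

param_grid = dict(benk__n_iters=[1, 2, 250], benk__apriori=[0.50, 0.75, 1.00])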
Example #2
def test_grid():
    # Load Data
    clrd = cl.load_dataset('clrd')
    medmal_paid = clrd.groupby('LOB').sum().loc['medmal']['CumPaidLoss']
    medmal_prem = clrd.groupby(
        'LOB').sum().loc['medmal']['EarnedPremDIR'].latest_diagonal
    medmal_prem.rename('development', ['premium'])

    # Pipeline
    dev = cl.Development()
    tail = cl.TailCurve()
    benk = cl.Benktander()

    steps = [('dev', dev), ('tail', tail), ('benk', benk)]
    pipe = cl.Pipeline(steps)

    # Prep Benktander Grid Search with various assumptions, and a scoring function
    param_grid = dict(benk__n_iters=[250], benk__apriori=[1.00])
    scoring = {'IBNR': lambda x: x.named_steps.benk.ibnr_.sum()[0]}

    grid = cl.GridSearch(pipe, param_grid, scoring=scoring)
    # Perform Grid Search
    grid.fit(medmal_paid, benk__sample_weight=medmal_prem)
    assert grid.results_['IBNR'][0] == \
        cl.Benktander(n_iters=250, apriori=1).fit(
            cl.TailCurve().fit_transform(
                cl.Development().fit_transform(medmal_paid)),
            sample_weight=medmal_prem).ibnr_.sum()[0]
Example #3
def test_drop1(raa):
    assert (
        cl.Development(drop=("1982", 12)).fit(raa).ldf_.values[0, 0, 0, 0]
        == cl.Development(drop_high=[True] + [False] * 8)
        .fit(raa)
        .ldf_.values[0, 0, 0, 0]
    )
Example #4
def test_assymetric_development():
    quarterly = cl.load_sample('quarterly')['paid']
    xp = np if quarterly.array_backend == 'sparse' else quarterly.get_array_module()
    dev = cl.Development(n_periods=1, average='simple').fit(quarterly)
    dev2 = cl.Development(n_periods=1, average='regression').fit(quarterly)
    assert xp.allclose(dev.ldf_.values, dev2.ldf_.values, atol=1e-5)
Example #5
File: IBNR.py Project: Quantuary/IBNR
 def sample_weighted_80_20_6m_12m(cls, triangle_1D):
     param6 = {'n_periods':6, 'average':'simple'}
     param12 = {'n_periods':12, 'average':'simple'} 
     
     model6 = cl.Development(**param6).fit(triangle_1D.incr_to_cum())
     model12 = cl.Development(**param12).fit(triangle_1D.incr_to_cum())
     
     def generate_sample_weight(model, value):
         weight = model.w_
         new_weight = weight.reshape(-1,1)
         new_weight[new_weight ==1] = value
         new_weight = new_weight.reshape(model.w_.shape[2], model.w_.shape[3])
         return new_weight
     weight6 = generate_sample_weight(model6, 0.8)
     weight12 = generate_sample_weight(model12, 0.2) 
     
     param = {'n_periods':12, 'average':'simple'} 
     selected_link = cl.Development(**param).fit_transform(triangle_1D.incr_to_cum()).link_ratio.to_frame().values
     
     
     weight_80_20 = np.where(weight6 == 0, weight12, weight6)                       # build the blended 80/20 sample weight
     weight_80_20 = weight_80_20[:selected_link.shape[0], :selected_link.shape[0]]  # trim to align with the link-ratio matrix
     weight_80_20[np.isnan(selected_link)] = 0                                      # zero the extra diagonal where no link ratio exists
     
     product = np.multiply(weight_80_20, selected_link)                             # multiply the two matrices element-wise
     n = np.nansum(product, axis=0)                                                 # numerator: weighted link ratios, summed by column
     d = np.nansum(weight_80_20, axis=0)                                            # denominator: sum of weights by column
     ldf = pd.DataFrame(np.divide(n, d)).T.rename(index={0: '80/20-6m/12m-sample-weighted'})  # weighted average, then transpose and rename
     
     
     cdf = ldf[ldf.columns[::-1]].cumprod(axis=1)                                   # calculate cumulative factor
     cdf = cdf[cdf.columns[::-1]]
 
     return ldf, cdf
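
The blending step above is a column-wise weighted mean of link ratios followed by a reverse cumulative product; a standalone sketch of that arithmetic with purely illustrative numbers:

import numpy as np

weights = np.array([[0.8, 0.8], [0.2, np.nan]])   # blended 80/20 weights, nan where no link ratio exists
links = np.array([[1.50, 1.10], [1.40, np.nan]])  # age-to-age link ratios
ldf = np.nansum(weights * links, axis=0) / np.nansum(weights, axis=0)  # weighted average by column
cdf = np.cumprod(ldf[::-1])[::-1]                 # cumulate from the tail back to age-to-ultimate factors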
Example #6
def test_assymetric_development():
    quarterly = cl.load_sample("quarterly")["paid"]
    xp = np if quarterly.array_backend == "sparse" else quarterly.get_array_module()
    dev = cl.Development(n_periods=1, average="simple").fit(quarterly)
    dev2 = cl.Development(n_periods=1, average="regression").fit(quarterly)
    assert xp.allclose(dev.ldf_.values, dev2.ldf_.values, atol=1e-5)
Example #7
def test_full_slice2():
    assert (
        cl.Development().fit_transform(cl.load_sample("GenIns")).ldf_
        == cl.Development(n_periods=[1000] * (cl.load_sample("GenIns").shape[3] - 1))
        .fit_transform(cl.load_sample("GenIns"))
        .ldf_
    )
Example #8
def test_drop2(raa):
    assert (
        cl.Development(drop_valuation="1981").fit(raa).ldf_.values[0, 0, 0, 0]
        == cl.Development(drop_low=[True] + [False] * 8)
        .fit(raa)
        .ldf_.values[0, 0, 0, 0]
    )
Example #9
def test_misaligned_index2(clrd):
    clrd = clrd['CumPaidLoss']
    w = cl.load_sample('clrd')['EarnedPremDIR'].latest_diagonal
    bcl = cl.Chainladder().fit(
        cl.Development(groupby=['LOB']).fit_transform(clrd))
    bbk = cl.Benktander().fit(
        cl.Development(groupby=['LOB']).fit_transform(clrd), sample_weight=w)
    bcc = cl.CapeCod().fit(cl.Development(groupby=['LOB']).fit_transform(clrd),
                           sample_weight=w)

    a = bcl.ultimate_.iloc[:10].sum().sum()
    b = bcl.predict(clrd.iloc[:10]).ultimate_.sum().sum()
    assert abs(a - b) < 1e-5
    a = bbk.ultimate_.iloc[:10].sum().sum()
    b = bbk.predict(clrd.iloc[:10],
                    sample_weight=w.iloc[:10]).ultimate_.sum().sum()
    assert abs(a - b) < 1e-5
    a = bcc.ultimate_.iloc[:10].sum().sum()
    b = bcc.predict(clrd.iloc[:10],
                    sample_weight=w.iloc[:10]).ultimate_.sum().sum()
    assert abs(a - b) < 1e-5

    a = bcl.ultimate_.iloc[150:153].sum().sum()
    b = bcl.predict(clrd.iloc[150:153]).ultimate_.sum().sum()
    assert abs(a - b) < 1e-5
    a = bbk.ultimate_.iloc[150:153].sum().sum()
    b = bbk.predict(clrd.iloc[150:153],
                    sample_weight=w.iloc[150:153]).ultimate_.sum().sum()
    assert abs(a - b) < 1e-5
    a = bcc.ultimate_.iloc[150:153].sum().sum()
    b = bcc.predict(clrd.iloc[150:153],
                    sample_weight=w.iloc[150:153]).ultimate_.sum().sum()
    assert abs(a - b) < 1e-5

    a = bcl.ultimate_.iloc[150:152].sum().sum()
    b = bcl.predict(clrd.iloc[150:152]).ultimate_.sum().sum()
    assert abs(a - b) < 1e-5
    a = bbk.ultimate_.iloc[150:152].sum().sum()
    b = bbk.predict(clrd.iloc[150:152],
                    sample_weight=w.iloc[150:152]).ultimate_.sum().sum()
    assert abs(a - b) < 1e-5
    a = bcc.ultimate_.iloc[150:152].sum().sum()
    b = bcc.predict(clrd.iloc[150:152],
                    sample_weight=w.iloc[150:152]).ultimate_.sum().sum()
    assert abs(a - b) < 1e-5

    a = bcl.ultimate_.iloc[150].sum().sum()
    b = bcl.predict(clrd.iloc[150]).ultimate_.sum().sum()
    assert abs(a - b) < 1e-5
    a = bbk.ultimate_.iloc[150].sum().sum()
    b = bbk.predict(clrd.iloc[150],
                    sample_weight=w.iloc[150]).ultimate_.sum().sum()
    assert abs(a - b) < 1e-5
    a = bcc.ultimate_.iloc[150].sum().sum()
    b = bcc.predict(clrd.iloc[150],
                    sample_weight=w.iloc[150]).ultimate_.sum().sum()
    assert abs(a - b) < 1e-5
Example #10
def mack_p(data, average, est_sigma, tail):
    if tail:
        return cl.MackChainladder().fit(
            cl.TailCurve(curve='exponential').fit_transform(
                cl.Development(average=average,
                               sigma_interpolation=est_sigma).fit_transform(
                                   cl.load_sample(data))))
    else:
        return cl.MackChainladder().fit(
            cl.Development(average=average,
                           sigma_interpolation=est_sigma).fit_transform(
                               cl.load_sample(data)))
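
A hypothetical call of the helper above, assuming the 'raa' sample triangle and standard chainladder option values for average and sigma_interpolation:

mack = mack_p('raa', average='volume', est_sigma='log-linear', tail=False)
print(mack.summary_)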
Example #11
def test_mack_to_triangle():
    assert (
        cl.MackChainladder()
        .fit(
            cl.TailConstant().fit_transform(
                cl.Development().fit_transform(cl.load_sample("ABC"))
            )
        )
        .summary_
        == cl.MackChainladder()
        .fit(cl.Development().fit_transform(cl.load_sample("ABC")))
        .summary_
    )
Example #12
File: IBNR.py Project: Quantuary/IBNR
 def all_ldf_cdf(self, development_period=12):
     
     ldf = pd.DataFrame()
     cdf = pd.DataFrame()
     for label in IBNR.model_params:
         param = IBNR.model_params[label]
         
         model = cl.Development(**param).fit(self.triangle_1D.incr_to_cum())
         _ldf = model.ldf_.to_frame()
         _ldf.rename(index={'(All)':label}, inplace=True)
         ldf = ldf.append(_ldf)
         
         _cdf = model.cdf_.to_frame()
         _cdf.rename(index={'(All)': label}, inplace=True)
         cdf = cdf.append(_cdf)
     
     s_ldf, s_cdf = filter_.sample_weighted_80_20_6m_12m(self.triangle_1D)
     s_ldf.columns = _ldf.columns
     s_cdf.columns = _cdf.columns
     
     ldf = ldf.append(s_ldf)
     ldf = ldf.iloc[:,:development_period]
     ldf.name = 'Loss Development Factor'
     
     cdf = cdf.append(s_cdf)
     cdf = cdf.iloc[:,:development_period]
     cdf.name = 'Cumulative Development Factor'
     
     return IBNR.format_table(ldf) , IBNR.format_table(cdf)
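
Note that DataFrame.append was deprecated in pandas 1.4 and removed in 2.0; on current pandas the accumulation above would use pd.concat instead, for example:

ldf = pd.concat([ldf, _ldf])
cdf = pd.concat([cdf, _cdf])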
Example #13
def test_constant_cdf():
    dev = cl.Development().fit(cl.load_dataset('raa'))
    link_ratios = {(num + 1) * 12: item
                   for num, item in enumerate(dev.ldf_.values[0, 0, 0, :])}
    dev_c = cl.DevelopmentConstant(patterns=link_ratios,
                                   style='ldf').fit(cl.load_dataset('raa'))
    assert_allclose(dev.cdf_.values, dev_c.cdf_.values, atol=1e-5)
Example #14
def test_constant_cdf(raa):
    dev = cl.Development().fit(raa)
    xp = dev.ldf_.get_array_module()
    link_ratios = {(num + 1) * 12: item
                   for num, item in enumerate(dev.ldf_.values[0, 0, 0, :])}
    dev_c = cl.DevelopmentConstant(patterns=link_ratios, style="ldf").fit(raa)
    assert xp.allclose(dev.cdf_.values, dev_c.cdf_.values, atol=1e-5)
Example #15
def test_misaligned_index(prism):
    prism = prism['Paid']
    model = cl.Chainladder().fit(
        cl.Development(groupby=['Line', 'Type']).fit_transform(prism))
    a = model.ultimate_.loc[prism.index.iloc[:10]].sum().sum()
    b = model.predict(prism.iloc[:10]).ultimate_.sum().sum()
    assert abs(a - b) < 1e-5
Example #16
def test_mcl_ult():
    mcl = cl.load_sample("mcl")
    dev = cl.Development().fit_transform(mcl)
    cl_traditional = cl.Chainladder().fit(dev).ultimate_
    dev_munich = cl.MunichAdjustment(
        paid_to_incurred=[("paid", "incurred")]).fit_transform(dev)
    cl_munich = cl.Chainladder().fit(dev_munich).ultimate_
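
The snippet computes both the traditional and the Munich-adjusted ultimates without asserting anything about them; an illustrative follow-up comparison might be:

diff = (cl_traditional - cl_munich).sum()  # aggregate gap between the two ultimate estimates
print(diff)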
Example #17
def test_groupby(clrd):
    clrd = clrd[clrd['LOB'] == 'comauto']
    # Only the top 10 companies get their own CapeCod aprioris; smaller companies are grouped together
    top_10 = clrd['EarnedPremDIR'].groupby('GRNAME').sum().latest_diagonal
    top_10 = top_10.loc[..., '1997', :].to_frame().nlargest(10)
    cc_groupby = clrd.index['GRNAME'].map(
        lambda x: x if x in top_10.index else 'Remainder')
    idx = clrd.index
    idx['Top 10'] = cc_groupby
    clrd.index = idx

    # All companies share the same development factors regardless of size
    X = cl.Development().fit(clrd['CumPaidLoss'].sum()).transform(
        clrd['CumPaidLoss'])
    sample_weight = clrd['EarnedPremDIR'].latest_diagonal
    a = cl.CapeCod(groupby='Top 10', decay=0.98,
                   trend=0.02).fit(X,
                                   sample_weight=sample_weight).ibnr_.groupby(
                                       'Top 10').sum().sort_index()
    b = cl.CapeCod(decay=0.98,
                   trend=0.02).fit(X.groupby('Top 10').sum(),
                                   sample_weight=sample_weight.groupby(
                                       'Top 10').sum()).ibnr_.sort_index()
    xp = a.get_array_module()
    b = b.set_backend(a.array_backend)
    assert xp.allclose(xp.nan_to_num(a.values), xp.nan_to_num(b.values), atol=1e-5)
Example #18
def test_pipeline():
    tri = cl.load_sample('clrd').groupby('LOB').sum()[[
        'CumPaidLoss', 'IncurLoss', 'EarnedPremDIR'
    ]]
    tri['CaseIncurredLoss'] = tri['IncurLoss'] - tri['CumPaidLoss']

    X = tri[['CumPaidLoss', 'CaseIncurredLoss']]
    sample_weight = tri['EarnedPremDIR'].latest_diagonal

    dev = [
        cl.Development(),
        cl.ClarkLDF(),
        cl.Trend(),
        cl.IncrementalAdditive(),
        cl.MunichAdjustment(paid_to_incurred=('CumPaidLoss',
                                              'CaseIncurredLoss')),
        cl.CaseOutstanding(paid_to_incurred=('CumPaidLoss',
                                             'CaseIncurredLoss'))
    ]
    tail = [cl.TailCurve(), cl.TailConstant(), cl.TailBondy(), cl.TailClark()]
    ibnr = [
        cl.Chainladder(),
        cl.BornhuetterFerguson(),
        cl.Benktander(n_iters=2),
        cl.CapeCod()
    ]

    for model in list(itertools.product(dev, tail, ibnr)):
        print(model)
        cl.Pipeline(
            steps=[('dev',
                    model[0]), ('tail',
                                model[1]), ('ibnr', model[2])]).fit_predict(
                                    X, sample_weight=sample_weight).ibnr_.sum(
                                        'origin').sum('columns').sum()
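
itertools.product enumerates every development × tail × IBNR combination above (6 × 4 × 4 = 96 pipelines); the same loop written with unpacking for readability:

for dev_est, tail_est, ibnr_est in itertools.product(dev, tail, ibnr):
    pipe = cl.Pipeline(steps=[('dev', dev_est), ('tail', tail_est), ('ibnr', ibnr_est)])
    pipe.fit_predict(X, sample_weight=sample_weight)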
Example #19
def test_fit_period():
    tri = cl.load_sample('tail_sample')
    dev = cl.Development(average='simple').fit_transform(tri)
    assert round(
        cl.TailCurve(fit_period=(tri.ddims[-7], None),
                     extrap_periods=10).fit(dev).cdf_['paid'].set_backend(
                         'numpy', inplace=True).values[0, 0, 0, -2],
        3) == 1.044
Example #20
def test_n_periods():
    d = cl.load_dataset('usauto')['incurred']
    return np.all(
        np.round(
            np.unique(
                cl.Development(n_periods=3, average='volume').fit(d).ldf_.values,
                axis=-2),
            3).flatten() ==
        np.array([1.164, 1.056, 1.027, 1.012, 1.005, 1.003, 1.002, 1.001, 1.0]))
Example #21
def test_fit_period():
    tri = cl.load_dataset('tail_sample')
    dev = cl.Development(average='simple').fit_transform(tri)
    assert round(
        cl.TailCurve(fit_period=slice(-6, None, None),
                     extrap_periods=10).fit(dev).cdf_['paid'].values[0, 0, 0,
                                                                     -2],
        3) == 1.044
Example #22
def test_pipeline_json_io():
    pipe = cl.Pipeline(
        steps=[('dev', cl.Development()), ('model', cl.BornhuetterFerguson())])
    pipe2 = cl.read_json(pipe.to_json())
    assert {item[0]: item[1].get_params()
            for item in pipe.get_params()['steps']} == \
           {item[0]: item[1].get_params()
            for item in pipe2.get_params()['steps']}
Example #23
def test_mcl_paid():
    df = r("MunichChainLadder(MCLpaid, MCLincurred)").rx("MCLPaid")
    p = cl.MunichAdjustment(paid_to_incurred=("paid", "incurred")).fit(
        cl.Development(sigma_interpolation="mack").fit_transform(
            cl.load_sample("mcl")))
    xp = p.ldf_.get_array_module()
    arr = xp.array(df[0])
    assert xp.allclose(arr, p.munich_full_triangle_[0, 0, 0, :, :], atol=1e-5)
Example #24
def test_pipeline_json_io():
    pipe = cl.Pipeline(
        steps=[("dev", cl.Development()), ("model", cl.BornhuetterFerguson())]
    )
    pipe2 = cl.read_json(pipe.to_json())
    assert {item[0]: item[1].get_params() for item in pipe.get_params()["steps"]} == {
        item[0]: item[1].get_params() for item in pipe2.get_params()["steps"]
    }
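
The assertion compares only the step parameters after a JSON round-trip; persisting the serialized pipeline to disk is just a matter of writing the string out (a sketch with a hypothetical filename):

with open("pipeline.json", "w") as f:
    f.write(pipe.to_json())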
Example #25
def test_mcl_paid():
    df = r('MunichChainLadder(MCLpaid, MCLincurred)').rx('MCLPaid')
    p = cl.MunichAdjustment(paid_to_incurred=('paid', 'incurred')).fit(
        cl.Development(sigma_interpolation='mack').fit_transform(
            cl.load_sample('mcl'))).munich_full_triangle_[0, 0, 0, :, :]
    xp = cp.get_array_module(p)
    arr = xp.array(df[0])
    xp.testing.assert_allclose(arr, p, atol=1e-5)
Example #26
def test_constant_cdf():
    dev = cl.Development().fit(cl.load_sample('raa'))
    xp = cp.get_array_module(dev.ldf_.values)
    link_ratios = {(num + 1) * 12: item
                   for num, item in enumerate(dev.ldf_.values[0, 0, 0, :])}
    dev_c = cl.DevelopmentConstant(patterns=link_ratios,
                                   style='ldf').fit(cl.load_sample('raa'))
    xp.testing.assert_allclose(dev.cdf_.values, dev_c.cdf_.values, atol=1e-5)
Example #27
def test_mcl_paid():
    df = r('MunichChainLadder(MCLpaid, MCLincurred)').rx('MCLPaid')
    p = cl.MunichAdjustment(paid_to_incurred={
        'paid': 'incurred'
    }).fit(
        cl.Development(sigma_interpolation='mack').fit_transform(
            cl.load_dataset('mcl'))).munich_full_triangle_[0, 0, 0, :, :]
    arr = np.array(df[0])
    assert_allclose(arr, p, atol=1e-5)
Example #28
def test_struhuss():
    X = cl.load_sample("cc_sample")["loss"]
    X = cl.TailConstant(tail=1 / 0.85).fit_transform(
        cl.Development().fit_transform(X))
    sample_weight = cl.load_sample("cc_sample")["exposure"].latest_diagonal
    ibnr = int(
        cl.CapeCod(trend=0.07,
                   decay=0.75).fit(X, sample_weight=sample_weight).ibnr_.sum())
    assert ibnr == 17052
Example #29
def test_fit_period():
    tri = cl.load_sample("tail_sample")
    dev = cl.Development(average="simple").fit_transform(tri)
    assert (round(
        cl.TailCurve(fit_period=(tri.ddims[-7], None),
                     extrap_periods=10).fit(dev).cdf_["paid"].set_backend(
                         "numpy", inplace=True).values[0, 0, 0, -2],
        3,
    ) == 1.044)
Example #30
def test_n_periods():
    d = cl.load_sample('usauto')['incurred']
    xp = np if d.array_backend == 'sparse' else d.get_array_module()
    return xp.all(
        xp.around(
            xp.unique(
                cl.Development(n_periods=3, average='volume').fit(d).ldf_.values,
                axis=-2),
            3).flatten() ==
        xp.array([1.164, 1.056, 1.027, 1.012, 1.005, 1.003, 1.002, 1.001, 1.0]))