Example #1
0
def test_weight_broadcasting():
    """Weights of every supported dimensionality must blend to the same ultimate.

    The minimal weight form is ``(origin, estimator)``; mid/max forms prepend
    broadcastable axes (the max form has one weight row per wkcomp group, 132
    of them).  All three must produce the same total ultimate.
    """
    clrd = cl.load_sample("clrd")[["CumPaidLoss", "EarnedPremDIR"]]
    clrd = clrd[clrd["LOB"] == "wkcomp"]

    estimators = [('bcl', cl.Chainladder()),
                  ('bf', cl.BornhuetterFerguson()),
                  ('cc', cl.CapeCod())]

    # 10 origin periods x 3 estimators.
    min_dim_weights = np.array([[1, 2, 3]] * 4 + [[0, 0.5, 0.5]] * 3 +
                               [[0, 0, 1]] * 3)
    # Identical weights with explicit leading broadcast axes prepended.
    mid_dim_weights = min_dim_weights[np.newaxis, ...]
    max_dim_weights = np.tile(min_dim_weights, (132, 1, 1, 1))

    def blended_ultimate(weights):
        # Fit a VotingChainladder with ``weights`` and return the total ultimate.
        return cl.VotingChainladder(
            estimators=estimators, weights=weights).fit(
                clrd['CumPaidLoss'],
                sample_weight=clrd["EarnedPremDIR"].latest_diagonal,
            ).ultimate_.sum()

    min_dim_ult = blended_ultimate(min_dim_weights)
    mid_dim_ult = blended_ultimate(mid_dim_weights)
    max_dim_ult = blended_ultimate(max_dim_weights)

    # BUG FIX: the original asserted abs(min - mid - max) < 1, which can only
    # hold when the ultimates are near zero; the intent is pairwise equality.
    assert abs(min_dim_ult - mid_dim_ult) < 1
    assert abs(mid_dim_ult - max_dim_ult) < 1
Example #2
0
def test_voting_ultimate(triangle_data, estimators, weights):
    """The voting ultimate must equal a hand-computed weighted blend of the
    component estimators' ultimates."""
    paid = triangle_data["CumPaidLoss"].sum()
    premium = triangle_data["EarnedPremDIR"].sum().latest_diagonal

    # Fit each component estimator on its own.
    bcl_ult = cl.Chainladder().fit(paid).ultimate_
    bf_ult = cl.BornhuetterFerguson().fit(
        paid, sample_weight=premium).ultimate_
    cc_ult = cl.CapeCod().fit(paid, sample_weight=premium).ultimate_

    # Ensemble fit with the same weights plus a (1, 2, 3) default.
    vot_ult = cl.VotingChainladder(
        estimators=estimators, weights=weights,
        default_weighting=(1, 2, 3),
    ).fit(paid, sample_weight=premium).ultimate_

    # Reproduce the blend manually: normalize by the per-origin weight total.
    w = np.array([[1, 2, 3]] * 4 + [[0, 0.5, 0.5]] * 3 + [[0, 0, 1]] * 3)
    w = w[..., np.newaxis]
    blended = (bcl_ult * w[..., 0, :] +
               bf_ult * w[..., 1, :] +
               cc_ult * w[..., 2, :]) / w.sum(axis=-2)

    assert abs(blended.sum() - vot_ult.sum()) < 1
Example #3
0
def test_voting_ultimate():
    """The voting ultimate should match the manual weighted average of the
    component ultimates (each weight row already sums to one, so no
    normalization is needed)."""
    clrd = cl.load_sample("clrd")[["CumPaidLoss", "EarnedPremDIR"]]
    clrd = clrd[clrd["LOB"] == "wkcomp"]
    paid = clrd["CumPaidLoss"].sum()
    premium = clrd["EarnedPremDIR"].sum().latest_diagonal

    # Component ultimates fitted independently.
    bcl_ult = cl.Chainladder().fit(paid).ultimate_
    bf_ult = cl.BornhuetterFerguson().fit(
        paid, sample_weight=premium).ultimate_
    cc_ult = cl.CapeCod().fit(paid, sample_weight=premium).ultimate_

    estimators = [('bcl', cl.Chainladder()),
                  ('bf', cl.BornhuetterFerguson()),
                  ('cc', cl.CapeCod())]
    weights = np.array([[0.25, 0.25, 0.5]] * 4 + [[0, 0.5, 0.5]] * 3 +
                       [[0, 0, 1]] * 3)

    vot_ult = cl.VotingChainladder(
        estimators=estimators, weights=weights,
    ).fit(paid, sample_weight=premium).ultimate_

    w = weights[..., np.newaxis]
    manual = (bcl_ult * w[..., 0, :] +
              bf_ult * w[..., 1, :] +
              cc_ult * w[..., 2, :]).sum()

    assert abs(manual - vot_ult.sum()) < 1
Example #4
0
def test_weight_broadcasting(triangle_data, estimators, weights):
    """Weights of every supported dimensionality must blend to the same
    ultimate; lower-dimensional weights broadcast over the leading axes."""
    mid_dim_weights = np.array(
        [[[1, 2, 3]] * 4 + [[0, 0.5, 0.5]] * 3 + [[0, 0, 1]] * 3] * 1)
    # BUG FIX: ``np.array(mid_dim_weights * 132)`` multiplied the weight
    # VALUES by 132 and kept the 3-D shape; the intent is 132 copies stacked
    # on a new leading index axis (one per wkcomp group), giving a 4-D array.
    max_dim_weights = np.tile(mid_dim_weights, (132, 1, 1, 1))

    def blended_ultimate(w):
        # Fit a VotingChainladder with weights ``w``; return the total ultimate.
        return cl.VotingChainladder(
            estimators=estimators, weights=w).fit(
                triangle_data['CumPaidLoss'],
                sample_weight=triangle_data["EarnedPremDIR"].latest_diagonal,
            ).ultimate_.sum()

    min_dim_ult = blended_ultimate(weights)
    mid_dim_ult = blended_ultimate(mid_dim_weights)
    max_dim_ult = blended_ultimate(max_dim_weights)

    # BUG FIX: the original asserted abs(min - mid - max) < 1, which can only
    # hold when the ultimates are near zero; the intent is pairwise equality.
    assert abs(min_dim_ult - mid_dim_ult) < 1
    assert abs(mid_dim_ult - max_dim_ult) < 1
Example #5
0
def test_different_backends(triangle_data, estimators, weights):
    """Predicting on the sparse backend must reproduce the ultimate of a
    model fitted on the numpy backend."""
    paid = triangle_data["CumPaidLoss"].sum()
    premium = triangle_data["EarnedPremDIR"].sum().latest_diagonal

    model = cl.VotingChainladder(
        estimators=estimators, weights=weights, default_weighting=(1, 2, 3))
    model.fit(paid.set_backend("numpy"),
              sample_weight=premium.set_backend("numpy"))

    sparse_ult = model.predict(
        paid.set_backend("sparse"),
        sample_weight=premium.set_backend("sparse")).ultimate_

    assert abs(sparse_ult.sum() - model.ultimate_.sum()) < 1
Example #6
0
def test_voting_predict():
    """A fitted VotingChainladder must accept a later valuation in
    ``predict`` without raising."""
    estimators = [
        ('bcl', cl.Chainladder()),
        ('bf', cl.BornhuetterFerguson()),
        ('cc', cl.CapeCod()),
    ]
    # 9 origin periods x 3 estimators.
    weights = np.array(
        [[1, 2, 3]] * 3 + [[0, 0.5, 0.5]] * 3 + [[0, 0, 1]] * 3)

    model = cl.VotingChainladder(estimators=estimators, weights=weights)
    model.fit(raa_1989, sample_weight=apriori_1989)
    model.predict(raa_1990, sample_weight=apriori_1990)
Example #7
0
def test_different_backends():
    """Fit on the numpy backend, predict on sparse; the ultimates must agree."""
    clrd = cl.load_sample("clrd")[["CumPaidLoss", "EarnedPremDIR"]]
    clrd = clrd[clrd["LOB"] == "wkcomp"]
    paid = clrd["CumPaidLoss"].sum()
    premium = clrd["EarnedPremDIR"].sum().latest_diagonal

    estimators = [('bcl', cl.Chainladder()),
                  ('bf', cl.BornhuetterFerguson()),
                  ('cc', cl.CapeCod())]
    weights = np.array([[1, 2, 3]] * 4 + [[0, 0.5, 0.5]] * 3 + [[0, 0, 1]] * 3)

    model = cl.VotingChainladder(estimators=estimators, weights=weights)
    model.fit(paid.set_backend("numpy"),
              sample_weight=premium.set_backend("numpy"))

    sparse_ult = model.predict(
        paid.set_backend("sparse"),
        sample_weight=premium.set_backend("sparse")).ultimate_

    assert abs(sparse_ult.sum() - model.ultimate_.sum()) < 1
import pandas as pd
import chainladder as cl

# Load the sample data and derive a flat apriori: every cell holds one tenth
# of the total chainladder ultimate.
raa = cl.load_sample('raa')
cl_ult = cl.Chainladder().fit(raa).ultimate_  # chainladder ultimate
apriori = cl_ult * 0 + (float(cl_ult.sum()) / 10)

# Component estimators to vote between
bcl = cl.Chainladder()
cc = cl.CapeCod()
estimators = [('bcl', bcl), ('cc', cc)]

# CapeCod alone after 1987; a 50/50 BCL/CC blend for earlier origins
vot = cl.VotingChainladder(
    estimators=estimators,
    weights=lambda origin: (0, 1) if origin.year > 1987 else (0.5, 0.5),
)
vot.fit(raa, sample_weight=apriori)

# Collect the IBNR of each model side by side for plotting
plot_ibnr = pd.concat(
    [bcl.fit(raa).ibnr_.to_frame(),
     vot.ibnr_.to_frame(),
     cc.fit(raa, sample_weight=apriori).ibnr_.to_frame()],
    axis=1)
plot_ibnr.columns = ['BCL', 'Voting', 'CC']

# Bar chart comparing the three IBNR estimates by accident year
g = plot_ibnr.plot(
    kind='bar', ylim=(0, None), grid=True,
    title='Voting Chainladder IBNR').set(
    xlabel='Accident Year', ylabel='Loss');