Example #1
0
def test_basic_transform(raa):
    """Smoke test: every core estimator's fit_transform runs on the raa fixture."""
    estimators = [
        cl.Development(),
        cl.ClarkLDF(),
        cl.TailClark(),
        cl.TailBondy(),
        cl.TailConstant(),
        cl.TailCurve(),
        cl.BootstrapODPSample(),
    ]
    for estimator in estimators:
        estimator.fit_transform(raa)
    # IncrementalAdditive additionally requires an exposure vector.
    cl.IncrementalAdditive().fit_transform(raa, sample_weight=raa.latest_diagonal)
def test_basic_transform():
    """Smoke test: every core estimator's fit_transform runs on the 'raa' sample."""
    tri = cl.load_sample("raa")
    for make in (cl.Development, cl.ClarkLDF, cl.TailClark, cl.TailBondy,
                 cl.TailConstant, cl.TailCurve, cl.BootstrapODPSample):
        make().fit_transform(tri)
    # IncrementalAdditive additionally requires an exposure vector.
    cl.IncrementalAdditive().fit_transform(tri, sample_weight=tri.latest_diagonal)
def test_bs_random_state_predict():
    """predict() with the fitted sample weight reproduces the fitted IBNR to 0.5%."""
    clrd = cl.load_sample("clrd").groupby("LOB").sum()
    tri = clrd.loc["wkcomp", ["CumPaidLoss", "EarnedPremNet"]]
    premium = tri["EarnedPremNet"].latest_diagonal
    X = cl.BootstrapODPSample(random_state=100).fit_transform(tri["CumPaidLoss"])
    bf = cl.BornhuetterFerguson(
        apriori=0.6, apriori_sigma=0.1, random_state=42
    ).fit(X, sample_weight=premium)
    predicted = bf.predict(X, sample_weight=premium).ibnr_.sum().sum()
    fitted = bf.ibnr_.sum().sum()
    # Relative tolerance rather than exact equality: the fit is stochastic.
    assert abs(predicted / fitted - 1) < 5e-3
def test_bs_random_state_predict():
    """predict() with the fitted sample weight reproduces the fitted IBNR.

    NOTE(review): the original asserted ``predict(...).ibnr_ == bf.ibnr_``,
    which compares two Triangle objects elementwise rather than producing a
    single pass/fail boolean, and exact float equality is fragile for a
    stochastic bootstrap fit.  Assert a tight relative tolerance on the
    totals instead, matching the companion test elsewhere in this file.
    """
    tri = cl.load_dataset('clrd').groupby('LOB').sum().loc[
        'wkcomp', ['CumPaidLoss', 'EarnedPremNet']]
    X = cl.BootstrapODPSample(random_state=100).fit_transform(
        tri['CumPaidLoss'])
    bf = cl.BornhuetterFerguson(
        apriori=0.6, apriori_sigma=0.1, random_state=42).fit(
            X, sample_weight=tri['EarnedPremNet'].latest_diagonal)
    predicted = bf.predict(
        X, sample_weight=tri['EarnedPremNet'].latest_diagonal).ibnr_
    # Compare aggregate IBNR with a 0.5% relative tolerance.
    assert abs(predicted.sum().sum() / bf.ibnr_.sum().sum() - 1) < 5e-3
Example #5
0
def test_bs_sample(raa):
    """Mean of many bootstrap samples recovers the deterministic LDFs."""
    sims = cl.BootstrapODPSample(n_sims=40000).fit_transform(raa)
    ldf_sim = cl.Development().fit(sims.mean()).ldf_
    ldf_det = cl.Development().fit_transform(raa).ldf_
    xp = raa.get_array_module()
    # Simulated LDFs should sit within 0.5% of the deterministic ones.
    assert xp.all(abs(((ldf_sim - ldf_det) / ldf_det).values) < 0.005)
Example #6
0
def test_bs_multiple_cols():
    """Bootstrapping a multi-column triangle preserves all four columns."""
    sampled = cl.BootstrapODPSample().fit_transform(
        cl.load_sample('berqsherm').iloc[0])
    # Default n_sims=1000 becomes the index axis; 4 columns, 8x8 triangle.
    assert sampled.shape == (1000, 4, 8, 8)
"""
======================
Value at Risk example
======================

This example uses the `BootstrapODPSample` to simulate new triangles that
are then used to simulate an IBNR distribution from which we can do
Value at Risk percentile lookups.
"""

import chainladder as cl
import seaborn as sns
sns.set_style('whitegrid')

# Load triangle
triangle = cl.load_dataset('genins')

# Create 1000 bootstrap samples of the triangle
resampled_triangles = cl.BootstrapODPSample().fit_transform(triangle)

# Create 1000 IBNR estimates
sim_ibnr = cl.Chainladder().fit(resampled_triangles).ibnr_.sum('origin')

# X - mu
sim_ibnr = (sim_ibnr - sim_ibnr.mean()).to_frame().sort_values()

# Plot data
sim_ibnr.index = [item / 1000 for item in range(1000)]
sim_ibnr.loc[0.90:].plot(title='Bootstrap VaR (90% and above)',
                         color='red').set(xlabel='VaR')
3. We use the `broadcast_axis` method of the triangle class (new in 0.4.7)

"""
import chainladder as cl
import numpy as np

# Simulation parameters
random_state = 42
n_sims = 1000

# Get data
loss = cl.load_dataset('genins')
# Flat premium diagonal: zero out the latest diagonal, then add 8M per cell.
premium = loss.latest_diagonal*0+8e6

# Simulate loss triangles
sim = cl.BootstrapODPSample(random_state=random_state, n_sims=n_sims)
sim.fit(loss, sample_weight=premium)

# Repeat the premium triangle to align with simulated losses
sim_p = premium.broadcast_axis('index', sim.resampled_triangles_.index)

# Simulate aprioris using numpy: one normal draw per simulation, applied as
# a scalar multiplier across that simulation's premium diagonal.
apriori_mu = 0.65
apriori_sigma = .10
aprioris = np.random.normal(apriori_mu, apriori_sigma, n_sims)
# NOTE(review): no seed is set for this numpy draw, so aprioris differ run
# to run even though the bootstrap itself uses random_state=42.
sim_p.values = (sim_p.values * aprioris.reshape(n_sims,-1)[..., np.newaxis, np.newaxis])

# Fit Bornhuetter-Ferguson to stochastically generated data
model = cl.BornhuetterFerguson().fit(sim.resampled_triangles_, sample_weight=sim_p)

# Grab completed triangle replacing simulated known data with actual known data
def test_bs_sample():
    """Mean of 40,000 bootstrap samples recovers the deterministic LDFs."""
    tri = cl.load_dataset('raa')
    resampled = cl.BootstrapODPSample(n_sims=40000).fit_transform(tri)
    ldf_sim = cl.Development().fit(resampled.mean()).ldf_
    ldf_det = cl.Development().fit_transform(tri).ldf_
    # Simulated LDFs should sit within 0.5% of the deterministic ones.
    assert np.all(abs(((ldf_sim - ldf_det) / ldf_det).values) < .005)
Example #10
0
ODP Bootstrap Example
======================

This example demonstrates how you can can use the Overdispersed Poisson
Bootstrap sampler and get various properties about parameter uncertainty.
"""
import chainladder as cl

import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style('whitegrid')

#  Grab a Triangle
tri = cl.load_dataset('bs_sample')
# Generate bootstrap samples
sims = cl.BootstrapODPSample().fit_transform(tri)
# Calculate LDF for each simulation
sim_ldf = cl.Development().fit(sims).ldf_
# Keep only the most recent origin period's LDF pattern.
sim_ldf = sim_ldf[sim_ldf.origin == sim_ldf.origin.max()]

# Plot the Data: 2x2 grid — raw data, mean simulation, and (below) the
# simulated LDF fan.  NOTE(review): the final plot statement below ends in a
# line continuation whose remainder is missing from this excerpt.
fig, ((ax00, ax01), (ax10, ax11)) = plt.subplots(ncols=2,
                                                 nrows=2,
                                                 figsize=(10, 10))
tri.T.plot(ax=ax00).set(title='Raw Data',
                        xlabel='Development',
                        ylabel='Incurred')
sims.mean().T.plot(ax=ax01).set(title='Mean Simulation',
                                xlabel='Development',
                                ylabel='Incurred')
sim_ldf.T.plot(legend=False, color='lightgray', ax=ax10) \
Value at Risk example
======================

This example uses the `BootstrapODPSample` to simulate new triangles that
are then used to simulate an IBNR distribution from which we can do
Value at Risk percentile lookups.
"""

import chainladder as cl
import matplotlib.pyplot as plt

# Load triangle
triangle = cl.load_sample('genins')

# Create 1000 bootstrap samples of the triangle
resampled_triangles = cl.BootstrapODPSample(random_state=42).fit_transform(triangle)

# Create 1000 IBNR estimates (one total-IBNR per simulated triangle)
sim_ibnr = cl.Chainladder().fit(resampled_triangles).ibnr_.sum('origin')

# X - mu: center the simulated IBNR around its mean, then sort ascending
# so row position corresponds to the empirical percentile.
sim_ibnr = (sim_ibnr - sim_ibnr.mean()).to_frame().sort_values()

# Plot data: re-index by percentile (1000 sims -> steps of 0.001), rescale
# to millions, and show the tail at the 90th percentile and above.
fig, ax = plt.subplots()
sim_ibnr.index = [item/1000 for item in range(1000)]
(sim_ibnr/1e6).loc[0.90:].plot(kind='area', alpha=0.5,
    title='Bootstrap VaR (90% and above)', color='red', ax=ax).set(
    xlabel='Percentile', xlim=(0.899, 1.0), ylabel='Value (Millions)');
ax.grid(axis='y')
for spine in ax.spines:
Example #12
0
========================

This example demonstrates how you can drop the outlier link ratios from the
BootstrapODPSample to reduce reserve variability estimates.

"""
import chainladder as cl

import seaborn as sns
sns.set_style('whitegrid')

# Load triangle
triangle = cl.load_dataset('raa')

# Use bootstrap sampler to get resampled triangles
s1 = cl.BootstrapODPSample(n_sims=5000,
                           random_state=42).fit(triangle).resampled_triangles_

## Alternatively use fit_transform() to access resampled triangles dropping
#  outlier link-ratios from resampler
s2 = cl.BootstrapODPSample(drop_high=True,
                           drop_low=True,
                           n_sims=5000,
                           random_state=42).fit_transform(triangle)

# Summarize results of first model: total IBNR per simulation, labeled
# 'Original' so it can sit beside the outlier-dropped results.
results = cl.Chainladder().fit(s1).ibnr_.sum('origin').rename(
    'columns', ['Original'])
# Add another column to triangle with second set of results.
results['Dropped'] = cl.Chainladder().fit(s2).ibnr_.sum('origin')

# Plot both IBNR distributions