def test_base_minimum_exposure_triangle():
    """An exposure-style vector (origin only, no development) constructs cleanly.

    Builds a constant 50,000 exposure from the raa diagonal and feeds it back
    through the Triangle constructor with string origin labels.
    """
    # Load once and reuse — the original called cl.load_dataset('raa') twice.
    raa_tri = cl.load_dataset('raa')
    raa = (raa_tri.latest_diagonal * 0 + 50000).to_frame().reset_index()
    # Origin labels must be strings for the Triangle constructor.
    raa['index'] = raa['index'].astype(str)
    cl.Triangle(raa, origin='index', columns=list(raa_tri.columns))
def test_malformed_init():
    """Monthly origin grain should be inferred from day-level accident dates."""
    accident_dates = [
        '2020-07-23', '2019-07-23', '2018-07-23', '2016-07-23',
        '2020-08-23', '2019-09-23', '2018-10-23',
    ]
    frame = pd.DataFrame({
        'Accident Date': accident_dates,
        'Valuation Date': ['2021-01-01'] * 7,
        'Loss': [10000, 10000, 10000, 10000, 0, 0, 0],
    })
    triangle = cl.Triangle(
        frame,
        origin='Accident Date',
        development='Valuation Date',
        columns='Loss',
    )
    assert triangle.origin_grain == 'M'
def test_init_vector(raa):
    """A premium vector built from a DataFrame aligns with the latest diagonal."""
    diagonal = raa.latest_diagonal
    premium_df = pd.DataFrame(
        {"AccYear": [item for item in range(1981, 1991)],
         "premium": [3000000] * 10}
    )
    premium = cl.Triangle(premium_df, origin="AccYear", columns="premium")
    # Both the valuation vector and the overall valuation date must line up.
    assert np.all(diagonal.valuation == premium.valuation)
    assert diagonal.valuation_date == premium.valuation_date
def test_partial_year(prism):
    """Round-tripping a mid-year diagonal through a DataFrame keeps its valuation date."""
    before = prism['Paid'].sum().incr_to_cum()
    before = before[before.valuation <= '2017-08'].latest_diagonal
    frame = before.to_frame(keepdims=True).reset_index()
    after = cl.Triangle(
        frame,
        origin='origin',
        development='valuation',
        columns='Paid',
        index=before.key_labels,
    )
    assert after.valuation_date == before.valuation_date
def test_init_vector():
    """Premium-vector valuation matches the first development column of raa."""
    # Guard: the module-level fixture must be untouched by earlier tests.
    assert raa == raa_gt
    first_dev = raa[raa.development == 12]
    premium_df = pd.DataFrame(
        {"AccYear": [item for item in range(1981, 1991)],
         "premium": [3000000] * 10}
    )
    premium = cl.Triangle(premium_df, origin="AccYear", columns="premium")
    assert np.all(first_dev.valuation == premium.valuation)
    assert first_dev.valuation_date == premium.valuation_date
def test_exposure_tri():
    """An exposure vector round-trips through a long-format DataFrame."""
    source = cl.load_sample("auto")
    vector = source[source.development == 12]
    frame = vector["paid"].to_frame().T.unstack().reset_index()
    frame.columns = ["LOB", "origin", "paid"]
    # Origin labels must be strings for the constructor.
    frame.origin = frame.origin.astype(str)
    rebuilt = cl.Triangle(frame, origin="origin", index="LOB", columns="paid")
    expected = cl.load_sample("auto")["paid"]
    expected = expected[expected.development == 12]
    assert expected == rebuilt
def test_exposure_tri():
    """Rebuilding the auto paid vector from a melted frame reproduces it."""
    auto = cl.load_sample('auto')
    first_dev = auto[auto.development == 12]
    melted = first_dev['paid'].to_frame().T.unstack().reset_index()
    melted.columns = ['LOB', 'origin', 'paid']
    melted.origin = melted.origin.astype(str)
    rebuilt = cl.Triangle(melted, origin='origin', index='LOB', columns='paid')
    reference = cl.load_sample('auto')['paid']
    reference = reference[reference.development == 12]
    assert reference == rebuilt
def __init__(self):
    # Load the raw claims data through the project's Data_Handler.
    self.Data_Handler = Data_Handler()
    ''' Using the chainladder (cl) package: https://chainladder-python.readthedocs.io/en/latest/index.html '''
    # Incremental (cumulative=False) paid-claims triangle: accident month as
    # origin vs. accounting month as development, segmented by the index keys.
    self.Triangles = cl.Triangle(self.Data_Handler.data,
                                 origin='ACCIDENT_MONTH_DATE',
                                 development='ACCOUNTING_MONTH_DATE',
                                 columns='CLAIM_PAID_EX_GST_D',
                                 index=['PRODUCT_CODE_CLAIM', 'CLAIM_CATEGORY',
                                        'CLAIM_CATEGORY_RESERVING',
                                        'CLIENT_NAME', 'RISK_GROUP'
                                        # 'PRODUCT_GROUP'
                                        ],
                                 cumulative=False)
    # NOTE(review): `filter_` is not defined anywhere visible in this method —
    # presumably a module-level global or a missing __init__ parameter; as
    # written this raises NameError unless it exists in an enclosing scope.
    self.filter_ = filter_
def test_base_minimum_exposure_triangle():
    """A constant exposure vector can be rebuilt from a reset-index frame."""
    # Guard: the module-level fixture must be untouched by earlier tests.
    assert raa == raa_gt
    frame = (raa.latest_diagonal * 0 + 50000).to_frame().reset_index()
    frame["index"] = frame["index"].astype(str)
    cl.Triangle(frame, origin="index", columns=frame.columns[-1])
def test_correct_valutaion(raa):
    # (sic: "valutaion" typo kept — renaming would change the public test id)
    """Rebuilding a shortened diagonal keeps the original valuation date."""
    frame = raa.iloc[..., :-3, :].latest_diagonal.to_frame(
        keepdims=True, implicit_axis=True)
    rebuilt = cl.Triangle(
        frame, origin='origin', development='valuation', columns='values')
    assert rebuilt.valuation_date == raa.valuation_date
def test_create_full_triangle(raa):
    """A fitted full_triangle_ survives a DataFrame round trip intact."""
    full = cl.Chainladder().fit(raa).full_triangle_
    frame = full.to_frame(keepdims=True, implicit_axis=True)
    rebuilt = cl.Triangle(
        frame, origin='origin', development='valuation', columns='values')
    assert full == rebuilt
def _melt_cumulative_triangle(frame, development, columns, value_name):
    """Build a cumulative triangle from *frame* and melt it into a long
    DataFrame with 'Origin Month' / 'Development Month' / *value_name* columns."""
    tri = cl.Triangle(
        frame,
        origin='Start_date',
        index='Class_name',
        development=development,
        columns=columns,
        cumulative=False).incr_to_cum()
    return (tri.to_frame()
               .reset_index()
               .melt(id_vars="index",
                     var_name='Development Month',
                     value_name=value_name)
               .rename({'index': 'Origin Month'}, axis='columns'))


def summary_triangles(data, reporting_date):
    '''
    Remove all data which is not known at the reporting date and convert
    data into triangles, for further analysis.

    Parameters
    ----------
    data : pandas.DataFrame
        Record-level policy/claim data with the date and value columns
        referenced below.
    reporting_date : datetime-like
        Valuation cut-off; anything dated after it is treated as unknown.

    Returns
    -------
    tuple
        (tri_all, data_policies) where tri_all is a long DataFrame joining
        paid/reported values and counts plus premium on
        ('Origin Month', 'Development Month'), and data_policies is the
        censored record-level data restricted to written policies.
    '''
    # --- filter out all data not known at the reporting date ---
    data_triangles = data.copy()

    def _days_after_reporting(col):
        # Signed day difference between a date column and the reporting date.
        return (data_triangles[col] - reporting_date) / np.timedelta64(1, 'D')

    date_mask_notpaid = _days_after_reporting('Claim_payment_date') > 0
    date_mask_notreported = _days_after_reporting('Claim_report_date') > 0
    date_mask_paid = _days_after_reporting('Claim_payment_date') <= 0
    date_mask_reported = _days_after_reporting('Claim_report_date') <= 0
    date_mask_notwritten = _days_after_reporting('Start_date') > 0

    # Clear paid and reported data which is unknown at the reporting date.
    data_triangles.loc[date_mask_notpaid, 'Claim_payment_date'] = np.nan
    data_triangles.loc[date_mask_notreported, [
        'Claim_report_date', 'Claim_incident_date',
        'Claim_value', 'Claim_value_gu']] = np.nan

    # Add counts for paid and reported claims.
    data_triangles['Claim_count_reported'] = 0
    data_triangles['Claim_count_paid'] = 0
    data_triangles.loc[date_mask_reported, 'Claim_count_reported'] = 1
    data_triangles.loc[date_mask_paid, 'Claim_count_paid'] = 1

    # Remove unwritten policies.
    data_policies = data_triangles.loc[~date_mask_notwritten].copy()

    tri_paid = _melt_cumulative_triangle(
        data_policies, 'Claim_payment_date', 'Claim_value', 'Paid Value')
    tri_paid_count = _melt_cumulative_triangle(
        data_policies, 'Claim_payment_date', 'Claim_count_paid', 'Paid Count')
    tri_reported = _melt_cumulative_triangle(
        data_policies, 'Claim_report_date', 'Claim_value', 'Reported Value')
    tri_reported_count = _melt_cumulative_triangle(
        data_policies, 'Claim_report_date', 'Claim_count_reported',
        'Reported Count')
    tri_premium = _melt_cumulative_triangle(
        data_policies, 'Policy_premium_date', 'Policy_premium',
        'Premium Value')

    # Join all triangles together into a single dataframe.
    # BUG FIX: the original restarted the chain from tri_paid on the second
    # merge (tri_all = tri_paid.merge(tri_reported, ...)), which silently
    # dropped the already-merged 'Paid Count' column from the result.
    join_cols = ['Origin Month', 'Development Month']
    tri_all = tri_paid.merge(tri_paid_count, on=join_cols)
    tri_all = tri_all.merge(tri_reported, on=join_cols)
    tri_all = tri_all.merge(tri_reported_count, on=join_cols)
    tri_all = tri_all.merge(tri_premium, on=join_cols)

    return tri_all, data_policies
================= Although triangles have both origin and development attributes, it is often convenient to create premium or exposure vectors that can work with loss triangles. The `Triangle` class treats the development parameter as optional. This example instantiates a 'premium' triangle as a single vector. """ import chainladder as cl import pandas as pd import chainladder as cl # Raw premium data in pandas premium_df = pd.DataFrame( {'AccYear':[item for item in range(1977, 1988)], 'premium': [3000000]*11}) # Create a premium 'triangle' with no development premium = cl.Triangle(premium_df, origin='AccYear', columns='premium') # Create some loss triangle loss = cl.load_sample('abc') ultimate = cl.Chainladder().fit(loss).ultimate_ # Plot (ultimate / premium).plot( kind='area', title='Loss Ratio by Accident Year', alpha=0.7, color='darkgreen', legend=False, grid=True).set( xlabel='Accident Year', ylabel='Loss Ratio');
def test_base_minimum_exposure_triangle():
    """An exposure vector with string origin labels constructs without error."""
    frame = (raa.latest_diagonal * 0 + 50000).to_frame().reset_index()
    frame['index'] = frame['index'].astype(str)
    cl.Triangle(frame, origin='index', columns=frame.columns[-1])
def test_df_period_input():
    """A latest diagonal survives a DataFrame round trip with period origins."""
    # Guard: the module-level fixture must be untouched by earlier tests.
    assert raa == raa_gt
    diagonal = raa.latest_diagonal
    frame = diagonal.to_frame().reset_index()
    rebuilt = cl.Triangle(frame, origin="index", columns=frame.columns[-1])
    assert rebuilt == diagonal
import chainladder as cl
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# Read in the data
lobs = 'wkcomp'
data = pd.read_csv(
    r'https://www.casact.org/research/reserve_data/wkcomp_pos.csv')
# Keep only observations with development year up to 1997.
data = data[data['DevelopmentYear'] <= 1997]

# Create a triangle: accident year vs. development year, one index level per
# group name, with three value columns.
triangle = cl.Triangle(
    data,
    origin='AccidentYear',
    development='DevelopmentYear',
    index=['GRNAME'],
    columns=['IncurLoss_D', 'CumPaidLoss_D', 'EarnedPremDIR_D'])

# Output
print('Raw data:')
print(data.head())
print()
print('Triangle summary:')
print(triangle)
print()
print('Aggregate Paid Triangle:')
print(triangle['CumPaidLoss_D'].sum())

# Reshape the aggregate paid triangle for plotting.
plot_data = triangle['CumPaidLoss_D'].sum().to_frame().unstack().reset_index()
# NOTE(review): the statement below is truncated in this chunk — the column
# list continues past the visible source.
plot_data.columns = [
#http://chainladder-python.readthedocs.io/en/master/quickstart.html import chainladder as chainl import numpy as numpi import pyliferisk as pylife import matplotlib.pyplot as plt ##%matplotlib inline ###############vars#################### s = "This is a test" ##############objects############### RAA = chainl.load_dataset('RAA') RAA_TriangleObj = chainl.Triangle(RAA) RAA_ChainL = chainl.Chainladder(RAA_TriangleObj) #############calls############## RAA.round(0) ############ sys out ##################### def main(): # my code here print(s + " 1") print(RAA.round(0)) print("\n\n\n\n") print(s + " 2") plt.plot(RAA_TriangleObj.cum_to_incr()) plt.ylabel("Aggregate Claim") plt.xlabel("Year") plt.show() print("\n\n\n\n") print(s + " 3")