from dataclasses import replace
import numpy as np
import pandas as pd
import plotly.io as pio
from _example.notebook.helper_funcs import (append_curves_to_structure_df,
df_to_classes, get_xs_freqs,
plot_xs_freqs, print_layer_results,
process_curve_df, show_tree_chart)
from repy.distributions import Lognormal
from repy.exposure import (Curve, PAndCClassAssumptions, PAndCClassTree,
PAndCExposureAnalysis, PDLGDClassAssumptions,
PDLGDClassTree, PDLGDExposureAnalysis)
def _display_float(x):
    """Format a float for DataFrame display.

    Values with magnitude <= 1 are treated as rates and rendered as
    percentages; larger values get thousands separators, 2 dp.
    """
    return f'{x:.2%}' if abs(x) <= 1 else f'{x:,.2f}'


# PEP 8 discourages assigning a lambda to a name; a def also gives a
# useful repr when inspecting the option.
pd.options.display.float_format = _display_float
pio.renderers.default = 'notebook_connected'
Treaty Background¶
Cedant writes 5 lines of Casualty business:
- D&O
- Costs Included
- E&O
- Costs Included
- Med Mal
- Costs In Addition
- GL
- Costs In Addition
- Workers' Comp
- Costs In Addition
Cedant writes 10m maximum line. In the example data, the following are written for each LOB:
- 1m primary acceptances, written 100%
- 4m xs 1m acceptances, written 75%
- "Supported excess", stacks with the primary
- 15m xs 5m acceptances, written 40%
- "Supported excess", stacks with the primary and 4m xs 1m
- 10m xs 10m acceptances, written 50%
- "Unsupported Excess", does not stack
The treaty layers are as follows:
- 1m xs 1m
- 3m xs 2m
- 5m xs 5m
The costs treatment for the treaty layers follows the underlying policies
- Included -> Included
- In Addition -> Pro Rata In Addition
Analysis 1: LOBs as Modelling Classes¶
Analysis Structure¶
Exposure Curves¶
# Load and pre-process the exposure-curve definitions we intend to use,
# keyed by analysis class.
curve_df = process_curve_df(
    pd.read_csv('_example/notebook/curves.csv', index_col='analysis_cls')
)
curve_df
curve_type | curve_params | curve_inc_alae | severity_scale | |
---|---|---|---|---|
analysis_cls | ||||
Loading... (need help?) |
# Form Curves and store in dict, keyed by analysis class.
# Note: the last 2 columns in the curve DataFrame are optional kwargs.
curve_dict = df_to_classes(curve_df, Curve,
                           optional_kwargs=['curve_inc_alae', 'severity_scale'])
curve_dict
{'D&O': Curve(curve_type='Lognormal', curve_params=[12, 2], curve_inc_alae=True, severity_scale=1.25), 'E&O': Curve(curve_type='Pareto', curve_params=[1000000.0, 1.8], curve_inc_alae=False, severity_scale=1.0), 'Med Mal': Curve(curve_type='UserILFs', curve_params=[(1000000.0, 2000000.0, 5000000.0, 10000000.0, 20000000.0), (1, 1.5, 1.7, 1.8, 1.85)], curve_inc_alae=False, severity_scale=1.0), 'GL': Curve(curve_type='MixedExponential', curve_params=[(10000.0, 1000000.0, 10000000.0), (0.6, 0.3, 0.1)], curve_inc_alae=False, severity_scale=1.0), 'Workers Comp': Curve(curve_type='ELPPFs', curve_params=[(1000000.0, 2000000.0, 5000000.0, 10000000.0, 20000000.0), (0.2, 0.08, 0.03, 0.015, 0.01)], curve_inc_alae=False, severity_scale=1.0)}
# Curves take array-like inputs and use broadcasting:
# a column of first params against a row of second params yields a 3x3 grid.
param_col = np.array([12, 12.5, 13]).reshape((-1, 1))
param_row = [2, 2.5, 3]
Lognormal.lev(1e6, param_col, param_row)
array([[347196.82745759, 374799.04776828, 394339.63813165], [433316.8583742 , 447185.68394235, 456384.89301557], [523696.0825669 , 521766.85523259, 519691.97689529]])
Remaining Structural Elements¶
# Read in analysis class structure details.
# One row per analysis class: loss ratio, prospective premium and
# policy/RI costs treatments (see columns displayed below).
structure_df = pd.read_csv('_example/notebook/structure.csv',
                           index_col='analysis_cls')
structure_df
glr | glr_includes_alae | prm_prospective | pol_costs | ri_costs | alae_pc_indemnity | |
---|---|---|---|---|---|---|
analysis_cls | ||||||
Loading... (need help?) |
# Insert column with curves for each analysis class.
# Each entry is [(curve, weight)].
# In this first example, each class has a single curve with 100% weight.
# NOTE(review): the helper appears to mutate structure_df in place — there
# is no assignment of a return value. Confirm against helper_funcs.
append_curves_to_structure_df(structure_df, curve_dict)
structure_df
glr | glr_includes_alae | prm_prospective | pol_costs | ri_costs | alae_pc_indemnity | curves | |
---|---|---|---|---|---|---|---|
analysis_cls | |||||||
Loading... (need help?) |
# Form dict of analysis class assumptions, keyed by analysis class.
# 'alae_pc_indemnity' is an optional column, passed via optional_kwargs.
analysis_cls_dict = df_to_classes(structure_df, PAndCClassAssumptions,
                                  optional_kwargs=['alae_pc_indemnity'])
analysis_cls_dict
{'D&O': PAndCClassAssumptions(curves=[(Curve(curve_type='Lognormal', curve_params=[12, 2], curve_inc_alae=True, severity_scale=1.25), 1)], glr=0.6, prm_prospective=10000000.0, glr_includes_alae=True, alae_pc_indemnity=0.3, pol_costs='Included Within Limit', ri_costs='Included'), 'E&O': PAndCClassAssumptions(curves=[(Curve(curve_type='Pareto', curve_params=[1000000.0, 1.8], curve_inc_alae=False, severity_scale=1.0), 1)], glr=0.55, prm_prospective=8000000.0, glr_includes_alae=True, alae_pc_indemnity=0.1, pol_costs='Included Within Limit', ri_costs='Included'), 'Med Mal': PAndCClassAssumptions(curves=[(Curve(curve_type='UserILFs', curve_params=[(1000000.0, 2000000.0, 5000000.0, 10000000.0, 20000000.0), (1, 1.5, 1.7, 1.8, 1.85)], curve_inc_alae=False, severity_scale=1.0), 1)], glr=0.7, prm_prospective=9000000.0, glr_includes_alae=True, alae_pc_indemnity=0.05, pol_costs='In Addition', ri_costs='PRIA'), 'GL': PAndCClassAssumptions(curves=[(Curve(curve_type='MixedExponential', curve_params=[(10000.0, 1000000.0, 10000000.0), (0.6, 0.3, 0.1)], curve_inc_alae=False, severity_scale=1.0), 1)], glr=0.65, prm_prospective=10000000.0, glr_includes_alae=True, alae_pc_indemnity=0.05, pol_costs='In Addition', ri_costs='PRIA'), 'Workers Comp': PAndCClassAssumptions(curves=[(Curve(curve_type='ELPPFs', curve_params=[(1000000.0, 2000000.0, 5000000.0, 10000000.0, 20000000.0), (0.2, 0.08, 0.03, 0.015, 0.01)], curve_inc_alae=False, severity_scale=1.0), 1)], glr=0.8, prm_prospective=7000000.0, glr_includes_alae=True, alae_pc_indemnity=0.05, pol_costs='In Addition', ri_costs='PRIA')}
# Form analysis structure.
# This is of hierarchical tree form, although in this first example we
# just have root and leaves, no branches — one leaf per analysis class.
structure = PAndCClassTree.as_flat_classes(analysis_cls_dict)
Exposure Listing¶
# Read policy data: one row per acceptance, with columns
# stack_id, cls_id, lim, xs, share, prm, user_description.
df = pd.read_csv('_example/notebook/policy_data.csv')
df
stack_id | cls_id | lim | xs | share | prm | user_description |
---|---|---|---|---|---|---|
Loading... (need help?) |
Rate Layers¶
# Build the exposure analysis from the policy listing and class structure,
# then rate the three treaty layers given as (limit, excess) pairs
# (1m xs 1m, 3m xs 2m, 5m xs 5m — per the treaty background above).
analysis = PAndCExposureAnalysis(df, structure)
layers = [(1e6, 1e6), (3e6, 2e6), (5e6, 5e6)]
analysis.rate(layers)
Results by Layer¶
print_layer_results(display, analysis, layers)
pro_rata_rate | ltlr | xs_rate | prm_profiled | prm_prospective | prospective_xs_prm | |
---|---|---|---|---|---|---|
Layer 1 | ||||||
Loading... (need help?) |
pro_rata_rate | ltlr | xs_rate | prm_profiled | prm_prospective | prospective_xs_prm | |
---|---|---|---|---|---|---|
Layer 2 | ||||||
Loading... (need help?) |
pro_rata_rate | ltlr | xs_rate | prm_profiled | prm_prospective | prospective_xs_prm | |
---|---|---|---|---|---|---|
Layer 3 | ||||||
Loading... (need help?) |
show_tree_chart(analysis, 2)
Excess Frequencies¶
# Tabulate excess frequencies at thresholds from 1m to 10m,
# for the whole analysis and for each class (see columns below).
xs_freqs = get_xs_freqs(analysis, start=1e6, stop=10e6)
xs_freqs
Threshold | Analysis | Analysis - Med Mal | Analysis - E&O | Analysis - D&O | Analysis - GL | Analysis - Workers Comp |
---|---|---|---|---|---|---|
Loading... (need help?) |
plot_xs_freqs(xs_freqs)
Analysis 2: Hierarchical Classes¶
In this extension of the example, the cedant now writes property (10m primary policies, written 100%), as well as casualty.
Furthermore, for the E&O casualty LOB, we become aware that the cedant is reweighting its book, writing greater unsupported excess. Hence, for E&O, we wish to split our analysis into Unsupported Excess and "Other" business.
Analysis Structure¶
# Create property curve from user-defined first-loss scale points.
# NOTE(review): curve_params appears to be [x-points, y-points] — confirm
# against Curve's 'UserFLS' handling.
curve_property = Curve(curve_type='UserFLS', curve_params=[
    [0, 0.1, 0.9, 1], [0, 0.8, 0.99, 1]
])
# Form property analysis class data: single 100%-weight curve, glr 0.4,
# 50m prospective premium; costs kwargs left at their defaults.
property_data = PAndCClassAssumptions(
    curves=[(curve_property, 1)], glr=0.4, prm_prospective=50e6
)
# Form modified analysis class data for E&O, now we're splitting the class.
# dataclasses.replace copies the existing E&O assumptions, overriding only
# the prospective premium (2m + 6m preserves the original 8m total).
eo_unsupported_data = replace(structure['E&O'].data, prm_prospective=2e6)
eo_other_data = replace(structure['E&O'].data, prm_prospective=6e6)
# Form a hierarchical analysis class tree.
# Most of our rating assumptions stay as before, but we add property
# and apportion the prospective premium into the 2 subclasses for E&O.
# cls_ids 1, 2, ...
# NOTE(review): leaf cls_ids appear to follow insertion order, hence the
# id 6/7 comments below — confirm against PAndCClassTree.from_leaves.
structure_dict = {
    ('Casualty', 'D&O'): structure['D&O'].data,
    ('Casualty', 'E&O', 'Other'): eo_other_data,
    ('Casualty', 'Med Mal'): structure['Med Mal'].data,
    ('Casualty', 'GL'): structure['GL'].data,
    ('Casualty', 'Workers Comp'): structure['Workers Comp'].data,
    ('Property',): property_data,  # id 6.
    ('Casualty', 'E&O', 'Unsupported Excess'): eo_unsupported_data,  # id 7.
}
structure = PAndCClassTree.from_leaves(structure_dict)
Exposure Listing¶
# Extend the existing listing with a single row for the property
# exposure, making sure to stamp it with cls_id 6.
property_row = pd.DataFrame([{
    'stack_id': 9999,  # No stacking with existing exposures.
    'cls_id': 6,  # Matching structure, above.
    'lim': 10e6,
    'xs': 0,
    'share': 1,
    'prm': 6.5e6,
    'user_description': 'Property',
}])
df = pd.concat([df, property_row], ignore_index=True)
df
stack_id | cls_id | lim | xs | share | prm | user_description |
---|---|---|---|---|---|---|
Loading... (need help?) |
# Our policy listing currently has cls_id 2 allocated to both
# "Unsupported Excess" and "Other". Move the unsupported excess rows
# (identifiable by their 10m limit) onto cls_id 7.
is_unsupported_xs = df['cls_id'].eq(2) & df['lim'].eq(10e6)
df.loc[is_unsupported_xs, 'cls_id'] = 7
df
stack_id | cls_id | lim | xs | share | prm | user_description |
---|---|---|---|---|---|---|
Loading... (need help?) |
Rate Layers¶
# Rebuild the analysis with the extended listing and hierarchical
# structure, then re-rate the same three treaty layers.
analysis = PAndCExposureAnalysis(df, structure)
analysis.rate(layers)
Results by Layer¶
print_layer_results(display, analysis, layers)
pro_rata_rate | ltlr | xs_rate | prm_profiled | prm_prospective | prospective_xs_prm | |
---|---|---|---|---|---|---|
Layer 1 | ||||||
Loading... (need help?) |
pro_rata_rate | ltlr | xs_rate | prm_profiled | prm_prospective | prospective_xs_prm | |
---|---|---|---|---|---|---|
Layer 2 | ||||||
Loading... (need help?) |
pro_rata_rate | ltlr | xs_rate | prm_profiled | prm_prospective | prospective_xs_prm | |
---|---|---|---|---|---|---|
Layer 3 | ||||||
Loading... (need help?) |
show_tree_chart(analysis, 2)
# We can add results from any leaf nodes we desire using '+' operator.
# Here: Property plus the E&O Unsupported Excess split.
# NOTE(review): results(1) presumably selects the first layer — confirm.
analysis.results(1)['Property'].data + \
analysis.results(1)[('Casualty', 'E&O', 'Unsupported Excess')].data
PAndCStats(pro_rata_rate=0.10384615384615385, ltlr=0.03184435155304396, xs_rate=0.014088912584943419, prm_profiled=6700000.0, prm_prospective=52000000.0)
# We can see the full row-by-row calcs that generated these results
# by uncommenting the line below.
# analysis.expos_calcs
Excess Frequencies¶
# Recompute excess frequencies, now including property and the E&O splits.
xs_freqs = get_xs_freqs(analysis, start=1e6, stop=10e6)
xs_freqs
Threshold | Analysis | Analysis - Casualty - Workers Comp | Analysis - Casualty - E&O | Analysis - Casualty - GL | Analysis - Casualty - Med Mal | Analysis - Property | Analysis - Casualty - E&O - Other | Analysis - Casualty - D&O | Analysis - Casualty - E&O - Unsupported Excess | Analysis - Casualty |
---|---|---|---|---|---|---|---|---|---|---|
Loading... (need help?) |
plot_xs_freqs(xs_freqs)
Analysis 3: Weighted Curves¶
In this extension of example 2, we wish to use a State split for the GL line of business. We will simplify here and assume only two States, each with its own exposure curve.
Analysis Structure¶
# GL is now modelled as a weighted two-curve (State-split) mixture.
# First State: the MixedExponential used previously, at 10% weight.
gl_curve_state_1 = Curve(curve_type='MixedExponential', curve_params=[
    [10e3, 1e6, 10e6], [0.6, 0.3, 0.1]
])
# Second State: a severe 'Swiss' first-loss-scale curve, at 90% weight.
gl_curve_state_2 = Curve(curve_type='Swiss', curve_params=0.1)
gl_weighted_curves = [(gl_curve_state_1, 0.1), (gl_curve_state_2, 0.9)]
# Swap the weighted curves into the existing GL assumptions and rebuild.
structure_dict[('Casualty', 'GL')] = replace(
    structure_dict[('Casualty', 'GL')], curves=gl_weighted_curves
)
structure = PAndCClassTree.from_leaves(structure_dict)
Rate Layers¶
# Rebuild and re-rate with the weighted GL curves in place.
analysis = PAndCExposureAnalysis(df, structure)
analysis.rate(layers)
Results by Layer¶
print_layer_results(display, analysis, layers)
pro_rata_rate | ltlr | xs_rate | prm_profiled | prm_prospective | prospective_xs_prm | |
---|---|---|---|---|---|---|
Layer 1 | ||||||
Loading... (need help?) |
pro_rata_rate | ltlr | xs_rate | prm_profiled | prm_prospective | prospective_xs_prm | |
---|---|---|---|---|---|---|
Layer 2 | ||||||
Loading... (need help?) |
pro_rata_rate | ltlr | xs_rate | prm_profiled | prm_prospective | prospective_xs_prm | |
---|---|---|---|---|---|---|
Layer 3 | ||||||
Loading... (need help?) |
show_tree_chart(analysis, 2)
Excess Frequencies¶
# Excess frequencies for the weighted-curve run, 1m to 10m thresholds.
xs_freqs = get_xs_freqs(analysis, start=1e6, stop=10e6)
xs_freqs
Threshold | Analysis | Analysis - Property | Analysis - Casualty - GL | Analysis - Casualty - Med Mal | Analysis - Casualty - E&O - Unsupported Excess | Analysis - Casualty - E&O - Other | Analysis - Casualty | Analysis - Casualty - Workers Comp | Analysis - Casualty - E&O | Analysis - Casualty - D&O |
---|---|---|---|---|---|---|---|---|---|---|
Loading... (need help?) |
plot_xs_freqs(xs_freqs)
Analysis 4: Credit / Surety¶
# Build a synthetic 10,000-risk credit/surety listing, reproducibly seeded.
# The four random draws happen in the same order as before, so the seeded
# values are unchanged.
np.random.seed(42)
limits = np.random.choice([1e6, 20e6, 100e6], 10_000, p=[0.1, 0.89, 0.01])
default_probs = np.random.choice([0.01, 0.1], 10_000, p=[0.9, 0.1])
lgd_means = np.random.choice([0.06, 0.2], 10_000)
lgd_covs = np.random.uniform(0.3, 0.6, 10_000)
df = pd.DataFrame({
    'cls_id': [1, 2, 3] * 3_333 + [1],  # 9,999 cycled ids plus one extra.
    'lim': limits,
    'pd': default_probs,
    'lgd_mean': lgd_means,
    'lgd_cov': lgd_covs,
})
df
cls_id | lim | pd | lgd_mean | lgd_cov |
---|---|---|---|---|
Loading... (need help?) |
# Build a PD/LGD class tree (probability of default / loss given default,
# matching the 'pd'/'lgd_*' columns above): two credit subclasses plus
# a surety class.
structure = PDLGDClassTree.from_leaves({
    ('Credit', 'WTO'): PDLGDClassAssumptions(prm_prospective=30e6),
    ('Credit', 'Structured'): PDLGDClassAssumptions(prm_prospective=30e6),
    ('Surety',): PDLGDClassAssumptions(prm_prospective=20e6)
})
# Rate two treaty layers, (limit, excess): 10m xs 10m and 30m xs 20m,
# and display the per-layer results.
analysis = PDLGDExposureAnalysis(df, structure)
layers = [(10e6, 10e6), (30e6, 20e6)]
analysis.rate(layers)
print_layer_results(display, analysis, layers)
xs_prm | xs_rate | prm_prospective | |
---|---|---|---|
Layer 1 | |||
Loading... (need help?) |
xs_prm | xs_rate | prm_prospective | |
---|---|---|---|
Layer 2 | |||
Loading... (need help?) |
# Excess frequencies from 10m to 100m, matching the larger credit limits.
xs_freqs = get_xs_freqs(analysis, start=10e6, stop=100e6)
xs_freqs
Threshold | Analysis | Analysis - Surety | Analysis - Credit - WTO | Analysis - Credit - Structured | Analysis - Credit |
---|---|---|---|---|---|
Loading... (need help?) |
plot_xs_freqs(xs_freqs)