-rw-r--r--   python/analytics/scenarios.py                                  | 108
-rw-r--r--   python/graphics.py                                             |   2
-rw-r--r--   python/notebooks/tranche and swaption portfolio strategy.ipynb | 105
3 files changed, 184 insertions(+), 31 deletions(-)
diff --git a/python/analytics/scenarios.py b/python/analytics/scenarios.py
index d3ff5652..b389d7a3 100644
--- a/python/analytics/scenarios.py
+++ b/python/analytics/scenarios.py
@@ -1,4 +1,5 @@
from analytics import ATMstrike
+from joblib import delayed, Parallel
import pandas as pd
from copy import deepcopy
import numpy as np
@@ -8,6 +9,7 @@ from functools import partial
from multiprocessing import Pool
from .index_data import _get_singlenames_curves
from .curve_trades import curve_shape
+from scipy.interpolate import RectBivariateSpline
def run_swaption_scenarios(swaption, date_range, spread_shock, vol_shock,
vol_surface, params=["pv"], vol_time_roll=True):
@@ -109,49 +111,125 @@ def run_tranche_scenarios(tranche, spread_range, date_range, corr_map=False):
corr_map: static correlation or mapped correlation
"""
- #create empty lists
- index_pv = np.empty_like(spread_range)
- tranche_pv = np.empty((len(spread_range), tranche.K.size - 1))
- tranche_delta = np.empty((len(spread_range), tranche.K.size - 1))
-
- tranche.build_skew()
+ if np.isnan(tranche.rho[2]):
+ tranche.build_skew()
temp_tranche = deepcopy(tranche)
_get_singlenames_curves.cache_clear()
orig_tranche_pvs = tranche.tranche_pvs().bond_price
results = []
- print(tranche.tranche_pvs().bond_price)
+ index_pv = np.empty_like(spread_range)
+ tranche_pv = np.empty((len(spread_range), tranche.K.size - 1))
+ tranche_delta = np.empty((len(spread_range), tranche.K.size - 1))
for d in date_range:
temp_tranche.value_date = d.date()
for i, spread in enumerate(spread_range):
temp_tranche.tweak(spread)
- print(tranche.tranche_pvs().bond_price)
if corr_map:
temp_tranche.rho = tranche.map_skew(temp_tranche, 'TLP')
index_pv[i] = temp_tranche._snacpv(spread * 1e-4,
- temp_tranche.coupon(temp_tranche.maturity),
- temp_tranche.recovery)
+ temp_tranche.coupon(temp_tranche.maturity),
+ temp_tranche.recovery)
tranche_pv[i] = temp_tranche.tranche_pvs().bond_price
tranche_delta[i] = temp_tranche.tranche_deltas()['delta']
carry = temp_tranche.tranche_quotes.running * \
(d.date() - tranche.value_date).days / 360
df = pd.concat({'pv': pd.DataFrame(tranche_pv, index=spread_range,
- columns=tranche._row_names),
+ columns=tranche._row_names),
'delta': pd.DataFrame(tranche_delta, index=spread_range,
- columns=tranche._row_names),
+ columns=tranche._row_names),
'carry': pd.DataFrame(
np.tile(carry, (len(spread_range), 1)),
index=spread_range, columns=tranche._row_names)},
- axis=1)
+ axis=1)
df = df.join(
pd.concat({'pnl': df['pv'].sub(orig_tranche_pvs),
- 'index_price_snac_pv': pd.Series(index_pv, index=spread_range,
+ 'index_price_snac_pv': pd.Series(index_pv, index=spread_range,
name='pv')},
- axis=1))
+ axis=1))
results.append(df)
results = pd.concat(results, keys=date_range)
results.index.names = ['date', 'spread_range']
return results
+def run_tranche_scenarios_rolldown(tranche, spread_range, date_range, corr_map=False):
+ """computes the pnl of a tranche for a range of spread scenarios
+ the credit curve is rolled down from the back, and valuations on the dates in between are interpolated
+
+ Parameters
+ ----------
+ tranche : TrancheBasket
+ spread_range : `np.array`, spread range to run (absolute spread levels, unlike the relative shocks used for swaptions)
+ date_range : dates to run the scenarios for
+ corr_map : use the mapped correlation if True, otherwise the static correlation
+ """
+
+ if np.isnan(tranche.rho[2]):
+ tranche.build_skew()
+ temp_tranche = deepcopy(tranche)
+ orig_tranche_pvs = tranche.tranche_pvs().bond_price
+
+ #create blanks
+ index_pv, tranche_pv, tranche_delta = [], [], []
+ tranche_pv_f, tranche_delta_f = [], []
+
+ #run fewer scenarios here; it takes less time since the convexity is not as strong as for swaptions
+ days = np.diff((tranche.cs.index - date_range[0]).days.values)
+ num_shortened = np.sum(tranche.cs.index < date_range[-1])
+ shorten_by = np.arange(0, max(1, num_shortened)+1, 1)
+ days = np.append(0, np.cumsum(np.flip(days,0))[:len(shorten_by)-1])
+ smaller_spread_range = np.linspace(spread_range[0], spread_range[-1], 10)
+ for i, spread in enumerate(smaller_spread_range):
+ for shortened in shorten_by:
+ if shortened > 0:
+ temp_tranche.cs = tranche.cs.iloc[:-shortened]
+ else:
+ temp_tranche.cs = tranche.cs
+ temp_tranche.tweak(spread)
+ if corr_map:
+ temp_tranche.rho = tranche.map_skew(temp_tranche, 'TLP')
+ index_pv.append(temp_tranche.index_pv().bond_price)
+ tranche_pv.append(temp_tranche.tranche_pvs().bond_price)
+ tranche_delta.append(temp_tranche.tranche_deltas()['delta'])
+ index_pv = np.reshape(index_pv, (smaller_spread_range.shape[0], days.shape[0])).transpose()
+ tranche_pv = np.array(tranche_pv).transpose()
+ tranche_delta = np.array(tranche_delta).transpose()
+ index_pv_f = RectBivariateSpline(days, smaller_spread_range, index_pv, kx=1, ky=1)
+ for pv, delta in zip(tranche_pv, tranche_delta):
+ pv = np.reshape(pv, (smaller_spread_range.shape[0], days.shape[0])).transpose()
+ delta = np.reshape(delta, (smaller_spread_range.shape[0], days.shape[0])).transpose()
+ tranche_pv_f.append(RectBivariateSpline(days, smaller_spread_range, pv, kx=1, ky=1))
+ tranche_delta_f.append(RectBivariateSpline(days, smaller_spread_range, delta, kx=1, ky=1))
+
+ #Reset the blanks
+ date_range_days = (date_range - date_range[0]).days.values
+ tranche_pv = np.empty((tranche.K.size - 1, len(date_range_days), len(spread_range)))
+ tranche_delta = np.empty((tranche.K.size - 1, len(date_range_days), len(spread_range)))
+ index_pv = index_pv_f(date_range_days, spread_range)
+ for i in range(len(tranche_pv_f)):
+ tranche_pv[i] = tranche_pv_f[i](date_range_days, spread_range)
+ tranche_delta[i] = tranche_delta_f[i](date_range_days, spread_range)
+ index_pv = index_pv.reshape(1,len(date_range_days) * len(spread_range)).T
+ tranche_pv = tranche_pv.reshape(len(tranche._row_names),len(date_range_days) * len(spread_range)).T
+ tranche_delta = tranche_delta.reshape(len(tranche._row_names),len(date_range_days) * len(spread_range)).T
+ days_diff = np.tile(((date_range - date_range[0]).days/360).values, len(tranche._row_names))
+ carry = pd.DataFrame(days_diff.reshape(len(tranche._row_names),len(date_range)).T,
+ index=date_range,
+ columns=pd.MultiIndex.from_product([['carry'], tranche._row_names]))
+ carry.index.name = 'date'
+ df = pd.concat({'index_pv': pd.DataFrame(index_pv,
+ index=pd.MultiIndex.from_product([date_range, spread_range]),
+ columns=['index_pv']),
+ 'pv': pd.DataFrame(tranche_pv,
+ index=pd.MultiIndex.from_product([date_range, spread_range]),
+ columns=tranche._row_names),
+ 'delta': pd.DataFrame(tranche_delta,
+ index=pd.MultiIndex.from_product([date_range, spread_range]),
+ columns=tranche._row_names)},
+ axis=1)
+ df.index.names = ['date', 'spread_range']
+ df = df.join(carry)
+ df = df.join(pd.concat({'pnl': df['pv'].sub(orig_tranche_pvs)}, axis=1))
+ return df
+
def run_curve_scenarios(portf, spread_range, date_range, curve_per):
"""computes the pnl of a portfolio of indices for a range of spread/curve scenarios
diff --git a/python/graphics.py b/python/graphics.py
index fc9764bb..75740081 100644
--- a/python/graphics.py
+++ b/python/graphics.py
@@ -62,7 +62,7 @@ def plot_time_color_map(df, spread_shock, attr="pnl", path=".", color_map=cm.RdY
val_date = df.index[0].date()
df = df.reset_index()
- df['days'] = (df['date'] - val_date).dt.days
+ df['days'] = (df['date'].dt.date - val_date).dt.days
ascending = [True,True] if index == 'HY' else [True,False]
df.sort_values(by=['date','spread'], ascending = ascending, inplace = True)
date_range = df.days.unique()
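
The graphics.py change only adjusts how the day offset from the valuation date is computed before the color map is built. A minimal, self-contained sketch of that day-offset computation is below; the sample dates are made up, and it converts val_date to a Timestamp for the illustration rather than using the .dt.date form that the patch adds.

import pandas as pd
from datetime import date

df = pd.DataFrame({'date': pd.bdate_range('2018-06-01', periods=5)})
val_date = date(2018, 6, 1)
# days elapsed between each scenario date and the valuation date
df['days'] = (df['date'] - pd.Timestamp(val_date)).dt.days
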
diff --git a/python/notebooks/tranche and swaption portfolio strategy.ipynb b/python/notebooks/tranche and swaption portfolio strategy.ipynb
index 8e73f973..3bef916a 100644
--- a/python/notebooks/tranche and swaption portfolio strategy.ipynb
+++ b/python/notebooks/tranche and swaption portfolio strategy.ipynb
@@ -11,22 +11,15 @@
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"\n",
- "from analytics.scenarios import run_tranche_scenarios, run_portfolio_scenarios\n",
+ "from analytics.scenarios import run_tranche_scenarios, run_portfolio_scenarios, run_tranche_scenarios_rolldown\n",
"from analytics import Swaption, BlackSwaption, Index, BlackSwaptionVolSurface, Portfolio, ProbSurface\n",
"from db import dbengine\n",
"from datetime import date\n",
"from graphics import plot_time_color_map\n",
"\n",
- "dawnengine = dbengine('dawndb')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "value_date = (pd.datetime.today() - pd.offsets.BDay(1)).date()"
+ "dawnengine = dbengine('dawndb')\n",
+ "\n",
+ "value_date = (pd.datetime.today() - pd.offsets.BDay(2)).date()"
]
},
{
@@ -64,11 +57,12 @@
"#Run Swaption sensitivities\n",
"#Set Shock range\n",
"shock_min = -.3\n",
- "shock_max = 1.5\n",
+ "shock_max = 1.25\n",
"spread_shock = np.arange(shock_min, shock_max, 0.05)\n",
"#Set Date range\n",
"earliest_expiry = min(portf.swaptions, key=lambda x: x.exercise_date).exercise_date\n",
"date_range = pd.bdate_range(value_date, earliest_expiry - pd.offsets.BDay(), freq='20B')\n",
+ "#date_range = [earliest_expiry - pd.offsets.BDay()]\n",
"#Setup Vol Surface\n",
"vs = BlackSwaptionVolSurface(index,series, value_date=value_date)\n",
"ps = ProbSurface(index,series, value_date=value_date)\n",
@@ -98,7 +92,7 @@
"#Run tranche scenarios\n",
"temp = []\n",
"for i, r in pos.iterrows():\n",
- " df = run_tranche_scenarios(r.basket, spread_range, date_range, corr_map=False)\n",
+ " df = run_tranche_scenarios_rolldown(r.basket, spread_range, date_range, corr_map=False)\n",
" temp.append(r.notional*df.xs(str(r.attach) + \"-\" + str(r.detach), axis=1, level=1))\n",
"tranches_scens = sum(temp)"
]
@@ -109,6 +103,7 @@
"metadata": {},
"outputs": [],
"source": [
+ "#Create snapshot of the the first scenario date\n",
"total_scens = swaption_scens.reset_index().merge(tranches_scens.reset_index(), \n",
" left_on=['date', 'spread'], \n",
" right_on=['date', 'spread_range'], \n",
@@ -125,8 +120,8 @@
"metadata": {},
"outputs": [],
"source": [
- "#tranche positions delta at different spreads \n",
- "ax = total_scens_single_date.delta_t.plot()\n",
+ "#tranche positions delta at different spreads\n",
+ "ax = total_scens_single_date.delta_t.plot(title = 'delta vs. spread levels')\n",
"ax.ticklabel_format(style='plain')\n",
"plt.tight_layout()"
]
@@ -178,6 +173,86 @@
"execution_count": null,
"metadata": {},
"outputs": [],
+ "source": [
+ "#PNL of just the swaptions\n",
+ "plot_time_color_map(swaption_scens, spread_range, attr=\"pnl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Construct levered Super senior hedged with swaption\n",
+ "index = 'IG'\n",
+ "series = 30\n",
+ "option_delta = Index.from_name(index, series, '5yr')\n",
+ "option_delta.spread = 62\n",
+ "option_delta.notional = 1\n",
+ "option1 = BlackSwaption(option_delta, date(2018, 7, 19), 75, option_type=\"payer\")\n",
+ "option1.sigma = .52\n",
+ "option1.direction = 'Long'\n",
+ "option1.notional = 2_000_000_000\n",
+ "\n",
+ "#If we have two options instead of just one\n",
+ "option2 = BlackSwaption(option_delta, date(2018, 7, 19), 90, option_type=\"payer\")\n",
+ "option2.sigma = .68\n",
+ "option2.direction = 'Long'\n",
+ "option2.notional = 6_000_000_000\n",
+ "\n",
+ "option3 = BlackSwaption(option_delta, date(2018, 12, 19), 55, option_type=\"receiver\")\n",
+ "option3.sigma = .373\n",
+ "option3.direction = 'Short'\n",
+ "option3.notional = 5_000_000_000\n",
+ "\n",
+ "#portf = Portfolio([option1, option_delta])\n",
+ "portf = Portfolio([option1, option2, option3, option_delta])\n",
+ "portf.value_date = value_date\n",
+ "portf.reset_pv()\n",
+ "#Run Swaption sensitivities\n",
+ "#Set Shock range\n",
+ "shock_min = -.5\n",
+ "shock_max = 1.25\n",
+ "spread_shock = np.arange(shock_min, shock_max, 0.05)\n",
+ "#Set Date range\n",
+ "earliest_expiry = min(portf.swaptions, key=lambda x: x.exercise_date).exercise_date\n",
+ "date_range = pd.bdate_range(value_date, earliest_expiry - pd.offsets.BDay(), freq='10B')\n",
+ "#Setup Vol Surface\n",
+ "vs = BlackSwaptionVolSurface(index,series, value_date=value_date)\n",
+ "ps = ProbSurface(index,series, value_date=value_date)\n",
+ "vol_surface = vs[vs.list(option_type='payer')[-1]]\n",
+ "swaption_scens = run_portfolio_scenarios(portf, date_range, spread_shock, np.array([0]),\n",
+ " vol_surface, params=[\"pnl\", \"delta\"])\n",
+ "#swaption delta is in protection terms: switch to risk terms\n",
+ "swaption_scens.delta = -swaption_scens.delta\n",
+ "\n",
+ "notional = 30_000_000_000\n",
+ "t = bkt.TrancheBasket('IG', '29', '3yr')\n",
+ "t.build_skew()\n",
+ "#get back to 17bps, .36 delta\n",
+ "port_spread = 67\n",
+ "#t.rho = np.array([np.nan, 0.39691196, 0.48904597, 0.8, np.nan])\n",
+ "t.tweak(port_spread)\n",
+ "spread_range = (1+ spread_shock) * port_spread\n",
+ "tranches_scens = run_tranche_scenarios_rolldown(t, spread_range, date_range, corr_map=False)\n",
+ "tranches_scens = notional*tranches_scens.xs('15-100', axis=1, level=1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dawnengine.dispose()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
"source": []
}
],