Diffstat (limited to 'python/exploration/tranches.py')
-rw-r--r--  python/exploration/tranches.py  297
1 file changed, 297 insertions(+), 0 deletions(-)
diff --git a/python/exploration/tranches.py b/python/exploration/tranches.py
new file mode 100644
index 00000000..4cb1c4c3
--- /dev/null
+++ b/python/exploration/tranches.py
@@ -0,0 +1,297 @@
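+"""Exploratory relative-value and scenario analysis for CDS index tranches.
+
+Covers shortened on-the-run vs. previous-series correlation comparisons,
+portfolio dispersion by series, spread/time scenario grids for combined
+tranche and swaption positions, and colormap helpers for plotting PnL.
+"""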
+import analytics.tranche_functions as tch
+import analytics.tranche_basket as bkt
+import analytics.basket_index as idx_bkt
+import numpy as np
+import pandas as pd
+
+from analytics import Swaption, BlackSwaption, Index, VolatilitySurface, Portfolio
+from analytics.scenarios import run_swaption_scenarios, run_index_scenarios, run_portfolio_scenarios
+import exploration.swaption_calendar_spread as spread
+from operator import attrgetter
+from scipy.interpolate import interp1d
+
+import matplotlib
+import matplotlib.pyplot as plt
+from matplotlib import cm
+
+
+from datetime import date
+from db import dbengine
+engine = dbengine('serenitasdb')
+
+def rv_calc1():
+    #Price IG27 tranches off the IG29 skew; pull quotes from risk_numbers_new rather than ad-hoc ones.
+    #Compare IG29 shortened by one year (4 coupon periods) with TLP-mapped rho against the IG27 5y rho.
+ index = 'IG'
+ series = 29
+    series2 = series - 2
+ tenor = '5yr'
+ shortened = 4
+ method = 'TLP'
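+    # 'TLP' selects the tranche-loss-proportion style mapping in map_skew
+    # below (assumption: the name follows the usual skew-rescaling jargon)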
+
+ #Read existing results, find which ones need to run
+ try:
+ results = pd.read_csv("/home/serenitas/edwin/Python/rv_" + index + str(series) + ".csv", parse_dates=['date'], index_col=['date'])
+ except IOError:
+ results = pd.DataFrame()
+ sql_string = "select distinct date from risk_numbers_new where index = %s and series = %s order by date desc"
+    df = pd.read_sql_query(sql_string, engine, params=(index, series), parse_dates=['date'])
+    df1 = pd.read_sql_query(sql_string, engine, params=(index, series2), parse_dates=['date'])
+ df = df.merge(df1, on=['date'])
+ df = df[~df.date.isin(results.index)]
+
+ rho_tlp, pv_tlp, rho_prev_index, pv_prev_index = [], [], [], []
+
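+    # For each date: build the on-the-run skew, reprice a curve-shortened copy
+    # under the TLP-mapped skew, then reprice that same copy under the previous
+    # series' skew for comparison.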
+ for trade_date in df.date:
+        tranche = bkt.TrancheBasket(index, series, tenor, trade_date=trade_date)
+        tranche.build_skew()
+        tranche1 = bkt.TrancheBasket(index, series, tenor, trade_date=trade_date)
+ tranche1.cs = tranche1.cs[:-shortened]
+ tranche1.rho = tranche.map_skew(tranche1, method)
+ _, _, pv = tranche1.tranche_pvs()
+ rho_tlp.append(tranche1.rho[~np.isnan(tranche1.rho)])
+ pv_tlp.append(pv)
+
+        tranche2 = bkt.TrancheBasket(index, series2, tenor, trade_date=trade_date)
+ tranche2.build_skew()
+ rho_prev_index.append(tranche2.rho[~np.isnan(tranche2.rho)])
+
+ tranche1.rho = tranche2.rho
+ _, _, pv = tranche1.tranche_pvs()
+ pv_prev_index.append(pv)
+
+ temp1 = pd.DataFrame(rho_tlp, index=df.date, columns=['3_rho_tlp','7_rho_tlp','15_rho_tlp'])
+ temp2 = pd.DataFrame(pv_tlp, index=df.date, columns=['03_pv_tlp','37_pv_tlp','715_pv_tlp','15100_pv_tlp'])
+ temp3 = pd.DataFrame(rho_prev_index, index=df.date, columns=['3_rho_ig27','7_rho_ig27','15_rho_ig27'])
+ temp4 = pd.DataFrame(pv_prev_index, index=df.date, columns=['03_pv_ig27','37_pv_ig27','715_pv_ig27','15100_pv_ig27'])
+
+ results = results.append(pd.concat([temp1, temp2, temp3, temp4], axis=1))
+
+    results.to_csv("/home/serenitas/edwin/Python/rv_" + index + str(series) + ".csv")
+
+def dispersion(trade_date=pd.Timestamp.today().normalize() - pd.offsets.BDay()):
+
+ from quantlib.time.api import Schedule, Rule, Date, Period, WeekendsOnly
+ from quantlib.settings import Settings
+
+ curves = {}
+ maturities = {}
+ settings = Settings()
+ for series in [24, 25, 26, 27, 28, 29]:
+ index_temp = idx_bkt.MarkitBasketIndex('IG', series, ["5yr",], trade_date=trade_date)
+ maturities[series] = index_temp.maturities[0]
+ cds_schedule = Schedule.from_rule(settings.evaluation_date, Date.from_datetime(maturities[series]),
+ Period('3M'), WeekendsOnly(), date_generation_rule=Rule.CDS2015)
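+        # 134774 is the day count from 1601-01-01 to the numpy epoch
+        # (1970-01-01); presumably survival_matrix expects 1601-based serials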
+        sm, tickers = index_temp.survival_matrix(cds_schedule.to_npdates().view('int') + 134774)
+        curves[series] = pd.DataFrame(1 - sm, index=tickers, columns=cds_schedule.to_npdates())
+ #temp = (pd.to_datetime(maturities[series]) - datetime.datetime(1970,1,1)).days + 134774
+ #curves[series] = pd.concat([c.to_series() for _,_, c in index_temp.items()], axis=1)
+    curve_df = pd.concat(curves).stack()
+    # stack() appends the (date) column level last: (series, name, maturity)
+    curve_df.index.rename(['series', 'name', 'maturity'], inplace=True)
+ disp = {}
+    for series in [24, 25, 26, 27, 28, 29]:
+        temp = curve_df.xs((series, pd.Timestamp(maturities[series])),
+                           level=['series', 'maturity'])
+        # keep the riskiest decile of default probabilities and measure
+        # its coefficient of variation
+        temp = temp[pd.qcut(temp, 10, labels=False) == 9]
+        disp[series] = temp.std() / temp.mean()
+    disp = pd.concat(disp)
+    means = curve_df.groupby(['series', 'maturity']).mean()
+    stds = curve_df.groupby(['series', 'maturity']).std()
+    return disp, means, stds
+
+def scenarios(tranche, shock_range=None, roll_corr=False):
+
+ from copy import deepcopy
+
+ tranche.build_skew()
+ orig_tranche_cl, _, orig_tranche_pv = tranche.tranche_pvs()
+
+    if shock_range is None:
+        max_shock, n_steps = 1, 10
+        # default grid: -30% to +100% of the current index reference spread
+        shock_range = (1 + np.linspace(-.3, max_shock, n_steps)) * tranche.tranche_quotes.indexrefspread[0]
+
+    #preallocate result arrays
+ shock_index_pv_calc = np.empty(len(shock_range))
+ shock_tranche_pv = np.empty((len(shock_range), tranche.K.size - 1))
+ shock_tranche_delta = np.empty((len(shock_range), tranche.K.size - 1))
+ shock_tranche_cl = np.empty((len(shock_range), tranche.K.size - 1))
+ shock_tranche_carry = np.empty((len(shock_range), tranche.K.size - 1))
+ results = pd.DataFrame()
+
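+    # Roll down the curve by dropping 0, 1 or 2 coupon periods from the
+    # schedule, then re-shock the index spread across the grid at each horizon.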
+ for shortened in [0,1,2]:
+ temp_tranche = deepcopy(tranche)
+ if shortened > 0:
+ temp_tranche.cs = temp_tranche.cs[:-shortened]
+ for i, shock in enumerate(shock_range):
+ temp_tranche.tweak(shock)
+ if roll_corr is True:
+ temp_tranche.rho = tranche.map_skew(temp_tranche, 'TLP')
+ shock_index_pv_calc[i] = temp_tranche._snacpv(shock * 1e-4, temp_tranche.coupon(temp_tranche.maturity), temp_tranche.recovery)
+ shock_tranche_cl[i], _, shock_tranche_pv[i] = temp_tranche.tranche_pvs()
+ shock_tranche_delta[i] = temp_tranche.tranche_deltas()['delta']
+ shock_tranche_carry[i] = temp_tranche.tranche_quotes.running
+ temp1 = pd.DataFrame(shock_tranche_pv, index=shock_range, columns=[s + "_pv" for s in tranche._row_names])
+ temp2 = pd.DataFrame(shock_tranche_delta, index=shock_range, columns=[s + "_delta" for s in tranche._row_names])
+ temp3 = pd.DataFrame(np.subtract(shock_tranche_pv, orig_tranche_pv), index=shock_range, columns=[s + "_pnl" for s in tranche._row_names])
+ temp4 = pd.DataFrame(shock_index_pv_calc, index=shock_range, columns=['index_price_snacpv'])
+ temp5 = pd.DataFrame(shock_tranche_carry, index=shock_range, columns=[s + "_carry" for s in tranche._row_names])
+ #temp5 = pd.DataFrame(np.subtract(shock_tranche_cl, orig_tranche_cl), index=shock_range, columns=[s + "_coupon_pnl" for s in tranche._row_names])
+ df = pd.concat([temp1, temp2, temp3, temp4, temp5], axis=1)
+ if shortened > 0:
+            df['days'] = ((tranche.cs.index[-1] - tranche.cs.index[-shortened-1]) / np.timedelta64(1, 'D')).astype(int)
+ else:
+ df['days'] = 0
+ for column in [s + "_carry" for s in tranche._row_names]:
+ df[column] *= df['days']/365
+
+ results = results.append(df)
+
+ return results
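+
+# A minimal usage sketch for scenarios() (hypothetical series and date):
+#   tr = bkt.TrancheBasket('IG', 29, '5yr', trade_date=pd.Timestamp('2017-11-01'))
+#   grid = scenarios(tr, roll_corr=True)  # re-map the skew (TLP) at each shock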
+
+def run_scen(trade_date=pd.Timestamp.today().normalize() - pd.offsets.BDay()):
+
+    option_delta = Index.from_tradeid(910)
+    option1 = BlackSwaption.from_tradeid(13, option_delta)
+    option2 = BlackSwaption.from_tradeid(12, option_delta)
+    portf = Portfolio([option1, option2, option_delta])
+
+ #Start with swaptions
+ portf.reset_pv()
+ portf.mark()
+    earliest_date = min(portf.swaptions, key=attrgetter('exercise_date')).exercise_date
+    #date_range = pd.bdate_range(portf.indices[0].trade_date, earliest_date - BDay(), freq='3B')
+    date_range = pd.date_range(trade_date, periods=4, freq='5B')
+    vol_shock = np.arange(-0.01, 0.01, 0.01)
+    shock_min, shock_max = -.3, .8
+    spread_shock = np.arange(shock_min, shock_max, 0.05)
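+    # parse the family and series out of the index name (assumes tokens like
+    # '... IG ... S29', i.e. family at position 1 and 'S<series>' at position 3)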
+ index = portf.indices[0].name.split()[1]
+ series = portf.indices[0].name.split()[3][1:]
+ vs = VolatilitySurface(index, series, trade_date=trade_date)
+ vol_select = vs.list(option_type='payer', model='black')[-1]
+ vol_surface = vs[vol_select]
+
+ df = run_portfolio_scenarios(portf, date_range, spread_shock, vol_shock, vol_surface,
+ params=["pnl","delta"])
+ df = df[df.vol_shock == 0]
+    df['days'] = ((df.index - trade_date) / np.timedelta64(1, 'D')).astype(int)
+
+ #now do the tranches
+    index = 'IG'
+    series = 29
+    tenor = '5yr'
+    tranche = bkt.TrancheBasket(index, series, tenor, trade_date=trade_date)
+ shock_range = (1 + spread_shock) * portf.indices[0].spread
+
+    # scenarios() handles the time roll itself by shortening the coupon
+    # schedule, so only the spread grid is passed through
+    results = scenarios(tranche, shock_range)
+
+ notional = 10000000
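+    # package economics: long the 0-3 tranche against 6x the 7-15 tranche,
+    # carry included on both legs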
+    results['delta'] = -notional * (results['0-3_delta'] - 6 * results['7-15_delta'])
+    results['pnl'] = notional * (results['0-3_pnl'] + results['0-3_carry'] - 6 * (results['7-15_pnl'] + results['7-15_carry']))
+    results['date'] = tranche.trade_date + pd.to_timedelta(results.days, unit='D')
+ results.index.name = 'spread'
+
+ #now combine the results
+ f = {}
+ for i, g in results.groupby('spread'):
+ f[i] = interp1d(g.days, g.pnl)
+
+    # combine the swaption PnL with the tranche PnL interpolated onto the
+    # swaption day grid
+    df['total_pnl'] = df.pnl + df.apply(lambda row: float(f[row.spread](row.days)), axis=1)
+
+ return results, df, shock_range
+
+def plot_pnl():
+
+ a, b, shock_range = run_scen()
+ a.reset_index(inplace=True)
+ a.set_index('date', inplace=True)
+ #plot Tranche only PNL
+ plot_time_color_map(a, shock_range, attr="pnl")
+ #plot swaption only PNL
+ plot_time_color_map(b, shock_range, attr="pnl")
+ #plot Tranche and Swaption PNL
+ plot_time_color_map(b, shock_range, attr="total_pnl")
+
+
+def plot_time_color_map(df, spread_shock, attr="pnl", path=".", color_map=cm.RdYlGn, index='IG'):
+
+    val_date = df.index[0]
+    df = df.reset_index()
+    df['days'] = (df['date'] - val_date).dt.days
+    ascending = [True, True] if index == 'HY' else [True, False]
+    df.sort_values(by=['date', 'spread'], ascending=ascending, inplace=True)
+ date_range = df.days.unique()
+
+ #plt.style.use('seaborn-whitegrid')
+ fig, ax = plt.subplots()
+ series = df[attr]
+ midpoint = 1 - series.max() / (series.max() + abs(series.min()))
+ shifted_cmap = shiftedColorMap(color_map, midpoint=midpoint, name='shifted')
+
+ chart = ax.imshow(series.values.reshape(date_range.size, spread_shock.size).T,
+ extent=(date_range.min(), date_range.max(),
+ spread_shock.min(), spread_shock.max()),
+ aspect='auto', interpolation='bilinear', cmap=shifted_cmap)
+
+ #chart = ax.contour(date_range, spread_shock, series.values.reshape(date_range.size, spread_shock.size).T)
+
+ ax.set_xlabel('Days')
+    ax.set_ylabel('Price' if index == 'HY' else 'Spread')
+ ax.set_title('{} of Trade'.format(attr.title()))
+
+ fig.colorbar(chart, shrink=.8)
+ #fig.savefig(os.path.join(path, "spread_time_color_map_"+ attr+ "_{}.png".format(val_date)))
+
+def shiftedColorMap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
+ '''
+    Function to offset the "center" of a colormap. Useful for data with a
+    negative min and a positive max when you want the middle of the
+    colormap's dynamic range to sit at zero.
+
+    Input
+    -----
+    cmap : The matplotlib colormap to be altered
+    start : Offset from the lowest point in the colormap's range.
+        Defaults to 0.0 (no lower offset). Should be between
+        0.0 and `midpoint`.
+    midpoint : The new center of the colormap. Defaults to
+        0.5 (no shift). Should be between 0.0 and 1.0. In
+        general, this should be 1 - vmax/(vmax + abs(vmin)).
+        For example, if your data ranges from -15.0 to +5.0 and
+        you want the center of the colormap at 0.0, `midpoint`
+        should be set to 1 - 5/(5 + 15), or 0.75.
+    stop : Offset from the highest point in the colormap's range.
+        Defaults to 1.0 (no upper offset). Should be between
+        `midpoint` and 1.0.
+ '''
+ cdict = {
+ 'red': [],
+ 'green': [],
+ 'blue': [],
+ 'alpha': []
+ }
+
+ # regular index to compute the colors
+ reg_index = np.linspace(start, stop, 257)
+
+ # shifted index to match the data
+ shift_index = np.hstack([
+ np.linspace(0.0, midpoint, 128, endpoint=False),
+ np.linspace(midpoint, 1.0, 129, endpoint=True)
+ ])
+
+ for ri, si in zip(reg_index, shift_index):
+ r, g, b, a = cmap(ri)
+
+ cdict['red'].append((si, r, r))
+ cdict['green'].append((si, g, g))
+ cdict['blue'].append((si, b, b))
+ cdict['alpha'].append((si, a, a))
+
+ newcmap = matplotlib.colors.LinearSegmentedColormap(name, cdict)
+ plt.register_cmap(cmap=newcmap)
+
+ return newcmap
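+
+# Usage sketch, mirroring the docstring example (data spanning -15 to +5,
+# center at zero => midpoint = 1 - 5/(5 + 15) = 0.75):
+#   cmap = shiftedColorMap(cm.RdYlGn, midpoint=0.75)
+#   plt.imshow(np.linspace(-15, 5, 100).reshape(10, 10), cmap=cmap)
+#   plt.colorbar()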
+