Diffstat (limited to 'python')
-rw-r--r--  python/analytics/curve_trades.py                                  54
-rw-r--r--  python/exploration/VaR.py                                         34
-rw-r--r--  python/globeop_reports.py                                          2
-rw-r--r--  python/mark_backtest_backfill.py                                   1
-rw-r--r--  python/mark_backtest_underpar.py                                   3
-rw-r--r--  python/notebooks/Allocation Reports.ipynb                         71
-rw-r--r--  python/notebooks/Curve Trades.ipynb                               42
-rw-r--r--  python/notebooks/Curve cap.ipynb                                  24
-rw-r--r--  python/notebooks/Option Trades.ipynb                              55
-rw-r--r--  python/notebooks/Reto Report.ipynb                               216
-rw-r--r--  python/notebooks/Single Names Monitoring.ipynb                    32
-rw-r--r--  python/notebooks/VaR.ipynb                                        71
-rw-r--r--  python/notebooks/tranche and swaption portfolio strategy.ipynb   287
-rw-r--r--  python/notebooks/tranches numbers.ipynb                           72
14 files changed, 767 insertions, 197 deletions
diff --git a/python/analytics/curve_trades.py b/python/analytics/curve_trades.py
index b989bd69..555194ba 100644
--- a/python/analytics/curve_trades.py
+++ b/python/analytics/curve_trades.py
@@ -8,6 +8,7 @@ from statsmodels.sandbox.regression.predstd import wls_prediction_std
from scipy.interpolate import interp1d
from itertools import chain
from copy import deepcopy
+from matplotlib import cm
import pandas as pd
import math
@@ -30,7 +31,7 @@ def curve_spread_diff(index='IG', rolling=6, years=3, percentage=False, percenta
# look at spreads
df = get_index_quotes(index, list(range(otr - rolling, otr + 1)),
tenor=['3yr', '5yr', '7yr', '10yr'], years=years)
- spreads = df.groupby(level=['date', 'tenor']).nth(-1)['closespread'].unstack(-1)
+ spreads = df.groupby(level=['date', 'tenor']).nth(-1)['close_spread'].unstack(-1)
spreads_diff = spreads.diff(axis=1)
del spreads_diff['3yr']
spreads_diff.columns = ['3-5', '5-7', '7-10']
@@ -55,7 +56,8 @@ def theta_matrix_by_series(index='IG', rolling=6):
otr = on_the_run(index)
df = get_index_quotes(index, list(range(otr - rolling, otr + 1)),
tenor=['3yr', '5yr', '7yr', '10yr'])
- df['theta_per_dur'] = df.theta2 / df.duration2
+    # get_index_quotes now returns theta/duration already derived from theta2/duration2
+ df['theta_per_dur'] = df.theta / df.duration
theta_matrix = df.groupby(level=['date', 'tenor','series']).nth(-1)['theta_per_dur']
theta_matrix = theta_matrix.loc[theta_matrix.index[-1][0]].unstack(0)
return theta_matrix[['3yr', '5yr', '7yr', '10yr']]
@@ -78,7 +80,7 @@ def on_the_run_theta(index='IG', rolling=6):
otr = on_the_run(index)
df = get_index_quotes(index, list(range(otr - rolling, otr + 1)),
tenor=['3yr', '5yr', '7yr', '10yr'])
- df['theta_per_dur'] = df.theta2/df.duration2
+ df['theta_per_dur'] = df.theta/df.duration
theta_matrix = df.groupby(level=['date', 'tenor']).nth(-1)['theta_per_dur']
theta_matrix.unstack(-1).plot()
@@ -145,8 +147,8 @@ def cross_series_curve(index='IG', rolling=6):
def forward_loss(index='IG'):
start_date = (pd.Timestamp.now() - pd.DateOffset(years=3)).date()
- df = pd.read_sql_query("SELECT date, index, series, tenor, duration, closespread, "\
- "closespread*duration / 100 AS indexel " \
+ df = pd.read_sql_query("SELECT date, index, series, tenor, duration, close_spread, "\
+ "close_spread*duration / 100 AS indexel " \
"FROM index_quotes WHERE index=%s AND date >= %s " \
"ORDER BY date DESC, series ASC, duration ASC",
serenitasdb, parse_dates=['date'], params=[index, start_date])
@@ -162,16 +164,16 @@ def forward_loss(index='IG'):
def curve_model(tenor_1='5yr', tenor_2='10yr'):
#OLS model
- df = ratio_within_series(param='closespread')
+ df = ratio_within_series(param='close_spread')
df = pd.concat([df.duration[tenor_1], df.duration[tenor_2],
- df.closespread[tenor_1],
- df.closespread_ratio_to_5yr[tenor_2],
+ df.close_spread[tenor_1],
+ df.close_spread_ratio_to_5yr[tenor_2],
df.theta[tenor_1], df.theta[tenor_2]],
axis=1,
- keys=['duration1', 'duration2', 'closespread',
+ keys=['duration1', 'duration2', 'close_spread',
'ratio', 'theta1', 'theta2'])
df = np.log(df)
- ols_model = smf.ols('ratio ~ closespread + duration1 + theta1 + theta2',
+ ols_model = smf.ols('ratio ~ close_spread + duration1 + theta1 + theta2',
data=df).fit()
return df, ols_model
@@ -184,13 +186,13 @@ def curve_model_results(df, model):
df = df.join(b)
df = df.join(c)
#dr/dspread = exp(k) + spread_coeff * duration ^ dur_coeff * spread ^ (spread_coeff-1)
- cols = ['ratio', 'closespread', 'down_2_stdev', 'up_2_stdev']
+ cols = ['ratio', 'close_spread', 'down_2_stdev', 'up_2_stdev']
df[cols] = np.exp(df[cols])
df['predicted'] = np.exp(model.predict())
df[['predicted', 'down_2_stdev', 'up_2_stdev']]=\
- df[['predicted', 'down_2_stdev', 'up_2_stdev']].multiply(df['closespread'].values, axis=0)
+ df[['predicted', 'down_2_stdev', 'up_2_stdev']].multiply(df['close_spread'].values, axis=0)
ax = df[['predicted', 'down_2_stdev', 'up_2_stdev']].reset_index(level='series', drop=True).plot()
- df['dr_dspread'] = np.exp(model.params[0]) * model.params[2] * df.duration1 ** model.params[1] * df.closespread ** (model.params[2] - 1)
+ df['dr_dspread'] = np.exp(model.params[0]) * model.params[2] * df.duration1 ** model.params[1] * df.close_spread ** (model.params[2] - 1)
return df
@@ -199,7 +201,7 @@ def spread_fin_crisis(index='IG'):
# look at spreads
df = get_index_quotes(index, list(range(8, otr + 1)),
tenor=['3yr', '5yr', '7yr', '10yr'], years=20)
- spreads = df.groupby(level=['date', 'tenor']).nth(-1)['closespread'].unstack(-1)
+ spreads = df.groupby(level=['date', 'tenor']).nth(-1)['close_spread'].unstack(-1)
spreads_diff = spreads.diff(axis=1)
to_plot = pd.DataFrame()
to_plot['spread'] = spreads['5yr']
@@ -314,6 +316,28 @@ def curve_shape(value_date, index='IG', percentile=.95, spread=None):
return interp1d(np.hstack([0, df.year_frac]), np.hstack([0, df.spread]))
+def plot_curve_shape(date):
+
+ '''
+    Plots the curve shape used for the scenarios.'''
+
+ curve_per = np.arange(.01, .99, .1)
+ time_per = np.arange(.1, 10.1, .5)
+ r=[]
+ for per in curve_per:
+ shape = curve_shape(date, percentile = per)
+ r.append(shape(time_per))
+ df = pd.DataFrame(r, index=curve_per, columns=time_per)
+ fig = plt.figure()
+ ax = fig.gca(projection='3d')
+ xx, yy = np.meshgrid(curve_per, time_per)
+ z = np.vstack(r).transpose()
+ surf = ax.plot_surface(xx, yy, z, cmap=cm.viridis)
+ ax.set_xlabel("steepness percentile")
+ ax.set_ylabel("tenor")
+ ax.set_zlabel("spread")
+
+
def pos_pnl_abs(portf, value_date, index='IG', rolling=6, years=3):
'''
@@ -323,7 +347,7 @@ def pos_pnl_abs(portf, value_date, index='IG', rolling=6, years=3):
series = on_the_run(index)
df = get_index_quotes(index, list(range(series - rolling, series + 1)),
tenor=['3yr', '5yr', '7yr', '10yr'], years=years)
- df = df.groupby(level=['date', 'tenor']).nth(-1)['closespread'].unstack(-1)
+ df = df.groupby(level=['date', 'tenor']).nth(-1)['close_spread'].unstack(-1)
sql_string = "SELECT tenor, maturity FROM index_maturity where index = %s and series = %s"
lookup_table = pd.read_sql_query(sql_string, serenitasdb, parse_dates=['maturity'],
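
The close_spread rename above feeds the same last-quote pivot throughout curve_trades.py. As a minimal standalone sketch of that groupby/unstack pattern (toy frame, invented numbers, no database):

    import pandas as pd

    # Toy index-quote frame with the (date, tenor) index levels the hunks assume.
    idx = pd.MultiIndex.from_product(
        [pd.bdate_range("2018-11-01", periods=3), ["3yr", "5yr", "7yr", "10yr"]],
        names=["date", "tenor"])
    df = pd.DataFrame({"close_spread": range(12)}, index=idx)

    # Last quote per (date, tenor), pivoted so tenors become columns.
    spreads = df.groupby(level=["date", "tenor"]).nth(-1)["close_spread"].unstack(-1)
    spreads = spreads[["3yr", "5yr", "7yr", "10yr"]]          # enforce tenor order
    spreads_diff = spreads.diff(axis=1).drop(columns="3yr")   # 3-5, 5-7, 7-10 slopes
    spreads_diff.columns = ["3-5", "5-7", "7-10"]
    print(spreads_diff)
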
diff --git a/python/exploration/VaR.py b/python/exploration/VaR.py
index bb64c314..f578bf84 100644
--- a/python/exploration/VaR.py
+++ b/python/exploration/VaR.py
@@ -9,7 +9,6 @@ import datetime
dawndb = dbengine('dawndb')
serenitasdb = dbengine('serenitasdb')
-
def hist_var(portf, index_type='IG', quantile=.05, years=5):
df = index_returns(index=index_type, years=years,
tenor=['3yr', '5yr', '7yr', '10yr'])
@@ -36,35 +35,4 @@ def hist_var(portf, index_type='IG', quantile=.05, years=5):
portf.spread = spreads.spread * (1 + spreads.join(shocks).shocks)
r.append((k, portf.pnl))
pnl = pd.DataFrame.from_records(r, columns=['date', 'pnl'], index=['date'])
- return pnl.quantile(quantile) * math.sqrt(12)
-
-def get_pos(report_date, strategy=None):
-
- df = pd.read_sql_query("SELECT * from list_cds_marks_by_strat(%s)",
- dawndb, params=(report_date,))
- if strategy is None:
- return df
- else:
- return df[df['strategy'] == strategy]
-
-def cleared_cds_margins(report_date=datetime.date.today()):
-
- df = get_pos(report_date)
-
- #Cap Allocation for Deltas
- percentile = .95 #monthly 90%tile case...
- shocks, widen, tighten, onTR_dur, onTR_spread = {}, {}, {}, {}, {}
- for ind in ['IG', 'HY', 'EU']:
- shocks[ind], onTR_spread[ind], onTR_dur[ind] = rel_spread_diff(report_date, index=ind)
- widen[ind] = shocks[ind].quantile(percentile)
- tighten[ind] = shocks[ind].quantile(1-percentile)
-
- df['onTR_notional'] = df.apply(lambda df:
- df.notional * df.factor * df.duration / onTR_dur[df.p_index], axis=1)
- df['widen'] = df.apply(lambda df:
- df.onTR_notional * onTR_spread[df.p_index] * onTR_dur[df.p_index] * widen[df.p_index]/10000, axis=1)
- df['tighten'] = df.apply(lambda df:
- df.onTR_notional * onTR_spread[df.p_index] * onTR_dur[df.p_index] * tighten[df.p_index]/10000, axis=1)
- delta_alloc = df.groupby('strategy').sum()
- delta_alloc['total'] = delta_alloc.apply(lambda df: max(abs(df.widen), abs(df.tighten)), axis=1)
- return delta_alloc
+    return pnl.quantile(quantile) * math.sqrt(12)
\ No newline at end of file
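
hist_var (kept above) annualizes a monthly P&L quantile with the usual square-root-of-time scaling. A worked toy example of just that final step, all numbers invented:

    import math
    import numpy as np
    import pandas as pd

    # Hypothetical monthly P&L history, standing in for the shocked-spread
    # repricing loop inside hist_var.
    pnl = pd.Series(np.random.default_rng(0).normal(0, 1_000_000, 60))

    quantile = 0.05
    monthly_var = pnl.quantile(quantile)
    annualized = monthly_var * math.sqrt(12)   # same sqrt(12) scaling as hist_var
    print(round(annualized))
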
diff --git a/python/globeop_reports.py b/python/globeop_reports.py
index feaeb18c..e656ae5f 100644
--- a/python/globeop_reports.py
+++ b/python/globeop_reports.py
@@ -135,7 +135,7 @@ def get_rmbs_pos_df(date=None):
for d, g in df.groupby(pd.Grouper(freq='M')):
model_date = pd.to_datetime(timestamps[timestamps.timestamp <= d + DateOffset(days=1)].max()[0]).date()
yc.link_to(YC(evaluation_date=model_date))
- libor = libor1m.fixing(libor1.fixing_calendar.adjust(Date.from_datetime(d)))
+ libor = libor1m.fixing(libor1m.fixing_calendar.adjust(Date.from_datetime(d)))
sql_string = ("SELECT date(timestamp) as timestamp, cusip, model_version, "
"pv, moddur, delta_yield, delta_ir "
"FROM priced where date(timestamp) = %s "
diff --git a/python/mark_backtest_backfill.py b/python/mark_backtest_backfill.py
index 6a2913c9..ca000956 100644
--- a/python/mark_backtest_backfill.py
+++ b/python/mark_backtest_backfill.py
@@ -97,6 +97,7 @@ def get_globs():
return chain.from_iterable(globs)
settings = {
+ 'ReviewedPack.pcharubh.SERENITAS.SERCGMAST.20181001.20181031.Draft.xlsx': ("JA:JX", "Securities Valuation Details", "Y"),
'ReviewedPack.jmircovi.SERENITAS.SERCGMAST.20180901.20180930.Draft.xlsx': ("JA:JX", "Securities Valuation Details", "Y"),
'ReviewedPack.pcharubh.SERENITAS.SERCGMAST.20180801.20180831.Draft.xlsx': ("JA:JX", "Securities Valuation Details", "Y"),
'ReviewedPack.pcharubh.SERENITAS.SERCGMAST.20180701.20180731.Draft.xlsx': ("JA:JX", "Securities Valuation Details", "Y"),
diff --git a/python/mark_backtest_underpar.py b/python/mark_backtest_underpar.py
index 0465049e..4b8eb4b0 100644
--- a/python/mark_backtest_underpar.py
+++ b/python/mark_backtest_underpar.py
@@ -107,7 +107,8 @@ def alt_nav_impact():
navs = ops.get_net_navs()
df = calc_mark_diff(get_mark_df())
df = df.join(navs.endbooknav)
- return df.iloc[-1]/df.iloc[-1]['endbooknav']
+ return df
+ #return df.iloc[-1]/df.iloc[-1]['endbooknav']
def back_test(begindate = '2013-01-01', enddate = '2018-01-01', sell_price_threshold = 200):
df = pd.read_sql_query("SELECT * FROM external_marks_mapped WHERE source IS NOT NULL", dbengine('dawndb'),
diff --git a/python/notebooks/Allocation Reports.ipynb b/python/notebooks/Allocation Reports.ipynb
index 1a30f348..b20f77d0 100644
--- a/python/notebooks/Allocation Reports.ipynb
+++ b/python/notebooks/Allocation Reports.ipynb
@@ -14,6 +14,9 @@
"import numpy as np\n",
"\n",
"from db import dbengine\n",
+ "from yieldcurve import YC\n",
+ "from quantlib.termstructures.yield_term_structure import YieldTermStructure\n",
+ "\n",
"engine = dbengine('dawndb')\n",
"Sengine = dbengine('serenitasdb')"
]
@@ -53,8 +56,19 @@
"#Input latest NAVS to: '/home/serenitas/edwin/Python/subscription_fee_data.csv'\n",
"pnl_alloc = m_pnl.groupby(['date', 'pnl']).sum()\n",
"pnl_alloc = pnl_alloc.join(nav.begbooknav)\n",
- "pnl_alloc['strat_return'] = pnl_alloc.mtdtotalbookpl / pnl_alloc.begbooknav\n",
- "pnl_alloc_last_month = pnl_alloc.xs(report_date)"
+ "pnl_alloc['strat_return'] = pnl_alloc.mtdtotalbookpl / pnl_alloc.begbooknav"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#rolling 12 months PNL per strategy - copy to RiskMonitor\n",
+ "start_date = report_date - pd.tseries.offsets.MonthEnd(11)\n",
+ "rolling_return = pnl_alloc[start_date:report_date].groupby('pnl').sum()['strat_return']\n",
+ "rolling_return.to_clipboard()"
]
},
{
@@ -64,6 +78,7 @@
"outputs": [],
"source": [
"#Plot this month's PNL\n",
+ "pnl_alloc_last_month = pnl_alloc.xs(report_date)\n",
"ax = pnl_alloc_last_month['strat_return'].plot(kind='bar', figsize = (6,6), width = .35)\n",
"ax.set_xlabel('Strategy')\n",
"ax.set_ylabel('Return (%)')\n",
@@ -154,6 +169,15 @@
"metadata": {},
"outputs": [],
"source": [
+ "turnover.to_clipboard(sep='\\t')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
"#Number of bond positions by strategy by month\n",
"df = go.get_portfolio()\n",
"df = df[(df.custacctname == 'V0NSCLMAMB') &\n",
@@ -265,6 +289,16 @@
"metadata": {},
"outputs": [],
"source": [
+ "a.to_clipboard()\n",
+ "#b.to_clipboard()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
"#RMBS Risk - need RMBS Positions and Risks\n",
"sql_string = \"select date, duration, series from on_the_run where index = 'HY'\"\n",
"duration = pd.read_sql_query(sql_string, Sengine, parse_dates=['date'], index_col=['date'])\n",
@@ -288,10 +322,11 @@
" right_index=True,\n",
" by='cusip')\n",
"df_with_trades['hold'] = (df_with_trades.index - df_with_trades.trade_date2).dt.days/365\n",
- "holding_period = df_with_trades.groupby('timestamp').apply(lambda df: sum(df.endbooknav * df.hold)/sum(df.endbooknav))\n",
+ "holding_period = df_with_trades.groupby('timestamp').apply(lambda df: sum(df.endbookmv * df.hold)/sum(df.endbookmv))\n",
"ax = holding_period.plot(legend=False, title='Average Holding Period')\n",
"ax.set_xlabel('date')\n",
- "ax.set_ylabel('Years')"
+ "ax.set_ylabel('Years')\n",
+ "holding_period[-1]"
]
},
{
@@ -300,7 +335,31 @@
"metadata": {},
"outputs": [],
"source": [
- "engine.dispose()"
+ "#Calculate Interests allocation \n",
+ "sql_string = \"select periodenddate, strat, sum(endqty) as bal, counterparty \" \\\n",
+ " \"from valuation_reports where invid = 'USDLOAN' and \" \\\n",
+ " \"extract(month from periodenddate) = %s and \" \\\n",
+ " \"extract(year from periodenddate) = %s \" \\\n",
+ " \"group by periodenddate, strat, counterparty \" \\\n",
+ " \"order by periodenddate desc\"\n",
+ "df = pd.read_sql_query(sql_string, dbengine('dawndb'), \n",
+ " parse_dates=['periodenddate'],\n",
+ " index_col=['strat', 'counterparty'],\n",
+ " params=[report_date.month, report_date.year])\n",
+ "df['day_frac'] = -(df.groupby(level=['strat','counterparty'])['periodenddate'].transform(lambda s:\n",
+ " s.diff().astype('timedelta64[D]') / 360)).astype(float)\n",
+ "df = df.fillna(0)\n",
+ "r = {}\n",
+ "yc = YieldTermStructure()\n",
+ "for t in df['periodenddate'].unique():\n",
+ " yc.link_to(YC(evaluation_date=pd.Timestamp(t)))\n",
+ " r[pd.Timestamp(t)] = (float(yc.zero_rate(.083333)))\n",
+ "rates = pd.DataFrame.from_dict(r, orient='index')\n",
+ "df = df.reset_index().set_index('periodenddate', drop=False).join(rates)\n",
+ "df = df.rename(columns={0: 'rate'})\n",
+ "df = df.set_index(['strat','counterparty'], append=True)\n",
+ "df['interest'] = df['rate'] * df['day_frac'] * df['bal']\n",
+ "interests = df['interest'].groupby(level=['counterparty','strat']).sum()"
]
},
{
@@ -327,7 +386,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.6.6"
+ "version": "3.7.1"
}
},
"nbformat": 4,
diff --git a/python/notebooks/Curve Trades.ipynb b/python/notebooks/Curve Trades.ipynb
index b2cde72e..05b05f89 100644
--- a/python/notebooks/Curve Trades.ipynb
+++ b/python/notebooks/Curve Trades.ipynb
@@ -9,7 +9,6 @@
"import analytics.curve_trades as ct\n",
"import matplotlib.pyplot as plt\n",
"import pandas as pd\n",
- "import pandas as pd\n",
"import numpy as np\n",
"import graphics as g\n",
"import globeop_reports as go\n",
@@ -42,7 +41,7 @@
"outputs": [],
"source": [
"index = w.value\n",
- "report_date = (pd.datetime.today() - pd.offsets.BDay(2)).date()"
+ "report_date = (pd.datetime.today() - pd.offsets.BDay(5)).date()"
]
},
{
@@ -92,7 +91,7 @@
"metadata": {},
"outputs": [],
"source": [
- "rolling = 10\n",
+ "rolling = 20\n",
"years = 5\n",
"ret = ct.curve_returns(index, rolling, years)\n",
"if index == 'IG':\n",
@@ -187,9 +186,9 @@
"source": [
"#Theta with 3-5-10 Strategy\n",
"df = ct.ratio_within_series(param='duration')\n",
- "s = - df.theta2['3yr'] / df.duration_ratio_to_5yr['3yr'] \\\n",
- " + 2 * df.theta2['5yr'] \\\n",
- " - df.theta2['10yr'] / df.duration_ratio_to_5yr['10yr']\n",
+ "s = - df.theta['3yr'] / df.duration_ratio_to_5yr['3yr'] \\\n",
+ " + 2 * df.theta['5yr'] \\\n",
+ " - df.theta['10yr'] / df.duration_ratio_to_5yr['10yr']\n",
"s.dropna().unstack(-1).plot()"
]
},
@@ -200,7 +199,7 @@
"outputs": [],
"source": [
"#Theta with 5-10 Strategy: buy sell 5y, buy 10y\n",
- "s = df.theta2['5yr'] - df.theta2['10yr'] / df.duration_ratio_to_5yr['10yr']\n",
+ "s = df.theta['5yr'] - df.theta['10yr'] / df.duration_ratio_to_5yr['10yr']\n",
"s.dropna().unstack(-1).plot()"
]
},
@@ -211,8 +210,8 @@
"outputs": [],
"source": [
"#Relative Spread Difference\n",
- "spread_ratio = ct.ratio_within_series(param = 'closespread')\n",
- "spread_ratio.groupby(level = ['date']).last()['closespread_ratio_to_5yr'].plot()"
+ "spread_ratio = ct.ratio_within_series(param = 'close_spread')\n",
+ "spread_ratio.groupby(level = ['date']).last()['close_spread_ratio_to_5yr'].plot()"
]
},
{
@@ -263,9 +262,9 @@
"sql_string = \"SELECT closespread FROM index_quotes where index = %s and series = %s and tenor = %s and date = %s\"\n",
"spread_df = pd.read_sql_query(sql_string, dbengine('serenitasdb'),\n",
" params=[index, ct.on_the_run(index), '5yr', report_date])\n",
- "spread_range = (1+ spread_shock) * spread_df.iloc[0][0]\n",
- "#need to max it at the closest maturity date\n",
- "date_range = pd.bdate_range(report_date, report_date + 180* pd.offsets.DateOffset(), freq='5B')\n",
+ "spread_range = np.round((1+ spread_shock) * spread_df.iloc[0][0], 2)\n",
+ "closest_mat = min([t.end_date for t in portf.trades])\n",
+ "date_range = pd.bdate_range(report_date, min(closest_mat, (report_date + 180* pd.offsets.DateOffset()).date()), freq='5B')\n",
"curve_per = np.arange(.01, .99, .1)\n",
"\n",
"df = run_curve_scenarios(portf, spread_range, date_range, curve_per)"
@@ -277,8 +276,21 @@
"metadata": {},
"outputs": [],
"source": [
- "df_plot = df[df.curve_per == curve_per[5]]\n",
- "g.plot_time_color_map(df_plot, spread_range, attr='pnl')"
+ "#plot steepness scenario at current spread\n",
+ "df_plot = df.set_index(['spread', 'curve_per'], append=True)\n",
+ "df_plot = df_plot.xs(round(spread_df.iloc[0][0], 2), level = 'spread')\n",
+ "df_plot.name = 'pnl'\n",
+ "g.plot_color_map(df_plot, spread_range)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Plot the shape of the scenario that was run above\n",
+ "ct.plot_curve_shape(report_date)"
]
},
{
@@ -371,7 +383,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.6.5"
+ "version": "3.7.1"
}
},
"nbformat": 4,
diff --git a/python/notebooks/Curve cap.ipynb b/python/notebooks/Curve cap.ipynb
index 4ce1f2f6..1a42524e 100644
--- a/python/notebooks/Curve cap.ipynb
+++ b/python/notebooks/Curve cap.ipynb
@@ -58,6 +58,28 @@
"execution_count": null,
"metadata": {},
"outputs": [],
+ "source": [
+ "with init_bbg_session(BBG_IP) as session:\n",
+ " hist_data_210 = retrieve_data(session, [\"USSW2 Curncy\", \"USSW10 Curncy\"], [\"PX_LAST\"], start_date=pd.datetime(1994, 1, 1))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df = pd.concat(hist_data).unstack(level=0)\n",
+ "df.columns = ['2y_swap', '10y_swap']\n",
+ "df['diff'] = df['10y_swap'] - df['2y_swap']\n",
+ "df.plot(title='Historical swap rates')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
"source": []
}
],
@@ -77,7 +99,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.6.4"
+ "version": "3.7.0"
}
},
"nbformat": 4,
diff --git a/python/notebooks/Option Trades.ipynb b/python/notebooks/Option Trades.ipynb
index d43d86ee..6d60669c 100644
--- a/python/notebooks/Option Trades.ipynb
+++ b/python/notebooks/Option Trades.ipynb
@@ -57,7 +57,7 @@
" df = df.set_index(['date', 'price', 'vol_shock'])\n",
" sort_order = [True, False]\n",
" else:\n",
- " ss_df['spread'] = portf.indices[0].spread * (1 + df.spread_shock)\n",
+ " df['spread'] = portf.indices[0].spread * (1 + df.spread_shock)\n",
" df = df.set_index(['date', 'spread', 'vol_shock'])\n",
" sort_order = [True, True]\n",
" \n",
@@ -83,15 +83,56 @@
"outputs": [],
"source": [
"#Ad hoc\n",
+ "index = 'IG'\n",
+ "series = 31\n",
+ "value_date = datetime.date(2018, 11, 18)\n",
+ "option_delta = CreditIndex(index, series, '5yr', value_date)\n",
+ "option_delta.spread = 76.5\n",
+ "option1 = BlackSwaption(option_delta, datetime.date(2019, 2, 20), 65, option_type=\"payer\")\n",
+ "option2 = BlackSwaption(option_delta, datetime.date(2019, 2, 20), 90, option_type=\"payer\")\n",
+ "option3 = BlackSwaption(option_delta, datetime.date(2019, 2, 20), 60, option_type=\"payer\")\n",
+ "option1.sigma = .425\n",
+ "option2.sigma = .59\n",
+ "option3.sigma = .0\n",
+ "option1.notional = 300_000_000\n",
+ "option2.notional = 300_000_000\n",
+ "option3.notional = 1\n",
+ "option1.direction = 'Long'\n",
+ "option2.direction = 'Short'\n",
+ "option3.direction = 'Long'\n",
+ "option_delta.notional = 1\n",
+ "option_delta.notional = option1.notional * option1.delta + option2.notional * option2.delta + option3.notional * option3.delta\n",
+ "option_delta.direction = 'Seller' if option_delta.notional > 0 else 'Buyer'\n",
+ "option_delta.notional = abs(option_delta.notional)\n",
+ "portf = Portfolio([option1, option2, option3, option_delta], trade_ids=['opt1', 'opt2', 'opt3', 'delta'])\n",
+ "#Plot Scenarios Inputs: Portfolio, spread shock tightening%, spread shock widening%, snapshot period)\n",
+ "portf"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "plot_trade_scenarios(portf)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Ad hoc\n",
"index = 'HY'\n",
"series = 30\n",
"value_date = datetime.date(2018, 6, 7)\n",
"option_delta = CreditIndex(index, series, '5yr', value_date)\n",
- "#option_delta.spread = 66\n",
- "option_delta.price = 106.75\n",
- "option1 = BlackSwaption(option_delta, datetime.date(2018, 8, 15), 103, option_type=\"payer\")\n",
- "option2 = BlackSwaption(option_delta, datetime.date(2018, 8, 15), 101.5, option_type=\"payer\")\n",
- "option3 = BlackSwaption(option_delta, datetime.date(2018, 8, 15), 100, option_type=\"payer\")\n",
+ "option_delta.spread = 66\n",
+ "option1 = BlackSwaption(option_delta, datetime.date(2018, 10, 17), 55, option_type=\"receiver\")\n",
+ "option2 = BlackSwaption(option_delta, datetime.date(2018, 10, 17), 57.5, option_type=\"receiver\")\n",
+ "option3 = BlackSwaption(option_delta, datetime.date(2018, 10, 17), 60, option_type=\"receiver\")\n",
"option1.sigma = .47\n",
"option2.sigma = .53\n",
"option3.sigma = .69\n",
@@ -255,7 +296,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.6.6"
+ "version": "3.7.1"
}
},
"nbformat": 4,
diff --git a/python/notebooks/Reto Report.ipynb b/python/notebooks/Reto Report.ipynb
new file mode 100644
index 00000000..c91cd918
--- /dev/null
+++ b/python/notebooks/Reto Report.ipynb
@@ -0,0 +1,216 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import datetime\n",
+ "import pandas.tseries.offsets as off\n",
+ "import globeop_reports as go\n",
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "import exploration.VaR as var\n",
+ "\n",
+ "from analytics.curve_trades import curve_pos, on_the_run\n",
+ "from analytics.index_data import get_index_quotes\n",
+ "from analytics.scenarios import run_portfolio_scenarios\n",
+ "from analytics import Swaption, BlackSwaption, CreditIndex, BlackSwaptionVolSurface, Portfolio, ProbSurface, DualCorrTranche\n",
+ "from db import dbconn, dbengine\n",
+ "\n",
+ "conn = dbconn('dawndb')\n",
+ "dawndb = dbengine('dawndb')\n",
+ "serenitasdb = dbengine('serenitasdb')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#PNL Allocation\n",
+ "date = datetime.date.today() - off.BDay(1)\n",
+ "report_date = date - off.MonthEnd(1)\n",
+ "report_date"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Find the strategies that are not defined: undefined needs to be mapped in strat_map\n",
+ "strats = pd.read_csv('/home/serenitas/edwin/Python/strat_map.csv')\n",
+ "nav = go.get_net_navs()\n",
+ "m_pnl = go.get_monthly_pnl(['strat', 'custacctname'])\n",
+ "m_pnl = m_pnl.reset_index().merge(strats, on=['strat', 'custacctname'], how='left')\n",
+ "undefined = m_pnl[m_pnl.pnl.isna()].groupby(['strat', 'custacctname']).last()\n",
+ "#Get PNL Allocation\n",
+ "#Input latest NAVS to: '/home/serenitas/edwin/Python/subscription_fee_data.csv'\n",
+ "pnl_alloc = m_pnl.groupby(['date', 'pnl']).sum()\n",
+ "pnl_alloc = pnl_alloc.join(nav.begbooknav)\n",
+ "pnl_alloc['strat_return'] = pnl_alloc.mtdtotalbookpl / pnl_alloc.begbooknav\n",
+ "#rolling 12 months PNL per strategy - copy to RiskMonitor\n",
+ "start_date = report_date - pd.tseries.offsets.MonthEnd(11)\n",
+ "rolling_return = pnl_alloc[start_date:report_date].groupby('pnl').sum()['strat_return']\n",
+ "rolling_return.to_clipboard()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Average Portfolio Sales Turnover - as of last monthend from today\n",
+ "#(total Bond Sales Proceeds + paydown)/average starting 12 months NAV\n",
+ "nav = go.get_net_navs()\n",
+ "sql_string = \"SELECT * FROM bonds where buysell = 'False'\"\n",
+ "df = pd.read_sql_query(sql_string, dbengine('dawndb'),\n",
+ " parse_dates={'lastupdate':'utc=True', 'trade_date':'', 'settle_date':''},\n",
+ " index_col = 'trade_date')\n",
+ "df = df.groupby(pd.Grouper(freq='M')).sum()\n",
+ "#Now get portfolio paydown per month\n",
+ "portfolio = go.get_portfolio()\n",
+ "portfolio = portfolio[(portfolio.custacctname == 'V0NSCLMAMB') &\n",
+ " (portfolio.port == 'MORTGAGES') &\n",
+ " (portfolio.identifier != 'USD') &\n",
+ " (portfolio.endqty != 0)]\n",
+ "portfolio = portfolio.set_index('identifier', append=True)\n",
+ "portfolio = portfolio['endqty'].groupby(['identifier', 'periodenddate']).sum()\n",
+ "portfolio = portfolio.reset_index('identifier') \n",
+ "sql_string = \"SELECT * from cashflow_history\"\n",
+ "cf = pd.read_sql_query(sql_string, dbengine('dawndb'), parse_dates=['date'],\n",
+ " index_col=['date']).sort_index()\n",
+ "df_1 = pd.merge_asof(cf, portfolio.sort_index(), left_index=True, right_index=True, by='identifier')\n",
+ "df_1 = df_1.dropna(subset=['endqty'])\n",
+ "df_1 = df_1[(df_1.principal_bal != 0) & (df_1.principal != 0)]\n",
+ "df_1['paydown'] = df_1.apply(lambda df: df.endqty/df.principal_bal * df.principal, axis=1)\n",
+ "paydowns = df_1.paydown.groupby(pd.Grouper(freq='M')).sum()\n",
+ "temp = pd.concat([paydowns, df.principal_payment, df.accrued_payment], axis=1).fillna(0)\n",
+ "turnover = temp.rolling(12).sum().sum(axis=1)/ nav.begbooknav.rolling(12).mean()\n",
+ "turnover[12:].plot()\n",
+ "turnover[-1]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Calculate amount of stress for reports\n",
+ "df = get_index_quotes('HY', list(range(on_the_run('HY') - 10, on_the_run('HY') + 1)),\n",
+ " tenor=['5yr'], years=5)\n",
+ "df = df.xs('5yr', level='tenor')['close_spread'].groupby(['date', 'series']).last()\n",
+ "\n",
+ "widen, tighten = [], []\n",
+ "#approximately 1,3,6 months move (22 each months)\n",
+ "for days in [22, 66, 132]: \n",
+ " calc = df.unstack().pct_change(freq= str(days)+'B').stack().groupby('date').last()\n",
+ " widen.append(calc.max())\n",
+ " tighten.append(calc.min())\n",
+ "pd.DataFrame([widen, tighten], columns=['1M', '3M', '6M'], index=['widen', 'tighten'])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Current tranche and swaptions positions\n",
+ "t_sql_string = (\"SELECT id, sum(notional * case when protection='Buyer' then -1 else 1 end) \"\n",
+ " \"OVER (partition by security_id, attach) AS ntl_agg \"\n",
+ " \"FROM cds WHERE swap_type='CD_INDEX_TRANCHE' AND termination_cp IS NULL \"\n",
+ " \"AND trade_date <= %s\")\n",
+ "swaption_sql_string = (\"select id, security_desc from swaptions where date(expiration_date) \"\n",
+ " \"> %s and swap_type = 'CD_INDEX_OPTION' \"\n",
+ " \"AND trade_date <= %s AND termination_date iS NULL\")\n",
+ "index_sql_string = (\"SELECT id, sum(notional * case when protection='Buyer' then -1 else 1 end) \"\n",
+ " \"OVER (partition by security_id, attach) AS ntl_agg \"\n",
+ " \"FROM cds WHERE swap_type='CD_INDEX' AND termination_cp IS null \"\n",
+ " \"AND folder = 'IGOPTDEL' OR folder = 'HYOPTDEL' \"\n",
+ " \"AND trade_date <= %s\")\n",
+ "with conn.cursor() as c:\n",
+ " #Get Tranche Trade Ids\n",
+ " c.execute(t_sql_string, (date,))\n",
+ " t_trade_ids = [dealid for dealid, ntl in c if ntl != 0]\n",
+ " #Get Swaption Trade Ids\n",
+ " c.execute(swaption_sql_string, (date, date))\n",
+ " swaption_trades = c.fetchall()\n",
+ " #Get Index/deltas Trade Ids\n",
+ " c.execute(index_sql_string, (date,))\n",
+ " index_trade_ids = [dealid for dealid, ntl in c if ntl != 0]\n",
+ " \n",
+ "portf = Portfolio([DualCorrTranche.from_tradeid(dealid) for dealid in t_trade_ids],\n",
+ " t_trade_ids)\n",
+ "for row in swaption_trades:\n",
+ " option_delta = CreditIndex(row[1].split()[1], row[1].split()[3][1:], '5yr', date)\n",
+ " option_delta.mark()\n",
+ " portf.add_trade(BlackSwaption.from_tradeid(row[0], option_delta), 'opt_' + str(row[0]))\n",
+ "for index_id in index_trade_ids:\n",
+ " portf.add_trade(CreditIndex.from_tradeid(index_id), 'index_' + str(index_id))\n",
+ " \n",
+ "#Update manually - positive notional = long risk\n",
+ "non_trancheSwap_risk_notional = 49119912 \n",
+ "\n",
+ "portf.add_trade(CreditIndex('HY', on_the_run('HY'), '5yr', value_date = date, notional = -non_trancheSwap_risk_notional), 'port')\n",
+ " \n",
+ "portf.value_date = date\n",
+ "portf.mark(interp_method=\"bivariate_linear\")\n",
+ "portf.reset_pv()\n",
+ " \n",
+ "vs = BlackSwaptionVolSurface(portf.swaptions[0].index.index_type, \n",
+ " portf.swaptions[0].index.series, \n",
+ " value_date=date, \n",
+ " interp_method = \"bivariate_linear\")\n",
+ "vol_surface = vs[vs.list(option_type='payer')[-1]]\n",
+ "vol_shock = [0]\n",
+ "corr_shock = [0]\n",
+ "spread_shock = widen + tighten\n",
+ "date_range = [pd.Timestamp(date)]\n",
+ "\n",
+ "scens = run_portfolio_scenarios(portf, date_range, params=[\"pnl\"],\n",
+ " spread_shock=spread_shock,\n",
+ " vol_shock=vol_shock,\n",
+ " corr_shock=corr_shock,\n",
+ " vol_surface=vol_surface)\n",
+ "\n",
+ "scens.sum(axis=1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.1"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
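
The stress cell in Reto Report takes the worst 22/66/132-business-day percentage spread moves via pct_change(freq=...). A self-contained sketch of that scan on a simulated on-the-run spread path, every number invented:

    import numpy as np
    import pandas as pd

    # Hypothetical daily on-the-run 5yr spread history (random walk in logs).
    dates = pd.bdate_range("2014-01-01", periods=1500)
    steps = np.random.default_rng(2).normal(0, 0.01, len(dates)).cumsum()
    spread = pd.Series(350 * np.exp(steps), index=dates)

    widen, tighten = [], []
    for days in (22, 66, 132):                 # ~1M / 3M / 6M in business days
        moves = spread.pct_change(freq=f"{days}B")
        widen.append(moves.max())              # worst widening over the window
        tighten.append(moves.min())            # worst tightening
    print(pd.DataFrame([widen, tighten], columns=["1M", "3M", "6M"],
                       index=["widen", "tighten"]))
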
diff --git a/python/notebooks/Single Names Monitoring.ipynb b/python/notebooks/Single Names Monitoring.ipynb
index 0c32e617..62a1383a 100644
--- a/python/notebooks/Single Names Monitoring.ipynb
+++ b/python/notebooks/Single Names Monitoring.ipynb
@@ -23,7 +23,7 @@
"outputs": [],
"source": [
"w = widgets.Dropdown(\n",
- " options=['IG', 'HY', 'EU'],\n",
+ " options=['IG', 'HY', 'EU', 'XO'],\n",
" value='IG',\n",
" description='Index:',\n",
" disabled=False,\n",
@@ -37,7 +37,7 @@
"metadata": {},
"outputs": [],
"source": [
- "w_1 = widgets.IntSlider(value=30, min=22, max=30, description = 'Series')\n",
+ "w_1 = widgets.IntSlider(value=31, min=22, max=31, description = 'Series')\n",
"w_1"
]
},
@@ -47,7 +47,7 @@
"metadata": {},
"outputs": [],
"source": [
- "trade_date = (pd.datetime.today() - pd.offsets.BDay(1)).date()\n",
+ "value_date = (pd.datetime.today() - pd.offsets.BDay(2)).date()\n",
"index_type = w.value\n",
"series = w_1.value"
]
@@ -59,8 +59,8 @@
"outputs": [],
"source": [
"sql_string = \"select * from index_members(%s, %s)\"\n",
- "df = pd.read_sql_query(sql_string, engine, params=(index_type + str(series), trade_date), index_col=['markit_ticker'])\n",
- "df1 = pd.read_sql_query(sql_string, engine, params=(index_type + str(series-1), trade_date), index_col=['markit_ticker'])"
+ "df = pd.read_sql_query(sql_string, engine, params=(index_type + str(series), value_date), index_col=['markit_ticker'])\n",
+ "df1 = pd.read_sql_query(sql_string, engine, params=(index_type + str(series-2), value_date), index_col=['markit_ticker'])"
]
},
{
@@ -89,7 +89,7 @@
"metadata": {},
"outputs": [],
"source": [
- "date_range = pd.bdate_range(trade_date - 52 * pd.offsets.Week(), trade_date, freq='5B')\n",
+ "date_range = pd.bdate_range(value_date - 52 * pd.offsets.Week(), value_date, freq='5B')\n",
"index = MarkitBasketIndex(index_type, series, ['5yr'])\n",
"default_prob = {}\n",
"maturity = np.array([np.datetime64(index.maturities[0]).view('int') + 134774])\n",
@@ -107,10 +107,13 @@
"outputs": [],
"source": [
"#Top 20 highest cumulative\n",
- "top20 = default_prob.unstack(-1)[default_prob[trade_date].nlargest(20).index]\n",
+ "top20 = default_prob.unstack(-1)[default_prob[value_date].nlargest(20).index]\n",
"top20.index.name='date'\n",
"top20.columns.name='tickers'\n",
- "top20.plot(title=f'market implied default probabilities to {index.maturities[0]}')"
+ "ax = top20.plot(title=f'market implied default probabilities to {index.maturities[0]}', figsize=(10,6))\n",
+ "ax.legend(loc='upper center', bbox_to_anchor=(1.3, 1), ncol=1)\n",
+ "ax.set(xlabel='date', ylabel='probability')\n",
+ "plt.tight_layout()"
]
},
{
@@ -118,16 +121,7 @@
"execution_count": null,
"metadata": {},
"outputs": [],
- "source": [
- "title = 'market implied default probabilities to ' + str(temp.maturities[0])\n",
- "fig, ax = plt.subplots()\n",
- "ax.plot(dp_top.T)\n",
- "ax.set(xlabel='date', ylabel='probability',\n",
- " title=title)\n",
- "ax.legend(dp_top.index, loc='upper center', bbox_to_anchor=(1.3, 1), ncol=1)\n",
- "fig.set_size_inches(10, 6)\n",
- "fig.tight_layout()"
- ]
+ "source": []
}
],
"metadata": {
@@ -146,7 +140,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.6.5"
+ "version": "3.7.0"
}
},
"nbformat": 4,
diff --git a/python/notebooks/VaR.ipynb b/python/notebooks/VaR.ipynb
index 3bb3e83d..178d72b3 100644
--- a/python/notebooks/VaR.ipynb
+++ b/python/notebooks/VaR.ipynb
@@ -10,13 +10,16 @@
"from analytics.index_data import get_index_quotes\n",
"from analytics.scenarios import run_portfolio_scenarios\n",
"from analytics import Swaption, BlackSwaption, CreditIndex, BlackSwaptionVolSurface, Portfolio, ProbSurface, DualCorrTranche\n",
- "from db import dbconn\n",
+ "from db import dbconn, dbengine\n",
"\n",
"import datetime\n",
"import exploration.VaR as var\n",
"import pandas as pd\n",
+ "import numpy as np\n",
"\n",
- "conn = dbconn('dawndb')"
+ "conn = dbconn('dawndb')\n",
+ "dawndb = dbengine('dawndb')\n",
+ "serenitasdb = dbengine('serenitasdb')"
]
},
{
@@ -25,7 +28,7 @@
"metadata": {},
"outputs": [],
"source": [
- "date = (datetime.date.today() - pd.tseries.offsets.BDay(1)).date()\n",
+ "date = (datetime.date.today() - pd.tseries.offsets.BDay(3)).date()\n",
"report_date = (date + pd.tseries.offsets.BMonthEnd(-1)).date()\n",
"index_type = \"IG\"\n",
"quantile = .025"
@@ -63,7 +66,8 @@
"outputs": [],
"source": [
"#Mortgage Hedge VaR - use IG spread relative move for VaR\n",
- "df = var.get_pos(date, 'HEDGE_MBS')\n",
+ "df = pd.read_sql_query(\"SELECT * from list_cds_marks_by_strat(%s) where strategy ='HEDGE_MBS'\",\n",
+ " dawndb, params=(date,))\n",
"portf = Portfolio([CreditIndex(row.p_index, row.p_series, row.tenor,\n",
" report_date, -row.notional)\n",
" for row in df[['p_index', 'tenor', 'p_series', 'notional']].\n",
@@ -136,7 +140,6 @@
" \"FROM cds WHERE swap_type='CD_INDEX' AND termination_cp IS null \"\n",
" \"AND folder = 'IGOPTDEL' OR folder = 'HYOPTDEL' \"\n",
" \"AND trade_date <= %s\")\n",
- "conn = dbconn('dawndb')\n",
"with conn.cursor() as c:\n",
" #Get Tranche Trade Ids\n",
" c.execute(t_sql_string, (date,))\n",
@@ -158,14 +161,18 @@
" portf.add_trade(CreditIndex.from_tradeid(index_id), 'index_' + str(index_id))\n",
" \n",
"#Update manually - positive notional = long risk\n",
- "non_trancheSwap_risk_notional = 33763230\n",
- "portf.add_trade(CreditIndex('HY', on_the_run('HY'), '5yr', value_date = date, notional = -non_trancheSwap_risk_notional), 'port')\n",
+ "non_trancheSwap_risk_notional = 49119912 \n",
+ "\n",
+ "portf.add_trade(CreditIndex('HY', on_the_run('HY'), '5yr', value_date = date, notional = -non_trancheSwap_risk_notional), 'bond')\n",
" \n",
"portf.value_date = date\n",
- "portf.mark()\n",
+ "portf.mark(interp_method=\"bivariate_spline\")\n",
"portf.reset_pv()\n",
" \n",
- "vs = BlackSwaptionVolSurface(portf.swaptions[0].index.index_type, portf.swaptions[0].index.series, value_date=date)\n",
+ "vs = BlackSwaptionVolSurface(portf.swaptions[0].index.index_type, \n",
+ " portf.swaptions[0].index.series, \n",
+ " value_date=date, \n",
+ " interp_method = \"bivariate_spline\")\n",
"vol_surface = vs[vs.list(option_type='payer')[-1]]\n",
"vol_shock = [0]\n",
"corr_shock = [0]\n",
@@ -187,8 +194,50 @@
"metadata": {},
"outputs": [],
"source": [
- "var.cleared_cds_margins(report_date)"
+ "spread_shock = np.arange(-.4, 2.2, .2)\n",
+ "scens = run_portfolio_scenarios(portf, date_range, params=[\"pnl\"],\n",
+ " spread_shock=spread_shock,\n",
+ " vol_shock=vol_shock,\n",
+ " corr_shock=corr_shock,\n",
+ " vol_surface=vol_surface)\n",
+ "scens.sum(axis=1)\n",
+ "\n",
+ "risk_notional = [t.notional * t._index.duration for t in portf.indices]\n",
+ "portf.trades[0]._index.duration()"
]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Calculate the margins for cleared CDS required for each strategy\n",
+ "df = pd.read_sql_query(\"SELECT * from list_cds_marks_by_strat(%s)\",\n",
+ " dawndb, params=(date,))\n",
+ "percentile = .95 #monthly 90%tile case.\n",
+ "shocks, widen, tighten, onTR_dur, onTR_spread = {}, {}, {}, {}, {}\n",
+ "for ind in ['IG', 'HY', 'EU']:\n",
+ " shocks[ind], onTR_spread[ind], onTR_dur[ind] = rel_spread_diff(date, index=ind)\n",
+ " widen[ind] = shocks[ind].quantile(percentile)\n",
+ " tighten[ind] = shocks[ind].quantile(1-percentile)\n",
+ "\n",
+ "df['onTR_notional'] = df.apply(lambda df:\n",
+ " df.notional * df.factor * df.duration / onTR_dur[df.p_index], axis=1)\n",
+ "df['widen'] = df.apply(lambda df:\n",
+ " df.onTR_notional * onTR_spread[df.p_index] * onTR_dur[df.p_index] * widen[df.p_index]/10000, axis=1)\n",
+ "df['tighten'] = df.apply(lambda df:\n",
+ " df.onTR_notional * onTR_spread[df.p_index] * onTR_dur[df.p_index] * tighten[df.p_index]/10000, axis=1)\n",
+ "delta_alloc = df.groupby('strategy').sum()\n",
+ "delta_alloc['total'] = delta_alloc.apply(lambda df: max(abs(df.widen), abs(df.tighten)), axis=1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
}
],
"metadata": {
@@ -207,7 +256,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.7.0"
+ "version": "3.7.1"
}
},
"nbformat": 4,
diff --git a/python/notebooks/tranche and swaption portfolio strategy.ipynb b/python/notebooks/tranche and swaption portfolio strategy.ipynb
index 1b0e0a08..c0355fb0 100644
--- a/python/notebooks/tranche and swaption portfolio strategy.ipynb
+++ b/python/notebooks/tranche and swaption portfolio strategy.ipynb
@@ -18,7 +18,7 @@
"from datetime import date\n",
"from graphics import plot_color_map\n",
"\n",
- "value_date = (pd.datetime.today() - pd.offsets.BDay(2)).date()"
+ "value_date = (pd.datetime.today() - pd.offsets.BDay(1)).date()"
]
},
{
@@ -27,7 +27,50 @@
"metadata": {},
"outputs": [],
"source": [
- "#Construct IG Swaption Portfolio\n",
+ "def color_plots(portf, scens, options_names):\n",
+ " sort_order = [True, False]\n",
+ " scens_pnl = scens.xs('pnl', axis=1, level=1)\n",
+ " just_spread = scens_pnl.xs((0,0), level=['corr_shock', 'vol_shock'])\n",
+ " combined = just_spread.sum(axis=1)\n",
+ " combined.name = 'combined_pnl'\n",
+ " plot_color_map(combined, sort_order)\n",
+ "\n",
+ " swaptions_only = just_spread[options_names].sum(axis=1)\n",
+ " swaptions_only.name = 'swaptions_pnl'\n",
+ " plot_color_map(swaptions_only, sort_order)\n",
+ "\n",
+ " tranches_only = just_spread[[x for x in portf.trade_ids if x not in options_names]].sum(axis=1)\n",
+ " tranches_only.name = 'tranches_pnl'\n",
+ " plot_color_map(tranches_only, sort_order)\n",
+ "\n",
+ " #Plot delta, swaption delta is in protection terms: switch to risk terms\n",
+ " sort_order = [True, False]\n",
+ " scens_delta = scens.xs('delta', axis=1, level=1)\n",
+ " scens_delta = scens_delta.mul(pd.Series(portf.notionals))\n",
+ " if 'delta' in portf.trade_ids:\n",
+ " scens_delta['delta'] = portf.notionals['delta']\n",
+ " scens_delta = scens_delta.xs((0,0), level=['corr_shock', 'vol_shock'])\n",
+ "\n",
+ " combined = scens_delta.sum(axis=1)\n",
+ " combined.name = 'Combined Delta'\n",
+ " plot_color_map(combined, sort_order)\n",
+ "\n",
+ " swaptions_only = scens_delta[options_names].sum(axis=1)\n",
+ " swaptions_only.name = 'Swaptions Only Delta'\n",
+ " plot_color_map(swaptions_only, sort_order)\n",
+ "\n",
+ " tranches_only = scens_delta[[x for x in portf.trade_ids if x not in options_names]].sum(axis=1)\n",
+ " tranches_only.name = 'Tranches Only Delta'\n",
+ " plot_color_map(tranches_only, sort_order)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Package 1\n",
"index = 'IG'\n",
"series = 30\n",
"option_delta = CreditIndex(index, series, '5yr', value_date=value_date)\n",
@@ -42,22 +85,17 @@
"option2.notional = 300_000_000\n",
"option_delta.notional = option1.notional * option1.delta + option2.notional * option2.delta\n",
"\n",
- "#Get current Tranche positions\n",
- "sql_string = (\"SELECT id, sum(notional * case when protection='Buyer' then -1 else 1 end) \"\n",
- " \"OVER (partition by security_id, attach) AS ntl_agg \"\n",
- " \"FROM cds WHERE swap_type='CD_INDEX_TRANCHE' AND termination_cp IS NULL\")\n",
- "conn = dbconn('dawndb')\n",
- "with conn.cursor() as c:\n",
- " c.execute(sql_string)\n",
- " trade_ids = [dealid for dealid, ntl in c if ntl != 0]\n",
- "portf = Portfolio([DualCorrTranche.from_tradeid(dealid) for dealid in trade_ids],\n",
- " trade_ids)\n",
+ "equity = DualCorrTranche('IG', 29, '5yr', attach=0, detach=3, corr_attach=np.nan, \n",
+ " corr_detach=.35, tranche_running=100, notional=-40000000, use_trunc=True)\n",
+ "mezz = DualCorrTranche('IG', 29, '5yr', attach=7, detach=15, corr_attach=.45, \n",
+ " corr_detach=.55, tranche_running=100, notional=240000000, use_trunc=True)\n",
+ "portf = Portfolio([equity, mezz], ['equity', 'mezz'])\n",
"portf.trades.extend([option1, option2, option_delta])\n",
"portf.trade_ids.extend(['opt1', 'opt2', 'delta'])\n",
"\n",
- "spread_shock = np.arange(-.3, 1.1, .1)\n",
+ "spread_shock = np.round(np.arange(-.3, 1, .1),2)\n",
"corr_shock = np.arange(0, .1, 0.1)\n",
- "vol_shock = np.arange(-.1, .3, 0.1)\n",
+ "vol_shock = np.arange(0, 0.1, 0.1)\n",
"earliest_expiry = min(portf.swaptions, key=lambda x: x.exercise_date).exercise_date\n",
"date_range = pd.date_range(value_date, earliest_expiry, periods=5)\n",
"vs = BlackSwaptionVolSurface(index, series, value_date=value_date)\n",
@@ -67,12 +105,14 @@
"portf.mark()\n",
"portf.reset_pv()\n",
"\n",
- "scens = run_portfolio_scenarios(portf, date_range, params=[\"pnl\"],\n",
+ "scens = run_portfolio_scenarios(portf, date_range, params=[\"pnl\", 'delta'],\n",
" spread_shock=spread_shock,\n",
" corr_shock=corr_shock,\n",
" vol_shock=vol_shock,\n",
" vol_surface=vol_surface)\n",
- "scens = round(scens,2)"
+ "scens = round(scens,2)\n",
+ "\n",
+ "color_plots(portf, scens, ['opt1', 'opt2', 'delta'])"
]
},
{
@@ -81,11 +121,41 @@
"metadata": {},
"outputs": [],
"source": [
- "sort_order = [True, False]\n",
- "output = scens.xs((0,0), level=['corr_shock', 'vol_shock']).sum(axis=1)\n",
- "(1+output.index.get_level_values(1)) * portf.swaptions[0].ref\n",
- "output.name = 'pnl'\n",
- "plot_color_map(output, sort_order)"
+ "#simple IG package: sell OTM swaption vs. short 3-7 delta neutral at start\n",
+ "index = 'IG'\n",
+ "series = 30\n",
+ "option_delta = CreditIndex(index, series, '5yr', value_date=value_date)\n",
+ "option_delta.spread = 60\n",
+ "option2 = BlackSwaption(option_delta, date(2018, 11, 21), 85, option_type=\"payer\")\n",
+ "option2.sigma = .588\n",
+ "option2.direction = 'Short'\n",
+ "option2.notional = 500_000_000\n",
+ "option_delta.notional = 1\n",
+ "\n",
+ "mezz = DualCorrTranche('IG', 29, '5yr', attach=7, detach=15, corr_attach=.45, \n",
+ " corr_detach=.55, tranche_running=100, notional=50000000, use_trunc=True)\n",
+ "portf = Portfolio([option2, mezz], ['opt2', 'mezz'])\n",
+ "\n",
+ "spread_shock = np.round(np.arange(-.3, 1, .1),2)\n",
+ "corr_shock = np.arange(0, .1, 0.1)\n",
+ "vol_shock = np.arange(0, 0.1, 0.1)\n",
+ "earliest_expiry = min(portf.swaptions, key=lambda x: x.exercise_date).exercise_date\n",
+ "date_range = pd.date_range(value_date, earliest_expiry, periods=5)\n",
+ "vs = BlackSwaptionVolSurface(index, series, value_date=value_date)\n",
+ "ps = ProbSurface(index, series, value_date=value_date)\n",
+ "vol_surface = vs[vs.list(option_type='payer')[-1]]\n",
+ "portf.value_date = value_date\n",
+ "portf.mark()\n",
+ "portf.reset_pv()\n",
+ "\n",
+ "scens = run_portfolio_scenarios(portf, date_range, params=[\"pnl\", 'delta'],\n",
+ " spread_shock=spread_shock,\n",
+ " corr_shock=corr_shock,\n",
+ " vol_shock=vol_shock,\n",
+ " vol_surface=vol_surface)\n",
+ "scens = round(scens,2)\n",
+ "\n",
+ "color_plots(portf, scens, ['opt2'])"
]
},
{
@@ -94,6 +164,48 @@
"metadata": {},
"outputs": [],
"source": [
+ "#simple HY package: sell OTM swaption vs. short 3-7 delta neutral at start\n",
+ "index = 'HY'\n",
+ "series = 30\n",
+ "option_delta = CreditIndex(index, series, '5yr', value_date=value_date)\n",
+ "option_delta.price = 106.75\n",
+ "option2 = BlackSwaption(option_delta, date(2018, 11, 21), 102, option_type=\"payer\")\n",
+ "option2.sigma = .469\n",
+ "option2.direction = 'Short'\n",
+ "option2.notional = 200_000_000\n",
+ "\n",
+ "mezz = DualCorrTranche('HY', 29, '5yr', attach=15, detach=25, corr_attach=.35, \n",
+ " corr_detach=.45, tranche_running=100, notional=16000000, use_trunc=True)\n",
+ "portf = Portfolio([option2, mezz], ['opt2', 'mezz'])\n",
+ "\n",
+ "spread_shock = np.round(np.arange(-.3, 1, .1),2)\n",
+ "corr_shock = np.arange(0, .1, 0.1)\n",
+ "vol_shock = np.arange(0, 0.1, 0.1)\n",
+ "earliest_expiry = min(portf.swaptions, key=lambda x: x.exercise_date).exercise_date\n",
+ "date_range = pd.date_range(value_date, earliest_expiry, periods=5)\n",
+ "vs = BlackSwaptionVolSurface(index, series, value_date=value_date)\n",
+ "ps = ProbSurface(index, series, value_date=value_date)\n",
+ "vol_surface = vs[vs.list(option_type='payer')[-1]]\n",
+ "portf.value_date = value_date\n",
+ "portf.mark()\n",
+ "portf.reset_pv()\n",
+ "\n",
+ "scens = run_portfolio_scenarios(portf, date_range, params=[\"pnl\", 'delta'],\n",
+ " spread_shock=spread_shock,\n",
+ " corr_shock=corr_shock,\n",
+ " vol_shock=vol_shock,\n",
+ " vol_surface=vol_surface)\n",
+ "scens = round(scens,2)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "(1+output.index.get_level_values(1)) * portf.swaptions[0].ref\n",
+ "\n",
"#negative notional == sell protection\n",
"hy_tranche = DualCorrTranche('HY', 29, '5yr', attach=0, detach=15, corr_attach=np.nan, \n",
" corr_detach=.35, tranche_running=500, notional=-10000000)\n",
@@ -112,15 +224,60 @@
"metadata": {},
"outputs": [],
"source": [
+ "#Current tranche and swaptions positions\n",
+ "t_sql_string = (\"SELECT id, sum(notional * case when protection='Buyer' then -1 else 1 end) \"\n",
+ " \"OVER (partition by security_id, attach) AS ntl_agg \"\n",
+ " \"FROM cds WHERE swap_type='CD_INDEX_TRANCHE' AND termination_cp IS NULL\")\n",
+ "swaption_sql_string = (\"select id, security_desc from swaptions where date(expiration_date) \"\n",
+ " \"> %s and swap_type = 'CD_INDEX_OPTION'\")\n",
+ "index_sql_string = (\"SELECT id, sum(notional * case when protection='Buyer' then -1 else 1 end) \"\n",
+ " \"OVER (partition by security_id, attach) AS ntl_agg \"\n",
+ " \"FROM cds WHERE swap_type='CD_INDEX' AND termination_cp IS null and folder = 'IGOPTDEL'\")\n",
+ "conn = dbconn('dawndb')\n",
+ "with conn.cursor() as c:\n",
+ " c.execute(t_sql_string)\n",
+ " t_trade_ids = [dealid for dealid, ntl in c if ntl != 0]\n",
+ " c.execute(swaption_sql_string, (value_date,))\n",
+ " swaption_trades = c.fetchall()\n",
+ " c.execute(index_sql_string)\n",
+ " index_trade_ids = [dealid for dealid, ntl in c if ntl != 0]\n",
+ " \n",
+ "portf = Portfolio([DualCorrTranche.from_tradeid(dealid) for dealid in t_trade_ids],\n",
+ " t_trade_ids)\n",
+ "for row in swaption_trades:\n",
+ " option_delta = CreditIndex(row[1].split()[1], row[1].split()[3][1:], '5yr', value_date)\n",
+ " option_delta.mark()\n",
+ " portf.add_trade(BlackSwaption.from_tradeid(row[0], option_delta), 'opt_' + str(row[0]))\n",
+ "for index_id in index_trade_ids:\n",
+ " portf.add_trade(CreditIndex.from_tradeid(index_id), 'index_' + str(index_id))\n",
+ " \n",
+ "spread_shock = np.round(np.arange(-.3, 1, .1),2)\n",
+ "corr_shock = np.arange(0, .1, 0.1)\n",
+ "vol_shock = np.arange(0, 0.1, 0.1)\n",
+ "#date_range = pd.date_range(value_date, date(2018,12,31), periods=5)\n",
+ "earliest_expiry = min(portf.swaptions, key=lambda x: x.exercise_date).exercise_date\n",
+ "date_range = pd.date_range(value_date, earliest_expiry, periods=5)\n",
+ "portf.swaptions[0]\n",
+ "vs = BlackSwaptionVolSurface(portf.swaptions[0].index.index_type, portf.swaptions[0].index.series, value_date=value_date)\n",
+ "vol_surface = vs[vs.list(option_type='payer')[-1]]\n",
+ "portf.value_date = value_date\n",
+ "portf.mark()\n",
+ "portf.reset_pv()\n",
"\n",
- "##\n",
- "scens_more = run_portfolio_scenarios(portf, date_range, params=['pnl', 'delta'],\n",
+ "scens = run_portfolio_scenarios(portf, date_range, params=[\"pnl\", 'delta'],\n",
" spread_shock=spread_shock,\n",
" corr_shock=corr_shock,\n",
" vol_shock=vol_shock,\n",
" vol_surface=vol_surface)\n",
- "#swaption delta is in protection terms: switch to risk terms\n",
- "swaption_scens.delta = -swaption_scens.delta"
+ "\n",
+ "color_plots(portf, scens, ['opt_20', 'opt_21', 'index_954'])\n",
+ "\n",
+ "#sort_order = [True, False]\n",
+ "#scens_pnl = scens.xs('pnl', axis=1, level=1)\n",
+ "#just_spread = scens_pnl.xs(0, level=1)\n",
+ "#combined = just_spread.sum(axis=1)\n",
+ "#combined.name = 'tranche_pnl'\n",
+ "#plot_color_map(combined, sort_order)"
]
},
{
@@ -263,7 +420,7 @@
"swaption_scens.delta = -swaption_scens.delta\n",
"\n",
"notional = 30_000_000_000\n",
- "t = bkt.TrancheBasket('IG', '29', '3yr')\n",
+ "t = bkt.TrancheBasket('IG', '29', '5yr')\n",
"t.build_skew()\n",
"#get back to 17bps, .36 delta\n",
"port_spread = 67\n",
@@ -280,70 +437,28 @@
"metadata": {},
"outputs": [],
"source": [
- "#IG Bullish Risk Reversal vs. shorting IG 7-15 risk\n",
"index = 'IG'\n",
- "series = 30\n",
- "option_delta = Index.from_name(index, series, '5yr', value_date)\n",
- "option_delta.spread = 62\n",
- "option1 = BlackSwaption(option_delta, date(2018, 9, 19), 60, option_type=\"receiver\")\n",
- "option2 = BlackSwaption(option_delta, date(2018, 9, 19), 90, option_type=\"payer\")\n",
- "option1.sigma = .344\n",
- "option2.sigma = .585\n",
- "option1.notional = 200_000_000\n",
- "option2.notional = 400_000_000\n",
- "option1.direction = 'Long'\n",
- "option2.direction = 'Short'\n",
- "option_delta.notional = 1\n",
- "option_delta.direction = 'Seller' if option_delta.notional > 0 else 'Buyer'\n",
- "option_delta.notional = abs(option_delta.notional)\n",
- "portf = Portfolio([option1, option2, option_delta])\n",
- "#Plot Scenarios Inputs: Portfolio, spread shock tightening%, spread shock widening%, snapshot period)\n",
- "portf\n",
+ "series = 29\n",
+ "ss = DualCorrTranche('IG', 29, '5yr', attach=15, detach=100, corr_attach=.59685, \n",
+ " corr_detach=.7, tranche_running=100, notional=-230000000)\n",
+ "mezz = DualCorrTranche('IG', 29, '5yr', attach=7, detach=15, corr_attach=.46984, \n",
+ " corr_detach=.59685, tranche_running=100, notional=50000000)\n",
+ "portf = Portfolio([ss, mezz], ['ss', 'mezz'])\n",
"\n",
+ "spread_shock = np.round(np.arange(-.3, 1, .1), 2)\n",
+ "corr_shock = np.arange(0, .1, 0.1)\n",
+ "vol_shock = np.arange(0, 0.1, 0.1)\n",
+ "date_range = pd.date_range(value_date, date(2018,12,31), periods=5)\n",
+ "#portf.value_date = value_date\n",
+ "#portf.mark()\n",
"portf.reset_pv()\n",
- "#Run Swaption sensitivities\n",
- "#Set Shock range\n",
- "shock_min = -.5\n",
- "shock_max = 1\n",
- "spread_shock = np.arange(shock_min, shock_max, 0.1)\n",
- "#Set Date range\n",
- "earliest_expiry = min(portf.swaptions, key=lambda x: x.exercise_date).exercise_date\n",
- "date_range = pd.bdate_range(value_date, earliest_expiry - pd.offsets.BDay(), freq='10B')\n",
- "#Setup Vol Surface\n",
- "vs = BlackSwaptionVolSurface(index,series, value_date=value_date)\n",
- "ps = ProbSurface(index,series, value_date=value_date)\n",
- "vol_surface = vs[vs.list(option_type='payer')[-1]]\n",
- "swaption_scens = run_portfolio_scenarios(portf, date_range, spread_shock, np.array([0]),\n",
- " vol_surface, params=[\"pnl\", \"delta\"])\n",
- "#swaption delta is in protection terms: switch to risk terms\n",
- "swaption_scens.delta = -swaption_scens.delta\n",
- "\n",
- "notional = -100_000_000\n",
- "t = bkt.TrancheBasket('IG', '29', '5yr')\n",
- "t.build_skew()\n",
- "spread_range = (1+ spread_shock) * option_delta.spread\n",
- "tranches_scens = run_tranche_scenarios_rolldown(t, spread_range, date_range, corr_map=False)\n",
- "tranches_scens = notional*tranches_scens.xs('7-15', axis=1, level=1)\n",
"\n",
- "#Create snapshot of the the first scenario date\n",
- "total_scens = swaption_scens.reset_index().merge(tranches_scens.reset_index(), \n",
- " left_on=['date', 'spread'], \n",
- " right_on=['date', 'spread_range'], \n",
- " suffixes=['_s', '_t'])\n",
- "total_scens['pnl'] = total_scens['pnl_s'] + total_scens['pnl_t']\n",
- "total_scens['delta'] = total_scens['delta_s'] + total_scens['delta_t']\n",
- "total_scens_single_date = total_scens.set_index('date').xs(date_range[0])\n",
- "total_scens_single_date = total_scens_single_date.set_index('spread', drop=True)\n",
- "\n",
- "#tranche positions delta at different spreads\n",
- "ax = total_scens_single_date.delta_t.plot(title = 'delta vs. spread levels')\n",
- "ax.ticklabel_format(style='plain')\n",
- "plt.tight_layout()\n",
+ "scens = run_portfolio_scenarios(portf, date_range, params=[\"pnl\", 'delta'],\n",
+ " spread_shock=spread_shock,\n",
+ " corr_shock=corr_shock)\n",
+ "scens = round(scens,2)\n",
"\n",
- "#Tranche + Swaptions positions delta at different spreads\n",
- "ax1 = total_scens_single_date.delta.plot()\n",
- "ax1.ticklabel_format(style='plain')\n",
- "plt.tight_layout()"
+ "color_plots(portf, scens, ['opt1', 'opt2', 'delta'])"
]
},
{
@@ -379,7 +494,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.7.0"
+ "version": "3.7.1"
}
},
"nbformat": 4,
diff --git a/python/notebooks/tranches numbers.ipynb b/python/notebooks/tranches numbers.ipynb
index fc936b7a..d655f8ba 100644
--- a/python/notebooks/tranches numbers.ipynb
+++ b/python/notebooks/tranches numbers.ipynb
@@ -54,7 +54,7 @@
"outputs": [],
"source": [
"sql_string = '''SELECT a.*, b.trancheupfrontmid, b.indexrefspread, b.tranchedelta, b.trancherunningmid\n",
- " FROM risk_numbers_new a \n",
+ " FROM risk_numbers a \n",
" join tranche_quotes b on a.tranche_id = b.id\n",
" where a.index <> 'EU'\n",
" '''\n",
@@ -435,6 +435,74 @@
"execution_count": null,
"metadata": {},
"outputs": [],
+ "source": [
+ "#Back test tranche pair trades \n",
+ "sql_str = \"select * from markit_tranche_quotes a left join \" \\\n",
+ " \"(select index, series, version, basketid from index_version) b \" \\\n",
+ " \"using (basketid) order by quotedate asc\"\n",
+ "index_columns=['index', 'series', 'version', 'tenor', 'attach', 'detach']\n",
+ "df = pd.read_sql_query(sql_str, engine, parse_dates=['quotedate'], index_col=index_columns)\n",
+ "df['day_frac'] = (df.groupby(index_columns)['quotedate'].\n",
+ " transform(lambda s: s.\n",
+ " diff().astype('timedelta64[D]') / 360))\n",
+ "df['close_price'] = 1-df['upfront_mid']\n",
+ "df = df.set_index('quotedate', append=True)\n",
+ "df['price_return'] = df['close_price'].groupby(level=index_columns).diff()\n",
+ "df['price_return'] += df.day_frac * df.tranche_spread/10000\n",
+ "df = df.drop(['basketid', 'upfront_bid', 'upfront_ask', 'upfront_mid', 'index_price', 'day_frac', 'tranche_spread', 'close_price'], axis=1)\n",
+ "df = df.dropna()\n",
+ "#Focus on IG\n",
+ "ig_tranches = df.xs(('IG', '5yr', 23), level = ['index', 'tenor', 'series'])\n",
+ "ig_tranches = ig_tranches.reset_index(['version', 'detach'], drop=True)\n",
+ "ig_tranches = ig_tranches.unstack(level='attach')\n",
+ "ig_tranches.columns = ig_tranches.columns.droplevel()\n",
+ "\n",
+ "#carry strat = long 15-100 and short 7-15: 4.6 by 1, 50bps IA\n",
+ "carrystrat = (4.6 * ig_tranches[15] - ig_tranches[7])/.05\n",
+ "cum_return = (carrystrat+1).cumprod()\n",
+ "cum_return.plot()\n",
+ "#equity gamma strat = long 0-3 and short 7-15: 1 by 6, 12% IA\n",
+ "equitygammastrat = (1 * ig_tranches[0] - 6 * ig_tranches[7])/.12\n",
+ "cum_return = (equitygammastrat+1).cumprod()\n",
+ "cum_return.plot()\n",
+ "#mezz gamma strat = long 3-7 and short 7-15: 1 by 2.75, 2.5% IA\n",
+ "mezzgammastrat =(1 * ig_tranches[3] - 2.75 * ig_tranches[7])/.025\n",
+ "cum_return = (mezzgammastrat+1).cumprod()\n",
+ "cum_return.plot()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "r=[]\n",
+ "r.append(carrystrat.mean()/carrystrat.std() * math.sqrt(252))\n",
+ "r.append(equitygammastrat.mean()/equitygammastrat.std() * math.sqrt(252))\n",
+ "r.append(mezzgammastrat.mean()/mezzgammastrat.std() * math.sqrt(252))\n",
+ "r"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
"source": []
}
],
@@ -473,7 +541,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.6.6"
+ "version": "3.7.1"
}
},
"nbformat": 4,