aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--python/exploration/VaR.py113
-rw-r--r--python/notebooks/Risk Management.ipynb104
-rw-r--r--python/notebooks/VaR.ipynb151
3 files changed, 235 insertions, 133 deletions
diff --git a/python/exploration/VaR.py b/python/exploration/VaR.py
index cf47c71c..1e6a02a3 100644
--- a/python/exploration/VaR.py
+++ b/python/exploration/VaR.py
@@ -1,34 +1,89 @@
-import datetime
-from analytics.curve_trades import curve_pos, on_the_run
-from analytics.index_data import index_returns
+from analytics.curve_trades import on_the_run
+from analytics.index_data import index_returns, get_index_quotes
+from analytics import Portfolio
+from db import dbengine, dbconn
+from pandas.tseries.offsets import BDay, BMonthEnd
+from copy import deepcopy
+
import numpy as np
import pandas as pd
+import math
+import datetime
+
+dawndb = dbengine('dawndb')
+serenitasdb = dbengine('serenitasdb')
+
+def hist_var(portf, index_type='IG', quantile=.05, years=5):
+ df = index_returns(index=index_type, years=years,
+ tenor=['3yr', '5yr', '7yr', '10yr'])
+ df = (df.reset_index(['index'], drop=True).
+ reorder_levels(['date', 'series', 'tenor']))
+ returns = df.spread_return.dropna().reset_index('series')
+ returns['dist_on_the_run'] = (returns.
+ groupby('date')['series'].
+ transform(lambda x: x.max() - x))
+ del returns['series']
+ returns = returns.set_index('dist_on_the_run', append=True).unstack('tenor')
+ returns.columns = returns.columns.droplevel(0)
+ portf.reset_pv()
+
+ otr = on_the_run(index_type)
+ spreads = pd.DataFrame({'spread': portf.spread,
+ 'tenor': [ind.tenor for ind in portf.indices],
+ 'dist_on_the_run': [otr - ind.series for ind in portf.indices]})
+ spreads = spreads.set_index(['dist_on_the_run', 'tenor'])
+ r = []
+ for k, g in returns.groupby(level='date', as_index=False):
+ shocks = g.reset_index('date', drop=True).stack('tenor')
+ shocks.name = 'shocks'
+ portf.spread = spreads.spread * (1 + spreads.join(shocks).shocks)
+ r.append((k, portf.pnl))
+ pnl = pd.DataFrame.from_records(r, columns=['date', 'pnl'], index=['date'])
+ return pnl.quantile(quantile) * math.sqrt(12)
+
+def rel_spread_diff(report_date = datetime.date.today(), index='HY', rolling=10):
+ otr = on_the_run(index)
+ ## look at spreads
+ df = get_index_quotes(index, list(range(otr - rolling, otr + 1)),
+ tenor=['3yr', '5yr', '7yr', '10yr'], years=5)
+
+ duration = df.duration.xs((report_date,'5yr', otr), level = ['date','tenor', 'series'])[-1]
+
+ df = df.xs('5yr', level='tenor')['closespread'].groupby(['date']).last()
+ df = df.loc['2013-01-15':report_date]
+ curr_spread = df.iloc[-1]
+ df = df.pct_change(freq='22B').dropna()
+
+ return df.groupby('date').last(), curr_spread, duration
+
+def get_pos(report_date):
+ strats = ["HEDGE_CSO", "HEDGE_MBS", "HEDGE_CLO", "HEDGE_MAC",
+ "SER_IGINX", "SER_HYINX", "SER_IGCURVE", "SER_ITRXCURVE",
+ "IGOPTDEL", "HYOPTDEL"]
+ r = {}
+ for st in strats:
+ r[st] = pd.read_sql_query("SELECT * from list_cds_marks(%s, %s)",
+ dawndb, params=(report_date, st))
+ return pd.concat(r, names=['strategy']).reset_index()
+
+def cleared_cds_margins(report_date=datetime.date.today()):
-index_type = "IG"
-portf = curve_pos(datetime.date(2018, 5, 3), index_type)
+ df = get_pos(report_date)
-df = index_returns(index=index_type, years=5,
- tenor=['3yr', '5yr', '7yr', '10yr'])
-df = (df.reset_index(['index'], drop=True).
- reorder_levels(['date', 'series', 'tenor']))
-returns = df.spread_return.dropna().reset_index('series')
-returns['dist_on_the_run'] = (returns.
- groupby('date')['series'].
- transform(lambda x: x.max() - x))
-del returns['series']
-returns = returns.set_index('dist_on_the_run', append=True).unstack('tenor')
-returns.columns = returns.columns.droplevel(0)
-portf.reset_pv()
+ #Cap Allocation for Deltas
+ percentile = .95 #monthly 90%tile case...
+ shocks, widen, tighten, onTR_dur, onTR_spread = {}, {}, {}, {}, {}
+ for ind in ['IG', 'HY', 'EU']:
+ shocks[ind], onTR_spread[ind], onTR_dur[ind] = rel_spread_diff(report_date, index=ind)
+ widen[ind] = shocks[ind].quantile(percentile)
+ tighten[ind] = shocks[ind].quantile(1-percentile)
-otr = on_the_run(index_type)
-spreads = pd.DataFrame({'spread': portf.spread,
- 'tenor': [ind.tenor for ind in portf.indices],
- 'dist_on_the_run': [otr - ind.series for ind in portf.indices]})
-spreads = spreads.set_index(['dist_on_the_run', 'tenor'])
-r = []
-for k, g in returns.groupby(level='date', as_index=False):
- shocks = g.reset_index('date', drop=True).stack('tenor')
- shocks.name = 'shocks'
- portf.spread = spreads.spread * (1 + spreads.join(shocks).shocks)
- r.append((k, portf.pnl))
-pnl = pd.DataFrame.from_records(r, columns=['date', 'pnl'], index=['date'])
+ df['onTR_notional'] = df.apply(lambda df:
+ df.notional * df.factor * df.duration / onTR_dur[df.p_index], axis=1)
+ df['widen'] = df.apply(lambda df:
+ df.onTR_notional * onTR_spread[df.p_index] * onTR_dur[df.p_index] * widen[df.p_index]/10000, axis=1)
+ df['tighten'] = df.apply(lambda df:
+ df.onTR_notional * onTR_spread[df.p_index] * onTR_dur[df.p_index] * tighten[df.p_index]/10000, axis=1)
+ delta_alloc = df.groupby('strategy').sum()
+ delta_alloc['total'] = delta_alloc.apply(lambda df: max(abs(df.widen), abs(df.tighten)), axis=1)
+ return delta_alloc
diff --git a/python/notebooks/Risk Management.ipynb b/python/notebooks/Risk Management.ipynb
deleted file mode 100644
index 8a4a5f72..00000000
--- a/python/notebooks/Risk Management.ipynb
+++ /dev/null
@@ -1,104 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import portfolio_var as port\n",
- "from analytics import Swaption, BlackSwaption, Index, VolatilitySurface, Portfolio\n",
- "from analytics.scenarios import run_swaption_scenarios, run_index_scenarios, run_portfolio_scenarios\n",
- "import datetime\n",
- "import pandas as pd\n",
- "from pandas.tseries.offsets import BDay, BMonthEnd\n",
- "\n",
- "#import exploration.swaption_calendar_spread as spread\n",
- "import exploration.swaption_calendar_spread as spread"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "#Oct ME Bond HY Equiv\n",
- "report_date = (datetime.date.today() + BMonthEnd(-1)).date()\n",
- "bond_HY_equiv = -.12088\n",
- "percentile = .95"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "#The 95%tile \n",
- "df, spread, dur = port.rel_spread_diff(report_date)\n",
- "stress = pd.DataFrame()\n",
- "stress.at[('2SD_widen', 'spread')] = df.quantile(.975) \n",
- "stress.at[('2SD_tighten', 'spread')] = df.quantile(.025) \n",
- "stress.at[('worst_widen', 'spread')] = df.max()\n",
- "stress['pts'] = -stress * spread * dur/100\n",
- "stress['nav_impact'] = bond_HY_equiv * stress['pts']"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "stress"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "port.cleared_cds_margins(report_date, percentile)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "port.index_curve_margins(report_date)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.6.1"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/python/notebooks/VaR.ipynb b/python/notebooks/VaR.ipynb
new file mode 100644
index 00000000..fd48ded2
--- /dev/null
+++ b/python/notebooks/VaR.ipynb
@@ -0,0 +1,151 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from analytics.curve_trades import curve_pos\n",
+ "from analytics import Index, Portfolio\n",
+ "\n",
+ "import datetime\n",
+ "import exploration.VaR as var\n",
+ "import pandas as pd"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "date = (datetime.date.today() - pd.tseries.offsets.BDay(1)).date()\n",
+ "report_date = (date + pd.tseries.offsets.BMonthEnd(-1)).date()\n",
+ "index_type = \"IG\"\n",
+ "quantile = .025"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#IG Curve VaR\n",
+ "portf = curve_pos(date, index_type)\n",
+ "ig_curve_var = abs(var.hist_var(portf, quantile=quantile))\n",
+ "ig_curve_var"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#EU Curve VaR\n",
+ "index_type = \"EU\"\n",
+ "portf = curve_pos(date, index_type)\n",
+ "eu_curve_var = abs(var.hist_var(portf, quantile=quantile))\n",
+ "eu_curve_var"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Mortgage Hedge VaR - use IG spread relative move for VaR\n",
+ "df = var.get_pos(date)\n",
+ "df = df[df.strategy == 'HEDGE_MBS']\n",
+ "portf = Portfolio([Index.from_name(row.p_index, row.p_series, row.tenor,\n",
+ " report_date, -row.notional)\n",
+ " for row in df[['p_index', 'tenor', 'p_series', 'notional']].\n",
+ " itertuples(index=False)])\n",
+ "portf.mark()\n",
+ "mort_hedge_var = abs(var.hist_var(portf, index_type = \"IG\", quantile=quantile, years=3))\n",
+ "mort_hedge_var"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Import the IM at the FCM account: calculate the IM share of different strategies as a share of VaR\n",
+ "filename = date.strftime('%Y%m%d') + \"_OTC_MARGIN_EX_DEF.csv\"\n",
+ "margin_df = pd.read_csv(\"/home/serenitas/Daily/SG_reports/\" + filename, index_col='Currency')\n",
+    "mort_hedge_im = mort_hedge_var + mort_hedge_var/(mort_hedge_var + ig_curve_var) * margin_df.loc[('USD', 'SG IMR')]\n",
+    "mort_hedge_im"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Oct ME Bond HY Equiv\n",
+ "bond_HY_equiv = -.12088\n",
+ "percentile = .95"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#95%tile \n",
+ "df, spread, dur = var.rel_spread_diff(report_date)\n",
+ "stress = pd.DataFrame()\n",
+ "stress.at[('2SD_widen', 'spread')] = df.quantile(.975) \n",
+ "stress.at[('2SD_tighten', 'spread')] = df.quantile(.025) \n",
+ "stress.at[('worst_widen', 'spread')] = df.max()\n",
+ "stress['pts'] = -stress * spread * dur/100\n",
+ "stress['nav_impact'] = bond_HY_equiv * stress['pts']\n",
+ "stress"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "var.cleared_cds_margins(report_date)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.5"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}