 python/notebooks/Reto Report.ipynb             | 222
 python/notebooks/Single Names Monitoring.ipynb | 280
 2 files changed, 368 insertions(+), 134 deletions(-)
diff --git a/python/notebooks/Reto Report.ipynb b/python/notebooks/Reto Report.ipynb
index 6769a123..2e1eb626 100644
--- a/python/notebooks/Reto Report.ipynb
+++ b/python/notebooks/Reto Report.ipynb
@@ -16,6 +16,7 @@
"from analytics.index_data import get_index_quotes\n",
"from analytics.scenarios import run_portfolio_scenarios\n",
"from analytics import BlackSwaption, CreditIndex, BlackSwaptionVolSurface, Portfolio,DualCorrTranche\n",
+ "from copy import deepcopy\n",
"\n",
"from utils.db import dbconn, dbengine\n",
"\n",
@@ -99,6 +100,7 @@
"temp = pd.concat([paydowns, df.principal_payment, df.accrued_payment], axis=1).fillna(0)\n",
"turnover = (temp.sum(axis=1)/nav.begbooknav).rolling(12).sum()\n",
"turnover[12:].plot()\n",
+ "turnover.min(), turnover.max(), turnover.mean()\n",
"turnover[-1]"
]
},
@@ -108,11 +110,11 @@
"metadata": {},
"outputs": [],
"source": [
- "################################### Average Portfolio Sales Turnover - as of last monthend from today\n",
+ "################################### BRINKER: Average Portfolio Sales Turnover - as of last monthend from today\n",
"#(total Bond Sales Proceeds + paydown)/average starting 12 months NAV\n",
"#Actually: Rolling 12 months sum of (total bond sales proceeds + paydown)/monthly NAV\n",
"nav = go.get_net_navs()\n",
- "fund='SERCGMAST'\n",
+ "fund='BRINKER'\n",
"sql_string = \"SELECT * FROM bonds WHERE buysell IS False and fund = %s\"\n",
"df = pd.read_sql_query(sql_string, dawn_engine,\n",
" parse_dates={'lastupdate':{'utc':True}, 'trade_date': {}, 'settle_date':{}},\n",
@@ -138,15 +140,75 @@
"metadata": {},
"outputs": [],
"source": [
- "################################### Average Monthly Traded Volume\n",
+ "################################### Number of position (min/max/average) /position size (min/max/average) /Top 10 position size\n",
+ "portfolio = go.get_portfolio()\n",
"nav = go.get_net_navs()\n",
- "sql_string = \"SELECT * FROM bonds\"\n",
- "df = pd.read_sql_query(sql_string, dawn_engine,\n",
+ "exc_port_list = [None, 'SERCGLLC__SERCGLLC', 'CASH', 'SERCGLTD__SERCGLTD', 'GFS_HELPER_BUSINESS_UNIT', 'SER_TEST__SER_TEST']\n",
+ "exc_inst_list = ['CAD', 'CADF', 'SEREONUS', 'USD', 'USDF', 'USDLOAN', 'EUR', 'EURLOAN', 'USDCASHINT',\n",
+ " 'USDLOANOLD', 'USDSWAPFEE', 'EURF','CADCASHINT','COMMISSIONFEES', 'EURCASHINT', 'COMMUNICATIONFEES']\n",
+ "exc_inst_list2 = ['86359DUR6OLD2','004375DV0OLD4','32027GAD8OLD7','75406DAC7OLD7','86359DMN4OLD7','45661EAW4OLD7']\n",
+ "\n",
+ "portfolio = portfolio[~portfolio.port.isin(exc_port_list) &\n",
+ " ~portfolio.identifier.isin(exc_inst_list) &\n",
+ " ~portfolio.identifier.isin(exc_inst_list2)]\n",
+ "\n",
+ "all_positions = portfolio.groupby(['periodenddate', 'identifier'])['endbooknav'].sum() \n",
+ "num_pos = all_positions.groupby('periodenddate').count()\n",
+ "#min/max/mean number of positions\n",
+ "num_pos.min(), num_pos.max(), num_pos.mean()\n",
+ "\n",
+ "bonds = portfolio[(portfolio.custacctname == 'V0NSCLMAMB') &\n",
+ " (portfolio.identifier != 'USD') &\n",
+ " (portfolio.endqty != 0) &\n",
+ " (portfolio.port.isin(['MORTGAGES', 'STRUCTURED', 'CLO'])) &\n",
+ " (~portfolio.strat.isin(['MBSCDS']))]\n",
+ "\n",
+ "monthend_bonds = bonds.groupby(pd.Grouper(freq=\"M\"), group_keys=False).apply(\n",
+ " lambda df: df.loc[df.index[-1]]\n",
+ " )\n",
+ "monthend_bonds = monthend_bonds.groupby(['periodenddate', 'identifier']).sum()\n",
+ "nav.index.rename('periodenddate', inplace=True)\n",
+ "monthend_bonds = monthend_bonds.merge(nav, left_index=True, right_index=True, suffixes=('_bond', '_fund'))\n",
+ "monthend_bonds['percentage'] = monthend_bonds.endbooknav_bond/monthend_bonds.endbooknav_fund\n",
+ "last_date = monthend_bonds.index.get_level_values(0).max() \n",
+ "latest = monthend_bonds.loc[last_date]\n",
+ "#min/max/mean position size\n",
+ "latest['percentage'][latest['percentage']>0.0000001].min(), latest['percentage'].max(), latest['percentage'].mean()\n",
+ "#10th largest positions\n",
+ "ten_largest = monthend_bonds.groupby('periodenddate').apply(lambda df: df['percentage'].nlargest(10).sum())\n",
+ "ten_largest.min(), ten_largest.max(), ten_largest.mean()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "################################### Average Traded Volume\n",
+ "nav = go.get_net_navs()\n",
+ "sql_string = \"SELECT * FROM bonds where fund='SERCGMAST'\"\n",
+ "bond_trades = pd.read_sql_query(sql_string, dawn_engine,\n",
" parse_dates={'lastupdate':{'utc':True}, 'trade_date': {}, 'settle_date':{}},\n",
" index_col = 'trade_date')\n",
- "df = df.groupby(pd.Grouper(freq='M')).sum()\n",
- "volume = df.principal_payment/nav.endbooknav\n",
- "volume.mean()"
+ "g = bond_trades['principal_payment'].groupby(pd.Grouper(freq='M'))\n",
+ "#min/max/mean bond trades by count (weekly = /4)\n",
+ "g.count().min()/4, g.count().max()/4, g.count().mean()/4\n",
+ "#min/max/mean bond trades by MV (weekly = /4)\n",
+ "volume = g.sum()/nav.endbooknav\n",
+ "volume.min()/4, volume.max()/4, volume.mean()/4\n",
+ "\n",
+ "sql_string = \"SELECT * FROM cds where fund='SERCGMAST'\"\n",
+ "cds_trades = pd.read_sql_query(sql_string, dawn_engine,\n",
+ " parse_dates={'lastupdate':{'utc':True}, 'trade_date': {}, 'settle_date':{}},\n",
+ " index_col = 'trade_date')\n",
+ "g = cds_trades['notional'].groupby(pd.Grouper(freq='M'))\n",
+ "#min/max/mean cds trades by count\n",
+ "g.count().min()/4, g.count().max()/4, g.count().mean()/4\n",
+ "#min/max/mean cds trades by notional\n",
+ "volume = g.sum()/nav.endbooknav\n",
+ "volume.fillna(0, inplace=True)\n",
+ "volume.min(), volume.max()/4, volume.mean()/4"
]
},
{
@@ -159,7 +221,7 @@
"#Time series of bond portfolio age (portfolio date - latest buy date of position) - weighted by MV of all bonds.\n",
"#Problem is if we buy the same position again it resets to the holding period to 0\n",
"nav = go.get_net_navs()\n",
- "sql_string = \"SELECT * FROM bonds order by trade_date desc\"\n",
+ "sql_string = \"SELECT * FROM bonds where fund = 'SERCGMAST' order by trade_date desc\"\n",
"df = pd.read_sql_query(sql_string, dawn_engine,\n",
" parse_dates={'lastupdate':{'utc':True}, 'trade_date': {}, 'settle_date':{}},\n",
" index_col = 'trade_date')\n",
@@ -173,7 +235,9 @@
"buy_dates['hold_days'] = (buy_dates.index - buy_dates.buy_date)/np.timedelta64(1, 'D')\n",
"def weighted_average(df):\n",
" return np.average(df.hold_days,weights=df.endbooknav)\n",
- "hold_period = buy_dates.groupby('periodenddate').apply(func = weighted_average)"
+ "hold_period = buy_dates.groupby('periodenddate').apply(func = weighted_average)\n",
+ "hold_period_last_five = hold_period.loc[datetime.date.today()- datetime.timedelta(weeks=52*5)::]\n",
+ "hold_period_last_five.min(), hold_period_last_five.max(), hold_period_last_five.mean()"
]
},
{
@@ -208,11 +272,84 @@
"metadata": {},
"outputs": [],
"source": [
+ "################################## Leverage Ratio\n",
+ "nav = go.get_net_navs()\n",
+ "portfolio = go.get_portfolio()\n",
+ "monthend_portfolio = portfolio.groupby(pd.Grouper(freq=\"M\"), group_keys=False).apply(\n",
+ " lambda df: df.loc[df.index[-1]]\n",
+ " )\n",
+ "monthend_byinvid = monthend_portfolio.groupby(['periodenddate','invid']).sum()\n",
+ "positive = monthend_byinvid['endbooknav'].groupby(['periodenddate']).agg(lambda x: x[x>0].sum())\n",
+ "nav = nav.merge(positive, left_index=True, right_index=True)\n",
+ "nav['leverage'] = nav.endbooknav_y/nav.endbooknav_x\n",
+ "nav['leverage'].plot()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "##################################\n",
+ "def build_portf(position_date, spread_date):\n",
+ " conn = dawn_engine.raw_connection()\n",
+ " mysql_engine = dbengine('rmbs_model')\n",
+ " mysqlcrt_engine = dbengine('crt')\n",
+ "\n",
+ " portf = get_tranche_portfolio(position_date, conn, False, 'SERCGMAST')\n",
+ " s_portf = get_swaption_portfolio(position_date, conn)\n",
+ " for t, id in zip(s_portf.trades, s_portf.trade_ids):\n",
+ " portf.add_trade(t, id)\n",
+ "\n",
+ " #index positions\n",
+ " df = pd.read_sql_query(\"SELECT * from list_cds_positions_by_strat(%s)\",\n",
+ " dawn_engine, params=(position_date,))\n",
+ " df_no_curve = df[~df.folder.str.contains(\"CURVE\")]\n",
+ " for t in df_no_curve.itertuples(index=False):\n",
+ " portf.add_trade(CreditIndex(redcode=t.security_id, maturity=t.maturity, notional=t.notional),\n",
+ " (t.folder, t.security_desc))\n",
+ "\n",
+ " #separately add in curve delta\n",
+ " df_curve = df[df.folder.str.contains(\"CURVE\")]\n",
+ " curve_portf = Portfolio([CreditIndex(redcode=t.security_id, maturity=t.maturity, notional=t.notional)\n",
+ " for t in df_curve.itertuples(index=False)])\n",
+ " curve_portf.value_date = spread_date\n",
+ " curve_portf.mark()\n",
+ " \n",
+ " hyontr = deepcopy(analytics._ontr)\n",
+ " hyontr.notional = curve_portf.hy_equiv\n",
+ " portf.add_trade(hyontr, ('curve_trades', ''))\n",
+ "\n",
+ " #get bond risks:\n",
+ " with dbconn('etdb') as etconn, dbconn('dawndb') as dawnconn:\n",
+ " rmbs_pos = subprime_risk(position_date, dawnconn, mysql_engine)\n",
+ " clo_pos = clo_risk(position_date, dawnconn, etconn)\n",
+ " crt_pos = crt_risk(position_date, dawnconn, mysqlcrt_engine)\n",
+ " notional = 0\n",
+ " for pos in [rmbs_pos, clo_pos, crt_pos]:\n",
+ " notional += pos['hy_equiv'].sum() if pos is not None else 0\n",
+ " \n",
+ " hyontr_1 = deepcopy(analytics._ontr)\n",
+ " hyontr_1.notional = -notional\n",
+ " portf.add_trade(hyontr_1, ('bonds', ''))\n",
+ " \n",
+ " portf.value_date = spread_date\n",
+ " portf.mark(interp_method=\"bivariate_linear\")\n",
+ " portf.reset_pv()\n",
+ " \n",
+ " return portf"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
"################################### Calculate stress scenario \n",
"position_date = (datetime.date.today() - BDay(1)).date()\n",
- "shock_date = (datetime.date.today() - BDay(1)).date()\n",
- "spread_date = shock_date\n",
- "(position_date, spread_date, shock_date)\n",
+ "spread_date = (datetime.date.today() - BDay(1)).date()\n",
"analytics.init_ontr(spread_date)"
]
},
@@ -243,59 +380,20 @@
"metadata": {},
"outputs": [],
"source": [
- "#tranche positions\n",
- "conn = dawn_engine.raw_connection()\n",
- "mysql_engine = dbengine('rmbs_model')\n",
- "mysqlcrt_engine = dbengine('crt')\n",
- "\n",
- "portf = get_tranche_portfolio(position_date, conn, False, 'SERCGMAST')\n",
- "s_portf = get_swaption_portfolio(position_date, conn)\n",
- "for t, id in zip(s_portf.trades, s_portf.trade_ids):\n",
- " portf.add_trade(t, id)\n",
- "\n",
- "#index positions\n",
- "df = pd.read_sql_query(\"SELECT * from list_cds_positions_by_strat(%s)\",\n",
- " dawn_engine, params=(position_date,))\n",
- "df_no_curve = df[~df.folder.str.contains(\"CURVE\")]\n",
- "for t in df_no_curve.itertuples(index=False):\n",
- " portf.add_trade(CreditIndex(redcode=t.security_id, maturity=t.maturity, notional=t.notional),\n",
- " (t.folder, t.security_desc))\n",
- " \n",
- "#separately add in curve delta\n",
- "df_curve = df[df.folder.str.contains(\"CURVE\")]\n",
- "curve_portf = Portfolio([CreditIndex(redcode=t.security_id, maturity=t.maturity, notional=t.notional)\n",
- " for t in df_curve.itertuples(index=False)])\n",
- "curve_portf.value_date = spread_date\n",
- "curve_portf.mark()\n",
- "\n",
- "portf.add_trade(CreditIndex('HY', on_the_run('HY', spread_date), '5yr', \n",
- " value_date=spread_date, \n",
- " notional=curve_portf.hy_equiv), ('curve_trades', ''))\n",
- "\n",
- "#get bond risks:\n",
- "with dbconn('etdb') as etconn, dbconn('dawndb') as dawnconn:\n",
- " rmbs_pos = subprime_risk(position_date, dawnconn, mysql_engine)\n",
- " clo_pos = clo_risk(position_date, dawnconn, etconn)\n",
- " crt_pos = crt_risk(position_date, dawnconn, mysqlcrt_engine)\n",
- "if clo_pos is None:\n",
- " notional = rmbs_pos['hy_equiv'].sum() + crt_pos['hy_equiv'].sum()\n",
- "else:\n",
- " notional = rmbs_pos['hy_equiv'].sum() + clo_pos['hy_equiv'].sum() + crt_pos['hy_equiv'].sum()\n",
- "portf.add_trade(CreditIndex('HY', on_the_run('HY', spread_date), '5yr', \n",
- " value_date = spread_date, \n",
- " notional = -notional), ('bonds', ''))\n",
- " \n",
- "portf.value_date = spread_date\n",
- "portf.mark(interp_method=\"bivariate_linear\")\n",
- "portf.reset_pv()\n",
+ "#tranche/swaption positions\n",
+ "portf = build_portf(position_date, spread_date)\n",
"\n",
"vol_surface = {}\n",
"for trade in portf.swaptions:\n",
- " vs = BlackSwaptionVolSurface(trade.index.index_type, trade.index.series, \n",
- " value_date=spread_date, interp_method = \"bivariate_linear\")\n",
+ " try:\n",
+ " vs = BlackSwaptionVolSurface(trade.index.index_type, trade.index.series, \n",
+ " value_date=spread_date, interp_method = \"bivariate_linear\")\n",
+ " except:\n",
+ " vs = BlackSwaptionVolSurface(trade.index.index_type, trade.index.series + 1, \n",
+ " value_date=spread_date, interp_method = \"bivariate_linear\")\n",
" vol_surface[(trade.index.index_type, trade.index.series, trade.option_type)] = vs[vs.list(source='MS', option_type=trade.option_type)[-1]]\n",
"\n",
- "scens = run_portfolio_scenarios(portf, date_range=[pd.Timestamp(shock_date)], params=[\"pnl\"],\n",
+ "scens = run_portfolio_scenarios(portf, date_range=[pd.Timestamp(spread_date)], params=[\"pnl\"],\n",
" spread_shock=widen,\n",
" vol_shock=[0],\n",
" corr_shock = [0],\n",
@@ -338,7 +436,7 @@
" groupby(level=0, axis=1).sum())\n",
"\n",
"options = ['HYOPTDEL', 'HYPAYER', 'HYREC', 'IGOPTDEL', 'IGPAYER', 'IGREC']\n",
- "tranches = ['HYMEZ', 'HYINX', 'HYEQY', 'IGMEZ', 'IGINX', 'IGEQY', 'IGSNR', 'IGINX', 'BSPK']\n",
+ "tranches = ['HYMEZ', 'HYINX', 'HYEQY', 'IGMEZ', 'IGINX', 'IGEQY', 'IGSNR', 'IGINX', 'BSPK', 'XOMEZ', 'XOINX']\n",
"\n",
"scenarios['options'] = scenarios[set(scenarios.columns).intersection(options)].sum(axis=1)\n",
"scenarios['tranches'] = scenarios[set(scenarios.columns).intersection(tranches)].sum(axis=1)\n",
diff --git a/python/notebooks/Single Names Monitoring.ipynb b/python/notebooks/Single Names Monitoring.ipynb
index 8b734db9..46230799 100644
--- a/python/notebooks/Single Names Monitoring.ipynb
+++ b/python/notebooks/Single Names Monitoring.ipynb
@@ -13,10 +13,19 @@
"from analytics.basket_index import MarkitBasketIndex\n",
"from analytics import on_the_run\n",
"import matplotlib.pyplot as plt\n",
+ "import statsmodels.formula.api as smf\n",
+ "from pygam import LinearGAM, s, f, GAM\n",
"\n",
"from utils.db import dbengine\n",
- "serenitas_engine = dbengine('serenitasdb')\n",
- "\n",
+ "serenitas_engine = dbengine('serenitasdb')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
"%matplotlib inline"
]
},
@@ -27,8 +36,8 @@
"outputs": [],
"source": [
"value_date = (pd.datetime.today() - pd.offsets.BDay(1)).date()\n",
- "index_type = 'HY'\n",
- "series = 32"
+ "index_type = 'XO'\n",
+ "series = 28"
]
},
{
@@ -127,36 +136,8 @@
"metadata": {},
"outputs": [],
"source": [
- "#Dispersion: std_dev of default_prob/average default_prob\n",
- "date_range = pd.bdate_range(value_date - 52 * 4 * pd.offsets.Week(), value_date, freq='5B')\n",
- "default_prob = {}\n",
- "ontr = on_the_run(index_type, date_range[0])\n",
- "index = MarkitBasketIndex(index_type, ontr, ['5yr'])\n",
- "for d in date_range:\n",
- " if ontr != on_the_run(index_type, d):\n",
- " ontr = on_the_run(index_type, d)\n",
- " index = MarkitBasketIndex(index_type, ontr, ['5yr'])\n",
- " try:\n",
- " index.value_date = d\n",
- " surv_prob, tickers = index.survival_matrix()\n",
- " default_prob[d] = pd.Series(1 - np.ravel(surv_prob), index=tickers)\n",
- " except:\n",
- " continue\n",
- "default_prob = pd.concat(default_prob)\n",
- "dispersion = default_prob.unstack(level=0)\n",
- "dispersion = dispersion.std()/dispersion.mean()\n",
- "dispersion.plot()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
"def gini(array):\n",
" \"\"\"Calculate the Gini coefficient of a numpy array.\"\"\"\n",
- " array = array.values\n",
" # based on bottom eq: http://www.statsdirect.com/help/content/image/stat0206_wmf.gif\n",
" # from: http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm\n",
" if np.amin(array) < 0:\n",
@@ -165,7 +146,7 @@
" array = np.sort(array) #values must be sorted\n",
" index = np.arange(1,array.shape[0]+1) #index per array element\n",
" n = array.shape[0]#number of array elements\n",
- " return ((np.sum((2 * index - n - 1) * array)) / (n * np.sum(array))) #Gini coefficient"
+ " return ((np.sum((2 * index - n - 1) * array)) / (n * np.sum(array))) "
]
},
{
@@ -174,7 +155,159 @@
"metadata": {},
"outputs": [],
"source": [
- "#Instead of std dev of spread get Gini Factor default prob\n",
+ "def get_gini_spreadstdev(row):\n",
+ " indices = MarkitBasketIndex(row['index'], row.series, [row.tenor], value_date = row.name)\n",
+ " spreads = indices.spreads()\n",
+ " spreads = spreads[spreads<1]\n",
+ " return (gini(spreads), np.std(spreads))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "####################### NAV Basis\n",
+ "\n",
+ "# HY | IG\n",
+ "#+ve index trades risk rich | index trades risk cheap\n",
+ "#-ve single trades risk rich | single trades risk cheap\n",
+ "\n",
+ "sql_string = \"select * from index_quotes where index = %s and tenor = '5yr'\"\n",
+ "df = pd.read_sql_query(sql_string, serenitas_engine, params=(index_type,), index_col=['date'])\n",
+ "df[\"dist_on_the_run\"] = df.groupby(\"date\")[\"series\"].transform(\n",
+ " lambda x: x.max() - x\n",
+ ")\n",
+ "df = df.groupby(['date', 'series']).nth(-1) #take the last version\n",
+ "df['basis'] = df.closespread - df.modelspread if index_type == 'IG' else df.closeprice - df.modelprice\n",
+ "df.set_index('dist_on_the_run', append=True, inplace=True)\n",
+ "df.reset_index('series', inplace=True)\n",
+ "basis = df['basis'].unstack()\n",
+ "stats = pd.DataFrame([basis.min(), basis.mean(), basis.max(), \n",
+ " basis.quantile(.01), basis.quantile(.05), basis.quantile(.95), basis.quantile(.99)],\n",
+ " index=['min', 'mean', 'max', \n",
+ " '1%tile', '5%tile', '95%tile', '99%tile'])\n",
+ "stats"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "####################### Get Gini on indices: this calc bombs a lot so let's do the ones that we were able to calc before (dropna)\n",
+ "df_gini_calc = df.dropna().loc[datetime.date(2019,1,1):, :].reset_index('dist_on_the_run')[\n",
+ " ['index','series', 'tenor', 'duration', 'basis', 'closespread']]\n",
+ "temp = df_gini_calc.apply(get_gini_spreadstdev, axis=1)\n",
+ "temp = pd.DataFrame(temp.values.tolist(), columns=['gini_spread','std_spread'], index=temp.index)\n",
+ "df_gini_calc = df_gini_calc.merge(temp, left_index=True, right_index=True).dropna()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#######################GLS regression of NAV basis to spread/duration\n",
+ "#basis_gini_model = smf.gls(\"basis ~ np.log(duration) + np.log(closespread) + np.log(gini_spread)\", data=df_gini_calc).fit()\n",
+ "#basis_gini_model.summary()\n",
+ "\n",
+ "#Let's use a GAM model instead?\n",
+ "X = np.array(df_gini_calc[['duration', 'closespread', 'gini_spread']])\n",
+ "y = np.array(df_gini_calc[['basis']])\n",
+ "\n",
+ "basis_model = GAM(s(0, constraints='concave') +\n",
+ " s(1, constraints='concave') +\n",
+ " s(2, constraints='concave'))\n",
+ "\n",
+ "lam = np.logspace(-3, 5, 5, base=10)\n",
+ "lams = [lam] * 3\n",
+ "\n",
+ "basis_model.gridsearch(X, y, lam=lams)\n",
+ "basis_model.summary()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "## plotting\n",
+ "fig, axs = plt.subplots(1,3);\n",
+ "\n",
+ "titles = ['duration', 'closespread', third_variable]\n",
+ "for i, ax in enumerate(axs):\n",
+ " XX = basis_model.generate_X_grid(term=i)\n",
+ " ax.plot(XX[:, i], basis_model.partial_dependence(term=i, X=XX))\n",
+ " ax.plot(XX[:, i], basis_model.partial_dependence(term=i, X=XX, width=.95)[1], c='r', ls='--')\n",
+ " if i == 0:\n",
+ " ax.set_ylim(-30,30)\n",
+ " ax.set_title(titles[i]);"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "############## predict\n",
+ "predict = basis_model.predict(X)\n",
+ "plt.scatter(y, predict)\n",
+ "plt.xlabel('actual basis')\n",
+ "plt.ylabel('predicted basis')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "############## today's basis\n",
+ "y[-1], predict[-1]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#######################Dispersion: std_dev/mean of default_prob\n",
+ "date_range = pd.bdate_range(value_date - 52 * 4 * pd.offsets.Week(), value_date, freq='5B')\n",
+ "default_prob, index_spreads = {}, {}\n",
+ "for d in date_range:\n",
+ " try:\n",
+ " index = MarkitBasketIndex(index_type, on_the_run(index_type, d), ['5yr'], value_date =d)\n",
+ " surv_prob, tickers = index.survival_matrix()\n",
+ " spreads = index.spreads()\n",
+ " spreads = spreads[spreads<1] #filter out crazy spreads\n",
+ " default_prob[d] = pd.Series(1 - np.ravel(surv_prob), index=tickers)\n",
+ " index_spreads[d] = pd.Series(spreads, index=tickers)\n",
+ " except:\n",
+ " continue\n",
+ "default_prob = pd.concat(default_prob)\n",
+ "index_spreads = pd.concat(index_spreads)\n",
+ "dispersion = default_prob.unstack(level=0)\n",
+ "dispersion = dispersion.std()/dispersion.mean()\n",
+ "dispersion_spread = index_spreads.unstack(level=0)\n",
+ "dispersion_spread = dispersion_spread.std()/dispersion_spread.mean()\n",
+ "dispersion.plot()\n",
+ "dispersion_spread.plot()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Get Gini factor\n",
"sql_string = \"select * from index_version where index = %s\"\n",
"idx_ver = pd.read_sql_query(sql_string, serenitas_engine, params=[index_type,], parse_dates=['lastdate'])\n",
"idx_ver['date'] = pd.to_datetime([d.strftime('%Y-%m-%d') if not pd.isnull(d) else datetime.date(2050,1,1) for d in idx_ver['lastdate']])\n",
@@ -188,29 +321,10 @@
"risk.set_index('date', inplace=True) \n",
"risk['moneyness'] = risk.apply(lambda df: (df.detach-df.cumulativeloss)/df.indexfactor/df.index_expected_loss, axis=1)\n",
"\n",
- "single_day_risk = {}\n",
- "date_range = pd.bdate_range(value_date - 52 * 5 * pd.offsets.Week(), value_date, freq='5B')\n",
- "for d in date_range:\n",
- " default_prob={}\n",
- " try:\n",
- " df = risk.loc[d]\n",
- " except:\n",
- " continue\n",
- " for s in df.series.unique():\n",
- " tenors = list(df[df.series==s]['tenor'].sort_values().unique())\n",
- " indices = MarkitBasketIndex(index_type, s, tenors)\n",
- " try:\n",
- " indices.value_date = d\n",
- " surv_prob, tickers = indices.survival_matrix()\n",
- " default_prob[s] = pd.DataFrame(1 - surv_prob, index=tickers, columns=tenors)\n",
- " except:\n",
- " continue\n",
- " if default_prob:\n",
- " default_prob = pd.concat(default_prob, names=['series', 'name'], sort=True)\n",
- " default_prob.columns.name = 'tenor'\n",
- " gini_coeff = default_prob.stack().groupby(['series', 'tenor']).apply(gini)\n",
- " single_day_risk[d] = df.merge(gini_coeff.rename('gini_coeff').reset_index(), on=['series', 'tenor'])\n",
- "tranche_risk = pd.concat(single_day_risk, names=['date', 'idx'], sort=True)"
+ "date_range = pd.bdate_range(value_date - 52 * 3 * pd.offsets.Week(), value_date, freq='5B')\n",
+ "gini_calc = risk[(risk.index.isin(date_range)) & (risk.attach == 0)]\n",
+ "temp = gini_calc.apply(get_gini_spreadstdev, axis=1)\n",
+ "gini_calc[['gini_spread', 'std_spread']] = pd.DataFrame(temp.values.tolist(), columns=['gini_spread','std_spread'], index=temp.index)"
]
},
{
@@ -219,8 +333,8 @@
"metadata": {},
"outputs": [],
"source": [
- "to_plot_gini = tranche_risk[(tranche_risk.tenor == '5yr') & (tranche_risk.attach ==0)].groupby(['date', 'series']).nth(-1)\n",
- "to_plot_gini['gini_coeff'].unstack().plot()"
+ "to_plot_gini = gini_calc[(gini_calc.tenor == '5yr')].groupby(['date', 'series']).nth(-1)\n",
+ "to_plot_gini['gini_spread'].unstack().plot()"
]
},
{
@@ -229,11 +343,7 @@
"metadata": {},
"outputs": [],
"source": [
- "import statsmodels.formula.api as smf\n",
- "equity = tranche_risk[tranche_risk.attach==0]\n",
- "#use a subset for modeling purposes?\n",
- "equity = equity[(equity.tenor=='5yr') & (equity.series >= 27)]\n",
- "gini_model = smf.gls(\"corr_at_detach ~ gini_coeff + duration + moneyness\", data=equity).fit()\n",
+ "gini_model = smf.gls(\"corr_at_detach ~ gini_spread + duration + moneyness\", data=equity).fit()\n",
"gini_model.summary()"
]
},
@@ -243,8 +353,8 @@
"metadata": {},
"outputs": [],
"source": [
- "predict_today = equity.reset_index()[['gini_coeff', 'duration', 'moneyness']].iloc[-1]\n",
- "spread_gls_model.predict(predict_today)"
+ "predict_today = equity.reset_index()[['gini_spread', 'duration', 'moneyness']].iloc[-1]\n",
+ "gini_model.predict(predict_today)"
]
},
{
@@ -254,15 +364,19 @@
"outputs": [],
"source": [
"#Let's use a GAM model instead?\n",
- "from pygam import LinearGAM, s, f\n",
- "X = np.array(equity[['gini_coeff', 'duration', 'moneyness']])\n",
+ "#only use the 5yr point for modeling\n",
+ "equity = gini_calc[(gini_calc.tenor=='5yr') & (gini_calc.series >= 23)]\n",
+ "X = np.array(equity[['gini_spread', 'duration', 'moneyness']])\n",
"y = np.array(equity['corr_at_detach'])\n",
"\n",
- "gam_model = LinearGAM(s(0) + s(1) + s(2))\n",
+ "#Fit for Lamda\n",
+ "gam_model = GAM(s(0, n_splines=5) +\n",
+ " s(1, n_splines=5) +\n",
+ " s(2, n_splines=5))\n",
"lam = np.logspace(-3, 5, 5, base=3)\n",
"lams = [lam] * 3\n",
- "\n",
"gam_model.gridsearch(X, y, lam=lams)\n",
+ "\n",
"gam_model.summary()"
]
},
@@ -273,10 +387,9 @@
"outputs": [],
"source": [
"## plotting\n",
- "plt.figure();\n",
"fig, axs = plt.subplots(1,3);\n",
"\n",
- "titles = ['gini_coeff', 'duration', 'moneyness']\n",
+ "titles = ['gini_spread', 'duration', 'moneyness']\n",
"for i, ax in enumerate(axs):\n",
" XX = gam_model.generate_X_grid(term=i)\n",
" ax.plot(XX[:, i], gam_model.partial_dependence(term=i, X=XX))\n",
@@ -285,6 +398,29 @@
" ax.set_ylim(-30,30)\n",
" ax.set_title(titles[i]);"
]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "predict = gam_model.predict(X)\n",
+ "plt.scatter(y, predict)\n",
+ "plt.xlabel('actual correlation')\n",
+ "plt.ylabel('predicted correlation')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "today = (equity.loc[max(equity.index)])\n",
+ "predict_HY31 = gam_model.predict(np.array(today[today.series==31][['gini_spread', 'duration', 'moneyness']]))\n",
+ "today[today.series==31][['corr_at_detach']], predict_HY31"
+ ]
}
],
"metadata": {