{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import datetime\n", "import globeop_reports as go\n", "import pandas as pd\n", "import numpy as np\n", "\n", "from pandas.tseries.offsets import BDay, MonthEnd, CustomBusinessMonthEnd\n", "\n", "from risk.bonds import subprime_risk, crt_risk, clo_risk\n", "from risk.portfolio import build_portfolio, generate_vol_surface\n", "\n", "import serenitas.analytics as ana\n", "from serenitas.analytics.index_data import load_all_curves\n", "from serenitas.analytics.scenarios import run_portfolio_scenarios\n", "from serenitas.analytics.basket_index import BasketIndex\n", "from serenitas.analytics.base import Trade\n", "from serenitas.utils.db2 import dbconn, serenitas_pool, dawn_pool\n", "from serenitas.utils.db import serenitas_engine, dawn_engine\n", "#from dates import bond_cal" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#Set dates\n", "position_date = (datetime.date.today() - BDay(1)).date()\n", "spread_date = (datetime.date.today() - BDay(1)).date()\n", "ana._local = False\n", "Trade.init_ontr(spread_date)\n", "fund ='SERCGMAST'" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "################################### Run scenarios\n", "spread_shock = np.array([-100., -25., 1., +25. , 100., 200., 300.])\n", "spread_shock /= Trade._ontr['HY'].spread\n", "portf, _ = build_portfolio(position_date, spread_date, fund)\n", "vol_surface = generate_vol_surface(portf, 10, 'MS')\n", "portf.reset_pv()\n", "scens = run_portfolio_scenarios(portf, date_range=[pd.Timestamp(spread_date)], params=['pnl', 'hy_equiv'],\n", " spread_shock=spread_shock,\n", " vol_shock=[0.0],\n", " corr_shock=[0.0],\n", " vol_surface=vol_surface)\n", "\n", "strategies = {}\n", "strategies['options'] = ['HYOPTDEL', 'HYPAYER', 'HYREC', \n", " 'IGOPTDEL', 'IGPAYER', 'IGREC']\n", "strategies['tranches'] = ['HYSNR', 'HYMEZ', 'HYINX', 'HYEQY', \n", " 'IGSNR', 'IGMEZ', 'IGINX', 'IGEQY', \n", " 'EUSNR', 'EUMEZ', 'EUINX', 'EUEQY', \n", " 'XOSNR', 'XOMEZ', 'XOINX', 'XOEQY', \n", " 'BSPK']\n", "if fund == 'BRINKER': \n", " scens = scens.xs(0, level='corr_shock')\n", "else:\n", " scens = scens.xs((0.0, 0.0), level=['vol_shock', 'corr_shock'])\n", " \n", "scens.columns.names=['strategy', 'trade_id', 'scen_type']\n", "\n", "results = {}\n", "for i, g in scens.groupby(level='scen_type', axis =1):\n", " temp = g.groupby(level='strategy', axis =1).sum()\n", " for key, item in strategies.items():\n", " exist_columns = set(temp.columns).intersection(item)\n", " temp[key] = temp[exist_columns].sum(axis=1)\n", " temp.drop(exist_columns, axis=1, inplace=True)\n", " temp['total'] = temp.sum(axis = 1)\n", " results[i] = temp\n", "results = pd.concat(results)\n", "results.to_clipboard()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "results.to_clipboard()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#####our jump risks\n", "jtd = portf.jtd_single_names()\n", "with serenitas_pool.connection() as conn:\n", " surv_curves = load_all_curves(conn, spread_date)\n", "surv_curves['spread'] = surv_curves['curve'].apply(lambda sc: sc.to_series(forward=False)[5] * (1-sc.recovery_rates[5]))\n", "jtd_sabo = jtd[[jtd.columns[0]]].join(surv_curves.groupby(level=0).first()[['name', 'company_id', 'spread']])\n", "jtd_sabo.columns = ['jtd', 'name', 'company_id', 
"jtd_sabo = jtd_sabo.groupby(['company_id', 'name']).sum()\n", "jtd_sabo = jtd_sabo.sort_values('spread', ascending=False)\n", "top_5_avg_loss = jtd_sabo.nlargest(5, columns='jtd')['jtd'].mean()\n", "widest_5_total_loss = jtd_sabo.nlargest(5, columns='spread')['jtd'].sum()" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "################################### Find strategies that are not yet defined: anything left in undefined needs to be mapped in strat_map.csv\n", "strats = pd.read_csv('/home/serenitas/edwin/Python/strat_map.csv')\n", "nav = go.get_net_navs()\n", "m_pnl = go.get_monthly_pnl(['strat', 'custacctname'])\n", "m_pnl = m_pnl.reset_index().merge(strats, on=['strat', 'custacctname'], how='left')\n", "m_pnl = m_pnl.loc[~m_pnl['pnl'].isin(['Test', 'Feeder'])]\n", "undefined = m_pnl[m_pnl.pnl.isna()].groupby(['strat', 'custacctname']).last()\n", "#Get PNL allocation\n", "#Input latest NAVs to: '/home/serenitas/edwin/Python/subscription_fee_data.csv'\n", "pnl_alloc = m_pnl.groupby(['date', 'pnl']).sum()\n", "pnl_alloc = pnl_alloc.join(nav.begbooknav)\n", "pnl_alloc['strat_return'] = pnl_alloc.mtdtotalbookpl / pnl_alloc.begbooknav\n", "#Rolling 12-month PNL per strategy - copy to RiskMonitor\n", "start_date = position_date - MonthEnd(1) - MonthEnd(11)\n", "rolling_return = pnl_alloc[start_date:position_date - MonthEnd(1)].groupby('pnl').sum()['strat_return']\n", "rolling_return.to_clipboard()" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "################################### Average Portfolio Sales Turnover\n", "#Rolling min(months since inception, 12) sum of (total bond sale proceeds + paydowns) / monthly NAV\n", "nav = go.get_net_navs()\n", "fund = 'SERCGMAST'\n", "sql_string = \"SELECT * FROM bond_trades WHERE NOT buysell AND fund = %s\"\n", "df = pd.read_sql_query(sql_string, dawn_engine,\n", "                       parse_dates={'lastupdate': {'utc': True}, 'trade_date': {}, 'settle_date': {}},\n", "                       params=[fund,],\n", "                       index_col='trade_date')\n", "df = df.groupby(pd.Grouper(freq='M')).sum()\n", "\n", "#Now get portfolio paydown per month\n", "portfolio = go.get_portfolio()\n", "portfolio = portfolio[(portfolio.custacctname == 'V0NSCLMAMB') &\n", "                      (portfolio.identifier != 'USD') &\n", "                      (portfolio.endqty != 0)]\n", "cf = pd.read_sql_query(\"select date, principal_bal, principal, cusip as identifier \"\n", "                       \"from cashflow_history a left join \"\n", "                       \"(select figi, cusip from securities) b on a.identifier = b.figi\", dawn_engine,\n", "                       parse_dates=['date'],\n", "                       index_col=['date']).sort_index()\n", "portfolio = portfolio.set_index('identifier', append=True)\n", "portfolio = portfolio['endqty'].groupby(['identifier', 'periodenddate']).sum()\n", "portfolio = portfolio.reset_index('identifier')\n", "df_1 = pd.merge_asof(cf, portfolio.sort_index(), left_index=True, right_index=True, by='identifier')\n", "df_1 = df_1.dropna(subset=['endqty'])\n", "df_1 = df_1[(df_1.principal_bal != 0) & (df_1.principal != 0)]\n", "#Pro-rate each deal's principal cashflow by the fraction of the deal we hold\n", "df_1['paydown'] = df_1.endqty / df_1.principal_bal * df_1.principal\n", "paydowns = df_1.paydown.groupby(pd.Grouper(freq='M')).sum()\n", "temp = pd.concat([paydowns, df.principal_payment, df.accrued_payment], axis=1).fillna(0)\n", "turnover = (temp.sum(axis=1)/nav.begbooknav).rolling(12, min_periods=1).sum()\n", "turnover" ] },
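{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#Quick summary of the turnover series above (a sketch; assumes turnover from the previous cell)\n", "turnover.agg(['min', 'max', 'mean'])" ] },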
"################################### Number of position (min/max/average) /position size (min/max/average) /Top 10 position size\n", "portfolio = go.get_portfolio()\n", "nav = go.get_net_navs()\n", "exc_port_list = [None, 'SERCGLLC__SERCGLLC', 'CASH', 'SERCGLTD__SERCGLTD', 'GFS_HELPER_BUSINESS_UNIT', 'SER_TEST__SER_TEST']\n", "exc_inst_list = ['CAD', 'CADF', 'SEREONUS', 'USD', 'USDF', 'USDLOAN', 'EUR', 'EURLOAN', 'USDCASHINT',\n", " 'USDLOANOLD', 'USDSWAPFEE', 'EURF','CADCASHINT','COMMISSIONFEES', 'EURCASHINT', 'COMMUNICATIONFEES']\n", "exc_inst_list2 = ['86359DUR6OLD2','004375DV0OLD4','32027GAD8OLD7','75406DAC7OLD7','86359DMN4OLD7','45661EAW4OLD7']\n", "\n", "portfolio = portfolio[~portfolio.port.isin(exc_port_list) &\n", " ~portfolio.identifier.isin(exc_inst_list) &\n", " ~portfolio.identifier.isin(exc_inst_list2)]\n", "\n", "all_positions = portfolio.groupby(['periodenddate', 'identifier'])['endbooknav'].sum() \n", "num_pos = all_positions.groupby('periodenddate').count()\n", "#min/max/mean number of positions\n", "num_pos.min(), num_pos.max(), num_pos.mean()\n", "\n", "bonds = portfolio[(portfolio.custacctname == 'V0NSCLMAMB') &\n", " (portfolio.identifier != 'USD') &\n", " (portfolio.endqty != 0) &\n", " (portfolio.port.isin(['MORTGAGES', 'STRUCTURED', 'CLO'])) &\n", " (~portfolio.strat.isin(['MBSCDS']))]\n", "\n", "monthend_bonds = bonds.groupby(pd.Grouper(freq=\"M\"), group_keys=False).apply(\n", " lambda df: df.loc[df.index[-1]]\n", " )\n", "monthend_bonds = monthend_bonds.groupby(['periodenddate', 'identifier']).sum()\n", "nav.index.rename('periodenddate', inplace=True)\n", "monthend_bonds = monthend_bonds.merge(nav, left_index=True, right_index=True, suffixes=('_bond', '_fund'))\n", "monthend_bonds['percentage'] = monthend_bonds.endbooknav_bond/monthend_bonds.endbooknav_fund\n", "last_date = monthend_bonds.index.get_level_values(0).max() \n", "latest = monthend_bonds.loc[last_date]\n", "#min/max/mean position size\n", "latest['percentage'][latest['percentage']>0.0000001].min(), latest['percentage'].max(), latest['percentage'].mean()\n", "#10 largest positions\n", "ten_largest = monthend_bonds.groupby('periodenddate').apply(lambda df: df['percentage'].nlargest(10).sum())\n", "print(\"ten largest position - min/max/mean\", ten_largest.min(), ten_largest.max(), ten_largest.mean())\n", "#5 largest positions in the last 5 years\n", "five_largest = monthend_bonds.groupby('periodenddate').apply(lambda df: df['percentage'].nlargest(5).sum())\n", "print(\"five largest position - min/max/mean\",\n", " five_largest[datetime.date.today() - pd.DateOffset(years=5):].min(),\n", " five_largest[datetime.date.today() - pd.DateOffset(years=5):].max(),\n", " five_largest[datetime.date.today() - pd.DateOffset(years=5):].mean())" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "################################### Average Traded Volume\n", "nav = go.get_net_navs()\n", "sql_string = \"SELECT * FROM bond_trades where fund='SERCGMAST'\"\n", "bond_trades = pd.read_sql_query(sql_string, dawn_engine,\n", " parse_dates={'lastupdate':{'utc':True}, 'trade_date': {}, 'settle_date':{}},\n", " index_col = 'trade_date')\n", "g = bond_trades['principal_payment'].groupby(pd.Grouper(freq='M'))\n", "#min/max/mean bond trades by count (weekly = /4)\n", "g.count().min()/4, g.count().max()/4, g.count().mean()/4\n", "#min/max/mean bond trades by MV (weekly = /4)\n", "volume = g.sum()/nav.endbooknav\n", "volume.min()/4, volume.max()/4, volume.mean()/4\n", "\n", "sql_string = 
\"SELECT * FROM cds where fund='SERCGMAST'\"\n", "cds_trades = pd.read_sql_query(sql_string, dawn_engine,\n", " parse_dates={'lastupdate':{'utc':True}, 'trade_date': {}, 'settle_date':{}},\n", " index_col = 'trade_date')\n", "g = cds_trades['notional'].groupby(pd.Grouper(freq='M'))\n", "#min/max/mean cds trades by count\n", "g.count().min()/4, g.count().max()/4, g.count().mean()/4\n", "#min/max/mean cds trades by notional\n", "volume = g.sum()/nav.endbooknav\n", "volume.fillna(0, inplace=True)\n", "volume.min(), volume.max()/4, volume.mean()/4\n", "\n", "#Max trades per day - CDS trades only, bond trades only, combined bond/cds trades\n", "cds_trades[cds_trades.cp_code != 'CONTRA'].groupby(pd.Grouper(freq='D')).count().max()\n", "bond_trades.groupby(pd.Grouper(freq='D')).count().max()\n", "cds_trades[cds_trades.cp_code != 'CONTRA'].id.append(bond_trades.id).groupby(pd.Grouper(freq='D')).count().max()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "################################### Average Holding Period\n", "#Time series of bond portfolio age (portfolio date - latest buy date of position) - weighted by MV of all bonds.\n", "#Problem is if we buy the same position again it resets to the holding period to 0\n", "nav = go.get_net_navs()\n", "sql_string = \"SELECT * FROM bond_trades where fund = 'SERCGMAST' order by trade_date desc\"\n", "df = pd.read_sql_query(sql_string, dawn_engine,\n", " parse_dates={'lastupdate':{'utc':True}, 'trade_date': {}, 'settle_date':{}},\n", " index_col = 'trade_date')\n", "buys = df[df.buysell == True].sort_index()\n", "buys['buy_date'] = buys.index\n", "#get portfolio \n", "port = go.get_portfolio()\n", "port.sort_index(inplace=True)\n", "buy_dates = pd.merge_asof(port, buys[['buy_date', 'identifier']], left_index=True, right_index=True,by='identifier', direction='backward')\n", "buy_dates = buy_dates[['identifier', 'endbooknav','buy_date']][~buy_dates.buy_date.isna()]\n", "buy_dates['hold_days'] = (buy_dates.index - buy_dates.buy_date)/np.timedelta64(1, 'D')\n", "def weighted_average(df):\n", " return np.average(df.hold_days,weights=df.endbooknav)\n", "hold_period = buy_dates.groupby('periodenddate').apply(func = weighted_average)\n", "hold_period_last_five = hold_period.loc[datetime.date.today()- datetime.timedelta(weeks=52*5)::]\n", "hold_period_last_five.min(), hold_period_last_five.max(), hold_period_last_five.mean()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "################################## Calculate Historical Bond Duration/Yield\n", "fund = 'SERCGMAST'\n", "CBM = CustomBusinessMonthEnd(calendar=np.busdaycalendar())\n", "dates = pd.bdate_range(\"2015-1-31\", datetime.date.today() - MonthEnd(1), \n", " freq=CBM)\n", "bond_stats=pd.DataFrame()\n", "with dawn_pool.connection() as conn, dbconn(\"etdb\") as et_conn:\n", " for d in dates:\n", " sub = subprime_risk(d.date(), dawnconn , dbengine(\"rmbs_model\"), fund=fund)\n", " sub=sub[sub.pv1>0]\n", " crt = crt_risk(d.date(), dawnconn , dbengine(\"crt\"), fund=fund)\n", " clo = clo_risk(d.date(), dawnconn , dbconn(\"etdb\"), fund=fund)\n", " bonds = pd.concat([sub,crt,clo]).dropna(subset=['modDur', 'usd_market_value'])\n", " bond_stats.at[d, 'dur']= sum(bonds.notional * bonds.factor * bonds.modDur)/sum(bonds.notional * bonds.factor)\n", " bond_stats.at[d, 'yield'] = (sum(bonds.usd_market_value * bonds.modDur * bonds.bond_yield) /\n", " sum(bonds.usd_market_value * bonds.modDur))\n", " bond_stats.at[d, 
"        bond_stats.at[d, 'swap_rate'] = (sum(bonds.usd_market_value * bonds.modDur * bonds.swap_rate) /\n", "                                         sum(bonds.usd_market_value * bonds.modDur))\n", "bond_stats['dm'] = bond_stats['yield'] - bond_stats['swap_rate']" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "################################## Leverage Ratio - positive and negative PV trades. The only things missing from the calc are the USD/EUR loans\n", "nav = go.get_net_navs()\n", "portf = go.get_portfolio()\n", "df = portf.groupby(pd.Grouper(freq=\"M\"), group_keys=False).apply(\n", "    lambda df: df.loc[df.index[-1]]\n", ")\n", "df = df[~df['invid'].isin(['USDLOAN', 'EURLOAN'])]\n", "df = df[~df['port'].isin(['SER_TEST__SER_TEST', 'GFS_HELPER_BUSINESS_UNIT'])]\n", "pvs = df.groupby(['periodenddate', df['endbooknav'] > 0])['endbooknav'].sum().unstack().rename(\n", "    columns={True: 'endbooknav_pos', False: 'endbooknav_neg'})\n", "nav = nav.merge(pvs, left_index=True, right_index=True)\n", "nav['long_leverage'] = nav['endbooknav_pos']/nav.endbooknav\n", "nav['gross_leverage'] = (nav['endbooknav_pos'] - nav['endbooknav_neg'])/nav.endbooknav\n", "print(\"positive pv/nav:\", nav['long_leverage'].mean(), \"gross pv/nav:\", nav['gross_leverage'].mean())\n", "################################### Broken out by strategy too\n", "pvs = df.groupby(['periodenddate', 'port', df['endbooknav'] > 0])['endbooknav'].sum().unstack().rename(\n", "    columns={True: 'endbooknav_pos', False: 'endbooknav_neg'}).unstack(level=1)\n", "data = {}\n", "for name in ['endbooknav_pos', 'endbooknav_neg']:\n", "    pv_gross = pvs.xs(name, level='endbooknav', axis=1)\n", "    pv_gross.loc[:, 'TRANCHE'] = pv_gross[['IG', 'HY', 'LQD_TRANCH', 'TRANCHE']].fillna(0).sum(axis=1)\n", "    pv_gross.rename({'CURVE': 'CREDIT CURVES',\n", "                     'HEDGE_MAC': 'MACRO HEDGE',\n", "                     'IR': 'INTEREST RATE DERIVATIVES',\n", "                     'MORTGAGES': 'MORTGAGE BONDS',\n", "                     'OPTIONS': 'CREDIT OPTIONS',\n", "                     'STRUCTURED': 'CSO BONDS',\n", "                     'TRANCHE': 'CREDIT TRANCHES'}, axis=1, inplace=True)\n", "    pv_gross.drop(['LQD_TRANCH', 'IG', 'HY'], inplace=True, axis=1)\n", "    pv_gross = pv_gross.merge(nav['endbooknav'], left_index=True, right_index=True)\n", "    data[name] = pv_gross.iloc[:, :-1].div(pv_gross['endbooknav'], axis=0)\n", "data['endbooknav_pos'].to_clipboard()\n", "data['endbooknav_neg'].to_clipboard()" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "bond_stats.to_clipboard()" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "################################## FX Exposure: any net CAD/EUR exposure is FX exposure\n", "################### percentages don't add up to 1 across currencies (incl. USD) because we sum all NAVs after restating the futures at notional\n",
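"#Method sketch: FX futures are booked with near-zero NAV, so each future is\n", "#restated at full notional (endqty x local price x contract multiplier; CME\n", "#contract sizes: 100,000 CAD for CD, 125,000 EUR for EC) and tagged with its\n", "#underlying currency before bucketing\n",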
\n", "################### doesn't add up to 1 including the USD as we now sum up all the NAVs after adjusting the Futures\n", "nav = go.get_net_navs()\n", "portfolio = go.get_portfolio()\n", "monthend_portfolio = portfolio.groupby(pd.Grouper(freq=\"M\"), group_keys=False).apply(\n", " lambda df: df.loc[df.index[-1]]\n", " )\n", "#Adjust the endbooknav of futures\n", "tickers = ['CD_CME', 'EC_CME']\n", "factors = [100000, 125000]\n", "currency = ['CAD', 'EUR']\n", "for a, b, c in zip(tickers, factors, currency):\n", " new_endbooknav = monthend_portfolio['endqty'] * monthend_portfolio['endlocalmarketprice'] * b\n", " monthend_portfolio['endbooknav'] = new_endbooknav.where(monthend_portfolio['invid'].str.contains(a), monthend_portfolio['endbooknav'])\n", " monthend_portfolio.loc[monthend_portfolio['invid'].str.contains(a), 'invccy'] = c\n", "\n", "monthend_portfolio = monthend_portfolio.merge(nav, left_index=True, right_index=True, suffixes=('_inst', '_fund'))\n", "monthend_portfolio.index.name = 'periodenddate'\n", "monthend_portfolio['percent_nav'] = monthend_portfolio['endbooknav_inst']/monthend_portfolio['endbooknav_fund']\n", "\n", "collateral_filter =monthend_portfolio['invid'].str.contains('LOAN')\n", "futures_filter = monthend_portfolio['invid'].str.contains('|'.join(tickers))\n", "cash_filter = ((monthend_portfolio['invid'] == 'CAD') | (monthend_portfolio['invid'] == 'EUR'))\n", "trades = monthend_portfolio[(~futures_filter) & (~collateral_filter) & (~cash_filter)]\n", "names = ['collateral', 'futures', 'cash', 'trades']\n", "categories = [monthend_portfolio[collateral_filter], \n", " monthend_portfolio[futures_filter], \n", " monthend_portfolio[cash_filter],\n", " trades]\n", "exposure = {}\n", "for n, x in zip(names, categories):\n", " exposure[n] = x.groupby(['periodenddate', 'invccy']).sum()\n", "exposure = pd.concat(exposure)['percent_nav']\n", "exposure.unstack(level=1).T.to_clipboard()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "################################## historical cash balances: NAV - bondNAV - IA - IM\n", "#Make sure every strategy is defined\n", "nav = go.get_net_navs()\n", "portf = go.get_portfolio()\n", "strats = pd.read_csv('/home/serenitas/edwin/Python/strat_map.csv')\n", "nav['bdate_end'] = pd.bdate_range(start=nav.index.min(), end=nav.index.max(), freq=\"BM\")\n", "\n", "df = portf.groupby(pd.Grouper(freq=\"M\"), group_keys=False).apply(\n", " lambda df: df.loc[df.index[-1]]\n", " )\n", "df = df[~df['invid'].isin(['USDLOAN', 'EURLOAN'])]\n", "df = df[~df['port'].isin(['SER_TEST__SER_TEST', 'GFS_HELPER_BUSINESS_UNIT'])]\n", "df = df.reset_index().merge(strats, on=['strat', 'custacctname'], how='left')\n", "df = df.dropna(subset=['capital'])\n", "bondNAV = df[df['capital'].str.contains('Bonds')].groupby(['periodenddate'])['endbooknav'].sum()\n", "bondNAV.name = 'bondNAV'\n", "\n", "#now get IM - note need to deal with EUR, it is not correct\n", "fx = pd.read_sql_query(\"select date, eurusd from fx\", dawn_engine, parse_dates='date', index_col = 'date')\n", "sql_string = \"select date, currency, current_im from fcm_moneyline where currency <> 'ZZZZZ'\"\n", "im = pd.read_sql_query(sql_string, dawn_engine, parse_dates='date', index_col = 'date').join(fx)\n", "im['im'] = im.apply(lambda x: x.eurusd * x.current_im if x.currency == 'EUR' else x.current_im, axis =1)\n", "im = im['im'].groupby('date').sum()\n", "\n", "#now get IA - from tranches, swaptions and interest rate derivatives\n", "ia = pd.DataFrame()\n", 
"sqlt = \"SELECT initial_margin FROM tranche_risk_agg(%s)\"\n", "sqls = \"SELECT initial_margin from list_swaption_positions_and_risks(%s)\"\n", "for d in nav['bdate_end']:\n", " for s in [sqlt, sqls]:\n", " temp = pd.read_sql_query(s, dawn_engine, params=[d.date(),])\n", " temp['date'] = d.date()\n", " ia = ia.append(temp)\n", "ia = ia.groupby('date').sum()\n", "ia.index = pd.to_datetime(ia.index)\n", "\n", "#now get it all together\n", "nav = nav.join(bondNAV, how='left')\n", "nav = nav.merge(im, left_on='bdate_end', right_index=True, how='left')\n", "nav = nav.merge(ia, left_on='bdate_end', right_index=True, how='left')\n", "nav['fcash'] = nav['endbooknav'] - nav[['im', 'initial_margin', 'bondNAV']].sum(axis=1)\n", "nav['fcashPercent'] = nav['fcash']/nav['endbooknav']" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "################################## Historical Notioinals and HY Equiv\n", "dates = pd.date_range(datetime.date(2019, 12, 30), datetime.datetime.today() - MonthEnd(1), freq=\"BM\")\n", "#look for a day with HY quotes... we need that to construct HY Equiv\n", "sql_string = 'select distinct(date) from index_quotes where index = %s order by date asc'\n", "hy_dates = pd.read_sql_query(sql_string, serenitas_engine, parse_dates = 'date', params=['HY',])\n", "def nearest(items, pivot):\n", " return min(items, key=lambda x: abs(x - pivot))\n", "#hy_dates.apply(lambda x: nearest(dates, x))\n", "#pd.merge_asof(pd.DataFrame(dates), hy_dates, left_on='0', right_on='date')\n", "dates = pd.merge_asof(pd.DataFrame(dates, columns=['date']), hy_dates, left_on='date', right_on='date')\n", "portfs = {}\n", "hye = {}\n", "for d in dates.date:\n", " d = d.date()\n", " portfs[d], _ = build_portfolio(d, d)\n", " hye[d] = portfs[d].hy_equiv" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "################################### PNL Breakdown by income and appreciation\n", "strats = pd.read_csv('/home/serenitas/edwin/Python/strat_map.csv')\n", "nav = go.get_net_navs()\n", "m_pnl = go.get_monthly_pnl(['strat', 'custacctname'])\n", "m_pnl = m_pnl.reset_index().merge(strats, on=['strat', 'custacctname'], how='left')\n", "m_pnl = m_pnl.loc[~m_pnl['pnl'].isin(['Test', 'Feeder'])]\n", "\n", "pnl_alloc = m_pnl.groupby(['date', 'pnl_lvl1']).sum()\n", "pnl_alloc['income'] = pnl_alloc['mtdbookunrealincome'] + pnl_alloc['mtdbookrealincome']\n", "pnl_alloc['appreciation'] = pnl_alloc['mtdtotalbookpl'] - pnl_alloc['income']\n", "returns = nav.merge(pnl_alloc[['income', 'appreciation']], left_index=True, right_index=True)\n", "returns['income'] /= returns['endbooknav']\n", "returns['appreciation'] /= returns['endbooknav']\n", "income = returns[['income']].unstack(level=0)\n", "income.columns = income.columns.droplevel(0)\n", "appreciation = returns[['appreciation']].unstack(level=0)\n", "appreciation.columns = appreciation.columns.droplevel(0)\n", "\n", "#copy to pnl_breakdown_by_pnl_type - Monthly and Annually\n", "income.sort_index().to_clipboard()\n", "appreciation.sort_index().to_clipboard()\n", "income.T.groupby(pd.Grouper(freq='A')).sum().T.sort_index().to_clipboard()\n", "appreciation.T.groupby(pd.Grouper(freq='A')).sum().T.sort_index().to_clipboard()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#####our jump test\n", "from serenitas.analytics import DualCorrTranche\n", "from serenitas.analytics import Swaption, BlackSwaption, CreditIndex, 
"trade = DualCorrTranche('HY', 29, '5yr', attach=0, detach=15, corr_attach=np.nan,\n", "                        corr_detach=0.35, tranche_running=100, notional=-10000000, use_trunc=True)\n", "portf = Portfolio([trade], ['trade'])\n", "portf.mark()\n", "jtd = portf.jtd_single_names()\n", "with serenitas_pool.connection() as conn:\n", "    surv_curves = load_all_curves(conn, spread_date)\n", "surv_curves['spread'] = surv_curves['curve'].apply(lambda sc: sc.to_series(forward=False)[5] * (1 - sc.recovery_rates[5]))\n", "jtd_sabo = jtd[[jtd.columns[0]]].join(surv_curves.groupby(level=0).first()[['name', 'company_id', 'spread']])\n", "jtd_sabo.columns = ['jtd', 'name', 'company_id', 'spread']\n", "jtd_sabo = jtd_sabo.groupby(['company_id', 'name']).sum()\n", "jtd_sabo = jtd_sabo.sort_values('spread', ascending=False)\n", "top_5_avg_loss = jtd_sabo.nlargest(5, columns='jtd')['jtd'].mean()\n", "widest_5_total_loss = jtd_sabo.nlargest(5, columns='spread')['jtd'].sum()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.2" } }, "nbformat": 4, "nbformat_minor": 4 }