about summary refs log tree commit diff stats
path: root/python
diff options
context:
space:
mode:
Diffstat (limited to 'python')
-rw-r--r--python/db.py7
-rw-r--r--python/globeop_reports.py68
-rw-r--r--python/mark_backtest_backfill.py3
-rw-r--r--python/notebooks/Allocation Reports.ipynb14
-rw-r--r--python/notebooks/Valuation Backtest.ipynb48
5 files changed, 85 insertions(+), 55 deletions(-)
diff --git a/python/db.py b/python/db.py
index e0ae3dd4..2a10b0bd 100644
--- a/python/db.py
+++ b/python/db.py
@@ -49,9 +49,14 @@ def dbengine(dbname):
host="debian", database=dbname,
query={'option_files': os.path.expanduser('~/.my.cnf')})
else:
+ if dbname == 'etdb':
+ dbname= 'ET'
+ user_name = 'et_user'
+ else:
+ user_name = dbname[:-2] + '_user'
uri = URL(drivername="postgresql",
host=os.environ.get("PGHOST", "debian"),
- username=dbname[:-2] + '_user',
+ username=user_name,
database=dbname)
return create_engine(uri, paramstyle="format")
diff --git a/python/globeop_reports.py b/python/globeop_reports.py
index 9ac2d013..55f21325 100644
--- a/python/globeop_reports.py
+++ b/python/globeop_reports.py
@@ -35,15 +35,6 @@ def get_portfolio(report_date = None):
return df
-def curr_port_PNL(date = datetime.date.today(), asset_class='Subprime'):
- date = (date - off.MonthEnd(1)).date()
- sql_string = "SELECT * FROM risk_positions(%s, %s) WHERE notional > 0"
- df_positions = pd.read_sql_query(sql_string, dbengine('dawndb'), params=[date, asset_class])
- df_pnl = get_monthly_pnl()[:date]
- df_all = df_positions.merge(df_pnl.groupby('identifier').sum().reset_index(), on=['identifier'])
- return df_all
-
-
def trade_performance():
sql_string = "SELECT * FROM bonds"
df_trades = pd.read_sql_query(sql_string, dbengine('dawndb'), parse_dates={'lastupdate': {'utc': True}, 'trade_date': {}, 'settle_date': {}})
@@ -133,10 +124,11 @@ def get_rmbs_pos_df(date = None):
df = df.sort_index().loc[:end_date]
df = df[(df.port == 'MORTGAGES') &
(df.endbookmv > 0) &
- (df['invid'].str.len() == 9)]
+ (df.custacctname == 'V0NSCLMAMB') &
+ (df['invid'].str.len() >= 9)]
sql_string = "SELECT distinct timestamp FROM priced where normalization = 'current_notional'"
timestamps = pd.read_sql_query(sql_string, engine)
- df = df[['endbooknav', 'endlocalmarketprice', 'identifier']]
+ df = df[['endbookmv', 'endlocalmarketprice', 'identifier']]
calc_df = pd.DataFrame()
for d, g in df.groupby(pd.Grouper(freq='M')):
@@ -153,7 +145,6 @@ def get_rmbs_pos_df(date = None):
sql_string = """
SELECT date(timestamp) as timestamp, cusip, model_version, pv, moddur, delta_yield, delta_ir
FROM priced where date(timestamp) = %s
- and normalization ='current_notional'
and model_version <> 2
and model_id_sub = %s"""
params_list = [model_date, model_id]
@@ -161,25 +152,62 @@ def get_rmbs_pos_df(date = None):
sql_string = """
SELECT date(timestamp) as timestamp, cusip, model_version, pv, moddur, delta_yield, delta_ir
FROM priced where date(timestamp) = %s
- and model_version <> 2
- and normalization ='current_notional'"""
+ and model_version <> 2"""
params_list = [model_date]
model = pd.read_sql_query(sql_string, engine, parse_dates=['timestamp'],
params=params_list)
- comb_g = g.loc[d].groupby('identifier').agg({'endbooknav': np.sum,
+ model = model[model.pv != 0]
+ comb_g = g.loc[d].groupby('identifier').agg({'endbookmv': np.sum,
'endlocalmarketprice': np.mean})
model = pd.merge(comb_g, model, left_on = 'identifier', right_on='cusip')
positions = model.set_index(['cusip', 'model_version']).unstack(1).dropna()
- positions = positions[positions.pv.iloc[:,0] != 0]
v1 = positions.xs(1, level='model_version', axis=1)
v3 = positions.xs(3, level='model_version', axis=1)
- v3 = v3.assign(curr_ntl = v3.endbooknav/v3.endlocalmarketprice *100)
+ v3 = v3.assign(curr_ntl = v3.endbookmv/v3.endlocalmarketprice *100)
v3 = v3.assign(b_yield = v3.moddur.apply(lambda x:
float(yc.zero_rate(x)) - libor))
v3.b_yield += np.minimum((v1.pv / v1.endlocalmarketprice * 100)
- ** (1/v1.moddur) - 1, 10).dropna()
- v3.delta_yield = v3.delta_yield * (v3.endlocalmarketprice/100)/ v3.pv * v3.curr_ntl
- v3.delta_ir = v3.delta_ir * np.minimum(1, 1/v3.moddur) * (v3.endlocalmarketprice/100)/ v3.pv * v3.curr_ntl
+ ** (1/v1.moddur) - 1, 1).dropna()
+ v3.delta_yield *= v3.endbookmv / v3.pv
+ v3.delta_ir *= np.minimum(1, 1/v3.moddur) * (v3.endlocalmarketprice/100)/ v3.pv * v3.curr_ntl
calc_df = calc_df.append(v3)
return calc_df.reset_index().set_index('timestamp').sort_index()
+
+def get_clo_pos_df(date = None):
+
+ etengine = dbengine('etdb')
+ dawnengine = dbengine('dawndb')
+ end_date = pd.datetime.today() - MonthEnd(1)
+
+ if date is not None:
+ date = date + MonthEnd(0)
+ df = get_portfolio(date)
+ df = df.sort_index().loc[:end_date]
+ df = df[(df.port == 'CLO') &
+ (df.endbookmv > 0) &
+ (df.custacctname == 'V0NSCLMAMB') &
+ (df['invid'].str.len() >= 9)]
+ df = df[['endbookmv', 'endlocalmarketprice', 'identifier']]
+ sql_string = "select distinct cusip, identifier from bonds where asset_class = 'CLO'"
+ cusip_map = pd.read_sql_query(sql_string, dawnengine)
+
+ r = {}
+ for d, g in df.groupby(pd.Grouper(freq='M')):
+ cusip_list = g.loc[g.index[-1]].identifier
+ if isinstance(cusip_list, str):
+ pos = pd.merge(pd.DataFrame(g.loc[g.index[-1]]).T, cusip_map, on='identifier')
+ else:
+ pos = pd.merge(g.loc[g.index[-1]], cusip_map, on='identifier')
+ cusip_list = pos.cusip.tolist()
+ placeholders = ",".join(["%s"] * (1+len(cusip_list)))
+ sql_string = f"SELECT * FROM historical_cusip_risk({placeholders})"
+ model = pd.read_sql_query(sql_string, etengine, parse_dates = ['pricingdate'],
+ params=[d.date()] + cusip_list)
+ model = pd.concat([model, pd.Series(cusip_list, name='cusip')], axis=1)
+ model = model.dropna().set_index('pricingdate')
+ r[d] = pd.merge(model, pos, on='cusip').set_index('cusip')
+ calc_df = pd.concat(r, names=['date', 'cusip'])
+ calc_df['hy_equiv'] = calc_df.delta * calc_df.endbookmv
+ return calc_df
+
diff --git a/python/mark_backtest_backfill.py b/python/mark_backtest_backfill.py
index fb0394a8..8ea4f6d9 100644
--- a/python/mark_backtest_backfill.py
+++ b/python/mark_backtest_backfill.py
@@ -97,6 +97,9 @@ def get_globs():
return chain.from_iterable(globs)
settings = {
+ 'ReviewedPack.pcharubh.SERENITAS.SERCGMAST.20180601.20180630.Draft.xlsx': ("JA:JX", "Securities Valuation Details", "Y"),
+ 'ReviewedPack.jipark.SERENITAS.SERCGMAST.20180501.20180531.Draft.xlsx': ("JA:JX", "Securities Valuation Details", "Y"),
+ 'ReviewedPack.pcharubh.SERENITAS.SERCGMAST.20180401.20180430.Draft.xlsx': ("JA:JX", "Securities Valuation Details", "Y"),
'ReviewedPack.pcharubh.SERENITAS.SERCGMAST.20180301.20180331.Draft.xlsx': ("JA:JX", "Securities Valuation Details", "Y"),
'ReviewedPack.pcharubh.SERENITAS.SERCGMAST.20180201.20180228.Draft.xlsx': ("JA:JX", "Securities Valuation Details", "Y"),
'ReviewedPack.bseeman.SERENITAS.SERCGMAST.20180101.20180131.Draft.xlsx': ("JA:JX", "Securities Valuation Details", "Y"),
diff --git a/python/notebooks/Allocation Reports.ipynb b/python/notebooks/Allocation Reports.ipynb
index b8399305..1a30f348 100644
--- a/python/notebooks/Allocation Reports.ipynb
+++ b/python/notebooks/Allocation Reports.ipynb
@@ -220,8 +220,9 @@
"metadata": {},
"outputs": [],
"source": [
- "#RMBS Positions and Risks\n",
- "rmbs_pos = go.get_rmbs_pos_df()"
+ "#Positions and Risks\n",
+ "rmbs_pos = go.get_rmbs_pos_df()\n",
+ "clo_pos = go.get_clo_pos_df()"
]
},
{
@@ -236,7 +237,7 @@
"bond_dur, bond_yield = {}, {}\n",
"for d, g in rmbs_pos.groupby(pd.Grouper(freq='M')):\n",
" bond_dur[d] = sum(g.curr_ntl * g.moddur)/sum(g.curr_ntl)\n",
- " bond_yield[d] = sum(g.endbooknav * g.moddur * g.b_yield) /sum(g.endbooknav * g.moddur)\n",
+ " bond_yield[d] = sum(g.endbookmv * g.moddur * g.b_yield) /sum(g.endbookmv * g.moddur)\n",
"a = pd.Series(bond_dur)\n",
"b = pd.Series(bond_yield)\n",
"a.name = 'Duration'\n",
@@ -266,9 +267,10 @@
"source": [
"#RMBS Risk - need RMBS Positions and Risks\n",
"sql_string = \"select date, duration, series from on_the_run where index = 'HY'\"\n",
- "duration = pd.read_sql_query(sql_string, Sengine, parse_dates=['date'])\n",
+ "duration = pd.read_sql_query(sql_string, Sengine, parse_dates=['date'], index_col=['date'])\n",
"df = pd.merge_asof(rmbs_pos.sort_index(), duration, left_index=True, right_index=True)\n",
- "df.groupby('timestamp').apply(lambda df: sum(df.delta_yield/df.duration * 100))"
+ "rmbs_hy_equiv = df.groupby('timestamp').apply(lambda df: sum(df.delta_yield/df.duration * 100))\n",
+ "#hy_equiv.plot()"
]
},
{
@@ -325,7 +327,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.6.5"
+ "version": "3.6.6"
}
},
"nbformat": 4,
diff --git a/python/notebooks/Valuation Backtest.ipynb b/python/notebooks/Valuation Backtest.ipynb
index 698a25f1..870fb33d 100644
--- a/python/notebooks/Valuation Backtest.ipynb
+++ b/python/notebooks/Valuation Backtest.ipynb
@@ -6,13 +6,15 @@
"metadata": {},
"outputs": [],
"source": [
- "import matplotlib.pyplot as plt\n",
- "from matplotlib.ticker import FuncFormatter \n",
"from datetime import datetime\n",
- "import pandas as pd\n",
+ "from db import dbengine\n",
"\n",
"import mark_backtest_underpar as mark\n",
- "import globeop_reports as ops"
+ "import globeop_reports as ops\n",
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "engine = dbengine('dawndb')"
]
},
{
@@ -205,29 +207,19 @@
"metadata": {},
"outputs": [],
"source": [
- "pnl_breakdown = ops.curr_port_PNL()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "#Unrealized MTM Gains/Loss\n",
- "breakdown_summary = pd.DataFrame(index = {'unreal mark-to-market'}, columns={'Gains','Loss','Net'})\n",
- "breakdown_summary['Gains'] = pnl_breakdown[pnl_breakdown['mtdbookunrealmtm']>=0].sum()['mtdbookunrealmtm']\n",
- "breakdown_summary['Loss'] = pnl_breakdown[pnl_breakdown['mtdbookunrealmtm']<0].sum()['mtdbookunrealmtm']\n",
- "breakdown_summary['Net'] = pnl_breakdown.sum()['mtdbookunrealmtm']"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "breakdown_summary / nav[-1]"
+ "#Portfolio MTM Gains/Loss/Net\n",
+ "df_pnl = ops.get_monthly_pnl()[:date]\n",
+ "r=[]\n",
+ "for d, g in df_pnl.reset_index('identifier').groupby(pd.Grouper(freq='M')):\n",
+ " sql_string = \"SELECT * FROM risk_positions(%s, 'Subprime') WHERE notional > 0\"\n",
+ " pos = pd.read_sql_query(sql_string, engine, params=[g.index[-1].date()])\n",
+ " pos.identifier = pos.identifier.str[:9]\n",
+ " pos = pos.merge(df_pnl.groupby('identifier').cumsum().loc[g.index[-1]],\n",
+ " on='identifier')['mtdtotalbookpl'] / nav.loc[d]\n",
+ " r.append([g.index[-1], pos[pos>=0].sum(), pos[pos<0].sum()])\n",
+ "summary = pd.DataFrame.from_records(r, index='date', columns=['date','gains','loss'])\n",
+ "summary['Net'] = summary.gains + summary.loss\n",
+ "summary.plot()"
]
},
{
@@ -267,7 +259,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.6.4"
+ "version": "3.6.6"
}
},
"nbformat": 4,