about summary refs log tree commit diff stats
path: root/python/globeop_reports.py
diff options
context:
space:
mode:
Diffstat (limited to 'python/globeop_reports.py')
-rw-r--r-- | python/globeop_reports.py | 57
1 file changed, 37 insertions(+), 20 deletions(-)
diff --git a/python/globeop_reports.py b/python/globeop_reports.py
index 4562c605..9ac2d013 100644
--- a/python/globeop_reports.py
+++ b/python/globeop_reports.py
@@ -125,18 +125,20 @@ def calc_trade_performance_stats():
def get_rmbs_pos_df(date = None):
engine = dbengine('dawndb')
- calc_df = pd.DataFrame()
end_date = pd.datetime.today() - MonthEnd(1)
if date is not None:
date = date + MonthEnd(0)
df = get_portfolio(date)
df = df.sort_index().loc[:end_date]
- mask = (df.port == 'MORTGAGES') & (df.endbookmv > 0) & (df['invid'].str.len() == 9)
- df = df[mask]
- sql_string = "SELECT distinct timestamp FROM priced"
+ df = df[(df.port == 'MORTGAGES') &
+ (df.endbookmv > 0) &
+ (df['invid'].str.len() == 9)]
+ sql_string = "SELECT distinct timestamp FROM priced where normalization = 'current_notional'"
timestamps = pd.read_sql_query(sql_string, engine)
+ df = df[['endbooknav', 'endlocalmarketprice', 'identifier']]
+ calc_df = pd.DataFrame()
for d, g in df.groupby(pd.Grouper(freq='M')):
model_date = pd.to_datetime(timestamps[timestamps.timestamp <= d+off.DateOffset(days=1)].max()[0]).date()
yc = YC(evaluation_date=model_date)
@@ -148,21 +150,36 @@ def get_rmbs_pos_df(date = None):
#special case
if model_date == pd.datetime(2017, 10, 27).date():
model_id = 4
- sql_string = "SELECT * FROM priced where date(timestamp) = %s and model_id_sub = %s"
- model = pd.read_sql_query(sql_string, engine, params=[model_date, model_id])
+ sql_string = """
+ SELECT date(timestamp) as timestamp, cusip, model_version, pv, moddur, delta_yield, delta_ir
+ FROM priced where date(timestamp) = %s
+ and normalization ='current_notional'
+ and model_version <> 2
+ and model_id_sub = %s"""
+ params_list = [model_date, model_id]
else:
- sql_string = "SELECT * FROM priced where date(timestamp) = %s"
- model = pd.read_sql_query(sql_string, engine, params=[model_date])
- model['timestamp'] = model['timestamp'].dt.date
- model = model[model.normalization == 'current_notional']
- model = model.set_index(['cusip', 'model_version']).unstack(1)
- temp = pd.merge(g.loc[d], model, left_on='identifier', right_index=True)
- temp['curr_ntl'] = temp.endbooknav/temp.endlocalmarketprice *100
- temp['b_yield'] = np.minimum((temp[('pv', 1)]/temp.endlocalmarketprice*100) ** (1/temp[('moddur', 1)]) - 1, 10)
- temp = temp.dropna(subset=['b_yield'])
- temp['b_yield'] = temp.apply(lambda df: df['b_yield'] + float(yc.zero_rate(df[('moddur', 3)])) - libor, axis=1)
- temp = temp[(temp[('pv', 3)] != 0)]
- temp['percent_model'] = temp.apply(lambda df: df.endlocalmarketprice/100/df[('pv', 3)], axis=1)
- calc_df = calc_df.append(temp)
+ sql_string = """
+ SELECT date(timestamp) as timestamp, cusip, model_version, pv, moddur, delta_yield, delta_ir
+ FROM priced where date(timestamp) = %s
+ and model_version <> 2
+ and normalization ='current_notional'"""
+ params_list = [model_date]
+ model = pd.read_sql_query(sql_string, engine, parse_dates=['timestamp'],
+ params=params_list)
+ comb_g = g.loc[d].groupby('identifier').agg({'endbooknav': np.sum,
+ 'endlocalmarketprice': np.mean})
+ model = pd.merge(comb_g, model, left_on = 'identifier', right_on='cusip')
+ positions = model.set_index(['cusip', 'model_version']).unstack(1).dropna()
+ positions = positions[positions.pv.iloc[:,0] != 0]
+ v1 = positions.xs(1, level='model_version', axis=1)
+ v3 = positions.xs(3, level='model_version', axis=1)
+ v3 = v3.assign(curr_ntl = v3.endbooknav/v3.endlocalmarketprice *100)
+ v3 = v3.assign(b_yield = v3.moddur.apply(lambda x:
+ float(yc.zero_rate(x)) - libor))
+ v3.b_yield += np.minimum((v1.pv / v1.endlocalmarketprice * 100)
+ ** (1/v1.moddur) - 1, 10).dropna()
+ v3.delta_yield = v3.delta_yield * (v3.endlocalmarketprice/100)/ v3.pv * v3.curr_ntl
+ v3.delta_ir = v3.delta_ir * np.minimum(1, 1/v3.moddur) * (v3.endlocalmarketprice/100)/ v3.pv * v3.curr_ntl
+ calc_df = calc_df.append(v3)
- return calc_df
+ return calc_df.reset_index().set_index('timestamp').sort_index()