Diffstat (limited to 'python/mark_backtest_underpar.py')
-rw-r--r--  python/mark_backtest_underpar.py  222
1 file changed, 99 insertions(+), 123 deletions(-)
diff --git a/python/mark_backtest_underpar.py b/python/mark_backtest_underpar.py
index da65b5df..d34c7a4d 100644
--- a/python/mark_backtest_underpar.py
+++ b/python/mark_backtest_underpar.py
@@ -7,13 +7,14 @@ import statsmodels.api as sm
from statsmodels.formula.api import gls
import seaborn as sb
+import globeop_reports as ops
+
def get_mark_df(asset_class = 'Subprime'):
#Filter out marks > 1000, where the marks are weird...
df_external_marks = pd.read_sql_query("select * from external_marks_mapped where mark < 1000"
, dbengine('dawndb'))
- date_range = df_external_marks.date.unique()
df_trades = pd.DataFrame()
- for date in date_range:
+ for date in df_external_marks.date.unique():
df_temp= pd.read_sql_query("select description, identifier, usd_market_value/price*100 as curr_ntl from risk_positions(%s, %s) where price >0 and length(identifier) = 9 "
, dbengine('dawndb'), params = [date, asset_class])
df_temp['date'] = date
@@ -21,161 +22,136 @@ def get_mark_df(asset_class = 'Subprime'):
df = df_trades.merge(df_external_marks).dropna()
return df.set_index(['date','identifier'])
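+# Illustrative (hypothetical shape): get_mark_df('Subprime') returns one row
+# per (date, identifier, source), indexed by date and identifier, with the
+# position's current notional (curr_ntl) and the external mark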
-def calc_mark_diff(df):
+def calc_mark_diff(df, sources=['PRICESERVE', 'PRICINGDIRECT', 'BVAL', 'MARKIT', 'BROKER', 'REUTERS', 'S&P', 'PB', 'IDC']):
+
#All Sources (including manager...?!) - average, manager mark only, median, closest
g = df.groupby(level = ['date','identifier'])
- avg_mark = g.mean()
- avg_mark = avg_mark.join(df[df.source == 'MANAGER']['mark'], rsuffix = '_manager')
- avg_mark = avg_mark.join(g.median()['mark'], rsuffix = '_median_all')
-
- def closest(x):
- if x.mark.count() > 1:
- x.dist = abs(x.mark - x.mark[x.source == 'MANAGER'])
- mark = x.mark[x.dist == x.dist[x.dist>0].min()].iloc[0]
- else:
- mark = x.mark[0]
- return mark
-
+ diff = g.mean()
+ diff = diff.join(df[df.source == 'MANAGER']['mark'], rsuffix = '_manager')
+ diff = diff.join(g.median()['mark'], rsuffix = '_median_all')
temp = g.apply(closest)
temp = temp.rename('mark_closest_all')
- avg_mark = avg_mark.join(temp)
+ diff = diff.join(temp)
- #Filtered Sources - choose PRICESERVE,PRICINGDIRECT,MARKIT,BVAL
- #Filtered Sources - now really it is everything
- sources = ['PRICESERVE', 'PRICINGDIRECT','BVAL','MARKIT','BROKER', 'REUTERS', 'S&P', 'PB', 'IDC']
+ #Filtered Sources - mean, median, remove max min
df_filtered = df[df.source.isin(sources)]
g1 = df_filtered.groupby(level = ['date','identifier'])
-
- #Filtered Sources - average,median, remove max min, closest
- avg_mark = avg_mark.join(g1.mean()['mark'], rsuffix = '_filtered')
- avg_mark = avg_mark.join(g1.median()['mark'], rsuffix = '_filtered_median')
-
- def remove_max_min(x):
- if x.count() >= 4:
- mark = (x.sum() - x.max() - x.min())/(x.count() -2)
- else:
- mark = x.mean()
- return mark
-
- avg_mark = avg_mark.join(g1.mark.apply(remove_max_min), rsuffix = '_filtered_no_max_min')
+ diff = diff.join(g1.mean()['mark'], rsuffix = '_filtered_mean')
+ diff = diff.join(g1.median()['mark'], rsuffix = '_filtered_median')
+ diff = diff.join(g1.mark.apply(remove_max_min), rsuffix = '_filtered_no_max_min')
#calculate difference: negative means Serenitas marks higher
- avg_mark1 = avg_mark.multiply(avg_mark.curr_ntl/100, axis = 'index')
- del avg_mark1['curr_ntl']
- diff = avg_mark1.apply(lambda x: (x-x.mark_manager), axis = 1)
-
- #diff = difference to copy to performance number sheet
- diff = diff.groupby(level = 'date').sum()
+ diff = diff.multiply(diff.curr_ntl/100, axis = 'index')
+ del diff['curr_ntl']
+ diff = diff.rename(columns = {'mark':'mark_mean_all'})
+ diff = diff.apply(lambda x: (x-x.mark_manager), axis = 1)
- #count of each source
- g2 = df.set_index('source', append=True).groupby(level = ['date','source'])
- count = g2['mark'].count().unstack(-1)
+ return diff.groupby(level = 'date').sum()
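+# Illustrative: the result has one column per marking scheme (mean, manager,
+# median, closest and the filtered variants) and one row per date with the
+# total market-value difference to the manager marks; the mark_manager
+# column itself nets to zero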
- #diff_by_source: now calculate the pricing by source
- dftemp = df.drop('description', 1)
- dftemp = dftemp.set_index(['source'], append=True).apply(lambda x: x.curr_ntl * x.mark/100, axis = 1)
- dftemp = dftemp.groupby(level =['date','identifier','source']).mean()
- dftemp = dftemp.unstack(-1).apply(lambda x: (x-x.MANAGER), axis = 1)
- diff_by_source = dftemp.groupby(level = 'date').sum()
+def closest(x):
+    if x.mark.count() > 1:
+        # keep the distances in a local Series; assigning x.dist on a
+        # DataFrame sets an attribute, not a column, and pandas warns
+        dist = abs(x.mark - x.mark[x.source == 'MANAGER'])
+        return x.mark[dist == dist[dist > 0].min()].iloc[0]
+    else:
+        return x.mark.iloc[0]
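+# Illustrative example (hypothetical data): for marks of 98 (MANAGER), 95 and
+# 101, the distances to the manager mark are 0, 3 and 3; the first mark at
+# the smallest non-zero distance, 95, is returned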
- #Diff by MV per source.
- diff_by_month = dftemp.groupby(level = 1).diff()
+def remove_max_min(x):
+    if x.count() >= 4:
+        return (x.sum() - x.max() - x.min())/(x.count() - 2)
+    else:
+        return x.mean()
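+# Illustrative example (hypothetical data): marks of 90, 95, 100 and 105 drop
+# the max and min, leaving (95 + 100)/2 = 97.5; with fewer than four marks
+# the plain mean is returned instead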
- return(diff, diff_by_source, count)
+def diff_by_source(df):
+    #takes the output of get_mark_df() and calculates the pricing difference by source
+    df = df.drop('description', axis=1)
+    df = df.set_index(['source'], append=True).apply(lambda x: x.curr_ntl * x.mark/100, axis = 1)
+    df = df.groupby(level = ['date','identifier','source']).mean()
+    df = df.unstack(-1).apply(lambda x: (x-x.MANAGER), axis = 1)
+    return df.groupby(level = 'date').sum()
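+# Illustrative usage: diff_by_source(get_mark_df()) gives one column per
+# source with the daily market-value gap to the MANAGER marks (negative
+# means Serenitas marks higher, as above)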
-def back_test():
- df = pd.read_sql_table('external_marks_mapped', dbengine('dawndb'),
- parse_dates=['date'])
- df = df[df.source.notnull()]
- df_wide = (pd.pivot_table(df, 'mark', ['identifier', 'date'], 'source').
- reset_index().
- sort_values('date'))
+def diff_by_source_percentage(df):
+    df = diff_by_source(df)
+    df = df.join(ops.get_net_navs()['endbooknav'])
+    df = df.apply(lambda x: (x/x.endbooknav), axis = 1)
+    del df['endbooknav']
+    return df
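+# Illustrative: the same gaps expressed as a fraction of end-of-period book
+# NAV, so -0.002 means a source's marks imply a NAV about 0.2% below the
+# manager's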
- df_trades = {}
- df_trades['sell'] = pd.read_sql_query("select trade_date, identifier, faceamount, principal_payment, price " \
- "from bonds where buysell = 'f'"
- , dbengine('dawndb'),
- parse_dates=['trade_date'])
- df_trades['buy'] = pd.read_sql_query("select trade_date, identifier, faceamount, principal_payment, price " \
- "from bonds where buysell = 't'"
- , dbengine('dawndb'),
- parse_dates=['trade_date'])
- for direction, trades in df_trades.items():
- df_trades[direction] = trades.sort_values('trade_date')
+def count_sources(df):
+    #takes the output of get_mark_df() and plots the number of marks per source
+    g2 = df.set_index('source', append=True).groupby(level = ['date','source'])
+    # there are a good number of Bloomberg duplicates; not a big deal, but they should be cleaned up
+    g2['mark'].count().unstack(-1).plot()
- df_sell_wide = pd.merge_asof(df_trades['sell'], df_wide, left_on='trade_date', right_on='date', by='identifier')
- df_wide_temp = df_wide.set_index('date').shift(periods = -1, freq= 'M', axis = 1).reset_index()
- df_buy_wide = pd.merge_asof(df_trades['buy'], df_wide_temp
- , left_on='trade_date', right_on='date', by='identifier')
+def alt_navs():
+    navs = ops.get_net_navs()
+    df = calc_mark_diff(get_mark_df())
+    end_nav, beg_nav, returns = pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
+    for col in df.columns:
+        end_nav[col] = df[col] + navs.endbooknav
+        beg_nav[col] = end_nav[col].shift(1) + navs.net_flow.shift(1)
+        # seed the first period's NAV directly, avoiding chained assignment
+        beg_nav.loc[beg_nav.index[0], col] = 12500000
+        returns[col] = (end_nav[col] - navs.incentive)/beg_nav[col] - 1
+    # chain the returns from a base of 100: nav_i = nav_{i-1} * (1 + r_i)
+    nav_100 = 100 * (1 + returns.dropna()).cumprod()
+    return returns, nav_100
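+# e.g. (illustrative): monthly returns of 1% then -0.5% chain to
+# 100 * 1.01 * 0.995 = 100.495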
- d_sell = {}
- d_buy = {}
- df_long = {}
+def annual_performance(nav_100):
+    # pd.TimeGrouper is deprecated; pd.Grouper does the same job here
+    perf = nav_100.groupby(pd.Grouper(freq = 'A')).last()
+    perf_ann = perf/perf.shift(1) - 1
+    # the first year (2013) has no prior year-end level, so measure it
+    # against the base of 100
+    perf_ann.iloc[0] = perf.iloc[0]/100 - 1
+    return perf_ann
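+# e.g. (illustrative): year-end levels of 108 then 112.32 give annual
+# returns of 8% and 4%, the first year being measured against 100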
- for source, g in df.groupby('source'):
- d_sell[source] = pd.merge_asof(df_trades['sell'], g, left_on='trade_date', right_on='date', by='identifier')
- d_sell[source]['dir'] = 'sell'
- d_buy[source] = pd.merge_asof(df_trades['buy'], g.set_index('date').shift(periods = -1, freq= 'BM', axis = 1).reset_index(), left_on='trade_date', right_on='date', by='identifier')
- d_buy[source]['dir'] = 'buy'
+def alt_nav_impact():
+    navs = ops.get_net_navs()
+    df = calc_mark_diff(get_mark_df())
+    df = df.join(navs.endbooknav)
+    return df.iloc[-1]/df.iloc[-1]['endbooknav']
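+# Illustrative: the latest date's mark differences as a fraction of the
+# latest book NAV, one value per marking scheme (the endbooknav entry
+# itself comes out as 1)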
- #add a column saying it is buy or sell
+def back_test(begindate = '2013-01-01', enddate = '2018-01-01', sell_price_threshold = 200):
+ df = pd.read_sql_query("SELECT * FROM external_marks_mapped WHERE source IS NOT NULL", dbengine('dawndb'),
+ parse_dates=['date'])
+ df_wide = (pd.pivot_table(df, 'mark', ['identifier', 'date'], 'source').reset_index().sort_values('date'))
+ df_trades = pd.read_sql_query("select trade_date, identifier, price, buysell from bonds",
+ dbengine('dawndb'), parse_dates=['trade_date'])
+ df_trades.sort_values('trade_date', inplace = True)
+ df_sell_wide = pd.merge_asof(df_trades[df_trades.buysell == False], df_wide, left_on='trade_date', right_on='date', by='identifier').drop('date', axis=1)
- df_long = pd.concat(d_sell, join='inner')
- df_long = df_long.append(pd.concat(d_buy, join='inner'))
- df_long = df_long.reset_index().dropna(subset=['price','mark'])
+ df_long = df_sell_wide.set_index(['trade_date','identifier','price','buysell']).stack()
+ df_long = df_long.reset_index().rename(columns={'level_4': 'source', 0:'mark'})
df_long['difference'] = (df_long['price'] - df_long['mark'])/df_long['mark']
#filtering
- diff_threshold = 5
- sell_price_threshold = 200
- begindate = '2013-01-01'
- enddate = '2018-01-01'
-
df_long = df_long[df_long.identifier.str.len() == 9]
df_long = df_long[df_long.price < sell_price_threshold]
df_long = df_long[(df_long['trade_date'] > begindate) & (df_long['trade_date'] < enddate)]
df_long.loc[df_long.source == 'MANAGER','source'] = 'LMCG'
- #pretty plot
- #df_long['dotsize'] = df_long['level_0'].apply(lambda x: 400 if x == 'MANAGER' else 20)
+ return df_long
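+# Illustrative usage: back_test('2014-01-01', '2017-01-01') returns one row
+# per (sale, source) with the relative gap between the realized sale price
+# and the most recent prior mark from that source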
- plt.switch_backend('Agg')
- sb.set_style("whitegrid")
- sb.set_context("notebook")
+def stats(df_long, diff_threshold = 5):
- #plt.switch_backend('Qt4Agg')
- order = ['LMCG','BROKER','BVAL','IDC','MARKIT','PB','PRICESERVE','PRICINGDIRECT','REUTERS','S&P']
- sb.set_palette(sb.hls_palette(10, l=.4, s=.8))
+ g = df_long[df_long.difference < diff_threshold].groupby('source')
- g_sell = sb.FacetGrid(df_long[df_long.dir == 'sell'], hue='source', hue_kws={'s':[50] + [20]*9, 'marker': ["o"]+["s"]*9, 'alpha': [1]+[.4]*9}, legend_out=True, aspect = 2.1, size = 4, hue_order = order)
- g_sell.set(ylim=(0, 105), xlim=(0, 105))
- ax_sell = g_sell.map(plt.scatter, 'mark', 'price').add_legend()
- ax_sell.set_axis_labels('Mark','Sale Price')
- ax_sell.fig.savefig("/home/serenitas/edwin/backtest_sales.png")
+ #fit all the models at once
+ params = g.apply(lambda df: gls('price~mark', df).fit().params)
+ error = pd.DataFrame([g.difference.mean(), g.difference.std()])
+ error.index = ['average', 'standard deviation']
- g_buy = sb.FacetGrid(df_long[df_long.dir == 'buy'], hue='source', hue_kws={'s':[50] + [20]*9, 'marker': ["o"]+["s"]*9, 'alpha': [1]+[.4]*9}, legend_out=True, aspect = 2.1, size = 4, hue_order = order)
- g_buy.set(ylim=(0, 105), xlim=(0, 105))
- ax_buy = g_buy.map(plt.scatter, 'mark', 'price').add_legend()
+ return params, error
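+# Illustrative usage: params, error = stats(back_test()); params holds each
+# source's GLS intercept and slope of price on mark, error the mean and
+# standard deviation of the relative pricing error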
- ax_buy.fig.savefig("/home/serenitas/edwin/backtest_buys.png")
+def pretty_plot(df_long):
- params = {}
- error = {}
- trade_PNL = {}
- trade_PNL_stat = {}
+ #plt.switch_backend('Agg')
+ sb.set_style("whitegrid")
+ sb.set_context("notebook")
- #fit all the models at once
- for direction, df_temp in df_long[(df_long.difference < diff_threshold)].groupby('dir'):
- params[direction] = (df_temp.
- groupby('source').
- apply(lambda df: gls('price~mark', df).fit().params))
- error[direction] = [df_temp.groupby('source').mean()['difference'],
- df_temp.groupby('source').std()['difference']]
- trade_PNL[direction] = df_temp.groupby(['date', 'source']).apply(
- lambda df: sum(df.principal_payment * df.difference)/
- sum(df.principal_payment))
- trade_PNL_stat[direction] = [trade_PNL[direction].groupby(level = 'source').mean(),
- trade_PNL[direction].groupby(level = 'source').std()]
+ order = ['LMCG','BROKER','BVAL','IDC','MARKIT','PB','PRICESERVE','PRICINGDIRECT','REUTERS','S&P']
+ sb.set_palette(sb.hls_palette(10, l=.4, s=.8))
- buychart = trade_PNL['buy'].unstack(-1).plot()
+ grid = sb.FacetGrid(df_long, hue='source', hue_kws={'s':[50] + [20]*9, 'marker': ["o"]+["s"]*9, 'alpha': [1]+[.4]*9}, legend_out=True, aspect = 2.1, size = 4, hue_order = order)
+ grid.set(ylim=(0, 105), xlim=(0, 105))
+ ax = grid.map(plt.scatter, 'mark', 'price').add_legend()
+ ax.set_axis_labels('Mark', 'Sale Price')
+ ax.fig.savefig("/home/serenitas/edwin/backtest.png")
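+# Illustrative usage: pretty_plot(back_test()) writes the sale-price vs mark
+# scatter, one color per source, to backtest.png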