about summary refs log tree commit diff stats
path: root/python/analytics/curve_trades.py
diff options
context:
space:
mode:
Diffstat (limited to 'python/analytics/curve_trades.py')
-rw-r--r--  python/analytics/curve_trades.py  62
1 file changed, 43 insertions, 19 deletions
diff --git a/python/analytics/curve_trades.py b/python/analytics/curve_trades.py
index 36ff319e..4fd7641e 100644
--- a/python/analytics/curve_trades.py
+++ b/python/analytics/curve_trades.py
@@ -20,13 +20,14 @@ dawndb = dbengine('dawndb')
def on_the_run(index):
r = serenitasdb.execute("SELECT max(series) FROM index_version WHERE index=%s",
- (index,))
+ (index,))
series, = r.fetchone()
return series
+
def curve_spread_diff(index='IG', rolling=6, years=3, percentage=False, percentage_base='5yr'):
otr = on_the_run(index)
- ## look at spreads
+ # look at spreads
df = get_index_quotes(index, list(range(otr - rolling, otr + 1)),
tenor=['3yr', '5yr', '7yr', '10yr'], years=years)
spreads = df.groupby(level=['date', 'tenor']).nth(-1)['closespread'].unstack(-1)
@@ -35,18 +36,21 @@ def curve_spread_diff(index='IG', rolling=6, years=3, percentage=False, percenta
spreads_diff.columns = ['3-5', '5-7', '7-10']
spreads_diff['5-10'] = spreads_diff['5-7'] + spreads_diff['7-10']
if percentage is True:
- spreads_diff = spreads.apply(lambda df: df/df[percentage_base], axis = 1)
+ spreads_diff = spreads.apply(lambda df: df/df[percentage_base], axis=1)
return spreads_diff
+
def spreads_diff_table(spreads_diff):
def current(s):
return s.iat[-1]
+
def zscore(s):
return (s.iat[-1] - s.mean()) / s.std()
- df = spreads_diff.agg(['min', 'max','mean', current, zscore])
+ df = spreads_diff.agg(['min', 'max', 'mean', current, zscore])
((spreads_diff - spreads_diff.mean())/spreads_diff.std()).plot()
return df
+
def theta_matrix_by_series(index='IG', rolling=6):
otr = on_the_run(index)
df = get_index_quotes(index, list(range(otr - rolling, otr + 1)),
@@ -56,17 +60,20 @@ def theta_matrix_by_series(index='IG', rolling=6):
theta_matrix = theta_matrix.loc[theta_matrix.index[-1][0]].unstack(0)
return theta_matrix[['3yr', '5yr', '7yr', '10yr']]
+
def ratio_within_series(index='IG', rolling=6, param='duration'):
otr = on_the_run(index)
df = get_index_quotes(index, list(range(otr - rolling, otr + 1)),
tenor=['3yr', '5yr', '7yr', '10yr']).unstack()
ratio = (df[param].
apply(lambda s: s / df[param]['5yr'].values, raw=True))
- ratio.columns = pd.MultiIndex.from_product([[param + '_ratio_to_5yr'], ratio.columns])
+ ratio.columns = pd.MultiIndex.from_product([[f"{param}_ratio_to_5yr"],
+ ratio.columns])
df = df.join(ratio).groupby(['date']).tail(1)
df = df.reset_index(level=['index', 'version'], drop=True)
return df
+
def on_the_run_theta(index='IG', rolling=6):
otr = on_the_run(index)
df = get_index_quotes(index, list(range(otr - rolling, otr + 1)),
@@ -75,12 +82,13 @@ def on_the_run_theta(index='IG', rolling=6):
theta_matrix = df.groupby(level=['date', 'tenor']).nth(-1)['theta_per_dur']
theta_matrix.unstack(-1).plot()
+
def curve_returns(index='IG', rolling=6):
- ## look at returns
+ # look at returns
otr = on_the_run(index)
df = index_returns(index=index, series=list(range(otr - rolling, otr + 1)),
tenor=['3yr', '5yr', '7yr', '10yr'])
- ## on-the-run returns
+ # on-the-run returns
df = df.reset_index().set_index(['date', 'series', 'tenor'])
returns = df.price_return.dropna().unstack(-1).groupby(level='date').nth(-1)
@@ -103,26 +111,28 @@ def curve_returns(index='IG', rolling=6):
results = strategies_return.agg([sharpe, lambda df: df.nsmallest(10).mean()])
sharpe_monthly = strategies_return_monthly.agg(sharpe, period="monthly")
sharpe_monthly.name = 'Monthly Sharpe'
- results.index=['Sharpe', 'Mean Worst 10 Days DrawDown']
+ results.index = ['Sharpe', 'Mean Worst 10 Days DrawDown']
return results.append(sharpe_monthly)
+
def cross_series_curve(index='IG', rolling=6):
otr = on_the_run(index)
df = index_returns(index= index, series=list(range(otr - rolling, otr + 1)),
tenor=['3yr', '5yr', '7yr', '10yr'])
- ## look cross series - 3y to 5y
+ # look cross series - 3y to 5y
df = df.reset_index().set_index(['date', 'index', 'tenor', 'series'])
- returns1 = df.xs(['5yr', index], level = ['tenor','index']).price_return.unstack(-1)
+ returns1 = df.xs(['5yr', index], level=['tenor','index']).price_return.unstack(-1)
price_diff = pd.DataFrame()
for ind in list(range(otr - 2, otr + 1)):
price_diff[ind] = returns1[ind] - 1.6 * returns1[ind - 4]
- price_diff = price_diff.stack().groupby(level = 'date').nth(-1)
+ price_diff = price_diff.stack().groupby(level='date').nth(-1)
monthly_returns_cross_series = (price_diff.
groupby(pd.Grouper(freq='M')).
agg(lambda df: (1 + df).prod() - 1))
plt.plot(monthly_returns_cross_series)
+
def forward_loss(index='IG'):
start_date = (pd.Timestamp.now() - pd.DateOffset(years=3)).date()
@@ -140,6 +150,7 @@ def forward_loss(index='IG'):
# annual change, to take out some noise
df['fwd_loss_rate'] = df.indexel.diff(2)/df.duration.diff(2)
+
def curve_model(tenor_1='5yr', tenor_2='10yr'):
#OLS model
df = ratio_within_series(param='closespread')
@@ -155,6 +166,7 @@ def curve_model(tenor_1='5yr', tenor_2='10yr'):
data=df).fit()
return df, ols_model
+
def curve_model_results(df, model):
df = df.dropna()
prstd_ols, df['down_2_stdev'], df['up_2_stdev'] = wls_prediction_std(model)
@@ -168,9 +180,10 @@ def curve_model_results(df, model):
df['dr_dspread'] = np.exp(model.params[0]) * model.params[2] * df.duration1 ** model.params[1] * df.closespread ** (model.params[2] - 1)
return df
+
def spread_fin_crisis(index='IG'):
otr = on_the_run(index)
- ## look at spreads
+ # look at spreads
df = get_index_quotes(index, list(range(8, otr + 1)),
tenor=['3yr', '5yr', '7yr', '10yr'], years=20)
spreads = df.groupby(level=['date', 'tenor']).nth(-1)['closespread'].unstack(-1)
@@ -192,6 +205,7 @@ def spread_fin_crisis(index='IG'):
plt.show()
+
def forward_spread(report_date, index='IG', series=None, tenors=['3yr', '5yr', '7yr', '10yr']):
if series is None:
@@ -206,6 +220,7 @@ def forward_spread(report_date, index='IG', series=None, tenors=['3yr', '5yr', '
f_spread.append(b_index.spread())
return pd.concat(f_spread, keys=date_range).unstack(-1)
+
def spot_forward(index='IG', series=None, tenors=['3yr', '5yr', '7yr', '10yr']):
'''
@@ -231,6 +246,7 @@ def spot_forward(index='IG', series=None, tenors=['3yr', '5yr', '7yr', '10yr']):
df['maturity'] = [b_index.value_date, maturity_1yr] + b_index.maturities
return df.reset_index().set_index('maturity')
+
def curve_pos(value_date, index='IG'):
'''
@@ -255,22 +271,26 @@ def curve_pos(value_date, index='IG'):
portf.mark()
return portf
+
def curve_shape(value_date, index='IG', percentile=.95, spread=None):
'''
- Returns a function to linearly interpolate between the curve based on maturity (in years)'''
+ Returns a function to linearly interpolate between the curve
+ based on maturity (in years)'''
curve_shape = curve_spread_diff(index, 10, 5, True)
steepness = (curve_shape['10yr']/curve_shape['3yr'])
series = on_the_run(index)
if spread is None:
- sql_string = "SELECT closespread FROM index_quotes where index = %s and series = %s and tenor = %s and date = %s"
+ sql_string = "SELECT closespread FROM index_quotes where index = %s " \
+ "and series = %s and tenor = %s and date = %s"
spread_df = pd.read_sql_query(sql_string, serenitasdb,
- params=[index, series, '5yr', value_date.date()])
+ params=[index, series, '5yr', value_date])
spread = spread_df.iloc[0][0]
sql_string = "SELECT tenor, maturity FROM index_maturity where index = %s and series = %s"
- lookup_table = pd.read_sql_query(sql_string, serenitasdb, parse_dates=['maturity'], params=[index, series])
+ lookup_table = pd.read_sql_query(sql_string, serenitasdb, parse_dates=['maturity'],
+ params=[index, series])
df = curve_shape[steepness == steepness.quantile(percentile, 'nearest')]
df = df * spread/df['5yr'][0]
@@ -279,10 +299,12 @@ def curve_shape(value_date, index='IG', percentile=.95, spread=None):
df['year_frac'] = (df.maturity - pd.to_datetime(value_date)).dt.days/365
return interp1d(np.hstack([0, df.year_frac]), np.hstack([0, df.spread]))
+
def pos_pnl_abs(portf, value_date, index='IG', rolling=6, years=3):
'''
- Runs PNL analysis on portf using historical on-the-run spread levels - off-the-runs spreads are duration linearly interpolated'''
+ Runs PNL analysis on portf using historical on-the-run spread levels -
+ off-the-runs spreads are duration linearly interpolated'''
series = on_the_run(index)
df = get_index_quotes(index, list(range(series - rolling, series + 1)),
@@ -290,7 +312,8 @@ def pos_pnl_abs(portf, value_date, index='IG', rolling=6, years=3):
df = df.groupby(level=['date', 'tenor']).nth(-1)['closespread'].unstack(-1)
sql_string = "SELECT tenor, maturity FROM index_maturity where index = %s and series = %s"
- lookup_table = pd.read_sql_query(sql_string, serenitasdb, parse_dates=['maturity'], params=[index, series])
+ lookup_table = pd.read_sql_query(sql_string, serenitasdb, parse_dates=['maturity'],
+ params=[index, series])
lookup_table['year_frac'] = (lookup_table.maturity - pd.to_datetime(value_date)).dt.days/365
portf_copy = deepcopy(portf)
@@ -305,6 +328,7 @@ def pos_pnl_abs(portf, value_date, index='IG', rolling=6, years=3):
df = pd.DataFrame.from_records(chain(*r), columns=['date', 'five_yr_spread', 'pnl'])
return df.set_index('date')
+
def curve_scen_table(portf, shock=10):
'''
Runs PNL scenario on portf by shocking different points on the curve.
@@ -318,7 +342,7 @@ def curve_scen_table(portf, shock=10):
shocks = np.full(4, 0)
shocks[i+1] += shock
shocks[j+1] -= shock
- #f is the shock amount interpolated based on tenor
+ # f is the shock amount interpolated based on tenor
f = interp1d(np.hstack([0, otr_year_frac]), shocks)
portf_copy = deepcopy(portf)
portf_copy.reset_pv()