Diffstat (limited to 'python/option_trades.py')
 python/option_trades.py | 328 ----------------------------------------
 1 file changed, 0 insertions(+), 328 deletions(-)
diff --git a/python/option_trades.py b/python/option_trades.py
deleted file mode 100644
index 7aae31ba..00000000
--- a/python/option_trades.py
+++ /dev/null
@@ -1,328 +0,0 @@
-import cvxpy
-import datetime
-import math
-import numpy as np
-import pandas as pd
-
-from pandas.tseries.offsets import BDay
-from arch import arch_model
-from db import dbengine
-from scipy.interpolate import interp1d
-from analytics import Index
-from dates import bond_cal
-serenitasdb = dbengine('serenitasdb')
-
-def get_daily_pnl(index, series, tenor, coupon=1):
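-    """daily P&L of a long index position: close-price change plus act/360 coupon carry"""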
- sql_str = "SELECT date, adjcloseprice AS close, closespread AS spread, duration, theta FROM index_quotes " \
- "WHERE index=%s and series=%s and tenor = %s"
- df = pd.read_sql_query(sql_str, serenitasdb, parse_dates=['date'],
- index_col=['date'], params=(index, series, tenor))
- df.sort_index(inplace=True)
-    df['dt'] = df.index.to_series().diff().dt.days
-    # price change plus act/360 coupon carry
-    df['pnl'] = df['close'].ffill().diff() + df.dt/360*coupon
- return df
-
-def daily_spreads(index, series, tenor):
- """computes daily spreads returns
-
- Parameters
- ----------
- index : string
- series : int
- tenor : string
- """
- sql_str = "SELECT date, closespread AS spread FROM index_quotes " \
- "WHERE index=%s and series=%s and tenor = %s"
- df = pd.read_sql_query(sql_str, serenitasdb, parse_dates=['date'], index_col=['date'],
- params=(index.upper(), series, tenor))
- df.sort_index(inplace=True)
- return df.spread.pct_change().dropna()
-
-def insert_quotes():
-    # back-populate some version i+1 quotes one day before they start trading so
-    # that the returns time series stays continuous across version rolls
- dates = pd.DatetimeIndex(['2014-05-21', '2015-02-19', '2015-03-05','2015-06-23'])
- df = pd.read_sql_query("SELECT DISTINCT ON (date) * FROM index_quotes " \
- "WHERE index='HY' AND tenor='5yr' " \
- "ORDER BY date, series DESC, version DESC",
- serenitasdb, parse_dates=['date'], index_col=['date'])
- df = df.loc[dates]
- for tup in df.itertuples():
- result = serenitasdb.execute("SELECT indexfactor, cumulativeloss FROM index_version " \
-                                     "WHERE index = 'HY' AND series=%s AND version in (%s, %s) " \
-                                     "ORDER BY version",
- (tup.series, tup.version, tup.version+1))
- factor1, cumloss1 = result.fetchone()
- factor2, cumloss2 = result.fetchone()
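-        # the defaulted name is assumed to settle at `recovery`, backed out
-        # from the change in cumulative loss between the two versions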
- recovery = 1-(cumloss2-cumloss1)
- version2_price = (factor1 * tup.closeprice - 100*recovery)/factor2
- print(version2_price)
-        serenitasdb.execute("INSERT INTO index_quotes(date, index, series, version, tenor, closeprice) " \
- "VALUES(%s, %s, %s, %s, %s, %s)",
- (tup.Index, 'HY', tup.series, tup.version+1, tup.tenor, version2_price))
-
-def index_returns(date=None, years=3, index="IG", tenor="5yr"):
- """computes on the run spread returns"""
- if date is None:
- date = (pd.Timestamp.now() - pd.DateOffset(years=years)).date()
- sql_str = "SELECT date, series, version, closespread AS spread FROM index_quotes " \
- "WHERE index=%s and date>=%s and tenor = %s"
- df = pd.read_sql_query(sql_str, serenitasdb, parse_dates=['date'],
- index_col=['date', 'series', 'version'],
- params=(index.upper(), date, tenor))
- df.sort_index(inplace=True)
- return (df.groupby(level=['series', 'version']).
- transform(lambda x: x.pct_change()).
- dropna().
- groupby(level='date').
- last())
-
-
-def index_price_returns(date=None, years=3, index="IG", tenor="5yr"):
- """computes on the run price returns taking coupons into account"""
- if date is None:
- date = (pd.Timestamp.now() - pd.DateOffset(years=years)).date()
- sql_str = "SELECT date, series, version, closeprice AS price FROM index_quotes " \
- "WHERE index=%s and date>=%s and tenor = %s"
- df = pd.read_sql_query(sql_str, serenitasdb, parse_dates=['date'],
- index_col=['date', 'series', 'version'],
- params=(index.upper(), date, tenor))
- df.sort_index(inplace=True)
- ## get rid of holidays
- dates = df.index.levels[0]
- holidays = bond_cal().holidays(start=dates[0], end=dates[-1])
- df = df.loc(axis=0)[dates.difference(holidays),:,:]
-
-    def returns(df, coupon=1):
-        # day counts between consecutive quotes within the group
-        days = df.index.get_level_values('date').to_series().diff().dt.days.values
-        df['returns'] = df.price.pct_change() + coupon * days/360/df.price
-        return df
-    return (df.groupby(level=['series', 'version']).
-            apply(returns, coupon=(1 if index == 'IG' else 5)).
- groupby(level='date')['returns'].last())
-
-def realized_vol(index, series, tenor, date=None, years=None):
- """computes the realized spread volatility"""
- if date is None:
- if years is None:
- raise ValueError("need to provide at least one of date or years")
- date = (pd.Timestamp.now() - pd.DateOffset(years=years)).date()
- returns = daily_spreads(index, series, tenor)
- # GARCH(1,1) volatility process with constant mean
- am = arch_model(returns)
- res = am.fit(update_freq=0, disp='off')
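-    # annualize the daily conditional volatility (252 trading days)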
- return (res.conditional_volatility * math.sqrt(252), res)
-
-def lr_var(res):
- """ computes long run variance of the garch process
-
- .. math::
-
-       \sigma^2 = \frac{\omega}{1-\sum_{i=1}^p \alpha_i - \sum_{i=1}^q \beta_i}
-
- """
- names = res.model.volatility.parameter_names()
- ## names[0] is omega, rest is alpha[1],..., alpha[p], beta[1],...,beta[q]
-    var = res.params[names[0]]/(1 - res.params[names[1:]].sum())
- return math.sqrt(var * 252)
-
-def atm_vol_fun(v, ref_is_price=False, moneyness=0.2):
-    # ref_is_price is passed for price-quoted indices (HY) but is currently unused
-    f = interp1d(v.strike.values, v.vol.values, fill_value='extrapolate')
-    atm_val = v['fwdspread'].iat[0]
-    otm_val = atm_val * (1 + moneyness)  # multiplicative moneyness; doesn't make sense for HY
-    return pd.Series(f(np.array([atm_val, otm_val])), index=['atm_vol', 'otm_vol'])
-
-def atm_vol(index, series, moneyness=0.2):
- df = pd.read_sql_query('SELECT quotedate, expiry, strike, vol from swaption_quotes ' \
- 'WHERE index = %s and series = %s',
- serenitasdb, index_col=['quotedate', 'expiry'],
- params = (index.upper(), series))
- index_data = pd.read_sql_query(
- 'SELECT quotedate, expiry, fwdspread from swaption_ref_quotes ' \
- 'WHERE index= %s and series = %s',
- serenitasdb, index_col = ['quotedate', 'expiry'],
- params = (index.upper(), series))
-
- df = df.join(index_data)
- df = df.groupby(level=['quotedate', 'expiry']).filter(lambda x: len(x)>=2)
-    df = df.groupby(level=['quotedate', 'expiry']).apply(atm_vol_fun, index.upper()=="HY", moneyness)
-    df = df.reset_index(level=-1)  # move expiry back to the columns
- return df
-
-def atm_vol_date(index, date):
- df = pd.read_sql_query('SELECT quotedate, series, expiry, strike, vol ' \
- 'FROM swaption_quotes ' \
- 'WHERE index = %s and quotedate >= %s',
- serenitasdb,
- index_col=['quotedate', 'expiry', 'series'],
- params=(index.upper(), date))
- index_data = pd.read_sql_query(
- 'SELECT quotedate, expiry, series, fwdspread FROM swaption_ref_quotes ' \
- 'WHERE index= %s and quotedate >= %s',
- serenitasdb, index_col=['quotedate', 'expiry', 'series'],
- params = (index.upper(), date))
- df = df.join(index_data)
- df = df.groupby(df.index).filter(lambda x: len(x)>=2)
- df = df.groupby(level=['quotedate', 'expiry', 'series']).apply(atm_vol_fun)
-    df = df.reset_index(level=['expiry', 'series'])  # move expiry and series back to the columns
- return df
-
-def rolling_vol(df, col='atm_vol', term=[3]):
- """compute the rolling volatility for various terms"""
- df = df.groupby(df.index).filter(lambda x: len(x)>2)
- def aux(s, col, term):
- k = s.index[0]
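-        # interpolate vols along expiry (as epoch floats) at the constant-maturity dates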
- f = interp1d(s.expiry.values.astype('float'), s[col].values, fill_value='extrapolate')
- x = np.array([(k + pd.DateOffset(months=t)).to_datetime64().astype('float') \
- for t in term])
- return pd.Series(f(x), index=[str(t)+'m' for t in term])
-
- df = df.groupby(level='quotedate').apply(aux, col, term)
- # MS quotes don't have fwdspread so they end up as NA
- return df.dropna()
-
-def vol_var(percentile=0.99, index='IG'):
-    """tail quantile of daily percentage moves in constant-maturity ATM vol"""
-    df = atm_vol_date(index, datetime.date(2014, 6, 11))
- df = rolling_vol(df, term=[1,2,3])
- df = df.sort_index()
- df = df.groupby(df.index.date).last()
- return df.pct_change().quantile(percentile)
-
-def index_rolling_returns(date=None, years=3, index="IG", tenor="5yr"):
- """computes on the run returns"""
- if date is None:
- date = (pd.Timestamp.now() - pd.DateOffset(years=years)).date()
- sql_str = "SELECT date, series, closespread AS spread FROM index_quotes " \
- "WHERE index=%s and date>=%s and tenor = %s"
- df = pd.read_sql_query(sql_str, serenitasdb, parse_dates=['date'], index_col=['date', 'series'],
- params=(index.upper(), date, tenor))
- df.sort_index(inplace=True)
- df = df.groupby(level='series').pct_change()
- return df.groupby(level='date').last()
-
-def get_index_spread(index, series, date, conn):
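-    """5yr close spread for (index, series) on `date`, or None if not quoted"""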
- with conn.cursor() as c:
- c.execute("SELECT closespread from index_quotes " \
- "WHERE index=%s and series=%s and date=%s and tenor='5yr'",
- (index, series, date))
- try:
- spread, = c.fetchone()
- except TypeError:
- spread = None
- conn.commit()
- return spread
-
-def get_index_ref(index, series, date, expiry, conn):
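-    """latest ref level and forward spread quoted for `expiry` on `date`"""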
- with conn.cursor() as c:
- c.execute("SELECT ref, fwdspread from swaption_ref_quotes " \
- "WHERE index=%s and series=%s and quotedate::date=%s "\
- "AND expiry=%s ORDER BY quotedate desc",
- (index, series, date, expiry))
- try:
- ref, fwdspread = c.fetchone()
- except TypeError:
- ref, fwdspread = None, None
- conn.commit()
- return ref, fwdspread
-
-def get_option_pnl(strike, expiry, index, series, start_date, engine):
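-    # quotes may only exist at a neighboring listed strike (2.5 grid): try candidates in order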
- for s in [strike, strike+2.5, strike-2.5, strike+5]:
- df = pd.read_sql_query("SELECT quotedate, (pay_bid+pay_offer)/2 AS pay_mid, " \
- "(rec_bid+rec_offer)/2 AS rec_mid FROM swaption_quotes " \
-                               "WHERE strike=%s and expiry=%s and index=%s and series=%s " \
-                               "and quotedate>=%s", engine,
- params=(s, expiry, index, series, start_date),
- index_col='quotedate',
- parse_dates=['quotedate'])
- if not df.empty and df.index[0].date() == start_date:
- strike = s
- break
- else:
- raise ValueError("Couldn't find data starting from that date")
-
- if not pd.api.types.is_datetime64tz_dtype(df.index):
- df.index = df.index.tz_localize('utc')
-
- df = df.groupby(df.index.normalize()).last()
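-    # if the option has expired, append a settlement row: the intrinsic value
-    # of the payer/receiver legs, obtained by repricing the index at the strike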
- if expiry < datetime.date.today():
- spread = get_index_spread(index, series, expiry, engine.raw_connection())
- underlying = Index.from_name(index, series, "5yr", expiry, 1e4)
- underlying.spread = spread
- pv = underlying.pv
- underlying.spread = strike
- if spread > strike:
- pay_mid, rec_mid = pv-underlying.pv, 0
- else:
- pay_mid, rec_mid = 0, underlying.pv - pv
- pv = underlying.pv
-        df = pd.concat([df, pd.DataFrame([[pay_mid, rec_mid]],
-                                         columns=['pay_mid', 'rec_mid'],
-                                         index=[pd.Timestamp(expiry, tz='UTC')])])
- return df, strike
-
-def sell_vol_strategy(index="IG", months=3):
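-    """collect running mid P&L for near-ATM-forward straddles entered `months` before each expiry"""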
- engine = dbengine('serenitasdb')
- conn = engine.raw_connection()
- with conn.cursor() as c1, conn.cursor() as c2:
- c1.execute("SELECT DISTINCT series, expiry FROM " \
- "swaption_quotes ORDER BY expiry, series desc")
- d = {}
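-        # for each listed (series, expiry), enter `months` before expiry at the
-        # strike nearest the forward, and track the option mids to expiry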
- for series, expiry in c1:
- start_date = BDay().rollback(expiry - pd.DateOffset(months=months)).date()
- if start_date > datetime.date.today():
- break
- c2.execute("SELECT max(quotedate::date) FROM swaption_quotes WHERE " \
- "index=%s AND series=%s AND expiry=%s AND quotedate<=%s",
- (index, series, expiry, start_date))
- actual_start_date, = c2.fetchone()
- if actual_start_date is None or (start_date - actual_start_date).days > 5:
- continue
- ref, fwdspread = get_index_ref(index, series, actual_start_date, expiry, conn)
- if fwdspread is None:
- fwdspread = ref + months / 50 #TODO: use actual values
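-            # snap to the nearest listed strike on the 2.5 grid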
- strike = round(fwdspread/2.5) * 2.5
- pnl, strike = get_option_pnl(strike, expiry, index, series, actual_start_date, engine)
- d[(series, strike, expiry)] = pnl
- conn.commit()
- return d
-
-def aggregate_trades(d):
-    """daily P&L from selling one unit of each straddle in `d`"""
-    r = pd.Series(dtype=float)
-    for v in d.values():
-        # short position: P&L is minus the day-on-day change in total option value
-        r = r.add(-v.sum(axis=1).diff().dropna(), fill_value=0)
- return r
-
-
-def compute_allocation(df):
-    """mean-variance frontier: maximize return - gamma * risk, weights in [-2, 2] summing to 1"""
-    Sigma = df.cov().values
-    gamma = cvxpy.Parameter(nonneg=True)
-    mu = df.mean().values
-    w = cvxpy.Variable(3)
-    ret = mu @ w
-    risk = cvxpy.quad_form(w, Sigma)
-    prob = cvxpy.Problem(cvxpy.Maximize(ret - gamma * risk),
-                         [cvxpy.sum(w) == 1,
-                          w >= -2,
-                          w <= 2])
-
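-    # sweep the risk-aversion parameter to trace out the efficient frontier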
- gamma_x = np.linspace(0, 0.02, 500)
- W = np.empty((3, gamma_x.size))
- for i, val in enumerate(gamma_x):
- gamma.value = val
- prob.solve()
- W[:,i] = np.asarray(w.value).squeeze()
-
- fund_return = mu @ W
-    fund_vol = np.array([math.sqrt(W[:,i] @ Sigma @ W[:,i]) for i in range(gamma_x.size)])
- return (W, fund_return, fund_vol)
-
-
-if __name__ == "__main__":
- d1 = sell_vol_strategy(months=1)
- d2 = sell_vol_strategy(months=2)
- d3 = sell_vol_strategy(months=3)
- all_tenors = pd.concat([aggregate_trades(d) for d in [d1, d2, d3]], axis=1)
- all_tenors.columns = ['1m', '2m', '3m']
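-    # hand-picked mix across tenors: +1.2x 1m, -1.2x 2m, +1x 3m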
- all_tenors['optimal'] = ((1.2*all_tenors['1m']).
- sub(1.2*all_tenors['2m'], fill_value=0).
- add(all_tenors['3m'], fill_value=0))