diff options
 python/analytics/index.py  |  7 ++++---
 python/analytics/option.py |  4 ++--
 python/index_data.py       | 14 +++++++-------
 3 files changed, 13 insertions(+), 12 deletions(-)
diff --git a/python/analytics/index.py b/python/analytics/index.py
index 265e470d..0600deb9 100644
--- a/python/analytics/index.py
+++ b/python/analytics/index.py
@@ -18,10 +18,11 @@
 from sqlalchemy import exc
 from pyisda.curve import SpreadCurve
 from .utils import previous_twentieth, build_table
 from bbg_helpers import BBG_IP, retrieve_data, init_bbg_session
+from yieldcurve import YC, ql_to_jp, roll_yc, rate_helpers
 from weakref import WeakSet
 
-engine = dbengine('serenitasdb')
+_engine = dbengine('serenitasdb')
 
 def g(index, spread, exercise_date, forward_yc=None, pv=None):
     """computes the strike clean price using the expected forward yield curve.
@@ -366,7 +367,7 @@
             ref_data = retrieve_data(session, security, field)
             self.ref = ref_data[security][field]
         else:
-            run = engine.execute("""SELECT * FROM index_quotes
+            run = _engine.execute("""SELECT * FROM index_quotes
             WHERE index=%s AND series=%s AND tenor=%s AND date=%s""",
                                  (index_type, series, tenor, self.trade_date))
             rec = run.fetchone()
@@ -389,7 +390,7 @@
             raise ValueError("Not enough information to load the index.")
         try:
             df = pd.read_sql_query(sql_str,
-                                   engine, parse_dates=['lastdate', 'issue_date'],
+                                   _engine, parse_dates=['lastdate', 'issue_date'],
                                    params=params)
             maturity = df.maturity[0]
             coupon = df.coupon[0]
diff --git a/python/analytics/option.py b/python/analytics/option.py
index e366c51b..5e764b68 100644
--- a/python/analytics/option.py
+++ b/python/analytics/option.py
@@ -10,7 +10,7 @@
 from db import dbengine
 from .black import black, Nx
 from .sabr import sabr
 from .utils import GHquad, build_table
-from .index import g, ForwardIndex, Index, engine
+from .index import g, ForwardIndex, Index, _engine
 from yieldcurve import roll_yc
 from pandas.tseries.offsets import BDay
@@ -464,7 +464,7 @@
             "WHERE quotedate::date = %s AND index= %s AND series = %s " \
             "AND quote_source != 'SG' " \
             "ORDER BY quotedate DESC",
-            engine,
+            _engine,
             parse_dates = ['quotedate', 'expiry'],
             params=(trade_date, index_type.upper(), series))
         self._quotes.loc[(self._quotes.quote_source == "GS") & (self._quotes['index'] =="HY"),
diff --git a/python/index_data.py b/python/index_data.py
index 91fe2f49..11935194 100644
--- a/python/index_data.py
+++ b/python/index_data.py
@@ -3,7 +3,7 @@
 from dates import bond_cal
 import datetime
 import pandas as pd
 
-serenitasdb = dbengine('serenitasdb')
+_serenitas_engine = dbengine('serenitasdb')
 
 def insert_quotes():
     """
@@ -22,7 +22,7 @@
     df = pd.read_sql_query("SELECT DISTINCT ON (date) * FROM index_quotes " \
                            "WHERE index='HY' AND tenor='5yr' " \
                            "ORDER BY date, series DESC, version DESC",
-                           serenitasdb, parse_dates=['date'], index_col=['date'])
+                           _serenitas_engine, parse_dates=['date'], index_col=['date'])
     df = df.loc[dates]
     for tup in df.itertuples():
         result = serenitasdb.execute("SELECT indexfactor, cumulativeloss FROM index_version " \
@@ -34,9 +34,9 @@
         recovery = 1-(cumloss2-cumloss1)
         version2_price = (factor1 * tup.closeprice - 100*recovery)/factor2
         print(version2_price)
-        serenitasdb.execute("INSERT INTO index_quotes(date, index, series, version, tenor, closeprice)" \
-                            "VALUES(%s, %s, %s, %s, %s, %s)",
-                            (tup.Index, 'HY', tup.series, tup.version+1, tup.tenor, version2_price))
+        _serenitas_engine.execute("INSERT INTO index_quotes(date, index, series, version, tenor, closeprice)" \
+                                  "VALUES(%s, %s, %s, %s, %s, %s)",
+                                  (tup.Index, 'HY', tup.series, tup.version+1, tup.tenor, version2_price))
 
 def get_index_quotes(index=None, series=None, tenor=None, from_date=None, years=3):
     args = locals().copy()
@@ -66,7 +66,7 @@
     return {k: tuple(v) if isinstance(v, list) else v
             for k, v in args.items() if v is not None}
 
-    df = pd.read_sql_query(sql_str, serenitasdb, parse_dates=['date'],
+    df = pd.read_sql_query(sql_str, _serenitas_engine, parse_dates=['date'],
                            index_col=['date', 'index', 'series', 'version', 'tenor'],
                            params = make_params(args))
     df.sort_index(inplace=True)
@@ -105,7 +105,7 @@
     df.columns = ['spread_return', 'price_return']
     df = df.groupby(level=['date', 'index', 'series', 'tenor']).nth(0)
     coupon_data = pd.read_sql_query("SELECT index, series, tenor, coupon FROM " \
-                                    "index_maturity WHERE coupon is NOT NULL", serenitasdb,
+                                    "index_maturity WHERE coupon is NOT NULL", _serenitas_engine,
                                     index_col=['index', 'series', 'tenor'])
     def add_accrued(df):
         coupon = coupon_data.loc[df.index[0][1:],'coupon'] * 1e-4
