-rw-r--r--  python/calibrate_tranches_BC.py    184
-rw-r--r--  python/cds_curve.py                 43
-rw-r--r--  python/quote_parsing/__main__.py   101
-rw-r--r--  python/trade_dataclasses.py          2
4 files changed, 167 insertions(+), 163 deletions(-)
diff --git a/python/calibrate_tranches_BC.py b/python/calibrate_tranches_BC.py
index 3da19a69..7ea71277 100644
--- a/python/calibrate_tranches_BC.py
+++ b/python/calibrate_tranches_BC.py
@@ -36,7 +36,7 @@ def build_sql_str(df, use_markit=False):
if __name__ == "__main__":
from serenitas.utils import SerenitasFileHandler
- from serenitas.utils.db import dbconn
+ from serenitas.utils.db2 import serenitas_pool
from serenitas.utils.env import CONFIG_DIR
logger = logging.getLogger("tranche_calib")
@@ -128,102 +128,106 @@ if __name__ == "__main__":
"eu36": datetime.date(2021, 9, 24),
}
- serenitas_conn = dbconn("serenitasdb")
- if args.config is None:
- if args.index is None:
- raise ValueError("Please provide an index to run")
- config = {"runs": [(args.index, args.tenor, args.skewtype)]}
- else:
- with (CONFIG_DIR / args.config).open("r") as fh:
- config = full_load(fh)
-
- for index, tenor, skewtype in config["runs"]:
- begin_date = None
- index, series = index[:2].upper(), int(index[2:])
- if args.start_from is not None:
- begin_date = args.start_from
- if args.update:
- begin_date = get_lastdate(serenitas_conn, index, series, tenor)
- if begin_date is None:
- continue
- if not args.update and begin_date is None:
- try:
- begin_date = start_dates[f"{index.lower()}{series}"]
- except KeyError:
- print(index, series)
- continue
+ with serenitas_pool.connection() as serenitas_conn:
+ if args.config is None:
+ if args.index is None:
+ raise ValueError("Please provide an index to run")
+ config = {"runs": [(args.index, args.tenor, args.skewtype)]}
+ else:
+ with (CONFIG_DIR / args.config).open("r") as fh:
+ config = full_load(fh)
- dr = pd.bdate_range(begin_date, args.until)
- if dr.empty:
- continue
- logger.info(f"calibrating {index}, {series}, {tenor}")
- tranche_index = None
+ for index, tenor, skewtype in config["runs"]:
+ begin_date = None
+ index, series = index[:2].upper(), int(index[2:])
+ if args.start_from is not None:
+ begin_date = args.start_from
+ if args.update:
+ begin_date = get_lastdate(serenitas_conn, index, series, tenor)
+ if begin_date is None:
+ continue
+ if not args.update and begin_date is None:
+ try:
+ begin_date = start_dates[f"{index.lower()}{series}"]
+ except KeyError:
+ print(index, series)
+ continue
- data = {}
- for d in dr.date:
- logger.debug(f"calibrating for {d}")
- try:
- if tranche_index is None:
- tranche_index = TrancheBasket(index, series, tenor, value_date=d)
- else:
- tranche_index.value_date = d
- except (RuntimeError, ValueError) as e:
- logger.error(e)
+ dr = pd.bdate_range(begin_date, args.until)
+ if dr.empty:
continue
+ logger.info(f"calibrating {index}, {series}, {tenor}")
+ tranche_index = None
- try:
- tranche_index.tweak()
- except ValueError as e:
- logger.error(e)
- break
- try:
- tranche_index.build_skew(skewtype)
- except ValueError as e:
- logger.error(e)
- logger.debug("Trying topdown")
- tranche_index.rho[:] = np.nan
+ data = {}
+ for d in dr.date:
+ logger.debug(f"calibrating for {d}")
try:
- tranche_index.build_skew("topdown")
- except ValueError:
+ if tranche_index is None:
+ tranche_index = TrancheBasket(
+ index, series, tenor, value_date=d
+ )
+ else:
+ tranche_index.value_date = d
+ except (RuntimeError, ValueError) as e:
logger.error(e)
continue
- df = pd.concat(
- [
- tranche_index.tranche_deltas(),
- tranche_index.tranche_fwd_deltas(),
- tranche_index.tranche_durations(),
- tranche_index.tranche_EL(),
- tranche_index.tranche_spreads(),
- ],
- axis=1,
- )
- try:
- df["theta"] = tranche_index.tranche_thetas(method="TLP")
- except ValueError:
- df["theta"] = None
+ try:
+ tranche_index.tweak()
+ except ValueError as e:
+ logger.error(e)
+ break
+ try:
+ tranche_index.build_skew(skewtype)
+ except ValueError as e:
+ logger.error(e)
+ logger.debug("Trying topdown")
+ tranche_index.rho[:] = np.nan
+ try:
+ tranche_index.build_skew("topdown")
+                    except ValueError as e:
+                        logger.error(e)
+ continue
+
+ df = pd.concat(
+ [
+ tranche_index.tranche_deltas(),
+ tranche_index.tranche_fwd_deltas(),
+ tranche_index.tranche_durations(),
+ tranche_index.tranche_EL(),
+ tranche_index.tranche_spreads(),
+ ],
+ axis=1,
+ )
+ try:
+ df["theta"] = tranche_index.tranche_thetas(method="TLP")
+ except ValueError:
+ df["theta"] = None
- (
- df["index_duration"],
- df["index_expected_loss"],
- df["index_price"],
- ) = tranche_index.index_pv(clean=True)
- df["index_expected_loss"] *= -1
- df["index_basis"] = tranche_index.tweaks[0]
- df["index_theta"] = tranche_index.theta()[tenor]
- df["tranche_id"] = tranche_index.tranche_quotes.id.values
- df["corr_at_detach"] = tranche_index.rho[1:]
- df["corr01"] = tranche_index.tranche_corr01()
- del df["fwd_gamma"]
- df["quote_price"] = (
- 1 - tranche_index.tranche_quotes.quotes.values - tranche_index._accrued
- )
- df["calibrated_price"] = tranche_index.tranche_pvs().bond_price
- data[d] = df
+ (
+ df["index_duration"],
+ df["index_expected_loss"],
+ df["index_price"],
+ ) = tranche_index.index_pv(clean=True)
+ df["index_expected_loss"] *= -1
+ df["index_basis"] = tranche_index.tweaks[0]
+ df["index_theta"] = tranche_index.theta()[tenor]
+ df["tranche_id"] = tranche_index.tranche_quotes.id.values
+ df["corr_at_detach"] = tranche_index.rho[1:]
+ df["corr01"] = tranche_index.tranche_corr01()
+ del df["fwd_gamma"]
+ df["quote_price"] = (
+ 1
+ - tranche_index.tranche_quotes.quotes.values
+ - tranche_index._accrued
+ )
+ df["calibrated_price"] = tranche_index.tranche_pvs().bond_price
+ data[d] = df
- if data:
- data = pd.concat(data)
- sql_str = build_sql_str(data, args.markit)
- with serenitas_conn.cursor() as c:
- c.executemany(sql_str, data.to_dict(orient="records"))
- serenitas_conn.commit()
+ if data:
+ data = pd.concat(data)
+ sql_str = build_sql_str(data, args.markit)
+ with serenitas_conn.cursor() as c:
+ c.executemany(sql_str, data.to_dict(orient="records"))
+ serenitas_conn.commit()
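The hunks above replace a raw dbconn handle with the serenitas_pool.connection()
context manager. A minimal sketch of that pattern, assuming serenitas.utils.db2
wraps a psycopg_pool-style ConnectionPool (the DSN and query here are
illustrative placeholders, not part of this commit):

    from psycopg_pool import ConnectionPool

    # Assumed shape of serenitas.utils.db2.serenitas_pool
    serenitas_pool = ConnectionPool("dbname=serenitasdb")

    with serenitas_pool.connection() as conn:  # borrow a connection
        with conn.cursor() as c:
            c.execute("SELECT 1")
            (one,) = c.fetchone()
    # On clean exit the transaction is committed (rolled back on error)
    # and the connection is returned to the pool.

Under psycopg_pool semantics the block exit commits and recycles the
connection automatically, so the explicit serenitas_conn.commit() above acts
as the final, deliberate commit of the batch.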
diff --git a/python/cds_curve.py b/python/cds_curve.py
index cdbb545a..14637c2e 100644
--- a/python/cds_curve.py
+++ b/python/cds_curve.py
@@ -7,7 +7,7 @@ import logging
import pandas as pd
from serenitas.utils import SerenitasFileHandler
-from serenitas.utils.db import dbconn
+from serenitas.utils.db2 import serenitas_pool
logger = logging.getLogger(__name__)
@@ -84,19 +84,20 @@ if __name__ == "__main__":
)
args = parser.parse_args()
index, series = args.index, args.series
- conn = dbconn("serenitasdb")
if args.latest:
- with conn.cursor() as c:
- c.execute(
- "SELECT max(date) FROM index_quotes_pre "
- "RIGHT JOIN index_risk2 USING (id) "
- "WHERE index=%s AND series=%s "
- "AND tenor in ('3yr', '5yr', '7yr', '10yr')",
- (index, series),
- )
- (start_date,) = c.fetchone()
- start_date = pd.Timestamp(start_date)
+ with serenitas_pool.connection() as conn:
+ with conn.cursor() as c:
+ c.execute(
+ "SELECT max(date) FROM index_quotes_pre "
+ "RIGHT JOIN index_risk2 USING (id) "
+ "WHERE index=%s AND series=%s "
+ "AND tenor in ('3yr', '5yr', '7yr', '10yr')",
+ (index, series),
+ )
+ (start_date,) = c.fetchone()
+ start_date = pd.Timestamp(start_date)
+ conn.commit()
else:
start_date = None
@@ -118,12 +119,12 @@ if __name__ == "__main__":
for c in ("theta", "duration", "tweak", "dispersion", "gini")
]
)
- with conn.cursor() as c:
- for id, t in g:
- c.execute(
- "INSERT INTO index_risk2 VALUES(%s, %s, %s, %s, %s, %s) ON CONFLICT (id) "
- f"DO UPDATE SET {update_str}",
- (id,) + tuple(t),
- )
- conn.commit()
- conn.close()
+ with serenitas_pool.connection() as conn:
+ with conn.cursor() as c:
+ for id, t in g:
+ c.execute(
+ "INSERT INTO index_risk2 VALUES(%s, %s, %s, %s, %s, %s) ON CONFLICT (id) "
+ f"DO UPDATE SET {update_str}",
+ (id,) + tuple(t),
+ )
+ conn.commit()
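The INSERT above upserts per-id risk rows; update_str (defined just before
this hunk) is presumably built from the listed risk columns so that a
conflicting row is overwritten with the incoming values. A hedged sketch of
that construction, assuming it uses PostgreSQL's EXCLUDED pseudo-table:

    cols = ("theta", "duration", "tweak", "dispersion", "gini")
    update_str = ", ".join(f"{c} = EXCLUDED.{c}" for c in cols)
    sql = (
        "INSERT INTO index_risk2 VALUES(%s, %s, %s, %s, %s, %s) "
        f"ON CONFLICT (id) DO UPDATE SET {update_str}"
    )
    # executed as c.execute(sql, (id,) + tuple(t)), as in the hunk above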
diff --git a/python/quote_parsing/__main__.py b/python/quote_parsing/__main__.py
index 557edd45..30f607de 100644
--- a/python/quote_parsing/__main__.py
+++ b/python/quote_parsing/__main__.py
@@ -9,7 +9,7 @@ from serenitas.utils.env import DATA_DIR
from serenitas.utils import SerenitasRotatingFileHandler
from . import logger
from .parse_emails import parse_email, write_todb
-from serenitas.utils.db import serenitas_pool
+from serenitas.utils.db2 import serenitas_pool
fh = SerenitasRotatingFileHandler("emails_parsing.log", 1_000_000, 5)
logger.addHandler(fh)
@@ -40,60 +40,59 @@ try:
except FileNotFoundError:
already_uploaded = {}
-conn = serenitas_pool.getconn()
-for f in emails:
- date_composed, msg_id = f.name.split("_")
- date_composed = datetime.datetime.strptime(date_composed, "%Y-%m-%d %H-%M-%S")
- if msg_id == "16e4b563f6cff219":
- # GS message has IG quotes with a HY header
- continue
- if msg_id == "17b40531791bb7c2":
- # There is a % sign with no space, breaking it
- continue
- if msg_id in already_uploaded:
- continue
- else:
- try:
- key, (option_stack, fwd_index) = parse_email(f, date_composed, conn)
- except RuntimeError as e:
- logger.error(e)
- except ValueError as e:
- raise ValueError(f"{f.name}") from e
+with serenitas_pool.connection() as conn:
+ for f in emails:
+ date_composed, msg_id = f.name.split("_")
+ date_composed = datetime.datetime.strptime(date_composed, "%Y-%m-%d %H-%M-%S")
+ if msg_id == "16e4b563f6cff219":
+ # GS message has IG quotes with a HY header
+ continue
+ if msg_id == "17b40531791bb7c2":
+ # There is a % sign with no space, breaking it
+ continue
+ if msg_id in already_uploaded:
+ continue
else:
- if key[0] is None or len(option_stack) == 0:
- logger.error(f"Something wrong with email: {f.name}")
- continue
- swaption_stack[key] = pd.concat(
- option_stack, names=["expiry", "series", "version"]
- )
- fwd_index["msg_id"] = int(msg_id, 16)
- index_data = index_data.append(fwd_index)
- already_uploaded[msg_id] = key[0]
-if index_data.empty:
- sys.exit()
-for col in ["fwdbpv", "fwdprice", "fwdspread", "ref"]:
- if col in index_data:
- index_data[col] = pd.to_numeric(index_data[col])
-index_data["index"] = index_data["index"].astype("category")
+ try:
+ key, (option_stack, fwd_index) = parse_email(f, date_composed, conn)
+ except RuntimeError as e:
+ logger.error(e)
+ except ValueError as e:
+ raise ValueError(f"{f.name}") from e
+ else:
+ if key[0] is None or len(option_stack) == 0:
+ logger.error(f"Something wrong with email: {f.name}")
+ continue
+ swaption_stack[key] = pd.concat(
+ option_stack, names=["expiry", "series", "version"]
+ )
+ fwd_index["msg_id"] = int(msg_id, 16)
+ index_data = index_data.append(fwd_index)
+ already_uploaded[msg_id] = key[0]
+ if index_data.empty:
+ sys.exit()
+ for col in ["fwdbpv", "fwdprice", "fwdspread", "ref"]:
+ if col in index_data:
+ index_data[col] = pd.to_numeric(index_data[col])
+ index_data["index"] = index_data["index"].astype("category")
-index_names = ["quotedate", "index", "quote_source"]
-swaption_stack = pd.concat(swaption_stack, names=index_names, sort=False)
-dup = swaption_stack.index.duplicated()
-if dup.any():
- logger.warning("duplicated data")
- swaption_stack = swaption_stack[~dup]
+ index_names = ["quotedate", "index", "quote_source"]
+ swaption_stack = pd.concat(swaption_stack, names=index_names, sort=False)
+ dup = swaption_stack.index.duplicated()
+ if dup.any():
+ logger.warning("duplicated data")
+ swaption_stack = swaption_stack[~dup]
-swaption_stack = swaption_stack.reset_index().set_index(
- ["quotedate", "index", "series", "version", "expiry", "quote_source"]
-)
-swaption_stack = swaption_stack.sort_index()
-index_data = index_data.reset_index()
-index_data = index_data.drop_duplicates(
- ["quotedate", "index", "series", "version", "expiry", "quote_source"]
-)
+ swaption_stack = swaption_stack.reset_index().set_index(
+ ["quotedate", "index", "series", "version", "expiry", "quote_source"]
+ )
+ swaption_stack = swaption_stack.sort_index()
+ index_data = index_data.reset_index()
+ index_data = index_data.drop_duplicates(
+ ["quotedate", "index", "series", "version", "expiry", "quote_source"]
+ )
-write_todb(swaption_stack, index_data, conn)
-serenitas_pool.putconn(conn)
+ write_todb(swaption_stack, index_data, conn)
with open(".pickle", "wb") as fh:
pickle.dump(already_uploaded, fh)
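For context, the dedup bookkeeping that brackets this script: parsed message
ids are cached in a local pickle so re-runs skip already-uploaded emails. A
minimal sketch of that load/save cycle, assuming the dict maps msg_id to the
first element of the quote key (likely the quote date); the ".pickle"
filename is the one used above:

    import pickle

    try:
        with open(".pickle", "rb") as fh:
            already_uploaded = pickle.load(fh)
    except FileNotFoundError:
        already_uploaded = {}

    # ... parse each email, then record it as processed:
    # already_uploaded[msg_id] = quote_date

    with open(".pickle", "wb") as fh:
        pickle.dump(already_uploaded, fh)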
diff --git a/python/trade_dataclasses.py b/python/trade_dataclasses.py
index 379fd858..b9201386 100644
--- a/python/trade_dataclasses.py
+++ b/python/trade_dataclasses.py
@@ -8,7 +8,7 @@ from enum import Enum
from psycopg2.extensions import register_adapter, AsIs
from serenitas.analytics.dates import next_business_day, previous_twentieth
from serenitas.analytics.index import CreditIndex
-from serenitas.utils.db import dbconn
+from serenitas.utils.db2 import dbconn
from lru import LRU
from psycopg2.errors import UniqueViolation
import logging
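The unchanged imports in this hunk show the psycopg2 adapter machinery the
module relies on: register_adapter plus AsIs let custom Python types be
passed directly to execute(). A hedged sketch of that pattern with an
illustrative enum (Side is not from this commit):

    from enum import Enum
    from psycopg2.extensions import register_adapter, AsIs

    class Side(Enum):
        BUY = "buy"
        SELL = "sell"

    def adapt_side(s):
        # Render the enum as a quoted SQL literal
        return AsIs(f"'{s.value}'")

    register_adapter(Side, adapt_side)
    # cursor.execute("INSERT INTO trades(side) VALUES (%s)", (Side.BUY,))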