from utils.db import dbconn
from analytics import TrancheBasket
from pandas.tseries.offsets import BDay
import datetime
import logging
import os
import pandas as pd
from pathlib import Path
from yaml import safe_load  # yaml.load without an explicit Loader is removed in PyYAML >= 6
import argparse


def get_lastdate(conn, index, series, tenor):
    """Return the day after the most recent date stored in risk_numbers
    for the given index/series/tenor (i.e. the next date to calibrate)."""
    sql_str = (
        "SELECT (max(date) AT TIME ZONE 'America/New_York')::date + 1 "
        "AS date FROM risk_numbers "
        "WHERE index=%s and series = %s and tenor = %s"
    )
    with conn.cursor() as c:
        c.execute(sql_str, (index, series, tenor))
        (date,) = c.fetchone()
    conn.commit()
    return date


def build_sql_str(df):
    """Build an upsert into tranche_risk keyed on tranche_id: insert every
    column of `df`, and on conflict overwrite all non-key columns with the
    incoming (`excluded`) values."""
    cols = ",".join(df.columns)
    cols_ex_tranche_id = ",".join(c for c in df.columns if c != "tranche_id")
    cols_excluded = ",".join(f"excluded.{c}" for c in df.columns if c != "tranche_id")
    place_holders = ",".join(f"%({c})s" for c in df.columns)
    sql_str = (
        f"INSERT INTO tranche_risk({cols}) "
        f"VALUES({place_holders}) ON CONFLICT (tranche_id) DO "
        f"UPDATE SET ({cols_ex_tranche_id}) = ({cols_excluded})"
    )
    return sql_str
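
# Illustration (not part of the original script): for a hypothetical frame
# with columns tranche_id, delta and theta, build_sql_str emits the upsert
# below; the pyformat placeholders are filled per row by cursor.executemany.
#
#   INSERT INTO tranche_risk(tranche_id,delta,theta)
#   VALUES(%(tranche_id)s,%(delta)s,%(theta)s) ON CONFLICT (tranche_id) DO
#   UPDATE SET (delta,theta) = (excluded.delta,excluded.theta)
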
if __name__ == "__main__":
    from utils import SerenitasFileHandler

    logger = logging.getLogger("tranche_calib")
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-u",
        "--update",
        action="store_true",
        default=False,
        help="Update from the last run date [default: %(default)s]",
    )
    parser.add_argument(
        "-c",
        "--config",
        metavar="config_file",
        help="Runs the list of indices provided in CONFIG_FILE",
    )
    parser.add_argument("-i", "--index", help="Index name we want to run")
    parser.add_argument(
        "--tenor", default="5yr", help="Tenor we want to run [default '5yr']"
    )
    # Default end date: previous business day.
    parser.add_argument(
        "--until", default=pd.Timestamp.now() - BDay(), type=pd.Timestamp
    )
    parser.add_argument("--start_from", default=None, type=pd.Timestamp)
    parser.add_argument(
        "-d", "--debug", action="store_true", help="more verbose logging"
    )
    args = parser.parse_args()
    logger.setLevel(logging.DEBUG if args.debug else logging.INFO)
    CODE_DIR = Path(os.environ["CODE_DIR"])
    # Log to a dated file for normal runs, to the console when debugging.
    if not args.debug:
        handler = SerenitasFileHandler(f"calib_tranches_{datetime.date.today()}.log")
    else:
        handler = logging.StreamHandler()
    handler.setFormatter(SerenitasFileHandler._formatter)
    logger.addHandler(handler)

    # Default calibration start date per series, used when neither --update
    # nor --start_from supplies one.
    start_dates = {
        # "hy10": datetime.date(2014, 8, 11),
        # "hy15": datetime.date(2014, 6, 10),
        # "hy17": datetime.date(2013, 1, 1),
        "hy19": datetime.date(2013, 2, 1),
        "hy21": datetime.date(2013, 10, 4),
        "hy23": datetime.date(2014, 10, 16),
        "hy25": datetime.date(2015, 10, 1),
        "hy27": datetime.date(2016, 10, 4),
        "hy29": datetime.date(2017, 10, 3),
        "hy31": datetime.date(2018, 10, 2),
        "ig9": datetime.date(2013, 1, 1),
        "ig19": datetime.date(2013, 5, 1),
        "ig21": datetime.date(2013, 9, 26),
        "ig23": datetime.date(2014, 10, 14),
        "ig25": datetime.date(2015, 9, 22),
        "ig27": datetime.date(2016, 9, 27),
        "ig29": datetime.date(2017, 9, 26),
        "ig31": datetime.date(2018, 9, 25),
        "xo22": datetime.date(2014, 10, 20),
        "xo24": datetime.date(2015, 9, 28),
        "xo26": datetime.date(2016, 9, 27),
        "xo28": datetime.date(2017, 9, 28),
        "eu9": datetime.date(2014, 9, 15),
        "eu19": datetime.date(2013, 4, 3),
        "eu21": datetime.date(2014, 3, 27),
        "eu22": datetime.date(2014, 10, 22),
        "eu24": datetime.date(2015, 9, 23),
        "eu26": datetime.date(2016, 9, 27),
        "eu28": datetime.date(2017, 9, 28),
        "eu30": datetime.date(2018, 9, 25),
        "xo30": datetime.date(2018, 9, 25),
    }

    serenitas_conn = dbconn("serenitasdb")
    # A single index/tenor from the command line, or a list from a YAML config.
    if args.config is None:
        if args.index is None:
            raise ValueError("Please provide an index to run")
        config = {"runs": [(args.index, args.tenor)]}
    else:
        with (CODE_DIR / "etc" / args.config).open("r") as fh:
            config = safe_load(fh)

    for index, tenor in config["runs"]:
        begin_date = None
        # Split e.g. "hy29" into index family "HY" and series number 29.
        index, series = index[:2].upper(), int(index[2:])
        if args.start_from is not None:
            begin_date = args.start_from
        if args.update:
            begin_date = get_lastdate(serenitas_conn, index, series, tenor)
        if not args.update and begin_date is None:
            try:
                begin_date = start_dates[f"{index.lower()}{series}"]
            except KeyError:
                logger.warning(f"no start date for {index}{series}, skipping")
                continue
        dr = pd.bdate_range(begin_date, args.until)
        if dr.empty:
            continue
        logger.info(f"calibrating {index}, {series}, {tenor}")
        tranche_index = None
        data = {}
        for d in dr:
            logger.debug(f"calibrating for {d.date()}")
            try:
                # Reuse the basket across dates; only the value date moves.
                if tranche_index is None:
                    tranche_index = TrancheBasket(
                        index, series, tenor, value_date=d.date()
                    )
                else:
                    tranche_index.value_date = d.date()
            except (RuntimeError, ValueError) as e:
                logger.error(e)
                continue
            try:
                tranche_index.tweak()
            except ValueError as e:
                logger.error(e)
                break
            try:
                tranche_index.build_skew()
            except ValueError as e:
                logger.error(e)
                continue
            # One row per tranche, risk measures side by side.
            df = pd.concat(
                [
                    tranche_index.tranche_deltas(),
                    tranche_index.tranche_fwd_deltas(),
                    tranche_index.tranche_durations(),
                    tranche_index.tranche_EL(),
                    tranche_index.tranche_spreads(),
                ],
                axis=1,
            )
            try:
                df["theta"] = tranche_index.tranche_thetas(method="TLP")
            except ValueError:
                df["theta"] = None
            (
                df["index_duration"],
                df["index_expected_loss"],
                df["index_price"],
            ) = tranche_index.index_pv()
            df["index_expected_loss"] *= -1
            df["index_duration"] -= tranche_index.accrued()
            df["index_basis"] = tranche_index.tweaks[0]
            df["index_theta"] = tranche_index.theta()[tenor]
            df["tranche_id"] = tranche_index.tranche_quotes.id.values
            df["corr_at_detach"] = tranche_index.rho[1:]
            df["corr01"] = tranche_index.tranche_corr01()
            del df["fwd_gamma"]
            data[d] = df
        if data:
            data = pd.concat(data)
            sql_str = build_sql_str(data)
            with serenitas_conn.cursor() as c:
                c.executemany(sql_str, data.to_dict(orient="records"))
            serenitas_conn.commit()
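
# Example invocations (illustrative only; the script name is a guess based on
# its log-file prefix, and tranche_runs.yaml is a hypothetical config):
#   python calib_tranches.py -i hy29 --tenor 5yr --update
#   python calib_tranches.py -c tranche_runs.yaml --start_from 2019-01-02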