Diffstat (limited to 'python/calibrate_tranches_BC.py')
-rw-r--r--  python/calibrate_tranches_BC.py | 173
1 file changed, 100 insertions, 73 deletions
diff --git a/python/calibrate_tranches_BC.py b/python/calibrate_tranches_BC.py
index b6d7a37f..8fde82e9 100644
--- a/python/calibrate_tranches_BC.py
+++ b/python/calibrate_tranches_BC.py
@@ -11,9 +11,11 @@ import argparse
def get_lastdate(conn, index, series, tenor):
- sql_str = ("SELECT (max(date) AT TIME ZONE 'America/New_York')::date + 1 "
- "AS date FROM risk_numbers "
- "WHERE index=%s and series = %s and tenor = %s")
+ sql_str = (
+ "SELECT (max(date) AT TIME ZONE 'America/New_York')::date + 1 "
+ "AS date FROM risk_numbers "
+ "WHERE index=%s and series = %s and tenor = %s"
+ )
with conn.cursor() as c:
c.execute(sql_str, (index, series, tenor))
date, = c.fetchone()
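
For orientation, a hedged sketch of how get_lastdate is presumably consumed when --update is passed (only the function itself appears in this hunk; the bdate_range call and the argument values below are assumptions):

    # Hypothetical usage: the query already returns max(date) + 1, so the result
    # can seed the calibration date range directly. "HY", 31 and "5yr" are illustrative;
    # serenitas_conn and args.until are defined further down in the script.
    begin_date = get_lastdate(serenitas_conn, "HY", 31, "5yr")
    dates = pd.bdate_range(begin_date, args.until)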
@@ -22,31 +24,47 @@ def get_lastdate(conn, index, series, tenor):
def build_sql_str(df):
- cols = ','.join(df.columns)
- cols_ex_tranche_id = ','.join([c for c in df.columns if c != 'tranche_id'])
- cols_excluded = ','.join([f"excluded.{c}" for c in df.columns if c != 'tranche_id'])
+ cols = ",".join(df.columns)
+ cols_ex_tranche_id = ",".join([c for c in df.columns if c != "tranche_id"])
+ cols_excluded = ",".join([f"excluded.{c}" for c in df.columns if c != "tranche_id"])
place_holders = ",".join([f"%({c})s" for c in df.columns])
- sql_str = (f"INSERT INTO tranche_risk({cols}) "
- f"VALUES({place_holders}) ON CONFLICT (tranche_id) DO "
- f"UPDATE SET ({cols_ex_tranche_id}) = ({cols_excluded})")
+ sql_str = (
+ f"INSERT INTO tranche_risk({cols}) "
+ f"VALUES({place_holders}) ON CONFLICT (tranche_id) DO "
+ f"UPDATE SET ({cols_ex_tranche_id}) = ({cols_excluded})"
+ )
return sql_str
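
Illustrative only: the upsert statement build_sql_str generates for a toy frame (the column set here is made up; the real columns come from the risk frame assembled further down). The statement is a single string in practice, wrapped here for readability:

    toy = pd.DataFrame(columns=["tranche_id", "delta", "theta"])
    print(build_sql_str(toy))
    # INSERT INTO tranche_risk(tranche_id,delta,theta)
    #   VALUES(%(tranche_id)s,%(delta)s,%(theta)s)
    #   ON CONFLICT (tranche_id) DO UPDATE SET (delta,theta) = (excluded.delta,excluded.theta)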
if __name__ == "__main__":
from utils import SerenitasFileHandler
- logger = logging.getLogger('tranche_calib')
+
+ logger = logging.getLogger("tranche_calib")
parser = argparse.ArgumentParser()
- parser.add_argument("-u", "--update", action="store_true", default=False,
- help="Update from the last run date [default %default]")
- parser.add_argument("-c", "--config", metavar="config_file",
- help="Runs the list of indices provided in CONFIG_FILE")
+ parser.add_argument(
+ "-u",
+ "--update",
+ action="store_true",
+ default=False,
+ help="Update from the last run date [default %default]",
+ )
+ parser.add_argument(
+ "-c",
+ "--config",
+ metavar="config_file",
+ help="Runs the list of indices provided in CONFIG_FILE",
+ )
parser.add_argument("-i", "--index", help="Index name we want to run")
- parser.add_argument("--tenor", default="5yr", help="Tenor we want to run [default '5yr']")
- parser.add_argument("--until", default=pd.Timestamp.now()-BDay(),
- type=lambda s: pd.Timestamp(s))
- parser.add_argument("--start_from", default=None,
- type=lambda s: pd.Timestamp(s))
- parser.add_argument("-d", "--debug", action="store_true", help="more verbose logging")
+ parser.add_argument(
+ "--tenor", default="5yr", help="Tenor we want to run [default '5yr']"
+ )
+ parser.add_argument(
+ "--until", default=pd.Timestamp.now() - BDay(), type=lambda s: pd.Timestamp(s)
+ )
+ parser.add_argument("--start_from", default=None, type=lambda s: pd.Timestamp(s))
+ parser.add_argument(
+ "-d", "--debug", action="store_true", help="more verbose logging"
+ )
args = parser.parse_args()
logger.setLevel(logging.DEBUG if args.debug else logging.INFO)
CODE_DIR = Path(os.environ["CODE_DIR"])
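
Hedged invocation examples for the flags defined above (the script name comes from the diff path; the config file name is hypothetical, and CODE_DIR must be set in the environment):

    python calibrate_tranches_BC.py --index hy31 --tenor 5yr --update -d
    python calibrate_tranches_BC.py --config tranche_runs.yaml --until 2019-03-01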
@@ -57,48 +75,49 @@ if __name__ == "__main__":
handler.setFormatter(SerenitasFileHandler._formatter)
logger.addHandler(handler)
- start_dates = {# 'hy10': datetime.date(2014, 8, 11),
- # 'hy15': datetime.date(2014, 6, 10),
- # 'hy17': datetime.date(2013, 1, 1),
- 'hy19': datetime.date(2013, 2, 1),
- 'hy21': datetime.date(2013, 10, 4),
- 'hy23': datetime.date(2014, 10, 16),
- 'hy25': datetime.date(2015, 10, 1),
- 'hy27': datetime.date(2016, 10, 4),
- 'hy29': datetime.date(2017, 10, 3),
- 'hy31': datetime.date(2018, 10, 2),
- 'ig9': datetime.date(2013, 1, 1),
- 'ig19': datetime.date(2013, 5, 1),
- 'ig21': datetime.date(2013, 9, 26),
- 'ig23': datetime.date(2014, 10, 14),
- 'ig25': datetime.date(2015, 9, 22),
- 'ig27': datetime.date(2016, 9, 27),
- 'ig29': datetime.date(2017, 9, 26),
- 'ig31': datetime.date(2018, 9, 25),
- 'xo22': datetime.date(2014, 10, 20),
- 'xo24': datetime.date(2015, 9, 28),
- 'xo26': datetime.date(2016, 9, 27),
- 'xo28': datetime.date(2017, 9, 28),
- 'eu9': datetime.date(2014, 9, 15),
- 'eu19': datetime.date(2013, 4, 3),
- 'eu21': datetime.date(2014, 3, 27),
- 'eu22': datetime.date(2014, 10, 22),
- 'eu24': datetime.date(2015, 9, 23),
- 'eu26': datetime.date(2016, 9, 27),
- 'eu28': datetime.date(2017, 9, 28),
- 'eu30': datetime.date(2018, 9, 25),
- 'xo30': datetime.date(2018, 9, 25)}
+ start_dates = { # 'hy10': datetime.date(2014, 8, 11),
+ # 'hy15': datetime.date(2014, 6, 10),
+ # 'hy17': datetime.date(2013, 1, 1),
+ "hy19": datetime.date(2013, 2, 1),
+ "hy21": datetime.date(2013, 10, 4),
+ "hy23": datetime.date(2014, 10, 16),
+ "hy25": datetime.date(2015, 10, 1),
+ "hy27": datetime.date(2016, 10, 4),
+ "hy29": datetime.date(2017, 10, 3),
+ "hy31": datetime.date(2018, 10, 2),
+ "ig9": datetime.date(2013, 1, 1),
+ "ig19": datetime.date(2013, 5, 1),
+ "ig21": datetime.date(2013, 9, 26),
+ "ig23": datetime.date(2014, 10, 14),
+ "ig25": datetime.date(2015, 9, 22),
+ "ig27": datetime.date(2016, 9, 27),
+ "ig29": datetime.date(2017, 9, 26),
+ "ig31": datetime.date(2018, 9, 25),
+ "xo22": datetime.date(2014, 10, 20),
+ "xo24": datetime.date(2015, 9, 28),
+ "xo26": datetime.date(2016, 9, 27),
+ "xo28": datetime.date(2017, 9, 28),
+ "eu9": datetime.date(2014, 9, 15),
+ "eu19": datetime.date(2013, 4, 3),
+ "eu21": datetime.date(2014, 3, 27),
+ "eu22": datetime.date(2014, 10, 22),
+ "eu24": datetime.date(2015, 9, 23),
+ "eu26": datetime.date(2016, 9, 27),
+ "eu28": datetime.date(2017, 9, 28),
+ "eu30": datetime.date(2018, 9, 25),
+ "xo30": datetime.date(2018, 9, 25),
+ }
- serenitas_conn = dbconn('serenitasdb')
+ serenitas_conn = dbconn("serenitasdb")
if args.config is None:
if args.index is None:
raise ValueError("Please provide an index to run")
- config = {'runs': [(args.index, args.tenor)]}
+ config = {"runs": [(args.index, args.tenor)]}
else:
with (CODE_DIR / "etc" / args.config).open("r") as fh:
config = load(fh)
- for index, tenor in config['runs']:
+ for index, tenor in config["runs"]:
begin_date = None
index, series = index[:2].upper(), int(index[2:])
if args.start_from is not None:
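
For reference, a sketch of the shape config takes once loaded (the file format is not shown in this diff, only that load(fh) must yield a dict whose "runs" entries unpack into (index, tenor) pairs; the entries below are made up):

    # Hypothetical parsed config; the loop above splits "hy31" into index "HY" and series 31.
    config = {"runs": [["hy31", "5yr"], ["ig31", "5yr"], ["eu30", "5yr"]]}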
@@ -123,7 +142,9 @@ if __name__ == "__main__":
logger.debug(f"calibrating for {d.date()}")
try:
if tranche_index is None:
- tranche_index = TrancheBasket(index, series, tenor, value_date=d.date())
+ tranche_index = TrancheBasket(
+ index, series, tenor, value_date=d.date()
+ )
else:
tranche_index.value_date = d.date()
except (RuntimeError, ValueError) as e:
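
The hunk above sits inside the per-date calibration loop: the TrancheBasket is built once per (index, series, tenor) and then re-dated, which presumably avoids reloading the basket's static data on every business day. A condensed reconstruction of that loop skeleton (the dates variable is an assumption based on the --until/--start_from flags):

    for d in dates:  # assumed pd.bdate_range(begin_date, args.until)
        logger.debug(f"calibrating for {d.date()}")
        try:
            if tranche_index is None:
                tranche_index = TrancheBasket(index, series, tenor, value_date=d.date())
            else:
                tranche_index.value_date = d.date()  # reuse the basket, only move the valuation date
        except (RuntimeError, ValueError) as e:
            logger.error(e)  # skip this date on calibration errors
            continue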
@@ -141,31 +162,37 @@ if __name__ == "__main__":
logger.error(e)
continue
- df = pd.concat([tranche_index.tranche_deltas(),
- tranche_index.tranche_fwd_deltas(),
- tranche_index.tranche_durations(),
- tranche_index.tranche_EL(),
- tranche_index.tranche_spreads()], axis=1)
+ df = pd.concat(
+ [
+ tranche_index.tranche_deltas(),
+ tranche_index.tranche_fwd_deltas(),
+ tranche_index.tranche_durations(),
+ tranche_index.tranche_EL(),
+ tranche_index.tranche_spreads(),
+ ],
+ axis=1,
+ )
try:
- df['theta'] = tranche_index.tranche_thetas(method="TLP")
+ df["theta"] = tranche_index.tranche_thetas(method="TLP")
except ValueError:
- df['theta'] = None
+ df["theta"] = None
- df['index_duration'], df['index_expected_loss'], df['index_price'] = \
- tranche_index.index_pv()
- df['index_expected_loss'] *= -1
- df['index_duration'] -= tranche_index.accrued()
- df['index_basis'] = tranche_index.tweaks[0]
- df['index_theta'] = tranche_index.theta()[tenor]
- df['tranche_id'] = tranche_index.tranche_quotes.id.values
- df['corr_at_detach'] = tranche_index.rho[1:]
- df['corr01'] = tranche_index.tranche_corr01()
- del df['fwd_gamma']
+ df["index_duration"], df["index_expected_loss"], df[
+ "index_price"
+ ] = tranche_index.index_pv()
+ df["index_expected_loss"] *= -1
+ df["index_duration"] -= tranche_index.accrued()
+ df["index_basis"] = tranche_index.tweaks[0]
+ df["index_theta"] = tranche_index.theta()[tenor]
+ df["tranche_id"] = tranche_index.tranche_quotes.id.values
+ df["corr_at_detach"] = tranche_index.rho[1:]
+ df["corr01"] = tranche_index.tranche_corr01()
+ del df["fwd_gamma"]
data[d] = df
if data:
data = pd.concat(data)
sql_str = build_sql_str(data)
with serenitas_conn.cursor() as c:
- c.executemany(sql_str, data.to_dict(orient='record'))
+ c.executemany(sql_str, data.to_dict(orient="record"))
serenitas_conn.commit()
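
Finally, a minimal, self-contained sketch of the write step above, assuming serenitas_conn is a psycopg2 connection. Note that recent pandas only accepts the full orient name "records"; the abbreviated "record" used in the diff was a shorthand tolerated by older versions:

    rows = data.to_dict(orient="records")  # one %(col)s parameter dict per row
    sql_str = build_sql_str(data)          # upsert keyed on tranche_id
    with serenitas_conn.cursor() as c:
        c.executemany(sql_str, rows)
    serenitas_conn.commit()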