Diffstat (limited to 'python/calibrate_tranches_BC.py')
| -rw-r--r-- | python/calibrate_tranches_BC.py | 88 |
1 file changed, 67 insertions, 21 deletions
diff --git a/python/calibrate_tranches_BC.py b/python/calibrate_tranches_BC.py
index 31cf5061..30040b19 100644
--- a/python/calibrate_tranches_BC.py
+++ b/python/calibrate_tranches_BC.py
@@ -1,5 +1,6 @@
 from serenitas.analytics.tranche_basket import TrancheBasket, MarkitTrancheBasket
 from serenitas.analytics.dates import prev_business_day
+from psycopg import sql
 import datetime
 import logging
 import numpy as np
@@ -28,12 +29,33 @@ def build_sql_str(df, use_markit=False):
     place_holders = ",".join([f"%({c})s" for c in df.columns])
     sql_str = (
         f"INSERT INTO {'markit_' if use_markit else ''}tranche_risk({cols}) "
-        f"VALUES({place_holders}) ON CONFLICT (tranche_id) DO "
+        f"VALUES ({place_holders}) ON CONFLICT (tranche_id) DO "
         f"UPDATE SET ({cols_ex_tranche_id}) = ({cols_excluded})"
     )
     return sql_str
 
 
+def build_sql(table, cols, conflict):
+    return sql.SQL(
+        "INSERT INTO {table}({cols}) VALUES ({ph}) "
+        "ON CONFLICT ({conflict}) DO "
+        "UPDATE SET ({new}) = ({excluded})"
+    ).format(
+        table=sql.Identifier(table),
+        cols=sql.SQL(",").join([sql.Identifier(c) for c in cols]),
+        ph=sql.SQL(",").join(sql.Placeholder() * len(cols)),
+        conflict=sql.Identifier(conflict),
+        new=sql.SQL(",").join([sql.Identifier(c) for c in cols if c != conflict]),
+        excluded=sql.SQL(",").join(
+            [
+                sql.SQL("EXCLUDED.{}").format(sql.Identifier(c))
+                for c in cols
+                if c != conflict
+            ]
+        ),
+    )
+
+
 if __name__ == "__main__":
     from serenitas.utils import SerenitasFileHandler
     from serenitas.utils.db2 import NaNtoNone
@@ -132,6 +154,21 @@ if __name__ == "__main__":
         "eu38": datetime.date(2022, 9, 20),
     }
 
+    index_query = build_sql(
+        "tranche_risk_index",
+        [
+            "quoteset",
+            "price",
+            "basis",
+            "expected_loss",
+            "duration",
+            "theta",
+            "skew_x",
+            "skew_c",
+        ],
+        "quoteset",
+    )
+
     with serenitas_pool.connection() as serenitas_conn:
         if args.config is None:
             if args.index is None:
@@ -162,7 +199,8 @@ if __name__ == "__main__":
             logger.info(f"calibrating {index}, {series}, {tenor}")
 
             tranche_index = None
-            data = {}
+            tranche_data = []
+            index_data = []
             for d in dr.date:
                 logger.debug(f"calibrating for {d}")
                 try:
@@ -207,16 +245,21 @@ if __name__ == "__main__":
                     df["theta"] = tranche_index.tranche_thetas(method="TLP")
                 except ValueError:
                     df["theta"] = None
-
-                (
-                    df["index_duration"],
-                    df["index_expected_loss"],
-                    df["index_price"],
-                ) = tranche_index.index_pv(clean=True)
-                df["index_expected_loss"] *= -1
-                df["index_basis"] = tranche_index.tweaks[0]
-                df["index_theta"] = tranche_index.theta()[tenor]
+                duration, el, price = tranche_index.index_pv(clean=True)
+                index_data.append(
+                    (
+                        int(tranche_index.tranche_quotes.quoteset.values[0]),
+                        price,
+                        tranche_index.tweaks[0],
+                        -el,
+                        duration,
+                        tranche_index.theta()[tenor],
+                        tranche_index._skew.x.tobytes(),
+                        tranche_index._skew.c.tobytes(),
+                    )
+                )
                 df["tranche_id"] = tranche_index.tranche_quotes.id.values
+                df["quoteset"] = tranche_index.tranche_quotes.quoteset.values
                 df["corr_at_detach"] = tranche_index.rho[1:]
                 df["corr01"] = tranche_index.tranche_corr01()
                 del df["fwd_gamma"]
@@ -226,17 +269,20 @@ if __name__ == "__main__":
                     - tranche_index._accrued
                 )
                 df["calibrated_price"] = tranche_index.tranche_pvs().bond_price
-                data[d] = df
-
-            if data:
-                data = pd.concat(data)
-                sql_str = build_sql_str(data, args.markit)
+                tranche_data.append(df)
+            if index_data:
+                tranche_data = pd.concat(tranche_data)
+                cols = tranche_data.columns
+                tranches_query = build_sql("tranche_risk_tranches", cols, "tranche_id")
                 with serenitas_conn.cursor() as c:
                     c.executemany(
-                        sql_str,
-                        map(
-                            lambda d: {k: NaNtoNone(v) for k, v in d.items()},
-                            data.to_dict(orient="records"),
-                        ),
+                        tranches_query,
+                        [
+                            tuple(map(NaNtoNone, t))
+                            for t in tranche_data.itertuples(index=False)
+                        ],
+                    )
+                    c.executemany(
+                        index_query, [tuple(map(NaNtoNone, t)) for t in index_data]
+                    )
                 serenitas_conn.commit()
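
For reference, a minimal usage sketch of the new build_sql helper (psycopg 3 assumed; the connection object conn and the sample rows are hypothetical and not part of the commit):

    # Hypothetical usage of build_sql from the diff above.
    # With these three columns and "quoteset" as the conflict key it composes roughly:
    #   INSERT INTO "tranche_risk_index"("quoteset","price","basis") VALUES (%s,%s,%s)
    #   ON CONFLICT ("quoteset") DO UPDATE SET ("price","basis") = (EXCLUDED."price",EXCLUDED."basis")
    query = build_sql("tranche_risk_index", ["quoteset", "price", "basis"], "quoteset")
    with conn.cursor() as cur:
        # executemany takes the composed query plus one parameter tuple per row
        cur.executemany(query, [(1, 101.25, 0.0004), (2, 100.75, 0.0003)])
    conn.commit()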
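The skew_x / skew_c columns receive tranche_index._skew.x and tranche_index._skew.c serialized with numpy's tobytes(). A short round-trip sketch (the float64 dtype is an assumption; the raw blob carries neither dtype nor shape, so the reader must know them):

    import numpy as np

    x = np.array([0.03, 0.07, 0.15])                  # illustrative skew nodes
    blob = x.tobytes()                                # raw bytes, as stored in skew_x
    restored = np.frombuffer(blob, dtype=np.float64)  # dtype must match to read it back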
