Diffstat (limited to 'python')
| -rw-r--r-- | python/reto.py | 148 |
1 file changed, 55 insertions, 93 deletions
diff --git a/python/reto.py b/python/reto.py
index 6fca0c50..d6114583 100644
--- a/python/reto.py
+++ b/python/reto.py
@@ -2,7 +2,6 @@ import datetime
 import pandas as pd
 import numpy as np
 import argparse
-from copy import deepcopy
 
 from risk.portfolio import build_portfolio, generate_vol_surface
 
@@ -10,38 +9,19 @@ import serenitas.analytics as ana
 from serenitas.analytics.scenarios import run_portfolio_scenarios
 from serenitas.analytics.base import Trade
 from serenitas.utils.db2 import dbconn
-from serenitas.utils.db import dawn_engine
 from serenitas.analytics.dates import prev_business_day
 
 
-def parse_args():
-    """Parses command line arguments"""
-    parser = argparse.ArgumentParser(description="Shock data and insert into DB")
-    parser.add_argument(
-        "date",
-        nargs="?",
-        type=datetime.date.fromisoformat,
-        default=prev_business_day(datetime.date.today()),
-    )
-    parser.add_argument("-n", "--no-upload", action="store_true", help="do not upload")
-    return parser.parse_args()
-
-
 def gen_shocks(portf, shock_date, fund):
     Trade.init_ontr(shock_date)
     ana._local = False
+    ontr_spread = Trade._ontr["HY"].spread
     spread_shock = np.array([-25.0, 1.0, +25.0, 100.0, 200.0, 500, 1000])
-    spread_shock /= Trade._ontr["HY"].spread
+    spread_shock /= ontr_spread
     # Add in 2020 HY Wides, 2021 HY Tights, 2022 HY Wides scenarios
-    spread_shock = np.append(
-        spread_shock,
-        [
-            (872 / Trade._ontr["HY"].spread - 1),
-            (269 / Trade._ontr["HY"].spread - 1),
-            (626 / Trade._ontr["HY"].spread - 1),
-        ],
-    )
-    vol_surface = generate_vol_surface(portf, try_days_back=10, source="BAML")
+    historic_spreads = np.array([872, 269, 626])
+    spread_shock = np.append(spread_shock, historic_spreads / ontr_spread - 1.0)
+    vol_surface = generate_vol_surface(portf, lookback=10, source="BAML")
     portf.reset_pv()
     scens = run_portfolio_scenarios(
         portf,
@@ -52,78 +32,52 @@ def gen_shocks(portf, shock_date, fund):
         corr_shock=[0.0],
         vol_surface=vol_surface,
     )
-
-    strategies = {}
-    strategies["options"] = [
-        "HYOPTDEL",
-        "HYPAYER",
-        "HYREC",
-        "IGOPTDEL",
-        "IGPAYER",
-        "IGREC",
-    ]
-    strategies["tranches"] = [
-        "HYSNR",
-        "HYMEZ",
-        "HYINX",
-        "HYEQY",
-        "IGSNR",
-        "IGMEZ",
-        "IGINX",
-        "IGEQY",
-        "EUSNR",
-        "EUMEZ",
-        "EUINX",
-        "EUEQY",
-        "XOSNR",
-        "XOMEZ",
-        "XOINX",
-        "XOEQY",
-        "BSPK",
-    ]
+    strategies = {
+        s: "options"
+        for s in ["HYOPTDEL", "HYPAYER", "HYREC", "IGOPTDEL", "IGPAYER", "IGREC"]
+    } | {
+        s: "tranches"
+        for s in [
+            "HYSNR",
+            "HYMEZ",
+            "HYINX",
+            "HYEQY",
+            "IGSNR",
+            "IGMEZ",
+            "IGINX",
+            "IGEQY",
+            "EUSNR",
+            "EUMEZ",
+            "EUINX",
+            "EUEQY",
+            "XOSNR",
+            "XOMEZ",
+            "XOINX",
+            "XOEQY",
+            "BSPK",
+        ]
+    }
     if fund == "BRINKER":
         scens = scens.xs(0, level="corr_shock")
     else:
         scens = scens.xs((0.0, 0.0), level=["vol_shock", "corr_shock"])
     scens.columns.names = ["strategy", "trade_id", "scen_type"]
-
-    results = {}
-    for i, g in scens.groupby(level="scen_type", axis=1):
-        temp = g.groupby(level="strategy", axis=1).sum()
-        for key, item in strategies.items():
-            exist_columns = list(set(temp.columns).intersection(item))
-            temp[key] = temp[exist_columns].sum(axis=1)
-            temp = temp.drop(exist_columns, axis=1)
-        temp["total"] = temp.sum(axis=1)
-        results[i] = temp
-    results = pd.concat(results)
+    results = scens.stack(level="scen_type").reorder_levels([2, 0, 1]).sort_index()
+    results = results.groupby(["strategy"], axis=1).sum()
+    results = results.groupby(lambda s: strategies.get(s, s), axis=1).sum()
+    # map shocks back to absolute spread diff
     results.index = results.index.set_levels(
-        results.index.levels[results.index.names.index("spread_shock")]
-        * Trade._ontr["HY"].spread,
-        level="spread_shock",
+        results.index.levels[2] * ontr_spread, level="spread_shock"
     )
+    results["total"] = results.sum(axis=1)
+    results = results.stack().reset_index()
+    results.scen_type = results.scen_type.str.upper()
+    results.insert(0, "date", results.pop("date"))
     return results
 
 
-def process_shocks_df(raw_df):
-    """Clean and transform the input dataframe to insert into database."""
-    transformed_df = raw_df.reset_index()
-    transformed_df = transformed_df.rename(columns={"level_0": "unit"})
-    strategy_columns = transformed_df.columns[3:]
-    transformed_df = pd.melt(
-        transformed_df,
-        id_vars=["unit", "date", "spread_shock"],
-        value_vars=strategy_columns,
-    )
-    transformed_df = transformed_df.rename(
-        columns={"variable": "strategy", "value": "value", "unit": "risk_type"}
-    )
-    transformed_df.risk_type = transformed_df.risk_type.str.upper()
-    return transformed_df
-
-
-def save_shocks(date, fund, df):
+def save_shocks(date, df, fund):
     with conn.cursor() as c:
         c.execute(
             "DELETE FROM shocks WHERE fund=%s AND date=%s",
@@ -133,17 +87,25 @@ def save_shocks(date, fund, df):
             ),
         )
         conn.commit()
-    df["fund"] = fund
-    df.to_sql("shocks", dawn_engine, if_exists="append", index=False)
+    with conn.cursor() as c:
+        c.executemany(
+            "INSERT INTO shocks VALUES (%s, %s, %s, %s, %s, %s)",
+            [(*t, fund) for t in df.itertuples(index=False)],
+        )
 
 
 if __name__ == "__main__":
-    args = parse_args()
+    parser = argparse.ArgumentParser(description="Shock data and insert into DB")
+    parser.add_argument(
+        "date",
+        nargs="?",
+        type=datetime.date.fromisoformat,
+        default=prev_business_day(datetime.date.today()),
+    )
+    parser.add_argument("-n", "--no-upload", action="store_true", help="do not upload")
+    args = parser.parse_args()
     conn = dbconn("dawndb")
     for fund in ("SERCGMAST", "BOWDST", "ISOSEL", "BRINKER"):
        portf, _ = build_portfolio(args.date, args.date, fund)
-
-        # Shocks process
         shocks = gen_shocks(portf, args.date, fund)
-        df = process_shocks_df(shocks)
-        save_shocks(args.date, fund, df)
+        save_shocks(args.date, shocks, fund)
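
Note: the heart of this change is in gen_shocks, where the per-scen_type loop is replaced by column-wise groupbys that relabel each strategy column into an "options" or "tranches" bucket via a dict lookup, leaving unmapped columns under their own name. Below is a minimal sketch of that idiom, assuming Python 3.9+ (for the dict | merge) and a pandas version where axis=1 groupby is still available (it is deprecated in pandas 2.x); the toy column names are hypothetical, not the real strategy set.

import pandas as pd

# Map each strategy column to a reporting bucket; unmapped columns
# fall through via strategies.get(s, s) and keep their own name.
strategies = {s: "options" for s in ["HYPAYER", "HYREC"]} | {
    s: "tranches" for s in ["HYSNR", "HYEQY"]
}

pnl = pd.DataFrame(
    [[1.0, 2.0, 3.0, 4.0, 5.0]],
    columns=["HYPAYER", "HYREC", "HYSNR", "HYEQY", "CASH"],  # "CASH" is unmapped
)

# Column-wise groupby: relabel every column through the dict, then sum
# columns that share a bucket. Result columns: CASH, options, tranches.
bucketed = pnl.groupby(lambda s: strategies.get(s, s), axis=1).sum()
print(bucketed)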

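Note: save_shocks now uploads through the existing dbconn cursor with executemany instead of DataFrame.to_sql on the removed dawn_engine, so each row tuple must line up with the shocks table's column order, with fund appended last. A small sketch of how the INSERT parameters are built (the column names here are illustrative, not the actual shocks schema):

import pandas as pd

df = pd.DataFrame(
    {
        "date": ["2023-01-03"],
        "risk_type": ["PV"],
        "spread_shock": [500.0],
        "strategy": ["options"],
        "value": [-1.2e6],
    }
)

# itertuples(index=False) yields one plain tuple per row; the fund is
# appended last, so it must be the final column in the INSERT statement.
rows = [(*t, "SERCGMAST") for t in df.itertuples(index=False)]
print(rows)  # [('2023-01-03', 'PV', 500.0, 'options', -1200000.0, 'SERCGMAST')]

# With a live DB-API connection `conn`, the upload would then be:
# with conn.cursor() as c:
#     c.executemany("INSERT INTO shocks VALUES (%s, %s, %s, %s, %s, %s)", rows)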