import datetime
import logging
import math
import subprocess
from io import StringIO

import pandas as pd
from bs4 import BeautifulSoup
from exchangelib import HTMLBody
from sqlalchemy.engine import Engine

logger = logging.getLogger(__name__)

CASH_STRATEGY_MAPPING = {
    "COCSH": ["IGREC", "IGPAYER", "HYPAYER", "HYREC", "HYOPTDEL", "IGOPTDEL", "VOLRV"],
    "IRDEVCSH": ["DV01", "STEEP", "FLAT"],
    "TCSH": [
        "IGMEZ",
        "IGSNR",
        "IGEQY",
        "HYMEZ",
        "HYEQY",
        "HYSNR",
        "BSPK",
        "XOMEZ",
        "XOEQY",
        "IGINX",
        "HYINX",
        "XOINX",
        "EUMEZ",
        "EUINX",
    ],
    "MBSCDSCSH": ["HEDGE_MBS", "MBSCDS"],
    "MACCDSCSH": ["HEDGE_MAC", "CASH_BASIS"],
    "CVECSH": ["SER_ITRXCURVE", "SER_IGCURVE", "SER_HYCURVE", "XCURVE"],
    "CLOCDSCSH": ["HEDGE_CLO", "M_CLO_BB20"],
    "M_CSH_CASH": [
        "CRT_LD",
        "CRT_LD_JNR",
        "CRT_SD",
        "MTG_FP",
        "MTG_LMG",
        "M_MTG_FP",
        "M_MTG_LMG",
    ],
}

# Inverse lookup: strategy folder -> cash strategy bucket.
STRATEGY_CASH_MAPPING = {e: k for k, v in CASH_STRATEGY_MAPPING.items() for e in v}


def compare_notionals(
    df: pd.DataFrame, positions: pd.DataFrame, fcm: str, fund: str
) -> None:
    # Aggregate book notionals per (security_id, maturity) and line them up
    # with the FCM report's NOTIONAL column.
    check_notionals = (
        positions.groupby(level=["security_id", "maturity"])[["notional"]]
        .sum()
        .join(df["NOTIONAL"], how="left")
    )
    diff_notionals = check_notionals[
        (check_notionals.notional != check_notionals.NOTIONAL)
        & (check_notionals.notional != 0.0)
    ]
    if not diff_notionals.empty:
        logger.error(f"Database and {fcm} FCM at {fund} know different notionals")
        for t in diff_notionals.itertuples():
            logger.error(
                f"{t.Index[0]}\t{t.Index[1]:%Y-%m-%d}\t{t.notional}\t{t.NOTIONAL}"
            )


def compare_notionals_rates(
    df: pd.DataFrame, positions: pd.DataFrame, fcm: str
) -> None:
    check_notionals = positions.join(df["NOTIONAL"], how="left")
    diff_notionals = check_notionals[
        (check_notionals.notional != check_notionals.NOTIONAL)
        & (check_notionals.notional != 0.0)
    ]
    if not diff_notionals.empty:
        logger.error(f"Database and {fcm} FCM know different notionals")
        for t in diff_notionals.itertuples():
            if hasattr(t, "effective_date"):
                msg = f"{t.Index[0]}\t{t.effective_date:%Y-%m-%d}\t{t.notional}\t{t.NOTIONAL}"
            else:
                msg = f"{t.Index[0]}\t{t.Index[1]:%Y-%m-%d}\t{t.notional}\t{t.NOTIONAL}"
            logger.error(msg)
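
# A minimal, hypothetical sketch (not part of the original module) of how the
# comparison helpers above are meant to be fed.  The instrument ids, dates,
# amounts, and fund name are made-up placeholders; the column/index names are
# inferred from the groupby/join calls in compare_notionals.
def _example_compare_notionals() -> None:
    positions = pd.DataFrame(
        {"notional": [10_000_000.0, 5_000_000.0]},
        index=pd.MultiIndex.from_tuples(
            [
                ("CDX_IG_5Y", datetime.date(2030, 6, 20)),   # placeholder security_id
                ("CDX_HY_5Y", datetime.date(2029, 12, 20)),  # placeholder security_id
            ],
            names=["security_id", "maturity"],
        ),
    )
    # The FCM report side only needs an upper-case NOTIONAL column on the same index.
    fcm_report = positions.rename(columns={"notional": "NOTIONAL"})
    compare_notionals(fcm_report, positions, fcm="BAML_FCM", fund="MASTER")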

def get_bilateral_trades(d: datetime.date, fund: str, engine: Engine) -> pd.DataFrame:
    df_cds = pd.read_sql_query(
        "SELECT cpty_id, folder, initial_margin_percentage * abs(notional) / 100 as IA "
        "FROM list_cds2(%s::date, %s) "
        "WHERE orig_attach IS NOT NULL or cpty_id='6SIT0'",  # that way we get all tranches + the ABS_CDS
        engine,
        params=(d, fund),
    )
    df_swaptions = pd.read_sql_query(
        "SELECT cpty_id, folder, initial_margin_percentage * abs(notional) / 100 AS IA "
        "FROM list_swaptions(%s::date, %s) ",
        engine,
        params=(d, fund),
    )
    df_caps = pd.read_sql_query(
        "SELECT cpty_id, folder, initial_margin_percentage * amount / 100 AS IA "
        "FROM capfloors "
        "WHERE cpty_id IS NOT NULL "
        "AND trade_date <= %s AND fund=%s",
        engine,
        params=(d, fund),
    )
    df_forwards = pd.read_sql_query(
        "SELECT cpty_id, folder, ia FROM ("
        " SELECT cpty_id, folder, initial_margin_percentage * buy_amount / 100 AS ia,"
        " trade_date, settle_date, fund FROM spots"
        " UNION"
        " SELECT UNNEST(ARRAY[near_cpty_id, far_cpty_id]) AS cpty_id, folder, 0.0 AS ia,"
        " trade_date, unnest(ARRAY[near_settle_date, far_settle_date]) AS settle_date,"
        " fund FROM fx_swaps"
        ") a "
        "WHERE cpty_id IS NOT NULL AND trade_date <=%s AND fund=%s AND settle_date >=%s",
        engine,
        params=(d, fund, d),
    )
    df_trs = pd.read_sql_query(
        "SELECT cpty_id, folder, initial_margin_percentage * notional/100 AS ia FROM trs "
        "WHERE cpty_id IS NOT NULL AND trade_date <= %s AND fund=%s",
        engine,
        params=(d, fund),
    )
    df_options = pd.read_sql_query(
        "SELECT cpty_id, folder, initial_margin AS ia FROM equityoptions "
        "WHERE cpty_id IS NOT NULL AND trade_date <= %s AND fund=%s",
        engine,
        params=(d, fund),
    )
    df_terminations = pd.read_sql_query(
        "SELECT cpty_id, folder, 0 as ia FROM termination_collateral_mapping "
        "WHERE termination_date <= %s AND fund=%s",
        engine,
        params=(d, fund),
    )
    df = pd.concat(
        [
            df_cds,
            df_swaptions,
            df_caps,
            df_forwards,
            df_trs,
            df_options,
            df_terminations,
        ]
    )
    # Map each strategy folder onto its cash-strategy bucket before returning.
    df = df.replace({"folder": STRATEGY_CASH_MAPPING})
    return df
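
# Hypothetical usage sketch (not original code): the bilateral-trade frame
# returned above carries one row per trade with an "ia" column (presumably the
# independent amount).  A typical downstream step would aggregate it per
# counterparty and cash bucket.  The fund name and column casing are assumptions.
def _example_ia_by_counterparty(d: datetime.date, engine: Engine) -> pd.DataFrame:
    trades = get_bilateral_trades(d, "MASTER", engine)  # "MASTER" is a placeholder fund
    # PostgreSQL folds unquoted aliases to lower case, so the column is assumed to be "ia".
    return trades.groupby(["cpty_id", "folder"])["ia"].sum().unstack("folder")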
\n") for cp, df in df.groupby(level="broker"): name = cp_mapping[cp] buf.write(f"