import logging
import pathlib
import zipfile
from io import BytesIO
from urllib.parse import parse_qs, urljoin, urlsplit, urlunsplit

import pandas as pd
import requests
import xlrd
from bs4 import BeautifulSoup
from pandas.tseries.offsets import BDay
from xlrd import open_workbook, xldate_as_tuple

from . import DAILY_DIR
from serenitas.analytics.utils import get_fx

logger = logging.getLogger(__name__)


def download_from_secure_id(
    secure_id: str,
    brand: str,
    path: pathlib.Path,
    base_url="https://secmail.bankofamerica.com",
):
    """Log into the BofA secure-mail portal for the given message id and
    extract its zip attachment into ``path``, skipping files already there."""
    # Portal credentials, keyed by the username the login form pre-fills.
    password = {
        "ghorel@lmcg.com": "v4vdMvH9Qe9t",
        "nyops@lmcg.com": "Serenitas21",
    }
    payload = {}
    with requests.Session() as session:
        # Step 1: load the login dialog for this message id.
        r = session.get(
            urljoin(base_url, "formpostdir/securereader"),
            params={"id": secure_id, "brand": brand},
            verify="secmail-bankofamerica-com-chain.pem",  # pinned CA chain
        )
        soup = BeautifulSoup(r.content, features="lxml")
        form = soup.find(id="dialog")
        if "messagenotfound" in form["action"]:
            raise ValueError("message not found")
        # Step 2: echo back every hidden form field, plus the password.
        for inp in form.find_all("input"):
            payload[inp["name"]] = inp["value"]
        payload["dialog:password"] = password[payload["dialog:username"]]
        r = session.post(base_url + form["action"], data=payload)
        # Step 3: submit the "download zip" button from the reader toolbar,
        # carrying over the JSF view-state token.
        soup = BeautifulSoup(r.content, features="lxml")
        form = soup.find(id="readTB")
        payload = {
            "readTB": "readTB",
            "readTB:downloadZipButton": "readTB:downloadZipButton",
        }
        for inp in form.find_all("input"):
            if "ViewState" in inp["name"]:
                payload["javax.faces.ViewState"] = inp["value"]
        r = session.post(urljoin(base_url, "securereader/read.jsf"), data=payload)
        # Step 4: unpack the zip, keeping everything except the html wrapper.
        if r.headers["content-type"] == "application/octet-stream":
            with zipfile.ZipFile(BytesIO(r.content)) as z:
                for f in z.namelist():
                    if not f.endswith("html") and not (path / f).exists():
                        z.extract(f, path=path)


# Mailbox folder path per fund.
paths = {
    "Serenitas": ["NYops", "Margin Calls Baml"],
    "BowdSt": ["BowdoinOps", "Margin BoA"],
    "Selene": ["SeleneOps", "Margin BoA"],
}


def read_secure_message(msg, dest, logger):
    """Follow the secure-mail link embedded in ``msg`` and download its files."""
    soup = BeautifulSoup(msg.body, features="lxml")
    a = soup.find("a")
    url = urlsplit(a["href"])
    query = parse_qs(url.query)
    # Keep only scheme and netloc: the portal root.
    base_url = urlunsplit(url[:2] + ("",) * 3)
    try:
        download_from_secure_id(query["id"][0], query["brand"][0], dest, base_url)
    except ValueError as e:
        if logger:
            logger.error(e)


def download_files(em, d=None, count=20, *, fund="Serenitas", **kwargs):
    DATA_DIR = DAILY_DIR / fund / "BoA_reports"
    if fund not in paths:
        return
    emails = em.get_msgs(path=paths[fund], count=count)
    for msg in emails:
        match msg.sender.name:
            # Valuation emails arrive as HTML bodies carrying a secure-mail link.
            case (
                "us_otc_client_valuation@baml.com"
                | "us_otc_client_valuation@bofa.com"
            ) if msg.body.body_type == "HTML":
                read_secure_message(msg, DATA_DIR, logger)
            # Otherwise the reports come as plain attachments.
            case (
                "us_otc_client_valuation@baml.com"
                | "us_otc_client_valuation@bofa.com"
                | "bank_of_america_collateral_operations@bankofamerica.com"
            ):
                for attach in msg.attachments:
                    if attach.name.endswith("xls") or attach.name.endswith("pdf"):
                        p = DATA_DIR / attach.name
                        if not p.exists():
                            p.write_bytes(attach.content)
            case _:
                continue


def load_excel_old(fname):
    """Parse the legacy report layout: headers on row 7, data from row 8."""
    wb = open_workbook(fname)
    s = wb.sheet_by_index(0)
    headers = s.row_values(6, 0)
    i = 7
    rows = []
    # Read until the first blank cell in column 0.
    while s.cell(i, 0).value != "":
        r = []
        for e in s.row_slice(i, 0):
            if e.ctype == xlrd.XL_CELL_DATE:
                r.append(pd.Timestamp(*xldate_as_tuple(e.value, wb.datemode)))
            else:
                r.append(e.value)
        rows.append(r)
        i += 1
    df = pd.DataFrame.from_records(rows, columns=headers)
    # Notional columns come through as comma-grouped strings.
    for col in ["Original Notional", "Notional"]:
        df[col] = pd.to_numeric(df[col].str.replace(",", ""))
    return df


def load_excel(fname):
    """Parse the current report layout: headers on row 14, data from row 20."""
    wb = open_workbook(fname)
    s = wb.sheet_by_index(0)
    headers = s.row_values(13, 0)
    i = 19
    rows = []
    # Scan until column 0 holds a date-typed cell, or a "NOP" summary row.
    while s.cell(i, 0).ctype != xlrd.XL_CELL_DATE:
        content = s.cell(i, 0).value
        if content.startswith("NOP"):
            break
        # Skip blank lines and asset-class section headings.
        if (
            content == ""
            or content.startswith("Credit")
            or content.startswith("FX")
            or content.startswith("Unspecified")
            or content.startswith("Equity - Option")
        ):
            i += 1
            continue
        r = []
        for e in s.row_slice(i, 0):
            if e.ctype == xlrd.XL_CELL_DATE:
                r.append(pd.Timestamp(*xldate_as_tuple(e.value, wb.datemode)))
            else:
                r.append(e.value)
        rows.append(r)
        i += 1
    df = pd.DataFrame.from_records(rows, columns=headers)
    if not df.empty:
        # Look up the FX rate for each row's value date and currency,
        # and rescale the local market value with it.
        df["fx"] = df[["Market Value Date", "Ccy1"]].apply(lambda x: get_fx(*x), axis=1)
        df["Market Value Amount"] /= df["fx"]
        df = df.rename(
            columns={
                "Contract ID ": "Trade ID",
                "Market Value Amount": "local_nav",
                "Market Value Amount in CA Ccy": "base_nav",
                "Trade Date ": "Trade Date",
                "Cpty Independent Amount": "ia",
            }
        )
        # Flip the sign convention on the NAV columns.
        df[["local_nav", "base_nav"]] *= -1.0
    return df


def collateral(d, dawn_trades, *, fund="Serenitas", **kwargs):
    match fund:
        case "Serenitas":
            tag = "TSLP"
        case "BowdSt":
            tag = "TLLC"
        case "Selene":
            tag = "INC"
        case _:
            raise ValueError(f"unknown fund {fund}")
    # Report files are stamped with the next business day.
    report_date = d + BDay()
    REPORTS_DIR = DAILY_DIR / fund / "BoA_reports"
    try:
        fname = next(
            REPORTS_DIR.glob(f"Collat_LMCG_INVESTMEN{tag}_CSA_{report_date:%m%d%Y}_*")
        )
    except StopIteration:
        raise ValueError(f"no collateral data for date {report_date}")
    df = pd.read_excel(fname, skiprows=6, skipfooter=6)
    if df.empty:
        collateral = 0
        logger.warning("empty collateral file")
    else:
        df = df.drop(0, axis=0)
        try:
            # A single remaining row converts directly to a scalar;
            # several rows fall back to summing the column.
            collateral = float(df.Notional)
        except TypeError:
            collateral = df.Notional.sum()
    try:
        fname = next(
            REPORTS_DIR.glob(f"301__LMCG_INVESTMEN{tag}_CSA_{report_date:%m%d%Y}_*")
        )
    except StopIteration:
        raise ValueError(f"no trade-level data for date {report_date}")
    df = load_excel(fname)
    df = df[["Trade ID", "base_nav", "ia"]]
    # Map each counterparty trade id to its internal folder.
    df = df.merge(
        dawn_trades.drop("ia", axis=1),
        how="left",
        left_on="Trade ID",
        right_on="cpty_id",
    )
    missing_ids = df.loc[df.cpty_id.isnull(), "Trade ID"]
    if not missing_ids.empty:
        raise ValueError(f"{missing_ids.tolist()} not in the database")
    df = df[["folder", "base_nav", "ia"]]
    # we put all the FX collateral in TCSH for now
    df.loc[df.folder == "M_CSH_CASH", "folder"] = "TCSH"
    df = df.groupby("folder").sum()
    df = (df["ia"] - df["base_nav"]).to_frame(name="Amount")
    df["Currency"] = "USD"
    df = df.reset_index()
    df.columns = ["Strategy", "Amount", "Currency"]
    df.Amount *= -1
    df.loc[df.Strategy == "M_CSH_CASH", "Strategy"] = "TCSH"
    # Balancing cash row: the Amount column then nets to -collateral.
    df.loc[len(df.index)] = ["M_CSH_CASH", -collateral - df.Amount.sum(), "USD"]
    df["date"] = d
    return df.set_index("Strategy")
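

if __name__ == "__main__":
    # Minimal smoke-test sketch, not part of the production flow. It assumes
    # report files are already present under DAILY_DIR and that ``dawn_trades``
    # carries the ``cpty_id``/``folder``/``ia`` columns that ``collateral``
    # merges on; the trade id and date below are placeholders, not real data.
    dawn_trades = pd.DataFrame(
        {
            "cpty_id": ["PLACEHOLDER-ID"],
            "folder": ["M_CSH_CASH"],
            "ia": [0.0],
        }
    )
    print(collateral(pd.Timestamp("2021-06-01"), dawn_trades, fund="Serenitas"))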