from io import BytesIO
import logging
import pathlib
import zipfile
from urllib.parse import urlsplit, parse_qs, urlunsplit, urljoin

from bs4 import BeautifulSoup
import pandas as pd
from pandas.tseries.offsets import BDay
import requests
import xlrd
from xlrd import open_workbook, xldate_as_tuple

from . import DAILY_DIR

logger = logging.getLogger(__name__)


def download_from_secure_id(
    secure_id: str,
    brand: str,
    path: pathlib.Path,
    base_url="https://secmail.bankofamerica.com",
):
    """Log in to the BofA secure-mail reader for ``secure_id`` and extract
    any new non-HTML files from its zip download into ``path``."""
    # Secure-mail credentials, keyed by the username the login form reports.
    password = {
        "ghorel@lmcg.com": "v4vdMvH9Qe9t",
        "nyops@lmcg.com": "a6lAkBfqDSHsrkGspYSS",
    }
    payload = {}
    with requests.Session() as session:
        # Fetch the login form, validating against the pinned certificate chain.
        r = session.get(
            urljoin(base_url, "formpostdir/securereader"),
            params={"id": secure_id, "brand": brand},
            verify="secmail-bankofamerica-com-chain.pem",
        )
        soup = BeautifulSoup(r.content, features="lxml")
        form = soup.find(id="dialog")
        if "messagenotfound" in form["action"]:
            raise ValueError("message not found")
        # Replay the form's hidden fields, fill in the password and log in.
        for inp in form.find_all("input"):
            payload[inp["name"]] = inp["value"]
        payload["dialog:password"] = password[payload["dialog:username"]]
        r = session.post(base_url + form["action"], data=payload)
        soup = BeautifulSoup(r.content, features="lxml")
        form = soup.find(id="readTB")
        # Trigger the "download as zip" button, carrying over the JSF view state.
        payload = {
            "readTB": "readTB",
            "readTB:downloadZipButton": "readTB:downloadZipButton",
        }
        for inp in form.find_all("input"):
            if "ViewState" in inp["name"]:
                payload["javax.faces.ViewState"] = inp["value"]
        r = session.post(urljoin(base_url, "securereader/read.jsf"), data=payload)
        if r.headers["content-type"] == "application/octet-stream":
            with zipfile.ZipFile(BytesIO(r.content)) as z:
                for f in z.namelist():
                    # Skip the HTML message body; only extract files we
                    # don't already have.
                    if not f.endswith("html") and not (path / f).exists():
                        z.extract(f, path=path)


# Mailbox folders to scan, per fund.
paths = {
    "Serenitas": ["NYops", "Margin Calls Baml"],
    "BowdSt": ["BowdoinOps", "Margin BoA"],
}


def download_files(em, d=None, count=20, *, fund="Serenitas", **kwargs):
    """Scan the fund's mailbox folder and save new BoA reports to disk."""
    DATA_DIR = DAILY_DIR / fund / "BoA_reports"
    if fund not in paths:
        return
    emails = em.get_msgs(path=paths[fund], count=count)
    for msg in emails:
        if (
            msg.sender.name == "us_otc_client_valuation@baml.com"
            or msg.sender.name == "us_otc_client_valuation@bofa.com"
        ):
            # Valuation reports sit behind a secure-mail link in the body.
            soup = BeautifulSoup(msg.body, features="lxml")
            a = soup.find("a")
            url = urlsplit(a["href"])
            query = parse_qs(url.query)
            base_url = urlunsplit(url[:2] + ("",) * 3)
            try:
                download_from_secure_id(
                    query["id"][0], query["brand"][0], DATA_DIR, base_url
                )
            except ValueError as e:
                logger.error(e)
                continue
        if (
            msg.sender.name
            == "bank_of_america_collateral_operations@bankofamerica.com"
        ):
            # Collateral reports arrive as plain .xls attachments.
            for attach in msg.attachments:
                if attach.name.endswith("xls"):
                    p = DATA_DIR / attach.name
                    if not p.exists():
                        p.write_bytes(attach.content)


def load_excel_old(fname):
    """Parse the legacy trade-level report: headers on sheet row 7, data
    from row 8 until the first blank cell in column 0."""
    wb = open_workbook(fname)
    s = wb.sheet_by_index(0)
    headers = s.row_values(6, 0)
    i = 7
    rows = []
    while s.cell(i, 0).value != "":
        r = []
        for e in s.row_slice(i, 0):
            if e.ctype == xlrd.XL_CELL_DATE:
                # xlrd hands dates back as float serials; convert via the
                # workbook's date mode.
                r.append(pd.Timestamp(*xldate_as_tuple(e.value, wb.datemode)))
            else:
                r.append(e.value)
        rows.append(r)
        i += 1
    df = pd.DataFrame.from_records(rows, columns=headers)
    # Notional columns come through as comma-grouped strings.
    for col in ["Original Notional", "Notional"]:
        df[col] = pd.to_numeric(df[col].str.replace(",", "", regex=False))
    return df
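
# A minimal, self-contained sketch of the date handling shared by both
# loaders, with toy values (not from any real report): xlrd reports date
# cells as float serials, and xldate_as_tuple turns one into a
# (year, month, day, hour, minute, second) tuple that pd.Timestamp accepts
# positionally. Date mode 0 assumes the 1900-based Excel epoch.
#
#     from xlrd import xldate_as_tuple
#     xldate_as_tuple(43831.0, 0)                 # -> (2020, 1, 1, 0, 0, 0)
#     pd.Timestamp(*xldate_as_tuple(43831.0, 0))  # -> Timestamp('2020-01-01')
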
def load_excel(fname):
    """Parse the current trade-level report: headers on sheet row 14, data
    from row 20. Blank rows and "Credit..." section headers are skipped;
    the table ends at the first date cell in column 0."""
    wb = open_workbook(fname)
    s = wb.sheet_by_index(0)
    headers = s.row_values(13, 0)
    i = 19
    rows = []
    while s.cell(i, 0).ctype != xlrd.XL_CELL_DATE:
        if s.cell(i, 0).value == "" or s.cell(i, 0).value.startswith("Credit"):
            i += 1
            continue
        r = []
        for e in s.row_slice(i, 0):
            if e.ctype == xlrd.XL_CELL_DATE:
                r.append(pd.Timestamp(*xldate_as_tuple(e.value, wb.datemode)))
            else:
                r.append(e.value)
        rows.append(r)
        i += 1
    df = pd.DataFrame.from_records(rows, columns=headers)
    df = df.rename(
        columns={
            "Contract ID ": "Trade ID",
            "Market Value Amount": "local_nav",
            "Market Value Amount in CA Ccy": "base_nav",
            "Trade Date ": "Trade Date",
            "Cpty Independent Amount": "ia",
        }
    )
    # Flip the sign convention of the reported market values.
    df[["local_nav", "base_nav"]] *= -1.0
    return df


def collateral(d, dawn_trades, *, fund="Serenitas", **kwargs):
    """Allocate BoA collateral across strategies for date ``d`` using the
    reports dated one business day later; returns a DataFrame indexed by
    Strategy with Amount, Currency and date columns."""
    report_date = d + BDay()
    REPORTS_DIR = DAILY_DIR / fund / "BoA_reports"
    try:
        fname = next(REPORTS_DIR.glob(f"Collat_*{report_date:%m%d%Y}_*.xls"))
    except StopIteration:
        raise ValueError(f"no collateral data for date {report_date}")
    df = pd.read_excel(fname, skiprows=6, skipfooter=6)
    df = df.drop(0, axis=0)
    try:
        # A single collateral line converts directly ...
        collateral = float(df.Notional)
    except TypeError:
        # ... otherwise sum across lines.
        collateral = df.Notional.sum()
    try:
        tag = "SLP" if fund == "Serenitas" else "LLC"
        fname = next(
            REPORTS_DIR.glob(f"301__LMCG_INVESTMENT{tag}_CSA_{report_date:%m%d%Y}_*")
        )
    except StopIteration:
        raise ValueError(f"no trade-level data for date {report_date}")
    df = load_excel(fname)
    df = df[["Trade ID", "base_nav", "ia"]]
    # Map counterparty trade IDs onto our folders (strategies).
    df = df.merge(
        dawn_trades.drop("ia", axis=1),
        how="left",
        left_on="Trade ID",
        right_on="cpty_id",
    )
    missing_ids = df.loc[df.cpty_id.isnull(), "Trade ID"]
    if not missing_ids.empty:
        raise ValueError(f"{missing_ids.tolist()} not in the database")
    df = df[["folder", "base_nav", "ia"]]
    df = df.groupby("folder").sum()
    df = (df["ia"] - df["base_nav"]).to_frame(name="Amount")
    df["Currency"] = "USD"
    df = df.reset_index()
    df.columns = ["Strategy", "Amount", "Currency"]
    df.Amount *= -1
    # Residual cash row so that the Amount column nets to -collateral.
    # (DataFrame.append was removed in pandas 2.0; pd.concat is equivalent.)
    df = pd.concat(
        [
            df,
            pd.DataFrame(
                [
                    {
                        "Strategy": "M_CSH_CASH",
                        "Amount": -collateral - df.Amount.sum(),
                        "Currency": "USD",
                    }
                ]
            ),
        ],
        ignore_index=True,
    )
    df["date"] = d
    return df.set_index("Strategy")
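
# ---------------------------------------------------------------------------
# Usage sketch (hypothetical names, not part of this module): `em` is assumed
# to be a mail-client wrapper exposing get_msgs(path=..., count=...) whose
# messages carry .sender, .body and .attachments, and `dawn_trades` a
# DataFrame with at least the columns cpty_id, folder and ia.
#
#     em = MailClient(...)                      # hypothetical mail client
#     d = pd.Timestamp("2020-01-02")
#     download_files(em, d, fund="Serenitas")   # pull the BoA reports
#     positions = collateral(d, dawn_trades, fund="Serenitas")
#
# collateral() returns one row per strategy, Amount = -(ia - base_nav), plus
# an M_CSH_CASH plug row so that the Amount column nets to -collateral.
# ---------------------------------------------------------------------------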