Diffstat (limited to 'python')
-rw-r--r--  python/bowdst.py  348
1 files changed, 0 insertions, 348 deletions
diff --git a/python/bowdst.py b/python/bowdst.py
deleted file mode 100644
index c86aea15..00000000
--- a/python/bowdst.py
+++ /dev/null
@@ -1,348 +0,0 @@
-import datetime
-import pandas as pd
-import pathlib
-import warnings
-from exchangelib import FileAttachment
-from io import StringIO
-from typing import Tuple
-from serenitas.analytics.dates import bus_day
-from serenitas.utils.db import dbconn, dawn_engine
-from serenitas.utils.env import DAILY_DIR
-from report_ops.misc import get_dir
-
-
-def download_messages(em):
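-    # Scan recent messages in the BowdoinOps/Reports folder from notify@bnymellon.com
-    # and save each CSV attachment under DAILY_DIR/<report date>/Reports.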
-    bowdst_wire_recent = True
-    for msg in em.get_msgs(
-        20,
-        path=["BowdoinOps", "Reports"],
-        subject__startswith="Document(s) from Reporting",
-    ):
-        if msg.sender == "notify@bnymellon.com":
-            for attach in msg.attachments:
-                fname = attach.name
-                if fname.endswith("csv"):
-                    base_name = fname.removesuffix(".csv")
-                    file_type, date_part = base_name.split("_")
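-                    # The filename is "<report type>_<date part>"; each report type
-                    # encodes its as-of date in a different format.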
-                    match file_type:
-                        case (
-                            "Asset Detail"
-                            | "Net Investment Earned Income by Security"
-                            | "Settled Cash Statement"
-                        ):
-                            date = datetime.datetime.strptime(
-                                date_part, "%d %b %Y"
-                            ).date()
-                        case "BowdstWires":
-                            try:
-                                date = datetime.datetime.strptime(
-                                    date_part, "%Y%m%d%H%M%S"
-                                ).date()
-                            except ValueError:
-                                date = datetime.datetime.strptime(
-                                    date_part, "%d %b %Y%H%M%S"
-                                ).date()
-                        case "Live-cash":
-                            date = datetime.datetime.strptime(
-                                date_part, "%Y%m%d%H%M%S"
-                            ).date()
-                        case _:
-                            warnings.warn(f"Unknown report type {file_type}")
-                            continue
-                    p = DAILY_DIR / str(date) / "Reports" / fname
-                    if not p.parent.exists():
-                        p.parent.mkdir(parents=True, exist_ok=True)
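-                    # Write the file if it is not already on disk; a BowdstWires file
-                    # also overwrites an existing copy, but only for the first one
-                    # encountered.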
-                    if not p.exists() or (
-                        fname.startswith("BowdstWires") and bowdst_wire_recent
-                    ):
-                        if fname.startswith("BowdstWires"):
-                            bowdst_wire_recent = False
-                        p.write_bytes(attach.content)
-
-
-def load_val_report(workdate: datetime.date):
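-    # Load the Asset Detail report for workdate, drop unused columns, normalize
-    # column names and numeric formats, and append the rows to bowdst_val.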
-    p = get_dir(workdate) / f"Asset Detail_{workdate:%d %b %Y}.csv"
-    df = pd.read_csv(
-        p, thousands=",", parse_dates=["As Of Date", "Maturity Date", "Report Run Date"]
-    )
-    df = df.drop(
-        [
-            "Reporting Account Number",
-            "Reporting Account Name",
-            "Source Account Name",
-            "Xref Security ID",
-            "Country Name",
-            "Country Code",
-            "Local Currency Name",
-            "Acct Base Currency Name",
-            "Acct Base Currency Code",
-            "CINS",
-            "Issuer ID",
-            "SEDOL",
-            "Valoren",
-            "Sicovam",
-            "WPK",
-            "Quick",
-            "Underlying Sec ID",
-            "Loan ID",
-            "Manager",
-            "Book Yield Value",
-            "Counterparty",
-            "Ticker with Exchange Code",
-            "Ticker with Yellow Key",
-            "Accounting Status",
-            "Primary GSP Account",
-            "Extended GSP Account Number",
-            "Percent Of Total",
-        ],
-        axis=1,
-    )
-    if "Acctg Status Update (EDT)" in df:
-        del df["Acctg Status Update (EDT)"]
-    elif "Acctg Status Update (EST)" in df:
-        del df["Acctg Status Update (EST)"]
-    df["Source Account Number"] = df["Source Account Number"].str[-4:].astype("int")
-    df.columns = df.columns.str.replace(" ", "_").str.lower()
-    df = df.rename(
-        columns={
-            "shares/par": "current_notional",
-            "local_unrealized_gain/loss": "local_unrealized_pnl",
-            "base_unrealized_gain/loss": "base_unrealized_pnl",
-        }
-    )
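-    # Columns that did not parse as floats use parentheses for negatives, e.g.
-    # "(1,234.56)"; rewrite the sign and strip commas before converting.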
-    for col in [
-        "current_notional",
-        "local_price",
-        "base_price",
-        "local_cost",
-        "base_cost",
-        "local_market_value",
-        "base_market_value",
-        "local_unrealized_pnl",
-        "base_unrealized_pnl",
-        "local_notional_cost",
-        "base_notional_cost",
-        "local_notional_value",
-        "base_notional_value",
-    ]:
-        if df[col].dtype != "float64":
-            df[col] = df[col].apply(lambda s: "-" + s[1:-1] if s.startswith("(") else s)
-            df[col] = pd.to_numeric(df[col].str.replace(",", ""))
-    df["row"] = df.index
-    df.to_sql("bowdst_val", dawn_engine, if_exists="append", index=False)
-
-
-def load_pnl_report(workdate: datetime.date):
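-    # Load the earned-income report and append it to bowdst_pnl; on Mondays the
-    # report filename is dated two days earlier.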
-    if workdate.weekday() == 0:
-        workdate -= datetime.timedelta(days=2)
-    p = (
-        get_dir(workdate)
-        / f"Net Investment Earned Income by Security_{workdate:%d %b %Y}.csv"
-    )
-    df = pd.read_csv(p, thousands=",", parse_dates=["Begin Date", "End Date"])
-    df = df.drop(
-        [
-            "Reporting Account Number",
-            "Reporting Account Name",
-            "Reporting Account Base Currency",
-            "Accounting Status",
-            "Security Cross Reference Type",
-            "Security Cross Reference Cusip",
-            "Local Currency Description",
-            "Country Of Issue",
-            "Country Of Issue Description",
-            "State Code",
-            "Asset Type Code",
-            "5500 category code",
-            "5500 class code Description",
-            "CINS",
-            "SEDOL",
-            "Valoren",
-            "Sicovam",
-            "WPK",
-            "QUICK",
-            "Underlying Sec ID",
-            "Loan ID",
-            "Counterparty",
-            "Source Account Name",
-            "Source Account Number",
-            "Fair Value Hierarchy - Beginning of Period",
-            "Fair Value Override - Beginning of Period",
-            "Fair Value Hierarchy - End of Period",
-            "Fair Value Override - End of Period",
-            "Country of Registration name",
-            "Country of Registration code",
-        ],
-        axis=1,
-    )
-    df.columns = df.columns.str.replace(" ", "_").str.lower()
-    df = df.rename(
-        columns={
-            "change_in_unrealized_currency_gain_loss": "unrealized_currency",
-            "change_in_unrealized_investment_gain_loss": "unrealized_investment",
-            "total_change_in_unrealized_gain_loss": "total_unrealized",
-            "accretion/amortization": "accretion_amortization",
-            "journal_entry_accretion/amortization": "journal_entry_accretion_amortization",
-            "realized_gain/loss": "realized_gain_loss",
-            "journal_entry_realized_g/l": "journal_entry_realized_gl",
-            "manager": "link_ref",
-            "realized_gain/loss_investment": "realized_investment",
-            "realized_gain/loss_currency": "realized_currency",
-            "realized_gain/loss_settled": "realized_settled",
-            "realized_gain/loss_traded": "realized_traded",
-        }
-    )
-    for col in [
-        "unrealized_currency",
-        "unrealized_investment",
-        "total_unrealized",
-        "ending_shares_par",
-        "opening_receivables",
-        "closing_receivables",
-        "income",
-        "journal_entry_income",
-        "accretion_amortization",
-        "journal_entry_accretion_amortization",
-        "realized_gain_loss",
-        "journal_entry_realized_gl",
-        "realized_loss_impaired_securities",
-        "net_investment_income",
-        "realized_investment",
-        "realized_currency",
-        "realized_settled",
-        "realized_traded",
-    ]:
-        if df[col].dtypes == "object":
-            df[col] = df[col].apply(lambda s: "-" + s[1:-1] if s.startswith("(") else s)
-            df[col] = pd.to_numeric(df[col].str.replace(",", ""))
-    df["row"] = df.index
-    df.to_sql("bowdst_pnl", dawn_engine, if_exists="append", index=False)
-
-
-def cmp_positions(cob: datetime.date, df_blotter: pd.DataFrame) -> pd.DataFrame:
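-    # Join the custodian's fixed-income positions (Asset Detail report for the
-    # business day after cob) onto the internal blotter for comparison.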
-    workdate = (cob + bus_day).date()
-    p = DAILY_DIR / str(workdate) / "Reports" / f"Asset Detail_{workdate:%d %b %Y}.csv"
-    df = pd.read_csv(p, thousands=",")
-    df = df[df["Asset Type"] == "FIXED INCOME SECURITIES"]
-    df = df.set_index("CUSIP")
-    df = df[["Shares/Par", "Base Price", "Local Market Value"]]
-    for col in df.select_dtypes(include=["object"]).columns:
-        df[col] = df[col].apply(lambda s: s[1:-1] if s.startswith("(") else s)
-        df[col] = pd.to_numeric(df[col].str.replace(",", ""))
-    check = df_blotter.join(df)
-    return check
-
-
-def load_cash_report(workdate: datetime.date, cob):
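-    # Keep rows with no transaction type plus BUY/SELL rows, sum the local opening
-    # balances per account and currency, and append the result to cash_balances.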
-    p = (
-        DAILY_DIR
-        / str(workdate)
-        / "Reports"
-        / f"Settled Cash Statement_{workdate:%d %b %Y}.csv"
-    )
-    df = pd.read_csv(p, thousands=",")
-    df = df[
-        df["Transaction Type"].isna() | df["Transaction Type"].isin(["BUY", "SELL"])
-    ]
-    df["Opening Balance Local"] = (
-        df["Opening Balance Local"]
-        .replace("[(]", "-", regex=True)
-        .replace("[),]", "", regex=True)
-        .astype("float")
-    )
-    df = df.groupby(["Account Name", "Account Number", "Local Currency Code"]).sum()
-    df["date"] = cob
-    df["fund"] = "BOWDST"
-    df = df[["Opening Balance Local", "date", "fund"]]
-    df.reset_index(inplace=True)
-    df["Account Number"] = df["Account Number"].astype(
-        "int64"
-    )  # Account Numbers are read in as float
-    df = df.rename(
-        {
-            "Account Name": "account_name",
-            "Account Number": "account_number",
-            "Local Currency Code": "currency_code",
-            "Opening Balance Local": "balance",
-        },
-        axis=1,
-    )
-    df.to_sql("cash_balances", dawn_engine, if_exists="append", index=False)
-
-
-def get_positions(
-    cob: datetime.date,
-) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
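-    # Query dawndb for the BOWDST risk positions, CDS marks and tranche positions
-    # as of cob.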
-    dawndb = dbconn("dawndb")
-    df_blotter = pd.read_sql_query(
-        "SELECT * FROM risk_positions(%s, NULL, %s)",
-        dawndb,
-        params=(cob, "BOWDST"),
-        index_col=["identifier"],
-    )
-
-    cds_positions = pd.read_sql_query(
-        "SELECT * FROM list_cds_marks_pre(%s, NULL, %s)",
-        dawndb,
-        params=(cob, "BOWDST"),
-        index_col=["security_id"],
-    )
-    tranche_positions = pd.read_sql_query(
-        "SELECT id, security_id, security_desc, maturity, a.notional, "
-        "protection, orig_attach, orig_detach, tranche_factor, clean_nav, "
-        "accrued, cp_code, cpty_id from list_cds(%s, %s) a "
-        "LEFT JOIN tranche_risk ON id=tranche_id AND date=%s "
-        "WHERE orig_attach IS NOT NULL",
-        dawndb,
-        params=(cob, "BOWDST", cob),
-        index_col=["id"],
-    )
-
-    return df_blotter, cds_positions, tranche_positions
-
-
-def send_email(
-    em,
-    workdate: datetime.date,
-    df_bonds: pd.DataFrame,
-    df_cds: pd.DataFrame,
-    df_tranches: pd.DataFrame,
-):
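-    # Attach the bond, CDS and tranche positions as CSVs and email them to BNY
-    # Mellon operations, copying Bowdoin ops.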
-    attachments = []
-    for name, df in zip(("bonds", "cds", "tranches"), (df_bonds, df_cds, df_tranches)):
-        buf = StringIO()
-        df.to_csv(buf)
-        attachments.append(
-            FileAttachment(
-                name=f"{workdate} {name}.csv", content=buf.getvalue().encode()
-            )
-        )
-        buf.close()
-    em.send_email(
-        f"{workdate} EOD positions",
-        "",
-        to_recipients=("hm-operations@bnymellon.com",),
-        cc_recipients=("bowdoin-ops@lmcg.com",),
-        attach=attachments,
-    )
-
-
-if __name__ == "__main__":
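-    # Daily driver: download the latest reports, email EOD positions as of the
-    # prior business day, and load the settled cash balances.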
-    import argparse
-    from serenitas.utils.exchange import ExchangeMessage
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        "workdate",
-        nargs="?",
-        type=datetime.date.fromisoformat,
-        default=datetime.date.today(),
-        help="working date",
-    )
-    args = parser.parse_args()
-    em = ExchangeMessage()
-    download_messages(em)
-    cob = (args.workdate - bus_day).date()
-    df_bonds, df_cds, df_tranches = get_positions(cob)
-    send_email(em, cob, df_bonds, df_cds, df_tranches)
-    load_cash_report(args.workdate, cob)