Diffstat (limited to 'python')
-rw-r--r--  python/api_quotes/__init__.py                            0
-rw-r--r--  python/api_quotes/__main__.py                           23
-rw-r--r--  python/api_quotes/api.py                                45
-rw-r--r--  python/api_quotes/quotes.py                             66
-rw-r--r--  python/bbg_cds_quotes.py                                 5
-rw-r--r--  python/bowdst.py                                       348
-rw-r--r--  python/collateral/baml_fcm.py                            2
-rw-r--r--  python/exploration/dispersion.py                        96
-rw-r--r--  python/fcm_fx.py (renamed from python/baml_fcm_fx.py)   25
-rw-r--r--  python/load_globeop_report.py                            3
-rw-r--r--  python/mtm_status.py                                     8
-rw-r--r--  python/notebooks/dispersion_tranche_model.ipynb        198
-rw-r--r--  python/pnl_explain.py                                    4
-rw-r--r--  python/report_ops/__main__.py                            4
-rw-r--r--  python/report_ops/custodians.py                         36
-rw-r--r--  python/report_ops/misc.py                                7
-rw-r--r--  python/report_ops/utils.py                              25
-rw-r--r--  python/risk/__main__.py                                  5
-rw-r--r--  python/risk/tranches.py                                 83
-rw-r--r--  python/strat_cash_realloc.py                             2
-rw-r--r--  python/task_server/globeop.py                            6
21 files changed, 380 insertions, 611 deletions
diff --git a/python/api_quotes/__init__.py b/python/api_quotes/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/python/api_quotes/__init__.py
diff --git a/python/api_quotes/__main__.py b/python/api_quotes/__main__.py
new file mode 100644
index 00000000..e0cf8708
--- /dev/null
+++ b/python/api_quotes/__main__.py
@@ -0,0 +1,23 @@
+from .api import MarkitAPI
+from .quotes import Quote
+import pandas as pd
+import logging
+
+logger = logging.getLogger(__name__)
+
+if __name__ == "__main__":
+    asset_class = "CD"
+    after = None
+    while True:
+        if data := MarkitAPI.get_data(asset_class, after):
+            for row in data:
+                try:
+                    quote = Quote.from_markit_line(row)
+                except ValueError as e:
+                    logger.error(f"Couldn't parse {row['quoteid']}: {e}")
+                else:
+                    quote.stage()
+                    quote.commit()
+                after = f"{row['receiveddatetime']},{asset_class}-9480-{row['quoteid']}"
+        else:
+            break
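
The block above is keyset pagination: the last row of each page becomes the `after` cursor for the next request, and the loop ends once a page comes back empty. A minimal, self-contained sketch of the same pattern (the stub fetch_page stands in for MarkitAPI.get_data; all names and values are illustrative):

def fetch_page(after=None):
    # first call returns one row; any subsequent call returns an empty page
    return [] if after else [{"quoteid": 1, "receiveddatetime": 1700000000000}]

after = None
while rows := fetch_page(after):
    for row in rows:
        # the last processed row becomes the resume cursor for the next request
        after = f"{row['receiveddatetime']},CD-9480-{row['quoteid']}"
# terminates because an empty list is falsy -- the reason get_data returns a list rather than a map object
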
diff --git a/python/api_quotes/api.py b/python/api_quotes/api.py
new file mode 100644
index 00000000..af479d67
--- /dev/null
+++ b/python/api_quotes/api.py
@@ -0,0 +1,45 @@
+from serenitas.utils.misc import get_credential_path
+import json
+import posixpath
+from urllib.parse import urljoin
+from typing import ClassVar
+import requests
+import pandas as pd
+
+
+def load_api_key():
+    with get_credential_path("markit_api").open() as fh:
+        creds = json.load(fh)
+    base_url = creds.pop("url")
+    r = requests.post(
+        urljoin(base_url, "apikey"),
+        data=creds,
+    )
+    return base_url, r.text
+
+
+def lowercase_keys(d):
+    return {k.lower(): v for k, v in d.items()}
+
+
+class MarkitAPI:
+    base_url, api_key = load_api_key()
+
+    @classmethod
+    def get_data(cls, asset_class, after=None, service="latest"):
+        params = {
+            "format": "json",
+            "assetClass": asset_class,
+            "apikey": cls.api_key,
+            "limit": 1000,
+            "sortBy": "receivedDateTime",
+            "descending": "true",
+            "dateformat": "MILLISECONDSSINCEEPOCH",
+        }
+        if after:
+            params["after"] = after
+        print(params)
+        path = posixpath.join("parsing", "Quote", service)
+        url = urljoin(cls.base_url, path)
+        r = requests.get(url, params)
+        return [lowercase_keys(d) for d in json.loads(r.text)]  # a list, unlike map(), is falsy when empty
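
One subtlety in get_data above: urljoin only appends the path when the base URL ends with a slash; otherwise it replaces the base's final path segment. A quick illustration (the base URL is made up):

import posixpath
from urllib.parse import urljoin

path = posixpath.join("parsing", "Quote", "latest")
print(urljoin("https://api.example.com/v1/", path))  # https://api.example.com/v1/parsing/Quote/latest
print(urljoin("https://api.example.com/v1", path))   # https://api.example.com/parsing/Quote/latest -- "v1" is dropped
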
diff --git a/python/api_quotes/quotes.py b/python/api_quotes/quotes.py
new file mode 100644
index 00000000..28ca708b
--- /dev/null
+++ b/python/api_quotes/quotes.py
@@ -0,0 +1,66 @@
+from serenitas.ops.trade_dataclasses import Deal
+from dataclasses import dataclass
+import datetime
+from typing import Literal
+from serenitas.utils.db2 import dbconn
+
+Firmness = Literal["FIRM", "INDICATIVE"]
+AssetClass = Literal["CD"]
+
+
+def maturity_dt(d):
+    try:
+        return datetime.date(
+            int(d["maturityyear"]), int(d["maturitymonth"]), int(d["maturityday"])
+        )
+    except (
+        ValueError,
+        KeyError,
+    ):  # Sometimes maturity isn't included but we still have tenor
+        return
+
+
+@dataclass
+class Quote(Deal, table_name="markit_singlename_quotes", deal_type=None):
+    quoteid: int
+    assetclass: AssetClass
+    msg_id: str
+    quotesource: str
+    confidence: int
+    redcode: str = None
+    ticker: str = None
+    maturity: datetime.date = None
+    tenor: str = None
+    runningcoupon: int = None
+    bidconventionalspread: float = None
+    bidupfront: float = None
+    bidsize: float = None
+    askconventionalspread: float = None
+    askupfront: float = None
+    asksize: float = None
+    firmness: Firmness = None
+    quotedate: datetime.datetime = None
+
+    @classmethod
+    def from_markit_line(cls, d):
+        additional_attributes = {
+            "maturity": maturity_dt(d),
+            "msg_id": d["message"]["id"],
+            "quotedate": datetime.datetime.fromtimestamp(
+                d["receiveddatetime"] / 1000
+            ).replace(tzinfo=datetime.timezone.utc),
+            "quotesource": d["sourceshortname"],
+            "tenor": f"{d['tenor']}Y",
+        }
+        d.update(additional_attributes)
+        return cls.from_dict(**d)
+
+    @property
+    def message(self):
+        return QuoteDetails.from_tradeid(self.msg_id)
+
+
+Quote.init_dbconn(dbconn("serenitasdb"))
+Quote._sql_insert = Quote._sql_insert.replace(
+    "RETURNING *", "ON CONFLICT (quoteid) DO NOTHING RETURNING *"
+)
diff --git a/python/bbg_cds_quotes.py b/python/bbg_cds_quotes.py
index b921b838..ac96bf79 100644
--- a/python/bbg_cds_quotes.py
+++ b/python/bbg_cds_quotes.py
@@ -31,7 +31,6 @@ def bbg_call(session, securities, fields, r):
 r = {}
 bbg_call(securities, fields, r)
-
 sql_str = f"INSERT INTO cds_quotes VALUES({','.join(['%s'] * 8)})"
@@ -48,10 +47,10 @@ def build_tuple(k, v, workdate, source):
     )
-for pcs in ["CBGN", "CMAN"]:
+for pcs, data in r.items():
     logger.info(f"Inserting quotes for {pcs}")
     with serenitasdb.cursor() as c:
         c.executemany(
-            sql_str, [build_tuple(k, v, workdate, pcs) for k, v in r[pcs].items()]
+            sql_str, [build_tuple(k, v, workdate, pcs) for k, v in data.items()]
         )
     serenitasdb.commit()
diff --git a/python/bowdst.py b/python/bowdst.py
deleted file mode 100644
index c86aea15..00000000
--- a/python/bowdst.py
+++ /dev/null
@@ -1,348 +0,0 @@
-import datetime
-import pandas as pd
-import pathlib
-import warnings
-from exchangelib import FileAttachment
-from io import StringIO
-from typing import Tuple
-from serenitas.analytics.dates import bus_day
-from serenitas.utils.db import dbconn, dawn_engine
-from serenitas.utils.env import DAILY_DIR
-from report_ops.misc import get_dir
-
-
-def download_messages(em):
- bowdst_wire_recent = True
- for msg in em.get_msgs(
- 20,
- path=["BowdoinOps", "Reports"],
- subject__startswith="Document(s) from Reporting",
- ):
- if msg.sender == "notify@bnymellon.com":
- for attach in msg.attachments:
- fname = attach.name
- if fname.endswith("csv"):
- base_name = fname.removesuffix(".csv")
- file_type, date_part = base_name.split("_")
- match file_type:
- case (
- "Asset Detail"
- | "Net Investment Earned Income by Security"
- | "Settled Cash Statement"
- ):
- date = datetime.datetime.strptime(
- date_part, "%d %b %Y"
- ).date()
- case "BowdstWires":
- try:
- date = datetime.datetime.strptime(
- date_part, "%Y%m%d%H%M%S"
- ).date()
- except ValueError:
- date = datetime.datetime.strptime(
- date_part, "%d %b %Y%H%M%S"
- ).date()
- case "Live-cash":
- date = datetime.datetime.strptime(
- date_part, "%Y%m%d%H%M%S"
- ).date()
- case _:
- warnings.warn(f"Unknown report type {file_type}")
- continue
- p = DAILY_DIR / str(date) / "Reports" / fname
- if not p.parent.exists():
- p.parent.mkdir(parents=True, exist_ok=True)
- if not p.exists() or (
- fname.startswith("BowdstWires") and bowdst_wire_recent
- ):
- if fname.startswith("BowdstWires"):
- bowdst_wire_recent = False
- p.write_bytes(attach.content)
-
-
-def load_val_report(workdate: datetime.date):
- p = get_dir(workdate) / f"Asset Detail_{workdate:%d %b %Y}.csv"
- df = pd.read_csv(
- p, thousands=",", parse_dates=["As Of Date", "Maturity Date", "Report Run Date"]
- )
- df = df.drop(
- [
- "Reporting Account Number",
- "Reporting Account Name",
- "Source Account Name",
- "Xref Security ID",
- "Country Name",
- "Country Code",
- "Local Currency Name",
- "Acct Base Currency Name",
- "Acct Base Currency Code",
- "CINS",
- "Issuer ID",
- "SEDOL",
- "Valoren",
- "Sicovam",
- "WPK",
- "Quick",
- "Underlying Sec ID",
- "Loan ID",
- "Manager",
- "Book Yield Value",
- "Counterparty",
- "Ticker with Exchange Code",
- "Ticker with Yellow Key",
- "Accounting Status",
- "Primary GSP Account",
- "Extended GSP Account Number",
- "Percent Of Total",
- ],
- axis=1,
- )
- if "Acctg Status Update (EDT)" in df:
- del df["Acctg Status Update (EDT)"]
- elif "Acctg Status Update (EST)" in df:
- del df["Acctg Status Update (EST)"]
- df["Source Account Number"] = df["Source Account Number"].str[-4:].astype("int")
- df.columns = df.columns.str.replace(" ", "_").str.lower()
- df = df.rename(
- columns={
- "shares/par": "current_notional",
- "local_unrealized_gain/loss": "local_unrealized_pnl",
- "base_unrealized_gain/loss": "base_unrealized_pnl",
- }
- )
- for col in [
- "current_notional",
- "local_price",
- "base_price",
- "local_cost",
- "base_cost",
- "local_market_value",
- "base_market_value",
- "local_unrealized_pnl",
- "base_unrealized_pnl",
- "local_notional_cost",
- "base_notional_cost",
- "local_notional_value",
- "base_notional_value",
- ]:
- if df[col].dtype != "float64":
- df[col] = df[col].apply(lambda s: "-" + s[1:-1] if s.startswith("(") else s)
- df[col] = pd.to_numeric(df[col].str.replace(",", ""))
- df["row"] = df.index
- df.to_sql("bowdst_val", dawn_engine, if_exists="append", index=False)
-
-
-def load_pnl_report(workdate: datetime.date):
- if workdate.weekday() == 0:
- workdate -= datetime.timedelta(days=2)
- p = (
- get_dir(workdate)
- / f"Net Investment Earned Income by Security_{workdate:%d %b %Y}.csv"
- )
- df = pd.read_csv(p, thousands=",", parse_dates=["Begin Date", "End Date"])
- df = df.drop(
- [
- "Reporting Account Number",
- "Reporting Account Name",
- "Reporting Account Base Currency",
- "Accounting Status",
- "Security Cross Reference Type",
- "Security Cross Reference Cusip",
- "Local Currency Description",
- "Country Of Issue",
- "Country Of Issue Description",
- "State Code",
- "Asset Type Code",
- "5500 category code",
- "5500 class code Description",
- "CINS",
- "SEDOL",
- "Valoren",
- "Sicovam",
- "WPK",
- "QUICK",
- "Underlying Sec ID",
- "Loan ID",
- "Counterparty",
- "Source Account Name",
- "Source Account Number",
- "Fair Value Hierarchy - Beginning of Period",
- "Fair Value Override - Beginning of Period",
- "Fair Value Hierarchy - End of Period",
- "Fair Value Override - End of Period",
- "Country of Registration name",
- "Country of Registration code",
- ],
- axis=1,
- )
- df.columns = df.columns.str.replace(" ", "_").str.lower()
- df = df.rename(
- columns={
- "change_in_unrealized_currency_gain_loss": "unrealized_currency",
- "change_in_unrealized_investment_gain_loss": "unrealized_investment",
- "total_change_in_unrealized_gain_loss": "total_unrealized",
- "accretion/amortization": "accretion_amortization",
- "journal_entry_accretion/amortization": "journal_entry_accretion_amortization",
- "realized_gain/loss": "realized_gain_loss",
- "journal_entry_realized_g/l": "journal_entry_realized_gl",
- "manager": "link_ref",
- "realized_gain/loss_investment": "realized_investment",
- "realized_gain/loss_currency": "realized_currency",
- "realized_gain/loss_settled": "realized_settled",
- "realized_gain/loss_traded": "realized_traded",
- }
- )
- for col in [
- "unrealized_currency",
- "unrealized_investment",
- "total_unrealized",
- "ending_shares_par",
- "opening_receivables",
- "closing_receivables",
- "income",
- "journal_entry_income",
- "accretion_amortization",
- "journal_entry_accretion_amortization",
- "realized_gain_loss",
- "journal_entry_realized_gl",
- "realized_loss_impaired_securities",
- "net_investment_income",
- "realized_investment",
- "realized_currency",
- "realized_settled",
- "realized_traded",
- ]:
- if df[col].dtypes == "object":
- df[col] = df[col].apply(lambda s: "-" + s[1:-1] if s.startswith("(") else s)
- df[col] = pd.to_numeric(df[col].str.replace(",", ""))
- df["row"] = df.index
- df.to_sql("bowdst_pnl", dawn_engine, if_exists="append", index=False)
-
-
-def cmp_positions(cob: datetime.date, df_blotter: pd.DataFrame) -> pd.DataFrame:
- workdate = (cob + bus_day).date()
- p = DAILY_DIR / str(workdate) / "Reports" / f"Asset Detail_{workdate:%d %b %Y}.csv"
- df = pd.read_csv(p, thousands=",")
- df = df[df["Asset Type"] == "FIXED INCOME SECURITIES"]
- df = df.set_index("CUSIP")
- df = df[["Shares/Par", "Base Price", "Local Market Value"]]
- for col in df.select_dtypes(include=["object"]).columns:
- df[col] = df[col].apply(lambda s: s[1:-1] if s.startswith("(") else s)
- df[col] = pd.to_numeric(df[col].str.replace(",", ""))
- check = df_blotter.join(df)
- return check
-
-
-def load_cash_report(workdate: datetime.date, cob):
- p = (
- DAILY_DIR
- / str(workdate)
- / "Reports"
- / f"Settled Cash Statement_{workdate:%d %b %Y}.csv"
- )
- df = pd.read_csv(p, thousands=",")
- df = df[
- df["Transaction Type"].isna() | df["Transaction Type"].isin(["BUY", "SELL"])
- ]
- df["Opening Balance Local"] = (
- df["Opening Balance Local"]
- .replace("[(]", "-", regex=True)
- .replace("[),]", "", regex=True)
- .astype("float")
- )
- df = df.groupby(["Account Name", "Account Number", "Local Currency Code"]).sum()
- df["date"] = cob
- df["fund"] = "BOWDST"
- df = df[["Opening Balance Local", "date", "fund"]]
- df.reset_index(inplace=True)
- df["Account Number"] = df["Account Number"].astype(
- "int64"
- ) # Account Numbers are read in as float
- df = df.rename(
- {
- "Account Name": "account_name",
- "Account Number": "account_number",
- "Local Currency Code": "currency_code",
- "Opening Balance Local": "balance",
- },
- axis=1,
- )
- df.to_sql("cash_balances", dawn_engine, if_exists="append", index=False)
-
-
-def get_positions(
- cob: datetime.date,
-) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
- dawndb = dbconn("dawndb")
- df_blotter = pd.read_sql_query(
- "SELECT * FROM risk_positions(%s, NULL, %s)",
- dawndb,
- params=(cob, "BOWDST"),
- index_col=["identifier"],
- )
-
- cds_positions = pd.read_sql_query(
- "SELECT * FROM list_cds_marks_pre(%s, NULL, %s)",
- dawndb,
- params=(cob, "BOWDST"),
- index_col=["security_id"],
- )
- tranche_positions = pd.read_sql_query(
- "SELECT id, security_id, security_desc, maturity, a.notional, "
- "protection, orig_attach, orig_detach, tranche_factor, clean_nav, "
- "accrued, cp_code, cpty_id from list_cds(%s, %s) a "
- "LEFT JOIN tranche_risk ON id=tranche_id AND date=%s "
- "WHERE orig_attach IS NOT NULL",
- dawndb,
- params=(cob, "BOWDST", cob),
- index_col=["id"],
- )
-
- return df_blotter, cds_positions, tranche_positions
-
-
-def send_email(
- em,
- workdate: datetime.date,
- df_bonds: pd.DataFrame,
- df_cds: pd.DataFrame,
- df_tranches: pd.DataFrame,
-):
- attachments = []
- for name, df in zip(("bonds", "cds", "tranches"), (df_bonds, df_cds, df_tranches)):
- buf = StringIO()
- df.to_csv(buf)
- attachments.append(
- FileAttachment(
- name=f"{workdate} {name}.csv", content=buf.getvalue().encode()
- )
- )
- buf.close()
- em.send_email(
- f"{workdate} EOD positions",
- "",
- to_recipients=("hm-operations@bnymellon.com",),
- cc_recipients=("bowdoin-ops@lmcg.com",),
- attach=attachments,
- )
-
-
-if __name__ == "__main__":
- import argparse
- from serenitas.utils.exchange import ExchangeMessage
-
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "workdate",
- nargs="?",
- type=datetime.date.fromisoformat,
- default=datetime.date.today(),
- help="working date",
- )
- args = parser.parse_args()
- em = ExchangeMessage()
- download_messages(em)
- cob = (args.workdate - bus_day).date()
- df_bonds, df_cds, df_tranches = get_positions(cob)
- send_email(em, cob, df_bonds, df_cds, df_tranches)
- load_cash_report(args.workdate, cob)
diff --git a/python/collateral/baml_fcm.py b/python/collateral/baml_fcm.py
index 7508aad1..1d986941 100644
--- a/python/collateral/baml_fcm.py
+++ b/python/collateral/baml_fcm.py
@@ -145,7 +145,7 @@ def collateral(d, positions, *, engine, fund="Serenitas", positions_irs, **kwarg
         }
     )
     df_margin = df_margin.set_index("AT", append=True)
-    df_margin = df_margin.groupby(level=(0, 1)).sum()
+    df_margin = df_margin.groupby(level=(0, 1)).sum(numeric_only=True)
     df_margin = df_margin.reset_index()
     df_margin_fut.AT = "USD"
     df_margin_fut = df_margin_fut.reset_index()
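
The numeric_only=True pin tracks a pandas behaviour change: older versions silently dropped non-numeric columns in groupby aggregations, while recent ones try to aggregate them too. A small illustration on made-up data:

import pandas as pd

df = pd.DataFrame({"g": ["a", "a", "b"], "x": [1.0, 2.0, 3.0], "label": ["u", "v", "w"]})
# numeric_only=True keeps only the numeric column, matching the old implicit behaviour
print(df.groupby("g").sum(numeric_only=True))
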
diff --git a/python/exploration/dispersion.py b/python/exploration/dispersion.py
index e633264d..a2165754 100644
--- a/python/exploration/dispersion.py
+++ b/python/exploration/dispersion.py
@@ -5,10 +5,16 @@ import statsmodels.api as sm
 import statsmodels.formula.api as smf
 from serenitas.analytics.basket_index import MarkitBasketIndex
-from serenitas.analytics import CreditIndex
+from serenitas.analytics.api import CreditIndex
 from scipy.special import logit, expit
 from serenitas.utils.db import dbengine
+from sklearn.feature_selection import RFECV, RFE
+from sklearn.model_selection import train_test_split
+from sklearn.pipeline import make_pipeline
+from sklearn.preprocessing import PolynomialFeatures, PowerTransformer
+from sklearn.linear_model import LinearRegression
+
 def get_corr_data(index_type, series, engine):
     sql_str = (
@@ -106,11 +112,6 @@ def create_models(conn, df) -> (pd.DataFrame, float):
         f.predict(bottom_stack)
     )
-    def aux(s):
-        temp = s.values
-        temp[-1] = 1 - temp[:-1].sum()
-        return temp
-
     df["predict"] = df.groupby(["index", "series", "date"])["predict"].transform(aux)
     df = df.assign(
         mispricing=(df.exp_percentage - df.predict)
@@ -148,11 +149,6 @@ def create_models_v2(conn, df, weights=None) -> (pd.DataFrame, float):
         df.predict_tranche_loss * df.thickness / df.index_expected_loss
     )
-    def aux(s):
-        temp = s.values
-        temp[-1] = 1 - temp[:-1].sum()
-        return temp
-
     df["predict"] = df.groupby(["index", "series", "date"])["predict"].transform(aux)
     df = df.assign(
         mispricing=(df.exp_percentage - df.predict)
@@ -202,6 +198,84 @@ def create_separate_models(df):
     return (calc, model)
+def aux(s):
+    temp = s.values
+    temp[-1] = 1 - temp[:-1].sum()
+    return temp
+
+
+def create_rfe_models(df, print_score=False):
+    # Takes the output of get_tranche_data
+    attach_max = df.index.get_level_values("attach").max()
+    bottom_stack = df[df.index.get_level_values("attach") != attach_max]
+    bottom_stack = bottom_stack[bottom_stack.tranche_loss_per > 0].dropna()
+
+    # prepare the variables
+    y = logit(bottom_stack["tranche_loss_per"])
+    X = bottom_stack[
+        ["index_duration", "index_basis", "att_moneyness", "det_moneyness", "gini"]
+    ]
+
+    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
+
+    pipe_rfe = make_pipeline(
+        PowerTransformer(),
+        PolynomialFeatures(interaction_only=True),
+        RFECV(estimator=LinearRegression(), cv=10, min_features_to_select=1),
+    )
+
+    pipe_rfe.fit(X_train, y_train)
+    pipe_rfe.steps[-1] = (
+        "rfe",
+        RFE(
+            estimator=LinearRegression(),
+            n_features_to_select=pipe_rfe["rfecv"].n_features_,
+        ),
+    )
+    model = pipe_rfe.fit(X_train, y_train)
+
+    df = pd.merge(
+        df,
+        pd.DataFrame(
+            expit(model.predict(X)), index=X.index, columns=["predict_tranche_loss"]
+        ),
+        how="left",
+        left_index=True,
+        right_index=True,
+    )
+
+    df.loc[
+        df.index.get_level_values("attach") != attach_max,
+        "predict_tranche_loss_per_index",
+    ] = (
+        df.predict_tranche_loss * df.thickness / df.index_expected_loss
+    )
+
+    df["predict_tranche_loss_per_index"] = df.groupby(["index", "series", "date"])[
+        "predict_tranche_loss_per_index"
+    ].transform(aux)
+    df = df.assign(
+        mispricing=(df.exp_percentage - df.predict_tranche_loss_per_index)
+        * df.index_expected_loss
+        / (df.detach_adj - df.attach_adj)
+    )
+
+    if print_score:
+        index_type = df.index[0][1]
+        print(index_type, " num features: ", model["rfe"].n_features_)
+        print(
+            index_type,
+            " Chosen columns: ",
+            np.array(model["polynomialfeatures"].get_feature_names_out(X.columns))[
+                model["rfe"].support_
+            ],
+        )
+        print(index_type, " Training Score: ", model.score(X_train, y_train))
+        print(index_type, " Testing Score: ", model.score(X_test, y_test))
+
+    return (df, model)
+
+
 if __name__ == "__main__":
     index_type = "HY"
     series = 29
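
create_rfe_models uses a two-pass selection trick: RFECV first cross-validates how many features to keep, then the final pipeline step is swapped for a plain RFE pinned to that count, so later refits are deterministic and skip the CV search. The same pattern on synthetic data (a self-contained sketch; everything here is illustrative):

from sklearn.datasets import make_regression
from sklearn.feature_selection import RFE, RFECV
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures, PowerTransformer

X, y = make_regression(n_samples=200, n_features=5, n_informative=3, random_state=0)
pipe = make_pipeline(
    PowerTransformer(),
    PolynomialFeatures(interaction_only=True),
    RFECV(estimator=LinearRegression(), cv=5),
)
pipe.fit(X, y)
# pin the cross-validated feature count into a plain RFE before the final refit
pipe.steps[-1] = (
    "rfe",
    RFE(estimator=LinearRegression(), n_features_to_select=pipe["rfecv"].n_features_),
)
pipe.fit(X, y)
print(pipe["rfe"].n_features_)
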
diff --git a/python/baml_fcm_fx.py b/python/fcm_fx.py
index 4c0d327e..2e470e67 100644
--- a/python/baml_fcm_fx.py
+++ b/python/fcm_fx.py
@@ -5,13 +5,19 @@ from report_ops.misc import _recipients, _cc_recipients
 from serenitas.utils.exchange import ExchangeMessage
 from exchangelib import HTMLBody
-_fcm_alias = {"V0NSCLMSPT": "6MZ20049"}
+_account_alias = {"V0NSCLMSPT": "6MZ20049"}
 def email_fcm(em, d, fund):
-    fcm_account = _fcm_alias.get(d["cash_account"], d["cash_account"])
+    account = _account_alias.get(d["cash_account"], d["cash_account"])
+    match d["cp_code"]:
+        case "BAMSNY":
+            recipients = _recipients["BAML_FCM"]
+        case "GOLDNY":
+            recipients = _recipients["GS_FCM"]
+    additional_instructions = f"Could you please instruct moves from DCS 057575201 to Futures {account} to settle this trade? "
     em.send_email(
-        f"FX Details: {fcm_account} Trade Date: {d['trade_date']}",
+        f"FX Details: {account} Trade Date: {d['trade_date']}",
         HTMLBody(
             f"""
 <html>
@@ -22,11 +28,11 @@
     </style>
   </head>
   <body>
-    Hello,<br><br>Please see below details for an FX Spot Trade we did with the desk today for account {fcm_account} Please let me know if you need more information.<br><br>{to_tabulate(d)}
+    Hello,<br><br>Please see below details for an FX Spot Trade we did with the desk today for account {account}. {additional_instructions if additional_instructions else ""}Please let me know if you need more information.<br><br>{to_tabulate(d)}
   </body>
 </html>"""
         ),
-        to_recipients=_recipients["BAML_FCM"],
+        to_recipients=recipients,
         cc_recipients=_cc_recipients[fund],
     )
@@ -36,9 +42,9 @@ def to_tabulate(d):
         key1, key2 = "buy", "sell"
     else:
         key1, key2 = "sell", "buy"
-    fcm_account = _fcm_alias.get(d["cash_account"], d["cash_account"])
+    account = _account_alias.get(d["cash_account"], d["cash_account"])
     line = [
-        fcm_account,
+        account,
         d[f"{key1}_currency"],
         d[f"{key1}_amount"],
         d[f"{key2}_currency"],
@@ -71,7 +77,9 @@ def to_tabulate(d):
 def main(trade_date, conn, fund, em):
     with conn.cursor() as c:
         c.execute(
-            "SELECT spots.cash_account, buy_currency, sell_currency, buy_amount, sell_amount, spot_rate, settle_date, trade_date FROM spots LEFT JOIN accounts2 USING (cash_account) WHERE account_type='Fcm' AND spots.cp_code='BAMSNY' AND spots.trade_date =%s AND spots.fund=%s",
+            "SELECT spots.cash_account, buy_currency, sell_currency, buy_amount, sell_amount, spot_rate, settle_date, trade_date, spots.cp_code FROM spots "
+            "LEFT JOIN accounts2 USING (cash_account) "
+            "WHERE account_type in ('Fcm', 'Future') AND spots.cp_code in ('BAMSNY', 'GOLDNY') AND spots.trade_date =%s AND spots.fund=%s",
             (trade_date, fund),
         )
         for rec in c:
@@ -96,5 +104,6 @@ if __name__ == "__main__":
     for fund in (
         "SERCGMAST",
         "ISOSEL",
+        "BOWDST",
     ):
         main(args.date, conn, fund, em)
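
Note that the match in email_fcm has no default arm: a cp_code outside the two values the SQL filters for would leave recipients unbound and raise NameError at send time. A defensive variant (route_fcm is a hypothetical helper; it returns keys into _recipients rather than addresses):

def route_fcm(cp_code):
    match cp_code:
        case "BAMSNY":
            return "BAML_FCM"
        case "GOLDNY":
            return "GS_FCM"
        case _:
            raise ValueError(f"no FCM recipient list configured for {cp_code!r}")

print(route_fcm("GOLDNY"))  # GS_FCM
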
diff --git a/python/load_globeop_report.py b/python/load_globeop_report.py
index 519ecaa0..678046ea 100644
--- a/python/load_globeop_report.py
+++ b/python/load_globeop_report.py
@@ -33,6 +33,7 @@ def read_valuation_report(f):
df["PeriodEndDate"] = date - bus_day
df["row"] = df.index
df["Fund"] = df.Fund.str.replace("BOS_PAT_BOWDOIN", "BOWDST")
+ df["CounterPartyCode"] = df["CounterPartyCode"].str.replace("BTIGNY", "BTIG")
if "AccountingPeriod" in df:
del df["AccountingPeriod"]
if "CounterParty" in df:
@@ -154,7 +155,7 @@ def read_cds_report(f):
df.loc[df.strategy == "SERCGMAST__MBSCDS", "strategy"] = "MBSCDS"
df.strategy = df.strategy.str.replace("SER_", "")
df["buy/sell"] = df["buy/sell"].astype("category")
- df["buy/sell"].cat.categories = ["Buyer", "Seller"]
+ df["buy/sell"] = df["buy/sell"].cat.rename_categories(["Buyer", "Seller"])
del df["independent_%"]
df2 = df2.rename(columns={"independent_%": "independent_perc"})
df.prime_broker = df.prime_broker.where(df.prime_broker != "NONE")
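
The rename_categories change follows a pandas API removal: assigning to Series.cat.categories in place is no longer supported, so the categories are renamed positionally and the result assigned back. For example:

import pandas as pd

s = pd.Series(["B", "S", "B"]).astype("category")  # categories come out sorted: ['B', 'S']
s = s.cat.rename_categories(["Buyer", "Seller"])   # positional: 'B' -> 'Buyer', 'S' -> 'Seller'
print(s.tolist())  # ['Buyer', 'Seller', 'Buyer']
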
diff --git a/python/mtm_status.py b/python/mtm_status.py
index f9d9305a..59c25a6b 100644
--- a/python/mtm_status.py
+++ b/python/mtm_status.py
@@ -21,11 +21,15 @@ def run(conn, date):
     ) as e:
         logger.warning(e)
         pass
-    files = [f for f in sftp.client.listdir("outbound") if date.strftime("%m%d%Y") in f]
+    files = [
+        f
+        for f in sftp.client.listdir("outbound")
+        if date.strftime("%m%d%Y") in f
+        if "mtm" in f
+    ]
     if not files:
         return
     target_file = max(files, key=latest)
-
     with sftp.client.open(f"outbound/{target_file}") as sftp_handle:
         with ZipFile(sftp_handle).open(target_file.removesuffix(".zip")) as zip_handle:
             df = pd.read_csv(zip_handle, skiprows=2, na_filter=False)
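
The rebuilt comprehension stacks two if clauses, which behaves exactly like joining the conditions with `and`:

files = ["mtm_01152024.zip", "positions_01152024.zip", "mtm_01142024.zip"]
a = [f for f in files if "01152024" in f if "mtm" in f]
b = [f for f in files if "01152024" in f and "mtm" in f]
assert a == b == ["mtm_01152024.zip"]
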
diff --git a/python/notebooks/dispersion_tranche_model.ipynb b/python/notebooks/dispersion_tranche_model.ipynb
index 46eb348c..56255a42 100644
--- a/python/notebooks/dispersion_tranche_model.ipynb
+++ b/python/notebooks/dispersion_tranche_model.ipynb
@@ -18,7 +18,7 @@
"import serenitas.analytics.tranche_data as tdata\n",
"\n",
"from serenitas.analytics.basket_index import MarkitBasketIndex\n",
- "from serenitas.analytics import on_the_run\n",
+ "from serenitas.analytics.index_data import on_the_run\n",
"from statsmodels.graphics.regressionplots import plot_fit\n",
"from scipy.special import logit, expit\n",
"from serenitas.utils.db import dbengine, dbconn\n",
@@ -52,117 +52,18 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "#Section 1----------------------------------------------------\n",
- "#index basis doesn't work with HY (opposite reaction to what I think)\n",
- "#RFE\n",
- "drop_variable_list = ['tranche_loss_per', 'tranche_id', 'index_price', 'detach', 'corr_at_detach', \n",
- " 'corr01', 'exp_percentage', 'indexfactor', 'duration', 'index_expected_loss',\n",
- " 'index_theta', 'delta', 'expected_loss', 'attach_adj', 'detach_adj',\n",
- " 'cumulativeloss', \n",
- " 'forward_delta', \n",
- " #Comment out to include\n",
- " # 'index_duration',\n",
- " 'thickness',\n",
- " 'moneyness',\n",
- " # 'index_basis',\n",
- " # 'att_moneyness', \n",
- " # 'det_moneyness',\n",
- " 'dispersion',\n",
- " # 'gini', \n",
- " 'gamma',\n",
- " 'theta',\n",
- " 'index_theta'\n",
- " ]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
- "def run_rfe(index_type):\n",
- " risk = disp.get_tranche_data(dbconn(\"serenitasdb\"), index_type)\n",
- " attach_max = risk.index.get_level_values(\"attach\").max()\n",
- " bottom_stack = risk[risk.index.get_level_values(\"attach\") != attach_max]\n",
- " bottom_stack = bottom_stack[bottom_stack.tranche_loss_per > 0].dropna()\n",
- "\n",
- " #prepare the variables\n",
- " y = logit(bottom_stack['tranche_loss_per'])\n",
- " X = bottom_stack.drop(drop_variable_list, axis=1)\n",
- " \n",
- " X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n",
- " \n",
- " pipe_rfe = make_pipeline (PowerTransformer(),\n",
- " #PolynomialFeatures(degree=2),\n",
- " PolynomialFeatures(interaction_only=True),\n",
- " RFECV(estimator=LinearRegression(), \n",
- " cv=10,\n",
- " min_features_to_select=1))\n",
- " \n",
- " pipe_rfe.fit(X_train, y_train)\n",
- " n_features_to_select = pipe_rfe['rfecv'].n_features_\n",
- " pipe_rfe.steps[-1]= ('rfe', RFE(estimator=LinearRegression(), n_features_to_select = n_features_to_select))\n",
- " model = pipe_rfe.fit(X_train, y_train)\n",
- " \n",
- " #RandomForest\n",
- " #params = {'n_estimators': 100,\n",
- " # 'min_samples_split': 3,\n",
- " # 'verbose':1,\n",
- " # 'n_jobs': -1}\n",
- " #randomforest = RandomForestRegressor(**params)\n",
- " \n",
- " \n",
- " #gradientboost\n",
- " #params = {'n_estimators': 500,\n",
- " # 'max_depth': 10,\n",
- " # 'min_samples_split': 3,\n",
- " # 'learning_rate': 0.01,\n",
- " # 'loss': 'huber',\n",
- " # 'verbose':1}\n",
- " #gb = GradientBoostingRegressor(**params).fit(X_train, y_train)\n",
- " \n",
- " #model = VotingRegressor([('rf', model), ('gb', gb)]).fit(X_train, y_train)\n",
- " #model = VotingRegressor([('lr', pipe_rfe)]).fit(X, logit(y))\n",
- "\n",
- " df = pd.merge(risk, \n",
- " pd.DataFrame(expit(model.predict(X)), \n",
- " index=X.index, \n",
- " columns=['predict_tranche_loss']),\n",
- " how='left', left_index=True, right_index=True)\n",
- "\n",
- " df.loc[df.index.get_level_values(\"attach\") != attach_max, \"predict_tranche_loss_per_index\"] = (\n",
- " df.predict_tranche_loss * df.thickness / df.index_expected_loss\n",
- " )\n",
- "\n",
- " def aux(s):\n",
- " temp = s.values\n",
- " temp[-1] = 1 - temp[:-1].sum()\n",
- " return temp\n",
- "\n",
- " df[\"predict_tranche_loss_per_index\"] = df.groupby([\"index\", \"series\", \"date\"])[\"predict_tranche_loss_per_index\"].transform(aux)\n",
- " df = df.assign(\n",
- " mispricing=(df.exp_percentage - df.predict_tranche_loss_per_index)\n",
- " * df.index_expected_loss\n",
- " / (df.detach_adj - df.attach_adj)\n",
- " )\n",
- " rfe_result = pipe_rfe\n",
- " print(index_type, \" num features: \", n_features_to_select)\n",
- " print(index_type, \" Chosen columns: \", np.array(rfe_result['polynomialfeatures'].get_feature_names_out(X.columns))[rfe_result['rfe'].support_])\n",
- " print(index_type, \" Training Score: \", model.score(X_train, y_train))\n",
- " print(index_type, \" Testing Score: \", model.score(X_test, y_test))\n",
- " \n",
- " return model, df, X\n",
- "\n",
- "gini_model, gini_results, gini_X = {}, {}, {}\n",
+ "#Run RFE model\n",
+ "gini_model, gini_results = {}, {}\n",
"fieldlist = ['exp_percentage','dispersion','gini','tranche_loss_per','mispricing']\n",
"for index_type in ['HY', 'IG', 'EU', 'XO']:\n",
- " gini_model[index_type], gini_results[index_type], gini_X[index_type] = run_rfe(index_type)\n",
+ " risk = disp.get_tranche_data(dbconn(\"serenitasdb\"), index_type)\n",
+ " risk = risk[risk.index_duration > 1] #filter out the short duration ones\n",
+ " gini_results[index_type], gini_model[index_type] = disp.create_rfe_models(risk)\n",
" gini_results[index_type][fieldlist].to_csv('/home/serenitas/edwin/DispersionModel/' + index_type + '_results_rfecv.csv')"
]
},
@@ -179,20 +80,27 @@
"for index_type in ['HY', 'IG', 'EU', 'XO']:\n",
" plots = {}\n",
" tranche_attach = []\n",
- "\n",
- " for i, X in gini_X[index_type].groupby('attach'):\n",
+ " \n",
+ " res = gini_results[index_type]\n",
+ " mod = gini_model[index_type]\n",
+ " \n",
+ " Xs = res[mod.feature_names_in_]\n",
+ " \n",
+ " for i, X in Xs.groupby('attach'):\n",
" tranche_attach.append(X.index[0][5])\n",
" for var in X.columns:\n",
" bins = np.linspace(X[var].min(), X[var].max(),num=steps)\n",
" testing_df = pd.DataFrame(bins, columns=[var])\n",
" for var_1 in X.drop(var, axis=1).columns:\n",
" testing_df = pd.concat([testing_df, pd.Series(np.repeat(X.iloc[-1][var_1], steps),name=var_1)], axis=1)\n",
- " plots[i, var] = pd.Series(expit(gini_model[index_type].predict(testing_df[X.columns])), index=testing_df[var])\n",
+ " plots[i, var] = pd.Series(expit(mod.predict(testing_df[X.columns])), index=testing_df[var])\n",
"\n",
+ " #breakpoint()\n",
+ " \n",
" sensitivies = pd.concat(plots, names=['attach', 'shock', 'value'])\n",
" sensitivies.to_csv('/home/serenitas/edwin/DispersionModel/' + index_type + '_sensitivies.csv')\n",
"\n",
- " fig, axes = plt.subplots(nrows=3, ncols=len(X.columns), figsize = (20,10))\n",
+ " fig, axes = plt.subplots(nrows=4, ncols=len(X.columns), figsize = (20,10))\n",
" for i, p in enumerate(plots):\n",
" x_loc = int(i/len(X.columns))\n",
" y_loc = i % len(X.columns)\n",
@@ -206,7 +114,7 @@
" rotation=90)\n",
" fig.savefig(\"/home/serenitas/edwin/PythonGraphs/dispersion_model.png\", bbox_inches='tight')\n",
"\n",
- " fig_1, axes_1 = plt.subplots(nrows=3, ncols=1, figsize = (15,8))\n",
+ " fig_1, axes_1 = plt.subplots(nrows=4, ncols=1, figsize = (15,8))\n",
" for i, p in enumerate(plots):\n",
" x_loc = int(i/len(X.columns))\n",
" plots[p].plot(ax=axes_1[x_loc], label=p[1], xlabel=\"\", legend=True)\n",
@@ -234,7 +142,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "tags": []
+ },
"outputs": [],
"source": [
"#Section 3----------------------------------------------------\n",
@@ -259,7 +169,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "tags": []
+ },
"outputs": [],
"source": [
"#plot the residuals\n",
@@ -289,69 +201,13 @@
"data = risk[['gini', 'index_duration', 'index_expected_loss']]\n",
"ols_model = smf.ols(\"gini ~ np.log(index_duration) + np.log(index_expected_loss)\", data=data).fit()\n"
]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
}
],
"metadata": {
"kernelspec": {
- "display_name": "Python 3.9.1 64-bit",
+ "display_name": "Python 3 (ipykernel)",
"language": "python",
- "name": "python39164bit6ddd573894c04d6a858a9a58880cc9d4"
+ "name": "python3"
},
"language_info": {
"codemirror_mode": {
@@ -363,7 +219,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.2"
+ "version": "3.10.8"
}
},
"nbformat": 4,
diff --git a/python/pnl_explain.py b/python/pnl_explain.py
index 1df6983b..18c4c5fc 100644
--- a/python/pnl_explain.py
+++ b/python/pnl_explain.py
@@ -159,7 +159,7 @@ def get_tranche_pv(
     # add terminations
     with conn.cursor() as c:
         c.execute(
-            "SELECT termination_fee, currency "
+            "SELECT termination_fee, cds.currency "
             "FROM terminations JOIN cds USING (dealid) "
             "WHERE termination_date=%s AND dealid LIKE 'SCCDS%%' AND fund=%s",
             (prev_day, fund),
@@ -233,7 +233,7 @@ def get_tranche_pv2(
     df = df[["clean_nav", "accrued"]]
     with conn.cursor() as c:
         c.execute(
-            "SELECT termination_date AS date, cds.id, folder, currency, "
+            "SELECT termination_date AS date, cds.id, folder, cds.currency, "
             "termination_fee AS principal "
             "FROM terminations "
             "JOIN cds USING (dealid) "
diff --git a/python/report_ops/__main__.py b/python/report_ops/__main__.py
index 68708109..2f49169c 100644
--- a/python/report_ops/__main__.py
+++ b/python/report_ops/__main__.py
@@ -1,4 +1,5 @@
 from serenitas.analytics.dates import prev_business_day
+from serenitas.utils.exchange import ExchangeMessage
 from serenitas.utils.db import dbconn
 import logging
 import argparse
@@ -93,8 +94,9 @@ if args.wire_reports:
 if args.send_to_custodians:
     conn = dbconn("dawndb")
+    em = ExchangeMessage()
     for account in ("UMB", "BBH"):
         try:
-            upload_to_custodian(account, args.date, conn, not args.no_upload)
+            upload_to_custodian(account, args.date, conn, not args.no_upload, em)
         except ValueError as e:
             logger.info(e)
diff --git a/python/report_ops/custodians.py b/python/report_ops/custodians.py
index eab6cb9f..36703f54 100644
--- a/python/report_ops/custodians.py
+++ b/python/report_ops/custodians.py
@@ -1,8 +1,8 @@
-from serenitas.utils.exchange import ExchangeMessage
+from serenitas.utils.exchange import ExchangeMessage, FileAttachment
 from serenitas.utils.env import DAILY_DIR
 import warnings
 import datetime
-from .misc import get_dir
+from .misc import get_dir, _recipients, _cc_recipients
 import gpg
 from serenitas.ops.trade_dataclasses import BondDeal
 from serenitas.ops.funds import Service
@@ -11,21 +11,28 @@ from dataclasses import dataclass
 _sql = (
     "INSERT INTO bond_csv_upload (allocationid, identifier, principal, interest) SELECT id, identifier, principal_payment, "
-    "accrued_payment FROM bond_trades WHERE trade_date=%s AND account=%s AND tradeid IS NOT NULL ON CONFLICT DO NOTHING RETURNING *"
+    "accrued_payment FROM bond_trades WHERE trade_date=%s AND account=%s AND tradeid IS NOT NULL ON CONFLICT DO NOTHING RETURNING allocationid;"
 )
+_bond_query = "SELECT * FROM bond_trades WHERE id in %s;"
-def upload_to_custodian(account, trade_date, conn, upload):
-    _fund = {"BBH": "BRINKER", "UMB": "UMB"}
-    custodian = Service[_fund[account]]
+
+def upload_to_custodian(account, trade_date, conn, upload, em):
+    _service = {"BBH": "BRINKER", "UMB": "UMB"}
+    _fund = {"BBH": "BRINKER", "UMB": "SERCGMAST"}
+    custodian = Service[_service[account]]
     with conn.cursor() as c:
         c.execute(
             _sql,
             (
                 trade_date,
-                account,
+                "BAC" if account == "UMB" else account,
             ),
         )
+        tids = tuple(row.allocationid for row in c)
+        if not tids:
+            return
+        c.execute(_bond_query, (tids,))
         for row in c:
             trade = BondDeal.from_dict(**row._asdict(), scaled=True)
             match account:
@@ -37,6 +44,14 @@ def upload_to_custodian(account, trade_date, conn, upload):
             custodian.staging_queue.clear()
     conn.commit()
     if upload:
+        em.send_email(
+            f"{account}: Bond Positions Uploaded for {trade_date}",
+            "Hi, \nWe've just uploaded the positions via SFTP. File receipt attached to this email",
+            _recipients.get(account, _cc_recipients[_fund[account]]),
+            cc_recipients=_cc_recipients[_fund[account]],
+            reply_to=_cc_recipients[_fund[account]],
+            attach=(FileAttachment(name=dest.name, content=buf),),
+        )
         custodian.upload(buf, dest.name, confirm=account != "UMB")
@@ -82,7 +98,7 @@ class UMB(Custodian, account="UMB"):
             for attach in msg.attachments:
                 timestamp = attach.last_modified_time
                 if (
-                    attach.name.startswith("cash_reporting")
+                    attach.name.startswith("cash_balances_umb")
                     and timestamp.date() == date
                 ):
                     dest = get_dir(timestamp.date(), archived=False)
@@ -138,7 +154,3 @@ class BNY(Custodian, account="BONY2"):
                 p.parent.mkdir(parents=True, exist_ok=True)
                 if not p.exists():
                     p.write_bytes(attach.content)
-
-
-class BBH(Custodian, account="BBH"):
-    pass
diff --git a/python/report_ops/misc.py b/python/report_ops/misc.py
index 6d435efe..76ec9cbb 100644
--- a/python/report_ops/misc.py
+++ b/python/report_ops/misc.py
@@ -18,7 +18,13 @@ _recipients = {
"SERENITAS.ops@sscinc.com",
),
"BAML_FCM": ("footc_margin_csr_amrs@bofa.com",),
+ "GS_FCM": (
+ "Susan.Olesky@ny.email.gs.com",
+ "Divyanshi.Girotra@gs.com",
+ "gs-margin-calls-dcs@ny.email.gs.com",
+ ),
"NYOPS": ("nyops@lmcg.com",),
+ "UMB": ("lmcgcustody@umb.com",),
}
_sma_recipients = {
@@ -38,6 +44,7 @@ _cc_recipients = {
"ISOSEL": ("selene-ops@lmcg.com",),
"BOWDST": ("bowdoin-ops@lmcg.com",),
"SERCGMAST": ("nyops@lmcg.com",),
+ "BRINKER": ("nyops@lmcg.com",),
}
diff --git a/python/report_ops/utils.py b/python/report_ops/utils.py
index 1eadde96..b624b1f8 100644
--- a/python/report_ops/utils.py
+++ b/python/report_ops/utils.py
@@ -275,31 +275,6 @@ class GFSMonitor(Payment):
         )
-class BamlFcmNotify:
-    @classmethod
-    def email_fcm(cls, date, cash_account, data):
-        em = ExchangeMessage()
-        em.send_email(
-            f"FX Details: {cash_account} {date}",
-            HTMLBody(
-                f"""
-<html>
-  <head>
-    <style>
-      table, th, td {{ border: 1px solid black; border-collapse: collapse;}}
-      th, td {{ padding: 5px; }}
-    </style>
-  </head>
-  <body>
-    Hello,<br><br>Please see below details for an FX Spot Trade we did with the desk today for account {cash_account} Please let me know if you need more information.<br><br>{data}
-  </body>
-</html>"""
-            ),
-            to_recipients=_recipients["BAML_FCM"],
-            cc_recipients=("nyops@lmcg.com",),
-        )
-
-
 @dataclass
 class EmailOps:
     _em = ExchangeMessage()
diff --git a/python/risk/__main__.py b/python/risk/__main__.py
index 3ba9f077..f8721965 100644
--- a/python/risk/__main__.py
+++ b/python/risk/__main__.py
@@ -9,6 +9,7 @@ from serenitas.utils.pool import dawn_pool
from .bonds import subprime_risk, clo_risk, crt_risk, insert_subprime_risk
from serenitas.analytics.base import Trade
from serenitas.analytics.dates import prev_business_day
+from serenitas.analytics.utils import run_local
from .indices import insert_curve_risk, insert_index_risk
from .ir_swap import insert_ir_swap_portfolio
from .ir_swaption import insert_ir_swaption_portfolio
@@ -41,6 +42,7 @@ mysql_engine = dbengine("rmbs_model")
 mysqlcrt_engine = dbengine("crt")
 funds = ("SERCGMAST", "BOWDST", "BRINKER", "ISOSEL")
+
 with dawn_pool.connection() as conn:
     for fund in funds:
         insert_curve_risk(
@@ -55,7 +57,8 @@ with dawn_pool.connection() as conn:
         insert_ir_swap_portfolio(ir_swap_portf, conn)
         insert_index_risk(workdate, conn, fund)
     portf = get_tranche_portfolio(workdate, conn, funds=funds)
-    insert_tranche_pnl_explain(portf, conn)
+    with run_local():
+        insert_tranche_pnl_explain(portf, conn)
     insert_tranche_risk(portf, conn)
     portf = get_swaption_portfolio(workdate, conn, source_list=["MS"])
     insert_swaption_portfolio(portf, conn)
diff --git a/python/risk/tranches.py b/python/risk/tranches.py
index 134baa24..01c08951 100644
--- a/python/risk/tranches.py
+++ b/python/risk/tranches.py
@@ -1,3 +1,4 @@
+from pyisda.date import cds_accrued
from serenitas.analytics.api import Portfolio, DualCorrTranche
from serenitas.analytics.dates import prev_business_day
from serenitas.analytics.utils import get_fx
@@ -55,37 +56,63 @@ def insert_tranche_pnl_explain(portf, conn):
     with conn.cursor(binary=True) as c:
         c.execute("SELECT * FROM tranche_risk WHERE date=%s", (prev_day,))
         prev_day_risk = {rec.tranche_id: rec for rec in c}
-        c.execute("SELECT id, upfront FROM cds WHERE trade_date=%s", (value_date,))
+        c.execute(
+            "SELECT cds.id, cds.upfront, cds_delta.upfront AS delta_upfront, "
+            "cds_delta.notional * (CASE WHEN cds_delta.protection='Buyer' THEN -1.0 ELSE 1.0 END) AS notional, "
+            "cds.currency::text FROM cds "
+            " LEFT JOIN cds AS cds_delta ON cds_delta.id=cds.delta_id "
+            "WHERE cds.trade_date=%s",
+            (value_date,),
+        )
         daily_trades = {rec.id: rec for rec in c}
         c.execute(
-            "SELECT dealid, termination_amount, termination_fee "
-            "FROM terminations WHERE deal_type='CDS' AND termination_date=%s",
+            "SELECT terminations.dealid, termination_amount, termination_fee, terminations.currency::text, "
+            "cds.notional * delta_alloc * (CASE WHEN cds.protection='Buyer' THEN -1.0 ELSE 1.0 END) AS notional, "
+            "cds.upfront * delta_alloc AS delta_upfront "
+            "FROM terminations LEFT JOIN cds ON cds.id=terminations.delta_id "
+            "WHERE deal_type='CDS' AND termination_date=%s",
             (value_date,),
         )
         terminations = {int(rec.dealid.removeprefix("SCCDS")): rec for rec in c}
-        today_trades = {trade_id: trade for (strat, trade_id), trade in portf.items()}
-        all_ids = today_trades.keys() | prev_day_risk.keys()
+        current_trades = {trade_id: trade for (strat, trade_id), trade in portf.items()}
+        all_ids = current_trades.keys() | prev_day_risk.keys()
         to_insert = []
         for trade_id in all_ids:
             pnl = 0
+            fx_pnl = 0
             if trade_id in daily_trades:
-                pnl += daily_trades[trade_id].upfront * get_fx(
-                    value_date, trade.currency
-                )
+                trade = daily_trades[trade_id]
+                pnl = trade.upfront * get_fx(value_date, trade.currency)
             if trade_id in terminations:
-                pnl += terminations[trade_id].termination_fee * get_fx(
-                    value_date, trade.currency
+                term = terminations[trade_id]
+                pnl += term.termination_fee * get_fx(value_date, term.currency)
+                fx_pnl += term.termination_fee * (
+                    get_fx(value_date, term.currency) - get_fx(prev_day, term.currency)
                 )
-            if trade_id not in today_trades:
+            if trade_id not in current_trades:
                 previous_risk = prev_day_risk[trade_id]
                 pnl = pnl - (previous_risk.clean_nav + previous_risk.accrued)
+                dirty_index_pv = (
+                    1
+                    - previous_risk.index_refprice * 0.01
+                    - cds_accrued(prev_day, previous_risk.running * 1e-4)
+                )
+                if (
+                    term.delta_upfront
+                ):  # if None means either no delta or we didn't populate
+                    delta_pnl = (
+                        term.delta_upfront
+                        - term.notional * dirty_index_pv * previous_risk.index_factor
+                    )
+                else:
+                    delta_pnl = 0.0
             else:
-                trade = today_trades[trade_id]
+                trade = current_trades[trade_id]
                 if trade_id in prev_day_risk:
                     previous_risk = prev_day_risk[trade_id]
-                    pnl = trade.pv * get_fx(value_date, trade.currency) - (
+                    pnl += trade.pv * get_fx(value_date, trade.currency) - (
                         previous_risk.clean_nav + previous_risk.accrued
                     )
@@ -99,21 +126,31 @@ def insert_tranche_pnl_explain(portf, conn):
                         * previous_risk.notional
                         * (
                             float(trade._index.pv())
+                            * get_fx(value_date, trade.currency)
                             - (1 - previous_risk.index_refprice * 0.01)
+                            * get_fx(prev_day, trade.currency)
                         )
-                        * get_fx(prev_day, trade.currency)
                     )
                 else:
-                    fx_pnl = trade.pv * (
-                        get_fx(value_date, trade.currency)
-                        - get_fx(prev_day, trade.currency)
-                    )
-                    delta_pnl = 0.0
-            to_insert.append((value_date, trade_id, pnl, fx_pnl, delta_pnl))
+                    fx_pnl = 0.0
+                    day_trade = daily_trades[trade_id]
+                    dirty_index_pv = float(trade._index.pv() - trade._index.accrued())
+                    if day_trade.notional:
+                        delta_pnl = (
+                            day_trade.notional * dirty_index_pv * trade._index.factor
+                            - day_trade.delta_upfront
+                        )
+                    else:  # if None means either no delta or we didn't populate
+                        delta_pnl = 0
+                    pnl += trade.pv * get_fx(value_date, trade.currency)
+            unexplained = pnl - delta_pnl - fx_pnl
+            to_insert.append(
+                (value_date, trade_id, pnl, fx_pnl, delta_pnl, unexplained)
+            )
         c.executemany(
-            "INSERT INTO tranche_pnl_explain(date, tranche_id, pnl, fx_pnl, delta_pnl) "
-            "VALUES (%s, %s, %s, %s, %s)",
+            "INSERT INTO tranche_pnl_explain(date, tranche_id, pnl, fx_pnl, delta_pnl, unexplained) "
+            "VALUES (%s, %s, %s, %s, %s, %s)",
             to_insert,
         )
     conn.commit()
@@ -151,8 +188,6 @@ def insert_tranche_risk(portf, conn):
" ON CONFLICT (date, tranche_id) DO UPDATE "
f"SET {update_str}"
)
- value_date = portf.value_date
- prev_day = prev_business_day(value_date)
with conn.cursor(binary=True) as c:
for (strat, trade_id), trade in portf.items():
logger.info(f"marking tranche {trade_id} in {strat}")
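
The pnl-explain insert now persists the residual explicitly: total PnL is decomposed into an FX leg, an index-delta leg, and whatever remains. In numbers (all figures made up, purely illustrative):

pnl = 12_500.0       # day-over-day PnL converted at today's FX
delta_pnl = 9_000.0  # part explained by the index hedge (notional * dirty index PV move)
fx_pnl = 1_200.0     # part explained by FX moves on the foreign-currency NAV
unexplained = pnl - delta_pnl - fx_pnl
print(unexplained)   # 2300.0 -> the residual the new column captures
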
diff --git a/python/strat_cash_realloc.py b/python/strat_cash_realloc.py
index f8889f54..0bfafce0 100644
--- a/python/strat_cash_realloc.py
+++ b/python/strat_cash_realloc.py
@@ -18,7 +18,7 @@ def generate_csv(date, fund="BOWDST"):
         if c.fetchone():
             return
         c.execute(
-            "SELECT * FROM list_orphaned_cash(%s, %s) ",
+            "SELECT * FROM list_orphaned_cash(%s, %s) WHERE abs(amount) > 1",
             (
                 date,
                 fund,
diff --git a/python/task_server/globeop.py b/python/task_server/globeop.py
index 560dcee3..0b9747cd 100644
--- a/python/task_server/globeop.py
+++ b/python/task_server/globeop.py
@@ -164,6 +164,12 @@ def insert_todb(engine, workdate: datetime.date, fund="SERCGMAST"):
         if not report_file.exists():
             continue
         df = fun(report_file)
+        if workdate >= datetime.date(2022, 11, 25):
+            match report:
+                case "Valuation" | "Pnl":
+                    df.loc[~df.invid.isin(["USD", "EUR"]), "custacctname"] = df.loc[
+                        ~df.invid.isin(["USD", "EUR"])
+                    ].custacctname.str.replace("V0NSCLMAMB", "159260.1")
         if report == "Valuation":
             period_end_date = pd.Timestamp(df.periodenddate[0])
             sql_str = (
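
The custody-account rewrite above touches only non-cash rows; a boolean mask reused on both sides of the .loc assignment keeps the rows aligned. Illustration with made-up rows:

import pandas as pd

df = pd.DataFrame(
    {"invid": ["USD", "912828ZT0"], "custacctname": ["V0NSCLMAMB", "V0NSCLMAMB"]}
)
mask = ~df.invid.isin(["USD", "EUR"])
# rewrite the account only where the mask is True; the USD cash row is untouched
df.loc[mask, "custacctname"] = df.loc[mask, "custacctname"].str.replace("V0NSCLMAMB", "159260.1")
print(df)
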