Diffstat (limited to 'python/analytics')
-rw-r--r--  python/analytics/tranche_data.py  65
1 file changed, 37 insertions(+), 28 deletions(-)
diff --git a/python/analytics/tranche_data.py b/python/analytics/tranche_data.py
index f4a13a9e..e3c2de96 100644
--- a/python/analytics/tranche_data.py
+++ b/python/analytics/tranche_data.py
@@ -30,10 +30,10 @@ def get_tranche_quotes(
             op = "IN"
             return "{} IN %({})s".format(key, key)
         elif key == "from_date":
-            col_key = "quotedate"
+            col_key = "date"
            op = ">="
         elif key == "end_date":
-            col_key = "quotedate"
+            col_key = "date"
             op = "<="
         else:
             op = "="
@@ -44,8 +44,8 @@ def get_tranche_quotes(
     )
     sql_str = (
         "SELECT * from "
-        "(SELECT quotedate, b.index, b.series, a.tenor, b.version, "
-        "a.attach, a.detach, upfront_mid, a.index_price, indexfactor, "
+        "(SELECT quotedate as date, b.index, b.series, a.tenor, b.version, "
+        "a.attach, a.detach, (1-upfront_mid) as close_price, a.index_price, indexfactor, "
         "cumulativeloss, c.delta, a.tranche_spread "
         "from markit_tranche_quotes a "
         "left join index_version b using (basketid)"
@@ -66,19 +66,14 @@ def get_tranche_quotes(
     df = pd.read_sql_query(
         sql_str,
         serenitas_engine,
-        parse_dates={"date": {"utc": True}},
-        index_col=["quotedate", "index", "series", "version"],
+        parse_dates=["date"],
+        index_col=["date", "index", "series", "version"],
         params=make_params(args),
     )
     df.tenor = df.tenor.astype(tenor_t)
     df = df.set_index("tenor", append=True)
     df.sort_index(inplace=True)
     df = df.assign(
-        close_price=lambda x: x.upfront_mid
-        if index in ["IG", "EU"]
-        else 1 - x.upfront_mid
-    )
-    df = df.assign(
         attach_adj=lambda x: np.maximum(
             (x.attach - x.cumulativeloss) / df.indexfactor, 0
         ),
@@ -118,27 +113,41 @@ def tranche_returns(
     """
     if df is None:
         df = get_tranche_quotes(index, series, tenor, from_date, end_date, years)
-
-    df = df.groupby(level=["quotedate", "index", "series", "tenor", "attach"]).nth(0)
-    g = df.groupby(level=["index", "series", "tenor", "attach"])
-    tranche_return = g.close_price.shift(0) * g.adj_thickness.shift(
-        0
-    ) / g.adj_thickness.shift(1) - g.close_price.shift(1)
-    index_return = g.index_price.diff()
-    returns = pd.concat(
-        [index_return, tranche_return], axis=1, keys=["index_return", "tranche_return"]
+    df = df.groupby(level=["date", "index", "series", "tenor", "attach"]).nth(0)
+    coupon_data = pd.read_sql_query(
+        "SELECT index, series, tenor, coupon * 1e-4 AS coupon "
+        " FROM index_maturity WHERE coupon is NOT NULL",
+        serenitas_engine,
+        index_col=["index", "series", "tenor"],
     )
-    df = df.merge(returns, left_index=True, right_index=True)
-    df["date"] = df.index.get_level_values(level="quotedate")
-    df["day_frac"] = df.groupby(level=["index", "series", "tenor", "attach"])[
-        "date"
-    ].transform(lambda s: s.diff().astype("timedelta64[D]") / 360)
-    df["tranche_return"] += df.day_frac * df.tranche_spread / 10000
-    df = df.assign(deladj_return=lambda x: x.tranche_return - x.delta * x.index_return)
+    df = df.join(coupon_data)
+    df["date_1"] = df.index.get_level_values(level="date")
+    # skip missing dates
+    returns = []
+    for i, g in df.groupby(level=["index", "series", "tenor", "attach"]):
+        g = g.dropna()
+        day_frac = g["date_1"].transform(
+            lambda s: s.diff().astype("timedelta64[D]") / 360
+        )
+        tranche_return = g.close_price.shift(0) * g.adj_thickness.shift(
+            0
+        ) / g.adj_thickness.shift(1) - g.close_price.shift(1)
+        index_return = g.index_price.diff()
+        tranche_return += day_frac * g.tranche_spread / 10000
+        index_return += day_frac * g.coupon
+        deladj_return = tranche_return - g.delta.shift(1) * index_return
+        returns.append(
+            pd.concat(
+                [index_return, tranche_return, deladj_return],
+                axis=1,
+                keys=["index_return", "tranche_return", "deladj_return"],
+            )
+        )
+    df = df.merge(pd.concat(returns), left_index=True, right_index=True, how="left")
     df = df.drop(
-        ["date", "day_frac", "tranche_spread", "cumulativeloss", "detach"], axis=1
+        ["date_1", "tranche_spread", "cumulativeloss", "detach", "coupon"], axis=1
     )
     return df
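
For reference, the per-group return calculation added in this patch can be exercised on its own. The sketch below runs it over a toy three-date frame whose index levels and columns mirror what the rewritten query returns (close_price is (1 - upfront_mid), quotedate is aliased to date, coupon arrives pre-scaled by 1e-4). The instrument, prices, delta, and spread values are made up, serenitas_engine and the real tables are not touched, and the aging s.diff().astype("timedelta64[D]") idiom is swapped for .dt.days, so treat it as an illustration rather than the production code path.

import pandas as pd

# Toy quote history: one tranche (HY series 31, 5yr, 0% attach) over three dates.
idx = pd.MultiIndex.from_product(
    [
        pd.to_datetime(["2019-01-02", "2019-01-03", "2019-01-04"]),
        ["HY"],
        [31],
        ["5yr"],
        [0.0],
    ],
    names=["date", "index", "series", "tenor", "attach"],
)
df = pd.DataFrame(
    {
        "close_price": [0.884, 0.886, 0.885],     # (1 - upfront_mid) from the query
        "adj_thickness": [0.15, 0.15, 0.15],      # loss-adjusted tranche width
        "index_price": [1.045, 1.047, 1.046],
        "delta": [4.2, 4.2, 4.2],                 # tranche delta vs. the index
        "tranche_spread": [500.0, 500.0, 500.0],  # running spread, bp
        "coupon": [0.05, 0.05, 0.05],             # index coupon, already * 1e-4
    },
    index=idx,
)

returns = []
for _, g in df.groupby(level=["index", "series", "tenor", "attach"]):
    g = g.dropna()  # skip missing dates, as in the patch
    # Act/360 accrual fraction between consecutive quote dates.
    dates = g.index.get_level_values("date").to_series(index=g.index)
    day_frac = dates.diff().dt.days / 360
    # Price return, rescaled when realized losses shrink the tranche thickness.
    tranche_return = (
        g.close_price * g.adj_thickness / g.adj_thickness.shift(1)
        - g.close_price.shift(1)
    )
    index_return = g.index_price.diff()
    # Carry: running spread for the tranche, fixed coupon for the index.
    tranche_return += day_frac * g.tranche_spread / 10000
    index_return += day_frac * g.coupon
    # Hedge with the previous day's delta, matching delta.shift(1) in the patch.
    deladj_return = tranche_return - g.delta.shift(1) * index_return
    returns.append(
        pd.concat(
            [index_return, tranche_return, deladj_return],
            axis=1,
            keys=["index_return", "tranche_return", "deladj_return"],
        )
    )

print(pd.concat(returns))

The first row of each group comes out NaN, since there is no prior quote to difference against; the patch keeps those rows by merging the concatenated returns back with how="left". Using the previous close's delta (delta.shift(1)) instead of the same-day delta is one of the substantive changes here: the hedge ratio actually available over the return period is the one observed at its start.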