import numpy as np
import pandas as pd

from analytics import CreditIndex
from utils.db import serenitas_pool

def load_sheet(index, series):
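    """Load one 5Y tranche sheet (e.g. "5Y HY33") from the client workbook and insert its quotes into tranche_quotes."""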
    df = pd.read_excel(
        "/home/serenitas/CorpCDOs/Tranche_data/USTrancheClientFile11072019.XLS",
        sheet_name=f"5Y {index.upper()}{series}",
        skiprows=[0, 1, 2, 3, 4],
    )
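    # Parametrized INSERT for the 13 columns stored per tranche quote.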
    sql_str = (
        "INSERT INTO tranche_quotes("
        "quotedate, index, series, version, tenor, attach, detach, "
        "trancheupfrontmid, trancherunningmid, indexrefprice, "
        "tranchedelta, corratdetachment, quotesource) "
        f"VALUES({','.join(['%s'] * 13)})"
    )
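    # Standard tranche attachment points: 0/15/25/35% for HY, 0/3/7/15% for IG.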
    df = df.set_index("Date")
    if index == "HY":
        cols = [0.0, 0.15, 0.25, 0.35]
        df_upfront = df[["0% - 15%", "15% - 25%", "25% - 35%", "35% - 100%"]]
    else:
        cols = [0.0, 0.03, 0.07, 0.15]
        df_upfront = df[["0-3%", "3-7%", "7-15%", "15-100%"]]
    df_upfront.columns = cols
    df_upfront = df_upfront.stack()
    df_upfront.name = "upfront"
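    # The Delta and Correlation headers repeat across tranches, so pandas reads
    # them as Delta/.1/.2/.3 and Correlation/.1/.2; the sheet carries no
    # correlation for the most senior tranche.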
    df_delta = df[["Delta", "Delta.1", "Delta.2", "Delta.3"]]
    df_delta.columns = cols
    df_delta = df_delta.stack()
    df_delta.name = "delta"
    df_corr = df[["Correlation", "Correlation.1", "Correlation.2"]]
    df_corr.columns = cols[:-1]
    df_corr = df_corr.stack()
    df_corr.name = "correlation"
    df_detach = pd.DataFrame(
        np.repeat([cols[1:] + [1.0]], len(df.index), 0), index=df.index, columns=cols
    ).stack()
    df_detach.name = "detach"
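    # Combine upfront, delta, correlation and detach into one frame keyed by (date, attach).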
    df_merged = pd.concat([df_upfront, df_delta, df_corr, df_detach], axis=1)
    df_merged.index.names = ["date", "attach"]
    if index == "HY":
        # HY sheets quote upfront as a fraction; store as a price (100 minus points).
        df_merged["price"] = 100.0 * (1 - df_merged.upfront)
    else:
        # IG sheets quote upfront as a fraction; store as upfront points.
        df_merged["price"] = 100 * df_merged.upfront
    df_merged = df_merged.reset_index("attach")
    df_final = df_merged.join(df["Spread (bp)"])
    df_final = df_final.rename(columns={"Spread (bp)": "indexspread"})
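    # Reprice the 5Y index at each date's quoted spread to pick up the index
    # version and reference price, then insert one row per tranche.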
    conn = serenitas_pool.getconn()
    credit_index = CreditIndex(index, series, "5yr")
    with conn.cursor() as c:
        for t in df_final.itertuples():
            credit_index.value_date = t.Index.date()
            credit_index.spread = t.indexspread
            c.execute(
                sql_str,
                (
                    t.Index + pd.DateOffset(hours=17),  # quotedate stamped at 17:00
                    index,
                    series,
                    credit_index.version,
                    "5yr",
                    int(t.attach * 100),
                    int(t.detach * 100),
                    t.price,
                    500,  # trancherunningmid (hardcoded)
                    credit_index.price,
                    t.delta,
                    t.correlation,
                    "MSre",  # quotesource
                ),
            )
    conn.commit()
    serenitas_pool.putconn(conn)
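
# Backfill the 5Y sheets: every listed HY series plus the IG series after 29.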
if __name__ == "__main__":
    for index in ("IG", "HY"):
        for series in (25, 27, 29, 31, 33):
            if index == "IG" and series <= 29:
                continue
            load_sheet(index, series)