-rw-r--r--  python/load_ms_data.py  78
1 file changed, 50 insertions, 28 deletions
diff --git a/python/load_ms_data.py b/python/load_ms_data.py
index 856a418c..0c493a36 100644
--- a/python/load_ms_data.py
+++ b/python/load_ms_data.py
@@ -3,48 +3,55 @@ from utils.db import serenitas_pool
import numpy as np
import pandas as pd
+
def load_sheet(index, series):
- df = pd.read_excel("/home/serenitas/CorpCDOs/Tranche_data/USTrancheClientFile11072019.XLS",
- sheet_name=f'5Y {index.upper()}{series}', skiprows=[0, 1, 2, 3, 4])
+ df = pd.read_excel(
+ "/home/serenitas/CorpCDOs/Tranche_data/USTrancheClientFile11072019.XLS",
+ sheet_name=f"5Y {index.upper()}{series}",
+ skiprows=[0, 1, 2, 3, 4],
+ )
- sql_str = ("INSERT INTO tranche_quotes("
- "quotedate, index, series, version, tenor, attach, detach, "
- "trancheupfrontmid, trancherunningmid, indexrefprice, "
- "tranchedelta, corratdetachment, quotesource) "
- f"VALUES({','.join(['%s'] * 13)})")
+ sql_str = (
+ "INSERT INTO tranche_quotes("
+ "quotedate, index, series, version, tenor, attach, detach, "
+ "trancheupfrontmid, trancherunningmid, indexrefprice, "
+ "tranchedelta, corratdetachment, quotesource) "
+ f"VALUES({','.join(['%s'] * 13)})"
+ )
df = df.set_index("Date")
if index == "HY":
- cols = [0., 0.15, 0.25, 0.35]
+ cols = [0.0, 0.15, 0.25, 0.35]
else:
- cols = [0., 0.03, 0.07, 0.15]
+ cols = [0.0, 0.03, 0.07, 0.15]
if index == "HY":
- df_upfront = df[['0% - 15%', '15% - 25%', '25% - 35%', '35% - 100%']]
+ df_upfront = df[["0% - 15%", "15% - 25%", "25% - 35%", "35% - 100%"]]
else:
- df_upfront = df[['0-3%', '3-7%', '7-15%', '15-100%']]
+ df_upfront = df[["0-3%", "3-7%", "7-15%", "15-100%"]]
df_upfront.columns = cols
df_upfront = df_upfront.stack()
- df_upfront.name = 'upfront'
- df_delta = df[['Delta', 'Delta.1', 'Delta.2', 'Delta.3']]
+ df_upfront.name = "upfront"
+ df_delta = df[["Delta", "Delta.1", "Delta.2", "Delta.3"]]
df_delta.columns = cols
df_delta = df_delta.stack()
- df_delta.name = 'delta'
- df_corr = df[['Correlation', 'Correlation.1', 'Correlation.2']]
+ df_delta.name = "delta"
+ df_corr = df[["Correlation", "Correlation.1", "Correlation.2"]]
df_corr.columns = cols[:-1]
df_corr = df_corr.stack()
- df_corr.name = 'correlation'
- df_detach = pd.DataFrame(np.repeat([cols[1:] + [1.]], len(df.index), 0),
- index=df.index, columns=cols).stack()
- df_detach.name='detach'
+ df_corr.name = "correlation"
+ df_detach = pd.DataFrame(
+ np.repeat([cols[1:] + [1.0]], len(df.index), 0), index=df.index, columns=cols
+ ).stack()
+ df_detach.name = "detach"
df_merged = pd.concat([df_upfront, df_delta, df_corr, df_detach], axis=1)
- df_merged.index.names = ['date', 'attach']
+ df_merged.index.names = ["date", "attach"]
if index == "HY":
- df_merged['price'] = 100. * (1 - df_merged.upfront)
+ df_merged["price"] = 100.0 * (1 - df_merged.upfront)
else:
- df_merged['price'] = 100 * df_merged.upfront
+ df_merged["price"] = 100 * df_merged.upfront
df_merged = df_merged.reset_index("attach")
- df_final = df_merged.join(df['Spread (bp)'])
- df_final = df_final.rename(columns={'Spread (bp)': 'indexspread'})
+ df_final = df_merged.join(df["Spread (bp)"])
+ df_final = df_final.rename(columns={"Spread (bp)": "indexspread"})
conn = serenitas_pool.getconn()
credit_index = CreditIndex(index, series, "5yr")
@@ -52,13 +59,28 @@ def load_sheet(index, series):
for t in df_final.itertuples():
credit_index.value_date = t.Index.date()
credit_index.spread = t.indexspread
- c.execute(sql_str, (t.Index + pd.DateOffset(hours=17), index, series,
- credit_index.version, "5yr",
- int(t.attach * 100),
- int(t.detach*100), t.price, 500, credit_index.price, t.delta, t.correlation, "MSre"))
+ c.execute(
+ sql_str,
+ (
+ t.Index + pd.DateOffset(hours=17),
+ index,
+ series,
+ credit_index.version,
+ "5yr",
+ int(t.attach * 100),
+ int(t.detach * 100),
+ t.price,
+ 500,
+ credit_index.price,
+ t.delta,
+ t.correlation,
+ "MSre",
+ ),
+ )
conn.commit()
serenitas_pool.putconn(conn)
+
if __name__ == "__main__":
for index in ("IG", "HY"):
for series in (25, 27, 29, 31, 33):
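
For reference, the reformatted load_sheet() builds its long (date, attach) table by renaming the wide tranche columns to attachment points, stacking them, and concatenating the named Series before writing rows to tranche_quotes. Below is a minimal, self-contained sketch of that stack/concat pattern using made-up dates, column names, and values (not taken from the MS spreadsheet), for illustration only:

import numpy as np
import pandas as pd

# Synthetic wide frame: one row per quote date, one column per tranche.
dates = pd.date_range("2019-11-01", periods=3, freq="B", name="Date")
df = pd.DataFrame(
    {"0-3%": [0.45, 0.46, 0.44], "3-7%": [0.12, 0.13, 0.11]},
    index=dates,
)

cols = [0.0, 0.03]  # attachment points corresponding to the two columns
df_upfront = df[["0-3%", "3-7%"]]
df_upfront.columns = cols
df_upfront = df_upfront.stack()  # long Series indexed by (Date, attach)
df_upfront.name = "upfront"

# Detachment points repeated for every date and stacked the same way.
df_detach = pd.DataFrame(
    np.repeat([[0.03, 0.07]], len(df.index), 0), index=df.index, columns=cols
).stack()
df_detach.name = "detach"

df_merged = pd.concat([df_upfront, df_detach], axis=1)
df_merged.index.names = ["date", "attach"]
print(df_merged.reset_index("attach"))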