aboutsummaryrefslogtreecommitdiffstats
path: root/python
diff options
context:
space:
mode:
Diffstat (limited to 'python')
-rw-r--r--python/analytics/index_data.py10
-rw-r--r--python/markit/import_quotes.py74
2 files changed, 70 insertions, 14 deletions
diff --git a/python/analytics/index_data.py b/python/analytics/index_data.py
index fc660df6..ae4ec982 100644
--- a/python/analytics/index_data.py
+++ b/python/analytics/index_data.py
@@ -251,6 +251,16 @@ def get_singlenames_curves(
return fun(index_type, series, min(datetime.date.today(), trade_date), tenors)
+def get_singlenames_curves2(index_type, series, trade_date):
+    conn = serenitas_pool.getconn()
+    try:
+        with conn.cursor() as c:
+            c.execute("SELECT * FROM index_curves(%s, %s)", (f"{index_type}{series}", trade_date))
+            r = [(w, SpreadCurve.from_bytes(b, True)) for w, b in c]
+    finally:
+        serenitas_pool.putconn(conn)
+    return r
+
def get_tranche_quotes(
index_type, series, tenor, date=datetime.date.today(), source="Serenitas"
):
diff --git a/python/markit/import_quotes.py b/python/markit/import_quotes.py
index f224df53..9947b8dd 100644
--- a/python/markit/import_quotes.py
+++ b/python/markit/import_quotes.py
@@ -6,8 +6,11 @@ import pandas as pd
import os
from collections import defaultdict
+from dataclasses import dataclass
from itertools import chain
from pandas.tseries.offsets import BDay
+from pyisda.curve import SpreadCurve, DocClause, Seniority
+from yieldcurve import get_curve
logger = logging.getLogger(__name__)
@@ -16,7 +19,7 @@ def convert(x):
try:
return float(x[:-1])
except ValueError:
- return None
+ return np.nan
def get_index_list(database, workdate):
@@ -45,31 +48,42 @@ DOC_CLAUSE_MAPPING = {
}
+@dataclass(frozen=True)
+class CurveKey:
+ ticker: str
+ tier: str
+ currency: str
+ short_code: str
+ spread: int
+
+
def get_markit_bbg_mapping(database, basketid_list, workdate):
markit_bbg_mapping = defaultdict(set)
all_tickers = set([])
with database.cursor() as c:
c.execute(
"SELECT markit_ticker, markit_tier, spread, currency, cds_curve, "
- " short_code FROM historical_cds_issuers(%s) "
+ " short_code, company_id, seniority FROM historical_cds_issuers(%s) "
"JOIN basket_constituents USING (company_id, seniority) "
"WHERE basketid=ANY(%s)",
(workdate, list(basketid_list)),
)
for rec in c:
all_tickers.add((rec.markit_ticker, rec.markit_tier))
- key = (
+ key = CurveKey(
rec.markit_ticker,
rec.markit_tier,
rec.currency,
rec.short_code,
- float(rec.spread) / 10000,
+ rec.spread,
)
## each markit ticker can be mapped to multiple bbg tickers
## these bbg tickers can have different curves (ok)
## or same curves (not ok since date, curve_ticker needs to be unique)
## therefore we keep them in a set structure
- markit_bbg_mapping[key].add(tuple(rec.cds_curve))
+ markit_bbg_mapping[key].add(
+ (tuple(rec.cds_curve), (rec.company_id, Seniority[rec.seniority]))
+ )
database.commit()
return (all_tickers, markit_bbg_mapping)
@@ -119,14 +133,24 @@ def insert_cds(database, workdate):
)
tickers_found = set()
+ coupon_100 = np.full(8, 0.01)
+ coupon_500 = np.full(8, 0.05)
+ tenors = np.array([0.5, 1, 2, 3, 4, 5, 7, 10])
+ yc_dict = {curr: get_curve(workdate, curr) for curr in ["USD", "JPY", "EUR"]}
+ seniority_mapping = {
+ "SNRFOR": 0,
+ "SUBLT2": 1,
+ "SECDOM": 1,
+ "SNRLAC": 2,
+ }
with open(
os.path.join(os.environ["BASE_DIR"], "Tranche_data", "CDS", filename)
) as fh:
csvreader = csv.DictReader(fh)
with database.cursor() as c:
for line in csvreader:
- spread = float(line["RunningCoupon"])
- k = (
+ spread = int(float(line["RunningCoupon"]) * 10000)
+ k = CurveKey(
line["Ticker"],
line["Tier"],
line["Ccy"],
@@ -134,21 +158,43 @@ def insert_cds(database, workdate):
spread,
)
if k in markit_bbg_mapping:
- for curves in markit_bbg_mapping[k]:
+ upfront_rates = np.array([convert(line[c]) / 100 for c in colnames])
+ recovery_rates = np.full(8, convert(line["RealRecovery"]) / 100)
+ coupon_rates = coupon_100 if spread == 100 else coupon_500
+ sc = SpreadCurve(
+ workdate,
+ yc_dict[k.currency],
+ None,
+ None,
+ None,
+ tenors,
+ coupon_rates,
+ upfront_rates,
+ recovery_rates,
+ ticker=k.ticker,
+ seniority=seniority_mapping[k.tier],
+ doc_clause=DocClause[k.short_code],
+ )
+ buf = sc.as_buffer(True)
+ for curves, (cid, sen) in markit_bbg_mapping[k]:
+ c.execute(
+ "INSERT INTO cds_curves VALUES(%s, %s, %s, %s)",
+ (workdate, cid, sen.name, buf),
+ )
c.executemany(
sqlstr,
[
(
workdate,
t,
- convert(line[col]),
- convert(line[col]),
- spread * 10000,
- spread * 10000,
+ upf * 100,
+ upf * 100,
+ spread,
+ spread,
"MKIT",
- convert(line["RealRecovery"]) / 100,
+ recovery_rates[0],
)
- for col, t in zip(colnames, curves)
+ for t, upf in zip(curves, upfront_rates)
],
)
tickers_found.add((line["Ticker"], line["Tier"]))