"""Load Markit index-tranche quotes into serenitasdb.

Downloads the AllTrancheQuotes CSV report from quotes.markit.com,
normalises each row (UTC timestamp, tenor, index code, bid/mid/ask
floats, per-index running-spread conventions, index version fallbacks)
and inserts the results into tranche_quotes_ref / tranche_quotes_tranches.
"""
import csv
import datetime
import io
import logging
from functools import lru_cache, partial
from itertools import chain

import requests

from quote_parsing.parse_emails import get_current_version
from serenitas.utils.db2 import dbconn

# NOTE(review): credentials are hard-coded in source; move them to an
# environment variable or a secrets store.
params = {
    "username": "serenitasreports",
    "password": "_m@rk1t_",
    "reportUri": "/MarkitQuotes/CLIENTS/AllTrancheQuotes",
    "exportType": "csv",
    "START_TIME": "11/13/2017",
}
r = requests.post("https://quotes.markit.com/reports/runReport", params=params)
# Fail fast on an HTTP error instead of feeding an error page to the CSV parser.
r.raise_for_status()
f = io.StringIO(r.text)

# Markit ticker -> internal index code.
index_mapping = {
    "ITRAXX-Europe": "EU",
    "ITRAXX-Xover": "XO",
    "CDX-NAIG": "IG",
    "CDX-NAHY": "HY",
}


def convert_float(s):
    """Return float(s), or None for an empty/blank field."""
    return float(s) if s else None


serenitasdb = dbconn("serenitasdb")
# Memoised (index, series, quotedate) -> version lookup against the DB.
get_version = lru_cache()(partial(get_current_version, conn=serenitasdb))

# CDX-NAIG running-spread conventions, attachment -> running coupon (bp).
runningdict1 = {0: 500, 3: 100, 7: 100, 15: 25}  # series 19-24
runningdict2 = {0: 500, 3: 500, 7: 500, 10: 100, 15: 100, 30: 100}  # series < 19

headers = [h.lower() for h in next(f).strip().split(",")]
to_insert = {}  # quote-set key -> list of tranche value tuples
tranche_ref = []  # one row per new quote set, for tranche_quotes_ref

for d in csv.DictReader(f, fieldnames=headers):
    # Markit timestamps are quoted in UTC.
    d["quotedate"] = datetime.datetime.strptime(
        d["time"], "%m/%d/%Y %H:%M:%S"
    ).replace(tzinfo=datetime.timezone.utc)
    d["index"] = index_mapping[d["ticker"]]
    # Keep standard tenors only.
    if d["tenor"] not in ("1", "2", "3", "4", "5", "7", "10"):
        continue
    d["tenor"] = d["tenor"] + "yr"

    # Parse bid/ask and derive the mid for each quoted measure.
    for k1 in ("upfront", "spread", "price"):
        for k2 in ("_bid", "_ask"):
            d[k1 + k2] = convert_float(d[k1 + k2])
        d[k1 + "_mid"] = (
            0.5 * (d[k1 + "_bid"] + d[k1 + "_ask"])
            if d[k1 + "_bid"] is not None and d[k1 + "_ask"] is not None
            else None
        )

    d["series"] = int(d["series"])
    d["attachment"] = int(d["attachment"])
    d["detachment"] = int(d["detachment"])

    if d["version"] == "":
        # Version missing from the feed: look it up for that quote date.
        d["version"] = get_version(d["index"], d["series"], d["quotedate"].date())
    else:
        d["version"] = int(d["version"])

    ref = convert_float(d["reference"])
    if d["ticker"] == "CDX-NAHY":
        if d["contributor"] == "MS" and ref > 115.0:
            # A "price" reference above 115 from MS is really a spread:
            # keep the row but flag it deleted and zero the upfronts.
            d["deleted"] = True
            d["indexrefspread"] = ref
            for k in ("_bid", "_mid", "_ask"):
                d["upfront" + k] = 0.0
        else:
            d["indexrefprice"] = ref
            for k in ("_bid", "_mid", "_ask"):
                if d["price" + k]:
                    # HY tranches are quoted in price terms.
                    d["upfront" + k] = d["price" + k]
                elif d["spread" + k]:
                    # The spread column actually carried the upfront;
                    # restore the fixed running-coupon convention.
                    d["upfront" + k] = d["spread" + k]
                    d["spread" + k] = (
                        0
                        if d["series"] in (9, 10) and d["attachment"] == 10
                        else 500
                    )
    else:
        d["indexrefspread"] = ref

    # Per-index running-spread conventions override the quoted spread.
    if d["ticker"] == "ITRAXX-Xover":
        if d["attachment"] < 35:
            for k in ("_bid", "_mid", "_ask"):
                d["spread" + k] = 500
    if d["ticker"] == "ITRAXX-Europe":
        if d["attachment"] <= 3:
            for k in ("_bid", "_mid", "_ask"):
                d["spread" + k] = 500 if d["series"] == 19 else 100
    if d["ticker"] == "CDX-NAIG":
        if d["series"] < 19:
            try:
                running = runningdict2[d["attachment"]]
            except KeyError:
                # Non-standard attachment: skip this record.
                # NOTE(review): the original surrounded this in a vestigial
                # for-loop that made the `continue` target ambiguous;
                # skipping the row is the sensible reading — confirm.
                continue
        elif d["series"] < 25:
            running = runningdict1[d["attachment"]]
        else:
            running = 100
        for k in ("_bid", "_mid", "_ask"):
            d["spread" + k] = running

    d["delta"] = convert_float(d["delta"])
    if d["delta"] is not None and d["delta"] >= 100:
        # Deltas >= 100 are clearly bogus; log the row and drop the value.
        logging.error(d)
        d["delta"] = None

    # Hard-coded version fallbacks for quotes the DB lookup did not resolve.
    # NOTE(review): these fire only if get_version can return "" — confirm.
    if d["version"] == "" and d["index"] == "HY" and d["series"] in (29, 31):
        d["version"] = 5
    if d["version"] == "" and d["index"] == "HY" and d["series"] == 27:
        d["version"] = 7
    if d["version"] == "":
        if d["index"] == "IG":
            d["version"] = 1
        elif d["index"] == "EU":
            if d["series"] == 32:
                d["version"] = 1
            elif d["series"] in (28, 30):
                d["version"] = 2

    key_ref = (
        d["quotedate"],
        d["index"],
        d["series"],
        d["version"],
        d["tenor"],
        d["contributor"][:4],
    )
    val = (
        d["attachment"],
        d["detachment"],
        d["upfront_bid"],
        d["upfront_mid"],
        d["upfront_ask"],
        d["spread_bid"],
        d["spread_mid"],
        d["spread_ask"],
        d["delta"],
        d["quote_id"],
        d.get("deleted", False),
    )
    if key_ref not in to_insert:
        tranche_ref.append(
            (
                *key_ref[:-1],
                d.get("indexrefprice"),
                d.get("indexrefspread"),
                key_ref[-1],
            )
        )
        to_insert[key_ref] = [val]
    else:
        to_insert[key_ref].append(val)

quoteset_mapping = []
serenitasdb.execute("SET time zone 'UTC'")
with serenitasdb.pipeline():
    with serenitasdb.cursor() as c:
        # Queue one insert per new quote set; results are collected below.
        for tr in tranche_ref:
            c.execute(
                "INSERT INTO tranche_quotes_ref(quotedate, index, series, version, tenor, ref_price, ref_spread, quotesource) "
                "VALUES (%s, %s, %s, %s, %s, %s, %s, %s) "
                "ON CONFLICT (quotedate, index, series, version, tenor, quotesource) DO NOTHING "
                "RETURNING quoteset, quotedate, index, series, version, tenor, quotesource",
                tr,
            )
        # Walk every result set. An insert that hit ON CONFLICT DO NOTHING
        # returns no row; skip it and keep going. (The original broke out of
        # the loop on the first conflict, dropping all later quotesets.)
        while True:
            row = c.fetchone()
            if row is not None:
                quoteset, *key = row
                quoteset_mapping.append((quoteset, tuple(key)))
            if not c.nextset():
                break
        # Insert the tranche rows for every newly created quote set.
        c.executemany(
            "INSERT INTO tranche_quotes_tranches("
            "quoteset, attach, detach, upfront_bid, upfront_mid, upfront_ask, "
            "running_bid, running_mid, running_ask, delta, markit_id, deleted)"
            "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
            chain.from_iterable(
                [(q, *v) for v in to_insert[key]] for q, key in quoteset_mapping
            ),
        )
serenitasdb.commit()
print(f"loaded {len(quoteset_mapping)} new quotes")