aboutsummaryrefslogtreecommitdiffstats
path: root/python
diff options
context:
space:
mode:
Diffstat (limited to 'python')
-rw-r--r--python/markit_tranche_quotes.py104
1 files changed, 104 insertions, 0 deletions
diff --git a/python/markit_tranche_quotes.py b/python/markit_tranche_quotes.py
new file mode 100644
index 00000000..93df191b
--- /dev/null
+++ b/python/markit_tranche_quotes.py
@@ -0,0 +1,104 @@
+import csv
+import datetime
+import io
+import pytz
+import requests
+from db import dbconn
+
# Markit report-API request parameters.
# SECURITY(review): credentials are hard-coded in source and checked into
# version control — move to an environment variable or secrets store.
params = {"username": "serenitasreports",
          "password": "_m@rk1t_",
          "reportUri": "/MarkitQuotes/CLIENTS/AllTrancheQuotes",
          "exportType": "csv"}

# Fetch the full tranche-quote report as CSV text.
# NOTE(review): no timeout and no r.raise_for_status() — a failed request
# would only surface later as a CSV parse error; confirm this is intended.
r = requests.get("https://quotes.markit.com/reports/runReport",
                 params=params)

# In-memory copy that the parsing loop below consumes.
f = io.StringIO(r.text)
# Also keep a raw on-disk snapshot of the report (debugging/audit trail).
with open("quotes.csv", "w") as fh:
    fh.write(r.text)

# Markit ticker -> internal index code stored in tranche_quotes.index.
index_mapping = {'ITRAXX-Europe': 'EU',
                 'ITRAXX-Xover': 'XO',
                 'CDX-NAIG': 'IG',
                 'CDX-NAHY': 'HY'}
# Parameterised 18-column insert; placeholders are generated and values are
# bound by the driver, so feed content cannot inject SQL.
sql_str = f"""INSERT INTO tranche_quotes(quotedate, index, series, version, tenor, attach, detach,
    trancheupfrontbid, trancheupfrontmid, trancheupfrontask,
    trancherunningbid, trancherunningmid, trancherunningask,
    indexrefprice, indexrefspread, tranchedelta, quotesource, markit_id)
    VALUES({",".join(["%s"]*18)})"""
+
def get_latest_quote_id(db):
    """Return the highest markit_id already loaded into tranche_quotes.

    Args:
        db: DB-API connection whose ``cursor()`` result supports the
            context-manager protocol (as the project's ``dbconn`` does).

    Returns:
        int: the largest stored ``markit_id``, or ``-1`` when the table is
        empty. SQL ``max()`` on an empty table yields NULL (fetched as
        ``None``), which would make the loader's
        ``int(d['quote_id']) <= markit_id`` comparison raise TypeError;
        normalising to -1 keeps that comparison valid and loads every row.
    """
    with db.cursor() as c:
        c.execute("SELECT max(markit_id) FROM tranche_quotes")
        markit_id, = c.fetchone()
    return markit_id if markit_id is not None else -1
+
def convert_float(s):
    """Parse a CSV field as float; an empty/missing field maps to None."""
    if not s:
        return None
    return float(s)
+
# ---- Load new Markit tranche quotes into tranche_quotes -------------------
serenitasdb = dbconn('serenitasdb')

# CDX-NAIG running coupon keyed by attachment point (values presumably in
# basis points — TODO confirm): runningdict2 applies to series < 19,
# runningdict1 to series 19-24; later series run at a flat 100.
runningdict1 = {0: 500, 3:100, 7:100, 15: 25}
runningdict2 = {0: 500, 3:500, 7:500, 10:100, 15:100, 30:100}
# High-water mark: only rows with a larger quote_id are inserted.
# NOTE(review): if get_latest_quote_id ever returns None (empty table),
# the "<=" comparison below raises TypeError — verify the table is seeded.
markit_id = get_latest_quote_id(serenitasdb)

# First line of the report is the header row; lower-case it so the keys
# used below ('quote_id', 'ticker', ...) match.
# NOTE(review): next(f) keeps the trailing newline, so the *last* header
# name ends in "\n" — consider .strip() before split; confirm which column
# is last in the feed.
headers = [h.lower() for h in next(f).split(",")]
count = 0
for d in csv.DictReader(f, fieldnames=headers):
    # Skip quotes already persisted in a previous run.
    if int(d['quote_id']) <= markit_id:
        continue
    # Markit timestamps are US-style; assumed UTC — TODO confirm with feed docs.
    d['quotedate'] = datetime.datetime.strptime(d['time'], "%m/%d/%Y %H:%M:%S")
    d['quotedate'] = d['quotedate'].replace(tzinfo=pytz.UTC)
    # Unknown tickers raise KeyError here — intentional fail-fast.
    d['index'] = index_mapping[d['ticker']]
    d['tenor'] = d['tenor'] + 'yr'
    # Parse the quoted sides; empty field -> None.
    for k1 in ['upfront', 'spread', 'price']:
        for k2 in ['_bid', '_ask']:
            d[k1 + k2] = convert_float(d[k1 + k2])
    # Mid = bid/ask average, only when both sides are present (and nonzero —
    # truthiness also drops an exact-0.0 side; presumably acceptable here).
    for k in ['upfront', 'spread', 'price']:
        d[k+'_mid'] = 0.5 * (d[k+'_bid'] + d[k+'_ask']) \
            if d[k + '_bid'] and d[k + '_ask'] else None
    d['series'] = int(d['series'])
    d['attachment'], d['detachment'] = int(d['attachment']), int(d['detachment'])
    # HY quotes reference the index *price*; all other indices reference spread.
    if d['ticker'] == 'CDX-NAHY':
        d['indexrefprice'] = convert_float(d['reference'])
    else:
        d['indexrefspread'] = convert_float(d['reference'])
    # CDX-NAHY tranches are quoted in price terms: store price as upfront and
    # set the fixed running coupon (0 for the 10% attach on series 9/10).
    if d['ticker'] == 'CDX-NAHY':
        for k in ['_bid', '_mid', '_ask']:
            d['upfront' + k] = d['price' + k]
            d['spread' + k] = 0 if d['series'] in [9, 10] and d['attachment'] == 10 else 500

    # iTraxx Xover: tranches attaching below 35 run at 500.
    if d['ticker'] == 'ITRAXX-Xover':
        # int() is a no-op here — attachment was converted above.
        if int(d['attachment']) < 35:
            for k in ['_bid', '_mid', '_ask']:
                ##d[f'upfront{k}'] = d[f'spread{k}']
                d['spread' + k] = 500
    # iTraxx Europe junior tranches: coupon depends on the series.
    if d['ticker'] == 'ITRAXX-Europe':
        if d['attachment'] <= 3:
            for k in ['_bid', '_mid', '_ask']:
                #d[f'upfront{k}'] = d[f'spread{k}']
                d['spread' + k] = 500 if d['series'] == 19 else 100
    # CDX-NAIG: running coupon depends on the series era and attachment.
    if d['ticker'] == 'CDX-NAIG':
        # NOTE(review): this outer loop looks vestigial — k is unused (its
        # original body is commented out) and `running` is recomputed three
        # times to the same value; consider collapsing it.
        for k in ['Bid', 'Mid', 'Ask']:
            #d[f'upfront{k}'] = d[f'spread{k}']
            if d['series'] < 19:
                running = runningdict2[d['attachment']]
            elif d['series'] < 25:
                running = runningdict1[d['attachment']]
            else:
                running = 100
        for k in ['_bid', '_mid', '_ask']:
            d['spread' + k] = running


    d['delta'] = convert_float(d['delta'])
    # Insert one row per quote; .get() is used for keys that are only set on
    # some branches above (mids, indexrefprice/indexrefspread).
    with serenitasdb.cursor() as c:
        c.execute(sql_str, (d['quotedate'], d['index'], d['series'], d['version'], d['tenor'],
                            d['attachment'], d['detachment'],
                            d['upfront_bid'], d.get('upfront_mid'), d['upfront_ask'],
                            d['spread_bid'], d.get('spread_mid'), d['spread_ask'],
                            d.get('indexrefprice'), d.get('indexrefspread'), d['delta'],
                            d['contributor'], d['quote_id']))
    count +=1
# Single commit at the end: the whole batch lands atomically (per PEP 249
# default transaction behaviour — confirm the dbconn wrapper doesn't autocommit).
serenitasdb.commit()
print(f"loaded {count} new quotes")