Diffstat (limited to 'python')
-rw-r--r--  python/markit_tranche_quotes_csv.py  116
1 file changed, 116 insertions, 0 deletions
diff --git a/python/markit_tranche_quotes_csv.py b/python/markit_tranche_quotes_csv.py
new file mode 100644
index 00000000..6393edc1
--- /dev/null
+++ b/python/markit_tranche_quotes_csv.py
@@ -0,0 +1,116 @@
+import csv
+import datetime
+import pytz
+from db import dbconn
+
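+# Map Markit index tickers to the short index codes used in the database.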
+index_mapping = {'ITRAXX-Europe': 'EU',
+                 'ITRAXX-Xover': 'XO',
+                 'CDX-NAIG': 'IG',
+                 'CDX-NAHY': 'HY'}
+
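+# Parameterised insert into the tranche quote table; ON CONFLICT DO NOTHING skips quotes that are already loaded.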
+sql_str = f"""INSERT INTO tranche_quotes_test(quotedate, index, series, version, tenor, attach, detach,
+                  trancheupfrontbid, trancheupfrontmid, trancheupfrontask,
+                  trancherunningbid, trancherunningmid, trancherunningask,
+                  indexrefprice, indexrefspread, tranchedelta, quotesource, markit_id)
+              VALUES({",".join(["%s"]*18)}) ON CONFLICT DO NOTHING"""
+
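+# Markit leaves missing numeric fields empty; map '' to None so they are stored as NULL.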
+def convert_float(s):
+    return float(s) if s else None
+
+serenitasdb = dbconn('serenitasdb')
+
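+# Fixed running coupons (bp) keyed by tranche attachment point; which dict applies
+# depends on the index and series, see the per-index blocks below.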
+runningdict1 = {0: 500, 3: 100, 7: 100, 15: 25}
+runningdict2 = {0: 500, 3: 500, 7: 500, 10: 100, 15: 100, 30: 100}
+runningdict3 = {0: 500, 3: 500, 6: 300}
+
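+# Markit tranche quote export; lower-case the header row so fields can be looked up by name.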
+f = open("/home/serenitas/CorpCDOs/data/GetTrancheQuotesByTime.csv")
+headers = [h.lower() for h in next(f).strip().split(",")]
+
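+# Normalise each quote: parse the timestamp as UTC, map the ticker, and rewrite the
+# quote into upfront + fixed running form according to each index's convention.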
+rows = []
+for d in csv.DictReader(f, fieldnames=headers):
+    d['quotedate'] = datetime.datetime.strptime(d['time'], "%m/%d/%Y %H:%M:%S")
+    d['quotedate'] = d['quotedate'].replace(tzinfo=pytz.UTC)
+    try:
+        d['index'] = index_mapping[d['ticker']]
+    except KeyError:
+        continue
+    if d['tenor'] in ('15', '157', '25', '30', '1994'):
+        continue
+    d['tenor'] = d['tenor'] + 'yr'
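+    # Convert bid/ask fields to floats and compute mids where both sides are quoted.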
+    for k1 in ['upfront', 'spread']:
+        for k2 in ['bid', 'ask']:
+            d[k1 + k2] = convert_float(d[k1 + k2])
+    for k in ['upfront', 'spread']:
+        d[k + 'mid'] = 0.5 * (d[k + 'bid'] + d[k + 'ask']) \
+            if d[k + 'bid'] and d[k + 'ask'] else None
+    d['series'] = int(d['series'])
+    d['attachment'], d['detachment'] = int(d['attachment']), int(d['detachment'])
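+    # The 'reference' field is an index price for CDX HY and an index spread for the other indices.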
+    if d['ticker'] == 'CDX-NAHY':
+        d['indexrefprice'] = convert_float(d['reference'])
+    else:
+        d['indexrefspread'] = convert_float(d['reference'])
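+    # CDX HY tranches are quoted upfront with a fixed running coupon
+    # (500, or 0 for the 10% attachment of series 9/10).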
+    if d['ticker'] == 'CDX-NAHY':
+        for k in ['bid', 'mid', 'ask']:
+            d['upfront' + k] = d['spread' + k]
+            d['spread' + k] = 0 if d['series'] in [9, 10] and d['attachment'] == 10 else 500
+
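+    # iTraxx Crossover tranches below 35% attachment are quoted upfront with a 500 running coupon.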
+    if d['ticker'] == 'ITRAXX-Xover':
+        if int(d['attachment']) < 35:
+            for k in ['bid', 'mid', 'ask']:
+                d[f'upfront{k}'] = d[f'spread{k}']
+                d['spread' + k] = 500
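+    # iTraxx Europe: series 4, 6-9 and 15 use the runningdict3 coupons for attachments up to 6;
+    # other series quote the <=3 attachment upfront with a 500 (series 19) or 100 running coupon.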
+    if d['ticker'] == 'ITRAXX-Europe':
+        if d['series'] in [4, 6, 7, 8, 9, 15]:
+            if int(d['attachment']) <= 6:
+                for k in ['bid', 'mid', 'ask']:
+                    d[f'upfront{k}'] = d[f'spread{k}']
+                    d['spread' + k] = runningdict3[int(d['attachment'])]
+        else:
+            if d['attachment'] <= 3:
+                for k in ['bid', 'mid', 'ask']:
+                    d[f'upfront{k}'] = d[f'spread{k}']
+                    d['spread' + k] = 500 if d['series'] == 19 else 100
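+    # CDX IG tranches are quoted upfront; the fixed running coupon depends on the series vintage
+    # (runningdict2 below series 19, runningdict1 below 25, 100 thereafter).
+    # Attachments with no convention are skipped.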
+    if d['ticker'] == 'CDX-NAIG':
+        for k in ['bid', 'mid', 'ask']:
+            d[f'upfront{k}'] = d[f'spread{k}']
+        if d['series'] < 19:
+            try:
+                running = runningdict2[d['attachment']]
+            except KeyError:
+                continue
+        elif d['series'] < 25:
+            try:
+                running = runningdict1[d['attachment']]
+            except KeyError:
+                continue
+        else:
+            running = 100
+        for k in ['bid', 'mid', 'ask']:
+            d['spread' + k] = running
+
+    d['delta'] = convert_float(d['delta'])
+    rows.append(d)
+f.close()
+count = 0
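+# Insert the normalised quotes. Contributor codes are truncated to four characters
+# (BROWNSTONE is stored as BIG); missing versions default to 1 for iTraxx Europe and
+# CDX IG and 0 otherwise.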
+with serenitasdb.cursor() as c:
+    for d in rows:
+        if d['contributor'] == 'BROWNSTONE':
+            d['contributor'] = 'BIG'
+        if d['version'] == '' and d['ticker'] in ('ITRAXX-Europe', 'CDX-NAIG'):
+            d['version'] = 1
+        elif d['version'] == '':
+            d['version'] = 0
+        d['contributor'] = d['contributor'][:4]
+        c.execute(sql_str, (d['quotedate'], d['index'], d['series'], d['version'], d['tenor'],
+                            d['attachment'], d['detachment'],
+                            d['upfrontbid'], d.get('upfrontmid'), d['upfrontask'],
+                            d['spreadbid'], d.get('spreadmid'), d['spreadask'],
+                            d.get('indexrefprice'), d.get('indexrefspread'), d['delta'],
+                            d['contributor'], d['quoteid']))
+        count += 1
+serenitasdb.commit()
+print(f"loaded {count} new quotes")