Diffstat (limited to 'python/import_quotes.py')
-rw-r--r--  python/import_quotes.py  148
1 file changed, 148 insertions, 0 deletions
diff --git a/python/import_quotes.py b/python/import_quotes.py
new file mode 100644
index 00000000..d6c80475
--- /dev/null
+++ b/python/import_quotes.py
@@ -0,0 +1,148 @@
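+"""Import Markit end-of-day CDS and index composite quotes into the serenitas database."""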
+import csv
+import datetime
+import os
+import sys
+
+import numpy as np
+import pandas as pd
+import psycopg2
+from pandas.tseries.offsets import BDay
+
+from common import root
+from db import serenitasdb
+
+def convert(x):
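+    """Strip the trailing character (typically a '%' sign) and parse as float; return None if not numeric."""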
+ try:
+ return float(x[:-1])
+ except ValueError:
+ return None
+
+def get_current_tickers(database, workdate):
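+    """Return (all_tickers, markit_bbg_mapping) for the tracked index series as of workdate.
+
+    all_tickers is the set of (markit_ticker, markit_tier) pairs across all indices;
+    markit_bbg_mapping maps (ticker, tier, ccy, doc_clause, coupon) to the curve tickers.
+    """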
+ sqlstr = "SELECT markit_ticker, markit_tier, cds_curve from index_members(%s, %s)"
+ markit_bbg_mapping = {}
+    all_tickers = set()
+ if workdate >= datetime.date(2014, 9, 19):
+ doc_clause = 'XR14'
+ else:
+ doc_clause = 'XR'
+ for index in ['HY9', 'HY10', 'HY15', 'HY17', 'HY19', 'HY21', 'IG9', 'IG19', 'IG21']:
+        coupon = 0.05 if 'HY' in index else 0.01  # HY names run a 500bp coupon, IG 100bp
+        with database.cursor() as c:
+            c.execute(sqlstr, (index, workdate))
+            for line in c:
+                all_tickers.add((line['markit_ticker'], line['markit_tier']))
+                key = (line['markit_ticker'], line['markit_tier'], 'USD', doc_clause, coupon)
+                hykey = key[:-1] + (0.05,)
+                if hykey in markit_bbg_mapping:
+                    # keep only the tightest quote for names present in both HY and IG
+                    del markit_bbg_mapping[hykey]
+                markit_bbg_mapping[key] = line['cds_curve']
+
+ return (all_tickers, markit_bbg_mapping)
+
+def insert_cds(database, workdate):
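+    """Load the Markit end-of-day CDS composite file for workdate into cds_quotes.
+
+    Existing rows for the date are deleted first; the composite (mid) quote is stored
+    as both bid and ask. Expected names missing from the file are printed at the end.
+    """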
+ all_tickers, markit_bbg_mapping = get_current_tickers(database, workdate)
+    filename = "cds eod {0}.csv".format(workdate.strftime("%Y%m%d"))
+ colnames = ['Upfront'+tenor for tenor in ['6m', '1y', '2y', '3y', '4y', '5y', '7y', '10y']]
+ sqlstr = "INSERT INTO cds_quotes(date, curve_ticker, upfrontbid, upfrontask," \
+ "runningbid, runningask, source, recovery) VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
+
+    tickers_found = set()
+ with database.cursor() as c:
+ c.execute("DELETE from cds_quotes where date=%s", (workdate,))
+ database.commit()
+ with open(os.path.join(root, "Tranche_data", "CDS", filename)) as fh:
+ csvreader = csv.DictReader(fh)
+ with database.cursor() as c:
+ for line in csvreader:
+ tickers_found.add((line['Ticker'], line['Tier']))
+ k = (line['Ticker'], line['Tier'], line['Ccy'], line['DocClause'], float(line['RunningCoupon']))
+ try:
+ c.executemany(sqlstr,
+ [(workdate, t, convert(line[colnames[i]]), convert(line[colnames[i]]),
+ float(line['RunningCoupon'])*10000, float(line['RunningCoupon'])*10000,
+ 'MKIT', convert(line['RealRecovery'])/100)
+ for i, t in enumerate(markit_bbg_mapping[k])])
+ except KeyError:
+ continue
+ database.commit()
+ print(all_tickers-tickers_found)
+
+def insert_cds_single(database, workdate, bbgtickers, mkt_tuple):
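+    """Backpopulate quotes for a single name: insert rows for bbgtickers whenever a CSV
+    line matches mkt_tuple = (ticker, tier, doc_clause, running_coupon, ccy).
+    """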
+    filename = "cds eod {0}.csv".format(workdate.strftime("%Y%m%d"))
+ colnames = ['Upfront'+tenor for tenor in ['6m', '1y', '2y', '3y', '4y', '5y', '7y', '10y']]
+ sqlstr = "INSERT INTO cds_quotes(date, curve_ticker, upfrontbid, upfrontask," \
+ "runningbid, runningask, source, recovery) VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
+ with open(os.path.join(root, "Tranche_data", "CDS", filename)) as fh:
+ csvreader = csv.DictReader(fh)
+ with database.cursor() as c:
+ for line in csvreader:
+ if (line['Ticker'], line['Tier'], line['DocClause'], line['RunningCoupon'], line['Ccy']) == \
+ mkt_tuple:
+ try:
+ c.executemany(sqlstr,
+ [(workdate, t, convert(line[colnames[i]]), convert(line[colnames[i]]),
+ float(line['RunningCoupon'])*10000, float(line['RunningCoupon'])*10000,
+ 'MKIT', convert(line['RealRecovery'])/100)
+ for i, t in enumerate(bbgtickers)])
+ except psycopg2.IntegrityError:
+ database.rollback()
+ database.commit()
+
+def insert_index(database, workdate):
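+    """Load index composite prices and spreads from the day's 'Indices' report into index_quotes."""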
+ basedir = os.path.join(root, 'Tranche_data', 'Composite_reports')
+ filenames = [os.path.join(basedir, f) for f in os.listdir(basedir) if 'Indices' in f]
+
+ name_mapping = {"CDXNAHY":"HY", "CDXNAIG":"IG",'ITraxx Eur': "EU"}
+ sqlstr = "INSERT INTO index_quotes VALUES(%s, %s, %s, %s, %s, %s, %s)"
+
+ for f in filenames:
+        if datetime.datetime.fromtimestamp(os.path.getctime(f)).date() == (pd.Timestamp(workdate) + BDay(1)).date():
+ data = pd.read_csv(f, skiprows=2, parse_dates=[0,7])
+ data = data.dropna()
+            data[['Composite Price', 'Composite Spread']] = data[['Composite Price', 'Composite Spread']].applymap(lambda x: float(x[:-1]) if isinstance(x, str) and x.endswith('%') else x)
+            data['Term'] = data['Term'].apply(lambda x: x.lower() + 'r')
+            data['Name'] = data['Name'].apply(lambda x: name_mapping.get(x, np.nan))
+            data = data.dropna()
+            data = data.set_index('Name', drop=False)
+            data['Composite Spread'] = data['Composite Spread'] * 100
+ toinsert = [tuple(r) for r in data[["Date", "Name", "Series", "Version", "Term",
+ "Composite Price", "Composite Spread"]].values]
+ with database.cursor() as c:
+ c.executemany(sqlstr, toinsert)
+ database.commit()
+
+if __name__ == "__main__":
+    if len(sys.argv) >= 2:
+        workdate = datetime.datetime.strptime(sys.argv[1], "%Y-%m-%d")
+    else:
+        workdate = datetime.datetime.today() - BDay(1)
+    workdate = workdate.date()
+ insert_cds(serenitasdb, workdate)
+ insert_index(serenitasdb, workdate)
+ serenitasdb.close()
+ ## backpopulate single ticker
+ # bbgtickers = ['CT675194bis', 'CT406651bis', 'CT406655bis', 'CX404662bis', 'CX404666bis',
+ # 'CX404670bis', 'CX404678bis', 'CX404690bis']
+ # mkt_tuple = ('REALOGR', 'SNRFOR', 'XR', "0.05", 'USD')
+ # for f in os.listdir(os.path.join(root, "Tranche_data", "CDS")):
+ # if f.endswith("csv"):
+ # workdate = datetime.datetime.strptime(f.split(" ")[2].split(".")[0], "%Y%m%d")
+ # workdate = workdate.date()
+ # insert_cds_single(serenitasdb, workdate, bbgtickers, mkt_tuple)