Diffstat (limited to 'python/markit/import_quotes.py')
-rw-r--r--  python/markit/import_quotes.py  176
1 file changed, 176 insertions(+), 0 deletions(-)
diff --git a/python/markit/import_quotes.py b/python/markit/import_quotes.py
new file mode 100644
index 00000000..7bbb6e89
--- /dev/null
+++ b/python/markit/import_quotes.py
@@ -0,0 +1,176 @@
+import csv
+import datetime
+import logging
+import os
+from collections import defaultdict
+
+import numpy as np
+import pandas as pd
+from pandas.tseries.offsets import BDay
+
+from common import root
+
+logger = logging.getLogger(__name__)
+
+def convert(x):
+    """Parse a Markit percentage field such as '1.2345%' into a float.
+
+    Returns None for empty or otherwise non-numeric fields.
+    """
+    try:
+        return float(x[:-1])
+    except ValueError:
+        return None
+
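+# Illustrative contract for convert (values made up): the Markit file quotes
+# percentages as strings, so convert('1.2345%') -> 1.2345, while an empty or
+# malformed field gives convert('') -> None.
+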
+## index series we load quotes for (CDX HY/IG and iTraxx Europe/Xover)
+index_list = ['HY9', 'HY10'] + ['HY' + str(s) for s in range(15, 26)] + ['IG9'] + \
+             ['IG' + str(s) for s in range(16, 26)] + ['XO22', 'XO23', 'XO24', 'EU9'] + \
+             ['EU' + str(s) for s in range(19, 25)]
+
+## Markit doc-clause names map onto the short codes used in our curve keys;
+## the codes carry a '14' suffix once the ISDA 2014 Definitions apply. Note
+## the source data apparently truncates 'Modified Modified Restructuring'
+## to 30 characters, hence the missing trailing 'g'.
+DOC_CLAUSE_MAPPING14 = {'Full Restructuring': 'MM14',
+                        'No Restructuring': 'XR14',
+                        'Modified Modified Restructurin': 'MM14'}
+
+DOC_CLAUSE_MAPPING = {'Full Restructuring': 'MM',
+                      'No Restructuring': 'XR',
+                      'Modified Modified Restructurin': 'MM'}
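+
+# Example: the same 'No Restructuring' contract keys as 'XR' before the
+# cutover date used below (2014-09-19) and as 'XR14' from then on.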
+
+def get_markit_bbg_mapping(database, basketid_list, workdate):
+    """Build the mapping from Markit CDS keys to bbg curve tickers.
+
+    Returns (all_tickers, markit_bbg_mapping), where all_tickers is the set
+    of (markit_ticker, markit_tier) pairs we expect quotes for.
+    """
+    if workdate >= datetime.date(2014, 9, 19):
+        doc_clause_mapping = DOC_CLAUSE_MAPPING14
+    else:
+        doc_clause_mapping = DOC_CLAUSE_MAPPING
+    markit_bbg_mapping = defaultdict(set)
+    all_tickers = set()
+    with database.cursor() as c:
+        c.execute("SELECT markit_ticker, markit_tier, spread, currency, cds_curve, "
+                  "doc_clause FROM historical_cds_issuers(%s) WHERE index_list && %s",
+                  (workdate, basketid_list))
+        for line in c:
+            all_tickers.add((line['markit_ticker'], line['markit_tier']))
+            key = (line['markit_ticker'], line['markit_tier'], line['currency'],
+                   doc_clause_mapping[line['doc_clause']], float(line['spread']) / 10000)
+            ## one-off override: this name quotes under the pre-2014 doc clause
+            if key == ('CESEOP', 'SNRFOR', 'USD', 'XR14', 0.05):
+                key = ('CESEOP', 'SNRFOR', 'USD', 'XR', 0.05)
+            ## each Markit ticker can map to multiple bbg tickers; those bbg
+            ## tickers can have different curves (fine) or the same curve
+            ## (not fine, since (date, curve_ticker) must be unique), so we
+            ## keep them in a set structure
+            markit_bbg_mapping[key].add(tuple(line['cds_curve']))
+    database.commit()
+    return (all_tickers, markit_bbg_mapping)
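+
+# Illustrative shape of the returned mapping (ticker and curve names are made
+# up, not real data): one Markit key fans out to a set of bbg curve tuples,
+# each tuple holding one curve ticker per quoted tenor, e.g.
+#   markit_bbg_mapping[('ACME', 'SNRFOR', 'USD', 'XR', 0.01)] ==
+#       {('ACME 6M', 'ACME 1Y', 'ACME 2Y', 'ACME 3Y',
+#         'ACME 4Y', 'ACME 5Y', 'ACME 7Y', 'ACME 10Y')}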
+
+
+def get_basketids(database, index_list, workdate):
+    """Resolve index names such as 'HY25' into basket ids as of workdate."""
+    r = []
+    with database.cursor() as c:
+        for index in index_list:
+            c.execute("SELECT * FROM nameToBasketID(%s, %s)", (index, workdate))
+            r.append(c.fetchone()[0])
+    database.commit()
+    return r
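+
+# Usage sketch (ids are made up): get_basketids(database, ['HY25', 'IG25'],
+# workdate) might return [941, 942], one basket id per index name.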
+
+def get_current_tickers(database, workdate):
+ basketid_list = get_basketids(database, index_list, workdate)
+ return get_markit_bbg_mapping(database, basketid_list, workdate)
+
+def insert_cds(database, workdate):
+    """Insert Markit end-of-day single-name CDS quotes into the database.
+
+    :param database: psycopg2 connection to the database.
+    :param workdate: date of the 'cds eod' file to load.
+    """
+
+ all_tickers, markit_bbg_mapping = get_current_tickers(database, workdate)
+ filename = "cds eod {0:%Y%m%d}.csv".format(workdate)
+ colnames = ['Upfront'+tenor for tenor in ['6m', '1y', '2y', '3y', '4y', '5y', '7y', '10y']]
+ sqlstr = "INSERT INTO cds_quotes(date, curve_ticker, upfrontbid, upfrontask," \
+ "runningbid, runningask, source, recovery) VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
+
+    tickers_found = set()
+    with database.cursor() as c:
+        c.execute("DELETE FROM cds_quotes WHERE date=%s", (workdate,))
+    database.commit()
+    with open(os.path.join(root, "Tranche_data", "CDS", filename)) as fh:
+        csvreader = csv.DictReader(fh)
+        with database.cursor() as c:
+            for line in csvreader:
+                k = (line['Ticker'], line['Tier'], line['Ccy'], line['DocClause'],
+                     float(line['RunningCoupon']))
+                if k not in markit_bbg_mapping:
+                    continue
+                ## RealRecovery can be blank, in which case we store NULL
+                recovery = convert(line['RealRecovery'])
+                if recovery is not None:
+                    recovery /= 100
+                running = float(line['RunningCoupon']) * 10000
+                for curves in markit_bbg_mapping[k]:
+                    ## the file carries a single composite level per tenor,
+                    ## so we store it as both bid and ask
+                    c.executemany(sqlstr,
+                                  [(workdate, t, convert(line[colnames[i]]),
+                                    convert(line[colnames[i]]), running, running,
+                                    'MKIT', recovery)
+                                   for i, t in enumerate(curves)])
+                tickers_found.add((line['Ticker'], line['Tier']))
+    database.commit()
+    missing = all_tickers - tickers_found
+    if missing:
+        logger.warning('missing quotes for %s', missing)
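+
+# Usage sketch (connection details are illustrative, not part of this module):
+#   conn = psycopg2.connect('dbname=markit')
+#   insert_cds(conn, datetime.date(2015, 6, 1))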
+
+def insert_index(engine, workdate=None):
+    """Insert Markit index quotes into the database.
+
+ :param engine: sqlalchemy engine to the database
+ :param workdate: date. If None, we will try to reinsert all files
+ """
+
+    basedir = os.path.join(root, 'Tranche_data', 'Composite_reports')
+    filenames = [os.path.join(basedir, f) for f in os.listdir(basedir) if 'Indices' in f]
+
+    name_mapping = {'CDXNAHY': 'HY', 'CDXNAIG': 'IG',
+                    'iTraxx Eur': 'EU', 'iTraxx Eur Xover': 'XO'}
+    cols = ['closeprice', 'closespread', 'modelprice', 'modelspread']
+    colmapping = {'Date': 'date', 'Name': 'index', 'Series': 'series', 'Version': 'version',
+                  'Term': 'tenor', 'Composite Price': 'closeprice', 'Composite Spread': 'closespread',
+                  'Model Price': 'modelprice', 'Model Spread': 'modelspread'}
+    ext_cols = ['date', 'index', 'series', 'version', 'tenor'] + cols + \
+               ['adjcloseprice', 'adjmodelprice']
+    for f in filenames:
+        if workdate is None or \
+           datetime.datetime.fromtimestamp(os.path.getmtime(f)).date() == (workdate + BDay(1)).date():
+            data = pd.read_csv(f, skiprows=2, parse_dates=[0, 7], engine='python')
+            data.rename(columns=colmapping, inplace=True)
+            data.dropna(subset=['closeprice'], inplace=True)
+            ## quoted levels come in as percentage strings like '1.23%'
+            for col in cols:
+                data[col] = data[col].str.replace('%', '').astype('float')
+            data['tenor'] = data['tenor'].apply(lambda x: x.lower() + 'r')
+            data['index'] = data['index'].apply(lambda x: name_mapping.get(x, np.nan))
+            data = data.dropna(subset=['index'])
+            data.set_index('index', drop=False, inplace=True)
+            data['closespread'] *= 100
+            data['modelspread'] *= 100
+            ## we renumbered the versions for HY9, HY10 and HY11
+            data.loc[data.series.isin([9, 10, 11]) & (data.index == 'HY'), 'version'] -= 3
+            data['adjcloseprice'] = data['closeprice']
+            data['adjmodelprice'] = data['modelprice']
+            ## keep one row per (index, series, tenor, date) -- the last in the file
+            data = data.groupby(['index', 'series', 'tenor', 'date']).last()
+            data.reset_index(inplace=True)
+            data[ext_cols].to_sql('index_quotes', engine, if_exists='append', index=False)
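+
+# Usage sketch (engine URL is illustrative):
+#   engine = sqlalchemy.create_engine('postgresql:///markit')
+#   insert_index(engine)                             # reload every file
+#   insert_index(engine, datetime.date(2015, 6, 1))  # one workdate only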
+
+def insert_tranche(engine, workdate=None):
+    """Insert Markit tranche quotes into the database.
+
+    :param engine: sqlalchemy engine to the database
+    :param workdate: date. If None, we will try to reinsert all files
+    """
+
+ basedir = os.path.join(root, 'Tranche_data', 'Composite_reports')
+ filenames = [os.path.join(basedir, f) for f in os.listdir(basedir) if f.startswith('Tranche Composites')]
+ index_version = pd.read_sql_table("index_version", engine, index_col='redindexcode')
+ for f in filenames:
+        if workdate is None or \
+           datetime.datetime.fromtimestamp(os.path.getmtime(f)).date() == (workdate + BDay(1)).date():
+ df = pd.read_csv(f, skiprows=2, parse_dates=['Date'])
+ df.rename(columns={'Date':'quotedate',
+ 'Index Term':'tenor',
+ 'Attachment':'attach',
+ 'Detachment':'detach',
+ 'Tranche Upfront Bid': 'upfront_bid',
+ 'Tranche Upfront Mid': 'upfront_mid',
+ 'Tranche Upfront Ask': 'upfront_ask',
+ 'Index Price Mid': 'index_price',
+ 'Tranche Spread Mid': 'tranche_spread',
+ 'Red Code':'redindexcode'}, inplace=True)
+            ## scale attach/detach to percent and the spread to basis points
+            df.attach = df.attach * 100
+            df.detach = df.detach * 100
+            df.tranche_spread = df.tranche_spread * 10000
+            df.tenor = df.tenor.str.lower() + 'r'
+ df.set_index('redindexcode', inplace=True)
+ df = df.join(index_version)
+ df = df.filter(['basketid', 'quotedate', 'tenor', 'attach', 'detach',
+ 'upfront_bid', 'upfront_ask', 'upfront_mid',
+ 'tranche_spread', 'index_price'])
+ df.to_sql('markit_tranche_quotes', engine, if_exists='append', index=False)
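+
+# Usage sketch, mirroring insert_index (engine URL is illustrative):
+#   engine = sqlalchemy.create_engine('postgresql:///markit')
+#   insert_tranche(engine, datetime.date(2015, 6, 1))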