Diffstat (limited to 'python/cds_curve.py')
-rw-r--r--  python/cds_curve.py  102
1 file changed, 66 insertions, 36 deletions
diff --git a/python/cds_curve.py b/python/cds_curve.py
index d967c50c..ba353269 100644
--- a/python/cds_curve.py
+++ b/python/cds_curve.py
@@ -2,16 +2,21 @@ from pyisda.curve import YieldCurve, BadDay, SpreadCurve
from pyisda.credit_index import CreditIndex
from pyisda.legs import FeeLeg, ContingentLeg
from pyisda.logging import enable_logging
+
import datetime
import math
+import numpy as np
import pandas as pd
from yieldcurve import YC, ql_to_jp
from quantlib.settings import Settings
from quantlib.time.api import Date
-import numpy as np
-from db import dbconn
+from db import dbconn, dbengine
from concurrent.futures import ProcessPoolExecutor, as_completed
from itertools import zip_longest, chain
+from index_data import get_index_quotes
+from pandas.tseries.offsets import BDay
+from scipy.optimize import brentq
+from dateutil.relativedelta import relativedelta
def get_singlenames_quotes(indexname, date):
    conn = dbconn('serenitasdb')
@@ -23,8 +28,11 @@ def build_curve(r, today_date, yc, start_date, step_in_date, value_date, end_dat
    spread_curve = 1e-4 * np.array(r['spread_curve'][1:])
    upfront_curve = 1e-2 * np.array(r['upfront_curve'][1:])
    recovery_curve = np.array(r['recovery_curve'][1:])
-    sc = SpreadCurve(today_date, yc, start_date, step_in_date, value_date,
-                     end_dates, spread_curve, upfront_curve, recovery_curve, True)
+    try:
+        sc = SpreadCurve(today_date, yc, start_date, step_in_date, value_date,
+                         end_dates, spread_curve, upfront_curve, recovery_curve, True)
+    except ValueError:
+        import pdb; pdb.set_trace()  # debug hook: curve bootstrap failed
    return (r['cds_ticker'], sc)
def grouper(iterable, n, fillvalue=None):
@@ -43,7 +51,6 @@ def build_curves_dist(quotes, args, workers=4):
def build_curves(quotes, args):
    return [build_curve(q, *args) for q in quotes if q is not None]
-
def all_curves_pv(curves, today_date, jp_yc, start_date, step_in_date, value_date, maturities):
    r = {}
    for d in maturities:
@@ -85,36 +92,59 @@ def forward_hazard_rates(sc):
        h1, t1 = h2, t2
    return t, r
-Settings().evaluation_date = Date(6, 2, 2017)
-yc = YC()
-jp_yc = ql_to_jp(yc)
-today_date = datetime.date(2017, 2, 6)
-step_in_date = datetime.date(2017, 2, 7)
-value_date = datetime.date(2017, 2, 9)
-start_date = datetime.date(2016, 12, 20)
-end_dates = [datetime.date(2017, 12, 20),
-             datetime.date(2018, 12, 20),
-             datetime.date(2019, 12, 20),
-             datetime.date(2020, 12, 20),
-             datetime.date(2021, 12, 20),
-             datetime.date(2023, 12, 20),
-             datetime.date(2026, 12, 20)]
-
-quotes = get_singlenames_quotes("ig27", today_date)
-maturities = [datetime.date(2019, 12, 20),
-              datetime.date(2021, 12, 20),
-              datetime.date(2023, 12, 20),
-              datetime.date(2026, 12, 20)]
-args = (today_date, jp_yc, start_date, step_in_date, value_date, maturities)
-curves = build_curves_dist(quotes, args)
-test = all_curves_pv(curves, *args)
-ig27 = CreditIndex(start_date, maturities, curves)
-test2 = ig27.pv_vec(step_in_date, value_date, jp_yc, 0.4)
+serenitas_engine = dbengine('serenitasdb')
-from index_data import get_index_quotes
-quotes = get_index_quotes("IG", 27, ['3yr', '5yr', '7yr', '10yr'])
+def calibrate_portfolio(index_type, series):
+    if index_type == 'IG':
+        recovery = 0.4
+    else:
+        recovery = 0.3
+    tenors = ['3yr', '5yr', '7yr', '10yr']
+    index_quotes = get_index_quotes(index_type, series,
+                                    tenors)['closeprice'].unstack()
+    index_desc = pd.read_sql_query("SELECT tenor, maturity, coupon FROM index_maturity "
+                                   "WHERE index=%s AND series=%s", serenitas_engine,
+                                   index_col='tenor', params=(index_type, series))
-def calibrate_portfolio(index, step_in_date, value_date, yc, recovery, quotes):
-    for i, m in index.maturities:
-        eps = brentq(lambda epsilon: index.pv(step_in_date, value_date, m, yc, recovery, epsilon)
-                     - quote, -0.15, 0.3)
+    index_quotes.columns = index_desc.loc[index_quotes.columns, "maturity"]
+    index_quotes = index_quotes.sort_index(axis=1)
+    end_dates = [datetime.date(2017, 12, 20),
+                 datetime.date(2018, 12, 20),
+                 datetime.date(2019, 12, 20),
+                 datetime.date(2020, 12, 20),
+                 datetime.date(2021, 12, 20),
+                 datetime.date(2023, 12, 20),
+                 datetime.date(2026, 12, 20)]
+    maturities = index_desc.maturity.tolist()
+    start_date = datetime.date(2016, 9, 20)
+    r = {}
+    for k, s in index_quotes.iterrows():
+        trade_date = k[0].date()
+        print(trade_date)
+        sn_quotes = get_singlenames_quotes("{}{}".format(index_type.lower(), series),
+                                           trade_date)
+        Settings().evaluation_date = Date.from_datetime(trade_date)
+        yc = YC()
+        jp_yc = ql_to_jp(yc)
+        step_in_date = trade_date + datetime.timedelta(days=1)
+        value_date = (pd.Timestamp(trade_date) + 3 * BDay()).date()
+        args = (trade_date, jp_yc, start_date, step_in_date, value_date, end_dates)
+        curves = build_curves_dist(sn_quotes, args)
+        index = CreditIndex(start_date, maturities, curves)
+        d = {'tweak': [],
+             'duration': [],
+             'theta': []}
+        for i, m in enumerate(maturities):
+            index_quote = 1 - s.iat[i] / 100
+            eps = brentq(lambda epsilon: index.pv(step_in_date, value_date, m, jp_yc,
+                                                  recovery, 0.01, epsilon) - index_quote,
+                         -0.3, 0.3)
+            # tweak the curves in place before calibrating the next maturity
+            index.tweak_portfolio(eps, m)
+            d['duration'].append(index.duration(step_in_date, value_date, m, jp_yc))
+            d['theta'].append(index_quote - index.theta(step_in_date, value_date,
+                                                        m - relativedelta(years=1),
+                                                        jp_yc, recovery, 0.01) + 0.01)
+            d['tweak'].append(eps)
+        r[trade_date] = pd.DataFrame(d, index=tenors)
+    return pd.concat(r)
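
The per-maturity calibration above is a one-dimensional root find: brentq searches the bracket [-0.3, 0.3] for the tweak epsilon at which the repriced index PV matches the quoted upfront (1 - closeprice / 100). A self-contained toy version of that pattern, with a made-up monotone pricing function standing in for CreditIndex.pv, is sketched below.

from scipy.optimize import brentq

def toy_pv(epsilon, base_pv=0.02, sensitivity=0.5):
    # made-up stand-in for CreditIndex.pv: PV moves linearly with the tweak epsilon
    return base_pv + sensitivity * epsilon

target_quote = 0.015  # e.g. 1 - closeprice / 100
eps = brentq(lambda epsilon: toy_pv(epsilon) - target_quote, -0.3, 0.3)
# eps is the tweak that reprices the toy index to the quote (here -0.01)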
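
For completeness, a minimal driver for the new calibrate_portfolio entry point might look as follows; this is only a sketch, assuming the serenitasdb connection and the index_data helpers imported above are available and that IG series 27 quotes exist for the requested tenors.

if __name__ == '__main__':
    # DataFrame indexed by (trade_date, tenor) via the final pd.concat(r)
    results = calibrate_portfolio('IG', 27)
    print(results.xs('5yr', level=1)[['tweak', 'duration', 'theta']])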