aboutsummaryrefslogtreecommitdiffstats
path: root/python
diff options
context:
space:
mode:
Diffstat (limited to 'python')
-rw-r--r--python/cds_curve.py133
1 file changed, 133 insertions, 0 deletions
diff --git a/python/cds_curve.py b/python/cds_curve.py
new file mode 100644
index 00000000..4f96fbfb
--- /dev/null
+++ b/python/cds_curve.py
@@ -0,0 +1,133 @@
+from pyisda.curve import YieldCurve, BadDay, SpreadCurve, BasketIndex
+from pyisda.legs import FeeLeg, ContingentLeg
+from pyisda.logging import enable_logging
+import datetime
+import math
+import pandas as pd
+from yieldcurve import YC, ql_to_jp
+from quantlib.settings import Settings
+from quantlib.time.api import Date
+import numpy as np
+from db import dbconn
+from concurrent.futures import ProcessPoolExecutor, as_completed
+from itertools import zip_longest, chain
+
def get_singlenames_quotes(indexname, date):
    """Fetch single-name CDS curve quotes for *indexname* as of *date*.

    Runs the ``curve_quotes(indexname, date)`` stored procedure on
    serenitasdb and returns all result rows as a list.
    """
    conn = dbconn('serenitasdb')
    try:
        with conn.cursor() as c:
            # Parameterized query — values are bound server-side.
            c.execute("SELECT * FROM curve_quotes(%s, %s)", vars=(indexname, date))
            return c.fetchall()
    finally:
        # BUG FIX: the original closed the cursor but leaked the
        # connection; close it once the rows are materialized.
        conn.close()
+
def build_curve(r, today_date, yc, start_date, step_in_date, value_date, end_dates):
    """Build one (ticker, SpreadCurve) pair from a quote record *r*.

    Spreads are quoted in bps and upfronts in percent; both are rescaled
    to decimals. The leading element of each quoted curve is dropped
    (presumably a non-tenor header value — TODO confirm against the
    curve_quotes schema).
    """
    spreads = np.array(r['spread_curve'][1:]) * 1e-4     # bps -> decimal
    upfronts = np.array(r['upfront_curve'][1:]) * 1e-2   # percent -> decimal
    recoveries = np.array(r['recovery_curve'][1:])
    curve = SpreadCurve(today_date, yc, start_date, step_in_date, value_date,
                        end_dates, spreads, upfronts, recoveries, True)
    return r['cds_ticker'], curve
+
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks or blocks.

    The final chunk is padded with *fillvalue* up to length *n*.

    >>> [''.join(g) for g in grouper('ABCDEFG', 3, 'x')]
    ['ABC', 'DEF', 'Gxx']
    """
    # n references to the SAME iterator: zip_longest pulls n items per step.
    chunks = [iter(iterable)] * n
    return zip_longest(*chunks, fillvalue=fillvalue)
+
def build_curves_dist(quotes, args, workers=4):
    """Build spread curves in parallel over *workers* processes.

    Roughly twice as fast as the serial version; the curve builder is
    not thread safe for some reason, hence a process pool rather than
    a thread pool. Quotes are batched 30 at a time per task.
    """
    with ProcessPoolExecutor(workers) as pool:
        futures = [pool.submit(build_curves, chunk, args)
                   for chunk in grouper(quotes, 30)]
    gathered = (f.result() for f in as_completed(futures))
    return list(chain.from_iterable(gathered))
+
def build_curves(quotes, args):
    """Build a (ticker, SpreadCurve) pair for every non-None quote."""
    curves = []
    for quote in quotes:
        if quote is None:
            # grouper() pads the last chunk with None — skip the padding.
            continue
        curves.append(build_curve(quote, *args))
    return curves
+
+
# --- Script configuration: valuation as of 6 Feb 2017 ----------------------
# QuantLib's global evaluation date must agree with today_date below.
Settings().evaluation_date = Date(6, 2, 2017)
yc = YC()             # yield curve built by the local yieldcurve module
jp_yc = ql_to_jp(yc)  # converted to the pyisda (JP/ISDA) curve type
today_date = datetime.date(2017, 2, 6)
step_in_date = datetime.date(2017, 2, 7)  # today + 1 day
value_date = datetime.date(2017, 2, 9)    # today + 3 days (cash settle)
start_date = datetime.date(2016, 12, 20)  # accrual start — presumably the prior roll date; confirm
# Curve pillar maturities, all Dec-20 dates: 2017..2021, 2023, 2026.
end_dates = [datetime.date(2017, 12, 20),
             datetime.date(2018, 12, 20),
             datetime.date(2019, 12, 20),
             datetime.date(2020, 12, 20),
             datetime.date(2021, 12, 20),
             datetime.date(2023, 12, 20),
             datetime.date(2026, 12, 20)]

quotes = get_singlenames_quotes("ig27", today_date)  # single-name quotes for index "ig27"
args = (today_date, jp_yc, start_date, step_in_date, value_date, end_dates)
curves = build_curves_dist(quotes, args)  # list of (ticker, SpreadCurve)
+
def all_curves_pv(curves, today_date, jp_yc, start_date, step_in_date, value_date, end_dates):
    """Price the index intrinsically from its single-name curves.

    For each maturity in *end_dates*, values a unit-coupon fee leg
    (clean risky duration) and the contingent leg under every name's
    spread curve, averages across names, and returns the intrinsic
    index PV: protection PV minus 100bp running coupon times duration,
    indexed by maturity.
    """
    per_maturity = {}
    for end_date in end_dates:
        coupon_leg = FeeLeg(start_date, end_date, True, 1., 1.)
        default_leg = ContingentLeg(start_date, end_date, True)
        accrued = coupon_leg.accrued(step_in_date)
        tickers = []
        data = []
        for ticker, sc in curves:
            coupon_leg_pv = coupon_leg.pv(today_date, step_in_date, value_date,
                                          jp_yc, sc, False)
            default_leg_pv = default_leg.pv(today_date, step_in_date, value_date,
                                            jp_yc, sc, 0.4)  # 40% recovery
            tickers.append(ticker)
            # Strip accrued to get the clean duration.
            data.append((coupon_leg_pv - accrued, default_leg_pv))
        per_maturity[pd.Timestamp(end_date)] = pd.DataFrame.from_records(
            data, index=tickers, columns=['duration', 'protection_pv'])
    # BUG FIX: the original called .mean() on a plain dict, which raises
    # AttributeError. Stack the per-maturity frames into a MultiIndex
    # (maturity, ticker) frame and average across names per maturity.
    indexpv = pd.concat(per_maturity).groupby(level=0).mean()
    return indexpv.protection_pv - indexpv.duration * 0.01
+
def stack_curves(curves):
    """Stack the term structures of *curves* into a 2-D array.

    Returns ``(hazard_rates, dates)`` where ``hazard_rates`` has one
    row per curve and one column per knot date. The dates are taken
    from the first curve; every curve is assumed to share them.
    """
    dates = [knot_date for knot_date, _ in curves[0].inspect()['data']]
    rows = [[rate for _, rate in curve.inspect()['data']] for curve in curves]
    return np.array(rows), dates
+
def all_curves_pv2(curves, today_date, jp_yc, start_date, step_in_date, value_date, end_dates):
    """Price the index per maturity via the BasketIndex fast path.

    The *end_dates* parameter is immediately shadowed by the knot dates
    read off the curves themselves; a BasketIndex is rebuilt for each
    maturity from the stacked hazard rates.

    NOTE(review): ``data`` holds a single record per maturity while the
    frame is indexed by every ticker — the lengths look inconsistent
    unless BasketIndex.pv returns one record per name; confirm its
    return shape.
    """
    tickers = [t for t, _ in curves]
    hazard_rates, end_dates = stack_curves([c for _, c in curves])
    r = {}
    for maturity in end_dates:
        data = []
        ig27 = BasketIndex(today_date, start_date, maturity, end_dates, hazard_rates)
        data.append(ig27.pv(step_in_date, value_date, jp_yc, 0.4))
        # BUG FIX: the original keyed the dict on the undefined name
        # `d` (NameError at runtime); the loop variable is `maturity`.
        r[pd.Timestamp(maturity)] = pd.DataFrame.from_records(
            data, index=tickers, columns=['duration', 'protection_pv'])
    return r
+
def forward_hazard_rates(sc):
    """Back out piecewise-constant forward hazard rates from curve *sc*.

    Each knot rate ``h`` is mapped through ``log1p`` (presumably a
    compounding conversion — confirm against the curve's quoting
    convention), cumulated over ACT/365 year fractions from the curve's
    base date, and differenced to give the forward over each interval.

    Returns ``(times, forwards)`` as parallel lists.
    """
    times, forwards = [], []
    prev_t, prev_h = 0, 0
    base_date = sc.base_date
    for knot_date, knot_rate in sc.inspect()['data']:
        rate = math.log1p(knot_rate)
        t = (knot_date - base_date).days / 365
        # Forward = (cumulative hazard difference) / (interval length).
        forwards.append((rate * t - prev_h * prev_t) / (t - prev_t))
        times.append(t)
        prev_h, prev_t = rate, t
    return times, forwards
+
# Sanity check on the first constituent: bump the curve by 0.01
# (units unclear — absolute vs bps; confirm against tweak_curve's API)
# without mutating the original curve.
ticker, sc = curves[0]
sc2 = sc.tweak_curve(0.01, inplace=False)
# Sketch of a future CreditIndex wrapper class — not yet implemented.
# class CreditIndex:
#     def __init__(name, trade_date):

#     @property
#     def quotes():
#         pass
#         forward_hazard_rates()
#         for d, q in
#     @quotes.setter
#     def quotes(val):
#         self._quotes = val
#         for d, q in self._quotes: