"""Single-name CDS curve bootstrapping and credit-index PV script.

Builds single-name spread curves from DB quotes (in parallel), prices the
coupon/contingent legs across maturities, and calibrates index basis epsilons.
"""
from pyisda.curve import YieldCurve, BadDay, SpreadCurve
from pyisda.credit_index import CreditIndex
from pyisda.legs import FeeLeg, ContingentLeg
from pyisda.logging import enable_logging

import datetime
import math
from concurrent.futures import ProcessPoolExecutor, as_completed
from itertools import zip_longest, chain

import numpy as np
import pandas as pd
# fix: brentq is used by calibrate_portfolio but was never imported
from scipy.optimize import brentq

from db import dbconn
# moved here from mid-script (was between the pricing calls and the
# get_index_quotes call) — PEP 8 top-of-file imports
from index_data import get_index_quotes
from quantlib.settings import Settings
from quantlib.time.api import Date
from yieldcurve import YC, ql_to_jp


def get_singlenames_quotes(indexname, date):
    """Fetch single-name curve quote rows for *indexname* as of *date*.

    Calls the ``curve_quotes`` DB function on serenitasdb and returns the
    result rows as a list.
    """
    conn = dbconn('serenitasdb')
    with conn.cursor() as c:
        c.execute("SELECT * FROM curve_quotes(%s, %s)",
                  vars=(indexname, date))
        return [r for r in c]


def build_curve(r, today_date, yc, start_date, step_in_date, value_date,
                end_dates):
    """Bootstrap a SpreadCurve from one quote row *r*.

    Quotes are rescaled: spreads from bp to decimal (1e-4), upfronts from
    percent to decimal (1e-2).  The leading element of each curve array is
    dropped — presumably a header/label column from the DB row; TODO confirm.

    Returns a ``(cds_ticker, SpreadCurve)`` pair.
    """
    spread_curve = 1e-4 * np.array(r['spread_curve'][1:])
    upfront_curve = 1e-2 * np.array(r['upfront_curve'][1:])
    recovery_curve = np.array(r['recovery_curve'][1:])
    sc = SpreadCurve(today_date, yc, start_date, step_in_date, value_date,
                     end_dates, spread_curve, upfront_curve, recovery_curve,
                     True)
    return (r['cds_ticker'], sc)


def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks or blocks.

    grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    """
    args = [iter(iterable)] * n
    return zip_longest(fillvalue=fillvalue, *args)


def build_curves_dist(quotes, args, workers=4):
    """Bootstrap all curves across *workers* processes.

    About twice as fast as the non-distributed version.  The curve builder
    is not thread safe for some reason, so a ProcessPool is required.
    """
    with ProcessPoolExecutor(workers) as e:
        fs = [e.submit(build_curves, *(q, args))
              for q in grouper(quotes, 30)]
        return list(chain.from_iterable(f.result()
                                        for f in as_completed(fs)))


def build_curves(quotes, args):
    """Bootstrap a batch of curves, skipping the grouper's None padding."""
    return [build_curve(q, *args) for q in quotes if q is not None]


def all_curves_pv(curves, today_date, jp_yc, start_date, step_in_date,
                  value_date, maturities):
    """Price the fee and contingent legs of every curve at each maturity.

    Returns a DataFrame indexed by ticker with a (ticker-level sorted)
    two-level column index of ``(maturity, {duration, protection_pv})``,
    where duration is the coupon-leg PV net of accrued (unit coupon/notional)
    and protection_pv uses a 40% recovery assumption.
    """
    r = {}
    for d in maturities:
        # Unit notional / unit coupon legs: fee-leg PV net of accrued is
        # the risky duration.
        coupon_leg = FeeLeg(start_date, d, True, 1., 1.)
        default_leg = ContingentLeg(start_date, d, True)
        accrued = coupon_leg.accrued(step_in_date)
        tickers = []
        data = []
        for ticker, sc in curves:
            coupon_leg_pv = coupon_leg.pv(today_date, step_in_date,
                                          value_date, jp_yc, sc, False)
            default_leg_pv = default_leg.pv(today_date, step_in_date,
                                            value_date, jp_yc, sc, 0.4)
            tickers.append(ticker)
            data.append((coupon_leg_pv - accrued, default_leg_pv))
        r[pd.Timestamp(d)] = pd.DataFrame.from_records(
            data, index=tickers, columns=['duration', 'protection_pv'])
    return pd.concat(r, axis=1).swaplevel(axis=1).sort_index(axis=1, level=0)


def stack_curves(curves):
    """Stack the hazard-rate nodes of all curves into one 2-D array.

    Assumes every curve shares the same node dates (taken from the first
    curve) — TODO confirm.  Returns ``(hazard_rates, dates)`` where
    hazard_rates has shape (n_curves, n_dates).
    """
    dates = [d for d, _ in curves[0].inspect()['data']]
    hazard_rates = np.empty((len(curves), len(dates)))
    for i, sc in enumerate(curves):
        hazard_rates[i] = np.array([h for _, h in sc.inspect()['data']])
    return hazard_rates, dates


def forward_hazard_rates(sc):
    """Convert a curve's cumulative hazard nodes to piecewise-forward rates.

    Node values are mapped through log1p, annualized ACT/365, and differenced
    between consecutive nodes.  Returns ``(times, forward_rates)``.
    """
    r = []
    t = []
    t1 = 0
    h1 = 0
    base_date = sc.base_date
    for d, h in sc.inspect()['data']:
        h2 = math.log1p(h)
        t2 = (d - base_date).days / 365
        # forward over (t1, t2] from the cumulative hazards h1*t1, h2*t2
        r.append((h2 * t2 - h1 * t1) / (t2 - t1))
        t.append(t2)
        h1, t1 = h2, t2
    return t, r


def calibrate_portfolio(index, step_in_date, value_date, yc, recovery,
                        quotes):
    """Solve the basis adjustment epsilon for each index maturity.

    For each maturity, root-finds the epsilon in [-0.15, 0.3] such that the
    adjusted index PV matches the market quote.  Returns the list of solved
    epsilons, one per maturity.
    """
    epsilons = []
    # fix: original read `for i, m in index.maturities` — a list of dates
    # cannot unpack into two names; enumerate is required.
    for i, m in enumerate(index.maturities):
        # fix: original referenced undefined `quote`; presumably the i-th
        # entry of `quotes` aligns with the i-th maturity — TODO confirm
        # against index_data.get_index_quotes ordering.
        eps = brentq(
            lambda epsilon: index.pv(step_in_date, value_date, m, yc,
                                     recovery, epsilon) - quotes[i],
            -0.15, 0.3)
        # fix: original computed eps and discarded it; collect and return.
        epsilons.append(eps)
    return epsilons


# --- script driver -----------------------------------------------------------

Settings().evaluation_date = Date(6, 2, 2017)
yc = YC()
jp_yc = ql_to_jp(yc)

today_date = datetime.date(2017, 2, 6)
step_in_date = datetime.date(2017, 2, 7)
value_date = datetime.date(2017, 2, 9)
start_date = datetime.date(2016, 12, 20)
end_dates = [datetime.date(2017, 12, 20),
             datetime.date(2018, 12, 20),
             datetime.date(2019, 12, 20),
             datetime.date(2020, 12, 20),
             datetime.date(2021, 12, 20),
             datetime.date(2023, 12, 20),
             datetime.date(2026, 12, 20)]

quotes = get_singlenames_quotes("ig27", today_date)
maturities = [datetime.date(2019, 12, 20),
              datetime.date(2021, 12, 20),
              datetime.date(2023, 12, 20),
              datetime.date(2026, 12, 20)]
args = (today_date, jp_yc, start_date, step_in_date, value_date, maturities)
curves = build_curves_dist(quotes, args)
test = all_curves_pv(curves, *args)

ig27 = CreditIndex(start_date, maturities, curves)
test2 = ig27.pv_vec(step_in_date, value_date, jp_yc, 0.4)

quotes = get_index_quotes("IG", 27, ['3yr', '5yr', '7yr', '10yr'])