Diffstat (limited to 'python/cds_curve.py')
-rw-r--r--  python/cds_curve.py  56
1 file changed, 11 insertions, 45 deletions
diff --git a/python/cds_curve.py b/python/cds_curve.py
index 3e44810c..51f017f1 100644
--- a/python/cds_curve.py
+++ b/python/cds_curve.py
@@ -17,7 +17,6 @@ from itertools import zip_longest, chain
from index_data import get_index_quotes
from pandas.tseries.offsets import BDay
from scipy.optimize import brentq
-from dateutil.relativedelta import relativedelta
from pyisda.logging import enable_logging
from analytics.utils import roll_date, previous_twentieth
@@ -33,19 +32,14 @@ def build_curve(r, today_date, yc, start_date, step_in_date, value_date, end_dat
recovery_curve = np.array(r['recovery_curve'][1:], dtype='float')
try:
sc = SpreadCurve(today_date, yc, start_date, step_in_date, value_date,
- end_dates, spread_curve, upfront_curve, recovery_curve, True)
+ end_dates, spread_curve, upfront_curve, recovery_curve,
+ ticker=r['cds_ticker'])
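+ # the ticker is attached to the curve via the ticker kwarg, so callers can read sc.ticker directly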
if np.any(np.isnan(upfront_curve)):
sc = fill_curve(sc, end_dates)
except ValueError as e:
print(e)
- return (r['cds_ticker'], None)
- return (r['cds_ticker'], sc)
-
-def grouper(iterable, n, fillvalue=None):
- "Collect data into fixed-length chunks or blocks"
- # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
- args = [iter(iterable)] * n
- return zip_longest(fillvalue=fillvalue, *args)
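+ # construction failed: return a bare None instead of the old (ticker, None) pair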
+ return None
+ return sc
def build_curves_dist(quotes, args, workers=4):
## about twice as fast as the non-distributed version
@@ -80,37 +74,17 @@ def all_curves_pv(curves, today_date, jp_yc, start_date, step_in_date, value_dat
accrued = coupon_leg.accrued(step_in_date)
tickers = []
data = []
- for ticker, sc in curves:
+ for sc in curves:
coupon_leg_pv = coupon_leg.pv(today_date, step_in_date, value_date, jp_yc, sc, False)
default_leg_pv = default_leg.pv(today_date, step_in_date, value_date,
jp_yc, sc, 0.4)
- tickers.append(ticker)
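+ # build_curve now returns bare curves, so the ticker is read off the curve object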
+ tickers.append(sc.ticker)
data.append((coupon_leg_pv-accrued, default_leg_pv))
r[pd.Timestamp(d)] = pd.DataFrame.from_records(data,
index=tickers,
columns=['duration', 'protection_pv'])
return pd.concat(r, axis=1).swaplevel(axis=1).sort_index(axis=1,level=0)
-def stack_curves(curves):
- dates = [d for d, _ in curves[0].inspect()['data']]
- hazard_rates = np.empty((len(curves), len(dates)))
- for i, sc in enumerate(curves):
- hazard_rates[i] = np.array([h for _, h in sc.inspect()['data']])
- return hazard_rates, dates
-
-def forward_hazard_rates(sc):
- r = []
- t = []
- t1 = 0
- h1 = 0
- base_date = sc.base_date
- for d, h2 in sc.inspect()['data']:
- t2 = (d - base_date).days / 365
- r.append( (h2 * t2 - h1 * t1) / (t2 - t1) )
- t.append(t2)
- h1, t1 = h2, t2
- return t, r
-
serenitas_engine = dbengine('serenitasdb')
def calibrate_portfolio(index_type, series, tenors=['3yr', '5yr', '7yr', '10yr']):
@@ -140,16 +114,11 @@ def calibrate_portfolio(index_type, series, tenors=['3yr', '5yr', '7yr', '10yr']
curves, _ = get_singlenames_curves(index_type, series, issue_date)
index = CreditIndex(issue_date, maturities, curves)
r = {}
- old_version = 1
for k, s in index_quotes.iterrows():
trade_date, version = k
curves, args = get_singlenames_curves(index_type, series, trade_date)
_, jp_yc, _, step_in_date, value_date, _ = args
- if version > old_version:
- old_version = version
- index = CreditIndex(issue_date, maturities, curves)
- else:
- index.curves = [c for _, c in curves]
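+ # curves are plain curve objects now (no (ticker, curve) tuples); assign them directly rather than rebuilding the index per version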
+ index.curves = curves
tweak, duration, theta = [], [], []
s.name = 'index_quote'
quotes = pd.concat([index_desc, s], axis=1)
@@ -157,15 +126,12 @@ def calibrate_portfolio(index_type, series, tenors=['3yr', '5yr', '7yr', '10yr']
eps = brentq(lambda epsilon: index.pv(step_in_date, value_date,
m, jp_yc, recovery,
coupon, epsilon) -
- index_quote, -0.5, 0.3)
+ index_quote, -0.5, 0.3)
# tweak the curves in place
index.tweak_portfolio(eps, m)
duration.append(index.duration(step_in_date, value_date, m, jp_yc))
- theta.append(index_quote -
- index.theta(step_in_date, value_date,
- m - relativedelta(years=1),
- jp_yc, recovery, coupon) +
- coupon)
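+ # take theta straight from index.theta at maturity m instead of the manual one-year roll-down via relativedelta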
+ theta.append(index.theta(step_in_date, value_date,
+ m, jp_yc, recovery, coupon))
tweak.append(eps)
r[trade_date] = pd.DataFrame({'duration': duration,
'theta': theta,
@@ -174,4 +140,4 @@ def calibrate_portfolio(index_type, series, tenors=['3yr', '5yr', '7yr', '10yr']
if __name__=="__main__":
enable_logging()
- df = calibrate_portfolio("IG", 27)
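+ # example run: calibrate the IG series 25 portfolio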
+ df = calibrate_portfolio("IG", 25)