from analytics.curve_trades import on_the_run
from analytics.index_data import index_returns, get_index_quotes
from analytics import Portfolio
from db import dbengine, dbconn
from pandas.tseries.offsets import BDay, BMonthEnd
from copy import deepcopy
import numpy as np
import pandas as pd
import math
import datetime

dawndb = dbengine('dawndb')
serenitasdb = dbengine('serenitasdb')


def hist_var(portf, index_type='IG', quantile=.05, years=5):
    """Historical VaR: reprice ``portf`` under each date's historical
    spread shocks and annualize the pnl quantile."""
    df = index_returns(index=index_type, years=years,
                       tenor=['3yr', '5yr', '7yr', '10yr'])
    df = (df.reset_index(['index'], drop=True)
            .reorder_levels(['date', 'series', 'tenor']))
    returns = df.spread_return.dropna().reset_index('series')
    # key each return by its distance from the on-the-run series on that date
    returns['dist_on_the_run'] = (returns
                                  .groupby('date')['series']
                                  .transform(lambda x: x.max() - x))
    del returns['series']
    returns = returns.set_index('dist_on_the_run', append=True).unstack('tenor')
    returns.columns = returns.columns.droplevel(0)
    portf.reset_pv()
    otr = on_the_run(index_type)
    spreads = pd.DataFrame({'spread': portf.spread,
                            'tenor': [ind.tenor for ind in portf.indices],
                            'dist_on_the_run': [otr - ind.series
                                                for ind in portf.indices]})
    spreads = spreads.set_index(['dist_on_the_run', 'tenor'])
    r = []
    # apply each historical date's relative shocks to current spreads, reprice
    for k, g in returns.groupby(level='date', as_index=False):
        shocks = g.reset_index('date', drop=True).stack('tenor')
        shocks.name = 'shocks'
        portf.spread = spreads.spread * (1 + spreads.join(shocks).shocks)
        r.append((k, portf.pnl))
    pnl = pd.DataFrame.from_records(r, columns=['date', 'pnl'], index=['date'])
    # scale the monthly pnl quantile to an annual figure
    return pnl.quantile(quantile) * math.sqrt(12)
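
# A minimal usage sketch, assuming a Portfolio from analytics that exposes
# the .spread, .indices, .reset_pv() and .pnl members used above:
#
#     portf = Portfolio(...)  # hypothetical construction
#     var_95 = hist_var(portf, index_type='IG', quantile=.05)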


def rel_spread_diff(report_date=None, index='HY', rolling=10):
    """Rolling ~1-month relative changes in the 5yr on-the-run spread,
    plus the current spread level and duration."""
    if report_date is None:
        # default resolved at call time, not frozen at import time
        report_date = datetime.date.today()
    otr = on_the_run(index)
    # look at spreads across the last `rolling` series
    df = get_index_quotes(index, list(range(otr - rolling, otr + 1)),
                          tenor=['3yr', '5yr', '7yr', '10yr'], years=5)
    duration = df.duration.xs((report_date, '5yr', otr),
                              level=['date', 'tenor', 'series']).iloc[-1]
    df = df.xs('5yr', level='tenor')['closespread'].groupby(['date']).last()
    df = df.loc['2013-01-15':report_date]
    curr_spread = df.iloc[-1]
    # 22 business days ~ one month of relative spread moves
    df = df.pct_change(freq='22B').dropna()
    return df.groupby('date').last(), curr_spread, duration
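
# A usage sketch: shocks, spread, dur = rel_spread_diff(index='IG') gives
# ~monthly (22 business day) relative moves in the IG 5yr on-the-run
# spread, together with its latest level and duration.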


def get_pos(report_date):
    """Pull the CDS marks for each strategy into a single frame."""
    strats = ["HEDGE_CSO", "HEDGE_MBS", "HEDGE_CLO", "HEDGE_MAC",
              "SER_IGINX", "SER_HYINX", "SER_IGCURVE", "SER_ITRXCURVE",
              "IGOPTDEL", "HYOPTDEL"]
    r = {}
    for st in strats:
        r[st] = pd.read_sql_query("SELECT * from list_cds_marks(%s, %s)",
                                  dawndb, params=(report_date, st))
    return pd.concat(r, names=['strategy']).reset_index()
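
# Downstream, cleared_cds_margins relies on each mark row carrying the
# notional, factor, duration and p_index columns, where p_index names the
# index family ('IG', 'HY' or 'EU').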


def cleared_cds_margins(report_date=None):
    if report_date is None:
        report_date = datetime.date.today()
    df = get_pos(report_date)
    # Cap allocation for deltas
    percentile = .95  # monthly 95%tile case
    shocks, widen, tighten, onTR_dur, onTR_spread = {}, {}, {}, {}, {}
    for ind in ['IG', 'HY', 'EU']:
        shocks[ind], onTR_spread[ind], onTR_dur[ind] = \
            rel_spread_diff(report_date, index=ind)
        widen[ind] = shocks[ind].quantile(percentile)
        tighten[ind] = shocks[ind].quantile(1 - percentile)
    # duration-weighted equivalent notional in the on-the-run index
    df['onTR_notional'] = df.apply(
        lambda row: row.notional * row.factor * row.duration
        / onTR_dur[row.p_index], axis=1)
    # pnl under the widening / tightening shocks (spreads quoted in bp)
    df['widen'] = df.apply(
        lambda row: row.onTR_notional * onTR_spread[row.p_index]
        * onTR_dur[row.p_index] * widen[row.p_index] / 10000, axis=1)
    df['tighten'] = df.apply(
        lambda row: row.onTR_notional * onTR_spread[row.p_index]
        * onTR_dur[row.p_index] * tighten[row.p_index] / 10000, axis=1)
    # sum numeric columns only; string columns would break the aggregation
    delta_alloc = df.groupby('strategy').sum(numeric_only=True)
    delta_alloc['total'] = delta_alloc.apply(
        lambda row: max(abs(row.widen), abs(row.tighten)), axis=1)
    return delta_alloc
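

# A minimal sketch of driving the allocation for today; it assumes the
# dawndb engine above is reachable and that list_cds_marks exists there:
if __name__ == '__main__':
    alloc = cleared_cds_margins()
    print(alloc[['widen', 'tighten', 'total']])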