import sys
sys.path.append("..")
from graphics import plot_time_color_map
import analytics.tranche_functions as tch
import analytics.tranche_basket as bkt
import analytics.basket_index as idx_bkt
import numpy as np
import pandas as pd
from analytics import Swaption, BlackSwaption, Index, BlackSwaptionVolSurface, Portfolio, ProbSurface
from analytics.scenarios import run_swaption_scenarios, run_index_scenarios, run_portfolio_scenarios, run_tranche_scenarios
from scipy.interpolate import interp1d
from datetime import date
from db import dbengine
engine = dbengine('serenitasdb')
def rv_calc1():
    # RV comparison: map the IG29 skew to a maturity shortened by one year
    # (TLP method) and compare against the IG27 5yr skew. Quotes come from
    # risk_numbers_new rather than ad-hoc marks.
    index = 'IG'
    series = 29
    series2 = series - 2  # two series back => maturity one year shorter
    tenor = '5yr'
    shortened = 4  # shorten by 4 quarterly periods = 1 year
    method = 'TLP'
    # Read existing results so only the missing dates are run.
    try:
        results = pd.read_csv(f"/home/serenitas/edwin/Python/rv_{index}{series}.csv",
                              parse_dates=['date'], index_col=['date'])
    except FileNotFoundError:
        results = pd.DataFrame()
    sql_string = ("select distinct date from risk_numbers_new "
                  "where index = %s and series = %s order by date desc")
    df = pd.read_sql_query(sql_string, engine, params=(index, series), parse_dates=['date'])
    df1 = pd.read_sql_query(sql_string, engine, params=(index, series2), parse_dates=['date'])
    df = df.merge(df1, on=['date'])  # keep only dates quoted for both series
    df = df[~df.date.isin(results.index)]
    rho_tlp, pv_tlp, rho_prev_index, pv_prev_index = [], [], [], []
    tranche = bkt.TrancheBasket(index, series, tenor)
    tranche2 = bkt.TrancheBasket(index, series2, tenor)
    for trade_date in df.date:
        tranche.trade_date = trade_date
        tranche2.trade_date = trade_date
        tranche.build_skew()
        tranche.rho = tranche.map_skew(tranche, method, shortened)
        pv = tranche.tranche_pvs().bond_price
        rho_tlp.append(tranche.rho[1:-1])
        pv_tlp.append(pv)
        tranche2.build_skew()
        rho_prev_index.append(tranche2.rho[1:-1])
        # Price the on-the-run tranches off the previous series' skew.
        tranche.rho = tranche2.rho
        pv = tranche.tranche_pvs(shortened=shortened).bond_price
        pv_prev_index.append(pv)
    prev = f'{index.lower()}{series2}'
    temp1 = pd.DataFrame(rho_tlp, index=df.date, columns=['3_rho_tlp', '7_rho_tlp', '15_rho_tlp'])
    temp2 = pd.DataFrame(pv_tlp, index=df.date, columns=['03_pv_tlp', '37_pv_tlp', '715_pv_tlp', '15100_pv_tlp'])
    temp3 = pd.DataFrame(rho_prev_index, index=df.date,
                         columns=[f'3_rho_{prev}', f'7_rho_{prev}', f'15_rho_{prev}'])
    temp4 = pd.DataFrame(pv_prev_index, index=df.date,
                         columns=[f'03_pv_{prev}', f'37_pv_{prev}', f'715_pv_{prev}', f'15100_pv_{prev}'])
    results = pd.concat([results, pd.concat([temp1, temp2, temp3, temp4], axis=1)])
    results.to_csv(f"/home/serenitas/edwin/Python/rv_{index}{series}.csv")
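
# A minimal inspection sketch (not part of the original script): load the CSV
# written by rv_calc1() and summarize the rho gap between the TLP-mapped IG29
# skew and the IG27 skew. Column names follow the frames built above; the
# function name and its defaults are illustrative only.
def rv_report(index='IG', series=29):
    prev = f'{index.lower()}{series - 2}'
    results = pd.read_csv(f"/home/serenitas/edwin/Python/rv_{index}{series}.csv",
                          parse_dates=['date'], index_col=['date'])
    # Positive gap: the shortened on-the-run skew sits above the previous
    # series' skew at that detachment point.
    rho_gap = pd.DataFrame({k: results[f'{k}_rho_tlp'] - results[f'{k}_rho_{prev}']
                            for k in ('3', '7', '15')})
    return rho_gap.describe()
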
def dispersion(trade_date):
    # Top-decile dispersion of single-name default probabilities at each
    # series' maturity. trade_date was implicit in the original script; it is
    # an explicit argument here so the function is self-contained.
    from quantlib.time.api import Schedule, Rule, Date, Period, WeekendsOnly
    from quantlib.settings import Settings
    curves = {}
    maturities = {}
    settings = Settings()
    for series in [24, 25, 26, 27, 28, 29]:
        index_temp = idx_bkt.MarkitBasketIndex('IG', series, ["5yr"], value_date=trade_date)
        maturities[series] = index_temp.maturities[0]
        cds_schedule = Schedule.from_rule(settings.evaluation_date,
                                          Date.from_datetime(maturities[series]),
                                          Period('3M'), WeekendsOnly(),
                                          date_generation_rule=Rule.CDS2015)
        # survival_matrix expects serial dates; 134774 shifts the numpy
        # days-since-1970 counts onto that serial convention.
        sm, tickers = index_temp.survival_matrix(
            cds_schedule.to_npdates().view('int') + 134774)
        # Store default probabilities: rows = names, columns = payment dates.
        curves[series] = pd.DataFrame(1 - sm, index=tickers,
                                      columns=pd.DatetimeIndex(cds_schedule.to_npdates()))
    curve_df = pd.concat(curves).stack()
    # Stacking yields (series, ticker, payment date), in that order.
    curve_df.index.names = ['series', 'name', 'maturity']
    disp = {}
    for series in [24, 25, 26, 27, 28, 29]:
        temp = curve_df.xs((series, pd.Timestamp(maturities[series])),
                           level=['series', 'maturity'])
        # Coefficient of variation of the riskiest decile of names.
        temp = temp[pd.qcut(temp, 10, labels=False) == 9]
        disp[series] = temp.std() / temp.mean()
    # Cross-name summary statistics of the curves, per series and date.
    curve_means = curve_df.groupby(['series', 'maturity']).mean()
    curve_stds = curve_df.groupby(['series', 'maturity']).std()
    return pd.concat(disp), curve_means, curve_stds
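
# A minimal driver sketch, assuming a placeholder value date: rv_calc1() pulls
# its own dates from the database, while dispersion() needs an explicit one.
if __name__ == '__main__':
    rv_calc1()
    disp_by_series, curve_means, curve_stds = dispersion(date(2017, 11, 15))
    print(disp_by_series)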