{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import datetime\n",
"import globeop_reports as go\n",
"import pandas as pd\n",
"import analytics\n",
"import numpy as np\n",
"\n",
"from pandas.tseries.offsets import BDay, MonthEnd\n",
"from analytics.scenarios import run_portfolio_scenarios\n",
"from utils.db import dbconn\n",
"from risk.portfolio import build_portfolio, generate_vol_surface\n",
"from analytics.basket_index import BasketIndex"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Set dates\n",
"position_date = (datetime.date.today() - MonthEnd(1)).date()\n",
"spread_date = position_date\n",
"analytics._local = False\n",
"analytics.init_ontr(spread_date)\n",
"path = '/home/serenitas/Daily/Risk/'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"################################### Run Credit Spread scenarios\n",
"spread_shock = np.array([-100., -25., 1., +25. , 100.])\n",
"spread_shock /= analytics._ontr['HY'].spread\n",
"portf, _ = build_portfolio(position_date, spread_date)\n",
"vol_surface = generate_vol_surface(portf, 5)\n",
"portf.reset_pv()\n",
"scens = run_portfolio_scenarios(portf, date_range=[pd.Timestamp(spread_date)], params=['pnl'],\n",
" spread_shock=spread_shock,\n",
" vol_shock=[0.0],\n",
" corr_shock=[0.0],\n",
" vol_surface=vol_surface)\n",
"scens.sum(axis=1).to_csv(path+'csscen_'+position_date.strftime(\"%Y%m%d\")+'.csv')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"################################### JTD\n",
"_, portf = build_portfolio(position_date, spread_date)\n",
"jtd_i = []\n",
"for t in portf.indices:\n",
" bkt = BasketIndex(t.index_type, t.series, [t.tenor])\n",
" spreads = pd.DataFrame(bkt.spreads() * 10000, index=pd.Index(bkt.tickers, name='ticker'), columns=['spread'])\n",
" jump = pd.merge(spreads, bkt.jump_to_default() * t.notional, left_index=True, right_index=True)\n",
" jtd_i.append(jump.rename(columns={jump.columns[1]: 'jtd'}))\n",
"jtd_t = []\n",
"for t in portf.tranches:\n",
" jump = pd.concat([t.singlename_spreads().reset_index(['seniority', 'doc_clause'], drop=True), t.jump_to_default().rename('jtd')], axis=1)\n",
" jtd_t.append(jump.drop(['weight', 'recovery'], axis=1))\n",
"\n",
"ref_names = pd.read_sql_query(\"select ticker, referenceentity from refentity\", dbconn('serenitasdb'), index_col='ticker')\n",
"jump = pd.concat([pd.concat(jtd_t), pd.concat(jtd_i)])\n",
"jump = jump.merge(ref_names, left_index=True, right_index=True)\n",
"jump = jump.groupby('referenceentity').agg({'spread': np.mean, 'jtd': np.sum}).sort_values(by='jtd', ascending=True)\n",
"jump.to_csv(path+'jtd_'+position_date.strftime(\"%Y%m%d\")+'.csv')"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.8.1 64-bit",
"language": "python",
"name": "python38164bitc40c8740e5d542d7959acb14be96f4f3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.5"
}
},
"nbformat": 4,
"nbformat_minor": 4
}