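"""Calibrate index swaption implied volatilities from dealer quotes.

Pulls quotes from the swaption_ref_quotes / swaption_quotes tables, backs out
payer and receiver vols per quote date, series, expiry and strike with the
analytics Swaption object, and writes the results to swaption_calib.
"""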
import datetime

import pandas as pd

from analytics import Index, Swaption
from contextlib import contextmanager, nullcontext
from db import dbengine
from functools import partial
from itertools import starmap
from multiprocessing import Pool

serenitas_engine = dbengine('serenitasdb')

def get_data(index, series, date=datetime.date.min):
    """Return all quotes for an index/series with quotedate on or after `date`."""
    df = pd.read_sql_query("SELECT * FROM swaption_ref_quotes JOIN swaption_quotes "
                           "USING (ref_id) WHERE index=%s AND series=%s "
                           "AND quotedate >= %s ORDER BY quotedate",
                           serenitas_engine,
                           params=(index, series, date),
                           parse_dates=['quotedate', 'expiry'])
    # GS HY quotes arrive on a different scale; rescale by 100 to line up with the other sources.
    df.loc[(df.quote_source == "GS") & (df['index'] == "HY"),
           ["pay_bid", "pay_offer", "rec_bid", "rec_offer"]] *= 100
    df.quotedate = df.quotedate.dt.tz_convert('America/New_York')
    return df

def get_data_latest():
    """Return quotes that do not yet have an entry in swaption_calib."""
    df = pd.read_sql_query("SELECT quotedate, index, series, expiry, ref, quote_source, "
                           "swaption_quotes.* FROM swaption_ref_quotes "
                           "JOIN swaption_quotes USING (ref_id) "
                           "LEFT JOIN swaption_calib USING (quote_id) "
                           "WHERE swaption_calib.quote_id IS NULL",
                           serenitas_engine,
                           parse_dates=['quotedate', 'expiry'])
    # Same GS HY rescaling as in get_data.
    df.loc[(df.quote_source == "GS") & (df['index'] == "HY"),
           ["pay_bid", "pay_offer", "rec_bid", "rec_offer"]] *= 100
    df.quotedate = df.quotedate.dt.tz_convert('America/New_York')
    return df
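
# Hypothetical usage sketch: pull every HY series 28 quote since an arbitrary
# start date (the date below is only an example), or just the uncalibrated ones.
#
#     df = get_data("HY", 28, datetime.date(2017, 6, 1))
#     pending = get_data_latest()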

def calib(option, ref, strike, pay_bid, pay_offer, rec_bid, rec_offer):
    """Calibrate one strike.

    Returns the implied vol for each (pv model, payer/receiver) combination, in the
    order pv/payer, pv/receiver, pv_black/payer, pv_black/receiver, with None where
    the mid quote cannot be matched.
    """
    option.ref = ref
    option.strike = strike
    r = []
    for pv_type in ['pv', 'pv_black']:
        for option_type in ['pay', 'rec']:
            if option_type == "pay":
                mid = (pay_bid + pay_offer) / 2 * 1e-4
                option.option_type = 'payer'
            else:
                mid = (rec_bid + rec_offer) / 2 * 1e-4
                option.option_type = 'receiver'
            try:
                setattr(option, pv_type, mid)
            except ValueError as e:
                r.append(None)
                print(e)
            else:
                r.append(option.sigma)
    return r

@contextmanager
def MaybePool(nproc):
    # Hand out a worker pool only when nproc > 1; the pool is cleaned up on exit.
    with (Pool(nproc) if nproc > 1 else nullcontext()) as pool:
        yield pool
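
# MaybePool yields None in the serial case, so callers can fall back to
# itertools.starmap instead of Pool.starmap, e.g.:
#
#     with MaybePool(nproc) as pool:
#         pstarmap = pool.starmap if pool else starmap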

def calibrate(index_type=None, series=None, date=None, nproc=4, latest=False):
    """Calibrate every quote group and insert the resulting vols into swaption_calib."""
    sql_str = ("INSERT INTO swaption_calib VALUES({}) ON CONFLICT DO NOTHING"
               .format(",".join(["%s"] * 5)))
    if latest:
        data = get_data_latest()
    else:
        data = get_data(index_type, series, date)
    with MaybePool(nproc) as pool:
        pstarmap = pool.starmap if pool else starmap  # serial fallback when pool is None
        for k, v in data.groupby([data['quotedate'].dt.date, 'index', 'series']):
            trade_date, index_type, series = k
            index = Index.from_name(index_type, series, "5yr", trade_date)
            for expiry, df in v.groupby('expiry'):
                option = Swaption(index, expiry.date(), 100)
                mycalib = partial(calib, option)
                r = pstarmap(mycalib, df[['ref', 'strike', 'pay_bid', 'pay_offer',
                                          'rec_bid', 'rec_offer']]
                             .itertuples(index=False, name=None))
                to_insert = [[a] + b for a, b in zip(df.quote_id, r)]
                serenitas_engine.execute(sql_str, to_insert)
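
# Example invocations (the script name is hypothetical; --latest calibrates only
# quotes that have no swaption_calib entry yet):
#
#     python swaption_calibrate.py --index hy --series 28 --date 2017-06-01
#     python swaption_calibrate.py --latest --nproc 8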

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--index', required=False, type=lambda s: s.upper(),
                        dest="index_type")
    parser.add_argument('--series', required=False, type=int, default=28)
    parser.add_argument('--date', required=False, type=datetime.date.fromisoformat,
                        default=datetime.date.min)
    parser.add_argument('--latest', required=False, action="store_true")
    parser.add_argument('--nproc', required=False, type=int, default=4)
    args = parser.parse_args()
    if args.latest:
        calibrate(latest=True, nproc=args.nproc)
    else:
        calibrate(**vars(args))