about summary refs log tree commit diff stats
path: root/python/pnl_explain.py
diff options
context:
space:
mode:
Diffstat (limited to 'python/pnl_explain.py')
-rw-r--r--  python/pnl_explain.py  107
1 file changed, 66 insertions, 41 deletions
diff --git a/python/pnl_explain.py b/python/pnl_explain.py
index 360dc45f..44325893 100644
--- a/python/pnl_explain.py
+++ b/python/pnl_explain.py
@@ -3,6 +3,10 @@ from functools import reduce
from position import get_list
from db import dbengine
from dates import bus_day, imm_dates
+import numpy as np
+
+from psycopg2.extensions import register_adapter, AsIs
+register_adapter(np.int64, lambda x: AsIs(x))
def pnl_explain(identifier, start_date = None, end_date = None,
engine = dbengine("dawndb")):
@@ -39,11 +43,13 @@ def pnl_explain(identifier, start_date = None, end_date = None,
daily = pd.concat([df1, df2], axis = 1)
daily['unrealized_pnl'] = daily.price.diff() * daily.factor.shift()/100 * daily.faceamount
- daily['realized_pnl'] = (daily.price/100*daily.factor.diff()+daily.principal/100) * daily.faceamount
+ daily['realized_pnl'] = (daily.price/100 * daily.factor.diff() + daily.principal/100) * \
+ daily.faceamount
daily['clean_nav'] = daily.price/100 * daily.factor * daily.faceamount
daily['realized_accrued'] = daily.interest/100 * daily.faceamount
days_accrued = daily.index - daily.prev_cpn_date
- daily['accrued'] = days_accrued.dt.days/360*daily.coupon/100*daily.factor*daily.faceamount
+ daily['accrued'] = days_accrued.dt.days/360*daily.coupon/100*daily.factor * \
+ daily.faceamount
extra_pnl = daily.clean_nav.diff() - daily.principal_payment
daily.loc[daily.principal_payment>0 , 'unrealized_pnl'] += extra_pnl[daily.principal_payment>0]
daily.loc[daily.principal_payment<0, 'realized_pnl'] += extra_pnl[daily.principal_payment<0]
@@ -56,36 +62,37 @@ def pnl_explain_list(id_list, start_date = None, end_date = None, engine = dbeng
return {identifier: pnl_explain(identifier, start_date, end_date, engine)
for identifier in id_list}
-def cds_explain(index, series, tenor, attach = None, detach = None,
+def cds_explain(index, series, tenor, attach = np.nan, detach = np.nan,
start_date = None, end_date = None, engine = dbengine('serenitasdb')):
- if attach is None:
- quotes = pd.read_sql_query("SELECT date, (100-closeprice) AS upfront FROM index_quotes " \
- "WHERE index=%s AND series=%s AND tenor=%s ORDER BY date",
+ if np.isnan(attach) or np.isnan(detach):
+ quotes = pd.read_sql_query("SELECT date, (100-closeprice) AS upfront " \
+ "FROM index_quotes WHERE index=%s AND series=%s " \
+ "AND tenor=%s ORDER BY date",
engine, parse_dates=['date'],
index_col='date', params = (index, series, tenor))
- factors = pd.read_sql_query("""
-SELECT indexfactor/100 AS indexfactor, cumulativeloss/100 AS cumulativeloss
-lastdate FROM index_desc WHERE index=%s AND series=%s AND tenor=%s ORDER BY lastdate
-""",
- engine, parse_dates=['lastdate'], index_col='lastdate',
- params = (attach, detach, index, series, tenor))
+ sqlstring = "SELECT indexfactor/100 AS indexfactor, coupon, " \
+ "cumulativeloss/100 AS cumulativeloss, lastdate " \
+ "FROM index_desc WHERE index=%s AND series=%s AND tenor=%s " \
+ "ORDER BY lastdate"
+ factors = pd.read_sql_query(sqlstring, engine, parse_dates=['lastdate'],
+ index_col='lastdate',
+ params = (index, series, tenor))
else:
#we take the latest version available
- quotes = pd.read_sql_query("SELECT DISTINCT ON (quotedate) quotedate, upfront_mid AS upfront, "\
- "tranche_spread FROM markit_tranche_quotes " \
- "JOIN index_version USING (basketid) " \
- "WHERE index=%s AND series=%s " \
- "AND tenor=%s AND attach=%s AND detach=%s " \
- "ORDER by quotedate, version desc",
- engine, parse_dates=['quotedate'], index_col='quotedate',
- params = (index, series, tenor, attach, detach))
- factors = pd.read_sql_query("""
-SELECT tranche_factor(%s::smallint, %s::smallint, indexfactor, cumulativeloss/100),
-indexfactor/100 AS indexfactor, cumulativeloss/100 AS cumulativeloss, lastdate
-FROM index_desc WHERE index=%s AND series=%s AND tenor=%s ORDER BY lastdate
-""",
- "postgresql://serenitas_user@debian/serenitasdb",
- parse_dates=['lastdate'], index_col='lastdate',
+ sqlstring = "SELECT DISTINCT ON (quotedate) quotedate, upfront_mid AS upfront, " \
+ "tranche_spread FROM markit_tranche_quotes " \
+ "JOIN index_version USING (basketid) WHERE index=%s AND series=%s" \
+ "AND tenor=%s AND attach=%s AND detach=%s ORDER by quotedate, version desc"
+ quotes = pd.read_sql_query(sqlstring, engine, parse_dates=['quotedate'],
+ index_col='quotedate',
+ params = (index, series, tenor, int(attach), int(detach)))
+ sqlstring = "SELECT " \
+ "tranche_factor(%s::smallint, %s::smallint, indexfactor, cumulativeloss/100), " \
+ "indexfactor/100 AS indexfactor, cumulativeloss/100 AS cumulativeloss, " \
+ "lastdate FROM index_desc WHERE index=%s AND series=%s AND tenor=%s " \
+ "ORDER BY lastdate"
+ factors = pd.read_sql_query(sqlstring, engine, parse_dates=['lastdate'],
+ index_col='lastdate',
params = (attach, detach, index, series, tenor))
if start_date is None:
start_date = quotes.index.min()
@@ -93,11 +100,10 @@ FROM index_desc WHERE index=%s AND series=%s AND tenor=%s ORDER BY lastdate
end_date = pd.datetime.today()
#we use tranche_factor
- if attach:
+ if not np.isnan(attach):
factors['factor'] = factors.tranche_factor
else:
factors['factor'] = factors.indexfactor
-
dates = pd.date_range(start_date, end_date, freq = bus_day)
yearfrac = imm_dates(start_date, end_date)
yearfrac = yearfrac.to_series().reindex(dates, method='ffill')
@@ -112,27 +118,45 @@ FROM index_desc WHERE index=%s AND series=%s AND tenor=%s ORDER BY lastdate
df = (quotes.
join(factors[['factor']], how='left').
join(recovery).join(yearfrac))
- if attach:
+ if not np.isnan(attach):
coupon = df.tranche_spread.iat[0]/10000
else:
coupon = factors.coupon.iat[0]/10000
- df.indexfactor = df.indexfactor.bfill()
- df.loc[df.indexfactor.isnull(), 'indexfactor'] = factors.factor.iat[-1]
- df['clean_nav'] = df.upfront*df.factor
- df['accrued'] = df.yearfrac*coupon*df.factor
+ df.factor = df.factor.bfill()
+ df.loc[df.factor.isnull(), 'factor'] = factors.factor.iat[-1]
+ df['clean_nav'] = df.upfront * df.factor/100
+ df['accrued'] = - df.yearfrac * coupon*df.factor
df['unrealized_accrued'] = df.accrued.diff()
df['realized_accrued'] = -df.unrealized_accrued.where(df.unrealized_accrued.isnull() |
- (df.unrealized_accrued<0), 0)
+ (df.unrealized_accrued>0), 0)
df['unrealized_accrued'] = df.unrealized_accrued.where(df.unrealized_accrued.isnull()|
- (df.unrealized_accrued>0), df.accrued)
+ (df.unrealized_accrued<0), df.accrued)
df.loc[df.realized_accrued>0, 'realized_accrued'] += df.loc[df.realized_accrued>0, 'unrealized_accrued']
df['unrealized_pnl'] = df.upfront.diff() * df.factor.shift()/100
- df['realized_pnl'] = df.upfront/100*df.indexfactor.diff()+df.recovery
- return df
+ df['realized_pnl'] = df.upfront/100*df.factor.diff()+df.recovery
+ return df[['clean_nav', 'accrued', 'unrealized_accrued', 'realized_accrued',
+ 'unrealized_pnl', 'realized_pnl']]
+def convert_to_none(x):
+ return None if np.isnan(x) else x
-def cds_explain2(dealid):
- pass
+def cds_explain_strat(strat, start_date, end_date, engine = dbengine("dawndb")):
+ cds_positions = pd.read_sql_table("orig_cds", engine,
+ parse_dates = ['trade_date', 'upfront_settle_date'],
+ index_col='dealid')
+ cds_positions = cds_positions.ix[(cds_positions.folder == strat) &
+ (cds_positions.upfront_settle_date<=pd.Timestamp(end_date))]
+ cds_positions.loc[cds_positions.protection=='Seller', "notional"] *= -1
+ df = {}
+ for r in cds_positions.itertuples():
+ key = (r.index, r.series, r.tenor)
+ trade_df = cds_explain(r.index, r.series, r.tenor, r.attach, r.detach,
+ max(r.trade_date, pd.Timestamp(start_date)), end_date,
+ engine)
+        trade_df.loc[r.upfront_settle_date, 'realized_pnl'] = \
+            trade_df.loc[r.upfront_settle_date, 'clean_nav'] + \
+            trade_df.loc[r.upfront_settle_date, 'realized_pnl']
+ df[key] = df.get(key, 0) + r.notional * trade_df
+ return pd.concat(df)
if __name__=="__main__":
workdate = pd.datetime.today()
@@ -141,4 +165,5 @@ if __name__=="__main__":
df = pnl_explain_list(clo_list.identifier.tolist(), '2015-10-30', '2015-11-30', engine)
df = pd.concat(df)
df_agg = df.groupby(level=1).sum()
- cds_df = cds_explain('HY', 21, '5yr', 25, 35, '2014-07-18')
+ cds_df = cds_explain_strat('SER_IGCURVE', '2015-09-01', '2015-12-08', engine)
+ #cds_df = cds_explain('HY', 21, '5yr', 25, 35, '2014-07-18')