-rw-r--r--  python/mark_backtest_backfill.py           | 47
-rw-r--r--  python/mark_backtest_underpar.py           | 87
-rw-r--r--  python/notebooks/Valuation Backtest.ipynb  | 43
3 files changed, 98 insertions(+), 79 deletions(-)
diff --git a/python/mark_backtest_backfill.py b/python/mark_backtest_backfill.py
index d44e2155..4fdadac7 100644
--- a/python/mark_backtest_backfill.py
+++ b/python/mark_backtest_backfill.py
@@ -107,7 +107,7 @@ def get_globs():
basedir, year, "{}_*/{}*/ReviewedPack*.xlsx".format(year, year)
)
)
- for year in ["2015", "2016", "2017", "2018", "2019"]
+ for year in ["2015", "2016", "2017", "2018", "2019", "2020"]
]
for year in ["2013", "2014"]:
globs.append(
@@ -123,6 +123,51 @@ def get_globs():
settings = {
+ "ReviewedPack.jmircovi.SERENITAS.SERCGMAST.20200601.20200630.Draft.xlsx": (
+ "JA:JX",
+ "Securities Valuation Details",
+ "Y",
+ ),
+ "ReviewedPack.jmircovi.SERENITAS.SERCGMAST.20200501.20200531.Draft.xlsx": (
+ "JA:JX",
+ "Securities Valuation Details",
+ "Y",
+ ),
+ "ReviewedPack.mkhan.SERENITAS.SERCGMAST.20200401.20200430.Draft.xlsx": (
+ "JA:JX",
+ "Securities Valuation Details",
+ "Y",
+ ),
+ "ReviewedPack.jmircovi.SERENITAS.SERCGMAST.20200301.20200331.Draft.xlsx": (
+ "JA:JX",
+ "Securities Valuation Details",
+ "Y",
+ ),
+ "ReviewedPack.mkhan.SERENITAS.SERCGMAST.20200201.20200229.Draft.xlsx": (
+ "JA:JX",
+ "Securities Valuation Details",
+ "Y",
+ ),
+ "ReviewedPack.mkhan.SERENITAS.SERCGMAST.20200101.20200131.Draft.xlsx": (
+ "JA:JX",
+ "Securities Valuation Details",
+ "Y",
+ ),
+ "ReviewedPack.jmircovi.SERENITAS.SERCGMAST.20191201.20191231.Draft.xlsx": (
+ "JA:JX",
+ "Securities Valuation Details",
+ "Y",
+ ),
+ "ReviewedPack.jmircovi.SERENITAS.SERCGMAST.20191101.20191130.Draft.xlsx": (
+ "JA:JX",
+ "Securities Valuation Details",
+ "Y",
+ ),
+ "ReviewedPack.jmircovi.SERENITAS.SERCGMAST.20191001.20191031.Draft.xlsx": (
+ "JA:JX",
+ "Securities Valuation Details",
+ "Y",
+ ),
"ReviewedPack.jmircovi.SERENITAS.SERCGMAST.20190901.20190930.Draft.xlsx": (
"JA:JX",
"Securities Valuation Details",
diff --git a/python/mark_backtest_underpar.py b/python/mark_backtest_underpar.py
index 401ef81c..821d86a7 100644
--- a/python/mark_backtest_underpar.py
+++ b/python/mark_backtest_underpar.py
@@ -5,13 +5,11 @@ import matplotlib.pyplot as plt
import statsmodels.api as sm
import seaborn as sb
import datetime
+import globeop_reports as ops
from statsmodels.formula.api import gls
-
from utils.db import serenitas_engine, dawn_engine, DataError
-import globeop_reports as ops
-
def get_mark_df(asset_class="Subprime"):
# It used to be >1000 but as of 12/17/2018 changed it to 3000
@@ -22,7 +20,10 @@ def get_mark_df(asset_class="Subprime"):
df_trades = pd.DataFrame()
for date in df_external_marks.date.unique():
df_temp = pd.read_sql_query(
- "select description, identifier, usd_market_value/price*100 as curr_ntl from risk_positions(%s, %s) where price >0 and length(identifier) = 9 ",
+ "select description, identifier, "
+ "usd_market_value/price*100 as curr_ntl "
+ "from risk_positions(%s, %s) "
+ "where price >0 and length(identifier) = 9 ",
dawn_engine,
params=[date, asset_class],
)
@@ -32,20 +33,21 @@ def get_mark_df(asset_class="Subprime"):
return df.set_index(["date", "identifier"])
-def calc_mark_diff(
- df,
- sources=[
- "PRICESERVE",
- "PRICINGDIRECT",
- "BVAL",
- "MARKIT",
- "BROKER",
- "REUTERS",
- "S&P",
- "IDC",
- ],
-):
+def calc_mark_diff(df=None, sources=None):
+ if sources is None:
+ sources = [
+ "PRICESERVE",
+ "PRICINGDIRECT",
+ "BVAL",
+ "MARKIT",
+ "BROKER",
+ "REUTERS",
+ "S&P",
+ "IDC",
+ ]
+ if df is None:
+ df = get_mark_df()
# All Sources (including manager...?!) - average, manager mark only, median, closest
g = df.groupby(level=["date", "identifier"])
diff = g.mean()
@@ -72,11 +74,8 @@ def calc_mark_diff(
def closest(x):
- if x.mark.count() > 1:
- x["dist"] = abs(x.mark - x.mark[x.source == "MANAGER"])
- return x.mark[x.dist == x.dist[x.dist > 0].min()].iloc[0]
- else:
- return x.mark[0]
+ x["dist"] = abs(x.mark - x.mark[x.source == "MANAGER"]).fillna(0)
+ return x.mark[x.dist == x.dist.min()].iloc[0]
def remove_max_min(x):
@@ -86,8 +85,10 @@ def remove_max_min(x):
return x.mean()
-def diff_by_source(df):
+def diff_by_source(df=None):
# diff_by_source: input get_mark_df(), calculate the pricing by source
+ if df is None:
+ df = get_mark_df()
df = df.drop("description", 1)
df = df.set_index(["source"], append=True).apply(
lambda x: x.curr_ntl * x.mark / 100, axis=1
@@ -97,24 +98,11 @@ def diff_by_source(df):
return df.groupby(level="date").sum()
-def diff_by_source_percentage(df):
- df = diff_by_source(df)
- df = df.join(ops.get_net_navs()["endbooknav"])
- df = df.apply(lambda x: (x / x.endbooknav), axis=1)
- del df["endbooknav"]
- return df
-
-
-def count_sources(df):
- # input get_mark_df(), plot count of each source
- g2 = df.set_index("source", append=True).groupby(level=["date", "source"])
- # there are a good amount of Bloomberg duplicates, not a big deal but should clean them up
- g2["mark"].count().unstack(-1).plot()
-
-
-def alt_navs():
- navs = ops.get_net_navs()
- df = calc_mark_diff(get_mark_df())
+def alt_navs(df=None):
+ navs = ops.get_net_navs().sort_index()
+ if df is None:
+ df = get_mark_df()
+ df = calc_mark_diff(df)
end_nav, beg_nav, returns, nav_100 = (
pd.DataFrame(),
pd.DataFrame(),
@@ -125,28 +113,13 @@ def alt_navs():
end_nav[col] = df[col] + navs.endbooknav
beg_nav[col] = end_nav[col].shift(1) + navs.net_flow.shift(1)
beg_nav[col].iloc[0] = 12500000
- returns[col] = (end_nav[col] - navs.incentive) / beg_nav[col] - 1
+ returns[col] = (end_nav[col] - navs.incentive_fee) / beg_nav[col] - 1
for i, row in returns.dropna().reset_index().iterrows():
nav_100.iloc[i] = 100 if i == 0 else nav_100.iloc[i - 1]
nav_100.iloc[i] = nav_100.iloc[i] * (1 + returns.iloc[i])
return returns, nav_100
-def annual_performance(nav_100):
- perf = nav_100.groupby(pd.Grouper(freq="A")).last()
- perf_ann = perf / perf.shift(1) - 1
- perf_ann["2013"] = perf["2013"] / 100 - 1
- return perf_ann
-
-
-def alt_nav_impact():
- navs = ops.get_net_navs()
- df = calc_mark_diff(get_mark_df())
- df = df.join(navs.endbooknav)
- return df
- # return df.iloc[-1]/df.iloc[-1]['endbooknav']
-
-
def back_test(
begindate="2013-01-01", enddate=datetime.date.today(), sell_price_threshold=200
):
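
Taken together, the mark_backtest_underpar.py changes replace the long default-argument list on calc_mark_diff with a None sentinel resolved inside the function, let each helper fall back to get_mark_df() when no frame is passed, and drop the helpers that the notebook now inlines. A usage sketch of the refactored API follows; the module alias and call order are illustrative assumptions, not code from the commit.

# Usage sketch; the alias and call sequence are assumptions.
import mark_backtest_underpar as mark

df = mark.get_mark_df("Subprime")      # marks by (date, identifier) and source
diff = mark.calc_mark_diff(df)         # source list now defaults inside the function
by_source = mark.diff_by_source(df)    # dollar difference per pricing source
returns, nav_100 = mark.alt_navs(df)   # alternative-NAV returns and a series rebased to 100
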
diff --git a/python/notebooks/Valuation Backtest.ipynb b/python/notebooks/Valuation Backtest.ipynb
index 92549418..dfddbf86 100644
--- a/python/notebooks/Valuation Backtest.ipynb
+++ b/python/notebooks/Valuation Backtest.ipynb
@@ -23,7 +23,11 @@
"metadata": {},
"outputs": [],
"source": [
- "date = datetime.date.today() - pd.tseries.offsets.MonthEnd(1)"
+ "date = datetime.date.today() - pd.tseries.offsets.MonthEnd(1)\n",
+ "navs = ops.get_net_navs()\n",
+ "subprime = mark.get_mark_df('Subprime')\n",
+ "clo = mark.get_mark_df('CLO')\n",
+ "df = subprime.append(clo)"
]
},
{
@@ -86,17 +90,9 @@
"metadata": {},
"outputs": [],
"source": [
- "#Now Calculate alternate valuation methodologies\n",
- "df = mark.get_mark_df()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "mark.count_sources(df)"
+ "#source counts\n",
+ "g2 = df.set_index(\"source\", append=True).groupby(level=[\"date\", \"source\"])\n",
+ "g2[\"mark\"].count().unstack(-1).plot()"
]
},
{
@@ -106,8 +102,10 @@
"outputs": [],
"source": [
"#difference by source\n",
- "nav = ops.get_net_navs()['endbooknav']\n",
- "difference = mark.diff_by_source_percentage(df)\n",
+ "difference = mark.diff_by_source(df)\n",
+ "difference = difference.join(navs['endbooknav'])\n",
+ "difference = difference.apply(lambda x: (x / x.endbooknav), axis=1)\n",
+ "del difference[\"endbooknav\"]\n",
"#difference.to_clipboard()"
]
},
@@ -141,7 +139,7 @@
"metadata": {},
"outputs": [],
"source": [
- "results = mark.alt_navs()"
+ "results = mark.alt_navs(df)"
]
},
{
@@ -189,7 +187,10 @@
"outputs": [],
"source": [
"#Annual Return using different methodology\n",
- "round(mark.annual_performance(results[1])*100,2)"
+ "perf = results[1].groupby(pd.Grouper(freq=\"A\")).last()\n",
+ "perf_ann = perf / perf.shift(1) - 1\n",
+ "perf_ann[\"2013\"] = perf[\"2013\"] / 100 - 1\n",
+ "round(perf_ann*100,2)"
]
},
{
@@ -209,8 +210,9 @@
"outputs": [],
"source": [
"#A positive impact % means the alternative methodology results in a higher NAV than the fund's valuation policy.\n",
- "df = mark.alt_nav_impact()\n",
- "round(pd.DataFrame(df.iloc[-1]/df.iloc[-1]['endbooknav'])*100,2)"
+ "alt_nav_impact = mark.calc_mark_diff(df)\n",
+ "alt_nav_impact = alt_nav_impact.join(navs.endbooknav)\n",
+ "round(pd.DataFrame(alt_nav_impact.iloc[-1]/alt_nav_impact.iloc[-1]['endbooknav'])*100,2)"
]
},
{
@@ -220,12 +222,11 @@
"outputs": [],
"source": [
"#% impact historical: positive % means the alternative methodology results in a higher NAV\n",
- "nav_impact = df.divide(df.endbooknav, axis=0)\n",
+ "nav_impact = alt_nav_impact.divide(alt_nav_impact.endbooknav, axis=0)\n",
"to_plot = ['mark_closest_all', 'mark_filtered_mean']\n",
"nav_impact = nav_impact[to_plot].rename(columns={'mark_closest_all': 'mark to closest', \n",
" 'mark_filtered_mean': 'mark to mean'})\n",
"ax = nav_impact.plot()\n",
- "ax.figure.set_figheight(7)\n",
"ax.figure.savefig(\"/home/serenitas/edwin/PythonGraphs/Valuation_3.png\", bbox_extra_artists=(lgd,), bbox_inches='tight')"
]
},
@@ -298,7 +299,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.7.4"
+ "version": "3.8.1"
}
},
"nbformat": 4,