{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from utils.db import dbconn, dbengine\n", "\n", "from matplotlib.pyplot import hist\n", "import matplotlib.pyplot as plt\n", "\n", "import pandas as pd\n", "import numpy as np\n", "\n", "etengine = dbengine('etdb')\n", "\n", "%matplotlib inline" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "value_date = pd.datetime.today().date()\n", "date_range = pd.bdate_range(end=value_date, freq='3BM',periods=12)\n", "sql_string = \"SELECT c.loanxid, c.issuername, c.dealname, c.facility_type, c.loanx_facility_type, \" \\\n", " \"c.initial_amount, c.initial_spread, c.maturity, c.industry, b.bid, b.offer, b.depth, a.latestdate \" \\\n", " \"FROM ( SELECT markit_prices.pricingdate AS latestdate, \" \\\n", " \"markit_prices.loanxid as loanxid_a FROM markit_prices \" \\\n", " \"where pricingdate = %s GROUP BY markit_prices.loanxid, latestdate) a \" \\\n", " \"JOIN markit_prices b ON loanxid_a = b.loanxid::text AND a.latestdate = b.pricingdate \" \\\n", " \"JOIN latest_markit_facility c ON loanxid_a = c.loanxid::text;\"\n", "df = pd.DataFrame()\n", "for d in date_range:\n", " df = df.append(pd.read_sql_query(sql_string, etengine, params=[d,]))\n", "df.sort_values(by='latestdate', inplace=True)\n", "df['mid'] = (df['bid'] + df['offer'])/2\n", "df = df[df['facility_type']!='Equity']\n", "df['mv'] = df['initial_amount'] *1e6 * df['mid']/100" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "hist_bins = np.linspace(50, 110, 13)\n", "hist_bins = np.insert(hist_bins, 0, 0)\n", "df['price_bucket'] = pd.cut(df['mid'], hist_bins)\n", " \n", "hist_per = df.groupby(['latestdate', 'price_bucket']).agg({'mv': 'sum'})\n", "hist_per = hist_per.groupby(level=0).apply(lambda x: x / float(x.sum()))\n", "hist_per.unstack().plot(kind = 'bar', stacked=True)\n", "plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#Distressed - Industry breakdown\n", "industry_hist = df[df['mid']<80].groupby(['latestdate', 'industry']).agg({'mv': 'sum'})\n", "industry_hist = industry_hist.groupby(level=0).apply(lambda x: x / float(x.sum()))\n", "top = industry_hist.groupby('latestdate').head(20)\n", "top.unstack().plot(kind = 'bar', stacked=True)\n", "plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#% under 80/90\n", "percent_under_80 = df[df['mid']<80].groupby(['latestdate']).agg({'mv': 'sum'})/df.groupby(['latestdate']).agg({'mv': 'sum'})\n", "percent_under_90 = df[df['mid']<90].groupby(['latestdate']).agg({'mv': 'sum'})/df.groupby(['latestdate']).agg({'mv': 'sum'})\n", "#wtd average prices\n", "df['wtd_avg'] = df['mv'] * df['mid']\n", "wtd_prices = df.dropna().groupby(['latestdate']).agg({'wtd_avg': 'sum'}).join(df.dropna().groupby(['latestdate']).agg({'mv': 'sum'}))\n", "wtd_prices['price'] = wtd_prices['wtd_avg']/wtd_prices['mv']\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", 
"version": "3.8.0" } }, "nbformat": 4, "nbformat_minor": 4 }