From 16493ef0bb95d1faf1a00d67682bca889ed8c55c Mon Sep 17 00:00:00 2001
From: jeanpouget-abadie
Date: Fri, 11 Dec 2015 16:14:54 -0500
Subject: conclusion of bayesian stuff

---
 finale/sections/bayesian.tex | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/finale/sections/bayesian.tex b/finale/sections/bayesian.tex
index 1426c97..6e44b08 100644
--- a/finale/sections/bayesian.tex
+++ b/finale/sections/bayesian.tex
@@ -119,6 +119,19 @@ counterpart.
 \end{split}
 \end{equation}
 
-Reparametrization trick
-Batches
-Algorithm
+Once again, the product form of the prior allows us to decompose the expected
+log-likelihood term as follows:
+\begin{equation}
+  \begin{split}
+    &\mathbb{E}_{q_{\mathbf{\Theta'}}} \log \mathcal{L}(\mathbf{x}_c |
+    \mathbf{\Theta}) = - \sum_{j,c,t} \mu_j \cdot x_c^t \\
+    &+ \sum_{j, c, t} \mathbb{E}_{\theta_j \sim \mathcal{N}(\vec 0_n, I_n)}
+    \left[ (x_c^{t+1})_j \log \left(1 - e^{-(\sigma_j \cdot \theta_j + \mu_j)
+    \cdot x_c^t}\right) \right]
+  \end{split}
+\end{equation}
+
+Note that by reparametrizing $\theta_j' = \sigma_j \cdot \theta_j + \mu_j$,
+where $\sigma_j \cdot \theta_j$ denotes elementwise multiplication, we can
+easily compute the gradient of the variational objective with respect to
+$\mu_j$ and $\sigma_j$.
--
cgit v1.2.3-70-g09d2
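
Editor's note on the reparametrization step added by this patch: substituting
$\theta_j' = \sigma_j \cdot \theta_j + \mu_j$ moves the expectation onto a fixed
standard normal, so a Monte Carlo estimate of the objective becomes a
deterministic, differentiable function of $\mu_j$ and $\sigma_j$ (a pathwise
gradient, which typically has far lower variance than a score-function /
REINFORCE estimator). The NumPy sketch below illustrates the resulting
estimator; it is not the authors' implementation. The names (mu, sigma, x,
n_samples), the shape conventions (x[c, t] as the binary activation vector of
cascade c at step t; row j of mu and sigma as the variational parameters for
theta_j'), and the guard against log(0) are all assumptions made for
illustration.

import numpy as np

# Illustrative sketch only -- not the paper's code. Assumed conventions:
#   x[c, t]          : binary activation vector (length n) of cascade c at step t
#   mu[j], sigma[j]  : mean / scale of the Gaussian variational posterior
#                      over theta_j' (each a length-n row of an (n, n) array)
def reparam_objective(mu, sigma, x, n_samples=10, rng=None):
    """Monte Carlo estimate of the expected log-likelihood above and its
    pathwise gradients w.r.t. (mu, sigma), via theta' = sigma * eps + mu."""
    rng = np.random.default_rng() if rng is None else rng
    n_cascades, T, n = x.shape
    ll, g_mu, g_sigma = 0.0, np.zeros_like(mu), np.zeros_like(sigma)
    for c in range(n_cascades):
        for t in range(T - 1):
            xt, xt1 = x[c, t], x[c, t + 1]
            # Linear term in closed form: E[theta_j' . x_c^t] = mu_j . x_c^t,
            # i.e. the -sum_j mu_j . x_c^t term of the decomposition.
            ll -= (mu @ xt).sum()
            g_mu -= np.tile(xt, (n, 1))
            fired = xt1.astype(bool)
            for _ in range(n_samples):
                eps = rng.standard_normal(mu.shape)  # eps ~ N(0, I)
                theta = sigma * eps + mu             # the reparametrization
                s = theta @ xt                       # s[j] = theta_j' . x_c^t
                act = fired & (s > 0)   # guard: log(0) when no active parent
                p = -np.expm1(-s[act])  # 1 - exp(-s), numerically stable
                ll += np.log(p).sum() / n_samples
                # d/ds log(1 - e^{-s}) = e^{-s} / (1 - e^{-s})
                w = np.exp(-s[act]) / p
                d_theta = (w[:, None] * xt) / n_samples
                g_mu[act] += d_theta                # d theta'/d mu    = 1
                g_sigma[act] += d_theta * eps[act]  # d theta'/d sigma = eps
    return ll, g_mu, g_sigma

With these gradients in hand, a plain gradient-ascent step would be
mu += lr * g_mu; sigma += lr * g_sigma; keeping the scales positive, e.g. by
optimizing a softplus or log parametrization of sigma instead, is a common
practical choice.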