\documentclass{acm_proc_article-sp}
\usepackage[utf8]{inputenc}
\usepackage{amsmath,amsfonts}
\usepackage{algorithm}
\usepackage{algpseudocode}
\newtheorem{lemma}{Lemma}
\newtheorem{fact}{Fact}
\newtheorem{example}{Example}
\newtheorem{prop}{Proposition}
\newtheorem{theorem}{Theorem}
\newcommand*{\defeq}{\stackrel{\text{def}}{=}}
\newcommand{\var}{\mathop{\mathrm{Var}}}
\newcommand{\condexp}[2]{\mathop{\mathbb{E}}\left[#1|#2\right]}
\newcommand{\expt}[1]{\mathop{\mathbb{E}}\left[#1\right]}
\newcommand{\norm}[1]{\lVert#1\rVert}
\newcommand{\tr}[1]{#1^*}
\newcommand{\ip}[2]{\langle #1, #2 \rangle}
\newcommand{\mse}{\mathop{\mathrm{MSE}}}
\DeclareMathOperator{\trace}{tr}
\DeclareMathOperator*{\argmax}{arg\,max}
\DeclareMathOperator*{\argmin}{arg\,min}
\begin{document}
\section{Budget feasible mechanism}
\subsection{Notations}
\begin{itemize}
\item If $x\in\mathbf{R}^d$, $x^*$ will denote the transpose of the vector $x$. If $(x, y)\in\mathbf{R}^d\times\mathbf{R}^d$, $x^*y$ is the standard inner product of $\mathbf{R}^d$.
\item We will often use the following order over symmetric matrices: if $A$ and $B$ are two $d\times d$ real symmetric matrices, we write $A\leq B$ iff:
\begin{displaymath}
\forall x\in\mathbf{R}^d,\quad x^*Ax \leq x^*Bx
\end{displaymath}
That is, iff $B-A$ is symmetric positive semi-definite.
\item Let us recall the following property of the order defined above: if $A$ and $B$ are two symmetric positive definite matrices such that $A\leq B$, then $B^{-1}\leq A^{-1}$.
\end{itemize}
\subsection{Data model}
\begin{itemize}
\item set of $n$ users $\mathcal{N} = \{1,\ldots, n\}$
\item each user has a public vector of features $x_i\in\mathbf{R}^d$ and some undisclosed variable $y_i\in\mathbf{R}$
\item we assume that the data has been normalized: $\forall i\in\mathcal{N}$, $\norm{x_i}\leq 1$
\item \textbf{Ridge regression model:}
\begin{itemize}
\item $y_i = \beta^*x_i + \varepsilon_i$
\item $\varepsilon_i \sim \mathcal{N}(0,\sigma^2)$, $(\varepsilon_i)_{i\in \mathcal{N}}$ are mutually independent.
\item prior knowledge of $\beta$: $\beta\sim\mathcal{N}(0,\kappa I_d)$
\item $\mu = \frac{\kappa}{\sigma^2}$ is the regularization factor.
\item after the variables $y_i$ are disclosed, the experimenter performs \emph{maximum a posteriori estimation}, which under a Gaussian prior is equivalent to solving the ridge regression minimisation problem:
\begin{displaymath}
\hat{\beta} = \argmin_{\beta\in\mathbf{R}^d} \sum_i (y_i - \beta^*x_i)^2 + \frac{1}{\mu}\norm{\beta}_2^2
\end{displaymath}
\end{itemize}
\end{itemize}
\subsection{Economics}
\textbf{Value function:} following experimental design theory, the value of data is the decrease in uncertainty about the model, where uncertainty is measured by the entropy. Thus, the value of a set $S$ of users is:
\begin{displaymath}
V(S) = \mathbb{H}(\beta) - \mathbb{H}(\beta|Y_S)
\end{displaymath}
where $Y_S = \{y_i,\,i\in S\}$ is the set of observations.
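For completeness, here is a short sketch of how this quantity is computed in the Gaussian model above. The entropy of a $d$-dimensional Gaussian vector $\mathcal{N}(m,\Sigma)$ is $\frac{1}{2}\log\big((2\pi e)^d\det\Sigma\big)$, and the posterior distribution of $\beta$ given $Y_S$ is Gaussian with covariance
\begin{displaymath}
\Big(\frac{1}{\kappa}I_d + \frac{1}{\sigma^2}\sum_{i\in S}x_ix_i^*\Big)^{-1}
= \kappa\Big(I_d + \mu\sum_{i\in S}x_ix_i^*\Big)^{-1}
\end{displaymath}
which does not depend on the observed values. Combining these two facts with the prior covariance $\kappa I_d$ yields the closed form given below.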
In our case (under Gaussian prior):
\begin{align*}
\forall S\subset\mathcal{N},\; V(S) & = \frac{1}{2}\log\det\left(I_d + \mu\sum_{i\in S} x_ix_i^*\right)\\
& \defeq \frac{1}{2}\log\det A(S)
\end{align*}
\begin{lemma}[Marginal contribution]
\begin{displaymath}
\Delta_i V(S)\defeq V(S\cup\{i\}) - V(S) = \frac{1}{2}\log\left(1 + \mu x_i^*A(S)^{-1}x_i\right)
\end{displaymath}
\end{lemma}
\begin{proof}
We have:
\begin{align*}
V(S\cup\{i\}) & = \frac{1}{2}\log\det A(S\cup\{i\})\\
& = \frac{1}{2}\log\det\left(A(S) + \mu x_i x_i^*\right)\\
& = V(S) + \frac{1}{2}\log\det\left(I_d + \mu A(S)^{-1}x_i x_i^*\right)\\
& = V(S) + \frac{1}{2}\log\left(1 + \mu x_i^* A(S)^{-1}x_i\right)
\end{align*}
where the last equality comes from Sylvester's determinant formula.
\end{proof}
\begin{itemize}
\item each user $i$ has a cost $c_i$
\item the auctioneer has a budget constraint $B$
\item optimisation problem:
\begin{displaymath}
OPT(V,\mathcal{N}, B) = \max_{S\subset\mathcal{N}} \left\{ V(S)\,|\, \sum_{i\in S}c_i\leq B\right\}
\end{displaymath}
\end{itemize}
\subsection{Relaxations of the value function}
We say that $R_\mathcal{N}:[0,1]^n\rightarrow\mathbf{R}$ is a relaxation of the value function $V$ over $\mathcal{N}$ if it coincides with $V$ at binary points. Formally, for any $S\subset\mathcal{N}$, let $\mathbf{1}_S$ denote the indicator vector of $S$. $R_\mathcal{N}$ is a relaxation of $V$ over $\mathcal{N}$ iff:
\begin{displaymath}
\forall S\subset\mathcal{N},\; R_\mathcal{N}(\mathbf{1}_S) = V(S)
\end{displaymath}
We can extend the optimisation problem defined above to a relaxation by extending the cost function:
\begin{displaymath}
\forall \lambda\in[0,1]^n,\; c(\lambda) = \sum_{i\in\mathcal{N}}\lambda_ic_i
\end{displaymath}
The optimisation problem becomes:
\begin{displaymath}
OPT(R_\mathcal{N}, B) = \max_{\lambda\in[0,1]^n}\left\{R_\mathcal{N}(\lambda)\,|\, c(\lambda)\leq B\right\}
\end{displaymath}
The relaxations we will consider here rely on defining a probability distribution over subsets of $\mathcal{N}$. Let $\lambda\in[0,1]^n$, and let us define:
\begin{displaymath}
P_\mathcal{N}^\lambda(S) = \prod_{i\in S}\lambda_i \prod_{i\in\mathcal{N}\setminus S}(1-\lambda_i)
\end{displaymath}
$P_\mathcal{N}^\lambda(S)$ is the probability of picking the set $S$ when we select a subset of $\mathcal{N}$ at random by deciding independently, for each point, to include it in the set with probability $\lambda_i$ (and to exclude it with probability $1-\lambda_i$). We will consider two relaxations of the value function $V$ over $\mathcal{N}$:
\begin{itemize}
\item the \emph{multi-linear extension} of $V$:
\begin{align*}
F_\mathcal{N}(\lambda) & = \mathbb{E}_{S\sim P_\mathcal{N}^\lambda}\big[V(S)\big]\\
& = \sum_{S\subset\mathcal{N}} P_\mathcal{N}^\lambda(S) V(S)\\
& = \frac{1}{2}\sum_{S\subset\mathcal{N}} P_\mathcal{N}^\lambda(S) \log\det A(S)
\end{align*}
\item the \emph{concave relaxation} of $V$:
\begin{align*}
L_{\mathcal{N}}(\lambda) & = \frac{1}{2}\log\det \mathbb{E}_{S\sim P_\mathcal{N}^\lambda}\big[A(S)\big]\\
& = \frac{1}{2}\log\det\left(\sum_{S\subset \mathcal{N}} P_\mathcal{N}^\lambda(S)A(S)\right)\\
& = \frac{1}{2}\log\det\left(I_d + \mu\sum_{i\in\mathcal{N}} \lambda_ix_ix_i^*\right)\\
& \defeq \frac{1}{2}\log\det \tilde{A}(\lambda)
\end{align*}
Both quantities indeed coincide with $V$ at binary points.
\end{itemize}
\begin{lemma}
The \emph{concave relaxation} $L_\mathcal{N}$ is concave\footnote{Hence this relaxation is well-named!}.
\end{lemma}
\begin{proof}
This follows from the concavity of the $\log\det$ function over symmetric positive definite matrices.
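For completeness, let us make the reduction explicit: the dependence of $\tilde{A}$ on $\lambda$ is affine, so for all $\lambda,\lambda'\in[0,1]^n$ and $\alpha\in[0,1]$,
\begin{displaymath}
\tilde{A}\big(\alpha\lambda + (1-\alpha)\lambda'\big) = \alpha\tilde{A}(\lambda) + (1-\alpha)\tilde{A}(\lambda')
\end{displaymath}
and the concavity of $L_\mathcal{N}$ follows from the concavity of $\log\det$ along the matrix segment joining $\tilde{A}(\lambda')$ to $\tilde{A}(\lambda)$.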
More precisely, if $A$ and $B$ are two symmetric positive definite matrices, then:
\begin{multline*}
\forall\alpha\in [0, 1],\; \log\det\big(\alpha A + (1-\alpha) B\big)\\
\geq \alpha\log\det A + (1-\alpha)\log\det B
\end{multline*}
\end{proof}
\begin{lemma}[Rounding]\label{lemma:rounding}
For any feasible $\lambda\in[0,1]^n$, there exists a feasible $\bar{\lambda}\in[0,1]^n$ such that at most one of its components is fractional, that is, lies in $(0,1)$, and:
\begin{displaymath}
F_{\mathcal{N}}(\lambda)\leq F_{\mathcal{N}}(\bar{\lambda})
\end{displaymath}
\end{lemma}
\begin{proof}
We give a rounding procedure which, given a feasible $\lambda$ with at least two fractional components, returns a feasible $\lambda'$ with one fewer fractional component such that:
\begin{displaymath}
F_\mathcal{N}(\lambda) \leq F_\mathcal{N}(\lambda')
\end{displaymath}
Applying this procedure recursively yields the lemma's result.

Let us consider such a feasible $\lambda$. Let $i$ and $j$ be two fractional components of $\lambda$ and let us define the following function:
\begin{displaymath}
F_\lambda(\varepsilon) = F_\mathcal{N}(\lambda_\varepsilon)
\quad\textrm{where} \quad
\lambda_\varepsilon = \lambda + \varepsilon\left(e_i-\frac{c_i}{c_j}e_j\right)
\end{displaymath}
Since this perturbation leaves the cost unchanged, $c(\lambda_\varepsilon) = c(\lambda)$, it is easy to see that if $\lambda$ is feasible, then:
\begin{multline}\label{eq:convex-interval}
\forall\varepsilon\in\Big[\max\Big(-\lambda_i,(\lambda_j-1)\frac{c_j}{c_i}\Big), \min\Big(1-\lambda_i, \lambda_j \frac{c_j}{c_i}\Big)\Big],\;\\
\lambda_\varepsilon\;\;\textrm{is feasible}
\end{multline}
Furthermore, the function $F_\lambda$ is convex; indeed:
\begin{align*}
F_\lambda(\varepsilon) & = \mathbb{E}_{S'\sim P_{\mathcal{N}\setminus\{i,j\}}^\lambda}\Big[ (\lambda_i+\varepsilon)\Big(\lambda_j-\varepsilon\frac{c_i}{c_j}\Big)V(S'\cup\{i,j\})\\
& + (\lambda_i+\varepsilon)\Big(1-\lambda_j+\varepsilon\frac{c_i}{c_j}\Big)V(S'\cup\{i\})\\
& + (1-\lambda_i-\varepsilon)\Big(\lambda_j-\varepsilon\frac{c_i}{c_j}\Big)V(S'\cup\{j\})\\
& + (1-\lambda_i-\varepsilon)\Big(1-\lambda_j+\varepsilon\frac{c_i}{c_j}\Big)V(S')\Big]
\end{align*}
Thus, $F_\lambda$ is a degree 2 polynomial in $\varepsilon$ whose leading coefficient is:
\begin{multline*}
\frac{c_i}{c_j}\mathbb{E}_{S'\sim P_{\mathcal{N}\setminus\{i,j\}}^\lambda}\Big[ V(S'\cup\{i\})+V(S'\cup\{j\})\\
-V(S'\cup\{i,j\})-V(S')\Big]
\end{multline*}
which is non-negative by submodularity of $V$. Hence, the maximum of $F_\lambda$ over the interval given in \eqref{eq:convex-interval} is attained at one of its endpoints, at which either the $i$-th or the $j$-th component of $\lambda_\varepsilon$ becomes integral; taking $\lambda' = \lambda_\varepsilon$ at this endpoint concludes the proof.
\end{proof}
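Before relating $F_\mathcal{N}$ and $L_\mathcal{N}$ in general, the following one-dimensional instance (a simple worked example, not used in the sequel) illustrates how the two relaxations compare.
\begin{example}
Take $n = d = 1$ and $x_1 = 1$, so that for $\lambda\in[0,1]$:
\begin{displaymath}
F_\mathcal{N}(\lambda) = \frac{\lambda}{2}\log(1+\mu)
\quad\textrm{and}\quad
L_\mathcal{N}(\lambda) = \frac{1}{2}\log(1+\lambda\mu)
\end{displaymath}
By concavity of $\lambda\mapsto\log(1+\lambda\mu)$, $F_\mathcal{N}\leq L_\mathcal{N}$ with equality at $\lambda\in\{0,1\}$, and the ratio $F_\mathcal{N}(\lambda)/L_\mathcal{N}(\lambda)$ is smallest in the limit $\lambda\rightarrow 0$, where it tends to $\log(1+\mu)/\mu$. Lemma~\ref{lemma:relaxation-ratio} below guarantees a lower bound of half this quantity on any instance.
\end{example}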
\begin{lemma}\label{lemma:relaxation-ratio}
The following inequality holds:
\begin{displaymath}
\forall\lambda\in[0,1]^n,\; \frac{\log\big(1+\mu\big)}{2\mu} \,L_\mathcal{N}(\lambda)\leq F_\mathcal{N}(\lambda)\leq L_{\mathcal{N}}(\lambda)
\end{displaymath}
\end{lemma}
\begin{proof}
The upper bound $F_\mathcal{N}\leq L_\mathcal{N}$ follows from Jensen's inequality applied to the concave function $\log\det$. For the lower bound, we will prove that:
\begin{displaymath}
\frac{\log\big(1+\mu\big)}{2\mu}
\end{displaymath}
is a lower bound of the ratio $\partial_i F_\mathcal{N}(\lambda)/\partial_i L_\mathcal{N}(\lambda)$. This will be enough to conclude, by observing that:
\begin{displaymath}
\frac{F_\mathcal{N}(\lambda)}{L_\mathcal{N}(\lambda)}
\sim_{\lambda\rightarrow 0}
\frac{\sum_{i\in \mathcal{N}}\lambda_i\partial_i F_\mathcal{N}(0)}
{\sum_{i\in\mathcal{N}}\lambda_i\partial_i L_\mathcal{N}(0)}
\end{displaymath}
and that an interior critical point of the ratio $F_\mathcal{N}(\lambda)/L_\mathcal{N}(\lambda)$ satisfies, for every $i$:
\begin{displaymath}
\frac{F_\mathcal{N}(\lambda)}{L_\mathcal{N}(\lambda)} = \frac{\partial_i F_\mathcal{N}(\lambda)}{\partial_i L_\mathcal{N}(\lambda)}
\end{displaymath}
Let us start by computing the derivatives of $F_\mathcal{N}$ and $L_\mathcal{N}$ with respect to the $i$-th component. For $F_\mathcal{N}$, it suffices to look at the derivative of $P_\mathcal{N}^\lambda(S)$:
\begin{displaymath}
\partial_i P_\mathcal{N}^\lambda(S) = \left\{
\begin{aligned}
& P_{\mathcal{N}\setminus\{i\}}^\lambda(S\setminus\{i\})\;\textrm{if}\; i\in S \\
& - P_{\mathcal{N}\setminus\{i\}}^\lambda(S)\;\textrm{if}\; i\in \mathcal{N}\setminus S \\
\end{aligned}\right.
\end{displaymath}
Hence:
\begin{multline*}
\partial_i F_\mathcal{N}(\lambda) = \sum_{\substack{S\subset\mathcal{N}\\ i\in S}} P_{\mathcal{N}\setminus\{i\}}^\lambda(S\setminus\{i\})V(S)\\
- \sum_{\substack{S\subset\mathcal{N}\\ i\in \mathcal{N}\setminus S}} P_{\mathcal{N}\setminus\{i\}}^\lambda(S)V(S)
\end{multline*}
Now, using that every $S$ such that $i\in S$ can be uniquely written as $S'\cup\{i\}$ with $i\notin S'$, we can write:
\begin{multline*}
\partial_i F_\mathcal{N}(\lambda) = \sum_{\substack{S\subset\mathcal{N}\\ i\in\mathcal{N}\setminus S}} P_{\mathcal{N}\setminus\{i\}}^\lambda(S)V(S\cup\{i\})\\
- \sum_{\substack{S\subset\mathcal{N}\\ i\in \mathcal{N}\setminus S}} P_{\mathcal{N}\setminus\{i\}}^\lambda(S)V(S)
\end{multline*}
Finally, by using the expression for the marginal contribution of $i$ to $S$:
\begin{displaymath}
\partial_i F_\mathcal{N}(\lambda) = \frac{1}{2}\sum_{\substack{S\subset\mathcal{N}\\ i\in\mathcal{N}\setminus S}} P_{\mathcal{N}\setminus\{i\}}^\lambda(S) \log\Big(1 + \mu x_i^*A(S)^{-1}x_i\Big)
\end{displaymath}
The computation of the derivative of $L_\mathcal{N}$ uses standard matrix calculus and gives:
\begin{displaymath}
\partial_i L_\mathcal{N}(\lambda) = \frac{\mu}{2}\, x_i^* \tilde{A}(\lambda)^{-1}x_i
\end{displaymath}
Using the following inequalities:
\begin{gather*}
\forall S\subset\mathcal{N}\setminus\{i\},\quad P_{\mathcal{N}\setminus\{i\}}^\lambda(S)\geq P_\mathcal{N}^\lambda(S)\\
\forall S\subset\mathcal{N}\setminus\{i\},\quad P_{\mathcal{N}\setminus\{i\}}^\lambda(S) \geq P_\mathcal{N}^\lambda(S\cup\{i\})\\
\forall S\subset\mathcal{N},\quad A(S)^{-1} \geq A(S\cup\{i\})^{-1}
\end{gather*}
we get, by splitting the sum defining $\partial_i F_\mathcal{N}$ into two equal halves and applying the first inequality to one half and the last two to the other:
\begin{align*}
\partial_i F_\mathcal{N}(\lambda) & \geq
\frac{1}{4} \sum_{\substack{S\subset\mathcal{N}\\ i\in\mathcal{N}\setminus S}} P_\mathcal{N}^\lambda(S) \log\Big(1 + \mu x_i^*A(S)^{-1}x_i\Big)\\
&\hspace{-3.5em}+\frac{1}{4} \sum_{\substack{S\subset\mathcal{N}\\ i\in\mathcal{N}\setminus S}} P_\mathcal{N}^\lambda(S\cup\{i\}) \log\Big(1 + \mu x_i^*A(S\cup\{i\})^{-1}x_i\Big)\\
&= \frac{1}{4} \sum_{S\subset\mathcal{N}} P_\mathcal{N}^\lambda(S) \log\Big(1 + \mu x_i^*A(S)^{-1}x_i\Big)
\end{align*}
Using that $A(S)\geq I_d$ and $\norm{x_i}\leq 1$, we get that:
\begin{displaymath}
\mu x_i^*A(S)^{-1}x_i \leq \mu
\end{displaymath}
Moreover, by concavity of the logarithm:
\begin{displaymath}
\forall x\in[0,\mu],\; \log(1+x)\geq \frac{\log\big(1+\mu\big)}{\mu} x
\end{displaymath}
Hence:
\begin{displaymath}
\partial_i F_\mathcal{N}(\lambda) \geq \frac{\log\big(1+\mu\big)}{4}\, x_i^*\bigg(\sum_{S\subset\mathcal{N}}P_\mathcal{N}^\lambda(S)A(S)^{-1}\bigg)x_i
\end{displaymath}
Finally, using that the matrix inverse is a matrix convex function over symmetric positive definite matrices:
\begin{align*}
\partial_i F_\mathcal{N}(\lambda) &\geq \frac{\log\big(1+\mu\big)}{4}\, x_i^*\bigg(\sum_{S\subset\mathcal{N}}P_\mathcal{N}^\lambda(S)A(S)\bigg)^{-1}x_i\\
& = \frac{\log\big(1+\mu\big)}{2\mu}\, \partial_i L_\mathcal{N}(\lambda)
\end{align*}
\end{proof}
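To give a rough sense of the constant appearing in Lemma~\ref{lemma:relaxation-ratio} (simple arithmetic, not used in the proofs): $\frac{\log(1+\mu)}{2\mu}$ tends to $\frac{1}{2}$ as $\mu\rightarrow 0$, equals $\frac{\log 2}{2}\approx 0.35$ for $\mu = 1$, and is approximately $0.12$ for $\mu = 10$, so the guarantee degrades slowly as the regularization factor grows.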
\begin{lemma}\label{lemma:opt-relaxation}
Let us denote by $C_\mu$ the constant which appears in the bound of Lemma~\ref{lemma:relaxation-ratio}: $C_\mu = \frac{\log(1+\mu)}{2\mu}$. Then:
\begin{displaymath}
OPT(L_\mathcal{N}, B) \leq \frac{1}{C_\mu}\big(2 OPT(V,\mathcal{N},B) + \max_{i\in\mathcal{N}}V(i)\big)
\end{displaymath}
\end{lemma}
\begin{proof}
Let us consider a feasible point $\lambda^*\in[0,1]^n$ such that $L_\mathcal{N}(\lambda^*) = OPT(L_\mathcal{N}, B)$. By applying Lemma~\ref{lemma:relaxation-ratio} and Lemma~\ref{lemma:rounding} we get a feasible point $\bar{\lambda}$ with at most one fractional component such that:
\begin{equation}\label{eq:e1}
L_\mathcal{N}(\lambda^*) \leq \frac{1}{C_\mu} F_\mathcal{N}(\bar{\lambda})
\end{equation}
If $\bar{\lambda}$ is integral the result follows immediately; otherwise, let $\lambda_i$ denote the fractional component of $\bar{\lambda}$ and $S$ denote the set whose indicator vector is $\bar{\lambda} - \lambda_i e_i$. Using the fact that $F_\mathcal{N}$ is linear with respect to the $i$-th component and is a relaxation of the value function, we get:
\begin{displaymath}
F_\mathcal{N}(\bar{\lambda}) = (1-\lambda_i) V(S) +\lambda_i V(S\cup\{i\}) \leq V(S) + \lambda_i V(S\cup\{i\})
\end{displaymath}
Using the submodularity of $V$, $V(S\cup\{i\})\leq V(S) + V(i)$, together with $\lambda_i\leq 1$:
\begin{displaymath}
F_\mathcal{N}(\bar{\lambda}) \leq 2 V(S) + V(i)
\end{displaymath}
Note that since $\bar{\lambda}$ is feasible, $S$ is also feasible and $V(S)\leq OPT(V,\mathcal{N}, B)$. Hence:
\begin{equation}\label{eq:e2}
F_\mathcal{N}(\bar{\lambda}) \leq 2 OPT(V,\mathcal{N}, B) + \max_{i\in\mathcal{N}} V(i)
\end{equation}
Putting \eqref{eq:e1} and \eqref{eq:e2} together gives the result.
\end{proof}
\subsection{The mechanism}
\begin{algorithm}
\caption{Budget feasible mechanism for ridge regression}\label{mechanism}
\begin{algorithmic}[1]
\State $i^* \gets \argmax_{j\in\mathcal{N}}V(j)$
\State $x^* \gets \argmax_{x\in[0,1]^n} \{L_{\mathcal{N}\setminus\{i^*\}}(x) \,|\, c(x)\leq B\}$
\Statex
\If{$L(x^*) < CV(i^*)$}
\State \textbf{return} $\{i^*\}$
\Else
\State $i \gets \argmax_{1\leq j\leq n}\frac{V(j)}{c_j}$
\State $S \gets \emptyset$
\While{$c_i\leq \frac{B}{2}\frac{V(S\cup\{i\})-V(S)}{V(S\cup\{i\})}$}
\State $S \gets S\cup\{i\}$
\State $i \gets \argmax_{j\in\mathcal{N}\setminus S} \frac{V(S\cup\{j\})-V(S)}{c_j}$
\EndWhile
\State \textbf{return} $S$
\EndIf
\end{algorithmic}
\end{algorithm}
The constant $C$ which appears in the mechanism is left unspecified for now: the analysis below holds regardless of its value, and the optimal value is given in the theorem at the end of this section.
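To fix ideas, the following short Python sketch (using \texttt{numpy}) implements the value oracle and the greedy phase of the mechanism, i.e., the \textbf{else} branch of Algorithm~\ref{mechanism}. It is only a reading aid under our own naming conventions: it omits the concave relaxation of line~2, which requires a generic convex optimisation solver, as well as the payment computation, and it assumes strictly positive costs.
\begin{verbatim}
import numpy as np

def value(X, S, mu):
    # V(S) = 1/2 * log det(I_d + mu * sum_{i in S} x_i x_i^T)
    d = X.shape[1]
    XS = X[sorted(S)]
    A = np.eye(d) + mu * XS.T @ XS
    return 0.5 * np.linalg.slogdet(A)[1]

def greedy_phase(X, c, B, mu):
    # Greedy selection with the proportional share stopping rule:
    # add the user maximising marginal value per cost as long as
    # c_i <= (B/2) * (V(S + i) - V(S)) / V(S + i).
    # X: (n, d) feature matrix, c: positive costs, B: budget.
    n = X.shape[0]
    S, remaining = set(), set(range(n))
    while remaining:
        vS = value(X, S, mu)
        i = max(remaining,
                key=lambda j: (value(X, S | {j}, mu) - vS) / c[j])
        vSi = value(X, S | {i}, mu)
        if vSi > 0 and c[i] <= (B / 2) * (vSi - vS) / vSi:
            S.add(i)
            remaining.remove(i)
        else:
            break
    return S
\end{verbatim}
Line~2 of the mechanism, the maximisation of the concave function $L_{\mathcal{N}\setminus\{i^*\}}$ over the polytope $\{x\in[0,1]^n,\, c(x)\leq B\}$, can be handled by any standard convex optimisation method; we do not detail it here.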
\begin{lemma}
The mechanism is monotone.
\end{lemma}
\begin{proof}
We assume by contradiction that there exists a user $i$ that has been selected by the mechanism and that would not be selected had he reported a cost $c_i'\leq c_i$ (all the other costs staying the same).

If $i\neq i^*$ and $i$ has been selected, then we are in the case where $L(x^*) \geq C V(i^*)$ and $i$ was included in the result set by the greedy part of the mechanism. Note first that reporting a lower cost can only enlarge the feasible set of the program defining $x^*$, so $L(x^*)$ can only increase and the mechanism remains in the greedy branch. Moreover, by reporting a cost $c_i'\leq c_i$, using the submodularity of $V$, we see that $i$ will satisfy the greedy selection rule:
\begin{displaymath}
i = \argmax_{j\in\mathcal{N}\setminus S} \frac{V(S\cup\{j\}) - V(S)}{c_j}
\end{displaymath}
in the same or an earlier iteration of the greedy heuristic. Let us denote by $S_i$ (resp. $S_i'$) the set to which $i$ is added when reporting cost $c_i$ (resp. $c_i'$). We have $S_i'\subset S_i$. Moreover:
\begin{align*}
c_i' & \leq c_i \leq \frac{B}{2}\frac{V(S_i\cup\{i\})-V(S_i)}{V(S_i\cup\{i\})}\\
& \leq \frac{B}{2}\frac{V(S_i'\cup\{i\})-V(S_i')}{V(S_i'\cup\{i\})}
\end{align*}
Hence $i$ will still be included in the result set.

If $i = i^*$, $i$ is included iff $L(x^*) < C V(i^*)$. Reporting $c_i'$ instead of $c_i$ changes neither $V(i^*)$ nor $L(x^*)$ (which is computed over $\mathcal{N}\setminus\{i^*\}$). Thus $i$ is still included when reporting a lower cost.
\end{proof}
\begin{lemma}
The mechanism is budget feasible.
\end{lemma}
\begin{lemma}
Let us denote by $S_M$ the set returned by the mechanism. Let us also write:
\begin{displaymath}
C_{\textrm{max}} = \max\left(1+C,\frac{e}{e-1}\left( 3 + \frac{12e}{C\cdot C_\mu(e-1) -5e +1}\right)\right)
\end{displaymath}
Then:
\begin{displaymath}
OPT(V, \mathcal{N}, B) \leq C_\text{max}\cdot V(S_M)
\end{displaymath}
\end{lemma}
\begin{proof}
If the condition on line 3 of the algorithm holds, then:
\begin{displaymath}
V(i^*) \geq \frac{1}{C}L(x^*) \geq \frac{1}{C}OPT(V,\mathcal{N}\setminus\{i^*\}, B)
\end{displaymath}
where the second inequality uses that $L_{\mathcal{N}\setminus\{i^*\}}$ upper bounds the relaxation $F_{\mathcal{N}\setminus\{i^*\}}$, which coincides with $V$ at binary points. But, by submodularity of $V$:
\begin{displaymath}
OPT(V,\mathcal{N},B) \leq OPT(V,\mathcal{N}\setminus\{i^*\}, B) + V(i^*)
\end{displaymath}
Hence:
\begin{displaymath}
V(i^*) \geq \frac{1}{C+1} OPT(V,\mathcal{N}, B)
\end{displaymath}
If the condition on line 3 does not hold, then $V(i^*)\leq\frac{1}{C}L(x^*)$, and applying Lemma~\ref{lemma:opt-relaxation} to $\mathcal{N}\setminus\{i^*\}$:
\begin{align*}
V(i^*) & \leq \frac{1}{C}L(x^*) \leq \frac{1}{C\cdot C_\mu} \big(2 OPT(V,\mathcal{N}, B) + V(i^*)\big)\\
& \leq \frac{1}{C\cdot C_\mu}\left(\frac{2e}{e-1}\big(3 V(S_M) + 2 V(i^*)\big) + V(i^*)\right)
\end{align*}
where the last inequality uses the following guarantee of the greedy procedure:
\begin{displaymath}
OPT(V,\mathcal{N},B) \leq \frac{e}{e-1}\big(3 V(S_M) + 2 V(i^*)\big)
\end{displaymath}
Thus:
\begin{displaymath}
V(i^*) \leq \frac{6e}{C\cdot C_\mu(e-1)- 5e + 1} V(S_M)
\end{displaymath}
Finally, using the above guarantee of the greedy procedure again, we get:
\begin{displaymath}
OPT(V, \mathcal{N}, B) \leq \frac{e}{e-1}\left( 3 + \frac{12e}{C\cdot C_\mu(e-1) -5e +1}\right) V(S_M)
\end{displaymath}
\end{proof}
The optimal value of $C$ is the one minimising $C_{\textrm{max}}$; since the first term of the maximum is increasing in $C$ and the second one is decreasing, it is obtained by equating the two terms:
\begin{displaymath}
1 + C = \frac{e}{e-1}\left( 3 + \frac{12e}{C\cdot C_\mu(e-1) -5e +1}\right)
\end{displaymath}
This equation has two solutions. Only one of them is such that:
\begin{displaymath}
C\cdot C_\mu(e-1) -5e +1 \geq 0
\end{displaymath}
which is needed in the proof of the previous lemma. Computing this solution $C^*$, we can state the main result of this section.
\begin{theorem}
The mechanism is individually rational, truthful and has an approximation ratio of:
\begin{multline*}
1 + C^* = \frac{5e-1 + C_\mu(4e-1)}{2C_\mu(e-1)}\\
+ \frac{\sqrt{C_\mu^2(1+2e)^2 + 2C_\mu(14e^2-3e+1) + (1-5e)^2}}{2C_\mu(e-1)}
\end{multline*}
\end{theorem}
\end{document}