As noted above, \EDP{} is NP-hard. Designing a mechanism for this problem, as
we will see in Section~\ref{sec:mechanism}, requires being able to approximate
its optimal value $OPT$ defined in \eqref{eq:non-strategic}. In addition to
being computable in polynomial time and having a bounded approximation ratio to
$OPT$, we will require this approximation to be non-increasing in the following
sense:

\begin{definition}
Let $f$ be a function from $\reals^n$ to $\reals$. We say that $f$ is
\emph{non-decreasing (resp. non-increasing) along the $i$-th coordinate} iff:
\begin{displaymath}
\forall x\in\reals^n,\;
t\mapsto f(x+ te_i)\; \text{is non-decreasing (resp. non-increasing)}
\end{displaymath}
where $e_i$ is the $i$-th canonical basis vector of $\reals^n$. 

We say that $f$ is \emph{non-decreasing} (resp. \emph{non-increasing}) iff it
is non-decreasing (resp. non-increasing) along all coordinates.
\end{definition}


Such an approximation will be obtained by introducing a concave optimization
problem with a constant approximation ratio to \EDP{}
(Proposition~\ref{prop:relaxation}) and then showing how to approximately solve
this problem in a monotone way.

\subsection{A Concave Relaxation of \EDP}\label{sec:concave}

A classical way of relaxing combinatorial optimization problems is 
\emph{relaxing by expectation}, using the so-called \emph{multi-linear}
extension of the objective function $V$.

Let $P_\mathcal{N}^\lambda(S)$ be the probability of choosing the
set $S$ if we select each element $i$ in $\mathcal{N}$ independently with
probability $\lambda_i$:
\begin{displaymath}
    P_\mathcal{N}^\lambda(S) \defeq \prod_{i\in S} \lambda_i
    \prod_{i\in\mathcal{N}\setminus S}( 1 - \lambda_i).
\end{displaymath}
Then, the \emph{multi-linear} extension $F$ of $V$ is defined as the
expectation of $V$ under the probability distribution $P_\mathcal{N}^\lambda$:
\begin{equation}\label{eq:multi-linear}
    F(\lambda) 
    \defeq \mathbb{E}_{S\sim P_\mathcal{N}^\lambda}\big[V(S)\big]
    = \sum_{S\subseteq\mathcal{N}} P_\mathcal{N}^\lambda(S) V(S)
\end{equation}

This function is an extension of $V$ in the following sense: $F(\id_S) = V(S)$ for all
$S\subseteq\mathcal{N}$, where $\id_S$ denotes the indicator vector of $S$.
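
For concreteness, here is a minimal Python sketch of \eqref{eq:multi-linear},
assuming the log-det form of $V$ from \eqref{modified} (the helper names are
ours); it evaluates $F$ by enumerating all $2^n$ subsets, which is exponential
in $n$:
\begin{verbatim}
import itertools
import numpy as np

def value_V(S, X):
    # V(S) = log det(I_d + sum_{i in S} x_i x_i^T)  (log-det form assumed here)
    d = X.shape[1]
    M = np.eye(d)
    for i in S:
        M += np.outer(X[i], X[i])
    return np.linalg.slogdet(M)[1]

def multilinear_F(lam, X):
    # F(lambda): expectation of V(S) when each i is included independently
    # with probability lambda_i; the sum below ranges over all subsets S.
    n = X.shape[0]
    total = 0.0
    for k in range(n + 1):
        for S in itertools.combinations(range(n), k):
            p = np.prod([lam[i] if i in S else 1.0 - lam[i] for i in range(n)])
            total += p * value_V(S, X)
    return total
\end{verbatim}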

\citeN{pipage} have shown how to use this extension to obtain approximation
guarantees for an interesting class of optimization problems through the
\emph{pipage rounding} framework, which has been successfully applied
by \citeN{chen, singer-influence}.

For the specific function $V$ defined in \eqref{modified}, the
multi-linear extension cannot be computed (and \emph{a fortiori} maximized) in
polynomial time. Hence, we introduce a new function $L$:
\begin{equation}\label{eq:our-relaxation}
\forall\,\lambda\in[0,1]^n,\quad L(\lambda) \defeq
\log\det\left(I_d + \sum_{i\in\mathcal{N}} \lambda_i x_i\T{x_i}\right).
\end{equation}
Note that this relaxation follows naturally from the \emph{multi-linear}
extension by swapping the
expectation and $V$ in \eqref{eq:multi-linear}:
\begin{displaymath}
    L(\lambda) = \log\det\left(\mathbb{E}_{S\sim
    P_\mathcal{N}^\lambda}\bigg[I_d + \sum_{i\in S} x_i\T{x_i} \bigg]\right).
\end{displaymath}
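
In contrast to $F$, evaluating $L$ at a given $\lambda$ only requires a single
$d\times d$ log-determinant; a minimal sketch, with the same conventions as
above:
\begin{verbatim}
def relaxation_L(lam, X):
    # L(lambda) = log det(I_d + sum_i lambda_i x_i x_i^T)
    d = X.shape[1]
    M = np.eye(d) + (X * np.asarray(lam)[:, None]).T @ X
    return np.linalg.slogdet(M)[1]
\end{verbatim}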

The optimization program \eqref{eq:non-strategic} extends naturally to such
a relaxation. We define:
\begin{equation}\tag{$P_c$}\label{eq:primal}
    L^*_c \defeq \max_{\lambda\in[0, 1]^{n}}
    \left\{L(\lambda) \Big| \sum_{i=1}^{n} \lambda_i c_i
    \leq B\right\}
\end{equation}
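
Since $\log\det$ is concave over positive definite matrices and the argument in
\eqref{eq:our-relaxation} is affine in $\lambda$, \eqref{eq:primal} is a concave
maximization problem over a polytope and can be handled by off-the-shelf convex
solvers. The following sketch uses the cvxpy modeling library purely for
illustration (the \texttt{alpha} parameter anticipates the perturbed problem of
Section~\ref{sec:monotonicity}); it is not the barrier method analyzed in
Lemma~\ref{lemma:barrier}:
\begin{verbatim}
import cvxpy as cp

def solve_primal(X, c, B, alpha=0.0):
    # Maximize L(lambda) over lambda in [alpha, 1]^n s.t. sum_i lambda_i c_i <= B.
    # alpha = 0 gives (P_c); alpha > 0 gives the perturbed problem (P_{c,alpha}).
    n, d = X.shape
    lam = cp.Variable(n)
    M = sum(lam[i] * np.outer(X[i], X[i]) for i in range(n)) + np.eye(d)
    problem = cp.Problem(cp.Maximize(cp.log_det(M)),
                         [lam >= alpha, lam <= 1, c @ lam <= B])
    problem.solve()
    return problem.value, lam.value
\end{verbatim}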

The key property of our relaxation $L$ is that it has a bounded approximation
ratio to the multi-linear relaxation $F$. This is one of our main technical
contributions; it is stated and proved in Lemma~\ref{lemma:relaxation-ratio} in
the Appendix. Moreover, the multi-linear relaxation $F$ has an exchange
property (see Lemma~\ref{lemma:rounding}) which allows us to relate its value to
$OPT$ through rounding. Together, these properties yield the following
proposition, which is also proved in the Appendix.

\begin{proposition}\label{prop:relaxation}
$L^*_c \leq 2 OPT + 2\max_{i\in\mathcal{N}}V(i)$.
\end{proposition}

\subsection{Solving a Convex Problem Monotonically}\label{sec:monotonicity}

Note that the feasible set of Problem~\eqref{eq:primal} increases (with respect
to inclusion) when the costs decrease. As a consequence, $c\mapsto L^*_c$ is
non-increasing.

Furthermore, since \eqref{eq:primal} is a convex optimization problem, standard
convex optimization algorithms (Lemma~\ref{lemma:barrier} gives a formal
statement for our specific problem) allow us to compute an
$\varepsilon$-accurate approximation of its optimal value, as defined below.

\begin{definition}
$a$ is an $\varepsilon$-accurate approximation of $b$ iff $|a-b|\leq \varepsilon$.
\end{definition}

Note, however, that an $\varepsilon$-accurate approximation of a non-increasing
function is not, in general, non-increasing itself. The goal of this section is
to approximate $L^*_c$ while preserving monotonicity. The estimator we
construct satisfies a weaker form of monotonicity that we call
\emph{$\delta$-monotonicity}.

\begin{definition}
Let $f$ be a function from $\reals^n$ to $\reals$. We say that $f$ is
\emph{$\delta$-increasing along the $i$-th coordinate} iff:
\begin{equation}\label{eq:dd}
    \forall x\in\reals^n,\;
    \forall \mu\geq\delta,\;
    f(x+\mu e_i)\geq f(x)
\end{equation}
where $e_i$ is the $i$-th canonical basis vector of $\reals^n$. By extension,
$f$ is $\delta$-increasing iff it is $\delta$-increasing along all coordinates.

We define \emph{$\delta$-decreasing} functions by reversing the inequality in
\eqref{eq:dd}.
\end{definition}

We consider a perturbation of \eqref{eq:primal} by introducing:
\begin{equation}\tag{$P_{c, \alpha}$}\label{eq:perturbed-primal}
    L^*_c(\alpha) \defeq \max_{\lambda\in[\alpha, 1]^{n}}
    \left\{L(\lambda) \Big| \sum_{i=1}^{n} \lambda_i c_i
    \leq B\right\}
\end{equation}
Note that we have $L^*_c = L^*_c(0)$.  We will also assume that
$\alpha<\frac{1}{nB}$ so that \eqref{eq:perturbed-primal} has at least one
feasible point: $(\frac{1}{nB},\ldots,\frac{1}{nB})$. By computing
an approximation of $L^*_c(\alpha)$ as in Algorithm~\ref{alg:monotone}, we
obtain a $\delta$-decreasing approximation of $L^*_c$.

\begin{algorithm}[t]
    \caption{Monotone approximation of $L^*_c$}\label{alg:monotone}
    \begin{algorithmic}[1]
        \State $\alpha \gets \varepsilon B^{-1}(\delta+n^2)^{-1} $ 

        \State Compute a $\frac{1}{2^{n+1}}\alpha\delta b$-accurate approximation of
        $L^*_c(\alpha)$
    \end{algorithmic}
\end{algorithm}
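
For illustration, a minimal sketch of Algorithm~\ref{alg:monotone}, reusing the
solver sketched in Section~\ref{sec:concave} as a stand-in for the barrier
method of Lemma~\ref{lemma:barrier} (which is what actually provides the
prescribed accuracy guarantee); \texttt{b} denotes the same constant as in
line~2 of the algorithm:
\begin{verbatim}
def monotone_estimate(X, c, B, eps, delta, b):
    # Line 1: alpha <- eps * B^{-1} * (delta + n^2)^{-1}
    n = X.shape[0]
    alpha = eps / (B * (delta + n ** 2))
    # Line 2: compute a (alpha * delta * b / 2^{n+1})-accurate approximation
    # of L^*_c(alpha); here solve_primal only stands in for the accurate solver.
    target_accuracy = alpha * delta * b / (2 ** (n + 1))
    value, _ = solve_primal(X, c, B, alpha=alpha)
    return value, target_accuracy
\end{verbatim}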

\begin{proposition}\label{prop:monotonicity}
    For any $\delta\in(0,1]$ and any $\varepsilon\in(0,1]$,
    Algorithm~\ref{alg:monotone} computes a $\delta$-decreasing,
    $\varepsilon$-accurate approximation of $L^*_c$. The running time of the
    algorithm is $O\big(\mathrm{poly}(n, d, \log\log\frac{1}{b\varepsilon\delta})\big)$.
\end{proposition}