-rw-r--r--  notes.bib   | 22 ++++++++++++++++++++++
-rw-r--r--  related.tex |  9 ++++-----
2 files changed, 26 insertions(+), 5 deletions(-)
@@ -5,6 +5,28 @@
 publisher={Cambridge University Press}
 }
 
+@inproceedings{pranav,
+ author = {Dandekar, Pranav and Fawaz, Nadia and Ioannidis, Stratis},
+ title = {{Privacy Auctions for Recommender Systems}},
+ booktitle = {{Proc. WINE}},
+ year = {2012}
+}
+@inproceedings{ghosh-roth:privacy-auction,
+ author = {Ghosh, Arpita and Roth, Aaron},
+ title = {{Selling privacy at auction}},
+ booktitle = {{Proc. ACM EC}},
+ year = {2011},
+ isbn = {978-1-4503-0261-6},
+ location = {San Jose, California, USA},
+ pages = {199--208},
+ numpages = {10},
+ doi = {10.1145/1993574.1993605},
+ acmid = {1993605},
+ keywords = {auctions, differential privacy},
+}
+
+
+
 @article{approximatemechanismdesign,
 author = {Kobbi Nissim and
 Rann Smorodinsky and
diff --git a/related.tex b/related.tex
index cd27ed5..b514244 100644
--- a/related.tex
+++ b/related.tex
@@ -13,13 +13,12 @@
 a truthful, $O(\log^3 n)$-approximate mechanism \cite{dobz2011-mechanisms} as well as
 a universally truthful, $O(\frac{\log n}{\log \log n})$-approximate mechanism for subadditive
 maximization \cite{bei2012budget}. Moreover, in a Bayesian setup, assuming a prior distribution
 on the agents' costs, there exists a truthful mechanism with a 768/512-approximation ratio \cite{bei2012budget}. %(in terms of expectations)
- A series of recent papers \cite{mcsherrytalwar,approximatemechanismdesign,xiao:privacy-truthfulness,chen:privacy-truthfulness} consider the related problem of conducting retrieve data from an \textit{unverified} database: the buyer of the data cannot verify the data reported by individuals and therefore must incentivize them to report truthfully.
-McSherry and Talwar \cite{mcsherrytalwar} argue that \emph{differentially private} mechanisms also offer a form of \emph{approximate truthfulness}, as the mechanism's output is only slightly perturbed when an individual unilaterally changes her data value; as a result, reporting untruthfully can only increase an individual's utility by a small amount. Xiao \cite{xiao:privacy-truthfulness}, improving upon earlier work by Nissim \emph{et al.}~\cite{approximatemechanismdesign} construct mechanisms that
+ A series of recent papers \cite{mcsherrytalwar,approximatemechanismdesign,xiao:privacy-truthfulness,chen:privacy-truthfulness} consider the related problem of retrieving data from an \textit{unverified} database: the auctioneer cannot verify the data reported by individuals and therefore must incentivize them to report it truthfully.
+McSherry and Talwar \cite{mcsherrytalwar} argue that \emph{differentially private} mechanisms offer a form of \emph{approximate truthfulness}: if users have a utility that depends on their privacy, reporting their data untruthfully can only increase their utility by a small amount. Xiao \cite{xiao:privacy-truthfulness}, improving upon earlier work by Nissim \emph{et al.}~\cite{approximatemechanismdesign}, constructs mechanisms that
 simultaneously achieve exact truthfulness as well as differential privacy.
 Eliciting private data through a \emph{survey} \cite{roth-liggett}, whereby individuals
 first decide whether to participate in the survey and then report their data,
- also fall under the unverified database setting \cite{xiao:privacy-truthfulness}. Our work differs from the above works in that it does not capture user utility through differential privacy; crucially, we also assume that experiments conducted are \emph{tamper-proof}, and individuals can misreport their costs but not their values.
+ also falls under the unverified database setting \cite{xiao:privacy-truthfulness}. In the \emph{verified} database setting, Ghosh and Roth~\cite{ghosh-roth:privacy-auction} and Dandekar \emph{et al.}~\cite{pranav} consider budgeted auctions in which user utilities are again captured by differential privacy. Our work departs from the above setups in that utilities do not involve privacy, whose effects are assumed to be internalized in the costs reported by the users; crucially, we also assume that experiments are \emph{tamper-proof}, and individuals can misreport their costs but not their values.
 
-Our work is closest to Roth and Schoenebeck \cite{roth-schoenebeck}, who also consider how to sample individuals from a population to obtain an unbiased estimate of a reported value. The authors assume a prior on the joint distribution
-\stratis{to be continued}
+Our work is closest to the survey setup of Roth and Schoenebeck~\cite{roth-schoenebeck}, who also consider how to sample individuals with different features, each reporting a hidden value at a certain cost. The authors assume a prior on the joint distribution of costs and features, and wish to obtain an unbiased estimate of the expectation of the hidden value under the constraints of truthfulness, budget feasibility, and individual rationality. Our work departs in that we learn a more general statistic (a linear model) rather than a data mean. We note that, as in \cite{roth-schoenebeck}, costs and features can be arbitrarily correlated (our results are prior-free). %\stratis{TODO: privacy discussion. Logdet objective. Should be one paragraph each.}
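For reference, the approximate-truthfulness claim in the rewritten McSherry--Talwar sentence can be stated precisely as follows (a standard sketch; the symbols $M$, $u_i$, $x_i$, and $\epsilon$ are illustrative and do not appear in the patch). If the mechanism $M$ is $\epsilon$-differentially private and agent $i$'s utility $u_i$ takes values in $[0,1]$, then for any misreport $x_i'$ of her true data $x_i$,

\[
  \mathbb{E}\left[u_i\bigl(M(x_i', x_{-i})\bigr)\right] \;\le\; e^{\epsilon}\,\mathbb{E}\left[u_i\bigl(M(x_i, x_{-i})\bigr)\right],
\]

so a unilateral deviation increases expected utility by at most $e^{\epsilon}-1 \approx \epsilon$, which is the sense in which differential privacy yields approximate truthfulness.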
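Similarly, the unbiased-estimation goal attributed to Roth and Schoenebeck can be illustrated by a Horvitz--Thompson-style estimator, sketched here in our own notation ($\pi_i$, $v_i$, $S$, and $\hat{\mu}$ are not from the patch). If individual $i$ enters the sample $S$ independently with probability $\pi_i > 0$, chosen so that expected payments respect the budget, then the inverse-probability-weighted estimator

\[
  \hat{\mu} \;=\; \frac{1}{n}\sum_{i \in S} \frac{v_i}{\pi_i}
  \qquad\text{satisfies}\qquad
  \mathbb{E}[\hat{\mu}] \;=\; \frac{1}{n}\sum_{i=1}^{n} \pi_i \cdot \frac{v_i}{\pi_i} \;=\; \frac{1}{n}\sum_{i=1}^{n} v_i,
\]

i.e., it is unbiased for the population mean regardless of how costs and features are correlated.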
