%%% -*-BibTeX-*-
%%% ====================================================================
%%% BibTeX-file{
%%% author = "Nelson H. F. Beebe",
%%% version = "1.07",
%%% date = "31 July 2015",
%%% time = "19:35:00 MDT",
%%% filename = "teac.bib",
%%% address = "University of Utah
%%% Department of Mathematics, 110 LCB
%%% 155 S 1400 E RM 233
%%% Salt Lake City, UT 84112-0090
%%% USA",
%%% telephone = "+1 801 581 5254",
%%% FAX = "+1 801 581 4148",
%%% URL = "http://www.math.utah.edu/~beebe",
%%% checksum = "55331 3015 17178 157754",
%%% email = "beebe at math.utah.edu, beebe at acm.org,
%%% beebe at computer.org (Internet)",
%%% codetable = "ISO/ASCII",
%%% keywords = "bibliography; BibTeX; ACM Transactions on
%%% Economics and Computation (TEAC)",
%%% license = "public domain",
%%% supported = "no",
%%% docstring = "This is a COMPLETE BibTeX bibliography for
%%% the journal ACM Transactions on Economics and
%%% Computation (no CODEN, ISSN 2167-8375
%%% (print), 2167-8383 (electronic)), for
%%% 2013--date.
%%%
%%% Publication began with volume 1, number 1,
%%% in January 2013. The journal appears
%%% quarterly, in January, May, September, and
%%% December.
%%%
%%% The journal has a World-Wide Web site at:
%%%
%%% http://teac.acm.org/
%%%
%%% Tables-of-contents of all issues are
%%% available at:
%%%
%%% http://dl.acm.org/citation.cfm?id=2542174
%%%
%%% Qualified subscribers can retrieve the full
%%% text of recent articles in PDF form.
%%%
%%% At version 1.07, the COMPLETE journal
%%% coverage looked like this:
%%%
%%% 2013 ( 21) 2014 ( 17) 2015 ( 26)
%%%
%%% Article: 64
%%%
%%% Total entries: 64
%%%
%%% Spelling has been verified with the UNIX
%%% spell and GNU ispell programs using the
%%% exception dictionary stored in the
%%% companion file with extension .sok.
%%%
%%% BibTeX citation tags are uniformly chosen
%%% as name:year:abbrev, where name is the
%%% family name of the first author or editor,
%%% year is a 4-digit number, and abbrev is a
%%% 3-letter condensation of important title
%%% words. Citation tags were automatically
%%% generated by software developed for the
%%% BibNet Project.
%%%
%%% In this bibliography, entries are sorted in
%%% publication order, using ``bibsort -byvolume.''
%%%
%%% The checksum field above contains a CRC-16
%%% checksum as the first value, followed by the
%%% equivalent of the standard UNIX wc (word
%%% count) utility output of lines, words, and
%%% characters. This is produced by Robert
%%% Solovay's checksum utility.",
%%% }
%%% ====================================================================
@Preamble{"\input bibnames.sty"}
%%% ====================================================================
%%% Acknowledgement abbreviations:
@String{ack-nhfb = "Nelson H. F. Beebe,
University of Utah,
Department of Mathematics, 110 LCB,
155 S 1400 E RM 233,
Salt Lake City, UT 84112-0090, USA,
Tel: +1 801 581 5254,
FAX: +1 801 581 4148,
e-mail: \path|beebe@math.utah.edu|,
\path|beebe@acm.org|,
\path|beebe@computer.org| (Internet),
URL: \path|http://www.math.utah.edu/~beebe/|"}
%%% ====================================================================
%%% Journal abbreviations:
@String{j-TEAC = "ACM Transactions on Economics and
Computation"}
%%% ====================================================================
%%% Publisher abbreviations:
@String{pub-ACM = "ACM Press"}
@String{pub-ACM:adr = "New York, NY 10036, USA"}
%%% ====================================================================
%%% Bibliography entries:
@Article{Conitzer:2013:ATE,
author = "Vincent Conitzer and R. Preston McAfee",
title = "The {ACM Transactions on Economics and Computation}:
an introduction",
journal = j-TEAC,
volume = "1",
number = "1",
pages = "1:1--1:??",
month = jan,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2399187.2399188",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:51 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
acknowledgement = ack-nhfb,
articleno = "1",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Gradwohl:2013:SRC,
author = "Ronen Gradwohl and Noam Livne and Alon Rosen",
title = "Sequential rationality in cryptographic protocols",
journal = j-TEAC,
volume = "1",
number = "1",
pages = "2:1--2:??",
month = jan,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2399187.2399189",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:51 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "Much of the literature on rational cryptography
focuses on analyzing the strategic properties of
cryptographic protocols. However, due to the presence
of computationally-bounded players and the asymptotic
nature of cryptographic security, a definition of
sequential rationality for this setting has thus far
eluded researchers. We propose a new framework for
overcoming these obstacles, and provide the first
definitions of computational solution concepts that
guarantee sequential rationality. We argue that natural
computational variants of subgame perfection are too
strong for cryptographic protocols. As an alternative,
we introduce a weakening called threat-free Nash
equilibrium that is more permissive but still
eliminates the undesirable ``empty threats'' of
nonsequential solution concepts. To demonstrate the
applicability of our framework, we revisit the problem
of implementing a mediator for correlated equilibria
[Dodis et al. 2000], and propose a variant of their
protocol that is sequentially rational for a nontrivial
class of correlated equilibria. Our treatment provides
a better understanding of the conditions under which
mediators in a correlated equilibrium can be replaced
by a stable protocol.",
acknowledgement = ack-nhfb,
articleno = "2",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Jain:2013:GTA,
author = "Shaili Jain and David C. Parkes",
title = "A game-theoretic analysis of the {ESP} game",
journal = j-TEAC,
volume = "1",
number = "1",
pages = "3:1--3:??",
month = jan,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2399187.2399190",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:51 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "``Games with a Purpose'' are interactive games that
users play because they are fun, with the added benefit
that the outcome of play is useful work. The ESP game,
developed by von Ahn and Dabbish [2004], is an example
of such a game devised to label images on the web.
Since labeling images is a hard problem for computer
vision algorithms and can be tedious and time-consuming
for humans, the ESP game provides humans with incentive
to do useful work by being enjoyable to play. We
present a simple game-theoretic model of the ESP game
and characterize the equilibrium behavior in our model.
Our equilibrium analysis supports the fact that users
appear to coordinate on low effort words. We provide an
alternate model of user preferences, modeling a change
that could be induced through a different scoring
method, and show that equilibrium behavior in this
model coordinates on high-effort words. We also give
sufficient conditions for coordinating on high-effort
words to be a Bayesian-Nash equilibrium. Our results
suggest the possibility of formal incentive design in
achieving desirable system-wide outcomes for the
purpose of human computation, complementing existing
considerations of robustness against cheating and human
factors.",
acknowledgement = ack-nhfb,
articleno = "3",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Naroditskiy:2013:OPD,
author = "Victor Naroditskiy and Maria Polukarov and Nicholas R.
Jennings",
title = "Optimal payments in dominant-strategy mechanisms for
single-parameter domains",
journal = j-TEAC,
volume = "1",
number = "1",
pages = "4:1--4:??",
month = jan,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2399187.2399191",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:51 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We study dominant-strategy mechanisms in allocation
domains where agents have one-dimensional types and
quasilinear utilities. Taking an allocation function as
an input, we present an algorithmic technique for
finding optimal payments in a class of mechanism design
problems, including utilitarian and egalitarian
allocation of homogeneous items with nondecreasing
marginal costs. Our results link optimality of payment
functions to a geometric condition involving
triangulations of polytopes. When this condition is
satisfied, we constructively show the existence of an
optimal payment function that is piecewise linear in
agent types.",
acknowledgement = ack-nhfb,
articleno = "4",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Feldman:2013:ISI,
author = "Michal Feldman and Noam Nisan",
title = "Introduction to the {Special Issue on Algorithmic Game
Theory}",
journal = j-TEAC,
volume = "1",
number = "2",
pages = "5:1--5:??",
month = may,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2465769.2465770",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:52 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
acknowledgement = ack-nhfb,
articleno = "5",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Blume:2013:NFP,
author = "Lawrence Blume and David Easley and Jon Kleinberg and
Robert Kleinberg and {\'E}va Tardos",
title = "Network Formation in the Presence of Contagious Risk",
journal = j-TEAC,
volume = "1",
number = "2",
pages = "6:1--6:??",
month = may,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2465769.2465771",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:52 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "There are a number of domains where agents must
collectively form a network in the face of the
following trade-off: each agent receives benefits from
the direct links it forms to others, but these links
expose it to the risk of being hit by a cascading
failure that might spread over multistep paths.
Financial contagion, epidemic disease, the exposure of
covert organizations to discovery, and electrical power
networks are all settings in which such issues have
been articulated. Here we formulate the problem in
terms of strategic network formation, and provide
asymptotically tight bounds on the welfare of both
optimal and stable networks. We find that socially
optimal networks are, in a precise sense, situated just
beyond a phase transition in the behavior of the
cascading failures, and that stable graphs lie slightly
further beyond this phase transition, at a point where
most of the available welfare has been lost. Our
analysis enables us to explore such issues as the
trade-offs between clustered and anonymous market
structures, and it exposes a fundamental sense in which
very small amounts of ``over-linking'' in networks with
contagious risk can have strong consequences for the
welfare of the participants.",
acknowledgement = ack-nhfb,
articleno = "6",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Karlin:2013:SEM,
author = "Anna R. Karlin and C. Thach Nguyen and Yuval Peres",
title = "Selling in Exclusive Markets: Some Observations on
Prior-Free Mechanism Design",
journal = j-TEAC,
volume = "1",
number = "2",
pages = "7:1--7:??",
month = may,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2465769.2465772",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:52 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We consider prior-free benchmarks in non-matroid
settings. In particular, we show that a very desirable
benchmark proposed by Hartline and Roughgarden is too
strong, in the sense that no truthful mechanism can
compete with it even in a very simple non-matroid
setting where there are two exclusive markets and the
seller can only sell to agents in one of them. On the
other hand, we show that there is a mechanism that
competes with a symmetrized version of this benchmark.
We further investigate the more traditional best fixed
price profit benchmark and show that there are
mechanisms that compete with it in any downward-closed
settings.",
acknowledgement = ack-nhfb,
articleno = "7",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Ha:2013:MDC,
author = "Bach Q. Ha and Jason D. Hartline",
title = "Mechanism Design via Consensus Estimates, Cross
Checking, and Profit Extraction",
journal = j-TEAC,
volume = "1",
number = "2",
pages = "8:1--8:??",
month = may,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2465769.2465773",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:52 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "There is only one technique for prior-free optimal
mechanism design that generalizes beyond the
structurally benevolent setting of digital goods. This
technique uses random sampling to estimate the
distribution of agent values and then employs the
Bayesian optimal mechanism for this estimated
distribution on the remaining players. Though quite
general, even for digital goods, this random sampling
auction has a complicated analysis and is known to be
suboptimal. To overcome these issues we generalize the
consensus and profit extraction techniques from
Goldberg and Hartline [2003] to structurally rich
environments that include, for example, single-minded
combinatorial auctions.",
acknowledgement = ack-nhfb,
articleno = "8",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Goldberg:2013:CHM,
author = "Paul W. Goldberg and Christos H. Papadimitriou and
Rahul Savani",
title = "The Complexity of the Homotopy Method, Equilibrium
Selection, and {Lemke--Howson} Solutions",
journal = j-TEAC,
volume = "1",
number = "2",
pages = "9:1--9:??",
month = may,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2465769.2465774",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:52 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We show that the widely used homotopy method for
solving fixpoint problems, as well as the
Harsanyi--Selten equilibrium selection process for
games, are PSPACE-complete to implement. Extending our
result for the Harsanyi--Selten process, we show that
several other homotopy-based algorithms for finding
equilibria of games are also PSPACE-complete to
implement. A further application of our techniques
yields the result that it is PSPACE-complete to compute
any of the equilibria that could be found via the
classical Lemke--Howson algorithm, a
complexity-theoretic strengthening of the result in
Savani and von Stengel [2006]. These results show that
our techniques can be widely applied and suggest that
the PSPACE-completeness of implementing homotopy
methods is a general principle.",
acknowledgement = ack-nhfb,
articleno = "9",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Koutsoupias:2013:CRO,
author = "Elias Koutsoupias and George Pierrakos",
title = "On the Competitive Ratio of Online Sampling Auctions",
journal = j-TEAC,
volume = "1",
number = "2",
pages = "10:1--10:??",
month = may,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2465769.2465775",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:52 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We study online profit-maximizing auctions for digital
goods with adversarial bid selection and uniformly
random arrivals; in this sense, our model lies at the
intersection of prior-free mechanism design and
secretary problems. Our goal is to design auctions that
are constant competitive with F$^{(2)}$. We give a
generic reduction that transforms any offline auction
to an online one with only a loss of a factor of 2 in
the competitive ratio. We also present some natural
auctions, both randomized and deterministic, and study
their competitive ratio. Our analysis reveals some
interesting connections of one of these auctions with
RSOP.",
acknowledgement = ack-nhfb,
articleno = "10",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Candogan:2013:NPG,
author = "Ozan Candogan and Asuman Ozdaglar and Pablo A.
Parrilo",
title = "Near-Potential Games: Geometry and Dynamics",
journal = j-TEAC,
volume = "1",
number = "2",
pages = "11:1--11:??",
month = may,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2465769.2465776",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:52 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "Potential games are a special class of games for which
many adaptive user dynamics converge to a Nash
equilibrium. In this article, we study properties of
near-potential games, that is, games that are close in
terms of payoffs to potential games, and show that such
games admit similar limiting dynamics. We first focus
on finite games in strategic form. We introduce a
distance notion in the space of games and study the
geometry of potential games and sets of games that are
equivalent, with respect to various equivalence
relations, to potential games. We discuss how, given an
arbitrary game, one can find a nearby game in these
sets. We then study dynamics in near-potential games by
focusing on continuous-time perturbed best response
dynamics. We characterize the limiting behavior of this
dynamics in terms of the upper contour sets of the
potential function of a close potential game and
approximate equilibria of the game. Exploiting
structural properties of approximate equilibrium sets,
we strengthen our result and show that for games that
are sufficiently close to a potential game, the
sequence of mixed strategies generated by this dynamics
converges to a small neighborhood of equilibria whose
size is a function of the distance from the set of
potential games. In the second part of the article, we
study continuous games and show that our approach for
characterizing the limiting sets in near-potential
games extends to continuous games. In particular, we
consider continuous-time best response dynamics and a
variant of it (where players update their strategies
only if there is at least $ \epsilon $ utility
improvement opportunity) in near-potential games where
the strategy sets are compact and convex subsets of a
Euclidean space. We show that these update rules
converge to a neighborhood of equilibria (or the
maximizer of the potential function), provided that the
potential function of the nearby potential game
satisfies some structural properties. Our results
generalize the known convergence results for potential
games to near-potential games.",
acknowledgement = ack-nhfb,
articleno = "11",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Abernethy:2013:EMM,
author = "Jacob Abernethy and Yiling Chen and Jennifer Wortman
Vaughan",
title = "Efficient Market Making via Convex Optimization, and a
Connection to Online Learning",
journal = j-TEAC,
volume = "1",
number = "2",
pages = "12:1--12:??",
month = may,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2465769.2465777",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:52 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We propose a general framework for the design of
securities markets over combinatorial or infinite state
or outcome spaces. The framework enables the design of
computationally efficient markets tailored to an
arbitrary, yet relatively small, space of securities
with bounded payoff. We prove that any market
satisfying a set of intuitive conditions must price
securities via a convex cost function, which is
constructed via conjugate duality. Rather than deal
with an exponentially large or infinite outcome space
directly, our framework only requires optimization over
a convex hull. By reducing the problem of automated
market making to convex optimization, where many
efficient algorithms exist, we arrive at a range of new
polynomial-time pricing mechanisms for various
problems. We demonstrate the advantages of this
framework with the design of some particular markets.
We also show that by relaxing the convex hull we can
gain computational tractability without compromising
the market institution's bounded budget. Although our
framework was designed with the goal of deriving
efficient automated market makers for markets with very
large outcome spaces, this framework also provides new
insights into the relationship between market design
and machine learning, and into the complete market
setting. Using our framework, we illustrate the
mathematical parallels between cost-function-based
markets and online learning and establish a
correspondence between cost-function-based markets and
market scoring rules for complete markets.",
acknowledgement = ack-nhfb,
articleno = "12",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Haghpanah:2013:OAP,
author = "Nima Haghpanah and Nicole Immorlica and Vahab Mirrokni
and Kamesh Munagala",
title = "Optimal Auctions with Positive Network Externalities",
journal = j-TEAC,
volume = "1",
number = "2",
pages = "13:1--13:??",
month = may,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2465769.2465778",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:52 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We consider the problem of designing auctions in
social networks for goods that exhibit single-parameter
submodular network externalities in which a bidder's
value for an outcome is a fixed private type times a
known submodular function of the allocation of his
friends. Externalities pose many issues that are hard
to address with traditional techniques; our work shows
how to resolve these issues in a specific setting of
particular interest. We operate in a Bayesian
environment and so assume private values are drawn
according to known distributions. We prove that the
optimal auction is NP-hard to approximate pointwise,
and APX-hard on average. Thus we instead design
auctions whose revenue approximates that of the optimal
auction. Our main result considers step-function
externalities in which a bidder's value for an outcome
is either zero, or equal to his private type if at
least one friend has the good. For these settings, we
provide an $ e / (e + 1) $-approximation. We also give a
0.25-approximation auction for general single-parameter
submodular network externalities, and discuss
optimizing over a class of simple pricing strategies.",
acknowledgement = ack-nhfb,
articleno = "13",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Othman:2013:PLS,
author = "Abraham Othman and David M. Pennock and Daniel M.
Reeves and Tuomas Sandholm",
title = "A Practical Liquidity-Sensitive Automated Market
Maker",
journal = j-TEAC,
volume = "1",
number = "3",
pages = "14:1--14:??",
month = sep,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2509413.2509414",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:54 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "Automated market makers are algorithmic agents that
enable participation and information elicitation in
electronic markets. They have been widely and
successfully applied in artificial-money settings, like
some Internet prediction markets. Automated market
makers from the literature suffer from two problems
that contribute to their impracticality and impair
their use beyond artificial-money settings: first, they
are unable to adapt to liquidity, so that trades cause
prices to move the same amount in both heavily and
lightly traded markets, and second, in typical
circumstances, they run at a deficit. In this article,
we construct a market maker that is both sensitive to
liquidity and can run at a profit. Our market maker has
bounded loss for any initial level of liquidity and, as
the initial level of liquidity approaches zero,
worst-case loss approaches zero. For any level of
initial liquidity we can establish a boundary in market
state space such that, if the market terminates within
that boundary, the market maker books a profit
regardless of the realized outcome.",
acknowledgement = ack-nhfb,
articleno = "14",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Balcan:2013:PU,
author = "Maria-Florina Balcan and Avrim Blum and Yishay
Mansour",
title = "The Price of Uncertainty",
journal = j-TEAC,
volume = "1",
number = "3",
pages = "15:1--15:??",
month = sep,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2509413.2509415",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:54 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "In this work, we study the degree to which small
fluctuations in costs in well-studied potential games
can impact the result of natural best-response and
improved-response dynamics. We call this the Price of
Uncertainty and study it in a wide variety of potential
games including fair cost-sharing games, set-cover
games, routing games, and job-scheduling games. We show
that in certain cases, even extremely small
fluctuations can have the ability to cause these
dynamics to spin out of control and move to states of
much higher social cost, whereas in other cases these
dynamics are much more stable even to large degrees of
fluctuation. We also consider the resilience of these
dynamics to a small number of Byzantine players about
which no assumptions are made. We show again a contrast
between different games. In certain cases (e.g., fair
cost-sharing, set-cover, job-scheduling) even a single
Byzantine player can cause best-response dynamics to
transition from low-cost states to states of
substantially higher cost, whereas in others (e.g., the
class of $ \beta $-nice games, which includes routing,
market-sharing and many others) these dynamics are much
more resilient. Overall, our work can be viewed as
analyzing the inherent resilience or safety of games to
different kinds of imperfections in player behavior,
player information, or in modeling assumptions made.",
acknowledgement = ack-nhfb,
articleno = "15",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Ben-Yehuda:2013:DAE,
author = "Orna Agmon Ben-Yehuda and Muli Ben-Yehuda and Assaf
Schuster and Dan Tsafrir",
title = "Deconstructing {Amazon EC2} Spot Instance Pricing",
journal = j-TEAC,
volume = "1",
number = "3",
pages = "16:1--16:??",
month = sep,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2509413.2509416",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:54 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "Cloud providers possessing large quantities of spare
capacity must either incentivize clients to purchase it
or suffer losses. Amazon is the first cloud provider to
address this challenge, by allowing clients to bid on
spare capacity and by granting resources to bidders
while their bids exceed a periodically changing spot
price. Amazon publicizes the spot price but does not
disclose how it is determined. By analyzing the spot
price histories of Amazon's EC2 cloud, we reverse
engineer how prices are set and construct a model that
generates prices consistent with existing price traces.
Our findings suggest that usually prices are not
market-driven, as sometimes previously assumed. Rather,
they are likely to be generated most of the time at
random from within a tight price range via a dynamic
hidden reserve price mechanism. Our model could help
clients make informed bids, cloud providers design
profitable systems, and researchers design pricing
algorithms.",
acknowledgement = ack-nhfb,
articleno = "16",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Sarne:2013:CSM,
author = "David Sarne",
title = "Competitive Shopbots-Mediated Markets",
journal = j-TEAC,
volume = "1",
number = "3",
pages = "17:1--17:??",
month = sep,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2509413.2509417",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:54 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "This article considers markets mediated by autonomous
self-interested comparison-shopping agents. As in
today's markets, the agents do not charge buyers for
their services but rather benefit from payments
obtained from sellers upon the execution of a
transaction. The agents aim at maximizing their
expected benefit, taking into consideration the cost
incurred by the search and competition dynamics that
arise in the multi-agent setting. This article provides
a comprehensive analysis of such models, based on
search theory principles. The analysis results in a
characterization of the buyers' and agents' search
strategies in equilibrium. The main result of this
article is that the use of self-interested
comparison-shopping agents can result in a beneficial
equilibrium, where both buyers and sellers benefit, in
comparison to the case where buyers control the
comparison-shopping agent, and the comparison-shopping
agents necessarily do not lose. This, despite the fact
that the service is offered for free to buyers and its
cost is essentially covered by sellers. The analysis
generalizes to any setting where buyers can use
self-interested agents capable of effectively
performing the search (e.g., evaluating opportunities)
on their behalf.",
acknowledgement = ack-nhfb,
articleno = "17",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Procaccia:2013:AMD,
author = "Ariel D. Procaccia and Moshe Tennenholtz",
title = "Approximate Mechanism Design without Money",
journal = j-TEAC,
volume = "1",
number = "4",
pages = "18:1--18:??",
month = dec,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2542174.2542175",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:56 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "The literature on algorithmic mechanism design is
mostly concerned with game-theoretic versions of
optimization problems to which standard economic
money-based mechanisms cannot be applied efficiently.
Recent years have seen the design of various truthful
approximation mechanisms that rely on payments. In this
article, we advocate the reconsideration of highly
structured optimization problems in the context of
mechanism design. We explicitly argue for the first
time that, in such domains, approximation can be
leveraged to obtain truthfulness without resorting to
payments. This stands in contrast to previous work
where payments are almost ubiquitous and (more often
than not) approximation is a necessary evil that is
required to circumvent computational complexity. We
present a case study in approximate mechanism design
without money. In our basic setting, agents are located
on the real line and the mechanism must select the
location of a public facility; the cost of an agent is
its distance to the facility. We establish tight upper
and lower bounds for the approximation ratio given by
strategy-proof mechanisms without payments, with
respect to both deterministic and randomized
mechanisms, under two objective functions: the social
cost and the maximum cost. We then extend our results
in two natural directions: a domain where two
facilities must be located and a domain where each
agent controls multiple locations.",
acknowledgement = ack-nhfb,
articleno = "18",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Yildiz:2013:BOD,
author = "Ercan Yildiz and Asuman Ozdaglar and Daron Acemoglu
and Amin Saberi and Anna Scaglione",
title = "Binary Opinion Dynamics with Stubborn Agents",
journal = j-TEAC,
volume = "1",
number = "4",
pages = "19:1--19:??",
month = dec,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2538508",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:56 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We study binary opinion dynamics in a social network
with stubborn agents who influence others but do not
change their opinions. We focus on a generalization of
the classical voter model by introducing nodes
(stubborn agents) that have a fixed state. We show that
the presence of stubborn agents with opposing opinions
precludes convergence to consensus; instead, opinions
converge in distribution with disagreement and
fluctuations. In addition to the first moment of this
distribution typically studied in the literature, we
study the behavior of the second moment in terms of
network properties and the opinions and locations of
stubborn agents. We also study the problem of optimal
placement of stubborn agents where the location of a
fixed number of stubborn agents is chosen to have the
maximum impact on the long-run expected opinions of
agents.",
acknowledgement = ack-nhfb,
articleno = "19",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Mossel:2013:MCT,
author = "Elchanan Mossel and Omer Tamuz",
title = "Making Consensus Tractable",
journal = j-TEAC,
volume = "1",
number = "4",
pages = "20:1--20:??",
month = dec,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2542174.2542176",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:56 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We study a model of consensus decision making in which
a finite group of Bayesian agents has to choose between
one of two courses of action. Each member of the group
has a private and independent signal at his or her
disposal, giving some indication as to which action is
optimal. To come to a common decision, the participants
perform repeated rounds of voting. In each round, each
agent casts a vote in favor of one of the two courses
of action, reflecting his or her current belief, and
observes the votes of the rest. We provide an efficient
algorithm for the calculation the agents have to
perform and show that consensus is always reached and
that the probability of reaching a wrong decision
decays exponentially with the number of agents.",
acknowledgement = ack-nhfb,
articleno = "20",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Hoefer:2013:AAC,
author = "Martin Hoefer and Alexander Skopalik",
title = "Altruism in Atomic Congestion Games",
journal = j-TEAC,
volume = "1",
number = "4",
pages = "21:1--21:??",
month = dec,
year = "2013",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2542174.2542177",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 14 06:10:56 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "This article studies the effects of altruism, a
phenomenon widely observed in practice, in the model of
atomic congestion games. Altruistic behavior is modeled
by a linear trade-off between selfish and social
objectives. Our model can be embedded in the framework
of congestion games with player-specific latency
functions. Stable states are the pure Nash equilibria
of these games, and we examine their existence and the
convergence of sequential best-response dynamics. In
general, pure Nash equilibria are often absent, and
existence is NP-hard to decide. Perhaps surprisingly,
if all delay functions are affine, the games remain
potential games, even when agents are arbitrarily
altruistic. The construction underlying this result can
be extended to a class of general potential games and
social cost functions, and we study a number of
prominent examples. These results give important
insights into the robustness of multi-agent systems
with heterogeneous altruistic incentives. Furthermore,
they yield a general technique to prove that
stabilization is robust, even with partly altruistic
agents, which is of independent interest. In addition
to these results for uncoordinated dynamics, we
consider a scenario with a central altruistic
institution that can set incentives for the agents. We
provide constructive and hardness results for finding
the minimum number of altruists to stabilize an optimal
congestion profile and more general mechanisms to
incentivize agents to adopt favorable behavior. These
results are closely related to Stackelberg routing and
answer open questions raised recently in the
literature.",
acknowledgement = ack-nhfb,
articleno = "21",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Polevoy:2014:SCS,
author = "Gleb Polevoy and Rann Smorodinsky and Moshe
Tennenholtz",
title = "Signaling Competition and Social Welfare",
journal = j-TEAC,
volume = "2",
number = "1",
pages = "1:1--1:??",
month = mar,
year = "2014",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2560766",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 21 18:00:43 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We consider an environment where sellers compete over
buyers. All sellers are a-priori identical and
strategically signal buyers about the product they
sell. In a setting motivated by online advertising in
display ad exchanges, where firms use second price
auctions, a firm's strategy is a decision about its
signaling scheme for a stream of goods (e.g., user
impressions), and a buyer's strategy is a selection
among the firms. In this setting, a single seller will
typically provide partial information, and
consequently, a product may be allocated inefficiently.
Intuitively, competition among sellers may induce
sellers to provide more information in order to attract
buyers and thus increase efficiency. Surprisingly, we
show that such a competition among firms may yield
significant loss in consumers' social welfare with
respect to the monopolistic setting. Although we also
show that in some cases, the competitive setting yields
gain in social welfare, we provide a tight bound on
that gain, which is shown to be small with respect to
the preceding possible loss. Our model is tightly
connected with the literature on bundling in
auctions.",
acknowledgement = ack-nhfb,
articleno = "1",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Albers:2014:NEN,
author = "Susanne Albers and Stefan Eilts and Eyal Even-Dar and
Yishay Mansour and Liam Roditty",
title = "On {Nash} Equilibria for a Network Creation Game",
journal = j-TEAC,
volume = "2",
number = "1",
pages = "2:1--2:??",
month = mar,
year = "2014",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2560767",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 21 18:00:43 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We study a basic network creation game proposed by
Fabrikant et al. [2003]. In this game, each player
(vertex) can create links (edges) to other players at a
cost of $\alpha$ per edge. The goal of every player is to
minimize the sum consisting of (a) the cost of the
links he has created and (b) the sum of the distances
to all other players. Fabrikant et al. conjectured that
there exists a constant $A$ such that, for any $\alpha
> A$, all nontransient Nash equilibria graphs are
trees. They showed that if a Nash equilibrium is a
tree, the price of anarchy is constant. In this
article, we disprove the tree conjecture. More
precisely, we show that for any positive integer $n_0$,
there exists a graph built by $n \geq n_0$ players
which contains cycles and forms a nontransient Nash
equilibrium, for any $\alpha$ with $1 < \alpha \leq
\sqrt n / 2$. Our construction makes use of some
interesting results on finite affine planes. On the
other hand, we show that, for $\alpha \geq 12 n \lceil
\log n \rceil$, every Nash equilibrium forms a
tree. Without relying on the tree conjecture, Fabrikant
et al. proved an upper bound on the price of anarchy of
$O(\sqrt{\alpha})$, where $\alpha \in [2, n^2]$. We
improve this bound. Specifically, we derive a constant
upper bound for $\alpha \in O(\sqrt{n})$ and for
$\alpha \geq 12 n \lceil \log n \rceil$. For the
intermediate values, we derive an improved bound of
$O(1 + (\min\{\alpha^2 / n, n^2 / \alpha \})^{1 /
3})$. Additionally, we develop characterizations of
Nash equilibria and extend our results to a weighted
network creation game as well as to scenarios with cost
sharing.",
acknowledgement = ack-nhfb,
articleno = "2",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Smeulders:2014:GFM,
author = "Bart Smeulders and Frits C. R. Spieksma and Laurens
Cherchye and Bram {De Rock}",
title = "Goodness-of-Fit Measures for Revealed Preference
Tests: Complexity Results and Algorithms",
journal = j-TEAC,
volume = "2",
number = "1",
pages = "3:1--3:??",
month = mar,
year = "2014",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2560793",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 21 18:00:43 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We provide results on the computational complexity of
goodness-of-fit measures (i.e., Afriat's efficiency
index, Varian's efficiency vector-index, and the
Houtman-Maks index) associated with several revealed
preference axioms (i.e., WARP, SARP, GARP, and
HARP). These results explain the computational
difficulties that have been observed in literature when
computing these indices. Our NP-hardness results are
obtained by reductions from the independent set
problem. We also show that this reduction can be used
to prove that no approximation algorithm achieving a
ratio of $O(n^{1 - \delta})$, $\delta > 0$ exists for
Varian's index, nor for Houtman-Maks' index (unless P =
NP). Finally, we give an exact polynomial-time
algorithm for finding Afriat's efficiency index.",
acknowledgement = ack-nhfb,
articleno = "3",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Zhang:2014:RPO,
author = "Yu Zhang and Jaeok Park and Mihaela van der Schaar",
title = "Rating Protocols in Online Communities",
journal = j-TEAC,
volume = "2",
number = "1",
pages = "4:1--4:??",
month = mar,
year = "2014",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2560794",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 21 18:00:43 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "Sustaining cooperation among self-interested agents is
critical for the proliferation of emerging online
communities. Providing incentives for cooperation in
online communities is particularly challenging because
of their unique features: a large population of
anonymous agents having asymmetric interests and
dynamically joining and leaving the community,
operation errors, and agents trying to whitewash when
they have a low standing in the community. In this
article, we take these features into consideration and
propose a framework for designing and analyzing a class
of incentive schemes based on rating protocols, which
consist of a rating scheme and a recommended strategy.
We first define the concept of sustainable rating
protocols under which every agent has the incentive to
follow the recommended strategy given the deployed
rating scheme. We then formulate the problem of
designing an optimal rating protocol, which selects the
protocol that maximizes the overall social welfare
among all sustainable rating protocols. Using the
proposed framework, we study the structure of optimal
rating protocols and explore the impact of one-sided
rating, punishment lengths, and whitewashing on optimal
rating protocols. Our results show that optimal rating
protocols are capable of sustaining cooperation, with
the amount of cooperation varying depending on the
community characteristics.",
acknowledgement = ack-nhfb,
articleno = "4",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Emek:2014:SSR,
author = "Yuval Emek and Michal Feldman and Iftah Gamzu and
Renato Paes Leme and Moshe Tennenholtz",
title = "Signaling Schemes for Revenue Maximization",
journal = j-TEAC,
volume = "2",
number = "2",
pages = "5:1--5:??",
month = jun,
year = "2014",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2594564",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Mon Jun 9 16:42:02 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "Signaling is an important topic in the study of
asymmetric information in economic settings. In
particular, the transparency of information available
to a seller in an auction setting is a question of
major interest. We introduce the study of signaling
when conducting a second price auction of a
probabilistic good whose actual instantiation is known
to the auctioneer but not to the bidders. This
framework can be used to model impressions selling in
display advertising. We establish several results
within this framework. First, we study the problem of
computing a signaling scheme that maximizes the
auctioneer's revenue in a Bayesian setting. We show
that this problem is polynomially solvable for some
interesting special cases, but computationally hard in
general. Second, we establish a tight bound on the
minimum number of signals required to implement an
optimal signaling scheme. Finally, we show that at
least half of the maximum social welfare can be
preserved within such a scheme.",
acknowledgement = ack-nhfb,
articleno = "5",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Chen:2014:EPR,
author = "Yiling Chen and Ian A. Kash and Michael Ruberry and
Victor Shnayder",
title = "Eliciting Predictions and Recommendations for Decision
Making",
journal = j-TEAC,
volume = "2",
number = "2",
pages = "6:1--6:??",
month = jun,
year = "2014",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2556271",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Mon Jun 9 16:42:02 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "When making a decision, a decision maker selects one
of several possible actions and hopes to achieve a
desirable outcome. To make a better decision, the
decision maker often asks experts for advice. In this
article, we consider two methods of acquiring advice
for decision making. We begin with a method where one
or more experts predict the effect of each action and
the decision maker then selects an action based on the
predictions. We characterize strictly proper decision
making, where experts have an incentive to accurately
reveal their beliefs about the outcome of each action.
However, strictly proper decision making requires the
decision maker use a completely mixed strategy to
choose an action. To address this limitation, we
consider a second method where the decision maker asks
a single expert to recommend an action. We show that it
is possible to elicit the decision maker's most
preferred action for a broad class of preferences of
the decision maker, including when the decision maker
is an expected value maximizer.",
acknowledgement = ack-nhfb,
articleno = "6",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Rozen:2014:EPE,
author = "Rakefet Rozen and Rann Smorodinsky",
title = "Ex-Post Equilibrium and {VCG} Mechanisms",
journal = j-TEAC,
volume = "2",
number = "2",
pages = "7:1--7:??",
month = jun,
year = "2014",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2594565",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Mon Jun 9 16:42:02 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "Consider an abstract social choice setting with
incomplete information, where the number of
alternatives is large. Albeit natural, implementing VCG
mechanisms is infeasible due to the prohibitive
communication constraints. However, if players restrict
attention to a subset of the alternatives, feasibility
may be recovered. This article characterizes the class
of subsets that induce an ex-post equilibrium in the
original game. It turns out that a crucial condition
for such subsets to exist is the availability of a
type-independent optimal social alternative for each
player. We further analyze the welfare implications of
these restrictions. This work follows that of Holzman
et al. [2004] and Holzman and Monderer [2004] where
similar analysis is done for combinatorial auctions.",
acknowledgement = ack-nhfb,
articleno = "7",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Chen:2014:PAS,
author = "Xujin Chen and Benjamin Doerr and Carola Doerr and
Xiaodong Hu and Weidong Ma and Rob van Stee",
title = "The Price of Anarchy for Selfish Ring Routing is Two",
journal = j-TEAC,
volume = "2",
number = "2",
pages = "8:1--8:??",
month = jun,
year = "2014",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2548545",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Mon Jun 9 16:42:02 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We analyze the network congestion game with atomic
players, asymmetric strategies, and the maximum latency
among all players as social cost. This important social
cost function is much less understood than the average
latency. We show that the price of anarchy is at most
two, when the network is a ring and the link latencies
are linear. Our bound is tight. This is the first sharp
bound for the maximum latency objective.",
acknowledgement = ack-nhfb,
articleno = "8",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Cary:2014:CPA,
author = "Matthew Cary and Aparna Das and Benjamin Edelman and
Ioannis Giotis and Kurtis Heimerl and Anna R. Karlin
and Scott Duke Kominers and Claire Mathieu and Michael
Schwarz",
title = "Convergence of Position Auctions under Myopic
Best-Response Dynamics",
journal = j-TEAC,
volume = "2",
number = "3",
pages = "9:1--9:??",
month = jul,
year = "2014",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2632226",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Oct 17 12:45:12 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We study the dynamics of multiround position auctions,
considering both the case of exogenous click-through
rates and the case in which click-through rates are
determined by an endogenous consumer search process. In
both contexts, we demonstrate that dynamic position
auctions converge to their associated static, envy-free
equilibria. Furthermore, convergence is efficient, and
the entry of low-quality advertisers does not slow
convergence. Because our approach predominantly relies
on assumptions common in the sponsored search
literature, our results suggest that dynamic position
auctions converge more generally.",
acknowledgement = ack-nhfb,
articleno = "9",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Azar:2014:QCS,
author = "Pablo Daniel Azar and Silvio Micali",
title = "The Query Complexity of Scoring Rules",
journal = j-TEAC,
volume = "2",
number = "3",
pages = "10:1--10:??",
month = jul,
year = "2014",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2632228",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Oct 17 12:45:12 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "Proper scoring rules are crucial tools to elicit
truthful information from experts. A scoring rule maps
X, an expert-provided distribution over the set of all
possible states of the world, and $ \omega $, a
realized state of the world, to a real number
representing the expert's reward for his provided
information. To compute this reward, a scoring rule
queries the distribution X at various states. The
number of these queries is thus a natural measure of
the complexity of the scoring rule. We prove that any
bounded and strictly proper scoring rule that is
deterministic must make a number of queries to its
input distribution that is a quarter of the number of
states of the world. When the state space is very
large, this makes the computation of such scoring rules
impractical. We also show a new stochastic scoring rule
that is bounded, strictly proper, and which makes only
two queries to its input distribution. Thus, using
randomness allows us to have significant savings when
computing scoring rules.",
acknowledgement = ack-nhfb,
articleno = "10",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Alaei:2014:RSA,
author = "Saeed Alaei and Azarakhsh Malekian and Aravind
Srinivasan",
title = "On Random Sampling Auctions for Digital Goods",
journal = j-TEAC,
volume = "2",
number = "3",
pages = "11:1--11:??",
month = jul,
year = "2014",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2517148",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Oct 17 12:45:12 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "In the context of auctions for digital goods, an
interesting random sampling auction has been proposed
by Goldberg et al. [2001]. This auction has been
analyzed by Feige et al. [2005], who have shown that it
obtains in expectation at least 1/15 fraction of the
optimal revenue, which is substantially better than the
previously proven constant bounds but still far from
the conjectured lower bound of 1/4. In this article, we
prove that the aforementioned random sampling auction
obtains at least 1/4 fraction of the optimal revenue
for a large class of instances where the number of bids
above (or equal to) the optimal sale price is at least
6. We also show that this auction obtains at least
1/4.68 fraction of the optimal revenue for the small
class of remaining instances, thus leaving a negligible
gap between the lower and upper bound. We employ a mix
of probabilistic techniques and dynamic programming to
compute these bounds.",
acknowledgement = ack-nhfb,
articleno = "11",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Dandekar:2014:PAR,
author = "Pranav Dandekar and Nadia Fawaz and Stratis
Ioannidis",
title = "Privacy Auctions for Recommender Systems",
journal = j-TEAC,
volume = "2",
number = "3",
pages = "12:1--12:??",
month = jul,
year = "2014",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2629665",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Oct 17 12:45:12 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We study a market for private data in which a data
analyst publicly releases a statistic over a database
of private information. Individuals that own the data
incur a cost for their loss of privacy proportional to
the differential privacy guarantee given by the analyst
at the time of the release. The analyst incentivizes
individuals by compensating them, giving rise to a
privacy auction. Motivated by recommender systems, the
statistic we consider is a linear predictor function
with publicly known weights. The statistic can be
viewed as a prediction of the unknown data of a new
individual, based on the data of individuals in the
database. We formalize the trade-off between privacy
and accuracy in this setting, and show that a simple
class of estimates achieves an order-optimal trade-off.
It thus suffices to focus on auction mechanisms that
output such estimates. We use this observation to
design a truthful, individually rational,
proportional-purchase mechanism under a fixed budget
constraint. We show that our mechanism is 5-approximate
in terms of accuracy compared to the optimal mechanism,
and that no truthful mechanism can achieve a $ 2 -
\epsilon $ approximation, for any $\epsilon > 0$.",
acknowledgement = ack-nhfb,
articleno = "12",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Balcan:2014:NOC,
author = "Maria-Florina Balcan and Sara Krehbiel and Georgios
Piliouras and Jinwoo Shin",
title = "Near-Optimality in Covering Games by Exposing Global
Information",
journal = j-TEAC,
volume = "2",
number = "4",
pages = "13:1--13:??",
month = oct,
year = "2014",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2597890",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Tue Oct 28 16:50:26 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "Mechanism design for distributed systems is
fundamentally concerned with aligning individual
incentives with social welfare to avoid socially
inefficient outcomes that can arise from agents acting
autonomously. One simple and natural approach is to
centrally broadcast nonbinding advice intended to guide
the system to a socially near-optimal state while still
harnessing the incentives of individual agents. The
analytical challenge is proving fast convergence to
near optimal states, and in this article we give the
first results that carefully constructed advice vectors
yield stronger guarantees. We apply this approach to a
broad family of potential games modeling vertex cover
and set cover optimization problems in a distributed
setting. This class of problems is interesting because
finding exact solutions to their optimization problems
is NP-hard yet highly inefficient equilibria exist, so
a solution in which agents simply locally optimize is
not satisfactory. We show that with an arbitrary advice
vector, a set cover game quickly converges to an
equilibrium with cost of the same order as the square
of the social cost of the advice vector. More
interestingly, we show how to efficiently construct an
advice vector with a particular structure with cost
$O(\log n)$ times the optimal social cost, and we prove
that the system quickly converges to an equilibrium
with social cost of this same order.",
acknowledgement = ack-nhfb,
articleno = "13",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Bhawalkar:2014:WCG,
author = "Kshipra Bhawalkar and Martin Gairing and Tim
Roughgarden",
title = "Weighted Congestion Games: The Price of Anarchy,
Universal Worst-Case Examples, and Tightness",
journal = j-TEAC,
volume = "2",
number = "4",
pages = "14:1--14:??",
month = oct,
year = "2014",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2629666",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Tue Oct 28 16:50:26 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We characterize the Price of Anarchy (POA) in weighted
congestion games, as a function of the allowable
resource cost functions. Our results provide as
thorough an understanding of this quantity as is
already known for nonatomic and unweighted congestion
games, and take the form of universal (cost
function-independent) worst-case examples. One
noteworthy by-product of our proofs is the fact that
weighted congestion games are ``tight,'' which implies
that the worst-case price of anarchy with respect to
pure Nash equilibria, mixed Nash equilibria, correlated
equilibria, and coarse correlated equilibria are always
equal (under mild conditions on the allowable cost
functions). Another is the fact that, like nonatomic
but unlike atomic (unweighted) congestion games,
weighted congestion games with trivial structure
already realize the worst-case POA, at least for
polynomial cost functions. We also prove a new result
about unweighted congestion games: the worst-case price
of anarchy in symmetric games is as large as in their
more general asymmetric counterparts.",
acknowledgement = ack-nhfb,
articleno = "14",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Fotakis:2014:PDM,
author = "Dimitris Fotakis and Christos Tzamos",
title = "On the Power of Deterministic Mechanisms for Facility
Location Games",
journal = j-TEAC,
volume = "2",
number = "4",
pages = "15:1--15:??",
month = oct,
year = "2014",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2665005",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Tue Oct 28 16:50:26 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We consider $K$-Facility Location games, where $n$
strategic agents report their locations in a metric
space and a mechanism maps them to $K$ facilities. The
agents seek to minimize their connection cost, namely
the distance of their true location to the nearest
facility, and may misreport their location. We are
interested in deterministic mechanisms that are
strategyproof, that is, ensure that no agent can
benefit from misreporting her location, do not resort
to monetary transfers, and achieve a bounded
approximation ratio to the total connection cost of the
agents (or to the $ L_p $ norm of the connection costs,
for some $ p \in [1, \infty) $ or for $ p = \infty) $.
Our main result is an elegant characterization of
deterministic strategyproof mechanisms with a bounded
approximation ratio for $2$-Facility Location on the
line. In particular, we show that for instances with $
n \geq 5 $ agents, any such mechanism either admits a
unique dictator or always places the facilities at the
leftmost and the rightmost location of the instance. As
a corollary, we obtain that the best approximation
ratio achievable by deterministic strategyproof
mechanisms for the problem of locating $2$ facilities
on the line to minimize the total connection cost is
precisely $ n - 2$. Another rather surprising
consequence is that the Two-Extremes mechanism of
Procaccia and Tennenholtz [2009] is the only
deterministic anonymous strategyproof mechanism with a
bounded approximation ratio for $2$-Facility Location
on the line. The proof of the characterization employs
several new ideas and technical tools, which provide
new insights into the behavior of deterministic
strategyproof mechanisms for $K$-Facility Location
games and may be of independent interest. Employing one
of these tools, we show that for every $ K \geq 3$,
there do not exist any deterministic anonymous
strategyproof mechanisms with a bounded approximation
ratio for $K$-Facility Location on the line, even for
simple instances with $ K + 1$ agents. Moreover,
building on the characterization for the line, we show
that there do not exist any deterministic
strategyproof mechanisms with a bounded approximation ratio for
$2$-Facility Location and instances with $ n \geq 3$
agents located in a star.",
acknowledgement = ack-nhfb,
articleno = "15",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Michalak:2014:ICV,
author = "Tomasz P. Michalak and Piotr L. Szczepa{\'n}ski and
Talal Rahwan and Agata Chrobak and Simina Br{\^a}nzei
and Michael Wooldridge and Nicholas R. Jennings",
title = "Implementation and Computation of a Value for
Generalized Characteristic Function Games",
journal = j-TEAC,
volume = "2",
number = "4",
pages = "16:1--16:??",
month = oct,
year = "2014",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2665007",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Tue Oct 28 16:50:26 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "Generalized characteristic function games are a
variation of characteristic function games, in which
the value of a coalition depends not only on the
identities of its members, but also on the order in
which the coalition is formed. This class of games is a
useful abstraction for a number of realistic settings
and economic situations, such as modeling relationships
in social networks. To date, two main extensions of the
Shapley value have been proposed for generalized
characteristic function games: the Nowak--Radzik [1994]
value and the S{\'a}nchez--Berganti{\~n}os [1997]
value. In this context, the present article studies
generalized characteristic function games from the
point of view of implementation and computation.
Specifically, the article makes two key contributions.
First, building upon the mechanism by Dasgupta and Chiu
[1998], we present a non-cooperative mechanism that
implements both the Nowak--Radzik value and the
S{\'a}nchez--Berganti{\~n}os value in Subgame-Perfect
Nash Equilibria in expectations. Second, in order to
facilitate an efficient computation supporting the
implementation mechanism, we propose the Generalized
Marginal-Contribution Nets representation for this type
of game. This representation extends the results of
Ieong and Shoham [2005] and Elkind et al. [2009] for
characteristic function games and retains their
attractive computational properties.",
acknowledgement = ack-nhfb,
articleno = "16",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Chen:2014:AIP,
author = "Po-An Chen and Bart {De Keijzer} and David Kempe and
Guido Sch{\"a}fer",
title = "Altruism and Its Impact on the Price of Anarchy",
journal = j-TEAC,
volume = "2",
number = "4",
pages = "17:1--17:??",
month = oct,
year = "2014",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2597893",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Tue Oct 28 16:50:26 MDT 2014",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We study the inefficiency of equilibria for congestion
games when players are (partially) altruistic. We model
altruistic behavior by assuming that player $i$'s
perceived cost is a convex combination of $ (1 -
\alpha_i)$ times his direct cost and $\alpha_i$ times the social
cost. Tuning the parameters $\alpha_i$ allows smooth
interpolation between purely selfish and purely
altruistic behavior. Within this framework, we study
primarily altruistic extensions of (atomic and
nonatomic) congestion games, but also obtain some
results on fair cost-sharing games and valid utility
games. We derive (tight) bounds on the price of anarchy
of these games for several solution concepts. Thereto,
we suitably adapt the smoothness notion introduced by
Roughgarden and show that it captures the essential
properties to determine the robust price of anarchy of
these games. Our bounds show that for atomic congestion
games and cost-sharing games, the robust price of
anarchy gets worse with increasing altruism, while for
valid utility games, it remains constant and is not
affected by altruism. However, the increase in the
price of anarchy is not a universal phenomenon: For
general nonatomic congestion games with uniform
altruism, the price of anarchy improves with increasing
altruism. For atomic and nonatomic symmetric singleton
congestion games, we derive bounds on the pure price of
anarchy that improve as the average level of altruism
increases. (For atomic games, we only derive such
bounds when cost functions are linear.) Since the
bounds are also strictly lower than the robust price of
anarchy, these games exhibit natural examples in which
pure Nash equilibria are more efficient than more
permissive notions of equilibrium.",
acknowledgement = ack-nhfb,
articleno = "17",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Leyton-Brown:2015:ISI,
author = "Kevin Leyton-Brown and Panos Ipeirotis",
title = "Introduction to the Special Issue on {EC'12}",
journal = j-TEAC,
volume = "3",
number = "1",
pages = "1:1--1:??",
month = mar,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2742678",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 27 17:58:56 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
acknowledgement = ack-nhfb,
articleno = "1",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Caragiannis:2015:APN,
author = "Ioannis Caragiannis and Angelo Fanelli and Nick Gravin
and Alexander Skopalik",
title = "Approximate Pure {Nash} Equilibria in Weighted
Congestion Games: Existence, Efficient Computation, and
Structure",
journal = j-TEAC,
volume = "3",
number = "1",
pages = "2:1--2:??",
month = mar,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2614687",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 27 17:58:56 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We consider structural and algorithmic questions
related to the Nash dynamics of weighted congestion
games. In weighted congestion games with linear latency
functions, the existence of pure Nash equilibria is
guaranteed by a potential function argument.
Unfortunately, this proof of existence is inefficient
and computing pure Nash equilibria in such games is a
PLS-hard problem even when all players have unit
weights. The situation gets worse when superlinear
(e.g., quadratic) latency functions come into play; in
this case, the Nash dynamics of the game may contain
cycles and pure Nash equilibria may not even exist.
Given these obstacles, we consider approximate pure
Nash equilibria as alternative solution concepts. A $
\rho $-approximate pure Nash equilibrium is a state of
a (weighted congestion) game from which no player has
any incentive to deviate in order to improve her cost
by a multiplicative factor higher than $ \rho $. Do
such equilibria exist for small values of $ \rho $ ?
And if so, can we compute them efficiently? We provide
positive answers to both questions for weighted
congestion games with polynomial latency functions by
exploiting an ``approximation'' of such games by a new
class of potential games that we call $ \Psi $-games.
This allows us to show that these games have $
d!$-approximate pure Nash equilibria, where $d$ is the
maximum degree of the latency functions. Our main
technical contribution is an efficient algorithm for
computing $ O(1)$-approximate pure Nash equilibria when
$d$ is a constant. For games with linear latency
functions, the approximation guarantee is $ (3 + \sqrt 5)
/ 2 + O(\gamma)$ for arbitrarily small $ \gamma > 0$;
for latency functions with maximum degree $ d \geq 2$,
it is $ d^{2 d + o(d)}$. The running time is polynomial
in the number of bits in the representation of the game
and $ 1 / \gamma $. As a byproduct of our techniques,
we also show the following interesting structural
statement for weighted congestion games with polynomial
latency functions of maximum degree $ d \geq 2 $:
polynomially-long sequences of best-response moves from
any initial state to a $ d^{O(d^2)}$-approximate pure
Nash equilibrium exist and can be efficiently
identified in such games as long as $d$ is a constant.
To the best of our knowledge, these are the first
positive algorithmic results for approximate pure Nash
equilibria in weighted congestion games. Our techniques
significantly extend our recent work on unweighted
congestion games through the use of $ \Psi $-games. The
concept of approximating nonpotential games by
potential ones is interesting in itself and might have
further applications.",
acknowledgement = ack-nhfb,
articleno = "2",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Parkes:2015:BDR,
author = "David C. Parkes and Ariel D. Procaccia and Nisarg
Shah",
title = "Beyond Dominant Resource Fairness: Extensions,
Limitations, and Indivisibilities",
journal = j-TEAC,
volume = "3",
number = "1",
pages = "3:1--3:??",
month = mar,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2739040",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 27 17:58:56 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We study the problem of allocating multiple resources
to agents with heterogeneous demands. Technological
advances such as cloud computing and data centers
provide a new impetus for investigating this problem
under the assumption that agents demand the resources
in fixed proportions, known in economics as Leontief
preferences. In a recent paper, Ghodsi et al. [2011]
introduced the dominant resource fairness (DRF)
mechanism, which was shown to possess highly desirable
theoretical properties under Leontief preferences. We
extend their results in three directions. First, we
show that DRF generalizes to more expressive settings,
and leverage a new technical framework to formally
extend its guarantees. Second, we study the relation
between social welfare and properties such as
truthfulness; DRF performs poorly in terms of social
welfare, but we show that this is an unavoidable
shortcoming that is shared by every mechanism that
satisfies one of three basic properties. Third, and
most importantly, we study a realistic setting that
involves indivisibilities. We chart the boundaries of
the possible in this setting, contributing a new
relaxed notion of fairness and providing both
possibility and impossibility results.",
acknowledgement = ack-nhfb,
articleno = "3",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Babaioff:2015:DPL,
author = "Moshe Babaioff and Shaddin Dughmi and Robert Kleinberg
and Aleksandrs Slivkins",
title = "Dynamic Pricing with Limited Supply",
journal = j-TEAC,
volume = "3",
number = "1",
pages = "4:1--4:??",
month = mar,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2559152",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 27 17:58:56 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We consider the problem of designing
revenue-maximizing online posted-price mechanisms when
the seller has limited supply. A seller has k identical
items for sale and is facing n potential buyers
(``agents'') that are arriving sequentially. Each agent
is interested in buying one item. Each agent's value
for an item is an independent sample from some fixed
(but unknown) distribution with support [0,1]. The
seller offers a take-it-or-leave-it price to each
arriving agent (possibly different for different
agents), and aims to maximize his expected revenue. We
focus on mechanisms that do not use any information
about the distribution; such mechanisms are called
detail-free (or prior-independent). They are desirable
because knowing the distribution is unrealistic in many
practical scenarios. We study how the revenue of such
mechanisms compares to the revenue of the optimal
offline mechanism that knows the distribution
(``offline benchmark''). We present a detail-free
online posted-price mechanism whose revenue is at most
$ O((k \log n)^{2 / 3}) $ less than the offline benchmark,
for every distribution that is regular. In fact, this
guarantee holds without any assumptions if the
benchmark is relaxed to fixed-price mechanisms.
Further, we prove a matching lower bound. The
performance guarantee for the same mechanism can be
improved to $ O (\sqrt k \log n) $, with a
distribution-dependent constant, if the ratio $ k / n $
is sufficiently small. We show that, in the worst case
over all demand distributions, this is essentially the
best rate that can be obtained with a
distribution-specific constant. On a technical level,
we exploit the connection to multiarmed bandits (MAB).
While dynamic pricing with unlimited supply can easily
be seen as an MAB problem, the intuition behind MAB
approaches breaks when applied to the setting with
limited supply. Our high-level conceptual contribution
is that even the limited supply setting can be
fruitfully treated as a bandit problem.",
acknowledgement = ack-nhfb,
articleno = "4",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Dutting:2015:PRT,
author = "Paul D{\"u}tting and Felix Fischer and Pichayut
Jirapinyo and John K. Lai and Benjamin Lubin and David
C. Parkes",
title = "Payment Rules through Discriminant-Based Classifiers",
journal = j-TEAC,
volume = "3",
number = "1",
pages = "5:1--5:??",
month = mar,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2559049",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 27 17:58:56 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "In mechanism design it is typical to impose incentive
compatibility and then derive an optimal mechanism
subject to this constraint. By replacing the incentive
compatibility requirement with the goal of minimizing
expected ex post regret, we are able to adapt
statistical machine learning techniques to the design
of payment rules. This computational approach to
mechanism design is applicable to domains with
multi-dimensional types and situations where
computational efficiency is a concern. Specifically,
given an outcome rule and access to a type
distribution, we train a support vector machine with a
specific structure imposed on the discriminant
function, such that it implicitly learns a
corresponding payment rule with desirable incentive
properties. We extend the framework to adopt succinct
$k$-wise dependent valuations, leveraging a connection
with maximum a posteriori assignment on Markov networks
to enable training to scale up to settings with a large
number of items; we evaluate this construction in the
case where $ k = 2 $. We present applications to
multiparameter combinatorial auctions with approximate
winner determination, and the assignment problem with
an egalitarian outcome rule. Experimental results
demonstrate that the construction produces payment
rules with low ex post regret, and that penalizing
classification error is effective in preventing
failures of ex post individual rationality.",
acknowledgement = ack-nhfb,
articleno = "5",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Roughgarden:2015:PAG,
author = "Tim Roughgarden",
title = "The Price of Anarchy in Games of Incomplete
Information",
journal = j-TEAC,
volume = "3",
number = "1",
pages = "6:1--6:??",
month = mar,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2737816",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Mar 27 17:58:56 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We define smooth games of incomplete information. We
prove an ``extension theorem'' for such games: price of
anarchy bounds for pure Nash equilibria for all induced
full-information games extend automatically, without
quantitative degradation, to all mixed-strategy
Bayes--Nash equilibria with respect to a product prior
distribution over players' preferences. We also note
that, for Bayes--Nash equilibria in games with
correlated player preferences, there is no general
extension theorem for smooth games. We give several
applications of our definition and extension theorem.
First, we show that many games of incomplete
information for which the price of anarchy has been
studied are smooth in our sense. Our extension theorem
unifies much of the known work on the price of anarchy
in games of incomplete information. Second, we use our
extension theorem to prove new bounds on the price of
anarchy of Bayes--Nash equilibria in routing games with
incomplete information.",
acknowledgement = ack-nhfb,
articleno = "6",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Goldstein:2015:IET,
author = "Daniel G. Goldstein and R. Preston McAfee and
Siddharth Suri",
title = "Improving the Effectiveness of Time-Based Display
Advertising",
journal = j-TEAC,
volume = "3",
number = "2",
pages = "7:1--7:??",
month = apr,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2716323",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Tue Apr 21 11:23:36 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "Display advertisements are typically sold by the
impression, where one impression is simply one download
of an ad. Previous work has shown that the longer an ad
is in view, the more likely a user is to remember it
and that there are diminishing returns to increased
exposure time [Goldstein et al. 2011]. Since a pricing
scheme that is at least partially based on time is more
exact than one based solely on impressions, time-based
advertising may become an industry standard. We answer
an open question concerning time-based pricing schemes:
how should time slots for advertisements be divided? We
provide evidence that ads can be scheduled in a way
that leads to greater total recollection, which
advertisers value, and increased revenue, which
publishers value. We document two main findings. First,
we show that displaying two shorter ads results in more
total recollection than displaying one longer ad of
twice the duration. Second, we show that this effect
disappears as the duration of these ads increases. We
conclude with a theoretical prediction regarding the
circumstances under which the display advertising
industry would benefit if it moved to a partially or
fully time-based standard.",
acknowledgement = ack-nhfb,
articleno = "7",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Ganzfried:2015:SOE,
author = "Sam Ganzfried and Tuomas Sandholm",
title = "Safe Opponent Exploitation",
journal = j-TEAC,
volume = "3",
number = "2",
pages = "8:1--8:??",
month = apr,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2716322",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Tue Apr 21 11:23:36 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We consider the problem of playing a repeated
two-player zero-sum game safely: that is, guaranteeing
at least the value of the game per period in
expectation regardless of the strategy used by the
opponent. Playing a stage-game equilibrium strategy at
each time step clearly guarantees safety, and prior
work has (incorrectly) stated that it is impossible to
simultaneously deviate from a stage-game equilibrium
(in hope of exploiting a suboptimal opponent) and to
guarantee safety. We show that such profitable
deviations are indeed possible specifically in games
where certain types of ``gift'' strategies exist, which
we define formally. We show that the set of strategies
constituting such gifts can be strictly larger than the
set of iteratively weakly-dominated strategies; this
disproves another recent assertion which states that
all noniteratively weakly dominated strategies are best
responses to each equilibrium strategy of the other
player. We present a full characterization of safe
strategies, and develop efficient algorithms for
exploiting suboptimal opponents while guaranteeing
safety. We also provide analogous results for
extensive-form games of perfect and imperfect
information, and present safe exploitation algorithms
and full characterizations of safe strategies for those
settings as well. We present experimental results in
Kuhn poker, a canonical test problem for game-theoretic
algorithms. Our experiments show that (1) aggressive
safe exploitation strategies significantly outperform
adjusting the exploitation within stage-game
equilibrium strategies only and (2) all the safe
exploitation strategies significantly outperform a
(nonsafe) best response strategy against strong dynamic
opponents.",
acknowledgement = ack-nhfb,
articleno = "8",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Hoefer:2015:SSA,
author = "Martin Hoefer and Thomas Kesselheim",
title = "Secondary Spectrum Auctions for Symmetric and
Submodular Bidders",
journal = j-TEAC,
volume = "3",
number = "2",
pages = "9:1--9:??",
month = apr,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2739041",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Tue Apr 21 11:23:36 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We study truthful auctions for secondary spectrum
usage in wireless networks. In this scenario, $n$
communication requests need to be allocated to $k$
available channels that are subject to interference and
noise. We present the first truthful mechanisms for
secondary spectrum auctions with symmetric or
submodular valuations. Our approach to model
interference uses an edge-weighted conflict graph, and
our algorithms provide asymptotically almost optimal
approximation bounds for conflict graphs with a small
inductive independence number $ \rho \ll n $. This
approach covers a large variety of interference models
such as, for instance, the protocol model or the
recently popular physical model of interference. For
unweighted conflict graphs and symmetric valuations we
use LP-rounding to obtain $ O(\rho)$-approximate
mechanisms; for weighted conflict graphs we get a
factor of $ O(\rho \cdot (\log n + \log k))$. For
submodular users we combine the convex rounding
framework of Dughmi et al. [2011] with randomized
metarounding to obtain $ O(\rho)$-approximate
mechanisms for matroid-rank-sum valuations; for
weighted conflict graphs we can fully drop the
dependence on $k$ to get $ O(\rho \cdot \log n)$. We
conclude with promising initial results for
deterministically truthful mechanisms that allow
approximation factors based on $ \rho $.",
acknowledgement = ack-nhfb,
articleno = "9",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Wilkens:2015:SCM,
author = "Christopher A. Wilkens and Balasubramanian Sivan",
title = "Single-Call Mechanisms",
journal = j-TEAC,
volume = "3",
number = "2",
pages = "10:1--10:??",
month = apr,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2741027",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Tue Apr 21 11:23:36 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "Truthfulness is fragile and demanding. It is
oftentimes harder to guarantee truthfulness when
solving a problem than it is to solve the problem
itself. Even worse, truthfulness can be utterly
destroyed by small uncertainties in a mechanism's
outcome. One obstacle is that truthful payments depend
on outcomes other than the one realized, such as the
lengths of non-shortest-paths in a shortest-path
auction. Single-call mechanisms are a powerful tool
that circumvents this obstacle: they implicitly charge
truthful payments, guaranteeing truthfulness in
expectation using only the outcome realized by the
mechanism. The cost of such truthfulness is a trade-off
between the expected quality of the outcome and the
risk of large payments. We study two of the most
general domains for truthful mechanisms and largely
settle when and to what extent single-call mechanisms
are possible. The first single-call construction was
discovered by Babaioff et al. [2010] in
single-parameter domains. They give a transformation
that turns any monotone, single-parameter allocation
rule into a truthful-in-expectation single-call
mechanism. Our first result is a natural complement to
Babaioff et al. [2010]: we give a new transformation
that produces a single-call VCG mechanism from any
allocation rule for which VCG payments are truthful.
Second, in both the single-parameter and VCG settings,
we precisely characterize the possible transformations,
showing that a wide variety of transformations are
possible but that all take a very simple form. Finally,
we study the inherent trade-off between the expected
quality of the outcome and the risk of large payments.
We show that our construction and that of Babaioff et
al. [2010] simultaneously optimize a variety of metrics
in their respective domains. Our study is motivated by
settings where uncertainty in a mechanism renders other
known techniques untruthful, and we offer a variety of
examples where such uncertainty can arise. In
particular, we analyze pay-per-click advertising
auctions, where the truthfulness of the standard
VCG-based auction is easily broken when the
auctioneer's estimated click-through-rates are
imprecise.",
acknowledgement = ack-nhfb,
articleno = "10",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Chakrabarti:2015:TSO,
author = "Deepayan Chakrabarti and Erik Vee",
title = "Traffic Shaping to Optimize Ad Delivery",
journal = j-TEAC,
volume = "3",
number = "2",
pages = "11:1--11:??",
month = apr,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2739010",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Tue Apr 21 11:23:36 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "Web publishers must balance two objectives: how to
keep users engaged by directing them to relevant
content, and how to properly monetize this user
traffic. The standard approach is to solve each problem
in isolation, for example, by displaying content that
is tailored to the user's interests so as to maximize
clickthrough rates (CTR), and also by building a
standalone ad serving system that displays ads
depending on the user's characteristics, the article
being viewed by the user, and advertiser-specified
constraints. However, showing the user only those
articles with highest expected CTR precludes the
display of some ads; if the publisher had previously
guaranteed the delivery of a certain volume of
impressions to such ads, then underdelivery penalties
might have to be paid. We propose a joint optimization
of article selection and ad serving that minimizes
underdelivery by shaping some of the incoming traffic
to pages where underperforming ads can be displayed,
while incurring only minor drops in CTR. In addition to
formulating the problem, we design an online
optimization algorithm that can find the optimal
traffic shaping probabilities for each new user using
only a cache of one number per ad contract. Experiments
on a large real-world ad-serving Web portal demonstrate
significant advantages over the standalone approach: a
threefold reduction in underdelivery with only 10\%
drop in CTR, or a 2.6-fold reduction with a 4\% CTR
drop, and similar results over a wide range.",
acknowledgement = ack-nhfb,
articleno = "11",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Ghosh:2015:MME,
author = "Arpita Ghosh and Mohammad Mahdian and R. Preston
McAfee and Sergei Vassilvitskii",
title = "To Match or Not to Match: Economics of Cookie Matching
in Online Advertising",
journal = j-TEAC,
volume = "3",
number = "2",
pages = "12:1--12:??",
month = apr,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2745801",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Tue Apr 21 11:23:36 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "Modern online advertising increasingly relies on the
ability to follow the same user across the Internet
using technology called cookie matching to increase
efficiency in ad allocation. Web publishers today use
this technology to share information about the websites
a user has visited, making it possible to target
advertisements to users based on their prior history.
This begs the question: do publishers (who are
competitors for advertising money) always have the
incentive to share online information? Intuitive
arguments as well as anecdotal evidence suggest that
sometimes a premium publisher might suffer from
information sharing through an effect called information leakage:
by sharing user information with the advertiser, the
advertiser will be able to target the same user
elsewhere on cheaper publishers, leading to a dilution
of the value of the supply on the premium publishers.
The goal of this article is to explore this aspect of
online information sharing. We show that, when
advertisers are homogeneous in the sense that their
relative valuations of users are consistent, publishers
always agree about the benefits of cookie matching in
equilibrium: either all publishers' revenues benefit,
or all suffer, from cookie matching. We also show using
a simple model that, when advertisers are not
homogeneous, the information leakage indeed can occur,
with cookie matching helping one publisher's revenues
while harming the other.",
acknowledgement = ack-nhfb,
articleno = "12",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Kash:2015:EAS,
author = "Ian A. Kash and Eric J. Friedman and Joseph Y.
Halpern",
title = "An Equilibrium Analysis of Scrip Systems",
journal = j-TEAC,
volume = "3",
number = "3",
pages = "13:1--13:??",
month = jun,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2659006",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Jul 31 19:26:16 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "A game-theoretic model of scrip (artificial currency)
systems is analyzed. It is shown that relative entropy
can be used to characterize the distribution of agent
wealth when all agents use threshold strategies ---
that is, they volunteer to do work if and only if they
have below a threshold amount of money. Monotonicity of
agents' best-reply functions is used to show that scrip
systems have pure strategy equilibria where all agents
use threshold strategies. An algorithm is given that
can compute such an equilibrium and the resulting
distribution of wealth.",
acknowledgement = ack-nhfb,
articleno = "13",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Immorlica:2015:ILR,
author = "Nicole Immorlica and Mohammad Mahdian",
title = "Incentives in Large Random Two-Sided Markets",
journal = j-TEAC,
volume = "3",
number = "3",
pages = "14:1--14:??",
month = jun,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2656202",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Jul 31 19:26:16 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "Many centralized two-sided markets form a matching
between participants by running a stable matching
algorithm. It is a well-known fact that no matching
mechanism based on a stable matching algorithm can
guarantee truthfulness as a dominant strategy for
participants. However, we show that in a probabilistic
setting where the preference lists on one side of the
market are composed of only a constant (independent of
the size of the market) number of entries, each drawn
from an arbitrary distribution, the number of
participants that have more than one stable partner is
vanishingly small. This proves (and generalizes) a
conjecture of Roth and Peranson [1999]. As a corollary
of this result, we show that, with high probability,
the truthful strategy is the best response for a random
player when the other players are truthful. We also
analyze equilibria of the deferred acceptance stable
matching game. We show that the game with complete
information has an equilibrium in which, in
expectation, a $ (1 - o(1)) $ fraction of the
strategies are truthful. In the more realistic setting
of a game of incomplete information, we will show that
the set of truthful strategies form a $ (1 +
o(1))$-approximate Bayesian--Nash equilibrium for
uniformly random preferences. Our results have
implications in many practical settings and are
inspired by the work of Roth and Peranson [1999] on the
National Residency Matching Program.",
acknowledgement = ack-nhfb,
articleno = "14",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Cavallo:2015:DAA,
author = "Ruggiero Cavallo and R. Preston McAfee and Sergei
Vassilvitskii",
title = "Display Advertising Auctions with Arbitrage",
journal = j-TEAC,
volume = "3",
number = "3",
pages = "15:1--15:??",
month = jun,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2668033",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Jul 31 19:26:16 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "Online display advertising exchanges connect Web
publishers with advertisers seeking to place ads. In
many cases, the advertiser obtains value from an ad
impression (a viewing by a user) only if it is clicked,
and frequently advertisers prefer to pay contingent on
this occurring. But at the same time, many publishers
demand payment independent of clicks. Arbitragers with
good estimates of click-probabilities can resolve this
conflict by absorbing the risk and acting as an
intermediary, paying the publisher on allocation and
being paid only if a click occurs. This article
examines the incentives of advertisers and arbitragers
and contributes an efficient mechanism with truthful
bidding by the advertisers and truthful reporting of
click predictions by arbitragers as dominant strategies
while, given that a hazard rate condition is satisfied,
yielding increased revenue to the publisher. We provide
empirical evidence based on bid data from Yahoo's Right
Media Exchange suggesting that the mechanism would
increase revenue in practice.",
acknowledgement = ack-nhfb,
articleno = "15",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Bilo:2015:BDN,
author = "Davide Bil{\`o} and Luciano Gual{\`a} and Guido
Proietti",
title = "Bounded-Distance Network Creation Games",
journal = j-TEAC,
volume = "3",
number = "3",
pages = "16:1--16:??",
month = jun,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2770639",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Jul 31 19:26:16 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "A network creation game simulates a decentralized and
noncooperative construction of a communication network.
Informally, there are $n$ players sitting on the
network nodes, which attempt to establish a reciprocal
communication by activating, thereby incurring a
certain cost, any of their incident links. The goal of
each player is to have all the other nodes as close as
possible in the resulting network, while buying as few
links as possible. According to this intuition, any
model of the game must then appropriately address a
balance between these two conflicting objectives.
Motivated by the fact that a player might have a strong
requirement about her centrality in the network, we
introduce a new setting in which a player who maintains
her (maximum or average) distance to the other nodes
within a given bound incurs a cost equal to the number
of activated edges; otherwise her cost is unbounded. We
study the problem of understanding the structure of
pure Nash equilibria of the resulting games, which we
call MaxBD and SumBD, respectively. For both games, we
show that when distance bounds associated with players
are nonuniform, then equilibria can be arbitrarily bad.
On the other hand, for MaxBD, we show that when nodes
have a uniform bound $ D \geq 3$ on the maximum
distance, then the price of anarchy (PoA) is lower and
upper bounded by 2 and $ O(n^{1 / \lfloor \log_3 D
\rfloor + 1})$, respectively (i.e., PoA is constant as
soon as $D$ is $ \Omega (n^\epsilon)$, for any $
\epsilon > 0$), while for the interesting case $ D =
2$, we are able to prove that the PoA is $ \Omega
(\sqrt {n})$ and $ O(\sqrt {n \log n})$. For the
uniform SumBD, we obtain similar (asymptotically)
results and moreover show that PoA becomes constant as
soon as the bound on the average distance is $ 2^{
\omega (\sqrt {\log n})}$.",
acknowledgement = ack-nhfb,
articleno = "16",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Chen:2015:ISI,
author = "Yiling Chen and Nicole Immorlica",
title = "Introduction to the Special Issue on {WINE'13}",
journal = j-TEAC,
volume = "3",
number = "4",
pages = "17:1--17:??",
month = jul,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2796538",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Jul 31 19:26:18 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
acknowledgement = ack-nhfb,
articleno = "17",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Bateni:2015:RMN,
author = "Mohammadhossein Bateni and Nima Haghpanah and
Balasubramanian Sivan and Morteza Zadimoghaddam",
title = "Revenue Maximization with Nonexcludable Goods",
journal = j-TEAC,
volume = "3",
number = "4",
pages = "18:1--18:??",
month = jul,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2790131",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Jul 31 19:26:18 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We study the design of revenue-maximizing mechanisms
for selling nonexcludable public goods. In particular,
we study revenue-maximizing mechanisms in Bayesian
settings for facility location problems on graphs where
no agent can be excluded from using a facility that has
been constructed. We show that the pointwise
optimization problem involved in implementing the
revenue optimal mechanism, namely, optimizing over
arbitrary profiles of virtual values, is hard to
approximate within a factor of $ \Omega (n^{2 -
\epsilon }) $ (assuming P $ \neq $ NP) even in star
graphs. Furthermore, we show that optimizing the
expected revenue is APX-hard. However, in a relevant
special case, rooted version with identical
distributions, we construct polynomial time truthful
mechanisms that approximate the optimal expected
revenue within a constant factor. We also study the
effect of partially mitigating nonexcludability by
collecting tolls for using the facilities. We show that
such ``posted-price'' mechanisms obtain significantly
higher revenue and often approach the optimal revenue
obtainable with full excludability.",
acknowledgement = ack-nhfb,
articleno = "18",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Minooei:2015:NOR,
author = "Hadi Minooei and Chaitanya Swamy",
title = "Near-Optimal and Robust Mechanism Design for Covering
Problems with Correlated Players",
journal = j-TEAC,
volume = "3",
number = "4",
pages = "19:1--19:??",
month = jul,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2790133",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Jul 31 19:26:18 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We consider the problem of designing
incentive-compatible, ex-post individually rational
(IR) mechanisms for covering problems in the Bayesian
setting, where players' types are drawn from an
underlying distribution and may be correlated, and the
goal is to minimize the expected total payment made by
the mechanism. We formulate a notion of incentive
compatibility (IC) that we call support-based IC that
is substantially more robust than Bayesian IC, and
develop black-box reductions from support-based-IC
mechanism design to algorithm design. For
single-dimensional settings, this black-box reduction
applies even when we only have an LP-relative
approximation algorithm for the algorithmic problem.
Thus, we obtain near-optimal mechanisms for various
covering settings, including single-dimensional
covering problems, multi-item procurement auctions, and
multidimensional facility location.",
acknowledgement = ack-nhfb,
articleno = "19",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Fotakis:2015:TFD,
author = "Dimitris Fotakis and Emmanouil Zampetakis",
title = "Truthfulness Flooded Domains and the Power of
Verification for Mechanism Design",
journal = j-TEAC,
volume = "3",
number = "4",
pages = "20:1--20:??",
month = jul,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2790086",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Jul 31 19:26:18 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "In this work, we investigate the reasons that make
symmetric partial verification essentially useless in
virtually all domains. Departing from previous work, we
consider any possible (finite or infinite) domain and
general symmetric verification. We identify a natural
property, namely, that the correspondence graph of a
symmetric verification $M$ is strongly connected by
finite paths along which the preferences are consistent
with the preferences at the endpoints, and prove that
this property is sufficient for the equivalence of
truthfulness and $M$-truthfulness. In fact, defining
appropriate versions of this property, we obtain this
result for deterministic and randomized mechanisms with
and without money. Moreover, we show that a slightly
relaxed version of this property is also necessary for
the equivalence of truthfulness and $M$-truthfulness.
Our conditions provide a generic and convenient way of
checking whether truthful implementation can take
advantage of any symmetric verification scheme in any
(finite or infinite) domain. Since the simplest
symmetric verification is the local verification,
specific cases of our result are closely related, in
the case without money, to the research about the
equivalence of local truthfulness and global
truthfulness. To complete the picture, we consider
asymmetric verification and prove that a social choice
function $f$ is $M$-truthfully implementable by some
asymmetric verification $M$ if and only if $f$ does not
admit a cycle of profitable deviations.",
acknowledgement = ack-nhfb,
articleno = "20",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Kollias:2015:RPE,
author = "Konstantinos Kollias and Tim Roughgarden",
title = "Restoring Pure Equilibria to Weighted Congestion
Games",
journal = j-TEAC,
volume = "3",
number = "4",
pages = "21:1--21:??",
month = jul,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2781678",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Jul 31 19:26:18 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "Congestion games model several interesting
applications, including routing and network formation
games, and also possess attractive theoretical
properties, including the existence of and convergence
of natural dynamics to a pure Nash equilibrium.
Weighted variants of congestion games that rely on
sharing costs proportional to players' weights do not
generally have pure-strategy Nash equilibria. We
propose a new way of assigning costs to players with
weights in congestion games that recovers the important
properties of the unweighted model. This method is
derived from the Shapley value, and it always induces a
game with a (weighted) potential function. For the
special cases of weighted network cost-sharing and
weighted routing games with Shapley value-based cost
shares, we prove tight bounds on the worst-case
inefficiency of equilibria. For weighted network
cost-sharing games, we precisely calculate the price of
stability for any given player weight vector, while for
weighted routing games, we precisely calculate the
price of anarchy, as a parameter of the set of
allowable cost functions.",
acknowledgement = ack-nhfb,
articleno = "21",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Babichenko:2015:QCC,
author = "Yakov Babichenko and Siddharth Barman",
title = "Query Complexity of Correlated Equilibrium",
journal = j-TEAC,
volume = "3",
number = "4",
pages = "22:1--22:??",
month = jul,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2785668",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Jul 31 19:26:18 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We study lower bounds on the query complexity of
determining correlated equilibrium. In particular, we
consider a query model in which an $n$-player game is
specified via a black box that returns players'
utilities at pure action profiles. In this model, we
establish that in order to compute a correlated
equilibrium, any deterministic algorithm must query the
black box an exponential (in $n$) number of times.",
acknowledgement = ack-nhfb,
articleno = "22",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Aumann:2015:EFD,
author = "Yonatan Aumann and Yair Dombb",
title = "The Efficiency of Fair Division with Connected
Pieces",
journal = j-TEAC,
volume = "3",
number = "4",
pages = "23:1--23:??",
month = jul,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2781776",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Jul 31 19:26:18 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "The cake-cutting setting, in which a single
heterogeneous good must be divided between multiple
parties with different tastes, is a classic model for
studying questions regarding fairness in resource
allocation. In this work, we turn our attention to
(economic) efficiency considerations in cake cutting,
examining the possible trade-offs between meeting the
fairness criteria, on the one hand, and maximizing
social welfare, on the other. We focus on divisions
that give each agent a single (contiguous) piece of the
cake and provide tight bounds (or, in some cases,
nearly tight) on the possible degradation in
utilitarian and egalitarian welfare resulting from
meeting the fairness requirements.",
acknowledgement = ack-nhfb,
articleno = "23",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Dimitrov:2015:SPM,
author = "Stanko Dimitrov and Rahul Sami and Marina A. Epelman",
title = "Subsidized Prediction Mechanisms for Risk-Averse
Agents",
journal = j-TEAC,
volume = "3",
number = "4",
pages = "24:1--24:??",
month = jul,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2716327",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Jul 31 19:26:18 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "In this article, we study the design and
characterization of sequential prediction mechanisms in
the presence of agents with unknown risk aversion. We
formulate a collection of desirable properties for any
sequential forecasting mechanism. We present a
randomized mechanism that satisfies all of these
properties, including a guarantee that it is myopically
optimal for each agent to report honestly, regardless
of her degree of risk aversion. We observe, however,
that the mechanism has an undesirable side effect: each
agent's expected reward, normalized against the
inherent value of her private information, decreases
exponentially with the number of agents. We prove a
negative result showing that this is unavoidable: any
mechanism that is myopically strategyproof for agents
of all risk types, while also satisfying other natural
properties of sequential forecasting mechanisms, must
sometimes result in a player getting an exponentially
small expected normalized reward.",
acknowledgement = ack-nhfb,
articleno = "24",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Xiao:2015:SOD,
author = "Yuanzhang Xiao and Mihaela {Van Der Schaar}",
title = "Socially-Optimal Design of Service Exchange Platforms
with Imperfect Monitoring",
journal = j-TEAC,
volume = "3",
number = "4",
pages = "25:1--25:??",
month = jul,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2785627",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Jul 31 19:26:18 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We study the design of service exchange platforms in
which long-lived anonymous users exchange services with
each other. The users are randomly and repeatedly
matched into pairs of clients and servers, and each
server can choose to provide high-quality or
low-quality services to the client with whom it is
matched. Since the users are anonymous and incur high
costs (e.g., exert high effort) in providing
high-quality services, it is crucial that the platform
incentivizes users to provide high-quality services.
Rating mechanisms have been shown to work effectively
as incentive schemes in such platforms. A rating
mechanism labels each user by a rating, which
summarizes the user's past behaviors, recommends a
desirable behavior to each server (e.g., provide
higher-quality services for clients with higher
ratings), and updates each server's rating based on the
recommendation and its client's report on the service
quality. Based on this recommendation, a low-rating
user is less likely to obtain high-quality services,
thereby providing users with incentives to obtain high
ratings by providing high-quality services. However, if
monitoring or reporting is imperfect --- clients do not
perfectly assess the quality or the reports are lost
--- a user's rating may not be updated correctly. In
the presence of such errors, existing rating mechanisms
cannot achieve the social optimum. In this article, we
propose the first rating mechanism that does achieve
the social optimum, even in the presence of monitoring
or reporting errors. On one hand, the socially-optimal
rating mechanism needs to be complicated enough,
because the optimal recommended behavior depends not
only on the current rating distribution, but also
(necessarily) on the history of past rating
distributions in the platform. On the other hand, we
prove that the social optimum can be achieved by
``simple'' rating mechanisms that use binary rating
labels and a small set of (three) recommended
behaviors. We provide design guidelines of
socially-optimal rating mechanisms and a low-complexity
online algorithm for the rating mechanism to determine
the optimal recommended behavior.",
acknowledgement = ack-nhfb,
articleno = "25",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}
@Article{Nath:2015:AMD,
author = "Swaprava Nath and Arunava Sen",
title = "Affine Maximizers in Domains with Selfish Valuations",
journal = j-TEAC,
volume = "3",
number = "4",
pages = "26:1--26:??",
month = jul,
year = "2015",
CODEN = "????",
DOI = "http://dx.doi.org/10.1145/2786014",
ISSN = "2167-8375 (print), 2167-8383 (electronic)",
ISSN-L = "2167-8375",
bibdate = "Fri Jul 31 19:26:18 MDT 2015",
bibsource = "http://portal.acm.org/;
http://www.math.utah.edu/pub/tex/bib/teac.bib",
abstract = "We consider the domain of selfish and continuous
preferences over a ``rich'' allocation space and show
that onto, strategyproof and allocation non-bossy
social choice functions are affine maximizers. Roberts
[1979] proves this result for a finite set of
alternatives and an unrestricted valuation space. In
this article, we show that in a subdomain of the
unrestricted valuations with the additional assumption
of allocation non-bossiness, using the richness of the
allocations, the strategyproof social choice functions
can be shown to be affine maximizers. We provide an
example to show that allocation non-bossiness is indeed
critical for this result. This work shows that an
affine maximizer result needs a certain amount of
richness split across valuations and allocations.",
acknowledgement = ack-nhfb,
articleno = "26",
fjournal = "ACM Transactions on Economics and Computation",
journal-URL = "http://dl.acm.org/citation.cfm?id=2542174",
}