%%% -*-BibTeX-*-
%%% ====================================================================
%%%  BibTeX-file{
%%%     author          = "Nelson H. F. Beebe",
%%%     version         = "1.09",
%%%     date            = "14 October 2017",
%%%     time            = "10:26:08 MDT",
%%%     filename        = "stoc2010.bib",
%%%     address         = "University of Utah
%%%                        Department of Mathematics, 110 LCB
%%%                        155 S 1400 E RM 233
%%%                        Salt Lake City, UT 84112-0090
%%%                        USA",
%%%     telephone       = "+1 801 581 5254",
%%%     FAX             = "+1 801 581 4148",
%%%     URL             = "http://www.math.utah.edu/~beebe",
%%%     checksum        = "31388 10295 60046 545326",
%%%     email           = "beebe at math.utah.edu, beebe at acm.org,
%%%                        beebe at computer.org (Internet)",
%%%     codetable       = "ISO/ASCII",
%%%     keywords        = "ACM Symposium on Theory of Computing (STOC)",
%%%     license         = "public domain",
%%%     supported       = "yes",
%%%     docstring       = "This is a COMPLETE bibliography of
%%%                        publications in the ACM Symposium on Theory
%%%                        of Computing (STOC) conference proceedings
%%%                        for the decade 2010--2019.  Companion
%%%                        bibliographies stoc19xx.bib and stoc20xx.bib
%%%                        cover other decades, and stoc.bib contains
%%%                        entries for just the proceedings volumes
%%%                        themselves.
%%%
%%%                        There are World-Wide Web sites for these
%%%                        publications at
%%%
%%%                            http://dl.acm.org/event.cfm?id=RE224
%%%                            http://www.sigact.org/stoc.html?searchterm=STOC
%%%                            http://www.acm.org/pubs/contents/proceedings/series/stoc/
%%%
%%%                        At version 1.09, the year coverage looked
%%%                        like this:
%%%
%%%                             2006 (   2)    2009 (   0)    2012 (  90)
%%%                             2007 (   0)    2010 (  83)    2013 ( 101)
%%%                             2008 (   0)    2011 (  85)
%%%
%%%                             InProceedings:  356
%%%                             Proceedings:      5
%%%
%%%                             Total entries:  361
%%%
%%%                        The checksum field above contains a CRC-16
%%%                        checksum as the first value, followed by the
%%%                        equivalent of the standard UNIX wc (word
%%%                        count) utility output of lines, words, and
%%%                        characters.  This is produced by Robert
%%%                        Solovay's checksum utility; a verification
%%%                        sketch follows this header.",
%%%  }
%%% ====================================================================
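%%% Verification sketch for the checksum field above: a minimal Python
%%% program that reproduces the wc-style counts (lines, words,
%%% characters).  The exact CRC-16 variant used by Robert Solovay's
%%% checksum utility is not documented here, so the crc16_arc routine
%%% below assumes the common CRC-16/ARC polynomial and is illustrative
%%% only.  BibTeX ignores text outside entries, so this sketch is
%%% inert.

import sys

def wc_counts(data):
    # lines = newline count, words = whitespace-separated tokens,
    # chars = byte count; matches "wc" output for an ASCII file
    return data.count(b"\n"), len(data.split()), len(data)

def crc16_arc(data):
    # CRC-16/ARC: polynomial 0x8005, bit-reflected as 0xA001 (an
    # assumed variant; Solovay's utility may differ)
    crc = 0
    for byte in data:
        crc ^= byte
        for _ in range(8):
            crc = (crc >> 1) ^ 0xA001 if crc & 1 else crc >> 1
    return crc

if __name__ == "__main__":
    raw = open(sys.argv[1], "rb").read()
    lines, words, chars = wc_counts(raw)
    print(crc16_arc(raw), lines, words, chars)

%%% ====================================================================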
@Preamble{
    "\ifx \undefined \mathbb \def \mathbb #1{{\bf #1}}\fi" #
    "\ifx \undefined \norm \def \norm {{\rm norm}} \fi" #
    "\ifx \undefined \ocirc \def \ocirc #1{{\accent'27#1}}\fi" #
    "\ifx \undefined \poly \def \poly {{\rm poly}} \fi" #
    "\ifx \undefined \polylog \def \polylog {{\rm polylog}} \fi" #
    "\ifx \undefined \rank \def \rank {{\rm rank}} \fi"
}
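
%%% The preamble above supplies fallback definitions so that titles
%%% using \poly, \polylog, \rank, and friends typeset even when the
%%% citing document does not define them; the \ifx \undefined guard
%%% leaves any existing definition untouched.  A minimal LaTeX usage
%%% sketch (the surrounding document text is illustrative only):

\documentclass{article}
\begin{document}
% The preamble string is copied into the .bbl by BibTeX, so a field
% containing, say, $\poly(n)$ renders via the fallback definition
% unless this document already defines \poly itself.
A sample citation \cite{Meka:2010:PGP}.
\bibliographystyle{plain}
\bibliography{stoc2010}
\end{document}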

%%% ====================================================================
%%% Acknowledgement abbreviations:
@String{ack-nhfb = "Nelson H. F. Beebe,
                    University of Utah,
                    Department of Mathematics, 110 LCB,
                    155 S 1400 E RM 233,
                    Salt Lake City, UT 84112-0090, USA,
                    Tel: +1 801 581 5254,
                    FAX: +1 801 581 4148,
                    e-mail: \path|beebe@math.utah.edu|,
                            \path|beebe@acm.org|,
                            \path|beebe@computer.org| (Internet),
                    URL: \path|http://www.math.utah.edu/~beebe/|"}

%%% ====================================================================
%%% Publisher abbreviations:
@String{pub-ACM                 = "ACM Press"}

@String{pub-ACM:adr             = "New York, NY, USA"}

%%% ====================================================================
%%% Bibliography entries:
@InProceedings{Akavia:2006:BOW,
  author =       "Adi Akavia and Oded Goldreich and Shafi Goldwasser and
                 Dana Moshkovitz",
  title =        "On basing one-way functions on {NP}-hardness",
  crossref =     "ACM:2006:SPT",
  pages =        "701--710",
  year =         "2006",
  bibdate =      "Thu May 25 06:19:54 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  note =         "See erratum \cite{Akavia:2010:EBO}.",
  acknowledgement = ack-nhfb,
}
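
%%% Entries in this file carry only their own fields; shared data such
%%% as booktitle, publisher, and address are inherited through the
%%% crossref field from the corresponding proceedings entry (here,
%%% ACM:2006:SPT), which BibTeX requires to appear later in the .bib
%%% file.  Schematically, with the entry's leading at-sign omitted so
%%% that BibTeX skips this comment block (field values illustrative,
%%% not the actual entry):
%%%
%%%   Proceedings{ACM:2006:SPT,
%%%     editor =    "...",
%%%     booktitle = "Proceedings of the ... Annual ACM Symposium on
%%%                  Theory of Computing",
%%%     publisher = pub-ACM,
%%%     address =   pub-ACM:adr,
%%%     year =      "2006",
%%%   }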

@InProceedings{Kannan:2010:SMM,
  author =       "Ravindran Kannan",
  title =        "Spectral methods for matrices and tensors",
  crossref =     "ACM:2010:SPA",
  pages =        "1--12",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Talagrand:2010:MSS,
  author =       "Michel Talagrand",
  title =        "Are many small sets explicitly small?",
  crossref =     "ACM:2010:SPA",
  pages =        "13--36",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Montanari:2010:MPA,
  author =       "Andrea Montanari",
  title =        "Message passing algorithms: a success looking for
                 theoreticians",
  crossref =     "ACM:2010:SPA",
  pages =        "37--38",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Goel:2010:PML,
  author =       "Ashish Goel and Michael Kapralov and Sanjeev Khanna",
  title =        "Perfect matchings in {$O(n \log n)$} time in regular
                 bipartite graphs",
  crossref =     "ACM:2010:SPA",
  pages =        "39--46",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Leighton:2010:ELV,
  author =       "F. Thomson Leighton and Ankur Moitra",
  title =        "Extensions and limits to vertex sparsification",
  crossref =     "ACM:2010:SPA",
  pages =        "47--56",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kolla:2010:SSN,
  author =       "Alexandra Kolla and Yury Makarychev and Amin Saberi
                 and Shang-Hua Teng",
  title =        "Subgraph sparsification and nearly optimal
                 ultrasparsifiers",
  crossref =     "ACM:2010:SPA",
  pages =        "57--66",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Barak:2010:HCI,
  author =       "Boaz Barak and Mark Braverman and Xi Chen and Anup
                 Rao",
  title =        "How to compress interactive communication",
  crossref =     "ACM:2010:SPA",
  pages =        "67--76",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Klauck:2010:SDP,
  author =       "Hartmut Klauck",
  title =        "A strong direct product theorem for disjointness",
  crossref =     "ACM:2010:SPA",
  pages =        "77--86",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Beame:2010:HAP,
  author =       "Paul Beame and Trinh Huynh and Toniann Pitassi",
  title =        "Hardness amplification in proof complexity",
  crossref =     "ACM:2010:SPA",
  pages =        "87--96",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Gao:2010:LBO,
  author =       "Pu Gao and Nicholas C. Wormald",
  title =        "Load balancing and orientability thresholds for random
                 hypergraphs",
  crossref =     "ACM:2010:SPA",
  pages =        "97--104",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Bayati:2010:CAI,
  author =       "Mohsen Bayati and David Gamarnik and Prasad Tetali",
  title =        "Combinatorial approach to the interpolation method and
                 scaling limits in sparse random graphs",
  crossref =     "ACM:2010:SPA",
  pages =        "105--114",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Hirai:2010:MMP,
  author =       "Hiroshi Hirai",
  title =        "The maximum multiflow problems with bounded
                 fractionality",
  crossref =     "ACM:2010:SPA",
  pages =        "115--120",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Madry:2010:FAS,
  author =       "Aleksander Madry",
  title =        "Faster approximation schemes for fractional
                 multicommodity flow problems via dynamic graph
                 algorithms",
  crossref =     "ACM:2010:SPA",
  pages =        "121--130",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Aaronson:2010:FCQ,
  author =       "Scott Aaronson and Andrew Drucker",
  title =        "A full characterization of quantum advice",
  crossref =     "ACM:2010:SPA",
  pages =        "131--140",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Aaronson:2010:BPH,
  author =       "Scott Aaronson",
  title =        "{BQP} and the polynomial hierarchy",
  crossref =     "ACM:2010:SPA",
  pages =        "141--150",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Ambainis:2010:QLL,
  author =       "Andris Ambainis and Julia Kempe and Or Sattath",
  title =        "A quantum {Lov{\'a}sz} local lemma",
  crossref =     "ACM:2010:SPA",
  pages =        "151--160",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{De:2010:NOE,
  author =       "Anindya De and Thomas Vidick",
  title =        "Near-optimal extractors against quantum storage",
  crossref =     "ACM:2010:SPA",
  pages =        "161--170",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Applebaum:2010:PKC,
  author =       "Benny Applebaum and Boaz Barak and Avi Wigderson",
  title =        "Public-key cryptography from different assumptions",
  crossref =     "ACM:2010:SPA",
  pages =        "171--180",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Ajtai:2010:ORC,
  author =       "Mikl{\'o}s Ajtai",
  title =        "Oblivious {RAM}s without cryptographic assumptions",
  crossref =     "ACM:2010:SPA",
  pages =        "181--190",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Goyal:2010:RCC,
  author =       "Vipul Goyal and Abhishek Jain",
  title =        "On the round complexity of covert computation",
  crossref =     "ACM:2010:SPA",
  pages =        "191--200",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Bhaskara:2010:DHL,
  author =       "Aditya Bhaskara and Moses Charikar and Eden Chlamtac
                 and Uriel Feige and Aravindan Vijayaraghavan",
  title =        "Detecting high log-densities: an {$O(n^{1/4})$}
                 approximation for densest $k$-subgraph",
  crossref =     "ACM:2010:SPA",
  pages =        "201--210",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Bateni:2010:ASS,
  author =       "MohammadHossein Bateni and MohammadTaghi Hajiaghayi
                 and D{\'a}niel Marx",
  title =        "Approximation schemes for {Steiner} forest on planar
                 graphs and graphs of bounded treewidth",
  crossref =     "ACM:2010:SPA",
  pages =        "211--220",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Dey:2010:OHC,
  author =       "Tamal K. Dey and Anil N. Hirani and Bala
                 Krishnamoorthy",
  title =        "Optimal homologous cycles, total unimodularity, and
                 linear programming",
  crossref =     "ACM:2010:SPA",
  pages =        "221--230",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Williams:2010:IES,
  author =       "Ryan Williams",
  title =        "Improving exhaustive search implies superpolynomial
                 lower bounds",
  crossref =     "ACM:2010:SPA",
  pages =        "231--240",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Paturi:2010:CCS,
  author =       "Ramamohan Paturi and Pavel Pudl{\'a}k",
  title =        "On the complexity of circuit satisfiability",
  crossref =     "ACM:2010:SPA",
  pages =        "241--250",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Dell:2010:SAN,
  author =       "Holger Dell and Dieter van Melkebeek",
  title =        "Satisfiability allows no nontrivial sparsification
                 unless the polynomial-time hierarchy collapses",
  crossref =     "ACM:2010:SPA",
  pages =        "251--260",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Magniez:2010:RWP,
  author =       "Fr{\'e}d{\'e}ric Magniez and Claire Mathieu and Ashwin
                 Nayak",
  title =        "Recognizing well-parenthesized expressions in the
                 streaming model",
  crossref =     "ACM:2010:SPA",
  pages =        "261--270",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Braverman:2010:MID,
  author =       "Vladimir Braverman and Rafail Ostrovsky",
  title =        "Measuring independence of datasets",
  crossref =     "ACM:2010:SPA",
  pages =        "271--280",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Braverman:2010:ZOF,
  author =       "Vladimir Braverman and Rafail Ostrovsky",
  title =        "Zero-one frequency laws",
  crossref =     "ACM:2010:SPA",
  pages =        "281--290",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Orlin:2010:IAC,
  author =       "James B. Orlin",
  title =        "Improved algorithms for computing {Fisher}'s market
                 clearing prices",
  crossref =     "ACM:2010:SPA",
  pages =        "291--300",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Hartline:2010:BAM,
  author =       "Jason D. Hartline and Brendan Lucier",
  title =        "{Bayesian} algorithmic mechanism design",
  crossref =     "ACM:2010:SPA",
  pages =        "301--310",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Chawla:2010:MPM,
  author =       "Shuchi Chawla and Jason D. Hartline and David L. Malec
                 and Balasubramanian Sivan",
  title =        "Multi-parameter mechanism design and sequential posted
                 pricing",
  crossref =     "ACM:2010:SPA",
  pages =        "311--320",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Lokshtanov:2010:SSA,
  author =       "Daniel Lokshtanov and Jesper Nederlof",
  title =        "Saving space by algebraization",
  crossref =     "ACM:2010:SPA",
  pages =        "321--330",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Haramaty:2010:SCQ,
  author =       "Elad Haramaty and Amir Shpilka",
  title =        "On the structure of cubic and quartic polynomials",
  crossref =     "ACM:2010:SPA",
  pages =        "331--340",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Dasgupta:2010:SJL,
  author =       "Anirban Dasgupta and Ravi Kumar and Tam{\'a}s
                 Sarl{\'o}s",
  title =        "A sparse {Johnson--Lindenstrauss} transform",
  crossref =     "ACM:2010:SPA",
  pages =        "341--350",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Micciancio:2010:DSE,
  author =       "Daniele Micciancio and Panagiotis Voulgaris",
  title =        "A deterministic single exponential time algorithm for
                 most lattice problems based on {Voronoi} cell
                 computations",
  crossref =     "ACM:2010:SPA",
  pages =        "351--358",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Cardinal:2010:SUP,
  author =       "Jean Cardinal and Samuel Fiorini and Gwena{\"e}l Joret
                 and Rapha{\"e}l M. Jungers and J. Ian Munro",
  title =        "Sorting under partial information (without the
                 ellipsoid algorithm)",
  crossref =     "ACM:2010:SPA",
  pages =        "359--368",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Lee:2010:MMP,
  author =       "Jon Lee and Maxim Sviridenko and Jan Vondr{\'a}k",
  title =        "Matroid matching: the power of local search",
  crossref =     "ACM:2010:SPA",
  pages =        "369--378",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Bhattacharya:2010:BCA,
  author =       "Sayan Bhattacharya and Gagan Goel and Sreenivas
                 Gollapudi and Kamesh Munagala",
  title =        "Budget constrained auctions with heterogeneous items",
  crossref =     "ACM:2010:SPA",
  pages =        "379--388",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Fraigniaud:2010:SSW,
  author =       "Pierre Fraigniaud and George Giakkoupis",
  title =        "On the searchability of small-world networks with
                 arbitrary underlying structure",
  crossref =     "ACM:2010:SPA",
  pages =        "389--398",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Chierichetti:2010:ATB,
  author =       "Flavio Chierichetti and Silvio Lattanzi and Alessandro
                 Panconesi",
  title =        "Almost tight bounds for rumour spreading with
                 conductance",
  crossref =     "ACM:2010:SPA",
  pages =        "399--408",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Guruswami:2010:LDR,
  author =       "Venkatesan Guruswami and Johan H{\aa}stad and Swastik
                 Kopparty",
  title =        "On the list-decodability of random linear codes",
  crossref =     "ACM:2010:SPA",
  pages =        "409--416",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kopparty:2010:LLD,
  author =       "Swastik Kopparty and Shubhangi Saraf",
  title =        "Local list-decoding and testing of random linear codes
                 from high error",
  crossref =     "ACM:2010:SPA",
  pages =        "417--426",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Meka:2010:PGP,
  author =       "Raghu Meka and David Zuckerman",
  title =        "Pseudorandom generators for polynomial threshold
                 functions",
  crossref =     "ACM:2010:SPA",
  pages =        "427--436",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Haitner:2010:EIC,
  author =       "Iftach Haitner and Omer Reingold and Salil Vadhan",
  title =        "Efficiency improvements in constructing pseudorandom
                 generators from one-way functions",
  crossref =     "ACM:2010:SPA",
  pages =        "437--446",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Verbin:2010:LBT,
  author =       "Elad Verbin and Qin Zhang",
  title =        "The limits of buffering: a tight lower bound for
                 dynamic membership in the external memory model",
  crossref =     "ACM:2010:SPA",
  pages =        "447--456",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Onak:2010:MLM,
  author =       "Krzysztof Onak and Ronitt Rubinfeld",
  title =        "Maintaining a large matching and a small vertex
                 cover",
  crossref =     "ACM:2010:SPA",
  pages =        "457--464",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Duan:2010:COF,
  author =       "Ran Duan and Seth Pettie",
  title =        "Connectivity oracles for failure prone graphs",
  crossref =     "ACM:2010:SPA",
  pages =        "465--474",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Gilbert:2010:ASR,
  author =       "Anna C. Gilbert and Yi Li and Ely Porat and Martin J.
                 Strauss",
  title =        "Approximate sparse recovery: optimizing time and
                 measurements",
  crossref =     "ACM:2010:SPA",
  pages =        "475--484",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Godoy:2010:HPD,
  author =       "Guillem Godoy and Omer Gim{\'e}nez and Lander Ramos
                 and Carme {\`A}lvarez",
  title =        "The {HOM} problem is decidable",
  crossref =     "ACM:2010:SPA",
  pages =        "485--494",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kawamura:2010:CTO,
  author =       "Akitoshi Kawamura and Stephen Cook",
  title =        "Complexity theory for operators in analysis",
  crossref =     "ACM:2010:SPA",
  pages =        "495--502",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Burgisser:2010:SPE,
  author =       "Peter B{\"u}rgisser and Felipe Cucker",
  title =        "Solving polynomial equations in smoothed polynomial
                 time and a near solution to {Smale}'s 17th problem",
  crossref =     "ACM:2010:SPA",
  pages =        "503--512",
  year =         "2010",
  DOI =          "https://doi.org/10.1145/1806689.1806759",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The 17th of the problems proposed by Steve Smale for
                 the 21st century asks for the existence of a
                 deterministic algorithm computing an approximate
                 solution of a system of $n$ complex polynomials in $n$
                 unknowns in time polynomial, on the average, in the
                 size $N$ of the input system. A partial solution to
                 this problem was given by Carlos Beltran and Luis
                 Miguel Pardo who exhibited a randomized algorithm, call
                 it LV, doing so. In this paper we further extend this
                 result in several directions. Firstly, we perform a
                 smoothed analysis (in the sense of Spielman and Teng)
                 of algorithm LV and prove that its smoothed complexity
                  is polynomial in the input size and $\sigma^{-1}$, where
                 $\sigma$ controls the size of the random perturbation
                 of the input systems. Secondly, we perform a
                 condition-based analysis of LV. That is, we give a
                 bound, for each system $f$, of the expected running
                 time of LV with input $f$. In addition to its
                 dependence on $N$ this bound also depends on the
                 condition of $f$. Thirdly, and to conclude, we return
                 to Smale's 17th problem as originally formulated for
                 deterministic algorithms. We exhibit such an algorithm
                 and show that its average complexity is $N^{O(\log \log
                 N)}$. This is nearly a solution to Smale's 17th
                 problem.",
  acknowledgement = ack-nhfb,
}
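
%%% For orientation, smoothed complexity in the Spielman--Teng sense
%%% (as invoked in the abstract above) bounds expected running time
%%% under small random perturbations of a worst-case input.  One
%%% standard schematic form, not the paper's exact perturbation model:
%%%
%%%   $$ T^{\rm smooth}(N, \sigma) \;=\;
%%%      \max_{{\rm size}(f) = N} \; {\bf E}_g \bigl[ T(f + \sigma g) \bigr], $$
%%%
%%% with $g$ a standard Gaussian perturbation; the abstract's claim is
%%% that this quantity is polynomial in $N$ and $\sigma^{-1}$ for the
%%% randomized algorithm LV.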

@InProceedings{Kuhn:2010:DCD,
  author =       "Fabian Kuhn and Nancy Lynch and Rotem Oshman",
  title =        "Distributed computation in dynamic networks",
  crossref =     "ACM:2010:SPA",
  pages =        "513--522",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Sherstov:2010:OBS,
  author =       "Alexander A. Sherstov",
  title =        "Optimal bounds for sign-representing the intersection
                 of two halfspaces by polynomials",
  crossref =     "ACM:2010:SPA",
  pages =        "523--532",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Diakonikolas:2010:BAS,
  author =       "Ilias Diakonikolas and Prahladh Harsha and Adam
                 Klivans and Raghu Meka and Prasad Raghavendra and Rocco
                 A. Servedio and Li-Yang Tan",
  title =        "Bounding the average sensitivity and noise sensitivity
                 of polynomial threshold functions",
  crossref =     "ACM:2010:SPA",
  pages =        "533--542",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Harsha:2010:IPP,
  author =       "Prahladh Harsha and Adam Klivans and Raghu Meka",
  title =        "An invariance principle for polytopes",
  crossref =     "ACM:2010:SPA",
  pages =        "543--552",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kalai:2010:ELM,
  author =       "Adam Tauman Kalai and Ankur Moitra and Gregory
                 Valiant",
  title =        "Efficiently learning mixtures of two {Gaussians}",
  crossref =     "ACM:2010:SPA",
  pages =        "553--562",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Vegh:2010:AUN,
  author =       "L{\'a}szl{\'o} A. V{\'e}gh",
  title =        "Augmenting undirected node-connectivity by one",
  crossref =     "ACM:2010:SPA",
  pages =        "563--572",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Jain:2010:QP,
  author =       "Rahul Jain and Zhengfeng Ji and Sarvagya Upadhyay and
                 John Watrous",
  title =        "{QIP $=$ PSPACE}",
  crossref =     "ACM:2010:SPA",
  pages =        "573--582",
  year =         "2010",
  DOI =          "https://doi.org/10.1145/1806689.1806768",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We prove that the complexity class QIP, which consists
                 of all problems having quantum interactive proof
                 systems, is contained in PSPACE. This containment is
                 proved by applying a parallelized form of the matrix
                 multiplicative weights update method to a class of
                 semidefinite programs that captures the computational
                 power of quantum interactive proofs. As the containment
                 of PSPACE in QIP follows immediately from the
                 well-known equality IP $=$ PSPACE, the equality QIP $=$
                 PSPACE follows.",
  acknowledgement = ack-nhfb,
  remark =       "This work won the conference's Best Paper Award. An
                 updated version appears in Comm. ACM 53(12) 102--109
                 (December 2010),
                 \url{https://doi.org/10.1145/1859204.1859231}.",
}
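
%%% The matrix multiplicative weights update method named in the
%%% abstract above maintains a density matrix re-weighted by observed
%%% loss matrices; one standard form of the update (generic, not the
%%% paper's exact parallelized variant):
%%%
%%%   $$ W^{(t+1)} \;=\;
%%%      \frac{\exp(-\varepsilon \sum_{s \le t} M^{(s)})}
%%%           {{\rm Tr} \exp(-\varepsilon \sum_{s \le t} M^{(s)})}, $$
%%%
%%% where the $M^{(s)}$ are loss matrices of bounded spectral norm and
%%% $\varepsilon$ is the learning rate.  Solving the relevant
%%% semidefinite programs by a parallelized form of this iteration is
%%% what yields the PSPACE upper bound.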

@InProceedings{Byrka:2010:ILB,
  author =       "Jaros{\l}aw Byrka and Fabrizio Grandoni and Thomas
                 Rothvo{\ss} and Laura Sanit{\`a}",
  title =        "An improved {LP}-based approximation for {Steiner}
                 tree",
  crossref =     "ACM:2010:SPA",
  pages =        "583--592",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Dodis:2010:CBL,
  author =       "Yevgeniy Dodis and Mihai Patrascu and Mikkel Thorup",
  title =        "Changing base without losing space",
  crossref =     "ACM:2010:SPA",
  pages =        "593--602",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Patrascu:2010:TPL,
  author =       "Mihai Patrascu",
  title =        "Towards polynomial lower bounds for dynamic problems",
  crossref =     "ACM:2010:SPA",
  pages =        "603--610",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Fraigniaud:2010:OAS,
  author =       "Pierre Fraigniaud and Amos Korman",
  title =        "An optimal ancestry scheme and small universal
                 posets",
  crossref =     "ACM:2010:SPA",
  pages =        "611--620",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Lee:2010:BSM,
  author =       "James R. Lee and Mohammad Moharrami",
  title =        "Bilipschitz snowflakes and metrics of negative type",
  crossref =     "ACM:2010:SPA",
  pages =        "621--630",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Raghavendra:2010:AIS,
  author =       "Prasad Raghavendra and David Steurer and Prasad
                 Tetali",
  title =        "Approximations for the isoperimetric and spectral
                 profile of graphs and related parameters",
  crossref =     "ACM:2010:SPA",
  pages =        "631--640",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Varadarajan:2010:WGS,
  author =       "Kasturi Varadarajan",
  title =        "Weighted geometric set cover via quasi-uniform
                 sampling",
  crossref =     "ACM:2010:SPA",
  pages =        "641--648",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Karnin:2010:DIT,
  author =       "Zohar S. Karnin and Partha Mukhopadhyay and Amir
                 Shpilka and Ilya Volkovich",
  title =        "Deterministic identity testing of depth-4 multilinear
                 circuits with bounded top fan-in",
  crossref =     "ACM:2010:SPA",
  pages =        "649--658",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Raz:2010:TRL,
  author =       "Ran Raz",
  title =        "Tensor-rank and lower bounds for arithmetic formulas",
  crossref =     "ACM:2010:SPA",
  pages =        "659--666",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Hrubes:2010:NCC,
  author =       "Pavel Hrube{\v{s}} and Avi Wigderson and Amir
                 Yehudayoff",
  title =        "Non-commutative circuits and the sum-of-squares
                 problem",
  crossref =     "ACM:2010:SPA",
  pages =        "667--676",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Arvind:2010:HND,
  author =       "Vikraman Arvind and Srikanth Srinivasan",
  title =        "On the hardness of the noncommutative determinant",
  crossref =     "ACM:2010:SPA",
  pages =        "677--686",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kawarabayashi:2010:SPG,
  author =       "Ken-ichi Kawarabayashi and Paul Wollan",
  title =        "A shorter proof of the graph minor algorithm: the
                 unique linkage theorem",
  crossref =     "ACM:2010:SPA",
  pages =        "687--694",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kawarabayashi:2010:OCP,
  author =       "Ken-ichi Kawarabayashi and Bruce Reed",
  title =        "Odd cycle packing",
  crossref =     "ACM:2010:SPA",
  pages =        "695--704",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Hardt:2010:GDP,
  author =       "Moritz Hardt and Kunal Talwar",
  title =        "On the geometry of differential privacy",
  crossref =     "ACM:2010:SPA",
  pages =        "705--714",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Dwork:2010:DPU,
  author =       "Cynthia Dwork and Moni Naor and Toniann Pitassi and
                 Guy N. Rothblum",
  title =        "Differential privacy under continual observation",
  crossref =     "ACM:2010:SPA",
  pages =        "715--724",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Dyer:2010:CC,
  author =       "Martin E. Dyer and David M. Richerby",
  title =        "On the complexity of {\#CSP}",
  crossref =     "ACM:2010:SPA",
  pages =        "725--734",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Marx:2010:THP,
  author =       "D{\'a}niel Marx",
  title =        "Tractable hypergraph properties for constraint
                 satisfaction and conjunctive queries",
  crossref =     "ACM:2010:SPA",
  pages =        "735--744",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Svensson:2010:CHP,
  author =       "Ola Svensson",
  title =        "Conditional hardness of precedence constrained
                 scheduling on identical machines",
  crossref =     "ACM:2010:SPA",
  pages =        "745--754",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Raghavendra:2010:GEU,
  author =       "Prasad Raghavendra and David Steurer",
  title =        "Graph expansion and the unique games conjecture",
  crossref =     "ACM:2010:SPA",
  pages =        "755--764",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Roth:2010:IPM,
  author =       "Aaron Roth and Tim Roughgarden",
  title =        "Interactive privacy via the median mechanism",
  crossref =     "ACM:2010:SPA",
  pages =        "765--774",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kasiviswanathan:2010:PPR,
  author =       "Shiva Prasad Kasiviswanathan and Mark Rudelson and
                 Adam Smith and Jonathan Ullman",
  title =        "The price of privately releasing contingency tables
                 and the spectra of random matrices with correlated
                 rows",
  crossref =     "ACM:2010:SPA",
  pages =        "775--784",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Chandran:2010:PAA,
  author =       "Nishanth Chandran and Bhavana Kanukurthi and Rafail
                 Ostrovsky and Leonid Reyzin",
  title =        "Privacy amplification with asymptotically optimal
                 entropy loss",
  crossref =     "ACM:2010:SPA",
  pages =        "785--794",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Akavia:2010:EBO,
  author =       "Adi Akavia and Oded Goldreich and Shafi Goldwasser and
                 Dana Moshkovitz",
  title =        "Erratum for: {{\em On basing one-way functions on
                 NP-hardness}}",
  crossref =     "ACM:2010:SPA",
  pages =        "795--796",
  year =         "2010",
  bibdate =      "Wed Sep 1 10:42:57 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  note =         "See \cite{Akavia:2006:BOW}.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Patrascu:2011:PST,
  author =       "Mihai Patrascu and Mikkel Thorup",
  title =        "The power of simple tabulation hashing",
  crossref =     "ACM:2011:SPA",
  pages =        "1--10",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993638",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Lenzen:2011:TBP,
  author =       "Christoph Lenzen and Roger Wattenhofer",
  title =        "Tight bounds for parallel randomized load balancing:
                 extended abstract",
  crossref =     "ACM:2011:SPA",
  pages =        "11--20",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993639",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Doerr:2011:SNS,
  author =       "Benjamin Doerr and Mahmoud Fouz and Tobias Friedrich",
  title =        "Social networks spread rumors in sublogarithmic time",
  crossref =     "ACM:2011:SPA",
  pages =        "21--30",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993640",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Regev:2011:QOW,
  author =       "Oded Regev and Bo'az Klartag",
  title =        "Quantum one-way communication can be exponentially
                 stronger than classical communication",
  crossref =     "ACM:2011:SPA",
  pages =        "31--40",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993642",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Sherstov:2011:SDP,
  author =       "Alexander A. Sherstov",
  title =        "Strong direct product theorems for quantum
                 communication and query complexity",
  crossref =     "ACM:2011:SPA",
  pages =        "41--50",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993643",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Chakrabarti:2011:OLB,
  author =       "Amit Chakrabarti and Oded Regev",
  title =        "An optimal lower bound on the communication complexity
                 of gap-{Hamming}-distance",
  crossref =     "ACM:2011:SPA",
  pages =        "51--60",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993644",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Ding:2011:CTB,
  author =       "Jian Ding and James R. Lee and Yuval Peres",
  title =        "Cover times, blanket times, and majorizing measures",
  crossref =     "ACM:2011:SPA",
  pages =        "61--70",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993646",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Fung:2011:GFG,
  author =       "Wai Shing Fung and Ramesh Hariharan and Nicholas J. A.
                 Harvey and Debmalya Panigrahi",
  title =        "A general framework for graph sparsification",
  crossref =     "ACM:2011:SPA",
  pages =        "71--80",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993647",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kawarabayashi:2011:BAA,
  author =       "Ken-ichi Kawarabayashi and Yusuke Kobayashi",
  title =        "Breaking {$O(n^{1/2})$}-approximation algorithms for
                 the edge-disjoint paths problem with congestion two",
  crossref =     "ACM:2011:SPA",
  pages =        "81--88",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993648",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Holenstein:2011:ERO,
  author =       "Thomas Holenstein and Robin K{\"u}nzler and Stefano
                 Tessaro",
  title =        "The equivalence of the random oracle model and the
                 ideal cipher model, revisited",
  crossref =     "ACM:2011:SPA",
  pages =        "89--98",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993650",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Gentry:2011:SSN,
  author =       "Craig Gentry and Daniel Wichs",
  title =        "Separating succinct non-interactive arguments from all
                 falsifiable assumptions",
  crossref =     "ACM:2011:SPA",
  pages =        "99--108",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993651",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Pass:2011:LPS,
  author =       "Rafael Pass",
  title =        "Limits of provable security from standard
                 assumptions",
  crossref =     "ACM:2011:SPA",
  pages =        "109--118",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993652",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Papadimitriou:2011:OSI,
  author =       "Christos H. Papadimitriou and George Pierrakos",
  title =        "On optimal single-item auctions",
  crossref =     "ACM:2011:SPA",
  pages =        "119--128",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993654",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Dobzinski:2011:OAC,
  author =       "Shahar Dobzinski and Hu Fu and Robert D. Kleinberg",
  title =        "Optimal auctions with correlated bidders are easy",
  crossref =     "ACM:2011:SPA",
  pages =        "129--138",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993655",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Dobzinski:2011:IRT,
  author =       "Shahar Dobzinski",
  title =        "An impossibility result for truthful combinatorial
                 auctions with submodular valuations",
  crossref =     "ACM:2011:SPA",
  pages =        "139--148",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993656",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Dughmi:2011:COR,
  author =       "Shaddin Dughmi and Tim Roughgarden and Qiqi Yan",
  title =        "From convex optimization to randomized mechanisms:
                 toward optimal combinatorial auctions",
  crossref =     "ACM:2011:SPA",
  pages =        "149--158",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993657",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Braverman:2011:TCM,
  author =       "Mark Braverman and Anup Rao",
  title =        "Towards coding for maximum errors in interactive
                 communication",
  crossref =     "ACM:2011:SPA",
  pages =        "159--166",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993659",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kopparty:2011:HRC,
  author =       "Swastik Kopparty and Shubhangi Saraf and Sergey
                 Yekhanin",
  title =        "High-rate codes with sublinear-time decoding",
  crossref =     "ACM:2011:SPA",
  pages =        "167--176",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993660",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Zewi:2011:ATS,
  author =       "Noga Zewi and Eli Ben-Sasson",
  title =        "From affine to two-source extractors via approximate
                 duality",
  crossref =     "ACM:2011:SPA",
  pages =        "177--186",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993661",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Hatami:2011:CTA,
  author =       "Hamed Hatami and Shachar Lovett",
  title =        "Correlation testing for affine invariant properties on
                 {$\mathbb{F}_p^n$} in the high error regime",
  crossref =     "ACM:2011:SPA",
  pages =        "187--194",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993662",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Adsul:2011:RBG,
  author =       "Bharat Adsul and Jugal Garg and Ruta Mehta and Milind
                 Sohoni",
  title =        "Rank-1 bimatrix games: a homeomorphism and a
                 polynomial time algorithm",
  crossref =     "ACM:2011:SPA",
  pages =        "195--204",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993664",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Hansen:2011:EAS,
  author =       "Kristoffer Arnsfelt Hansen and Michal Koucky and Niels
                 Lauritzen and Peter Bro Miltersen and Elias P.
                 Tsigaridas",
  title =        "Exact algorithms for solving stochastic games:
                 extended abstract",
  crossref =     "ACM:2011:SPA",
  pages =        "205--214",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993665",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Immorlica:2011:DA,
  author =       "Nicole Immorlica and Adam Tauman Kalai and Brendan
                 Lucier and Ankur Moitra and Andrew Postlewaite and
                 Moshe Tennenholtz",
  title =        "Dueling algorithms",
  crossref =     "ACM:2011:SPA",
  pages =        "215--224",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993666",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Moitra:2011:POS,
  author =       "Ankur Moitra and Ryan O'Donnell",
  title =        "{Pareto} optimal solutions for smoothed analysts",
  crossref =     "ACM:2011:SPA",
  pages =        "225--234",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993667",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kolipaka:2011:MTM,
  author =       "Kashyap Babu Rao Kolipaka and Mario Szegedy",
  title =        "{Moser} and {Tardos} meet {Lov{\'a}sz}",
  crossref =     "ACM:2011:SPA",
  pages =        "235--244",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993669",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Moser:2011:FDS,
  author =       "Robin A. Moser and Dominik Scheder",
  title =        "A full derandomization of {Sch{\"o}ning}'s {$k$-SAT}
                 algorithm",
  crossref =     "ACM:2011:SPA",
  pages =        "245--252",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993670",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Gopalan:2011:PGC,
  author =       "Parikshit Gopalan and Raghu Meka and Omer Reingold and
                 David Zuckerman",
  title =        "Pseudorandom generators for combinatorial shapes",
  crossref =     "ACM:2011:SPA",
  pages =        "253--262",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993671",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Koucky:2011:PGG,
  author =       "Michal Kouck{\'y} and Prajakta Nimbhorkar and Pavel
                 Pudl{\'a}k",
  title =        "Pseudorandom generators for group products: extended
                 abstract",
  crossref =     "ACM:2011:SPA",
  pages =        "263--272",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993672",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Christiano:2011:EFL,
  author =       "Paul Christiano and Jonathan A. Kelner and Aleksander
                 Madry and Daniel A. Spielman and Shang-Hua Teng",
  title =        "Electrical flows, {Laplacian} systems, and faster
                 approximation of maximum flow in undirected graphs",
  crossref =     "ACM:2011:SPA",
  pages =        "273--282",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993674",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Friedmann:2011:SLB,
  author =       "Oliver Friedmann and Thomas Dueholm Hansen and Uri
                 Zwick",
  title =        "Subexponential lower bounds for randomized pivoting
                 rules for the simplex algorithm",
  crossref =     "ACM:2011:SPA",
  pages =        "283--292",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993675",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Haeupler:2011:ANC,
  author =       "Bernhard Haeupler",
  title =        "Analyzing network coding gossip made easy",
  crossref =     "ACM:2011:SPA",
  pages =        "293--302",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993676",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Chuzhoy:2011:AGC,
  author =       "Julia Chuzhoy",
  title =        "An algorithm for the graph crossing number problem",
  crossref =     "ACM:2011:SPA",
  pages =        "303--312",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993678",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Italiano:2011:IAM,
  author =       "Giuseppe F. Italiano and Yahav Nussbaum and Piotr
                 Sankowski and Christian Wulff-Nilsen",
  title =        "Improved algorithms for min cut and max flow in
                 undirected planar graphs",
  crossref =     "ACM:2011:SPA",
  pages =        "313--322",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993679",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Dinitz:2011:DSF,
  author =       "Michael Dinitz and Robert Krauthgamer",
  title =        "Directed spanners via flow-based linear programs",
  crossref =     "ACM:2011:SPA",
  pages =        "323--332",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993680",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Aaronson:2011:CCL,
  author =       "Scott Aaronson and Alex Arkhipov",
  title =        "The computational complexity of linear optics",
  crossref =     "ACM:2011:SPA",
  pages =        "333--342",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993682",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Brandao:2011:QTA,
  author =       "Fernando G. S. L. Brand{\~a}o and Matthias Christandl
                 and Jon Yard",
  title =        "A quasipolynomial-time algorithm for the quantum
                 separability problem",
  crossref =     "ACM:2011:SPA",
  pages =        "343--352",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993683",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kempe:2011:PRE,
  author =       "Julia Kempe and Thomas Vidick",
  title =        "Parallel repetition of entangled games",
  crossref =     "ACM:2011:SPA",
  pages =        "353--362",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993684",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Sarma:2011:DVH,
  author =       "Atish Das Sarma and Stephan Holzer and Liah Kor and
                 Amos Korman and Danupon Nanongkai and Gopal Pandurangan
                 and David Peleg and Roger Wattenhofer",
  title =        "Distributed verification and hardness of distributed
                 approximation",
  crossref =     "ACM:2011:SPA",
  pages =        "363--372",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993686",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Golab:2011:LID,
  author =       "Wojciech Golab and Lisa Higham and Philipp Woelfel",
  title =        "Linearizable implementations do not suffice for
                 randomized distributed computation",
  crossref =     "ACM:2011:SPA",
  pages =        "373--382",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993687",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kantor:2011:TWC,
  author =       "Erez Kantor and Zvi Lotker and Merav Parter and David
                 Peleg",
  title =        "The topology of wireless communication",
  crossref =     "ACM:2011:SPA",
  pages =        "383--392",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993688",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Giakkoupis:2011:OPS,
  author =       "George Giakkoupis and Nicolas Schabanel",
  title =        "Optimal path search in small worlds: dimension
                 matters",
  crossref =     "ACM:2011:SPA",
  pages =        "393--402",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993689",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Novocin:2011:LRA,
  author =       "Andrew Novocin and Damien Stehl{\'e} and Gilles
                 Villard",
  title =        "An {LLL}-reduction algorithm with quasi-linear time
                 complexity: extended abstract",
  crossref =     "ACM:2011:SPA",
  pages =        "403--412",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993691",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Khot:2011:NHA,
  author =       "Subhash Khot and Dana Moshkovitz",
  title =        "{NP}-hardness of approximately solving linear
                 equations over reals",
  crossref =     "ACM:2011:SPA",
  pages =        "413--420",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993692",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Saraf:2011:BBI,
  author =       "Shubhangi Saraf and Ilya Volkovich",
  title =        "Black-box identity testing of depth-4 multilinear
                 circuits",
  crossref =     "ACM:2011:SPA",
  pages =        "421--430",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993693",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Saxena:2011:BIT,
  author =       "Nitin Saxena and C. Seshadhri",
  title =        "Blackbox identity testing for bounded top fanin
                 depth-3 circuits: the field doesn't matter",
  crossref =     "ACM:2011:SPA",
  pages =        "431--440",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993694",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Demaine:2011:CDH,
  author =       "Erik D. Demaine and MohammadTaghi Hajiaghayi and
                 Ken-ichi Kawarabayashi",
  title =        "Contraction decomposition in $h$-minor-free graphs and
                 algorithmic applications",
  crossref =     "ACM:2011:SPA",
  pages =        "441--450",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993696",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kawarabayashi:2011:SAS,
  author =       "Ken-ichi Kawarabayashi and Paul Wollan",
  title =        "A simpler algorithm and shorter proof for the graph
                 minor decomposition",
  crossref =     "ACM:2011:SPA",
  pages =        "451--458",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993697",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Bousquet:2011:MF,
  author =       "Nicolas Bousquet and Jean Daligault and St{\'e}phan
                 Thomass{\'e}",
  title =        "Multicut is {FPT}",
  crossref =     "ACM:2011:SPA",
  pages =        "459--468",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993698",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Marx:2011:FPT,
  author =       "D{\'a}niel Marx and Igor Razgon",
  title =        "Fixed-parameter tractability of multicut parameterized
                 by the size of the cutset",
  crossref =     "ACM:2011:SPA",
  pages =        "469--478",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993699",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Grohe:2011:FTS,
  author =       "Martin Grohe and Ken-ichi Kawarabayashi and D{\'a}niel
                 Marx and Paul Wollan",
  title =        "Finding topological subgraphs is fixed-parameter
                 tractable",
  crossref =     "ACM:2011:SPA",
  pages =        "479--488",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993700",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kopparty:2011:CPF,
  author =       "Swastik Kopparty",
  title =        "On the complexity of powering in finite fields",
  crossref =     "ACM:2011:SPA",
  pages =        "489--498",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993702",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Chien:2011:ASH,
  author =       "Steve Chien and Prahladh Harsha and Alistair Sinclair
                 and Srikanth Srinivasan",
  title =        "Almost settling the hardness of noncommutative
                 determinant",
  crossref =     "ACM:2011:SPA",
  pages =        "499--508",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993703",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Burgisser:2011:GCT,
  author =       "Peter B{\"u}rgisser and Christian Ikenmeyer",
  title =        "Geometric complexity theory and tensor rank",
  crossref =     "ACM:2011:SPA",
  pages =        "509--518",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993704",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Barak:2011:RBD,
  author =       "Boaz Barak and Zeev Dvir and Amir Yehudayoff and Avi
                 Wigderson",
  title =        "Rank bounds for design matrices with applications to
                 combinatorial geometry and locally correctable codes",
  crossref =     "ACM:2011:SPA",
  pages =        "519--528",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993705",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kleinberg:2011:MMA,
  author =       "Jon Kleinberg and Sigal Oren",
  title =        "Mechanisms for (mis)allocating scientific credit",
  crossref =     "ACM:2011:SPA",
  pages =        "529--538",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993707",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Cole:2011:IPS,
  author =       "Richard Cole and Jos{\'e} R. Correa and Vasilis
                 Gkatzelis and Vahab Mirrokni and Neil Olver",
  title =        "Inner product spaces for {MinSum} coordination
                 mechanisms",
  crossref =     "ACM:2011:SPA",
  pages =        "539--548",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993708",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Feige:2011:MDU,
  author =       "Uriel Feige and Moshe Tennenholtz",
  title =        "Mechanism design with uncertain inputs: (to err is
                 human, to forgive divine)",
  crossref =     "ACM:2011:SPA",
  pages =        "549--558",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993709",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Patrascu:2011:DRU,
  author =       "Mihai P{\u{a}}tra{\c{s}}cu and Mikkel Thorup",
  title =        "Don't rush into a union: take time to find your
                 roots",
  crossref =     "ACM:2011:SPA",
  pages =        "559--568",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993711",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Feldman:2011:UFA,
  author =       "Dan Feldman and Michael Langberg",
  title =        "A unified framework for approximating and clustering
                 data",
  crossref =     "ACM:2011:SPA",
  pages =        "569--578",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993712",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Arya:2011:APM,
  author =       "Sunil Arya and Guilherme D. da Fonseca and David M.
                 Mount",
  title =        "Approximate polytope membership queries",
  crossref =     "ACM:2011:SPA",
  pages =        "579--586",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993713",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Karande:2011:OBM,
  author =       "Chinmay Karande and Aranyak Mehta and Pushkar
                 Tripathi",
  title =        "Online bipartite matching with unknown distributions",
  crossref =     "ACM:2011:SPA",
  pages =        "587--596",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993715",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Mahdian:2011:OBM,
  author =       "Mohammad Mahdian and Qiqi Yan",
  title =        "Online bipartite matching with random arrivals: an
                 approach based on strongly factor-revealing {LPs}",
  crossref =     "ACM:2011:SPA",
  pages =        "597--606",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993716",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Adamaszek:2011:ATB,
  author =       "Anna Adamaszek and Artur Czumaj and Matthias Englert
                 and Harald R{\"a}cke",
  title =        "Almost tight bounds for reordering buffer management",
  crossref =     "ACM:2011:SPA",
  pages =        "607--616",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993717",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Svensson:2011:SCS,
  author =       "Ola Svensson",
  title =        "{Santa Claus} schedules jobs on unrelated machines",
  crossref =     "ACM:2011:SPA",
  pages =        "617--626",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993718",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Indyk:2011:KMC,
  author =       "Piotr Indyk and Eric Price",
  title =        "{$K$}-median clustering, model-based compressive
                 sensing, and sparse recovery for earth mover distance",
  crossref =     "ACM:2011:SPA",
  pages =        "627--636",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993720",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Bourgain:2011:BBE,
  author =       "Jean Bourgain and Stephen J. Dilworth and Kevin Ford
                 and Sergei V. Konyagin and Denka Kutzarova",
  title =        "Breaking the $k^2$ barrier for explicit {RIP}
                 matrices",
  crossref =     "ACM:2011:SPA",
  pages =        "637--644",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993721",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Karnin:2011:DCH,
  author =       "Zohar S. Karnin",
  title =        "Deterministic construction of a high dimensional $l_p$
                 section in $l_1^n$ for any $p < 2$",
  crossref =     "ACM:2011:SPA",
  pages =        "645--654",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993722",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Bodirsky:2011:STG,
  author =       "Manuel Bodirsky and Michael Pinsker",
  title =        "{Schaefer}'s theorem for graphs",
  crossref =     "ACM:2011:SPA",
  pages =        "655--664",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993724",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Yoshida:2011:OCT,
  author =       "Yuichi Yoshida",
  title =        "Optimal constant-time approximation algorithms and
                 (unconditional) inapproximability results for every
                 bounded-degree {CSP}",
  crossref =     "ACM:2011:SPA",
  pages =        "665--674",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993725",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Newman:2011:EPH,
  author =       "Ilan Newman and Christian Sohler",
  title =        "Every property of hyperfinite graphs is testable",
  crossref =     "ACM:2011:SPA",
  pages =        "675--684",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993726",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Valiant:2011:EUS,
  author =       "Gregory Valiant and Paul Valiant",
  title =        "Estimating the unseen: an $n / \log(n)$-sample
                 estimator for entropy and support size, shown optimal
                 via new {CLTs}",
  crossref =     "ACM:2011:SPA",
  pages =        "685--694",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993727",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Goyal:2011:CRN,
  author =       "Vipul Goyal",
  title =        "Constant round non-malleable protocols using one way
                 functions",
  crossref =     "ACM:2011:SPA",
  pages =        "695--704",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993729",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Lin:2011:CRN,
  author =       "Huijia Lin and Rafael Pass",
  title =        "Constant-round non-malleable commitments from any
                 one-way function",
  crossref =     "ACM:2011:SPA",
  pages =        "705--714",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993730",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Ajtai:2011:SCI,
  author =       "Miklos Ajtai",
  title =        "Secure computation with information leaking to an
                 adversary",
  crossref =     "ACM:2011:SPA",
  pages =        "715--724",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993731",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Lewko:2011:HLK,
  author =       "Allison Lewko and Mark Lewko and Brent Waters",
  title =        "How to leak on key updates",
  crossref =     "ACM:2011:SPA",
  pages =        "725--734",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993732",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Woodruff:2011:NOP,
  author =       "David P. Woodruff",
  title =        "Near-optimal private approximation protocols via a
                 black box transformation",
  crossref =     "ACM:2011:SPA",
  pages =        "735--744",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993733",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kane:2011:FME,
  author =       "Daniel M. Kane and Jelani Nelson and Ely Porat and
                 David P. Woodruff",
  title =        "Fast moment estimation in data streams in optimal
                 space",
  crossref =     "ACM:2011:SPA",
  pages =        "745--754",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993735",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Sohler:2011:SEN,
  author =       "Christian Sohler and David P. Woodruff",
  title =        "Subspace embeddings for the {$L_1$}-norm with
                 applications",
  crossref =     "ACM:2011:SPA",
  pages =        "755--764",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993736",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Lee:2011:NOD,
  author =       "James R. Lee and Anastasios Sidiropoulos",
  title =        "Near-optimal distortion bounds for embedding doubling
                 spaces into {$L_1$}",
  crossref =     "ACM:2011:SPA",
  pages =        "765--772",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993737",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Fawzi:2011:LDN,
  author =       "Omar Fawzi and Patrick Hayden and Pranab Sen",
  title =        "From low-distortion norm embeddings to explicit
                 uncertainty relations and efficient information
                 locking",
  crossref =     "ACM:2011:SPA",
  pages =        "773--782",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993738",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Vondrak:2011:SFM,
  author =       "Jan Vondr{\'a}k and Chandra Chekuri and Rico
                 Zenklusen",
  title =        "Submodular function maximization via the multilinear
                 relaxation and contention resolution schemes",
  crossref =     "ACM:2011:SPA",
  pages =        "783--792",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993740",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Balcan:2011:LSF,
  author =       "Maria-Florina Balcan and Nicholas J. A. Harvey",
  title =        "Learning submodular functions",
  crossref =     "ACM:2011:SPA",
  pages =        "793--802",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993741",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Gupta:2011:PRC,
  author =       "Anupam Gupta and Moritz Hardt and Aaron Roth and
                 Jonathan Ullman",
  title =        "Privately releasing conjunctions and the statistical
                 query barrier",
  crossref =     "ACM:2011:SPA",
  pages =        "803--812",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993742",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Smith:2011:PPS,
  author =       "Adam Smith",
  title =        "Privacy-preserving statistical estimation with optimal
                 convergence rates",
  crossref =     "ACM:2011:SPA",
  pages =        "813--822",
  year =         "2011",
  DOI =          "https://doi.org/10.1145/1993636.1993743",
  bibdate =      "Tue Jun 7 18:53:27 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kelner:2012:FAM,
  author =       "Jonathan A. Kelner and Gary L. Miller and Richard
                 Peng",
  title =        "Faster approximate multicommodity flow using
                 quadratically coupled flows",
  crossref =     "ACM:2012:SPA",
  pages =        "1--18",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2213979",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The maximum multicommodity flow problem is a natural
                 generalization of the maximum flow problem to route
                 multiple distinct flows. Obtaining a $1 - \epsilon$
                  approximation to the multicommodity flow problem on
                  graphs is well studied. In this paper we
                 present an adaptation of recent advances in
                 single-commodity flow algorithms to this problem. As
                 the underlying linear systems in the electrical
                 problems of multicommodity flow problems are no longer
                 Laplacians, our approach is tailored to generate
                 specialized systems which can be preconditioned and
                 solved efficiently using Laplacians. Given an
                 undirected graph with $m$ edges and $k$ commodities, we
                 give algorithms that find $1 - \epsilon$ approximate
                 solutions to the maximum concurrent flow problem and
                 maximum weighted multicommodity flow problem in time
                 $O(m^{4/3} \poly(k, \epsilon^{-1}))$.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Chakrabarti:2012:WCC,
  author =       "Amit Chakrabarti and Lisa Fleischer and Christophe
                 Weibel",
  title =        "When the cut condition is enough: a complete
                 characterization for multiflow problems in
                 series-parallel networks",
  crossref =     "ACM:2012:SPA",
  pages =        "19--26",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2213980",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Let $G = (V, E)$ be a supply graph and $H = (V,F)$ a
                 demand graph defined on the same set of vertices. An
                 assignment of capacities to the edges of $G$ and
                 demands to the edges of $H$ is said to satisfy the cut
                 condition if for any cut in the graph, the total demand
                 crossing the cut is no more than the total capacity
                 crossing it. The pair $(G, H)$ is called cut-sufficient
                 if for any assignment of capacities and demands that
                 satisfy the cut condition, there is a multiflow routing
                 the demands defined on $H$ within the network with
                 capacities defined on $G$. We prove a previous
                 conjecture, which states that when the supply graph $G$
                  is series-parallel, the pair $(G, H)$ is cut-sufficient if
                 and only if $(G, H)$ does not contain an odd spindle as
                 a minor; that is, if it is impossible to contract edges
                 of $G$ and delete edges of $G$ and $H$ so that $G$
                 becomes the complete bipartite graph $K_{2,p}$, with $p
                 \geq 3$ odd, and $H$ is composed of a cycle connecting
                 the $p$ vertices of degree $2$, and an edge connecting
                 the two vertices of degree $p$. We further prove that
                 if the instance is Eulerian --- that is, the demands
                 and capacities are integers and the total of demands
                 and capacities incident to each vertex is even --- then
                 the multiflow problem has an integral solution. We
                 provide a polynomial-time algorithm to find an integral
                 solution in this case. In order to prove these results,
                 we formulate properties of tight cuts (cuts for which
                 the cut condition inequality is tight) in
                 cut-sufficient pairs. We believe these properties might
                 be useful in extending our results to planar graphs.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Vegh:2012:SPA,
  author =       "L{\'a}szl{\'o} A. V{\'e}gh",
  title =        "Strongly polynomial algorithm for a class of
                 minimum-cost flow problems with separable convex
                 objectives",
  crossref =     "ACM:2012:SPA",
  pages =        "27--40",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2213981",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "A well-studied nonlinear extension of the minimum-cost
                  flow problem is to minimize the objective $\sum_{ij
                  \in E} C_{ij}(f_{ij})$ over feasible flows $f$, where
                 on every arc $ij$ of the network, $C_{ij}$ is a convex
                 function. We give a strongly polynomial algorithm for
                 finding an exact optimal solution for a broad class of
                 such problems. The key characteristic of this class is
                 that an optimal solution can be computed exactly
                  once its support is known. This includes separable convex
                 quadratic objectives and also certain market equilibria
                 problems: Fisher's market with linear and with spending
                 constraint utilities. We thereby give the first
                 strongly polynomial algorithms for separable quadratic
                 minimum-cost flows and for Fisher's market with
                 spending constraint utilities, settling open questions
                  posed, e.g., in [15] and [35], respectively. The
                 running time is $O(m^4 \log m)$ for quadratic costs,
                 $O(n^4 + n^2 (m + n \log n) \log n)$ for Fisher's
                 markets with linear utilities and $O(m n^3 + m^2 (m + n
                 \log n) \log m)$ for spending constraint utilities.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Aaronson:2012:QMH,
  author =       "Scott Aaronson and Paul Christiano",
  title =        "Quantum money from hidden subspaces",
  crossref =     "ACM:2012:SPA",
  pages =        "41--60",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2213983",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Forty years ago, Wiesner pointed out that quantum
                 mechanics raises the striking possibility of money that
                 cannot be counterfeited according to the laws of
                 physics. We propose the first quantum money scheme that
                 is (1) public-key --- meaning that anyone can verify a
                 banknote as genuine, not only the bank that printed it,
                 and (2) cryptographically secure, under a ``classical''
                 hardness assumption that has nothing to do with quantum
                 money. Our scheme is based on hidden subspaces, encoded
                 as the zero-sets of random multivariate polynomials. A
                 main technical advance is to show that the
                 ``black-box'' version of our scheme, where the
                 polynomials are replaced by classical oracles, is
                 unconditionally secure. Previously, such a result had
                 only been known relative to a quantum oracle (and even
                 there, the proof was never published). Even in
                 Wiesner's original setting --- quantum money that can
                 only be verified by the bank --- we are able to use our
                 techniques to patch a major security hole in Wiesner's
                 scheme. We give the first private-key quantum money
                 scheme that allows unlimited verifications and that
                 remains unconditionally secure, even if the
                 counterfeiter can interact adaptively with the bank.
                 Our money scheme is simpler than previous public-key
                 quantum money schemes, including a knot-based scheme of
                 Farhi et al. The verifier needs to perform only two
                 tests, one in the standard basis and one in the
                 Hadamard basis --- matching the original intuition for
                 quantum money, based on the existence of complementary
                 observables. Our security proofs use a new variant of
                 Ambainis's quantum adversary method, and several other
                 tools that might be of independent interest.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Vazirani:2012:CQD,
  author =       "Umesh Vazirani and Thomas Vidick",
  title =        "Certifiable quantum dice: or, true random number
                 generation secure against quantum adversaries",
  crossref =     "ACM:2012:SPA",
  pages =        "61--76",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2213984",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We introduce a protocol through which a pair of
                 quantum mechanical devices may be used to generate $n$
                 bits that are $\epsilon$-close in statistical distance
                  to $n$ uniformly distributed bits, starting from a
                  seed of $O(\log n \log(1 / \epsilon))$ uniform bits. The
                 bits generated are certifiably random based only on a
                 simple statistical test that can be performed by the
                 user, and on the assumption that the devices do not
                 communicate in the middle of each phase of the
                 protocol. No other assumptions are placed on the
                 devices' inner workings. A modified protocol uses a
                 seed of $O(\log^3 n)$ uniformly random bits to generate
                 $n$ bits that are $\poly^{-1}(n)$-indistinguishable
                 from uniform even from the point of view of a quantum
                 adversary who may have had prior access to the devices,
                 and may be entangled with them.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Belovs:2012:SPF,
  author =       "Aleksandrs Belovs",
  title =        "Span programs for functions with constant-sized
                 $1$-certificates: extended abstract",
  crossref =     "ACM:2012:SPA",
  pages =        "77--84",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2213985",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Besides the Hidden Subgroup Problem, the second large
                 class of quantum speed-ups is for functions with
                  constant-sized $1$-certificates. This includes the OR
                  function, solvable by the Grover algorithm, as well as
                  the element distinctness problem, the triangle
                  problem, and others. The
                 usual way to solve them is by quantum walk on the
                 Johnson graph. We propose a solution for the same
                 problems using span programs. The span program is a
                 computational model equivalent to the quantum query
                  algorithm in its strength, and yet very different in
                  form. We prove the power of our approach by
                 designing a quantum algorithm for the triangle problem
                  with query complexity $O(n^{35/27})$, improving on the
                  $O(n^{13/10})$ bound of the best previously known
                  algorithm, due to Magniez et al.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Larsen:2012:CPC,
  author =       "Kasper Green Larsen",
  title =        "The cell probe complexity of dynamic range counting",
  crossref =     "ACM:2012:SPA",
  pages =        "85--94",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2213987",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "In this paper we develop a new technique for proving
                 lower bounds on the update time and query time of
                 dynamic data structures in the cell probe model. With
                 this technique, we prove the highest lower bound to
                 date for any explicit problem, namely a lower bound of
                  $t_q = \Omega((\lg n / \lg (w t_u))^2)$. Here $n$ is the
                  number of update operations, $w$ the cell size, $t_q$
                  the query time, and $t_u$ the update time. In the most
                 natural setting of cell size $w = \Theta(\lg n)$, this
                 gives a lower bound of $t_q = \Omega((\lg n / \lg \lg
                 n)^2)$ for any polylogarithmic update time. This bound
                 is almost a quadratic improvement over the highest
                 previous lower bound of $\Omega(\lg n)$, due to
                 Patrascu and Demaine [SICOMP'06]. We prove our lower
                 bound for the fundamental problem of weighted
                 orthogonal range counting. In this problem, we are to
                 support insertions of two-dimensional points, each
                 assigned a $\Theta(\lg n)$-bit integer weight. A query
                 to this problem is specified by a point $q = (x, y)$,
                 and the goal is to report the sum of the weights
                 assigned to the points dominated by $q$, where a point
                 $(x',y')$ is dominated by $q$ if $x' \leq x$ and $y'
                 \leq y$. In addition to being the highest cell probe
                 lower bound to date, our lower bound is also tight for
                 data structures with update time $t_u = \Omega(\lg^{2 +
                 \epsilon} n)$, where $\epsilon > 0$ is an arbitrarily
                 small constant.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Fiorini:2012:LVS,
  author =       "Samuel Fiorini and Serge Massar and Sebastian Pokutta
                 and Hans Raj Tiwary and Ronald de Wolf",
  title =        "Linear vs. semidefinite extended formulations:
                 exponential separation and strong lower bounds",
  crossref =     "ACM:2012:SPA",
  pages =        "95--106",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2213988",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We solve a 20-year old problem posed by Yannakakis and
                 prove that there exists no polynomial-size linear
                 program (LP) whose associated polytope projects to the
                 traveling salesman polytope, even if the LP is not
                 required to be symmetric. Moreover, we prove that this
                 holds also for the cut polytope and the stable set
                 polytope. These results were discovered through a new
                 connection that we make between one-way quantum
                 communication protocols and semidefinite programming
                 reformulations of LPs.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Goel:2012:PCA,
  author =       "Gagan Goel and Vahab Mirrokni and Renato Paes Leme",
  title =        "Polyhedral clinching auctions and the {AdWords}
                 polytope",
  crossref =     "ACM:2012:SPA",
  pages =        "107--122",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2213990",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "A central issue in applying auction theory in practice
                 is the problem of dealing with budget-constrained
                 agents. A desirable goal in practice is to design
                 incentive compatible, individually rational, and Pareto
                 optimal auctions while respecting the budget
                 constraints. Achieving this goal is particularly
                 challenging in the presence of nontrivial combinatorial
                 constraints over the set of feasible allocations.
                 Toward this goal and motivated by AdWords auctions, we
                 present an auction for polymatroidal environments
                 satisfying the above properties. Our auction employs a
                 novel clinching technique with a clean geometric
                 description and only needs an oracle access to the
                 submodular function defining the polymatroid. As a
                 result, this auction not only simplifies and
                 generalizes all previous results, it applies to several
                 new applications including AdWords Auctions, bandwidth
                 markets, and video on demand. In particular, our
                 characterization of the AdWords auction as
                 polymatroidal constraints might be of independent
                 interest. This allows us to design the first mechanism
                  for Ad Auctions that simultaneously takes into account
                  budgets, multiple keywords, and multiple slots. We show
                 that it is impossible to extend this result to generic
                 polyhedral constraints. This also implies an
                 impossibility result for multi-unit auctions with
                 decreasing marginal utilities in the presence of budget
                 constraints.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kleinberg:2012:MPI,
  author =       "Robert Kleinberg and Seth Matthew Weinberg",
  title =        "Matroid prophet inequalities",
  crossref =     "ACM:2012:SPA",
  pages =        "123--136",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2213991",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Consider a gambler who observes a sequence of
                 independent, non-negative random numbers and is allowed
                 to stop the sequence at any time, claiming a reward
                 equal to the most recent observation. The famous
                 prophet inequality of Krengel, Sucheston, and Garling
                 asserts that a gambler who knows the distribution of
                 each random variable can achieve at least half as much
                 reward, in expectation, as a ``prophet'' who knows the
                 sampled values of each random variable and can choose
                 the largest one. We generalize this result to the
                 setting in which the gambler and the prophet are
                 allowed to make more than one selection, subject to a
                 matroid constraint. We show that the gambler can still
                 achieve at least half as much reward as the prophet;
                 this result is the best possible, since it is known
                 that the ratio cannot be improved even in the original
                 prophet inequality, which corresponds to the special
                 case of rank-one matroids. Generalizing the result
                 still further, we show that under an intersection of
                 $p$ matroid constraints, the prophet's reward exceeds
                 the gambler's by a factor of at most $O(p)$, and this
                 factor is also tight. Beyond their interest as theorems
                 about pure online algorithms or optimal stopping rules,
                 these results also have applications to mechanism
                 design. Our results imply improved bounds on the
                 ability of sequential posted-price mechanisms to
                 approximate optimal mechanisms in both single-parameter
                 and multi-parameter Bayesian settings. In particular,
                 our results imply the first efficiently computable
                 constant-factor approximations to the Bayesian optimal
                 revenue in certain multi-parameter settings.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Devanur:2012:OMC,
  author =       "Nikhil R. Devanur and Kamal Jain",
  title =        "Online matching with concave returns",
  crossref =     "ACM:2012:SPA",
  pages =        "137--144",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2213992",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We consider a significant generalization of the
                 AdWords problem by allowing arbitrary concave returns,
                 and we characterize the optimal competitive ratio
                 achievable. The problem considers a sequence of items
                 arriving online that have to be allocated to agents,
                 with different agents bidding different amounts. The
                  objective function is the sum, over each agent $i$, of
                  a monotonically non-decreasing concave function $M_i :
                  \mathbb{R}_+ \to \mathbb{R}_+$ of the total amount
                  allocated to $i$. All
                 variants of online matching problems (including the
                 AdWords problem) studied in the literature consider the
                 special case of budgeted linear functions, that is,
                 functions of the form $M_i(u_i) = \min\{u_i, B_i\}$ for
                 some constant $B_i$. The distinguishing feature of this
                 paper is in allowing arbitrary concave returns. The
                 main result of this paper is that for each concave
                 function $M$, there exists a constant $F(M) \leq 1$
                 such that: there exists an algorithm with competitive
                 ratio of $\min_i F(M_i)$, independent of the sequence
                 of items. No algorithm has a competitive ratio larger
                 than $F(M)$ over all instances with $M_i = M$ for all
                 $i$. Our algorithm is based on the primal-dual paradigm
                 and makes use of convex programming duality. The upper
                 bounds are obtained by formulating the task of finding
                 the right counterexample as an optimization problem.
                 This path takes us through the calculus of variations
                 which deals with optimizing over continuous functions.
                 The algorithm and the upper bound are related to each
                 other via a set of differential equations, which points
                 to a certain kind of duality between them.",
  acknowledgement = ack-nhfb,
}
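
%%% A small illustrative sketch (Python, ours) of the budgeted-linear
%%% special case $M_i(u_i) = \min\{u_i, B_i\}$ mentioned above, using the
%%% classical $1 - e^{u/B - 1}$ bid-scaling trade-off of Mehta et al. for
%%% AdWords; this is not the paper's primal-dual algorithm for general
%%% concave returns, and all names here are illustrative assumptions.
%%%
%%%     import math
%%%
%%%     def allocate_online(items, B):
%%%         # items: one list of per-agent bids per arriving item;
%%%         # B[i]: agent i's budget.  Greedily pick the agent whose bid,
%%%         # damped by how much budget is already spent, is largest.
%%%         u = [0.0] * len(B)                    # allocated so far
%%%         for bids in items:
%%%             score = [b * (1 - math.exp(u[i] / B[i] - 1))
%%%                      for i, b in enumerate(bids)]
%%%             i = max(range(len(B)), key=score.__getitem__)
%%%             u[i] += bids[i]
%%%             yield i
%%%
%%%     B = [10.0, 10.0]
%%%     items = [[3.0, 2.0]] * 5
%%%     print(list(allocate_online(items, B)))    # e.g. [0, 0, 1, 1, 0]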

@InProceedings{Arora:2012:CNM,
  author =       "Sanjeev Arora and Rong Ge and Ravindran Kannan and
                 Ankur Moitra",
  title =        "Computing a nonnegative matrix factorization ---
                 provably",
  crossref =     "ACM:2012:SPA",
  pages =        "145--162",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2213994",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The Nonnegative Matrix Factorization (NMF) problem has
                 a rich history spanning quantum mechanics, probability
                 theory, data analysis, polyhedral combinatorics,
                 communication complexity, demography, chemometrics,
                 etc. In the past decade NMF has become enormously
                 popular in machine learning, where the factorization is
                 computed using a variety of local search heuristics.
                 Vavasis recently proved that this problem is
                 NP-hard. We initiate a study of when this problem
                 is solvable in polynomial time. Consider a nonnegative
                 $m \times n$ matrix $M$ and a target inner-dimension
                 $r$. Our results are the following: --- We give a
                 polynomial-time algorithm for exact and approximate NMF
                 for every constant $r$. Indeed NMF is most interesting
                 in applications precisely when $r$ is small. We
                 complement this with a hardness result, that if exact
                 NMF can be solved in time $(n m)^{o(r)}$, 3-SAT has a
                 sub-exponential time algorithm. Hence, substantial
                 improvements to the above algorithm are unlikely. ---
                 We give an algorithm that runs in time polynomial in $n$,
                 $m$ and $r$ under the separability condition identified
                 by Donoho and Stodden in 2003. The algorithm may be
                 practical since it is simple and noise tolerant (under
                 benign assumptions). Separability is believed to hold
                 in many practical settings. To the best of our
                 knowledge, this last result is the first
                 polynomial-time algorithm that provably works under a
                 non-trivial condition on the input matrix and we
                 believe that this will be an interesting and important
                 direction for future work.",
  acknowledgement = ack-nhfb,
}
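
%%% A minimal sketch (Python/NumPy, ours) of anchor finding under the
%%% separability condition discussed above, via the standard
%%% successive-projection heuristic: under separability the rows of $W$
%%% appear, up to scaling, among the rows of $M$ as extreme points.  This
%%% is a related textbook approach, not the paper's algorithm; it assumes
%%% no all-zero rows.
%%%
%%%     import numpy as np
%%%
%%%     def find_anchor_rows(M, r):
%%%         R = M / M.sum(axis=1, keepdims=True)      # l1-normalize rows
%%%         anchors = []
%%%         for _ in range(r):
%%%             i = int(np.argmax((R * R).sum(axis=1)))  # farthest row
%%%             anchors.append(i)
%%%             u = R[i] / np.linalg.norm(R[i])
%%%             R = R - np.outer(R @ u, u)            # project it out
%%%         return anchors
%%%
%%%     W = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 1.0]])
%%%     A = np.array([[1.0, 0.0], [0.0, 1.0], [0.5, 0.5], [0.3, 0.7]])
%%%     print(find_anchor_rows(A @ W, 2))             # anchor rows: [0, 1]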

@InProceedings{Forbes:2012:ITT,
  author =       "Michael A. Forbes and Amir Shpilka",
  title =        "On identity testing of tensors, low-rank recovery and
                 compressed sensing",
  crossref =     "ACM:2012:SPA",
  pages =        "163--172",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2213995",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We study the problem of obtaining efficient,
                 deterministic, {\em black-box polynomial identity
                 testing algorithms\/} for depth-$3$ set-multilinear
                 circuits (over arbitrary fields). This class of
                 circuits has an efficient, deterministic, white-box
                 polynomial identity testing algorithm (due to Raz and
                 Shpilka [36]), but has no known such black-box
                 algorithm. We recast this problem as a question of
                 finding a low-dimensional subspace $H$, spanned by rank
                 $1$ tensors, such that any non-zero tensor in the dual
                 space $\ker(H)$ has high rank. We obtain explicit
                 constructions of essentially optimal-size hitting sets
                 for tensors of degree $2$ (matrices), and obtain the
                 first quasi-polynomial sized hitting sets for arbitrary
                 tensors.\par

                 We also show connections to the task of performing
                 low-rank recovery of matrices, which is studied in the
                 field of compressed sensing. Low-rank recovery asks
                 (say, over $\mathbb{R}$) to recover a matrix $M$ from few
                 measurements, under the promise that $M$ is rank $\leq
                 r$. In this work, we restrict our attention to
                 recovering matrices that are exactly rank $\leq r$
                 using deterministic, non-adaptive, linear measurements,
                 that are free from noise. Over $\mathbb{R}$, we provide a set
                 (of size $4 n r$) of such measurements, from which $M$
                 can be recovered in $O(r n^2 + r^3 n)$ field
                 operations, and the number of measurements is
                 essentially optimal. Further, the measurements can be
                 taken to be all rank-$1$ matrices, or all sparse
                 matrices. To the best of our knowledge no explicit
                 constructions with those properties were known prior to
                 this work.\par

                 We also give a more formal connection between low-rank
                 recovery and the task of {\em sparse (vector)
                 recovery\/}: any sparse-recovery algorithm that exactly
                 recovers vectors of length $n$ and sparsity $2 r$,
                 using $m$ non-adaptive measurements, yields a low-rank
                 recovery scheme for exactly recovering $n \times n$
                 matrices of rank $\leq r$, making $2 n m$ non-adaptive
                 measurements. Furthermore, if the sparse-recovery
                 algorithm runs in time $\tau$, then the low-rank
                 recovery algorithm runs in time $O(r n^2 + n \tau)$. We
                 obtain this reduction using linear-algebraic
                 techniques, and not using convex optimization, which is
                 more commonly seen in compressed sensing
                 algorithms.\par

                 Finally, we also make a connection to {\em rank-metric
                 codes}, as studied in coding theory. These are codes
                 with codewords consisting of matrices (or tensors)
                 where the distance of matrices $M$ and $N$ is rank $(M
                 - N)$, as opposed to the usual Hamming metric. We
                 obtain essentially optimal-rate codes over matrices,
                 and provide an efficient decoding algorithm. We obtain
                 codes over tensors as well, with poorer rate, but still
                 with efficient decoding.",
  acknowledgement = ack-nhfb,
}
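
%%% The rank metric mentioned in the last paragraph, in two lines of
%%% NumPy (an illustrative check of the definition, not part of the
%%% paper's constructions):
%%%
%%%     import numpy as np
%%%
%%%     M = np.eye(3)
%%%     N = np.diag([1.0, 0.0, 0.0])
%%%     print(np.linalg.matrix_rank(M - N))   # rank distance d(M, N) = 2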

@InProceedings{Grohe:2012:STI,
  author =       "Martin Grohe and D{\'a}niel Marx",
  title =        "Structure theorem and isomorphism test for graphs with
                 excluded topological subgraphs",
  crossref =     "ACM:2012:SPA",
  pages =        "173--192",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2213996",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We generalize the structure theorem of Robertson and
                 Seymour for graphs excluding a fixed graph $H$ as a
                 minor to graphs excluding $H$ as a topological
                 subgraph. We prove that for a fixed $H$, every graph
                 excluding $H$ as a topological subgraph has a tree
                 decomposition where each part is either ``almost
                 embeddable'' in a fixed surface or has bounded degree
                 with the exception of a bounded number of vertices.
                 Furthermore, such a decomposition is computable by an
                 algorithm that is fixed-parameter tractable with
                 parameter $|H|$. We present two algorithmic
                 applications of our structure theorem. To illustrate
                 the mechanics of a ``typical'' application of the
                 structure theorem, we show that on graphs excluding $H$
                 as a topological subgraph, Partial Dominating Set (find
                 $k$ vertices whose closed neighborhood has maximum
                 size) can be solved in time $f(H,k) \cdot n^{O(1)}$.
                 More significantly, we show that on graphs
                 excluding $H$ as a topological subgraph, Graph
                 Isomorphism can be solved in time $n^{f(H)}$. This
                 result unifies and generalizes two previously known
                 important polynomial-time solvable cases of Graph
                 Isomorphism: bounded-degree graphs and $H$-minor-free
                 graphs. The proof of this result needs a generalization
                 of our structure theorem to the context of invariant
                 treelike decomposition.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Hrubes:2012:SPD,
  author =       "Pavel Hrubes and Iddo Tzameret",
  title =        "Short proofs for the determinant identities",
  crossref =     "ACM:2012:SPA",
  pages =        "193--212",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2213998",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We study arithmetic proof systems $P_c(F)$ and
                 $P_f(F)$ operating with arithmetic circuits and
                 arithmetic formulas, respectively, that prove
                 polynomial identities over a field $F$. We establish a
                 series of structural theorems about these proof
                 systems, the main one stating that $P_c(F)$ proofs can
                 be balanced: if a polynomial identity of syntactic
                 degree $d$ and depth $k$ has a $P_c(F)$ proof of size
                 $s$, then it also has a $P_c(F)$ proof of size
                 $\poly(s,d)$ and depth $O(k + \log^2 d + \log d \cdot
                 \log s)$. As a corollary, we obtain a quasipolynomial
                 simulation of $P_c(F)$ by $P_f(F)$, for identities of a
                 polynomial syntactic degree.\par

                 Using these results we obtain the following: consider
                 the identities:\par

                 $$\det(X Y) = \det(X) \cdot \det(Y) \quad {\rm and}
                 \quad \det(Z) = z_{11} \cdots z_{nn},$$\par

                 where $X$, $Y$ and $Z$ are $n \times n$ square matrices
                 and $Z$ is a triangular matrix with $z_{11}, \ldots{},
                 z_{nn}$ on the diagonal (and $\det$ is the determinant
                 polynomial). Then we can construct a polynomial-size
                 arithmetic circuit $\det$ such that the above
                 identities have $P_c(F)$ proofs of polynomial-size and
                 $O(\log^2 n)$ depth. Moreover, there exists an
                 arithmetic formula $\det$ of size $n^{O(\log n)}$ such
                 that the above identities have $P_f(F)$ proofs of size
                 $n^{O(\log n)}$.\par

                 This yields a solution to a basic open problem in
                 propositional proof complexity, namely, whether there
                 are polynomial-size NC$^2$-Frege proofs for the
                 determinant identities and the hard matrix identities,
                 as considered, e.g. in Soltys and Cook (2004) (cf.,
                 Beame and Pitassi (1998)). We show that matrix
                 identities like $A B = I \to B A = I$ (for matrices
                 over the two element field) as well as basic properties
                 of the determinant have polynomial-size NC$^2$-Frege
                 proofs, and quasipolynomial-size Frege proofs.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Beame:2012:TST,
  author =       "Paul Beame and Christopher Beck and Russell
                 Impagliazzo",
  title =        "Time-space tradeoffs in resolution: superpolynomial
                 lower bounds for superlinear space",
  crossref =     "ACM:2012:SPA",
  pages =        "213--232",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2213999",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We give the first time-space tradeoff lower bounds for
                 Resolution proofs that apply to superlinear space. In
                 particular, we show that there are formulas of size $N$
                 that have Resolution refutations of space and size each
                 roughly $N^{\log_2 N}$ (and like all formulas have
                 Resolution refutations of space $N$) for which any
                 Resolution refutation using space $S$ and length $T$
                 requires $T \geq (N^{0.58 \log_2 N} / S)^{\Omega(\log
                 \log N / \log \log \log N)}$. By downward translation, a
                 similar tradeoff applies to all smaller space bounds.
                 We also show somewhat stronger time-space tradeoff
                 lower bounds for Regular Resolution, which are also the
                 first to apply to superlinear space. Namely, for any
                 space bound $S$ at most $2^{o(N^{1/4})}$ there are
                 formulas of size $N$, having clauses of width $4$, that
                 have Regular Resolution proofs of space $S$ and
                 slightly larger size $T = O(NS)$, but for which any
                 Regular Resolution proof of space $S^{1 - \epsilon}$
                 requires length $T^{\Omega(\log \log N / \log \log \log
                 N)}$.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Huynh:2012:VSP,
  author =       "Trinh Huynh and Jakob Nordstrom",
  title =        "On the virtue of succinct proofs: amplifying
                 communication complexity hardness to time-space
                 trade-offs in proof complexity",
  crossref =     "ACM:2012:SPA",
  pages =        "233--248",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214000",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "An active line of research in proof complexity over
                 the last decade has been the study of proof space and
                 trade-offs between size and space. Such questions were
                 originally motivated by practical SAT solving, but have
                 also led to the development of new theoretical concepts
                 in proof complexity of intrinsic interest and to
                 results establishing nontrivial relations between space
                 and other proof complexity measures. By now, the
                 resolution proof system is fairly well understood in
                 this regard, as witnessed by a sequence of papers
                 leading up to [Ben-Sasson and Nordstrom 2008, 2011] and
                 [Beame, Beck, and Impagliazzo 2012]. However, for other
                 relevant proof systems in the context of SAT solving,
                 such as polynomial calculus (PC) and cutting planes
                 (CP), very little has been known. Inspired by [BN08,
                 BN11], we consider CNF encodings of so-called pebble
                 games played on graphs and the approach of making such
                 pebbling formulas harder by simple syntactic
                 modifications. We use this paradigm of hardness
                 amplification to make progress on the relatively
                 longstanding open question of proving time-space
                 trade-offs for PC and CP. Namely, we exhibit a family
                 of modified pebbling formulas $\{F_n\}$ such that: ---
                 The formulas $F_n$ have size $O(n)$ and width $O(1)$.
                 --- They have proofs of length $O(n)$ in resolution,
                 which generalize to both PC and CP. --- Any refutation
                 in CP or PCR (a generalization of PC) in length $L$ and
                 space $s$ must satisfy $s \log L \gtrsim \sqrt[4]{n}$.
                 A crucial technical ingredient in these
                 results is a new two-player communication complexity
                 lower bound for composed search problems in terms of
                 block sensitivity, a contribution that we believe to be
                 of independent interest.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Ajtai:2012:DVN,
  author =       "Mikl{\'o}s Ajtai",
  title =        "Determinism versus nondeterminism with arithmetic
                 tests and computation: extended abstract",
  crossref =     "ACM:2012:SPA",
  pages =        "249--268",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214001",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "For each natural number $d$ we consider a finite
                 structure $M_d$ whose universe is the set of all $0,
                 1$-sequence of length $n = 2^d$, each representing a
                 natural number in the set $\{0, 1, \ldots{}, 2^n - 1\}$
                 in binary form. The operations included in the
                 structure are the four constants $0$, $1$, $2^n - 1$,
                 $n$, multiplication and addition modulo $2^n$, the
                 unary function $\min\{2^x, 2^n - 1\}$, the binary
                 functions $\lfloor x / y \rfloor$ (with $\lfloor x / 0
                 \rfloor = 0$), $\max(x, y)$, $\min(x, y)$, and the
                 boolean vector operations $\wedge$, $\vee$, $\neg$
                 defined on $0, 1$ sequences of length $n$ by performing
                 the operations on all components simultaneously. These
                 are essentially the arithmetic operations that can be
                 performed on a RAM, with wordlength $n$, by a single
                 instruction. We show that there exists a term (that is,
                 an algebraic expression) $F(x,y)$ built up from the
                 mentioned operations, with the only free variables $x$,
                 $y$, such that for all terms $G(y)$, which are also
                 built up from the mentioned operations, the following
                 holds. For infinitely many positive integers $d$, there
                 exists an $a \in M_d$ such that the following two
                 statements are not equivalent: (i) $M_d \models \exists
                 x, F(x,a)$, (ii) $M_d \models G(a) = 0$. In other
                 words, the question whether an existential statement,
                 depending on the parameter $a \in M_d$ is true or not,
                 cannot be decided by evaluating an algebraic expression
                 at $a$.\par

                 Another way of formulating the theorem, in a slightly
                 stronger form, is, that over the structures $M_d$,
                 quantifier elimination is not possible in the following
                 sense. Let $\cal M$ be a first-order language with
                 equality, containing function symbols for all of the
                 mentioned arithmetic operations. Then there exists an
                 existential first-order formula $\phi(y)$ of $\cal M$,
                 containing a single existential quantifier and the only
                 free variable $y$, such that for each propositional
                 formula $P(y)$ of $\cal M$, we have that for infinitely
                 many positive integers $d$, $\phi(y)$ and $P(y)$ are
                 not equivalent on $M_d$, that is, $M_d \models \neg
                 \forall y \, (\phi(y) \leftrightarrow P(y))$.\par

                 We also show that the theorem, in both forms, remains
                 true if the binary operation $\min\{x^y, 2^n - 1\}$ is
                 added to the structure $M_d$. A general theorem is
                 proved as well, which describes sufficient conditions
                 for a set of operations on a sequence of structures
                 $K_d$, $d = 1, 2, \ldots{}$, which guarantees that the
                 analogues of the mentioned theorems hold for the
                 structures $K_d$ too.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Heilman:2012:SPC,
  author =       "Steven Heilman and Aukosh Jagannath and Assaf Naor",
  title =        "Solution of the propeller conjecture in
                 {$\mathbb{R}^3$}",
  crossref =     "ACM:2012:SPA",
  pages =        "269--276",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214003",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "It is shown that every measurable partition $\{A_1,
                 \ldots{}, A_k\}$ of $\mathbb{R}^3$
                 satisfies:\par

                 $$\sum_{i = 1}^k \Big\| \int_{A_i} x e^{-\|x\|_2^2 / 2}
                 \, dx \Big\|_2^2 \leq 9 \pi^2. \eqno(1)$$\par

                 Let $\{P_1, P_2, P_3\}$ be the partition of
                 $\mathbb{R}^2$ into $120^\circ$ sectors centered at the
                 origin. The bound (1) is sharp, with equality holding
                 if $A_i = P_i \times \mathbb{R}$ for $i \in \{1, 2,
                 3\}$ and $A_i = \emptyset$ for $i \in \{4, \ldots{},
                 k\}$. This settles positively the 3-dimensional
                 Propeller Conjecture of Khot and Naor (FOCS 2008). The
                 proof of (1) reduces the problem to a finite set of
                 numerical inequalities which are then verified with
                 full rigor in a computer-assisted fashion. The main
                 consequence (and motivation) of (1) is
                 complexity-theoretic: the Unique Games hardness
                 threshold of the Kernel Clustering problem with $4
                 \times 4$ centered and spherical hypothesis matrix
                 equals $2 \pi /3$.",
  acknowledgement = ack-nhfb,
}
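
%%% A quick check (ours, not from the paper) that the stated propeller
%%% partition attains the bound: for a $120^\circ$ sector $P \subset
%%% \mathbb{R}^2$ and $A = P \times \mathbb{R}$, in polar coordinates
%%%
%%%   $$\int_A x e^{-\|x\|_2^2/2} \, dx
%%%     = \sqrt{2\pi} \int_0^\infty r^2 e^{-r^2/2} \, dr
%%%       \int_{-\pi/3}^{\pi/3} (\cos \theta, \sin \theta, 0) \, d\theta
%%%     = \sqrt{2\pi} \cdot \sqrt{\pi/2} \cdot (\sqrt{3}, 0, 0),$$
%%%
%%% a vector of squared norm $3\pi^2$ (the third coordinate vanishes by
%%% symmetry); the three sectors together give exactly $9\pi^2$,
%%% matching (1).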

@InProceedings{Khot:2012:LHC,
  author =       "Subhash A. Khot and Preyas Popat and Nisheeth K.
                 Vishnoi",
  title =        "$2^{\log^{1 - \epsilon} n}$ hardness for the closest
                 vector problem with preprocessing",
  crossref =     "ACM:2012:SPA",
  pages =        "277--288",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214004",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We prove that for an arbitrarily small constant
                 $\epsilon > 0$, assuming NP $\not\subseteq$ DTIME
                 $(2^{\log^{O(1 / \epsilon)} n})$, the preprocessing
                 versions of the closest vector problem and the nearest
                 codeword problem are hard to approximate within a
                 factor better than $2^{\log^{1 - \epsilon} n}$. This
                 improves upon the previous hardness factor of $(\log
                 n)^\delta$ for some $\delta > 0$ due to [AKKV05].",
  acknowledgement = ack-nhfb,
}

@InProceedings{ODonnell:2012:NPN,
  author =       "Ryan O'Donnell and John Wright",
  title =        "A new point of {NP}-hardness for unique games",
  crossref =     "ACM:2012:SPA",
  pages =        "289--306",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214005",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We show that distinguishing $1/2$-satisfiable
                 Unique-Games instances from $(3/8 +
                 \epsilon)$-satisfiable instances is NP-hard (for all
                 $\epsilon > 0$). A consequence is that we match or
                 improve the best known $c$ vs. $s$ NP-hardness result
                 for Unique-Games for all values of $c$ (except for $c$
                 very close to $0$). For these $c$, ours is the first
                 hardness result showing that it helps to take the
                 alphabet size larger than $2$. Our NP-hardness
                 reductions are quasilinear-size and thus show nearly
                 full exponential time is required, assuming the ETH.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Barak:2012:HSS,
  author =       "Boaz Barak and Fernando G. S. L. Brandao and Aram W.
                 Harrow and Jonathan Kelner and David Steurer and Yuan
                 Zhou",
  title =        "Hypercontractivity, sum-of-squares proofs, and their
                 applications",
  crossref =     "ACM:2012:SPA",
  pages =        "307--326",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214006",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We study the computational complexity of approximating
                 the $2$-to-$q$ norm of linear operators (defined as
                 $||A||_{2 \to q} = \max_{v \neq 0} ||A v||_q /
                 ||v||_2$) for $q > 2$, as well as connections between
                 this question and issues arising in quantum information
                 theory and the study of Khot's Unique Games Conjecture
                 (UGC). We show the following: For any constant even
                 integer $q \geq 4$, a graph $G$ is a small-set expander
                 if and only if the projector into the span of the top
                 eigenvectors of $G$'s adjacency matrix has bounded $2
                 \to q$ norm. As a corollary, a good approximation to
                 the $2 \to q$ norm will refute the Small-Set Expansion
                 Conjecture --- a close variant of the UGC. We also show
                 that such a good approximation can be obtained in
                 $\exp(n^{2 / q})$ time, thus obtaining a different
                 proof of the known subexponential algorithm for
                 Small-Set-Expansion. Constant rounds of the ``Sum of
                 Squares'' semidefinite programming hierarchy certify an
                 upper bound on the $2 \to 4$ norm of the projector to
                 low degree polynomials over the Boolean cube, as well
                 certify the unsatisfiability of the ``noisy cube'' and
                 ``short code'' based instances of Unique-Games
                 considered by prior works. This improves on the
                 previous upper bound of $\exp(\log^{O(1)} n)$ rounds
                 (for the ``short code''), as well as separates the
                 ``Sum of Squares'' / ``Lasserre'' hierarchy from weaker
                 hierarchies that were known to require $\omega(1)$
                 rounds. We show reductions between computing the $2 \to
                 4$ norm and computing the injective tensor norm of a
                 tensor, a problem with connections to quantum
                 information theory. Three corollaries are: (i) the $2
                 \to 4$ norm is NP-hard to approximate to precision
                 inverse-polynomial in the dimension, (ii) the $2 \to 4$
                 norm does not have a good approximation (in the sense
                 above) unless 3-SAT can be solved in time $\exp(\sqrt{n}
                 \polylog(n))$, and (iii) known algorithms for the
                 quantum separability problem imply a non-trivial
                 additive approximation for the $2 \to 4$ norm.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Efremenko:2012:IRL,
  author =       "Klim Efremenko",
  title =        "From irreducible representations to locally decodable
                 codes",
  crossref =     "ACM:2012:SPA",
  pages =        "327--338",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214008",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "A $q$-query Locally Decodable Code (LDC) is an
                 error-correcting code that allows one to read any
                 particular symbol of the message by reading only $q$
                 symbols of the codeword even if the codeword is
                 adversarially corrupted. In this paper we present a new
                 approach for the construction of LDCs. We show that if
                 there exists an irreducible representation $(\rho, V)$
                 of a group $G$ and $q$ elements $g_1$, $g_2$, \ldots{}, $g_q$
                 in $G$ such that there exists a linear combination of
                 matrices $\rho(g_i)$ that is of rank one, then we can
                 construct a $q$-query Locally Decodable Code $C: V \to
                 F^G$. We show the potential of this approach by
                 constructing constant query LDCs of sub-exponential
                 length matching the best known constructions.",
  acknowledgement = ack-nhfb,
}
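
%%% For readers new to LDCs, a minimal sketch (Python, ours) of the
%%% classical $2$-query Hadamard code, which illustrates the definition;
%%% it is not the paper's representation-theoretic construction.
%%%
%%%     import itertools, random
%%%
%%%     def hadamard_encode(x):
%%%         # Codeword lists the inner product <x, a> over F_2 for
%%%         # every a in {0,1}^n (length 2^n).
%%%         return [sum(xi & ai for xi, ai in zip(x, a)) % 2
%%%                 for a in itertools.product((0, 1), repeat=len(x))]
%%%
%%%     def decode_bit(word, n, i):
%%%         # Two queries, a and a + e_i: their XOR equals x_i whenever
%%%         # both queried symbols are uncorrupted.
%%%         a = [random.randint(0, 1) for _ in range(n)]
%%%         b = list(a); b[i] ^= 1
%%%         idx = lambda v: int("".join(map(str, v)), 2)
%%%         return word[idx(a)] ^ word[idx(b)]
%%%
%%%     x = [1, 0, 1, 1]
%%%     print(decode_bit(hadamard_encode(x), 4, 2))  # recovers x[2] = 1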

@InProceedings{Guruswami:2012:FCF,
  author =       "Venkatesan Guruswami and Chaoping Xing",
  title =        "Folded codes from function field towers and improved
                 optimal rate list decoding",
  crossref =     "ACM:2012:SPA",
  pages =        "339--350",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214009",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We give a new construction of algebraic codes which
                 are efficiently list decodable from a fraction $1 - R -
                 \epsilon$ of adversarial errors where $R$ is the rate
                 of the code, for any desired positive constant
                 $\epsilon$. The worst-case list size output by the
                 algorithm is $O(1 / \epsilon)$, matching the
                 existential bound for random codes up to constant
                 factors. Further, the alphabet size of the codes is a
                 constant depending only on $\epsilon$ --- it can be
                 made $\exp(\tilde{O}(1 / \epsilon^2))$ which is not
                 much worse than the non-constructive $\exp(1 /
                 \epsilon)$ bound of random codes. The code construction
                 is Monte Carlo and has the claimed list decoding
                 property with high probability. Once the code is
                 (efficiently) sampled, the encoding/decoding algorithms
                 are deterministic with a running time $O_{\epsilon}
                 (N^c)$ for an absolute constant $c$, where $N$ is the
                 code's block length. Our construction is based on a
                 careful combination of a linear-algebraic approach to
                 list decoding folded codes from towers of function
                 fields, with a special form of subspace-evasive sets.
                 Instantiating this with the explicit ``asymptotically
                 good'' Garcia--Stichtenoth (GS for short) tower of
                 function fields yields the above parameters. To
                 illustrate the method in a simpler setting, we also
                 present a construction based on Hermitian function
                 fields, which offers similar guarantees with a
                 list-size and alphabet size polylogarithmic in the
                 block length $N$. In comparison, algebraic codes
                 achieving the optimal trade-off between list
                 decodability and rate based on folded Reed--Solomon
                 codes have a decoding complexity of $N^{\Omega(1 /
                 \epsilon)}$, an alphabet size of $N^{\Omega(1 /
                 \epsilon^2)}$, and a list size of $O(1 / \epsilon^2)$
                 (even after combination with subspace-evasive sets).
                 Thus we get an improvement over the previous best
                 bounds in all three aspects simultaneously, and are
                 quite close to the existential random coding bounds.
                 Along the way, we shed light on how to use
                 automorphisms of certain function fields to enable list
                 decoding of the folded version of the associated
                 algebraic-geometric codes.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Dvir:2012:SES,
  author =       "Zeev Dvir and Shachar Lovett",
  title =        "Subspace evasive sets",
  crossref =     "ACM:2012:SPA",
  pages =        "351--358",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214010",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We construct explicit subspace-evasive sets. These are
                 subsets of $F^n$ of size $|F|^{(1- \epsilon)n}$ whose
                 intersection with any $k$-dimensional subspace is
                 bounded by a constant $c(k, \epsilon)$. This problem
                 was raised by Guruswami (CCC 2011) as it leads to
                 optimal rate list-decodable codes of constant list
                 size. The main technical ingredient is the construction
                 of $k$ low-degree polynomials whose common set of zeros
                 has small intersection with any $k$-dimensional
                 subspace.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kaufman:2012:ETR,
  author =       "Tali Kaufman and Alexander Lubotzky",
  title =        "Edge transitive {Ramanujan} graphs and symmetric
                 {LDPC} good codes",
  crossref =     "ACM:2012:SPA",
  pages =        "359--366",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214011",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We present the first explicit construction of a binary
                 symmetric code with constant rate and constant distance
                 (i.e., good code). Moreover, the code is LDPC and its
                 constraint space is generated by the orbit of one
                 constant weight constraint under the group action. Our
                 construction provides the first symmetric LDPC good
                 codes. In particular, it solves the main open problem
                 raised by Kaufman and Wigderson [8].",
  acknowledgement = ack-nhfb,
}

@InProceedings{Makarychev:2012:AAS,
  author =       "Konstantin Makarychev and Yury Makarychev and
                 Aravindan Vijayaraghavan",
  title =        "Approximation algorithms for semi-random partitioning
                 problems",
  crossref =     "ACM:2012:SPA",
  pages =        "367--384",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214013",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "In this paper, we propose and study a new semi-random
                 model for graph partitioning problems. We believe that
                 it captures many properties of real-world instances.
                 The model is more flexible than the semi-random model
                 of Feige and Kilian and planted random model of Bui,
                 Chaudhuri, Leighton and Sipser. We develop a general
                 framework for solving semi-random instances and apply
                 it to several problems of interest. We present constant
                 factor bi-criteria approximation algorithms for
                 semi-random instances of the Balanced Cut, Multicut,
                 Min Uncut, Sparsest Cut and Small Set Expansion
                 problems. We also show how to almost recover the
                 optimal solution if the instance satisfies an
                 additional expanding condition. Our algorithms work in
                 a wider range of parameters than most algorithms for
                 previously studied random and semi-random models.
                 Additionally, we study a new planted algebraic expander
                 model and develop constant factor bi-criteria
                 approximation algorithms for graph partitioning
                 problems in this model.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Sharathkumar:2012:NLT,
  author =       "R. Sharathkumar and Pankaj K. Agarwal",
  title =        "A near-linear time $\epsilon$-approximation algorithm
                 for geometric bipartite matching",
  crossref =     "ACM:2012:SPA",
  pages =        "385--394",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214014",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "For point sets $A, B \subset \mathbb{R}^d$, $|A| = |B|
                 = n$, and for a parameter $\epsilon > 0$, we present an
                 algorithm that computes, in $O(n \poly(\log n, 1 /
                 \epsilon))$ time, an $\epsilon$-approximate perfect
                 matching of $A$ and $B$ with high probability; the
                 previously best known algorithm takes $\Omega(n^{3/2})$
                 time. We approximate the $L_p$-norm using a distance
                 function $d(\cdot,\cdot)$ based on a randomly shifted
                 quad-tree. The algorithm iteratively generates an
                 approximate minimum-cost augmenting path under
                 $d(\cdot,\cdot)$ in time proportional to the length of
                 the path. We show that the total length of the
                 augmenting paths generated by the algorithm is $O((n /
                 \epsilon)\log n)$, implying that the running time of
                 our algorithm is $O(n \poly(\log n, 1 / \epsilon))$.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Abraham:2012:UPD,
  author =       "Ittai Abraham and Ofer Neiman",
  title =        "Using petal-decompositions to build a low stretch
                 spanning tree",
  crossref =     "ACM:2012:SPA",
  pages =        "395--406",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214015",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We prove that any graph $G = (V, E)$ with $n$ points
                 and $m$ edges has a spanning tree $T$ such that
                 $\sum_{(u,v) \in E(G)} d_T(u,v) = O(m \log n \log \log
                 n)$. Moreover such a tree can be found in time $O(m
                 \log n \log \log n)$. Our result is obtained using a
                 new petal-decomposition approach which guarantees that
                 the radius of each cluster in the tree is at most $4$
                 times the radius of the induced subgraph of the cluster
                 in the original graph.",
  acknowledgement = ack-nhfb,
}
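
%%% The quantity being bounded, the total stretch $\sum_{(u,v) \in E}
%%% d_T(u,v)$, in a short Python sketch (ours, for illustration; BFS
%%% all-pairs is fine for small unweighted examples, not for the paper's
%%% near-linear regime):
%%%
%%%     from collections import deque
%%%
%%%     def total_tree_stretch(edges, tree_edges, n):
%%%         adj = [[] for _ in range(n)]
%%%         for u, v in tree_edges:
%%%             adj[u].append(v); adj[v].append(u)
%%%         def dists(src):                      # BFS distances in T
%%%             d = [-1] * n; d[src] = 0; q = deque([src])
%%%             while q:
%%%                 u = q.popleft()
%%%                 for w in adj[u]:
%%%                     if d[w] < 0:
%%%                         d[w] = d[u] + 1; q.append(w)
%%%             return d
%%%         dist = [dists(s) for s in range(n)]
%%%         return sum(dist[u][v] for u, v in edges)
%%%
%%%     # 4-cycle with a path tree: the non-tree edge (0, 3) has stretch 3.
%%%     print(total_tree_stretch([(0, 1), (1, 2), (2, 3), (0, 3)],
%%%                              [(0, 1), (1, 2), (2, 3)], 4))   # 6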

@InProceedings{Brunsch:2012:ISA,
  author =       "Tobias Brunsch and Heiko R{\"o}glin",
  title =        "Improved smoothed analysis of multiobjective
                 optimization",
  crossref =     "ACM:2012:SPA",
  pages =        "407--426",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214016",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We present several new results about smoothed analysis
                 of multiobjective optimization problems. Motivated by
                 the discrepancy between worst-case analysis and
                 practical experience, this line of research has gained
                 a lot of attention in the last decade. We consider
                 problems in which $d$ linear and one arbitrary
                 objective function are to be optimized over a set $S
                 \subseteq \{0, 1\}^n$ of feasible solutions. We improve
                 the previously best known bound for the smoothed number
                 of Pareto-optimal solutions to $O(n^{2d} \phi^d)$,
                 where $\phi$ denotes the perturbation parameter.
                 Additionally, we show that for any constant $c$ the
                 $c$-th moment of the smoothed number of Pareto-optimal
                 solutions is bounded by $O((n^{2d} \phi^d)^c)$. This
                 improves the previously best known bounds
                 significantly. Furthermore, we address the criticism
                 that the perturbations in smoothed analysis destroy the
                 zero-structure of problems by showing that the smoothed
                 number of Pareto-optimal solutions remains polynomially
                 bounded even for zero-preserving perturbations. This
                 broadens the class of problems captured by smoothed
                 analysis and it has consequences for non-linear
                 objective functions. One corollary of our result is
                 that the smoothed number of Pareto-optimal solutions is
                 polynomially bounded for polynomial objective
                 functions.",
  acknowledgement = ack-nhfb,
}
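
%%% A tiny brute-force sketch (Python, ours) of the quantity under study,
%%% the number of Pareto-optimal solutions over $S = \{0,1\}^n$; here
%%% both objectives are random linear functions, an illustrative stand-in
%%% for the perturbed instances of smoothed analysis.
%%%
%%%     import itertools, random
%%%
%%%     def pareto_count(n):
%%%         w1 = [random.random() for _ in range(n)]
%%%         w2 = [random.random() for _ in range(n)]
%%%         sols = [(sum(a * b for a, b in zip(w1, s)),
%%%                  sum(a * b for a, b in zip(w2, s)))
%%%                 for s in itertools.product((0, 1), repeat=n)]
%%%         # A solution is Pareto-optimal if nothing weakly dominates it
%%%         # (ties have probability zero with continuous weights).
%%%         return sum(1 for a in sols
%%%                    if not any(b[0] >= a[0] and b[1] >= a[1] and b != a
%%%                               for b in sols))
%%%
%%%     print(pareto_count(10))   # typically far below 2^10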

@InProceedings{Leonardi:2012:PFA,
  author =       "Stefano Leonardi and Tim Roughgarden",
  title =        "Prior-free auctions with ordered bidders",
  crossref =     "ACM:2012:SPA",
  pages =        "427--434",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214018",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Prior-free auctions are robust auctions that assume no
                 distribution over bidders' valuations and provide
                 worst-case (input-by-input) approximation guarantees.
                 In contrast to previous work on this topic, we pursue
                 good prior-free auctions with non-identical bidders.
                 Prior-free auctions can approximate meaningful
                 benchmarks for non-identical bidders only when
                 ``sufficient qualitative information'' about the bidder
                 asymmetry is publicly known. We consider digital goods
                 auctions where there is a total ordering of the bidders
                 that is known to the seller, where earlier bidders are
                 in some sense thought to have higher valuations. We use
                 the framework of Hartline and Roughgarden (STOC '08) to
                 define an appropriate revenue benchmark: the maximum
                 revenue that can be obtained from a bid vector using
                 prices that are nonincreasing in the bidder ordering
                 and bounded above by the second-highest bid. This
                 monotone-price benchmark is always as large as the
                 well-known fixed-price benchmark $F^{(2)}$, so
                 designing prior-free auctions with good approximation
                 guarantees is only harder. By design, an auction that
                 approximates the monotone-price benchmark satisfies a
                 very strong guarantee: it is, in particular,
                 simultaneously near-optimal for essentially every
                 Bayesian environment in which bidders' valuation
                 distributions have nonincreasing monopoly prices, or in
                 which the distribution of each bidder stochastically
                 dominates that of the next. Of course, even if there is
                 no distribution over bidders' valuations, such an
                 auction still provides a quantifiable input-by-input
                 performance guarantee. In this paper, we design a
                 simple prior-free auction for digital goods with
                 ordered bidders, the Random Price Restriction (RPR)
                 auction. We prove that its expected revenue on every
                 bid profile $b$ is $\Omega(M(b) / \log^* n)$, where $M$
                 denotes the monotone-price benchmark and $\log^* n$
                 denotes the number of times that the $\log_2$ operator
                 can be applied to $n$ before the result drops below a
                 fixed constant.",
  acknowledgement = ack-nhfb,
}
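
%%% The monotone-price benchmark $M(b)$ is easy to compute offline; a
%%% short dynamic-programming sketch (Python, ours) follows, using the
%%% standard observation that some optimal nonincreasing price vector
%%% takes values in the set of bids capped at the second-highest bid.
%%%
%%%     from functools import lru_cache
%%%
%%%     def monotone_price_benchmark(bids):
%%%         cap = sorted(bids)[-2]               # second-highest bid
%%%         levels = sorted({min(b, cap) for b in bids}, reverse=True)
%%%         n, m = len(bids), len(levels)
%%%
%%%         @lru_cache(maxsize=None)
%%%         def best(i, j):        # bidders i.., price at most levels[j]
%%%             if i == n:
%%%                 return 0.0
%%%             out = 0.0
%%%             for k in range(j, m):            # prices stay nonincreasing
%%%                 gain = levels[k] if bids[i] >= levels[k] else 0.0
%%%                 out = max(out, gain + best(i + 1, k))
%%%             return out
%%%
%%%         return best(0, 0)
%%%
%%%     print(monotone_price_benchmark([10, 7, 7, 3, 2]))
%%%     # 26, from the price vector (7, 7, 7, 3, 2)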

@InProceedings{Chawla:2012:LBB,
  author =       "Shuchi Chawla and Nicole Immorlica and Brendan
                 Lucier",
  title =        "On the limits of black-box reductions in mechanism
                 design",
  crossref =     "ACM:2012:SPA",
  pages =        "435--448",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214019",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We consider the problem of converting an arbitrary
                 approximation algorithm for a single-parameter
                 optimization problem into a computationally efficient
                 truthful mechanism. We ask for reductions that are
                 black-box, meaning that they require only oracle access
                 to the given algorithm and in particular do not require
                 explicit knowledge of the problem constraints. Such a
                 reduction is known to be possible, for example, for the
                 social welfare objective when the goal is to achieve
                 Bayesian truthfulness and preserve social welfare in
                 expectation. We show that a black-box reduction for the
                 social welfare objective is not possible if the
                 resulting mechanism is required to be truthful in
                 expectation and to preserve the worst-case
                 approximation ratio of the algorithm to within a
                 subpolynomial factor. Further, we prove that for other
                 objectives such as makespan, no black-box reduction is
                 possible even if we only require Bayesian truthfulness
                 and an average-case performance guarantee.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Bei:2012:BFM,
  author =       "Xiaohui Bei and Ning Chen and Nick Gravin and Pinyan
                 Lu",
  title =        "Budget feasible mechanism design: from prior-free to
                 {Bayesian}",
  crossref =     "ACM:2012:SPA",
  pages =        "449--458",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214020",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Budget feasible mechanism design studies procurement
                 combinatorial auctions in which the sellers have
                 private costs to produce items, and the buyer
                 (auctioneer) aims to maximize a social valuation
                 function on subsets of items, under the budget
                 constraint on the total payment. One of the most
                 important questions in the field is ``which valuation
                 domains admit truthful budget feasible mechanisms with
                 'small' approximations (compared to the social
                 optimum)?'' Singer [35] showed that additive and
                 submodular functions have a constant approximation
                 mechanism. Recently, Dobzinski, Papadimitriou, and
                 Singer [20] gave an $O(\log^2 n)$ approximation
                 mechanism for subadditive functions; further, they
                 remarked that: ``A fundamental question is whether,
                 regardless of computational constraints, a
                 constant-factor budget feasible mechanism exists for
                 subadditive functions.'' In this paper, we address this
                 question from two viewpoints: prior-free worst case
                 analysis and Bayesian analysis, which are two standard
                 approaches from computer science and economics,
                 respectively. --- For the prior-free framework, we use
                 a linear program (LP) that describes the fractional
                 cover of the valuation function; the LP is also
                 connected to the concept of approximate core in
                 cooperative game theory. We provide a mechanism for
                 subadditive functions whose approximation is $O(I)$,
                 via the worst case integrality gap $I$ of this LP. This
                 implies an $O(\log n)$-approximation for subadditive
                 valuations, $O(1)$-approximation for XOS valuations, as
                 well as for valuations having a constant integrality
                 gap. XOS valuations are an important class of functions
                 and lie between the submodular and the subadditive
                 classes of valuations. We further give another
                 polynomial time $O(\log n / (\log \log n))$
                 sub-logarithmic approximation mechanism for subadditive
                 functions. Both of our mechanisms improve the best
                 known approximation ratio $O(\log^2 n)$. --- For the
                 Bayesian framework, we provide a constant approximation
                 mechanism for all subadditive functions, using the
                 above prior-free mechanism for XOS valuations as a
                 subroutine. Our mechanism allows correlations in the
                 distribution of private information and is universally
                 truthful.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Cai:2012:ACM,
  author =       "Yang Cai and Constantinos Daskalakis and S. Matthew
                 Weinberg",
  title =        "An algorithmic characterization of multi-dimensional
                 mechanisms",
  crossref =     "ACM:2012:SPA",
  pages =        "459--478",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214021",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We show that every feasible, Bayesian, multi-item
                 multi-bidder mechanism for independent, additive
                 bidders can be implemented as a mechanism that: (a)
                 allocates every item independently of the other items;
                 (b) for the allocation of each item it uses a strict
                 ordering of all bidders' types; and allocates the item
                 using a distribution over hierarchical mechanisms that
                 iron this ordering into a non-strict ordering, and give
                 the item uniformly at random to the bidders whose
                 reported types dominate all other reported types
                 according to the non-strict ordering. Combined with
                 cyclic-monotonicity our results provide a
                 characterization of feasible, Bayesian Incentive
                 Compatible mechanisms in this setting. Our
                 characterization is enabled by a new, constructive
                 proof of Border's theorem [Border 1991], and a new
                 generalization of this theorem to independent (but not
                 necessarily identically distributed) bidders, improving
                 upon the results of [Border 2007, Che--Kim--Mierendorf
                 2011]. For a single item and independent bidders, we
                 show that every feasible reduced form auction can be
                 implemented as a distribution over hierarchical
                 mechanisms that are consistent with the same strict
                 ordering of all bidders' types, which every mechanism
                 in the support of the distribution irons to a
                 non-strict ordering. We also give a polynomial-time
                 algorithm for determining feasibility of a reduced form
                 auction, or providing a separating hyperplane from the
                 set of feasible reduced forms. To complete the picture,
                 we provide polynomial-time algorithms to find and
                 exactly sample from a distribution over hierarchical
                 mechanisms consistent with a given feasible reduced
                 form. All these results generalize to multi-item
                 reduced form auctions for independent, additive
                 bidders. Finally, for multiple items, additive bidders
                 with hard demand constraints, and arbitrary value
                 correlation across items or bidders, we give a proper
                 generalization of Border's Theorem, and characterize
                 feasible reduced form auctions as multi-commodity flows
                 in related multi-commodity flow instances. We also show
                 that our generalization holds for a broader class of
                 feasibility constraints, including the intersection of
                 any two matroids. As a corollary of our results we
                 compute revenue-optimal, Bayesian Incentive Compatible
                 (BIC) mechanisms in multi-item multi-bidder settings,
                 when each bidder has arbitrarily correlated values over
                 the items and additive valuations over bundles of
                 items, and the bidders are independent. Our mechanisms
                 run in time polynomial in the total number of bidder
                 types (and {not} type profiles). This running time is
                 polynomial in the number of bidders, but potentially
                 exponential in the number of items. We improve the
                 running time to polynomial in both the number of items
                 and the number of bidders by using recent structural
                 results on optimal BIC auctions in item-symmetric
                 settings [Daskalakis--Weinberg 2011].",
  acknowledgement = ack-nhfb,
}

@InProceedings{Gal:2012:TBC,
  author =       "Anna G{\'a}l and Kristoffer Arnsfelt Hansen and Michal
                 Kouck{\'y} and Pavel Pudl{\'a}k and Emanuele Viola",
  title =        "Tight bounds on computing error-correcting codes by
                 bounded-depth circuits with arbitrary gates",
  crossref =     "ACM:2012:SPA",
  pages =        "479--494",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214023",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We bound the minimum number $w$ of wires needed to
                 compute any (asymptotically good) error-correcting code
                 $C: \{0, 1\}^{\Omega(n)} \to \{0, 1\}^n$ with minimum
                 distance $\Omega(n)$, using unbounded fan-in circuits
                 of depth $d$ with arbitrary gates. Our main results
                 are: (1) If $d = 2$ then $w = \Theta(n({\log n/ \log
                 \log n})^2)$. (2) If $d = 3$ then $w = \Theta(n \lg \lg
                 n)$. (3) If $d = 2k$ or $d = 2k + 1$ for some integer
                 $k \geq 2$ then $w = \Theta(n \lambda_k (n))$, where
                  $\lambda_1(n) = \lceil \log n \rceil$, $\lambda_{i +
                  1}(n) = \lambda_i^*(n)$, and the $*$ operation gives
                  how many times one has to iterate the function
                  $\lambda_i$ to reach a value at most $1$ from the
                  argument $n$. (4) If $d = \log^* n$ then $w = O(n)$.
                  For depth $d = 2$,
                 our $\Omega(n (\log n/\log \log n)^2)$ lower bound
                 gives the largest known lower bound for computing any
                 linear map. Using a result by Ishai, Kushilevitz,
                 Ostrovsky, and Sahai (2008), we also obtain similar
                 bounds for computing pairwise-independent hash
                 functions. Our lower bounds are based on a
                 superconcentrator-like condition that the graphs of
                 circuits computing good codes must satisfy. This
                 condition is provably intermediate between
                 superconcentrators and their weakenings considered
                 before.",
  acknowledgement = ack-nhfb,
}
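
%%% The wire bounds above are stated in terms of the slowly-growing
%%% functions $\lambda_i$, which are concrete enough to compute.  The
%%% following Python sketch (ours, for illustration; not from the paper)
%%% implements lambda_i(n) exactly as defined in the abstract:
%%% lambda_1(n) = ceil(log2 n), and lambda_{i+1}(n) counts how many
%%% times lambda_i must be iterated to drive n down to at most 1.
%%%
%%%     import math
%%%
%%%     def lam(i, n):
%%%         # lambda_1(n) = ceil(log2 n); lambda_{i+1}(n) = lambda_i^*(n),
%%%         # the number of iterations of lambda_i needed to reach <= 1.
%%%         if i == 1:
%%%             return math.ceil(math.log2(n)) if n > 1 else 0
%%%         count = 0
%%%         while n > 1:
%%%             n = lam(i - 1, n)
%%%             count += 1
%%%         return count
%%%
%%%     # lam(2, n) is essentially log* n, lam(3, n) is log** n, etc.
%%%     print([lam(i, 10**9) for i in (1, 2, 3)])   # [30, 5, 4]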

@InProceedings{Chan:2012:TBM,
  author =       "Siu Man Chan and Aaron Potechin",
  title =        "Tight bounds for monotone switching networks via
                 {Fourier} analysis",
  crossref =     "ACM:2012:SPA",
  pages =        "495--504",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214024",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We prove tight size bounds on monotone switching
                 networks for the $k$-clique problem, and for an
                 explicit monotone problem by analyzing the generation
                 problem with a pyramid structure of height $h$. This
                 gives alternative proofs of the separations of $m$-NC
                 from $m$-P and of $m$-NC$^i$ from $m$-NC$^{i + 1}$,
                 different from Raz--McKenzie (Combinatorica '99). The
                 enumerative-combinatorial and Fourier analytic
                 techniques in this work are very different from a large
                 body of work on circuit depth lower bounds, and may be
                 of independent interest.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Braverman:2012:IIC,
  author =       "Mark Braverman",
  title =        "Interactive information complexity",
  crossref =     "ACM:2012:SPA",
  pages =        "505--524",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214025",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The primary goal of this paper is to define and study
                 the interactive information complexity of functions.
                 Let $f(x,y)$ be a function, and suppose Alice is given
                 $x$ and Bob is given $y$. Informally, the interactive
                 information complexity ${\rm IC}(f)$ of $f$ is the
                 least amount of information Alice and Bob need to
                 reveal to each other to compute $f$. Previously,
                 information complexity has been defined with respect to
                 a prior distribution on the input pairs $(x,y)$. Our
                 first goal is to give a definition that is independent
                 of the prior distribution. We show that several
                 possible definitions are essentially equivalent. We
                 establish some basic properties of the interactive
                 information complexity ${\rm IC}(f)$. In particular, we
                 show that ${\rm IC}(f)$ is equal to the amortized
                 (randomized) communication complexity of $f$. We also
                 show a direct sum theorem for ${\rm IC}(f)$ and give
                 the first general connection between information
                 complexity and (non-amortized) communication
                 complexity. This connection implies that a non-trivial
                 exchange of information is required when solving
                 problems that have non-trivial communication
                 complexity. We explore the information complexity of
                 two specific problems --- Equality and Disjointness. We
                 show that only a constant amount of information needs
                 to be exchanged when solving Equality with no errors,
                 while solving Disjointness with a constant error
                 probability requires the parties to reveal a linear
                 amount of information to each other.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Sherstov:2012:MCC,
  author =       "Alexander A. Sherstov",
  title =        "The multiparty communication complexity of set
                 disjointness",
  crossref =     "ACM:2012:SPA",
  pages =        "525--548",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214026",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We study the set disjointness problem in the
                 number-on-the-forehead model of multiparty
                 communication.\par

                 (i) We prove that $k$-party set disjointness has
                 communication complexity $\Omega(n/4^k)^{1/4}$ in the
                 randomized and nondeterministic models and
                 $\Omega(n/4^k)^{1/8}$ in the Merlin--Arthur model.
                 These lower bounds are close to tight. Previous lower
                  bounds (2007--2008) for $k \geq 3$ parties were weaker
                 than $\Omega(n/2^{k^3})^{1 / (k + 1)}$ in all three
                 models.\par

                 (ii) We prove that solving $\ell$ instances of set
                 disjointness requires $\ell \cdot \Omega(n/4^k)^{1/4}$
                 bits of communication, even to achieve correctness
                 probability exponentially close to $1/2$. This gives
                 the first direct-product result for multiparty set
                 disjointness, solving an open problem due to Beame,
                 Pitassi, Segerlind, and Wigderson (2005).\par

                 (iii) We construct a read-once $\{\wedge,
                 \vee\}$-circuit of depth 3 with exponentially small
                 discrepancy for up to $k \approx (1/2)\log n$ parties.
                 This result is optimal with respect to depth and solves
                 an open problem due to Beame and Huynh-Ngoc (FOCS '09),
                 who gave a depth-$6$ construction. Applications to
                 circuit complexity are given.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Cheung:2012:FMR,
  author =       "Ho Yee Cheung and Tsz Chiu Kwok and Lap Chi Lau",
  title =        "Fast matrix rank algorithms and applications",
  crossref =     "ACM:2012:SPA",
  pages =        "549--562",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214028",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We consider the problem of computing the rank of an $m
                 \times n$ matrix $A$ over a field. We present a
                 randomized algorithm to find a set of $r = \rank(A)$
                 linearly independent columns in $O(|A| + r^w)$ field
                 operations, where $|A|$ denotes the number of nonzero
                 entries in $A$ and $w < 2.38$ is the matrix
                 multiplication exponent. Previously the best known
                 algorithm to find a set of $r$ linearly independent
                 columns is by Gaussian elimination, with running time
                 $O(m n r^w)$. Our algorithm is faster when $r <
                 \max\{m, n\}$, for instance when the matrix is
                 rectangular. We also consider the problem of computing
                 the rank of a matrix dynamically, supporting the
                 operations of rank one updates and additions and
                 deletions of rows and columns. We present an algorithm
                 that updates the rank in $O(m n)$ field operations. We
                 show that these algorithms can be used to obtain faster
                 algorithms for various problems in numerical linear
                 algebra, combinatorial optimization and dynamic data
                 structure.",
  acknowledgement = ack-nhfb,
}
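
%%% The paper's randomized $O(|A| + r^w)$ algorithm is more involved than
%%% anything that fits here.  As a simple point of comparison (our
%%% illustration, not the paper's method), column-pivoted QR also extracts
%%% a set of r = rank(A) linearly independent columns, at classical cost:
%%%
%%%     import numpy as np
%%%     from scipy.linalg import qr
%%%
%%%     A = np.random.rand(8, 5) @ np.random.rand(5, 12)  # 8 x 12, rank <= 5
%%%     Q, R, piv = qr(A, pivoting=True)                  # column-pivoted QR
%%%     tol = 1e-10 * abs(R[0, 0])
%%%     r = int(np.sum(np.abs(np.diag(R)) > tol))         # numerical rank
%%%     print(r, sorted(piv[:r]))   # r and indices of independent columns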

@InProceedings{Hassanieh:2012:NOS,
  author =       "Haitham Hassanieh and Piotr Indyk and Dina Katabi and
                 Eric Price",
  title =        "Nearly optimal sparse {Fourier} transform",
  crossref =     "ACM:2012:SPA",
  pages =        "563--578",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214029",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We consider the problem of computing the $k$-sparse
                 approximation to the discrete Fourier transform of an
                 $n$-dimensional signal. We show:\par

                 (1) An $O(k \log n)$-time randomized algorithm for the
                 case where the input signal has at most $k$ non-zero
                 Fourier coefficients, and\par

                 (2) An $O(k \log n \log (n/k))$-time randomized
                 algorithm for general input signals.\par

                 Both algorithms achieve $o(n \log n)$ time, and thus
                 improve over the Fast Fourier Transform, for any $k =
                 o(n)$. They are the first known algorithms that satisfy
                 this property. Also, if one assumes that the Fast
                 Fourier Transform is optimal, the algorithm for the
                 exactly $k$-sparse case is optimal for any $k =
                 n^{\Omega(1)}$.\par

                 We complement our algorithmic results by showing that
                 any algorithm for computing the sparse Fourier
                 transform of a general signal must use at least
                 $\Omega(k \log (n / k) / \log \log n)$ signal samples,
                 even if it is allowed to perform adaptive sampling.",
  acknowledgement = ack-nhfb,
}
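
%%% A core primitive behind such algorithms is frequency bucketing via
%%% aliasing: subsampling a signal in time folds its spectrum, so the
%%% B-point DFT of every (n/B)-th sample sums the coefficients in each
%%% residue class mod B.  The sketch below (ours; it verifies only this
%%% identity, not the paper's algorithm) checks it with numpy:
%%%
%%%     import numpy as np
%%%
%%%     n, B, k = 1024, 16, 3
%%%     X = np.zeros(n, dtype=complex)
%%%     X[np.random.choice(n, k, replace=False)] = np.random.randn(k)
%%%     x = np.fft.ifft(X)              # time signal with k-sparse spectrum
%%%
%%%     Y = np.fft.fft(x[::n // B])     # B-point DFT of the subsampled signal
%%%     buckets = (B / n) * np.array([X[h::B].sum() for h in range(B)])
%%%     print(np.allclose(Y, buckets))  # True: spectrum aliased into B buckets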

@InProceedings{Etessami:2012:PTA,
  author =       "Kousha Etessami and Alistair Stewart and Mihalis
                 Yannakakis",
  title =        "Polynomial time algorithms for multi-type branching
                 processes and stochastic context-free grammars",
  crossref =     "ACM:2012:SPA",
  pages =        "579--588",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214030",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We show that one can approximate the least fixed point
                 solution for a multivariate system of monotone
                 probabilistic polynomial equations in time polynomial
                 in both the encoding size of the system of equations
                 and in $\log (1 / \epsilon)$, where $\epsilon > 0$ is
                 the desired additive error bound of the solution. (The
                 model of computation is the standard Turing machine
                 model.) We use this result to resolve several open
                 problems regarding the computational complexity of
                 computing key quantities associated with some classic
                 and heavily studied stochastic processes, including
                 multi-type branching processes and stochastic
                 context-free grammars.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Adamaszek:2012:OOB,
  author =       "Anna Adamaszek and Artur Czumaj and Matthias Englert
                 and Harald R{\"a}cke",
  title =        "Optimal online buffer scheduling for block devices",
  crossref =     "ACM:2012:SPA",
  pages =        "589--598",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214031",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We introduce a buffer scheduling problem for block
                 operation devices in an online setting. We consider a
                 stream of items of different types to be processed by a
                 block device. The block device can process all items of
                 the same type in a single step. To improve the
                 performance of the system a buffer of size $k$ is used
                 to store items in order to reduce the number of
                 operations required. Whenever the buffer becomes full a
                 buffer scheduling strategy has to select one type and
                 then a block operation on all elements with this type
                 that are currently in the buffer is performed. The goal
                 is to design a scheduling strategy that minimizes the
                 number of block operations required. In this paper we
                 consider the online version of this problem, where the
                 buffer scheduling strategy must make decisions without
                 knowing the future items that appear in the input
                 stream. Our main result is the design of an $O(\log
                 \log k)$-competitive online randomized buffer
                 scheduling strategy. The bound is asymptotically tight.
                 As a byproduct of our LP-based techniques, we obtain a
                 randomized offline algorithm that approximates the
                 optimal number of block operations to within a constant
                 factor.",
  acknowledgement = ack-nhfb,
}
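
%%% The problem statement is concrete enough to simulate.  The sketch below
%%% (ours) counts block operations for a naive heuristic, namely always
%%% flushing the type currently most frequent in the buffer; this is a
%%% baseline only, not the paper's O(log log k)-competitive strategy:
%%%
%%%     import random
%%%     from collections import Counter
%%%
%%%     def blocks_used(stream, k, pick):
%%%         buf, ops = Counter(), 0
%%%         for item in stream:
%%%             buf[item] += 1
%%%             if sum(buf.values()) == k:   # buffer full: flush one type
%%%                 del buf[pick(buf)]
%%%                 ops += 1
%%%         return ops + len(buf)            # flush leftovers type by type
%%%
%%%     greedy = lambda buf: buf.most_common(1)[0][0]
%%%     stream = [random.randrange(20) for _ in range(10_000)]
%%%     print(blocks_used(stream, k=64, pick=greedy))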

@InProceedings{Agrawal:2012:JHC,
  author =       "Manindra Agrawal and Chandan Saha and Ramprasad
                 Saptharishi and Nitin Saxena",
  title =        "{Jacobian} hits circuits: hitting-sets, lower bounds
                 for depth-{D} occur-$k$ formulas \& depth-$3$
                 transcendence degree-$k$ circuits",
  crossref =     "ACM:2012:SPA",
  pages =        "599--614",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214033",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We present a single common tool to strictly subsume
                 all known cases of polynomial time blackbox polynomial
                 identity testing (PIT), that have been hitherto solved
                 using diverse tools and techniques, over fields of zero
                 or large characteristic. In particular, we show that
                 polynomial time hitting-set generators for identity
                 testing of the two seemingly different and well studied
                 models --- depth-3 circuits with bounded top fanin, and
                 constant-depth constant-read multilinear formulas ---
                 can be constructed using one common algebraic-geometry
                 theme: Jacobian captures algebraic independence. By
                 exploiting the Jacobian, we design the first efficient
                 hitting-set generators for broad generalizations of the
                 above-mentioned models, namely:\par

                  (1) depth-3 $(\Sigma \Pi \Sigma)$ circuits with
                 constant transcendence degree of the polynomials
                 computed by the product gates (no bounded top fanin
                 restriction), and\par

                 (2) constant-depth constant-{\em occur\/} formulas (no
                 multilinear restriction).\par

                  Constant-{\em occur\/} of a variable, as we define it, is
                 a much more general concept than constant-read. Also,
                 earlier work on the latter model assumed that the
                 formula is multilinear. Thus, our work goes further
                 beyond the related results obtained by Saxena \&
                 Seshadhri (STOC 2011), Saraf \& Volkovich (STOC 2011),
                 Anderson et al. (CCC 2011), Beecken et al. (ICALP 2011)
                 and Grenet et al. (FSTTCS 2011), and brings them under
                 one unifying technique.\par

                 In addition, using the same Jacobian based approach, we
                 prove exponential lower bounds for the immanant (which
                 includes permanent and determinant) on the same
                 depth-$3$ and depth-$4$ models for which we give
                 efficient PIT algorithms. Our results reinforce the
                 intimate connection between identity testing and lower
                 bounds by exhibiting a concrete mathematical tool ---
                 the Jacobian --- that is equally effective in solving
                 both the problems on certain interesting and previously
                 well-investigated (but not well understood) models of
                 computation.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Dvir:2012:SMB,
  author =       "Zeev Dvir and Guillaume Malod and Sylvain Perifel and
                 Amir Yehudayoff",
  title =        "Separating multilinear branching programs and
                 formulas",
  crossref =     "ACM:2012:SPA",
  pages =        "615--624",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214034",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "This work deals with the power of linear algebra in
                 the context of multilinear computation. By linear
                 algebra we mean algebraic branching programs (ABPs)
                 which are known to be computationally equivalent to two
                 basic tools in linear algebra: iterated matrix
                 multiplication and the determinant. We compare the
                 computational power of multilinear ABPs to that of
                 multilinear arithmetic formulas, and prove a tight
                 super-polynomial separation between the two models.
                 Specifically, we describe an explicit $n$-variate
                 polynomial $F$ that is computed by a linear-size
                 multilinear ABP but every multilinear formula computing
                 $F$ must be of size n$^{ \Omega(\log n)}$.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Gupta:2012:RDM,
  author =       "Ankit Gupta and Neeraj Kayal and Satya Lokam",
  title =        "Reconstruction of depth-$4$ multilinear circuits with
                 top fan-in $2$",
  crossref =     "ACM:2012:SPA",
  pages =        "625--642",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214035",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We present a randomized algorithm for reconstructing
                 multilinear $\Sigma \Pi \Sigma \Pi (2)$ circuits, i.e.,
                 multilinear depth-$4$ circuits with fan-in $2$ at the
                 top $+$ gate. The algorithm is given blackbox access to
                 a polynomial $f \in F[x_1,\ldots{},x_n]$ computable by
                 a multilinear $\Sigma \Pi \Sigma \Pi (2)$ circuit of
                 size $s$ and outputs an equivalent multilinear $\Sigma
                 \Pi \Sigma \Pi (2)$ circuit, runs in time $\poly(n,s)$,
                 and works over any field $F$. This is the first
                 reconstruction result for any model of depth-$4$
                 arithmetic circuits. Prior to our work, reconstruction
                 results for bounded depth circuits were known only for
                 depth-$2$ arithmetic circuits (Klivans \& Spielman,
                 STOC 2001), $\Sigma \Pi \Sigma (2)$ circuits (depth-$3$
                 arithmetic circuits with top fan-in $2$) (Shpilka, STOC
                 2007), and $\Sigma \Pi \Sigma (k)$ with $k = O(1)$
                 (Karnin \& Shpilka, CCC 2009). Moreover, the running
                 times of these algorithms have a polynomial dependence
                  on $|F|$ and hence do not work for infinite fields
                  such as $\mathbb{Q}$. Our techniques are quite different from
                 the previous ones for depth-$3$ reconstruction and rely
                 on a polynomial operator introduced by Karnin et al.
                 (STOC 2010) and Saraf \& Volkovich (STOC 2011) for
                 devising blackbox identity tests for multilinear
                 $\Sigma \Pi \Sigma \Pi (k)$ circuits. Some other
                 ingredients of our algorithm include the classical
                 multivariate blackbox factoring algorithm by Kaltofen
                 \& Trager (FOCS 1988) and an average-case algorithm for
                 reconstructing $\Sigma \Pi \Sigma (2)$ circuits by
                 Kayal.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kayal:2012:APP,
  author =       "Neeraj Kayal",
  title =        "Affine projections of polynomials: extended abstract",
  crossref =     "ACM:2012:SPA",
  pages =        "643--662",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214036",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "An $m$-variate polynomial $f$ is said to be an affine
                 projection of some $n$-variate polynomial $g$ if there
                  exists an $n \times m$ matrix $A$ and an $n$-dimensional
                 vector $b$ such that $f(x) = g(Ax + b)$. In other
                 words, if $f$ can be obtained by replacing each
                 variable of $g$ by an affine combination of the
                 variables occurring in $f$, then it is said to be an
                 affine projection of $g$. Some well known problems
                 (such as the determinant versus permanent and matrix
                 multiplication for example) are instances of this
                 problem. Given $f$ and $g$ can we determine whether $f$
                 is an affine projection of $g$? The intention of this
                 paper is to understand the complexity of the
                 corresponding computational problem: given polynomials
                 $f$ and $g$ find $A$ and $b$ such that $f = g(A x +
                  b)$, if such an $(A, b)$ exists. We first show that this
                 is an NP-hard problem. We then focus our attention on
                 instances where $g$ is a member of some fixed, well
                 known family of polynomials so that the input consists
                 only of the polynomial $f(x)$ having $m$ variables and
                 degree $d$. We consider the situation where $f(x)$ is
                  given to us as a blackbox (i.e., for any point $a \in F^m$ we
                 can query the blackbox and obtain $f(a)$ in one step)
                 and devise randomized algorithms with running time
                 $\poly(m n d)$ in the following special cases. Firstly
                 where $g$ is the Permanent (respectively the
                 Determinant) of an $n \times n$ matrix and $A$ is of
                 rank $n^2$. Secondly where $g$ is the sum of powers
                 polynomial (respectively the sum of products
                 polynomial), and $A$ is a random matrix of the
                 appropriate dimensions (also $d$ should not be too
                 small).",
  acknowledgement = ack-nhfb,
}
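
%%% While *finding* $(A, b)$ is the hard direction, *verifying* a candidate
%%% is easy with randomization: evaluate both sides at random points and
%%% compare (Schwartz--Zippel gives one-sided error).  A minimal Python
%%% sketch of this check, on a toy instance of our own:
%%%
%%%     import random
%%%
%%%     def is_affine_projection(f, g, A, b, m, trials=20, p=2**61 - 1):
%%%         # Randomized test of f(x) == g(A x + b) over F_p.
%%%         for _ in range(trials):
%%%             x = [random.randrange(p) for _ in range(m)]
%%%             y = [(sum(A[i][j] * x[j] for j in range(m)) + b[i]) % p
%%%                  for i in range(len(A))]
%%%             if f(x) % p != g(y) % p:
%%%                 return False
%%%         return True
%%%
%%%     # f(x) = x^2 is an affine projection of g(y1, y2) = y1 * y2,
%%%     # via A = [[1], [1]], b = [0, 0], since f(x) = g(x, x).
%%%     g = lambda y: y[0] * y[1]
%%%     f = lambda x: x[0] * x[0]
%%%     print(is_affine_projection(f, g, A=[[1], [1]], b=[0, 0], m=1))  # True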

@InProceedings{Bartal:2012:TSP,
  author =       "Yair Bartal and Lee-Ad Gottlieb and Robert
                 Krauthgamer",
  title =        "The traveling salesman problem: low-dimensionality
                 implies a polynomial time approximation scheme",
  crossref =     "ACM:2012:SPA",
  pages =        "663--672",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214038",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The Traveling Salesman Problem (TSP) is among the most
                 famous NP-hard optimization problems. We design for
                 this problem a randomized polynomial-time algorithm
                 that computes a $(1 + \mu)$-approximation to the
                 optimal tour, for any fixed $\mu > 0$, in TSP instances
                 that form an arbitrary metric space with bounded
                 intrinsic dimension. The celebrated results of Arora
                 [Aro98] and Mitchell [Mit99] prove that the above
                 result holds in the special case of TSP in a
                 fixed-dimensional Euclidean space. Thus, our algorithm
                 demonstrates that the algorithmic tractability of
                 metric TSP depends on the dimensionality of the space
                 and not on its specific geometry. This result resolves
                 a problem that has been open since the quasi-polynomial
                 time algorithm of Talwar [Tal04].",
  acknowledgement = ack-nhfb,
}

@InProceedings{Chuzhoy:2012:VSS,
  author =       "Julia Chuzhoy",
  title =        "On vertex sparsifiers with {Steiner} nodes",
  crossref =     "ACM:2012:SPA",
  pages =        "673--688",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214039",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Given an undirected graph $G = (V, E)$ with edge
                 capacities $c_e \geq 1$ for $e \in E$ and a subset $T$
                 of $k$ vertices called terminals, we say that a graph
                 $H$ is a quality-$q$ cut sparsifier for $G$ iff $T
                 \subseteq V(H)$, and for any partition $(A, B)$ of $T$,
                 the values of the minimum cuts separating $A$ and $B$
                 in graphs $G$ and $H$ are within a factor $q$ from each
                 other. We say that $H$ is a quality-$q$ flow sparsifier
                 for $G$ iff $T \subseteq V(H)$, and for any set $D$ of
                 demands over the terminals, the values of the minimum
                 edge congestion incurred by fractionally routing the
                 demands in $D$ in graphs $G$ and $H$ are within a
                 factor $q$ from each other.\par

                 So far vertex sparsifiers have been studied in a
                 restricted setting where the sparsifier $H$ is not
                 allowed to contain any non-terminal vertices, that is
                  $V(H) = T$. For this setting, efficient
                 algorithms are known for constructing quality-$O(\log
                 k/\log \log k)$ cut and flow vertex sparsifiers, as
                 well as a lower bound of $\tilde{\Omega}(\sqrt{\log
                 k})$ on the quality of any flow or cut
                 sparsifier.\par

                 We study flow and cut sparsifiers in the more general
                 setting where Steiner vertices are allowed, that is, we
                  no longer require that $V(H) = T$. We show
                 algorithms to construct constant-quality cut
                 sparsifiers of size $O(C^3)$ in time $\poly(n) \cdot
                 2^C$, and constant-quality flow sparsifiers of size
                 $C^{O(\log \log C)}$ in time $n^{O(\log C)} \cdot 2^C$,
                 where $C$ is the total capacity of the edges incident
                 on the terminals.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Chalermsook:2012:AAH,
  author =       "Parinya Chalermsook and Julia Chuzhoy and Alina Ene
                 and Shi Li",
  title =        "Approximation algorithms and hardness of integral
                 concurrent flow",
  crossref =     "ACM:2012:SPA",
  pages =        "689--708",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214040",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We study an integral counterpart of the classical
                 Maximum Concurrent Flow problem, that we call Integral
                 Concurrent Flow (ICF). In the basic version of this
                 problem (basic-ICF), we are given an undirected
                 $n$-vertex graph $G$ with edge capacities $c(e)$, a
                 subset $T$ of vertices called terminals, and a demand
                 $D(t,t')$ for every pair $(t,t')$ of the terminals. The
                 goal is to find a maximum value $\lambda$, and a
                 collection $P$ of paths, such that every pair $(t,t')$
                 of terminals is connected by $\lfloor \lambda \cdot
                 D(t,t') \rfloor$ paths in $P$, and the number of paths
                 containing any edge $e$ is at most $c(e)$. We show an
                 algorithm that achieves a $\poly \log n$-approximation
                 for basic-ICF, while violating the edge capacities by
                 only a constant factor. We complement this result by
                 proving that no efficient algorithm can achieve a
                 factor $\alpha$-approximation with congestion $c$ for
                 any values $\alpha$, $c$ satisfying $\alpha \cdot c =
                 O(\log \log n / \log \log \log n)$, unless NP
                  $\subseteq$ ZPTIME($n^{\poly \log n}$). We then turn to
                 study the more general group version of the problem
                  (group-ICF), in which we are given a collection
                 $(S_1, T_1), \ldots{}, (S_k, T_k)$ of pairs of vertex
                  subsets, and for each $1 \leq i \leq k$, a demand
                  $D_i$ is specified. The goal is to find a maximum value
                 $\lambda$ and a collection $P$ of paths, such that for
                 each $i$, at least $\lfloor \lambda \cdot D_i \rfloor$
                 paths connect the vertices of $S_i$ to the vertices of
                 $T_i$, while respecting the edge capacities. We show
                  that for any $1 \leq c \leq O(\log \log n)$, no
                  efficient algorithm can achieve a factor $O(n^{1 /
                  2^{2c + 3}})$-approximation with congestion $c$ for the
                 problem, unless NP $\subseteq$ DTIME($n^{O(\log \log
                 n)}$). On the other hand, we show an efficient
                 randomized algorithm that finds a $\poly \log
                 n$-approximate solution with a constant congestion, if
                 we are guaranteed that the optimal solution contains at
                  least $D \geq k \cdot \poly \log n$ paths connecting every
                  pair $(S_i, T_i)$.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Daskalakis:2012:LPB,
  author =       "Constantinos Daskalakis and Ilias Diakonikolas and
                 Rocco A. Servedio",
  title =        "Learning {Poisson} binomial distributions",
  crossref =     "ACM:2012:SPA",
  pages =        "709--728",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214042",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We consider a basic problem in unsupervised learning:
                 learning an unknown Poisson Binomial Distribution. A
                 Poisson Binomial Distribution (PBD) over $\{0,
                 1,\ldots{}, n\}$ is the distribution of a sum of $n$
                 independent Bernoulli random variables which may have
                 arbitrary, potentially non-equal, expectations. These
                 distributions were first studied by S. Poisson in 1837
                 and are a natural $n$-parameter generalization of the
                 familiar Binomial Distribution. Surprisingly, prior to
                 our work this basic learning problem was poorly
                 understood, and known results for it were far from
                 optimal. We essentially settle the complexity of the
                 learning problem for this basic class of distributions.
                 As our main result we give a highly efficient algorithm
                 which learns to $\epsilon$-accuracy using $O(1 /
                 \epsilon^3)$ samples independent of $n$. The running
                 time of the algorithm is quasilinear in the size of its
                 input data, i.e. $\tilde{O}(\log (n)/ \epsilon^3)$
                 bit-operations (observe that each draw from the
                 distribution is a $\log(n)$-bit string). This is nearly
                 optimal since any algorithm must use $\Omega(1 /
                 \epsilon^2)$ samples. We also give positive and
                 negative results for some extensions of this learning
                 problem.",
  acknowledgement = ack-nhfb,
}
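
%%% The distribution class is elementary to sample from, which makes the
%%% sample-complexity claims easy to appreciate.  A short sketch (ours)
%%% that draws from a PBD and sanity-checks its first two moments,
%%% mean = sum p_i and variance = sum p_i (1 - p_i):
%%%
%%%     import random
%%%
%%%     def sample_pbd(ps):
%%%         # One draw: a sum of independent Bernoulli(p_i) variables.
%%%         return sum(random.random() < p for p in ps)
%%%
%%%     ps = [random.random() for _ in range(50)]
%%%     draws = [sample_pbd(ps) for _ in range(100_000)]
%%%     mean = sum(draws) / len(draws)
%%%     var = sum((d - mean) ** 2 for d in draws) / len(draws)
%%%     print(mean, sum(ps))                      # should be close
%%%     print(var, sum(p * (1 - p) for p in ps))  # should be close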

@InProceedings{De:2012:NOS,
  author =       "Anindya De and Ilias Diakonikolas and Vitaly Feldman
                 and Rocco A. Servedio",
  title =        "Nearly optimal solutions for the chow parameters
                 problem and low-weight approximation of halfspaces",
  crossref =     "ACM:2012:SPA",
  pages =        "729--746",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214043",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The Chow parameters of a Boolean function $f: \{-1,
                 1\}^n \to \{-1, 1\}$ are its $n + 1$ degree-$0$ and
                 degree-$1$ Fourier coefficients. It has been known
                 since 1961 [Cho61, Tan61] that the (exact values of
                 the) Chow parameters of any linear threshold function
                 $f$ uniquely specify $f$ within the space of all
                 Boolean functions, but until recently [OS11] nothing
                 was known about efficient algorithms for reconstructing
                 $f$ (exactly or approximately) from exact or
                 approximate values of its Chow parameters. We refer to
                 this reconstruction problem as the Chow Parameters
                 Problem. Our main result is a new algorithm for the
                 Chow Parameters Problem which, given (sufficiently
                 accurate approximations to) the Chow parameters of any
                  linear threshold function $f$, runs in time
                  $\tilde{O}(n^2) \cdot (1 / \epsilon)^{O(\log^2 (1 /
                  \epsilon))}$ and with high probability outputs a
                 representation of an LTF $f'$ that is $\epsilon$-close
                 to $f$. The only previous algorithm [OS11] had running
                  time $\poly(n) \cdot 2^{2^{\tilde{O}(1 / \epsilon^2)}}$.
                 As a byproduct of our approach, we show that for any
                  linear threshold function $f$ over $\{-1, 1\}^n$, there is
                 a linear threshold function $f'$ which is
                 $\epsilon$-close to $f$ and has all weights that are
                  integers at most $\sqrt n \cdot (1 / \epsilon)^{O(\log^2 (1
                  / \epsilon))}$. This significantly improves the best
                  previous result of [Serv09] which gave a $\poly(n) \cdot
                  2^{O(1 / \epsilon^{2/3})}$ weight bound, and is close to
                 the known lower bound of $\max\{\sqrt n, (1 /
                 \epsilon)^{\Omega(\log \log (1 / \epsilon))}\}$
                 [Gol06,Serv07]. Our techniques also yield improved
                 algorithms for related problems in learning theory. In
                 addition to being significantly stronger than previous
                 work, our results are obtained using conceptually
                 simpler proofs. The two main ingredients underlying our
                 results are (1) a new structural result showing that
                  for $f$ any linear threshold function and $g$ any bounded
                  function, if the Chow parameters of $f$ are close to
                  the Chow parameters of $g$ then $f$ is close to $g$; (2)
                 a new boosting-like algorithm that given approximations
                 to the Chow parameters of a linear threshold function
                 outputs a bounded function whose Chow parameters are
                 close to those of $f$.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Sherstov:2012:MPR,
  author =       "Alexander A. Sherstov",
  title =        "Making polynomials robust to noise",
  crossref =     "ACM:2012:SPA",
  pages =        "747--758",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214044",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "A basic question in any computational model is how to
                 reliably compute a given function when the inputs or
                 intermediate computations are subject to noise at a
                 constant rate. Ideally, one would like to use at most a
                 constant factor more resources compared to the
                 noise-free case. This question has been studied for
                 decision trees, circuits, automata, data structures,
                 broadcast networks, communication protocols, and other
                 models. Buhrman et al. (2003) posed the noisy
                 computation problem for real polynomials. We give a
                 complete solution to this problem. For any polynomial
                 $p : \{0, 1\}^n \to [-1, 1]$, we construct a polynomial
                 $p_{\rm robust}: R^n \to R$ of degree $O(\deg p +
                 \log(1 / \epsilon))$ that $\epsilon$-approximates $p$
                 and is robust to noise in the inputs: $|p(x) - p_{\rm
                 robust} (x + \delta)| < \epsilon$ for all $x \in \{0,
                 1\}^n$ and all $\delta \in [-1/3, 1/3]^n$. This result
                 is optimal with respect to all parameters. We construct
                 $p_{\rm robust}$ explicitly for each $p$. Previously,
                 it was open to give such a construction even for $p =
                 x_1 \oplus x_2 \oplus \ldots{} \oplus x_n$ (Buhrman et
                 al., 2003). The proof contributes a technique of
                 independent interest, which allows one to force partial
                 cancellation of error terms in a polynomial.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Goyal:2012:CCN,
  author =       "Sanjeev Goyal and Michael Kearns",
  title =        "Competitive contagion in networks",
  crossref =     "ACM:2012:SPA",
  pages =        "759--774",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214046",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We develop a game-theoretic framework for the study of
                 competition between firms who have budgets to ``seed''
                 the initial adoption of their products by consumers
                 located in a social network. The payoffs to the firms
                 are the eventual number of adoptions of their product
                 through a competitive stochastic diffusion process in
                 the network. This framework yields a rich class of
                 competitive strategies, which depend in subtle ways on
                 the stochastic dynamics of adoption, the relative
                 budgets of the players, and the underlying structure of
                 the social network. We identify a general property of
                 the adoption dynamics --- namely, decreasing returns to
                 local adoption --- for which the inefficiency of
                 resource use at equilibrium (the Price of Anarchy) is
                 uniformly bounded above, across all networks. We also
                 show that if this property is violated the Price of
                 Anarchy can be unbounded, thus yielding sharp threshold
                 behavior for a broad class of dynamics. We also
                 introduce a new notion, the Budget Multiplier, that
                 measures the extent that imbalances in player budgets
                 can be amplified at equilibrium. We again identify a
                 general property of the adoption dynamics --- namely,
                 proportional local adoption between competitors --- for
                 which the (pure strategy) Budget Multiplier is
                 uniformly bounded above, across all networks. We show
                 that a violation of this property can lead to unbounded
                 Budget Multiplier, again yielding sharp threshold
                 behavior for a broad class of dynamics.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Cebrian:2012:FRB,
  author =       "Manuel Cebrian and Lorenzo Coviello and Andrea Vattani
                 and Panagiotis Voulgaris",
  title =        "Finding red balloons with split contracts: robustness
                 to individuals' selfishness",
  crossref =     "ACM:2012:SPA",
  pages =        "775--788",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214047",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The present work deals with the problem of information
                 acquisition in a strategic networked environment. To
                 study this problem, Kleinberg and Raghavan (FOCS 2005)
                 introduced the model of {\em query incentive networks},
                  where the root of a binomial branching process wishes
                  to retrieve a piece of information --- known by each node
                  independently with probability $1/n$ --- by investing as
                 little as possible. The authors considered {\em
                 fixed-payment contracts\/} in which every node
                 strategically chooses an amount to offer its children
                 paid upon information retrieval to convince them to
                 seek the information in their subtrees. Kleinberg and
                 Raghavan discovered that the investment needed at the
                 root exhibits an unexpected threshold behavior that
                  depends on the branching parameter $b$. For $b > 2$, the
                 investment is linear in the expected distance to the
                 closest information (logarithmic in $n$, the rarity of
                 the information), while, for $1 < b < 2$, it becomes
                 exponential in the same distance (i.e., polynomial in
                 $n$). Arcaute et al. (EC 2007) later observed the same
                 threshold behavior for arbitrary Galton--Watson
                 branching processes.\par

                 The DARPA Network Challenge --- retrieving the
                 locations of ten balloons placed at undisclosed
                 positions in the US --- has recently brought practical
                 attention to the problems of social mobilization and
                 information acquisition in a networked environment. The
                 MIT Media Laboratory team won the challenge by acting
                 as the root of a query incentive network that unfolded
                 all over the world. However, rather than adopting a
                 {\em fixed-payment strategy}, the team implemented a
                 different incentive scheme based on {\em $1/2$-split
                 contracts}. Under such incentive scheme, a node $u$ who
                 does not possess the information can recruit a friend
                 $v$ through a contract stipulating that if the
                 information is found in the subtree rooted at $v$, then
                 $v$ has to give half of her own reward back to
                 $u$.\par

                 Motivated by its empirical success, we present a
                 comprehensive theoretical study of this scheme in the
                 game theoretical setting of query incentive networks.
                 Our main result is that split contracts are robust ---
                 as opposed to fixed-payment contracts --- to nodes'
                 selfishness. Surprisingly, when nodes determine the
                 splits to offer their children based on the contracts
                 received from their recruiters, the threshold behavior
                 observed in the previous work vanishes, and an
                 investment linear in the expected distance to the
                 closest information is sufficient to retrieve the
                 information in {\em any arbitrary\/} Galton--Watson
                 process with $b > 1$. Finally, while previous analyses
                 considered the parameters of the branching process as
                 constants, we are able to characterize the rate of the
                 investment in terms of the branching process and the
                 desired probability of success. This allows us to show
                 improvements even in other special cases.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Brandt:2012:AOD,
  author =       "Christina Brandt and Nicole Immorlica and Gautam
                 Kamath and Robert Kleinberg",
  title =        "An analysis of one-dimensional {Schelling}
                 segregation",
  crossref =     "ACM:2012:SPA",
  pages =        "789--804",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214048",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We analyze the Schelling model of segregation in which
                 a society of $n$ individuals live in a ring. Each
                 individual is one of two races and is only satisfied
                  with his location so long as at least half his $2w$
                 nearest neighbors are of the same race as him. In the
                 dynamics, randomly-chosen unhappy individuals
                 successively swap locations. We consider the average
                 size of monochromatic neighborhoods in the final stable
                 state. Our analysis is the first rigorous analysis of
                 the Schelling dynamics. We note that, in contrast to
                 prior approximate analyses, the final state is nearly
                 integrated: the average size of monochromatic
                 neighborhoods is independent of $n$ and polynomial in
                 w.",
  acknowledgement = ack-nhfb,
}
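
%%% The model is simple to simulate directly.  The sketch below (ours, with
%%% a step cap for safety; the paper analyzes the exact dynamics) places
%%% two races on a ring, swaps randomly chosen unhappy individuals of
%%% opposite races, and reports the average monochromatic run length:
%%%
%%%     import random
%%%
%%%     def schelling_ring(n=300, w=4, steps=10_000, seed=0):
%%%         rng = random.Random(seed)
%%%         ring = [rng.randrange(2) for _ in range(n)]
%%%
%%%         def unhappy(i):  # fewer than half of the 2w neighbours agree
%%%             same = sum(ring[(i + d) % n] == ring[i]
%%%                        for d in range(-w, w + 1) if d != 0)
%%%             return same < w
%%%
%%%         for _ in range(steps):
%%%             u0 = [i for i in range(n) if unhappy(i) and ring[i] == 0]
%%%             u1 = [i for i in range(n) if unhappy(i) and ring[i] == 1]
%%%             if not u0 or not u1:
%%%                 break                     # stable state reached
%%%             i, j = rng.choice(u0), rng.choice(u1)
%%%             ring[i], ring[j] = ring[j], ring[i]
%%%         return ring
%%%
%%%     ring = schelling_ring()
%%%     runs = sum(ring[i] != ring[i - 1] for i in range(len(ring))) or 1
%%%     print(len(ring) / runs)   # average monochromatic block length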

@InProceedings{Applebaum:2012:PGL,
  author =       "Benny Applebaum",
  title =        "Pseudorandom generators with long stretch and low
                 locality from random local one-way functions",
  crossref =     "ACM:2012:SPA",
  pages =        "805--816",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214050",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We continue the study of {\em locally-computable\/}
                  pseudorandom generators (PRG) $G : \{0, 1\}^n \to \{0,
                  1\}^m$, whose output bits each depend on a small
                  number $d$ of input bits. While it is known that such
                 generators are likely to exist for the case of small
                 sub-linear stretch $m = n + n^{1 - \delta}$, it is less
                 clear whether achieving larger stretch such as $m = n +
                 \Omega(n)$, or even $m = n^{1 + \delta}$ is possible.
                 The existence of such PRGs, which was posed as an open
                 question in previous works, has recently gained an
                 additional motivation due to several interesting
                 applications.\par

                 We make progress towards resolving this question by
                 obtaining several local constructions based on the
                 one-wayness of ``random'' local functions --- a variant
                 of an assumption made by Goldreich (ECCC 2000).
                 Specifically, we construct collections of PRGs with the
                 following parameters:\par

                 1. Linear stretch $m = n + \Omega(n)$ and constant
                 locality $d = O(1)$.\par

                 2. Polynomial stretch $m = n^{1 + \delta}$ and {\em
                 any\/} (arbitrarily slowly growing) super-constant
                 locality $d = \omega(1)$, e.g., $\log^*n$.\par

                 3. Polynomial stretch $m = n^{1 + \delta}$, constant
                 locality $d = O(1)$, and inverse polynomial
                 distinguishing advantage (as opposed to the standard
                 case of $n^{-\omega(1)}$).\par

                 As an additional contribution, we show that our
                 constructions give rise to strong inapproximability
                 results for the densest-subgraph problem in $d$-uniform
                 hypergraphs for constant $d$. This allows us to improve
                 the previous bounds of Feige (STOC 2002) and Khot (FOCS
                 2004) from constant inapproximability factor to
                 $n^\epsilon$-inapproximability, at the expense of
                 relying on stronger assumptions.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Vadhan:2012:CPS,
  author =       "Salil Vadhan and Colin Jia Zheng",
  title =        "Characterizing pseudoentropy and simplifying
                 pseudorandom generator constructions",
  crossref =     "ACM:2012:SPA",
  pages =        "817--836",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214051",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We provide a characterization of pseudoentropy in
                 terms of hardness of sampling: Let $(X, B)$ be jointly
                 distributed random variables such that $B$ takes values
                 in a polynomial-sized set. We show that $B$ is
                 computationally indistinguishable from a random
                 variable of higher Shannon entropy given $X$ if and
                 only if there is no probabilistic polynomial-time $S$
                 such that $(X, S(X))$ has small KL divergence from $(X,
                 B)$. This can be viewed as an analogue of the
                 Impagliazzo Hardcore Theorem (FOCS '95) for Shannon
                 entropy (rather than min-entropy).\par

                 Using this characterization, we show that if $f$ is a
                 one-way function, then $(f(U_n), U_n)$ has ``next-bit
                 pseudoentropy'' at least $n + \log n$, establishing a
                 conjecture of Haitner, Reingold, and Vadhan (STOC '10).
                 Plugging this into the construction of Haitner et al.,
                 this yields a simpler construction of pseudorandom
                 generators from one-way functions. In particular, the
                 construction only performs hashing once, and only needs
                 the hash functions that are randomness extractors (e.g.
                 universal hash functions) rather than needing them to
                 support ``local list-decoding'' (as in the
                 Goldreich--Levin hardcore predicate, STOC
                 '89).\par

                 With an additional idea, we also show how to improve
                 the seed length of the pseudorandom generator to
                 $\tilde{O}(n^3)$, compared to $\tilde{O}(n^4)$ in the
                 construction of Haitner et al.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Li:2012:DEN,
  author =       "Xin Li",
  title =        "Design extractors, non-malleable condensers and
                 privacy amplification",
  crossref =     "ACM:2012:SPA",
  pages =        "837--854",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214052",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We introduce a new combinatorial object, called a
                 design extractor, that has both the properties of a
                 design and an extractor. We give efficient
                 constructions of such objects and show that they can be
                 used in several applications.\par

                 1. {\bf Improving the output length of known
                 non-malleable extractors.} Non-malleable extractors
                 were introduced in [DW09] to study the problem of
                 privacy amplification with an active adversary.
                 Currently, only two explicit constructions are known
                 [DLWZ11, CRS11]. Both constructions work for $n$ bit
                 sources with min-entropy $k > n / 2$. However, in both
                 constructions the output length is smaller than the
                 seed length, while the probabilistic method shows that
                 to achieve error $\epsilon$, one can use $O(\log n +
                 \log (1 / \epsilon))$ bits to extract up to $k/2$
                 output bits. In this paper, we use our design extractor
                 to give an explicit non-malleable extractor for
                 min-entropy $k > n / 2$, that has seed length $O(\log n
                 + \log (1 / \epsilon))$ and output length
                 $\Omega(k)$.\par

                 2. {\bf Non-malleable condensers.} We introduce and
                 define the notion of a {\em non-malleable condenser}. A
                 non-malleable condenser is a generalization and
                 relaxation of a non-malleable extractor. We show that
                 similar as extractors and condensers, non-malleable
                 condensers can be used to construct non-malleable
                 extractors. We then show that our design extractor
                 already gives a non-malleable condenser for min-entropy
                 $k > n / 2$, with error $\epsilon$ and seed length
                 $O(\log (1 / \epsilon))$.\par

                 3. {\bf A new optimal protocol for privacy
                 amplification.} More surprisingly, we show that
                 non-malleable condensers themselves give optimal
                 privacy amplification protocols with an active
                 adversary. In fact, the non-malleable condensers used
                 in these protocols are much weaker compared to
                 non-malleable extractors, in the sense that the entropy
                 rate of the condenser's output does not need to
                 increase at all. This suggests that one promising next
                 step to achieve better privacy amplification protocols
                 may be to construct non-malleable condensers for
                 smaller min-entropy. As a by-product, we also obtain a
                 new explicit $2$-round privacy amplification protocol
                 with optimal entropy loss and optimal communication
                 complexity for min-entropy $k > n / 2$, without using
                 non-malleable extractors.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Chuzhoy:2012:RUG,
  author =       "Julia Chuzhoy",
  title =        "Routing in undirected graphs with constant
                 congestion",
  crossref =     "ACM:2012:SPA",
  pages =        "855--874",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214054",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Given an undirected graph $G = (V,E)$, a collection
                 $(s_1, t_1)$, \ldots{}, $(s_k, t_k)$ of $k$ demand
                 pairs, and an integer $c$, the goal in the Edge
                 Disjoint Paths with Congestion problem is to connect the
                 maximum possible number of demand pairs by paths,
                 so that the maximum load on any edge (called edge
                 congestion) does not exceed $c$. We show an efficient
                 randomized algorithm that routes $\Omega({\rm OPT} /
                 \poly \log k)$ demand pairs with congestion at most
                 $14$, where OPT is the maximum number of pairs that can
                 be simultaneously routed on edge-disjoint paths. The
                 best previous algorithm that routed $\Omega({\rm OPT} /
                 \poly \log n)$ pairs required congestion $\poly(\log
                 \log n)$, and for the setting where the maximum allowed
                 congestion is bounded by a constant $c$, the best
                 previous algorithms could only guarantee the routing of
                 OPT / $n^{O(1/c)}$ pairs. We also introduce a new type
                 of vertex sparsifiers that we call integral flow
                 sparsifiers, which approximately preserve both
                 fractional and integral routings, and show an algorithm
                 to construct such sparsifiers.",
  acknowledgement = ack-nhfb,
}

@InProceedings{An:2012:ICA,
  author =       "Hyung-Chan An and Robert Kleinberg and David B.
                 Shmoys",
  title =        "Improving {Christofides}' algorithm for the $s$-$t$
                 path {TSP}",
  crossref =     "ACM:2012:SPA",
  pages =        "875--886",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214055",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We present a deterministic $(1 + \sqrt
                 5/2)$-approximation algorithm for the $s$-$t$ path TSP
                 for an arbitrary metric. Given a symmetric metric cost
                 on $n$ vertices including two prespecified endpoints,
                 the problem is to find a shortest Hamiltonian path
                 between the two endpoints; Hoogeveen showed that the
                 natural variant of Christofides' algorithm is a
                 $5/3$-approximation algorithm for this problem, and
                 this asymptotically tight bound in fact had been the
                 best approximation ratio known until now. We modify
                 this algorithm so that it chooses the initial spanning
                 tree based on an optimal solution to the Held--Karp
                 relaxation rather than a minimum spanning tree; we
                 prove this simple but crucial modification leads to an
                 improved approximation ratio, surpassing the
                 20-year-old barrier set by the natural Christofides'
                 algorithm variant. Our algorithm also proves an upper
                 bound of $(1 + \sqrt 5) / 2$ on the integrality gap of the
                 path-variant Held--Karp relaxation. The techniques
                 devised in this paper can be applied to other
                 optimization problems as well: these applications
                 include improved approximation algorithms and improved
                 LP integrality gap upper bounds for the
                 prize-collecting $s$-$t$ path problem and the
                 unit-weight graphical metric $s$-$t$ path TSP.",
  acknowledgement = ack-nhfb,
}
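
%%% For orientation, a pure-Python skeleton of the classical Christofides
%%% heuristic that the paper modifies: spanning tree, parity correction,
%%% Euler tour, shortcut.  Two hedges: the paper's change is to draw the
%%% initial tree from a Held--Karp LP solution instead of the MST below,
%%% and the parity-correction step here uses a greedy matching as a
%%% stand-in for the true minimum-weight perfect matching.
%%%
%%%     def christofides_sketch(n, dist):
%%%         # 1. minimum spanning tree (Prim), stored as a multigraph
%%%         in_tree, adj = {0}, {v: [] for v in range(n)}
%%%         while len(in_tree) < n:
%%%             u, v = min(((u, v) for u in in_tree
%%%                         for v in range(n) if v not in in_tree),
%%%                        key=lambda e: dist[e[0]][e[1]])
%%%             adj[u].append(v); adj[v].append(u); in_tree.add(v)
%%%         # 2. fix odd-degree vertices (greedily; not a min matching)
%%%         odd = [v for v in range(n) if len(adj[v]) % 2 == 1]
%%%         while odd:
%%%             u = odd.pop()
%%%             v = min(odd, key=lambda w: dist[u][w])
%%%             odd.remove(v); adj[u].append(v); adj[v].append(u)
%%%         # 3. Euler tour of the multigraph, shortcutting repeats
%%%         tour, stack, seen = [], [0], set()
%%%         while stack:
%%%             u = stack[-1]
%%%             if adj[u]:
%%%                 v = adj[u].pop(); adj[v].remove(u); stack.append(v)
%%%             else:
%%%                 stack.pop()
%%%                 if u not in seen:
%%%                     seen.add(u); tour.append(u)
%%%         return tour
%%%
%%%     # four points on a line: the tour visits them in order
%%%     D = [[abs(i - j) for j in range(4)] for i in range(4)]
%%%     print(christofides_sketch(4, D))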

@InProceedings{Williams:2012:MMF,
  author =       "Virginia Vassilevska Williams",
  title =        "Multiplying matrices faster than
                 {Coppersmith--Winograd}",
  crossref =     "ACM:2012:SPA",
  pages =        "887--898",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214056",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We develop an automated approach for designing matrix
                 multiplication algorithms based on constructions
                 similar to the Coppersmith--Winograd construction.
                 Using this approach we obtain a new improved bound on
                 the matrix multiplication exponent $\omega < 2.3727$.",
  acknowledgement = ack-nhfb,
  keywords =     "fast matrix multiplication",
}
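
%%% As a concrete reference point for ``faster than cubic'', Strassen's
%%% recursive construction ($\omega < 2.81$) in numpy; the paper's
%%% Coppersmith--Winograd-style analysis is a far more intricate bilinear
%%% construction, so this is background illustration only.  Assumes
%%% square matrices whose dimension is a power of two.
%%%
%%%     import numpy as np
%%%
%%%     def strassen(A, B, cutoff=64):
%%%         n = A.shape[0]
%%%         if n <= cutoff:
%%%             return A.dot(B)            # fall back to the naive product
%%%         k = n // 2
%%%         A11, A12, A21, A22 = A[:k, :k], A[:k, k:], A[k:, :k], A[k:, k:]
%%%         B11, B12, B21, B22 = B[:k, :k], B[:k, k:], B[k:, :k], B[k:, k:]
%%%         # 7 recursive products in place of the naive 8
%%%         M1 = strassen(A11 + A22, B11 + B22, cutoff)
%%%         M2 = strassen(A21 + A22, B11, cutoff)
%%%         M3 = strassen(A11, B12 - B22, cutoff)
%%%         M4 = strassen(A22, B21 - B11, cutoff)
%%%         M5 = strassen(A11 + A12, B22, cutoff)
%%%         M6 = strassen(A21 - A11, B11 + B12, cutoff)
%%%         M7 = strassen(A12 - A22, B21 + B22, cutoff)
%%%         return np.block([[M1 + M4 - M5 + M7, M3 + M5],
%%%                          [M2 + M4, M1 + M3 - M2 + M6]])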

@InProceedings{Coja-Oglan:2012:CKN,
  author =       "Amin Coja-Oglan and Konstantinos Panagiotou",
  title =        "Catching the {$k$-NAESAT} threshold",
  crossref =     "ACM:2012:SPA",
  pages =        "899--908",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214058",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The best current estimates of the thresholds for the
                 existence of solutions in random constraint
                 satisfaction problems ('CSPs') mostly derive from the
                 first and the second moment method. Yet apart from a
                 very few exceptional cases these methods do not quite
                 yield matching upper and lower bounds. According to
                 deep but non-rigorous arguments from statistical
                 mechanics, this discrepancy is due to a change in the
                 geometry of the set of solutions called condensation
                 that occurs shortly before the actual threshold for the
                 existence of solutions (Krzakala, Montanari,
                 Ricci-Tersenghi, Semerjian, Zdeborov{\'a}: PNAS~2007). To
                 cope with condensation, physicists have developed a
                 sophisticated but non-rigorous formalism called Survey
                 Propagation (M{\'e}zard, Parisi, Zecchina: Science 2002).
                 This formalism yields precise conjectures on the
                 threshold values of many random CSPs. Here we develop a
                 new Survey Propagation inspired second moment method
                 for the random $k$-NAESAT problem, which is one of the
                 standard benchmark problems in the theory of random
                 CSPs. This new technique allows us to overcome the
                 barrier posed by condensation rigorously. We prove that
                 the threshold for the existence of solutions in random
                 $k$-NAESAT is $2^{k - 1} \ln 2 - (\ln 2 / 2 + 1/4) +
                 \epsilon_k$, where $|\epsilon_k| \leq 2^{-(1 -
                 o_k(1)) k}$, thereby verifying the statistical mechanics
                 conjecture for this problem.",
  acknowledgement = ack-nhfb,
}
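
%%% A quick numeric companion to the abstract: a random assignment
%%% NAE-satisfies a $k$-clause unless its literals are all equal, which
%%% happens with probability $2^{1-k}$, so E[\#solutions] =
%%% $2^n (1 - 2^{1-k})^m$ and the first-moment method bounds the
%%% threshold density by $\ln 2 / (-\ln(1 - 2^{1-k}))$.  The script
%%% compares this with the threshold formula proved in the paper; the
%%% gap tends to $1/4$.
%%%
%%%     import math
%%%
%%%     for k in (4, 6, 8, 10, 12):
%%%         first_moment = math.log(2) / -math.log(1 - 2.0 ** (1 - k))
%%%         proved = 2 ** (k - 1) * math.log(2) - (math.log(2) / 2 + 0.25)
%%%         print(k, round(first_moment, 4), round(proved, 4))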

@InProceedings{Cai:2012:CCC,
  author =       "Jin-Yi Cai and Xi Chen",
  title =        "Complexity of counting {CSP} with complex weights",
  crossref =     "ACM:2012:SPA",
  pages =        "909--920",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214059",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We give a complexity dichotomy theorem for the
                 counting constraint satisfaction problem (\#CSP for
                 short) with algebraic complex weights. To this end, we
                 give three conditions for its tractability. Let $F$ be
                 any finite set of complex-valued functions. We show
                 that \#CSP($F$) is solvable in polynomial time if all
                 three conditions are satisfied; and is \#P-hard
                 otherwise. Our dichotomy theorem generalizes a long
                 series of important results on counting problems: (a)
                 the problem of counting graph homomorphisms is the
                 special case when $F$ has a single symmetric binary
                 function; (b) the problem of counting directed graph
                 homomorphisms is the special case when $F$ has a single
                 but not-necessarily-symmetric binary function; and (c)
                 the unweighted form of \#CSP is when all functions in
                 $F$ take values in $\{0, 1\}$.",
  acknowledgement = ack-nhfb,
}
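
%%% Special case (a) of the dichotomy can be made concrete with a tiny
%%% brute-force evaluator for weighted \#CSP($F$): sum, over all
%%% assignments, the product of the applied constraint functions.  With
%%% the single symmetric 0/1-valued binary function below, the partition
%%% function counts independent sets.  Exponential-time illustration
%%% only, unrelated to the paper's tractability conditions.
%%%
%%%     from itertools import product
%%%
%%%     def csp_partition_function(n, domain, constraints):
%%%         """constraints: list of (function, tuple of variable indices)."""
%%%         total = 0
%%%         for sigma in product(domain, repeat=n):
%%%             w = 1
%%%             for f, scope in constraints:
%%%                 w *= f(*(sigma[i] for i in scope))
%%%             total += w
%%%         return total
%%%
%%%     F = lambda x, y: 0 if x and y else 1    # symmetric binary function
%%%     path = [(F, (0, 1)), (F, (1, 2))]       # the 3-vertex path
%%%     print(csp_partition_function(3, (0, 1), path))  # 5 independent sets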

@InProceedings{Molloy:2012:FTK,
  author =       "Michael Molloy",
  title =        "The freezing threshold for $k$-colourings of a random
                 graph",
  crossref =     "ACM:2012:SPA",
  pages =        "921--930",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214060",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We rigorously determine the exact freezing threshold,
                 $r_k^f$, for $k$-colourings of a random graph. We prove
                 that for random graphs with density above $r_k^f$,
                 almost every colouring is such that a linear number of
                 variables are {\em frozen}, meaning that their colours
                 cannot be changed by a sequence of alterations whereby
                 we change the colours of $o(n)$ vertices at a time,
                 always obtaining another proper colouring. When the
                 density is below $r_k^f$, then almost every colouring
                 has at most $o(n)$ frozen variables. This confirms
                 hypotheses made using the non-rigorous cavity
                 method.\par

                 It has been hypothesized that the freezing threshold is
                 the cause of the ``algorithmic barrier'', the long
                 observed phenomenon that when the edge-density of a
                 random graph exceeds $(1/2) k \ln k \cdot (1 + o_k(1))$, no
                 algorithms are known to find $k$-colourings, despite
                 the fact that this density is only half the
                 $k$-colourability threshold.\par

                 We also show that $r_k^f$ is the threshold of a strong
                 form of reconstruction for $k$-colourings of the
                 Galton--Watson tree, and of the graphical model.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Barto:2012:RSC,
  author =       "Libor Barto and Marcin Kozik",
  title =        "Robust satisfiability of constraint satisfaction
                 problems",
  crossref =     "ACM:2012:SPA",
  pages =        "931--940",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214061",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "An algorithm for a constraint satisfaction problem is
                 called robust if it outputs an assignment satisfying at
                 least $(1 - g(\epsilon))$-fraction of the constraints
                 given a $(1 - \epsilon)$-satisfiable instance, where
                 $g(\epsilon) \to 0$ as $\epsilon \to 0$, $g(0) = 0$.
                 Guruswami and Zhou conjectured a characterization of
                 constraint languages for which the corresponding
                 constraint satisfaction problem admits an efficient
                 robust algorithm. This paper confirms their
                 conjecture.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Woodruff:2012:TBD,
  author =       "David P. Woodruff and Qin Zhang",
  title =        "Tight bounds for distributed functional monitoring",
  crossref =     "ACM:2012:SPA",
  pages =        "941--960",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214063",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We resolve several fundamental questions in the area
                 of distributed functional monitoring, initiated by
                 Cormode, Muthukrishnan, and Yi (SODA, 2008), and
                 receiving recent attention. In this model there are $k$
                 sites each tracking their input streams and
                 communicating with a central coordinator. The
                 coordinator's task is to continuously maintain an
                 approximate output to a function computed over the
                 union of the $k$ streams. The goal is to minimize the
                 number of bits communicated. Let the $p$-th frequency
                 moment be defined as $F_p = \sum_i f_i^p$, where
                 $f_i$ is the frequency of element $i$. We show the
                 randomized communication complexity of estimating the
                 number of distinct elements (that is, $F_0$) up to a $1
                 + \epsilon$ factor is $\Omega(k / \epsilon^2)$,
                 improving upon the previous $\Omega(k + 1/ \epsilon^2)$
                 bound and matching known upper bounds. For $F_p$, $p >
                 1$, we improve the previous $\Omega(k + 1/ \epsilon^2)$
                 communication bound to $\Omega(k^{p - 1} /
                 \epsilon^2)$. We obtain similar improvements for heavy
                 hitters, empirical entropy, and other problems. Our
                 lower bounds are the first of any kind in distributed
                 functional monitoring to depend on the product of $k$
                 and $1 / \epsilon^2$. Moreover, the lower bounds are
                 for the static version of the distributed functional
                 monitoring model where the coordinator only needs to
                 compute the function at the time when all $k$ input
                 streams end; surprisingly they almost match what is
                 achievable in the (dynamic version of) distributed
                 functional monitoring model where the coordinator needs
                 to keep track of the function continuously at any time
                 step. We also show that we can estimate $F_p$, for any
                 $p > 1$, using $O(k^{p - 1} \poly(\epsilon^{-1}))$
                 communication. This drastically improves upon the
                 previous $O(k^{2 p + 1} N^{1 - 2/p}
                 \poly(\epsilon^{-1}))$ bound of Cormode, Muthukrishnan,
                 and Yi for general $p$, and their $O(k^2 / \epsilon +
                 k^{1.5} / \epsilon^3)$ bound for $p = 2$. For $p = 2$,
                 our bound resolves their main open question. Our lower
                 bounds are based on new direct sum theorems for
                 approximate majority, and yield improvements to
                 classical problems in the standard data stream model.
                 First, we improve the known lower bound for estimating
                 $F_p$, $p > 2$, in $t$ passes from $\Omega(n^{1 - 2 /
                 p} /(\epsilon^{2 / p} t))$ to $\Omega(n^{1 - 2 / p}
                 /(\epsilon^{4 / p} t))$, giving the first bound that
                 matches what we expect when $p = 2$ for any constant
                 number of passes. Second, we give the first lower bound
                 for estimating $F_0$ in $t$ passes with $\Omega(1 /
                 (\epsilon^2 t))$ bits of space that does not use the
                 hardness of the gap-Hamming problem.",
  acknowledgement = ack-nhfb,
}
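
%%% The quantity being monitored, for reference: the $p$-th frequency
%%% moment of a stream.  A direct (offline, single-site) computation in
%%% Python; the paper's subject is how much communication $k$ sites need
%%% to approximate this jointly.
%%%
%%%     from collections import Counter
%%%
%%%     def frequency_moment(stream, p):
%%%         freqs = Counter(stream)
%%%         if p == 0:
%%%             return len(freqs)        # F_0 = number of distinct items
%%%         return sum(f ** p for f in freqs.values())
%%%
%%%     stream = [1, 2, 2, 3, 3, 3]
%%%     print(frequency_moment(stream, 0), frequency_moment(stream, 2))
%%%     # 3 14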

@InProceedings{Censor-Hillel:2012:GCP,
  author =       "Keren Censor-Hillel and Bernhard Haeupler and Jonathan
                 Kelner and Petar Maymounkov",
  title =        "Global computation in a poorly connected world: fast
                 rumor spreading with no dependence on conductance",
  crossref =     "ACM:2012:SPA",
  pages =        "961--970",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214064",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "In this paper, we study the question of how
                 efficiently a collection of interconnected nodes can
                 perform a global computation in the GOSSIP model of
                 communication. In this model, nodes do not know the
                 global topology of the network, and they may only
                 initiate contact with a single neighbor in each round.
                 This model contrasts with the much less restrictive
                 LOCAL model, where a node may simultaneously
                 communicate with all of its neighbors in a single
                 round. A basic question in this setting is how many
                 rounds of communication are required for the
                 information dissemination problem, in which each node
                 has some piece of information and is required to
                 collect all others. In the LOCAL model, this is quite
                 simple: each node broadcasts all of its information in
                 each round, and the number of rounds required will be
                 equal to the diameter of the underlying communication
                 graph. In the GOSSIP model, each node must
                 independently choose a single neighbor to contact, and
                 the lack of global information makes it difficult to
                 make any sort of principled choice. As such,
                 researchers have focused on the uniform gossip
                 algorithm, in which each node independently selects a
                 neighbor uniformly at random. When the graph is
                 well-connected, this works quite well. In a string of
                 beautiful papers, researchers proved a sequence of
                 successively stronger bounds on the number of rounds
                 required in terms of the conductance $\phi$ and graph
                 size $n$, culminating in a bound of $O(\phi^{-1} \log
                 n)$. In this paper, we show that a fairly simple
                 modification of the protocol gives an algorithm that
                 solves the information dissemination problem in at most
                 $O(D + \polylog(n))$ rounds in a network of diameter
                 $D$, with no dependence on the conductance. This is at
                 most an additive polylogarithmic factor from the
                 trivial lower bound of $D$, which applies even in the
                 LOCAL model. In fact, we prove that something stronger
                 is true: any algorithm that requires $T$ rounds in the
                 LOCAL model can be simulated in $O(T + \polylog(n))$
                 rounds in the GOSSIP model. We thus prove that these
                 two models of distributed computation are essentially
                 equivalent.",
  acknowledgement = ack-nhfb,
}
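
%%% A toy simulation of the baseline uniform gossip protocol discussed
%%% above: in each round every node contacts one uniformly random
%%% neighbor and the pair merge what they know (push-pull).  Nodes are
%%% updated sequentially within a round for simplicity; this sketches the
%%% GOSSIP model's single-contact restriction, not the paper's modified
%%% protocol.
%%%
%%%     import random
%%%
%%%     def uniform_gossip_rounds(adj):
%%%         n = len(adj)
%%%         know = [{v} for v in range(n)]  # each node starts with its rumor
%%%         rounds = 0
%%%         while any(len(s) < n for s in know):
%%%             rounds += 1
%%%             for v in range(n):
%%%                 u = random.choice(adj[v])       # one contact per round
%%%                 know[v] = know[u] = know[v] | know[u]
%%%         return rounds
%%%
%%%     cycle = [[(v - 1) % 8, (v + 1) % 8] for v in range(8)]
%%%     print(uniform_gossip_rounds(cycle))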

@InProceedings{Bansal:2012:TTS,
  author =       "Nikhil Bansal and Vibhor Bhatt and Prasad Jayanti and
                 Ranganath Kondapally",
  title =        "Tight time-space tradeoff for mutual exclusion",
  crossref =     "ACM:2012:SPA",
  pages =        "971--982",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214065",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Mutual Exclusion is a fundamental problem in
                 distributed computing, and the problem of proving upper
                 and lower bounds on the RMR complexity of this problem
                 has been extensively studied. Here, we give matching
                 lower and upper bounds on how RMR complexity trades off
                 with space. Two implications of our results are that
                 constant RMR complexity is impossible with
                 subpolynomial space and subpolynomial RMR complexity is
                 impossible with constant space for cache-coherent
                 multiprocessors, regardless of how strong the hardware
                 synchronization operations are. To prove these results
                 we show that the complexity of mutual exclusion, which
                 can be ``messy'' to analyze because of system details
                 such as asynchrony and cache coherence, is captured
                 precisely by a simple and purely combinatorial game
                 that we design. We then derive lower and upper bounds
                 for this game, thereby obtaining corresponding bounds
                 for mutual exclusion. The lower bounds for the game are
                 proved using potential functions.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Giakkoupis:2012:TRL,
  author =       "George Giakkoupis and Philipp Woelfel",
  title =        "A tight {RMR} lower bound for randomized mutual
                 exclusion",
  crossref =     "ACM:2012:SPA",
  pages =        "983--1002",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214066",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The Cache Coherent (CC) and the Distributed Shared
                 Memory (DSM) models are standard shared memory models,
                 and the Remote Memory Reference (RMR) complexity is
                 considered to accurately predict the actual performance
                 of mutual exclusion algorithms in shared memory
                 systems. In this paper we prove a tight lower bound for
                 the RMR complexity of deadlock-free randomized mutual
                 exclusion algorithms in both the CC and the DSM model,
                 with atomic registers and compare \& swap objects,
                 against an adaptive adversary. Our lower bound
                 establishes that
                 an adaptive adversary can schedule $n$ processes in
                 such a way that each enters the critical section once,
                 and the total number of RMRs is $\Omega(n \log n/\log
                 \log n)$ in expectation. This matches an upper bound of
                 Hendler and Woelfel (2011).",
  acknowledgement = ack-nhfb,
}

@InProceedings{Garg:2012:CPA,
  author =       "Jugal Garg and Ruta Mehta and Milind Sohoni and Vijay
                 V. Vazirani",
  title =        "A complementary pivot algorithm for markets under
                 separable, piecewise-linear concave utilities",
  crossref =     "ACM:2012:SPA",
  pages =        "1003--1016",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214068",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Using the powerful machinery of the linear
                 complementarity problem and Lemke's algorithm, we give
                 a practical algorithm for computing an equilibrium for
                 Arrow--Debreu markets under separable, piecewise-linear
                 concave (SPLC) utilities, despite the PPAD-completeness
                 of this case. As a corollary, we obtain the first
                 elementary proof of existence of equilibrium for this
                 case, i.e., without using fixed point theorems. In
                 1975, Eaves [10] had given such an algorithm for the
                 case of linear utilities and had asked for an extension
                 to the piecewise-linear, concave utilities. Our result
                 settles the relevant subcase of his problem as well as
                 the problem of Vazirani and Yannakakis of obtaining a
                 path following algorithm for SPLC markets, thereby
                 giving a direct proof of membership of this case in
                 PPAD. We also prove that SPLC markets have an odd
                 number of equilibria (up to scaling), hence matching
                 the classical result of Shapley about 2-Nash equilibria
                 [24], which was based on the Lemke--Howson algorithm.
                 For the linear case, Eaves had asked for a
                 combinatorial interpretation of his algorithm. We
                 provide this and it yields a particularly simple proof
                 of the fact that the set of equilibrium prices is
                 convex.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Azar:2012:RP,
  author =       "Pablo Daniel Azar and Silvio Micali",
  title =        "Rational proofs",
  crossref =     "ACM:2012:SPA",
  pages =        "1017--1028",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214069",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We study a new type of proof system, where an
                 unbounded prover and a polynomial time verifier
                 interact, on inputs a string $x$ and a function $f$, so
                 that the Verifier may learn $f(x)$. The novelty of our
                 setting is that there no longer are ``good'' or
                 ``malicious'' provers, but only rational ones. In
                 essence, the Verifier has a budget $c$ and gives the
                 Prover a reward $r \in [0,c]$ determined by the
                 transcript of their interaction; the prover wishes to
                 maximize his expected reward; and his reward is
                 maximized only if he the verifier correctly learns
                 $f(x)$. Rational proof systems are as powerful as their
                 classical counterparts for polynomially many rounds of
                 interaction, but are much more powerful when we only
                 allow a constant number of rounds. Indeed, we prove
                 that if $f \in \#P$, then $f$ is computable by a
                 one-round rational Merlin--Arthur game, where, on input
                 $x$, Merlin's single message actually consists of
                 sending just the value $f(x)$. Further, we prove that
                 CH, the counting hierarchy, coincides with the class of
                 languages computable by a constant-round rational
                 Merlin--Arthur game. Our results rely on a basic and
                 crucial connection between rational proof systems and
                 proper scoring rules, a tool developed to elicit
                 truthful information from experts.",
  acknowledgement = ack-nhfb,
}
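
%%% The proper-scoring-rule connection can be seen in miniature with the
%%% quadratic (Brier) rule: an expert who believes a binary event has
%%% probability $p$ maximizes expected score exactly by reporting $p$,
%%% which is what lets a reward schedule force a rational prover to be
%%% truthful.  Illustrative numerics only, not the paper's protocol.
%%%
%%%     def brier_score(q, outcome):
%%%         return 1 - (outcome - q) ** 2
%%%
%%%     def expected_score(q, p):
%%%         return p * brier_score(q, 1) + (1 - p) * brier_score(q, 0)
%%%
%%%     p = 0.7
%%%     best = max((q / 100 for q in range(101)),
%%%                key=lambda q: expected_score(q, p))
%%%     print(best)   # 0.7 --- truthful reporting is optimal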

@InProceedings{Abernethy:2012:MOP,
  author =       "Jacob Abernethy and Rafael M. Frongillo and Andre
                 Wibisono",
  title =        "Minimax option pricing meets {Black--Scholes} in the
                 limit",
  crossref =     "ACM:2012:SPA",
  pages =        "1029--1040",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214070",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Option contracts are a type of financial derivative
                 that allow investors to hedge risk and speculate on the
                 variation of an asset's future market price. In short,
                 an option has a particular payout that is based on the
                 market price for an asset on a given date in the
                 future. In 1973, Black and Scholes proposed a valuation
                 model for options that essentially estimates the tail
                 risk of the asset price under the assumption that the
                 price will fluctuate according to geometric Brownian
                 motion. A key element of their analysis is that the
                 investor can ``hedge'' the payout of the option by
                 continuously buying and selling the asset depending on
                 the price fluctuations. More recently, DeMarzo et al.
                 proposed a more robust valuation scheme which does not
                 require any assumption on the price path; indeed, in
                 their model the asset's price can even be chosen
                 adversarially. This framework can be considered as a
                 sequential two-player zero-sum game between the
                 investor and Nature. We analyze the value of this game
                 in the limit, where the investor can trade at smaller
                 and smaller time intervals. Under weak assumptions on
                 the actions of Nature (an adversary), we show that the
                 minimax option price asymptotically approaches exactly
                 the Black--Scholes valuation. The key piece of our
                 analysis is showing that Nature's minimax optimal dual
                 strategy converges to geometric Brownian motion in the
                 limit.",
  acknowledgement = ack-nhfb,
}
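
%%% The limiting value referred to above is the classical Black--Scholes
%%% price of a European call; its closed form, for reference, with the
%%% standard parameters (spot $S$, strike $K$, maturity $T$, rate $r$,
%%% volatility $\sigma$):
%%%
%%%     from math import log, sqrt, exp, erf
%%%
%%%     def norm_cdf(x):
%%%         return 0.5 * (1 + erf(x / sqrt(2)))
%%%
%%%     def black_scholes_call(S, K, T, r, sigma):
%%%         d1 = (log(S / K) + (r + sigma ** 2 / 2) * T) / (sigma * sqrt(T))
%%%         d2 = d1 - sigma * sqrt(T)
%%%         return S * norm_cdf(d1) - K * exp(-r * T) * norm_cdf(d2)
%%%
%%%     print(black_scholes_call(S=100, K=100, T=1.0, r=0.0, sigma=0.2))
%%%     # approximately 7.97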

@InProceedings{Mossel:2012:QGS,
  author =       "Elchanan Mossel and Mikl{\'o}s Z. R{\'a}cz",
  title =        "A quantitative {Gibbard--Satterthwaite} theorem
                 without neutrality",
  crossref =     "ACM:2012:SPA",
  pages =        "1041--1060",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214071",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Recently, quantitative versions of the
                 Gibbard--Satterthwaite theorem were proven for $k = 3$
                 alternatives by Friedgut, Kalai, Keller and Nisan and
                 for neutral functions on $k \geq 4$ alternatives by
                 Isaksson, Kindler and Mossel. In the present paper we
                 prove a quantitative version of the
                 Gibbard--Satterthwaite theorem for general social
                 choice functions for any number $k \geq 3$ of
                 alternatives. In particular we show that for a social
                 choice function $f$ on $k \geq 3$ alternatives and $n$
                 voters, which is $\epsilon$-far from the family of
                 nonmanipulable functions, a uniformly chosen voter
                 profile is manipulable with probability at least
                 inverse polynomial in $n$, $k$, and $\epsilon^{-1}$.
                 Removing the neutrality assumption of previous theorems
                 is important for multiple reasons. For one, it is known
                 that there is a conflict between anonymity and
                 neutrality, and since most common voting rules are
                 anonymous, they cannot always be neutral. Second,
                 virtual elections are used in many applications in
                 artificial intelligence, where there are often
                 restrictions on the outcome of the election, and so
                 neutrality is not a natural assumption in these
                 situations. Ours is a unified proof which in particular
                 covers all previous cases established before. The proof
                 crucially uses reverse hypercontractivity in addition
                 to several ideas from the two previous proofs. Much of
                 the work is devoted to understanding functions of a
                 single voter, and in particular we also prove a
                 quantitative Gibbard--Satterthwaite theorem for one
                 voter.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Bourgain:2012:ME,
  author =       "Jean Bourgain and Amir Yehudayoff",
  title =        "Monotone expansion",
  crossref =     "ACM:2012:SPA",
  pages =        "1061--1078",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214073",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "This work presents an explicit construction of a
                 family of monotone expanders, which are bi-partite
                 expander graphs whose edge-set is defined by (partial)
                 monotone functions. The family is essentially defined
                 by the M{\"o}bius action of ${\rm SL}_2(R)$, the group
                 of $2 \times 2$ matrices with determinant one, on the
                 interval $[0, 1]$. No other proof-of-existence for
                 monotone expanders is known, not even using the
                 probabilistic method. The proof extends recent results
                 on finite/compact groups to the non-compact scenario.
                 Specifically, we show a product-growth theorem for
                 ${\rm SL}_2(R)$; roughly, that for every $A \subset
                 {\rm SL}_2(R)$ with certain properties, the size of
                 $AAA$ is much larger than that of $A$. We mention two
                 applications of this construction: Dvir and Shpilka
                 showed that it yields a construction of explicit
                 dimension expanders, which are a generalization of
                 standard expander graphs. Dvir and Wigderson proved
                 that it yields the existence of explicit pushdown
                 expanders, which are graphs that arise in Turing
                 machine simulations.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Alon:2012:NCG,
  author =       "Noga Alon and Ankur Moitra and Benny Sudakov",
  title =        "Nearly complete graphs decomposable into large induced
                 matchings and their applications",
  crossref =     "ACM:2012:SPA",
  pages =        "1079--1090",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214074",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We describe two constructions of (very) dense graphs
                 which are edge disjoint unions of large {\em induced\/}
                 matchings. The first construction exhibits graphs on
                 $N$ vertices with ${N \choose 2} - o(N^2)$ edges, which
                 can be decomposed into pairwise disjoint induced
                 matchings, each of size $N^{1 - o(1)}$. The second
                 construction provides a covering of all edges of the
                 complete graph $K_N$ by two graphs, each being the edge
                 disjoint union of at most $N^{2 - \delta}$ induced
                 matchings, where $\delta > 0.076$. This disproves (in a
                 strong form) a conjecture of Meshulam, substantially
                 improves a result of Birk, Linial and Meshulam on
                 communicating over a shared channel, and (slightly)
                 extends the analysis of H{\aa}stad and Wigderson of the
                 graph test of Samorodnitsky and Trevisan for linearity.
                 Additionally, our constructions settle a combinatorial
                 question of Vempala regarding a candidate rounding
                 scheme for the directed Steiner tree problem.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kuperberg:2012:PER,
  author =       "Greg Kuperberg and Shachar Lovett and Ron Peled",
  title =        "Probabilistic existence of rigid combinatorial
                 structures",
  crossref =     "ACM:2012:SPA",
  pages =        "1091--1106",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214075",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We show the existence of rigid combinatorial objects
                 which previously were not known to exist. Specifically,
                 for a wide range of the underlying parameters, we show
                 the existence of non-trivial orthogonal arrays,
                 $t$-designs, and $t$-wise permutations. In all cases,
                 the sizes of the objects are optimal up to polynomial
                 overhead. The proof of existence is probabilistic. We
                 show that a randomly chosen such object has the
                 required properties with positive yet tiny probability.
                 The main technical ingredient is a special local
                 central limit theorem for suitable lattice random walks
                 with finitely many steps.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Dobzinski:2012:QCC,
  author =       "Shahar Dobzinski and Jan Vondrak",
  title =        "From query complexity to computational complexity",
  crossref =     "ACM:2012:SPA",
  pages =        "1107--1116",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214076",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We consider submodular optimization problems, and
                 provide a general way of translating oracle
                 inapproximability results arising from the symmetry gap
                 technique to computational complexity inapproximability
                 results, where the submodular function is given
                 explicitly (under the assumption that NP $\not=$ RP).
                 Applications of our technique include an optimal
                 computational hardness of $(1/2 +
                 \epsilon)$-approximation for maximizing a symmetric
                 nonnegative submodular function, an optimal hardness of
                 $(1 - (1 - 1 / k)^k + \epsilon)$-approximation for
                 welfare maximization in combinatorial auctions with $k$
                 submodular bidders (for constant $k$), super-constant
                 hardness for maximizing a nonnegative submodular
                 function over matroid bases, and tighter bounds for
                 maximizing a monotone submodular function subject to a
                 cardinality constraint. Unlike the vast majority of
                 computational inapproximability results, our approach
                 does not use the PCP machinery or the Unique Games
                 Conjecture, but relies instead on a direct reduction
                 from Unique-SAT using list-decodable codes.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Lee:2012:MWS,
  author =       "James R. Lee and Shayan Oveis Gharan and Luca
                 Trevisan",
  title =        "Multi-way spectral partitioning and higher-order
                 {Cheeger} inequalities",
  crossref =     "ACM:2012:SPA",
  pages =        "1117--1130",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214078",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "A basic fact in spectral graph theory is that the
                 number of connected components in an undirected graph
                 is equal to the multiplicity of the eigenvalue zero in
                 the Laplacian matrix of the graph. In particular, the
                 graph is disconnected if and only if there are at least
                 two eigenvalues equal to zero. Cheeger's inequality and
                 its variants provide an approximate version of the
                 latter fact; they state that a graph has a sparse cut
                 if and only if there are at least two eigenvalues that
                 are close to zero. It has been conjectured that an
                 analogous characterization holds for higher
                 multiplicities, i.e., there are $k$ eigenvalues close
                 to zero if and only if the vertex set can be
                 partitioned into $k$ subsets, each defining a sparse
                 cut. We resolve this conjecture. Our result provides a
                 theoretical justification for clustering algorithms
                 that use the bottom $k$ eigenvectors to embed the
                 vertices into $\mathbb{R}^k$, and then apply geometric
                 considerations to the embedding. We also show that
                 these techniques yield a nearly optimal quantitative
                 connection between the expansion of sets of size
                 $\approx n/k$ and $\lambda_k$, the $k$th smallest
                 eigenvalue of the normalized Laplacian, where $n$ is
                 the number of vertices. In particular, we show that in
                 every graph there are at least $k / 2$ disjoint sets
                 (one of which will have size at most $2 n / k$), each
                 having expansion at most $O(\sqrt{\lambda_k \log k})$.
                 Louis, Raghavendra, Tetali, and Vempala have
                 independently proved a slightly weaker version of this
                 last result [LRTV12]. The $\sqrt{\log k}$ bound is
                 tight, up to constant factors, for the ``noisy
                 hypercube'' graphs.",
  acknowledgement = ack-nhfb,
}
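
%%% The clustering pipeline the theorem justifies, in a minimal numpy
%%% sketch: embed vertices by the bottom $k$ eigenvectors of the
%%% normalized Laplacian, then split the embedding geometrically (a
%%% bare-bones 2-means here).  A toy two-component input, not a tuned
%%% implementation.
%%%
%%%     import numpy as np
%%%
%%%     def spectral_embedding(A, k):
%%%         d = A.sum(axis=1)
%%%         Dis = np.diag(1.0 / np.sqrt(d))
%%%         L = np.eye(len(A)) - Dis.dot(A).dot(Dis)  # normalized Laplacian
%%%         eigvals, eigvecs = np.linalg.eigh(L)
%%%         return eigvecs[:, :k]                     # bottom k eigenvectors
%%%
%%%     def two_means(X, iters=10):
%%%         c0 = X[0]                                 # seed: first point and
%%%         c1 = X[((X - c0) ** 2).sum(axis=1).argmax()]  # farthest from it
%%%         for _ in range(iters):
%%%             labels = (((X - c1) ** 2).sum(1) <
%%%                       ((X - c0) ** 2).sum(1)).astype(int)
%%%             c0, c1 = X[labels == 0].mean(0), X[labels == 1].mean(0)
%%%         return labels
%%%
%%%     A = np.zeros((6, 6))                          # two disjoint triangles
%%%     for i, j in [(0, 1), (1, 2), (0, 2), (3, 4), (4, 5), (3, 5)]:
%%%         A[i, j] = A[j, i] = 1
%%%     print(two_means(spectral_embedding(A, 2)))    # e.g. [0 0 0 1 1 1]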

@InProceedings{Louis:2012:MSC,
  author =       "Anand Louis and Prasad Raghavendra and Prasad Tetali
                 and Santosh Vempala",
  title =        "Many sparse cuts via higher eigenvalues",
  crossref =     "ACM:2012:SPA",
  pages =        "1131--1140",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214079",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Cheeger's fundamental inequality states that any
                 edge-weighted graph has a vertex subset $S$ such that
                 its expansion (a.k.a. conductance) is bounded as
                 follows:\par

                 $$\phi(S) \stackrel{\rm def}{=} w(S, \bar{S}) / \min
                 \{w(S), w(\bar{S})\} \leq \sqrt{2 \lambda_2}$$\par

                 where $w$ is the total edge weight of a subset or a cut
                 and $\lambda_2$ is the second smallest eigenvalue of
                 the normalized Laplacian of the graph. Here we prove
                 the following natural generalization: for any integer
                 $k \in [n]$, there exist $c k$ disjoint subsets $S_1$,
                 \ldots{}, $S_{c k}$, such that\par

                 $$\max_i \phi(S_i) \leq C \sqrt{\lambda_k \log
                 k}$$\par

                 where $\lambda_k$ is the $k$th smallest eigenvalue of
                 the normalized Laplacian and $c < 1$, $C > 0$ are
                 suitable absolute constants. Our proof is via a
                 polynomial-time algorithm to find such subsets,
                 consisting of a spectral projection and a randomized
                 rounding. As a consequence, we get the same upper bound
                 for the small set expansion problem, namely for any
                 $k$, there is a subset $S$ whose weight is at most a
                 $O(1/k)$ fraction of the total weight and $\phi(S) \leq
                 C \sqrt{\lambda_k \log k}$. Both results are the best
                 possible up to constant factors.\par

                 The underlying algorithmic problem, namely finding $k$
                 subsets such that the maximum expansion is minimized,
                 besides extending sparse cuts to more than one subset,
                 appears to be a natural clustering problem in its own
                 right.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Orecchia:2012:AEL,
  author =       "Lorenzo Orecchia and Sushant Sachdeva and Nisheeth K.
                 Vishnoi",
  title =        "Approximating the exponential, the {Lanczos} method
                 and an {$\tilde{O}(m)$}-time spectral algorithm for
                 balanced separator",
  crossref =     "ACM:2012:SPA",
  pages =        "1141--1160",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214080",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We give a novel spectral approximation algorithm for
                 the balanced (edge-)separator problem that, given a
                 graph $G$, a constant balance $b \in (0,1/2]$, and a
                 parameter $\gamma$, either finds an
                 $\Omega(b)$-balanced cut of conductance $O(\sqrt
                 \gamma)$ in $G$, or outputs a certificate that all
                 $b$-balanced cuts in $G$ have conductance at least
                 $\gamma$, and runs in time $\tilde{O}(m)$. This settles
                 the question of designing asymptotically optimal
                 spectral algorithms for balanced separator. Our
                 algorithm relies on a variant of the heat kernel random
                 walk and requires, as a subroutine, an algorithm to
                 compute $\exp(-L) v$ where $L$ is the Laplacian of a
                 graph related to $G$ and $v$ is a vector. Algorithms
                 for computing the matrix-exponential-vector product
                 efficiently comprise our next set of results. Our main
                 result here is a new algorithm which computes a good
                 approximation to $\exp(-A) v$ for a class of symmetric
                 positive semidefinite (PSD) matrices $A$ and a given
                 vector $v$, in time roughly $\tilde{O}(m_A)$,
                 independent of the norm of $A$, where $m_A$ is the
                 number of non-zero entries of $A$. This uses, in a
                 non-trivial way, the result of Spielman and Teng on
                 inverting symmetric and diagonally-dominant matrices in
                 $\tilde{O}(m_A)$ time. Finally, using old and new
                 uniform approximations to $e^{-x}$ we show how to
                 obtain, via the Lanczos method, a simple algorithm to
                 compute $\exp(-A) v$ for symmetric PSD matrices that
                 runs in time roughly $O(t_A \cdot \sqrt{\norm(A)})$,
                 where $t_A$ is the time required for the computation of
                 the vector $A w$ for a given vector $w$. As an application,
                 we obtain a simple and practical algorithm, with output
                 conductance $O(\sqrt \gamma)$, for balanced separator
                 that runs in time $O(m / \sqrt \gamma)$. This latter
                 algorithm matches the running time, but improves on the
                 approximation guarantee of the Evolving-Sets-based
                 algorithm by Andersen and Peres for balanced
                 separator.",
  acknowledgement = ack-nhfb,
}
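
%%% The Lanczos route to $\exp(-A) v$ sketched in numpy/scipy: build an
%%% orthonormal Krylov basis $Q$, exponentiate the small tridiagonal
%%% projection $T$ exactly, and lift back, so $\exp(-A) v \approx
%%% \norm(v) Q \exp(-T) e_1$.  No reorthogonalization and a fixed
%%% subspace size $j$, so this is a bare illustration of the method named
%%% above, not the paper's algorithm.
%%%
%%%     import numpy as np
%%%     from scipy.linalg import expm
%%%
%%%     def lanczos_expm_vec(A, v, j=30):
%%%         n = len(v)
%%%         Q = np.zeros((n, j))
%%%         alpha, beta = np.zeros(j), np.zeros(j - 1)
%%%         Q[:, 0] = v / np.linalg.norm(v)
%%%         for i in range(j):
%%%             w = A.dot(Q[:, i])
%%%             alpha[i] = Q[:, i].dot(w)
%%%             w -= alpha[i] * Q[:, i]
%%%             if i > 0:
%%%                 w -= beta[i - 1] * Q[:, i - 1]
%%%             if i < j - 1:
%%%                 beta[i] = np.linalg.norm(w)
%%%                 Q[:, i + 1] = w / beta[i]
%%%         T = np.diag(alpha) + np.diag(beta, 1) + np.diag(beta, -1)
%%%         e1 = np.zeros(j); e1[0] = 1.0
%%%         return np.linalg.norm(v) * Q.dot(expm(-T).dot(e1))
%%%
%%%     A = np.diag(np.arange(1.0, 101.0))    # symmetric PSD test matrix
%%%     v = np.ones(100)
%%%     print(np.linalg.norm(lanczos_expm_vec(A, v) - np.exp(-np.diag(A))))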

@InProceedings{Goemans:2012:MIG,
  author =       "Michel X. Goemans and Neil Olver and Thomas
                 Rothvo{\ss} and Rico Zenklusen",
  title =        "Matroids and integrality gaps for hypergraphic
                 {Steiner} tree relaxations",
  crossref =     "ACM:2012:SPA",
  pages =        "1161--1176",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214081",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Until recently, LP relaxations have only played a very
                 limited role in the design of approximation algorithms
                 for the Steiner tree problem. In particular, no
                 (efficiently solvable) Steiner tree relaxation was
                 known to have an integrality gap bounded away from $2$,
                 before Byrka et al. [3] showed an upper bound of
                 $\approx 1.55$ on the integrality gap of a hypergraphic
                 LP relaxation and
                 presented a $\ln(4) + \epsilon \approx 1.39$
                 approximation based on this relaxation. Interestingly,
                 even though their approach is LP based, they do not
                 compare the solution produced against the LP
                 value.\par

                 We take a fresh look at hypergraphic LP relaxations for
                 the Steiner tree problem --- one that heavily exploits
                 methods and results from the theory of matroids and
                 submodular functions --- which leads to stronger
                 integrality gaps, faster algorithms, and a variety of
                 structural insights of independent interest. More
                 precisely, along the lines of the algorithm of Byrka et
                 al.[3], we present a deterministic $\ln(4) + \epsilon$
                 approximation that compares against the LP value and
                 therefore proves a matching $\ln(4)$ upper bound on the
                 integrality gap of hypergraphic
                 relaxations.\par

                 Similarly to [3], we iteratively fix one component and
                 update the LP solution. However, whereas in [3] the LP
                 is solved at every iteration after contracting a
                 component, we show how feasibility can be maintained by
                 a greedy procedure on a well-chosen matroid. Apart from
                 avoiding the expensive step of solving a hypergraphic
                 LP at each iteration, our algorithm can be analyzed
                 using a simple potential function. This potential
                 function gives an easy means to determine stronger
                 approximation guarantees and integrality gaps when
                 considering restricted graph topologies. In particular,
                 this readily leads to a $73/60 \approx 1.217$ upper
                 bound on the integrality gap of hypergraphic
                 relaxations for quasi-bipartite
                 graphs.\par

                 Additionally, for the case of quasi-bipartite graphs,
                 we present a simple algorithm to transform an optimal
                 solution to the bidirected cut relaxation to an optimal
                 solution of the hypergraphic relaxation, leading to a
                 fast $73/60$ approximation for quasi-bipartite graphs.
                 Furthermore, we show how the separation problem of the
                 hypergraphic relaxation can be solved by computing
                 maximum flows, which provides a way to obtain a fast
                 independence oracle for the matroids that we use in our
                 approach.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Brodal:2012:SFH,
  author =       "Gerth St{\o}lting Brodal and George Lagogiannis and
                 Robert E. Tarjan",
  title =        "Strict {Fibonacci} heaps",
  crossref =     "ACM:2012:SPA",
  pages =        "1177--1184",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214082",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We present the first pointer-based heap implementation
                 with time bounds matching those of Fibonacci heaps in
                 the worst case. We support make-heap, insert, find-min,
                 meld and decrease-key in worst-case $O(1)$ time, and
                 delete and delete-min in worst-case $O(\lg n)$ time,
                 where $n$ is the size of the heap. The data structure
                 uses linear space. A previous, very complicated,
                 solution achieving the same time bounds in the RAM
                 model made essential use of arrays and extensive use of
                 redundant counter schemes to maintain balance. Our
                 solution uses neither. Our key simplification is to
                 discard the structure of the smaller heap when doing a
                 meld. We use the pigeonhole principle in place of the
                 redundant counter mechanism.",
  acknowledgement = ack-nhfb,
}
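
%%% The operation set quoted above, mocked up with Python's heapq plus
%%% lazy deletion so the interface is concrete.  This stand-in gives
%%% amortized rather than worst-case bounds and no $O(1)$ meld; it only
%%% shows what insert / find-min / decrease-key / delete-min mean, the
%%% operations strict Fibonacci heaps support with the worst-case
%%% guarantees stated.
%%%
%%%     import heapq
%%%
%%%     class LazyHeap:
%%%         def __init__(self):
%%%             self.pq, self.key = [], {}
%%%         def insert(self, item, key):
%%%             self.key[item] = key
%%%             heapq.heappush(self.pq, (key, item))
%%%         def decrease_key(self, item, key):
%%%             assert key <= self.key[item]
%%%             self.key[item] = key
%%%             heapq.heappush(self.pq, (key, item))  # stale pair left behind
%%%         def delete_min(self):
%%%             while True:
%%%                 key, item = heapq.heappop(self.pq)
%%%                 if self.key.get(item) == key:     # skip stale entries
%%%                     del self.key[item]
%%%                     return item
%%%
%%%     h = LazyHeap()
%%%     h.insert("a", 5); h.insert("b", 3); h.decrease_key("a", 1)
%%%     print(h.delete_min(), h.delete_min())         # a b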

@InProceedings{Bulnek:2012:TLB,
  author =       "Jan Bul{\'a}nek and Michal Kouck{\'y} and Michael
                 Saks",
  title =        "Tight lower bounds for the online labeling problem",
  crossref =     "ACM:2012:SPA",
  pages =        "1185--1198",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214083",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We consider the file maintenance problem (also called
                 the online labeling problem) in which $n$ integer items
                 from the set $\{1, \ldots{}, r\}$ are to be stored in
                 an array of size $m \geq n$. The items are presented
                 sequentially in an arbitrary order, and must be stored
                 in the array in sorted order (but not necessarily in
                 consecutive locations in the array). Each new item must
                 be stored in the array before the next item is
                 received. If $r \leq m$ then we can simply store item
                 $j$ in location $j$ but if $r > m$ then we may have to
                 shift the location of stored items to make space for a
                 newly arrived item. The algorithm is charged each time
                 an item is stored in the array, or moved to a new
                 location. The goal is to minimize the total number of
                 such moves the algorithm has to do. This problem is
                 non-trivial when $n \leq m < r$. In the case that $m =
                 Cn$ for some $C > 1$, algorithms for this problem with
                 cost $O(\log(n)^2)$ per item have been given [Itai et
                 al. (1981), Willard (1992), Bender et al. (2002)]. When
                 $m = n$, algorithms with cost $O(\log(n)^3)$ per item
                 were given [Zhang (1993), Bird and Sadnicki (2007)]. In
                 this paper we prove lower bounds that show that these
                 algorithms are optimal, up to constant factors.
                 Previously, the only lower bound known for this range
                 of parameters was a lower bound of $\Omega(\log(n)^2)$
                 for the restricted class of smooth algorithms [Dietz et
                 al. (2005), Zhang (1993)]. We also provide an algorithm
                 for the sparse case: If the number of items is
                 polylogarithmic in the array size then the problem can
                 be solved in amortized constant time per item.",
  acknowledgement = ack-nhfb,
}
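
%%% The cost model above, made concrete with the naive strategy of
%%% keeping items packed contiguously in sorted order: each arrival pays
%%% one unit to be stored plus one per item shifted.  On a decreasing
%%% sequence this costs about $n^2 / 2$ moves in total, i.e. $\Theta(n)$
%%% per item, against the $O(\log(n)^2)$ per item achievable when
%%% $m = C n$.
%%%
%%%     def naive_labeling_cost(items):
%%%         arr, moves = [], 0
%%%         for x in items:
%%%             i = sum(1 for y in arr if y < x)   # insertion position
%%%             moves += 1 + (len(arr) - i)        # store x, shift the rest
%%%             arr.insert(i, x)
%%%         return moves
%%%
%%%     n = 256
%%%     print(naive_labeling_cost(list(range(n, 0, -1))))   # 32896 moves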

@InProceedings{Abraham:2012:FDA,
  author =       "Ittai Abraham and Shiri Chechik and Cyril Gavoille",
  title =        "Fully dynamic approximate distance oracles for planar
                 graphs via forbidden-set distance labels",
  crossref =     "ACM:2012:SPA",
  pages =        "1199--1218",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214084",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "This paper considers fully dynamic $(1 + \epsilon)$
                 distance oracles and $(1 + \epsilon)$ forbidden-set
                 labeling schemes for planar graphs. For a given
                 $n$-vertex planar graph $G$ with edge weights drawn
                 from $[1,M]$ and parameter $\epsilon > 0$, our
                 forbidden-set labeling scheme uses labels of length
                 $\lambda = O(\epsilon^{-1} \log^2 n \log (n M) \cdot
                 \log n)$. Given the labels of two vertices $s$ and $t$
                 and of a set $F$ of faulty vertices\slash edges, our
                 scheme approximates the distance between $s$ and $t$ in
                 $G \backslash F$ with stretch $(1 + \epsilon)$, in
                 $O(|F|^2 \lambda)$ time.\par

                 We then present a general method to transform $(1 +
                  \epsilon)$ forbidden-set labeling schemes into a fully
                 dynamic $(1 + \epsilon)$ distance oracle. Our fully
                 dynamic $(1 + \epsilon)$ distance oracle is of size
                 $O(n \log n \cdot (\epsilon^{-1} + \log n))$ and has
                  $\tilde{O}(n^{1/2})$ query and update time; both the
                  query and the update time are worst case. This improves
                 on the best previously known $(1 + \epsilon)$ dynamic
                 distance oracle for planar graphs, which has worst case
                 query time $\tilde{O}(n^{2/3})$ and amortized update
                 time of $\tilde{O}(n^{2/3})$.\par

                 Our $(1 + \epsilon)$ forbidden-set labeling scheme can
                 also be extended into a forbidden-set labeled routing
                 scheme with stretch $(1 + \epsilon)$.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Lopez-Alt:2012:FMC,
  author =       "Adriana L{\'o}pez-Alt and Eran Tromer and Vinod
                 Vaikuntanathan",
  title =        "On-the-fly multiparty computation on the cloud via
                 multikey fully homomorphic encryption",
  crossref =     "ACM:2012:SPA",
  pages =        "1219--1234",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214086",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We propose a new notion of secure multiparty
                 computation aided by a computationally-powerful but
                 untrusted ``cloud'' server. In this notion that we call
                 on-the-fly multiparty computation (MPC), the cloud can
                 non-interactively perform arbitrary, dynamically chosen
                 computations on data belonging to arbitrary sets of
                  users chosen on-the-fly. All users' input data and
                 intermediate results are protected from snooping by the
                 cloud as well as other users. This extends the standard
                 notion of fully homomorphic encryption (FHE), where
                 users can only enlist the cloud's help in evaluating
                 functions on their own encrypted data. In on-the-fly
                 MPC, each user is involved only when initially
                 uploading his (encrypted) data to the cloud, and in a
                 final output decryption phase when outputs are
                 revealed; the complexity of both is independent of the
                 function being computed and the total number of users
                 in the system. When users upload their data, they need
                 not decide in advance which function will be computed,
                 nor who they will compute with; they need only
                 retroactively approve the eventually-chosen functions
                 and on whose data the functions were evaluated. This
                 notion is qualitatively the best possible in minimizing
                 interaction, since the users' interaction in the
                 decryption stage is inevitable: we show that removing
                 it would imply generic program obfuscation and is thus
                  impossible. Our contributions are two-fold: --- We show
                 how on-the-fly MPC can be achieved using a new type of
                 encryption scheme that we call multikey FHE, which is
                 capable of operating on inputs encrypted under
                 multiple, unrelated keys. A ciphertext resulting from a
                 multikey evaluation can be jointly decrypted using the
                 secret keys of all the users involved in the
                 computation. --- We construct a multikey FHE scheme
                 based on NTRU, a very efficient public-key encryption
                 scheme proposed in the 1990s. It was previously not
                 known how to make NTRU fully homomorphic even for a
                 single party. We view the construction of (multikey)
                 FHE from NTRU encryption as a main contribution of
                 independent interest. Although the transformation to a
                 fully homomorphic system deteriorates the efficiency of
                 NTRU somewhat, we believe that this system is a leading
                 candidate for a practical FHE scheme.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Boyle:2012:MCS,
  author =       "Elette Boyle and Shafi Goldwasser and Abhishek Jain
                 and Yael Tauman Kalai",
  title =        "Multiparty computation secure against continual memory
                 leakage",
  crossref =     "ACM:2012:SPA",
  pages =        "1235--1254",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214087",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We construct a multiparty computation (MPC) protocol
                 that is secure even if a malicious adversary, in
                 addition to corrupting $1 - \epsilon$ fraction of all
                 parties for an arbitrarily small constant $\epsilon >
                 0$, can leak information about the secret state of each
                 honest party. This leakage can be continuous for an
                 unbounded number of executions of the MPC protocol,
                 computing different functions on the same or different
                 set of inputs. We assume a (necessary) ``leak-free''
                 preprocessing stage. We emphasize that we achieve
                 leakage resilience without weakening the security
                 guarantee of classical MPC. Namely, an adversary who is
                  given leakage on honest parties' states is guaranteed
                 to learn nothing beyond the input and output values of
                 corrupted parties. This is in contrast with previous
                 works on leakage in the multi-party protocol setting,
                 which weaken the security notion, and only guarantee
                 that a protocol which leaks $l$ bits about the parties'
                  secret states yields at most $l$ bits of leakage on
                 the parties' private inputs. For some functions, such
                 as voting, such leakage can be detrimental. Our result
                 relies on standard cryptographic assumptions, and our
                 security parameter is polynomially related to the
                 number of parties.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Hardt:2012:BRR,
  author =       "Moritz Hardt and Aaron Roth",
  title =        "Beating randomized response on incoherent matrices",
  crossref =     "ACM:2012:SPA",
  pages =        "1255--1268",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214088",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Computing accurate low rank approximations of large
                 matrices is a fundamental data mining task. In many
                 applications however the matrix contains sensitive
                  information about individuals. In such cases we would
                 like to release a low rank approximation that satisfies
                 a strong privacy guarantee such as differential
                 privacy. Unfortunately, to date the best known
                 algorithm for this task that satisfies differential
                 privacy is based on naive input perturbation or
                 randomized response: Each entry of the matrix is
                 perturbed independently by a sufficiently large random
                  noise variable; a low rank approximation is then
                 computed on the resulting matrix. We give (the first)
                 significant improvements in accuracy over randomized
                 response under the natural and necessary assumption
                 that the matrix has low coherence. Our algorithm is
                 also very efficient and finds a constant rank
                 approximation of an $m \times n$ matrix in time $O(m
                 n)$. Note that even generating the noise matrix
                 required for randomized response already requires time
                 $O(mn)$.",
  acknowledgement = ack-nhfb,
}
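
%%% Editor's illustration for Hardt:2012:BRR above (not from the
%%% paper): a minimal numpy sketch of the randomized-response baseline
%%% that the paper improves on: perturb every entry independently with
%%% Gaussian noise, then compute a rank-k approximation of the noisy
%%% matrix via the SVD.  The noise scale is the textbook
%%% Gaussian-mechanism calibration for sensitivity one per entry; it
%%% is an illustrative choice, not the paper's algorithm.

import numpy as np

def randomized_response_lowrank(A, k, eps, delta, seed=0):
    rng = np.random.default_rng(seed)
    sigma = np.sqrt(2.0 * np.log(1.25 / delta)) / eps  # Gaussian mechanism scale
    noisy = A + rng.normal(scale=sigma, size=A.shape)  # input perturbation
    U, s, Vt = np.linalg.svd(noisy, full_matrices=False)
    return (U[:, :k] * s[:k]) @ Vt[:k]                 # rank-k SVD truncation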

@InProceedings{Bhaskara:2012:UDP,
  author =       "Aditya Bhaskara and Daniel Dadush and Ravishankar
                 Krishnaswamy and Kunal Talwar",
  title =        "Unconditional differentially private mechanisms for
                 linear queries",
  crossref =     "ACM:2012:SPA",
  pages =        "1269--1284",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214089",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We investigate the problem of designing differentially
                 private mechanisms for a set of $d$ linear queries over
                 a database, while adding as little error as possible.
                 Hardt and Talwar [HT10] related this problem to
                 geometric properties of a convex body defined by the
                  set of queries and gave an $O(\log^3 d)$-approximation
                 to the minimum $l_2^2$ error, assuming a conjecture
                 from convex geometry called the Slicing or Hyperplane
                 conjecture. In this work we give a mechanism that works
                 unconditionally, and also gives an improved $O(\log^2
                 d)$ approximation to the expected $l_2^2$ error. We
                 remove the dependence on the Slicing conjecture by
                 using a result of Klartag [Kla06] that shows that any
                 convex body is close to one for which the conjecture
                 holds; our main contribution is in making this result
                 constructive by using recent techniques of Dadush,
                 Peikert and Vempala [DPV10]. The improvement in
                 approximation ratio relies on a stronger lower bound we
                 derive on the optimum. This new lower bound goes beyond
                 the packing argument that has traditionally been used
                 in Differential Privacy and allows us to add the
                 packing lower bounds obtained from orthogonal
                 subspaces. We are able to achieve this via a
                 symmetrization argument which argues that there always
                 exists a near optimal differentially private mechanism
                 which adds noise that is independent of the input
                 database! We believe this result should be of
                 independent interest, and also discuss some interesting
                 consequences.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Muthukrishnan:2012:OPH,
  author =       "S. Muthukrishnan and Aleksandar Nikolov",
  title =        "Optimal private halfspace counting via discrepancy",
  crossref =     "ACM:2012:SPA",
  pages =        "1285--1292",
  year =         "2012",
  DOI =          "https://doi.org/10.1145/2213977.2214090",
  bibdate =      "Thu Nov 8 19:11:58 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "A {\em range counting\/} problem is specified by a set
                 $P$ of size $|P| = n$ of points in $\mathbb{R}^d$, an
                 integer {\em weight\/} $x_p$ associated to each point
                 $p \in P$, and a {\em range space\/} ${\cal R}
                 \subseteq 2^P$. Given a query range $R \in {\cal R}$,
                 the output is $R(x) = \sum_{p \in R} x_p$. The {\em
                 average squared error\/} of an algorithm ${\cal A}$ is
                  $1 / |{\cal R}| \sum_{R \in {\cal R}} ({\cal A}(R, x) -
                 R(x))^2$. Range counting for different range spaces is
                 a central problem in Computational Geometry.\par

                 We study $(\epsilon, \delta)$-differentially private
                 algorithms for range counting. Our main results are for
                 the range space given by hyperplanes, that is, the
                 halfspace counting problem. We present an $(\epsilon,
                 \delta)$-differentially private algorithm for halfspace
                 counting in $d$ dimensions which is $O(n^{1 - 1/d})$
                 approximate for average squared error. This contrasts
                 with the $\Omega(n)$ lower bound established by the
                 classical result of Dinur and Nissim on approximation
                 for arbitrary subset counting queries. We also show a
                 matching lower bound of $\Omega(n^{1 - 1 /d})$
                 approximation for any $(\epsilon,
                 \delta)$-differentially private algorithm for halfspace
                 counting.\par

                 Both bounds are obtained using discrepancy theory. For
                 the lower bound, we use a modified discrepancy measure
                 and bound approximation of $(\epsilon,
                 \delta)$-differentially private algorithms for range
                 counting queries in terms of this discrepancy. We also
                 relate the modified discrepancy measure to classical
                 combinatorial discrepancy, which allows us to exploit
                 known discrepancy lower bounds. This approach also
                 yields a lower bound of $\Omega((\log n)^{d - 1})$ for
                 $(\epsilon, \delta)$-differentially private {\em
                 orthogonal\/} range counting in $d$ dimensions, the
                 first known superconstant lower bound for this problem.
                 For the upper bound, we use an approach inspired by
                 partial coloring methods for proving discrepancy upper
                 bounds, and obtain $(\epsilon, \delta)$-differentially
                 private algorithms for range counting with polynomially
                 bounded shatter function range spaces.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kane:2013:PLF,
  author =       "Daniel M. Kane and Raghu Meka",
  title =        "A {PRG} for {Lipschitz} functions of polynomials with
                 applications to sparsest cut",
  crossref =     "ACM:2013:SPF",
  pages =        "1--10",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488610",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/prng.bib;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We give improved pseudorandom generators (PRGs) for
                 Lipschitz functions of low-degree polynomials over the
                 hypercube. These are functions of the form $ \psi
                  (P(x)) $, where $ P : \{1, -1\}^n \to \mathbb{R} $ is a
                  low-degree polynomial and $ \psi : \mathbb{R} \to \mathbb{R} $ is a
                 function with small Lipschitz constant. PRGs for smooth
                 functions of low-degree polynomials have received a lot
                 of attention recently and play an important role in
                 constructing PRGs for the natural class of polynomial
                 threshold functions [12,13,24,16,15]. In spite of the
                 recent progress, no nontrivial PRGs were known for
                 fooling Lipschitz functions of degree $ O(\log n) $
                 polynomials even for constant error rate. In this work,
                 we give the first such generator obtaining a
                  seed-length of $ (\log n)^{O(l^2 / \epsilon^2)} $ for
                 fooling degree $l$ polynomials with error $ \epsilon $.
                 Previous generators had an exponential dependence on
                 the degree $l$. We use our PRG to get better
                 integrality gap instances for sparsest cut, a
                 fundamental problem in graph theory with many
                 applications in graph optimization. We give an instance
                 of uniform sparsest cut for which a powerful
                 semi-definite relaxation (SDP) first introduced by
                 Goemans and Linial and studied in the seminal work of
                 Arora, Rao and Vazirani [3] has an integrality gap of $
                 \exp (\Omega ((\log \log n)^{1 / 2})) $. Understanding
                 the performance of the Goemans--Linial SDP for uniform
                 sparsest cut is an important open problem in
                 approximation algorithms and metric embeddings. Our
                 work gives a near-exponential improvement over previous
                 lower bounds which achieved a gap of $ \Omega (\log
                 \log n) $ [11,21]. Our gap instance builds on the
                 recent short code gadgets of Barak et al. [5].",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kwok:2013:ICI,
  author =       "Tsz Chiu Kwok and Lap Chi Lau and Yin Tat Lee and
                 Shayan Oveis Gharan and Luca Trevisan",
  title =        "Improved {Cheeger}'s inequality: analysis of spectral
                 partitioning algorithms through higher order spectral
                 gap",
  crossref =     "ACM:2013:SPF",
  pages =        "11--20",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488611",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Let $ \phi (G) $ be the minimum conductance of an
                 undirected graph $G$, and let $ 0 = \lambda_1 \leq
                 \lambda_2 \leq \ldots {} \leq \lambda_n \leq 2 $ be the
                 eigenvalues of the normalized Laplacian matrix of $G$.
                 We prove that for any graph $G$ and any $ k \geq 2 $, $
                  \phi (G) = O(k) \lambda_2 / \sqrt{\lambda_k} $, and this
                 performance guarantee is achieved by the spectral
                 partitioning algorithm. This improves Cheeger's
                 inequality, and the bound is optimal up to a constant
                 factor for any $k$. Our result shows that the spectral
                 partitioning algorithm is a constant factor
                 approximation algorithm for finding a sparse cut if $
                  \lambda_k $ is a constant for some constant $k$. This
                 provides some theoretical justification to its
                 empirical performance in image segmentation and
                 clustering problems. We extend the analysis to spectral
                 algorithms for other graph partitioning problems,
                 including multi-way partition, balanced separator, and
                 maximum cut.",
  acknowledgement = ack-nhfb,
}
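
%%% Editor's illustration for Kwok:2013:ICI above (not from the
%%% paper): a small numpy check of the quantities in the improved
%%% Cheeger inequality phi(G) <= O(k) lambda_2 / sqrt(lambda_k),
%%% compared (up to the unspecified constant) with the classical
%%% Cheeger upper bound sqrt(2 lambda_2), on a cycle graph; the graph
%%% and the choice k = 4 are arbitrary illustrative choices.

import numpy as np

def normalized_laplacian_eigenvalues(adj):
    # Eigenvalues 0 = lam_1 <= ... <= lam_n <= 2 of I - D^{-1/2} A D^{-1/2}.
    d_inv_sqrt = np.diag(1.0 / np.sqrt(adj.sum(axis=1)))
    lap = np.eye(len(adj)) - d_inv_sqrt @ adj @ d_inv_sqrt
    return np.sort(np.linalg.eigvalsh(lap))

n, k = 8, 4
adj = np.zeros((n, n))
for i in range(n):                       # adjacency matrix of the cycle C_n
    adj[i, (i + 1) % n] = adj[(i + 1) % n, i] = 1.0
lam = normalized_laplacian_eigenvalues(adj)
print("lambda_2 / sqrt(lambda_k):", lam[1] / np.sqrt(lam[k - 1]))
print("classical sqrt(2 lambda_2):", np.sqrt(2.0 * lam[1]))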

@InProceedings{Williams:2013:NPV,
  author =       "Ryan Williams",
  title =        "Natural proofs versus derandomization",
  crossref =     "ACM:2013:SPF",
  pages =        "21--30",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488612",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We study connections between Natural Proofs,
                 derandomization, and the problem of proving ``weak''
                 circuit lower bounds such as $ {\rm NEXP} \not \subset
                 {\rm TC}^0 $, which are still wide open. Natural Proofs
                 have three properties: they are constructive (an
                 efficient algorithm $A$ is embedded in them), have
                 largeness ($A$ accepts a large fraction of strings),
                 and are useful ($A$ rejects all strings which are truth
                 tables of small circuits). Strong circuit lower bounds
                 that are ``naturalizing'' would contradict present
                 cryptographic understanding, yet the vast majority of
                 known circuit lower bound proofs are naturalizing. So
                 it is imperative to understand how to pursue un-Natural
                 Proofs. Some heuristic arguments say constructivity
                  should be circumventable: largeness is inherent in many
                 proof techniques, and it is probably our presently weak
                 techniques that yield constructivity. We prove:
                 Constructivity is unavoidable, even for NEXP lower
                 bounds. Informally, we prove for all ``typical''
                 non-uniform circuit classes $C$, $ {\rm NEXP} \not
                 \subset C $ if and only if there is a polynomial-time
                 algorithm distinguishing some function from all
                 functions computable by $C$-circuits. Hence $ {\rm
                 NEXP} \not \subset C $ is equivalent to exhibiting a
                 constructive property useful against $C$. There are no
                 P-natural properties useful against $C$ if and only if
                 randomized exponential time can be ``derandomized''
                 using truth tables of circuits from $C$ as random
                 seeds. Therefore the task of proving there are no
                 $P$-natural properties is inherently a derandomization
                 problem, weaker than but implied by the existence of
                 strong pseudorandom functions. These characterizations
                 are applied to yield several new results. The two main
                 applications are that $ {\rm NEXP} \cap {\rm coNEXP} $
                 does not have $ n^{\log n} $ size ACC circuits, and a
                 mild derandomization result for RP.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Bei:2013:CTE,
  author =       "Xiaohui Bei and Ning Chen and Shengyu Zhang",
  title =        "On the complexity of trial and error",
  crossref =     "ACM:2013:SPF",
  pages =        "31--40",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488613",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Motivated by certain applications from physics,
                 biochemistry, economics, and computer science in which
                 the objects under investigation are unknown or not
                 directly accessible because of various limitations, we
                 propose a trial-and-error model to examine search
                 problems in which inputs are unknown. More
                 specifically, we consider constraint satisfaction
                 problems $ \wedge_i C_i $, where the constraints $ C_i
                 $ are hidden, and the goal is to find a solution
                 satisfying all constraints. We can adaptively propose a
                 candidate solution (i.e., trial), and there is a
                 verification oracle that either confirms that it is a
                  valid solution, or returns the index $i$ of a violated
                 constraint (i.e., error), with the exact content of $
                 C_i $ still hidden. We studied the time and trial
                 complexities of a number of natural CSPs, summarized as
                 follows. On one hand, despite the seemingly very little
                 information provided by the oracle, efficient
                 algorithms do exist for Nash, Core, Stable Matching,
                 and SAT problems, whose unknown-input versions are
                 shown to be as hard as the corresponding known-input
                  versions up to a polynomial factor. The techniques
                 employed vary considerably, including, e.g., order
                 theory and the ellipsoid method with a strong
                 separation oracle. On the other hand, there are
                 problems whose complexities are substantially increased
                 in the unknown-input model. In particular, no
                 time-efficient algorithms exist for Graph Isomorphism
                 and Group Isomorphism (unless PH collapses or P = NP).
                 The proofs use quite nonstandard reductions, in which
                 an efficient simulator is carefully designed to
                 simulate a desirable but computationally unaffordable
                 oracle. Our model investigates the value of input
                 information, and our results demonstrate that the lack
                 of input information can introduce various levels of
                 extra difficulty. The model accommodates a wide range
                 of combinatorial and algebraic structures, and exhibits
                 intimate connections with (and hopefully can also serve
                 as a useful supplement to) certain existing learning
                 and complexity theories.",
  acknowledgement = ack-nhfb,
}
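
%%% Editor's illustration for Bei:2013:CTE above (not from the
%%% paper): a toy Python version of the trial-and-error interaction
%%% model.  The constraints are hidden; the verification oracle either
%%% accepts a proposed assignment or reveals only the index of some
%%% violated constraint.  The exhaustive solver below ignores even
%%% that index and so needs up to 2^n trials; the paper's point is
%%% that for several natural CSPs much cleverer use of the oracle is
%%% possible.  All names and the tiny instance are invented.

import itertools

def make_oracle(clauses):
    # clauses: hidden list of clauses, each a list of (variable, wanted value).
    def oracle(assignment):
        for i, clause in enumerate(clauses):
            if not any(assignment[v] == val for (v, val) in clause):
                return i    # index of a violated constraint; contents stay hidden
        return None         # the trial is a valid solution
    return oracle

def exhaustive_trials(oracle, nvars):
    # Propose assignments until one is accepted; worst-case 2^nvars trials.
    for trials, bits in enumerate(itertools.product([False, True], repeat=nvars), 1):
        if oracle(list(bits)) is None:
            return list(bits), trials

oracle = make_oracle([[(0, True), (1, False)], [(1, True), (2, True)]])
print(exhaustive_trials(oracle, 3))      # -> ([False, False, True], 2)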

@InProceedings{Bhawalkar:2013:COF,
  author =       "Kshipra Bhawalkar and Sreenivas Gollapudi and Kamesh
                 Munagala",
  title =        "Coevolutionary opinion formation games",
  crossref =     "ACM:2013:SPF",
  pages =        "41--50",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488615",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We present game-theoretic models of opinion formation
                 in social networks where opinions themselves co-evolve
                 with friendships. In these models, nodes form their
                 opinions by maximizing agreements with friends weighted
                 by the strength of the relationships, which in turn
                  depend on the difference in opinion with the respective
                 friends. We define a social cost of this process by
                 generalizing recent work of Bindel et al., FOCS 2011.
                 We tightly bound the price of anarchy of the resulting
                 dynamics via local smoothness arguments, and
                 characterize it as a function of how much nodes value
                 their own (intrinsic) opinion, as well as how strongly
                 they weigh links to friends with whom they agree
                 more.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Chawla:2013:PIM,
  author =       "Shuchi Chawla and Jason D. Hartline and David Malec
                 and Balasubramanian Sivan",
  title =        "Prior-independent mechanisms for scheduling",
  crossref =     "ACM:2013:SPF",
  pages =        "51--60",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488616",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We study the makespan minimization problem with
                 unrelated selfish machines under the assumption that
                 job sizes are stochastic. We design simple truthful
                 mechanisms that under different distributional
                 assumptions provide constant and sublogarithmic
                 approximations to expected makespan. Our mechanisms are
                 prior-independent in that they do not rely on knowledge
                 of the job size distributions. Prior-independent
                 approximations were previously known only for the
                 revenue maximization objective [13, 11, 26]. In
                 contrast to our results, in prior-free settings no
                 truthful anonymous deterministic mechanism for the
                 makespan objective can provide a sublinear
                 approximation [3].",
  acknowledgement = ack-nhfb,
}

@InProceedings{Feldman:2013:CWE,
  author =       "Michal Feldman and Nick Gravin and Brendan Lucier",
  title =        "Combinatorial {Walrasian Equilibrium}",
  crossref =     "ACM:2013:SPF",
  pages =        "61--70",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488617",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We study a combinatorial market design problem, where
                 a collection of indivisible objects is to be priced and
                 sold to potential buyers subject to equilibrium
                 constraints. The classic solution concept for such
                 problems is Walrasian Equilibrium (WE), which provides
                 a simple and transparent pricing structure that
                 achieves optimal social welfare. The main weakness of
                 the WE notion is that it exists only in very
                 restrictive cases. To overcome this limitation, we
                 introduce the notion of a Combinatorial Walrasian
                  equilibrium (CWE), a natural relaxation of WE. The
                 difference between a CWE and a (non-combinatorial) WE
                 is that the seller can package the items into
                 indivisible bundles prior to sale, and the market does
                 not necessarily clear. We show that every valuation
                 profile admits a CWE that obtains at least half of the
                 optimal (unconstrained) social welfare. Moreover, we
                 devise a poly-time algorithm that, given an arbitrary
                 allocation X, computes a CWE that achieves at least
                 half of the welfare of X. Thus, the economic problem of
                 finding a CWE with high social welfare reduces to the
                 algorithmic problem of social-welfare approximation. In
                 addition, we show that every valuation profile admits a
                 CWE that extracts a logarithmic fraction of the optimal
                 welfare as revenue. Finally, these results are
                 complemented by strong lower bounds when the seller is
                 restricted to using item prices only, which motivates
                 the use of bundles. The strength of our results derives
                 partly from their generality --- our results hold for
                 arbitrary valuations that may exhibit complex
                 combinations of substitutes and complements.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Naor:2013:ERN,
  author =       "Assaf Naor and Oded Regev and Thomas Vidick",
  title =        "Efficient rounding for the noncommutative
                 {Grothendieck} inequality",
  crossref =     "ACM:2013:SPF",
  pages =        "71--80",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488618",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The classical Grothendieck inequality has applications
                 to the design of approximation algorithms for NP-hard
                 optimization problems. We show that an algorithmic
                 interpretation may also be given for a noncommutative
                 generalization of the Grothendieck inequality due to
                 Pisier and Haagerup. Our main result, an efficient
                 rounding procedure for this inequality, leads to a
                 constant-factor polynomial time approximation algorithm
                 for an optimization problem which generalizes the Cut
                 Norm problem of Frieze and Kannan, and is shown here to
                  have additional applications to robust principal
                 component analysis and the orthogonal Procrustes
                 problem.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Clarkson:2013:LRA,
  author =       "Kenneth L. Clarkson and David P. Woodruff",
  title =        "Low rank approximation and regression in input
                 sparsity time",
  crossref =     "ACM:2013:SPF",
  pages =        "81--90",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488620",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We design a new distribution over $ \poly (r
                 \epsilon^{-1}) \times n $ matrices $S$ so that for any
                 fixed $ n \times d $ matrix $A$ of rank $r$, with
                 probability at least $ 9 / 10 $, $ S A x_2 = (1 \pm
                 \epsilon) A x_2 $ simultaneously for all $ x \in R^d $.
                 Such a matrix $S$ is called a subspace embedding.
                 Furthermore, $ S A $ can be computed in $ O({\rm
                 nnz}(A)) + \tilde O (r^2 \epsilon^{-2}) $ time, where $
                 {\rm nnz}(A) $ is the number of non-zero entries of
                 $A$. This improves over all previous subspace
                 embeddings, which required at least $ \Omega (n d \log
                 d) $ time to achieve this property. We call our
                 matrices $S$ sparse embedding matrices. Using our
                 sparse embedding matrices, we obtain the fastest known
                 algorithms for overconstrained least-squares
                 regression, low-rank approximation, approximating all
                 leverage scores, and $ l_p $ -regression: to output an
                 $ x' $ for which $ A x' - b_2 \leq (1 + \epsilon)
                 \min_x A x - b_2 $ for an $ n \times d $ matrix $A$ and
                 an $ n \times 1 $ column vector $b$, we obtain an
                 algorithm running in $ O({\rm nnz}(A)) + \tilde O(d^3
                 \epsilon^{-2}) $ time, and another in $ O({\rm nnz}(A)
                 \log (1 / \epsilon)) + \tilde O(d^3 \log (1 /
                 \epsilon)) $ time. (Here $ \tilde O(f) = f \cdot l o
                 g^{O(1)} (f) $.) to obtain a decomposition of an $ n
                 \times n $ matrix $A$ into a product of an $ n \times k
                 $ matrix $L$, a $ k \times k $ diagonal matrix $D$, and
                 a $ n \times k $ matrix $W$, for which $ \{ F A - L D W
                 \} \leq (1 + \epsilon) F \{ A - A_k \} $, where $ A_k $
                 is the best rank-$k$ approximation, our algorithm runs
                 in $ O({\rm nnz}(A)) + \tilde O(n k^2 \epsilon^{-4}
                 \log n + k^3 \epsilon^{-5} \log^2 n) $ time. to output
                 an approximation to all leverage scores of an $ n
                 \times d $ input matrix $A$ simultaneously, with
                 constant relative error, our algorithms run in $ O({\rm
                 nnz}(A) \log n) + \tilde O(r^3) $ time. to output an $
                 x' $ for which $ A x' - b_p \leq (1 + \epsilon) \min_x
                 A x - b_p $ for an $ n \times d $ matrix $A$ and an $ n
                 \times 1 $ column vector $b$, we obtain an algorithm
                 running in $ O({\rm nnz}(A) \log n) + \poly (r
                 \epsilon^{-1}) $ time, for any constant $ 1 \leq p <
                 \infty $. We optimize the polynomial factors in the
                 above stated running times, and show various tradeoffs.
                 Finally, we provide preliminary experimental results
                 which suggest that our algorithms are of interest in
                 practice.",
  acknowledgement = ack-nhfb,
}
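
%%% Editor's illustration for Clarkson:2013:LRA above (not from the
%%% paper): a numpy sketch of applying a CountSketch-style matrix of
%%% the kind the abstract calls a sparse embedding matrix.  Each of
%%% the n columns of S holds a single random sign in a random row, so
%%% S A can be formed in one pass over the rows of A, i.e. in
%%% O(nnz(A)) time for sparse inputs.  The sketch size m = 400 is an
%%% arbitrary illustrative choice; the theorem takes m = poly(r/eps).

import numpy as np

def sparse_embedding(A, m, seed=0):
    rng = np.random.default_rng(seed)
    n = A.shape[0]
    row_of = rng.integers(0, m, size=n)        # hash each input row to a sketch row
    sign_of = rng.choice([-1.0, 1.0], size=n)  # independent random signs
    SA = np.zeros((m, A.shape[1]))
    for i in range(n):                         # one pass over the rows of A
        SA[row_of[i]] += sign_of[i] * A[i]
    return SA

A = np.random.default_rng(1).normal(size=(20000, 4))
SA = sparse_embedding(A, m=400)
x = np.array([1.0, -2.0, 0.5, 3.0])
print(np.linalg.norm(SA @ x) / np.linalg.norm(A @ x))  # should be close to 1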

@InProceedings{Meng:2013:LDS,
  author =       "Xiangrui Meng and Michael W. Mahoney",
  title =        "Low-distortion subspace embeddings in input-sparsity
                 time and applications to robust linear regression",
  crossref =     "ACM:2013:SPF",
  pages =        "91--100",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488621",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Low-distortion embeddings are critical building blocks
                 for developing random sampling and random projection
                 algorithms for common linear algebra problems. We show
                  that, given a matrix $ A \in \mathbb{R}^{n \times d} $
                  with $ n \gg d $ and a $ p \in [1, 2) $, with a
                  constant probability, we can construct a
                  low-distortion embedding matrix $ \Pi \in
                  \mathbb{R}^{O({\rm poly}(d)) \times n} $ that embeds $
                  A_p $, the $ l_p $ subspace spanned by $A$'s columns,
                  into $ (\mathbb{R}^{O({\rm poly}(d))}, \| \cdot \|_p)
                  $; the distortion of our embeddings is only $ O({\rm
                  poly}(d)) $, and we can compute $ \Pi A $ in $ O({\rm
                  nnz}(A)) $ time, i.e., input-sparsity time. Our result
                  generalizes the input-sparsity time $ l_2 $ subspace
                  embedding by Clarkson and Woodruff [STOC'13]; and for
                  completeness, we present a simpler and improved
                  analysis of their construction for $ l_2 $. These
                  input-sparsity time $ l_p $ embeddings are optimal, up
                  to constants, in terms of their running time; and the
                  improved running time propagates to applications such
                  as $ (1 \pm \epsilon) $-distortion $ l_p $ subspace
                  embedding and relative-error $ l_p $ regression. For $
                  l_2 $, we show that a $ (1 + \epsilon) $-approximate
                  solution to the $ l_2 $ regression problem specified
                  by the matrix $A$ and a vector $ b \in \mathbb{R}^n $
                  can be computed in $ O({\rm nnz}(A) + d^3 \log (d /
                  \epsilon) / \epsilon^2) $ time; and for $ l_p $, via a
                  subspace-preserving sampling procedure, we show that a
                  $ (1 \pm \epsilon) $-distortion embedding of $ A_p $
                  into $ \mathbb{R}^{O({\rm poly}(d))} $ can be computed
                  in $ O({\rm nnz}(A) \cdot \log n) $ time, and we also
                  show that a $ (1 + \epsilon) $-approximate solution to
                  the $ l_p $ regression problem $ \min_{x \in
                  \mathbb{R}^d} \|A x - b\|_p $ can be computed in $
                  O({\rm nnz}(A) \cdot \log n + {\rm poly}(d) \log (1 /
                  \epsilon) / \epsilon^2) $ time. Moreover, we can also
                  improve the embedding dimension or equivalently the
                  sample size to $ O(d^{3 + p / 2} \log (1 / \epsilon) /
                  \epsilon^2) $ without increasing the complexity.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Nelson:2013:SLB,
  author =       "Jelani Nelson and Huy L. Nguyen",
  title =        "Sparsity lower bounds for dimensionality reducing
                 maps",
  crossref =     "ACM:2013:SPF",
  pages =        "101--110",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488622",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We give near-tight lower bounds for the sparsity
                 required in several dimensionality reducing linear
                 maps. First, consider the Johnson--Lindenstrauss (JL)
                 lemma which states that for any set of $n$ vectors in $
                 R^d $ there is an $ A \in R^{m \times d} $ with $ m =
                 O(\epsilon^{-2} \log n) $ such that mapping by $A$
                 preserves the pairwise Euclidean distances up to a $ 1
                 \pm \epsilon $ factor. We show there exists a set of
                 $n$ vectors such that any such $A$ with at most $s$
                 non-zero entries per column must have $ s = \Omega
                 (\epsilon^{-1} \log n / \log (1 / \epsilon)) $ if $ m <
                 O(n / \log (1 / \epsilon)) $. This improves the lower
                 bound of $ \Omega (\min \{ \epsilon^{-2}, \epsilon^{-1}
                  \sqrt{\log_m d} \}) $ by [Dasgupta-Kumar-Sarlos, STOC
                 2010], which only held against the stronger property of
                 distributional JL, and only against a certain
                 restricted class of distributions. Meanwhile our lower
                 bound is against the JL lemma itself, with no
                 restrictions. Our lower bound matches the sparse JL
                 upper bound of [Kane-Nelson, SODA 2012] up to an $
                 O(\log (1 / \epsilon)) $ factor. Next, we show that any
                 $ m \times n $ matrix with the $k$-restricted isometry
                 property (RIP) with constant distortion must have $
                 \Omega (k \log (n / k)) $ non-zeroes per column if $ m
                 = O(k \log (n / k)) $, the optimal number of rows for
                 RIP, and $ k < n / \polylog n $. This improves the
                 previous lower bound of $ \Omega (\min \{ k, n / m \})
                 $ by [Chandar, 2010] and shows that for most $k$ it is
                 impossible to have a sparse RIP matrix with an optimal
                 number of rows. Both lower bounds above also offer a
                 tradeoff between sparsity and the number of rows.
                 Lastly, we show that any oblivious distribution over
                 subspace embedding matrices with 1 non-zero per column
                 and preserving distances in a $d$ dimensional-subspace
                 up to a constant factor must have at least $ \Omega
                 (d^2) $ rows. This matches an upper bound in
                 [Nelson-Nguy{\^e}n, arXiv abs/1211.1002] and shows the
                  impossibility of obtaining the best of both
                 constructions in that work, namely 1 non-zero per
                 column and $ d \cdot \polylog d $ rows.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Bitansky:2013:RCB,
  author =       "Nir Bitansky and Ran Canetti and Alessandro Chiesa and
                 Eran Tromer",
  title =        "Recursive composition and bootstrapping for {SNARKS}
                 and proof-carrying data",
  crossref =     "ACM:2013:SPF",
  pages =        "111--120",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488623",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Succinct non-interactive arguments of knowledge
                 (SNARKs) enable verifying NP statements with complexity
                 that is essentially independent of that required for
                 classical NP verification. In particular, they provide
                 strong solutions to the problem of verifiably
                 delegating computation. We construct the first
                 fully-succinct publicly-verifiable SNARK. To do that,
                 we first show how to ``bootstrap'' any SNARK that
                 requires expensive preprocessing to obtain a SNARK that
                 does not, while preserving public verifiability. We
                 then apply this transformation to known SNARKs with
                 preprocessing. Moreover, the SNARK we construct only
                 requires of the prover time and space that are
                 essentially the same as that required for classical NP
                 verification. Our transformation assumes only
                 collision-resistant hashing; curiously, it does not
                 rely on PCPs. We also show an analogous transformation
                 for privately-verifiable SNARKs, assuming
                 fully-homomorphic encryption. At the heart of our
                 transformations is a technique for recursive
                 composition of SNARKs. This technique uses in an
                 essential way the proof-carrying data (PCD) framework,
                 which extends SNARKs to the setting of distributed
                 networks of provers and verifiers. Concretely, to
                 bootstrap a given SNARK, we recursively compose the
                 SNARK to obtain a ``weak'' PCD system for shallow
                 distributed computations, and then use the PCD
                 framework to attain stronger notions of SNARKs and PCD
                 systems.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Hardt:2013:HRL,
  author =       "Moritz Hardt and David P. Woodruff",
  title =        "How robust are linear sketches to adaptive inputs?",
  crossref =     "ACM:2013:SPF",
  pages =        "121--130",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488624",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Linear sketches are powerful algorithmic tools that
                 turn an $n$-dimensional input into a concise
                 lower-dimensional representation via a linear
                 transformation. Such sketches have seen a wide range of
                 applications including norm estimation over data
                 streams, compressed sensing, and distributed computing.
                 In almost any realistic setting, however, a linear
                 sketch faces the possibility that its inputs are
                 correlated with previous evaluations of the sketch.
                 Known techniques no longer guarantee the correctness of
                 the output in the presence of such correlations. We
                 therefore ask: Are linear sketches inherently
                 non-robust to adaptively chosen inputs? We give a
                 strong affirmative answer to this question.
                 Specifically, we show that no linear sketch
                 approximates the Euclidean norm of its input to within
                 an arbitrary multiplicative approximation factor on a
                 polynomial number of adaptively chosen inputs. The
                 result remains true even if the dimension of the sketch
                  is $ d = n - o(n) $ and the sketch is given unbounded
                 computation time. Our result is based on an algorithm
                  with running time polynomial in $d$ that adaptively finds
                 a distribution over inputs on which the sketch is
                 incorrect with constant probability. Our result implies
                 several corollaries for related problems including
                  $ l_p $-norm estimation and compressed sensing. Notably,
                 we resolve an open problem in compressed sensing
                  regarding the feasibility of $ l_2 / l_2 $-recovery
                  guarantees in the presence of computationally bounded
                 adversaries.",
  acknowledgement = ack-nhfb,
}
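
%%% Editor's illustration for Hardt:2013:HRL above (not from the
%%% paper): a minimal example of the kind of linear sketch the
%%% abstract discusses, here a dense Gaussian sketch estimating the
%%% Euclidean norm.  The guarantee demonstrated below is the usual
%%% oblivious (non-adaptive) one; the paper shows that no linear
%%% sketch can retain such a guarantee against polynomially many
%%% adaptively chosen inputs.  Dimensions are illustrative choices.

import numpy as np

rng = np.random.default_rng(0)
n, m = 10000, 400
S = rng.normal(size=(m, n)) / np.sqrt(m)  # the (secret) sketching matrix
x = rng.normal(size=n)                    # a fixed, non-adaptive input
print(np.linalg.norm(S @ x), np.linalg.norm(x))  # agree up to ~1/sqrt(m)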

@InProceedings{Bohm:2013:EDO,
  author =       "Stanislav B{\"o}hm and Stefan G{\"o}ller and Petr
                 Jancar",
  title =        "Equivalence of deterministic one-counter automata is
                 {NL}-complete",
  crossref =     "ACM:2013:SPF",
  pages =        "131--140",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488626",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We prove that language equivalence of deterministic
                 one-counter automata is NL-complete. This improves the
                 superpolynomial time complexity upper bound shown by
                 Valiant and Paterson in 1975. Our main contribution is
                 to prove that two deterministic one-counter automata
                 are inequivalent if and only if they can be
                 distinguished by a word of length polynomial in the
                 size of the two input automata.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Burgisser:2013:ELB,
  author =       "Peter B{\"u}rgisser and Christian Ikenmeyer",
  title =        "Explicit lower bounds via geometric complexity
                 theory",
  crossref =     "ACM:2013:SPF",
  pages =        "141--150",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488627",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We prove the lower bound $ R (M_m) \geq 3 / 2 m^2 - 2
                 $ on the border rank of $ m \times m $ matrix
                 multiplication by exhibiting explicit representation
                 theoretic (occurrence) obstructions in the sense of
                 Mulmuley and Sohoni's geometric complexity theory (GCT)
                 program. While this bound is weaker than the one
                 recently obtained by Landsberg and Ottaviani, these are
                 the first significant lower bounds obtained within the
                 GCT program. Behind the proof is an explicit
                  description of the highest weight vectors in $ {\rm Sym}^d
                  \otimes^3 (\mathbb{C}^n)^* $ in terms of combinatorial objects,
                 called obstruction designs. This description results
                 from analyzing the process of polarization and
                 Schur--Weyl duality.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Braverman:2013:IEC,
  author =       "Mark Braverman and Ankit Garg and Denis Pankratov and
                 Omri Weinstein",
  title =        "From information to exact communication",
  crossref =     "ACM:2013:SPF",
  pages =        "151--160",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488628",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We develop a new local characterization of the
                 zero-error information complexity function for
                 two-party communication problems, and use it to compute
                 the exact internal and external information complexity
                 of the 2-bit AND function: $ {\rm IC}({\rm AND}, 0) =
                  C_{\wedge } \approx 1.4923 $ bits, and $ {\rm IC}^{ext}
                  ({\rm AND}, 0) = \log_2 3 \approx 1.5839 $ bits. This
                 leads to a tight (upper and lower bound)
                 characterization of the communication complexity of the
                 set intersection problem on subsets of $ \{ 1, \ldots
                  {}, n \} $ (the players are required to compute the
                 intersection of their sets), whose randomized
                 communication complexity tends to $ C_{\wedge } \cdot n
                 \pm o(n) $ as the error tends to zero. The
                 information-optimal protocol we present has an infinite
                 number of rounds. We show this is necessary by proving
                  that the rate of convergence of the $r$-round information
                 cost of AND to $ {\rm IC}({\rm AND}, 0) = C_{\wedge } $
                 behaves like $ \Theta (1 / r^2) $, i.e. that the
                 $r$-round information complexity of AND is $ C_\wedge
                 + \Theta (1 / r^2) $. We leverage the tight analysis
                 obtained for the information complexity of AND to
                 calculate and prove the exact communication complexity
                 of the set disjointness function $ {\rm Disj}_n (X, Y)
                  = \neg \vee_{i = 1}^n {\rm AND}(x_i, y_i) $ with error
                 tending to $0$, which turns out to be $ = C_{\rm DISJ}
                  \cdot n \pm o(n) $, where $ C_{\rm DISJ} \approx 0.4827
                 $. Our rate of convergence results imply that an
                 asymptotically optimal protocol for set disjointness
                 will have to use $ \omega (1) $ rounds of
                 communication, since every $r$-round protocol will be
                 sub-optimal by at least $ \Omega (n / r^2) $ bits of
                  communication. We also obtain the tight bound of $ (2 /
                  \ln 2) k \pm o(k) $ on the communication complexity of
                 disjointness of sets of size $ \leq k $. An asymptotic
                  bound of $ \Theta (k) $ was previously shown by H{\aa}stad
                 and Wigderson.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Braverman:2013:ICA,
  author =       "Mark Braverman and Ankur Moitra",
  title =        "An information complexity approach to extended
                 formulations",
  crossref =     "ACM:2013:SPF",
  pages =        "161--170",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488629",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We prove an unconditional lower bound that any linear
                 program that achieves an $ O(n^{1 - \epsilon }) $
                  approximation for clique has size $ 2^{\Omega (n^{\epsilon})}
                  $. There has been considerable recent
                 interest in proving unconditional lower bounds against
                 any linear program. Fiorini et al. proved that there is
                 no polynomial sized linear program for traveling
                 salesman. Braun et al. proved that there is no
                 polynomial sized $ O(n^{1 / 2 - \epsilon })
                 $-approximate linear program for clique. Here we prove
                 an optimal and unconditional lower bound against linear
                  programs for clique that matches H{\aa}stad's celebrated
                 hardness result. Interestingly, the techniques used to
                 prove such lower bounds have closely followed the
                 progression of techniques used in communication
                 complexity. Here we develop an information theoretic
                 framework to approach these questions, and we use it to
                 prove our main result. Also we resolve a related
                 question: How many bits of communication are needed to
                 get $ \epsilon $ advantage over random guessing for
                 disjointness? Kalyanasundaram and Schnitger proved that
                 a protocol that gets constant advantage requires $
                 \Omega (n) $ bits of communication. This result in
                 conjunction with amplification implies that any
                 protocol that gets $ \epsilon $-advantage requires $
                 \Omega (\epsilon^2 n) $ bits of communication. Here we
                 improve this bound to $ \Omega (\epsilon n) $, which is
                 optimal for any $ \epsilon > 0 $.",
  acknowledgement = ack-nhfb,
}
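%%% The amplification step mentioned above, spelled out (a standard
%%% Hoeffding argument, not taken from the paper): run an
%%% $\epsilon$-advantage protocol $t$ times independently and take a
%%% majority vote.  In LaTeX:
%%%
%%%     \Pr[\text{majority errs}] \le e^{-2 \epsilon^2 t},
%%%     \qquad \text{so } t = \Theta(1 / \epsilon^2)
%%%     \text{ suffices for constant advantage.}
%%%
%%% Hence a cost-$c$ protocol with $\epsilon$-advantage yields a
%%% constant-advantage protocol of cost $O(c / \epsilon^2)$; combined
%%% with the $\Omega(n)$ bound of Kalyanasundaram and Schnitger this
%%% gives $c = \Omega(\epsilon^2 n)$, the bound the paper improves to
%%% the optimal $\Omega(\epsilon n)$.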

@InProceedings{Komargodski:2013:ACL,
  author =       "Ilan Komargodski and Ran Raz",
  title =        "Average-case lower bounds for formula size",
  crossref =     "ACM:2013:SPF",
  pages =        "171--180",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488630",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We give an explicit function $ h : \{ 0, 1 \}^n \to \{
                 0, 1 \} $ such that any de Morgan formula of size $
                 O(n^{2.499}) $ agrees with $h$ on at most $ 1 / 2 +
                 \epsilon $ fraction of the inputs, where $ \epsilon $
                 is exponentially small (i.e. $ \epsilon = 2^{-n^{\Omega
                 (1)}} $ ). We also show, using the same technique, that
                 any boolean formula of size $ O(n^{1.999}) $ over the
                 complete basis agrees with $h$ on at most $ 1 / 2 +
                 \epsilon $ fraction of the inputs, where $ \epsilon $
                 is exponentially small (i.e. $ \epsilon = 2^{-n^{\Omega
                 (1)}} $ ). Our construction is based on Andreev's $
                 \Omega (n^{2.5 - o(1)}) $ formula size lower bound that
                 was proved for the case of exact computation.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Chen:2013:CNM,
  author =       "Xi Chen and Dimitris Paparas and Mihalis Yannakakis",
  title =        "The complexity of non-monotone markets",
  crossref =     "ACM:2013:SPF",
  pages =        "181--190",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488632",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We introduce the notion of non-monotone utilities,
                 which covers a wide variety of utility functions in
                 economic theory. We show that it is PPAD-hard to
                 compute an approximate Arrow--Debreu market equilibrium
                 in markets with linear and non-monotone utilities.
                 Building on this result, we settle the long-standing
                 open problem regarding the computation of an
                 approximate Arrow--Debreu market equilibrium in markets
                 with CES utilities, by proving that it is PPAD-complete
                 when the Constant Elasticity of Substitution parameter,
                 $ \rho $, is any constant less than $ - 1 $.",
  acknowledgement = ack-nhfb,
}
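%%% For reference, a sketch of the CES utility form the entry above
%%% refers to (the standard textbook definition, not code from the
%%% paper); alpha is a vector of taste parameters and consumption x is
%%% assumed strictly positive.
%%%
%%%     def ces_utility(alpha, x, rho):
%%%         """CES utility u(x) = (sum_j alpha_j * x_j^rho)^(1/rho).
%%%         rho = 1 gives linear (perfect-substitute) utilities;
%%%         rho < 0 models complements, and the PPAD-completeness
%%%         above holds for any constant rho < -1."""
%%%         s = sum(a * xj ** rho for a, xj in zip(alpha, x))
%%%         return s ** (1.0 / rho)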

@InProceedings{Cheung:2013:TBG,
  author =       "Yun Kuen Cheung and Richard Cole and Nikhil Devanur",
  title =        "Tatonnement beyond gross substitutes?: gradient
                 descent to the rescue",
  crossref =     "ACM:2013:SPF",
  pages =        "191--200",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488633",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Tatonnement is a simple and natural rule for updating
                 prices in Exchange (Arrow--Debreu) markets. In this
                 paper we define a class of markets for which
                 tatonnement is equivalent to gradient descent. This is
                 the class of markets for which there is a convex
                 potential function whose gradient is always equal to
                 the negative of the excess demand and we call it Convex
                 Potential Function (CPF) markets. We show the following
                 results. CPF markets contain the class of Eisenberg
                 Gale (EG) markets, defined previously by Jain and
                 Vazirani. The subclass of CPF markets for which the
                 demand is a differentiable function contains exactly
                 those markets whose demand function has a symmetric
                 negative semi-definite Jacobian. We define a family of
                 continuous versions of tatonnement based on gradient
                 descent using a Bregman divergence. As we show, all
                 processes in this family converge to an equilibrium for
                 any CPF market. This is analogous to the classic result
                 for markets satisfying the Weak Gross Substitutes
                 property. A discrete version of tatonnement converges
                 toward the equilibrium for the following markets of
                 complementary goods; its convergence rate for these
                 settings is analyzed using a common potential function:
                 (1) Fisher markets in which all buyers have Leontief
                 utilities, where the tatonnement process reduces the
                 distance to the equilibrium, as measured by the
                 potential function, to an $ \epsilon $ fraction of its
                 initial value in $ O(1 / \epsilon) $ rounds of price
                 updates; and (2) Fisher markets in which all buyers
                 have complementary CES utilities, where the distance to
                 the equilibrium is reduced to an $ \epsilon $ fraction
                 of its initial value in $ O(\log (1 / \epsilon)) $
                 rounds of price updates. This shows that tatonnement
                 converges for the
                 entire range of Fisher markets when buyers have
                 complementary CES utilities, in contrast to prior work,
                 which could analyze only the substitutes range,
                 together with a small portion of the complementary
                 range.",
  acknowledgement = ack-nhfb,
}
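%%% A minimal sketch (our illustration, not the paper's code) of
%%% discrete tatonnement as gradient descent: prices move in the
%%% direction of excess demand.  The Leontief demand formula is
%%% standard (a buyer with budget b and requirement vector a demands
%%% the bundle (b / (a . p)) * a); the step size and iteration count
%%% are arbitrary illustrative choices.
%%%
%%%     import numpy as np
%%%
%%%     def leontief_excess_demand(A, b, supply):
%%%         """Excess demand z(p) for a Leontief Fisher market with
%%%         requirement matrix A (one row per buyer), budgets b."""
%%%         def z(p):
%%%             demand = sum(bi / (ai @ p) * ai
%%%                          for ai, bi in zip(A, b))
%%%             return demand - supply
%%%         return z
%%%
%%%     def tatonnement(z, p, step=0.05, iters=2000):
%%%         """Multiplicative price update driven by excess demand."""
%%%         for _ in range(iters):
%%%             p = np.maximum(p * (1.0 + step * z(p)), 1e-9)
%%%         return p
%%%
%%%     A = np.array([[1.0, 2.0], [2.0, 1.0]])   # two buyers, two goods
%%%     b = np.array([1.0, 1.0])                 # unit budgets
%%%     z = leontief_excess_demand(A, b, np.ones(2))
%%%     p = tatonnement(z, np.array([0.5, 1.5]))
%%%     # p approaches (1, 1), where excess demand vanishes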

@InProceedings{Feldman:2013:SAA,
  author =       "Michal Feldman and Hu Fu and Nick Gravin and Brendan
                 Lucier",
  title =        "Simultaneous auctions are (almost) efficient",
  crossref =     "ACM:2013:SPF",
  pages =        "201--210",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488634",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Simultaneous item auctions are simple and practical
                 procedures for allocating items to bidders with
                 potentially complex preferences. In a simultaneous
                 auction, every bidder submits independent bids on all
                 items simultaneously. The allocation and prices are
                 then resolved for each item separately, based solely on
                 the bids submitted on that item. We study the
                 efficiency of Bayes-Nash equilibrium (BNE) outcomes of
                 simultaneous first- and second-price auctions when
                 bidders have complement-free (a.k.a. subadditive)
                 valuations. While it is known that the social welfare
                 of every pure Nash equilibrium (NE) constitutes a
                 constant fraction of the optimal social welfare, a pure
                 NE rarely exists, and moreover, the full information
                 assumption is often unrealistic. Therefore, quantifying
                 the welfare loss in Bayes-Nash equilibria is of
                 particular interest. Previous work established a
                 logarithmic bound on the ratio between the social
                 welfare of a BNE and the expected optimal social
                 welfare in both first-price auctions (Hassidim et al.,
                 2011) and second-price auctions (Bhawalkar and
                 Roughgarden, 2011), leaving a large gap between a
                 constant and a logarithmic ratio. We introduce a new
                 proof technique and use it to resolve both of these
                 gaps in a unified way. Specifically, we show that the
                 expected social welfare of any BNE is at least 1/2 of
                 the optimal social welfare in the case of first-price
                 auctions, and at least 1/4 in the case of second-price
                 auctions.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Syrgkanis:2013:CEM,
  author =       "Vasilis Syrgkanis and Eva Tardos",
  title =        "Composable and efficient mechanisms",
  crossref =     "ACM:2013:SPF",
  pages =        "211--220",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488635",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We initiate the study of efficient mechanism design
                 with guaranteed good properties even when players
                 participate in multiple mechanisms simultaneously or
                 sequentially. We define the class of smooth mechanisms,
                 related to smooth games defined by Roughgarden, that
                 can be thought of as mechanisms that generate
                 approximately market clearing prices. We show that
                 smooth mechanisms result in high-quality outcomes, both
                 in equilibrium and in learning outcomes, in the full
                 information setting, as well as in Bayesian equilibrium
                 with uncertainty about participants. Our main result is
                 to show that smooth mechanisms compose well: smoothness
                 locally at each mechanism implies global efficiency.
                 For mechanisms where good performance requires that
                 bidders do not bid above their value, we identify the
                 notion of a weakly smooth mechanism. Weakly smooth
                 mechanisms, such as the Vickrey auction, are
                 approximately efficient under the no-overbidding
                 assumption, and the weak smoothness property is also
                 maintained by composition. In most of the paper we
                 assume participants have quasi-linear valuations. We
                 also extend some of our results to settings where
                 participants have budget constraints.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Goyal:2013:NBB,
  author =       "Vipul Goyal",
  title =        "Non-black-box simulation in the fully concurrent
                 setting",
  crossref =     "ACM:2013:SPF",
  pages =        "221--230",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488637",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We present a new zero-knowledge argument protocol by
                 relying on the non-black-box simulation technique of
                 Barak (FOCS'01). Similar to the protocol of Barak, ours
                 is public-coin, is based on the existence of
                 collision-resistant hash functions, and, is not based
                 on ``rewinding techniques'' but rather uses
                 non-black-box simulation. However, in contrast to the
                 protocol of Barak, our protocol is secure even with an
                 unbounded (polynomial) number of concurrent
                 sessions. This gives us the first construction of
                 public-coin concurrent zero-knowledge. Prior to our
                 work, Pass, Tseng and Wikstrom (SIAM J. Comp. 2011) had
                 shown that using black-box simulation, getting a
                 construction for even public-coin parallel
                 zero-knowledge is impossible. A public-coin concurrent
                 zero-knowledge protocol directly implies the existence
                 of a concurrent resettably-sound zero-knowledge
                 protocol. This is an improvement over the corresponding
                 construction of Deng, Goyal and Sahai (FOCS'09) which
                 was based on stronger assumptions. Furthermore, this
                 also directly leads to an alternative (and arguably
                 cleaner) construction of a simultaneous resettable
                 zero-knowledge argument system. An important feature of
                 our protocol is the existence of a ``straight-line''
                 simulator. This gives a fundamentally different tool
                 for constructing concurrently secure computation
                 protocols (for functionalities even beyond
                 zero-knowledge). The round complexity of our protocol
                 is $ n^\epsilon $ (for any constant $ \epsilon > 0 $ ),
                 and, the simulator runs in strict polynomial time. The
                 main technique behind our construction is purely
                 combinatorial in nature.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Chung:2013:NBB,
  author =       "Kai-Min Chung and Rafael Pass and Karn Seth",
  title =        "Non-black-box simulation from one-way functions and
                 applications to resettable security",
  crossref =     "ACM:2013:SPF",
  pages =        "231--240",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488638",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The simulation paradigm, introduced by Goldwasser,
                 Micali and Rackoff, is of fundamental importance to
                 modern cryptography. In a breakthrough work from 2001,
                 Barak (FOCS'01) introduced a novel non-black-box
                 simulation technique. This technique enabled the
                 construction of new cryptographic primitives, such as
                 resettably-sound zero-knowledge arguments, that cannot
                 be proven secure using just black-box simulation
                 techniques. The work of Barak and its follow-ups,
                 however, all require stronger cryptographic hardness
                 assumptions than the minimal assumption of one-way
                 functions. In this work, we show how to perform
                 non-black-box simulation assuming just the existence of
                 one-way functions. In particular, we demonstrate the
                 existence of a constant-round resettably-sound
                 zero-knowledge argument based only on the existence of
                 one-way functions. Using this technique, we determine
                 necessary and sufficient assumptions for several other
                 notions of resettable security of zero-knowledge
                 proofs. An additional benefit of our approach is that
                 it seemingly makes practical implementations of
                 non-black-box zero-knowledge viable.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Bitansky:2013:IAO,
  author =       "Nir Bitansky and Omer Paneth",
  title =        "On the impossibility of approximate obfuscation and
                 applications to resettable cryptography",
  crossref =     "ACM:2013:SPF",
  pages =        "241--250",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488639",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The traditional notion of program obfuscation requires
                 that an obfuscation $ \tilde {\rm Prog} $ of a program
                 Prog computes the exact same function as Prog, but
                 beyond that, the code of $ \tilde {\rm Prog} $ should
                 not leak any information about Prog. This strong notion
                 of virtual black-box security was shown by Barak et al.
                 (CRYPTO 2001) to be impossible to achieve, for certain
                 unobfuscatable function families. The same work raised
                 the question of approximate obfuscation, where the
                 obfuscated $ \tilde {\rm Prog} $ is only required to
                 approximate Prog; that is, $ \tilde {\rm Prog} $ only
                 agrees with Prog with high enough probability on some
                 input distribution. We show that, assuming trapdoor
                 permutations, there exist families of robust
                 unobfuscatable functions for which even approximate
                 obfuscation is impossible. Specifically, obfuscation is
                 impossible even if the obfuscated $ \tilde {\rm Prog} $
                 is only required to agree with Prog with probability
                 slightly more than 1/2, on a uniformly sampled input
                 (below 1/2-agreement, the function obfuscated by
                 $ \tilde {\rm Prog} $ is not uniquely defined).
                 Additionally, assuming only one-way functions, we rule
                 out approximate obfuscation where $ \tilde {\rm Prog} $
                 may output $ \bot $ with probability close to $1$, but
                 otherwise must agree with Prog. We demonstrate the
                 power of robust
                 unobfuscatable functions by exhibiting new implications
                 to resettable protocols. Concretely, we reduce the
                 assumptions required for resettably-sound
                 zero-knowledge to one-way functions, as well as reduce
                 round-complexity. We also present a new simplified
                 construction of a simultaneously-resettable
                 zero-knowledge protocol. Finally, we construct a
                 three-message simultaneously-resettable
                 witness-indistinguishable argument of knowledge (with a
                 non-black-box knowledge extractor). Our constructions
                 use a new non-black-box simulation technique that is
                 based on a special kind of ``resettable slots''. These
                 slots are useful for a non-black-box simulator, but not
                 for a resetting prover.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Miles:2013:SCG,
  author =       "Eric Miles and Emanuele Viola",
  title =        "Shielding circuits with groups",
  crossref =     "ACM:2013:SPF",
  pages =        "251--260",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488640",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We show how to efficiently compile any given circuit C
                 into a leakage-resistant circuit C' such that any
                 function on the wires of C' that leaks information
                 during a computation C'(x) yields advantage in
                 computing the product of |C'|$^{ \Omega (1)}$ elements
                 of the alternating group A$_u$. In combination with new
                 compression bounds for A$_u$ products, also obtained
                 here, C' withstands leakage from virtually any class of
                 functions against which average-case lower bounds are
                 known. This includes communication protocols, and A$
                 C^0 $ circuits augmented with few arbitrary symmetric
                 gates. If N$ C^1 $ ' T$ C^0 $ then the construction
                 resists T$ C^0 $ leakage as well. We also conjecture
                 that our construction resists N$ C^1 $ leakage. In
                 addition, we extend the construction to the multi-query
                 setting by relying on a simple secure hardware
                 component. We build on Barrington's theorem [JCSS '89]
                 and on the previous leakage-resistant constructions by
                 Ishai et al. [Crypto '03] and Faust et al. [Eurocrypt
                 '10]. Our construction exploits properties of $A_u$
                 beyond what is sufficient for Barrington's theorem.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Babai:2013:QTC,
  author =       "Laszlo Babai and John Wilmes",
  title =        "Quasipolynomial-time canonical form for {Steiner}
                 designs",
  crossref =     "ACM:2013:SPF",
  pages =        "261--270",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488642",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "A Steiner 2-design is a finite geometry consisting of
                 a set of ``points'' together with a set of ``lines''
                 (subsets of points of uniform cardinality) such that
                 each pair of points belongs to exactly one line. In
                 this paper we analyse the individualization/refinement
                 heuristic and conclude that after individualizing $
                 O(\log n) $ points (assigning individual colors to
                 them), the refinement process gives each point an
                 individual color. The following consequences are
                 immediate: (a) isomorphism of Steiner 2-designs can be
                 tested in $ n^{O(\log n)} $ time, where $n$ is the
                 number of lines; (b) a canonical form of Steiner
                 2-designs can be computed within the same time bound;
                 (c) all isomorphisms between two Steiner 2-designs can
                 be listed within the same time bound; (d) the number of
                 automorphisms of a Steiner 2-design is at most
                 $ n^{O(\log n)} $ (a fact of interest to finite geometry
                 and group theory). The best previous bound in each of
                 these four statements was moderately exponential, $
                 \exp (\tilde O(n^{1 / 4})) $ (Spielman, STOC'96). Our
                 result removes an exponential bottleneck from
                 Spielman's analysis of the Graph Isomorphism problem
                 for strongly regular graphs. The results extend to
                 Steiner $t$-designs for all $ t \geq 2 $. Strongly
                 regular (s.r.) graphs have been known as hard cases for
                 graph isomorphism testing; the best previously known
                 bound for this case is moderately exponential, $ \exp
                 (\tilde O(n^{1 / 3})) $ where $n$ is the number of
                 vertices (Spielman, STOC'96). Line graphs of Steiner
                 $2$-designs enter as a critical subclass via Neumaier's
                 1979 classification of s.r. graphs. Previously, $
                 n^{O(\log n)} $ isomorphism testing and canonical forms
                 for Steiner 2-designs was known for the case when the
                 lines of the Steiner 2-design have bounded length
                 (Babai and Luks, STOC'83). That paper relied on Luks's
                 group-theoretic divide-and-conquer algorithms and did
                 not yield a subexponential bound on the number of
                 automorphisms. To analyse the
                 individualization/refinement heuristic, we develop a
                 new structure theory of Steiner 2-designs based on the
                 analysis of controlled growth and on an addressing
                 scheme that produces a hierarchy of increasing sets of
                 pairwise independent, uniformly distributed points.
                 This scheme represents a new expression of the
                 structural homogeneity of Steiner 2-designs that allows
                 applications of the second moment method. We also
                 address the problem of reconstruction of Steiner
                 2-designs from their line-graphs beyond the point of
                 unique reconstructability, in a manner analogous to
                 list-decoding, and as a consequence achieve an $ \exp
                 (\tilde O(n^{1 / 6})) $ bound for isomorphism testing
                 for this class of s.r. graphs. Results, essentially
                 identical to our main results, were obtained
                 simultaneously by Xi Chen, Xiaorui Sun, and Shang-Hua
                 Teng, building on a different philosophy and
                 combinatorial structure theory than the present paper.
                 They do not claim an analysis of the
                 individualization/refinement algorithm but of a more
                 complex combinatorial algorithm. We comment on how this
                 paper fits into the overall project of improved
                 isomorphism testing for strongly regular graphs (the
                 ultimate goal being subexponential $ \exp (n^{o(1)}) $
                 time). In the remaining cases we need to deal with s.r.
                 graphs satisfying ``Neumaier's claw bound,'' permitting
                 the use of a separate set of asymptotic structural
                 tools. In joint work (in progress) with Chen, Sun, and
                 Teng, we address that case and have already pushed the
                 overall bound below $ \exp (\tilde O(n^{1 / 4})) $. The
                 present paper is a methodologically distinct and
                 stand-alone part of the overall project.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Chen:2013:MSD,
  author =       "Xi Chen and Xiaorui Sun and Shang-Hua Teng",
  title =        "Multi-stage design for quasipolynomial-time
                 isomorphism testing of {Steiner} $2$-systems",
  crossref =     "ACM:2013:SPF",
  pages =        "271--280",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488643",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "A standard heuristic for testing graph isomorphism is
                 to first assign distinct labels to a small set of
                 vertices of an input graph, and then propagate to
                 create new vertex labels across the graph, aiming to
                 assign distinct and isomorphism-invariant labels to all
                 vertices in the graph. This is usually referred to as
                 the individualization/refinement method for canonical
                 labeling of graphs. We present a quasipolynomial-time
                 algorithm for isomorphism testing of Steiner 2-systems.
                 A Steiner 2-system consists of points and lines, where
                 each line passes through the same number of points and each
                 pair of points uniquely determines a line. Each Steiner
                 2-system induces a Steiner graph, in which vertices
                 represent lines and edges represent intersections of
                 lines. Steiner graphs are an important subfamily of
                 strongly regular graphs whose isomorphism testing has
                 challenged researchers for years. Inspired by both the
                 individualization/refinement method and the previous
                 analyses of Babai and Spielman, we consider an extended
                 framework for isomorphism testing of Steiner 2-systems,
                 in which we use a small set of randomly chosen points
                 and lines to build isomorphism-invariant multi-stage
                 combinatorial structures that are sufficient to
                 distinguish all pairs of points of a Steiner 2-system.
                 Applying this framework, we show that isomorphism of
                 Steiner 2-systems with $n$ lines can be tested in time
                 $ n^{O(\log n)} $, improving the previous best
                 bound of $ \exp (\tilde O(n^{1 / 4})) $ by
                 Spielman. Before our result, quasipolynomial-time
                 isomorphism testing was only known for the case when
                 the line size is polylogarithmic, as shown by Babai and
                 Luks. A result essentially identical to ours was
                 obtained simultaneously by Laszlo Babai and John
                 Wilmes. They performed a direct analysis of the
                 individualization/refinement method, building on a
                 different philosophy and combinatorial structure
                 theory. We comment on how this paper fits into the
                 overall project of improved isomorphism testing for
                 strongly regular graphs (the ultimate goal being
                 subexponential $ \exp (n^{o(1)}) $ time). In the
                 remaining cases, we only need to deal with strongly
                 regular graphs satisfying ``Neumaier's claw bound,''
                 permitting the use of a separate set of asymptotic
                 structural tools. In joint work (in progress) with
                 Babai and Wilmes, we address that case and have already
                 pushed the overall bound below $ \exp (\tilde
                 O(n^{1 / 4})) $. The present paper is a
                 methodologically distinct and stand-alone part of the
                 overall project.",
  acknowledgement = ack-nhfb,
}
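%%% A minimal sketch of the refinement half of the
%%% individualization/refinement heuristic described above (standard
%%% 1-dimensional Weisfeiler--Leman color refinement; our
%%% illustration, not the papers' algorithm for Steiner systems).
%%%
%%%     def color_refinement(adj, colors):
%%%         """Refine vertex colors until stable: each new color is
%%%         (old color, sorted multiset of neighbors' colors).
%%%         Individualizing a vertex = giving it a fresh color
%%%         before refining."""
%%%         while True:
%%%             sig = {v: (colors[v],
%%%                        tuple(sorted(colors[u] for u in adj[v])))
%%%                    for v in adj}
%%%             palette = {s: i
%%%                        for i, s in enumerate(sorted(set(sig.values())))}
%%%             new = {v: palette[sig[v]] for v in adj}
%%%             if len(set(new.values())) == len(set(colors.values())):
%%%                 return new
%%%             colors = new
%%%
%%%     # 4-cycle: refinement alone cannot split the vertices...
%%%     adj = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}
%%%     print(color_refinement(adj, {v: 0 for v in adj}))
%%%     # ...but individualizing vertex 0 breaks the symmetry:
%%%     print(color_refinement(adj, {0: 1, 1: 0, 2: 0, 3: 0}))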

@InProceedings{Gupta:2013:SCB,
  author =       "Anupam Gupta and Kunal Talwar and David Witmer",
  title =        "Sparsest cut on bounded treewidth graphs: algorithms
                 and hardness results",
  crossref =     "ACM:2013:SPF",
  pages =        "281--290",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488644",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We give a 2-approximation algorithm for the
                 non-uniform Sparsest Cut problem that runs in time $
                 n^{O(k)} $, where $k$ is the treewidth of the graph.
                 This improves on the previous $ 2^{2 k} $ approximation
                 in time $ \poly (n) 2^{O(k)} $ due to Chlamtac et al.
                 [18]. To complement this algorithm, we show the
                 following hardness results: If the non-uniform Sparsest
                 Cut has a $ \rho $-approximation for series-parallel
                 graphs (where $ \rho \geq 1 $ ), then the MaxCut
                 problem has an algorithm with approximation factor
                 arbitrarily close to $ 1 / \rho $. Hence, even for such
                 restricted graphs (which have treewidth 2), the
                 Sparsest Cut problem is NP-hard to approximate better
                 than $ 17 / 16 - \epsilon $ for $ \epsilon > 0 $;
                 assuming the Unique Games Conjecture the hardness
                 becomes $ 1 / \alpha_{GW} - \epsilon $. For graphs with
                 large (but constant) treewidth, we show a hardness
                 result of $ 2 - \epsilon $ assuming the Unique Games
                 Conjecture. Our algorithm rounds a linear program based
                 on (a subset of) the Sherali--Adams lift of the
                 standard Sparsest Cut LP. We show that even for
                 treewidth-2 graphs, the LP has an integrality gap close
                 to 2 even after polynomially many rounds of
                 Sherali--Adams. Hence our approach cannot be improved
                 even on such restricted graphs without using a stronger
                 relaxation.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Chekuri:2013:LTG,
  author =       "Chandra Chekuri and Julia Chuzhoy",
  title =        "Large-treewidth graph decompositions and
                 applications",
  crossref =     "ACM:2013:SPF",
  pages =        "291--300",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488645",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Treewidth is a graph parameter that plays a
                 fundamental role in several structural and algorithmic
                 results. We study the problem of decomposing a given
                 graph $G$ into node-disjoint subgraphs, where each
                 subgraph has sufficiently large treewidth. We prove two
                 theorems on the tradeoff between the number of
                 desired subgraphs $h$ and the desired lower bound $r$ on
                 the treewidth of each subgraph. The theorems assert
                 that, given a graph $G$ with treewidth $k$, a
                 decomposition with parameters $ h, r $ is feasible
                 whenever $ h r^2 \leq k / \polylog (k) $, or $ h^3 r
                 \leq k / \polylog (k) $ holds. We then show a framework
                 for using these theorems to bypass the well-known
                 Grid-Minor Theorem of Robertson and Seymour in some
                 applications. In particular, this leads to
                 substantially improved parameters in some
                 Erd{\H{o}}s--Posa-type results, and faster algorithms
                 for some fixed-parameter tractable problems.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Cygan:2013:FHC,
  author =       "Marek Cygan and Stefan Kratsch and Jesper Nederlof",
  title =        "Fast {Hamiltonicity} checking via bases of perfect
                 matchings",
  crossref =     "ACM:2013:SPF",
  pages =        "301--310",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488646",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "For an even integer $ t \geq 2 $, the Matching
                 Connectivity matrix $ H_t $ is a matrix that has rows
                 and columns both labeled by all perfect matchings of
                 the complete graph $ K_t $ on $t$ vertices; an entry $
                 H_t [M_1, M_2] $ is $1$ if $ M_1 \cup M_2 $ is a
                 Hamiltonian cycle and $0$ otherwise. Motivated by the
                 computational study of the Hamiltonicity problem, we
                 present three results on the structure of $ H_t $: We
                 first show that $ H_t $ has rank exactly $ 2^{t / 2 -
                 1} $ over GF(2) via an appropriate factorization that
                 explicitly provides families of matchings $ X_t $
                 forming bases for $ H_t $. Second, we show how to
                 quickly change representation between such bases.
                 Third, we notice that the sets of matchings $ X_t $
                 induce permutation matrices within $ H_t $. We use the
                 factorization to derive a $ 1.888^n n^{O(1)} $ time
                 Monte Carlo algorithm that solves the Hamiltonicity
                 problem in directed bipartite graphs. Our algorithm also
                 counts the number of Hamiltonian cycles modulo two
                 in directed bipartite or undirected graphs in the same
                 time bound. Moreover, we use the fast basis change
                 algorithm from the second result to present a Monte
                 Carlo algorithm that given an undirected graph on $n$
                 vertices along with a path decomposition of width at
                 most pw decides Hamiltonicity in $ (2 + \sqrt 2)^{pw}
                 n^{O(1)} $ time. Finally, we use the third result to
                 show that for every $ \epsilon > 0 $ this cannot be
                 improved to $ (2 + \sqrt 2 - \epsilon)^{pw} n^{O(1)} $
                 time unless the Strong Exponential Time Hypothesis
                 fails, i.e., a faster algorithm for this problem would
                 imply the breakthrough result of an $ O((2 - \epsilon
                 ')^n) $ time algorithm for CNF-Sat.",
  acknowledgement = ack-nhfb,
}
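%%% The rank claim above is easy to check by brute force for small
%%% even t; a self-contained sketch (our illustration, not the
%%% paper's code).
%%%
%%%     def perfect_matchings(verts):
%%%         """All perfect matchings of the complete graph on verts."""
%%%         if not verts:
%%%             yield frozenset()
%%%             return
%%%         v, rest = verts[0], verts[1:]
%%%         for i, u in enumerate(rest):
%%%             for m in perfect_matchings(rest[:i] + rest[i + 1:]):
%%%                 yield m | {frozenset((v, u))}
%%%
%%%     def is_ham_cycle(edges, t):
%%%         """True iff edges form one cycle through all t vertices."""
%%%         if len(edges) != t:
%%%             return False
%%%         adj = {v: [] for v in range(t)}
%%%         for e in edges:
%%%             a, b = tuple(e)
%%%             adj[a].append(b)
%%%             adj[b].append(a)
%%%         if any(len(ns) != 2 for ns in adj.values()):
%%%             return False
%%%         prev, cur, seen = None, 0, {0}
%%%         for _ in range(t):          # walk the cycle from vertex 0
%%%             nxt = adj[cur][0] if adj[cur][0] != prev else adj[cur][1]
%%%             prev, cur = cur, nxt
%%%             seen.add(cur)
%%%         return cur == 0 and len(seen) == t
%%%
%%%     def gf2_rank(rows):
%%%         """Gaussian elimination over GF(2); rows are bitmasks."""
%%%         basis, rank = {}, 0
%%%         for row in rows:
%%%             while row:
%%%                 h = row.bit_length() - 1
%%%                 if h in basis:
%%%                     row ^= basis[h]
%%%                 else:
%%%                     basis[h], rank = row, rank + 1
%%%                     break
%%%         return rank
%%%
%%%     for t in (4, 6):
%%%         ms = list(perfect_matchings(list(range(t))))
%%%         rows = [sum(1 << j for j, m2 in enumerate(ms)
%%%                     if is_ham_cycle(m1 | m2, t)) for m1 in ms]
%%%         print(t, gf2_rank(rows), 2 ** (t // 2 - 1))  # ranks agree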

@InProceedings{Keevash:2013:PTP,
  author =       "Peter Keevash and Fiachra Knox and Richard Mycroft",
  title =        "Polynomial-time perfect matchings in dense
                 hypergraphs",
  crossref =     "ACM:2013:SPF",
  pages =        "311--320",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488647",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Let H be a $k$-graph on $n$ vertices, with minimum
                 codegree at least n/k + cn for some fixed c > 0. In
                 this paper we construct a polynomial-time algorithm
                 which finds either a perfect matching in H or a
                 certificate that none exists. This essentially solves a
                 problem of Karpinski, Rucinski and Szymanska, who
                 previously showed that this problem is NP-hard for a
                 minimum codegree of n/k --- cn. Our algorithm relies on
                 a theoretical result of independent interest, in which
                 we characterise any such hypergraph with no perfect
                 matching using a family of lattice-based
                 constructions.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Agrawal:2013:QPH,
  author =       "Manindra Agrawal and Chandan Saha and Nitin Saxena",
  title =        "Quasi-polynomial hitting-set for set-depth-{$ \Delta
                 $} formulas",
  crossref =     "ACM:2013:SPF",
  pages =        "321--330",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488649",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We call a depth-4 formula $C$ set-depth-4 if there
                 exists a (unknown) partition $ X_1 \sqcup \cdots \sqcup
                 X_d $ of the variable indices $ [n] $ that the top
                 product layer respects, i.e. $ C({{\rm term} x}) =
                 \Sigma_{i = 1}^k \prod_{j = 1}^d f_{i, j} ({{\rm term}
                 x}_{X j}) $, where $ f_{i, j} $ is a sparse polynomial
                 in $ F[{{\rm term} x}_{X j}] $. Extending this
                 definition to any depth --- we call a depth-$D$ formula
                 $C$ (consisting of alternating layers of $ \Sigma $ and
                 $ \Pi $ gates, with a $ \Sigma $-gate on top) a
                 set-depth-$D$ formula if every $ \Pi $-layer in $C$
                 respects an (unknown) partition on the variables; if $D$
                 is even then the product gates of the bottom-most $ \Pi
                 $-layer are allowed to compute arbitrary monomials. In
                 this work, we give a hitting-set generator for
                 set-depth-$D$ formulas (over any field) with running
                 time polynomial in $ \exp ((D^2 \log s)^{D - 1})
                 $, where $s$ is the size bound on the input
                 set-depth-$D$ formula. In other words, we give a
                 quasi-polynomial time blackbox polynomial identity test
                 for such constant-depth formulas. Previously, the very
                 special case of $ D = 3 $ (also known as
                 set-multilinear depth-3 circuits) had no known
                 sub-exponential time hitting-set generator. This was
                 declared as an open problem by Shpilka {\&} Yehudayoff
                 (FnT-TCS 2010); the model being first studied by Nisan
                 {\&} Wigderson (FOCS 1995) and recently by Forbes {\&}
                 Shpilka (STOC 2012 {\&} ECCC TR12-115). Our work
                 settles this question, not only for depth-3 but, up to
                 depth $ \epsilon \log s / \log \log s $, for a fixed
                 constant $ \epsilon < 1 $. The technique is to
                 investigate depth-$D$ formulas via depth-$ (D - 1) $
                 formulas over a Hadamard algebra, after applying a
                 ``shift'' on the variables. We propose a new algebraic
                 conjecture about the low-support rank-concentration in
                 the latter formulas, and manage to prove it in the case
                 of set-depth-$D$ formulas.",
  acknowledgement = ack-nhfb,
}
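%%% To place the result: a sketch of what a *blackbox* identity test
%%% is (the classical randomized Schwartz--Zippel test; our
%%% illustration, not the paper's generator).  The paper's
%%% contribution is a deterministic, quasi-polynomial-size hitting
%%% set that replaces the random points for set-depth-D formulas.
%%%
%%%     import random
%%%
%%%     def randomized_blackbox_pit(poly, n, deg, trials=20):
%%%         """poly: blackbox evaluating an n-variate polynomial of
%%%         total degree <= deg.  Schwartz--Zippel: a nonzero poly
%%%         vanishes at a random point of S^n w.p. <= deg / |S|."""
%%%         S = range(2 * deg + 1)
%%%         for _ in range(trials):
%%%             if poly([random.choice(S) for _ in range(n)]) != 0:
%%%                 return "nonzero"
%%%         return "zero (w.h.p.)"
%%%
%%%     # example: x0*x1 - x1*x0 is identically zero
%%%     print(randomized_blackbox_pit(
%%%         lambda x: x[0] * x[1] - x[1] * x[0], 2, 2))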

@InProceedings{Hardt:2013:BWC,
  author =       "Moritz Hardt and Aaron Roth",
  title =        "Beyond worst-case analysis in private singular vector
                 computation",
  crossref =     "ACM:2013:SPF",
  pages =        "331--340",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488650",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We consider differentially private approximate
                 singular vector computation. Known worst-case lower
                 bounds show that the error of any differentially
                 private algorithm must scale polynomially with the
                 dimension of the singular vector. We are able to
                 replace this dependence on the dimension by a natural
                 parameter known as the coherence of the matrix that is
                 often observed to be significantly smaller than the
                 dimension both theoretically and empirically. We also
                 prove a matching lower bound showing that our guarantee
                 is nearly optimal for every setting of the coherence
                 parameter. Notably, we achieve our bounds by giving a
                 robust analysis of the well-known power iteration
                 algorithm, which may be of independent interest. Our
                 algorithm also leads to improvements in worst-case
                 settings and to better low-rank approximations in the
                 spectral norm.",
  acknowledgement = ack-nhfb,
}
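%%% A sketch of the power-iteration-with-noise idea the entry above
%%% refers to (our illustration; the noise scale sigma here is a
%%% placeholder, not the paper's privacy calibration, which depends
%%% on epsilon, delta and the matrix coherence).
%%%
%%%     import numpy as np
%%%
%%%     def noisy_power_iteration(A, iters, sigma, rng):
%%%         """Power iteration on A^T A where each step is perturbed
%%%         by Gaussian noise; the robust analysis shows the iterate
%%%         still aligns with the top singular vector when the noise
%%%         is small relative to the spectral gap."""
%%%         x = rng.standard_normal(A.shape[1])
%%%         x /= np.linalg.norm(x)
%%%         for _ in range(iters):
%%%             y = A.T @ (A @ x) + sigma * rng.standard_normal(x.shape)
%%%             x = y / np.linalg.norm(y)
%%%         return x
%%%
%%%     rng = np.random.default_rng(0)
%%%     A = rng.standard_normal((50, 20))
%%%     v = noisy_power_iteration(A, iters=100, sigma=0.1, rng=rng)
%%%     # compare with the exact top right singular vector
%%%     print(abs(v @ np.linalg.svd(A)[2][0]))   # close to 1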

@InProceedings{Hsu:2013:DPA,
  author =       "Justin Hsu and Aaron Roth and Jonathan Ullman",
  title =        "Differential privacy for the analyst via private
                 equilibrium computation",
  crossref =     "ACM:2013:SPF",
  pages =        "341--350",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488651",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We give new mechanisms for answering exponentially
                 many queries from multiple analysts on a private
                 database, while protecting differential privacy both
                 for the individuals in the database and for the
                 analysts. That is, our mechanism's answer to each query
                 is nearly insensitive to changes in the queries asked
                 by other analysts. Our mechanism is the first to offer
                 differential privacy on the joint distribution over
                 analysts' answers, providing privacy for data
                 analysts even if the other data analysts collude or
                 register multiple accounts. In some settings, we are
                 able to achieve nearly optimal error rates (even
                 compared to mechanisms which do not offer analyst
                 privacy), and we are able to extend our techniques to
                 handle non-linear queries. Our analysis is based on a
                 novel view of the private query-release problem as a
                 two-player zero-sum game, which may be of independent
                 interest.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Nikolov:2013:GDP,
  author =       "Aleksandar Nikolov and Kunal Talwar and Li Zhang",
  title =        "The geometry of differential privacy: the sparse and
                 approximate cases",
  crossref =     "ACM:2013:SPF",
  pages =        "351--360",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488652",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We study trade-offs between accuracy and privacy in
                 the context of linear queries over histograms. This is
                 a rich class of queries that includes contingency
                 tables and range queries and has been the focus of a
                 long line of work. For a given set of $d$ linear
                 queries over a database $ x \in R^N $, we seek to find
                 the differentially private mechanism that has the
                 minimum mean squared error. For pure differential
                 privacy, [5, 32] give an $ O(\log^2 d) $ approximation
                 to the optimal mechanism. Our first contribution is to
                 give an efficient $ O(\log^2 d) $ approximation
                 guarantee for the case of $ (\epsilon, \delta)
                 $-differential privacy. Our mechanism adds carefully
                 chosen correlated Gaussian noise to the answers. We
                 prove its approximation guarantee relative to the
                 hereditary discrepancy lower bound of [44], using tools
                 from convex geometry. We next consider the sparse case
                 when the number of queries exceeds the number of
                 individuals in the database, i.e. when $ d > n =
                 |x|_1 $. The lower bounds used in the previous
                 approximation algorithm no longer apply --- in fact
                 better mechanisms are known in this setting [7, 27, 28,
                 31, 49]. Our second main contribution is to give an
                 efficient $ (\epsilon, \delta) $-differentially private
                 mechanism that, for any given query set $A$ and an
                 upper bound $n$ on $ |x|_1 $, has mean squared error
                 within $ \polylog (d, N) $ of the optimal for $A$ and
                 $n$. This approximation is achieved by coupling the
                 Gaussian noise addition approach with linear regression
                 over the $ l_1 $ ball. Additionally, we show a similar
                 polylogarithmic approximation guarantee for the optimal
                 $ \epsilon $-differentially private mechanism in this
                 sparse setting. Our work also shows that for arbitrary
                 counting queries, i.e. $A$ with entries in $ \{ 0, 1 \}
                 $, there is an $ \epsilon $-differentially private
                 mechanism with expected error $ \tilde O(\sqrt n) $ per
                 query, improving on the $ \tilde O(n^{2 / 3}) $ bound
                 of [7] and matching the lower bound implied by [15] up
                 to logarithmic factors. The connection between the
                 hereditary discrepancy and the privacy mechanism
                 enables us to derive the first polylogarithmic
                 approximation to the hereditary discrepancy of a matrix
                 $A$.",
  acknowledgement = ack-nhfb,
}
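%%% For orientation, a baseline (not the paper's correlated-noise
%%% mechanism): the standard Gaussian mechanism for d linear queries
%%% A over a histogram x, with the usual calibration
%%% sigma = sqrt(2 ln(1.25/delta)) * Delta_2 / epsilon.  The paper's
%%% point is that *correlating* the noise across queries, shaped to
%%% the geometry of A, achieves the O(log^2 d) approximation to the
%%% optimal mechanism.
%%%
%%%     import numpy as np
%%%
%%%     def gaussian_mechanism(A, x, eps, delta, rng):
%%%         """Answer the linear queries Ax with i.i.d. Gaussian
%%%         noise.  Neighboring databases change one coordinate of x
%%%         by 1, so the L2 sensitivity is the largest column norm."""
%%%         sens = np.linalg.norm(A, axis=0).max()
%%%         sigma = sens * np.sqrt(2 * np.log(1.25 / delta)) / eps
%%%         return A @ x + sigma * rng.standard_normal(A.shape[0])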

@InProceedings{Ullman:2013:ACQ,
  author =       "Jonathan Ullman",
  title =        "Answering $ n_{2 + o(1)} $ counting queries with
                 differential privacy is hard",
  crossref =     "ACM:2013:SPF",
  pages =        "361--370",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488653",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "A central problem in differentially private data
                 analysis is how to design efficient algorithms capable
                 of answering large numbers of counting queries on a
                 sensitive database. Counting queries are of the form
                 ``What fraction of individual records in the database
                 satisfy the property $q$ ?'' We prove that if one-way
                 functions exist, then there is no algorithm that takes
                 as input a database $ {\rm db} \in {\rm dbset} $, and $
                 k = \tilde \Theta (n^2) $ arbitrary efficiently
                 computable counting queries, runs in time $ \poly (d,
                 n) $, and returns an approximate answer to each query,
                 while satisfying differential privacy. We also consider
                 the complexity of answering ``simple'' counting
                 queries, and make some progress in this direction by
                 showing that the above result holds even when we
                 require that the queries are computable by
                 constant-depth ($ {\rm AC}^0 $) circuits. Our result
                 is almost tight because it is known that $ \tilde
                 \Omega (n^2) $ counting queries can be answered
                 efficiently while satisfying differential privacy.
                 Moreover, many more than $ n^2 $ queries (even
                 exponential in $n$) can be answered in exponential
                 time. We prove our results by extending the connection
                 between differentially private query release and
                 cryptographic traitor-tracing schemes to the setting
                 where the queries are given to the sanitizer as input,
                 and by constructing a traitor-tracing scheme that is
                 secure in this setting.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Thorup:2013:BPS,
  author =       "Mikkel Thorup",
  title =        "Bottom-$k$ and priority sampling, set similarity and
                 subset sums with minimal independence",
  crossref =     "ACM:2013:SPF",
  pages =        "371--380",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488655",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We consider bottom-$k$ sampling for a set X, picking a
                 sample S$_k$ (X) consisting of the k elements that are
                 smallest according to a given hash function h. With
                 this sample we can estimate the relative size f=|Y|/|X|
                 of any subset Y as |S$_k$ (X) intersect Y|/k. A
                 standard application is the estimation of the Jaccard
                 similarity f=|A intersect B|/|A union B| between sets A
                 and B. Given the bottom-$k$ samples from A and B, we
                 construct the bottom-$k$ sample of their union as S$_k$
                 (A union B)=S$_k$ (S$_k$ (A) union S$_k$ (B)), and then
                 the similarity is estimated as |S$_k$ (A union B)
                 intersect S$_k$ (A) intersect S$_k$ (B)|/k. We show
                 here that even if the hash function is only
                 2-independent, the expected relative error is $ O(1
                 \sqrt (f_k)) $. For $ f_k = \Omega(1) $ this is
                 within a constant factor of the expected relative error
                 with truly random hashing. For comparison, consider the
                 classic approach of kxmin-wise where we use $k$ hash
                 independent functions $ h_1, \ldots {}, h_k $, storing
                 the smallest element with each hash function. For
                 kxmin-wise there is an at least constant bias with
                 constant independence, and it is not reduced with
                 larger $k$. Recently Feigenblat et al. showed that
                 bottom-$k$ circumvents the bias if the hash function is
                 8-independent and $k$ is sufficiently large. We get
                 down to 2-independence for any $k$. Our result is based
                 on a simply union bound, transferring generic
                 concentration bounds for the hashing scheme to the
                 bottom-$k$ sample, e.g., getting stronger probability
                 error bounds with higher independence. For weighted
                 sets, we consider priority sampling which adapts
                 efficiently to the concrete input weights, e.g.,
                 benefiting strongly from heavy-tailed input. This time,
                 the analysis is much more involved, but again we show
                 that generic concentration bounds can be applied.",
  acknowledgement = ack-nhfb,
}
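%%% A minimal sketch of bottom-k sampling and the Jaccard estimator
%%% described above (our illustration; blake2b is a stand-in hash --
%%% the point of the paper is that even 2-independent hashing
%%% suffices).
%%%
%%%     import hashlib
%%%
%%%     def h(x):
%%%         d = hashlib.blake2b(repr(x).encode(), digest_size=8).digest()
%%%         return int.from_bytes(d, 'big')
%%%
%%%     def bottom_k(xs, k):
%%%         """The k elements of xs with the smallest hash values."""
%%%         return set(sorted(xs, key=h)[:k])
%%%
%%%     def jaccard_estimate(SA, SB, k):
%%%         """Estimate |A cap B| / |A cup B| from bottom-k samples,
%%%         using S_k(A u B) = S_k(S_k(A) u S_k(B))."""
%%%         S_union = bottom_k(SA | SB, k)
%%%         return len(S_union & SA & SB) / k
%%%
%%%     A, B, k = set(range(0, 1500)), set(range(500, 2000)), 200
%%%     est = jaccard_estimate(bottom_k(A, k), bottom_k(B, k), k)
%%%     print(est)   # true Jaccard similarity is 1000/2000 = 0.5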

@InProceedings{Lenzen:2013:FRT,
  author =       "Christoph Lenzen and Boaz Patt-Shamir",
  title =        "Fast routing table construction using small messages:
                 extended abstract",
  crossref =     "ACM:2013:SPF",
  pages =        "381--390",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488656",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We describe a distributed randomized algorithm to
                 construct routing tables. Given $ 0 < \epsilon \leq 1 /
                 2 $, the algorithm runs in time $ \tilde O(n^{1 / 2 +
                 \epsilon } + {\rm HD}) $, where $n$ is the number of
                 nodes and HD denotes the diameter of the network in
                 hops (i.e., as if the network is unweighted). The
                 weighted length of the produced routes is at most $
                 O(\epsilon^{-1} \log \epsilon^{-1}) $ times the optimal
                 weighted length. This is the first algorithm to break
                 the $ \Omega (n) $ complexity barrier for computing
                 weighted shortest paths even for a single source.
                 Moreover, the algorithm nearly meets the $ \tilde
                 \Omega (n^{1 / 2} + {\rm HD}) $ lower bound for
                 distributed computation of routing tables and
                 approximate distances (with optimality, up to $
                 \polylog $ factors, for $ \epsilon = 1 / \log n $).
                 The presented techniques have many applications,
                 including improved distributed approximation algorithms
                 for Generalized Steiner Forest, all-pairs distance
                 estimation, and estimation of the weighted diameter.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Mendes:2013:MAA,
  author =       "Hammurabi Mendes and Maurice Herlihy",
  title =        "Multidimensional approximate agreement in {Byzantine}
                 asynchronous systems",
  crossref =     "ACM:2013:SPF",
  pages =        "391--400",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488657",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The problem of $ \epsilon $-approximate agreement in
                 Byzantine asynchronous systems is well-understood when
                 all values lie on the real line. In this paper, we
                 generalize the problem to consider values that lie in $
                 R^m $, for $ m \geq 1 $, and present an optimal
                 protocol in regard to fault tolerance. Our scenario is
                 the following. Processes start with values in $ R^m $,
                 for $ m \geq 1 $, and communicate via message-passing.
                 The system is asynchronous: there is no upper bound on
                 processes' relative speeds or on message delay. Some
                 faulty processes can display arbitrarily malicious
                 (i.e. Byzantine) behavior. Non-faulty processes must
                 decide on values that are: (1) in $ R^m $; (2) within
                 distance $ \epsilon $ of each other; and (3) in the
                 convex hull of the non-faulty processes' inputs. We
                 give an algorithm with a matching lower bound on fault
                 tolerance: we require $ n > t(m + 2) $, where $n$ is
                 the number of processes, $t$ is the number of Byzantine
                 processes, and input and output values reside in $ R^m
                 $. Non-faulty processes send $ O(n^2 d \log (m /
                 \epsilon \max \{ \delta (d) : 1 \leq d \leq m \})) $
                 messages in total, where $ \delta (d) $ is the range of
                 non-faulty inputs projected at coordinate $d$. The
                 Byzantine processes do not affect the algorithm's
                 running time.",
  acknowledgement = ack-nhfb,
}

@InProceedings{King:2013:BAP,
  author =       "Valerie King and Jared Saia",
  title =        "{Byzantine} agreement in polynomial expected time:
                 [extended abstract]",
  crossref =     "ACM:2013:SPF",
  pages =        "401--410",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488658",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "In the classic asynchronous Byzantine agreement
                 problem, communication is via asynchronous
                 message-passing and the adversary is adaptive with full
                 information. In particular, the adversary can
                 adaptively determine which processors to corrupt and
                 what strategy these processors should use as the
                 algorithm proceeds; the scheduling of the delivery of
                 messages is set by the adversary, so that the delays
                 are unpredictable to the algorithm; and the adversary
                 knows the states of all processors at any time, and is
                 assumed to be computationally unbounded. Such an
                 adversary is also known as ``strong''. We present a
                 polynomial expected time algorithm to solve
                 asynchronous Byzantine Agreement with a strong
                 adversary that controls up to a constant fraction of
                 the processors. This is the first improvement in
                 running time for this problem since Ben-Or's
                 exponential expected time solution in 1983. Our
                 algorithm tolerates an adversary that controls up to a
                 $ 1 / 500 $ fraction of the processors.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Chakrabarty:2013:MTB,
  author =       "Deeparnab Chakrabarty and C. Seshadhri",
  title =        "A $ o(n) $ monotonicity tester for {Boolean} functions
                 over the hypercube",
  crossref =     "ACM:2013:SPF",
  pages =        "411--418",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488660",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Given oracle access to a Boolean function $ f : \{ 0,
                 1 \}^n \to \{ 0, 1 \} $, we design a randomized tester
                 that takes as input a parameter $ \epsilon > 0 $, and
                  outputs Yes if the function is monotonically
                  non-decreasing, and outputs No with probability $ > 2
                  / 3 $ if the function is $ \epsilon $-far from being
                  monotone, that is, $f$ needs to be modified at an $
                  \epsilon $-fraction of the points to make it monotone.
                 Our non-adaptive, one-sided tester makes $ \tilde
                 O(n^{5 / 6} \epsilon^{-5 / 3}) $ queries to the
                 oracle.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Chakrabarty:2013:OBM,
  author =       "Deeparnab Chakrabarty and C. Seshadhri",
  title =        "Optimal bounds for monotonicity and {Lipschitz}
                 testing over hypercubes and hypergrids",
  crossref =     "ACM:2013:SPF",
  pages =        "419--428",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488661",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The problem of monotonicity testing over the hypergrid
                 and its special case, the hypercube, is a classic
                 question in property testing. We are given query access
                 to $ f : [k]^n \to R $ (for some ordered range $R$).
                  The hypergrid/cube has a natural partial order given
                  by coordinate-wise ordering, denoted by $ \prec $. A
                  function is monotone if for all pairs $ x \prec y $,
                  $ f(x) \leq f(y) $. The distance to monotonicity, $
                  \epsilon_f $, is the minimum fraction of values of
                  $f$ that need to be changed to make $f$ monotone. For
                  $ k = 2 $ (the Boolean hypercube), the usual tester is the edge
                 tester, which checks monotonicity on adjacent pairs of
                 domain points. It is known that the edge tester using $
                 O(\epsilon^{-1} n \log |R|) $ samples can distinguish a
                 monotone function from one where $ \epsilon_f >
                 \epsilon $. On the other hand, the best lower bound for
                 monotonicity testing over general $R$ is $ \Omega (n)
                  $. We resolve this long-standing open problem and prove
                 that $ O(n / \epsilon) $ samples suffice for the edge
                 tester. For hypergrids, known testers require $
                 O(\epsilon^{-1} n \log k \log |R|) $ samples, while the
                 best known (non-adaptive) lower bound is $ \Omega
                 (\epsilon^{-1} n \log k) $. We give a (non-adaptive)
                  monotonicity tester for hypergrids running in $
                  O(\epsilon^{-1} n \log k) $ time. Our techniques lead
                 to optimal property testers (with the same running
                 time) for the natural Lipschitz property on hypercubes
                 and hypergrids. (A $c$-Lipschitz function is one where
                 $ |f(x) - f(y)| \leq c || x - y ||_1 $.) In fact, we
                 give a general unified proof for $ O(\epsilon^{-1} n
                 \log k) $-query testers for a class of
                 ``bounded-derivative'' properties, a class containing
                 both monotonicity and Lipschitz.",
  acknowledgement = ack-nhfb,
}
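
%%% The edge tester described in the abstract above is short enough to
%%% state in code.  The Python sketch below is illustrative only (the
%%% function and parameter names are ours, and the constant C is
%%% arbitrary): it samples random hypercube edges and rejects if any
%%% sampled edge violates monotonicity, so it is non-adaptive and
%%% one-sided, and with O(n / eps) samples it matches the query bound
%%% proved in the paper for the k = 2 case.
%%%
%%%     import random
%%%
%%%     def edge_tester(f, n, eps, C=2):
%%%         """One-sided, non-adaptive edge tester for monotonicity
%%%         of a Boolean function f on {0,1}^n (tuples of 0/1)."""
%%%         for _ in range(int(C * n / eps) + 1):
%%%             x = [random.randint(0, 1) for _ in range(n)]
%%%             i = random.randrange(n)
%%%             lo, hi = list(x), list(x)
%%%             lo[i], hi[i] = 0, 1   # two endpoints of the edge in direction i
%%%             if f(tuple(lo)) > f(tuple(hi)):
%%%                 return False      # a concrete violating edge was found
%%%         return True               # monotone functions are always accepted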

@InProceedings{Bhattacharyya:2013:ELC,
  author =       "Arnab Bhattacharyya and Eldar Fischer and Hamed Hatami
                 and Pooya Hatami and Shachar Lovett",
  title =        "Every locally characterized affine-invariant property
                 is testable",
  crossref =     "ACM:2013:SPF",
  pages =        "429--436",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488662",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Set $ F = F_p $ for any fixed prime $ p \geq 2 $. An
                 affine-invariant property is a property of functions
                 over $ F^n $ that is closed under taking affine
                 transformations of the domain. We prove that all
                 affine-invariant properties having local
                 characterizations are testable. In fact, we show a
                  proximity-oblivious test for any such property $ {\cal
                  P} $, meaning that given an input function $f$, we
                  make a constant number of queries to $f$, always
                  accept if $f$ satisfies $ {\cal P} $, and otherwise
                  reject with probability larger than a positive number
                  that depends only on the distance between $f$ and $
                  {\cal P} $. More generally, we show
                 that any affine-invariant property that is closed under
                 taking restrictions to subspaces and has bounded
                 complexity is testable. We also prove that any property
                 that can be described as the property of decomposing
                 into a known structure of low-degree polynomials is
                 locally characterized and is, hence, testable. For
                 example, whether a function is a product of two
                 degree-$d$ polynomials, whether a function splits into
                  a product of $d$ linear polynomials, and whether a
                 function has low rank are all examples of
                 degree-structural properties and are therefore locally
                 characterized. Our results depend on a new Gowers
                 inverse theorem by Tao and Ziegler for low
                 characteristic fields that decomposes any polynomial
                 with large Gowers norm into a function of a small
                 number of low-degree non-classical polynomials. We
                 establish a new equidistribution result for high rank
                 non-classical polynomials that drives the proofs of
                 both the testability results and the local
                 characterization of degree-structural properties.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kawarabayashi:2013:TSF,
  author =       "Ken-ichi Kawarabayashi and Yuichi Yoshida",
  title =        "Testing subdivision-freeness: property testing meets
                 structural graph theory",
  crossref =     "ACM:2013:SPF",
  pages =        "437--446",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488663",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Testing a property $P$ of graphs in the bounded-degree
                 model deals with the following problem: given a graph
                 $G$ of bounded degree $d$, we should distinguish (with
                 probability 2/3, say) between the case that $G$
                  satisfies $P$ and the case that one should add/remove at
                 least $ \epsilon d n $ edges of $G$ to make it satisfy
                 $P$. In sharp contrast to property testing of dense
                  graphs, which is relatively well understood, only a few
                 properties are known to be testable with a constant
                 number of queries in the bounded-degree model. In
                 particular, no global monotone (i.e., closed under edge
                 deletions) property that expander graphs can satisfy
                 has been shown to be testable in constant time so far.
                  In this paper, we identify for the first time a
                  natural family of global monotone properties that
                  expander graphs can satisfy and that can be
                  efficiently tested in the bounded-degree model.
                  Specifically, we show that, for
                 any integer $ t \geq 1 $, $ K_t $-subdivision-freeness
                 is testable with a constant number of queries in the
                 bounded-degree model. This property was not previously
                  known to be testable even with $ o(n) $ queries. Note
                  that an expander graph with all degrees less than $ t - 1 $
                 does not have a $ K_t $-subdivision. The proof is based
                 on a novel combination of some results that develop the
                 framework of partitioning oracles, together with
                 structural graph theory results that develop the
                  seminal graph minor theory by Robertson and Seymour.
                  As far as we are aware, this is the first result that
                  bridges property testing and structural graph theory.
                  Although we know a rough structure for graphs without
                  $H$-minors from the famous graph minor theory by
                  Robertson and Seymour, there is no corresponding
                  structure theorem for graphs without
                  $H$-subdivisions so far, not even for $ K_5
                  $-subdivision-free graphs. Therefore, subdivisions and
                 minors are very different in a graph structural
                 sense.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Chan:2013:ARP,
  author =       "Siu On Chan",
  title =        "Approximation resistance from pairwise independent
                 subgroups",
  crossref =     "ACM:2013:SPF",
  pages =        "447--456",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488665",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We show optimal (up to constant factor) NP-hardness
                  for Max-$k$-CSP over any domain, whenever $k$ is larger
                 than the domain size. This follows from our main result
                 concerning predicates over abelian groups. We show that
                 a predicate is approximation resistant if it contains a
                 subgroup that is balanced pairwise independent. This
                  gives an unconditional analogue of the Austrin--Mossel
                 hardness result, bypassing the Unique-Games Conjecture
                 for predicates with an abelian subgroup structure. Our
                 main ingredient is a new gap-amplification technique
                 inspired by XOR-lemmas. Using this technique, we also
                 improve the NP-hardness of approximating
                 Independent-Set on bounded-degree graphs,
                 Almost-Coloring, Two-Prover-One-Round-Game, and various
                 other problems.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Huang:2013:ARS,
  author =       "Sangxia Huang",
  title =        "Approximation resistance on satisfiable instances for
                 predicates with few accepting inputs",
  crossref =     "ACM:2013:SPF",
  pages =        "457--466",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488666",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We prove that for all integer $ k \geq 3 $, there is a
                 predicate P on k Boolean variables with 2$^{\tilde O(k
                 1 / 3)}$ accepting assignments that is approximation
                 resistant even on satisfiable instances. That is, given
                 a satisfiable CSP instance with constraint P, we cannot
                 achieve better approximation ratio than simply picking
                 random assignments. This improves the best previously
                 known result by Hastad and Khot where the predicate has
                 2$^{O(k 1 / 2)}$ accepting assignments. Our
                 construction is inspired by several recent
                 developments. One is the idea of using direct sums to
                 improve soundness of PCPs, developed by Chan [5]. We
                 also use techniques from Wenner [32] to construct PCPs
                 with perfect completeness without relying on the d-to-1
                 Conjecture.",
  acknowledgement = ack-nhfb,
}
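
%%% "Simply picking random assignments," as in the abstract above,
%%% satisfies each constraint with probability equal to the fraction
%%% of accepting assignments of the predicate.  A small illustrative
%%% sketch (Python; names are ours) computing that baseline ratio:
%%%
%%%     from itertools import product
%%%
%%%     def random_assignment_ratio(P, k):
%%%         """Expected fraction of constraints with predicate P on k
%%%         Boolean variables satisfied by a uniformly random
%%%         assignment: |P^{-1}(1)| / 2^k, whatever the instance."""
%%%         accepting = sum(1 for a in product((0, 1), repeat=k) if P(a))
%%%         return accepting / 2 ** k
%%%
%%%     # Example: 3-variable parity accepts 4 of 8 assignments, so a
%%%     # random assignment satisfies half the constraints on average.
%%%     assert random_assignment_ratio(lambda a: sum(a) % 2 == 1, 3) == 0.5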

@InProceedings{Garg:2013:WEA,
  author =       "Sanjam Garg and Craig Gentry and Amit Sahai and Brent
                 Waters",
  title =        "Witness encryption and its applications",
  crossref =     "ACM:2013:SPF",
  pages =        "467--476",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488667",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We put forth the concept of witness encryption. A
                 witness encryption scheme is defined for an NP language
                  $L$ (with corresponding witness relation $R$). In
                  such a scheme, a user can encrypt a message $M$ to a
                  particular problem instance $x$ to produce a
                  ciphertext. A recipient of a ciphertext is able to
                  decrypt the message if $x$ is in the language and the
                  recipient knows a witness $w$ where $ R(x, w) $
                  holds. However, if $x$ is not in the language, then
                  no polynomial-time attacker can distinguish between
                  encryptions of any two equal-length
                 messages. We emphasize that the encrypter himself may
                 have no idea whether $x$ is actually in the language.
                 Our contributions in this paper are threefold. First,
                 we introduce and formally define witness encryption.
                 Second, we show how to build several cryptographic
                 primitives from witness encryption. Finally, we give a
                 candidate construction based on the NP-complete Exact
                 Cover problem and Garg, Gentry, and Halevi's recent
                 construction of ``approximate'' multilinear maps. Our
                 method for witness encryption also yields the first
                 candidate construction for an open problem posed by
                 Rudich in 1989: constructing computational secret
                 sharing schemes for an NP-complete access structure.",
  acknowledgement = ack-nhfb,
}
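
%%% The syntax of witness encryption defined in the abstract above can
%%% be captured by a two-method interface.  The Python sketch below
%%% records only the functional shape, with the correctness and
%%% security contract in comments; it is not a construction (the
%%% paper's candidate relies on approximate multilinear maps).
%%%
%%%     from abc import ABC, abstractmethod
%%%
%%%     class WitnessEncryption(ABC):
%%%         """Witness encryption for an NP language L with witness
%%%         relation R."""
%%%
%%%         @abstractmethod
%%%         def encrypt(self, x, message):
%%%             """Encrypt `message` to the instance x; the caller
%%%             need not know whether x is in L."""
%%%
%%%         @abstractmethod
%%%         def decrypt(self, ciphertext, w):
%%%             """Must recover the message whenever R(x, w) holds for
%%%             the instance x used at encryption time.  If x is not
%%%             in L, ciphertexts of equal-length messages must be
%%%             computationally indistinguishable."""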

@InProceedings{De:2013:MSD,
  author =       "Anindya De and Elchanan Mossel and Joe Neeman",
  title =        "Majority is stablest: discrete and {SoS}",
  crossref =     "ACM:2013:SPF",
  pages =        "477--486",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488668",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The Majority is Stablest Theorem has numerous
                 applications in hardness of approximation and social
                 choice theory. We give a new proof of the Majority is
                 Stablest Theorem by induction on the dimension of the
                 discrete cube. Unlike the previous proof, it uses
                 neither the ``invariance principle'' nor Borell's
                 result in Gaussian space. The new proof is general
                  enough to include all previous variants of Majority
                  is Stablest, such as ``it ain't over until it's over'' and
                 ``Majority is most predictable''. Moreover, the new
                 proof allows us to derive a proof of Majority is
                 Stablest in a constant level of the Sum of Squares
                  hierarchy. This implies in particular that the
                  Khot--Vishnoi instance of Max-Cut does not provide a
                  gap instance for
                 the Lasserre hierarchy.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Beck:2013:SEH,
  author =       "Christopher Beck and Russell Impagliazzo",
  title =        "Strong {ETH} holds for regular resolution",
  crossref =     "ACM:2013:SPF",
  pages =        "487--494",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488669",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We obtain asymptotically sharper lower bounds on
                  resolution complexity for $k$-CNF's than were
                  previously known. We show that for any large enough
                  $k$ there
                 are $k$-CNF's which require resolution width $ (1 -
                 \tilde O(k^{-1 / 4}))n $, regular resolution size
                 2$^{(1 - \tilde O(k^{-1 / 4}))n}$, and general
                  resolution size (3/2)$^{(1 - \tilde O(k^{-1 / 4}))n}$.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Lee:2013:NCO,
  author =       "James R. Lee and Manor Mendel and Mohammad Moharrami",
  title =        "A node-capacitated {Okamura--Seymour} theorem",
  crossref =     "ACM:2013:SPF",
  pages =        "495--504",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488671",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The classical Okamura-Seymour theorem states that for
                 an edge-capacitated, multi-commodity flow instance in
                 which all terminals lie on a single face of a planar
                 graph, there exists a feasible concurrent flow if and
                 only if the cut conditions are satisfied. Simple
                 examples show that a similar theorem is impossible in
                 the node-capacitated setting. Nevertheless, we prove
                 that an approximate flow/cut theorem does hold: For
                 some universal $ \epsilon > 0 $, if the node cut
                 conditions are satisfied, then one can simultaneously
                 route an $ \epsilon $-fraction of all the demands. This
                 answers an open question of Chekuri and Kawarabayashi.
                 More generally, we show that this holds in the setting
                 of multi-commodity polymatroid networks introduced by
                  Chekuri et al. Our approach employs a new type of
                 random metric embedding in order to round the convex
                 programs corresponding to these more general flow
                 problems.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Klein:2013:SRS,
  author =       "Philip N. Klein and Shay Mozes and Christian Sommer",
  title =        "Structured recursive separator decompositions for
                 planar graphs in linear time",
  crossref =     "ACM:2013:SPF",
  pages =        "505--514",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488672",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Given a triangulated planar graph $G$ on $n$ vertices
                 and an integer $r$ $r$-division of $G$ with few holes
                 is a decomposition of $G$ into $ O(n / r) $ regions of
                 size at most r such that each region contains at most a
                 constant number of faces that are not faces of $G$
                 (also called holes), and such that, for each region,
                 the total number of vertices on these faces is $
                 O(\sqrt r) $. We provide an algorithm for computing
                 $r$-divisions with few holes in linear time. In fact,
                 our algorithm computes a structure, called
                 decomposition tree, which represents a recursive
                 decomposition of $G$ that includes $r$-divisions for
                 essentially all values of $r$. In particular, given an
                 exponentially increasing sequence $ \{ \vec r \} =
                 (r_1, r_2, \ldots {}) $, our algorithm can produce a
                 recursive $ \{ \vec r \} $-division with few holes in
                 linear time. $r$-divisions with few holes have been
                 used in efficient algorithms to compute shortest paths,
                 minimum cuts, and maximum flows. Our linear-time
                 algorithm improves upon the decomposition algorithm
                 used in the state-of-the-art algorithm for minimum
                  $st$-cut (Italiano, Nussbaum, Sankowski, and
                 Wulff-Nilsen, STOC 2011), removing one of the
                 bottlenecks in the overall running time of their
                 algorithm (analogously for minimum cut in planar and
                 bounded-genus graphs).",
  acknowledgement = ack-nhfb,
}
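
%%% The counting conditions in the definition of an r-division above
%%% are easy to verify given the regions.  The Python sketch below is
%%% ours (the constants c1 and c2 are arbitrary): it checks the
%%% region-count, region-size, and boundary-size conditions, treating
%%% a vertex as a boundary vertex when it occurs in more than one
%%% region; the "few holes" condition needs the planar embedding and
%%% is not checked here.
%%%
%%%     from collections import Counter
%%%
%%%     def check_r_division(regions, n, r, c1=2.0, c2=2.0):
%%%         if len(regions) > c1 * n / r:           # O(n / r) regions
%%%             return False
%%%         count = Counter(v for reg in regions for v in set(reg))
%%%         boundary = {v for v, c in count.items() if c > 1}
%%%         for reg in regions:
%%%             if len(set(reg)) > r:               # size at most r
%%%                 return False
%%%             if len(boundary & set(reg)) > c2 * r ** 0.5:
%%%                 return False                    # O(sqrt(r)) boundary
%%%         return True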

@InProceedings{Roditty:2013:FAA,
  author =       "Liam Roditty and Virginia Vassilevska Williams",
  title =        "Fast approximation algorithms for the diameter and
                 radius of sparse graphs",
  crossref =     "ACM:2013:SPF",
  pages =        "515--524",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488673",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The diameter and the radius of a graph are fundamental
                 topological parameters that have many important
                 practical applications in real world networks. The
                 fastest combinatorial algorithm for both parameters
                 works by solving the all-pairs shortest paths problem
                 (APSP) and has a running time of $ \tilde O(m n) $ in
                 $m$-edge, $n$-node graphs. In a seminal paper,
                 Aingworth, Chekuri, Indyk and Motwani [SODA'96 and
                 SICOMP'99] presented an algorithm that computes in $
                  \tilde O(m \sqrt n + n^2) $ time an estimate $ \hat D
                  $ for the diameter $D$, such that $ \lfloor 2 D / 3
                  \rfloor \leq \hat D \leq D $. Their paper spawned a
                  long line of
                 research on approximate APSP. For the specific problem
                 of diameter approximation, however, no improvement has
                 been achieved in over 15 years. Our paper presents the
                 first improvement over the diameter approximation
                  algorithm of Aingworth et al., producing an algorithm
                 with the same estimate but with an expected running
                 time of $ \tilde O(m \sqrt n) $. We thus show that for
                 all sparse enough graphs, the diameter can be
                 3/2-approximated in $ o(n^2) $ time. Our algorithm is
                 obtained using a surprisingly simple method of
                 neighborhood depth estimation that is strong enough to
                 also approximate, in the same running time, the radius
                 and more generally, all of the eccentricities, i.e. for
                 every node the distance to its furthest node. We also
                 provide strong evidence that our diameter approximation
                 result may be hard to improve. We show that if for some
                 constant $ \epsilon > 0 $ there is an $ O(m^{2 -
                 \epsilon }) $ time $ (3 / 2 - \epsilon) $-approximation
                 algorithm for the diameter of undirected unweighted
                  graphs, then there is an $ O^*((2 - \delta)^n) $ time
                 algorithm for CNF-SAT on $n$ variables for constant $
                 \delta > 0 $, and the strong exponential time
                 hypothesis of [Impagliazzo, Paturi, Zane JCSS'01] is
                 false. Motivated by this negative result, we give
                 several improved diameter approximation algorithms for
                 special cases. We show for instance that for unweighted
                 graphs of constant diameter $D$ not divisible by $3$,
                 there is an $ O(m^{2 - \epsilon }) $ time algorithm
                 that gives a $ (3 / 2 - \epsilon) $ approximation for
                 constant $ \epsilon > 0 $. This is interesting since
                 the diameter approximation problem is hardest to solve
                 for small $D$.",
  acknowledgement = ack-nhfb,
}
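
%%% For context on the guarantees above: a single BFS already gives a
%%% 2-approximation of the diameter of a connected undirected graph,
%%% since ecc(v) <= D <= 2 ecc(v) for every vertex v by the triangle
%%% inequality.  The Python sketch below shows only this folklore
%%% baseline, not the paper's O~(m sqrt(n)) 3/2-approximation.
%%%
%%%     from collections import deque
%%%
%%%     def eccentricity(adj, s):
%%%         """BFS from s; returns the distance to the farthest node."""
%%%         dist = {s: 0}
%%%         q = deque([s])
%%%         while q:
%%%             u = q.popleft()
%%%             for v in adj[u]:
%%%                 if v not in dist:
%%%                     dist[v] = dist[u] + 1
%%%                     q.append(v)
%%%         return max(dist.values())
%%%
%%%     def diameter_2_approx(adj):
%%%         """ecc(s) for an arbitrary s is within a factor 2 of D."""
%%%         return eccentricity(adj, next(iter(adj)))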

@InProceedings{Gu:2013:PDM,
  author =       "Albert Gu and Anupam Gupta and Amit Kumar",
  title =        "The power of deferral: maintaining a
                 constant-competitive {Steiner} tree online",
  crossref =     "ACM:2013:SPF",
  pages =        "525--534",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488674",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "In the online Steiner tree problem, a sequence of
                 points is revealed one-by-one: when a point arrives, we
                 only have time to add a single edge connecting this
                 point to the previous ones, and we want to minimize the
                 total length of edges added. Here, a tight bound has
                 been known for two decades: the greedy algorithm
                 maintains a tree whose cost is $ O(\log n) $ times the
                 Steiner tree cost, and this is best possible. But
                 suppose, in addition to the new edge we add, we have
                 time to change a single edge from the previous set of
                 edges: can we do much better? Can we, e.g., maintain a
                 tree that is constant-competitive? We answer this
                 question in the affirmative. We give a primal-dual
                 algorithm that makes only a single swap per step (in
                 addition to adding the edge connecting the new point to
                 the previous ones), and such that the tree's cost is
                 only a constant times the optimal cost. Our dual-based
                 analysis is quite different from previous primal-only
                 analyses. In particular, we give a correspondence
                 between radii of dual balls and lengths of tree edges;
                 since dual balls are associated with points and hence
                 do not move around (in contrast to edges), we can
                 closely monitor the edge lengths based on the dual
                 radii. Showing that these dual radii cannot change too
                 rapidly is the technical heart of the paper, and allows
                 us to give a hard bound on the number of swaps per
                 arrival, while maintaining a constant-competitive tree
                 at all times. Previous results for this problem gave an
                 algorithm that performed an amortized constant number
                 of swaps: for each n, the number of swaps in the first
                 $n$ steps was O(n). We also give a simpler tight
                 analysis for this amortized case.",
  acknowledgement = ack-nhfb,
}
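
%%% The greedy baseline discussed in the abstract above (connect each
%%% arriving point to its nearest predecessor; O(log n)-competitive)
%%% is a few lines of code; the paper's contribution is to beat it by
%%% additionally allowing one edge swap per arrival.  A Python sketch
%%% of the baseline only (names are ours):
%%%
%%%     import math
%%%
%%%     def online_greedy_steiner(points):
%%%         """Connect each arriving point to the nearest earlier
%%%         point; returns the tree edges (as index pairs) and the
%%%         total length."""
%%%         edges, total = [], 0.0
%%%         for t in range(1, len(points)):
%%%             j = min(range(t),
%%%                     key=lambda i: math.dist(points[i], points[t]))
%%%             edges.append((j, t))
%%%             total += math.dist(points[j], points[t])
%%%         return edges, total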

@InProceedings{Buchbinder:2013:SPE,
  author =       "Niv Buchbinder and Joseph (Seffi) Naor and Roy
                 Schwartz",
  title =        "Simplex partitioning via exponential clocks and the
                 multiway cut problem",
  crossref =     "ACM:2013:SPF",
  pages =        "535--544",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488675",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The Multiway-Cut problem is a fundamental graph
                 partitioning problem in which the objective is to find
                 a minimum weight set of edges disconnecting a given set
                 of special vertices called terminals. This problem is
                 NP-hard and there is a well known geometric relaxation
                 in which the graph is embedded into a high dimensional
                 simplex. Rounding a solution to the geometric
                 relaxation is equivalent to partitioning the simplex.
                 We present a novel simplex partitioning algorithm which
                  is based on {\em competing exponential clocks} and
                 distortion. Unlike previous methods, it utilizes cuts
                 that are not parallel to the faces of the simplex.
                 Applying this partitioning algorithm to the multiway
                 cut problem, we obtain a simple (4/3)-approximation
                 algorithm, thus, improving upon the current best known
                 result. This bound is further pushed to obtain an
                 approximation factor of 1.32388. It is known that under
                 the assumption of the unique games conjecture, the best
                 possible approximation for the Multiway-Cut problem can
                 be attained via the geometric relaxation.",
  acknowledgement = ack-nhfb,
}
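
%%% The basic "competing exponential clocks" primitive named in the
%%% title can be stated concretely: draw one clock Z_i ~ Exp(1) per
%%% terminal, shared by all vertices, and assign a vertex embedded at
%%% simplex point x to the terminal minimizing Z_i / x_i.  Since
%%% Z_i / x_i is exponential with rate x_i, terminal i wins with
%%% probability exactly x_i, and the shared clocks correlate nearby
%%% vertices.  The Python sketch below shows only this primitive, not
%%% the paper's full partitioning scheme (whose cuts are not parallel
%%% to the faces of the simplex).
%%%
%%%     import random
%%%
%%%     def exponential_clocks_assignment(xs, k):
%%%         """Assign each simplex point x in xs to a terminal in
%%%         0..k-1 with marginal probability x[i], using one shared
%%%         clock per terminal."""
%%%         Z = [random.expovariate(1.0) for _ in range(k)]
%%%         def winner(x):
%%%             return min(range(k), key=lambda i:
%%%                        Z[i] / x[i] if x[i] > 0 else float('inf'))
%%%         return [winner(x) for x in xs]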

@InProceedings{Gorbunov:2013:ABE,
  author =       "Sergey Gorbunov and Vinod Vaikuntanathan and Hoeteck
                 Wee",
  title =        "Attribute-based encryption for circuits",
  crossref =     "ACM:2013:SPF",
  pages =        "545--554",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488677",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "In an attribute-based encryption (ABE) scheme, a
                 ciphertext is associated with an $l$-bit public index
                 pind and a message $m$, and a secret key is associated
                  with a Boolean predicate $P$. The secret key allows
                  its holder to decrypt the ciphertext and learn $m$
                  iff $ P({\rm pind}) = 1 $. Moreover, the scheme
                  should be secure against
                 collusions of users, namely, given secret keys for
                 polynomially many predicates, an adversary learns
                 nothing about the message if none of the secret keys
                 can individually decrypt the ciphertext. We present
                  attribute-based encryption schemes for circuits of
                  arbitrary polynomial size, where the public parameters
                 and the ciphertext grow linearly with the depth of the
                 circuit. Our construction is secure under the standard
                 learning with errors (LWE) assumption. Previous
                 constructions of attribute-based encryption were for
                 Boolean formulas, captured by the complexity class
                 ${\rm NC}^1 $. In the course of our construction, we
                 present a new framework for constructing ABE
                 schemes. As a by-product of our framework, we obtain
                 ABE schemes for polynomial-size branching programs,
                 corresponding to the complexity class LOGSPACE, under
                 quantitatively better assumptions.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Goldwasser:2013:RGC,
  author =       "Shafi Goldwasser and Yael Kalai and Raluca Ada Popa
                 and Vinod Vaikuntanathan and Nickolai Zeldovich",
  title =        "Reusable garbled circuits and succinct functional
                 encryption",
  crossref =     "ACM:2013:SPF",
  pages =        "555--564",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488678",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Garbled circuits, introduced by Yao in the mid 80s,
                 allow computing a function f on an input x without
                 leaking anything about f or x besides f(x). Garbled
                 circuits found numerous applications, but every known
                 construction suffers from one limitation: it offers no
                 security if used on multiple inputs x. In this paper,
                 we construct for the first time reusable garbled
                 circuits. The key building block is a new succinct
                 single-key functional encryption scheme. Functional
                 encryption is an ambitious primitive: given an
                 encryption $ {\rm Enc}(x) $ of a value $x$, and a
                 secret key $ {\rm sk}_f $ for a function $f$, anyone
                 can compute $ f(x) $ without learning any other
                 information about $x$. We construct, for the first
                 time, a succinct functional encryption scheme for {\em
                  any} polynomial-time function $f$, where succinctness
                 means that the ciphertext size does not grow with the
                 size of the circuit for $f$, but only with its depth.
                 The security of our construction is based on the
                 intractability of the Learning with Errors (LWE)
                 problem and holds as long as an adversary has access to
                 a single key $ {\rm sk}_f $ (or even an a priori
                 bounded number of keys for different functions).
                 Building on our succinct single-key functional
                 encryption scheme, we show several new applications in
                 addition to reusable garbled circuits, such as a
                 paradigm for general function obfuscation which we call
                 token-based obfuscation, homomorphic encryption for a
                 class of Turing machines where the evaluation runs in
                 input-specific time rather than worst-case time, and a
                 scheme for delegating computation which is publicly
                 verifiable and maintains the privacy of the
                 computation.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kalai:2013:DBS,
  author =       "Yael Tauman Kalai and Ran Raz and Ron D. Rothblum",
  title =        "Delegation for bounded space",
  crossref =     "ACM:2013:SPF",
  pages =        "565--574",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488679",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We construct a 1-round delegation scheme for every
                  language computable in time $ t = t(n) $ and space $
                  s = s(n) $, where the running time of the prover is $
                  \poly (t) $ and the running time of the verifier is $
                  \tilde O(n + \poly (s)) $ (where $ \tilde O $ hides $
                  \polylog (t) $
                 factors). The proof exploits a curious connection
                 between the problem of computation delegation and the
                 model of multi-prover interactive proofs that are sound
                 against no-signaling (cheating) strategies, a model
                 that was studied in the context of multi-prover
                 interactive proofs with provers that share quantum
                 entanglement, and is motivated by the physical
                 principle that information cannot travel faster than
                 light. For any language computable in time $ t = t(n) $
                 and space $ s = s(n) $, we construct MIPs that are
                 sound against no-signaling strategies, where the
                 running time of the provers is $ \poly (t) $, the
                 number of provers is $ \tilde O(s) $, and the running
                 time of the verifier is $ \tilde O(s + n) $. We then
                  show how to use the method suggested by Aiello et al.
                  (ICALP 2000) to convert our MIP into a 1-round
                 delegation scheme, by using a computational private
                 information retrieval (PIR) scheme. Thus, assuming the
                 existence of a sub-exponentially secure PIR scheme, we
                 get our 1-round delegation scheme.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Brakerski:2013:CHL,
  author =       "Zvika Brakerski and Adeline Langlois and Chris Peikert
                 and Oded Regev and Damien Stehl{\'e}",
  title =        "Classical hardness of learning with errors",
  crossref =     "ACM:2013:SPF",
  pages =        "575--584",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488680",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We show that the Learning with Errors (LWE) problem is
                 classically at least as hard as standard worst-case
                 lattice problems. Previously this was only known under
                 quantum reductions. Our techniques capture the tradeoff
                 between the dimension and the modulus of LWE instances,
                 leading to a much better understanding of the landscape
                 of the problem. The proof is inspired by techniques
                 from several recent cryptographic constructions, most
                 notably fully homomorphic encryption schemes.",
  acknowledgement = ack-nhfb,
}
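
%%% To make the problem concrete: an LWE instance consists of samples
%%% (a_i, b_i) with b_i = <a_i, s> + e_i mod q for a secret s and
%%% small errors e_i, and the task is to recover s.  The Python sketch
%%% below generates such samples; the uniform error in place of a
%%% discrete Gaussian is a simplification of ours.
%%%
%%%     import random
%%%
%%%     def lwe_samples(n, m, q, err=3):
%%%         """m samples (a, <a, s> + e mod q) for a random secret s
%%%         in Z_q^n, with errors drawn uniformly from [-err, err]."""
%%%         s = [random.randrange(q) for _ in range(n)]
%%%         out = []
%%%         for _ in range(m):
%%%             a = [random.randrange(q) for _ in range(n)]
%%%             e = random.randint(-err, err)
%%%             out.append((a, (sum(x * y for x, y in zip(a, s)) + e) % q))
%%%         return s, out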

@InProceedings{Ben-Sasson:2013:CEP,
  author =       "Eli Ben-Sasson and Alessandro Chiesa and Daniel Genkin
                 and Eran Tromer",
  title =        "On the concrete efficiency of
                 probabilistically-checkable proofs",
  crossref =     "ACM:2013:SPF",
  pages =        "585--594",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488681",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Probabilistically-Checkable Proofs (PCPs) form the
                 algorithmic core that enables fast verification of long
                 computations in many cryptographic constructions. Yet,
                 despite the wonderful asymptotic savings they bring,
                 PCPs are also the infamous computational bottleneck
                 preventing these powerful cryptographic constructions
                 from being used in practice. To address this problem,
                 we present several results about the computational
                 efficiency of PCPs. We construct the first PCP where
                 the prover and verifier time complexities are
                 quasi-optimal (i.e., optimal up to poly-logarithmic
                 factors). The prover and verifier are also
                 highly-parallelizable, and these computational
                 guarantees hold even when proving and verifying the
                 correctness of random-access machine computations. Our
                 construction is explicit and has the requisite
                 properties for being used in the cryptographic
                 applications mentioned above. Next, to better
                 understand the efficiency of our PCP, we propose a new
                 efficiency measure for PCPs (and their major
                 components, locally-testable codes and PCPs of
                 proximity). We define a concrete-efficiency threshold
                 that indicates the smallest problem size beyond which
                 the PCP becomes ``useful'', in the sense that using it
                 is cheaper than performing naive verification (i.e.,
                 rerunning the computation); our definition accounts for
                 both the prover and verifier complexity. We then show
                 that our PCP has a finite concrete-efficiency
                 threshold. That such a PCP exists does not follow from
                 existing works on PCPs with polylogarithmic-time
                 verifiers. As in [Ben-Sasson and Sudan, STOC '05], PCPs
                 of proximity for Reed--Solomon (RS) codes are the main
                 component of our PCP. We construct a PCP of proximity
                 that reduces the concrete-efficiency threshold for
                 testing proximity to RS codes from 2$^{683}$ in their
                 work to 2$^{43}$, which is tantalizingly close to
                 practicality.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Cadek:2013:ECM,
  author =       "Martin Cadek and Marek Krcal and Jiri Matousek and
                 Lukas Vokrinek and Uli Wagner",
  title =        "Extending continuous maps: polynomiality and
                 undecidability",
  crossref =     "ACM:2013:SPF",
  pages =        "595--604",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488683",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We consider several basic problems of algebraic
                 topology, with connections to combinatorial and
                 geometric questions, from the point of view of
                 computational complexity. The extension problem asks,
                 given topological spaces $X$, $Y$, a subspace $ A
                 \subseteq X $, and a (continuous) map $ f : A \to Y $,
                 whether $f$ can be extended to a map $ X \to Y $. For
                  computational purposes, we assume that $X$ and $Y$ are
                 represented as finite simplicial complexes, $A$ is a
                 subcomplex of $X$, and $f$ is given as a simplicial
                 map. In this generality the problem is undecidable, as
                 follows from Novikov's result from the 1950s on
                 uncomputability of the fundamental group $ \pi_1 (Y) $.
                 We thus study the problem under the assumption that,
                 for some $ k \geq 2 $, $Y$ is $ (k - 1) $-connected;
                 informally, this means that $Y$ has ``no holes up to
                  dimension $ k - 1 $,'' i.e., the first $ k - 1 $
                 homotopy groups of $Y$ vanish (a basic example of such
                 a $Y$ is the sphere $ S^k $). We prove that, on the
                 one hand, this problem is still undecidable for $ \dim
                 X = 2 k $. On the other hand, for every fixed $ k \geq
                 2 $, we obtain an algorithm that solves the extension
                  problem in polynomial time assuming that $Y$ is $ (k - 1)
                 $-connected and $ \dim X \leq 2 k - 1 $. For $ \dim X
                 \leq 2 k - 2 $, the algorithm also provides a
                 classification of all extensions up to homotopy
                 (continuous deformation). This relies on results of our
                 SODA 2012 paper, and the main new ingredient is a
                 machinery of objects with polynomial-time homology,
                 which is a polynomial-time analog of objects with
                 effective homology developed earlier by Sergeraert et
                 al. We also consider the computation of the higher
                 homotopy groups $ \pi_k (Y) $, $ k \geq 2 $, for a
                 1-connected $Y$. Their computability was established by
                 Brown in 1957; we show that $ \pi_k(Y) $ can be
                 computed in polynomial time for every fixed $ k \geq 2
                 $. On the other hand, Anick proved in 1989 that
                 computing $ \pi_k(Y) $ is \#P-hard if $k$ is a part of
                 input, where $Y$ is a cell complex with certain rather
                 compact encoding. We strengthen his result to
                 \#P-hardness for $Y$ given as a simplicial complex.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Har-Peled:2013:NPL,
  author =       "Sariel Har-Peled and Benjamin Adam Raichel",
  title =        "Net and prune: a linear time algorithm for {Euclidean}
                 distance problems",
  crossref =     "ACM:2013:SPF",
  pages =        "605--614",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488684",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We provide a general framework for getting linear time
                 constant factor approximations (and in many cases
                  FPTAS's) to a copious number of well-known and
                  well-studied problems in Computational Geometry, such as
                 $k$-center clustering and furthest nearest neighbor.
                 The new approach is robust to variations in the input
                 problem, and yet it is simple, elegant and practical.
                 In particular, many of these well studied problems
                 which fit easily into our framework, either previously
                 had no linear time approximation algorithm, or required
                 rather involved algorithms and analysis. A short list
                 of the problems we consider include furthest nearest
                 neighbor, $k$-center clustering, smallest disk
                  enclosing $k$ points, $k$-th largest distance, $k$-th
                 smallest $m$-nearest neighbor distance, $k$-th heaviest
                 edge in the MST and other spanning forest type
                 problems, problems involving upward closed set systems,
                 and more. Finally, we show how to extend our framework
                 such that the linear running time bound holds with high
                 probability.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Caputo:2013:RLT,
  author =       "Pietro Caputo and Fabio Martinelli and Alistair
                 Sinclair and Alexandre Stauffer",
  title =        "Random lattice triangulations: structure and
                 algorithms",
  crossref =     "ACM:2013:SPF",
  pages =        "615--624",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488685",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The paper concerns lattice triangulations, i.e.,
                 triangulations of the integer points in a polygon in $
                 R^2 $ whose vertices are also integer points. Lattice
                 triangulations have been studied extensively both as
                 geometric objects in their own right and by virtue of
                 applications in algebraic geometry. Our focus is on
                 random triangulations in which a triangulation $ \sigma
                 $ has weight $ \lambda^{| \sigma |} $, where $ \lambda
                 $ is a positive real parameter and $ | \sigma | $ is
                 the total length of the edges in $ \sigma $.
                 Empirically, this model exhibits a ``phase transition''
                 at $ \lambda = 1 $ (corresponding to the uniform
                 distribution): for $ \lambda < 1 $ distant edges behave
                 essentially independently, while for $ \lambda > 1 $
                 very large regions of aligned edges appear. We
                 substantiate this picture as follows. For $ \lambda < 1
                 $ sufficiently small, we show that correlations between
                 edges decay exponentially with distance (suitably
                 defined), and also that the Glauber dynamics (a local
                 Markov chain based on flipping edges) is rapidly mixing
                 (in time polynomial in the number of edges). This
                 dynamics has been proposed by several authors as an
                 algorithm for generating random triangulations. By
                 contrast, for $ \lambda > 1 $ we show that the mixing
                 time is exponential. These are apparently the first
                 rigorous quantitative results on spatial mixing
                 properties and dynamics of random lattice
                 triangulations.",
  acknowledgement = ack-nhfb,
}
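
%%% The edge-flip dynamics above has a standard Metropolis variant
%%% whose stationary distribution gives triangulation sigma weight
%%% proportional to lambda ** |sigma|.  In the Python sketch below the
%%% triangulation interface (edges, is_flippable, length_delta, flip)
%%% is hypothetical; length_delta(e) is the change in total edge
%%% length caused by flipping e.
%%%
%%%     import random
%%%
%%%     def metropolis_flip_step(tri, lam):
%%%         """Pick a uniformly random edge; if it is flippable, flip
%%%         it with probability min(1, lam ** length_delta), which
%%%         makes the chain reversible w.r.t. lam ** |sigma|."""
%%%         e = random.choice(list(tri.edges()))
%%%         if not tri.is_flippable(e):
%%%             return                      # lazy move: keep state
%%%         if random.random() < min(1.0, lam ** tri.length_delta(e)):
%%%             tri.flip(e)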

@InProceedings{Sinclair:2013:LYT,
  author =       "Alistair Sinclair and Piyush Srivastava",
  title =        "{Lee--Yang} theorems and the complexity of computing
                 averages",
  crossref =     "ACM:2013:SPF",
  pages =        "625--634",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488686",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We study the complexity of computing average
                 quantities related to spin systems, such as the mean
                 magnetization and susceptibility in the ferromagnetic
                 Ising model, and the average dimer count (or average
                 size of a matching) in the monomer-dimer model. By
                 establishing connections between the complexity of
                 computing these averages and the location of the
                 complex zeros of the partition function, we show that
                  these averages are \#P-hard to compute. In the case
                  of the Ising model, our approach requires us to prove an
                 extension of the famous Lee--Yang Theorem from the
                 1950s.",
  acknowledgement = ack-nhfb,
}
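
%%% The averages in question can be written exactly as ratios of
%%% partition-function-like sums, which is easy to evaluate by brute
%%% force on small graphs (and exponentially costly in general,
%%% consistent with the #P-hardness shown above).  A Python sketch for
%%% the mean magnetization of the ferromagnetic Ising model with
%%% external field h (names are ours):
%%%
%%%     from itertools import product
%%%     import math
%%%
%%%     def mean_magnetization(edges, n, beta, h=0.0):
%%%         """<(1/n) sum_v s_v> by summing over all 2^n spin
%%%         configurations s in {-1, +1}^n."""
%%%         Z, M = 0.0, 0.0
%%%         for s in product((-1, 1), repeat=n):
%%%             energy = sum(s[u] * s[v] for u, v in edges) + h * sum(s)
%%%             w = math.exp(beta * energy)
%%%             Z += w
%%%             M += w * sum(s) / n
%%%         return M / Z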

@InProceedings{Cai:2013:CDR,
  author =       "Jin-Yi Cai and Heng Guo and Tyson Williams",
  title =        "A complete dichotomy rises from the capture of
                 vanishing signatures: extended abstract",
  crossref =     "ACM:2013:SPF",
  pages =        "635--644",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488687",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We prove a complexity dichotomy theorem for Holant
                 problems over an arbitrary set of complex-valued
                  symmetric constraint functions $ {\cal F} $ on Boolean
                 variables. This extends and unifies all previous
                 dichotomies for Holant problems on symmetric constraint
                 functions (taking values without a finite modulus). We
                 define and characterize all symmetric vanishing
                  signatures. They turn out to be essential to the
                 complete classification of Holant problems. The
                 dichotomy theorem has an explicit tractability
                  criterion. A Holant problem defined by a set of
                  constraint functions $ {\cal F} $ is solvable in
                  polynomial time if it satisfies this tractability
                  criterion, and is \#P-hard otherwise. The
                  tractability criterion can be intuitively stated as
                  follows: a set $ {\cal F} $ is tractable if (1) every
                  function in $ {\cal F} $ has arity at most two, or
                  (2) $ {\cal F} $ is transformable to an affine type,
                  or (3) $ {\cal F} $ is transformable to a product
                  type, or (4) $ {\cal F} $ is vanishing, combined with
                  the right type of binary functions, or (5) $ {\cal F}
                  $ belongs to a special category of vanishing-type
                  Fibonacci gates. The proof of this
                 theorem utilizes many previous dichotomy theorems on
                 Holant problems and Boolean \#CSP. Holographic
                 transformations play an indispensable role, not only as
                 a proof technique, but also in the statement of the
                 dichotomy criterion.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Miller:2013:SLO,
  author =       "Gary L. Miller",
  title =        "Solving large optimization problems using spectral
                 graph theory",
  crossref =     "ACM:2013:SPF",
  pages =        "981--981",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488689",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Spectral Graph Theory is the interplay between linear
                 algebra and combinatorial graph theory. One application
                 of this interplay is a nearly linear time solver for
                 Symmetric Diagonally Dominate systems (SDD). This
                 seemingly restrictive class of systems has received
                 much interest in the last 15 years. Both algorithm
                 design theory and practical implementations have made
                 substantial progress. There is also a growing number of
                 problems that can be efficiently solved using SDD
                 solvers including: image segmentation, image denoising,
                 finding solutions to elliptic equations, computing
                 maximum flow in a graph, graph sparsification, and
                  graphics. All these examples can be viewed as special
                  cases of convex optimization problems.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Elkin:2013:OES,
  author =       "Michael Elkin and Shay Solomon",
  title =        "Optimal {Euclidean} spanners: really short, thin and
                 lanky",
  crossref =     "ACM:2013:SPF",
  pages =        "645--654",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488691",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The degree, the (hop-)diameter, and the weight are the
                 most basic and well-studied parameters of geometric
                 spanners. In a seminal STOC'95 paper, titled
                 ``Euclidean spanners: short, thin and lanky'', Arya et
                 al. [2] devised a construction of Euclidean $ (1 +
                 \epsilon) $-spanners that achieves constant degree,
                 diameter $ O(\log n) $, weight $ O(\log^2 n) \cdot
                 \omega ({\rm MST}) $, and has running time $ O(n \cdot
                 \log n) $. This construction applies to $n$-point
                 constant-dimensional Euclidean spaces. Moreover, Arya
                 et al. conjectured that the weight bound can be
                 improved by a logarithmic factor, without increasing
                 the degree and the diameter of the spanner, and within
                 the same running time. This conjecture of Arya et al.
                 became one of the most central open problems in the
                 area of Euclidean spanners. Nevertheless, the only
                 progress since 1995 towards its resolution was achieved
                  on the lower bounds front: Any spanner with diameter $
                 O(\log n) $ must incur weight $ \Omega (\log n) \cdot
                 \omega ({\rm MST}) $, and this lower bound holds
                 regardless of the stretch or the degree of the spanner
                 [12, 1]. In this paper we resolve the long-standing
                 conjecture of Arya et al. in the affirmative. We
                 present a spanner construction with the same stretch,
                 degree, diameter, and running time, as in Arya et al.'s
                 result, but with optimal weight $ O(\log n) \cdot
                 \omega ({\rm MST}) $. So our spanners are as thin and
                 lanky as those of Arya et al., but they are really
                 short! Moreover, our result is more general in three
                 ways. First, we demonstrate that the conjecture holds
                 true not only in constant-dimensional Euclidean spaces,
                 but also in doubling metrics. Second, we provide a
                 general tradeoff between the three involved parameters,
                 which is tight in the entire range. Third, we devise a
                 transformation that decreases the lightness of spanners
                 in general metrics, while keeping all their other
                 parameters in check. Our main result is obtained as a
                 corollary of this transformation.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Feldman:2013:SAL,
  author =       "Vitaly Feldman and Elena Grigorescu and Lev Reyzin and
                 Santosh Vempala and Ying Xiao",
  title =        "Statistical algorithms and a lower bound for detecting
                 planted cliques",
  crossref =     "ACM:2013:SPF",
  pages =        "655--664",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488692",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We introduce a framework for proving lower bounds on
                 computational problems over distributions, based on a
                 class of algorithms called statistical algorithms. For
                 such algorithms, access to the input distribution is
                 limited to obtaining an estimate of the expectation of
                 any given function on a sample drawn randomly from the
                 input distribution, rather than directly accessing
                 samples. Most natural algorithms of interest in theory
                 and in practice, e.g., moments-based methods, local
                 search, standard iterative methods for convex
                 optimization, MCMC and simulated annealing, are
                 statistical algorithms or have statistical
                 counterparts. Our framework is inspired by and
                  generalizes the statistical query model in learning
                 theory [34]. Our main application is a nearly optimal
                 lower bound on the complexity of any statistical
                 algorithm for detecting planted bipartite clique
                 distributions (or planted dense subgraph distributions)
                 when the planted clique has size $ O(n^{1 / 2 -
                 \delta}) $ for any constant $ \delta > 0 $. Variants of
                  these problems have been assumed to be hard in order
                  to prove hardness for other problems and for cryptographic
                 applications.  Our lower bounds provide concrete
                 evidence of hardness, thus supporting these
                 assumptions.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Jain:2013:LRM,
  author =       "Prateek Jain and Praneeth Netrapalli and Sujay
                 Sanghavi",
  title =        "Low-rank matrix completion using alternating
                 minimization",
  crossref =     "ACM:2013:SPF",
  pages =        "665--674",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488693",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Alternating minimization represents a widely
                 applicable and empirically successful approach for
                 finding low-rank matrices that best fit the given data.
                 For example, for the problem of low-rank matrix
                 completion, this method is believed to be one of the
                 most accurate and efficient, and formed a major
                 component of the winning entry in the Netflix Challenge
                 [17]. In the alternating minimization approach, the
                 low-rank target matrix is written in a bi-linear form,
                  i.e., $ X = U V^\dagger $; the algorithm then alternates between
                 finding the best U and the best V. Typically, each
                 alternating step in isolation is convex and tractable.
                  However, the overall problem becomes non-convex and is
                 prone to local minima. In fact, there has been almost
                 no theoretical understanding of when this approach
                 yields a good result. In this paper we present one of
                 the first theoretical analyses of the performance of
                 alternating minimization for matrix completion, and the
                 related problem of matrix sensing. For both these
                 problems, celebrated recent results have shown that
                 they become well-posed and tractable once certain (now
                 standard) conditions are imposed on the problem. We
                 show that alternating minimization also succeeds under
                 similar conditions. Moreover, compared to existing
                 results, our paper shows that alternating minimization
                 guarantees faster (in particular, geometric)
                 convergence to the true matrix, while allowing a
                 significantly simpler analysis.",
  acknowledgement = ack-nhfb,
}
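
%%% The alternating-minimization scheme described in the abstract above
%%% admits a compact sketch.  The following Python/NumPy fragment is a
%%% minimal illustration of alternating least squares for matrix
%%% completion; it is not the paper's exact algorithm (which also
%%% specifies the initialization and sample-splitting needed for its
%%% guarantees), and all names are illustrative:
%%%
%%%     import numpy as np
%%%
%%%     def altmin_complete(M, mask, k, iters=50):
%%%         """Fit X = U V^T to the observed entries of M (mask is boolean)."""
%%%         m, n = M.shape
%%%         rng = np.random.default_rng(0)
%%%         U = rng.standard_normal((m, k))
%%%         V = rng.standard_normal((n, k))
%%%         for _ in range(iters):
%%%             # Fix V; each row of U solves a small convex least-squares problem.
%%%             for i in range(m):
%%%                 obs = mask[i]
%%%                 if obs.any():
%%%                     U[i] = np.linalg.lstsq(V[obs], M[i, obs], rcond=None)[0]
%%%             # Fix U; update the rows of V symmetrically.
%%%             for j in range(n):
%%%                 obs = mask[:, j]
%%%                 if obs.any():
%%%                     V[j] = np.linalg.lstsq(U[obs], M[obs, j], rcond=None)[0]
%%%         return U @ V.T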

@InProceedings{Alon:2013:ARM,
  author =       "Noga Alon and Troy Lee and Adi Shraibman and Santosh
                 Vempala",
  title =        "The approximate rank of a matrix and its algorithmic
                 applications: approximate rank",
  crossref =     "ACM:2013:SPF",
  pages =        "675--684",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488694",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We study the $ \epsilon $-rank of a real matrix A,
                 defined for any $ \epsilon > 0 $ as the minimum rank
                 over matrices that approximate every entry of $A$ to
                 within an additive $ \epsilon $. This parameter is
                 connected to other notions of approximate rank and is
                 motivated by problems from various topics including
                 communication complexity, combinatorial optimization,
                 game theory, computational geometry and learning
                 theory. Here we give bounds on the $ \epsilon $-rank
                 and use them for algorithmic applications. Our main
                 algorithmic results are (a) polynomial-time additive
                 approximation schemes for Nash equilibria for 2-player
                 games when the payoff matrices are positive
                 semidefinite or have logarithmic rank and (b) an
                 additive PTAS for the densest subgraph problem for
                 similar classes of weighted graphs. We use
                 combinatorial, geometric and spectral techniques; our
                 main new tool is an algorithm for efficiently covering
                 a convex body with translates of another convex body.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Harris:2013:CSP,
  author =       "David G. Harris and Aravind Srinivasan",
  title =        "Constraint satisfaction, packet routing, and the
                 {Lovasz} local lemma",
  crossref =     "ACM:2013:SPF",
  pages =        "685--694",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488696",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Constraint-satisfaction problems (CSPs) form a basic
                 family of NP-hard optimization problems that includes
                 satisfiability. Motivated by the sufficient condition
                 for the satisfiability of SAT formulae that is offered
                 by the Lovasz Local Lemma, we seek such sufficient
                 conditions for arbitrary CSPs. To this end, we identify
                 a variable-covering radius--type parameter for the
                 infeasible configurations of a given CSP, and also
                 develop an extension of the Lovasz Local Lemma in which
                 many of the events to be avoided have probabilities
                 arbitrarily close to one; these lead to a general
                 sufficient condition for the satisfiability of
                 arbitrary CSPs. One primary application is to
                 packet-routing in the classical Leighton-Maggs-Rao
                 setting, where we introduce several additional ideas in
                 order to prove the existence of near-optimal schedules;
                 further applications in combinatorial optimization are
                 also shown.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Thapper:2013:CFV,
  author =       "Johan Thapper and Stanislav Zivny",
  title =        "The complexity of finite-valued {CSPs}",
  crossref =     "ACM:2013:SPF",
  pages =        "695--704",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488697",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Let $ \Gamma $ be a set of rational-valued functions
                 on a fixed finite domain; such a set is called a
                 finite-valued constraint language. The valued
                 constraint satisfaction problem, VCSP( $ \Gamma $), is
                 the problem of minimising a function given as a sum of
                  functions from $ \Gamma $. We establish a dichotomy
                 theorem with respect to exact solvability for all
                 finite-valued languages defined on domains of arbitrary
                 finite size. We show that every core language $ \Gamma
                 $ either admits a binary idempotent and symmetric
                  fractional polymorphism, in which case the basic linear
                 programming relaxation solves any instance of VCSP( $
                 \Gamma $) exactly, or $ \Gamma $ satisfies a simple
                 hardness condition that allows for a polynomial-time
                 reduction from Max-Cut to VCSP( $ \Gamma $). In other
                 words, there is a single algorithm for all tractable
                 cases and a single reason for intractability. Our
                 results show that for exact solvability of VCSPs the
                 basic linear programming relaxation suffices and
                 semidefinite relaxations do not add any power. Our
                 results generalise all previous partial classifications
                 of finite-valued languages: the classification of
                 {0,1}-valued languages containing all unary functions
                 obtained by Deineko et al. [JACM'06]; the
                 classifications of {0,1}-valued languages on
                 two-element, three-element, and four-element domains
                 obtained by Creignou [JCSS'95], Jonsson et al.
                  [SICOMP'06], and Jonsson et al. [CP'11], respectively;
                 the classifications of finite-valued languages on
                 two-element and three-element domains obtained by Cohen
                 et al. [AIJ'06] and Huber et al. [SODA'13],
                 respectively; the classification of finite-valued
                 languages containing all {0,1}-valued unary functions
                 obtained by Kolmogorov and Zivny [JACM'13]; and the
                 classification of Min-0-Ext problems obtained by Hirai
                 [SODA'13].",
  acknowledgement = ack-nhfb,
}

@InProceedings{Coja-Oghlan:2013:GAK,
  author =       "Amin Coja-Oghlan and Konstantinos Panagiotou",
  title =        "Going after the {k-SAT} threshold",
  crossref =     "ACM:2013:SPF",
  pages =        "705--714",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488698",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Random $k$-SAT is the single most intensely studied
                 example of a random constraint satisfaction problem.
                 But despite substantial progress over the past decade,
                 the threshold for the existence of satisfying
                 assignments is not known precisely for any $ k \geq 3
                 $. The best current results, based on the second moment
                 method, yield upper and lower bounds that differ by an
                 additive $ k \cdot {\ln 2} / 2 $, a term that is
                 unbounded in $k$ (Achlioptas, Peres: STOC 2003). The
                 basic reason for this gap is the inherent asymmetry of
                 the Boolean values 'true' and 'false' in contrast to
                 the perfect symmetry, e.g., among the various colors in
                 a graph coloring problem. Here we develop a new
                 asymmetric second moment method that allows us to
                 tackle this issue head on for the first time in the
                 theory of random CSPs. This technique enables us to
                 compute the $k$-SAT threshold up to an additive $ \ln 2
                 - 1 / 2 + O(1 / k) \sim 0.19 $. Independently of the
                 rigorous work, physicists have developed a
                 sophisticated but non-rigorous technique called the
                 ``cavity method'' for the study of random CSPs (Mezard,
                 Parisi, Zecchina: Science~2002). Our result matches the
                 best bound that can be obtained from the so-called
                 ``replica symmetric'' version of the cavity method, and
                 indeed our proof directly harnesses parts of the
                 physics calculations.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kol:2013:ICC,
  author =       "Gillat Kol and Ran Raz",
  title =        "Interactive channel capacity",
  crossref =     "ACM:2013:SPF",
  pages =        "715--724",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488699",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We study the interactive channel capacity of an $
                 \epsilon $-noisy channel. The interactive channel
                 capacity $ C(\epsilon) $ is defined as the minimal
                 ratio between the communication complexity of a problem
                 (over a non-noisy channel), and the communication
                 complexity of the same problem over the binary
                 symmetric channel with noise rate $ \epsilon $, where
                 the communication complexity tends to infinity. Our
                 main result is the upper bound $ C(\epsilon) \leq 1 -
                  \Omega (\sqrt {H(\epsilon)}) $. This compares with
                 Shannon's non-interactive channel capacity of $ 1 -
                 H(\epsilon) $. In particular, for a small enough $
                 \epsilon $, our result gives the first separation
                 between interactive and non-interactive channel
                 capacity, answering an open problem by Schulman
                 [Schulman1]. We complement this result by the lower
                  bound $ C(\epsilon) \geq 1 - O(\sqrt {H(\epsilon)}) $,
                 proved for the case where the players take alternating
                 turns.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Bernstein:2013:MSP,
  author =       "Aaron Bernstein",
  title =        "Maintaining shortest paths under deletions in weighted
                 directed graphs: [extended abstract]",
  crossref =     "ACM:2013:SPF",
  pages =        "725--734",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488701",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We present an improved algorithm for maintaining
                 all-pairs $ 1 + \epsilon $ approximate shortest paths
                 under deletions and weight-increases. The previous
                 state of the art for this problem was total update time
                 $ \tilde O (n^2 \sqrt m / \epsilon) $ for directed,
                 unweighted graphs [2], and $ \tilde O(m n / \epsilon) $
                 for undirected, unweighted graphs [12]. Both algorithms
                 were randomized and had constant query time. Note that
                 $ \tilde O(m n) $ is a natural barrier because even
                 with a $ (1 + \epsilon) $ approximation, there is no $
                 o(m n) $ combinatorial algorithm for the static
                 all-pairs shortest path problem. Our algorithm works on
                 directed, weighted graphs and has total (randomized)
                 update time $ \tilde O (m n \log (R) / \epsilon) $
                 where $R$ is the ratio of the largest edge weight ever
                 seen in the graph, to the smallest such weight (our
                 query time is constant). Note that $ \log (R) = O(\log
                 (n)) $ as long as weights are polynomial in $n$.
                 Although $ \tilde O(m n \log (R) / \epsilon) $ is the
                 total time over all updates, our algorithm also
                  requires a (clearly unavoidable) constant time per
                 update. Thus, we effectively expand the $ \tilde O(m n)
                 $ total update time bound from undirected, unweighted
                 graphs to directed graphs with polynomial weights. This
                 is in fact the first non-trivial algorithm for
                 decremental all-pairs shortest paths that works on
                 weighted graphs (previous algorithms could only handle
                 small integer weights). By a well known reduction from
                 decremental algorithms to fully dynamic ones [9], our
                 improved decremental algorithm leads to improved
                 query-update tradeoffs for fully dynamic $ (1 +
                  \epsilon) $ approximate APSP algorithms in directed
                 graphs.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Eisenstat:2013:LTA,
  author =       "David Eisenstat and Philip N. Klein",
  title =        "Linear-time algorithms for max flow and
                 multiple-source shortest paths in unit-weight planar
                 graphs",
  crossref =     "ACM:2013:SPF",
  pages =        "735--744",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488702",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We give simple linear-time algorithms for two problems
                 in planar graphs: max st-flow in directed graphs with
                 unit capacities, and multiple-source shortest paths in
                 undirected graphs with unit lengths.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Neiman:2013:SDA,
  author =       "Ofer Neiman and Shay Solomon",
  title =        "Simple deterministic algorithms for fully dynamic
                 maximal matching",
  crossref =     "ACM:2013:SPF",
  pages =        "745--754",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488703",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "A maximal matching can be maintained in fully dynamic
                 (supporting both addition and deletion of edges)
                 $n$-vertex graphs using a trivial deterministic
                 algorithm with a worst-case update time of $ O(n) $. No
                 deterministic algorithm that outperforms the naive $
                 O(n) $ one was reported up to this date. The only
                 progress in this direction is due to Ivkovic and Lloyd
                 [14], who in 1993 devised a deterministic algorithm
                  with an amortized update time of $ O((n + m)^{\sqrt 2 /
                  2}) $, where $m$ is the number of edges. In this paper we
                 show the first deterministic fully dynamic algorithm
                 that outperforms the trivial one. Specifically, we
                 provide a deterministic worst-case update time of $
                 O(\sqrt m) $. Moreover, our algorithm maintains a
                 matching which is in fact a 3/2-approximate maximum
                 cardinality matching (MCM). We remark that no fully
                 dynamic algorithm for maintaining $ (2 - \epsilon)
                 $-approximate MCM improving upon the naive $ O(n) $ was
                 known prior to this work, even allowing amortized time
                 bounds and randomization. For low arboricity graphs
                 (e.g., planar graphs and graphs excluding fixed
                 minors), we devise another simple deterministic
                 algorithm with sub-logarithmic update time.
                 Specifically, it maintains a fully dynamic maximal
                 matching with amortized update time of $ O(\log n /
                 \log \log n) $. This result addresses an open question
                 of Onak and Rubinfeld [19]. We also show a
                 deterministic algorithm with optimal space usage of $
                 O(n + m) $, that for arbitrary graphs maintains a
                 maximal matching with amortized update time of $
                 O(\sqrt m) $.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Lee:2013:NAC,
  author =       "Yin Tat Lee and Satish Rao and Nikhil Srivastava",
  title =        "A new approach to computing maximum flows using
                 electrical flows",
  crossref =     "ACM:2013:SPF",
  pages =        "755--764",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488704",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We give an algorithm which computes a $ (1 - \epsilon)
                 $-approximately maximum st-flow in an undirected
                  uncapacitated graph in time $ O((1 / \epsilon) \sqrt {m
                  / F} \cdot m \log^2 n) $, where $F$ is the flow value. By
                 trading this off against the Karger-Levine algorithm
                 for undirected graphs which takes $ \tilde O(m + n F) $
                 time, we obtain a running time of $ \tilde O(m n^{1 /
                 3} / \epsilon^{2 / 3}) $ for uncapacitated graphs,
                 improving the previous best dependence on $ \epsilon $
                 by a factor of $ O(1 / \epsilon^3) $. Like the
                 algorithm of Christiano, Kelner, Madry, Spielman and
                 Teng, our algorithm reduces the problem to electrical
                 flow computations which are carried out in linear time
                 using fast Laplacian solvers. However, in contrast to
                 previous work, our algorithm does not reweight the
                 edges of the graph in any way, and instead uses local
                 (i.e., non s-t) electrical flows to reroute the flow on
                 congested edges. The algorithm is simple and may be
                 viewed as trying to find a point at the intersection of
                 two convex sets (the affine subspace of st-flows of
                 value $F$ and the $ l_\infty $ ball) by an accelerated
                 version of the method of alternating projections due to
                 Nesterov. By combining this with Ford and Fulkerson's
                 augmenting paths algorithm, we obtain an exact
                 algorithm with running time $ \tilde O(m^{5 / 4} F^{1 /
                 4}) $ for uncapacitated undirected graphs, improving
                 the previous best running time of $ \tilde O(m + \min
                 (n F, m^{3 / 2})) $. We give a related algorithm with
                 the same running time for approximate minimum cut,
                 based on minimizing a smoothed version of the $ l_1 $
                 norm inside the cut space of the input graph. We show
                 that the minimizer of this norm is related to an
                 approximate blocking flow and use this to give an
                 algorithm for computing a length $k$ approximately
                 blocking flow in time $ \tilde O(m \sqrt k) $.",
  acknowledgement = ack-nhfb,
}
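
%%% The tradeoff claimed in the abstract above can be checked by direct
%%% arithmetic (a sketch by the annotator, suppressing logarithmic
%%% factors): the electrical-flow algorithm costs roughly
%%% $ (1/\epsilon) m \sqrt{m/F} $, while Karger--Levine costs roughly
%%% $ m + nF $.  Balancing the dominant terms,
%%%
%%%   $$ nF = \frac{m^{3/2}}{\epsilon \sqrt{F}}
%%%      \;\Longrightarrow\;
%%%      F = \frac{m}{n^{2/3} \epsilon^{2/3}}
%%%      \;\Longrightarrow\;
%%%      nF = \frac{m n^{1/3}}{\epsilon^{2/3}}, $$
%%%
%%% which recovers the stated $ \tilde O(m n^{1/3} / \epsilon^{2/3}) $
%%% running time for uncapacitated graphs.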

@InProceedings{Orlin:2013:MFN,
  author =       "James B. Orlin",
  title =        "{Max} flows in {$ O(n m) $} time, or better",
  crossref =     "ACM:2013:SPF",
  pages =        "765--774",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488705",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "In this paper, we present improved polynomial time
                 algorithms for the max flow problem defined on sparse
                 networks with $n$ nodes and $m$ arcs. We show how to
                 solve the max flow problem in $ O(n m + m^{31 / 16}
                 \log^2 n) $ time. In the case that $ m = O(n^{1.06}) $,
                 this improves upon the best previous algorithm due to
                 King, Rao, and Tarjan, who solved the max flow problem
                 in $ O(n m \log_{m / (n \log n)} n) $ time. This
                 establishes that the max flow problem is solvable in $
                 O(n m) $ time for all values of $n$ and $m$. In the
                 case that $ m = O(n) $, we improve the running time to
                 $ O(n^2 / \log n) $.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Bringmann:2013:SSD,
  author =       "Karl Bringmann and Kasper Green Larsen",
  title =        "Succinct sampling from discrete distributions",
  crossref =     "ACM:2013:SPF",
  pages =        "775--782",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488707",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We revisit the classic problem of sampling from a
                  discrete distribution: Given $n$ non-negative $w$-bit
                  integers $ x_1, \ldots {}, x_n $, the task is to build
                  a data structure that allows sampling $i$ with
                  probability proportional to $ x_i $. The classic
                  solution is Walker's
                 alias method that takes, when implemented on a Word
                 RAM, $ O(n) $ preprocessing time, $ O(1) $ expected
                 query time for one sample, and $ n(w + 2 \lg (n) +
                 o(1)) $ bits of space. Using the terminology of
                 succinct data structures, this solution has redundancy
                 $ 2 n \lg (n) + o(n) $ bits, i.e., it uses $ 2 n \lg
                 (n) + o(n) $ bits in addition to the information
                 theoretic minimum required for storing the input. In
                 this paper, we study whether this space usage can be
                 improved. In the systematic case, in which the input is
                 read-only, we present a novel data structure using $ r
                 + O(w) $ redundant bits, $ O(n / r) $ expected query
                 time and $ O(n) $ preprocessing time for any $r$. This
                 is an improvement in redundancy by a factor of $ \Omega
                 (\log n) $ over the alias method for $ r = n $, even
                 though the alias method is not systematic. Moreover, we
                 complement our data structure with a lower bound
                 showing that this trade-off is tight for systematic
                 data structures. In the non-systematic case, in which
                 the input numbers may be represented in more clever
                 ways than just storing them one-by-one, we demonstrate
                 a very surprising separation from the systematic case:
                 With only 1 redundant bit, it is possible to support
                 optimal $ O(1) $ expected query time and $ O(n) $
                 preprocessing time! On the one hand, our results
                 improve upon the space requirement of the classic
                  solution for a fundamental sampling problem; on the
                 other hand, they provide the strongest known separation
                 between the systematic and non-systematic case for any
                 data structure problem. Finally, we also believe our
                 upper bounds are practically efficient and simpler than
                 Walker's alias method.",
  acknowledgement = ack-nhfb,
}
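
%%% Walker's alias method, the classic solution recalled in the
%%% abstract above, can be sketched in a few lines of Python.  This is
%%% a generic textbook rendering (Vose's variant), not the succinct
%%% data structure of the paper; names are illustrative:
%%%
%%%     import random
%%%
%%%     def build_alias(x):
%%%         """O(n) preprocessing: each slot keeps one index plus an alias."""
%%%         n = len(x)
%%%         total = sum(x)
%%%         prob = [xi * n / total for xi in x]     # scaled so the mean is 1
%%%         alias = [0] * n
%%%         small = [i for i, p in enumerate(prob) if p < 1.0]
%%%         large = [i for i, p in enumerate(prob) if p >= 1.0]
%%%         while small and large:
%%%             s, l = small.pop(), large.pop()
%%%             alias[s] = l                        # slot s overflows into l
%%%             prob[l] -= 1.0 - prob[s]
%%%             (small if prob[l] < 1.0 else large).append(l)
%%%         for i in small + large:                 # leftovers: probability 1
%%%             prob[i] = 1.0
%%%         return prob, alias
%%%
%%%     def sample(prob, alias):
%%%         """O(1) query: pick a slot, then keep it or take its alias."""
%%%         i = random.randrange(len(prob))
%%%         return i if random.random() < prob[i] else alias[i]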

@InProceedings{Li:2013:NIS,
  author =       "Xin Li",
  title =        "New independent source extractors with exponential
                 improvement",
  crossref =     "ACM:2013:SPF",
  pages =        "783--792",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488708",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We study the problem of constructing explicit
                 extractors for independent general weak random sources.
                 For weak sources on $n$ bits with min-entropy $k$,
                 previously the best known extractor needs to use at
                 least $ \log n / \log k $ independent sources [22, 3].
                 In this paper we give a new extractor that only uses $
                 O(\log (\log n / \log k)) + O(1) $ independent sources.
                 Thus, our result improves the previous best result
                 exponentially. We then use our new extractor to give
                 improved network extractor protocols, as defined in
                 [14]. The network extractor protocols also give new
                 results in distributed computing with general weak
                 random sources, which dramatically improve previous
                 results. For example, we can tolerate a nearly optimal
                 fraction of faulty players in synchronous Byzantine
                 agreement and leader election, even if the players only
                 have access to independent $n$-bit weak random sources
                 with min-entropy as small as $ k = \polylog (n) $. Our
                 extractor for independent sources is based on a new
                 condenser for somewhere random sources with a special
                 structure. We believe our techniques are interesting in
                 their own right and are promising for further
                 improvement.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Rothblum:2013:IPP,
  author =       "Guy N. Rothblum and Salil Vadhan and Avi Wigderson",
  title =        "Interactive proofs of proximity: delegating
                 computation in sublinear time",
  crossref =     "ACM:2013:SPF",
  pages =        "793--802",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488709",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We study interactive proofs with sublinear-time
                 verifiers. These proof systems can be used to ensure
                 approximate correctness for the results of computations
                 delegated to an untrusted server. Following the
                 literature on property testing, we seek proof systems
                 where with high probability the verifier accepts every
                 input in the language, and rejects every input that is
                 far from the language. The verifier's query complexity
                 (and computation complexity), as well as the
                 communication, should all be sublinear. We call such a
                 proof system an Interactive Proof of Proximity (IPP).
                 On the positive side, our main result is that all
                 languages in NC have Interactive Proofs of Proximity
                  with roughly $ \sqrt n $ query and communication
                 complexities, and $ \polylog (n) $ communication
                 rounds. This is achieved by identifying a natural
                 language, membership in an affine subspace (for a
                 structured class of subspaces), that is complete for
                 constructing interactive proofs of proximity, and
                 providing efficient protocols for it. In building an
                 IPP for this complete language, we show a tradeoff
                 between the query and communication complexity and the
                 number of rounds. For example, we give a 2-round
                 protocol with roughly $ n^{3 / 4} $ queries and
                 communication. On the negative side, we show that there
                  exist natural languages in NC$^1$, for which the sum
                 of queries and communication in any constant-round
                 interactive proof of proximity must be polynomially
                 related to $n$. In particular, for any 2-round
                 protocol, the sum of queries and communication must be
                 at least $ \tilde \Omega (\sqrt n) $. Finally, we
                 construct much better IPPs for specific functions, such
                 as bipartiteness on random or well-mixing graphs, and
                 the majority function. The query complexities of these
                 protocols are provably better (by exponential or
                 polynomial factors) than what is possible in the
                 standard property testing model, i.e. without a
                 prover.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Ajtai:2013:LBR,
  author =       "Miklos Ajtai",
  title =        "Lower bounds for {RAMs} and quantifier elimination",
  crossref =     "ACM:2013:SPF",
  pages =        "803--812",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488710",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "For each natural number $d$ we consider a finite
                 structure $ M_d $ whose universe is the set of all $ 0,
                  1 $-sequences of length $ n = 2^d $, each representing a
                 natural number in the set $ \{ 0, 1, \ldots {}, 2^n - 1
                 \} $ in binary form. The operations included in the
                 structure are the four constants $ 0, 1, 2^n - 1, n $,
                 multiplication and addition modulo $ 2^n $, the unary
                  function $ \min \{ 2^x, 2^n - 1 \} $, the binary
                 functions $ \lfloor x / y \rfloor $ (with $ \lfloor x /
                 0 \rfloor = 0 $), $ \max (x, y) $, $ \min (x, y) $,
                  and the boolean vector operations $ \vee $, $ \neg $,
                 defined on $ 0, 1 $ sequences of length $n$, by
                 performing the operations on all components
                 simultaneously. These are essentially the arithmetic
                 operations that can be performed on a RAM, with
                 wordlength $n$, by a single instruction. We show that
                 there exists an $ \epsilon > 0 $ and a term (that is,
                 an algebraic expression) $ F(x, y) $ built up from the
                 mentioned operations, with the only free variables $x$,
                 $y$, such that if $ G_d(y), d = 0, 1, 2, \ldots {} $,
                 is a sequence of terms, and for all $ d = 0, 1, 2,
                 \ldots {}, M_d $ models $ \forall x, [G_d (x) \to
                 \exists y, F(x, y) = 0] $, then for infinitely many
                 integers $d$, the depth of the term $ G_d $, that is,
                 the maximal number of nestings of the operations in it,
                 is at least $ \epsilon (\log d)^{1 / 2} = \epsilon
                 (\log \log n)^{1 / 2} $. The following is a
                 consequence. We are considering RAMs $ N_n $, with
                 wordlength $ n = 2^d $, whose arithmetic instructions
                 are the arithmetic operations listed above, and also
                 have the usual other RAM instructions. The size of the
                 memory is restricted only by the address space, that
                  is, it is $ 2^n $ words. The RAMs have a finite
                 instruction set, each instruction is encoded by a fixed
                 natural number independently of $n$. Therefore a
                 program $P$ can run on each machine $ N_n $, if $ n =
                 2^d $ is sufficiently large. We show that there exists
                 an $ \epsilon > 0 $ and a program $P$, such that it
                 satisfies the following two conditions. (i) For all
                  sufficiently large $ n = 2^d $, if $P$ running on $ N_n $
                 gets an input consisting of two words $a$ and $b$,
                 then, in constant time, it gives a $ 0, 1 $ output $
                 P_n(a, b) $. (ii) Suppose that $Q$ is a program such
                 that for each sufficiently large $ n = 2^d $, if $Q$,
                 running on $ N_n $, gets a word $a$ of length $n$ as an
                 input, then it decides whether there exists a word $b$
                  of length $n$ such that $ P_n(a, b) = 0 $. Then, for
                 infinitely many positive integers $d$, there exists a
                 word $a$ of length $ n = 2^d $, such that the running
                 time of $Q$ on $ N_n $ at input $a$ is at least $
                 \epsilon (\log d)^{1 / 2} (\log \log d)^{-1} \geq (\log
                 d)^{1 / 2 - \epsilon } = (\log \log n)^{1 / 2 -
                 \epsilon } $.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Beck:2013:STR,
  author =       "Chris Beck and Jakob Nordstrom and Bangsheng Tang",
  title =        "Some trade-off results for polynomial calculus:
                 extended abstract",
  crossref =     "ACM:2013:SPF",
  pages =        "813--822",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488711",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We present size-space trade-offs for the polynomial
                 calculus (PC) and polynomial calculus resolution (PCR)
                 proof systems. These are the first true size-space
                 trade-offs in any algebraic proof system, showing that
                 size and space cannot be simultaneously optimized in
                 these models. We achieve this by extending essentially
                 all known size-space trade-offs for resolution to PC
                 and PCR. As such, our results cover space complexity
                 from constant all the way up to exponential and yield
                 mostly superpolynomial or even exponential size
                 blow-ups. Since the upper bounds in our trade-offs hold
                 for resolution, our work shows that there are formulas
                 for which adding algebraic reasoning on top of
                 resolution does not improve the trade-off properties in
                 any significant way. As byproducts of our analysis, we
                 also obtain trade-offs between space and degree in PC
                 and PCR exactly matching analogous results for space
                 versus width in resolution, and strengthen the
                 resolution trade-offs in [Beame, Beck, and Impagliazzo
                 '12] to apply also to $k$-CNF formulas.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Bhowmick:2013:NBM,
  author =       "Abhishek Bhowmick and Zeev Dvir and Shachar Lovett",
  title =        "New bounds for matching vector families",
  crossref =     "ACM:2013:SPF",
  pages =        "823--832",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488713",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "A Matching Vector (MV) family modulo m is a pair of
                 ordered lists $ U = (u_1, \ldots {}, u_t) $ and $ V =
                 (v_1, \ldots {}, v_t) $ where $ u_i, v_j \in Z_m^n $
                  with the following inner product pattern: for any $i$,
                  $ \langle u_i, v_i \rangle = 0 $, and for any $ i \neq
                  j $, $ \langle u_i, v_j \rangle \neq 0 $. An MV family
                  is called $q$-restricted if inner products $ \langle
                  u_i, v_j \rangle $
                 take at most $q$ different values. Our interest in MV
                 families stems from their recent application in the
                 construction of sub-exponential locally decodable codes
                 (LDCs). There, $q$-restricted MV families are used to
                 construct LDCs with $q$ queries, and there is special
                 interest in the regime where $q$ is constant. When $m$
                 is a prime it is known that such constructions yield
                 codes with exponential block length. However, for
                 composite $m$ the behaviour is dramatically different.
                 A recent work by Efremenko [8] (based on an approach
                 initiated by Yekhanin [24]) gives the first
                 sub-exponential LDC with constant queries. It is based
                 on a construction of a MV family of super-polynomial
                 size by Grolmusz [10] modulo composite $m$. In this
                 work, we prove two lower bounds on the block length of
                 LDCs which are based on black box construction using MV
                 families. When $q$ is constant (or sufficiently small),
                 we prove that such LDCs must have a quadratic block
                 length. When the modulus $m$ is constant (as it is in
                 the construction of Efremenko [8]) we prove a
                 super-polynomial lower bound on the block-length of the
                 LDCs, assuming a well-known conjecture in additive
                 combinatorics, the polynomial Freiman-Ruzsa conjecture
                 over Z$_m$.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Ben-Sasson:2013:NFL,
  author =       "Eli Ben-Sasson and Ariel Gabizon and Yohay Kaplan and
                  Swastik Kopparty and Shubhangi Saraf",
  title =        "A new family of locally correctable codes based on
                 degree-lifted algebraic geometry codes",
  crossref =     "ACM:2013:SPF",
  pages =        "833--842",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488714",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We describe new constructions of error correcting
                 codes, obtained by ``degree-lifting'' a short algebraic
                 geometry base-code of block-length $q$ to a lifted-code
                 of block-length $ q^m $, for arbitrary integer $m$. The
                 construction generalizes the way degree-d, univariate
                 polynomials evaluated over the $q$-element field (also
                 known as Reed--Solomon codes) are ``lifted'' to
                 degree-d, $m$-variate polynomials (Reed--Muller codes).
                 A number of properties are established: The rate of the
                 degree-lifted code is approximately a 1/m!-fraction of
                 the rate of the base-code. The relative distance of the
                 degree-lifted code is at least as large as that of the
                 base-code. This is proved using a generalization of the
                 Schwartz-Zippel Lemma to degree-lifted
                 Algebraic-Geometry codes. [Local correction] If the
                 base code is invariant under a group that is ``close''
                 to being doubly-transitive (in a precise manner defined
                 later) then the degree-lifted code is locally
                 correctable with query complexity at most $ q^2 $. The
                 automorphisms of the base-code are crucially used to
                 generate query-sets, abstracting the use of
                 affine-lines in the local correction procedure of
                 Reed--Muller codes. Taking a concrete illustrating
                 example, we show that degree-lifted Hermitian codes
                 form a family of locally correctable codes over an
                 alphabet that is significantly smaller than that
                 obtained by Reed--Muller codes of similar constant
                 rate, message length, and distance.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Guruswami:2013:LDR,
  author =       "Venkatesan Guruswami and Chaoping Xing",
  title =        "List decoding {Reed--Solomon}, algebraic-geometric,
                  and {Gabidulin} subcodes up to the {Singleton} bound",
  crossref =     "ACM:2013:SPF",
  pages =        "843--852",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488715",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We consider Reed--Solomon (RS) codes whose evaluation
                 points belong to a subfield, and give a
                 linear-algebraic list decoding algorithm that can
                 correct a fraction of errors approaching the code
                 distance, while pinning down the candidate messages to
                 a well-structured affine space of dimension a constant
                 factor smaller than the code dimension. By pre-coding
                 the message polynomials into a subspace-evasive set, we
                 get a Monte Carlo construction of a subcode of
                 Reed--Solomon codes that can be list decoded from a
                 fraction $ (1 - R - \epsilon) $ of errors in polynomial
                 time (for any fixed $ \epsilon > 0 $) with a list size
                 of $ O(1 / \epsilon) $. Our methods extend to
                 algebraic-geometric (AG) codes, leading to a similar
                 claim over constant-sized alphabets. This matches
                 parameters of recent results based on folded variants
                  of RS and AG codes, but our construction here gives
                 subcodes of Reed--Solomon and AG codes themselves
                 (albeit with restrictions on the evaluation points).
                 Further, the underlying algebraic idea also extends
                 nicely to Gabidulin's construction of rank-metric codes
                 based on linearized polynomials. This gives the first
                 construction of positive rate rank-metric codes list
                 decodable beyond half the distance, and in fact gives
                 codes of rate $R$ list decodable up to the optimal $ (1
                 - R - \epsilon) $ fraction of rank errors. A similar
                 claim holds for the closely related subspace codes
                 studied by Koetter and Kschischang. We introduce a new
                 notion called subspace designs as another way to
                 pre-code messages and prune the subspace of candidate
                 solutions. Using these, we also get a deterministic
                 construction of a polynomial time list decodable
                 subcode of RS codes. By using a cascade of several
                 subspace designs, we extend our approach to AG codes,
                 which gives the first deterministic construction of an
                 algebraic code family of rate $R$ with efficient list
                 decoding from $ 1 - R - \epsilon $ fraction of errors
                 over an alphabet of constant size (that depends only on
                 $ \epsilon $). The list size bound is almost a
                  constant (governed by $ \log^* $ (block length)), and
                 the code can be constructed in quasi-polynomial time.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Wootters:2013:LDR,
  author =       "Mary Wootters",
  title =        "On the list decodability of random linear codes with
                 large error rates",
  crossref =     "ACM:2013:SPF",
  pages =        "853--860",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488716",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "It is well known that a random $q$-ary code of rate $
                 \Omega (\epsilon^2) $ is list decodable up to radius $
                 (1 - 1 / q - \epsilon) $ with list sizes on the order
                 of $ 1 / \epsilon^2 $, with probability $ 1 - o(1) $.
                 However, until recently, a similar statement about
                  random linear codes remained elusive. In a
                 recent paper, Cheraghchi, Guruswami, and Velingker show
                 a connection between list decodability of random linear
                 codes and the Restricted Isometry Property from
                 compressed sensing, and use this connection to prove
                 that a random linear code of rate $ \Omega (\epsilon^2
                 / \log^3 (1 / \epsilon)) $ achieves the list decoding
                 properties above, with constant probability. We improve
                 on their result to show that in fact we may take the
                 rate to be $ \Omega (\epsilon^2) $, which is optimal,
                 and further that the success probability is $ 1 - o(1)
                 $, rather than constant. As an added benefit, our proof
                 is relatively simple. Finally, we extend our methods to
                 more general ensembles of linear codes. As an example,
                 we show that randomly punctured Reed--Muller codes have
                 the same list decoding properties as the original
                 codes, even when the rate is improved to a constant.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Brandao:2013:QFT,
  author =       "Fernando G. S. L. Brandao and Aram W. Harrow",
  title =        "Quantum {de Finetti} theorems under local measurements
                 with applications",
  crossref =     "ACM:2013:SPF",
  pages =        "861--870",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488718",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Quantum de Finetti theorems are a useful tool in the
                 study of correlations in quantum multipartite states.
                 In this paper we prove two new quantum de Finetti
                 theorems, both showing that under tests formed by local
                 measurements in each of the subsystems one can get a
                 much improved error dependence on the dimension of the
                 subsystems. We also obtain similar results for
                 non-signaling probability distributions. We give the
                 following applications of the results to quantum
                 complexity theory, polynomial optimization, and quantum
                 information theory: We prove the optimality of the
                 Chen-Drucker protocol for 3-SAT, under the assumption
                 there is no subexponential-time algorithm for SAT. In
                 the protocol a prover sends to a verifier $ \sqrt n
                 \polylog (n) $ unentangled quantum states, each
                 composed of $ O(\log (n)) $ qubits, as a proof of the
                 satisfiability of a 3-SAT instance with $n$ variables
                 and $ O(n) $ clauses. The quantum verifier checks the
                 validity of the proof by performing local measurements
                 on each of the proofs and classically processing the
                 outcomes. We show that any similar protocol with $
                 O(n^{1 / 2 - \epsilon }) $ qubits would imply a $ \exp
                 (n^{1 - 2 \epsilon } \polylog (n)) $-time algorithm for
                 3-SAT. We show that the maximum winning probability of
                 free games (in which the questions to each prover are
                 chosen independently) can be estimated by linear
                 programming in time $ \exp (O(\log |Q| + \log^2 |A| /
                 \epsilon^2)) $, with $ |Q| $ and $ |A| $ the question
                 and answer alphabet sizes, respectively, matching the
                 performance of a previously known algorithm due to
                 Aaronson, Impagliazzo, Moshkovitz, and Shor. This
                 result follows from a new monogamy relation for
                 non-locality, showing that $k$-extendible non-signaling
                 distributions give at most a $ O(k^{-1 / 2}) $
                 advantage over classical strategies for free games. We
                 also show that 3-SAT with $n$ variables can be reduced
                 to obtaining a constant error approximation of the
                 maximum winning probability under entangled strategies
                 of $ O(\sqrt n) $-player one-round non-local games, in
                 which only two players are selected to send $ O(\sqrt
                 n) $-bit messages. We show that the optimization of
                 certain polynomials over the complex hypersphere can be
                 performed in quasipolynomial time in the number of
                 variables $n$ by considering $ O(\log (n)) $ rounds of
                 the Sum-of-Squares (Parrilo/Lasserre) hierarchy of
                  semidefinite programs. This can be considered an
                  analogue, for the hypersphere, of similar known
                  results for the simplex. As an application to
                  entanglement
                 theory, we find a quasipolynomial-time algorithm for
                 deciding multipartite separability. We consider a
                 quantum tomography result due to Aaronson --- showing
                 that given an unknown $n$-qubit state one can perform
                 tomography that works well for most observables by
                 measuring only $ O(n) $ independent and identically
                 distributed (i.i.d.) copies of the state --- and relax
                  the assumption of having i.i.d. copies of the state to
                 merely the ability to select subsystems at random from
                 a quantum multipartite state. The proofs of the new
                 quantum de Finetti theorems are based on information
                 theory, in particular on the chain rule of mutual
                 information. The results constitute improvements and
                 generalizations of a recent de Finetti theorem due to
                 Brandao, Christandl and Yard.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Brandao:2013:PSA,
  author =       "Fernando G. S. L. Brandao and Aram W. Harrow",
  title =        "Product-state approximations to quantum ground
                 states",
  crossref =     "ACM:2013:SPF",
  pages =        "871--880",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488719",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "The local Hamiltonian problem consists of estimating
                 the ground-state energy (given by the minimum
                 eigenvalue) of a local quantum Hamiltonian. It can be
                 considered as a quantum generalization of constraint
                 satisfaction problems (CSPs) and has a key role in
                 quantum complexity theory, being the first and most
                  natural QMA-complete problem known. An interesting
                 regime for the local Hamiltonian problem is that of
                 extensive error, where one is interested in estimating
                 the mean ground-state energy to constant accuracy. The
                  problem is NP-hard by the PCP theorem, but whether it
                  is QMA-hard is an important open question in quantum
                 complexity theory. A positive solution would represent
                 a quantum analogue of the PCP theorem. A key feature
                 that distinguishes quantum Hamiltonians from classical
                 CSPs is that the solutions may involve complicated
                 entangled states. In this paper, we demonstrate several
                 large classes of Hamiltonians for which product (i.e.
                 unentangled) states can approximate the ground state
                 energy to within a small extensive error. First, we
                 show the mere existence of a good product-state
                 approximation for the ground-state energy of 2-local
                  Hamiltonians with one or more of the following
                 properties: (1) super-constant degree, (2) small
                 expansion, or (3) a ground state with sublinear
                 entanglement with respect to some partition into small
                 pieces. The approximation based on degree is a new and
                 surprising difference between quantum Hamiltonians and
                 classical CSPs, since in the classical setting, higher
                 degree is usually associated with harder CSPs. The
                 approximation based on expansion is not new, but the
                 approximation based on low entanglement was previously
                 known only in the regime where the entanglement was
                 close to zero. Since the existence of a low-energy
                 product state can be checked in NP, this implies that
                 any Hamiltonian used for a quantum PCP theorem should
                 have: (1) constant degree, (2) constant expansion, (3)
                 a ``volume law'' for entanglement with respect to any
                 partition into small parts. Second, we show that in
                 several cases, good product-state approximations not
                 only exist, but can be found in deterministic
                 polynomial time: (1) 2-local Hamiltonians on any planar
                 graph, solving an open problem of Bansal, Bravyi, and
                  Terhal, (2) dense $k$-local Hamiltonians for any
                  constant $k$, solving an open problem of Gharibian and
                 Kempe, and (3) 2-local Hamiltonians on graphs with low
                 threshold rank, via a quantum generalization of a
                 recent result of Barak, Raghavendra and Steurer. Our
                 work involves two new tools which may be of independent
                 interest. First, we prove a new quantum version of the
                 de Finetti theorem which does not require the usual
                 assumption of symmetry. Second, we describe a way to
                 analyze the application of the Lasserre/Parrilo SDP
                 hierarchy to local quantum Hamiltonians.",
  acknowledgement = ack-nhfb,
}
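
%%% [Editor's sketch for Brandao:2013:PSA] A micro-instance of the
%%% question the abstract studies, in Python (our own toy example, not
%%% from the paper): for the two-qubit Heisenberg interaction the true
%%% ground energy is -3, while the energy of any product (unentangled)
%%% state equals the dot product of two Bloch vectors and so cannot go
%%% below -1.
%%%
%%%     import numpy as np
%%%
%%%     X = np.array([[0, 1], [1, 0]], complex)
%%%     Y = np.array([[0, -1j], [1j, 0]])
%%%     Z = np.diag([1.0, -1.0]).astype(complex)
%%%     H = np.kron(X, X) + np.kron(Y, Y) + np.kron(Z, Z)  # 2-local term
%%%
%%%     ground = np.linalg.eigvalsh(H).min()   # exact ground energy: -3
%%%
%%%     def random_qubit(rng):
%%%         v = rng.standard_normal(2) + 1j * rng.standard_normal(2)
%%%         return v / np.linalg.norm(v)
%%%
%%%     # best product-state energy found by random search: close to -1
%%%     rng = np.random.default_rng(1)
%%%     best = min(np.real(np.conj(np.kron(a, b)) @ H @ np.kron(a, b))
%%%                for a, b in ((random_qubit(rng), random_qubit(rng))
%%%                             for _ in range(20000)))
%%%     print(ground, best)    # -3.0 vs roughly -1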

@InProceedings{Ta-Shma:2013:IWC,
  author =       "Amnon Ta-Shma",
  title =        "Inverting well conditioned matrices in quantum
                 logspace",
  crossref =     "ACM:2013:SPF",
  pages =        "881--890",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488720",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We show that quantum computers improve on the best
                 known classical algorithms for matrix inversion (and
                 singular value decomposition) as far as space is
                 concerned. This adds to the (still short) list of
                 important problems where quantum computers are of help.
                 Specifically, we show that the inverse of a well
                 conditioned matrix can be approximated in quantum
                 logspace with intermediate measurements. This should be
                 compared with the best known classical algorithm for
                 the problem that requires $ \Omega (\log^2 n) $ space.
                 We also show how to approximate the spectrum of a
                 normal matrix, or the singular values of an arbitrary
                 matrix, with $ \epsilon $ additive accuracy, and how to
                 approximate the singular value decomposition (SVD) of a
                 matrix whose singular values are well separated. The
                 technique builds on ideas from several previous works,
                 including simulating Hamiltonians in small quantum
                 space (building on [2] and [10]), treating a Hermitian
                 matrix as a Hamiltonian and running the quantum phase
                 estimation procedure on it (building on [5]) and making
                 small space probabilistic (and quantum) computation
                 consistent through the use of offline randomness and
                 the shift and truncate method (building on [8]).",
  acknowledgement = ack-nhfb,
}
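
%%% [Editor's sketch for Ta-Shma:2013:IWC] The abstract's core move is
%%% to treat a Hermitian matrix as a Hamiltonian and read its spectrum
%%% off via quantum phase estimation.  The classical Python sketch
%%% below (ours, not the quantum algorithm) shows only the spectral
%%% identity being exploited: a well conditioned Hermitian matrix is
%%% inverted through its eigendecomposition, the same data that phase
%%% estimation approximates.
%%%
%%%     import numpy as np
%%%
%%%     def invert_via_spectrum(H, kappa):
%%%         """Invert Hermitian H under the promise that its eigenvalues
%%%         have magnitude in [1/kappa, 1] (the well conditioned case)."""
%%%         evals, evecs = np.linalg.eigh(H)
%%%         assert np.all(np.abs(evals) >= 1.0 / kappa - 1e-12)
%%%         return (evecs / evals) @ evecs.conj().T  # V diag(1/e) V^+
%%%
%%%     rng = np.random.default_rng(0)
%%%     A = rng.standard_normal((4, 4))
%%%     H = A + A.T                                # Hermitian test matrix
%%%     H /= np.abs(np.linalg.eigvalsh(H)).max()   # spectrum in [-1, 1]
%%%     kappa = 1.0 / np.abs(np.linalg.eigvalsh(H)).min()
%%%     print(np.allclose(invert_via_spectrum(H, kappa) @ H, np.eye(4)))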

@InProceedings{Ambainis:2013:SAE,
  author =       "Andris Ambainis",
  title =        "Superlinear advantage for exact quantum algorithms",
  crossref =     "ACM:2013:SPF",
  pages =        "891--900",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488721",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "A quantum algorithm is exact if, on any input data, it
                 outputs the correct answer with certainty (probability
                 1). A key question is: how big is the advantage of
                 exact quantum algorithms over their classical
                 counterparts: deterministic algorithms. For total
                 Boolean functions in the query model, the biggest known
                 gap was just a factor of $2$: PARITY of $N$ input bits
                 requires $N$ queries classically but can be computed
                 with $ N / 2 $ queries by an exact quantum algorithm.
                 We present the first example of a Boolean function $
                 f(x_1, \ldots {}, x_N) $ for which exact quantum
                 algorithms have superlinear advantage over
                 deterministic algorithms. Any deterministic algorithm
                 that computes our function must use $N$ queries but an
                 exact quantum algorithm can compute it with $
                 O(N^{0.8675 \ldots }) $ queries. A modification of our
                 function gives a similar result for communication
                  complexity: there is a function $f$ which can be computed
                 by an exact quantum protocol that communicates $
                 O(N^{0.8675 \ldots }) $ quantum bits but requires $
                 \Omega (N) $ bits of communication for classical
                 protocols.",
  acknowledgement = ack-nhfb,
}
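
%%% [Editor's sketch for Ambainis:2013:SAE] The factor-2 gap the
%%% abstract starts from comes from a folklore construction: the parity
%%% of two bits can be computed exactly with one quantum query, so
%%% pairing up bits computes PARITY of $N$ bits in $ N / 2 $ queries.
%%% A state-vector simulation in Python (our illustration, not the
%%% paper's new function):
%%%
%%%     import numpy as np
%%%     from itertools import product
%%%
%%%     H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)   # Hadamard gate
%%%
%%%     def parity_one_query(x1, x2):
%%%         state = H @ np.array([1.0, 0.0])       # (|0> + |1>)/sqrt(2)
%%%         # one phase-oracle query: |i> -> (-1)^{x_i} |i>
%%%         state = state * np.array([(-1) ** x1, (-1) ** x2])
%%%         state = H @ state                      # interfere
%%%         # outcome is |x1 XOR x2> with probability exactly 1
%%%         return int(np.isclose(abs(state[1]), 1.0))
%%%
%%%     for x1, x2 in product([0, 1], repeat=2):
%%%         assert parity_one_query(x1, x2) == x1 ^ x2
%%%     print("parity of 2 bits from 1 exact quantum query")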

@InProceedings{Li:2013:AKM,
  author =       "Shi Li and Ola Svensson",
  title =        "Approximating $k$-median via pseudo-approximation",
  crossref =     "ACM:2013:SPF",
  pages =        "901--910",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488723",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We present a novel approximation algorithm for
                 $k$-median that achieves an approximation guarantee of
                  $ 1 + \sqrt 3 + \epsilon $, improving upon the
                  decade-old ratio of $ 3 + \epsilon $. Our approach is
                  based on two components, each of which, we believe, is
                  of independent interest. First, we show that in order
                  to give an $ \alpha $-approximation algorithm for
                 $k$-median, it is sufficient to give a
                 pseudo-approximation algorithm that finds an $ \alpha
                 $-approximate solution by opening $ k + O(1) $
                 facilities. This is a rather surprising result as there
                 exist instances for which opening $ k + 1 $ facilities
                  may lead to a significantly smaller cost than if only $k$
                 facilities were opened. Second, we give such a
                 pseudo-approximation algorithm with $ \alpha = 1 +
                 \sqrt 3 + \epsilon $. Prior to our work, it was not
                 even known whether opening $ k + o(k) $ facilities
                 would help improve the approximation ratio.",
  acknowledgement = ack-nhfb,
}
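
%%% [Editor's sketch for Li:2013:AKM] The abstract calls it surprising
%%% that opening $ k + O(1) $ facilities can be related back to $k$:
%%% here is the underlying phenomenon on a toy instance of ours, where
%%% one extra facility shrinks the $k$-median cost from 3000 to 4.
%%%
%%%     from itertools import combinations
%%%
%%%     def kmedian_cost(points, k):
%%%         # brute force: centers are chosen among the points themselves
%%%         return min(sum(min(abs(p - c) for c in centers) for p in points)
%%%                    for centers in combinations(points, k))
%%%
%%%     points = [0, 1, 2, 1000, 1001, 1002]  # two far-apart clusters
%%%     print(kmedian_cost(points, 1))  # 3000: one cluster served remotely
%%%     print(kmedian_cost(points, 2))  # 4: one center per cluster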

@InProceedings{Kelner:2013:SCA,
  author =       "Jonathan A. Kelner and Lorenzo Orecchia and Aaron
                 Sidford and Zeyuan Allen Zhu",
  title =        "A simple, combinatorial algorithm for solving {SDD}
                 systems in nearly-linear time",
  crossref =     "ACM:2013:SPF",
  pages =        "911--920",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488724",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "In this paper, we present a simple combinatorial
                 algorithm that solves symmetric diagonally dominant
                 (SDD) linear systems in nearly-linear time. It uses
                 little of the machinery that previously appeared to be
                  necessary for such an algorithm. It does not require
                 recursive preconditioning, spectral sparsification, or
                 even the Chebyshev Method or Conjugate Gradient. After
                 constructing a ``nice'' spanning tree of a graph
                 associated with the linear system, the entire algorithm
                 consists of the repeated application of a simple update
                 rule, which it implements using a lightweight data
                 structure. The algorithm is numerically stable and can
                 be implemented without the increased bit-precision
                 required by previous solvers. As such, the algorithm
                 has the fastest known running time under the standard
                 unit-cost RAM model. We hope the simplicity of the
                 algorithm and the insights yielded by its analysis will
                 be useful in both theory and practice.",
  acknowledgement = ack-nhfb,
}
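
%%% [Editor's sketch for Kelner:2013:SCA] The "simple update rule" is a
%%% Kaczmarz-style cycle repair: start from the flow that routes the
%%% demands along a spanning tree, then repeatedly cancel the net
%%% voltage drop around one off-tree cycle.  Below is a deliberately
%%% naive Python rendering on a fixed 4-node graph with unit
%%% resistances (the paper samples cycles by stretch and uses a
%%% lightweight data structure to make each update fast; none of that
%%% is attempted here).
%%%
%%%     import numpy as np
%%%
%%%     edges = [(0, 1), (1, 2), (2, 3), (3, 0), (0, 2)]  # unit resistances
%%%     tree = [(0, 1), (1, 2), (2, 3)]       # spanning tree: path 0-1-2-3
%%%     b = np.array([1.0, 0.0, 0.0, -1.0])   # current injected at each node
%%%
%%%     def tree_path(u, v):                  # signed edges of path u -> v
%%%         if u < v:
%%%             return [((w, w + 1), +1.0) for w in range(u, v)]
%%%         return [((w - 1, w), -1.0) for w in range(u, v, -1)]
%%%
%%%     # an off-tree edge closes one cycle: the edge, then the tree path back
%%%     cycles = [[((u, v), +1.0)] + tree_path(v, u)
%%%               for (u, v) in edges if (u, v) not in tree]
%%%
%%%     flow = {e: 0.0 for e in edges}
%%%     for i in range(3):                    # route all demand along the tree
%%%         flow[(i, i + 1)] = b[: i + 1].sum()
%%%
%%%     for _ in range(200):                  # the simple update rule
%%%         for cyc in cycles:
%%%             delta = sum(s * flow[e] for e, s in cyc) / len(cyc)
%%%             for e, s in cyc:
%%%                 flow[e] -= s * delta
%%%
%%%     x = np.zeros(4)                       # voltages, read off the tree
%%%     for i in range(3):
%%%         x[i + 1] = x[i] - flow[(i, i + 1)]
%%%
%%%     L = np.diag([3.0, 2.0, 3.0, 2.0])     # the graph Laplacian
%%%     for u, v in edges:
%%%         L[u, v] = L[v, u] = -1.0
%%%     print(np.allclose(L @ x, b))          # True: L x = b solved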

@InProceedings{Sherstov:2013:CLB,
  author =       "Alexander A. Sherstov",
  title =        "Communication lower bounds using directional
                 derivatives",
  crossref =     "ACM:2013:SPF",
  pages =        "921--930",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488725",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We study the set disjointness problem in the most
                 powerful bounded-error model: the
                 number-on-the-forehead model with $k$ parties and
                 arbitrary classical or quantum communication. We obtain
                  a communication lower bound of $ \Omega (\sqrt {n} /
                  (2^k k)) $ bits, which is essentially optimal. Proving
                 it was a longstanding open problem even in restricted
                 settings, such as one-way classical protocols with $ k
                 = 4 $ parties (Wigderson 1997). The proof contributes a
                 novel technique for lower bounds on multiparty
                 communication, based on directional derivatives of
                 communication protocols over the reals.",
  acknowledgement = ack-nhfb,
}
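
%%% [Editor's note for Sherstov:2013:CLB] For reference, the function
%%% whose $k$-party number-on-the-forehead complexity the abstract pins
%%% down, written out in Python (in the NOF model, player $i$ sees
%%% every set except the one on their own forehead):
%%%
%%%     def disjointness(sets, n):
%%%         """1 iff no element of range(n) lies in all of the k sets."""
%%%         return int(not any(all(i in s for s in sets) for i in range(n)))
%%%
%%%     print(disjointness([{0, 2}, {1, 2}, {2, 3}], 4))  # 2 is common -> 0
%%%     print(disjointness([{0, 2}, {1, 3}, {2, 3}], 4))  # disjoint     -> 1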

@InProceedings{Andoni:2013:HFU,
  author =       "Alexandr Andoni and Assaf Goldberger and Andrew
                 McGregor and Ely Porat",
  title =        "Homomorphic fingerprints under misalignments:
                 sketching edit and shift distances",
  crossref =     "ACM:2013:SPF",
  pages =        "931--940",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488726",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "Fingerprinting is a widely-used technique for
                 efficiently verifying that two files are identical.
                 More generally, linear sketching is a form of lossy
                 compression (based on random projections) that also
                 enables the ``dissimilarity'' of non-identical files to
                 be estimated. Many sketches have been proposed for
                 dissimilarity measures that decompose coordinate-wise
                 such as the Hamming distance between alphanumeric
                 strings, or the Euclidean distance between vectors.
                 However, virtually nothing is known on sketches that
                 would accommodate alignment errors. With such errors,
                 Hamming or Euclidean distances are rendered useless: a
                 small misalignment may result in a file that looks very
                  dissimilar to the original file according to such
                 measures. In this paper, we present the first linear
                 sketch that is robust to a small number of alignment
                 errors. Specifically, the sketch can be used to
                 determine whether two files are within a small Hamming
                 distance of being a cyclic shift of each other.
                 Furthermore, the sketch is homomorphic with respect to
                 rotations: it is possible to construct the sketch of a
                 cyclic shift of a file given only the sketch of the
                 original file. The relevant dissimilarity measure,
                 known as the shift distance, arises in the context of
                  embedding edit distance, and our result addresses an
                 open problem [Question 13 in
                 Indyk-McGregor-Newman-Onak'11] with a rather surprising
                  outcome. Our sketch projects a length $n$ file into $
                  D(n) \cdot \polylog n $ dimensions where $ D(n) \leq
                  n $ is the number of divisors of $n$. The striking
                  fact is
                 that this is near-optimal, i.e., the $ D(n) $
                 dependence is inherent to a problem that is ostensibly
                 about lossy compression. In contrast, we then show that
                  any sketch for estimating the edit distance between
                  two files, even when the distance is small, requires
                  sketches whose size is
                 nearly linear in $n$. This lower bound addresses a
                 long-standing open problem on the low distortion
                 embeddings of edit distance [Question 2.15 in
                 Naor-Matousek'11, Indyk'01], for the case of linear
                 embeddings.",
  acknowledgement = ack-nhfb,
}
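
%%% [Editor's sketch for Andoni:2013:HFU] Not the paper's sketch, but
%%% the exact version of the question it relaxes can be checked with
%%% one FFT: circular cross-correlation counts, for every cyclic shift
%%% at once, how many positions of two binary files agree, giving the
%%% Hamming distance to the nearest cyclic shift in O(n log n) time.
%%%
%%%     import numpy as np
%%%
%%%     def best_shift_agreement(x, y):
%%%         """max over shifts s of |{i : x[i] == y[(i - s) % n]}|."""
%%%         x, y = np.asarray(x, float), np.asarray(y, float)
%%%         corr = lambda a, b: np.fft.ifft(np.fft.fft(a) *
%%%                                         np.conj(np.fft.fft(b))).real
%%%         # count matching 1s and matching 0s separately, per shift
%%%         agree = corr(x, y) + corr(1 - x, 1 - y)
%%%         return int(round(agree.max()))
%%%
%%%     x = [1, 0, 1, 1, 0, 0, 1, 0]
%%%     y = x[3:] + x[:3]        # a cyclic shift of x ...
%%%     y[5] ^= 1                # ... plus one substitution
%%%     print(len(x) - best_shift_agreement(x, y))   # prints 1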

@InProceedings{Chonev:2013:OPH,
  author =       "Ventsislav Chonev and Jo{\"e}l Ouaknine and James
                 Worrell",
  title =        "The orbit problem in higher dimensions",
  crossref =     "ACM:2013:SPF",
  pages =        "941--950",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488728",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We consider higher-dimensional versions of Kannan and
                  Lipton's Orbit Problem---determining whether a target
                  vector space $V$ may be reached from a starting point
                  $x$ under repeated applications of a linear
                  transformation $A$. Answering two questions posed by
                  Kannan and Lipton in the 1980s, we show that when $V$
                  has dimension one, this problem is solvable in
                  polynomial time, and when $V$ has dimension two or
                  three, the problem is in
                 NP$^{RP}$.",
  acknowledgement = ack-nhfb,
}
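
%%% [Editor's note for Chonev:2013:OPH] The problem statement in code
%%% (a brute-force semi-decision only; the point of the paper is that
%%% genuinely deciding this requires the number-theoretic machinery
%%% developed there):
%%%
%%%     import numpy as np
%%%
%%%     def hits_subspace(A, x, V_basis, T_max=1000, tol=1e-9):
%%%         """First t < T_max with A^t x in span(V_basis), else None."""
%%%         Q, _ = np.linalg.qr(np.asarray(V_basis, float).T)
%%%         v = np.asarray(x, float)
%%%         for t in range(T_max):
%%%             if np.linalg.norm(v - Q @ (Q.T @ v)) <= tol:
%%%                 return t
%%%             v = A @ v
%%%         return None
%%%
%%%     A = np.array([[0.0, -1.0], [1.0, 0.0]])  # rotation by 90 degrees
%%%     print(hits_subspace(A, [1.0, 0.0], [[0.0, 1.0]]))  # 1: A x is on the y-axis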

@InProceedings{Azar:2013:LSD,
  author =       "Yossi Azar and Ilan Reuven Cohen and Iftah Gamzu",
  title =        "The loss of serving in the dark",
  crossref =     "ACM:2013:SPF",
  pages =        "951--960",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488729",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We study the following balls and bins stochastic
                  process: There is a buffer with $B$ bins, and there is
                  a stream of balls $ X = \{ X_1, X_2, \ldots {}, X_T \}
                  $ such that $X_i$ is the number of balls that arrive
                  before time $i$ but after time $ i - 1 $. Once a ball
                  arrives,
                 it is stored in one of the unoccupied bins. If all the
                 bins are occupied then the ball is thrown away. In each
                 time step, we select a bin uniformly at random, clear
                 it, and gain its content. Once the stream of balls
                 ends, all the remaining balls in the buffer are cleared
                 and added to our gain. We are interested in analyzing
                 the expected gain of this randomized process with
                 respect to that of an optimal gain-maximizing strategy,
                 which gets the same online stream of balls, and clears
                  a ball from a bin, if one exists, at any step. We name this
                 gain ratio the loss of serving in the dark. In this
                 paper, we determine the exact loss of serving in the
                 dark. We prove that the expected gain of the randomized
                 process is worse by a factor of $ \rho + \epsilon $
                 from that of the optimal gain-maximizing strategy for
                 any $ \epsilon > 0 $, where $ \rho = \max_{ \alpha > 1}
                 \alpha e^{ \alpha } / ((\alpha - 1)e^{ \alpha } + e -
                 1) \sim 1.69996 $ and $ B = \Omega (1 / \epsilon^3) $.
                 We also demonstrate that this bound is essentially
                 tight as there are specific ball streams for which the
                 above-mentioned gain ratio tends to $ \rho $. Our
                 stochastic process occurs naturally in many
                 applications. We present a prompt and truthful
                 mechanism for bounded capacity auctions, and an
                 application relating to packets scheduling.",
  acknowledgement = ack-nhfb,
}
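
%%% [Editor's sketch for Azar:2013:LSD] The process is easy to play
%%% with by Monte Carlo.  The bursty stream below is our own choice for
%%% illustration (the worst-case streams approaching rho ~ 1.69996 are
%%% constructed in the paper); random clearing loses a constant factor
%%% against a clearer that always empties an occupied bin.
%%%
%%%     import random
%%%
%%%     def simulate(stream, B, trials=2000):  # uniformly random clearing
%%%         total = 0
%%%         for _ in range(trials):
%%%             occ = gain = 0
%%%             for arrivals in stream:
%%%                 occ = min(B, occ + arrivals)   # overflow is lost
%%%                 if random.randrange(B) < occ:  # chosen bin occupied?
%%%                     occ -= 1
%%%                     gain += 1
%%%             total += gain + occ      # leftover buffer is also gained
%%%         return total / trials
%%%
%%%     def optimal(stream, B):          # clears an occupied bin if any
%%%         occ = gain = 0
%%%         for arrivals in stream:
%%%             occ = min(B, occ + arrivals)
%%%             if occ:
%%%                 occ -= 1
%%%                 gain += 1
%%%         return gain + occ
%%%
%%%     B = 100
%%%     stream = ([B] + [0] * (2 * B)) * 5   # periodic bursts of B balls
%%%     print(optimal(stream, B) / simulate(stream, B))  # > 1, below ~1.7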

@InProceedings{Azar:2013:TBO,
  author =       "Yossi Azar and Ilan Reuven Cohen and Seny Kamara and
                 Bruce Shepherd",
  title =        "Tight bounds for online vector bin packing",
  crossref =     "ACM:2013:SPF",
  pages =        "961--970",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488730",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "In the $d$-dimensional bin packing problem (VBP), one
                 is given vectors $ x_1, x_2, \ldots {}, x_n \in R^d $
                 and the goal is to find a partition into a minimum
                  number of feasible sets: $ \{ 1, 2, \ldots {}, n \} =
                  \cup_{i = 1}^s B_i $. A set $ B_i $ is feasible if $
                  \sum_{j \in B_i} x_j \leq 1 $, where $1$ denotes the
                 all $1$'s vector. For online VBP, it has been
                 outstanding for almost 20 years to clarify the gap
                 between the best lower bound $ \Omega (1) $ on the
                 competitive ratio versus the best upper bound of $ O(d)
                  $. We settle this by describing an $ \Omega (d^{1 -
                 \epsilon }) $ lower bound. We also give strong lower
                 bounds (of $ \Omega (d^{1 / B - \epsilon }) $) if the
                 bin size $ B \in Z_+ $ is allowed to grow. Finally, we
                 discuss almost-matching upper bound results for general
                 values of $B$; we show an upper bound whose exponent is
                 additively ``shifted by 1'' from the lower bound
                 exponent.",
  acknowledgement = ack-nhfb,
}
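
%%% [Editor's sketch for Azar:2013:TBO] For context, the upper-bound
%%% side of the gap the abstract closes is achieved by the classical
%%% first-fit heuristic, which is O(d)-competitive for online VBP.  A
%%% plain Python rendering (the item stream is our own example):
%%%
%%%     def first_fit_vbp(vectors):
%%%         """Place each vector (entries in [0,1]) into the first open
%%%         bin where it fits coordinate-wise; else open a new bin."""
%%%         loads, packing = [], []
%%%         for x in vectors:
%%%             for i, load in enumerate(loads):
%%%                 if all(l + xi <= 1.0 for l, xi in zip(load, x)):
%%%                     loads[i] = [l + xi for l, xi in zip(load, x)]
%%%                     packing[i].append(x)
%%%                     break
%%%             else:
%%%                 loads.append(list(x))
%%%                 packing.append([x])
%%%         return packing
%%%
%%%     items = [(0.6, 0.1), (0.5, 0.5), (0.5, 0.8), (0.2, 0.1)]
%%%     print(len(first_fit_vbp(items)))   # 3 bins for this stream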

@InProceedings{Li:2013:SCO,
  author =       "Jian Li and Wen Yuan",
  title =        "Stochastic combinatorial optimization via {Poisson}
                 approximation",
  crossref =     "ACM:2013:SPF",
  pages =        "971--980",
  year =         "2013",
  DOI =          "https://doi.org/10.1145/2488608.2488731",
  bibdate =      "Mon Mar 3 06:30:33 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  abstract =     "We study several stochastic combinatorial problems,
                 including the expected utility maximization problem,
                 the stochastic knapsack problem and the stochastic bin
                 packing problem. A common technical challenge in these
                 problems is to optimize some function (other than the
                 expectation) of the sum of a set of random variables.
                 The difficulty is mainly due to the fact that the
                 probability distribution of the sum is the convolution
                 of a set of distributions, which is not an easy
                 objective function to work with. To tackle this
                 difficulty, we introduce the Poisson approximation
                 technique. The technique is based on the Poisson
                 approximation theorem discovered by Le Cam, which
                 enables us to approximate the distribution of the sum
                 of a set of random variables using a compound Poisson
                 distribution. Using the technique, we can reduce a
                 variety of stochastic problems to the corresponding
                 deterministic multiple-objective problems, which either
                 can be solved by standard dynamic programming or have
                 known solutions in the literature. For the problems
                 mentioned above, we obtain the following results: We
                 first study the expected utility maximization problem
                  introduced recently [Li and Deshpande, FOCS11]. For
                 monotone and Lipschitz utility functions, we obtain an
                 additive PTAS if there is a multidimensional PTAS for
                 the multi-objective version of the problem, strictly
                 generalizing the previous result. The result implies
                 the first additive PTAS for maximizing threshold
                 probability for the stochastic versions of global
                 min-cut, matroid base and matroid intersection. For the
                 stochastic bin packing problem (introduced in
                 [Kleinberg, Rabani and Tardos, STOC97]), we show there
                 is a polynomial time algorithm which uses at most the
                 optimal number of bins, if we relax the size of each
                  bin and the overflow probability by $ \epsilon $ for
                  any constant $ \epsilon > 0 $. Based on this result,
                  we obtain a
                 3-approximation if only the size of each bin can be
                  relaxed by $ \epsilon $, improving the known $ O(1 /
                 \epsilon) $ factor for constant overflow probability.
                 For stochastic knapsack, we show a $ (1 + \epsilon)
                 $-approximation using $ \epsilon $ extra capacity for
                 any $ \epsilon > 0 $, even when the size and reward of
                 each item may be correlated and cancelations of items
                 are allowed. This generalizes the previous work
                  [Bhalgat, Goel and Khanna, SODA11] for the case without
                 correlation and cancelation. Our algorithm is also
                 simpler. We also present a factor $ 2 + \epsilon $
                 approximation algorithm for stochastic knapsack with
                 cancelations, for any constant $ \epsilon > 0 $,
                 improving the current known approximation factor of 8
                 [Gupta, Krishnaswamy, Molinaro and Ravi, FOCS11]. We
                 also study an interesting variant of the stochastic
                 knapsack problem, where the size and the profit of each
                 item are revealed before the decision is made. The
                 problem falls into the framework of Bayesian online
                 selection problems, which has been studied a lot
                 recently.",
  acknowledgement = ack-nhfb,
}
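
%%% [Editor's sketch for Li:2013:SCO] The engine named in the abstract
%%% is Le Cam's Poisson approximation theorem: the pmf of a sum of
%%% independent Bernoulli(p_i) variables differs from that of
%%% Poisson(sum p_i) by at most 2 * sum p_i^2 in l1 distance.  A direct
%%% numerical check in Python (the parameters are our own):
%%%
%%%     import math
%%%
%%%     def bernoulli_sum_pmf(ps):   # exact pmf of the sum, by convolution
%%%         pmf = [1.0]
%%%         for p in ps:
%%%             new = [0.0] * (len(pmf) + 1)
%%%             for k, q in enumerate(pmf):
%%%                 new[k] += q * (1 - p)
%%%                 new[k + 1] += q * p
%%%             pmf = new
%%%         return pmf
%%%
%%%     ps = [0.02 * (i % 5 + 1) for i in range(50)]  # small, heterogeneous
%%%     lam = sum(ps)
%%%     l1 = sum(abs(q - math.exp(-lam) * lam ** k / math.factorial(k))
%%%              for k, q in enumerate(bernoulli_sum_pmf(ps)))
%%%     print(l1, "<=", 2 * sum(p * p for p in ps))   # Le Cam's bound holds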

%%% ====================================================================
%%% Cross-referenced entries must come last:
@Proceedings{ACM:2006:SPT,
  editor =       "{ACM}",
  booktitle =    "{STOC'06: Proceedings of the Thirty-Eighth Annual ACM
                 Symposium on Theory of Computing 2006, Seattle, WA,
                 USA, May 21--23, 2006}",
  title =        "{STOC'06: Proceedings of the Thirty-Eighth Annual ACM
                 Symposium on Theory of Computing 2006, Seattle, WA,
                 USA, May 21--23, 2006}",
  publisher =    pub-ACM,
  address =      pub-ACM:adr,
  pages =        "770 (est.)",
  year =         "2006",
  ISBN =         "1-59593-134-1",
  ISBN-13 =      "978-1-59593-134-4",
  LCCN =         "QA75.5 .A22 2006",
  bibdate =      "Thu May 25 06:13:58 2006",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/stoc.bib;
                 http://www.math.utah.edu/pub/tex/bib/stoc2000.bib;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib;
                 z3950.gbv.de:20011/gvk",
  note =         "ACM order number 508060.",
  URL =          "http://portal.acm.org/citation.cfm?id=1132516",
  acknowledgement = ack-nhfb,
}

@Proceedings{ACM:2010:SPA,
  editor =       "{ACM}",
  booktitle =    "{STOC'10: Proceedings of the 2010 ACM International
                 Symposium on Theory of Computing: June 5--8, 2010,
                 Cambridge, MA, USA}",
  title =        "{STOC'10: Proceedings of the 2010 ACM International
                 Symposium on Theory of Computing: June 5--8, 2010,
                 Cambridge, MA, USA}",
  publisher =    pub-ACM,
  address =      pub-ACM:adr,
  pages =        "xiv + 797",
  year =         "2010",
  ISBN =         "1-60558-817-2",
  ISBN-13 =      "978-1-60558-817-9",
  LCCN =         "QA 76.6 .A152 2010",
  bibdate =      "Wed Sep 1 10:37:53 MDT 2010",
  bibsource =    "z3950.gbv.de:20011/gvk;
                 http://www.math.utah.edu/pub/tex/bib/stoc.bib;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  URL =          "http://www.gbv.de/dms/tib-ub-hannover/63314455x.",
  acknowledgement = ack-nhfb,
  remark =       "42nd annual STOC meeting.",
}

@Proceedings{ACM:2011:SPA,
  editor =       "{ACM}",
  booktitle =    "{STOC'11: Proceedings of the 2011 ACM International
                 Symposium on Theory of Computing: June 6--8, 2011, San
                 Jose, CA, USA}",
  title =        "{STOC'11: Proceedings of the 2011 ACM International
                 Symposium on Theory of Computing: June 6--8, 2011, San
                 Jose, CA, USA}",
  publisher =    pub-ACM,
  address =      pub-ACM:adr,
  pages =        "xxx + 822 (est.)",
  year =         "2011",
  ISBN =         "1-4503-0691-8",
  ISBN-13 =      "978-1-4503-0691-1",
  LCCN =         "????",
  bibdate =      "Wed Sep 1 10:37:53 MDT 2010",
  bibsource =    "z3950.gbv.de:20011/gvk;
                 http://www.math.utah.edu/pub/tex/bib/stoc.bib;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  URL =          "http://www.gbv.de/dms/tib-ub-hannover/63314455x.",
  acknowledgement = ack-nhfb,
  remark =       "43rd annual STOC meeting.",
}

@Proceedings{ACM:2012:SPA,
  editor =       "{ACM}",
  booktitle =    "{STOC'12: Proceedings of the 2012 ACM International
                 Symposium on Theory of Computing: May 19--22, 2012, New
                 York, NY, USA}",
  title =        "{STOC'12: Proceedings of the 2012 ACM International
                 Symposium on Theory of Computing: May 19--22, 2012, New
                 York, NY, USA}",
  publisher =    pub-ACM,
  address =      pub-ACM:adr,
  pages =        "1292 (est.)",
  year =         "2012",
  ISBN =         "1-4503-1245-4",
  ISBN-13 =      "978-1-4503-1245-5",
  LCCN =         "????",
  bibdate =      "Thu Nov 08 19:12:21 2012",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/stoc2010.bib;
                 http://www.math.utah.edu/pub/tex/bib/stoc.bib;
                 z3950.gbv.de:20011/gvk",
  URL =          "http://www.gbv.de/dms/tib-ub-hannover/63314455x.",
  acknowledgement = ack-nhfb,
  remark =       "44th annual STOC meeting.",
}

@Proceedings{ACM:2013:SPF,
  editor =       "{ACM}",
  booktitle =    "{STOC '13: Proceedings of the Forty-fifth Annual ACM
                 Symposium on Theory of Computing: June 1--4, 2013, Palo
                 Alto, California, USA}",
  title =        "{STOC '13: Proceedings of the Forty-fifth Annual ACM
                 Symposium on Theory of Computing: June 1--4, 2013, Palo
                 Alto, California, USA}",
  publisher =    pub-ACM,
  address =      pub-ACM:adr,
  pages =        "980 (est.)",
  year =         "2013",
  ISBN =         "1-4503-2029-5",
  ISBN-13 =      "978-1-4503-2029-0",
  bibdate =      "Mon Mar 3 06:36:05 2014",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/datacompression.bib;
                 http://www.math.utah.edu/pub/tex/bib/prng.bib;
                 http://www.math.utah.edu/pub/tex/bib/stoc.bib;
                 http://www.math.utah.edu/pub/tex/bib/stoc2010.bib",
  acknowledgement = ack-nhfb,
  remark =       "45th annual STOC meeting.",
}